From 5aba84a5bf552ef31c81b4c4fa7619774c6608f9 Mon Sep 17 00:00:00 2001
From: libo
Date: Thu, 2 Jun 2022 20:42:03 +0800
Subject: [PATCH] wifi: update ap6356 driver to bcmdhd.101.10.361.x [1/1]

PD#SWPL-81801
BUG=232494797

Problem:
update ap6356 driver to bcmdhd.101.10.361.x

Solution:
update ap6356 driver to bcmdhd.101.10.361.x

Verify:
adt3

Signed-off-by: libo
Change-Id: I61f0851c85fc9bd8ae5802d2811cfb0a5d4bce05
---
 bcmdhd.101.10.361.x/Kconfig | 61 +
 bcmdhd.101.10.361.x/Makefile | 391 +
 bcmdhd.101.10.361.x/aiutils.c | 2604 ++
 bcmdhd.101.10.361.x/bcm_app_utils.c | 1276 +
 bcmdhd.101.10.361.x/bcm_l2_filter.c | 766 +
 bcmdhd.101.10.361.x/bcmbloom.c | 233 +
 bcmdhd.101.10.361.x/bcmevent.c | 445 +
 bcmdhd.101.10.361.x/bcminternal-android.mk | 88 +
 bcmdhd.101.10.361.x/bcminternal.mk | 60 +
 bcmdhd.101.10.361.x/bcmsdh.c | 953 +
 bcmdhd.101.10.361.x/bcmsdh_linux.c | 594 +
 bcmdhd.101.10.361.x/bcmsdh_sdmmc.c | 2004 ++
 bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c | 388 +
 bcmdhd.101.10.361.x/bcmsdspi.h | 147 +
 bcmdhd.101.10.361.x/bcmsdspi_linux.c | 433 +
 bcmdhd.101.10.361.x/bcmsdstd.c | 5406 +++
 bcmdhd.101.10.361.x/bcmsdstd.h | 301 +
 bcmdhd.101.10.361.x/bcmsdstd_linux.c | 690 +
 bcmdhd.101.10.361.x/bcmspibrcm.c | 1922 +
 bcmdhd.101.10.361.x/bcmsrom.c | 6365 ++++
 bcmdhd.101.10.361.x/bcmstdlib.c | 1251 +
 bcmdhd.101.10.361.x/bcmstdlib_s.c | 298 +
 bcmdhd.101.10.361.x/bcmutils.c | 6097 ++++
 bcmdhd.101.10.361.x/bcmwifi_channels.c | 3000 ++
 bcmdhd.101.10.361.x/bcmwifi_monitor.c | 1071 +
 bcmdhd.101.10.361.x/bcmwifi_radiotap.c | 1035 +
 bcmdhd.101.10.361.x/bcmwifi_rates.c | 607 +
 bcmdhd.101.10.361.x/bcmwifi_rspec.c | 274 +
 bcmdhd.101.10.361.x/bcmwpa.c | 2648 ++
 bcmdhd.101.10.361.x/bcmxtlv.c | 647 +
 bcmdhd.101.10.361.x/dbus.c | 2928 ++
 bcmdhd.101.10.361.x/dbus_usb.c | 1173 +
 bcmdhd.101.10.361.x/dbus_usb_linux.c | 3405 ++
 bcmdhd.101.10.361.x/dhd.h | 4655 +++
 bcmdhd.101.10.361.x/dhd_bitpack.c | 228 +
 bcmdhd.101.10.361.x/dhd_bitpack.h | 33 +
 bcmdhd.101.10.361.x/dhd_bus.h | 424 +
 bcmdhd.101.10.361.x/dhd_buzzz.h | 224 +
 bcmdhd.101.10.361.x/dhd_ccode.c | 274 +
 bcmdhd.101.10.361.x/dhd_cdc.c | 1035 +
 bcmdhd.101.10.361.x/dhd_cfg80211.c | 597 +
 bcmdhd.101.10.361.x/dhd_cfg80211.h | 49 +
 bcmdhd.101.10.361.x/dhd_common.c | 11596 ++++++
 bcmdhd.101.10.361.x/dhd_config.c | 5175 +++
 bcmdhd.101.10.361.x/dhd_config.h | 441 +
 bcmdhd.101.10.361.x/dhd_csi.c | 219 +
 bcmdhd.101.10.361.x/dhd_csi.h | 76 +
 bcmdhd.101.10.361.x/dhd_custom_cis.c | 2010 ++
 bcmdhd.101.10.361.x/dhd_custom_exynos.c | 333 +
 bcmdhd.101.10.361.x/dhd_custom_gpio.c | 437 +
 bcmdhd.101.10.361.x/dhd_custom_hikey.c | 290 +
 bcmdhd.101.10.361.x/dhd_custom_memprealloc.c | 500 +
 bcmdhd.101.10.361.x/dhd_custom_msm.c | 283 +
 bcmdhd.101.10.361.x/dhd_custom_sec.c | 1040 +
 bcmdhd.101.10.361.x/dhd_dbg.h | 637 +
 bcmdhd.101.10.361.x/dhd_dbg_ring.c | 473 +
 bcmdhd.101.10.361.x/dhd_dbg_ring.h | 146 +
 bcmdhd.101.10.361.x/dhd_debug.c | 2853 ++
 bcmdhd.101.10.361.x/dhd_debug.h | 891 +
 bcmdhd.101.10.361.x/dhd_debug_linux.c | 528 +
 bcmdhd.101.10.361.x/dhd_event_log_filter.c | 3236 ++
 bcmdhd.101.10.361.x/dhd_event_log_filter.h | 56 +
 bcmdhd.101.10.361.x/dhd_flowring.c | 1466 +
 bcmdhd.101.10.361.x/dhd_flowring.h | 350 +
 bcmdhd.101.10.361.x/dhd_fwtrace.c | 563 +
 bcmdhd.101.10.361.x/dhd_fwtrace.h | 55 +
 bcmdhd.101.10.361.x/dhd_gpio.c | 497 +
 bcmdhd.101.10.361.x/dhd_ip.c | 1425 +
 bcmdhd.101.10.361.x/dhd_ip.h | 96 +
 bcmdhd.101.10.361.x/dhd_linux.c | 29878 ++++++++++++++++
 bcmdhd.101.10.361.x/dhd_linux.h | 523 +
 bcmdhd.101.10.361.x/dhd_linux_exportfs.c | 2994 ++
 bcmdhd.101.10.361.x/dhd_linux_lb.c | 1402 +
 bcmdhd.101.10.361.x/dhd_linux_pktdump.c | 1578 +
 bcmdhd.101.10.361.x/dhd_linux_pktdump.h | 132 +
 bcmdhd.101.10.361.x/dhd_linux_platdev.c | 1108 +
 bcmdhd.101.10.361.x/dhd_linux_priv.h | 518 +
 bcmdhd.101.10.361.x/dhd_linux_sched.c | 47 +
 bcmdhd.101.10.361.x/dhd_linux_sock_qos.c | 1034 +
 bcmdhd.101.10.361.x/dhd_linux_sock_qos.h | 118 +
 bcmdhd.101.10.361.x/dhd_linux_wq.c | 413 +
 bcmdhd.101.10.361.x/dhd_linux_wq.h | 89 +
 bcmdhd.101.10.361.x/dhd_macdbg.c | 746 +
 bcmdhd.101.10.361.x/dhd_macdbg.h | 34 +
 bcmdhd.101.10.361.x/dhd_mschdbg.c | 796 +
 bcmdhd.101.10.361.x/dhd_mschdbg.h | 36 +
 bcmdhd.101.10.361.x/dhd_msgbuf.c | 15512 ++++++++
 bcmdhd.101.10.361.x/dhd_pcie.c | 17674 +++++++++
 bcmdhd.101.10.361.x/dhd_pcie.h | 1048 +
 bcmdhd.101.10.361.x/dhd_pcie_linux.c | 3379 ++
 bcmdhd.101.10.361.x/dhd_pktlog.c | 1684 +
 bcmdhd.101.10.361.x/dhd_pktlog.h | 311 +
 bcmdhd.101.10.361.x/dhd_plat.h | 58 +
 bcmdhd.101.10.361.x/dhd_pno.c | 4871 +++
 bcmdhd.101.10.361.x/dhd_pno.h | 586 +
 bcmdhd.101.10.361.x/dhd_proto.h | 302 +
 bcmdhd.101.10.361.x/dhd_qos_algo.h | 90 +
 bcmdhd.101.10.361.x/dhd_rtt.c | 4855 +++
 bcmdhd.101.10.361.x/dhd_rtt.h | 555 +
 bcmdhd.101.10.361.x/dhd_sdio.c | 11777 ++++++
 bcmdhd.101.10.361.x/dhd_sec_feature.h | 226 +
 bcmdhd.101.10.361.x/dhd_static_buf.c | 657 +
 bcmdhd.101.10.361.x/dhd_statlog.c | 1081 +
 bcmdhd.101.10.361.x/dhd_statlog.h | 221 +
 bcmdhd.101.10.361.x/dhd_timesync.c | 1239 +
 bcmdhd.101.10.361.x/dhd_timesync.h | 68 +
 bcmdhd.101.10.361.x/dhd_wet.c | 1187 +
 bcmdhd.101.10.361.x/dhd_wet.h | 60 +
 bcmdhd.101.10.361.x/dhd_wlfc.c | 4988 +++
 bcmdhd.101.10.361.x/dhd_wlfc.h | 596 +
 bcmdhd.101.10.361.x/frag.c | 108 +
 bcmdhd.101.10.361.x/frag.h | 32 +
 bcmdhd.101.10.361.x/ftdi_sio_external.h | 39 +
 bcmdhd.101.10.361.x/hnd_pktpool.c | 2130 ++
 bcmdhd.101.10.361.x/hnd_pktq.c | 1548 +
 bcmdhd.101.10.361.x/hndlhl.c | 1241 +
 bcmdhd.101.10.361.x/hndmem.c | 423 +
 bcmdhd.101.10.361.x/hndpmu.c | 9929 +++++
 bcmdhd.101.10.361.x/include/802.11.h | 5920 +++
 bcmdhd.101.10.361.x/include/802.11ah.h | 281 +
 bcmdhd.101.10.361.x/include/802.11ax.h | 1180 +
 bcmdhd.101.10.361.x/include/802.11e.h | 133 +
 bcmdhd.101.10.361.x/include/802.11r.h | 55 +
 bcmdhd.101.10.361.x/include/802.11s.h | 337 +
 bcmdhd.101.10.361.x/include/802.1d.h | 47 +
 bcmdhd.101.10.361.x/include/802.3.h | 49 +
 bcmdhd.101.10.361.x/include/aidmp.h | 438 +
 bcmdhd.101.10.361.x/include/bcm_fwtrace.h | 111 +
 bcmdhd.101.10.361.x/include/bcm_l2_filter.h | 99 +
 bcmdhd.101.10.361.x/include/bcm_mpool_pub.h | 344 +
 bcmdhd.101.10.361.x/include/bcm_ring.h | 585 +
 bcmdhd.101.10.361.x/include/bcmarp.h | 84 +
 bcmdhd.101.10.361.x/include/bcmbloom.h | 73 +
 bcmdhd.101.10.361.x/include/bcmcdc.h | 115 +
 bcmdhd.101.10.361.x/include/bcmdefs.h | 909 +
 bcmdhd.101.10.361.x/include/bcmdevs.h | 626 +
 bcmdhd.101.10.361.x/include/bcmdevs_legacy.h | 188 +
 bcmdhd.101.10.361.x/include/bcmdhcp.h | 86 +
 bcmdhd.101.10.361.x/include/bcmendian.h | 451 +
 bcmdhd.101.10.361.x/include/bcmerror.h | 573 +
 bcmdhd.101.10.361.x/include/bcmeth.h | 109 +
 bcmdhd.101.10.361.x/include/bcmevent.h | 1617 +
 bcmdhd.101.10.361.x/include/bcmicmp.h | 83 +
 bcmdhd.101.10.361.x/include/bcmiov.h | 353 +
 bcmdhd.101.10.361.x/include/bcmip.h | 286 +
 bcmdhd.101.10.361.x/include/bcmipv6.h | 160 +
 bcmdhd.101.10.361.x/include/bcmmsgbuf.h | 1706 +
 bcmdhd.101.10.361.x/include/bcmnvram.h | 162 +
 bcmdhd.101.10.361.x/include/bcmpcie.h | 559 +
 bcmdhd.101.10.361.x/include/bcmpcispi.h | 204 +
 bcmdhd.101.10.361.x/include/bcmperf.h | 33 +
 bcmdhd.101.10.361.x/include/bcmproto.h | 275 +
 bcmdhd.101.10.361.x/include/bcmrand.h | 65 +
 bcmdhd.101.10.361.x/include/bcmsdbus.h | 187 +
 bcmdhd.101.10.361.x/include/bcmsdh.h | 290 +
 bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h | 142 +
 bcmdhd.101.10.361.x/include/bcmsdpcm.h | 304 +
 bcmdhd.101.10.361.x/include/bcmspi.h | 37 +
 bcmdhd.101.10.361.x/include/bcmspibrcm.h | 165 +
 bcmdhd.101.10.361.x/include/bcmsrom.h | 72 +
 bcmdhd.101.10.361.x/include/bcmsrom_fmt.h | 1028 +
 bcmdhd.101.10.361.x/include/bcmsrom_tbl.h | 1303 +
 bcmdhd.101.10.361.x/include/bcmstdlib_s.h | 54 +
 bcmdhd.101.10.361.x/include/bcmtcp.h | 86 +
 bcmdhd.101.10.361.x/include/bcmtlv.h | 375 +
 bcmdhd.101.10.361.x/include/bcmudp.h | 54 +
 bcmdhd.101.10.361.x/include/bcmutils.h | 1639 +
 .../include/bcmwifi_channels.h | 888 +
 bcmdhd.101.10.361.x/include/bcmwifi_monitor.h | 98 +
 .../include/bcmwifi_radiotap.h | 382 +
 bcmdhd.101.10.361.x/include/bcmwifi_rates.h | 1262 +
 bcmdhd.101.10.361.x/include/bcmwifi_rspec.h | 286 +
 bcmdhd.101.10.361.x/include/bcmwpa.h | 634 +
 bcmdhd.101.10.361.x/include/brcm_nl80211.h | 77 +
 bcmdhd.101.10.361.x/include/d11.h | 6055 ++++
 bcmdhd.101.10.361.x/include/d11_cfg.h | 115 +
 .../include/d11reglist_proto.h | 66 +
 bcmdhd.101.10.361.x/include/d11regs.h | 180 +
 bcmdhd.101.10.361.x/include/dbus.h | 627 +
 bcmdhd.101.10.361.x/include/dhd_daemon.h | 55 +
 bcmdhd.101.10.361.x/include/dhdioctl.h | 478 +
 bcmdhd.101.10.361.x/include/dngl_rtlv.h | 66 +
 bcmdhd.101.10.361.x/include/dngl_stats.h | 388 +
 bcmdhd.101.10.361.x/include/dngl_wlhdr.h | 39 +
 bcmdhd.101.10.361.x/include/dnglevent.h | 174 +
 bcmdhd.101.10.361.x/include/dnglioctl.h | 177 +
 bcmdhd.101.10.361.x/include/eap.h | 121 +
 bcmdhd.101.10.361.x/include/eapol.h | 292 +
 bcmdhd.101.10.361.x/include/epivers.h | 51 +
 bcmdhd.101.10.361.x/include/etd.h | 636 +
 bcmdhd.101.10.361.x/include/ethernet.h | 252 +
 bcmdhd.101.10.361.x/include/event_log.h | 666 +
 .../include/event_log_payload.h | 1775 +
 bcmdhd.101.10.361.x/include/event_log_set.h | 142 +
 bcmdhd.101.10.361.x/include/event_log_tag.h | 617 +
 bcmdhd.101.10.361.x/include/event_trace.h | 187 +
 bcmdhd.101.10.361.x/include/fils.h | 424 +
 bcmdhd.101.10.361.x/include/hnd_armtrap.h | 86 +
 bcmdhd.101.10.361.x/include/hnd_cons.h | 98 +
 bcmdhd.101.10.361.x/include/hnd_debug.h | 250 +
 bcmdhd.101.10.361.x/include/hnd_pktpool.h | 288 +
 bcmdhd.101.10.361.x/include/hnd_pktq.h | 330 +
 bcmdhd.101.10.361.x/include/hnd_trap.h | 33 +
 bcmdhd.101.10.361.x/include/hndchipc.h | 47 +
 bcmdhd.101.10.361.x/include/hndd11.h | 121 +
 bcmdhd.101.10.361.x/include/hnddma.h | 338 +
 bcmdhd.101.10.361.x/include/hndlhl.h | 94 +
 bcmdhd.101.10.361.x/include/hndmem.h | 74 +
 bcmdhd.101.10.361.x/include/hndoobr.h | 93 +
 bcmdhd.101.10.361.x/include/hndpmu.h | 348 +
 bcmdhd.101.10.361.x/include/hndsoc.h | 353 +
 .../include/ieee80211_radiotap.h | 400 +
 bcmdhd.101.10.361.x/include/linux_osl.h | 868 +
 bcmdhd.101.10.361.x/include/linux_pkt.h | 421 +
 bcmdhd.101.10.361.x/include/linuxver.h | 945 +
 bcmdhd.101.10.361.x/include/lpflags.h | 39 +
 bcmdhd.101.10.361.x/include/mbo.h | 279 +
 bcmdhd.101.10.361.x/include/miniopt.h | 73 +
 bcmdhd.101.10.361.x/include/monitor.h | 230 +
 bcmdhd.101.10.361.x/include/msf.h | 60 +
 bcmdhd.101.10.361.x/include/msgtrace.h | 56 +
 bcmdhd.101.10.361.x/include/nan.h | 1562 +
 bcmdhd.101.10.361.x/include/nci.h | 96 +
 bcmdhd.101.10.361.x/include/osl.h | 482 +
 bcmdhd.101.10.361.x/include/osl_decl.h | 31 +
 bcmdhd.101.10.361.x/include/osl_ext.h | 759 +
 bcmdhd.101.10.361.x/include/p2p.h | 695 +
 .../include/packed_section_end.h | 62 +
 .../include/packed_section_start.h | 117 +
 bcmdhd.101.10.361.x/include/pcicfg.h | 730 +
 bcmdhd.101.10.361.x/include/pcie_core.h | 1485 +
 bcmdhd.101.10.361.x/include/sbchipc.h | 5282 +++
 bcmdhd.101.10.361.x/include/sbconfig.h | 279 +
 bcmdhd.101.10.361.x/include/sbgci.h | 424 +
 bcmdhd.101.10.361.x/include/sbhndarm.h | 414 +
 bcmdhd.101.10.361.x/include/sbhnddma.h | 481 +
 bcmdhd.101.10.361.x/include/sbhndpio.h | 60 +
 bcmdhd.101.10.361.x/include/sbpcmcia.h | 415 +
 bcmdhd.101.10.361.x/include/sbsdio.h | 185 +
 bcmdhd.101.10.361.x/include/sbsdpcmdev.h | 307 +
 bcmdhd.101.10.361.x/include/sbsocram.h | 198 +
 bcmdhd.101.10.361.x/include/sbsprom.h | 236 +
 bcmdhd.101.10.361.x/include/sbsysmem.h | 191 +
 bcmdhd.101.10.361.x/include/sdio.h | 644 +
 bcmdhd.101.10.361.x/include/sdioh.h | 459 +
 bcmdhd.101.10.361.x/include/sdiovar.h | 124 +
 bcmdhd.101.10.361.x/include/sdspi.h | 72 +
 bcmdhd.101.10.361.x/include/siutils.h | 1057 +
 bcmdhd.101.10.361.x/include/spid.h | 164 +
 bcmdhd.101.10.361.x/include/trxhdr.h | 93 +
 bcmdhd.101.10.361.x/include/typedefs.h | 408 +
 bcmdhd.101.10.361.x/include/usbrdl.h | 134 +
 bcmdhd.101.10.361.x/include/vlan.h | 91 +
 bcmdhd.101.10.361.x/include/wl_bam.h | 74 +
 bcmdhd.101.10.361.x/include/wl_bigdata.h | 81 +
 bcmdhd.101.10.361.x/include/wldev_common.h | 135 +
 bcmdhd.101.10.361.x/include/wlfc_proto.h | 496 +
 bcmdhd.101.10.361.x/include/wlioctl.h | 25850 ++++++++++++++
 bcmdhd.101.10.361.x/include/wlioctl_defs.h | 2514 ++
 bcmdhd.101.10.361.x/include/wlioctl_utils.h | 60 +
 bcmdhd.101.10.361.x/include/wpa.h | 306 +
 bcmdhd.101.10.361.x/include/wps.h | 379 +
 bcmdhd.101.10.361.x/linux_osl.c | 2197 ++
 bcmdhd.101.10.361.x/linux_osl_priv.h | 188 +
 bcmdhd.101.10.361.x/linux_pkt.c | 897 +
 bcmdhd.101.10.361.x/nciutils.c | 3095 ++
 bcmdhd.101.10.361.x/pcie_core.c | 227 +
 bcmdhd.101.10.361.x/pom.h | 70 +
 bcmdhd.101.10.361.x/sbutils.c | 1111 +
 bcmdhd.101.10.361.x/siutils.c | 10249 ++++++
 bcmdhd.101.10.361.x/siutils_priv.h | 513 +
 bcmdhd.101.10.361.x/wb_regon_coordinator.c | 444 +
 bcmdhd.101.10.361.x/wifi_stats.h | 377 +
 bcmdhd.101.10.361.x/wl_android.c | 14244 ++++++++
 bcmdhd.101.10.361.x/wl_android.h | 252 +
 bcmdhd.101.10.361.x/wl_android_ext.c | 4043 +++
 bcmdhd.101.10.361.x/wl_android_ext.h | 175 +
 bcmdhd.101.10.361.x/wl_bam.c | 643 +
 bcmdhd.101.10.361.x/wl_bigdata.c | 575 +
 bcmdhd.101.10.361.x/wl_cfg80211.c | 22880 ++++++++++++
 bcmdhd.101.10.361.x/wl_cfg80211.h | 3087 ++
 bcmdhd.101.10.361.x/wl_cfg_btcoex.c | 601 +
 bcmdhd.101.10.361.x/wl_cfgnan.c | 9473 +++++
 bcmdhd.101.10.361.x/wl_cfgnan.h | 959 +
 bcmdhd.101.10.361.x/wl_cfgp2p.c | 2811 ++
 bcmdhd.101.10.361.x/wl_cfgp2p.h | 488 +
 bcmdhd.101.10.361.x/wl_cfgscan.c | 5637 +++
 bcmdhd.101.10.361.x/wl_cfgscan.h | 178 +
 bcmdhd.101.10.361.x/wl_cfgvendor.c | 10061 ++++++
 bcmdhd.101.10.361.x/wl_cfgvendor.h | 855 +
 bcmdhd.101.10.361.x/wl_cfgvif.c | 6601 ++++
 bcmdhd.101.10.361.x/wl_cfgvif.h | 251 +
 bcmdhd.101.10.361.x/wl_dbg.h | 1544 +
 bcmdhd.101.10.361.x/wl_escan.c | 1767 +
 bcmdhd.101.10.361.x/wl_escan.h | 89 +
 bcmdhd.101.10.361.x/wl_event.c | 556 +
 bcmdhd.101.10.361.x/wl_event.h | 18 +
 bcmdhd.101.10.361.x/wl_export.h | 285 +
 bcmdhd.101.10.361.x/wl_ext_genl.c | 568 +
 bcmdhd.101.10.361.x/wl_iapsta.c | 5748 +++
 bcmdhd.101.10.361.x/wl_iapsta.h | 85 +
 bcmdhd.101.10.361.x/wl_iw.c | 4302 +++
 bcmdhd.101.10.361.x/wl_iw.h | 171 +
 bcmdhd.101.10.361.x/wl_linux_mon.c | 412 +
 bcmdhd.101.10.361.x/wl_roam.c | 548 +
 bcmdhd.101.10.361.x/wlc_types.h | 714 +
 bcmdhd.101.10.361.x/wldev_common.c | 537 +
 307 files changed, 437997 insertions(+)
 create mode 100755 bcmdhd.101.10.361.x/Kconfig
 create mode 100755 bcmdhd.101.10.361.x/Makefile
 create mode 100755 bcmdhd.101.10.361.x/aiutils.c
 create mode 100755 bcmdhd.101.10.361.x/bcm_app_utils.c
 create mode 100755 bcmdhd.101.10.361.x/bcm_l2_filter.c
 create mode 100755 bcmdhd.101.10.361.x/bcmbloom.c
 create mode 100755 bcmdhd.101.10.361.x/bcmevent.c
 create mode 100755 bcmdhd.101.10.361.x/bcminternal-android.mk
 create mode 100755 bcmdhd.101.10.361.x/bcminternal.mk
 create mode 100755 bcmdhd.101.10.361.x/bcmsdh.c
 create mode 100755 bcmdhd.101.10.361.x/bcmsdh_linux.c
 create mode 100755 bcmdhd.101.10.361.x/bcmsdh_sdmmc.c
 create mode 100755 bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c
 create mode 100755 bcmdhd.101.10.361.x/bcmsdspi.h
 create mode 100755 bcmdhd.101.10.361.x/bcmsdspi_linux.c
 create mode 100755 bcmdhd.101.10.361.x/bcmsdstd.c
 create mode 100755 bcmdhd.101.10.361.x/bcmsdstd.h
 create mode 100755 bcmdhd.101.10.361.x/bcmsdstd_linux.c
 create mode 100755 bcmdhd.101.10.361.x/bcmspibrcm.c
 create mode 100755 bcmdhd.101.10.361.x/bcmsrom.c
 create mode 100755 bcmdhd.101.10.361.x/bcmstdlib.c
 create mode 100755 bcmdhd.101.10.361.x/bcmstdlib_s.c
 create mode 100755 bcmdhd.101.10.361.x/bcmutils.c
 create mode 100755 bcmdhd.101.10.361.x/bcmwifi_channels.c
 create mode 100755 bcmdhd.101.10.361.x/bcmwifi_monitor.c
 create mode 100755 bcmdhd.101.10.361.x/bcmwifi_radiotap.c
 create mode 100755 bcmdhd.101.10.361.x/bcmwifi_rates.c
 create mode 100755 bcmdhd.101.10.361.x/bcmwifi_rspec.c
 create mode 100755 bcmdhd.101.10.361.x/bcmwpa.c
 create mode 100755 bcmdhd.101.10.361.x/bcmxtlv.c
 create mode 100755 bcmdhd.101.10.361.x/dbus.c
 create mode 100755 bcmdhd.101.10.361.x/dbus_usb.c
 create mode 100755 bcmdhd.101.10.361.x/dbus_usb_linux.c
 create mode 100755 bcmdhd.101.10.361.x/dhd.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_bitpack.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_bitpack.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_bus.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_buzzz.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_ccode.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_cdc.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_cfg80211.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_cfg80211.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_common.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_config.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_config.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_csi.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_csi.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_custom_cis.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_custom_exynos.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_custom_gpio.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_custom_hikey.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_custom_memprealloc.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_custom_msm.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_custom_sec.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_dbg.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_dbg_ring.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_dbg_ring.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_debug.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_debug.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_debug_linux.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_event_log_filter.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_event_log_filter.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_flowring.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_flowring.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_fwtrace.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_fwtrace.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_gpio.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_ip.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_ip.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_exportfs.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_lb.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_pktdump.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_pktdump.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_platdev.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_priv.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_sched.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_sock_qos.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_sock_qos.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_wq.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_linux_wq.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_macdbg.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_macdbg.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_mschdbg.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_mschdbg.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_msgbuf.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_pcie.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_pcie.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_pcie_linux.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_pktlog.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_pktlog.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_plat.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_pno.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_pno.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_proto.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_qos_algo.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_rtt.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_rtt.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_sdio.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_sec_feature.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_static_buf.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_statlog.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_statlog.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_timesync.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_timesync.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_wet.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_wet.h
 create mode 100755 bcmdhd.101.10.361.x/dhd_wlfc.c
 create mode 100755 bcmdhd.101.10.361.x/dhd_wlfc.h
 create mode 100755 bcmdhd.101.10.361.x/frag.c
 create mode 100755 bcmdhd.101.10.361.x/frag.h
 create mode 100755 bcmdhd.101.10.361.x/ftdi_sio_external.h
 create mode 100755 bcmdhd.101.10.361.x/hnd_pktpool.c
 create mode 100755 bcmdhd.101.10.361.x/hnd_pktq.c
 create mode 100755 bcmdhd.101.10.361.x/hndlhl.c
 create mode 100755 bcmdhd.101.10.361.x/hndmem.c
 create mode 100755 bcmdhd.101.10.361.x/hndpmu.c
 create mode 100755 bcmdhd.101.10.361.x/include/802.11.h
 create mode 100755 bcmdhd.101.10.361.x/include/802.11ah.h
 create mode 100755 bcmdhd.101.10.361.x/include/802.11ax.h
 create mode 100755 bcmdhd.101.10.361.x/include/802.11e.h
 create mode 100755 bcmdhd.101.10.361.x/include/802.11r.h
 create mode 100755 bcmdhd.101.10.361.x/include/802.11s.h
 create mode 100755 bcmdhd.101.10.361.x/include/802.1d.h
 create mode 100755 bcmdhd.101.10.361.x/include/802.3.h
 create mode 100755 bcmdhd.101.10.361.x/include/aidmp.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcm_fwtrace.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcm_l2_filter.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcm_mpool_pub.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcm_ring.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmarp.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmbloom.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmcdc.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmdefs.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmdevs.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmdevs_legacy.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmdhcp.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmendian.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmerror.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmeth.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmevent.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmicmp.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmiov.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmip.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmipv6.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmmsgbuf.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmnvram.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmpcie.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmpcispi.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmperf.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmproto.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmrand.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmsdbus.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmsdh.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmsdpcm.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmspi.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmspibrcm.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmsrom.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmsrom_fmt.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmsrom_tbl.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmstdlib_s.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmtcp.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmtlv.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmudp.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmutils.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmwifi_channels.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmwifi_monitor.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmwifi_rates.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmwifi_rspec.h
 create mode 100755 bcmdhd.101.10.361.x/include/bcmwpa.h
 create mode 100755 bcmdhd.101.10.361.x/include/brcm_nl80211.h
 create mode 100755 bcmdhd.101.10.361.x/include/d11.h
 create mode 100755 bcmdhd.101.10.361.x/include/d11_cfg.h
 create mode 100755 bcmdhd.101.10.361.x/include/d11reglist_proto.h
 create mode 100755 bcmdhd.101.10.361.x/include/d11regs.h
 create mode 100755 bcmdhd.101.10.361.x/include/dbus.h
 create mode 100755 bcmdhd.101.10.361.x/include/dhd_daemon.h
 create mode 100755 bcmdhd.101.10.361.x/include/dhdioctl.h
 create mode 100755 bcmdhd.101.10.361.x/include/dngl_rtlv.h
 create mode 100755 bcmdhd.101.10.361.x/include/dngl_stats.h
 create mode 100755 bcmdhd.101.10.361.x/include/dngl_wlhdr.h
 create mode 100755 bcmdhd.101.10.361.x/include/dnglevent.h
 create mode 100755 bcmdhd.101.10.361.x/include/dnglioctl.h
 create mode 100755 bcmdhd.101.10.361.x/include/eap.h
 create mode 100755 bcmdhd.101.10.361.x/include/eapol.h
 create mode 100755 bcmdhd.101.10.361.x/include/epivers.h
 create mode 100755 bcmdhd.101.10.361.x/include/etd.h
 create mode 100755 bcmdhd.101.10.361.x/include/ethernet.h
 create mode 100755 bcmdhd.101.10.361.x/include/event_log.h
 create mode 100755 bcmdhd.101.10.361.x/include/event_log_payload.h
 create mode 100755 bcmdhd.101.10.361.x/include/event_log_set.h
 create mode 100755 bcmdhd.101.10.361.x/include/event_log_tag.h
 create mode 100755 bcmdhd.101.10.361.x/include/event_trace.h
 create mode 100755 bcmdhd.101.10.361.x/include/fils.h
 create mode 100755 bcmdhd.101.10.361.x/include/hnd_armtrap.h
 create mode 100755 bcmdhd.101.10.361.x/include/hnd_cons.h
 create mode 100755 bcmdhd.101.10.361.x/include/hnd_debug.h
 create mode 100755 bcmdhd.101.10.361.x/include/hnd_pktpool.h
 create mode 100755 bcmdhd.101.10.361.x/include/hnd_pktq.h
 create mode 100755 bcmdhd.101.10.361.x/include/hnd_trap.h
 create mode 100755 bcmdhd.101.10.361.x/include/hndchipc.h
 create mode 100755 bcmdhd.101.10.361.x/include/hndd11.h
 create mode 100755 bcmdhd.101.10.361.x/include/hnddma.h
 create mode 100755 bcmdhd.101.10.361.x/include/hndlhl.h
 create mode 100755 bcmdhd.101.10.361.x/include/hndmem.h
 create mode 100755 bcmdhd.101.10.361.x/include/hndoobr.h
 create mode 100755 bcmdhd.101.10.361.x/include/hndpmu.h
 create mode 100755 bcmdhd.101.10.361.x/include/hndsoc.h
 create mode 100755 bcmdhd.101.10.361.x/include/ieee80211_radiotap.h
 create mode 100755 bcmdhd.101.10.361.x/include/linux_osl.h
 create mode 100755 bcmdhd.101.10.361.x/include/linux_pkt.h
 create mode 100755 bcmdhd.101.10.361.x/include/linuxver.h
 create mode 100755 bcmdhd.101.10.361.x/include/lpflags.h
 create mode 100755 bcmdhd.101.10.361.x/include/mbo.h
 create mode 100755 bcmdhd.101.10.361.x/include/miniopt.h
 create mode 100755 bcmdhd.101.10.361.x/include/monitor.h
 create mode 100755 bcmdhd.101.10.361.x/include/msf.h
 create mode 100755 bcmdhd.101.10.361.x/include/msgtrace.h
 create mode 100755 bcmdhd.101.10.361.x/include/nan.h
 create mode 100755 bcmdhd.101.10.361.x/include/nci.h
 create mode 100755 bcmdhd.101.10.361.x/include/osl.h
 create mode 100755 bcmdhd.101.10.361.x/include/osl_decl.h
 create mode 100755 bcmdhd.101.10.361.x/include/osl_ext.h
 create mode 100755 bcmdhd.101.10.361.x/include/p2p.h
 create mode 100755 bcmdhd.101.10.361.x/include/packed_section_end.h
 create mode 100755 bcmdhd.101.10.361.x/include/packed_section_start.h
 create mode 100755 bcmdhd.101.10.361.x/include/pcicfg.h
 create mode 100755 bcmdhd.101.10.361.x/include/pcie_core.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbchipc.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbconfig.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbgci.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbhndarm.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbhnddma.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbhndpio.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbpcmcia.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbsdio.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbsdpcmdev.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbsocram.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbsprom.h
 create mode 100755 bcmdhd.101.10.361.x/include/sbsysmem.h
 create mode 100755 bcmdhd.101.10.361.x/include/sdio.h
 create mode 100755 bcmdhd.101.10.361.x/include/sdioh.h
 create mode 100755 bcmdhd.101.10.361.x/include/sdiovar.h
 create mode 100755 bcmdhd.101.10.361.x/include/sdspi.h
 create mode 100755 bcmdhd.101.10.361.x/include/siutils.h
 create mode 100755 bcmdhd.101.10.361.x/include/spid.h
 create mode 100755 bcmdhd.101.10.361.x/include/trxhdr.h
 create mode 100755 bcmdhd.101.10.361.x/include/typedefs.h
 create mode 100755 bcmdhd.101.10.361.x/include/usbrdl.h
 create mode 100755 bcmdhd.101.10.361.x/include/vlan.h
 create mode 100755 bcmdhd.101.10.361.x/include/wl_bam.h
 create mode 100755 bcmdhd.101.10.361.x/include/wl_bigdata.h
 create mode 100755 bcmdhd.101.10.361.x/include/wldev_common.h
 create mode 100755 bcmdhd.101.10.361.x/include/wlfc_proto.h
 create mode 100755 bcmdhd.101.10.361.x/include/wlioctl.h
 create mode 100755 bcmdhd.101.10.361.x/include/wlioctl_defs.h
 create mode 100755 bcmdhd.101.10.361.x/include/wlioctl_utils.h
 create mode 100755 bcmdhd.101.10.361.x/include/wpa.h
 create mode 100755 bcmdhd.101.10.361.x/include/wps.h
 create mode 100755 bcmdhd.101.10.361.x/linux_osl.c
 create mode 100755 bcmdhd.101.10.361.x/linux_osl_priv.h
 create mode 100755 bcmdhd.101.10.361.x/linux_pkt.c
 create mode 100755 bcmdhd.101.10.361.x/nciutils.c
 create mode 100755 bcmdhd.101.10.361.x/pcie_core.c
 create mode 100755 bcmdhd.101.10.361.x/pom.h
 create mode 100755 bcmdhd.101.10.361.x/sbutils.c
 create mode 100755 bcmdhd.101.10.361.x/siutils.c
 create mode 100755 bcmdhd.101.10.361.x/siutils_priv.h
 create mode 100755 bcmdhd.101.10.361.x/wb_regon_coordinator.c
 create mode 100755 bcmdhd.101.10.361.x/wifi_stats.h
 create mode 100755 bcmdhd.101.10.361.x/wl_android.c
 create mode 100755 bcmdhd.101.10.361.x/wl_android.h
 create mode 100755 bcmdhd.101.10.361.x/wl_android_ext.c
 create mode 100755 bcmdhd.101.10.361.x/wl_android_ext.h
 create mode 100755 bcmdhd.101.10.361.x/wl_bam.c
 create mode 100755 bcmdhd.101.10.361.x/wl_bigdata.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfg80211.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfg80211.h
 create mode 100755 bcmdhd.101.10.361.x/wl_cfg_btcoex.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgnan.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgnan.h
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgp2p.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgp2p.h
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgscan.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgscan.h
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgvendor.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgvendor.h
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgvif.c
 create mode 100755 bcmdhd.101.10.361.x/wl_cfgvif.h
 create mode 100755 bcmdhd.101.10.361.x/wl_dbg.h
 create mode 100755 bcmdhd.101.10.361.x/wl_escan.c
 create mode 100755 bcmdhd.101.10.361.x/wl_escan.h
 create mode 100755 bcmdhd.101.10.361.x/wl_event.c
 create mode 100755 bcmdhd.101.10.361.x/wl_event.h
 create mode 100755 bcmdhd.101.10.361.x/wl_export.h
 create mode 100755 bcmdhd.101.10.361.x/wl_ext_genl.c
 create mode 100755 bcmdhd.101.10.361.x/wl_iapsta.c
 create mode 100755 bcmdhd.101.10.361.x/wl_iapsta.h
 create mode 100755 bcmdhd.101.10.361.x/wl_iw.c
 create mode 100755 bcmdhd.101.10.361.x/wl_iw.h
 create mode 100755 bcmdhd.101.10.361.x/wl_linux_mon.c
 create mode 100755 bcmdhd.101.10.361.x/wl_roam.c
 create mode 100755 bcmdhd.101.10.361.x/wlc_types.h
 create mode 100755 bcmdhd.101.10.361.x/wldev_common.c

diff --git a/bcmdhd.101.10.361.x/Kconfig b/bcmdhd.101.10.361.x/Kconfig
new file mode 100755
index 0000000..f49ae76
--- /dev/null
+++ b/bcmdhd.101.10.361.x/Kconfig
@@ -0,0 +1,61 @@
+config BCMDHD
+	tristate "Broadcom FullMAC wireless cards support"
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom FullMAC chipset.
+
+config BCMDHD_FW_PATH
+	depends on BCMDHD
+	string "Firmware path"
+	default "/system/etc/firmware/fw_bcmdhd.bin"
+	---help---
+	  Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+	depends on BCMDHD
+	string "NVRAM path"
+	default "/system/etc/firmware/nvram.txt"
+	---help---
+	  Path to the calibration file.
+
+config BCMDHD_WEXT
+	bool "Enable WEXT support"
+	depends on BCMDHD && CFG80211 = n
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	help
+	  Enables WEXT support
+
+choice
+	prompt "Enable Chip Interface"
+	depends on BCMDHD
+	---help---
+	  Enable Chip Interface.
+config BCMDHD_SDIO
+	bool "SDIO bus interface support"
+	depends on BCMDHD && MMC
+config BCMDHD_PCIE
+	bool "PCIe bus interface support"
+	depends on BCMDHD && PCI
+config BCMDHD_USB
+	bool "USB bus interface support"
+	depends on BCMDHD && USB
+endchoice
+
+choice
+	depends on BCMDHD && BCMDHD_SDIO
+	prompt "Interrupt type"
+	---help---
+	  Interrupt type
+config BCMDHD_OOB
+	depends on BCMDHD && BCMDHD_SDIO
+	bool "Out-of-Band Interrupt"
+	default y
+	---help---
+	  Interrupt from WL_HOST_WAKE.
+config BCMDHD_SDIO_IRQ
+	depends on BCMDHD && BCMDHD_SDIO
+	bool "In-Band Interrupt"
+	---help---
+	  Interrupt from SDIO DAT[1]
+endchoice
diff --git a/bcmdhd.101.10.361.x/Makefile b/bcmdhd.101.10.361.x/Makefile
new file mode 100755
index 0000000..faf1fa5
--- /dev/null
+++ b/bcmdhd.101.10.361.x/Makefile
@@ -0,0 +1,391 @@
+# bcmdhd
+
+# if PCIe mode is not configured, use SDIO mode as the default
+ifeq ($(CONFIG_BCMDHD_PCIE),)
+$(info bcm SDIO driver configured)
+CONFIG_DHD_USE_STATIC_BUF := y
+endif
+
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
+MODULE_NAME := dhd
+else
+ifeq ($(CONFIG_BCMDHD_USB),y)
+MODULE_NAME := bcmdhd
+else
+MODULE_NAME := dhdpci
+endif
+endif
+
+CONFIG_BCMDHD_ANDROID_VERSION := 13
+
+CONFIG_BCMDHD ?= m
+
+#CONFIG_BCMDHD_SDIO := y
+#CONFIG_BCMDHD_PCIE := y
+#CONFIG_BCMDHD_USB := y
+
+CONFIG_BCMDHD_OOB := y
+#CONFIG_BCMDHD_CUSB := y
+#CONFIG_BCMDHD_NO_POWER_OFF := y
+CONFIG_BCMDHD_PROPTXSTATUS := y
+CONFIG_DHD_USE_STATIC_BUF := y
+#CONFIG_BCMDHD_STATIC_BUF_IN_DHD := y
+CONFIG_BCMDHD_ANDROID_VERSION := 11
+CONFIG_BCMDHD_AUTO_SELECT := y
+CONFIG_BCMDHD_DEBUG := y
+#CONFIG_BCMDHD_TIMESTAMP := y
+#CONFIG_BCMDHD_WAPI := y
+#CONFIG_BCMDHD_RANDOM_MAC := y
+#CONFIG_BCMDHD_MULTIPLE_DRIVER := y
+CONFIG_BCMDHD_TPUT := y
+
+CONFIG_MACH_PLATFORM := y
+#CONFIG_BCMDHD_DTS := y
+
+ifndef CONFIG_KASAN
+ KBUILD_CFLAGS_MODULE += -Wframe-larger-than=3000
+endif
+
+DHDCFLAGS = -Wall -Wstrict-prototypes -Wno-date-time \
+ -Dlinux -DLINUX -DBCMDRIVER \
+ -Wno-unknown-warning-option \
+ -Wno-maybe-uninitialized -Wno-error -Wno-format-security \
+ -Wno-implicit-fallthrough \
+ -DBCMDONGLEHOST -DBCMDMA32 -DBCMFILEIMAGE \
+ -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DGET_OTP_MAC_ENABLE \
+ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \
+ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHDTCPACK_SUPPRESS \
+ -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DOEM_ANDROID \
+ -DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP -DDHD_8021X_DUMP \
+ -DPOWERUP_MAX_RETRY=1 -DIFACE_HANG_FORCE_DEV_CLOSE -DWAIT_DEQUEUE \
+ -DUSE_NEW_RSPEC_DEFS \
+ -DWL_EXT_IAPSTA -DWL_ESCAN -DCCODE_LIST -DSUSPEND_EVENT \
+ -DEAPOL_RESEND -DEAPOL_DYNAMATIC_RESEND \
+ -DENABLE_INSMOD_NO_FW_LOAD
+
+DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \
+ dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \
+ dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
+ bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
+ dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o dhd_dbg_ring.o \
+ hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o frag.o \
+ dhd_linux_exportfs.o dhd_linux_pktdump.o dhd_mschdbg.o \
+ dhd_config.o dhd_ccode.o wl_event.o wl_android_ext.o \
+ wl_iapsta.o wl_escan.o
+
+ifneq ($(CONFIG_WIRELESS_EXT),)
+ DHDOFILES += wl_iw.o
+ DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
+endif
+ifneq ($(CONFIG_CFG80211),)
+ DHDOFILES += wl_cfg80211.o wl_cfgscan.o wl_cfgp2p.o
+ DHDOFILES += wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
+ DHDOFILES += dhd_cfg80211.o wl_cfgvif.o
+ DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
+ DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+ DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
+ DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
+ DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
+ DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT
+ DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8
+ DHDCFLAGS += -DWL_VIRTUAL_APSTA
+ DHDCFLAGS += -DPNO_SUPPORT -DEXPLICIT_DISCIF_CLEANUP
+ DHDCFLAGS += -DDHD_USE_SCAN_WAKELOCK
+ DHDCFLAGS += -DSPECIFIC_MAC_GEN_SCHEME
+ DHDCFLAGS += -DWL_IFACE_MGMT
+ DHDCFLAGS += -DWLFBT
+ DHDCFLAGS += -DWL_EXT_RECONNECT
+ DHDCFLAGS += -DDHD_LOSSLESS_ROAMING
+ DHDCFLAGS += -DGTK_OFFLOAD_SUPPORT
+ DHDCFLAGS += -DWL_STATIC_IF
+ DHDCFLAGS += -DWL_CLIENT_SAE -DWL_OWE
+endif
+
+#BCMDHD_SDIO
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+BUS_TYPE := "sdio"
+DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DMMC_SW_RESET -DBCMLXSDMMC \
+ -DUSE_SDIOFIFO_IOVAR -DSDTEST \
+ -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256 \
+ -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD \
+ -DDHDENABLE_TAILPAD -DSUPPORT_P2P_GO_PS \
+ -DBCMSDIO_RXLIM_POST -DBCMSDIO_TXSEQ_SYNC -DCONSOLE_DPC \
+ -DBCMSDIO_INTSTATUS_WAR
+ifeq ($(CONFIG_BCMDHD_OOB),y)
+ DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB
+ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
+ DHDCFLAGS += -DDISABLE_WOWLAN
+endif
+else
+ DHDCFLAGS += -DSDIO_ISR_THREAD
+endif
+DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
+ dhd_sdio.o dhd_cdc.o dhd_wlfc.o
+endif
+
+#BCMDHD_PCIE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+BUS_TYPE := "pcie"
+DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 \
+ -DDONGLE_ENABLE_ISOLATION
+DHDCFLAGS += -DDHD_LB -DDHD_LB_RXP -DDHD_LB_STATS -DDHD_LB_TXP
+DHDCFLAGS += -DDHD_PKTID_AUDIT_ENABLED
+DHDCFLAGS += -DINSMOD_FW_LOAD
+DHDCFLAGS += -DCONFIG_HAS_WAKELOCK
+#DHDCFLAGS += -DDHD_PCIE_RUNTIMEPM -DMAX_IDLE_COUNT=11 -DCUSTOM_DHD_RUNTIME_MS=100
+ifeq ($(CONFIG_BCMDHD_OOB),y)
+ DHDCFLAGS += -DCUSTOMER_OOB -DBCMPCIE_OOB_HOST_WAKE
+endif
+ifneq ($(CONFIG_PCI_MSI),)
+ DHDCFLAGS += -DDHD_MSI_SUPPORT
+endif
+CONFIG_BCMDHD_NO_POWER_OFF := y
+DHDCFLAGS += -DDHD_DISABLE_ASPM
+#DHDCFLAGS += -DUSE_AML_PCIE_TEE_MEM
+DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \
+ dhd_msgbuf.o dhd_linux_lb.o
+endif
+
+#BCMDHD_USB
+ifneq ($(CONFIG_BCMDHD_USB),)
+BUS_TYPE := "usb"
+DHDCFLAGS += -DUSBOS_TX_THREAD -DBCMDBUS -DBCMTRXV2 -DDBUS_USB_LOOPBACK \
+ -DBDC
+DHDCFLAGS += -DBCM_REQUEST_FW -DEXTERNAL_FW_PATH
+CONFIG_BCMDHD_NO_POWER_OFF := y
+ifneq ($(CONFIG_BCMDHD_CUSB),)
+ DHDCFLAGS += -DBCMUSBDEV_COMPOSITE
+ CONFIG_BCMDHD_NO_POWER_OFF := y
+endif
+DHDOFILES += dbus.o dbus_usb.o dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o
+endif
+
+ifeq ($(CONFIG_BCMDHD_NO_POWER_OFF),y)
+ DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD
+ DHDCFLAGS += -DENABLE_INSMOD_NO_POWER_OFF -DNO_POWER_OFF_AFTER_OPEN
+endif
+
+ifeq ($(CONFIG_BCMDHD_MULTIPLE_DRIVER),y)
+ DHDCFLAGS += -DBCMDHD_MDRIVER
+ DHDCFLAGS += -DBUS_TYPE=\"-$(BUS_TYPE)\"
+ DHDCFLAGS += -DDHD_LOG_PREFIX=\"[dhd-$(BUS_TYPE)]\"
+ MODULE_NAME := dhd$(BUS_TYPE)
+else
+ DHDCFLAGS += -DBUS_TYPE=\"\"
+endif
+
+ifeq ($(CONFIG_BCMDHD_TIMESTAMP),y)
+ DHDCFLAGS += -DKERNEL_TIMESTAMP
+ DHDCFLAGS += -DSYSTEM_TIMESTAMP
+endif
+
+#PROPTXSTATUS
+ifeq ($(CONFIG_BCMDHD_PROPTXSTATUS),y)
+ifneq ($(CONFIG_BCMDHD_USB),)
+ DHDCFLAGS += -DPROP_TXSTATUS
+endif
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DPROP_TXSTATUS -DPROPTX_MAXCOUNT
+endif
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+endif
+endif
+
+ifeq ($(CONFIG_64BIT),y)
+ DHDCFLAGS := $(filter-out -DBCMDMA32,$(DHDCFLAGS))
+ DHDCFLAGS += -DBCMDMA64OSL
+endif
+
+# For Android VTS
+ifneq ($(CONFIG_BCMDHD_ANDROID_VERSION),)
+ DHDCFLAGS += -DANDROID_VERSION=$(CONFIG_BCMDHD_ANDROID_VERSION)
+ DHDCFLAGS += -DDHD_NOTIFY_MAC_CHANGED
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DLINKSTAT_SUPPORT
+ DHDCFLAGS += -DCUSTOM_COUNTRY_CODE -DDHD_GET_VALID_CHANNELS
+ DHDCFLAGS += -DDEBUGABILITY -DDBG_PKT_MON
+ DHDCFLAGS += -DDHD_LOG_DUMP -DDHD_FW_COREDUMP
+ DHDCFLAGS += -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT
+ DHDCFLAGS += -DDHD_WAKE_STATUS
+ DHDOFILES += dhd_rtt.o bcm_app_utils.o
+endif
+else
+ DHDCFLAGS += -DANDROID_VERSION=0
+endif
+
+# For Debug
+ifeq ($(CONFIG_BCMDHD_DEBUG),y)
+ DHDCFLAGS += -DDHD_ARP_DUMP -DDHD_DHCP_DUMP -DDHD_ICMP_DUMP
+ DHDCFLAGS += -DDHD_DNS_DUMP -DDHD_TRX_DUMP
+ DHDCFLAGS += -DTPUT_MONITOR
+# DHDCFLAGS += -DSCAN_SUPPRESS -DBSSCACHE
+ DHDCFLAGS += -DCHECK_DOWNLOAD_FW
+ DHDCFLAGS += -DPKT_STATICS
+ DHDCFLAGS += -DKSO_DEBUG
+# DHDCFLAGS += -DDHD_PKTDUMP_TOFW
+endif
+
+# For Debug2
+ifeq ($(CONFIG_BCMDHD_DEBUG2),y)
+ DHDCFLAGS += -DDEBUGFS_CFG80211
+ DHDCFLAGS += -DSHOW_LOGTRACE -DDHD_LOG_DUMP -DDHD_FW_COREDUMP
+ DHDCFLAGS += -DBCMASSERT_LOG -DSI_ERROR_ENFORCE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DEWP_EDL
+ DHDCFLAGS += -DDNGL_EVENT_SUPPORT
+ DHDCFLAGS += -DDHD_SSSR_DUMP
+endif
+endif
+
+# MESH support for kernel 3.10 and later
+ifeq ($(CONFIG_WL_MESH),y)
+ DHDCFLAGS += -DWLMESH
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DWLMESH_CFG80211
+endif
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000
+endif
+ DHDCFLAGS += -DDHD_UPDATE_INTF_MAC
+ DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+ DHDCFLAGS :=$(filter-out -DWL_STATIC_IF,$(DHDCFLAGS))
+endif
+
+ifeq ($(CONFIG_BCMDHD_EASYMESH),y)
+ DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+ DHDCFLAGS :=$(filter-out -DDHD_LOG_DUMP,$(DHDCFLAGS))
+ DHDCFLAGS += -DWLEASYMESH -DWL_STATIC_IF -DWLDWDS -DFOURADDR_AUTO_BRG
+endif
+
+#CSI_SUPPORT
+ifeq ($(CONFIG_CSI_SUPPORT),y)
+ DHDCFLAGS += -DCSI_SUPPORT
+ DHDOFILES += dhd_csi.o
+endif
+
+# For TPUT_IMPROVE
+ifeq ($(CONFIG_BCMDHD_TPUT),y)
+ DHDCFLAGS += -DDHD_TPUT_PATCH
+ DHDCFLAGS += -DTCPACK_INFO_MAXNUM=10 -DTCPDATA_INFO_MAXNUM=10
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DDYNAMIC_MAX_HDR_READ
+ DHDCFLAGS :=$(filter-out -DSDTEST,$(DHDCFLAGS))
+endif
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DDHD_LB_TXP_DEFAULT_ENAB
+ DHDCFLAGS += -DSET_RPS_CPUS -DSET_XPS_CPUS
+ DHDCFLAGS += -DDHD_LB_PRIMARY_CPUS=0xF0 -DDHD_LB_SECONDARY_CPUS=0x0E
+endif
+endif
+
+# For zero-config support
+ifeq ($(CONFIG_BCMDHD_ZEROCONFIG),y)
+ DHDCFLAGS += -DWL_EXT_GENL -DSENDPROB
+ DHDOFILES += wl_ext_genl.o
+endif
+
+# For WAPI
+ifeq ($(CONFIG_BCMDHD_WAPI),y)
+ DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
+endif
+
+# For scan random mac
+ifneq ($(CONFIG_BCMDHD_RANDOM_MAC),)
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DSUPPORT_RANDOM_MAC_SCAN -DWL_USE_RANDOMIZED_SCAN
+endif
+endif
+
+# For NAN
+ifneq ($(CONFIG_BCMDHD_NAN),)
+ DHDCFLAGS += -DWL_NAN -DWL_NAN_DISC_CACHE
+ DHDOFILES += wl_cfgnan.o bcmbloom.o
+endif
+
+# For Module auto-selection
+ifeq ($(CONFIG_BCMDHD_AUTO_SELECT),y)
+ DHDCFLAGS += -DUPDATE_MODULE_NAME
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DGET_OTP_MODULE_NAME -DCOMPAT_OLD_MODULE
+endif
+endif
+
+ifeq ($(CONFIG_BCMDHD),m)
+ DHDCFLAGS += -DBCMDHD_MODULAR
+endif
+
+ifeq ($(CONFIG_MACH_PLATFORM),y)
+ DHDOFILES += dhd_gpio.o
+ifeq ($(CONFIG_BCMDHD_DTS),y)
+ DHDCFLAGS += -DBCMDHD_DTS
+endif
+ DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT
+ DHDCFLAGS += -DCUSTOMER_HW_AMLOGIC
+
+# to configure a custom MAC
+# DHDCFLAGS += -DGET_CUSTOM_MAC_ENABLE -DCUSTOM_MULTI_MAC
+# if the AP MAC also needs to be configured
+# DHDCFLAGS += -DCUSTOM_AP_MAC
+#
+endif
+
+ifeq ($(CONFIG_BCMDHD_AG),y)
+ DHDCFLAGS += -DBAND_AG
+endif
+
+ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
+ifeq ($(CONFIG_BCMDHD_STATIC_BUF_IN_DHD),y)
+ DHDOFILES += dhd_static_buf.o
+ DHDCFLAGS += -DDHD_STATIC_IN_DRIVER
+else
+# obj-m += dhd_static_buf.o
+endif
+ DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF
+ DHDCFLAGS += -DCONFIG_DHD_USE_STATIC_BUF
+ DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DDHD_USE_STATIC_CTRLBUF
+endif
+endif
+
+ARCH ?= arm64
+CROSS_COMPILE ?=aarch64-linux-gnu-
+KDIR ?=../../../../../../common
+
+BCMDHD_ROOT = $(src)
+#$(warning "BCMDHD_ROOT=$(BCMDHD_ROOT)")
+EXTRA_CFLAGS = $(DHDCFLAGS)
+EXTRA_CFLAGS += -DDHD_COMPILED=\"$(BCMDHD_ROOT)\"
+EXTRA_CFLAGS += -I$(BCMDHD_ROOT)/include/ -I$(BCMDHD_ROOT)/
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
+
+obj-$(CONFIG_BCMDHD) += $(MODULE_NAME).o
+$(MODULE_NAME)-objs += $(DHDOFILES)
+ccflags-y := $(EXTRA_CFLAGS)
+
+#all: bcmdhd_sdio bcmdhd_usb
+all: bcmdhd_sdio
+
+EXTRA_CFLAGS += -I$(KERNEL_SRC)/$(M)/include -I$(KERNEL_SRC)/$(M)/
+modules_install:
+	@$(MAKE) INSTALL_MOD_STRIP=1 M=$(M) -C $(KERNEL_SRC) modules_install
+	mkdir -p ${OUT_DIR}/../private/modules
+	cd ${OUT_DIR}/$(M)/; find -name "*.ko" -exec cp {} ${OUT_DIR}/../private/modules/ \;
+
+bcmdhd_sdio:
+	$(warning "building BCMDHD_SDIO..........")
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) CONFIG_DHD_USE_STATIC_BUF=y CONFIG_BCMDHD_SDIO=y modules
+
+bcmdhd_usb:
+	$(warning "building BCMDHD_USB..........")
+	$(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules CONFIG_BCMDHD_USB=y
+	mv dhd.ko dhd_usb.ko
+
+clean:
+	$(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) clean
+	$(RM) Module.markers
+	$(RM) modules.order
diff --git a/bcmdhd.101.10.361.x/aiutils.c b/bcmdhd.101.10.361.x/aiutils.c
new file mode 100755
index 0000000..b8b9555
--- /dev/null
+++ b/bcmdhd.101.10.361.x/aiutils.c
@@ -0,0 +1,2604 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" +#include + +#if defined(ETD) +#include +#endif + +#if !defined(BCMDONGLEHOST) +#define PMU_DMP() (cores_info->coreid[sii->curidx] == PMU_CORE_ID) +#define GCI_DMP() (cores_info->coreid[sii->curidx] == GCI_CORE_ID) +#else +#define PMU_DMP() (0) +#define GCI_DMP() (0) +#endif /* !defined(BCMDONGLEHOST) */ + +#if defined(AXI_TIMEOUTS_NIC) +static bool ai_get_apb_bridge(const si_t *sih, uint32 coreidx, uint32 *apb_id, + uint32 *apb_coreunit); +#endif /* AXI_TIMEOUTS_NIC */ + +#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) +static void ai_reset_axi_to(const si_info_t *sii, aidmp_t *ai); +#endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */ + +#ifdef DONGLEBUILD +static uint32 ai_get_sizeof_wrapper_offsets_to_dump(void); +static uint32 ai_get_wrapper_base_addr(uint32 **offset); +#endif /* DONGLEBUILD */ + +/* AXI ID to CoreID + unit mappings */ +typedef struct axi_to_coreidx { + uint coreid; + uint coreunit; +} axi_to_coreidx_t; + +static const axi_to_coreidx_t axi2coreidx_4369[] = { + {CC_CORE_ID, 0}, /* 00 Chipcommon */ + {PCIE2_CORE_ID, 0}, /* 01 PCIe */ + {D11_CORE_ID, 0}, /* 02 D11 Main */ + {ARMCR4_CORE_ID, 0}, /* 03 ARM */ + {BT_CORE_ID, 0}, /* 04 BT AHB */ + {D11_CORE_ID, 1}, /* 05 D11 Aux */ + {D11_CORE_ID, 0}, /* 06 D11 Main l1 */ + {D11_CORE_ID, 1}, /* 07 D11 Aux l1 */ + {D11_CORE_ID, 0}, /* 08 D11 Main l2 */ + {D11_CORE_ID, 1}, /* 09 D11 Aux l2 */ + {NODEV_CORE_ID, 0}, /* 10 M2M DMA */ + {NODEV_CORE_ID, 0}, /* 11 unused */ + {NODEV_CORE_ID, 0}, /* 12 unused */ + {NODEV_CORE_ID, 0}, /* 13 unused */ + {NODEV_CORE_ID, 0}, /* 14 unused */ + {NODEV_CORE_ID, 0} /* 15 unused */ +}; + +/* EROM parsing */ + +static uint32 +get_erom_ent(const si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) +{ + uint32 ent; + uint inv = 0, nom = 0; + uint32 size = 0; + + while (TRUE) { + ent = R_REG(SI_INFO(sih)->osh, *eromptr); + (*eromptr)++; + + if (mask == 0) + break; + + if ((ent & ER_VALID) == 0) { + inv++; + continue; + } + + if (ent == (ER_END | ER_VALID)) + break; + + if ((ent & mask) == match) + break; + + /* escape condition related EROM size if it has invalid values */ + size += sizeof(*eromptr); + if (size >= ER_SZ_MAX) { + SI_ERROR(("Failed to find end of EROM marker\n")); + break; + } + + nom++; + } + + SI_VMSG(("get_erom_ent: Returning ent 0x%08x\n", ent)); + if (inv + nom) { + SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom)); + } + return ent; +} + +static uint32 +get_asd(const si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh, + uint32 *sizel, uint32 *sizeh) +{ + uint32 asd, sz, szd; + + BCM_REFERENCE(ad); + + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); + if (((asd & ER_TAG1) != ER_ADD) || + (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || + ((asd & AD_ST_MASK) != st)) { + /* This is not what we want, "push" it back */ + (*eromptr)--; + return 0; + } + *addrl = asd & AD_ADDR_MASK; + if (asd & AD_AG32) + *addrh = get_erom_ent(sih, eromptr, 0, 0); + else + *addrh = 0; + *sizeh = 0; + sz = asd & AD_SZ_MASK; + if (sz == AD_SZ_SZD) { + szd = get_erom_ent(sih, eromptr, 0, 0); + *sizel = szd & SD_SZ_MASK; + if (szd & SD_SG32) + *sizeh = get_erom_ent(sih, eromptr, 0, 0); + } else + *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); + + SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n", + sp, ad, st, *sizeh, *sizel, *addrh, *addrl)); + + return asd; +} + +/* 
Parse the enumeration rom to identify all cores */ +void +BCMATTACHFN(ai_scan)(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = (chipcregs_t *)regs; + uint32 erombase, *eromptr, *eromlim; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + + BCM_REFERENCE(devid); + + erombase = R_REG(sii->osh, &cc->eromptr); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + break; + + case PCI_BUS: + /* Set wrappers address */ + sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + /* Now point the window at the erom */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase); + eromptr = regs; + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + eromptr = (uint32 *)(uintptr)erombase; + break; +#endif /* BCMSDIO */ + + default: + SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype)); + ASSERT(0); + return; + } + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + sii->axi_num_wrappers = 0; + + SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", + OSL_OBFUSCATE_BUF(regs), erombase, + OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim))); + while (eromptr < eromlim) { + uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; + uint32 mpd, asd, addrl, addrh, sizel, sizeh; + uint i, j, idx; + bool br; + + br = FALSE; + + /* Grok a component */ + cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); + if (cia == (ER_END | ER_VALID)) { + SI_VMSG(("Found END of erom after %d cores\n", sii->numcores)); + return; + } + + cib = get_erom_ent(sih, &eromptr, 0, 0); + + if ((cib & ER_TAG) != ER_CI) { + SI_ERROR(("CIA not followed by CIB\n")); + goto error; + } + + cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; + mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; + crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; + nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; + nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + +#ifdef BCMDBG_SI + SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " + "nsw = %d, nmp = %d & nsp = %d\n", + mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp)); +#else + BCM_REFERENCE(crev); +#endif + + /* Include Default slave wrapper for timeout monitoring */ + if ((nsp == 0 && nsw == 0) || +#if !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC) + ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || +#else + ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) && + (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || +#endif /* !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC) */ + FALSE) { + continue; + } + + if ((nmw + nsw == 0)) { + /* A component which is not a core */ + /* Should record some info */ + if (cid == OOB_ROUTER_CORE_ID) { + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + if (asd != 0) { + if ((sii->oob_router != 0) && (sii->oob_router != addrl)) { + sii->oob_router1 = addrl; + } else { + sii->oob_router = addrl; + } + } + } + if ((cid != NS_CCB_CORE_ID) && (cid != PMU_CORE_ID) && + (cid != GCI_CORE_ID) && (cid != SR_CORE_ID) && + (cid != HUB_CORE_ID) && (cid != HND_OOBR_CORE_ID) && + (cid != CCI400_CORE_ID) && (cid != SPMI_SLAVE_CORE_ID)) { + continue; + } + } + + idx = sii->numcores; + + cores_info->cia[idx] = cia; + cores_info->cib[idx] = cib; + cores_info->coreid[idx] = cid; + + /* workaround the fact 
the variable buscoretype is used in _ai_set_coreidx() + * when checking PCIE_GEN2() for PCI_BUS case before it is setup later..., + * both use and setup happen in si_buscore_setup(). + */ + if (BUSTYPE(sih->bustype) == PCI_BUS && + (cid == PCI_CORE_ID || cid == PCIE_CORE_ID || cid == PCIE2_CORE_ID)) { + sii->pub.buscoretype = (uint16)cid; + } + + for (i = 0; i < nmp; i++) { + mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + if ((mpd & ER_TAG) != ER_MP) { + SI_ERROR(("Not enough MP entries for component 0x%x\n", cid)); + goto error; + } + /* Record something? */ + SI_VMSG((" Master port %d, mp: %d id: %d\n", i, + (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT, + (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT)); + } + + /* First Slave Address Descriptor should be port 0: + * the main register space for the core + */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + do { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + if (asd != 0) + br = TRUE; + else { + break; + } + } while (1); + } else { + if (addrl == 0 || sizel == 0) { + SI_ERROR((" Invalid ASD %x for slave port \n", asd)); + goto error; + } + cores_info->coresba[idx] = addrl; + cores_info->coresba_size[idx] = sizel; + } + + /* Get any more ASDs in first port */ + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + /* Support ARM debug core ASD with address space > 4K */ + if ((asd != 0) && (j == 1)) { + SI_VMSG(("Warning: sizel > 0x1000\n")); + cores_info->coresba2[idx] = addrl; + cores_info->coresba2_size[idx] = sizel; + } + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + /* To get the first base address of second slave port */ + if ((asd != 0) && (i == 1) && (j == 0)) { + cores_info->csp2ba[idx] = addrl; + cores_info->csp2ba_size[idx] = sizel; + } + if (asd == 0) + break; + j++; + } while (1); + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + goto error; + } + } + + /* Now get master wrappers */ + for (i = 0; i < nmw; i++) { + asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for MW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Master wrapper %d is not 4KB\n", i)); + goto error; + } + if (i == 0) { + cores_info->wrapba[idx] = addrl; + } else if (i == 1) { + cores_info->wrapba2[idx] = addrl; + } else if (i == 2) { + cores_info->wrapba3[idx] = addrl; + } + + if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) { + axi_wrapper[sii->axi_num_wrappers].mfg = mfg; + axi_wrapper[sii->axi_num_wrappers].cid = cid; + axi_wrapper[sii->axi_num_wrappers].rev = crev; + axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER; + axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl; + sii->axi_num_wrappers++; + SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x," + "rev:%x, addr:%x, size:%x\n", + sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel)); + } + } + + /* And finally slave wrappers */ + for (i = 0; i < nsw; i++) { + uint fwp = (nsp <= 1) ? 
0 : 1; + asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for SW %d cid %x eromp %p fwp %d \n", + i, cid, eromptr, fwp)); + goto error; + } + + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Slave wrapper %d is not 4KB\n", i)); + goto error; + } + + /* cache APB bridge wrapper address for set/clear timeout */ + if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) { + ASSERT(sii->num_br < SI_MAXBR); + sii->br_wrapba[sii->num_br++] = addrl; + } + + if ((mfg == MFGID_ARM) && (cid == ADB_BRIDGE_ID)) { + br = TRUE; + } + + BCM_REFERENCE(br); + + if ((nmw == 0) && (i == 0)) { + cores_info->wrapba[idx] = addrl; + } else if ((nmw == 0) && (i == 1)) { + cores_info->wrapba2[idx] = addrl; + } else if ((nmw == 0) && (i == 2)) { + cores_info->wrapba3[idx] = addrl; + } + + /* Include all slave wrappers to the list to + * enable and monitor watchdog timeouts + */ + + if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) { + axi_wrapper[sii->axi_num_wrappers].mfg = mfg; + axi_wrapper[sii->axi_num_wrappers].cid = cid; + axi_wrapper[sii->axi_num_wrappers].rev = crev; + axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER; + axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl; + + sii->axi_num_wrappers++; + + SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x," + "rev:%x, addr:%x, size:%x\n", + sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel)); + } + } + +#ifndef AXI_TIMEOUTS_NIC + /* Don't record bridges and core with 0 slave ports */ + if (br || (nsp == 0)) { + continue; + } +#endif + + /* Done with core */ + sii->numcores++; + } + + SI_ERROR(("Reached end of erom without finding END\n")); + +error: + sii->numcores = 0; + return; +} + +#define AI_SETCOREIDX_MAPSIZE(coreid) \ + (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE) + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. + */ +static volatile void * +BCMPOSTTRAPFN(_ai_setcoreidx)(si_t *sih, uint coreidx, uint use_wrapn) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 addr, wrap, wrap2, wrap3; + volatile void *regs; + + if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) + return (NULL); + + addr = cores_info->coresba[coreidx]; + wrap = cores_info->wrapba[coreidx]; + wrap2 = cores_info->wrapba2[coreidx]; + wrap3 = cores_info->wrapba3[coreidx]; + +#ifdef AXI_TIMEOUTS_NIC + /* No need to disable interrupts while entering/exiting APB bridge core */ + if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) && + (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID)) +#endif /* AXI_TIMEOUTS_NIC */ + { + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. 
+ */ + ASSERT((sii->intrsenabled_fn == NULL) || + !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + } + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(addr, + AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx])); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + sii->curmap = regs = cores_info->regs[coreidx]; + if (!cores_info->wrappers[coreidx] && (wrap != 0)) { + cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers[coreidx])); + } + if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) { + cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers2[coreidx])); + } + if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) { + cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers3[coreidx])); + } + + if (use_wrapn == 2) { + sii->curwrap = cores_info->wrappers3[coreidx]; + } else if (use_wrapn == 1) { + sii->curwrap = cores_info->wrappers2[coreidx]; + } else { + sii->curwrap = cores_info->wrappers[coreidx]; + } + break; + + case PCI_BUS: + regs = sii->curmap; + + /* point bar0 2nd 4KB window to the primary wrapper */ + if (use_wrapn == 2) { + wrap = wrap3; + } else if (use_wrapn == 1) { + wrap = wrap2; + } + + /* Use BAR0 Window to support dual mac chips... */ + + /* TODO: the other mac unit can't be supportd by the current BAR0 window. + * need to find other ways to access these cores. + */ + + switch (sii->slice) { + case 0: /* main/first slice */ +#ifdef AXI_TIMEOUTS_NIC + /* No need to set the BAR0 if core is APB Bridge. + * This is to reduce 2 PCI writes while checkng for errlog + */ + if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) +#endif /* AXI_TIMEOUTS_NIC */ + { + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); + } + + if (PCIE_GEN2(sii)) + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap); + else + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap); + + break; + + case 1: /* aux/second slice */ + /* PCIE GEN2 only for other slices */ + if (!PCIE_GEN2(sii)) { + /* other slices not supported */ + SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice)); + ASSERT(0); + break; + } + + /* 0x4000 - 0x4fff: enum space 0x5000 - 0x5fff: wrapper space */ + regs = (volatile uint8 *)regs + PCI_SEC_BAR0_WIN_OFFSET; + sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, addr); + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN2, 4, wrap); + break; + + case 2: /* scan/third slice */ + /* PCIE GEN2 only for other slices */ + if (!PCIE_GEN2(sii)) { + /* other slices not supported */ + SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice)); + ASSERT(0); + break; + } + + /* 0x9000 - 0x9fff: enum space 0xa000 - 0xafff: wrapper space */ + regs = (volatile uint8 *)regs + PCI_TER_BAR0_WIN_OFFSET; + sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + /* point bar0 window */ + ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WIN, ~0, addr); + ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WRAPPER, ~0, wrap); + break; + + default: /* other slices */ + SI_ERROR(("BAR0 Window not supported for slice %d\n", sii->slice)); + ASSERT(0); + break; + } + + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + sii->curmap = regs = (void *)((uintptr)addr); + if (use_wrapn) + sii->curwrap = (void *)((uintptr)wrap2); + else + 
sii->curwrap = (void *)((uintptr)wrap); + break; +#endif /* BCMSDIO */ + + default: + ASSERT(0); + sii->curmap = regs = NULL; + break; + } + + sii->curidx = coreidx; + + return regs; +} + +volatile void * +BCMPOSTTRAPFN(ai_setcoreidx)(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 0); +} + +volatile void * +BCMPOSTTRAPFN(ai_setcoreidx_2ndwrap)(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 1); +} + +volatile void * +BCMPOSTTRAPFN(ai_setcoreidx_3rdwrap)(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 2); +} + +void +ai_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = NULL; + uint32 erombase, *eromptr, *eromlim; + uint i, j, cidx; + uint32 cia, cib, nmp, nsp; + uint32 asd, addrl, addrh, sizel, sizeh; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == CC_CORE_ID) { + cc = (chipcregs_t *)cores_info->regs[i]; + break; + } + } + if (cc == NULL) + goto error; + + BCM_REFERENCE(erombase); + erombase = R_REG(sii->osh, &cc->eromptr); + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + + cidx = sii->curidx; + cia = cores_info->cia[cidx]; + cib = cores_info->cib[cidx]; + + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + + /* scan for cores */ + while (eromptr < eromlim) { + if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) && + (get_erom_ent(sih, &eromptr, 0, 0) == cib)) { + break; + } + } + + /* skip master ports */ + for (i = 0; i < nmp; i++) + get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + + /* Skip ASDs in port 0 */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + } + + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) + break; + + if (!asidx--) { + *addr = addrl; + *size = sizel; + return; + } + j++; + } while (1); + + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + break; + } + } + +error: + *size = 0; + return; +} + +/* Return the number of address spaces in current core */ +int +ai_numaddrspaces(const si_t *sih) +{ + /* TODO: Either save it or parse the EROM on demand, currently hardcode 2 */ + BCM_REFERENCE(sih); + + return 2; +} + +/* Return the address of the nth address space in the current core + * Arguments: + * sih : Pointer to struct si_t + * spidx : slave port index + * baidx : base address index + */ +uint32 +ai_addrspace(const si_t *sih, uint spidx, uint baidx) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + + if (spidx == CORE_SLAVE_PORT_0) { + if (baidx == CORE_BASE_ADDR_0) + return cores_info->coresba[cidx]; + else if (baidx == CORE_BASE_ADDR_1) + return cores_info->coresba2[cidx]; + } + else if (spidx == CORE_SLAVE_PORT_1) { + if (baidx == CORE_BASE_ADDR_0) + return cores_info->csp2ba[cidx]; + } + + SI_ERROR(("ai_addrspace: Need to parse 
the erom again to find %d base addr" + " in %d slave port\n", + baidx, spidx)); + + return 0; + +} + +/* Return the size of the nth address space in the current core +* Arguments: +* sih : Pointer to struct si_t +* spidx : slave port index +* baidx : base address index +*/ +uint32 +ai_addrspacesize(const si_t *sih, uint spidx, uint baidx) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + if (spidx == CORE_SLAVE_PORT_0) { + if (baidx == CORE_BASE_ADDR_0) + return cores_info->coresba_size[cidx]; + else if (baidx == CORE_BASE_ADDR_1) + return cores_info->coresba2_size[cidx]; + } + else if (spidx == CORE_SLAVE_PORT_1) { + if (baidx == CORE_BASE_ADDR_0) + return cores_info->csp2ba_size[cidx]; + } + + SI_ERROR(("ai_addrspacesize: Need to parse the erom again to find %d" + " base addr in %d slave port\n", + baidx, spidx)); + + return 0; +} + +uint +ai_flag(si_t *sih) +{ + const si_info_t *sii = SI_INFO(sih); +#if !defined(BCMDONGLEHOST) + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; +#endif + aidmp_t *ai; + + if (PMU_DMP()) { + uint idx, flag; + idx = sii->curidx; + ai_setcoreidx(sih, SI_CC_IDX); + flag = ai_flag_alt(sih); + ai_setcoreidx(sih, idx); + return flag; + } + + ai = sii->curwrap; + ASSERT(ai != NULL); + + return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f); +} + +uint +ai_flag_alt(const si_t *sih) +{ + const si_info_t *sii = SI_INFO(sih); + aidmp_t *ai = sii->curwrap; + + return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK); +} + +void +ai_setint(const si_t *sih, int siflag) +{ + BCM_REFERENCE(sih); + BCM_REFERENCE(siflag); + + /* TODO: Figure out how to set interrupt mask in ai */ +} + +uint +BCMPOSTTRAPFN(ai_wrap_reg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + const si_info_t *sii = SI_INFO(sih); + uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset); + + if (mask || val) { + uint32 w = R_REG(sii->osh, addr); + w &= ~mask; + w |= val; + W_REG(sii->osh, addr, w); + } + return (R_REG(sii->osh, addr)); +} + +uint +ai_corevendor(const si_t *sih) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + uint32 cia; + + cia = cores_info->cia[sii->curidx]; + return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT); +} + +uint +BCMPOSTTRAPFN(ai_corerev)(const si_t *sih) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + uint32 cib; + + cib = cores_info->cib[sii->curidx]; + return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT); +} + +uint +ai_corerev_minor(const si_t *sih) +{ + return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) & + SISF_MINORREV_D11_MASK; +} + +bool +BCMPOSTTRAPFN(ai_iscoreup)(const si_t *sih) +{ + const si_info_t *sii = SI_INFO(sih); + aidmp_t *ai = sii->curwrap; + + return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) && + ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. 
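+ *
+ * (Editor's note, an illustrative sketch that is not part of the original
+ * patch: the mask/set convention is a plain read-modify-write, so e.g.
+ *     ai_corereg(sih, coreidx, regoff, 0x0000ff00, 0x00001200);
+ * clears bits 8..15, sets them to 0x12 and returns the readback value,
+ * while mask == 0 and val == 0 turns the call into a pure read.)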
+ */ +uint +BCMPOSTTRAPFN(ai_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + volatile uint32 *r = NULL; + uint w; + bcm_int_bitmask_t intr_val; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx, sii->numcores)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, &intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) + + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + /* readback */ + w = R_REG(sii->osh, r); + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, &intr_val); + } + + return (w); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. 
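+ *
+ * (Editor's note, not original text: unlike ai_corereg(), this writeonly
+ * variant skips the trailing readback and returns the value it computed and
+ * wrote, or 0 when mask and val are both 0, which is presumably intended for
+ * registers where a trailing read is undesirable.)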
+ */ +uint +ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + volatile uint32 *r = NULL; + uint w = 0; + bcm_int_bitmask_t intr_val; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx, sii->numcores)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, &intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) + + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, &intr_val); + } + + return (w); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. 
+ */ +volatile uint32 * +BCMPOSTTRAPFN(ai_corereg_addr)(si_t *sih, uint coreidx, uint regoff) +{ + volatile uint32 *r = NULL; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx, sii->numcores)); + ASSERT(regoff < SI_CORE_SIZE); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + ASSERT(sii->curidx == coreidx); + r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff); + } + + return (r); +} + +void +ai_core_disable(const si_t *sih, uint32 bits) +{ + const si_info_t *sii = SI_INFO(sih); + volatile uint32 dummy; + uint32 status; + aidmp_t *ai; + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* if core is already in reset, just return */ + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) { + return; + } + + /* ensure there are no pending backplane operations */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + /* if pending backplane ops still, try waiting longer */ + if (status != 0) { + /* 300usecs was sufficient to allow backplane ops to clear for big hammer */ + /* during driver load we may need more time */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000); + /* if still pending ops, continue on and try disable anyway */ + /* this is in big hammer path, so don't call wl_reinit in this case... 
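+		 * (Editor's note, added for clarity: SPINWAIT(cond, us) is the OSL
+		 * busy-wait macro; it polls until cond goes false or roughly 'us'
+		 * microseconds elapse, so the code above gives the backplane 300us
+		 * and then a further 10ms to retire outstanding operations.)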
*/ +#ifdef BCMDBG + if (status != 0) { + SI_ERROR(("ai_core_disable: WARN: resetstatus=%0x on core disable\n", + status)); + } +#endif + } + + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + dummy = R_REG(sii->osh, &ai->resetctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + W_REG(sii->osh, &ai->ioctrl, bits); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(10); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +static void +BCMPOSTTRAPFN(_ai_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits) +{ + const si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + volatile uint32 dummy; + uint loop_counter = 10; + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + +#ifdef BCMDBG_ERR + if (dummy != 0) { + SI_ERROR(("_ai_core_reset: WARN1: resetstatus=0x%0x\n", dummy)); + } +#endif /* BCMDBG_ERR */ + + /* put core into reset state */ + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + OSL_DELAY(10); + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + + W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); +#ifdef UCM_CORRUPTION_WAR + if (si_coreid(sih) == D11_CORE_ID) { + /* Reset FGC */ + OSL_DELAY(1); + W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC))); + } +#endif /* UCM_CORRUPTION_WAR */ + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + +#ifdef BCMDBG_ERR + if (dummy != 0) + SI_ERROR(("_ai_core_reset: WARN2: resetstatus=0x%0x\n", dummy)); +#endif + + while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) { + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + +#ifdef BCMDBG_ERR + if (dummy != 0) + SI_ERROR(("_ai_core_reset: WARN3 resetstatus=0x%0x\n", dummy)); +#endif + + /* take core out of reset */ + W_REG(sii->osh, &ai->resetctrl, 0); + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + } + +#ifdef BCMDBG_ERR + if (loop_counter == 0) { + SI_ERROR(("_ai_core_reset: Failed to take core 0x%x out of reset\n", + si_coreid(sih))); + } +#endif + +#ifdef UCM_CORRUPTION_WAR + /* Pulse FGC after lifting Reset */ + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN)); +#else + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); +#endif /* UCM_CORRUPTION_WAR */ + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); +#ifdef UCM_CORRUPTION_WAR + if (si_coreid(sih) == D11_CORE_ID) { + /* Reset FGC */ + OSL_DELAY(1); + W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC))); + } +#endif /* UCM_CORRUPTION_WAR */ + OSL_DELAY(1); +} + +void +BCMPOSTTRAPFN(ai_core_reset)(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + uint idx = sii->curidx; + + if (cores_info->wrapba3[idx] != 0) { + ai_setcoreidx_3rdwrap(sih, idx); + _ai_core_reset(sih, bits, resetbits); + ai_setcoreidx(sih, idx); + } + + if (cores_info->wrapba2[idx] != 0) { + ai_setcoreidx_2ndwrap(sih, idx); + _ai_core_reset(sih, bits, 
resetbits); + ai_setcoreidx(sih, idx); + } + + _ai_core_reset(sih, bits, resetbits); +} + +#ifdef BOOKER_NIC400_INF +void +BCMPOSTTRAPFN(ai_core_reset_ext)(const si_t *sih, uint32 bits, uint32 resetbits) +{ + _ai_core_reset(sih, bits, resetbits); +} +#endif /* BOOKER_NIC400_INF */ + +void +ai_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val) +{ + const si_info_t *sii = SI_INFO(sih); +#if !defined(BCMDONGLEHOST) + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; +#endif + aidmp_t *ai; + uint32 w; + + if (PMU_DMP()) { + SI_ERROR(("ai_core_cflags_wo: Accessing PMU DMP register (ioctrl)\n")); + return; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } +} + +uint32 +BCMPOSTTRAPFN(ai_core_cflags)(const si_t *sih, uint32 mask, uint32 val) +{ + const si_info_t *sii = SI_INFO(sih); +#if !defined(BCMDONGLEHOST) + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; +#endif + aidmp_t *ai; + uint32 w; + + if (PMU_DMP()) { + SI_ERROR(("ai_core_cflags: Accessing PMU DMP register (ioctrl)\n")); + return 0; + } + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } + + return R_REG(sii->osh, &ai->ioctrl); +} + +uint32 +ai_core_sflags(const si_t *sih, uint32 mask, uint32 val) +{ + const si_info_t *sii = SI_INFO(sih); +#if !defined(BCMDONGLEHOST) + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; +#endif + aidmp_t *ai; + uint32 w; + + if (PMU_DMP()) { + SI_ERROR(("ai_core_sflags: Accessing PMU DMP register (ioctrl)\n")); + return 0; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val); + W_REG(sii->osh, &ai->iostatus, w); + } + + return R_REG(sii->osh, &ai->iostatus); +} + +#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP) +/* print interesting aidmp registers */ +void +ai_dumpregs(const si_t *sih, struct bcmstrbuf *b) +{ + const si_info_t *sii = SI_INFO(sih); + osl_t *osh; + aidmp_t *ai; + uint i; + uint32 prev_value = 0; + const axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + uint32 cfg_reg = 0; + uint bar0_win_offset = 0; + + osh = sii->osh; + + /* Save and restore wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } else { + cfg_reg = PCI_BAR0_WIN2; + bar0_win_offset = PCI_BAR0_WIN2_OFFSET; + } + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + + if (prev_value == ID32_INVALID) { + SI_PRINT(("ai_dumpregs, PCI_BAR0_WIN2 - %x\n", prev_value)); + return; + } + } + + bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n", + sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor); + + for (i = 0; i < sii->axi_num_wrappers; i++) { + + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0 window to bridge wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset); + } else { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + bcm_bprintf(b, 
"core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid, + axi_wrapper[i].rev, + axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER", + axi_wrapper[i].wrapper_addr); + + bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x " + "ioctrlwidth 0x%x iostatuswidth 0x%x\n" + "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n" + "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x " + "errlogaddrlo 0x%x errlogaddrhi 0x%x\n" + "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n" + "intstatus 0x%x config 0x%x itcr 0x%x\n\n", + R_REG(osh, &ai->ioctrlset), + R_REG(osh, &ai->ioctrlclear), + R_REG(osh, &ai->ioctrl), + R_REG(osh, &ai->iostatus), + R_REG(osh, &ai->ioctrlwidth), + R_REG(osh, &ai->iostatuswidth), + R_REG(osh, &ai->resetctrl), + R_REG(osh, &ai->resetstatus), + R_REG(osh, &ai->resetreadid), + R_REG(osh, &ai->resetwriteid), + R_REG(osh, &ai->errlogctrl), + R_REG(osh, &ai->errlogdone), + R_REG(osh, &ai->errlogstatus), + R_REG(osh, &ai->errlogaddrlo), + R_REG(osh, &ai->errlogaddrhi), + R_REG(osh, &ai->errlogid), + R_REG(osh, &ai->errloguser), + R_REG(osh, &ai->errlogflags), + R_REG(osh, &ai->intstatus), + R_REG(osh, &ai->config), + R_REG(osh, &ai->itcr)); + } + + /* Restore the initial wrapper space */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (prev_value && cfg_reg) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } + } +} +#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */ + +#ifdef BCMDBG +static void +_ai_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose) +{ + uint32 config; + + config = R_REG(osh, &ai->config); + SI_PRINT(("\nCore ID: 0x%x, addr 0x%x, config 0x%x\n", cid, addr, config)); + + if (config & AICFG_RST) + SI_PRINT(("resetctrl 0x%x, resetstatus 0x%x, resetreadid 0x%x, resetwriteid 0x%x\n", + R_REG(osh, &ai->resetctrl), R_REG(osh, &ai->resetstatus), + R_REG(osh, &ai->resetreadid), R_REG(osh, &ai->resetwriteid))); + + if (config & AICFG_IOC) + SI_PRINT(("ioctrl 0x%x, width %d\n", R_REG(osh, &ai->ioctrl), + R_REG(osh, &ai->ioctrlwidth))); + + if (config & AICFG_IOS) + SI_PRINT(("iostatus 0x%x, width %d\n", R_REG(osh, &ai->iostatus), + R_REG(osh, &ai->iostatuswidth))); + + if (config & AICFG_ERRL) { + SI_PRINT(("errlogctrl 0x%x, errlogdone 0x%x, errlogstatus 0x%x, intstatus 0x%x\n", + R_REG(osh, &ai->errlogctrl), R_REG(osh, &ai->errlogdone), + R_REG(osh, &ai->errlogstatus), R_REG(osh, &ai->intstatus))); + SI_PRINT(("errlogid 0x%x, errloguser 0x%x, errlogflags 0x%x, errlogaddr " + "0x%x/0x%x\n", + R_REG(osh, &ai->errlogid), R_REG(osh, &ai->errloguser), + R_REG(osh, &ai->errlogflags), R_REG(osh, &ai->errlogaddrhi), + R_REG(osh, &ai->errlogaddrlo))); + } + + if (verbose && (config & AICFG_OOB)) { + SI_PRINT(("oobselina30 0x%x, oobselina74 0x%x\n", + R_REG(osh, &ai->oobselina30), R_REG(osh, &ai->oobselina74))); + SI_PRINT(("oobselinb30 0x%x, oobselinb74 0x%x\n", + R_REG(osh, &ai->oobselinb30), R_REG(osh, &ai->oobselinb74))); + SI_PRINT(("oobselinc30 0x%x, oobselinc74 0x%x\n", + R_REG(osh, &ai->oobselinc30), R_REG(osh, &ai->oobselinc74))); + SI_PRINT(("oobselind30 0x%x, oobselind74 0x%x\n", + R_REG(osh, &ai->oobselind30), R_REG(osh, &ai->oobselind74))); + SI_PRINT(("oobselouta30 0x%x, oobselouta74 0x%x\n", + R_REG(osh, &ai->oobselouta30), R_REG(osh, &ai->oobselouta74))); + SI_PRINT(("oobseloutb30 0x%x, oobseloutb74 0x%x\n", + R_REG(osh, &ai->oobseloutb30), R_REG(osh, &ai->oobseloutb74))); + SI_PRINT(("oobseloutc30 0x%x, oobseloutc74 0x%x\n", + R_REG(osh, &ai->oobseloutc30), R_REG(osh, 
&ai->oobseloutc74))); + SI_PRINT(("oobseloutd30 0x%x, oobseloutd74 0x%x\n", + R_REG(osh, &ai->oobseloutd30), R_REG(osh, &ai->oobseloutd74))); + SI_PRINT(("oobsynca 0x%x, oobseloutaen 0x%x\n", + R_REG(osh, &ai->oobsynca), R_REG(osh, &ai->oobseloutaen))); + SI_PRINT(("oobsyncb 0x%x, oobseloutben 0x%x\n", + R_REG(osh, &ai->oobsyncb), R_REG(osh, &ai->oobseloutben))); + SI_PRINT(("oobsyncc 0x%x, oobseloutcen 0x%x\n", + R_REG(osh, &ai->oobsyncc), R_REG(osh, &ai->oobseloutcen))); + SI_PRINT(("oobsyncd 0x%x, oobseloutden 0x%x\n", + R_REG(osh, &ai->oobsyncd), R_REG(osh, &ai->oobseloutden))); + SI_PRINT(("oobaextwidth 0x%x, oobainwidth 0x%x, oobaoutwidth 0x%x\n", + R_REG(osh, &ai->oobaextwidth), R_REG(osh, &ai->oobainwidth), + R_REG(osh, &ai->oobaoutwidth))); + SI_PRINT(("oobbextwidth 0x%x, oobbinwidth 0x%x, oobboutwidth 0x%x\n", + R_REG(osh, &ai->oobbextwidth), R_REG(osh, &ai->oobbinwidth), + R_REG(osh, &ai->oobboutwidth))); + SI_PRINT(("oobcextwidth 0x%x, oobcinwidth 0x%x, oobcoutwidth 0x%x\n", + R_REG(osh, &ai->oobcextwidth), R_REG(osh, &ai->oobcinwidth), + R_REG(osh, &ai->oobcoutwidth))); + SI_PRINT(("oobdextwidth 0x%x, oobdinwidth 0x%x, oobdoutwidth 0x%x\n", + R_REG(osh, &ai->oobdextwidth), R_REG(osh, &ai->oobdinwidth), + R_REG(osh, &ai->oobdoutwidth))); + } +} + +void +ai_view(const si_t *sih, bool verbose) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + osl_t *osh; + aidmp_t *ai; + uint32 cid, addr; + + ai = sii->curwrap; + osh = sii->osh; + + if (PMU_DMP()) { + SI_ERROR(("Cannot access pmu DMP\n")); + return; + } + cid = cores_info->coreid[sii->curidx]; + addr = cores_info->wrapba[sii->curidx]; + _ai_view(osh, ai, cid, addr, verbose); +} + +void +ai_viewall(si_t *sih, bool verbose) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + osl_t *osh; + aidmp_t *ai; + uint32 cid, addr; + uint i; + + osh = sii->osh; + for (i = 0; i < sii->numcores; i++) { + si_setcoreidx(sih, i); + + if (PMU_DMP()) { + SI_ERROR(("Skipping pmu DMP\n")); + continue; + } + ai = sii->curwrap; + cid = cores_info->coreid[sii->curidx]; + addr = cores_info->wrapba[sii->curidx]; + _ai_view(osh, ai, cid, addr, verbose); + } +} +#endif /* BCMDBG */ + +void +ai_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout_exp, uint32 cid) +{ +#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC) + const si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 i; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) | + ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK); + +#ifdef AXI_TIMEOUTS_NIC + uint32 prev_value = 0; + osl_t *osh = sii->osh; + uint32 cfg_reg = 0; + uint32 offset = 0; +#endif /* AXI_TIMEOUTS_NIC */ + + if ((sii->axi_num_wrappers == 0) || +#ifdef AXI_TIMEOUTS_NIC + (!PCIE(sii)) || +#endif /* AXI_TIMEOUTS_NIC */ + FALSE) { + SI_VMSG((" iai_update_backplane_timeouts, axi_num_wrappers:%d, Is_PCIE:%d," + " BUS_TYPE:%d, ID:%x\n", + sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return; + } + +#ifdef AXI_TIMEOUTS_NIC + /* Save and restore the wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN1(sii)) { + cfg_reg = PCI_BAR0_WIN2; + offset = PCI_BAR0_WIN2_OFFSET; + } else if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } + else { + ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2"); + } + + 
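+		/* Editor's note (assumption, not from the original patch): the
+		 * errlogctrl word composed above packs the enable bit at
+		 * AIELC_TO_ENAB_SHIFT and the timeout exponent at AIELC_TO_EXP_SHIFT
+		 * before being programmed into each slave wrapper below; the wrapper
+		 * timeout is then presumably on the order of 2^timeout_exp backplane
+		 * clock cycles.
+		 */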
prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + if (prev_value == ID32_INVALID) { + SI_PRINT(("ai_update_backplane_timeouts, PCI_BAR0_WIN2 - %x\n", + prev_value)); + return; + } + } +#endif /* AXI_TIMEOUTS_NIC */ + + for (i = 0; i < sii->axi_num_wrappers; ++i) { + /* WAR for wrong EROM entries w.r.t slave and master wrapper + * for ADB bridge core...so checking actual wrapper config to determine type + * http://jira.broadcom.com/browse/HW4388-905 + */ + if ((cid == 0 || cid == ADB_BRIDGE_ID) && + (axi_wrapper[i].cid == ADB_BRIDGE_ID)) { + /* WAR is applicable only to 89B0 and 89C0 */ + if (CCREV(sih->ccrev) == 70) { + ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr; + if (R_REG(sii->osh, &ai->config) & WRAPPER_TIMEOUT_CONFIG) { + axi_wrapper[i].wrapper_type = AI_SLAVE_WRAPPER; + } else { + axi_wrapper[i].wrapper_type = AI_MASTER_WRAPPER; + } + } + } + if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER || ((BCM4389_CHIP(sih->chip) || + BCM4388_CHIP(sih->chip)) && + (axi_wrapper[i].wrapper_addr == WL_BRIDGE1_S || + axi_wrapper[i].wrapper_addr == WL_BRIDGE2_S))) { + SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n", + axi_wrapper[i].mfg, + axi_wrapper[i].cid, + axi_wrapper[i].wrapper_addr)); + continue; + } + + /* Update only given core if requested */ + if ((cid != 0) && (axi_wrapper[i].cid != cid)) { + continue; + } + +#ifdef AXI_TIMEOUTS_NIC + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0_CORE2_WIN2 to bridge wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */ + ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset); + } + else +#endif /* AXI_TIMEOUTS_NIC */ + { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + W_REG(sii->osh, &ai->errlogctrl, errlogctrl); + + SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n", + axi_wrapper[i].mfg, + axi_wrapper[i].cid, + axi_wrapper[i].wrapper_addr, + R_REG(sii->osh, &ai->errlogctrl))); + } + +#ifdef AXI_TIMEOUTS_NIC + /* Restore the initial wrapper space */ + if (prev_value) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } +#endif /* AXI_TIMEOUTS_NIC */ + +#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */ +} + +#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) + +/* slave error is ignored, so account for those cases */ +static uint32 si_ignore_errlog_cnt = 0; + +static bool +BCMPOSTTRAPFN(ai_ignore_errlog)(const si_info_t *sii, const aidmp_t *ai, + uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts) +{ + uint32 ignore_errsts = AIELS_SLAVE_ERR; + uint32 ignore_errsts_2 = 0; + uint32 ignore_hi = BT_CC_SPROM_BADREG_HI; + uint32 ignore_lo = BT_CC_SPROM_BADREG_LO; + uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE; + bool address_check = TRUE; + uint32 axi_id = 0; + uint32 axi_id2 = 0; + bool extd_axi_id_mask = FALSE; + uint32 axi_id_mask; + + SI_PRINT(("err check: core %p, error %d, axi id 0x%04x, addr(0x%08x:%08x)\n", + ai, errsts, err_axi_id, hi_addr, lo_addr)); + + /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */ + switch (CHIPID(sii->pub.chip)) { +#if defined(BT_WLAN_REG_ON_WAR) + /* + * 4389B0/C0 - WL and BT turn on WAR, ignore AXI error originating from + * AHB-AXI bridge i.e, any slave error or timeout from BT access + */ + case BCM4389_CHIP_GRPID: + axi_id = BCM4389_BT_AXI_ID; + ignore_errsts = AIELS_SLAVE_ERR; + axi_id2 = BCM4389_BT_AXI_ID; + ignore_errsts_2 = AIELS_TIMEOUT; + address_check = FALSE; + extd_axi_id_mask = TRUE; + break; 
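+	/* Editor's note (illustrative sketch, not original text): after this
+	 * switch the ignore decision reduces to tuple matching, effectively
+	 *   ignore = ((err_axi_id & axi_id_mask) == axi_id &&
+	 *             errsts == ignore_errsts) ||
+	 *            ((err_axi_id & axi_id_mask) == axi_id2 &&
+	 *             errsts == ignore_errsts_2);
+	 * optionally narrowed by the BT_CC_SPROM address range check.
+	 */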
+#endif /* BT_WLAN_REG_ON_WAR */ +#ifdef BTOVERPCIE + case BCM4388_CHIP_GRPID: + axi_id = BCM4388_BT_AXI_ID; + /* For BT over PCIE, ignore any slave error from BT. */ + /* No need to check any address range */ + address_check = FALSE; + ignore_errsts_2 = AIELS_DECODE; + break; + case BCM4369_CHIP_GRPID: + axi_id = BCM4369_BT_AXI_ID; + /* For BT over PCIE, ignore any slave error from BT. */ + /* No need to check any address range */ + address_check = FALSE; + ignore_errsts_2 = AIELS_DECODE; + break; +#endif /* BTOVERPCIE */ + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: +#ifdef BTOVERPCIE + axi_id = BCM4378_BT_AXI_ID; + /* For BT over PCIE, ignore any slave error from BT. */ + /* No need to check any address range */ + address_check = FALSE; +#endif /* BTOVERPCIE */ + axi_id2 = BCM4378_ARM_PREFETCH_AXI_ID; + extd_axi_id_mask = TRUE; + ignore_errsts_2 = AIELS_DECODE; + break; +#ifdef USE_HOSTMEM + case BCM43602_CHIP_ID: + axi_id = BCM43602_BT_AXI_ID; + address_check = FALSE; + break; +#endif /* USE_HOSTMEM */ + default: + return FALSE; + } + + axi_id_mask = extd_axi_id_mask ? AI_ERRLOGID_AXI_ID_MASK_EXTD : AI_ERRLOGID_AXI_ID_MASK; + + /* AXI ID check */ + err_axi_id &= axi_id_mask; + errsts &= AIELS_ERROR_MASK; + + /* check the ignore error cases. 2 checks */ + if (!(((err_axi_id == axi_id) && (errsts == ignore_errsts)) || + ((err_axi_id == axi_id2) && (errsts == ignore_errsts_2)))) { + /* not the error ignore cases */ + return FALSE; + + } + + /* check the specific address checks now, if specified */ + if (address_check) { + /* address range check */ + if ((hi_addr != ignore_hi) || + (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) { + return FALSE; + } + } + + SI_PRINT(("err check: ignored\n")); + return TRUE; +} +#endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */ + +#ifdef AXI_TIMEOUTS_NIC + +/* Function to return the APB bridge details corresponding to the core */ +static bool +ai_get_apb_bridge(const si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreunit) +{ + uint i; + uint32 core_base, core_end; + const si_info_t *sii = SI_INFO(sih); + static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0; + uint32 tmp_coreunit = 0; + const si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) + return FALSE; + + /* Most of the time apb bridge query will be for d11 core. 
+ * Maintain the last cache and return if found rather than iterating the table + */ + if (coreidx_cached == coreidx) { + *apb_id = apb_id_cached; + *apb_coreunit = apb_coreunit_cached; + return TRUE; + } + + core_base = cores_info->coresba[coreidx]; + core_end = core_base + cores_info->coresba_size[coreidx]; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == APB_BRIDGE_ID) { + uint32 apb_base; + uint32 apb_end; + + apb_base = cores_info->coresba[i]; + apb_end = apb_base + cores_info->coresba_size[i]; + + if ((core_base >= apb_base) && + (core_end <= apb_end)) { + /* Current core is attached to this APB bridge */ + *apb_id = apb_id_cached = APB_BRIDGE_ID; + *apb_coreunit = apb_coreunit_cached = tmp_coreunit; + coreidx_cached = coreidx; + return TRUE; + } + /* Increment the coreunit */ + tmp_coreunit++; + } + } + + return FALSE; +} + +uint32 +ai_clear_backplane_to_fast(si_t *sih, void *addr) +{ + const si_info_t *sii = SI_INFO(sih); + volatile const void *curmap = sii->curmap; + bool core_reg = FALSE; + + /* Use fast path only for core register access */ + if (((uintptr)addr >= (uintptr)curmap) && + ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) { + /* address being accessed is within current core reg map */ + core_reg = TRUE; + } + + if (core_reg) { + uint32 apb_id, apb_coreunit; + + if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub), + &apb_id, &apb_coreunit) == TRUE) { + /* Found the APB bridge corresponding to current core, + * Check for bus errors in APB wrapper + */ + return ai_clear_backplane_to_per_core(sih, + apb_id, apb_coreunit, NULL); + } + } + + /* Default is to poll for errors on all slave wrappers */ + return si_clear_backplane_to(sih); +} +#endif /* AXI_TIMEOUTS_NIC */ + +#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) +static bool g_disable_backplane_logs = FALSE; + +static uint32 last_axi_error = AXI_WRAP_STS_NONE; +static uint32 last_axi_error_log_status = 0; +static uint32 last_axi_error_core = 0; +static uint32 last_axi_error_wrap = 0; +static uint32 last_axi_errlog_lo = 0; +static uint32 last_axi_errlog_hi = 0; +static uint32 last_axi_errlog_id = 0; + +/* + * API to clear the back plane timeout per core. + * Caller may pass optional wrapper address. If present this will be used as + * the wrapper base address. If wrapper base address is provided then caller + * must provide the coreid also. + * If both coreid and wrapper is zero, then err status of current bridge + * will be verified. + */ +uint32 +BCMPOSTTRAPFN(ai_clear_backplane_to_per_core)(si_t *sih, uint coreid, uint coreunit, void *wrap) +{ + int ret = AXI_WRAP_STS_NONE; + aidmp_t *ai = NULL; + uint32 errlog_status = 0; + const si_info_t *sii = SI_INFO(sih); + uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0; + uint32 current_coreidx = si_coreidx(sih); + uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit); + +#if defined(AXI_TIMEOUTS_NIC) + si_axi_error_t * axi_error = sih->err_info ? 
+		&sih->err_info->axi_error[sih->err_info->count] : NULL;
+#endif /* AXI_TIMEOUTS_NIC */
+	bool restore_core = FALSE;
+
+	if ((sii->axi_num_wrappers == 0) ||
+#ifdef AXI_TIMEOUTS_NIC
+		(!PCIE(sii)) ||
+#endif /* AXI_TIMEOUTS_NIC */
+		FALSE) {
+		SI_VMSG(("ai_clear_backplane_to_per_core, axi_num_wrappers:%d, Is_PCIE:%d,"
+			" BUS_TYPE:%d, ID:%x\n",
+			sii->axi_num_wrappers, PCIE(sii),
+			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+		return AXI_WRAP_STS_NONE;
+	}
+
+	if (wrap != NULL) {
+		ai = (aidmp_t *)wrap;
+	} else if (coreid && (target_coreidx != current_coreidx)) {
+
+		if (ai_setcoreidx(sih, target_coreidx) == NULL) {
+			/* Unable to set the core */
+			SI_PRINT(("Set Core Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
+				coreid, coreunit, target_coreidx));
+			errlog_lo = target_coreidx;
+			ret = AXI_WRAP_STS_SET_CORE_FAIL;
+			goto end;
+		}
+
+		restore_core = TRUE;
+		ai = (aidmp_t *)si_wrapperregs(sih);
+	} else {
+		/* Read error status of current wrapper */
+		ai = (aidmp_t *)si_wrapperregs(sih);
+
+		/* Update CoreID to the current Core ID */
+		coreid = si_coreid(sih);
+	}
+
+	/* read error log status */
+	errlog_status = R_REG(sii->osh, &ai->errlogstatus);
+
+	if (errlog_status == ID32_INVALID) {
+		/* Do not try to peek further */
+		SI_PRINT(("ai_clear_backplane_to_per_core, errlogstatus:%x - Slave Wrapper:%x\n",
+			errlog_status, coreid));
+		ret = AXI_WRAP_STS_WRAP_RD_ERR;
+		errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+		goto end;
+	}
+
+	if ((errlog_status & AIELS_ERROR_MASK) != 0) {
+		uint32 tmp;
+		uint32 count = 0;
+		/* set ErrDone to clear the condition */
+		W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+		/* SPINWAIT on errlogstatus timeout status bits */
+		while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_ERROR_MASK) {
+
+			if (tmp == ID32_INVALID) {
+				SI_PRINT(("ai_clear_backplane_to_per_core: prev errlogstatus:%x,"
+					" errlogstatus:%x\n",
+					errlog_status, tmp));
+				ret = AXI_WRAP_STS_WRAP_RD_ERR;
+				errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+				goto end;
+			}
+			/*
+			 * Clear again, to avoid getting stuck in the loop, if a new error
+			 * is logged after we cleared the first timeout
+			 */
+			W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+			count++;
+			OSL_DELAY(10);
+			if ((10 * count) > AI_REG_READ_TIMEOUT) {
+				errlog_status = tmp;
+				break;
+			}
+		}
+
+		errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
+		errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
+		errlog_id = R_REG(sii->osh, &ai->errlogid);
+		errlog_flags = R_REG(sii->osh, &ai->errlogflags);
+
+		/* we are already in the error path, so OK to check for the slave error */
+		if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
+			errlog_status)) {
+			si_ignore_errlog_cnt++;
+			goto end;
+		}
+
+		/* only reset APB Bridge on timeout (not slave error, or dec error) */
+		switch (errlog_status & AIELS_ERROR_MASK) {
+			case AIELS_SLAVE_ERR:
+				SI_PRINT(("AXI slave error\n"));
+				ret |= AXI_WRAP_STS_SLAVE_ERR;
+				break;
+
+			case AIELS_TIMEOUT:
+				ai_reset_axi_to(sii, ai);
+				ret |= AXI_WRAP_STS_TIMEOUT;
+				break;
+
+			case AIELS_DECODE:
+				SI_PRINT(("AXI decode error\n"));
+#ifdef USE_HOSTMEM
+				/* Ignore known cases of CR4 prefetch abort bugs */
+				if ((errlog_id & (BCM_AXI_ID_MASK | BCM_AXI_ACCESS_TYPE_MASK)) !=
+					(BCM43xx_AXI_ACCESS_TYPE_PREFETCH | BCM43xx_CR4_AXI_ID))
+#endif
+				{
+					ret |= AXI_WRAP_STS_DECODE_ERR;
+				}
+				break;
+			default:
+				ASSERT(0);	/* should be impossible */
+		}
+
+		if (errlog_status & AIELS_MULTIPLE_ERRORS) {
+			SI_PRINT(("Multiple AXI Errors\n"));
+			/* Set multiple errors bit only if actual error is not ignored */
+			if (ret) {
+				ret |= AXI_WRAP_STS_MULTIPLE_ERRORS;
+			}
+		}
+
+		SI_PRINT(("\tCoreID: %x\n", coreid));
+		SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
+			", status 0x%08x\n",
+			errlog_lo, errlog_hi, errlog_id, errlog_flags,
+			errlog_status));
+	}
+
+end:
+	if (ret != AXI_WRAP_STS_NONE) {
+		last_axi_error = ret;
+		last_axi_error_log_status = errlog_status;
+		last_axi_error_core = coreid;
+		last_axi_error_wrap = (uint32)ai;
+		last_axi_errlog_lo = errlog_lo;
+		last_axi_errlog_hi = errlog_hi;
+		last_axi_errlog_id = errlog_id;
+	}
+
+#if defined(AXI_TIMEOUTS_NIC)
+	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
+		axi_error->error = ret;
+		axi_error->coreid = coreid;
+		axi_error->errlog_lo = errlog_lo;
+		axi_error->errlog_hi = errlog_hi;
+		axi_error->errlog_id = errlog_id;
+		axi_error->errlog_flags = errlog_flags;
+		axi_error->errlog_status = errlog_status;
+		sih->err_info->count++;
+
+		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+			SI_PRINT(("AXI Error log overflow\n"));
+		}
+	}
+#endif /* AXI_TIMEOUTS_NIC */
+
+	if (restore_core) {
+		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
+			/* Unable to set the core */
+			return ID32_INVALID;
+		}
+	}
+
+	return ret;
+}
+
+/* reset AXI timeout */
+static void
+BCMPOSTTRAPFN(ai_reset_axi_to)(const si_info_t *sii, aidmp_t *ai)
+{
+	/* reset APB Bridge */
+	OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	/* sync write */
+	(void)R_REG(sii->osh, &ai->resetctrl);
+	/* clear Reset bit */
+	AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
+	/* sync write */
+	(void)R_REG(sii->osh, &ai->resetctrl);
+	SI_PRINT(("AXI timeout\n"));
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
+		SI_PRINT(("reset failed on wrapper %p\n", ai));
+		g_disable_backplane_logs = TRUE;
+	}
+}
+
+void
+BCMPOSTTRAPFN(ai_wrapper_get_last_error)(const si_t *sih, uint32 *error_status, uint32 *core,
+	uint32 *lo, uint32 *hi, uint32 *id)
+{
+	*error_status = last_axi_error_log_status;
+	*core = last_axi_error_core;
+	*lo = last_axi_errlog_lo;
+	*hi = last_axi_errlog_hi;
+	*id = last_axi_errlog_id;
+}
+
+/* Function to check whether AXI timeout has been registered on a core */
+uint32
+ai_get_axi_timeout_reg(void)
+{
+	return (GOODREGS(last_axi_errlog_lo) ? last_axi_errlog_lo : 0);
+}
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+uint32
+BCMPOSTTRAPFN(ai_findcoreidx_by_axiid)(const si_t *sih, uint32 axiid)
+{
+	uint coreid = 0;
+	uint coreunit = 0;
+	const axi_to_coreidx_t *axi2coreidx = NULL;
+	switch (CHIPID(sih->chip)) {
+	case BCM4369_CHIP_GRPID:
+		axi2coreidx = axi2coreidx_4369;
+		break;
+	default:
+		SI_PRINT(("Chipid mapping not found\n"));
+		break;
+	}
+
+	if (!axi2coreidx)
+		return (BADIDX);
+
+	coreid = axi2coreidx[axiid].coreid;
+	coreunit = axi2coreidx[axiid].coreunit;
+
+	return si_findcoreidx(sih, coreid, coreunit);
+
+}
+
+/*
+ * This API polls all slave wrappers for errors and returns a bit map of
+ * all reported errors.
+ * return - bit map of
+ *	AXI_WRAP_STS_NONE
+ *	AXI_WRAP_STS_TIMEOUT
+ *	AXI_WRAP_STS_SLAVE_ERR
+ *	AXI_WRAP_STS_DECODE_ERR
+ *	AXI_WRAP_STS_PCI_RD_ERR
+ *	AXI_WRAP_STS_WRAP_RD_ERR
+ *	AXI_WRAP_STS_SET_CORE_FAIL
+ * On timeout detection, the corresponding bridge will be reset to
+ * unblock the bus.
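+ *
+ * (Editor's note, illustrative only: since the result is a bit map, a
+ * return value of (AXI_WRAP_STS_TIMEOUT | AXI_WRAP_STS_SLAVE_ERR) means at
+ * least one wrapper timed out and a wrapper also reported a slave error.)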
+ * Error reported in each wrapper can be retrieved using the API + * si_get_axi_errlog_info() + */ +uint32 +BCMPOSTTRAPFN(ai_clear_backplane_to)(si_t *sih) +{ + uint32 ret = 0; +#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) + const si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 i; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + +#ifdef AXI_TIMEOUTS_NIC + uint32 prev_value = 0; + osl_t *osh = sii->osh; + uint32 cfg_reg = 0; + uint32 offset = 0; + + if ((sii->axi_num_wrappers == 0) || (!PCIE(sii))) +#else + if (sii->axi_num_wrappers == 0) +#endif + { + SI_VMSG(("ai_clear_backplane_to, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d," + " ID:%x\n", + sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return AXI_WRAP_STS_NONE; + } + +#ifdef AXI_TIMEOUTS_NIC + /* Save and restore wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN1(sii)) { + cfg_reg = PCI_BAR0_WIN2; + offset = PCI_BAR0_WIN2_OFFSET; + } else if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } + else { + ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2"); + } + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + + if (prev_value == ID32_INVALID) { + si_axi_error_t * axi_error = + sih->err_info ? + &sih->err_info->axi_error[sih->err_info->count] : + NULL; + + SI_PRINT(("ai_clear_backplane_to, PCI_BAR0_WIN2 - %x\n", prev_value)); + if (axi_error) { + axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR; + axi_error->errlog_lo = cfg_reg; + sih->err_info->count++; + + if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) { + sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1; + SI_PRINT(("AXI Error log overflow\n")); + } + } + + return ret; + } + } +#endif /* AXI_TIMEOUTS_NIC */ + + for (i = 0; i < sii->axi_num_wrappers; ++i) { + uint32 tmp; + + if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) { + continue; + } + +#ifdef AXI_TIMEOUTS_NIC + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0_CORE2_WIN2 to bridge wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */ + ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset); + } + else +#endif /* AXI_TIMEOUTS_NIC */ + { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0, + DISCARD_QUAL(ai, void)); + + ret |= tmp; + } + +#ifdef AXI_TIMEOUTS_NIC + /* Restore the initial wrapper space */ + if (prev_value) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } +#endif /* AXI_TIMEOUTS_NIC */ + +#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */ + + return ret; +} + +uint +ai_num_slaveports(const si_t *sih, uint coreidx) +{ + const si_info_t *sii = SI_INFO(sih); + const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info; + uint32 cib; + + cib = cores_info->cib[coreidx]; + return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT); +} + +#ifdef UART_TRAP_DBG +void +ai_dump_APB_Bridge_registers(const si_t *sih) +{ + aidmp_t *ai; + const si_info_t *sii = SI_INFO(sih); + + ai = (aidmp_t *)sii->br_wrapba[0]; + printf("APB Bridge 0\n"); + printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x", + R_REG(sii->osh, &ai->errlogaddrlo), + R_REG(sii->osh, &ai->errlogaddrhi), + R_REG(sii->osh, &ai->errlogid), + R_REG(sii->osh, &ai->errlogflags)); + printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus)); +} +#endif /* UART_TRAP_DBG */ + +void +ai_force_clocks(const si_t *sih, uint 
clock_state)
+{
+	const si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai, *ai_sec = NULL;
+	volatile uint32 dummy;
+	uint32 ioctrl;
+	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+	if (cores_info->wrapba2[sii->curidx])
+		ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+	if (clock_state == FORCE_CLK_ON) {
+		ioctrl = R_REG(sii->osh, &ai->ioctrl);
+		W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
+		dummy = R_REG(sii->osh, &ai->ioctrl);
+		BCM_REFERENCE(dummy);
+		if (ai_sec) {
+			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
+			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
+			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
+			BCM_REFERENCE(dummy);
+		}
+	} else {
+		ioctrl = R_REG(sii->osh, &ai->ioctrl);
+		W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
+		dummy = R_REG(sii->osh, &ai->ioctrl);
+		BCM_REFERENCE(dummy);
+		if (ai_sec) {
+			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
+			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
+			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
+			BCM_REFERENCE(dummy);
+		}
+	}
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+}
+
+#ifdef DONGLEBUILD
+/*
+ * This is not declared as static const, although that would be the right
+ * thing to do: if it were static const, the compile/link step would place
+ * the array in a read-only section. This array is used to identify the
+ * registers which are dumped during trap processing, and the .rodata buffer
+ * is usually reused for the trap buffer itself, so for now it is just static.
+ */
+static uint32 BCMPOST_TRAP_RODATA(wrapper_offsets_to_dump)[] = {
+	OFFSETOF(aidmp_t, ioctrlset),
+	OFFSETOF(aidmp_t, ioctrlclear),
+	OFFSETOF(aidmp_t, ioctrl),
+	OFFSETOF(aidmp_t, iostatus),
+	OFFSETOF(aidmp_t, ioctrlwidth),
+	OFFSETOF(aidmp_t, iostatuswidth),
+	OFFSETOF(aidmp_t, resetctrl),
+	OFFSETOF(aidmp_t, resetstatus),
+	OFFSETOF(aidmp_t, resetreadid),
+	OFFSETOF(aidmp_t, resetwriteid),
+	OFFSETOF(aidmp_t, errlogctrl),
+	OFFSETOF(aidmp_t, errlogdone),
+	OFFSETOF(aidmp_t, errlogstatus),
+	OFFSETOF(aidmp_t, errlogaddrlo),
+	OFFSETOF(aidmp_t, errlogaddrhi),
+	OFFSETOF(aidmp_t, errlogid),
+	OFFSETOF(aidmp_t, errloguser),
+	OFFSETOF(aidmp_t, errlogflags),
+	OFFSETOF(aidmp_t, intstatus),
+	OFFSETOF(aidmp_t, config),
+	OFFSETOF(aidmp_t, itipoobaout),
+	OFFSETOF(aidmp_t, itipoobbout),
+	OFFSETOF(aidmp_t, itipoobcout),
+	OFFSETOF(aidmp_t, itipoobdout)};
+
+#ifdef ETD
+
+/* This is used for dumping wrapper registers for etd when axierror happens.
+ * This should match with the structure hnd_ext_trap_bp_err_t + */ +static uint32 BCMPOST_TRAP_RODATA(etd_wrapper_offsets_axierr)[] = { + OFFSETOF(aidmp_t, ioctrl), + OFFSETOF(aidmp_t, iostatus), + OFFSETOF(aidmp_t, resetctrl), + OFFSETOF(aidmp_t, resetstatus), + OFFSETOF(aidmp_t, resetreadid), + OFFSETOF(aidmp_t, resetwriteid), + OFFSETOF(aidmp_t, errlogctrl), + OFFSETOF(aidmp_t, errlogdone), + OFFSETOF(aidmp_t, errlogstatus), + OFFSETOF(aidmp_t, errlogaddrlo), + OFFSETOF(aidmp_t, errlogaddrhi), + OFFSETOF(aidmp_t, errlogid), + OFFSETOF(aidmp_t, errloguser), + OFFSETOF(aidmp_t, errlogflags), + OFFSETOF(aidmp_t, itipoobaout), + OFFSETOF(aidmp_t, itipoobbout), + OFFSETOF(aidmp_t, itipoobcout), + OFFSETOF(aidmp_t, itipoobdout)}; +#endif /* ETD */ + +/* wrapper function to access the global array wrapper_offsets_to_dump */ +static uint32 +BCMRAMFN(ai_get_sizeof_wrapper_offsets_to_dump)(void) +{ + return (sizeof(wrapper_offsets_to_dump)); +} + +static uint32 +BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr)(uint32 **offset) +{ + uint32 arr_size = ARRAYSIZE(wrapper_offsets_to_dump); + + *offset = &wrapper_offsets_to_dump[0]; + return arr_size; +} + +uint32 +BCMATTACHFN(ai_wrapper_dump_buf_size)(const si_t *sih) +{ + uint32 buf_size = 0; + uint32 wrapper_count = 0; + const si_info_t *sii = SI_INFO(sih); + + wrapper_count = sii->axi_num_wrappers; + if (wrapper_count == 0) + return 0; + + /* cnt indicates how many registers, tag_id 0 will say these are address/value */ + /* address/value pairs */ + buf_size += 2 * (ai_get_sizeof_wrapper_offsets_to_dump() * wrapper_count); + + return buf_size; +} + +static uint32* +BCMPOSTTRAPFN(ai_wrapper_dump_binary_one)(const si_info_t *sii, uint32 *p32, uint32 wrap_ba) +{ + uint i; + uint32 *addr; + uint32 arr_size; + uint32 *offset_base; + + arr_size = ai_get_wrapper_base_addr(&offset_base); + + for (i = 0; i < arr_size; i++) { + addr = (uint32 *)(wrap_ba + *(offset_base + i)); + *p32++ = (uint32)addr; + *p32++ = R_REG(sii->osh, addr); + } + return p32; +} + +#if defined(ETD) +static uint32 +BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr_etd_axierr)(uint32 **offset) +{ + uint32 arr_size = ARRAYSIZE(etd_wrapper_offsets_axierr); + + *offset = &etd_wrapper_offsets_axierr[0]; + return arr_size; +} + +uint32 +BCMPOSTTRAPFN(ai_wrapper_dump_last_timeout)(const si_t *sih, uint32 *error, uint32 *core, + uint32 *ba, uchar *p) +{ +#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) + uint32 *p32; + uint32 wrap_ba = last_axi_error_wrap; + uint i; + uint32 *addr; + + const si_info_t *sii = SI_INFO(sih); + + if (last_axi_error != AXI_WRAP_STS_NONE) + { + if (wrap_ba) + { + p32 = (uint32 *)p; + uint32 arr_size; + uint32 *offset_base; + + arr_size = ai_get_wrapper_base_addr_etd_axierr(&offset_base); + for (i = 0; i < arr_size; i++) { + addr = (uint32 *)(wrap_ba + *(offset_base + i)); + *p32++ = R_REG(sii->osh, addr); + } + } + *error = last_axi_error; + *core = last_axi_error_core; + *ba = wrap_ba; + } +#else + *error = 0; + *core = 0; + *ba = 0; +#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */ + return 0; +} +#endif /* ETD */ + +uint32 +BCMPOSTTRAPFN(ai_wrapper_dump_binary)(const si_t *sih, uchar *p) +{ + uint32 *p32 = (uint32 *)p; + uint32 i; + const si_info_t *sii = SI_INFO(sih); + + for (i = 0; i < sii->axi_num_wrappers; i++) { + p32 = ai_wrapper_dump_binary_one(sii, p32, sii->axi_wrapper[i].wrapper_addr); + } + return 0; +} + +bool +BCMPOSTTRAPFN(ai_check_enable_backplane_log)(const si_t *sih) +{ +#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) + if 
(g_disable_backplane_logs) {
+		return FALSE;
+	}
+	else {
+		return TRUE;
+	}
+#else /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+	return FALSE;
+#endif /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+}
+#endif /* DONGLEBUILD */
diff --git a/bcmdhd.101.10.361.x/bcm_app_utils.c b/bcmdhd.101.10.361.x/bcm_app_utils.c
new file mode 100755
index 0000000..62d0507
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcm_app_utils.c
@@ -0,0 +1,1276 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#include
+
+#ifdef BCMDRIVER
+#include
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else /* BCMDRIVER */
+#include
+#include
+#include
+#include
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+#include
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include	/* For wlexe/Makefile.wlm_dll */
+#endif
+
+#include
+#include
+#include
+
+#ifndef BCMDRIVER
+/* Take an array of measurements representing a single channel over time and return
+   a summary. Currently implemented as a simple average but could easily evolve
+   into more complex algorithms.
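+   For example, with percent set and totals of duration = 1000 and
+   congest_obss = 250 over the kept seconds, the summary reports
+   congest_obss = 250 * 100 / 1000 = 25, i.e. 25% of the measured airtime.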
+*/
+cca_congest_channel_req_t *
+cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent)
+{
+	int sec;
+	cca_congest_t totals;
+
+	totals.duration = 0;
+	totals.congest_ibss = 0;
+	totals.congest_obss = 0;
+	totals.interference = 0;
+	avg->num_secs = 0;
+
+	for (sec = 0; sec < input->num_secs; sec++) {
+		if (input->secs[sec].duration) {
+			totals.duration += input->secs[sec].duration;
+			totals.congest_ibss += input->secs[sec].congest_ibss;
+			totals.congest_obss += input->secs[sec].congest_obss;
+			totals.interference += input->secs[sec].interference;
+			avg->num_secs++;
+		}
+	}
+	avg->chanspec = input->chanspec;
+
+	if (!avg->num_secs || !totals.duration)
+		return (avg);
+
+	if (percent) {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration;
+		avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration;
+		avg->secs[0].interference = totals.interference * 100/totals.duration;
+	} else {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs;
+		avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs;
+		avg->secs[0].interference = totals.interference / avg->num_secs;
+	}
+
+	return (avg);
+}
+
+static void
+cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos)
+{
+	int i;
+	for (*left = 0, i = 0; i < num_bits; i++) {
+		if (isset(bitmap, i)) {
+			(*left)++;
+			*bit_pos = i;
+		}
+	}
+}
+
+static uint8
+spec_to_chan(chanspec_t chspec)
+{
+	uint8 center_ch, edge, primary, sb;
+
+	center_ch = CHSPEC_CHANNEL(chspec);
+
+	if (CHSPEC_IS20(chspec)) {
+		return center_ch;
+	} else {
+		/* the lower edge of the wide channel is half the bw from
+		 * the center channel.
+		 */
+		if (CHSPEC_IS40(chspec)) {
+			edge = center_ch - CH_20MHZ_APART;
+		} else {
+			/* must be 80MHz (until we support more) */
+			ASSERT(CHSPEC_IS80(chspec));
+			edge = center_ch - CH_40MHZ_APART;
+		}
+
+		/* find the channel number of the lowest 20MHz primary channel */
+		primary = edge + CH_10MHZ_APART;
+
+		/* select the actual subband */
+		sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT;
+		primary = primary + sb * CH_20MHZ_APART;
+
+		return primary;
+	}
+}
+
+/*
+   Take an array of measurements representing summaries of different channels.
+   Return a recommended channel.
+   Interference is evil, get rid of that first.
+   Then hunt for the lowest other-BSS traffic.
+   Don't forget that channels with low duration times may not have accurate readings.
+   For the moment, do not overwrite the input array.
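+   Filtering below proceeds in stages (band, measurement duration, preferred
+   2.4 GHz channels, interference); after each stage the surviving channel
+   count is rechecked and a stage-specific CCA_ERRNO_* code is returned if
+   the candidate set becomes empty.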
+*/
+int
+cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer)
+{
+	uint8 *bitmap = NULL;	/* 38 Max channels needs 5 bytes = 40 */
+	int i, left, winner, ret_val = 0;
+	uint32 min_obss = 1 << 30;
+	uint bitmap_sz;
+
+	bitmap_sz = CEIL(num_chans, NBBY);
+	bitmap = (uint8 *)malloc(bitmap_sz);
+	if (bitmap == NULL) {
+		printf("unable to allocate memory\n");
+		return BCME_NOMEM;
+	}
+
+	memset(bitmap, 0, bitmap_sz);
+	/* Initially, all channels are up for consideration */
+	for (i = 0; i < num_chans; i++) {
+		if (input[i]->chanspec)
+			setbit(bitmap, i);
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_TOO_FEW;
+		goto f_exit;
+	}
+
+	/* Filter for 2.4 GHz Band */
+	if (flags & CCA_FLAG_2G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS2G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for 5 GHz Band */
+	if (flags & CCA_FLAG_5G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS5G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for Duration */
+	if (!(flags & CCA_FLAG_IGNORE_DURATION)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].duration < CCA_THRESH_MILLI)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_DURATION;
+		goto f_exit;
+	}
+
+	/* Filter for channels 1, 6 and 11 on the 2.4 GHz band */
+	if (flags & CCA_FLAGS_PREFER_1_6_11) {
+		for (i = 0; i < num_chans; i++) {
+			/* evaluate each channel in turn rather than the stale
+			 * loop index left over from the previous filter
+			 */
+			int tmp_channel = spec_to_chan(input[i]->chanspec);
+			int is2g = CHSPEC_IS2G(input[i]->chanspec);
+			if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_PREF_CHAN;
+		goto f_exit;
+	}
+
+	/* Toss high interference */
+	if (!(flags & CCA_FLAG_IGNORE_INTERFER)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE)
+				clrbit(bitmap, i);
+		}
+		cca_info(bitmap, num_chans, &left, &i);
+		if (!left) {
+			ret_val = CCA_ERRNO_INTERFER;
+			goto f_exit;
+		}
+	}
+
+	/* Now find lowest obss */
+	winner = 0;
+	for (i = 0; i < num_chans; i++) {
+		if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) {
+			winner = i;
+			min_obss = input[i]->secs[0].congest_obss;
+		}
+	}
+	*answer = input[winner]->chanspec;
+f_exit:
+	free(bitmap);	/* free the allocated memory for bitmap */
+	return ret_val;
+}
+#endif /* !BCMDRIVER */
+
+/* Offset of cntmember, in units of sizeof(uint32), from the first cnt variable, txframe. */
+#define IDX_IN_WL_CNT_VER_6_T(cntmember) \
+	((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32))
+
+#define IDX_IN_WL_CNT_VER_7_T(cntmember) \
+	((OFFSETOF(wl_cnt_ver_7_t, cntmember) - OFFSETOF(wl_cnt_ver_7_t, txframe)) / sizeof(uint32))
+
+#define IDX_IN_WL_CNT_VER_11_T(cntmember) \
+	((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe)) \
+	/ sizeof(uint32))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_6_T \
+	((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables.
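+   WL_CNT_MCST_VAR_NUM covers 64 macstat words, so the (WL_CNT_MCST_VAR_NUM - 2)
+   term below removes exactly the 62 macstat counters wl_cnt_ver_6_t does carry.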
*/ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_7_T \ + ((sizeof(wl_cnt_ver_7_t) - 2 * sizeof(uint16)) / sizeof(uint32)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \ + ((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude 64 macstat cnt variables. */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \ + ((sizeof(wl_cnt_wlc_t)) / sizeof(uint32)) + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */ +static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = { + IDX_IN_WL_CNT_VER_6_T(txframe), + IDX_IN_WL_CNT_VER_6_T(txbyte), + IDX_IN_WL_CNT_VER_6_T(txretrans), + IDX_IN_WL_CNT_VER_6_T(txerror), + IDX_IN_WL_CNT_VER_6_T(txctl), + IDX_IN_WL_CNT_VER_6_T(txprshort), + IDX_IN_WL_CNT_VER_6_T(txserr), + IDX_IN_WL_CNT_VER_6_T(txnobuf), + IDX_IN_WL_CNT_VER_6_T(txnoassoc), + IDX_IN_WL_CNT_VER_6_T(txrunt), + IDX_IN_WL_CNT_VER_6_T(txchit), + IDX_IN_WL_CNT_VER_6_T(txcmiss), + IDX_IN_WL_CNT_VER_6_T(txuflo), + IDX_IN_WL_CNT_VER_6_T(txphyerr), + IDX_IN_WL_CNT_VER_6_T(txphycrs), + IDX_IN_WL_CNT_VER_6_T(rxframe), + IDX_IN_WL_CNT_VER_6_T(rxbyte), + IDX_IN_WL_CNT_VER_6_T(rxerror), + IDX_IN_WL_CNT_VER_6_T(rxctl), + IDX_IN_WL_CNT_VER_6_T(rxnobuf), + IDX_IN_WL_CNT_VER_6_T(rxnondata), + IDX_IN_WL_CNT_VER_6_T(rxbadds), + IDX_IN_WL_CNT_VER_6_T(rxbadcm), + IDX_IN_WL_CNT_VER_6_T(rxfragerr), + IDX_IN_WL_CNT_VER_6_T(rxrunt), + IDX_IN_WL_CNT_VER_6_T(rxgiant), + IDX_IN_WL_CNT_VER_6_T(rxnoscb), + IDX_IN_WL_CNT_VER_6_T(rxbadproto), + IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_6_T(rxbadda), + IDX_IN_WL_CNT_VER_6_T(rxfilter), + IDX_IN_WL_CNT_VER_6_T(rxoflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_6_T(dmade), + IDX_IN_WL_CNT_VER_6_T(dmada), + IDX_IN_WL_CNT_VER_6_T(dmape), + IDX_IN_WL_CNT_VER_6_T(reset), + IDX_IN_WL_CNT_VER_6_T(tbtt), + IDX_IN_WL_CNT_VER_6_T(txdmawar), + IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_6_T(txfrag), + IDX_IN_WL_CNT_VER_6_T(txmulti), + IDX_IN_WL_CNT_VER_6_T(txfail), + IDX_IN_WL_CNT_VER_6_T(txretry), + IDX_IN_WL_CNT_VER_6_T(txretrie), + IDX_IN_WL_CNT_VER_6_T(rxdup), + IDX_IN_WL_CNT_VER_6_T(txrts), + IDX_IN_WL_CNT_VER_6_T(txnocts), + IDX_IN_WL_CNT_VER_6_T(txnoack), + IDX_IN_WL_CNT_VER_6_T(rxfrag), + IDX_IN_WL_CNT_VER_6_T(rxmulti), + IDX_IN_WL_CNT_VER_6_T(rxcrc), + IDX_IN_WL_CNT_VER_6_T(txfrmsnt), + IDX_IN_WL_CNT_VER_6_T(rxundec), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_6_T(tkipreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpundec), + IDX_IN_WL_CNT_VER_6_T(fourwayfail), + IDX_IN_WL_CNT_VER_6_T(wepundec), + IDX_IN_WL_CNT_VER_6_T(wepicverr), + IDX_IN_WL_CNT_VER_6_T(decsuccess), + IDX_IN_WL_CNT_VER_6_T(tkipicverr), + IDX_IN_WL_CNT_VER_6_T(wepexcluded), + IDX_IN_WL_CNT_VER_6_T(txchanrej), + IDX_IN_WL_CNT_VER_6_T(psmwds), + IDX_IN_WL_CNT_VER_6_T(phywatchdog), + IDX_IN_WL_CNT_VER_6_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_6_T(prq_bad_entries), + 
IDX_IN_WL_CNT_VER_6_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_6_T(rx1mbps), + IDX_IN_WL_CNT_VER_6_T(rx2mbps), + IDX_IN_WL_CNT_VER_6_T(rx5mbps5), + IDX_IN_WL_CNT_VER_6_T(rx6mbps), + IDX_IN_WL_CNT_VER_6_T(rx9mbps), + IDX_IN_WL_CNT_VER_6_T(rx11mbps), + IDX_IN_WL_CNT_VER_6_T(rx12mbps), + IDX_IN_WL_CNT_VER_6_T(rx18mbps), + IDX_IN_WL_CNT_VER_6_T(rx24mbps), + IDX_IN_WL_CNT_VER_6_T(rx36mbps), + IDX_IN_WL_CNT_VER_6_T(rx48mbps), + IDX_IN_WL_CNT_VER_6_T(rx54mbps), + IDX_IN_WL_CNT_VER_6_T(rx108mbps), + IDX_IN_WL_CNT_VER_6_T(rx162mbps), + IDX_IN_WL_CNT_VER_6_T(rx216mbps), + IDX_IN_WL_CNT_VER_6_T(rx270mbps), + IDX_IN_WL_CNT_VER_6_T(rx324mbps), + IDX_IN_WL_CNT_VER_6_T(rx378mbps), + IDX_IN_WL_CNT_VER_6_T(rx432mbps), + IDX_IN_WL_CNT_VER_6_T(rx486mbps), + IDX_IN_WL_CNT_VER_6_T(rx540mbps), + IDX_IN_WL_CNT_VER_6_T(rfdisable), + IDX_IN_WL_CNT_VER_6_T(txexptime), + IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_6_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst) +}; + +#define INVALID_IDX ((uint8)(-1)) + +/* Index conversion table from wl_cnt_ver_7_t to wl_cnt_wlc_t */ +static const uint8 wlcntver7t_to_wlcntwlct[] = { + IDX_IN_WL_CNT_VER_7_T(txframe), + IDX_IN_WL_CNT_VER_7_T(txbyte), + IDX_IN_WL_CNT_VER_7_T(txretrans), + IDX_IN_WL_CNT_VER_7_T(txerror), + IDX_IN_WL_CNT_VER_7_T(txctl), + IDX_IN_WL_CNT_VER_7_T(txprshort), + IDX_IN_WL_CNT_VER_7_T(txserr), + IDX_IN_WL_CNT_VER_7_T(txnobuf), + IDX_IN_WL_CNT_VER_7_T(txnoassoc), + IDX_IN_WL_CNT_VER_7_T(txrunt), + IDX_IN_WL_CNT_VER_7_T(txchit), + IDX_IN_WL_CNT_VER_7_T(txcmiss), + IDX_IN_WL_CNT_VER_7_T(txuflo), + IDX_IN_WL_CNT_VER_7_T(txphyerr), + IDX_IN_WL_CNT_VER_7_T(txphycrs), + IDX_IN_WL_CNT_VER_7_T(rxframe), + IDX_IN_WL_CNT_VER_7_T(rxbyte), + IDX_IN_WL_CNT_VER_7_T(rxerror), + IDX_IN_WL_CNT_VER_7_T(rxctl), + IDX_IN_WL_CNT_VER_7_T(rxnobuf), + IDX_IN_WL_CNT_VER_7_T(rxnondata), + IDX_IN_WL_CNT_VER_7_T(rxbadds), + IDX_IN_WL_CNT_VER_7_T(rxbadcm), + IDX_IN_WL_CNT_VER_7_T(rxfragerr), + IDX_IN_WL_CNT_VER_7_T(rxrunt), + IDX_IN_WL_CNT_VER_7_T(rxgiant), + IDX_IN_WL_CNT_VER_7_T(rxnoscb), + IDX_IN_WL_CNT_VER_7_T(rxbadproto), + IDX_IN_WL_CNT_VER_7_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_7_T(rxbadda), + IDX_IN_WL_CNT_VER_7_T(rxfilter), + IDX_IN_WL_CNT_VER_7_T(rxoflo), + IDX_IN_WL_CNT_VER_7_T(rxuflo), + IDX_IN_WL_CNT_VER_7_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_7_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_7_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_7_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_7_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_7_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_7_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_7_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_7_T(dmade), + IDX_IN_WL_CNT_VER_7_T(dmada), + IDX_IN_WL_CNT_VER_7_T(dmape), + IDX_IN_WL_CNT_VER_7_T(reset), + IDX_IN_WL_CNT_VER_7_T(tbtt), + IDX_IN_WL_CNT_VER_7_T(txdmawar), + IDX_IN_WL_CNT_VER_7_T(pkt_callback_reg_fail), + 
IDX_IN_WL_CNT_VER_7_T(txfrag), + IDX_IN_WL_CNT_VER_7_T(txmulti), + IDX_IN_WL_CNT_VER_7_T(txfail), + IDX_IN_WL_CNT_VER_7_T(txretry), + IDX_IN_WL_CNT_VER_7_T(txretrie), + IDX_IN_WL_CNT_VER_7_T(rxdup), + IDX_IN_WL_CNT_VER_7_T(txrts), + IDX_IN_WL_CNT_VER_7_T(txnocts), + IDX_IN_WL_CNT_VER_7_T(txnoack), + IDX_IN_WL_CNT_VER_7_T(rxfrag), + IDX_IN_WL_CNT_VER_7_T(rxmulti), + IDX_IN_WL_CNT_VER_7_T(rxcrc), + IDX_IN_WL_CNT_VER_7_T(txfrmsnt), + IDX_IN_WL_CNT_VER_7_T(rxundec), + IDX_IN_WL_CNT_VER_7_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_7_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_7_T(tkipreplay), + IDX_IN_WL_CNT_VER_7_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_7_T(ccmpreplay), + IDX_IN_WL_CNT_VER_7_T(ccmpundec), + IDX_IN_WL_CNT_VER_7_T(fourwayfail), + IDX_IN_WL_CNT_VER_7_T(wepundec), + IDX_IN_WL_CNT_VER_7_T(wepicverr), + IDX_IN_WL_CNT_VER_7_T(decsuccess), + IDX_IN_WL_CNT_VER_7_T(tkipicverr), + IDX_IN_WL_CNT_VER_7_T(wepexcluded), + IDX_IN_WL_CNT_VER_7_T(txchanrej), + IDX_IN_WL_CNT_VER_7_T(psmwds), + IDX_IN_WL_CNT_VER_7_T(phywatchdog), + IDX_IN_WL_CNT_VER_7_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_7_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_7_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_7_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_7_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_7_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_7_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_7_T(rx1mbps), + IDX_IN_WL_CNT_VER_7_T(rx2mbps), + IDX_IN_WL_CNT_VER_7_T(rx5mbps5), + IDX_IN_WL_CNT_VER_7_T(rx6mbps), + IDX_IN_WL_CNT_VER_7_T(rx9mbps), + IDX_IN_WL_CNT_VER_7_T(rx11mbps), + IDX_IN_WL_CNT_VER_7_T(rx12mbps), + IDX_IN_WL_CNT_VER_7_T(rx18mbps), + IDX_IN_WL_CNT_VER_7_T(rx24mbps), + IDX_IN_WL_CNT_VER_7_T(rx36mbps), + IDX_IN_WL_CNT_VER_7_T(rx48mbps), + IDX_IN_WL_CNT_VER_7_T(rx54mbps), + IDX_IN_WL_CNT_VER_7_T(rx108mbps), + IDX_IN_WL_CNT_VER_7_T(rx162mbps), + IDX_IN_WL_CNT_VER_7_T(rx216mbps), + IDX_IN_WL_CNT_VER_7_T(rx270mbps), + IDX_IN_WL_CNT_VER_7_T(rx324mbps), + IDX_IN_WL_CNT_VER_7_T(rx378mbps), + IDX_IN_WL_CNT_VER_7_T(rx432mbps), + IDX_IN_WL_CNT_VER_7_T(rx486mbps), + IDX_IN_WL_CNT_VER_7_T(rx540mbps), + IDX_IN_WL_CNT_VER_7_T(rfdisable), + IDX_IN_WL_CNT_VER_7_T(txexptime), + IDX_IN_WL_CNT_VER_7_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_7_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_7_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_7_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_7_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_7_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_7_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_7_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_7_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_7_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_7_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_7_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_7_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_7_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_7_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_7_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_7_T(wepexcluded_mcst), + IDX_IN_WL_CNT_VER_7_T(dma_hang), + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + IDX_IN_WL_CNT_VER_7_T(rxrtry) +}; + +/* Max wl_cnt_wlc_t fields including rxrtry */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_7_T \ + (sizeof(wlcntver7t_to_wlcntwlct) / sizeof(uint8)) + +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */ +static const uint8 
wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = { + IDX_IN_WL_CNT_VER_11_T(txframe), + IDX_IN_WL_CNT_VER_11_T(txbyte), + IDX_IN_WL_CNT_VER_11_T(txretrans), + IDX_IN_WL_CNT_VER_11_T(txerror), + IDX_IN_WL_CNT_VER_11_T(txctl), + IDX_IN_WL_CNT_VER_11_T(txprshort), + IDX_IN_WL_CNT_VER_11_T(txserr), + IDX_IN_WL_CNT_VER_11_T(txnobuf), + IDX_IN_WL_CNT_VER_11_T(txnoassoc), + IDX_IN_WL_CNT_VER_11_T(txrunt), + IDX_IN_WL_CNT_VER_11_T(txchit), + IDX_IN_WL_CNT_VER_11_T(txcmiss), + IDX_IN_WL_CNT_VER_11_T(txuflo), + IDX_IN_WL_CNT_VER_11_T(txphyerr), + IDX_IN_WL_CNT_VER_11_T(txphycrs), + IDX_IN_WL_CNT_VER_11_T(rxframe), + IDX_IN_WL_CNT_VER_11_T(rxbyte), + IDX_IN_WL_CNT_VER_11_T(rxerror), + IDX_IN_WL_CNT_VER_11_T(rxctl), + IDX_IN_WL_CNT_VER_11_T(rxnobuf), + IDX_IN_WL_CNT_VER_11_T(rxnondata), + IDX_IN_WL_CNT_VER_11_T(rxbadds), + IDX_IN_WL_CNT_VER_11_T(rxbadcm), + IDX_IN_WL_CNT_VER_11_T(rxfragerr), + IDX_IN_WL_CNT_VER_11_T(rxrunt), + IDX_IN_WL_CNT_VER_11_T(rxgiant), + IDX_IN_WL_CNT_VER_11_T(rxnoscb), + IDX_IN_WL_CNT_VER_11_T(rxbadproto), + IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_11_T(rxbadda), + IDX_IN_WL_CNT_VER_11_T(rxfilter), + IDX_IN_WL_CNT_VER_11_T(rxoflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_11_T(dmade), + IDX_IN_WL_CNT_VER_11_T(dmada), + IDX_IN_WL_CNT_VER_11_T(dmape), + IDX_IN_WL_CNT_VER_11_T(reset), + IDX_IN_WL_CNT_VER_11_T(tbtt), + IDX_IN_WL_CNT_VER_11_T(txdmawar), + IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_11_T(txfrag), + IDX_IN_WL_CNT_VER_11_T(txmulti), + IDX_IN_WL_CNT_VER_11_T(txfail), + IDX_IN_WL_CNT_VER_11_T(txretry), + IDX_IN_WL_CNT_VER_11_T(txretrie), + IDX_IN_WL_CNT_VER_11_T(rxdup), + IDX_IN_WL_CNT_VER_11_T(txrts), + IDX_IN_WL_CNT_VER_11_T(txnocts), + IDX_IN_WL_CNT_VER_11_T(txnoack), + IDX_IN_WL_CNT_VER_11_T(rxfrag), + IDX_IN_WL_CNT_VER_11_T(rxmulti), + IDX_IN_WL_CNT_VER_11_T(rxcrc), + IDX_IN_WL_CNT_VER_11_T(txfrmsnt), + IDX_IN_WL_CNT_VER_11_T(rxundec), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_11_T(tkipreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpundec), + IDX_IN_WL_CNT_VER_11_T(fourwayfail), + IDX_IN_WL_CNT_VER_11_T(wepundec), + IDX_IN_WL_CNT_VER_11_T(wepicverr), + IDX_IN_WL_CNT_VER_11_T(decsuccess), + IDX_IN_WL_CNT_VER_11_T(tkipicverr), + IDX_IN_WL_CNT_VER_11_T(wepexcluded), + IDX_IN_WL_CNT_VER_11_T(txchanrej), + IDX_IN_WL_CNT_VER_11_T(psmwds), + IDX_IN_WL_CNT_VER_11_T(phywatchdog), + IDX_IN_WL_CNT_VER_11_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_11_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_11_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_11_T(rx1mbps), + IDX_IN_WL_CNT_VER_11_T(rx2mbps), + IDX_IN_WL_CNT_VER_11_T(rx5mbps5), + IDX_IN_WL_CNT_VER_11_T(rx6mbps), + IDX_IN_WL_CNT_VER_11_T(rx9mbps), + IDX_IN_WL_CNT_VER_11_T(rx11mbps), + IDX_IN_WL_CNT_VER_11_T(rx12mbps), + IDX_IN_WL_CNT_VER_11_T(rx18mbps), + IDX_IN_WL_CNT_VER_11_T(rx24mbps), + IDX_IN_WL_CNT_VER_11_T(rx36mbps), + 
IDX_IN_WL_CNT_VER_11_T(rx48mbps), + IDX_IN_WL_CNT_VER_11_T(rx54mbps), + IDX_IN_WL_CNT_VER_11_T(rx108mbps), + IDX_IN_WL_CNT_VER_11_T(rx162mbps), + IDX_IN_WL_CNT_VER_11_T(rx216mbps), + IDX_IN_WL_CNT_VER_11_T(rx270mbps), + IDX_IN_WL_CNT_VER_11_T(rx324mbps), + IDX_IN_WL_CNT_VER_11_T(rx378mbps), + IDX_IN_WL_CNT_VER_11_T(rx432mbps), + IDX_IN_WL_CNT_VER_11_T(rx486mbps), + IDX_IN_WL_CNT_VER_11_T(rx540mbps), + IDX_IN_WL_CNT_VER_11_T(rfdisable), + IDX_IN_WL_CNT_VER_11_T(txexptime), + IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_11_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst), + IDX_IN_WL_CNT_VER_11_T(dma_hang), + IDX_IN_WL_CNT_VER_11_T(reinit), + IDX_IN_WL_CNT_VER_11_T(pstatxucast), + IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc), + IDX_IN_WL_CNT_VER_11_T(pstarxucast), + IDX_IN_WL_CNT_VER_11_T(pstarxbcmc), + IDX_IN_WL_CNT_VER_11_T(pstatxbcmc), + IDX_IN_WL_CNT_VER_11_T(cso_passthrough), + IDX_IN_WL_CNT_VER_11_T(cso_normal), + IDX_IN_WL_CNT_VER_11_T(chained), + IDX_IN_WL_CNT_VER_11_T(chainedsz1), + IDX_IN_WL_CNT_VER_11_T(unchained), + IDX_IN_WL_CNT_VER_11_T(maxchainsz), + IDX_IN_WL_CNT_VER_11_T(currchainsz), + IDX_IN_WL_CNT_VER_11_T(pciereset), + IDX_IN_WL_CNT_VER_11_T(cfgrestore), + IDX_IN_WL_CNT_VER_11_T(reinitreason), + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 2, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7, + IDX_IN_WL_CNT_VER_11_T(rxrtry), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu), + IDX_IN_WL_CNT_VER_11_T(txbar), + IDX_IN_WL_CNT_VER_11_T(rxbar), + IDX_IN_WL_CNT_VER_11_T(txpspoll), + IDX_IN_WL_CNT_VER_11_T(rxpspoll), + IDX_IN_WL_CNT_VER_11_T(txnull), + IDX_IN_WL_CNT_VER_11_T(rxnull), + IDX_IN_WL_CNT_VER_11_T(txqosnull), + IDX_IN_WL_CNT_VER_11_T(rxqosnull), + IDX_IN_WL_CNT_VER_11_T(txassocreq), + IDX_IN_WL_CNT_VER_11_T(rxassocreq), + IDX_IN_WL_CNT_VER_11_T(txreassocreq), + IDX_IN_WL_CNT_VER_11_T(rxreassocreq), + IDX_IN_WL_CNT_VER_11_T(txdisassoc), + IDX_IN_WL_CNT_VER_11_T(rxdisassoc), + IDX_IN_WL_CNT_VER_11_T(txassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxassocrsp), + IDX_IN_WL_CNT_VER_11_T(txreassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxreassocrsp), + IDX_IN_WL_CNT_VER_11_T(txauth), + IDX_IN_WL_CNT_VER_11_T(rxauth), + IDX_IN_WL_CNT_VER_11_T(txdeauth), + IDX_IN_WL_CNT_VER_11_T(rxdeauth), + IDX_IN_WL_CNT_VER_11_T(txprobereq), + IDX_IN_WL_CNT_VER_11_T(rxprobereq), + IDX_IN_WL_CNT_VER_11_T(txprobersp), + IDX_IN_WL_CNT_VER_11_T(rxprobersp), + IDX_IN_WL_CNT_VER_11_T(txaction), + IDX_IN_WL_CNT_VER_11_T(rxaction), + IDX_IN_WL_CNT_VER_11_T(ampdu_wds), + IDX_IN_WL_CNT_VER_11_T(txlost), + IDX_IN_WL_CNT_VER_11_T(txdatamcast), + IDX_IN_WL_CNT_VER_11_T(txdatabcast), + INVALID_IDX, + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + 
IDX_IN_WL_CNT_VER_11_T(txbcast), + IDX_IN_WL_CNT_VER_11_T(txdropped), + IDX_IN_WL_CNT_VER_11_T(rxbcast), + IDX_IN_WL_CNT_VER_11_T(rxdropped) +}; + +/* Index conversion table from wl_cnt_ver_11_t to + * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t + */ +static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + IDX_IN_WL_CNT_VER_11_T(txmpdu), + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + IDX_IN_WL_CNT_VER_11_T(rxnodelim), + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + +/* For mcst offsets that were not used. 
(2 Pads) */ +#define INVALID_MCST_IDX ((uint8)(-1)) +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_6_T(txallfrm), + IDX_IN_WL_CNT_VER_6_T(txrtsfrm), + IDX_IN_WL_CNT_VER_6_T(txctsfrm), + IDX_IN_WL_CNT_VER_6_T(txackfrm), + IDX_IN_WL_CNT_VER_6_T(txdnlfrm), + IDX_IN_WL_CNT_VER_6_T(txbcnfrm), + IDX_IN_WL_CNT_VER_6_T(txfunfl), + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_6_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(txtplunfl), + IDX_IN_WL_CNT_VER_6_T(txphyerror), + IDX_IN_WL_CNT_VER_6_T(pktengrxducast), + IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_6_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_6_T(rxbadfcs), + 
IDX_IN_WL_CNT_VER_6_T(rxbadplcp), + IDX_IN_WL_CNT_VER_6_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxstrt), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_6_T(rxrtsucast), + IDX_IN_WL_CNT_VER_6_T(rxctsucast), + IDX_IN_WL_CNT_VER_6_T(rxackucast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxrtsocast), + IDX_IN_WL_CNT_VER_6_T(rxctsocast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_6_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_6_T(rxrsptmout), + IDX_IN_WL_CNT_VER_6_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_6_T(txsfovfl), + IDX_IN_WL_CNT_VER_6_T(pmqovfl), + IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_6_T(txcgprsfail), + IDX_IN_WL_CNT_VER_6_T(txcgprssuc), + IDX_IN_WL_CNT_VER_6_T(prs_timeout), + IDX_IN_WL_CNT_VER_6_T(rxnack), + IDX_IN_WL_CNT_VER_6_T(frmscons), + IDX_IN_WL_CNT_VER_6_T(txnack), + IDX_IN_WL_CNT_VER_6_T(rxback), + IDX_IN_WL_CNT_VER_6_T(txback), + IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxdrop20s), + IDX_IN_WL_CNT_VER_6_T(rxtoolate), + IDX_IN_WL_CNT_VER_6_T(bphy_badplcp) +}; + +/* Index conversion table from wl_cnt_ver_7_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver7t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_7_T(txallfrm), + IDX_IN_WL_CNT_VER_7_T(txrtsfrm), + IDX_IN_WL_CNT_VER_7_T(txctsfrm), + IDX_IN_WL_CNT_VER_7_T(txackfrm), + IDX_IN_WL_CNT_VER_7_T(txdnlfrm), + IDX_IN_WL_CNT_VER_7_T(txbcnfrm), + IDX_IN_WL_CNT_VER_7_T(txfunfl), + IDX_IN_WL_CNT_VER_7_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_7_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_7_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_7_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_7_T(txfunfl) + 5, + INVALID_MCST_IDX, + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_7_T(txtplunfl), + IDX_IN_WL_CNT_VER_7_T(txphyerror), + IDX_IN_WL_CNT_VER_7_T(pktengrxducast), + IDX_IN_WL_CNT_VER_7_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_7_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_7_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_7_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_7_T(rxbadfcs), + IDX_IN_WL_CNT_VER_7_T(rxbadplcp), + IDX_IN_WL_CNT_VER_7_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_7_T(rxstrt), + IDX_IN_WL_CNT_VER_7_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_7_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_7_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_7_T(rxrtsucast), + IDX_IN_WL_CNT_VER_7_T(rxctsucast), + IDX_IN_WL_CNT_VER_7_T(rxackucast), + IDX_IN_WL_CNT_VER_7_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_7_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_7_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_7_T(rxrtsocast), + IDX_IN_WL_CNT_VER_7_T(rxctsocast), + IDX_IN_WL_CNT_VER_7_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_7_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_7_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_7_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_7_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_7_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_7_T(rxrsptmout), + IDX_IN_WL_CNT_VER_7_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_7_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_7_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_7_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_7_T(txsfovfl), + IDX_IN_WL_CNT_VER_7_T(pmqovfl), + IDX_IN_WL_CNT_VER_7_T(rxcgprqfrm), + 
IDX_IN_WL_CNT_VER_7_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_7_T(txcgprsfail), + IDX_IN_WL_CNT_VER_7_T(txcgprssuc), + IDX_IN_WL_CNT_VER_7_T(prs_timeout), + IDX_IN_WL_CNT_VER_7_T(rxnack), + IDX_IN_WL_CNT_VER_7_T(frmscons), + IDX_IN_WL_CNT_VER_7_T(txnack), + INVALID_MCST_IDX, + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_7_T(bphy_rxcrsglitch), + INVALID_MCST_IDX, + INVALID_MCST_IDX, + INVALID_MCST_IDX +}; + +/* copy wlc layer counters from old type cntbuf to wl_cnt_wlc_t type. */ +static int +wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx) +{ + uint i; + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + /* Init wlccnt with invalid value. Unchanged value will not be printed out */ + for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) { + dst[i] = INVALID_CNT_VAL; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) { + if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver6t_to_wlcntwlct[i]]; + } + } else if (cntver == WL_CNT_VERSION_7) { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_7_T; i++) { + if (wlcntver7t_to_wlcntwlct[i] >= src_max_idx || + wlcntver7t_to_wlcntwlct[i] == INVALID_IDX) { + continue; + } + dst[i] = src[wlcntver7t_to_wlcntwlct[i]]; + } + } else { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) { + if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) { + if (wlcntver11t_to_wlcntwlct[i] == INVALID_IDX) { + continue; + } + else { + /* src buffer does not have counters from here */ + break; + } + } + dst[i] = src[wlcntver11t_to_wlcntwlct[i]]; + } + } + return BCME_OK; +} + +/* copy macstat counters from old type cntbuf to wl_cnt_v_le10_mcst_t type. */ +static int +wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_6_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]]; + } + } + } else if (cntver == WL_CNT_VERSION_7) { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver7t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_7_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver7t_to_wlcntvle10mcstt[i]]; + } + } + } else { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_11_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]]; + } + } + } + return BCME_OK; +} + +static int +wl_copy_macstat_ver11(uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]]; + } + return BCME_OK; +} + +/** + * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format. + * Parameters: + * cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW. + * Newly translated xtlv format is written to this pointer. + * buflen: length of the "cntbuf" without any padding. + * corerev: chip core revision of the driver/FW. 
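+ *
+ * Minimal calling sketch (hypothetical caller; osh, buf, buflen and corerev
+ * are assumed to come from the surrounding driver context):
+ *
+ *	wl_cnt_info_t *ci = (wl_cnt_info_t *)buf;
+ *	if (ci->version < WL_CNT_VERSION_XTLV &&
+ *	    wl_cntbuf_to_xtlv_format(osh, buf, buflen, corerev) != BCME_OK)
+ *		return BCME_ERROR;
+ *
+ * On success the buffer is rewritten in place as WL_CNT_XTLV_* records and
+ * ci->datalen reflects the packed xtlv length.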
+ */ +int +wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) +{ + wl_cnt_wlc_t *wlccnt = NULL; + uint32 *macstat = NULL; + xtlv_desc_t xtlv_desc[3]; + uint16 mcst_xtlv_id; + int res = BCME_OK; + wl_cnt_info_t *cntinfo = cntbuf; + uint8 *xtlvbuf_p = cntinfo->data; + uint16 ver = cntinfo->version; + uint16 xtlvbuflen = (uint16)buflen; + uint16 src_max_idx; +#ifdef BCMDRIVER + osl_t *osh = ctx; +#else + BCM_REFERENCE(ctx); +#endif + + if (ver >= WL_CNT_VERSION_XTLV) { + /* Already in xtlv format. */ + goto exit; + } + +#ifdef BCMDRIVER + wlccnt = MALLOC(osh, sizeof(*wlccnt)); + macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ); +#else + wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt)); + macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ); +#endif + if (!wlccnt || !macstat) { + printf("wl_cntbuf_to_xtlv_format: malloc fail!\n"); + res = BCME_NOMEM; + goto exit; + } + + /* Check if the max idx in the struct exceeds the boundary of uint8 */ + if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) || + NUM_OF_CNT_IN_WL_CNT_VER_7_T > ((uint8)(-1) + 1) || + NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n"); + res = BCME_ERROR; + goto exit; + } + + /* Exclude version and length fields in either wlc_cnt_ver_6_t or wlc_cnt_ver_11_t */ + src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32); + if (src_max_idx > (uint8)(-1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n" + "Try updating wl utility to the latest.\n"); + src_max_idx = (uint8)(-1); + } + + /* Copy wlc layer counters to wl_cnt_wlc_t */ + res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx); + if (res != BCME_OK) { + printf("wl_copy_wlccnt fail!\n"); + goto exit; + } + + /* Copy macstat counters to wl_cnt_wlc_t */ + if (ver == WL_CNT_VERSION_11) { + res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_ver11 fail!\n"); + goto exit; + } + if (corerev >= 40) { + mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1; + } else { + mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1; + } + } else { + res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_upto_ver10 fail!\n"); + goto exit; + } + mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE; + } + + xtlv_desc[0].type = WL_CNT_XTLV_WLC; + xtlv_desc[0].len = sizeof(*wlccnt); + xtlv_desc[0].ptr = wlccnt; + + xtlv_desc[1].type = mcst_xtlv_id; + xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ; + xtlv_desc[1].ptr = macstat; + + xtlv_desc[2].type = 0; + xtlv_desc[2].len = 0; + xtlv_desc[2].ptr = NULL; + + memset(cntbuf, 0, buflen); + + res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen, + xtlv_desc, BCM_XTLV_OPTION_ALIGN32); + cntinfo->datalen = (buflen - xtlvbuflen); +exit: +#ifdef BCMDRIVER + if (wlccnt) { + MFREE(osh, wlccnt, sizeof(*wlccnt)); + } + if (macstat) { + MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ); + } +#else + if (wlccnt) { + free(wlccnt); + } + if (macstat) { + free(macstat); + } +#endif + return res; +} diff --git a/bcmdhd.101.10.361.x/bcm_l2_filter.c b/bcmdhd.101.10.361.x/bcm_l2_filter.c new file mode 100755 index 0000000..5a5ca2c --- /dev/null +++ b/bcmdhd.101.10.361.x/bcm_l2_filter.c @@ -0,0 +1,766 @@ +/* + * L2 Filter handling functions + * + * Copyright (C) 2020, Broadcom. 
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include <802.11.h>
+#include
+
+#ifdef BCMDBG_ERR
+#define L2_FILTER_ERROR(args) printf args
+#else
+#define L2_FILTER_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef BCMDBG_MSG
+#define L2_FILTER_MSG(args) printf args
+#else
+#define L2_FILTER_MSG(args)
+#endif /* BCMDBG_MSG */
+
+struct arp_table {
+	parp_entry_t *parp_table[BCM_PARP_TABLE_SIZE];	/* proxyarp entries in cache table */
+	parp_entry_t *parp_candidate_list;	/* proxyarp entries in candidate list */
+	uint8 parp_smac[ETHER_ADDR_LEN];	/* L2 SMAC from DHCP Req */
+	uint8 parp_cmac[ETHER_ADDR_LEN];	/* Bootp Client MAC from DHCP Req */
+};
+#ifdef DHD_DUMP_ARPTABLE
+void bcm_l2_parp_dump_table(arp_table_t* arp_tbl);
+
+void
+bcm_l2_parp_dump_table(arp_table_t* arp_tbl)
+{
+	parp_entry_t *entry;
+	uint16 idx, ip_len;
+	arp_table_t *ptable;
+	ip_len = IPV4_ADDR_LEN;
+	ptable = arp_tbl;
+	for (idx = 0; idx < BCM_PARP_TABLE_SIZE; idx++) {
+		entry = ptable->parp_table[idx];
+		while (entry) {
+			printf("Cached entries..\n");
+			printf("%d: %d.%d.%d.%d", idx, entry->ip.data[0], entry->ip.data[1],
+				entry->ip.data[2], entry->ip.data[3]);
+			printf("%02x:%02x:%02x:%02x:%02x:%02x", entry->ea.octet[0],
+				entry->ea.octet[1], entry->ea.octet[2], entry->ea.octet[3],
+				entry->ea.octet[4], entry->ea.octet[5]);
+			printf("\n");
+			entry = entry->next;
+		}
+	}
+	entry = ptable->parp_candidate_list;
+	while (entry) {
+		printf("Candidate entries..\n");
+		printf("%d.%d.%d.%d", entry->ip.data[0], entry->ip.data[1],
+			entry->ip.data[2], entry->ip.data[3]);
+		printf("%02x:%02x:%02x:%02x:%02x:%02x", entry->ea.octet[0],
+			entry->ea.octet[1], entry->ea.octet[2], entry->ea.octet[3],
+			entry->ea.octet[4], entry->ea.octet[5]);
+
+		printf("\n");
+		entry = entry->next;
+	}
+}
+#endif /* DHD_DUMP_ARPTABLE */
+
+arp_table_t* init_l2_filter_arp_table(osl_t* osh)
+{
+	return ((arp_table_t*)MALLOCZ(osh, sizeof(arp_table_t)));
+}
+
+void deinit_l2_filter_arp_table(osl_t* osh, arp_table_t* ptable)
+{
+	MFREE(osh, ptable, sizeof(arp_table_t));
+}
+/* returns 0 if gratuitous ARP or unsolicited neighbour advertisement */
+int
+bcm_l2_filter_gratuitous_arp(osl_t *osh, void *pktbuf)
+{
+	uint8 *frame = PKTDATA(osh, pktbuf);
+	uint16 ethertype;
+	int send_ip_offset, target_ip_offset;
+	int iplen;
+	int minlen;
+	uint8 *data;
+	int datalen;
+	bool snap;
+
+	if (get_pkt_ether_type(osh, pktbuf, &data, &datalen, &ethertype, &snap) != BCME_OK)
+		return BCME_ERROR;
+
+	if (!ETHER_ISBCAST(frame + ETHER_DEST_OFFSET) &&
+	    bcmp(&ether_ipv6_mcast, frame + ETHER_DEST_OFFSET, sizeof(ether_ipv6_mcast))) {
		return BCME_ERROR;
+	}
+
+	if (ethertype == ETHER_TYPE_ARP) {
+		L2_FILTER_MSG(("bcm_l2_filter_gratuitous_arp: ARP RX data : %p: datalen : %d\n",
+			data, datalen));
+		send_ip_offset = ARP_SRC_IP_OFFSET;
+		target_ip_offset = ARP_TGT_IP_OFFSET;
+		iplen = IPV4_ADDR_LEN;
+		minlen = ARP_DATA_LEN;
+	} else if (ethertype == ETHER_TYPE_IPV6) {
+		send_ip_offset = NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET;
+		target_ip_offset = NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET;
+		iplen = IPV6_ADDR_LEN;
+		minlen = target_ip_offset + iplen;
+
+		/* check for neighbour advertisement */
+		if (datalen >= minlen && (data[IPV6_NEXT_HDR_OFFSET] != IP_PROT_ICMP6 ||
+			data[NEIGHBOR_ADVERTISE_TYPE_OFFSET] != NEIGHBOR_ADVERTISE_TYPE))
+			return BCME_ERROR;
+
+		/* Don't drop an unsolicited NA from the AP with the all-nodes mcast
+		 * dest addr (HS2-4.5.E)
+		 */
+		if (datalen >= minlen &&
+			(data[IPV6_NEXT_HDR_OFFSET] == IP_PROT_ICMP6) &&
+			(data[NEIGHBOR_ADVERTISE_TYPE_OFFSET] == NEIGHBOR_ADVERTISE_TYPE) &&
+			(data[NEIGHBOR_ADVERTISE_OPTION_OFFSET] == OPT_TYPE_TGT_LINK_ADDR)) {
+			L2_FILTER_MSG(("Unsolicited Neighbour Advertisement from AP "
+				"with all-nodes mcast dest addr tx'ed (%d)\n", datalen));
+			return -1;
+		}
+
+	} else {
+		return BCME_ERROR;
+	}
+
+	if (datalen < minlen) {
+		L2_FILTER_MSG(("BCM: dhd_gratuitous_arp: truncated packet (%d)\n", datalen));
+		return BCME_ERROR;
+	}
+
+	if (bcmp(data + send_ip_offset, data + target_ip_offset, iplen) == 0) {
+		L2_FILTER_MSG((" returning BCME_OK in bcm_l2_filter_gratuitous_arp\n"));
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+int
+get_pkt_ether_type(osl_t *osh, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
+{
+	uint8 *frame = PKTDATA(osh, pktbuf);
+	int length = PKTLEN(osh, pktbuf);
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	bool snap = FALSE;
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		L2_FILTER_MSG(("BCM: get_pkt_ether_type: short eth frame (%d)\n",
+			length));
+		return BCME_ERROR;
+	} else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+		!bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+		snap = TRUE;
+	} else {
+		L2_FILTER_MSG((" get_pkt_ether_type: non-SNAP 802.3 frame\n"));
+		return BCME_ERROR;
+	}
+
+	ethertype = ntoh16_ua(pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
+			L2_FILTER_MSG(("BCM: get_pkt_ether_type: short VLAN frame (%d)\n",
+				length));
+			return BCME_ERROR;
+		}
+		ethertype = ntoh16_ua(pt);
+	}
+	*data_ptr = pt + ETHER_TYPE_LEN;
+	*len_ptr = length - (int32)(pt + ETHER_TYPE_LEN - frame);
+	*et_ptr = ethertype;
+	*snap_ptr = snap;
+	return BCME_OK;
+}
+
+int
+get_pkt_ip_type(osl_t *osh, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
+{
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int iplen;			/* IP frame length */
+	uint16 ethertype, iphdrlen, ippktlen;
+	uint16 iph_frag;
+	uint8 prot;
+	bool snap;
+
+	if (get_pkt_ether_type(osh, pktbuf, (uint8 **)&iph,
+		&iplen, &ethertype, &snap) != 0)
+		return BCME_ERROR;
+
+	if (ethertype != ETHER_TYPE_IP) {
+		return BCME_ERROR;
+	}
+
+	/* We support IPv4 only */
+	if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
+		return BCME_ERROR;
+	}
+
+	/* Header length sanity */
+	iphdrlen = IPV4_HLEN(iph);
+
+	/*
+	 * Packet length
sanity; sometimes we receive eth-frame size bigger + * than the IP content, which results in a bad tcp chksum + */ + ippktlen = ntoh16(iph->tot_len); + if (ippktlen < iplen) { + L2_FILTER_MSG(("get_pkt_ip_type: extra frame length ignored\n")); + iplen = ippktlen; + } else if (ippktlen > iplen) { + L2_FILTER_MSG(("get_pkt_ip_type: truncated IP packet (%d)\n", + ippktlen - iplen)); + return BCME_ERROR; + } + + if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) { + L2_FILTER_ERROR((" get_pkt_ip_type: IP-header-len (%d) out of range (%d-%d)\n", + iphdrlen, IPV4_OPTIONS_OFFSET, iplen)); + return BCME_ERROR; + } + + /* + * We don't handle fragmented IP packets. A first frag is indicated by the MF + * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset. + */ + iph_frag = ntoh16(iph->frag); + + if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) { + L2_FILTER_ERROR(("get_pkt_ip_type: IP fragment not handled\n")); + return BCME_ERROR; + } + prot = IPV4_PROT(iph); + *data_ptr = (((uint8 *)iph) + iphdrlen); + *len_ptr = iplen - iphdrlen; + *prot_ptr = prot; + return BCME_OK; +} + +/* Check if packet type is ICMP ECHO */ +int bcm_l2_filter_block_ping(osl_t *osh, void *pktbuf) +{ + struct bcmicmp_hdr *icmph; + int udpl; + uint8 prot; + + if (get_pkt_ip_type(osh, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0) + return BCME_ERROR; + if (prot == IP_PROT_ICMP) { + if (icmph->type == ICMP_TYPE_ECHO_REQUEST) + return BCME_OK; + } + return BCME_ERROR; +} + +int bcm_l2_filter_get_mac_addr_dhcp_pkt(osl_t *osh, void *pktbuf, + int ifidx, uint8** mac_addr) +{ + uint8 *eh = PKTDATA(osh, pktbuf); + uint8 *udph; + uint8 *dhcp; + int udpl; + int dhcpl; + uint16 port; + uint8 prot; + + if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET)) + return BCME_ERROR; + if (get_pkt_ip_type(osh, pktbuf, &udph, &udpl, &prot) != 0) + return BCME_ERROR; + if (prot != IP_PROT_UDP) + return BCME_ERROR; + /* check frame length, at least UDP_HDR_LEN */ + if (udpl < UDP_HDR_LEN) { + L2_FILTER_MSG(("BCM: bcm_l2_filter_get_mac_addr_dhcp_pkt: short UDP frame," + " ignored\n")); + return BCME_ERROR; + } + port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET); + /* only process DHCP packets from server to client */ + if (port != DHCP_PORT_CLIENT) + return BCME_ERROR; + + dhcp = udph + UDP_HDR_LEN; + dhcpl = udpl - UDP_HDR_LEN; + + if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) { + L2_FILTER_MSG(("BCM: bcm_l2_filter_get_mac_addr_dhcp_pkt: short DHCP frame," + " ignored\n")); + return BCME_ERROR; + } + /* only process DHCP reply(offer/ack) packets */ + if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY) + return BCME_ERROR; + /* chaddr = dhcp + DHCP_CHADDR_OFFSET; */ + *mac_addr = dhcp + DHCP_CHADDR_OFFSET; + return BCME_OK; +} +/* modify the mac address for IP, in arp table */ +int +bcm_l2_filter_parp_modifyentry(arp_table_t* arp_tbl, struct ether_addr *ea, + uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt) +{ + parp_entry_t *entry; + uint8 idx, ip_len; + arp_table_t *ptable; + + if (ip_ver == IP_VER_4 && !IPV4_ADDR_NULL(ip) && !IPV4_ADDR_BCAST(ip)) { + idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]); + ip_len = IPV4_ADDR_LEN; + } + else if (ip_ver == IP_VER_6 && !IPV6_ADDR_NULL(ip)) { + idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]); + ip_len = IPV6_ADDR_LEN; + } + else { + return BCME_ERROR; + } + + ptable = arp_tbl; + if (cached) { + entry = ptable->parp_table[idx]; + } else { + entry = ptable->parp_candidate_list; + } + while (entry) { + if (bcmp(entry->ip.data, ip, ip_len) == 0) 
{
+ /* entry matches, overwrite mac content and return */
+ bcopy((void *)ea, (void *)&entry->ea, ETHER_ADDR_LEN);
+ entry->used = entry_tickcnt;
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_OK;
+ }
+ entry = entry->next;
+ }
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_ERROR;
+}
+
+/* Add the IP entry to the ARP table based on the cached argument: if cached
+ * is TRUE, the entry is added to parp_table, else it is added to
+ * parp_candidate_list
+ */
+int
+bcm_l2_filter_parp_addentry(osl_t *osh, arp_table_t* arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt)
+{
+ parp_entry_t *entry;
+ uint8 idx, ip_len;
+ arp_table_t *ptable;
+
+ if (ip_ver == IP_VER_4 && !IPV4_ADDR_NULL(ip) && !IPV4_ADDR_BCAST(ip)) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+ ip_len = IPV4_ADDR_LEN;
+ }
+ else if (ip_ver == IP_VER_6 && !IPV6_ADDR_NULL(ip)) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+ ip_len = IPV6_ADDR_LEN;
+ }
+ else {
+ return BCME_ERROR;
+ }
+
+ if ((entry = MALLOCZ(osh, sizeof(parp_entry_t) + ip_len)) == NULL) {
+ L2_FILTER_MSG(("Allocating new parp_entry for IPv%d failed!!\n", ip_ver));
+ return BCME_NOMEM;
+ }
+
+ bcopy((void *)ea, (void *)&entry->ea, ETHER_ADDR_LEN);
+ entry->used = entry_tickcnt;
+ entry->ip.id = ip_ver;
+ entry->ip.len = ip_len;
+ bcopy(ip, entry->ip.data, ip_len);
+ ptable = arp_tbl;
+ if (cached) {
+ entry->next = ptable->parp_table[idx];
+ ptable->parp_table[idx] = entry;
+ } else {
+ entry->next = ptable->parp_candidate_list;
+ ptable->parp_candidate_list = entry;
+ }
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_OK;
+}
+
+/* Delete the IP entry from the ARP table based on the cached argument: if
+ * cached is TRUE, the entry is deleted from parp_table, else it is deleted
+ * from parp_candidate_list
+ */
+int
+bcm_l2_filter_parp_delentry(osl_t* osh, arp_table_t *arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached)
+{
+ parp_entry_t *entry, *prev = NULL;
+ uint8 idx, ip_len;
+ arp_table_t *ptable;
+
+ if (ip_ver == IP_VER_4) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+ ip_len = IPV4_ADDR_LEN;
+ }
+ else if (ip_ver == IP_VER_6) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+ ip_len = IPV6_ADDR_LEN;
+ }
+ else {
+ return BCME_ERROR;
+ }
+ ptable = arp_tbl;
+ if (cached) {
+ entry = ptable->parp_table[idx];
+ } else {
+ entry = ptable->parp_candidate_list;
+ }
+ while (entry) {
+ if (entry->ip.id == ip_ver &&
+ bcmp(entry->ip.data, ip, ip_len) == 0 &&
+ bcmp(&entry->ea, ea, ETHER_ADDR_LEN) == 0) {
+ if (prev == NULL) {
+ if (cached) {
+ ptable->parp_table[idx] = entry->next;
+ } else {
+ ptable->parp_candidate_list = entry->next;
+ }
+ } else {
+ prev->next = entry->next;
+ }
+ break;
+ }
+ prev = entry;
+ entry = entry->next;
+ }
+ if (entry != NULL)
+ MFREE(osh, entry, sizeof(parp_entry_t) + ip_len);
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_OK;
+}
+
+/* Search for the IP entry in the ARP table based on the cached argument: if
+ * cached is TRUE, parp_table is searched, else parp_candidate_list is
+ * searched
+ */
+parp_entry_t *
+bcm_l2_filter_parp_findentry(arp_table_t* arp_tbl, uint8 *ip, uint8 ip_ver, bool cached,
+ unsigned int entry_tickcnt)
+{
+ parp_entry_t *entry;
+ uint8 idx, ip_len;
+ arp_table_t *ptable;
+
+ if (ip_ver == IP_VER_4) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+ ip_len = IPV4_ADDR_LEN;
+ } else if (ip_ver == IP_VER_6) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+ ip_len = IPV6_ADDR_LEN;
+ } else {
+ return NULL;
+ }
+ ptable = arp_tbl;
+ if (cached) {
+ entry = ptable->parp_table[idx];
+ } else {
+ entry = ptable->parp_candidate_list;
+ }
+ while (entry) {
+ if (entry->ip.id == ip_ver && bcmp(entry->ip.data, ip, ip_len) == 0) {
+ /* time stamp of adding the station entry to arp table for ifp */
+ entry->used = entry_tickcnt;
+ break;
+ }
+ entry = entry->next;
+ }
+ return entry;
+}
+
+/* update arp table entries for every proxy-ARP-enabled interface */
+void
+bcm_l2_filter_arp_table_update(osl_t *osh, arp_table_t* arp_tbl, bool all, uint8 *del_ea,
+ bool periodic, unsigned int tickcnt)
+{
+ parp_entry_t *prev, *entry, *delentry;
+ uint8 idx, ip_ver;
+ struct ether_addr ea;
+ uint8 ip[IPV6_ADDR_LEN];
+ arp_table_t *ptable;
+
+ ptable = arp_tbl;
+ for (idx = 0; idx < BCM_PARP_TABLE_SIZE; idx++) {
+ entry = ptable->parp_table[idx];
+ while (entry) {
+ /* check if the entry needs to be removed */
+ if (all || (periodic && BCM_PARP_IS_TIMEOUT(tickcnt, entry)) ||
+ (del_ea != NULL && !bcmp(del_ea, &entry->ea, ETHER_ADDR_LEN))) {
+ /* copy out the entry fields; delentry below frees the node */
+ ip_ver = entry->ip.id;
+ bcopy(entry->ip.data, ip, entry->ip.len);
+ bcopy(&entry->ea, &ea, ETHER_ADDR_LEN);
+ entry = entry->next;
+ bcm_l2_filter_parp_delentry(osh, ptable, &ea, ip, ip_ver, TRUE);
+ }
+ else {
+ entry = entry->next;
+ }
+ }
+ }
+
+ /* remove candidate or promote to real entry */
+ prev = delentry = NULL;
+ entry = ptable->parp_candidate_list;
+ while (entry) {
+ /* remove candidate */
+ if (all || (periodic && BCM_PARP_ANNOUNCE_WAIT_REACH(tickcnt, entry)) ||
+ (del_ea != NULL && !bcmp(del_ea, (uint8 *)&entry->ea, ETHER_ADDR_LEN))) {
+ bool promote = (periodic && BCM_PARP_ANNOUNCE_WAIT_REACH(tickcnt, entry)) ?
+ TRUE: FALSE; + parp_entry_t *node = NULL; + + ip_ver = entry->ip.id; + + if (prev == NULL) + ptable->parp_candidate_list = entry->next; + else + prev->next = entry->next; + + node = bcm_l2_filter_parp_findentry(ptable, + entry->ip.data, IP_VER_6, TRUE, tickcnt); + if (promote && node == NULL) { + bcm_l2_filter_parp_addentry(osh, ptable, &entry->ea, + entry->ip.data, entry->ip.id, TRUE, tickcnt); + } + MFREE(osh, entry, sizeof(parp_entry_t) + entry->ip.len); + if (prev == NULL) { + entry = ptable->parp_candidate_list; + } else { + entry = prev->next; + } + } + else { + prev = entry; + entry = entry->next; + } + } +} +/* create 42 byte ARP packet for ARP response, aligned the Buffer */ +void * +bcm_l2_filter_proxyarp_alloc_reply(osl_t* osh, uint16 pktlen, struct ether_addr *src_ea, + struct ether_addr *dst_ea, uint16 ea_type, bool snap, void **p) +{ + void *pkt; + uint8 *frame; + + /* adjust pktlen since skb->data is aligned to 2 */ + pktlen += ALIGN_ADJ_BUFLEN; + + if ((pkt = PKTGET(osh, pktlen, FALSE)) == NULL) { + L2_FILTER_ERROR(("bcm_l2_filter_proxyarp_alloc_reply: PKTGET failed\n")); + return NULL; + } + /* adjust for pkt->data aligned */ + PKTPULL(osh, pkt, ALIGN_ADJ_BUFLEN); + frame = PKTDATA(osh, pkt); + + /* Create 14-byte eth header, plus snap header if applicable */ + bcopy(src_ea, frame + ETHER_SRC_OFFSET, ETHER_ADDR_LEN); + bcopy(dst_ea, frame + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + if (snap) { + hton16_ua_store(pktlen, frame + ETHER_TYPE_OFFSET); + bcopy(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN); + hton16_ua_store(ea_type, frame + ETHER_HDR_LEN + SNAP_HDR_LEN); + } else + hton16_ua_store(ea_type, frame + ETHER_TYPE_OFFSET); + + *p = (void *)(frame + ETHER_HDR_LEN + (snap ? SNAP_HDR_LEN + ETHER_TYPE_LEN : 0)); + return pkt; +} +/* copy the smac entry from parp_table */ +void bcm_l2_filter_parp_get_smac(arp_table_t* ptable, void* smac) +{ + bcopy(ptable->parp_smac, smac, ETHER_ADDR_LEN); +} +/* copy the cmac entry from parp_table */ +void bcm_l2_filter_parp_get_cmac(arp_table_t* ptable, void* cmac) +{ + bcopy(ptable->parp_cmac, cmac, ETHER_ADDR_LEN); +} +/* copy the smac entry to smac entry in parp_table */ +void bcm_l2_filter_parp_set_smac(arp_table_t* ptable, void* smac) +{ + bcopy(smac, ptable->parp_smac, ETHER_ADDR_LEN); +} +/* copy the cmac entry to cmac entry in parp_table */ +void bcm_l2_filter_parp_set_cmac(arp_table_t* ptable, void* cmac) +{ + bcopy(cmac, ptable->parp_cmac, ETHER_ADDR_LEN); +} + +uint16 +calc_checksum(uint8 *src_ipa, uint8 *dst_ipa, uint32 ul_len, uint8 prot, uint8 *ul_data) +{ + uint16 *startpos; + uint32 sum = 0; + int i; + uint16 answer = 0; + + if (src_ipa) { + uint8 ph[8] = {0, }; + for (i = 0; i < (IPV6_ADDR_LEN / 2); i++) { + sum += *((uint16 *)src_ipa); + src_ipa += 2; + } + + for (i = 0; i < (IPV6_ADDR_LEN / 2); i++) { + sum += *((uint16 *)dst_ipa); + dst_ipa += 2; + } + + *((uint32 *)ph) = hton32(ul_len); + *((uint32 *)(ph+4)) = 0; + ph[7] = prot; + startpos = (uint16 *)ph; + for (i = 0; i < 4; i++) { + sum += *startpos++; + } + } + + startpos = (uint16 *)ul_data; + while (ul_len > 1) { + sum += *startpos++; + ul_len -= 2; + } + + if (ul_len == 1) { + *((uint8 *)(&answer)) = *((uint8 *)startpos); + sum += answer; + } + + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); + answer = ~sum; + + return answer; +} +/* + * The length of the option including + * the type and length fields in units of 8 octets + */ +bcm_tlv_t * +parse_nd_options(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + elt = 
(bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len * 8;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) &&
+ (totlen >= len))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + len);
+ totlen -= len;
+ }
+
+ return NULL;
+}
+
+/* returns BCME_OK (0) for a TDLS setup request or TDLS discovery request */
+int
+bcm_l2_filter_block_tdls(osl_t *osh, void *pktbuf)
+{
+ uint16 ethertype;
+ uint8 *data;
+ int datalen;
+ bool snap;
+ uint8 action_field;
+
+ if (get_pkt_ether_type(osh, pktbuf, &data, &datalen, &ethertype, &snap) != BCME_OK)
+ return BCME_ERROR;
+
+ if (ethertype != ETHER_TYPE_89_0D)
+ return BCME_ERROR;
+
+ /* validate payload length */
+ if (datalen < TDLS_PAYLOAD_TYPE_LEN + 2) {
+ L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong length for 89-0d eth frame %d\n",
+ datalen));
+ return BCME_ERROR;
+ }
+
+ /* validate payload type */
+ if (*data != TDLS_PAYLOAD_TYPE) {
+ L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong payload type for 89-0d"
+ " eth frame %d\n",
+ *data));
+ return BCME_ERROR;
+ }
+ data += TDLS_PAYLOAD_TYPE_LEN;
+
+ /* validate TDLS action category */
+ if (*data != TDLS_ACTION_CATEGORY_CODE) {
+ L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong TDLS Category %d\n", *data));
+ return BCME_ERROR;
+ }
+ data++;
+
+ action_field = *data;
+
+ if ((action_field == TDLS_SETUP_REQ) || (action_field == TDLS_DISCOVERY_REQ))
+ return BCME_OK;
+
+ return BCME_ERROR;
+}
diff --git a/bcmdhd.101.10.361.x/bcmbloom.c b/bcmdhd.101.10.361.x/bcmbloom.c
new file mode 100755
index 0000000..7660c88
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmbloom.c
@@ -0,0 +1,233 @@
+/*
+ * Bloom filter support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + */ + +#include +#include + +#include + +#ifdef BCMDRIVER +#include +#include +#else /* !BCMDRIVER */ +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +#endif /* !BCMDRIVER */ +#include + +#include + +#define BLOOM_BIT_LEN(_x) ((_x) << 3) + +struct bcm_bloom_filter { + void *cb_ctx; + uint max_hash; + bcm_bloom_hash_t *hash; /* array of hash functions */ + uint filter_size; /* in bytes */ + uint8 *filter; /* can be NULL for validate only */ +}; + +/* public interface */ +int +bcm_bloom_create(bcm_bloom_alloc_t alloc_cb, + bcm_bloom_free_t free_cb, void *cb_ctx, uint max_hash, + uint filter_size, bcm_bloom_filter_t **bloom) +{ + int err = BCME_OK; + bcm_bloom_filter_t *bp = NULL; + + if (!bloom || !alloc_cb || (max_hash == 0)) { + err = BCME_BADARG; + goto done; + } + + bp = (*alloc_cb)(cb_ctx, sizeof(*bp)); + if (!bp) { + err = BCME_NOMEM; + goto done; + } + memset(bp, 0, sizeof(*bp)); + + bp->cb_ctx = cb_ctx; + bp->max_hash = max_hash; + bp->hash = (*alloc_cb)(cb_ctx, sizeof(*bp->hash) * max_hash); + if (!bp->hash) { + err = BCME_NOMEM; + goto done; + } + memset(bp->hash, 0, sizeof(*bp->hash) * max_hash); + + if (filter_size > 0) { + bp->filter = (*alloc_cb)(cb_ctx, filter_size); + if (!bp->filter) { + err = BCME_NOMEM; + goto done; + } + bp->filter_size = filter_size; + memset(bp->filter, 0, filter_size); + } + + *bloom = bp; + +done: + if (err != BCME_OK) + bcm_bloom_destroy(&bp, free_cb); + + return err; +} + +int +bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb) +{ + int err = BCME_OK; + bcm_bloom_filter_t *bp; + + if (!bloom || !*bloom || !free_cb) + goto done; + + bp = *bloom; + *bloom = NULL; + + if (bp->filter) + (*free_cb)(bp->cb_ctx, bp->filter, bp->filter_size); + if (bp->hash) + (*free_cb)(bp->cb_ctx, bp->hash, + sizeof(*bp->hash) * bp->max_hash); + (*free_cb)(bp->cb_ctx, bp, sizeof(*bp)); + +done: + return err; +} + +int +bcm_bloom_add_hash(bcm_bloom_filter_t *bp, bcm_bloom_hash_t hash, uint *idx) +{ + uint i; + + if (!bp || !hash || !idx) + return BCME_BADARG; + + for (i = 0; i < bp->max_hash; ++i) { + if (bp->hash[i] == NULL) + break; + } + + if (i >= bp->max_hash) + return BCME_NORESOURCE; + + bp->hash[i] = hash; + *idx = i; + return BCME_OK; +} + +int +bcm_bloom_remove_hash(bcm_bloom_filter_t *bp, uint idx) +{ + if (!bp) + return BCME_BADARG; + + if (idx >= bp->max_hash) + return BCME_NOTFOUND; + + bp->hash[idx] = NULL; + return BCME_OK; +} + +bool +bcm_bloom_is_member(bcm_bloom_filter_t *bp, + const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len) +{ + uint i; + int err = BCME_OK; + + if (!tag || (tag_len == 0)) /* empty tag is always a member */ + goto done; + + /* use internal buffer if none was specified */ + if (!buf || (buf_len == 0)) { + if (!bp->filter) /* every one is a member of empty filter */ + goto done; + + buf = bp->filter; + buf_len = bp->filter_size; + } + + for (i = 0; i < bp->max_hash; ++i) { + uint pos; + if (!bp->hash[i]) + continue; + pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len); + + /* all bits must be set for a match */ + if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) { + err = BCME_NOTFOUND; + break; + } + } + +done: + return err; +} + +int +bcm_bloom_add_member(bcm_bloom_filter_t *bp, const uint8 *tag, uint tag_len) +{ + uint i; + + if (!bp || !tag || (tag_len == 0)) + return BCME_BADARG; + + if (!bp->filter) /* validate only */ + return BCME_UNSUPPORTED; + + for (i = 0; i < bp->max_hash; ++i) { + uint pos; + if (!bp->hash[i]) + continue; + pos = (*bp->hash[i])(bp->cb_ctx, i, 
tag, tag_len); + setbit(bp->filter, pos % BLOOM_BIT_LEN(bp->filter_size)); + } + + return BCME_OK; +} + +int bcm_bloom_get_filter_data(bcm_bloom_filter_t *bp, + uint buf_size, uint8 *buf, uint *buf_len) +{ + if (!bp) + return BCME_BADARG; + + if (buf_len) + *buf_len = bp->filter_size; + + if (buf_size < bp->filter_size) + return BCME_BUFTOOSHORT; + + if (bp->filter && bp->filter_size) + memcpy(buf, bp->filter, bp->filter_size); + + return BCME_OK; +} diff --git a/bcmdhd.101.10.361.x/bcmevent.c b/bcmdhd.101.10.361.x/bcmevent.c new file mode 100755 index 0000000..a8cafcb --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmevent.c @@ -0,0 +1,445 @@ +/* + * bcmevent read-only data shared by kernel or app layers + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#include +#include +#include +#include +#include +#include +#include <802.11.h> + +/* Table of event name strings for UIs and debugging dumps */ +typedef struct { + uint event; + const char *name; +} bcmevent_name_str_t; + +/* Use the actual name for event tracing */ +#define BCMEVENT_NAME(_event) {(_event), #_event} + +/* this becomes static data when all code is changed to use + * the bcmevent_get_name() API + */ +static const bcmevent_name_str_t bcmevent_names[] = { + BCMEVENT_NAME(WLC_E_SET_SSID), + BCMEVENT_NAME(WLC_E_JOIN), + BCMEVENT_NAME(WLC_E_START), + BCMEVENT_NAME(WLC_E_AUTH), + BCMEVENT_NAME(WLC_E_AUTH_IND), + BCMEVENT_NAME(WLC_E_DEAUTH), + BCMEVENT_NAME(WLC_E_DEAUTH_IND), + BCMEVENT_NAME(WLC_E_ASSOC), + BCMEVENT_NAME(WLC_E_ASSOC_IND), + BCMEVENT_NAME(WLC_E_REASSOC), + BCMEVENT_NAME(WLC_E_REASSOC_IND), + BCMEVENT_NAME(WLC_E_DISASSOC), + BCMEVENT_NAME(WLC_E_DISASSOC_IND), + BCMEVENT_NAME(WLC_E_QUIET_START), + BCMEVENT_NAME(WLC_E_QUIET_END), + BCMEVENT_NAME(WLC_E_BEACON_RX), + BCMEVENT_NAME(WLC_E_LINK), + BCMEVENT_NAME(WLC_E_MIC_ERROR), + BCMEVENT_NAME(WLC_E_NDIS_LINK), + BCMEVENT_NAME(WLC_E_ROAM), + BCMEVENT_NAME(WLC_E_TXFAIL), + BCMEVENT_NAME(WLC_E_PMKID_CACHE), + BCMEVENT_NAME(WLC_E_RETROGRADE_TSF), + BCMEVENT_NAME(WLC_E_PRUNE), + BCMEVENT_NAME(WLC_E_AUTOAUTH), + BCMEVENT_NAME(WLC_E_EAPOL_MSG), + BCMEVENT_NAME(WLC_E_SCAN_COMPLETE), + BCMEVENT_NAME(WLC_E_ADDTS_IND), + BCMEVENT_NAME(WLC_E_DELTS_IND), + BCMEVENT_NAME(WLC_E_BCNSENT_IND), + BCMEVENT_NAME(WLC_E_BCNRX_MSG), + BCMEVENT_NAME(WLC_E_BCNLOST_MSG), + BCMEVENT_NAME(WLC_E_ROAM_PREP), + BCMEVENT_NAME(WLC_E_PFN_NET_FOUND), + BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), + BCMEVENT_NAME(WLC_E_PFN_NET_LOST), + BCMEVENT_NAME(WLC_E_JOIN_START), + BCMEVENT_NAME(WLC_E_ROAM_START), + BCMEVENT_NAME(WLC_E_ASSOC_START), +#ifdef EXT_STA + BCMEVENT_NAME(WLC_E_RESET_COMPLETE), + BCMEVENT_NAME(WLC_E_JOIN_START), + BCMEVENT_NAME(WLC_E_ROAM_START), + 
BCMEVENT_NAME(WLC_E_ASSOC_START), + BCMEVENT_NAME(WLC_E_ASSOC_RECREATED), + BCMEVENT_NAME(WLC_E_SPEEDY_RECREATE_FAIL), +#endif /* EXT_STA */ +#if defined(IBSS_PEER_DISCOVERY_EVENT) + BCMEVENT_NAME(WLC_E_IBSS_ASSOC), +#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */ + BCMEVENT_NAME(WLC_E_RADIO), + BCMEVENT_NAME(WLC_E_PSM_WATCHDOG), + BCMEVENT_NAME(WLC_E_PROBREQ_MSG), + BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND), + BCMEVENT_NAME(WLC_E_PSK_SUP), + BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED), + BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME), + BCMEVENT_NAME(WLC_E_ICV_ERROR), + BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR), + BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR), + BCMEVENT_NAME(WLC_E_TRACE), + BCMEVENT_NAME(WLC_E_IF), +#ifdef WLP2P + BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE), +#endif + BCMEVENT_NAME(WLC_E_RSSI), + BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE), + BCMEVENT_NAME(WLC_E_ACTION_FRAME), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE), +#if defined(NDIS) + BCMEVENT_NAME(WLC_E_PRE_ASSOC_IND), + BCMEVENT_NAME(WLC_E_PRE_REASSOC_IND), + BCMEVENT_NAME(WLC_E_CHANNEL_ADOPTED), + BCMEVENT_NAME(WLC_E_AP_STARTED), + BCMEVENT_NAME(WLC_E_DFS_AP_STOP), + BCMEVENT_NAME(WLC_E_DFS_AP_RESUME), + BCMEVENT_NAME(WLC_E_ASSOC_IND_NDIS), + BCMEVENT_NAME(WLC_E_REASSOC_IND_NDIS), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS), + BCMEVENT_NAME(WLC_E_AUTH_REQ), + BCMEVENT_NAME(WLC_E_IBSS_COALESCE), +#endif /* #if defined(NDIS) */ + +#ifdef BCMWAPI_WAI + BCMEVENT_NAME(WLC_E_WAI_STA_EVENT), + BCMEVENT_NAME(WLC_E_WAI_MSG), +#endif /* BCMWAPI_WAI */ + + BCMEVENT_NAME(WLC_E_ESCAN_RESULT), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE), +#ifdef WLP2P + BCMEVENT_NAME(WLC_E_PROBRESP_MSG), + BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG), +#endif +#ifdef PROP_TXSTATUS + BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP), +#endif + BCMEVENT_NAME(WLC_E_WAKE_EVENT), + BCMEVENT_NAME(WLC_E_DCS_REQUEST), + BCMEVENT_NAME(WLC_E_RM_COMPLETE), + BCMEVENT_NAME(WLC_E_OVERLAY_REQ), + BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND), + BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT), + BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE), + BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), +#ifdef SOFTAP + BCMEVENT_NAME(WLC_E_GTK_PLUMBED), +#endif + BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE), + BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE), + BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX), +#ifdef WLTDLS + BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT), +#endif /* WLTDLS */ + BCMEVENT_NAME(WLC_E_NATIVE), +#ifdef WLPKTDLYSTAT + BCMEVENT_NAME(WLC_E_PKTDELAY_IND), +#endif /* WLPKTDLYSTAT */ + BCMEVENT_NAME(WLC_E_SERVICE_FOUND), + BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX), + BCMEVENT_NAME(WLC_E_GAS_COMPLETE), + BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE), + BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE), +#ifdef WLWNM + BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP), +#endif /* WLWNM */ +#if defined(WL_PROXDETECT) || defined(RTT_SUPPORT) + BCMEVENT_NAME(WLC_E_PROXD), +#endif + BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL), + BCMEVENT_NAME(WLC_E_BSSID), +#ifdef PROP_TXSTATUS + BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT), +#endif + BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND), + BCMEVENT_NAME(WLC_E_TXFAIL_THRESH), +#ifdef WLAIBSS + BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL), +#endif /* WLAIBSS */ +#ifdef GSCAN_SUPPORT + BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT), + BCMEVENT_NAME(WLC_E_PFN_SSID_EXT), +#endif /* GSCAN_SUPPORT */ +#ifdef WLBSSLOAD_REPORT + BCMEVENT_NAME(WLC_E_BSS_LOAD), +#endif +#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW) + BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ), +#endif +#ifdef WLFBT + BCMEVENT_NAME(WLC_E_FBT), +#endif /* WLFBT */ + 
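+ /* Each entry in this table expands through BCMEVENT_NAME(), so e.g.
+ * BCMEVENT_NAME(WLC_E_AUTHORIZED) becomes
+ * { WLC_E_AUTHORIZED, "WLC_E_AUTHORIZED" }: the event code paired with
+ * its own identifier as the display string.
+ */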
BCMEVENT_NAME(WLC_E_AUTHORIZED),
+ BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX),
+
+#ifdef WLAWDL
+ BCMEVENT_NAME(WLC_E_AWDL_AW),
+ BCMEVENT_NAME(WLC_E_AWDL_ROLE),
+ BCMEVENT_NAME(WLC_E_AWDL_EVENT),
+#endif /* WLAWDL */
+
+ BCMEVENT_NAME(WLC_E_CSA_START_IND),
+ BCMEVENT_NAME(WLC_E_CSA_DONE_IND),
+ BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND),
+ BCMEVENT_NAME(WLC_E_RMC_EVENT),
+ BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND),
+ BCMEVENT_NAME(WLC_E_ALLOW_CREDIT_BORROW),
+ BCMEVENT_NAME(WLC_E_MSCH),
+ BCMEVENT_NAME(WLC_E_ULP),
+ BCMEVENT_NAME(WLC_E_NAN),
+ BCMEVENT_NAME(WLC_E_PKT_FILTER),
+ BCMEVENT_NAME(WLC_E_DMA_TXFLUSH_COMPLETE),
+ BCMEVENT_NAME(WLC_E_PSK_AUTH),
+ BCMEVENT_NAME(WLC_E_SDB_TRANSITION),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_BACKOFF),
+ BCMEVENT_NAME(WLC_E_PFN_BSSID_SCAN_BACKOFF),
+ BCMEVENT_NAME(WLC_E_AGGR_EVENT),
+ BCMEVENT_NAME(WLC_E_TVPM_MITIGATION),
+ BCMEVENT_NAME(WLC_E_SCAN),
+ BCMEVENT_NAME(WLC_E_SLOTTED_BSS_PEER_OP),
+ BCMEVENT_NAME(WLC_E_PHY_CAL),
+#ifdef WL_NAN
+ BCMEVENT_NAME(WLC_E_NAN_CRITICAL),
+ BCMEVENT_NAME(WLC_E_NAN_NON_CRITICAL),
+ BCMEVENT_NAME(WLC_E_NAN),
+#endif /* WL_NAN */
+ BCMEVENT_NAME(WLC_E_RPSNOA),
+ BCMEVENT_NAME(WLC_E_WA_LQM),
+ BCMEVENT_NAME(WLC_E_OBSS_DETECTION),
+ BCMEVENT_NAME(WLC_E_SC_CHAN_QUAL),
+ BCMEVENT_NAME(WLC_E_DYNSAR),
+ BCMEVENT_NAME(WLC_E_ROAM_CACHE_UPDATE),
+ BCMEVENT_NAME(WLC_E_AP_BCN_DRIFT),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE_EXT),
+#ifdef WL_CLIENT_SAE
+ BCMEVENT_NAME(WLC_E_AUTH_START),
+#endif /* WL_CLIENT_SAE */
+#ifdef WL_TWT
+ BCMEVENT_NAME(WLC_E_TWT_SETUP),
+ BCMEVENT_NAME(WLC_E_TWT_TEARDOWN),
+ BCMEVENT_NAME(WLC_E_TWT_INFO_FRM)
+#endif /* WL_TWT */
+};
+
+const char *bcmevent_get_name(uint event_type)
+{
+ /* note: first coded this as a static const but some
+ * ROMs already have something called event_name so
+ * changed it so we don't have a variable for the
+ * 'unknown' string
+ */
+ const char *event_name = NULL;
+
+ uint idx;
+ for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+ if (bcmevent_names[idx].event == event_type) {
+ event_name = bcmevent_names[idx].name;
+ break;
+ }
+ }
+
+ /* if we find an event name in the array, return it.
+ * otherwise return the unknown string.
+ */
+ return ((event_name) ? event_name : "Unknown Event");
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host order.
+ */
+ evt->event_type = ntoh32(evt->event_type);
+ evt->flags = ntoh16(evt->flags);
+ evt->status = ntoh32(evt->status);
+ evt->reason = ntoh32(evt->reason);
+ evt->auth_type = ntoh32(evt->auth_type);
+ evt->datalen = ntoh32(evt->datalen);
+ evt->version = ntoh16(evt->version);
+}
+
+void
+wl_event_to_network_order(wl_event_msg_t * evt)
+{
+ /* Inverse of wl_event_to_host_order(): convert all members back to
+ * network byte order.
+ */
+ evt->event_type = hton32(evt->event_type);
+ evt->flags = hton16(evt->flags);
+ evt->status = hton32(evt->status);
+ evt->reason = hton32(evt->reason);
+ evt->auth_type = hton32(evt->auth_type);
+ evt->datalen = hton32(evt->datalen);
+ evt->version = hton16(evt->version);
+}
+
+/*
+ * Validate that the packet is a proper event and, if it is, copy the event
+ * header to out_event. To validate only, without copying the header, pass
+ * NULL for out_event.
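+ *
+ * A minimal caller sketch (illustrative only; the surrounding receive path
+ * is an assumption, not part of this file):
+ *
+ *   bcm_event_msg_u_t evu;
+ *   if (is_wlc_event_frame(pktdata, pktlen, 0, &evu) == BCME_OK) {
+ *       wl_event_to_host_order(&evu.event);
+ *       // dispatch on evu.event.event_type
+ *   }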
+ * + * Return values are + * BCME_OK - It is a BRCM event or BRCM dongle event + * BCME_NOTFOUND - Not BRCM, not an event, may be okay + * BCME_BADLEN - Bad length, should not process, just drop + */ +int +is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype, + bcm_event_msg_u_t *out_event) +{ + uint16 evlen = 0; /* length in bcmeth_hdr */ + uint16 subtype; + uint16 usr_subtype; + bcm_event_t *bcm_event; + uint8 *pktend; + uint8 *evend; + int err = BCME_OK; + uint32 data_len = 0; /* data length in bcm_event */ + + pktend = (uint8 *)pktdata + pktlen; + bcm_event = (bcm_event_t *)pktdata; + + /* only care about 16-bit subtype / length versions */ + if ((uint8 *)&bcm_event->bcm_hdr < pktend) { + uint8 short_subtype = *(uint8 *)&bcm_event->bcm_hdr; + if (!(short_subtype & 0x80)) { + err = BCME_NOTFOUND; + goto done; + } + } + + /* must have both ether_header and bcmeth_hdr */ + if (pktlen < OFFSETOF(bcm_event_t, event)) { + err = BCME_BADLEN; + goto done; + } + + /* check length in bcmeth_hdr */ + +#ifdef BCMDONGLEHOST + /* temporary - header length not always set properly. When the below + * !BCMDONGLEHOST is in all branches that use trunk DHD, the code + * under BCMDONGLEHOST can be removed. + */ + evlen = (uint16)(pktend - (uint8 *)&bcm_event->bcm_hdr.version); +#else + evlen = ntoh16_ua((void *)&bcm_event->bcm_hdr.length); +#endif /* BCMDONGLEHOST */ + evend = (uint8 *)&bcm_event->bcm_hdr.version + evlen; + if (evend != pktend) { + err = BCME_BADLEN; + goto done; + } + + /* match on subtype, oui and usr subtype for BRCM events */ + subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.subtype); + if (subtype != BCMILCP_SUBTYPE_VENDOR_LONG) { + err = BCME_NOTFOUND; + goto done; + } + + if (bcmp(BRCM_OUI, &bcm_event->bcm_hdr.oui[0], DOT11_OUI_LEN)) { + err = BCME_NOTFOUND; + goto done; + } + + /* if it is a bcm_event or bcm_dngl_event_t, validate it */ + usr_subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.usr_subtype); + switch (usr_subtype) { + case BCMILCP_BCM_SUBTYPE_EVENT: + /* check that header length and pkt length are sufficient */ + if ((pktlen < sizeof(bcm_event_t)) || + (evend < ((uint8 *)bcm_event + sizeof(bcm_event_t)))) { + err = BCME_BADLEN; + goto done; + } + + /* ensure data length in event is not beyond the packet. */ + data_len = ntoh32_ua((void *)&bcm_event->event.datalen); + if ((sizeof(bcm_event_t) + data_len + + BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) { + err = BCME_BADLEN; + goto done; + } + + if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) { + err = BCME_NOTFOUND; + goto done; + } + + if (out_event) { + /* ensure BRCM event pkt aligned */ + memcpy(&out_event->event, &bcm_event->event, sizeof(wl_event_msg_t)); + } + + break; + + case BCMILCP_BCM_SUBTYPE_DNGLEVENT: +#if defined(HEALTH_CHECK) || defined(DNGL_EVENT_SUPPORT) + if ((pktlen < sizeof(bcm_dngl_event_t)) || + (evend < ((uint8 *)bcm_event + sizeof(bcm_dngl_event_t)))) { + err = BCME_BADLEN; + goto done; + } + + /* ensure data length in event is not beyond the packet. 
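+ * For a well-formed dongle event the packet must be exactly a
+ * bcm_dngl_event_t header, then data_len bytes of payload, then
+ * BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD bytes of padding; the check below
+ * rejects anything else.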
*/
+ data_len = ntoh16_ua((void *)&((bcm_dngl_event_t *)pktdata)->dngl_event.datalen);
+ if ((sizeof(bcm_dngl_event_t) + data_len +
+ BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
+ err = BCME_BADLEN;
+ goto done;
+ }
+
+ if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+
+ if (out_event) {
+ /* ensure BRCM dngl event pkt aligned */
+ memcpy(&out_event->dngl_event, &((bcm_dngl_event_t *)pktdata)->dngl_event,
+ sizeof(bcm_dngl_event_msg_t));
+ }
+
+ break;
+#else
+ err = BCME_UNSUPPORTED;
+ break;
+#endif /* HEALTH_CHECK || DNGL_EVENT_SUPPORT */
+
+ default:
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+
+ BCM_REFERENCE(data_len);
+done:
+ return err;
+}
diff --git a/bcmdhd.101.10.361.x/bcminternal-android.mk b/bcmdhd.101.10.361.x/bcminternal-android.mk
new file mode 100755
index 0000000..4fecfad
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcminternal-android.mk
@@ -0,0 +1,88 @@
+#
+# Broadcom Proprietary and Confidential. Copyright (C) 2020,
+# All Rights Reserved.
+#
+# This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+# the contents of this file may not be disclosed to third parties,
+# copied or duplicated in any form, in whole or in part, without
+# the prior written permission of Broadcom.
+#
+#
+# <>

+# This file should be seen only by internal builds because it will
+# be mentioned only in internal filelists like brcm.flist.
+# See extended comment bcminternal.mk for details.

+BCMINTERNAL := 1

+BCMINTERNAL_DFLAGS += -DDHD_NO_MOG

+ifneq ($(CONFIG_BCMDHD_PCIE),)
+  # Enable Register access via dhd IOVAR
+  BCMINTERNAL_DFLAGS += -DDHD_PCIE_REG_ACCESS
+  # latency timestamping
+  BCMINTERNAL_DFLAGS += -DDHD_PKTTS
+  # Traffic Pattern Analysis on Socket Flow
+  BCMINTERNAL_DFLAGS += -DDHD_QOS_ON_SOCK_FLOW
+  # QoS unit testing support
+  BCMINTERNAL_DFLAGS += -DDHD_QOS_ON_SOCK_FLOW_UT
+  # Auto QOS
+  BCMINTERNAL_DFLAGS += -DWL_AUTO_QOS

+  ifneq ($(filter -DCUSTOMER_HW4, $(DHDCFLAGS)),)
+    # These will be moved to hw4 Makefile for 4389b0
+    BCMINTERNAL_DFLAGS += -DWBRC
+    BCMINTERNAL_DFLAGS += -DWLAN_ACCEL_BOOT
+    BCMINTERNAL_DFLAGS += -DDHD_HTPUT_TUNABLES
+    # BCMINTERNAL_DFLAGS += -DDHD_FIS_DUMP
+    # SCAN TYPES; if kernel < 4.17, backport support is required
+    ifneq ($(CONFIG_CFG80211_SCANTYPE_BKPORT),)
+      DHDCFLAGS += -DWL_SCAN_TYPE
+    endif
+    # Jig builds
+    # No reset during dhd attach
+    BCMINTERNAL_DFLAGS += -DDHD_SKIP_DONGLE_RESET_IN_ATTACH
+    # Dongle isolation ensures there are no resets across devreset ON/OFF
+    BCMINTERNAL_DFLAGS += -DDONGLE_ENABLE_ISOLATION
+    # Quiesce dongle using DB7 trap
+    BCMINTERNAL_DFLAGS += -DDHD_DONGLE_TRAP_IN_DETACH
+    # Collect socram during dongle init failures for internal builds
+    BCMINTERNAL_DFLAGS += -DDEBUG_DNGL_INIT_FAIL
+    # Dongle reset during WiFi ON to keep it in a sane state
+    BCMINTERNAL_DFLAGS += -DFORCE_DONGLE_RESET_IN_DEVRESET_ON
+    # Perform Backplane Reset else FLR will happen
+    # BCMINTERNAL_DFLAGS += -DDHD_USE_BP_RESET_SS_CTRL
+    BCMINTERNAL_DFLAGS += -DWIFI_TURNOFF_DELAY=10

+  endif

+  # NCI_BUS support
+  BCMINTERNAL_DFLAGS += -DSOCI_NCI_BUS
+endif


+BCMINTERNAL_DFLAGS += -DDHD_BUS_MEM_ACCESS

+# Support multiple chips
+BCMINTERNAL_DFLAGS += -DSUPPORT_MULTIPLE_CHIPS

+# Support unreleased chips
+BCMINTERNAL_DFLAGS += -DUNRELEASEDCHIP

+# Collect socram if readshared fails
+BCMINTERNAL_DFLAGS += -DDEBUG_DNGL_INIT_FAIL

+# Force enable memdump value to DUMP_MEMFILE if it is disabled
+BCMINTERNAL_DFLAGS += -DDHD_INIT_DEFAULT_MEMDUMP

+ifneq ($(filter -DDHD_QOS_ON_SOCK_FLOW,$(BCMINTERNAL_DFLAGS)),)
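+# Each object below is linked in only when its feature define survived the
+# $(filter ...) test against BCMINTERNAL_DFLAGS, i.e. only when the flag
+# was actually enabled above.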
+BCMINTERNAL_DHDOFILES += dhd_linux_sock_qos.o +endif +ifneq ($(filter -DSOCI_NCI_BUS,$(BCMINTERNAL_DFLAGS)),) +BCMINTERNAL_DHDOFILES += nciutils.o +endif +ifneq ($(filter -DWBRC,$(BCMINTERNAL_DFLAGS)),) +BCMINTERNAL_DHDOFILES += wb_regon_coordinator.o +endif +# vim: filetype=make shiftwidth=2 diff --git a/bcmdhd.101.10.361.x/bcminternal.mk b/bcmdhd.101.10.361.x/bcminternal.mk new file mode 100755 index 0000000..eb94021 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcminternal.mk @@ -0,0 +1,60 @@ +# +# Broadcom Proprietary and Confidential. Copyright (C) 2020, +# All Rights Reserved. +# +# This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; +# the contents of this file may not be disclosed to third parties, +# copied or duplicated in any form, in whole or in part, without +# the prior written permission of Broadcom. +# +# +# <> + +# This file should be seen only by internal builds because it will +# be mentioned only in internal filelists like brcm.flist. The idea +# is that it will be conditionally included by makefiles using the +# "-include" syntax, with the result that internal builds will see +# this file and set BCMINTERNAL which will eventually result in a +# -DBCMINTERNAL option passed to the compiler along with possible +# other effects. External builds will never see it and it will be +# silently ignored. +# +# Any settings which should not be exposed to customers may be +# placed here. For instance, if we were working on a super-secret +# new feature in supersecret.c we could set a variable here like +# BCMINTERNAL_OBJECTS := supersecret.o +# and later say +# OBJECTS += $(BCMINTERNAL_OBJECTS) +# within the main makefile. +# +# The key point is that this file is never shipped to customers +# because it's present only in internal filelists so anything +# here is private. + +BCMINTERNAL := 1 + +BCMINTERNAL_DFLAGS += -DBCMINTERNAL +BCMINTERNAL_DFLAGS += -DDHD_NO_MOG + +# Support unreleased chips +BCMINTERNAL_DFLAGS += -DUNRELEASEDCHIP + +ifneq ($(findstring -fwtrace,-$(TARGET)-),) + BCMINTERNAL_DFLAGS += -DDHD_FWTRACE + BCMINTERNAL_CFILES += dhd_fwtrace.c +endif + +# support only for SDIO MFG Fedora builds +ifneq ($(findstring -sdstd-,-$(TARGET)-),) + ifneq ($(findstring -mfgtest-,-$(TARGET)-),) + BCMINTERNAL_DFLAGS += -DDHD_SPROM + BCMINTERNAL_CFILES += bcmsrom.c bcmotp.c + endif +endif + +ifneq ($(findstring -pciefd-,$(TARGET)-),) +# NCI_BUS support +BCMINTERNAL_DFLAGS += -DSOCI_NCI_BUS -DBOOKER_NIC400_INF +BCMINTERNAL_CFILES += nciutils.c +endif +# vim: filetype=make shiftwidth=2 diff --git a/bcmdhd.101.10.361.x/bcmsdh.c b/bcmdhd.101.10.361.x/bcmsdh.c new file mode 100755 index 0000000..538f056 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdh.c @@ -0,0 +1,953 @@ +/* + * BCMSDH interface glue + * implement bcmsdh API for SDIOH driver + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +/** + * @file bcmsdh.c + */ + +/* ****************** BCMSDH Interface Functions *************************** */ + +#include +#include +#include +#include +#include +#include +#if !defined(BCMDONGLEHOST) +#include +#endif /* !defined(BCMDONGLEHOST) */ +#include + +#include /* BRCM API for SDIO clients (such as wl, dhd) */ +#include /* common SDIO/controller interface */ +#include /* SDIO device core hardware definitions. */ +#include /* SDIO Device and Protocol Specs */ + +#if defined (BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ + +#define SDIOH_API_ACCESS_RETRY_LIMIT 2 +const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; + +/* local copy of bcm sd handler */ +bcmsdh_info_t * l_bcmsdh = NULL; + +#if defined (BT_OVER_SDIO) +struct sdio_func *func_f3 = NULL; +static f3intr_handler processf3intr = NULL; +static dhd_hang_notification process_dhd_hang_notification = NULL; +static dhd_hang_state_t g_dhd_hang_state = NO_HANG_STATE; +#endif /* defined (BT_OVER_SDIO) */ + +#if defined(NDIS) && (NDISVER < 0x0630) +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); +#endif + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN) +extern int +sdioh_enable_hw_oob_intr(void *sdioh, bool enable); + +void +bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable) +{ + sdioh_enable_hw_oob_intr(sdh->sdioh, enable); +} +#endif + +#if defined (BT_OVER_SDIO) +void bcmsdh_btsdio_process_hang_state(dhd_hang_state_t new_state) +{ + bool state_change = false; + + BCMSDH_ERROR(("%s: DHD hang state changed - [%d] -> [%d]\n", + __FUNCTION__, g_dhd_hang_state, new_state)); + + if (g_dhd_hang_state == new_state) + return; + + switch (g_dhd_hang_state) { + case NO_HANG_STATE: + if (HANG_START_STATE == new_state) + state_change = true; + break; + + case HANG_START_STATE: + if (HANG_RECOVERY_STATE == new_state || + NO_HANG_STATE == new_state) + state_change = true; + + break; + + case HANG_RECOVERY_STATE: + if (NO_HANG_STATE == new_state) + state_change = true; + break; + + default: + BCMSDH_ERROR(("%s: Unhandled Hang state\n", __FUNCTION__)); + break; + } + + if (!state_change) { + BCMSDH_ERROR(("%s: Hang state cannot be changed\n", __FUNCTION__)); + return; + } + + g_dhd_hang_state = new_state; +} + +void bcmsdh_btsdio_process_f3_intr(void) +{ + if (processf3intr && (g_dhd_hang_state == NO_HANG_STATE)) + processf3intr(func_f3); +} + +void bcmsdh_btsdio_process_dhd_hang_notification(bool wifi_recovery_completed) +{ + bcmsdh_btsdio_process_hang_state(HANG_START_STATE); + + if (process_dhd_hang_notification) + process_dhd_hang_notification(func_f3, wifi_recovery_completed); + + /* WiFi was off, so HANG_RECOVERY_STATE is not needed */ + if (wifi_recovery_completed) + bcmsdh_btsdio_process_hang_state(NO_HANG_STATE); + else { + bcmsdh_btsdio_process_hang_state(HANG_RECOVERY_STATE); + } +} + +void bcmsdh_btsdio_interface_init(struct sdio_func *func, + f3intr_handler f3intr_fun, dhd_hang_notification hang_notification) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)l_bcmsdh; + BCMSDH_INFO(("%s: func %p \n", __FUNCTION__, func)); + func_f3 = func; + processf3intr = f3intr_fun; + sdioh_sdmmc_card_enable_func_f3(bcmsdh->sdioh, func); + process_dhd_hang_notification = hang_notification; + +} EXPORT_SYMBOL(bcmsdh_btsdio_interface_init); +#endif /* defined (BT_OVER_SDIO) */ + +/* Attach BCMSDH layer to 
SDIO Host Controller Driver + * + * @param osh OSL Handle. + * @param cfghdl Configuration Handle. + * @param regsva Virtual address of controller registers. + * @param irq Interrupt number of SDIO controller. + * + * @return bcmsdh_info_t Handle to BCMSDH context. + */ +bcmsdh_info_t * +bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva) +{ + bcmsdh_info_t *bcmsdh; + + if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) { + BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)bcmsdh, sizeof(bcmsdh_info_t)); + bcmsdh->sdioh = sdioh; + bcmsdh->osh = osh; + bcmsdh->init_success = TRUE; + *regsva = si_enum_base(0); + + bcmsdh_force_sbwad_calc(bcmsdh, FALSE); + + /* Report the BAR, to fix if needed */ + bcmsdh->sbwad = si_enum_base(0); + + /* save the handler locally */ + l_bcmsdh = bcmsdh; + + return bcmsdh; +} + +int +bcmsdh_detach(osl_t *osh, void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bcmsdh != NULL) { +#if defined(NDIS) && (NDISVER < 0x0630) + if (bcmsdh->sdioh) + sdioh_detach(osh, bcmsdh->sdioh); +#endif + MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t)); + } + + l_bcmsdh = NULL; + + return 0; +} + +int +bcmsdh_iovar_op(void *sdh, const char *name, + void *params, uint plen, void *arg, uint len, bool set) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set); +} + +bool +bcmsdh_intr_query(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + bool on; + + ASSERT(bcmsdh); + status = sdioh_interrupt_query(bcmsdh->sdioh, &on); + if (SDIOH_API_SUCCESS(status)) + return FALSE; + else + return on; +} + +int +bcmsdh_intr_enable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef BCMSPI_ANDROID + uint32 data; +#endif /* BCMSPI_ANDROID */ + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE); +#ifdef BCMSPI_ANDROID + data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL); + data |= 0xE0E70000; + bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL); +#endif /* BCMSPI_ANDROID */ + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_disable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef BCMSPI_ANDROID + uint32 data; +#endif /* BCMSPI_ANDROID */ + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE); +#ifdef BCMSPI_ANDROID + data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL); + data &= ~0xE0E70000; + bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL); +#endif /* BCMSPI_ANDROID */ + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh); + + status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_dereg(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh); + + status = sdioh_interrupt_deregister(bcmsdh->sdioh); + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + +#if defined(DHD_DEBUG) || defined(BCMDBG) +bool +bcmsdh_intr_pending(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + ASSERT(sdh); + return sdioh_interrupt_pending(bcmsdh->sdioh); +} +#endif + +int +bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + ASSERT(sdh); + + /* don't support yet */ + return BCME_UNSUPPORTED; +} + +/** + * Read from SDIO Configuration Space + * @param sdh SDIO Host context. + * @param func_num Function number to read from. + * @param addr Address to read from. + * @param err Error return. + * @return value read from SDIO configuration space. + */ +uint8 +bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + uint8 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_cfg_read); +#endif + +void +bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR; + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); +} +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_cfg_write); +#endif + +uint32 +bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 
0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num, + addr, data)); +} + +int +bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + uint8 *tmp_buf, *tmp_ptr; + uint8 *ptr; + bool ascii = func & ~0xf; + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cis); + ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT); + + status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length); + + if (ascii) { + /* Move binary bits to tmp and format them into the provided buffer. */ + if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) { + BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__)); + return BCME_NOMEM; + } + bcopy(cis, tmp_buf, length); + for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { + ptr += snprintf((char*)ptr, (cis + length - ptr - 4), + "%.2x ", *tmp_ptr & 0xff); + if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) + ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n"); + } + MFREE(bcmsdh->osh, tmp_buf, length); + } + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint32 offset) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cisd); + + status = sdioh_cisaddr_read(bcmsdh->sdioh, func, cisd, offset); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + + +int +bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set) +{ + int err = 0; + uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK; + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bar0 != bcmsdh->sbwad || force_set) { + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + + if (!err) + bcmsdh->sbwad = bar0; + else + /* invalidate cached window var */ + bcmsdh->sbwad = 0; + +#ifdef BCMDBG + if (err) + BCMSDH_ERROR(("%s: error setting address window %08x\n", + __FUNCTION__, address)); +#endif /* BCMDBG */ + } + + return err; +} + +uint32 +bcmsdh_reg_read(void *sdh, uintptr addr, uint size) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 word = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, (unsigned int)addr)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc)) { + bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc in dead lock + return 0xFFFFFFFF; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, + SDIOH_READ, SDIO_FUNC_1, addr, &word, size); + + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + BCMSDH_INFO(("uint32data = 0x%x\n", word)); + + /* if ok, return appropriately masked word */ + /* XXX Masking was put in for NDIS port, remove if not needed */ + if (SDIOH_API_SUCCESS(status)) { + switch (size) { + case sizeof(uint8): + return (word & 0xff); + case sizeof(uint16): + return (word & 0xffff); + case sizeof(uint32): + return 
word; + default: + bcmsdh->regfail = TRUE; + + } + } + + /* otherwise, bad sdio access or invalid size */ + BCMSDH_ERROR(("%s: error reading addr 0x%x size %d\n", + __FUNCTION__, (unsigned int)addr, size)); + return 0xFFFFFFFF; +} + +uint32 +bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + int err = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", + __FUNCTION__, (unsigned int)addr, size*8, data)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc))) { + bcmsdh->regfail = TRUE; // terence 20130621: + return err; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1, + addr, &data, size); + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + if (SDIOH_API_SUCCESS(status)) + return 0; + + BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", + __FUNCTION__, data, (unsigned int)addr, size)); + return 0xFFFFFFFF; +} + +bool +bcmsdh_regfail(void *sdh) +{ + return ((bcmsdh_info_t *)sdh)->regfail; +} + +int +bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_READ, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); +} + +int +bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + +int +bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0); + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC, + (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_abort(void *sdh, uint fn) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_abort(bcmsdh->sdioh, fn); +} + +int +bcmsdh_start(void *sdh, int stage) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_start(bcmsdh->sdioh, stage); +} + +int +bcmsdh_stop(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_stop(bcmsdh->sdioh); +} + +int +bcmsdh_waitlockfree(void *sdh) +{ +#ifdef LINUX + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_waitlockfree(bcmsdh->sdioh); +#else + return 0; +#endif +} + +int +bcmsdh_query_device(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; +#if defined(BCMDONGLEHOST) + bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; +#else + uint8 *fn0cis[1]; + int err; + char *vars; + uint varsz; + osl_t *osh = bcmsdh->osh; + + bcmsdh->vendevid = ~(0); + + if (!(fn0cis[0] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { + BCMSDH_ERROR(("%s: CIS malloc failed\n", __FUNCTION__)); + return (bcmsdh->vendevid); + } + + bzero(fn0cis[0], SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, 0, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT))) { + BCMSDH_ERROR(("%s: CIS read err %d, report unknown BRCM device\n", + __FUNCTION__, err)); + bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; + MFREE(osh, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT); + return (bcmsdh->vendevid); + } + + if (!err) { + if ((err = srom_parsecis(NULL, osh, fn0cis, 1, &vars, &varsz))) { + BCMSDH_ERROR(("%s: Error parsing CIS = %d\n", __FUNCTION__, err)); + } else { + bcmsdh->vendevid = (getintvar(vars, "vendid") << 16) | + getintvar(vars, "devid"); + MFREE(osh, vars, varsz); + } + } + + MFREE(osh, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT); +#endif /* BCMDONGLEHOST */ + return (bcmsdh->vendevid); +} + +uint +bcmsdh_query_iofnum(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (sdioh_query_iofnum(bcmsdh->sdioh)); +} + +int +bcmsdh_reset(bcmsdh_info_t *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_sdio_reset(bcmsdh->sdioh); +} + +/* XXX For use by NDIS port, remove if not needed. */ +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh) +{ + ASSERT(sdh); + return sdh->sdioh; +} + +/* Function to pass device-status bits to DHD. */ +uint32 +bcmsdh_get_dstatus(void *sdh) +{ +#ifdef BCMSPI + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + return sdioh_get_dstatus(sd); +#else + return 0; +#endif /* BCMSPI */ +} +uint32 +bcmsdh_cur_sbwad(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (bcmsdh->sbwad); +} + +/* example usage: if force is TRUE, forces the bcmsdhsdio_set_sbaddr_window to + * calculate sbwad always instead of caching. 
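+ * E.g. a caller that cannot trust the cached window could bracket its
+ * register accesses with:
+ *   bcmsdh_force_sbwad_calc(sdh, TRUE);
+ *   ... bcmsdh_reg_read() / bcmsdh_reg_write() ...
+ *   bcmsdh_force_sbwad_calc(sdh, FALSE);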
+ */ +void +bcmsdh_force_sbwad_calc(void *sdh, bool force) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + bcmsdh->force_sbwad_calc = force; +} + +void +bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) +{ +#ifdef BCMSPI + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + sdioh_chipinfo(sd, chip, chiprev); +#else + return; +#endif /* BCMSPI */ +} + +#ifdef BCMSPI +void +bcmsdh_dwordmode(void *sdh, bool set) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + sdioh_dwordmode(sd, set); + return; +} +#endif /* BCMSPI */ + +int +bcmsdh_sleep(void *sdh, bool enab) +{ +#ifdef SDIOH_SLEEP_ENABLED + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_sleep(sd, enab); +#else + return BCME_UNSUPPORTED; +#endif +} + +int +bcmsdh_gpio_init(void *sdh) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpio_init(sd); +} + +bool +bcmsdh_gpioin(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioin(sd, gpio); +} + +int +bcmsdh_gpioouten(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioouten(sd, gpio); +} + +int +bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioout(sd, gpio, enab); +} + +uint +bcmsdh_set_mode(void *sdh, uint mode) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return (sdioh_set_mode(bcmsdh->sdioh, mode)); +} + +#ifdef PKT_STATICS +uint32 +bcmsdh_get_spend_time(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return (sdioh_get_spend_time(bcmsdh->sdioh)); +} +#endif diff --git a/bcmdhd.101.10.361.x/bcmsdh_linux.c b/bcmdhd.101.10.361.x/bcmsdh_linux.c new file mode 100755 index 0000000..d297118 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdh_linux.c @@ -0,0 +1,594 @@ +/* + * SDIO access interface for drivers - linux specific (pci only) + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +/** + * @file bcmsdh_linux.c + */ + +#define __UNDEF_NO_VERSION__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +extern void dhdsdio_isr(void * args); +#include +#include +#include +#if defined(CONFIG_ARCH_ODIN) +#include +#endif /* defined(CONFIG_ARCH_ODIN) */ +#include + +/* driver info, initialized when bcmsdh_register is called */ +static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL}; + +typedef enum { + DHD_INTR_INVALID = 0, + DHD_INTR_INBAND, + DHD_INTR_HWOOB, + DHD_INTR_SWOOB +} DHD_HOST_INTR_TYPE; + +/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g. + * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather + * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_ctx) to this + * structure. + */ +typedef struct bcmsdh_os_info { + DHD_HOST_INTR_TYPE intr_type; + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + bcmsdh_cb_fn_t oob_irq_handler; + void *oob_irq_handler_context; + void *context; /* context returned from upper layer */ + void *sdioh; /* handle to lower layer (sdioh) */ + void *dev; /* handle to the underlying device */ + bool dev_wake_enabled; +} bcmsdh_os_info_t; + +/* debugging macros */ +#ifdef BCMDBG_ERR +#define SDLX_ERR(x) printf x +#define SDLX_MSG(x) printf x +#else +#define SDLX_ERR(x) printf x +#define SDLX_MSG(x) printf x +#endif /* BCMDBG_ERR */ + +/** + * Checks to see if vendor and device IDs match a supported SDIO Host Controller. + */ +bool +bcmsdh_chipmatch(uint16 vendor, uint16 device) +{ + /* Add other vendors and devices as required */ +#ifdef BCMINTERNAL +#ifdef BCMSDIOH_BCM + if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + if (device == BCM_SDIOH_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + if (device == BCM4710_DEVICE_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* For now still accept the old devid */ + if (device == 0x4380 && vendor == VENDOR_BROADCOM) { + return (TRUE); + } +#endif /* BCMSDIOH_BCM */ +#endif /* BCMINTERNAL */ + +#ifdef BCMSDIOH_STD + /* Check for Arasan host controller */ + if (vendor == VENDOR_SI_IMAGE) { + return (TRUE); + } + /* Check for BRCM 27XX Standard host controller */ + if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for BRCM Standard host controller */ + if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for TI PCIxx21 Standard host controller */ + if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) { + return (TRUE); + } + if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) { + return (TRUE); + } + /* Ricoh R5C822 Standard SDIO Host */ + if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) { + return (TRUE); + } + /* JMicron Standard SDIO Host */ + if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) { + return (TRUE); + } + +#ifdef BCMINTERNAL + /* Check for Jinvani (C-Guys) host controller */ + if (device == JINVANI_SDIOH_ID && vendor == VENDOR_JINVANI) { + return (TRUE); + } +#endif /* BCMINTERNAL */ +#endif /* BCMSDIOH_STD */ +#ifdef BCMSDIOH_SPI + /* This is the PciSpiHost. 
	 */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#ifdef BCMINTERNAL
+	/* This is the SPI Host for QT. */
+	if (device == BCM_SPIH_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found SPI Host Controller\n");
+		return (TRUE);
+	}
+#endif /* BCMINTERNAL */
+#endif /* BCMSDIOH_SPI */
+
+#ifdef BCMINTERNAL
+	/*
+	 * XXX - This is a hack to get the GPL SdioLinux driver to load on Arasan/x86.
+	 * This is accomplished by installing a PciSpiHost into the system alongside the
+	 * Arasan controller. The PciSpiHost is just used to get BCMSDH loaded.
+	 */
+#ifdef BCMSDH_FD
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found SdioLinux Host Controller\n");
+		return (TRUE);
+	}
+#endif /* BCMSDH_FD */
+#endif /* BCMINTERNAL */
+	return (FALSE);
+}
+
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num)
+{
+	ulong regs;
+	bcmsdh_info_t *bcmsdh;
+	uint32 vendevid;
+	bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+	bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+	if (bcmsdh == NULL) {
+		SDLX_ERR(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+	if (bcmsdh_osinfo == NULL) {
+		SDLX_ERR(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+		goto err;
+	}
+	bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	bcmsdh->os_cxt = bcmsdh_osinfo;
+	bcmsdh_osinfo->sdioh = sdioh;
+	bcmsdh_osinfo->dev = dev;
+	osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dev && device_init_wakeup(dev, true) == 0)
+		bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer specific OOB IRQ parameters: IRQ number and IRQ type */
+	bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+		&bcmsdh_osinfo->oob_irq_flags);
+	if (bcmsdh_osinfo->oob_irq_num < 0) {
+		SDLX_ERR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(bcmsdh);
+	/* try to attach to the target device */
+	bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+		slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+	if (bcmsdh_osinfo->context == NULL) {
+		SDLX_ERR(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return bcmsdh;
+
+	/* error handling */
+err:
+	if (bcmsdh != NULL)
+		bcmsdh_detach(osh, bcmsdh);
+	if (bcmsdh_osinfo != NULL)
+		MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	return NULL;
+}
+
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (bcmsdh_osinfo->dev)
+		device_init_wakeup(bcmsdh_osinfo->dev, false);
+	bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+	drvinfo.remove(bcmsdh_osinfo->context);
+	MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+	bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+	return 0;
+}
+
+#ifdef DHD_WAKE_STATUS
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh)
+{
+	return bcmsdh->total_wake_count;
+}
+
+int
bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag) +{ +#if defined(OOB_INTR_ONLY) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + unsigned long flags; +#endif + int ret = 0; + +#if defined(OOB_INTR_ONLY) + spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); + + ret = bcmsdh->pkt_wake; + bcmsdh->total_wake_count += flag; + bcmsdh->pkt_wake = flag; + + spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); +#endif + return ret; +} +#endif /* DHD_WAKE_STATUS */ + +int bcmsdh_suspend(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context)) + return -EBUSY; + return 0; +} + +int bcmsdh_resume(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (drvinfo.resume) + return drvinfo.resume(bcmsdh_osinfo->context); + return 0; +} + +extern int bcmsdh_register_client_driver(void); +extern void bcmsdh_unregister_client_driver(void); +extern int sdio_func_reg_notify(void* semaphore); +extern void sdio_func_unreg_notify(void); + +#if defined(BCMLXSDMMC) +int bcmsdh_reg_sdio_notify(void* semaphore) +{ + return sdio_func_reg_notify(semaphore); +} + +void bcmsdh_unreg_sdio_notify(void) +{ + sdio_func_unreg_notify(); +} +#endif /* defined(BCMLXSDMMC) */ + +int +bcmsdh_register(bcmsdh_driver_t *driver) +{ + int error = 0; + + drvinfo = *driver; + SDLX_MSG(("%s: register client driver\n", __FUNCTION__)); + error = bcmsdh_register_client_driver(); + if (error) + SDLX_ERR(("%s: failed %d\n", __FUNCTION__, error)); + + return error; +} + +void +bcmsdh_unregister(void) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (bcmsdh_pci_driver.node.next == NULL) + return; +#endif + + bcmsdh_unregister_client_driver(); +} + +void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_stay_awake(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_relax(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + return bcmsdh_osinfo->dev_wake_enabled; +} + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) +int bcmsdh_get_oob_intr_num(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + return bcmsdh_osinfo->oob_irq_num; +} + +void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable) +{ + unsigned long flags; + bcmsdh_os_info_t *bcmsdh_osinfo; + + if (!bcmsdh) + return; + + bcmsdh_osinfo = bcmsdh->os_cxt; + spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); + if (bcmsdh_osinfo->oob_irq_enabled != enable) { + if (enable) + enable_irq(bcmsdh_osinfo->oob_irq_num); + else + disable_irq_nosync(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = enable; + } + spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); +} + +#ifdef ENABLE_WAKEUP_PKT_DUMP +extern volatile bool dhd_mmc_suspend; +extern volatile bool dhd_mmc_wake; +#endif /* ENABLE_WAKEUP_PKT_DUMP */ + +static irqreturn_t wlan_oob_irq(int irq, void *dev_id) +{ + bcmsdh_info_t *bcmsdh = 
(bcmsdh_info_t *)dev_id; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + +#ifndef BCMSPI_ANDROID + bcmsdh_oob_intr_set(bcmsdh, FALSE); +#endif /* !BCMSPI_ANDROID */ + bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context); + +#ifdef ENABLE_WAKEUP_PKT_DUMP + if (dhd_mmc_suspend) { + dhd_mmc_wake = TRUE; + } +#endif /* ENABLE_WAKEUP_PKT_DUMP */ + + return IRQ_HANDLED; +} + +int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context) +{ + int err = 0; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (bcmsdh_osinfo->oob_irq_registered) { + SDLX_ERR(("%s: irq is already registered\n", __FUNCTION__)); + return -EBUSY; + } +#ifdef HW_OOB + SDLX_MSG(("%s: HW_OOB irq=%d flags=0x%X\n", __FUNCTION__, + (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags)); +#else + SDLX_MSG(("%s: SW_OOB irq=%d flags=0x%X\n", __FUNCTION__, + (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags)); +#endif + bcmsdh_osinfo->oob_irq_handler = oob_irq_handler; + bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context; + bcmsdh_osinfo->oob_irq_enabled = TRUE; + bcmsdh_osinfo->oob_irq_registered = TRUE; +#if defined(CONFIG_ARCH_ODIN) + err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq, + bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); +#else + err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq, + bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); +#endif /* defined(CONFIG_ARCH_ODIN) */ + if (err) { + SDLX_ERR(("%s: request_irq failed with %d\n", __FUNCTION__, err)); + bcmsdh_osinfo->oob_irq_enabled = FALSE; + bcmsdh_osinfo->oob_irq_registered = FALSE; + return err; + } + +#if defined(DISABLE_WOWLAN) + SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__)); + bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; +#else +#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + if (device_may_wakeup(bcmsdh_osinfo->dev)) { +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ + err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (err) + SDLX_ERR(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err)); + else + bcmsdh_osinfo->oob_irq_wake_enabled = TRUE; +#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + } +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ +#endif + + return 0; +} + +void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh) +{ + int err = 0; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + if (!bcmsdh_osinfo->oob_irq_registered) { + SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (bcmsdh_osinfo->oob_irq_wake_enabled) { +#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + if (device_may_wakeup(bcmsdh_osinfo->dev)) { +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ + err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (!err) + bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; +#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + } +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ + } + if (bcmsdh_osinfo->oob_irq_enabled) { + disable_irq(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = FALSE; + } + free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh); + bcmsdh_osinfo->oob_irq_registered = FALSE; +} +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + +/* Module parameters specific to each host-controller driver */ +/* XXX Need to move these to where they really belong! 
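+ * For reference, everything below is an ordinary module parameter, so the
+ * values can be overridden at load time without recompiling, e.g.
+ * (illustrative values only, assuming the module is built as bcmdhd.ko):
+ *
+ *	insmod bcmdhd.ko sd_msglevel=1 sd_divisor=2 sd_f2_blocksize=512
+ *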
*/ + +extern uint sd_msglevel; /* Debug message level */ +module_param(sd_msglevel, uint, 0); + +extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */ +module_param(sd_power, uint, 0); + +extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */ +module_param(sd_clock, uint, 0); + +extern uint sd_divisor; /* Divisor (-1 means external clock) */ +module_param(sd_divisor, uint, 0); + +extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */ +module_param(sd_sdmode, uint, 0); + +extern uint sd_hiok; /* Ok to use hi-speed mode */ +module_param(sd_hiok, uint, 0); + +extern uint sd_f2_blocksize; +module_param(sd_f2_blocksize, int, 0); + +extern uint sd_f1_blocksize; +module_param(sd_f1_blocksize, int, 0); + +#ifdef BCMSDIOH_STD +extern int sd_uhsimode; +module_param(sd_uhsimode, int, 0); +extern uint sd_tuning_period; +module_param(sd_tuning_period, uint, 0); +extern int sd_delay_value; +module_param(sd_delay_value, uint, 0); + +/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */ +extern char dhd_sdiod_uhsi_ds_override[2]; +module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0); + +#endif + +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_attach); +EXPORT_SYMBOL(bcmsdh_detach); +EXPORT_SYMBOL(bcmsdh_intr_query); +EXPORT_SYMBOL(bcmsdh_intr_enable); +EXPORT_SYMBOL(bcmsdh_intr_disable); +EXPORT_SYMBOL(bcmsdh_intr_reg); +EXPORT_SYMBOL(bcmsdh_intr_dereg); + +#if defined(DHD_DEBUG) || defined(BCMDBG) +EXPORT_SYMBOL(bcmsdh_intr_pending); +#endif + +#if defined (BT_OVER_SDIO) +EXPORT_SYMBOL(bcmsdh_btsdio_interface_init); +#endif /* defined (BT_OVER_SDIO) */ + +EXPORT_SYMBOL(bcmsdh_devremove_reg); +EXPORT_SYMBOL(bcmsdh_cfg_read); +EXPORT_SYMBOL(bcmsdh_cfg_write); +EXPORT_SYMBOL(bcmsdh_cis_read); +EXPORT_SYMBOL(bcmsdh_reg_read); +EXPORT_SYMBOL(bcmsdh_reg_write); +EXPORT_SYMBOL(bcmsdh_regfail); +EXPORT_SYMBOL(bcmsdh_send_buf); +EXPORT_SYMBOL(bcmsdh_recv_buf); + +EXPORT_SYMBOL(bcmsdh_rwdata); +EXPORT_SYMBOL(bcmsdh_abort); +EXPORT_SYMBOL(bcmsdh_query_device); +EXPORT_SYMBOL(bcmsdh_query_iofnum); +EXPORT_SYMBOL(bcmsdh_iovar_op); +EXPORT_SYMBOL(bcmsdh_register); +EXPORT_SYMBOL(bcmsdh_unregister); +EXPORT_SYMBOL(bcmsdh_chipmatch); +EXPORT_SYMBOL(bcmsdh_reset); +EXPORT_SYMBOL(bcmsdh_waitlockfree); + +EXPORT_SYMBOL(bcmsdh_get_dstatus); +EXPORT_SYMBOL(bcmsdh_cfg_read_word); +EXPORT_SYMBOL(bcmsdh_cfg_write_word); +EXPORT_SYMBOL(bcmsdh_cur_sbwad); +EXPORT_SYMBOL(bcmsdh_chipinfo); + +#endif /* BCMSDH_MODULE */ diff --git a/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c b/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c new file mode 100755 index 0000000..596c02f --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c @@ -0,0 +1,2004 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ +#include + +#include +#include +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* Standard SDIO Host Controller Specification */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_PM_SLEEP) +#include +extern volatile bool dhd_mmc_suspend; +#endif +#include "bcmsdh_sdmmc.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static inline void +mmc_host_clk_hold(struct mmc_host *host) +{ + BCM_REFERENCE(host); + return; +} + +static inline void +mmc_host_clk_release(struct mmc_host *host) +{ + BCM_REFERENCE(host); + return; +} + +static inline unsigned int +mmc_host_clk_rate(struct mmc_host *host) +{ + return host->ios.clock; +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0) */ + +#ifndef BCMSDH_MODULE +extern int sdio_function_init(void); +extern void sdio_function_cleanup(void); +#endif /* BCMSDH_MODULE */ + +#if !defined(OOB_INTR_ONLY) +static void IRQHandler(struct sdio_func *func); +static void IRQHandlerF2(struct sdio_func *func); +#endif /* !defined(OOB_INTR_ONLY) */ +static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); +#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET) +extern int mmc_sw_reset(struct mmc_host *host); +#else +extern int sdio_reset_comm(struct mmc_card *card); +#endif +#endif +#ifdef GLOBAL_SDMMC_INSTANCE +extern PBCMSDH_SDMMC_INSTANCE gInstance; +#endif + +#define DEFAULT_SDIO_F2_BLKSIZE 512 +#ifndef CUSTOM_SDIO_F2_BLKSIZE +#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE +#endif + +#define DEFAULT_SDIO_F1_BLKSIZE 64 +#ifndef CUSTOM_SDIO_F1_BLKSIZE +#define CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE +#endif + +#define MAX_IO_RW_EXTENDED_BLK 511 + +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; +uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE; + +#if defined (BT_OVER_SDIO) +uint sd_f3_blocksize = 64; +#endif /* defined (BT_OVER_SDIO) */ + +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */ +uint sd_msglevel = SDH_ERROR_VAL; +uint sd_use_dma = TRUE; + +#ifndef CUSTOM_RXCHAIN +#define CUSTOM_RXCHAIN 0 +#endif + +DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); + +#define DMA_ALIGN_MASK 0x03 +#define MMC_SDIO_ABORT_RETRY_LIMIT 5 + +int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); +#ifdef NOTYET +static int +sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data); +#endif /* NOTYET */ + +#if defined (BT_OVER_SDIO) +extern +void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func) +{ + sd->func[3] = func; + sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3])); +} +#endif 
/* defined (BT_OVER_SDIO) */ + +void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz); +uint sdmmc_get_clock_rate(sdioh_info_t *sd); +void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div); + +static int +sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) +{ + int err_ret; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Enable Function 1 */ + sdio_claim_host(sd->func[1]); + err_ret = sdio_enable_func(sd->func[1]); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret)); + } + + return FALSE; +} + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, struct sdio_func *func) +{ + sdioh_info_t *sd = NULL; + int err_ret; + + sd_trace(("%s\n", __FUNCTION__)); + + if (func == NULL) { + sd_err(("%s: sdio function device is NULL\n", __FUNCTION__)); + return NULL; + } + + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + sd->fake_func0.num = 0; + sd->fake_func0.card = func->card; + sd->func[0] = &sd->fake_func0; +#ifdef GLOBAL_SDMMC_INSTANCE + if (func->num == 2) + sd->func[1] = gInstance->func[1]; +#else + sd->func[1] = func->card->sdio_func[0]; +#endif + sd->func[2] = func->card->sdio_func[1]; +#ifdef GLOBAL_SDMMC_INSTANCE + sd->func[func->num] = func; +#endif + +#if defined (BT_OVER_SDIO) + sd->func[3] = NULL; +#endif /* defined (BT_OVER_SDIO) */ + + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + sd->use_rxchain = CUSTOM_RXCHAIN; + if (sd->func[1] == NULL || sd->func[2] == NULL) { + sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__)); + goto fail; + } + sdio_set_drvdata(sd->func[1], sd); + + sdio_claim_host(sd->func[1]); + sd->client_block_size[1] = sd_f1_blocksize; + err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret)); + goto fail; + } + + sdio_claim_host(sd->func[2]); + if ((func->device == BCM43362_CHIP_ID || func->device == BCM4330_CHIP_ID) && + sd_f2_blocksize > 128) + sd_f2_blocksize = 128; + sd->client_block_size[2] = sd_f2_blocksize; + printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize); + err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize); + sdio_release_host(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n", + sd_f2_blocksize, err_ret)); + goto fail; + } + + sd->sd_clk_rate = sdmmc_get_clock_rate(sd); + printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate); + sdioh_sdmmc_card_enablefuncs(sd); +#if 
!defined(OOB_INTR_ONLY)
+	mutex_init(&sd->claim_host_mutex); // terence 20140926: fix for claim host issue
+#endif
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+
+fail:
+	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	return NULL;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		if (sd->func[2]) {
+			sdio_claim_host(sd->func[2]);
+			sdio_disable_func(sd->func[2]);
+			sdio_release_host(sd->func[2]);
+		}
+
+		/* Disable Function 1 */
+		if (sd->func[1]) {
+			sdio_claim_host(sd->func[1]);
+			sdio_disable_func(sd->func[1]);
+			sdio_release_host(sd->func[1]);
+		}
+
+		sd->func[1] = NULL;
+		sd->func[2] = NULL;
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	/* Enable F1 and F2 interrupts, clear master enable */
+	reg &= ~INTR_CTL_MASTER_EN;
+	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined (BT_OVER_SDIO)
+	reg |= (INTR_CTL_FUNC3_EN);
+#endif /* defined (BT_OVER_SDIO) */
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined(BT_OVER_SDIO)
+	reg &= ~INTR_CTL_FUNC3_EN;
+#endif
+	/* Disable master interrupt with the last function interrupt */
+	if (!(reg & 0xFE))
+		reg = 0;
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (sd->func[2]) {
+		sdio_claim_host(sd->func[2]);
+		sdio_claim_irq(sd->func[2], IRQHandlerF2);
+		sdio_release_host(sd->func[2]);
+	}
+
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		sdio_claim_irq(sd->func[1], IRQHandler);
+		sdio_release_host(sd->func[1]);
+	}
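+
+	/*
+	 * Note on the in-band path: the Linux MMC core calls the handlers
+	 * claimed above from its SDIO IRQ thread with the host already held,
+	 * so a minimal client hook only has to bounce into the handler that
+	 * was registered here. Sketch (hypothetical name, mirroring the real
+	 * IRQHandler() later in this file):
+	 *
+	 *	static void my_irq_cb(struct sdio_func *func)
+	 *	{
+	 *		sdioh_info_t *sd = sdio_get_drvdata(func);
+	 *		if (sd->use_client_ints)
+	 *			(sd->intr_handler)(sd->intr_handler_arg);
+	 *	}
+	 */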
+#elif defined(HW_OOB) + sdioh_enable_func_intr(sd); +#endif /* !defined(OOB_INTR_ONLY) */ + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + +#if !defined(OOB_INTR_ONLY) + if (sd->func[1]) { + /* register and unmask irq */ + sdio_claim_host(sd->func[1]); + sdio_release_irq(sd->func[1]); + sdio_release_host(sd->func[1]); + } + + if (sd->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(sd->func[2]); + sdio_release_irq(sd->func[2]); + /* Release host controller F2 */ + sdio_release_host(sd->func[2]); + } + + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#elif defined(HW_OOB) + if (dhd_download_fw_on_driverload) + sdioh_disable_func_intr(sd); +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) || defined(BCMDBG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return (0); +} +#endif + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_RXCHAIN +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 }, +#ifdef BCMINTERNAL + {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, +#endif /* BCMINTERNAL */ + {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0 }, + {"sd_rxchain", IOV_RXCHAIN, 0, 0, IOVT_BOOL, 0 }, +#ifdef BCMDBG + {"sd_hciregs", IOV_HCIREGS, 0, 0, IOVT_BUFFER, 0 }, +#endif + {NULL, 0, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, uint len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + uint val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* XXX Copied from dhd, copied from wl; certainly overkill here? 
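+	 * As a worked example of the convention: sd_blocksize packs the function
+	 * number and size into one word, ((fn << 16) | size), so setting F2's
+	 * block size to 512 bytes arrives here as
+	 *
+	 *	int_val = (2 << 16) | 512;	/* == 0x20200 */
+	 *
+	 * while a get with int_val = 2 returns client_block_size[2] through arg.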
*/ + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + BCM_REFERENCE(bool_val); + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + /* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */ + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + si->client_block_size[func] = blksize; + +#ifdef USE_DYNAMIC_F2_BLKSIZE + if (si->func[func] == NULL) { + sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); + bcmerror = BCME_NORESOURCE; + break; + } + sdio_claim_host(si->func[func]); + bcmerror = sdio_set_block_size(si->func[func], blksize); + if (bcmerror) + sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n", + __FUNCTION__, func, blksize, bcmerror)); + sdio_release_host(si->func[func]); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + break; + } + + case IOV_GVAL(IOV_RXCHAIN): + int_val = (int32)si->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + /* set the clock to divisor, if value is non-zero & power of 2 */ + if (int_val && !(int_val & (int_val - 1))) { + sd_divisor = int_val; + sdmmc_set_clock_divisor(si, sd_divisor); + } else { + DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n", + __FUNCTION__)); + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = 
int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)0; + bcopy(&int_val, arg, val_size); + break; +#ifdef BCMINTERNAL + case IOV_GVAL(IOV_HOSTREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */ + else if (sd_ptr->offset & 2) + int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */ + else + int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */ + + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + break; + } + + case IOV_GVAL(IOV_DEVREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = 0; + + if ((uint)sd_ptr->func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if ((uint)sd_ptr->func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } +#endif /* BCMINTERNAL */ + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + /* XXX Remove protective lock after clients all clean... 
	 */
+	return bcmerror;
+}
+
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
+/*
+ * XXX dhd -i eth0 sd_devreg 0 0xf2 0x3
+ */
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#include
+extern int wifi_irq_trigger_level(void);
+#endif
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable) {
+		if (wifi_irq_trigger_level() == GPIO_IRQ_LOW)
+			data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+		else
+			data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+	} else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return the valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread(sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset = sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread(sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset)
+{
+	uint32 foo;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (sdioh_sdmmc_card_regread(sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) {
+		sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	*cisd = (uint8)(foo & 0xff);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT) || defined(SDIO_ISR_THREAD)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+	struct osl_timespec now, before;
+
+	if (sd_msglevel & SDH_COST_VAL)
+		osl_do_gettimeofday(&before);
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__,
		rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if (rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers. Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+#if defined (BT_OVER_SDIO)
+				do {
+				if (sd->func[3]) {
+					sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
+
+					if (*byte & SDIO_FUNC_ENABLE_3) {
+						sdio_claim_host(sd->func[3]);
+
+						/* Set Function 3 Block Size */
+						err_ret = sdio_set_block_size(sd->func[3],
+							sd_f3_blocksize);
+						if (err_ret) {
+							sd_err(("F3 blocksize set err%d\n",
+								err_ret));
+						}
+
+						/* Enable Function 3 */
+						sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
+							sd->func[3]));
+						err_ret = sdio_enable_func(sd->func[3]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
+								err_ret));
+						}
+
+						sdio_release_host(sd->func[3]);
+
+						break;
+					} else if (*byte & SDIO_FUNC_DISABLE_3) {
+						sdio_claim_host(sd->func[3]);
+
+						/* Disable Function 3 */
+						sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
+							sd->func[3]));
+						err_ret = sdio_disable_func(sd->func[3]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
+								err_ret));
+						}
+						sdio_release_host(sd->func[3]);
+						sd->func[3] = NULL;
+
+						break;
+					}
+				}
+#endif /* defined (BT_OVER_SDIO) */
+				if (sd->func[2]) {
+					sdio_claim_host(sd->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
+								err_ret));
+						}
+					}
+					sdio_release_host(sd->func[2]);
+				}
+#if defined (BT_OVER_SDIO)
+				} while (0);
+#endif /* defined (BT_OVER_SDIO) */
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				/* XXX Because of an SDIO3.0 host issue on Manta,
+				 * sometimes the abort fails.
+				 * Retrying again will fix this issue.
+				 */
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * this sdio_f0_writeb() can be replaced with
+						 * another api depending upon MMC driver change.
+						 * As of this time, this is a temporary one.
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif /* MMC_SDIO_ABORT */
+#if defined(SDIO_ISR_THREAD)
+			else if (regaddr == SDIOD_CCCR_INTR_EXTN) {
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * this sdio_f0_writeb() can be replaced with
+						 * another api depending upon MMC driver change.
+						 * As of this time, this is a temporary one.
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				if (sd->func[func]) {
+					sdio_claim_host(sd->func[func]);
+					sdio_f0_writeb(sd->func[func],
+						*byte, regaddr, &err_ret);
+					sdio_release_host(sd->func[func]);
+				}
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			if (sd->func[func]) {
+				sdio_claim_host(sd->func[func]);
+				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(sd->func[func]);
+			}
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		if (sd->func[func]) {
+			sdio_claim_host(sd->func[func]);
+			if (func == 0) {
+				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+			} else {
+				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+			}
+			sdio_release_host(sd->func[func]);
+		}
+	}
+
+	if (err_ret) {
+		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ)
+			|| (err_ret == -EIO))) {
+			/* XXX: Read/Write to SBSDIO_FUNC1_SLEEPCSR could return -110 (timeout)
+			 * or -84 (CRC) errors in case the host tries to wake the device up.
+			 * Skip the error log message if the err code is -110 or -84 when
+			 * accessing SBSDIO_FUNC1_SLEEPCSR, to avoid QA misunderstanding; DHD
+			 * should only print an error log message if the retry count exceeds
+			 * MAX_KSO_ATTEMPTS.
+			 */
+		} else {
+			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+		}
+	}
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d len=1 cost = %3dms %3dus\n", __FUNCTION__,
+			rw, diff_us/1000, diff_us%1000));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+uint
+sdioh_set_mode(sdioh_info_t *sd, uint mode)
+{
+	if (mode == SDPCM_TXGLOM_CPY)
+		sd->txglom_mode = mode;
+	else if (mode == SDPCM_TXGLOM_MDESC)
+		sd->txglom_mode = mode;
+
+	return (sd->txglom_mode);
+}
+
+#ifdef PKT_STATICS
+uint32
+sdioh_get_spend_time(sdioh_info_t *sd)
+{
+	return (sd->sdio_spent_time_us);
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+	uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+	int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc deadlock
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+	struct osl_timespec now, before;
+
+	if (sd_msglevel & SDH_COST_VAL)
+		osl_do_gettimeofday(&before);
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+		__FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(sd->func[func]);
+
+	if (rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(sd->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(sd->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+		/* Any error on CMD53 transaction should abort that function using function 0. */
+		while (sdio_abort_retry--) {
+			if (sd->func[0]) {
+				sdio_claim_host(sd->func[0]);
+				/*
+				 * this sdio_f0_writeb() can be replaced with another api
+				 * depending upon MMC driver change.
+				 * As of this time, this is a temporary one.
+				 */
+				sdio_writeb(sd->func[0],
+					func, SDIOD_CCCR_IOABORT, &err_ret2);
+				sdio_release_host(sd->func[0]);
+			}
+			if (!err_ret2)
+				break;
+		}
+		if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+		{
+			sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
+				rw ? "Write" : "Read", func, addr, *word, err_ret));
+		}
+	}
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__,
+			rw, nbytes, diff_us/1000, diff_us%1000));
+	}
+
+	return (((err_ret == 0) && (err_ret2 == 0)) ?
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +#ifdef BCMSDIOH_TXGLOM +static SDIOH_API_RC +sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func, + uint addr, void *pkt) +{ + bool fifo = (fix_inc == SDIOH_DATA_FIX); + int err_ret = 0; + void *pnext; + uint ttl_len, pkt_offset; + uint blk_num; + uint blk_size; + uint max_blk_count; + uint max_req_size; + struct mmc_request mmc_req; + struct mmc_command mmc_cmd; + struct mmc_data mmc_dat; + uint32 sg_count; + struct sdio_func *sdio_func = sd->func[func]; + struct mmc_host *host = sdio_func->card->host; + uint8 *localbuf = NULL; + uint local_plen = 0; + uint pkt_len = 0; + struct osl_timespec now, before; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + ASSERT(pkt); + DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + +#ifndef PKT_STATICS + if (sd_msglevel & SDH_COST_VAL) +#endif + osl_do_gettimeofday(&before); + + blk_size = sd->client_block_size[func]; + max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK); + max_req_size = min(max_blk_count * blk_size, host->max_req_size); + + pkt_offset = 0; + pnext = pkt; + + ttl_len = 0; + sg_count = 0; + if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) { + while (pnext != NULL) { + ttl_len = 0; + sg_count = 0; + memset(&mmc_req, 0, sizeof(struct mmc_request)); + memset(&mmc_cmd, 0, sizeof(struct mmc_command)); + memset(&mmc_dat, 0, sizeof(struct mmc_data)); + sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list)); + + /* Set up scatter-gather DMA descriptors. this loop is to find out the max + * data we can transfer with one command 53. blocks per command is limited by + * host max_req_size and 9-bit max block number. when the total length of this + * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED + * commands (each transfer is still block aligned) + */ + while (pnext != NULL && ttl_len < max_req_size) { + int pkt_len; + int sg_data_size; + uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext); + + ASSERT(pdata != NULL); + pkt_len = PKTLEN(sd->osh, pnext); + sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len)); + /* sg_count is unlikely larger than the array size, and this is + * NOT something we can handle here, but in case it happens, PLEASE put + * a restriction on max tx/glom count (based on host->max_segs). + */ + if (sg_count >= ARRAYSIZE(sd->sg_list)) { + sd_err(("%s: sg list entries(%u) exceed limit(%zu)," + " sd blk_size=%u\n", + __FUNCTION__, sg_count, (size_t)ARRAYSIZE(sd->sg_list), blk_size)); + return (SDIOH_API_RC_FAIL); + } + pdata += pkt_offset; + + sg_data_size = pkt_len - pkt_offset; + if (sg_data_size > max_req_size - ttl_len) + sg_data_size = max_req_size - ttl_len; + /* some platforms put a restriction on the data size of each scatter-gather + * DMA descriptor, use multiple sg buffers when xfer_size is bigger than + * max_seg_size + */ + if (sg_data_size > host->max_seg_size) { + sg_data_size = host->max_seg_size; + } + sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size); + + ttl_len += sg_data_size; + pkt_offset += sg_data_size; + if (pkt_offset == pkt_len) { + pnext = PKTNEXT(sd->osh, pnext); + pkt_offset = 0; + } + } + + if (ttl_len % blk_size != 0) { + sd_err(("%s, data length %d not aligned to block size %d\n", + __FUNCTION__, ttl_len, blk_size)); + return SDIOH_API_RC_FAIL; + } + blk_num = ttl_len / blk_size; + mmc_dat.sg = sd->sg_list; + mmc_dat.sg_len = sg_count; + mmc_dat.blksz = blk_size; + mmc_dat.blocks = blk_num; + mmc_dat.flags = write ? 
MMC_DATA_WRITE : MMC_DATA_READ;
+			mmc_cmd.opcode = 53;	/* SD_IO_RW_EXTENDED */
+			mmc_cmd.arg = write ? 1<<31 : 0;
+			mmc_cmd.arg |= (func & 0x7) << 28;
+			mmc_cmd.arg |= 1<<27;
+			mmc_cmd.arg |= fifo ? 0 : 1<<26;
+			mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+			mmc_cmd.arg |= blk_num & 0x1FF;
+			mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+			mmc_req.cmd = &mmc_cmd;
+			mmc_req.data = &mmc_dat;
+			if (!fifo)
+				addr += ttl_len;
+
+			sdio_claim_host(sdio_func);
+			mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+			mmc_wait_for_req(host, &mmc_req);
+			sdio_release_host(sdio_func);
+
+			err_ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
+			if (0 != err_ret) {
+				sd_err(("%s:CMD53 %s failed with code %d\n",
+					__FUNCTION__, write ? "write" : "read", err_ret));
+				return SDIOH_API_RC_FAIL;
+			}
+		}
+	} else if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
+		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+			ttl_len += PKTLEN(sd->osh, pnext);
+		}
+		/* Claim host controller */
+		sdio_claim_host(sd->func[func]);
+		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+			uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
+			pkt_len = PKTLEN(sd->osh, pnext);
+
+			if (!localbuf) {
+				localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
+				if (localbuf == NULL) {
+					sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
+						__FUNCTION__, (write) ? "TX" : "RX"));
+					goto txglomfail;
+				}
+			}
+
+			bcopy(buf, (localbuf + local_plen), pkt_len);
+			local_plen += pkt_len;
+			if (PKTNEXT(sd->osh, pnext))
+				continue;
+
+			buf = localbuf;
+			pkt_len = local_plen;
+txglomfail:
+			/* Align Patch */
+			if (!write || pkt_len < 32)
+				pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+			else if (pkt_len % blk_size)
+				pkt_len += blk_size - (pkt_len % blk_size);
+
+			if (write)
+				err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
+			else if (fifo)
+				err_ret = sdio_readsb(sd->func[func], buf, addr, pkt_len);
+			else
+				err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, pkt_len);
+
+			if (err_ret)
+				sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+					__FUNCTION__,
+					(write) ? "TX" : "RX",
+					pnext, sg_count, addr, pkt_len, err_ret));
+			else
+				sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+					__FUNCTION__,
+					(write) ? "TX" : "RX",
+					pnext, sg_count, addr, pkt_len));
+
+			if (!fifo)
+				addr += pkt_len;
+			sg_count++;
+		}
+		sdio_release_host(sd->func[func]);
+	} else {
+		sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (localbuf)
+		MFREE(sd->osh, localbuf, ttl_len);
+
+#ifndef PKT_STATICS
+	if (sd_msglevel & SDH_COST_VAL)
+#endif
+	{
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d, ttl_len=%4d cost = %3dms %3dus\n", __FUNCTION__,
+			write, ttl_len, diff_us/1000, diff_us%1000));
+#ifdef PKT_STATICS
+		if (write && (func == 2))
+			sd->sdio_spent_time_us = diff_us;
+#endif
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* BCMSDIOH_TXGLOM */
+
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+	uint addr, uint8 *buf, uint len)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	struct osl_timespec now, before;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(buf);
+
+	if (sd_msglevel & SDH_COST_VAL)
+		osl_do_gettimeofday(&before);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to the block boundary. If you want to align each packet to
+	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here.
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 */
+	sdio_claim_host(sd->func[func]);
+
+	if (write)
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (fifo)
+		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+	else
+		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret)
+		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len, err_ret));
+	else
+		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len));
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		uint32 diff_us;
+		osl_do_gettimeofday(&now);
+		diff_us = osl_do_gettimediff(&now, &before);
+		sd_cost(("%s: rw=%d, len=%4d cost = %3dms %3dus\n", __FUNCTION__,
+			write, len, diff_us/1000, diff_us%1000));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ * + */ +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func, + uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt) +{ + SDIOH_API_RC status; + void *tmppkt; + int is_vmalloc = FALSE; + struct osl_timespec now, before; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + + if (sd_msglevel & SDH_COST_VAL) + osl_do_gettimeofday(&before); + + if (pkt) { +#ifdef BCMSDIOH_TXGLOM + /* packet chain, only used for tx/rx glom, all packets length + * are aligned, total length is a block multiple + */ + if (PKTNEXT(sd->osh, pkt)) + return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt); +#endif /* BCMSDIOH_TXGLOM */ + /* non-glom mode, ignore the buffer parameter and use the packet pointer + * (this shouldn't happen) + */ + buffer = PKTDATA(sd->osh, pkt); + buf_len = PKTLEN(sd->osh, pkt); + } + + ASSERT(buffer); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) + is_vmalloc = is_vmalloc_addr(buffer); +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) */ + + /* buffer and length are aligned, use it directly so we can avoid memory copy */ + if ((((ulong)buffer & DMA_ALIGN_MASK) == 0) && ((buf_len & DMA_ALIGN_MASK) == 0) && + (!is_vmalloc)) { + return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len); + } + + if (is_vmalloc) { + sd_trace(("%s: Need to memory copy due to virtual memory address.\n", + __FUNCTION__)); + } + + sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n", + __FUNCTION__, write, buffer, buf_len)); + + /* otherwise, a memory copy is needed as the input buffer is not aligned */ + tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE); + if (tmppkt == NULL) { + sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len)); + return SDIOH_API_RC_FAIL; + } + + if (write) + bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len); + + status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, + PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1))); + + if (!write) + bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len); + + PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE); + + if (sd_msglevel & SDH_COST_VAL) { + uint32 diff_us; + osl_do_gettimeofday(&now); + diff_us = osl_do_gettimediff(&now, &before); + sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__, + write, buf_len, diff_us/1000, diff_us%1000)); + } + + return status; +} + +/* this function performs "abort" for both of host & device */ +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ +#if defined(MMC_SDIO_ABORT) + char t_func = (char) func; +#endif /* defined(MMC_SDIO_ABORT) */ + sd_trace(("%s: Enter\n", __FUNCTION__)); + + /* XXX Standard Linux SDIO Stack cannot perform an abort. 
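+	 * The closest in-band equivalent is a CMD52 write of the aborted
+	 * function's number to the CCCR I/O Abort register through F0, which
+	 * is what the MMC_SDIO_ABORT path below issues.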
+	 */
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F0 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+			__FUNCTION__, *data));
+	} else {
+		if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
+			return BCME_SDIO_ERROR;
+		}
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+			__FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+void sdio_claim_host_lock_local(sdioh_info_t *sd)	// terence 20140926: fix for claim host issue
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (sd)
+		mutex_lock(&sd->claim_host_mutex);
+#endif
+}
+
+void sdio_claim_host_unlock_local(sdioh_info_t *sd)	// terence 20140926: fix for claim host issue
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (sd)
+		mutex_unlock(&sd->claim_host_mutex);
+#endif
+}
+
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd = sdio_get_drvdata(func);
+
+	ASSERT(sd != NULL);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (mutex_is_locked(&sd->claim_host_mutex)) {
+		printf("%s: mutex is locked, returning\n", __FUNCTION__);
+		return;
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+
+	sdio_claim_host_lock_local(sd);
+	sdio_release_host(sd->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else { /* XXX - Do not remove these sd_err messages. Need to figure
+		  out how to keep interrupts disabled until DHD registers
+		  a handler.
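+		  Until such a handler is registered, any interrupt that fires
+		  simply lands in this branch and is only logged below.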
+		  */
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+			__FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(sd->func[0]);
+	sdio_claim_host_unlock_local(sd);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+			__FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+			__FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+static int sdio_sw_reset(sdioh_info_t *sd)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+	struct mmc_host *host = sd->func[0]->card->host;
+#endif
+	int err = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+	printf("%s: Enter\n", __FUNCTION__);
+	sdio_claim_host(sd->func[0]);
+	err = mmc_sw_reset(host);
+	sdio_release_host(sd->func[0]);
+#else
+	err = sdio_reset_comm(sd->func[0]->card);
+#endif
+
+	if (err)
+		sd_err(("%s Failed, error = %d\n", __FUNCTION__, err));
+
+	return err;
+}
+#endif
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+#if defined(OEM_ANDROID)
+	int ret;
+
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+
+	/* Need to do this in stages, as we can't enable the interrupt until
+	   downloading of the firmware is complete; otherwise polled SDIO
+	   access will get in the way
+	*/
+	if (sd->func[0]) {
+		if (stage == 0) {
+			/* Since the power to the chip is killed, we will have
+			   to re-enumerate the device. Set the block size
+			   and enable function 1 in preparation for
+			   downloading the code
+			*/
+			/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
+			   2.6.27.
+			   The implementation prior to that is buggy and needs
+			   Broadcom's patch for it
+			*/
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+			if ((ret = sdio_sw_reset(sd))) {
+				sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+				return ret;
+			} else
+#endif
+			{
+				sd->num_funcs = 2;
+				sd->sd_blockmode = TRUE;
+				sd->use_client_ints = TRUE;
+				sd->client_block_size[0] = 64;
+
+				if (sd->func[1]) {
+					/* Claim host controller */
+					sdio_claim_host(sd->func[1]);
+
+					sd->client_block_size[1] = 64;
+					ret = sdio_set_block_size(sd->func[1], 64);
+					if (ret) {
+						sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+							"blocksize(%d)\n", ret));
+					}
+
+					/* Release host controller F1 */
+					sdio_release_host(sd->func[1]);
+				}
+
+				if (sd->func[2]) {
+					/* Claim host controller F2 */
+					sdio_claim_host(sd->func[2]);
+
+					sd->client_block_size[2] = sd_f2_blocksize;
+					printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
+					ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+					if (ret) {
+						sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+							"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+					}
+
+					/* Release host controller F2 */
+					sdio_release_host(sd->func[2]);
+				}
+
+				sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(sd->func[0]);
+			if (sd->func[2])
+				sdio_claim_irq(sd->func[2], IRQHandlerF2);
+			if (sd->func[1])
+				sdio_claim_irq(sd->func[1], IRQHandler);
+			sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr(sd);
+#endif
+			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+#endif /* defined(OEM_ANDROID) */
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+#if defined(OEM_ANDROID)
+	/* The MSM7201A Android SDIO stack has a bug with interrupts:
+	   internally the SDIO stack polls, which causes issues when
+	   the device is turned off.
+	   So unregister the interrupt with the SDIO stack to stop
+	   the polling
+	*/
+#if !defined(OOB_INTR_ONLY)
+	sdio_claim_host_lock_local(sd);
+#endif
+	if (sd->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(sd->func[0]);
+		if (sd->func[1])
+			sdio_release_irq(sd->func[1]);
+		if (sd->func[2])
+			sdio_release_irq(sd->func[2]);
+		sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr(sd);
+#endif
+		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+#endif /* defined(OEM_ANDROID) */
+#if !defined(OOB_INTR_ONLY)
+	sdio_claim_host_unlock_local(sd);
+#endif
+	return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
+
+#ifdef BCMINTERNAL
+extern SDIOH_API_RC
+sdioh_test_diag(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return (0);
+}
+#endif /* BCMINTERNAL */
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+uint
+sdmmc_get_clock_rate(sdioh_info_t *sd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	struct sdio_func *sdio_func = sd->func[0];
+	struct mmc_host *host = sdio_func->card->host;
+	return mmc_host_clk_rate(host);
+#else
+	return 0;
+#endif
+}
+
+void
+sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	struct sdio_func *sdio_func = sd->func[0];
+	struct mmc_host *host = sdio_func->card->host;
+	struct mmc_ios *ios = &host->ios;
+
+	mmc_host_clk_hold(host);
+	DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+	if (hz < host->f_min) {
+		DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
+		hz = host->f_min;
+	}
+
+	if (hz > host->f_max) {
+		DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
+		hz = host->f_max;
+	}
+	ios->clock = hz;
+	host->ops->set_ios(host, ios);
+	DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+	mmc_host_clk_release(host);
+#else
+	return;
+#endif
+}
+
+void
+sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
+{
+	/* sdmmc_get_clock_rate() returns a rate in Hz, so compare it against
+	 * the target rate rather than against the raw divisor
+	 */
+	uint cur_rate = sdmmc_get_clock_rate(sd);
+	uint hz = sd->sd_clk_rate / sd_div;
+
+	if (cur_rate == hz) {
+		return;
+	}
+
+	sdmmc_set_clock_rate(sd, hz);
+}
diff --git a/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c b/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c
new file mode 100755
index 0000000..1dfb408
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,388 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include /* request_irq() */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(SDIO_VENDOR_ID_BROADCOM) +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */ + +#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000 + +extern void wl_cfg80211_set_parent_dev(void *dev); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type, + uint bus_num, uint slot_num); +extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh); + +int sdio_function_init(void); +void sdio_function_cleanup(void); + +#define DESCRIPTION "bcmsdh_sdmmc Driver" +#define AUTHOR "Broadcom Corporation" + +/* module param defaults */ +static int clockoverride = 0; + +module_param(clockoverride, int, 0644); +MODULE_PARM_DESC(clockoverride, "SDIO card clock override"); + +#ifdef GLOBAL_SDMMC_INSTANCE +PBCMSDH_SDMMC_INSTANCE gInstance; +#endif + +/* Maximum number of bcmsdh_sdmmc devices supported by driver */ +#define BCMSDH_SDMMC_MAX_DEVICES 1 + +extern volatile bool dhd_mmc_suspend; + +static int sdioh_probe(struct sdio_func *func) +{ + int host_idx = func->card->host->index; + uint32 rca = func->card->rca; + wifi_adapter_info_t *adapter; + osl_t *osh = NULL; + sdioh_info_t *sdioh = NULL; + + sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca)); + adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca); + if (adapter != NULL) { + sd_err(("found adapter info '%s'\n", adapter->name)); + adapter->bus_type = SDIO_BUS; + adapter->bus_num = host_idx; + adapter->slot_num = rca; + adapter->sdio_func = func; + } else + sd_err(("can't find adapter info for this chip\n")); + +#ifdef WL_CFG80211 + wl_cfg80211_set_parent_dev(&func->dev); +#endif + + /* allocate SDIO Host Controller state info */ + osh = osl_attach(&func->dev, SDIO_BUS, TRUE); + if (osh == NULL) { + sd_err(("%s: osl_attach failed\n", __FUNCTION__)); + goto fail; + } + osl_static_mem_init(osh, adapter); + sdioh = sdioh_attach(osh, func); + if (sdioh == NULL) { + sd_err(("%s: sdioh_attach failed\n", __FUNCTION__)); + goto fail; + } + sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca); + if (sdioh->bcmsdh == NULL) { + sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__)); + goto fail; + } + + sdio_set_drvdata(func, sdioh); + return 0; + +fail: + if (sdioh != 
NULL) + sdioh_detach(osh, sdioh); + if (osh != NULL) + osl_detach(osh); + return -ENOMEM; +} + +static void sdioh_remove(struct sdio_func *func) +{ + sdioh_info_t *sdioh; + osl_t *osh; + + sdioh = sdio_get_drvdata(func); + if (sdioh == NULL) { + sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__)); + return; + } + sd_err(("%s: Enter\n", __FUNCTION__)); + + osh = sdioh->osh; + bcmsdh_remove(sdioh->bcmsdh); + sdioh_detach(osh, sdioh); + osl_detach(osh); +} + +static int bcmsdh_sdmmc_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + + if (func == NULL) + return -EINVAL; + + sd_err(("%s: Enter num=%d\n", __FUNCTION__, func->num)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + +#ifdef GLOBAL_SDMMC_INSTANCE + gInstance->func[func->num] = func; +#endif + + /* 4318 doesn't have function 2 */ + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + ret = sdioh_probe(func); + + return ret; +} + +static void bcmsdh_sdmmc_remove(struct sdio_func *func) +{ + if (func == NULL) { + sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__)); + return; + } + + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + sdioh_remove(func); +} + +/* devices we support, null terminated */ +static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) }, + /* XXX This should not be in the external release, as it will attach to any SDIO + * device, even non-WLAN devices. 
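+	 * (Note that a live SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) entry does appear
+	 * at the end of this table below, so this build still binds by class.)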
+ * Need to add IDs for the FALCON-based chips and put this under BCMINTERNAL + { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, + */ + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4362_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43751_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43752_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43012_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N2G_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N5G_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N2G_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N5G_ID) }, + { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, + { 0, 0, 0, 0 /* end: all zeroes */ + }, +}; + +MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) +static int bcmsdh_sdmmc_suspend(struct device *pdev) +{ + int err; + sdioh_info_t *sdioh; + struct sdio_func *func = dev_to_sdio_func(pdev); + mmc_pm_flag_t sdio_flags; + + printf("%s Enter func->num=%d\n", __FUNCTION__, func->num); + if (func->num != 2) + return 0; + + dhd_mmc_suspend = TRUE; + sdioh = sdio_get_drvdata(func); + err = bcmsdh_suspend(sdioh->bcmsdh); + if (err) { + printf("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err); + dhd_mmc_suspend = FALSE; + return err; + } + + sdio_flags = sdio_get_host_pm_caps(func); + if (!(sdio_flags & MMC_PM_KEEP_POWER)) { + sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__)); + dhd_mmc_suspend = FALSE; + return -EINVAL; + } + + /* keep power while host suspended */ + err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (err) { + sd_err(("%s: error while trying to keep power\n", __FUNCTION__)); + dhd_mmc_suspend = FALSE; + return err; + } + smp_mb(); + + printf("%s Exit\n", __FUNCTION__); + return 0; +} + +static int bcmsdh_sdmmc_resume(struct device *pdev) +{ + sdioh_info_t *sdioh; + struct sdio_func *func = dev_to_sdio_func(pdev); + + printf("%s Enter func->num=%d\n", __FUNCTION__, func->num); + if (func->num != 2) + return 0; + + dhd_mmc_suspend = FALSE; + sdioh = sdio_get_drvdata(func); + bcmsdh_resume(sdioh->bcmsdh); + + smp_mb(); + printf("%s Exit\n", __FUNCTION__); + return 0; +} + +static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = { + .suspend = bcmsdh_sdmmc_suspend, + .resume = bcmsdh_sdmmc_resume, +}; +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + +#if defined(BCMLXSDMMC) +static struct semaphore *notify_semaphore = NULL; + +static int dummy_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + sd_err(("%s: enter\n", __FUNCTION__)); + if (func && (func->num != 2)) { + return 0; + } + + if (notify_semaphore) + up(notify_semaphore); + return 0; +} + +static void dummy_remove(struct sdio_func *func) +{ +} + +static struct sdio_driver dummy_sdmmc_driver = { + .probe = dummy_probe, + .remove = dummy_remove, + .name = "dummy_sdmmc", + .id_table = bcmsdh_sdmmc_ids, + }; + +int sdio_func_reg_notify(void* semaphore) +{ + notify_semaphore = semaphore; + return sdio_register_driver(&dummy_sdmmc_driver); +} + +void sdio_func_unreg_notify(void) +{ + OSL_SLEEP(15); + sdio_unregister_driver(&dummy_sdmmc_driver); +} + +#endif /* defined(BCMLXSDMMC) */ + +static struct sdio_driver 
bcmsdh_sdmmc_driver = { + .probe = bcmsdh_sdmmc_probe, + .remove = bcmsdh_sdmmc_remove, + .name = "bcmsdh_sdmmc", + .id_table = bcmsdh_sdmmc_ids, +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) + .drv = { + .pm = &bcmsdh_sdmmc_pm_ops, + }, +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + }; + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +}; + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + if (!sd) + return BCME_BADARG; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling")); + return SDIOH_API_RC_SUCCESS; +} + +#ifdef BCMSDH_MODULE +static int __init +bcmsdh_module_init(void) +{ + int error = 0; + error = sdio_function_init(); + return error; +} + +static void __exit +bcmsdh_module_cleanup(void) +{ + sdio_function_cleanup(); +} + +module_init(bcmsdh_module_init); +module_exit(bcmsdh_module_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DESCRIPTION); +MODULE_AUTHOR(AUTHOR); + +#endif /* BCMSDH_MODULE */ +/* + * module init +*/ +int bcmsdh_register_client_driver(void) +{ + return sdio_register_driver(&bcmsdh_sdmmc_driver); +} + +/* + * module cleanup +*/ +void bcmsdh_unregister_client_driver(void) +{ + sdio_unregister_driver(&bcmsdh_sdmmc_driver); +} diff --git a/bcmdhd.101.10.361.x/bcmsdspi.h b/bcmdhd.101.10.361.x/bcmsdspi.h new file mode 100755 index 0000000..9a29370 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdspi.h @@ -0,0 +1,147 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ *
+ * $Id: bcmsdspi.h 833013 2019-08-02 16:26:31Z jl904071 $
+ */
+#ifndef	_BCM_SD_SPI_H
+#define	_BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#ifdef BCMDBG
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)	do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x)	do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x)	do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x)	do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x)	do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+#ifdef BCMPERFSTATS
+#define sd_log(x)	do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint cfg_bar;			/* pci cfg address for bar */
+	uint32 caps;			/* cached value of capabilities reg */
+	uint bar0;			/* BAR0 for PCI Device */
+	osl_t *osh;			/* osh handler */
+	void *controller;		/* Pointer to SPI Controller's private data struct */
+
+	uint lockcount;			/* nest count of sdspi_lock() calls */
+	bool client_intr_enabled;	/* interrupt connected flag */
+	bool intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t intr_handler;	/* registered interrupt handler */
+	void *intr_handler_arg;		/* argument to call interrupt handler */
+	bool initialized;		/* card initialized */
+	uint32 target_dev;		/* Target device ID */
+	uint32 intmask;			/* Current active interrupts */
+	void *sdos_info;		/* Pointer to per-OS private data */
+
+	uint32 controller_type;		/* Host controller type */
+	uint8 version;			/* Host Controller Spec Compliance Version */
+	uint irq;			/* Client irq */
+	uint32 intrcount;		/* Client interrupts */
+	uint32 local_intrcount;		/* Controller interrupts */
+	bool host_init_done;		/* Controller initted */
+	bool card_init_done;		/* Client SDIO interface initted */
+	bool polled_mode;		/* polling for command completion */
+
+	bool sd_use_dma;		/* DMA on CMD53 */
+	bool sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+					/*  Must be on for sd_multiblock to be effective */
+	bool use_client_ints;		/* If this is false, make sure to restore */
+	bool got_hcint;			/* Host Controller interrupt.
*/ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current register transfer size */ + uint32 cmd53_wr_data; /* Used to pass CMD53 write data */ + uint32 card_response; /* Used to pass back response status byte */ + uint32 card_rsp_data; /* Used to pass back response data word */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdspi.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmsdspi.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size); +extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size); + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#endif /* _BCM_SD_SPI_H */ diff --git a/bcmdhd.101.10.361.x/bcmsdspi_linux.c b/bcmdhd.101.10.361.x/bcmsdspi_linux.c new file mode 100755 index 0000000..b771682 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdspi_linux.c @@ -0,0 +1,433 @@ +/* + * Broadcom SPI Host Controller Driver - Linux Per-port + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +#include +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#ifdef BCMSPI_ANDROID +#include +#include +#include +#else +#include +#include /* SDIO Device and Protocol Specs */ +#include /* request_irq(), free_irq() */ +#include +#include +#endif /* BCMSPI_ANDROID */ + +#ifndef BCMSPI_ANDROID +extern uint sd_crc; +module_param(sd_crc, uint, 0); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define KERNEL26 +#endif +#endif /* !BCMSPI_ANDROID */ + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +#ifndef BCMSPI_ANDROID + wait_queue_head_t intr_wait_queue; +#endif /* !BCMSPI_ANDROID */ +}; + +#ifndef BCMSPI_ANDROID +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) /* XXX Doesn't handle CONFIG_PREEMPT? */ +#endif + +/* Interrupt handler */ +static irqreturn_t +sdspi_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + sd = (sdioh_info_t *)dev_id; + sd->local_intrcount++; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + ours = spi_check_client_intr(sd, NULL); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sdos = (struct sdos_info *)sd->sdos_info; + wake_up_interruptible(&sdos->intr_wait_queue); + } + + return IRQ_RETVAL(ours); + } +} +#endif /* !BCMSPI_ANDROID */ + +#ifdef BCMSPI_ANDROID +static struct spi_device *gBCMSPI = NULL; + +extern int bcmsdh_probe(struct device *dev); +extern int bcmsdh_remove(struct device *dev); + +static int bcmsdh_spi_probe(struct spi_device *spi_dev) +{ + int ret = 0; + + gBCMSPI = spi_dev; + +#ifdef SPI_PIO_32BIT_RW + spi_dev->bits_per_word = 32; +#else + spi_dev->bits_per_word = 8; +#endif /* SPI_PIO_32BIT_RW */ + ret = spi_setup(spi_dev); + + if (ret) { + sd_err(("bcmsdh_spi_probe: spi_setup fail with %d\n", ret)); + } + sd_err(("bcmsdh_spi_probe: spi_setup with %d, bits_per_word=%d\n", + ret, spi_dev->bits_per_word)); + ret = bcmsdh_probe(&spi_dev->dev); + + return ret; +} + +static int bcmsdh_spi_remove(struct spi_device *spi_dev) +{ + int ret = 0; + + ret = bcmsdh_remove(&spi_dev->dev); + gBCMSPI = NULL; + + return ret; +} + +static struct spi_driver bcmsdh_spi_driver = { + .probe = bcmsdh_spi_probe, + .remove = bcmsdh_spi_remove, + .driver = { + .name = "wlan_spi", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, +}; + +/* + * module init +*/ +int bcmsdh_register_client_driver(void) +{ + int error = 0; + sd_trace(("bcmsdh_gspi: %s Enter\n", __FUNCTION__)); + + error = spi_register_driver(&bcmsdh_spi_driver); + + return error; +} + +/* + * module cleanup +*/ +void bcmsdh_unregister_client_driver(void) +{ + sd_trace(("%s Enter\n", __FUNCTION__)); + spi_unregister_driver(&bcmsdh_spi_driver); +} +#endif /* BCMSPI_ANDROID */ + +/* Register with Linux for interrupts */ +int +spi_register_irq(sdioh_info_t *sd, uint irq) +{ +#ifndef BCMSPI_ANDROID + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } +#endif /* !BCMSPI_ANDROID */ + return SUCCESS; +} + +/* Free Linux irq */ +void +spi_free_irq(uint irq, sdioh_info_t *sd) +{ +#ifndef 
BCMSPI_ANDROID + free_irq(irq, sd); +#endif /* !BCMSPI_ANDROID */ +} + +/* Map Host controller registers */ +#ifndef BCMSPI_ANDROID +uint32 * +spi_reg_map(osl_t *osh, uintptr addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +spi_reg_unmap(osl_t *osh, uintptr addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} +#endif /* !BCMSPI_ANDROID */ + +int +spi_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); +#ifndef BCMSPI_ANDROID + init_waitqueue_head(&sdos->intr_wait_queue); +#endif /* !BCMSPI_ANDROID */ + return BCME_OK; +} + +void +spi_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + +#ifndef BCMSPI_ANDROID + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#endif /* !BCMSPI_ANDROID */ + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; +#ifndef BCMSPI_ANDROID + if (enable && !sd->lockcount) + spi_devintr_on(sd); + else + spi_devintr_off(sd); +#endif /* !BCMSPI_ANDROID */ + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +spi_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + if (sd->lockcount) { + sd_err(("%s: Already locked!\n", __FUNCTION__)); + ASSERT(sd->lockcount == 0); + } +#ifdef BCMSPI_ANDROID + if (sd->client_intr_enabled) + bcmsdh_oob_intr_set(0); +#else + spi_devintr_off(sd); +#endif /* BCMSPI_ANDROID */ + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); +} + +/* Enable client interrupt */ +void +spi_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { +#ifdef BCMSPI_ANDROID + bcmsdh_oob_intr_set(1); +#else + spi_devintr_on(sd); +#endif /* BCMSPI_ANDROID */ + } + spin_unlock_irqrestore(&sdos->lock, flags); +} + +#ifndef BCMSPI_ANDROID +void spi_waitbits(sdioh_info_t *sd, bool yield) +{ +#ifndef BCMSDYIELD + ASSERT(!yield); +#endif + sd_trace(("%s: yield %d canblock %d\n", + __FUNCTION__, yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + +#ifdef BCMSDYIELD + if (yield && BLOCKABLE()) { + struct sdos_info *sdos; + sdos = (struct sdos_info 
*)sd->sdos_info; + /* Wait for the indication, the interrupt will be masked when the ISR fires. */ + wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); + } else +#endif /* BCMSDYIELD */ + { + spi_spinbits(sd); + } + +} +#else /* !BCMSPI_ANDROID */ +int bcmgspi_dump = 0; /* Set to dump complete trace of all SPI bus transactions */ + +static void +hexdump(char *pfx, unsigned char *msg, int msglen) +{ + int i, col; + char buf[80]; + + ASSERT(strlen(pfx) + 49 <= sizeof(buf)); + + col = 0; + + for (i = 0; i < msglen; i++, col++) { + if (col % 16 == 0) + strcpy(buf, pfx); + sprintf(buf + strlen(buf), "%02x", msg[i]); + if ((col + 1) % 16 == 0) + printf("%s\n", buf); + else + sprintf(buf + strlen(buf), " "); + } + + if (col % 16 != 0) + printf("%s\n", buf); +} + +/* Send/Receive an SPI Packet */ +void +spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen) +{ + int write = 0; + int tx_len = 0; + struct spi_message msg; + struct spi_transfer t[2]; + + spi_message_init(&msg); + memset(t, 0, 2*sizeof(struct spi_transfer)); + + if (sd->wordlen == 2) +#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) + write = msg_out[2] & 0x80; /* XXX bit 7: read:0, write :1 */ +#else + write = msg_out[1] & 0x80; /* XXX bit 7: read:0, write :1 */ +#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */ + if (sd->wordlen == 4) +#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) + write = msg_out[0] & 0x80; /* XXX bit 7: read:0, write :1 */ +#else + write = msg_out[3] & 0x80; /* XXX bit 7: read:0, write :1 */ +#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */ + + if (bcmgspi_dump) { + hexdump(" OUT: ", msg_out, msglen); + } + + tx_len = write ? msglen-4 : 4; + + sd_trace(("spi_sendrecv: %s, wordlen %d, cmd : 0x%02x 0x%02x 0x%02x 0x%02x\n", + write ? "WR" : "RD", sd->wordlen, + msg_out[0], msg_out[1], msg_out[2], msg_out[3])); + + t[0].tx_buf = (char *)&msg_out[0]; + t[0].rx_buf = 0; + t[0].len = tx_len; + + spi_message_add_tail(&t[0], &msg); + + t[1].rx_buf = (char *)&msg_in[tx_len]; + t[1].tx_buf = 0; + t[1].len = msglen-tx_len; + + spi_message_add_tail(&t[1], &msg); + spi_sync(gBCMSPI, &msg); + + if (bcmgspi_dump) { + hexdump(" IN : ", msg_in, msglen); + } +} +#endif /* !BCMSPI_ANDROID */ diff --git a/bcmdhd.101.10.361.x/bcmsdstd.c b/bcmdhd.101.10.361.x/bcmsdstd.c new file mode 100755 index 0000000..b58de62 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdstd.c @@ -0,0 +1,5406 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +#include + +#include +#include +#include +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* Standard SDIO Host Controller Specification */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ +#include +#include +/* XXX Quick NDIS hack */ +#ifdef NDIS +#define inline __inline +#define PCI_CFG_VID 0 +#define PCI_CFG_BAR0 0x10 +#endif + +#define SD_PAGE_BITS 12 +#define SD_PAGE (1 << SD_PAGE_BITS) +#define SDSTD_MAX_TUNING_PHASE 5 + +/* + * Upper GPIO 16 - 31 are available on J22 + * J22.pin3 == gpio16, J22.pin5 == gpio17, etc. + * Lower GPIO 0 - 15 are available on J15 (WL_GPIO) + */ +#define SDH_GPIO16 16 +#define SDH_GPIO_ENABLE 0xffff + +#include +#include /* SDIOH (host controller) core hardware definitions */ + +/* Globals */ +uint sd_msglevel = SDH_ERROR_VAL; + +uint sd_hiok = TRUE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ +uint sd_f1_blocksize = BLOCK_SIZE_4318; /* Default blocksize */ + +#define sd3_trace(x) + +/* sd3ClkMode: 0-SDR12 [25MHz] + * 1-SDR25 [50MHz]+SHS=1 + * 2-SDR50 [100MHz]+SSDR50=1 + * 3-SDR104 [208MHz]+SSDR104=1 + * 4-DDR50 [50MHz]+SDDR50=1 + */ +#define SD3CLKMODE_0_SDR12 (0) +#define SD3CLKMODE_1_SDR25 (1) +#define SD3CLKMODE_2_SDR50 (2) +#define SD3CLKMODE_3_SDR104 (3) +#define SD3CLKMODE_4_DDR50 (4) +#define SD3CLKMODE_DISABLED (-1) +#define SD3CLKMODE_AUTO (99) + +/* values for global_UHSI_Supp : Means host and card caps match. */ +#define HOST_SDR_UNSUPP (0) +#define HOST_SDR_12_25 (1) +#define HOST_SDR_50_104_DDR (2) + +/* depends-on/affects sd3_autoselect_uhsi_max. + * see sd3_autoselect_uhsi_max + */ +int sd_uhsimode = SD3CLKMODE_DISABLED; +uint sd_tuning_period = CAP3_RETUNING_TC_OTHER; +uint sd_delay_value = 500000; +/* Enables host to dongle glomming. Also increases the + * dma buffer size. This will increase the rx throughput + * as there will be lesser CMD53 transactions + */ +#ifdef BCMSDIOH_TXGLOM +uint sd_txglom; +#ifdef LINUX +module_param(sd_txglom, uint, 0); +#endif +#endif /* BCMSDIOH_TXGLOM */ + +char dhd_sdiod_uhsi_ds_override[2] = {' '}; + +#define MAX_DTS_INDEX (3) +#define DRVSTRN_MAX_CHAR ('D') +#define DRVSTRN_IGNORE_CHAR (' ') + +char DTS_vals[MAX_DTS_INDEX + 1] = { + 0x1, /* Driver Strength Type-A */ + 0x0, /* Driver Strength Type-B */ + 0x2, /* Driver Strength Type-C */ + 0x3, /* Driver Strength Type-D */ + }; + +/* depends-on/affects sd_uhsimode. + select MAX speed automatically based on caps of host and card. + If this is 1, sd_uhsimode will be ignored. If the sd_uhsimode is set + by the user specifically, this var becomes 0. default value: 0. [XXX:TBD: for future] + */ +uint32 sd3_autoselect_uhsi_max = 0; + +#define MAX_TUNING_ITERS (40) +/* (150+10)millisecs total time; so dividing it for per-loop */ +#define PER_TRY_TUNING_DELAY_MS (160/MAX_TUNING_ITERS) +#define CLKTUNING_MAX_BRR_RETRIES (1000) /* 1 ms: 1000 retries with 1 us delay per loop */ + +/* table analogous to preset value register. +* This is bcos current HC doesn't have preset value reg support. +* All has DrvStr as 'B' [val:0] and CLKGEN as 0. 
+*/ +static unsigned short presetval_sw_table[] = { + 0x0520, /* initialization: DrvStr:'B' [0]; CLKGen:0; + * SDCLKFreqSel: 520 [division: 320*2 = 640: ~400 KHz] + */ + 0x0008, /* default speed:DrvStr:'B' [0]; CLKGen:0; + * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz] + */ + 0x0004, /* High speed: DrvStr:'B' [0]; CLKGen:0; + * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz] + */ + 0x0008, /* SDR12: DrvStr:'B' [0]; CLKGen:0; + * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz] + */ + 0x0004, /* SDR25: DrvStr:'B' [0]; CLKGen:0; + * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz] + */ + 0x0001, /* SDR50: DrvStr:'B' [0]; CLKGen:0; + * SDCLKFreqSel: 2 [division: 1*2 = 2: ~100 MHz] + */ + 0x0001, /* SDR104: DrvStr:'B' [0]; CLKGen:0; + SDCLKFreqSel: 1 [no division: ~255/~208 MHz] + */ + 0x0002 /* DDR50: DrvStr:'B' [0]; CLKGen:0; + SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz] + */ +}; + +/* This is to have software overrides to the hardware. Info follows: + For override [1]: Preset registers: not supported + Voltage switch: not supported + Clock Tuning: not supported +*/ +bool sd3_sw_override1 = FALSE; +bool sd3_sw_read_magic_bytes = FALSE; + +#define SD3_TUNING_REQD(sd, sd_uhsimode) ((sd_uhsimode != SD3CLKMODE_DISABLED) && \ + (sd->version == HOST_CONTR_VER_3) && \ + ((sd_uhsimode == SD3CLKMODE_3_SDR104) || \ + ((sd_uhsimode == SD3CLKMODE_2_SDR50) && \ + (GFIELD(sd->caps3, CAP3_TUNING_SDR50))))) + +/* find next power of 2 */ +#define NEXT_POW2(n) {n--; n |= n>>1; n |= n>>2; n |= n>>4; n++;} + +#ifdef BCMSDYIELD +bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */ +uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */ +bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */ +#endif + +/* XXX: Issues with CMD14 enter/exit sleep + * XXX: Temp fix for special CMD14 handling + */ +#define F1_SLEEPCSR_ADDR 0x1001F + +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz + :might get changed in code for 208 + */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_3_power_save = 1; /* Default to SDIO 3.0 power save */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ +uint8 sd_dma_mode = DMA_MODE_AUTO; /* Default to AUTO & program based on capability */ + +/* XXX Base timeout counter value on 48MHz (2^20 @ 48MHz => 21845us) + * Could adjust by adding sd_divisor (to maintain bit count) but really + * need something more elaborate to do that right. Still allows xfer + * of about 1000 bytes at 400KHz, so constant is ok. + * Timeout control N produces 2^(13+N) counter. 
+ */ +uint sd_toctl = 7; +static bool trap_errs = FALSE; + +static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" }; + +/* Prototypes */ +static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor); +static uint16 sdstd_start_power(sdioh_info_t *sd, int volts_req); +static bool sdstd_bus_width(sdioh_info_t *sd, int width); +static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode); +static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode); +static int sdstd_card_enablefuncs(sdioh_info_t *sd); +static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count); +static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg); +static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int sdstd_driver_init(sdioh_info_t *sd); +static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset); +static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int sdstd_abort(sdioh_info_t *sd, uint func); +static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg); +static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize); +static void sd_map_dma(sdioh_info_t * sd); +static void sd_unmap_dma(sdioh_info_t * sd); +static void sd_clear_adma_dscr_buf(sdioh_info_t *sd); +static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data); +static void sd_create_adma_descriptor(sdioh_info_t *sd, + uint32 index, uint32 addr_phys, + uint16 length, uint16 flags); +static void sd_dump_adma_dscr(sdioh_info_t *sd); +static void sdstd_dumpregs(sdioh_info_t *sd); + +static int sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode); +static int sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd); +static int sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd, + int sd3_requested_clkmode); +static bool sdstd_3_get_matching_drvstrn(sdioh_info_t *sd, + int sd3_requested_clkmode, uint32 *drvstrn, uint16 *presetval); +static int sdstd_3_clock_wrapper(sdioh_info_t *sd); +static int sdstd_clock_wrapper(sdioh_info_t *sd); + +#ifdef BCMINTERNAL +#ifdef NOTUSED +static int parse_caps(uint32 caps_reg, char *buf, int len); +static int parse_state(uint32 state_reg, char *buf, int len); +static void cis_fetch(sdioh_info_t *sd, int func, char *data, int len); +#endif /* NOTUSED */ +#endif /* BCMINTERNAL */ + +#ifdef BCMDBG +static void print_regs(sdioh_info_t *sd); +#endif + +/* + * Private register access routines. 
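+ * All of them access the memory-mapped host controller register window
+ * (sd->mem_space) directly and log through sd_ctrl().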
+ */ + +/* 16 bit PCI regs */ + +/* XXX This is a hack to satisfy the -Wmissing-prototypes warning */ +extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg); +uint16 +sdstd_rreg16(sdioh_info_t *sd, uint reg) +{ + + volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg); + sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data)); + return data; +} + +/* XXX This is a hack to satisfy the -Wmissing-prototypes warning */ +extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data); +void +sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data) +{ + *(volatile uint16 *)(sd->mem_space + reg) = (uint16) data; + sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data)); +} + +static void +sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val) +{ + volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg); + sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val)); + data |= val; + *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data; + +} +static void +sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val) +{ + + volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg); + sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val)); + data &= ~mask; + data |= (val & mask); + *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data; +} + +/* 32 bit PCI regs */ +static uint32 +sdstd_rreg(sdioh_info_t *sd, uint reg) +{ + volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg); + sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data)); + return data; +} +static inline void +sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data) +{ + *(volatile uint32 *)(sd->mem_space + reg) = (uint32)data; + sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data)); + +} +#ifdef BCMINTERNAL +#ifdef NOTUSED +static void +sdstd_or_reg(sdioh_info_t *sd, uint reg, uint32 val) +{ + volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg); + data |= val; + *(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data; +} +static void +sdstd_mod_reg(sdioh_info_t *sd, uint reg, uint32 mask, uint32 val) +{ + volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg); + data &= ~mask; + data |= (val & mask); + *(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data; +} +#endif /* NOTUSED */ +#endif /* BCMINTERNAL */ + +/* 8 bit PCI regs */ +static inline void +sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data) +{ + *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data; + sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data)); +} +static uint8 +sdstd_rreg8(sdioh_info_t *sd, uint reg) +{ + volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg); + sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data)); + return data; +} + +/* + * Private work routines + */ + +sdioh_info_t *glob_sd; + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + glob_sd = sd; + sd->osh = osh; + if (sdstd_osinit(sd) != 0) { + sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + sd->mem_space = (volatile char *)sdstd_reg_map(osh, (ulong)bar0, SDIOH_REG_WINSZ); + sd_init_dma(sd); + sd->irq = irq; + if (sd->mem_space == NULL) { + sd_err(("%s:ioremap() failed\n", __FUNCTION__)); + 
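+		/* unwind: release the per-OS state from sdstd_osinit() and the
+		 * sdioh_info_t allocation itself
+		 */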
sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+	sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->sd_dma_mode = sd_dma_mode;
+
+	/* XXX Haven't figured out how to make bytemode work with dma */
+	if (!sd->sd_blockmode)
+		sd->sd_dma_mode = DMA_MODE_NONE;
+
+	if (sdstd_driver_init(sd) != SUCCESS) {
+		/* If the host CPU was reset without resetting the SD bus or
+		   SD device, the device will still have its RCA but the
+		   driver no longer knows what it is (since the driver has been restarted).
+		   Go through once to clear the RCA and again reassign it.
+		 */
+		sd_info(("driver_init failed - Reset RCA and try again\n"));
+		if (sdstd_driver_init(sd) != SUCCESS) {
+			sd_err(("%s:driver_init() failed\n", __FUNCTION__));
+			if (sd->mem_space) {
+				sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+				sd->mem_space = NULL;
+			}
+			sdstd_osfree(sd);
+			MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+			return (NULL);
+		}
+	}
+
+	/* XXX Needed for NDIS as its OSL checks for correct dma address width
+	 * This value is normally set by wlc_attach() which has yet to run
+	 */
+	OSL_DMADDRWIDTH(osh, 32);
+
+	/* Always map DMA buffers, so we can switch between DMA modes. */
+	sd_map_dma(sd);
+
+	if (sdstd_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		sdstd_free_irq(sd->irq, sd);
+		if (sd->mem_space) {
+			sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+			sd->mem_space = NULL;
+		}
+
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_unmap_dma(sd);
+		sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+		if (sd->sd3_tuning_reqd == TRUE) {
+			sdstd_3_osclean_tuning(sd);
+			sd->sd3_tuning_reqd = FALSE;
+		}
+		sd->sd3_tuning_disable = FALSE;
+		sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
+		sdstd_free_irq(sd->irq, sd);
+		if (sd->card_init_done)
+			sdstd_reset(sd, 1, 1);
+		if (sd->mem_space) {
+			sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+			sd->mem_space = NULL;
+		}
+
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	uint16 intrstatus;
+	intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
+	return !!(intrstatus & CLIENT_INTR);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
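+	/* number of I/O functions discovered on the client card */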
return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_POWER_SAVE, + IOV_YIELDCPU, + IOV_MINYIELD, + IOV_FORCERB, + IOV_CLOCK, + IOV_UHSIMOD, + IOV_TUNEMOD, + IOV_TUNEDIS +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, 0, IOVT_UINT32, 0 }, +#ifdef BCMSDYIELD + {"sd_yieldcpu", IOV_YIELDCPU, 0, 0, IOVT_BOOL, 0 }, + {"sd_minyield", IOV_MINYIELD, 0, 0, IOVT_UINT32, 0 }, + {"sd_forcerb", IOV_FORCERB, 0, 0, IOVT_BOOL, 0 }, +#endif + {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 }, + {"sd_power_save", IOV_POWER_SAVE, 0, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0}, + {"sd_uhsimode", IOV_UHSIMOD, 0, 0, IOVT_UINT32, 0}, +#ifdef BCMDBG + {"sd_hciregs", IOV_HCIREGS, 0, 0, IOVT_BUFFER, 0 }, +#endif + {"tuning_mode", IOV_TUNEMOD, 0, 0, IOVT_UINT32, 0}, + {"sd3_tuning_disable", IOV_TUNEDIS, 0, 0, IOVT_BOOL, 0}, + + {NULL, 0, 0, 0, 0, 0 } +}; +uint8 sdstd_turn_on_clock(sdioh_info_t *sd) +{ + sdstd_or_reg16(sd, SD_ClockCntrl, 0x4); + return 0; +} + +uint8 sdstd_turn_off_clock(sdioh_info_t *sd) +{ + sdstd_wreg16(sd, SD_ClockCntrl, sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); + return 0; +} + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, uint len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + uint val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* XXX Copied from dhd, copied from wl; certainly overkill here? */ + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + BCM_REFERENCE(bool_val); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + if (!si->sd_blockmode) + si->sd_dma_mode = DMA_MODE_NONE; + break; + +#ifdef BCMSDYIELD + case IOV_GVAL(IOV_YIELDCPU): + int_val = sd_yieldcpu; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_YIELDCPU): + sd_yieldcpu = (bool)int_val; + break; + + case IOV_GVAL(IOV_MINYIELD): + int_val = sd_minyield; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MINYIELD): + sd_minyield = (bool)int_val; + break; + + case IOV_GVAL(IOV_FORCERB): + int_val = sd_forcerb; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCERB): + sd_forcerb = (bool)int_val; + break; +#endif /* BCMSDYIELD */ + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + /* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */ + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + sdstd_lock(si); + bcmerror = set_client_block_size(si, func, blksize); + sdstd_unlock(si); + break; + } + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_dma_mode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_dma_mode = (char)int_val; + sdstd_set_dma_mode(si, si->sd_dma_mode); + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!sdstd_start_clock(si, (uint16)sd_divisor)) { + sd_err(("set clock failed!\n")); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_POWER_SAVE): + int_val = (uint32)sd_3_power_save; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + if (sd_power == 1) { + if (sdstd_driver_init(si) != SUCCESS) { + sd_err(("set SD Slot power failed!\n")); + bcmerror = BCME_ERROR; + } else { + sd_err(("SD Slot Powered ON.\n")); + } + } else { + uint8 pwr = 0; + + pwr = SFIELD(pwr, PWR_BUS_EN, 0); + sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */ + sd_err(("SD Slot Powered OFF.\n")); + } + break; + + case IOV_SVAL(IOV_POWER_SAVE): + sd_3_power_save = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = 
(uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + if (sd_clock == 1) { + sd_info(("SD Clock turned ON.\n")); + if (!sdstd_start_clock(si, (uint16)sd_divisor)) { + sd_err(("sdstd_start_clock failed\n")); + bcmerror = BCME_ERROR; + } + } else { + /* turn off HC clock */ + sdstd_wreg16(si, SD_ClockCntrl, + sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4)); + + sd_info(("SD Clock turned OFF.\n")); + } + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + + if (!sdstd_bus_width(si, sd_sdmode)) { + sd_err(("sdstd_bus_width failed\n")); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok); + break; + + case IOV_GVAL(IOV_UHSIMOD): + sd3_trace(("%s: Get UHSI: \n", __FUNCTION__)); + int_val = (int)sd_uhsimode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_UHSIMOD): + { + int oldval = sd_uhsimode; /* save old, working value */ + sd3_trace(("%s: Set UHSI: \n", __FUNCTION__)); + /* check if UHSI is supported by card/host */ + if (!(si->card_UHSI_voltage_Supported && si->host_UHSISupported)) { + sd_err(("%s:UHSI not suppoted!\n", __FUNCTION__)); + bcmerror = BCME_UNSUPPORTED; + break; + } + /* check for valid values */ + if (!((int_val == SD3CLKMODE_AUTO) || + (int_val == SD3CLKMODE_DISABLED) || + ((int_val >= SD3CLKMODE_0_SDR12) && + (int_val <= SD3CLKMODE_4_DDR50)))) { + sd_err(("%s:CLK: bad arg!\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + sd_uhsimode = int_val; + if (SUCCESS != sdstd_3_clock_wrapper(si)) { + sd_err(("%s:Error in setting uhsi clkmode:%d," + "restoring back to %d\n", __FUNCTION__, + sd_uhsimode, oldval)); + /* try to set back the old one */ + sd_uhsimode = oldval; + if (SUCCESS != sdstd_3_clock_wrapper(si)) { + sd_err(("%s:Error in setting uhsi to old mode;" + "ignoring:\n", __FUNCTION__)); + } + } + break; + } +#ifdef DHD_DEBUG + case IOV_SVAL(IOV_TUNEMOD): + { + + if( int_val == SD_DHD_DISABLE_PERIODIC_TUNING) { /* do tuning single time */ + sd3_trace(("Start tuning from Iovar\n")); + si->sd3_tuning_reqd = TRUE; + sdstd_enable_disable_periodic_timer(si, int_val); + sdstd_lock(si); + sdstd_3_clk_tuning(si, sdstd_3_get_uhsi_clkmode(si)); + sdstd_unlock(si); + si->sd3_tuning_reqd = FALSE; + } + if (int_val == SD_DHD_ENABLE_PERIODIC_TUNING) { + sd3_trace(("Enabling automatic tuning\n")); + si->sd3_tuning_reqd = TRUE; + sdstd_enable_disable_periodic_timer(si, int_val); + } + break; + } +#endif /* debugging purpose */ + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_HOSTREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 
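+		          /* (register width is inferred from offset alignment:
+		           * odd offsets are accessed 8-bit, 2-byte-aligned 16-bit,
+		           * 4-byte-aligned 32-bit, matching the reads below) */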
16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + int_val = sdstd_rreg8(si, sd_ptr->offset); + else if (sd_ptr->offset & 2) + int_val = sdstd_rreg16(si, sd_ptr->offset); + else + int_val = sdstd_rreg(si, sd_ptr->offset); + + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value); + else if (sd_ptr->offset & 2) + sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value); + else + sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value); + + break; + } + + case IOV_GVAL(IOV_DEVREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + +#ifdef BCMDBG + case IOV_GVAL(IOV_HCIREGS): + { + struct bcmstrbuf b; + bcm_binit(&b, arg, len); + + sdstd_lock(si); + bcm_bprintf(&b, "IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n", + sdstd_rreg16(si, SD_IntrStatus), + sdstd_rreg16(si, SD_ErrorIntrStatus)); + bcm_bprintf(&b, "IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n", + sdstd_rreg16(si, SD_IntrStatusEnable), + sdstd_rreg16(si, SD_ErrorIntrStatusEnable)); + bcm_bprintf(&b, "IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n", + sdstd_rreg16(si, SD_IntrSignalEnable), + sdstd_rreg16(si, SD_ErrorIntrSignalEnable)); + print_regs(si); + + sdstd_unlock(si); + + if (!b.size) + bcmerror = BCME_BUFTOOSHORT; + break; + } +#endif /* BCMDBG */ + + case IOV_SVAL(IOV_TUNEDIS): + si->sd3_tuning_disable = (bool)int_val; + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + /* XXX Remove protective lock after clients all clean... 
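+	 * (As of this version sdioh_iovar_op() itself takes no lock; handlers
+	 * that touch the controller, e.g. sd_blocksize and sd_hciregs, take
+	 * sdstd_lock() themselves.)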
*/ + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 foo; + uint8 *cis = cisd; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + bzero(cis, length); + return SDIOH_API_RC_FAIL; + } + + sdstd_lock(sd); + *cis = 0; + for (count = 0; count < length; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdstd_card_regread(sd, 0, offset, 1, &foo)) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + sdstd_unlock(sd); + return SDIOH_API_RC_FAIL; + } + *cis = (uint8)(foo & 0xff); + cis++; + } + sdstd_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status = SDIOH_API_RC_SUCCESS; + uint32 cmd_arg; + uint32 rsp5; + + sdstd_lock(sd); + if (rw == SDIOH_READ) + sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA); + + /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */ + sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING); + +#ifdef BCMDBG + if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) { + sd_err(("%s: Entering: ErririntrStatus 0x%x, intstat = 0x%x\n", + __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg16(sd, SD_IntrStatus))); + } +#endif + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte); + + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) { + /* Change to DATA_TRANSFER_IDLE */ + sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE); + sdstd_unlock(sd); + return status; + } + + sdstd_cmd_getrsp(sd, &rsp5, 1); + if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) { + sd_err(("%s: 1: ErrorintrStatus 0x%x\n", + __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus))); + status = SDIOH_API_RC_FAIL; + } + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) { + /* PR 101351: sdiod_aos sleep followed by immediate wakeup + * before sdiod_aos takes over has a problem. + * While exiting sleep with CMD14, device returning 0x00 + * Don't flag as error for now for 0x1001f. 
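+	 * For reference, per the SDIO spec GFIELD(rsp5, RSP5_FLAGS) == 0x10
+	 * is the expected CMD52 R5 flags byte: no error bits set and IO
+	 * current state = CMD (0x1 in bits [5:4]).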
+ */ + if (GFIELD(cmd_arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR) { + sd_err(("%s: rsp5 flags is 0x%x\t %d \n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func)); + } + status = SDIOH_API_RC_FAIL; + } + + if (GFIELD(rsp5, RSP5_STUFF)) { + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + status = SDIOH_API_RC_FAIL; + } + + if (rw == SDIOH_READ) + *byte = GFIELD(rsp5, RSP5_DATA); + + /* Change to DATA_TRANSFER_IDLE */ + sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE); + + /* check if we have to do tuning; if so, start */ + sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA); + + sdstd_unlock(sd); + return status; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + sdstd_lock(sd); + + sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA); + + /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */ + sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING); + + if (rw == SDIOH_READ) { + status = sdstd_card_regread(sd, func, addr, nbytes, word); + } else { + status = sdstd_card_regwrite(sd, func, addr, nbytes, *word); + } + + /* Change to DATA_TRANSFER_IDLE */ + sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE); + + /* check if we have to do tuning; if so, start */ + sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA); + + sdstd_unlock(sd); + return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +#ifdef BCMSDIOH_TXGLOM +void +sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len) +{ + BCM_REFERENCE(pkt); + sd->glom_info.dma_buf_arr[sd->glom_info.count] = frame; + sd->glom_info.nbytes[sd->glom_info.count] = len; + /* Convert the frame addr to phy addr for DMA in case of host controller version3 */ + if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) { + sd->glom_info.dma_phys_arr[sd->glom_info.count] = DMA_MAP(sd->osh, + frame, + len, + DMA_TX, 0, 0); + } + sd->glom_info.count++; +} + +void +sdioh_glom_clear(sdioh_info_t *sd) +{ + int i; + /* DMA_MAP is done per frame only if host controller version is 3 */ + if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) { + for (i = 0; i < sd->glom_info.count; i++) { + DMA_UNMAP(sd->osh, + sd->glom_info.dma_phys_arr[i], + sd->glom_info.nbytes[i], + DMA_TX, 0, 0); + } + } + sd->glom_info.count = 0; +} + +uint +sdioh_set_mode(sdioh_info_t *sd, uint mode) +{ + if (mode == SDPCM_TXGLOM_CPY) + sd->txglom_mode = mode; + else if ((mode == SDPCM_TXGLOM_MDESC) && (sd->version == HOST_CONTR_VER_3)) + sd->txglom_mode = mode; + + return (sd->txglom_mode); +} + +bool +sdioh_glom_enabled(void) +{ + return sd_txglom; +} +#endif /* BCMSDIOH_TXGLOM */ + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + uint8 is_ddr50 = FALSE; + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + uint8 *localbuf = NULL, *tmpbuf = NULL; + bool local_blockmode = sd->sd_blockmode; + SDIOH_API_RC status = SDIOH_API_RC_SUCCESS; + + sdstd_lock(sd); + + is_ddr50 = (sd_uhsimode == SD3CLKMODE_4_DDR50) ? 
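+	             /* (DDR50 clocks data on both edges, so transfers must be
+	              * an even number of bytes; the odd-length WAR in the loop
+	              * below keys off this flag) */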
TRUE : FALSE;
+
+	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
+
+	/* Change to DATA_TRANSFER_ONGOING, protection against tuning tasklet */
+	sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+#ifdef BCMSDIOH_TXGLOM
+	if (sd_txglom) {
+		while (pkt) {
+			sdioh_glom_post(sd, PKTDATA(sd->osh, pkt), pkt, PKTLEN(sd->osh, pkt));
+			pkt = PKTNEXT(sd->osh, pkt);
+		}
+	}
+#endif
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks:
+	 * Bytemode: 1 block at a time.
+	 * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
+	 * Both: leftovers are handled last (will be sent via bytemode).
+	 */
+	while (buflen > 0) {
+		if (local_blockmode) {
+			int max_tran_size = SD_PAGE;
+#ifdef BCMSDIOH_TXGLOM
+			/* There is no alignment requirement for HC3 */
+			if ((sd->version == HOST_CONTR_VER_3) && sd_txglom)
+				max_tran_size = SD_PAGE * 4;
+#endif
+			/* Max xfer is Page size */
+			len = MIN(max_tran_size, buflen);
+
+			/* Round down to a block boundary */
+			if (buflen > sd->client_block_size[func])
+				len = (len/sd->client_block_size[func]) *
+				        sd->client_block_size[func];
+			/* XXX Arasan trashes 3-byte transfers, WAR to add one byte extra. */
+			/* XXX In case of SDIO3.0 DDR50 mode, if the number of bytes to be
+			 * transferred is odd, append one more byte to make it even.
+			 * Check if odd bytes can come for SDIO_FUNC_2 also.
+			 */
+			if ((func == SDIO_FUNC_1) && (((len % 4) == 3) || (((len % 2) == 1) &&
+			    (is_ddr50))) && ((rw == SDIOH_WRITE) || (rw == SDIOH_READ))) {
+				sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
+				len++;
+				tmpbuf = buffer;
+				if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
+					sd_err(("out of memory, malloced %d bytes\n",
+					        MALLOCED(sd->osh)));
+					status = SDIOH_API_RC_FAIL;
+					goto done;
+				}
+				bcopy(buffer, localbuf, len);
+				buffer = localbuf;
+			}
+		} else {
+			/* Byte mode: One block at a time */
+			len = MIN(sd->client_block_size[func], buflen);
+		}
+
+		if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			status = SDIOH_API_RC_FAIL;
+		}
+
+		/* XXX Restore len and buffer pointer WAR'ed for Arasan 3-byte transfer problem */
+		/* XXX WAR for SDIO3.0 DDR50 mode.
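+		 * (The extra byte only ever lives in the bounce buffer allocated
+		 * above, so len is decremented and the caller's buffer pointer
+		 * is restored before the loop continues.)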
*/ + if (local_blockmode && localbuf) { + MFREE(sd->osh, localbuf, len); + len--; + buffer = tmpbuf; + sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__)); + } + + if (status == SDIOH_API_RC_FAIL) { + goto done; + } + + buffer += len; + buflen -= len; + if (!fifo) + addr += len; +#ifdef BCMSDIOH_TXGLOM + /* This loop should not come in case of glommed pkts as it is send in + * multiple of blocks or total pkt size less than a block + */ + if (sd->glom_info.count != 0) + buflen = 0; +#endif + } +done: + + /* Change to DATA_TRANSFER_IDLE */ + sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE); + + /* check if we have to do tuning; if so, start */ + sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA); + + sdstd_unlock(sd); + +#ifdef BCMSDIOH_TXGLOM + if (sd_txglom) + sdioh_glom_clear(sd); +#endif + + return status; +} + +extern SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + uint offset = 0; + uint16 val; + + /* check if upper bank */ + if (gpio >= SDH_GPIO16) { + gpio -= SDH_GPIO16; + offset = 2; + } + + val = sdstd_rreg16(sd, SD_GPIO_OE + offset); + val |= (1 << gpio); + sdstd_wreg16(sd, SD_GPIO_OE + offset, val); + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + uint offset = 0; + uint16 val; + + /* check if upper bank */ + if (gpio >= SDH_GPIO16) { + gpio -= SDH_GPIO16; + offset = 2; + } + + val = sdstd_rreg16(sd, SD_GPIO_Reg + offset); + if (enab == TRUE) + val |= (1 << gpio); + else + val &= ~(1 << gpio); + sdstd_wreg16(sd, SD_GPIO_Reg + offset, val); + + return SDIOH_API_RC_SUCCESS; +} + +extern bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + uint offset = 0; + uint16 val; + + /* check if upper bank */ + if (gpio >= SDH_GPIO16) { + gpio -= SDH_GPIO16; + offset = 2; + } + + val = sdstd_rreg16(sd, SD_GPIO_Reg + offset); + val = (val >> gpio) & 1; + + return (val == 1); +} + +extern SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + uint rev; + + rev = sdstd_rreg16(sd, SD_HostControllerVersion) >> 8; + + /* Only P206 (fpga rev >= 16) supports gpio */ + if (rev < 16) { + sd_err(("%s: gpio not supported in rev %d \n", __FUNCTION__, rev)); + return SDIOH_API_RC_FAIL; + } + + sdstd_wreg16(sd, SD_GPIO_Enable, SDH_GPIO_ENABLE); + sdstd_wreg16(sd, SD_GPIO_Enable + 2, SDH_GPIO_ENABLE); + + /* Default to input */ + sdstd_wreg16(sd, SD_GPIO_OE, 0); + sdstd_wreg16(sd, SD_GPIO_OE + 2, 0); + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_sleep(sdioh_info_t *sd, bool enab) +{ + SDIOH_API_RC status; + uint32 cmd_arg = 0, rsp1 = 0; + int retry = 100; + + sdstd_lock(sd); + + cmd_arg = SFIELD(cmd_arg, CMD14_RCA, sd->card_rca); + cmd_arg = SFIELD(cmd_arg, CMD14_SLEEP, enab); + + /* + * For ExitSleep: + * 1) Repeat CMD14 until R1 is received + * 2) Send CMD7 + */ + status = SDIOH_API_RC_FAIL; + while (retry-- > 0) { + if ((sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_14, cmd_arg)) == SUCCESS) { + status = SDIOH_API_RC_SUCCESS; + break; + } + OSL_DELAY(1400); + } + + if (status == SDIOH_API_RC_FAIL) { + sd_err(("%s: CMD14: failed! 
enable:%d\n", __FUNCTION__, enab)); + goto exit; + } + + sdstd_cmd_getrsp(sd, &rsp1, 1); + sd_info(("%s: CMD14 OK: cmd_resp:0x%x\n", __FUNCTION__, rsp1)); + + /* ExitSleep: Send CMD7 After R1 */ + if (enab == FALSE) { + /* Select the card */ + cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca); + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) != SUCCESS) { + sd_err(("%s: CMD14 send CMD7 failed!\n", __FUNCTION__)); + status = SDIOH_API_RC_FAIL; + goto exit; + } + + sdstd_cmd_getrsp(sd, &rsp1, 1); + if (rsp1 != SDIOH_CMD7_EXP_STATUS) { + sd_err(("%s: CMD7 response error. Response = 0x%x!\n", + __FUNCTION__, rsp1)); + status = SDIOH_API_RC_FAIL; + goto exit; + } + } + +exit: + sdstd_unlock(sd); + + return status; +} + +/* XXX Copied guts of request_byte and cmd_issue. Might make sense to fold this into + * those by passing another parameter indicating command type (abort). [But maybe + * keeping it separate is better -- if called internally on command failure it's less + * recursion to wrap your head around?] + */ +static int +sdstd_abort(sdioh_info_t *sd, uint func) +{ + int err = 0; + int retries; + + uint16 cmd_reg; + uint32 cmd_arg; + uint32 rsp5; + uint8 rflags; + + uint16 int_reg = 0; + uint16 plain_intstatus; + + /* Argument is write to F0 (CCCR) IOAbort with function number */ + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func); + + /* Command is CMD52 write */ + cmd_reg = 0; + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52); + + /* XXX Copied from cmd_issue(), but no SPI response handling! */ + if (sd->sd_mode == SDIOH_MODE_SPI) { + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + } + + /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */ + /* XXX For a single-threaded driver, what circumstances would result + * in cmd_inhibit being on but going off in a short time? Experiment + * shows a HW command timeout doesn't leave inhibit on, so maybe a SW + * timeout? Then that command should be responsible for clearing... 
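+	 * (Per the SD Host Controller spec the command register must not be
+	 * written while CMD_INHIBIT is set, hence the bounded poll below.)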
+ */ + retries = RETRIES_SMALL; + while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) { + if (retries == RETRIES_SMALL) + sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n", + __FUNCTION__, sdstd_rreg(sd, SD_PresentState))); + if (!--retries) { + sd_err(("%s: Command Inhibit timeout, state 0x%08x\n", + __FUNCTION__, sdstd_rreg(sd, SD_PresentState))); + if (trap_errs) + ASSERT(0); + err = BCME_SDIO_ERROR; + goto done; + } + } + + /* Clear errors from any previous commands */ + if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) { + sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus)); + sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus); + } + plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus); + if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) { + sd_err(("abort: intstatus 0x%04x\n", plain_intstatus)); + if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) { + sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n")); + } + if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) { + sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n")); + err = BCME_NODEVICE; + goto done; + } + } + + /* Issue the command */ + sdstd_wreg(sd, SD_Arg0, cmd_arg); + sdstd_wreg16(sd, SD_Command, cmd_reg); + + /* In interrupt mode return, expect later CMD_COMPLETE interrupt */ + if (!sd->polled_mode) + return err; + + /* Otherwise, wait for the command to complete */ + retries = RETRIES_LARGE; + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && + (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) && + (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0)); + + /* If command completion fails, do a cmd reset and note the error */ + if (!retries) { + sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + + sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); + retries = RETRIES_LARGE; + do { + sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__)); + } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset), + SW_RESET_CMD)) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); + } + + if (trap_errs) + ASSERT(0); + + err = BCME_SDIO_ERROR; + } + + /* Clear Command Complete interrupt */ + int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1); + sdstd_wreg16(sd, SD_IntrStatus, int_reg); + + /* Check for Errors */ + if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) { + sd_err(("%s: ErrorintrStatus: 0x%x, " + "(intrstatus = 0x%x, present state 0x%x) clearing\n", + __FUNCTION__, plain_intstatus, + sdstd_rreg16(sd, SD_IntrStatus), + sdstd_rreg(sd, SD_PresentState))); + + sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus); + + sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1)); + retries = RETRIES_LARGE; + do { + sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__)); + } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset), + SW_RESET_DAT)) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__)); + } + + if (trap_errs) + ASSERT(0); + + /* ABORT is dataless, only cmd errs count */ + /* XXX But what about busy timeout? Response valid? 
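+	 * (Only ERRINT_CMD_ERRS is folded into err below; the CMD52 abort is
+	 * dataless, so data-line errors are not expected here.)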
*/ + if (plain_intstatus & ERRINT_CMD_ERRS) + err = BCME_SDIO_ERROR; + } + + /* If command failed don't bother looking at response */ + if (err) + goto done; + + /* Otherwise, check the response */ + sdstd_cmd_getrsp(sd, &rsp5, 1); + rflags = GFIELD(rsp5, RSP5_FLAGS); + + if (rflags & SD_RSP_R5_ERRBITS) { + sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags)); + + /* The CRC error flag applies to the previous command */ + if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) { + err = BCME_SDIO_ERROR; + goto done; + } + } + + if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) && + ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) { + sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags)); + err = BCME_SDIO_ERROR; + goto done; + } + + if (GFIELD(rsp5, RSP5_STUFF)) { + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + err = BCME_SDIO_ERROR; + goto done; + } + +done: + if (err == BCME_NODEVICE) + return err; + + /* XXX As per spec 3.7.1 (and to be safe) do the resets here */ + sdstd_wreg8(sd, SD_SoftwareReset, + SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1)); + + retries = RETRIES_LARGE; + do { + rflags = sdstd_rreg8(sd, SD_SoftwareReset); + if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD)) + break; + } while (--retries); + + if (!retries) { + sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n", + __FUNCTION__, rflags)); + err = BCME_SDIO_ERROR; + } + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint fnum) +{ + int ret; + + sdstd_lock(sd); + ret = sdstd_abort(sd, fnum); + sdstd_unlock(sd); + + return ret; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + sdstd_waitlockfree(sd); + return SUCCESS; +} + +static int +sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg) +{ + uint16 regval; + uint retries; + uint function = 0; + + /* If no errors, we're done */ + if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0) + return SUCCESS; + +#ifdef BCMQT + if (regval == 0xFFFF) { + /* XXX - Getting bogus errors under QT + * XXX - Not sure why; Just ignore for now + */ + sd_err(("%s: Bogus SD_ErrorIntrStatus: 0x%x????\n", __FUNCTION__, regval)); + sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval); + return SUCCESS; + } +#endif + + sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n", + __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus), + sdstd_rreg(sdioh_info, SD_PresentState))); + sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval); + + if (cmd == SDIOH_CMD_14) { + if (regval & ERRINT_CMD_TIMEOUT_BIT) { + /* PR 101351: sdiod_aos sleep followed by immediate wakeup + * before sdiod_aos takes over has a problem. + * Getting command timeouts while exiting sleep + * with CMD14. Ignore this error due to this PR. 
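+			 * (Masking the timeout bit here keeps the CMD-reset path
+			 * below from firing for this expected, recoverable case.)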
+ */ + regval &= ~ERRINT_CMD_TIMEOUT_BIT; + } + } + + /* On command error, issue CMD reset */ + if (regval & ERRINT_CMD_ERRS) { + sd_trace(("%s: issuing CMD reset\n", __FUNCTION__)); + sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); + for (retries = RETRIES_LARGE; retries; retries--) + if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD))) + break; + if (!retries) { + sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); + } + } + + /* On data error, issue DAT reset */ + if (regval & ERRINT_DATA_ERRS) { + if (regval & ERRINT_ADMA_BIT) + sd_err(("%s:ADMAError: status:0x%x\n", + __FUNCTION__, sdstd_rreg(sdioh_info, SD_ADMA_ErrStatus))); + sd_trace(("%s: issuing DAT reset\n", __FUNCTION__)); + sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1)); + for (retries = RETRIES_LARGE; retries; retries--) + if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT))) + break; + if (!retries) { + sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__)); + } + } + + /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */ + if (cmd == SDIOH_CMD_53) + function = GFIELD(arg, CMD53_FUNCTION); + else if (cmd == SDIOH_CMD_52) { + /* PR 101351: sdiod_aos sleep followed by immediate wakeup + * before sdiod_aos takes over has a problem. + */ + if (GFIELD(arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR) + function = GFIELD(arg, CMD52_FUNCTION); + } + if (function) { + sd_trace(("%s: requesting abort for function %d after cmd %d\n", + __FUNCTION__, function, cmd)); + sdstd_abort(sdioh_info, function); + } + + if (trap_errs) + ASSERT(0); + + return ERROR; +} + +#ifdef BCMINTERNAL +extern SDIOH_API_RC +sdioh_test_diag(sdioh_info_t *sd) +{ + sd_err(("%s: Implement me\n", __FUNCTION__)); + return (0); +} +#endif /* BCMINTERNAL */ + +/* + * Private/Static work routines + */ +static bool +sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset) +{ + int retries = RETRIES_LARGE; + uchar regval; + + if (!sd) + return TRUE; + + sdstd_lock(sd); + /* Reset client card */ + if (client_reset && (sd->adapter_slot != -1)) { + if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS) + sd_err(("%s: Cannot write to card reg 0x%x\n", + __FUNCTION__, SDIOD_CCCR_IOABORT)); + else + sd->card_rca = 0; + } + + /* Reset host controller */ + if (host_reset) { + regval = SFIELD(0, SW_RESET_ALL, 1); + sdstd_wreg8(sd, SD_SoftwareReset, regval); + do { + sd_trace(("%s: waiting for reset\n", __FUNCTION__)); + } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__)); + sdstd_unlock(sd); + return (FALSE); + } + + /* A reset should reset bus back to 1 bit mode */ + sd->sd_mode = SDIOH_MODE_SD1; + sdstd_set_dma_mode(sd, sd->sd_dma_mode); + } + sdstd_unlock(sd); + return TRUE; +} + +/* Disable device interrupt */ +void +sdstd_devintr_off(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + if (sd->use_client_ints) { + sd->intmask &= ~CLIENT_INTR; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ + } +} + +/* Enable device interrupt */ +void +sdstd_devintr_on(sdioh_info_t *sd) +{ + ASSERT(sd->lockcount == 0); + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + if (sd->use_client_ints) { + if (sd->version < HOST_CONTR_VER_3) { + uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable); + sdstd_wreg16(sd, 
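+			    /* (pre-3.0 hosts: pulse the card-interrupt status
+			     * enable low and back high to re-arm detection of the
+			     * level-triggered card interrupt) */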
SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0)); + sdstd_wreg16(sd, SD_IntrStatusEnable, status); + } + + sd->intmask |= CLIENT_INTR; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ + } +} + +#ifdef BCMSDYIELD +/* Enable/disable other interrupts */ +void +sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err) +{ + if (err) { + norm = SFIELD(norm, INTSTAT_ERROR_INT, 1); + sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err); + } + + sd->intmask |= norm; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + if (sd_forcerb) + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ +} + +void +sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err) +{ + if (err) { + norm = SFIELD(norm, INTSTAT_ERROR_INT, 1); + sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0); + } + + sd->intmask &= ~norm; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + if (sd_forcerb) + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ +} +#endif /* BCMSDYIELD */ + +static int +sdstd_host_init(sdioh_info_t *sd) +{ + int num_slots, full_slot; + uint8 reg8; + uint32 card_ins; + int slot, first_bar = 0; + bool detect_slots = FALSE; +#ifdef _WIN32 + NDIS_PHYSICAL_ADDRESS bar; +#else + uint bar; +#endif + + /* Check for Arasan ID */ + if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) { + sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_ARASAN_HDK; + detect_slots = TRUE; + /* Controller supports SDMA, so turn it on here. */ + sd->sd_dma_mode = DMA_MODE_SDMA; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) { + sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_BCM27XX; + detect_slots = FALSE; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) { + sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_TI_PCIXX21; + detect_slots = TRUE; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) { + sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n", + __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_RICOH_R5C822; + detect_slots = TRUE; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) { + sd_info(("%s: JMicron Standard SDIO Host Controller\n", + __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_JMICRON; + detect_slots = TRUE; +#ifdef BCMINTERNAL + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JINVANI) { + sd_info(("%s: Found Jinvani Standard SDIO Host Controller\n", __FUNCTION__)); + detect_slots = FALSE; + sd->controller_type = SDIOH_TYPE_JINVANI_GOLD; +#endif /* BCMINTERNAL */ + } else { + return ERROR; + } + + /* + * Determine num of slots + * Search each slot + */ + + first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7; + num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4; + num_slots &= 7; + num_slots++; /* map bits to num slots according to spec */ + + /* XXX Since the sdio20h core does not present the proper SD_SlotInfo + * register at PCI config space offset 0x40, fake it here. Also, + * set the BAR0 window to point to the sdio20h core. 
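+	 * (When present, SD_SlotInfo encodes the first BAR number in bits
+	 * [2:0] and the slot count minus one in bits [6:4], which is what the
+	 * decode above implements.)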
+ */ + if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) == + ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) { + sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__)); + /* Set BAR0 Window to SDIOSTH core */ + OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000); + + /* Set defaults particular to this controller. */ + detect_slots = TRUE; + num_slots = 1; + first_bar = 0; + + /* Controller supports ADMA2, so turn it on here. */ + sd->sd_dma_mode = DMA_MODE_ADMA2; + } + + /* Map in each slot on the board and query it to see if a + * card is inserted. Use the first populated slot found. + */ + if (sd->mem_space) { + sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ); + sd->mem_space = NULL; + } + + full_slot = -1; + + for (slot = 0; slot < num_slots; slot++) { +/* XXX :Ugly define, is there a better way */ +#ifdef _WIN32 + bar.HighPart = 0; + bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + + (4*(slot + first_bar)), 4); + sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, + (int32)&bar, SDIOH_REG_WINSZ); +#else + bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4); + sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, + (uintptr)bar, SDIOH_REG_WINSZ); +#endif + + sd->adapter_slot = -1; + + if (detect_slots) { + card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT); + } else { + card_ins = TRUE; + } + + if (card_ins) { + sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot)); + if (full_slot < 0) + full_slot = slot; + } else { + sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot)); + } + + if (sd->mem_space) { + sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ); + sd->mem_space = NULL; + } + } + + if (full_slot < 0) { + sd_err(("No slots on SDIO controller are populated\n")); + return -1; + } + +/* XXX :Ugly define, is there a better way */ +#ifdef _WIN32 + bar.HighPart = 0; + bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4); + sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (int32)&bar, SDIOH_REG_WINSZ); +#else + bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4); + sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ); +#endif + + sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n", + full_slot, + (full_slot + first_bar), + OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4), + sd->mem_space)); + + sd->adapter_slot = full_slot; + + sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF; + switch (sd->version) { + case 0: + sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n", + sdstd_rreg16(sd, SD_HostControllerVersion) >> 8)); + break; + case 1: + sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n", + sdstd_rreg16(sd, SD_HostControllerVersion) >> 8)); + break; + case 2: + sd_err(("Host Controller version 3.0, Vendor Revision: 0x%02x\n", + sdstd_rreg16(sd, SD_HostControllerVersion) >> 8)); + break; + default: + sd_err(("%s: Host Controller version 0x%02x not supported.\n", + __FUNCTION__, sd->version)); + break; + } + + sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */ + /* MSB 32 bits of caps supported in sdio 3.0 */ + sd->caps3 = sdstd_rreg(sd, SD_Capabilities3); /* Cache this for later use */ + sd3_trace(("sd3: %s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps)); + sd3_trace(("sd3: %s: caps3: 0x%x\n", __FUNCTION__, sd->caps3)); + sd->curr_caps = 
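+	                /* (note: the sd3_trace() above logs curr_caps before
+	                 * this read refreshes it, so that trace shows the
+	                 * stale value) */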
sdstd_rreg(sd, SD_MaxCurCap); + + sd_info(("%s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps)); + + sdstd_set_dma_mode(sd, sd->sd_dma_mode); + +#if defined(BCMINTERNAL) + if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) == + ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) { + sd_err(("* * * SDIO20H FPGA Build Date: 0x%04x\n", sdstd_rreg(sd, 0x110))); + } + + if (GFIELD(sd->caps, CAP_MAXBLOCK) == 0x3) { + sd_info(("SD HOST CAPS: Max block size is INVALID\n")); + } else { + sd_info(("SD HOST CAPS: Max block size is %d bytes\n", + 512 << GFIELD(sd->caps, CAP_MAXBLOCK))); + } + + sd_info(("SD HOST CAPS: 64-bit DMA is %ssupported.\n", + GFIELD(sd->caps, CAP_64BIT_HOST) ? "" : "not ")); + sd_info(("SD HOST CAPS: Suspend/Resume is %ssupported.\n", + GFIELD(sd->caps, CAP_SUSPEND) ? "" : "not ")); + + sd_err(("SD HOST CAPS: SD Host supports ")); + if (GFIELD(sd->caps, CAP_VOLT_3_3)) { + sd_err(("3.3V")); + if (GFIELD(sd->curr_caps, CAP_CURR_3_3)) { + sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_3))); + } + } + if (GFIELD(sd->caps, CAP_VOLT_3_0)) { + sd_err((", 3.0V")); + if (GFIELD(sd->curr_caps, CAP_CURR_3_0)) { + sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_0))); + } + } + if (GFIELD(sd->caps, CAP_VOLT_1_8)) { + sd_err((", 1.8V")); + if (GFIELD(sd->curr_caps, CAP_CURR_1_8)) { + sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_1_8))); + } + } + sd_err(("\n")); +#endif /* defined(BCMINTERNAL) */ + + sdstd_reset(sd, 1, 0); + + /* Read SD4/SD1 mode */ + if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) { + if (reg8 & SD4_MODE) { + sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n", + __FUNCTION__, reg8)); + } + } + + /* Default power on mode is SD1 */ + sd->sd_mode = SDIOH_MODE_SD1; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = full_slot; + + /* XXX: If sd_uhsimode is disabled, which means that, use the HC in SDIO 2.0 mode. */ + if (sd_uhsimode == SD3CLKMODE_DISABLED) { + sd->version = HOST_CONTR_VER_2; + sd3_trace(("%s:forcing to SDIO HC 2.0\n", __FUNCTION__)); + } + + if (sd->version == HOST_CONTR_VER_3) { + /* read host ctrl 2 */ + uint16 reg16 = 0; + sd3_trace(("sd3: %s: HC3: reading additional regs\n", __FUNCTION__)); + + reg16 = sdstd_rreg16(sd, SD3_HostCntrl2); + + sd_info(("%s: HCtrl: 0x%x; HCtrl2:0x%x\n", __FUNCTION__, reg8, reg16)); + BCM_REFERENCE(reg16); + + /* if HC supports 1.8V and one of the SDR/DDR modes, hc uhci support is PRESENT */ + if ((GFIELD(sd->caps, CAP_VOLT_1_8)) && + (GFIELD(sd->caps3, CAP3_SDR50_SUP) || + GFIELD(sd->caps3, CAP3_SDR104_SUP) || + GFIELD(sd->caps3, CAP3_DDR50_SUP))) + sd->host_UHSISupported = 1; + } + +#ifdef BCMQT + { + uint32 intmask; + + /* FIX: force interrupts with QT sdio20 host */ + /* pci cw [expr $def(configbase) +0x95] 1 2 */ + intmask = OSL_PCI_READ_CONFIG(sd->osh, PCI_INT_MASK, 4); + intmask |= 0x0200; + OSL_PCI_WRITE_CONFIG(sd->osh, PCI_INT_MASK, 4, intmask); + } +#endif + return (SUCCESS); +} +#define CMD5_RETRIES 200 +static int +get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp) +{ + int retries, status; + + /* Get the Card's Operation Condition. 
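+	 * (CMD5, IO_SEND_OP_COND; its R4 response carries the ready bit
+	 * polled below as RSP4_CARD_READY.)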
Occasionally the board + * takes a while to become ready + */ + retries = CMD5_RETRIES; + do { + *cmd_rsp = 0; + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg)) + != SUCCESS) { + sd_err(("%s: CMD5 failed\n", __FUNCTION__)); + return status; + } + sdstd_cmd_getrsp(sd, cmd_rsp, 1); + if (!GFIELD(*cmd_rsp, RSP4_CARD_READY)) + sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__)); + } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries); + if (!retries) + return ERROR; + + return (SUCCESS); +} + +static int +sdstd_client_init(sdioh_info_t *sd) +{ + uint32 cmd_arg, cmd_rsp; + int status; + uint8 fn_ints; + uint32 regdata; + uint16 powerstat = 0; + +#ifdef BCMINTERNAL +#ifdef NOTUSED + /* Handy routine to dump capabilities. */ + static char caps_buf[500]; + parse_caps(sd->caps, caps_buf, 500); + sd_err((caps_buf)); +#endif /* NOTUSED */ +#endif /* BCMINTERNAL */ + + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + + /* Clear any pending ints */ + sdstd_wreg16(sd, SD_IntrStatus, 0x1fff); + sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff); + + /* Enable both Normal and Error Status. This does not enable + * interrupts, it only enables the status bits to + * become 'live' + */ + + if (!sd->host_UHSISupported) + sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff); + else + { + /* INT_x interrupts, but DO NOT enable signalling [enable retuning + * will happen later] + */ + sdstd_wreg16(sd, SD_IntrStatusEnable, 0x0fff); + } + sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff); + + sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */ + + if (sd->host_UHSISupported) { + /* when HC is started for SDIO 3.0 mode, start in lowest voltage mode first. */ + powerstat = sdstd_start_power(sd, 1); + if (SDIO_OCR_READ_FAIL == powerstat) { + /* This could be because the device is 3.3V, and possible does + * not have sdio3.0 support. So, try in highest voltage + */ + sd_err(("sdstd_start_power: legacy device: trying highest voltage\n")); + sd_err(("%s failed\n", __FUNCTION__)); + return ERROR; + } else if (TRUE != powerstat) { + sd_err(("sdstd_start_power failed\n")); + return ERROR; + } + } else + /* XXX legacy driver: start in highest voltage mode first. + * CAUTION: trying to start a legacy dhd with sdio3.0HC and sdio3.0 device could + * burn the sdio3.0device if the device has started in 1.8V. + */ + if (TRUE != sdstd_start_power(sd, 0)) { + sd_err(("sdstd_start_power failed\n")); + return ERROR; + } + + if (sd->num_funcs == 0) { + sd_err(("%s: No IO funcs!\n", __FUNCTION__)); + return ERROR; + } + + /* In SPI mode, issue CMD0 first */ + if (sd->sd_mode == SDIOH_MODE_SPI) { + cmd_arg = 0; + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg)) + != SUCCESS) { + sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n")); + return status; + } + } + + if (sd->sd_mode != SDIOH_MODE_SPI) { + uint16 rsp6_status; + + /* Card is operational. Ask it to send an RCA */ + cmd_arg = 0; + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg)) + != SUCCESS) { + sd_err(("%s: CMD3 failed!\n", __FUNCTION__)); + return status; + } + + /* Verify the card status returned with the cmd response */ + sdstd_cmd_getrsp(sd, &cmd_rsp, 1); + rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS); + if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) || + GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) || + GFIELD(rsp6_status, RSP6STAT_ERROR)) { + sd_err(("%s: CMD3 response error. 
Response = 0x%x!\n", + __FUNCTION__, rsp6_status)); + return ERROR; + } + + /* Save the Card's RCA */ + sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA); + sd_info(("RCA is 0x%x\n", sd->card_rca)); + + if (rsp6_status) + sd_err(("raw status is 0x%x\n", rsp6_status)); + + /* Select the card */ + cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca); + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) + != SUCCESS) { + sd_err(("%s: CMD7 failed!\n", __FUNCTION__)); + return status; + } + sdstd_cmd_getrsp(sd, &cmd_rsp, 1); + if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) { + sd_err(("%s: CMD7 response error. Response = 0x%x!\n", + __FUNCTION__, cmd_rsp)); + return ERROR; + } + } + + /* Disable default/power-up device Card Detect (CD) pull up resistor on DAT3 + * via CCCR bus interface control register. Set CD disable bit while leaving + * others alone. + */ + if (sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, ®data) != SUCCESS) { + sd_err(("Disabling card detect: read of device CCCR BICTRL register failed\n")); + return ERROR; + } + regdata |= BUS_CARD_DETECT_DIS; + + if (sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata) != SUCCESS) { + sd_err(("Disabling card detect: write of device CCCR BICTRL register failed\n")); + return ERROR; + } + + sdstd_card_enablefuncs(sd); + + if (!sdstd_bus_width(sd, sd_sdmode)) { + sd_err(("sdstd_bus_width failed\n")); + return ERROR; + } + + set_client_block_size(sd, 1, sd_f1_blocksize); + fn_ints = INTR_CTL_FUNC1_EN; + + if (sd->num_funcs >= 2) { + /* XXX Device side can't handle 512 yet */ + set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */); + fn_ints |= INTR_CTL_FUNC2_EN; + } + + /* Enable/Disable Client interrupts */ + /* Turn on here but disable at host controller? */ + if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1, + (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) { + sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__)); + return ERROR; + } + + if (sd_uhsimode != SD3CLKMODE_DISABLED) { + /* Switch to High-speed clocking mode if both host and device support it */ + if (sdstd_3_clock_wrapper(sd) != SUCCESS) { + sd_err(("sdstd_3_clock_wrapper failed\n")); + return ERROR; + } + } else + { + if (sdstd_clock_wrapper(sd)) { + sd_err(("sdstd_start_clock failed\n")); + return ERROR; + } + } + sd->card_init_done = TRUE; + + return SUCCESS; +} + +static int +sdstd_clock_wrapper(sdioh_info_t *sd) +{ + sd_trace(("%s:Enter\n", __FUNCTION__)); + /* After configuring for High-Speed mode, set the desired clock rate. */ + sdstd_set_highspeed_mode(sd, (bool)sd_hiok); + + if (FALSE == sdstd_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("sdstd_start_clock failed\n")); + return ERROR; + } + return SUCCESS; +} + +static int +sdstd_3_clock_wrapper(sdioh_info_t *sd) +{ + int retclk = 0; + sd_info(("%s: Enter\n", __FUNCTION__)); + if (sd->card_UHSI_voltage_Supported) { + /* check if clk config requested is supported by both host and target. */ + retclk = sdstd_3_get_matching_uhsi_clkmode(sd, sd_uhsimode); + + /* if no match for requested caps, try to get the max match possible */ + if (retclk == -1) { + /* if auto enabled */ + if (sd3_autoselect_uhsi_max == 1) { + retclk = sdstd_3_get_matching_uhsi_clkmode(sd, SD3CLKMODE_AUTO); + /* still NO match */ + if (retclk == -1) { + /* NO match with HC and card capabilities. Now try the + * High speed/legacy mode if possible. + */ + + sd_err(("%s: Not able to set requested clock\n", + __FUNCTION__)); + return ERROR; + } + } else { + /* means user doesn't want auto clock. 
So return ERROR */
+				sd_err(("%s: Not able to set requested clock, try "
+					"auto mode\n", __FUNCTION__));
+				return ERROR;
+			}
+		}
+
+		if (retclk != -1) {
+			/* set the current clk to be selected clock */
+			sd_uhsimode = retclk;
+
+			if (BCME_OK != sdstd_3_set_highspeed_uhsi_mode(sd, sd_uhsimode)) {
+				sd_err(("%s: Not able to set requested clock\n", __FUNCTION__));
+				return ERROR;
+			}
+		} else {
+			/* try legacy mode */
+			if (SUCCESS != sdstd_clock_wrapper(sd)) {
+				sd_err(("sdstd_start_clock failed\n"));
+				return ERROR;
+			}
+		}
+	} else {
+		sd_info(("%s: Legacy Mode Clock\n", __FUNCTION__));
+		/* try legacy mode */
+		if (SUCCESS != sdstd_clock_wrapper(sd)) {
+			sd_err(("%s sdstd_clock_wrapper failed\n", __FUNCTION__));
+			return ERROR;
+		}
+	}
+	return SUCCESS;
+}
+
+int
+sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode)
+{
+	int status, lcount = 0, brr_count = 0;
+	uint16 val1 = 0, bufready = 0;
+	uint32 val2 = 0;
+	uint8 phase_info_local = 0;
+
+	sd3_trace(("sd3: %s: Enter\n", __FUNCTION__));
+	/* if (NOT SDR104) OR
+	 * (SDR_50 AND sdr50_tuning_reqd is NOT enabled)
+	 * return success, as tuning not reqd.
+	 */
+	if (!sd->sd3_tuning_reqd) {
+		sd_info(("%s: Tuning NOT reqd!\n", __FUNCTION__));
+		return SUCCESS;
+	}
+
+	/* execute tuning procedure */
+
+	/* enable Buffer ready status. [do not enable the interrupt right now] */
+	/* Execute tuning */
+	sd_trace(("sd3: %s: Execute tuning\n", __FUNCTION__));
+	val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+	val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 1);
+	sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+
+	do {
+		sd3_trace(("sd3: %s: cmd19 issue\n", __FUNCTION__));
+		/* Issue cmd19 */
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_19, 0))
+			!= SUCCESS) {
+			sd_err(("%s: CMD19 failed\n", __FUNCTION__));
+			val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+			val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+			val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+			sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+			return status;
+		}
+
+		/* wait for buffer read ready */
+		brr_count = 0;
+		do {
+			bufready = sdstd_rreg16(sd, SD_IntrStatus);
+
+			if (GFIELD(bufready, INTSTAT_BUF_READ_READY))
+				break;
+
+			/* delay after checking bufready because INTSTAT_BUF_READ_READY
+			   is most likely already set in the first check
+			 */
+			OSL_DELAY(1);
+		} while (++brr_count < CLKTUNING_MAX_BRR_RETRIES);
+
+		/* buffer read ready timed out */
+		if (brr_count == CLKTUNING_MAX_BRR_RETRIES) {
+			sd_err(("%s: TUNINGFAILED: BRR response timed out!\n",
+				__FUNCTION__));
+			val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+			val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+			val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+			sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+			return ERROR;
+		}
+
+		/* In response to CMD19 the card sends 64 magic bytes.
+		 * The current Aizyc HC h/w doesn't auto-clear those bytes,
+		 * so read the 64 bytes sent by the card.
+		 * Aizyc needs to implement an auto-clear in hw.
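+		 * For reference: CMD19 (send tuning block) returns a fixed
+		 * 64-byte tuning pattern in 4-bit mode, which the loop below
+		 * drains as sixteen 32-bit reads from SD_BufferDataPort0.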
+ */ + if (sd3_sw_read_magic_bytes == TRUE) + { + uint8 l_cnt_1 = 0; + uint32 l_val_1 = 0; + for (l_cnt_1 = 0; l_cnt_1 < 16; l_cnt_1++) { + l_val_1 = sdstd_rreg(sd, SD_BufferDataPort0); + sd_trace(("%s:l_val_1 = 0x%x", __FUNCTION__, l_val_1)); + } + BCM_REFERENCE(l_val_1); + } + + /* clear BuffReadReady int */ + bufready = SFIELD(bufready, INTSTAT_BUF_READ_READY, 1); + sdstd_wreg16(sd, SD_IntrStatus, bufready); + + /* wait before continuing */ + /* OSL_DELAY(PER_TRY_TUNING_DELAY_MS * 1000); */ /* Not required */ + + /* check execute tuning bit */ + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + if (!GFIELD(val1, HOSTCtrl2_EXEC_TUNING)) { + /* done tuning, break from loop */ + break; + } + + /* max tuning iterations exceeded */ + if (lcount++ > MAX_TUNING_ITERS) { + sd_err(("%s: TUNINGFAILED: Max tuning iterations" + "exceeded!\n", __FUNCTION__)); + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0); + val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0); + sdstd_wreg16(sd, SD3_HostCntrl2, val1); + return ERROR; + } + } while (1); + + val2 = sdstd_rreg(sd, SD3_Tuning_Info_Register); + phase_info_local = ((val2>>15)& 0x7); + sd_info(("Phase passed info: 0x%x\n", (val2>>8)& 0x3F)); + sd_info(("Phase selected post tune: 0x%x\n", phase_info_local)); + + if (phase_info_local > SDSTD_MAX_TUNING_PHASE) { + sd_err(("!!Phase selected:%x\n", phase_info_local)); + } + + /* check sampling clk select */ + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + if (!GFIELD(val1, HOSTCtrl2_SAMPCLK_SEL)) { + /* error in selecting clk */ + sd_err(("%s: TUNINGFAILED: SamplClkSel failed!\n", __FUNCTION__)); + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0); + val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0); + sdstd_wreg16(sd, SD3_HostCntrl2, val1); + return ERROR; + } +/* done: */ + sd_info(("%s: TUNING Success!\n", __FUNCTION__)); + return SUCCESS; +} + +void +sdstd_3_enable_retuning_int(sdioh_info_t *sd) +{ + uint16 raw_int; + unsigned long flags; + + sdstd_os_lock_irqsave(sd, &flags); + raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable); + sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int | HC_INTR_RETUNING)); + /* Enable retuning status */ + raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable); + sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int | HC_INTR_RETUNING)); + sdstd_os_unlock_irqrestore(sd, &flags); +} + +void +sdstd_3_disable_retuning_int(sdioh_info_t *sd) +{ + uint16 raw_int; + unsigned long flags; + + sdstd_os_lock_irqsave(sd, &flags); + sd->intmask &= ~HC_INTR_RETUNING; + raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable); + sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int & (~HC_INTR_RETUNING))); + /* Disable retuning status */ + raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable); + sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int & (~HC_INTR_RETUNING))); + sdstd_os_unlock_irqrestore(sd, &flags); +} + +bool +sdstd_3_is_retuning_int_set(sdioh_info_t *sd) +{ + uint16 raw_int; + + raw_int = sdstd_rreg16(sd, SD_IntrStatus); + + if (GFIELD(raw_int, INTSTAT_RETUNING_INT)) + return TRUE; + + return FALSE; +} + +/* + Assumption: sd3ClkMode is checked to be present in both host/card + capabilities before entering this function. 
VALID values for sd3ClkMode + in this function: SD3CLKMODE_2, 3, 4 [0 and 1 NOT supported as + they are legacy] For that, need to call + sdstd_3_get_matching_uhsi_clkmode() +*/ +static int +sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode) +{ + uint32 drvstrn; + int status; + uint8 hc_reg8; + uint16 val1 = 0, presetval = 0; + uint32 regdata; + + sd3_trace(("sd3: %s:enter:clkmode:%d\n", __FUNCTION__, sd3ClkMode)); + + hc_reg8 = sdstd_rreg8(sd, SD_HostCntrl); + + if (HOST_SDR_UNSUPP == sd->global_UHSI_Supp) { + sd_err(("%s:Trying to set clk with unsupported global support\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* get [double check, as this is already done in + sdstd_3_get_matching_uhsi_clkmode] drvstrn + */ + if (!sdstd_3_get_matching_drvstrn(sd, sd3ClkMode, &drvstrn, &presetval)) { + sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset" + "val:0x%x\n", __FUNCTION__, drvstrn, presetval)); + return BCME_SDIO_ERROR; + } + + /* also set driver type select in CCCR */ + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH, + 1, drvstrn)) != BCME_OK) { + sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in card Failed!\n", __FUNCTION__)); + return BCME_SDIO_ERROR; + } + + /* ********** change Bus speed select in device */ + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != SUCCESS) { + sd_err(("%s:FAILED 1\n", __FUNCTION__)); + return BCME_SDIO_ERROR; + } + sd_info(("Attempting to change BSS.current val:0x%x\n", regdata)); + + if (regdata & SDIO_SPEED_SHS) { + sd_info(("Device supports High-Speed mode.\n")); + /* clear existing BSS */ + regdata &= ~0xE; + + regdata |= (sd3ClkMode << 1); + + sd_info(("Writing %08x to Card at %08x\n", + regdata, SDIOD_CCCR_SPEED_CONTROL)); + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, regdata)) != BCME_OK) { + sd_err(("%s:FAILED 2\n", __FUNCTION__)); + return BCME_SDIO_ERROR; + } + + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != BCME_OK) { + sd_err(("%s:FAILED 3\n", __FUNCTION__)); + return BCME_SDIO_ERROR; + } + + sd_info(("Read %08x from Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL)); + } + else { + sd_err(("Device does not support High-Speed Mode.\n")); + } + + /* SD Clock Enable = 0 */ + sdstd_wreg16(sd, SD_ClockCntrl, + sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); + + /* set to HighSpeed mode */ + /* TBD: is these to change SD_HostCntrl reqd for UHSI? */ + hc_reg8 = SFIELD(hc_reg8, HOST_HI_SPEED_EN, 1); + sdstd_wreg8(sd, SD_HostCntrl, hc_reg8); + + /* set UHS Mode select in HC2 and also set preset */ + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + val1 = SFIELD(val1, HOSTCtrl2_UHSMODE_SEL, sd3ClkMode); + if (TRUE != sd3_sw_override1) { + val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 1); + } else { + /* set hC registers manually using the retreived values */ + /* *set drvstrn */ + val1 = SFIELD(val1, HOSTCtrl2_DRIVSTRENGTH_SEL, + GFIELD(presetval, PRESET_DRIVR_SELECT)); + val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 0); + } + + /* finally write Hcontrol2 */ + sdstd_wreg16(sd, SD3_HostCntrl2, val1); + + sd_err(("%s:HostCtrl2 final value:0x%x\n", __FUNCTION__, val1)); + + /* start clock : clk will be enabled inside. 
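+	 * (The divider passed in comes from the preset-value register for
+	 * the selected UHS mode, via GFIELD(presetval, PRESET_CLK_DIV).)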
*/ + if (FALSE == sdstd_start_clock(sd, GFIELD(presetval, PRESET_CLK_DIV))) { + sd_err(("sdstd_start_clock failed\n")); + return ERROR; + } + + /* execute first tuning procedure */ + if (!sd3_sw_override1) { + if (SD3_TUNING_REQD(sd, sd3ClkMode)) { + sd_err(("%s: Tuning start..\n", __FUNCTION__)); + sd->sd3_tuning_reqd = TRUE; + /* TBD: first time: enabling INT's could be problem? */ + sdstd_3_start_tuning(sd); + } + else + sd->sd3_tuning_reqd = FALSE; + } + + return BCME_OK; +} + +/* Check & do tuning if required */ +void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param) +{ + int retries = 0; + + if (!sd->sd3_tuning_disable && sd->sd3_tuning_reqd) { + sd3_trace(("sd3: %s: tuning reqd\n", __FUNCTION__)); + if (tuning_param == CHECK_TUNING_PRE_DATA) { + if (sd->sd3_tun_state == TUNING_ONGOING) { + retries = RETRIES_SMALL; + /* check if tuning is already going on */ + while ((GFIELD(sdstd_rreg(sd, SD3_HostCntrl2), + HOSTCtrl2_EXEC_TUNING)) && retries--) { + if (retries == RETRIES_SMALL) + sd_err(("%s: Waiting for Tuning to complete\n", + __FUNCTION__)); + } + + if (!retries) { + sd_err(("%s: Tuning wait timeout\n", __FUNCTION__)); + if (trap_errs) + ASSERT(0); + } + } else if (sd->sd3_tun_state == TUNING_START) { + /* check and start tuning if required. */ + sd3_trace(("sd3 : %s : Doing Tuning before Data Transfer\n", + __FUNCTION__)); + sdstd_3_start_tuning(sd); + } + } else if (tuning_param == CHECK_TUNING_POST_DATA) { + if (sd->sd3_tun_state == TUNING_START_AFTER_DAT) { + sd3_trace(("sd3: %s: tuning start\n", __FUNCTION__)); + /* check and start tuning if required. */ + sdstd_3_start_tuning(sd); + } + } + } +} +/* Need to run this function in interrupt-disabled context */ +bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd) +{ + sd3_trace(("sd3: %s:\n", __FUNCTION__)); + + /* if already initiated, just return without anything */ + if ((sd->sd3_tun_state == TUNING_START) || + (sd->sd3_tun_state == TUNING_ONGOING) || + (sd->sd3_tun_state == TUNING_START_AFTER_DAT)) { + /* do nothing */ + return FALSE; + } + + if (sd->sd3_dat_state == DATA_TRANSFER_IDLE) { + sd->sd3_tun_state = TUNING_START; /* tuning to be started by the tasklet */ + return TRUE; + } else { + /* tuning to be started after finishing the existing data transfer */ + sd->sd3_tun_state = TUNING_START_AFTER_DAT; + } + return FALSE; +} + +int sdstd_3_get_data_state(sdioh_info_t *sd) +{ + return sd->sd3_dat_state; +} + +void sdstd_3_set_data_state(sdioh_info_t *sd, int state) +{ + sd->sd3_dat_state = state; +} + +int sdstd_3_get_tune_state(sdioh_info_t *sd) +{ + return sd->sd3_tun_state; +} + +void sdstd_3_set_tune_state(sdioh_info_t *sd, int state) +{ + sd->sd3_tun_state = state; +} + +uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd) +{ + if (sd_tuning_period == CAP3_RETUNING_TC_OTHER) { + return GFIELD(sd->caps3, CAP3_RETUNING_TC); + } else { + return (uint8)sd_tuning_period; + } +} + +uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd) +{ + return sd_uhsimode; +} + +/* check, to see if the card supports driver_type corr to the driver_type + in preset value, which will be selected by requested UHSI mode + input: + clk mode: valid values: SD3CLKMODE_2_SDR50, SD3CLKMODE_3_SDR104, + SD3CLKMODE_4_DDR50, SD3CLKMODE_AUTO + outputs: + return_val: TRUE; if a matching drvstrn for the given clkmode is + found in both HC and card. otherwise, FALSE. + [other outputs below valid ONLY if return_val is TRUE] + drvstrn : driver strength read from CCCR. + presetval: value of preset reg, corr to the clkmode. 
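+   For reference (SDIO 3.0): the card's CCCR driver-strength register
+   advertises its supported driver types (B mandatory; A, C, D optional)
+   as a bitmask, and the HC preset value names the type it wants; the
+   mask compare in this function rejects a clkmode whose preset driver
+   type the card does not support.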
+ */
+static bool
+sdstd_3_get_matching_drvstrn(sdioh_info_t *sd, int sd3_requested_clkmode,
+	uint32 *drvstrn, uint16 *presetval)
+{
+	int status;
+	uint8 presetreg;
+	uint8 cccr_reqd_dtype_mask = 1;
+
+	sd3_trace(("sd3: %s:\n", __FUNCTION__));
+
+	if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
+		/* CARD: get the card driver strength from cccr */
+		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
+			1, drvstrn)) != BCME_OK) {
+			sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card "
+				"failed!\n", __FUNCTION__));
+			return FALSE;
+		}
+		if (TRUE != sd3_sw_override1) {
+			/* HOSTC: get the addr of the preset register indexed by the clkmode */
+			presetreg = SD3_PresetValStart +
+				(2*sd3_requested_clkmode + 6);
+			*presetval = sdstd_rreg16(sd, presetreg);
+		} else {
+			/* Note: +3 for mapping between SD3CLKMODE_xxx and presetval_sw_table */
+			*presetval = presetval_sw_table[sd3_requested_clkmode + 3];
+		}
+		sd_err(("%s:reqCLK: %d, presetval: 0x%x\n",
+			__FUNCTION__, sd3_requested_clkmode, *presetval));
+
+		cccr_reqd_dtype_mask <<= GFIELD(*presetval, PRESET_DRIVR_SELECT);
+
+		/* compare/match */
+		if (!(cccr_reqd_dtype_mask & GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP))) {
+			sd_err(("%s:cccr_reqd_dtype_mask and SDIO_BUS_DRVR_TYPE_CAP "
+				"not matching!:reqd:0x%x, cap:0x%x\n", __FUNCTION__,
+				cccr_reqd_dtype_mask, GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP)));
+			return FALSE;
+		} else {
+			/* check if a drive strength override is required. If so, first set it */
+			if (*dhd_sdiod_uhsi_ds_override != DRVSTRN_IGNORE_CHAR) {
+				int ds_offset = 0;
+				uint32 temp = 0;
+
+				/* drvstrn reflects the preset val: this is the default */
+				*drvstrn = GFIELD(*presetval, PRESET_DRIVR_SELECT);
+
+				/* now check the override */
+				ds_offset = (((int)DRVSTRN_MAX_CHAR -
+					(int)(*dhd_sdiod_uhsi_ds_override)));
+				if ((ds_offset >= 0) && (ds_offset <= MAX_DTS_INDEX)) {
+					ds_offset = MAX_DTS_INDEX - ds_offset;
+					sd_err(("%s:Drive strength override: %c, offset: "
+						"%d, val: %d\n", __FUNCTION__,
+						*dhd_sdiod_uhsi_ds_override,
+						ds_offset, DTS_vals[ds_offset]));
+					temp = SFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_SEL,
+						DTS_vals[ds_offset]);
+					sd_err(("%s:DrvStrn orig: 0x%x, modif: 0x%x\n",
+						__FUNCTION__, *drvstrn, temp));
+					*drvstrn = temp;
+				} else {
+					/* else case is the default: use the preset val */
+					sd_err(("%s:override invalid: DrvStrn is from "
+						"preset: 0x%x\n",
+						__FUNCTION__, *drvstrn));
+				}
+			} else {
+				sd_err(("%s:DrvStrn is from preset: 0x%x\n",
+					__FUNCTION__, *drvstrn));
+			}
+		}
+	} else {
+		/* TBD: check for sd3_requested_clkmode == -1 also. */
+		sd_err(("%s: Automode not supported!\n", __FUNCTION__));
+		return FALSE;
+	}
+	return TRUE;
+}
+
+/* Returns the matching UHSI clk mode if one is found; if not, returns -1.
+   Also, if sd3_requested_clkmode is -1, finds the closest max match clk and returns it.
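+   Note: the auto ("closest max match") path is still TBD in the code
+   below and currently fails with -1.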
+ */ +static int +sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd, int sd3_requested_clkmode) +{ + uint32 card_val_uhsisupp; + uint8 speedmask = 1; + uint32 drvstrn; + uint16 presetval; + int status; + + sd3_trace(("sd3: %s:\n", __FUNCTION__)); + + sd->global_UHSI_Supp = HOST_SDR_UNSUPP; + + /* for legacy/25MHz/50MHz bus speeds, no checks done here */ + if ((sd3_requested_clkmode == SD3CLKMODE_0_SDR12) || + (sd3_requested_clkmode == SD3CLKMODE_1_SDR25)) { + sd->global_UHSI_Supp = HOST_SDR_12_25; + return sd3_requested_clkmode; + } + /* get cap of card */ + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_UHSI_SUPPORT, + 1, &card_val_uhsisupp)) != BCME_OK) { + sd_err(("%s:SDIOD_CCCR_UHSI_SUPPORT query failed!\n", __FUNCTION__)); + return -1; + } + sd_info(("%s:Read %08x from Card at %08x\n", __FUNCTION__, + card_val_uhsisupp, SDIOD_CCCR_UHSI_SUPPORT)); + + if (sd3_requested_clkmode != SD3CLKMODE_AUTO) { + /* Note: it is assumed that, following are executed when (sd3ClkMode >= 2) */ + speedmask <<= (sd3_requested_clkmode - SD3CLKMODE_2_SDR50); + + /* check first about 3.0 HS CLK modes */ + if (!(GFIELD(sd->caps3, CAP3_30CLKCAP) & speedmask)) { + sd_err(("%s:HC does not support req 3.0 UHSI mode." + "requested:%d; capable:0x%x\n", __FUNCTION__, + sd3_requested_clkmode, GFIELD(sd->caps3, CAP3_30CLKCAP))); + return -1; + } + + /* check first about 3.0 CARD CLK modes */ + if (!(GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP) & speedmask)) { + sd_err(("%s:Card does not support req 3.0 UHSI mode. requested:%d;" + "capable:0x%x\n", __FUNCTION__, sd3_requested_clkmode, + GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP))); + return -1; + } + + /* check, to see if the card supports driver_type corr to the + driver_type in preset value, which will be selected by + requested UHSI mode + */ + if (!sdstd_3_get_matching_drvstrn(sd, sd3_requested_clkmode, + &drvstrn, &presetval)) { + sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset" + "val:0x%x\n", __FUNCTION__, drvstrn, presetval)); + return -1; + } + /* success path. change the support variable accordingly */ + sd->global_UHSI_Supp = HOST_SDR_50_104_DDR; + return sd3_requested_clkmode; + } else { + /* auto clk selection: get the highest clock capable by both card and HC */ +/* TBD TOBE DONE */ +/* sd->global_UHSI_Supp = TRUE; on success */ + return -1; + } +} + +static int +sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd) +{ + int status; + uint32 cmd_rsp = 0, presst; + uint16 val1 = 0; + + sd3_trace(("sd3: %s:\n", __FUNCTION__)); + + /* Issue cmd11 */ + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_11, 0)) + != SUCCESS) { + sd_err(("%s: CMD11 failed\n", __FUNCTION__)); + return status; + } + + /* check response */ + sdstd_cmd_getrsp(sd, &cmd_rsp, 1); + if ( + GFIELD(cmd_rsp, RSP1_ERROR) || /* bit 19 */ + GFIELD(cmd_rsp, RSP1_ILLEGAL_CMD) || /* bit 22 */ + GFIELD(cmd_rsp, RSP1_COM_CRC_ERROR) || /* bit 23 */ + GFIELD(cmd_rsp, RSP1_CARD_LOCKED) /* bit 25 */ ) { + sd_err(("%s: FAIL:CMD11: cmd_resp:0x%x\n", __FUNCTION__, cmd_rsp)); + return ERROR; + } + + /* SD Clock Enable = 0 */ + sdstd_wreg16(sd, SD_ClockCntrl, + sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); + + /* check DAT[3..0] using Present State Reg. 
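+	   During the CMD11 switch sequence the card is expected to drive
+	   DAT[3:0] low until it is ready for 1.8V signalling, so the
+	   level sampled here must be 0.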
If not 0, error */ + presst = sdstd_rreg(sd, SD_PresentState); + if (0 != GFIELD(presst, PRES_DAT_SIGNAL)) { + sd_err(("%s: FAIL: PRESTT:0x%x\n", __FUNCTION__, presst)); + return ERROR; + } + + /* turn 1.8V sig enable in HC2 */ + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1); + sdstd_wreg16(sd, SD3_HostCntrl2, val1); + + /* wait 5ms */ + OSL_DELAY(5000); + + /* check 1.8V sig enable in HC2. if cleared, error */ + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + + if (!GFIELD(val1, HOSTCtrl2_1_8SIG_EN)) { + sd_err(("%s: FAIL: HC2:1.8V_En:0x%x\n", __FUNCTION__, val1)); + return ERROR; + } + + /* SD Clock Enable = 1 */ + val1 = sdstd_rreg16(sd, SD_ClockCntrl); + sdstd_wreg16(sd, SD_ClockCntrl, val1 | 0x4); + + /* wait 1ms */ + OSL_DELAY(1000); + + /* check DAT[3..0] using Present State Reg. If not 0b1111, error */ + presst = sdstd_rreg(sd, SD_PresentState); + if (0xf != GFIELD(presst, PRES_DAT_SIGNAL)) { + sd_err(("%s: FAIL: PRESTT_FINAL:0x%x\n", __FUNCTION__, presst)); + return ERROR; + } + + return (SUCCESS); +} + +static int +sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode) +{ + uint32 regdata; + int status; + uint8 reg8; + + uint32 drvstrn; + + reg8 = sdstd_rreg8(sd, SD_HostCntrl); + +#ifdef BCMINTERNAL + /* The Jinvani SD Gold Host forces the highest clock rate in high-speed mode */ + /* Only enable high-speed mode if the SD clock divisor is 1. */ + if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) { + if (sd_divisor != 1) { + HSMode = FALSE; + } + } +#endif /* BCMINTERNAL */ + + if (HSMode == TRUE) { + if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) { + sd_err(("Host Controller does not support hi-speed mode.\n")); + return BCME_ERROR; + } + + sd_info(("Attempting to enable High-Speed mode.\n")); + + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != SUCCESS) { + return BCME_SDIO_ERROR; + } + if (regdata & SDIO_SPEED_SHS) { + sd_info(("Device supports High-Speed mode.\n")); + + regdata |= SDIO_SPEED_EHS; + + sd_info(("Writing %08x to Card at %08x\n", + regdata, SDIOD_CCCR_SPEED_CONTROL)); + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, regdata)) != BCME_OK) { + return BCME_SDIO_ERROR; + } + + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != BCME_OK) { + return BCME_SDIO_ERROR; + } + + sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL)); + + reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1); + + sd_err(("High-speed clocking mode enabled.\n")); + } + else { + sd_err(("Device does not support High-Speed Mode.\n")); + reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0); + } + } else { + /* Force off device bit */ + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != BCME_OK) { + return status; + } + if (regdata & SDIO_SPEED_EHS) { + regdata &= ~SDIO_SPEED_EHS; + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, regdata)) != BCME_OK) { + return status; + } + } + + sd_err(("High-speed clocking mode disabled.\n")); + reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0); + } + + if ((sd->host_UHSISupported) && (sd->card_UHSI_voltage_Supported)) { + /* also set the default driver strength in the card/HC [this is reqd because, + if earlier we selected any other drv_strn, we need to reset it] + */ + /* get the card driver strength from cccr */ + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH, + 1, &drvstrn)) != BCME_OK) { + sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card" + "Failed!\n", 
__FUNCTION__)); + return BCME_SDIO_ERROR; + } + + /* reset card drv strn */ + drvstrn = SFIELD(drvstrn, SDIO_BUS_DRVR_TYPE_SEL, 0); + + /* set card drv strn */ + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH, + 1, drvstrn)) != BCME_OK) { + sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in" + "card Failed!\n", __FUNCTION__)); + return BCME_SDIO_ERROR; + } + } + + sdstd_wreg8(sd, SD_HostCntrl, reg8); + + return BCME_OK; +} + +/* Select DMA Mode: + * If dma_mode == DMA_MODE_AUTO, pick the "best" mode. + * Otherwise, pick the selected mode if supported. + * If not supported, use PIO mode. + */ +static int +sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode) +{ + uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE; + int8 prev_dma_mode = sd->sd_dma_mode; + + switch (prev_dma_mode) { + case DMA_MODE_AUTO: + sd_dma(("%s: Selecting best DMA mode supported by controller.\n", + __FUNCTION__)); + if (GFIELD(sd->caps, CAP_ADMA2)) { + sd->sd_dma_mode = DMA_MODE_ADMA2; + dma_sel_bits = SDIOH_ADMA2_MODE; + } else if (GFIELD(sd->caps, CAP_ADMA1)) { + sd->sd_dma_mode = DMA_MODE_ADMA1; + dma_sel_bits = SDIOH_ADMA1_MODE; + } else if (GFIELD(sd->caps, CAP_DMA)) { + sd->sd_dma_mode = DMA_MODE_SDMA; + } else { + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_NONE: + sd->sd_dma_mode = DMA_MODE_NONE; + break; + case DMA_MODE_SDMA: + if (GFIELD(sd->caps, CAP_DMA)) { + sd->sd_dma_mode = DMA_MODE_SDMA; + } else { + sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_ADMA1: + if (GFIELD(sd->caps, CAP_ADMA1)) { + sd->sd_dma_mode = DMA_MODE_ADMA1; + dma_sel_bits = SDIOH_ADMA1_MODE; + } else { + sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_ADMA2: + if (GFIELD(sd->caps, CAP_ADMA2)) { + sd->sd_dma_mode = DMA_MODE_ADMA2; + dma_sel_bits = SDIOH_ADMA2_MODE; + } else { + sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_ADMA2_64: + sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + break; + default: + sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__, + prev_dma_mode)); + sd->sd_dma_mode = DMA_MODE_NONE; + break; + } + + /* clear SysAddr, only used for SDMA */ + sdstd_wreg(sd, SD_SysAddr, 0); + + sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode])); + + reg8 = sdstd_rreg8(sd, SD_HostCntrl); + reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits); + sdstd_wreg8(sd, SD_HostCntrl, reg8); + sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8)); + + return BCME_OK; +} + +#ifdef BCMDBG +void +print_regs(sdioh_info_t *sd) +{ + uint8 reg8 = 0; + uint16 reg16 = 0; + uint32 reg32 = 0; + uint8 presetreg; + int i; + + reg8 = sdstd_rreg8(sd, SD_BlockSize); + printf("REGS: SD_BlockSize [004h]:0x%x\n", reg8); + + reg8 = sdstd_rreg8(sd, SD_BlockCount); + printf("REGS: SD_BlockCount [006h]:0x%x\n", reg8); + + reg8 = sdstd_rreg8(sd, SD_BlockSize); + printf("REGS: SD_BlockSize [004h]:0x%x\n", reg8); + + reg8 = sdstd_rreg8(sd, SD_TransferMode); + printf("REGS: SD_TransferMode [00Ch]:0x%x\n", reg8); + + reg8 = sdstd_rreg8(sd, SD_HostCntrl); + printf("REGS: SD_HostCntrl [028h]:0x%x\n", reg8); + + reg32 = sdstd_rreg(sd, SD_PresentState); + printf("REGS: SD_PresentState [024h]:0x%x\n", reg32); + + reg8 = sdstd_rreg8(sd, SD_PwrCntrl); + printf("REGS: SD_PwrCntrl [029h]:0x%x\n", 
reg8); + + reg8 = sdstd_rreg8(sd, SD_BlockGapCntrl); + printf("REGS: SD_BlockGapCntrl [02Ah]:0x%x\n", reg8); + + reg8 = sdstd_rreg8(sd, SD_WakeupCntrl); + printf("REGS: SD_WakeupCntrl [02Bh]:0x%x\n", reg8); + + reg16 = sdstd_rreg16(sd, SD_ClockCntrl); + printf("REGS: SD_ClockCntrl [02Ch]:0x%x\n", reg16); + + reg8 = sdstd_rreg8(sd, SD_TimeoutCntrl); + printf("REGS: SD_TimeoutCntrl [02Eh]:0x%x\n", reg8); + + reg8 = sdstd_rreg8(sd, SD_SoftwareReset); + printf("REGS: SD_SoftwareReset [02Fh]:0x%x\n", reg8); + + reg16 = sdstd_rreg16(sd, SD_IntrStatus); + printf("REGS: SD_IntrStatus [030h]:0x%x\n", reg16); + + reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatus); + printf("REGS: SD_ErrorIntrStatus [032h]:0x%x\n", reg16); + + reg16 = sdstd_rreg16(sd, SD_IntrStatusEnable); + printf("REGS: SD_IntrStatusEnable [034h]:0x%x\n", reg16); + + reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable); + printf("REGS: SD_ErrorIntrStatusEnable [036h]:0x%x\n", reg16); + + reg16 = sdstd_rreg16(sd, SD_IntrSignalEnable); + printf("REGS: SD_IntrSignalEnable [038h]:0x%x\n", reg16); + + reg16 = sdstd_rreg16(sd, SD_ErrorIntrSignalEnable); + printf("REGS: SD_ErrorIntrSignalEnable [03Ah]:0x%x\n", reg16); + + reg32 = sdstd_rreg(sd, SD_Capabilities); + printf("REGS: SD_Capabilities [040h]:0x%x\n", reg32); + + reg32 = sdstd_rreg(sd, SD_MaxCurCap); + printf("REGS: SD_MaxCurCap [04Ah]:0x%x\n", reg32); + + reg32 = sdstd_rreg(sd, SD_Capabilities3); + printf("REGS: SD_Capabilities3 [044h]:0x%x\n", reg32); + + reg16 = sdstd_rreg16(sd, SD3_HostCntrl2); + printf("REGS: SD3_HostCntrl2 [03Eh]:0x%x\n", reg16); + + for (i = 0; i < 8; i++) { + presetreg = SD3_PresetValStart + i*2; + printf("REGS: Presetvalreg:ix[%d]:0x%x, val=0x%x\n", i, + presetreg, sdstd_rreg16(sd, presetreg)); + } + + reg16 = sdstd_rreg16(sd, SD_SlotInterruptStatus); + printf("REGS: SD_SlotInterruptStatus [0FCh]:0x%x\n", reg16); + + reg16 = sdstd_rreg16(sd, SD_HostControllerVersion); + printf("REGS: SD_HostControllerVersion [0FEh]:0x%x\n", reg16); +} +#endif /* BCMDBG */ + +#ifdef BCMINTERNAL +#ifdef NOTUSED +static int +parse_state(uint32 state, char *buf, int len) +{ + char *data = buf; + + sd_err(("Parsing state 0x%x\n", state)); + if (!len) { + return (0); + } + + data += sprintf(data, "cmd_inhibit %d\n", GFIELD(state, PRES_CMD_INHIBIT)); + data += sprintf(data, "dat_inhibit %d\n", GFIELD(state, PRES_DAT_INHIBIT)); + data += sprintf(data, "dat_busy %d\n", GFIELD(state, PRES_DAT_BUSY)); + data += sprintf(data, "write_active %d\n", GFIELD(state, PRES_WRITE_ACTIVE)); + data += sprintf(data, "read_active %d\n", GFIELD(state, PRES_READ_ACTIVE)); + data += sprintf(data, "write_data_rdy %d\n", GFIELD(state, PRES_WRITE_DATA_RDY)); + data += sprintf(data, "read_data_rdy %d\n", GFIELD(state, PRES_READ_DATA_RDY)); + data += sprintf(data, "card_present %d\n", GFIELD(state, PRES_CARD_PRESENT)); + data += sprintf(data, "card_stable %d\n", GFIELD(state, PRES_CARD_STABLE)); + data += sprintf(data, "card_present_raw %d\n", GFIELD(state, PRES_CARD_PRESENT_RAW)); + data += sprintf(data, "write_enabled %d\n", GFIELD(state, PRES_WRITE_ENABLED)); + data += sprintf(data, "cmd_signal %d\n", GFIELD(state, PRES_CMD_SIGNAL)); + + return (data - buf); +} + +static int +parse_caps(uint32 cap, char *buf, int len) +{ + int block = 0xbeef; + char *data = buf; + + data += sprintf(data, "TimeOut Clock Freq:\t%d\n", GFIELD(cap, CAP_TO_CLKFREQ)); + data += sprintf(data, "TimeOut Clock Unit:\t%d\n", GFIELD(cap, CAP_TO_CLKUNIT)); + data += sprintf(data, "Base Clock:\t\t%d\n", GFIELD(cap, CAP_BASECLK)); + switch 
(GFIELD(cap, CAP_MAXBLOCK)) { + case 0: block = 512; break; + case 1: block = 1024; break; + case 2: block = 2048; break; + case 3: block = 0; break; + } + data += sprintf(data, "Max Block Size:\t\t%d\n", block); + data += sprintf(data, "Support High Speed:\t%d\n", GFIELD(cap, CAP_HIGHSPEED)); + data += sprintf(data, "Support DMA:\t\t%d\n", GFIELD(cap, CAP_DMA)); + data += sprintf(data, "Support Suspend:\t%d\n", GFIELD(cap, CAP_SUSPEND)); + data += sprintf(data, "Support 3.3 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_3)); + data += sprintf(data, "Support 3.0 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_0)); + data += sprintf(data, "Support 1.8 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_1_8)); + return (data - buf); +} +#endif /* NOTUSED */ +#endif /* BCMINTERNAL */ + +/* XXX Per SDIO Host Controller Spec section 3.2.1 + Note: for 2.x HC, new_sd_divisor should be a power of 2, but for 3.0 + HC, new_sd_divisor should be a multiple of 2. +*/ +bool +sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor) +{ + uint rc, count; + uint16 divisor; + uint16 regdata; + uint16 val1; + + sd3_trace(("%s: starting clk\n", __FUNCTION__)); + /* turn off HC clock */ + sdstd_wreg16(sd, SD_ClockCntrl, + sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */ + + /* Set divisor */ + if (sd->host_UHSISupported) { +#ifdef BCMDBG + if ((new_sd_divisor != 1) && /* 1 is a valid value */ + ((new_sd_divisor & (0x1)) || /* check for multiple of 2 */ + (new_sd_divisor == 0) || + (new_sd_divisor > 0x3ff))) { + sd_err(("3.0: Invalid clock divisor target: %d\n", new_sd_divisor)); + return FALSE; + } +#endif + divisor = (new_sd_divisor >> 1); + } else + { +#ifdef BCMDBG + if ((new_sd_divisor & (new_sd_divisor-1)) || + (new_sd_divisor == 0)) { + sd_err(("Invalid clock divisor target: %d\n", new_sd_divisor)); + return FALSE; + } +#endif + /* new logic: if divisor > 256, restrict to 256 */ + if (new_sd_divisor > 256) + new_sd_divisor = 256; + divisor = (new_sd_divisor >> 1) << 8; + } +#ifdef BCMINTERNAL + if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) { + divisor = (new_sd_divisor >> 2) << 8; + } +#endif /* BCMINTERNAL */ + + sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl))); + if (sd->host_UHSISupported) { + /* *get preset value and shift so that. + * bits 0-7 are in 15-8 and 9-8 are in 7-6 of clkctrl + */ + val1 = divisor << 2; + val1 &= 0x0ffc; + val1 |= divisor >> 8; + val1 <<= 6; + printf("divisor:%x;val1:%x\n", divisor, val1); + sdstd_mod_reg16(sd, SD_ClockCntrl, 0xffC0, val1); + } else + { + sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor); + } + + sd_err(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__, + new_sd_divisor, divisor)); + if (new_sd_divisor > 0) + sd_err(("%s:now, divided clk is: %d Hz\n", + __FUNCTION__, GFIELD(sd->caps, CAP_BASECLK)*1000000/new_sd_divisor)); + else + sd_err(("Using Primary Clock Freq of %d MHz\n", GFIELD(sd->caps, CAP_BASECLK))); + sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_BASECLK))); + if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)), + ((50 % new_sd_divisor) ? "KHz" : "MHz"))); + } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)), + ((48 % new_sd_divisor) ? 
"KHz" : "MHz"))); + } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)), + ((33 % new_sd_divisor) ? "KHz" : "MHz"))); + } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 31) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((31 % new_sd_divisor) ? (31000 / new_sd_divisor) : (31 / new_sd_divisor)), + ((31 % new_sd_divisor) ? "KHz" : "MHz"))); + } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 8) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((8 % new_sd_divisor) ? (8000 / new_sd_divisor) : (8 / new_sd_divisor)), + ((8 % new_sd_divisor) ? "KHz" : "MHz"))); + } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) { + /* XXX - BCM 27XX Standard Host Controller returns 0 for CLKFREQ */ + } else { + sd_err(("Need to determine divisor for %d MHz clocks\n", + GFIELD(sd->caps, CAP_BASECLK))); + sd_err(("Consult SD Host Controller Spec: Clock Control Register\n")); + return (FALSE); + } + + sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */ + + /* Wait for clock to stabilize */ + rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2); + count = 0; + while (!rc) { + OSL_DELAY(1); + sd_info(("Waiting for clock to become stable 0x%x\n", rc)); + rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2); + count++; + if (count > 10000) { + sd_err(("%s:Clocks failed to stabilize after %u attempts\n", + __FUNCTION__, count)); + return (FALSE); + } + } + /* Turn on clock */ + sdstd_or_reg16(sd, SD_ClockCntrl, 0x4); + + OSL_DELAY(20); + + /* Set timeout control (adjust default value based on divisor). + * Disabling timeout interrupts during setting is advised by host spec. + */ +#ifdef BCMQT + if (GFIELD(sd->caps, CAP_BASECLK) < 50) +#endif + { + uint toval; + + toval = sd_toctl; + divisor = new_sd_divisor; + + while (toval && !(divisor & 1)) { + toval -= 1; + divisor >>= 1; + } + + regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable); + sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT)); + sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval); + sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata); + } +#ifdef BCMQT + else { + sd_info(("%s: REsetting err int control\n", __FUNCTION__)); + /* XXX: turn off timeout INT, it resets clk ctrl bit */ + regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable); + sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT)); + } +#endif + OSL_DELAY(2); + + sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl))); + + return TRUE; +} + +/* XXX Per SDIO Host Controller Spec 3.3 + * volts_req: + * 0 means default: select highest voltage. + * 1 means 1.8V + * 2 means 3.0V + * 3 means 3.3V + * returns + * TRUE: no error + * FALSE: general error + * SDIO_OCR_READ_FAIL: ocr reading failure. Now the HC has to try in other available voltages. +*/ +uint16 +sdstd_start_power(sdioh_info_t *sd, int volts_req) +{ + char *s; + uint32 cmd_arg; + uint32 cmd_rsp; + uint8 pwr = 0; + int volts = 0; + uint16 val1; + uint16 init_divider = 0; + uint8 baseclk = 0; + bool selhighest = (volts_req == 0) ? TRUE : FALSE; + + /* reset the card uhsi volt support to false */ + sd->card_UHSI_voltage_Supported = FALSE; + + /* Ensure a power on reset by turning off bus power in case it happened to + * be on already. (This might happen if driver doesn't unload/clean up correctly, + * crash, etc.) Leave off for 100ms to make sure the power off isn't + * ignored/filtered by the device. 
Note we can't skip this step if the power is + * off already since we don't know how long it has been off before starting + * the driver. + */ + sdstd_wreg8(sd, SD_PwrCntrl, 0); + sd_info(("Turning off VDD/bus power briefly (100ms) to ensure reset\n")); + OSL_DELAY(100000); + + /* For selecting highest available voltage, start from lowest and iterate */ + if (!volts_req) + volts_req = 1; + + s = NULL; + + if (volts_req == 1) { + if (GFIELD(sd->caps, CAP_VOLT_1_8)) { + volts = 5; + s = "1.8"; + if (FALSE == selhighest) + goto voltsel; + else + volts_req++; + } else { + sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts)); + volts_req++; + } + } + + if (volts_req == 2) { + if (GFIELD(sd->caps, CAP_VOLT_3_0)) { + volts = 6; + s = "3.0"; + if (FALSE == selhighest) + goto voltsel; + else volts_req++; + } else { + sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts)); + volts_req++; + } + } + + if (volts_req == 3) { + if (GFIELD(sd->caps, CAP_VOLT_3_3)) { + volts = 7; + s = "3.3"; + } else { + if ((FALSE == selhighest) || (volts == 0)) { + sd_err(("HC doesn't support any voltage! error!\n")); + return FALSE; + } + } + } + + /* XXX + * if UHSI is NOT supported, check for other voltages also. This is a safety measure + * for embedded devices also, so that HC starts in lower power first. If this + * function fails, the caller may disable UHSISupported + * and call start power again to check support in higher voltages. + */ + +voltsel: + pwr = SFIELD(pwr, PWR_VOLTS, volts); + pwr = SFIELD(pwr, PWR_BUS_EN, 1); + sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */ + sd_info(("Setting Bus Power to %s Volts\n", s)); + BCM_REFERENCE(s); + + /* + * PR101766 : BRCM SDIO3.0 card is an embedded SD device. It is not a SD card. + * VDDIO signalling will be tied to 1.8v level on all SDIO3.0 based boards. + * So program the HC to drive VDDIO at 1.8v level. + */ + if ((sd->version == HOST_CONTR_VER_3) && (volts == 5)) { + val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1); + sdstd_wreg16(sd, SD3_HostCntrl2, val1); + } + + /* Wait for 500ms for power to stabilize. Some designs have reset IC's + * which can hold reset low for close to 300ms. In addition there can + * be ramp time for VDD and/or VDDIO which might be provided from a LDO. + * For these reasons we need a pretty conservative delay here to have + * predictable reset behavior in the face of an unknown design. + */ + OSL_DELAY(500000); + + baseclk = GFIELD(sd->caps, CAP_BASECLK); + sd_info(("%s:baseclk: %d MHz\n", __FUNCTION__, baseclk)); + /* for 3.0, find divisor */ + if (sd->host_UHSISupported) { + /* ToDo : Dynamic modification of preset value table based on base clk */ + sd3_trace(("sd3: %s: checking divisor\n", __FUNCTION__)); + if (GFIELD(sd->caps3, CAP3_CLK_MULT) != 0) { + sd_err(("%s:Possible error: CLK Mul 1 CLOCKING NOT supported!\n", + __FUNCTION__)); + return FALSE; + } else { + /* calculate dividor, which leads to 400KHz. */ + init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */ + /* make it a multiple of 2. */ + init_divider += (init_divider & 0x1); + sd_err(("%s:divider used for init:%d\n", + __FUNCTION__, init_divider)); + } + } else { + /* Note: sd_divisor assumes that SDIO Base CLK is 50MHz. 
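+		 * e.g. sd_divisor=2 targets 50/2 = 25MHz, so on a 100MHz
+		 * base clock it is rescaled below to 100/25 = 4.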
+		 */
+		int final_freq_based_on_div = 50/sd_divisor;
+		if (baseclk > 50)
+			sd_divisor = baseclk/final_freq_based_on_div;
+		/* TBD: merge both SDIO 2.0 and 3.0 to share the same divider logic */
+		init_divider = baseclk*10/4;	/* baseclk*1000000/(400000); */
+		/* find the next power of 2 */
+		NEXT_POW2(init_divider);
+		sd_err(("%s:NONUHSI: divider used for init:%d\n",
+			__FUNCTION__, init_divider));
+	}
+
+	/* Start at ~400KHz clock rate for initialization */
+	if (!sdstd_start_clock(sd, init_divider)) {
+		sd_err(("%s: sdstd_start_clock failed\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	/* Get the Card's Operation Condition. Occasionally the board
+	 * takes a while to become ready.
+	 */
+	cmd_arg = 0;
+	cmd_rsp = 0;
+	if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
+		sd_err(("%s: Failed to get OCR; bailing\n", __FUNCTION__));
+		/* No need to reset, as we are not sure what state the card is in. */
+		return SDIO_OCR_READ_FAIL;
+	}
+
+	sd_info(("cmd_rsp = 0x%x\n", cmd_rsp));
+	sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
+	sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
+	sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
+	sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+	/* Verify that the card supports I/O mode */
+	if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
+		sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
+		return ERROR;
+	}
+	sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
+
+	/* Examine voltage: Arasan only supports 3.3 volts,
+	 * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
+	 */
+
+	/* XXX Pg 10 SDIO spec v1.10 */
+	if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
+		sd_err(("This client does not support 3.3 volts!\n"));
+		return ERROR;
+	}
+	sd_info(("Leaving bus power at 3.3 Volts\n"));
+
+	cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
+	/* if the HC supports UHSI and the card voltage set is 3.3V, then switch to 1.8V */
+	if ((sd->host_UHSISupported) && (volts == 5)) {
+		/* set S18R also */
+		cmd_arg = SFIELD(cmd_arg, CMD5_S18R, 1);
+	}
+	cmd_rsp = 0;
+	get_ocr(sd, &cmd_arg, &cmd_rsp);
+	sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+	if ((sd->host_UHSISupported)) {
+		/* card responded with S18A => card supports sdio3.0, do the tuning proc */
+		if (GFIELD(cmd_rsp, RSP4_S18A) == 1) {
+			if (sdstd_3_sigvoltswitch_proc(sd)) {
+				/* continue with the legacy way of working */
+				sd_err(("%s: voltage switch not done. error, stopping\n",
+					__FUNCTION__));
+				/* How to gracefully proceed here? */
+				return FALSE;
+			} else {
+				sd->card_UHSI_voltage_Supported = TRUE;
+				sd_err(("%s: voltage switch SUCCESS!\n", __FUNCTION__));
+			}
+		} else {
+			/* This could happen in 2 cases.
+			 * 1) the card is NOT sdio3.0. Note that
+			 * card_UHSI_voltage_Supported is already false.
+			 * 2) the card is sdio3.0 but it is already in 1.8V.
+			 * But now, how to change the host controller's voltage?
+			 * In this case we need to do the following.
+ * sd->card_UHSI_voltage_Supported = TRUE; + * turn 1.8V sig enable in HC2 + * val1 = sdstd_rreg16(sd, SD3_HostCntrl2); + * val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1); + * sdstd_wreg16(sd, SD3_HostCntrl2, val1); + */ + sd_info(("%s: Not sdio3.0: host_UHSISupported: %d; HC volts=%d\n", + __FUNCTION__, sd->host_UHSISupported, volts)); + } + } else { + sd_info(("%s: Legacy [non sdio3.0] HC\n", __FUNCTION__)); + } + + return TRUE; +} + +bool +sdstd_bus_width(sdioh_info_t *sd, int new_mode) +{ + uint32 regdata; + int status; + uint8 reg8; + + sd_trace(("%s\n", __FUNCTION__)); + if (sd->sd_mode == new_mode) { + sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode)); + /* Could exit, but continue just in case... */ + } + + /* Set client side via reg 0x7 in CCCR */ + if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, ®data)) != SUCCESS) + return (bool)status; + regdata &= ~BUS_SD_DATA_WIDTH_MASK; + if (new_mode == SDIOH_MODE_SD4) { + sd_info(("Changing to SD4 Mode\n")); + regdata |= SD4_MODE; + } else if (new_mode == SDIOH_MODE_SD1) { + sd_info(("Changing to SD1 Mode\n")); + } else { + sd_err(("SPI Mode not supported by Standard Host Controller\n")); + } + + if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS) + return (bool)status; + + if (sd->host_UHSISupported) { + uint32 card_asyncint = 0; + uint16 host_asyncint = 0; + + if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_INTR_EXTN, 1, + &card_asyncint)) != SUCCESS) { + sd_err(("%s:INTR EXT getting failed!, ignoring\n", __FUNCTION__)); + } else { + host_asyncint = sdstd_rreg16(sd, SD3_HostCntrl2); + + /* check if supported by host and card */ + if ((regdata & SD4_MODE) && + (GFIELD(card_asyncint, SDIO_BUS_ASYNCINT_CAP)) && + (GFIELD(sd->caps, CAP_ASYNCINT_SUP))) { + /* set enable async int in card */ + card_asyncint = SFIELD(card_asyncint, SDIO_BUS_ASYNCINT_SEL, 1); + + if ((status = sdstd_card_regwrite (sd, 0, + SDIOD_CCCR_INTR_EXTN, 1, card_asyncint)) != SUCCESS) + sd_err(("%s:INTR EXT setting failed!, ignoring\n", + __FUNCTION__)); + else { + /* set enable async int in host */ + host_asyncint = SFIELD(host_asyncint, + HOSTCtrl2_ASYINT_EN, 1); + sdstd_wreg16(sd, SD3_HostCntrl2, host_asyncint); + } + } else { + sd_err(("%s:INTR EXT NOT supported by either host or" + "card!, ignoring\n", __FUNCTION__)); + } + } + } + + /* Set host side via Host reg */ + reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE; + if (new_mode == SDIOH_MODE_SD4) + reg8 |= SD4_MODE; + sdstd_wreg8(sd, SD_HostCntrl, reg8); + + sd->sd_mode = new_mode; + + return TRUE; +} + +static int +sdstd_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + sd->sd3_tuning_reqd = FALSE; + sd->sd3_tuning_disable = FALSE; + if ((sdstd_host_init(sd)) != SUCCESS) { + return ERROR; + } + + /* Give WL_reset before sending CMD5 to dongle for Revx SDIO3 HC's */ + if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3)) + { + sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x8); + OSL_DELAY(sd_delay_value); + sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x0); + OSL_DELAY(500000); + } + + if (sdstd_client_init(sd) != SUCCESS) { + return ERROR; + } + + /* if the global cap matched and is SDR 104/50 [if 50 it is reqd] enable tuning. 
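+	   Per the SD Host Controller 3.0 spec, SDR104 always requires
+	   tuning while SDR50 needs it only when the host advertises
+	   "Use Tuning for SDR50"; SD3_TUNING_REQD() is expected to
+	   encode exactly that distinction.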
*/ + if ((TRUE != sd3_sw_override1) && SD3_TUNING_REQD(sd, sd_uhsimode)) { + sd->sd3_tuning_reqd = TRUE; + + /* init OS structs for tuning */ + sdstd_3_osinit_tuning(sd); + + /* enable HC tuning interrupt OR timer based on tuning method */ + if (GFIELD(sd->caps3, CAP3_RETUNING_MODES)) { + /* enable both RTReq and timer */ + sd->intmask |= HC_INTR_RETUNING; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); +#ifdef BCMSDYIELD + if (sd_forcerb) + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ +#endif /* BCMSDYIELD */ + } + } + + return SUCCESS; +} + +static int +sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr) +{ + /* read 24 bits and return valid 17 bit addr */ + int i; + uint32 scratch, regdata; + uint8 *ptr = (uint8 *)&scratch; + for (i = 0; i < 3; i++) { + if ((sdstd_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS) + sd_err(("%s: Can't read!\n", __FUNCTION__)); + + *ptr++ = (uint8) regdata; + regaddr++; + } + /* Only the lower 17-bits are valid */ + scratch = ltoh32(scratch); + scratch &= 0x0001FFFF; + return (scratch); +} + +static int +sdstd_card_enablefuncs(sdioh_info_t *sd) +{ + int status; + uint32 regdata; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + /* Enable function 1 on the card */ + regdata = SDIO_FUNC_ENABLE_1; + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS) + return status; + + return SUCCESS; +} + +/* Read client card reg */ +static int +sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg; + uint32 rsp5; + +#ifdef BCMDBG + if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) { + sd_err(("%s: Entering: ErrorintrStatus 0x%x, intstat = 0x%x\n", + __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg16(sd, SD_IntrStatus))); + } +#endif + + cmd_arg = 0; + + if ((func == 0) || (regsize == 1)) { + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0); + + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) { + sd_err(("%s: 1: ErrorintrStatus 0x%x\n", + __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus))); + } + + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) + sd_err(("%s: rsp5 flags is 0x%x\t %d\n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func)); + + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + *data = GFIELD(rsp5, RSP5_DATA); + + sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data)); + } else { + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize); + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); /* XXX SDIO spec v 1.10, Sec 5.3 */ + cmd_arg 
= SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ); + + sd->data_xfer_count = regsize; + + /* sdstd_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) + sd_err(("%s: rsp5 flags is 0x%x\t %d\n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func)); + + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + + if (sd->polled_mode) { + volatile uint16 int_reg; + int retries = RETRIES_LARGE; + + /* Wait for Read Buffer to become ready */ + do { + sdstd_os_yield(sd); + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0)); + + if (!retries) { + sd_err(("%s: Timeout on Buf_Read_Ready: " + "intStat: 0x%x errint: 0x%x PresentState 0x%x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg); + return (ERROR); + } + + /* Have Buffer Ready, so clear it and read the data */ + sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1)); + if (regsize == 2) + *data = sdstd_rreg16(sd, SD_BufferDataPort0); + else + *data = sdstd_rreg(sd, SD_BufferDataPort0); + + sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data)); + /* Check Status. + * After the data is read, the Transfer Complete bit should be on + */ + retries = RETRIES_LARGE; + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0)); + + /* Check for any errors from the data phase */ + if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) + return ERROR; + + if (!retries) { + sd_err(("%s: Timeout on xfer complete: " + "intr 0x%04x err 0x%04x state 0x%08x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + return (ERROR); + } + + sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1)); + } + } + if (sd->polled_mode) { + if (regsize == 2) + *data &= 0xffff; + } + return SUCCESS; +} + +bool +check_client_intr(sdioh_info_t *sd) +{ + uint16 raw_int, cur_int, old_int; + + raw_int = sdstd_rreg16(sd, SD_IntrStatus); + cur_int = raw_int & sd->intmask; + + if (!cur_int) { + /* Not an error -- might share interrupts... 
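+		 * On a shared IRQ line another device may have raised the
+		 * interrupt, so returning FALSE simply tells the caller that
+		 * this controller was not the source.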
*/ + return FALSE; + } + + if (GFIELD(cur_int, INTSTAT_CARD_INT)) { + unsigned long flags; + + sdstd_os_lock_irqsave(sd, &flags); + old_int = sdstd_rreg16(sd, SD_IntrStatusEnable); + sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0)); + sdstd_os_unlock_irqrestore(sd, &flags); + + if (sd->client_intr_enabled && sd->use_client_ints) { + sd->intrcount++; + ASSERT(sd->intr_handler); + ASSERT(sd->intr_handler_arg); + (sd->intr_handler)(sd->intr_handler_arg); + } else { + sd_err(("%s: Not ready for intr: enabled %d, handler %p\n", + __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); + } + sdstd_os_lock_irqsave(sd, &flags); + old_int = sdstd_rreg16(sd, SD_IntrStatusEnable); + sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 1)); + sdstd_os_unlock_irqrestore(sd, &flags); + } else { + /* Local interrupt: disable, set flag, and save intrstatus */ + sdstd_wreg16(sd, SD_IntrSignalEnable, 0); + sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0); + sd->local_intrcount++; + sd->got_hcint = TRUE; + sd->last_intrstatus = cur_int; + } + + return TRUE; +} + +void +sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err) +{ + uint16 int_reg, err_reg; + int retries = RETRIES_LARGE; + + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus); + } while (--retries && !(int_reg & norm) && !(err_reg & err)); + + norm |= sd->intmask; + if (err_reg & err) + norm = SFIELD(norm, INTSTAT_ERROR_INT, 1); + sd->last_intrstatus = int_reg & norm; +} + +/* write a client register */ +static int +sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, rsp5, flags; + + cmd_arg = 0; + + if ((func == 0) || (regsize == 1)) { + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff); + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + flags = GFIELD(rsp5, RSP5_FLAGS); + if (flags && (flags != 0x10)) + sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n", + __FUNCTION__, flags)); + } + else { + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize); + /* XXX SDIO spec v 1.10, Sec 5.3 Not FIFO */ + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + + sd->data_xfer_count = regsize; + + /* sdstd_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) + sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS))); + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + + if (sd->polled_mode) { + uint16 int_reg; + int retries = RETRIES_LARGE; + + /* Wait for Write Buffer to become ready */ + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0)); + + if (!retries) { + sd_err(("%s: Timeout 
on Buf_Write_Ready: intStat: 0x%x " + "errint: 0x%x PresentState 0x%x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg); + return (ERROR); + } + /* Clear Write Buf Ready bit */ + int_reg = 0; + int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1); + sdstd_wreg16(sd, SD_IntrStatus, int_reg); + + /* At this point we have Buffer Ready, so write the data */ + if (regsize == 2) + sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data); + else + sdstd_wreg(sd, SD_BufferDataPort0, data); + + /* Wait for Transfer Complete */ + retries = RETRIES_LARGE; + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0)); + + /* Check for any errors from the data phase */ + if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) + return ERROR; + + if (retries == 0) { + sd_err(("%s: Timeout for xfer complete; State = 0x%x, " + "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n", + __FUNCTION__, sdstd_rreg(sd, SD_PresentState), + int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus), + sd->r_cnt, sd->t_cnt)); + } + /* Clear the status bits */ + sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0)); + } + } + return SUCCESS; +} + +void +sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */) +{ + int rsp_count; + int respaddr = SD_Response0; + + if (count > 4) + count = 4; + + for (rsp_count = 0; rsp_count < count; rsp_count++) { + *rsp_buffer++ = sdstd_rreg(sd, respaddr); + respaddr += 4; + } +} + +/* + Note: options: 0 - default + 1 - tuning option: Means that, this cmd issue is as a part + of tuning. So no need to check the start tuning function. +*/ +static int +sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg) +{ + uint16 cmd_reg; + int retries; + uint32 cmd_arg; + uint16 xfer_reg = 0; + +#ifdef BCMDBG + if (sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus) != 0) { + sd_err(("%s: Entering: ErrorIntrStatus 0x%x, Expecting 0\n", + __FUNCTION__, sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus))); + } +#endif + + if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) && + ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) { + sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd)); + return ERROR; + } + + retries = RETRIES_SMALL; + while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) { + if (retries == RETRIES_SMALL) + sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n", + __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState))); + } + if (!retries) { + sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__)); + if (trap_errs) + ASSERT(0); + return ERROR; + } + + cmd_reg = 0; + switch (cmd) { + case SDIOH_CMD_0: /* Set Card to Idle State - No Response */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, 
CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_5: /* Send Operation condition - Response R4 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_7: /* Select card - Response R1 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_14: /* eSD Sleep - Response R1 */ + case SDIOH_CMD_11: /* Select card - Response R1 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_15: /* Set card to inactive state - Response None */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_19: /* clock tuning - Response R1 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + /* Host controller reads 64 byte magic pattern from card + * Hence Direction = 1 ( READ ) + */ + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1); + break; + + case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */ + + sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n", + __FUNCTION__, + GFIELD(arg, CMD52_FUNCTION), + GFIELD(arg, CMD52_REG_ADDR), + GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R", + GFIELD(arg, CMD52_DATA))); + + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */ + + sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n", + __FUNCTION__, + GFIELD(arg, CMD53_FUNCTION), + GFIELD(arg, CMD53_REG_ADDR), + GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R", + GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte", + GFIELD(arg, CMD53_BYTE_BLK_CNT), + GFIELD(arg, CMD53_OP_CODE) ? 
"Incrementing addr" : "Single addr")); + + cmd_arg = arg; + xfer_reg = 0; + + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + + use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE); + + if (GFIELD(cmd_arg, CMD53_BLK_MODE)) { + uint16 blocksize; + uint16 blockcount; + int func; + + ASSERT(sdioh_info->sd_blockmode); + + func = GFIELD(cmd_arg, CMD53_FUNCTION); + blocksize = MIN((int)sdioh_info->data_xfer_count, + sdioh_info->client_block_size[func]); + blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT); + + /* data_xfer_cnt is already setup so that for multiblock mode, + * it is the entire buffer length. For non-block or single block, + * it is < 64 bytes + */ + if (use_dma) { + switch (sdioh_info->sd_dma_mode) { + case DMA_MODE_SDMA: + sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n", + __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr), + (uint32)sdioh_info->dma_phys)); + sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys); + break; + case DMA_MODE_ADMA1: + case DMA_MODE_ADMA2: + sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__)); +#ifdef BCMSDIOH_TXGLOM + /* multi-descriptor is currently used only for hc3 */ + if ((sdioh_info->glom_info.count != 0) && + (sdioh_info->txglom_mode == SDPCM_TXGLOM_MDESC)) { + uint32 i = 0; + for (i = 0; + i < sdioh_info->glom_info.count-1; + i++) { + glom_buf_t *glom_info; + glom_info = &(sdioh_info->glom_info); + sd_create_adma_descriptor(sdioh_info, + i, + glom_info->dma_phys_arr[i], + glom_info->nbytes[i], + ADMA2_ATTRIBUTE_VALID | + ADMA2_ATTRIBUTE_ACT_TRAN); + } + + sd_create_adma_descriptor(sdioh_info, + i, + sdioh_info->glom_info.dma_phys_arr[i], + sdioh_info->glom_info.nbytes[i], + ADMA2_ATTRIBUTE_VALID | + ADMA2_ATTRIBUTE_END | + ADMA2_ATTRIBUTE_INT | + ADMA2_ATTRIBUTE_ACT_TRAN); + } else +#endif /* BCMSDIOH_TXGLOM */ + { + sd_create_adma_descriptor(sdioh_info, 0, + sdioh_info->dma_phys, blockcount*blocksize, + ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END | + ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN); + } + /* Dump descriptor if DMA debugging is enabled. 
*/ + if (sd_msglevel & SDH_DMA_VAL) { + sd_dump_adma_dscr(sdioh_info); + } + + sdstd_wreg(sdioh_info, SD_ADMA_SysAddr, + sdioh_info->adma2_dscr_phys); + break; + default: + sd_err(("%s: unsupported DMA mode %d.\n", + __FUNCTION__, sdioh_info->sd_dma_mode)); + break; + } + } + + sd_trace(("%s: Setting block count %d, block size %d bytes\n", + __FUNCTION__, blockcount, blocksize)); + sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize); + sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount); + + xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma); + + if (sdioh_info->client_block_size[func] != blocksize) + set_client_block_size(sdioh_info, 1, blocksize); + + if (blockcount > 1) { + xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1); + xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1); + xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0); + } else { + xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0); + xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); + xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0); + } + + if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ) + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1); + else + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0); + + retries = RETRIES_SMALL; + while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), + PRES_DAT_INHIBIT) && --retries) + sd_err(("%s: Waiting for Data Inhibit cmd = %d\n", + __FUNCTION__, cmd)); + if (!retries) { + sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__)); + if (trap_errs) + ASSERT(0); + return ERROR; + } + + /* Consider deferring this write to the comment below "Deferred Write" */ + sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg); + + } else { /* Non block mode */ + uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT); + /* The byte/block count field only has 9 bits, + * so, to do a 512-byte bytemode transfer, this + * field will contain 0, but we need to tell the + * controller we're transferring 512 bytes. + */ + if (bytes == 0) bytes = 512; + + if (use_dma) + sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys); + + /* PCI: Transfer Mode register 0x0c */ + xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma); + xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0); + if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ) + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1); + else + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0); + /* See table 2-8 Host Controller spec ver 1.00 */ + xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Dont care */ + xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0); + + sdstd_wreg16(sdioh_info, SD_BlockSize, bytes); + + /* XXX This should be a don't care but Arasan needs it + * to be one. Its fixed in later versions (but they + * don't have version numbers, sigh). 
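+			 * The write below therefore always programs a block
+			 * count of 1 for byte-mode transfers, which is harmless
+			 * on controllers that treat the field as a don't care.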
+ */ + sdstd_wreg16(sdioh_info, SD_BlockCount, 1); + + retries = RETRIES_SMALL; + while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), + PRES_DAT_INHIBIT) && --retries) + sd_err(("%s: Waiting for Data Inhibit cmd = %d\n", + __FUNCTION__, cmd)); + if (!retries) { + sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__)); + if (trap_errs) + ASSERT(0); + return ERROR; + } + + /* Consider deferring this write to the comment below "Deferred Write" */ + sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg); + } + break; + + default: + sd_err(("%s: Unknown command\n", __FUNCTION__)); + return ERROR; + } + + if (sdioh_info->sd_mode == SDIOH_MODE_SPI) { + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + } + + /* Setup and issue the SDIO command */ + sdstd_wreg(sdioh_info, SD_Arg0, arg); + + /* Deferred Write + * Consider deferring the two writes above until this point in the code. + * The following would do one 32 bit write. + * + * { + * uint32 tmp32 = cmd_reg << 16; + * tmp32 |= xfer_reg; + * sdstd_wreg(sdioh_info, SD_TransferMode, tmp32); + * } + */ + + /* Alternate to Deferred Write START */ + + /* In response to CMD19 card sends 64 byte magic pattern. + * So SD_BlockSize = 64 & SD_BlockCount = 1 + */ + if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19) { + sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg); + sdstd_wreg16(sdioh_info, SD_BlockSize, 64); + sdstd_wreg16(sdioh_info, SD_BlockCount, 1); + } + sdstd_wreg16(sdioh_info, SD_Command, cmd_reg); + + /* Alternate to Deferred Write END */ + + /* If we are in polled mode, wait for the command to complete. + * In interrupt mode, return immediately. The calling function will + * know that the command has completed when the CMDATDONE interrupt + * is asserted + */ + if (sdioh_info->polled_mode) { + uint16 int_reg = 0; + retries = RETRIES_LARGE; + + /* For CMD19 no need to wait for cmd completion */ + if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19) + return SUCCESS; + + do { + int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus); + sdstd_os_yield(sdioh_info); + } while (--retries && + (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) && + (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0)); + + if (!retries) { + sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x " + "error stat 0x%x state 0x%x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus), + sdstd_rreg(sdioh_info, SD_PresentState))); + + /* Attempt to reset CMD line when we get a CMD timeout */ + sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); + retries = RETRIES_LARGE; + do { + sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__)); + } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), + SW_RESET_CMD)) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); + } + + if (trap_errs) + ASSERT(0); + return (ERROR); + } + + /* Clear Command Complete interrupt */ + int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1); + sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg); + + /* Check for Errors */ + if (sdstd_check_errs(sdioh_info, cmd, arg)) { + if (trap_errs) + ASSERT(0); + return ERROR; + } + } + return SUCCESS; +} + +/* + * XXX On entry: If single block or non-block, buffersize <= blocksize. + * If Mulitblock, buffersize is unlimited. + * Question is how to handle the leftovers in either single or multiblock. 
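+ * ("Leftovers" here means a tail that is shorter than one full block.)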
+ * I think the caller should break the buffer up so this routine will always
+ * use blocksize == buffersize to handle the end piece of the buffer
+ */
+
+static int
+sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
+{
+ int retval = SUCCESS;
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+ uint16 int_reg, int_bit;
+ uint flags;
+ int num_blocks, blocksize;
+ bool local_blockmode, local_dma;
+ bool read = rw == SDIOH_READ ? 1 : 0;
+ bool local_yield = FALSE;
+#ifdef BCMSDIOH_TXGLOM
+ uint32 i;
+ uint8 *localbuf = NULL;
+#endif
+
+ ASSERT(nbytes);
+
+ cmd_arg = 0;
+
+ sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+ if (read) sd->r_cnt++; else sd->t_cnt++;
+
+ local_blockmode = sd->sd_blockmode;
+ local_dma = USE_DMA(sd);
+
+#ifdef BCMSDIOH_TXGLOM
+ /* If multiple buffers are queued, calculate nbytes as their total */
+ if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+ uint32 ii;
+ nbytes = 0;
+ for (ii = 0; ii < sd->glom_info.count; ii++) {
+ nbytes += sd->glom_info.nbytes[ii];
+ }
+ ASSERT(nbytes <= sd->alloced_dma_size);
+ }
+#endif
+
+ /* Don't bother with block mode on small xfers */
+ if (nbytes < sd->client_block_size[func]) {
+ sd_data(("setting local blockmode to false: nbytes (%d) < block_size (%d)\n",
+ nbytes, sd->client_block_size[func]));
+ local_blockmode = FALSE;
+ local_dma = FALSE;
+#ifdef BCMSDIOH_TXGLOM
+ /* In glommed case, create a single pkt from multiple pkts */
+ if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+ uint32 offset = 0;
+ localbuf = (uint8 *)MALLOC(sd->osh, nbytes);
+ if (localbuf == NULL) {
+ sd_err(("%s: MALLOC failed for glom buffer\n", __FUNCTION__));
+ return ERROR;
+ }
+ data = (uint32 *)localbuf;
+ for (i = 0; i < sd->glom_info.count; i++) {
+ bcopy(sd->glom_info.dma_buf_arr[i],
+ ((uint8 *)data + offset),
+ sd->glom_info.nbytes[i]);
+ offset += sd->glom_info.nbytes[i];
+ }
+ }
+#endif
+ }
+
+ if (local_blockmode) {
+ blocksize = MIN(sd->client_block_size[func], nbytes);
+ num_blocks = nbytes/blocksize;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
+ } else {
+ num_blocks = 1;
+ blocksize = nbytes;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ }
+
+ if (local_dma && !read) {
+#ifdef BCMSDIOH_TXGLOM
+ if ((func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+ /* With a ver 2 host controller, DMA_MAP may not work properly due to
+ * 4K alignment requirements, so copy the pkts into the 4K aligned
+ * pre-allocated buffer.
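+ * (Per the v2 Host Controller spec, SDMA pauses with a DMA interrupt
+ * whenever the system address crosses the programmed 4K boundary,
+ * hence the alignment requirement.)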
+ * Total length should not cross the pre-alloced memory size
+ */
+ if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
+ uint32 total_bytes = 0;
+ for (i = 0; i < sd->glom_info.count; i++) {
+ bcopy(sd->glom_info.dma_buf_arr[i],
+ (uint8 *)sd->dma_buf + total_bytes,
+ sd->glom_info.nbytes[i]);
+ total_bytes += sd->glom_info.nbytes[i];
+ }
+ sd_sync_dma(sd, read, total_bytes);
+ }
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ {
+ bcopy(data, sd->dma_buf, nbytes);
+ sd_sync_dma(sd, read, nbytes);
+ }
+ }
+
+ if (fifo)
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0); /* XXX SDIO spec v 1.10, Sec 5.3 */
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); /* XXX SDIO spec v 1.10, Sec 5.3 */
+
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
+ if (read)
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = nbytes;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
+ sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
+ retval = status;
+ goto done;
+ }
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
+ sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
+ "numblocks %d, blocksize %d\n",
+ __FUNCTION__, nbytes, local_dma, local_blockmode, read, num_blocks, blocksize));
+
+ if (flags & 1)
+ sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
+ "bytes %d blockmode %d\n",
+ __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
+ GFIELD(cmd_arg, CMD53_BLK_MODE)));
+ if (flags & 0x8)
+ sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
+
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
+ __FUNCTION__, flags));
+ if (trap_errs)
+ ASSERT(0);
+ retval = ERROR;
+ goto done;
+ }
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+#ifdef BCMSDYIELD
+ local_yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
+#endif
+
+ if (!local_dma) {
+ int bytes, ii;
+ uint32 tmp;
+
+ for (ii = 0; ii < num_blocks; ii++) {
+ int words;
+
+ /* Decide which status bit we're waiting for */
+ if (read)
+ int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
+ else
+ int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
+
+ /* If not on, wait for it (or for xfer error) */
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ if (!(int_reg & int_bit)) {
+ status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
+ local_yield, &int_reg);
+ switch (status) {
+ case -1:
+ sd_err(("%s: pio interrupted\n", __FUNCTION__));
+ retval = ERROR;
+ goto done;
+ case -2:
+ sd_err(("%s: pio timeout waiting for interrupt\n",
+ __FUNCTION__));
+ retval = ERROR;
+ goto done;
+ }
+ }
+#ifdef BCMSLTGT
+ /* int_reg = sdstd_rreg16(sd, SD_IntrStatus); */
+#endif
+ /* Confirm we got the bit w/o error */
+ if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
+ sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
+ "errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, read ?
"Read" : "Write", int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + sdstd_dumpregs(sd); + sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg); + retval = ERROR; + goto done; + } + + /* Clear Buf Ready bit */ + sdstd_wreg16(sd, SD_IntrStatus, int_bit); + + /* At this point we have Buffer Ready, write the data 4 bytes at a time */ + for (words = blocksize/4; words; words--) { + if (read) + *data = sdstd_rreg(sd, SD_BufferDataPort0); + else + sdstd_wreg(sd, SD_BufferDataPort0, *data); + data++; + } + + /* XXX + * Handle < 4 bytes. wlc_pio.c currently (as of 12/20/05) truncates buflen + * to be evenly divisible by 4. However dongle passes arbitrary lengths, + * so handle it here + */ + bytes = blocksize % 4; + + /* If no leftover bytes, go to next block */ + if (!bytes) + continue; + + switch (bytes) { + case 1: + /* R/W 8 bits */ + if (read) + *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0)); + else + sdstd_wreg8(sd, SD_BufferDataPort0, + (uint8)(*(data++) & 0xff)); + break; + case 2: + /* R/W 16 bits */ + if (read) + *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0); + else + sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++))); + break; + case 3: + /* R/W 24 bits: + * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23] + */ + if (read) { + tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0); + tmp |= ((uint32)(sdstd_rreg8(sd, + SD_BufferDataPort1)) << 16); + *(data++) = tmp; + } else { + tmp = *(data++); + sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff); + sdstd_wreg8(sd, SD_BufferDataPort1, + (uint8)((tmp >> 16) & 0xff)); + } + break; + default: + sd_err(("%s: Unexpected bytes leftover %d\n", + __FUNCTION__, bytes)); + ASSERT(0); + break; + } + } + } /* End PIO processing */ + + /* Wait for Transfer Complete or Transfer Error */ + int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1); + + /* If not on, wait for it (or for xfer error) */ + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + if (!(int_reg & int_bit)) { + status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, local_yield, &int_reg); + switch (status) { + case -1: + sd_err(("%s: interrupted\n", __FUNCTION__)); + retval = ERROR; + goto done; + case -2: + sd_err(("%s: timeout waiting for interrupt\n", __FUNCTION__)); + retval = ERROR; + goto done; + } + } + + /* Check for any errors from the data phase */ + if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) { + retval = ERROR; + goto done; + } + + /* May have gotten a software timeout if not blocking? */ + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + if (!(int_reg & int_bit)) { + sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, " + "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n", + __FUNCTION__, read ? 
"R" : "W", local_dma, + sdstd_rreg(sd, SD_PresentState), int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes, + sd->r_cnt, sd->t_cnt)); + sdstd_dumpregs(sd); + retval = ERROR; + goto done; + } + + /* Clear the status bits */ + int_reg = int_bit; + if (local_dma) { + /* DMA Complete */ + /* XXX Step 14, Section 3.6.2.2 Stnd Cntrlr Spec */ + /* Reads in particular don't have DMA_COMPLETE set */ + int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1); + } + sdstd_wreg16(sd, SD_IntrStatus, int_reg); + + /* Fetch data */ + if (local_dma && read) { + sd_sync_dma(sd, read, nbytes); + bcopy(sd->dma_buf, data, nbytes); + } + +done: +#ifdef BCMSDIOH_TXGLOM + if (localbuf) + MFREE(sd->osh, localbuf, nbytes); +#endif + return retval; +} + +static int +set_client_block_size(sdioh_info_t *sd, int func, int block_size) +{ + int base; + int err = 0; + + sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func)); + sd->client_block_size[func] = block_size; + + /* Set the block size in the SDIO Card register */ + base = func * SDIOD_FBR_SIZE; + err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff); + if (!err) { + err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1, + (block_size >> 8) & 0xff); + } + + /* Do not set the block size in the SDIO Host register, that + * is func dependent and will get done on an individual + * transaction basis + */ + + return (err ? BCME_SDIO_ERROR : 0); +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + uint8 hreg; + + /* Reset the attached device (use slower clock for safety) */ + if (!sdstd_start_clock(si, 128)) { + sd_err(("set clock failed!\n")); + return ERROR; + } + sdstd_reset(si, 0, 1); + + /* Reset portions of the host state accordingly */ + hreg = sdstd_rreg8(si, SD_HostCntrl); + hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0); + hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0); + si->sd_mode = SDIOH_MODE_SD1; + + /* Reinitialize the card */ + si->card_init_done = FALSE; + return sdstd_client_init(si); +} + +#ifdef BCMINTERNAL +#ifdef NOTUSED +static void +cis_fetch(sdioh_info_t *sd, int func, char *data, int len) +{ + int count; + int offset; + char *end = data + len; + uint32 foo; + + for (count = 0; count < 512 && data < end; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdstd_card_regread (sd, func, offset, 1, &foo) < 0) { + sd_err(("%s: regread failed\n", __FUNCTION__)); + return; + } + data += sprintf(data, "%.2x ", foo & 0xff); + if (((count+1) % 16) == 0) + data += sprintf(data, "\n"); + } +} +#endif /* NOTUSED */ +#endif /* BCMINTERNAL */ + +static void +sd_map_dma(sdioh_info_t * sd) +{ + + int alloced; + void *va; + uint dma_buf_size = SD_PAGE; + +#ifdef BCMSDIOH_TXGLOM + /* There is no alignment requirement for HC3 */ + if ((sd->version == HOST_CONTR_VER_3) && sd_txglom) { + /* Max glom packet length is 64KB */ + dma_buf_size = SD_PAGE * 16; + } +#endif + + alloced = 0; + if ((va = DMA_ALLOC_CONSISTENT(sd->osh, dma_buf_size, SD_PAGE_BITS, &alloced, + &sd->dma_start_phys, 0x12)) == NULL) { + sd->sd_dma_mode = DMA_MODE_NONE; + sd->dma_start_buf = 0; + sd->dma_buf = (void *)0; + sd->dma_phys = 0; + sd->alloced_dma_size = 0; + sd_err(("%s: DMA_ALLOC failed. 
Disabling DMA support.\n", __FUNCTION__)); + } else { + sd->dma_start_buf = va; + sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE); + sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE); + sd->alloced_dma_size = alloced; + sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n", + __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, + (uint)PHYSADDRHI(sd->dma_phys), (uint)PHYSADDRLO(sd->dma_phys))); + sd_fill_dma_data_buf(sd, 0xA5); + } + + if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, SD_PAGE_BITS, &alloced, + &sd->adma2_dscr_start_phys, 0x12)) == NULL) { + sd->sd_dma_mode = DMA_MODE_NONE; + sd->adma2_dscr_start_buf = 0; + sd->adma2_dscr_buf = (void *)0; + sd->adma2_dscr_phys = 0; + sd->alloced_adma2_dscr_size = 0; + sd_err(("%s: DMA_ALLOC failed for descriptor buffer. " + "Disabling DMA support.\n", __FUNCTION__)); + } else { + sd->adma2_dscr_start_buf = va; + sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE); + sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE); + sd->alloced_adma2_dscr_size = alloced; + sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n", + __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf, + (uint)PHYSADDRHI(sd->adma2_dscr_phys), + (uint)PHYSADDRLO(sd->adma2_dscr_phys))); + sd_clear_adma_dscr_buf(sd); + } +} + +static void +sd_unmap_dma(sdioh_info_t * sd) +{ + if (sd->dma_start_buf) { + DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size, + sd->dma_start_phys, 0x12); + } + + if (sd->adma2_dscr_start_buf) { + DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size, + sd->adma2_dscr_start_phys, 0x12); + } +} + +static void +sd_clear_adma_dscr_buf(sdioh_info_t *sd) +{ + bzero((char *)sd->adma2_dscr_buf, SD_PAGE); + sd_dump_adma_dscr(sd); +} + +static void +sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data) +{ + memset((char *)sd->dma_buf, data, SD_PAGE); +} + +static void +sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index, + uint32 addr_phys, uint16 length, uint16 flags) +{ + adma2_dscr_32b_t *adma2_dscr_table; + adma1_dscr_t *adma1_dscr_table; + + adma2_dscr_table = sd->adma2_dscr_buf; + adma1_dscr_table = sd->adma2_dscr_buf; + + switch (sd->sd_dma_mode) { + case DMA_MODE_ADMA2: + sd_dma(("%s: creating ADMA2 descriptor for index %d\n", + __FUNCTION__, index)); + + adma2_dscr_table[index].phys_addr = addr_phys; + adma2_dscr_table[index].len_attr = length << 16; + adma2_dscr_table[index].len_attr |= flags; + break; + case DMA_MODE_ADMA1: + /* ADMA1 requires two descriptors, one for len + * and the other for data transfer + */ + index <<= 1; + + sd_dma(("%s: creating ADMA1 descriptor for index %d\n", + __FUNCTION__, index)); + + adma1_dscr_table[index].phys_addr_attr = length << 12; + adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET | + ADMA2_ATTRIBUTE_VALID); + adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000; + adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f); + break; + default: + sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n", + __FUNCTION__, sd->sd_dma_mode)); + break; + } +} + +static void +sd_dump_adma_dscr(sdioh_info_t *sd) +{ + adma2_dscr_32b_t *adma2_dscr_table; + adma1_dscr_t *adma1_dscr_table; + uint32 i = 0; + uint16 flags; + char flags_str[32]; + + ASSERT(sd->adma2_dscr_buf != NULL); + + adma2_dscr_table = sd->adma2_dscr_buf; + adma1_dscr_table = sd->adma2_dscr_buf; + + switch (sd->sd_dma_mode) { + case DMA_MODE_ADMA2: + sd_err(("ADMA2 Descriptor Table (%dbytes) 
@virt/phys: %p/0x%x-0x%x\n", + SD_PAGE, sd->adma2_dscr_buf, + (uint)PHYSADDRHI(sd->adma2_dscr_phys), + (uint)PHYSADDRLO(sd->adma2_dscr_phys))); + sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)" + " |\n")); + while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) { + flags = adma2_dscr_table->len_attr & 0xFFFF; + sprintf(flags_str, "%s%s%s%s", + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ", + (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "), + (flags & ADMA2_ATTRIBUTE_END ? "END " : " "), + (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : "")); + sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n", + i, adma2_dscr_table, adma2_dscr_table->phys_addr, + adma2_dscr_table->len_attr >> 16, flags, flags_str)); + i++; + +#ifdef linux + /* Follow LINK descriptors or skip to next. */ + if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) { + adma2_dscr_table = phys_to_virt( + adma2_dscr_table->phys_addr); + } else { + adma2_dscr_table++; + } +#else + adma2_dscr_table++; +#endif /* linux */ + + } + break; + case DMA_MODE_ADMA1: + sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%x-0x%x\n", + SD_PAGE, sd->adma2_dscr_buf, + (uint)PHYSADDRHI(sd->adma2_dscr_phys), + (uint)PHYSADDRLO(sd->adma2_dscr_phys))); + sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n")); + + for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) { + flags = adma1_dscr_table->phys_addr_attr & 0x3F; + sprintf(flags_str, "%s%s%s%s", + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ", + (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "), + (flags & ADMA2_ATTRIBUTE_END ? "END " : " "), + (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : "")); + sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n", + i, adma1_dscr_table, + adma1_dscr_table->phys_addr_attr & 0xFFFFF000, + flags, flags_str)); + +#ifdef linux + /* Follow LINK descriptors or skip to next. */ + if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) { + adma1_dscr_table = phys_to_virt( + adma1_dscr_table->phys_addr_attr & 0xFFFFF000); + } else { + adma1_dscr_table++; + } +#else + adma2_dscr_table++; +#endif /* linux */ + } + break; + default: + sd_err(("Unknown DMA Descriptor Table Format.\n")); + break; + } +} + +static void +sdstd_dumpregs(sdioh_info_t *sd) +{ + sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n", + sdstd_rreg16(sd, SD_IntrStatus), + sdstd_rreg16(sd, SD_ErrorIntrStatus))); + sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n", + sdstd_rreg16(sd, SD_IntrStatusEnable), + sdstd_rreg16(sd, SD_ErrorIntrStatusEnable))); + sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n", + sdstd_rreg16(sd, SD_IntrSignalEnable), + sdstd_rreg16(sd, SD_ErrorIntrSignalEnable))); +} diff --git a/bcmdhd.101.10.361.x/bcmsdstd.h b/bcmdhd.101.10.361.x/bcmsdstd.h new file mode 100755 index 0000000..e0d19e2 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdstd.h @@ -0,0 +1,301 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id: bcmsdstd.h 833030 2019-08-02 17:22:42Z jl904071 $ + */ +#ifndef _BCM_SD_STD_H +#define _BCM_SD_STD_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ +#ifdef BCMDBG +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0) +#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0) +#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0) +#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0) +#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) +#define sd_dma(x) do { if (sd_msglevel & SDH_DMA_VAL) printf x; } while (0) +#else +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#define sd_dma(x) +#endif /* BCMDBG */ + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); +/* Allocate/init/free per-OS private data */ +extern int sdstd_osinit(sdioh_info_t *sd); +extern void sdstd_osfree(sdioh_info_t *sd); + +#ifdef BCMPERFSTATS +#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0) +#else +#define sd_log(x) +#endif + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 +#define SDIOH_MODE_SD1 1 +#define SDIOH_MODE_SD4 2 + +#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */ +#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */ + +#define SDIOH_TYPE_ARASAN_HDK 1 +#define SDIOH_TYPE_BCM27XX 2 +#ifdef BCMINTERNAL +#define SDIOH_TYPE_JINVANI_GOLD 3 +#endif +#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */ +#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */ +#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */ + +/* For linux, allow yielding for dongle */ +#if defined(linux) && defined(BCMDONGLEHOST) +#define BCMSDYIELD +#endif + +/* Expected card status value for CMD7 */ +#define SDIOH_CMD7_EXP_STATUS 0x00001E00 + +#define RETRIES_LARGE 100000 +#ifdef BCMQT +extern void sdstd_os_yield(sdioh_info_t *sd); +#define RETRIES_SMALL 10000 +#else +#define sdstd_os_yield(sd) do {} while (0) +#define RETRIES_SMALL 100 +#endif + +#define 
USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +#define USE_FIFO 0x8 /* Fifo vs non-fifo */ + +#define CLIENT_INTR 0x100 /* Get rid of this! */ + +#define HC_INTR_RETUNING 0x1000 + +#ifdef BCMSDIOH_TXGLOM +/* Total glom pkt can not exceed 64K + * need one more slot for glom padding packet + */ +#define SDIOH_MAXGLOM_SIZE (40+1) + +typedef struct glom_buf { + uint32 count; /* Total number of pkts queued */ + void *dma_buf_arr[SDIOH_MAXGLOM_SIZE]; /* Frame address */ + dmaaddr_t dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */ + uint16 nbytes[SDIOH_MAXGLOM_SIZE]; /* Size of each frame */ +} glom_buf_t; +#endif + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint32 curr_caps; /* max current capabilities reg */ + + osl_t *osh; /* osh handler */ + volatile char *mem_space; /* pci device memory va */ + uint lockcount; /* nest count of sdstd_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint target_dev; /* Target device ID */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + void *bcmsdh; /* handler to upper layer stack (bcmsdh) */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + int local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; /* DMA Buffer virtual address */ + dmaaddr_t dma_phys; /* DMA Buffer physical address */ + void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */ + dmaaddr_t adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */ + + /* adjustments needed to make the dma align properly */ + void *dma_start_buf; + dmaaddr_t dma_start_phys; + uint alloced_dma_size; + void *adma2_dscr_start_buf; + dmaaddr_t adma2_dscr_start_phys; + uint alloced_adma2_dscr_size; + + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + bool got_hcint; /* local interrupt flag */ + uint16 last_intrstatus; /* to cache intrstatus */ + int host_UHSISupported; /* whether UHSI is supported for HC. */ + int card_UHSI_voltage_Supported; /* whether UHSI is supported for + * Card in terms of Voltage [1.8 or 3.3]. + */ + int global_UHSI_Supp; /* type of UHSI support in both host and card. 
+ * HOST_SDR_UNSUPP: capabilities not supported/matched + * HOST_SDR_12_25: SDR12 and SDR25 supported + * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd + */ + volatile int sd3_dat_state; /* data transfer state used for retuning check */ + volatile int sd3_tun_state; /* tuning state used for retuning check */ + bool sd3_tuning_reqd; /* tuning requirement parameter */ + bool sd3_tuning_disable; /* tuning disable due to bus sleeping */ + uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */ +#ifdef BCMSDIOH_TXGLOM + glom_buf_t glom_info; /* pkt information used for glomming */ + uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */ +#endif +}; + +#define DMA_MODE_NONE 0 +#define DMA_MODE_SDMA 1 +#define DMA_MODE_ADMA1 2 +#define DMA_MODE_ADMA2 3 +#define DMA_MODE_ADMA2_64 4 +#define DMA_MODE_AUTO -1 + +#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE)) + +/* States for Tuning and corr data */ +#define TUNING_IDLE 0 +#define TUNING_START 1 +#define TUNING_START_AFTER_DAT 2 +#define TUNING_ONGOING 3 + +#define DATA_TRANSFER_IDLE 0 +#define DATA_TRANSFER_ONGOING 1 + +#define CHECK_TUNING_PRE_DATA 1 +#define CHECK_TUNING_POST_DATA 2 + +#ifdef DHD_DEBUG +#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01 +#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00 +#endif + +/************************************************************ + * Internal interfaces: per-port references into bcmsdstd.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdstd_devintr_on(sdioh_info_t *sd); +extern void sdstd_devintr_off(sdioh_info_t *sd); + +/* Enable/disable interrupts for local controller events */ +extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err); +extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err); + +/* Wait for specified interrupt and error bits to be set */ +extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err); + +/************************************************************** + * Internal interfaces: bcmsdstd.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdstd_reg_map(osl_t *osh, dmaaddr_t addr, int size); +extern void sdstd_reg_unmap(osl_t *osh, dmaaddr_t addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdstd_register_irq(sdioh_info_t *sd, uint irq); +extern void sdstd_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void sdstd_lock(sdioh_info_t *sd); +extern void sdstd_unlock(sdioh_info_t *sd); +extern void sdstd_waitlockfree(sdioh_info_t *sd); + +/* OS-specific wrappers for safe concurrent register access */ +extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags); +extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags); + +/* OS-specific wait-for-interrupt-or-status */ +extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits); + +/* used by bcmsdstd_linux [implemented in sdstd] */ +extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd); +extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd); +extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd); +extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param); +extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd); +extern int 
sdstd_3_get_tune_state(sdioh_info_t *sd); +extern int sdstd_3_get_data_state(sdioh_info_t *sd); +extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state); +extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state); +extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd); +extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd); +extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode); + +/* used by sdstd [implemented in bcmsdstd_linux/ndis] */ +extern void sdstd_3_start_tuning(sdioh_info_t *sd); +extern void sdstd_3_osinit_tuning(sdioh_info_t *sd); +extern void sdstd_3_osclean_tuning(sdioh_info_t *sd); + +extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); +#endif /* _BCM_SD_STD_H */ diff --git a/bcmdhd.101.10.361.x/bcmsdstd_linux.c b/bcmdhd.101.10.361.x/bcmsdstd_linux.c new file mode 100755 index 0000000..be635ae --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsdstd_linux.c @@ -0,0 +1,690 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver - linux portion + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#include /* request_irq() */ +#include +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* SDIO Host Controller Spec header file */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ +#include +#include + +extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type, + uint bus_num, uint slot_num); +extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh); + +/* Extern functions for sdio power save */ +extern uint8 sdstd_turn_on_clock(sdioh_info_t *sd); +extern uint8 sdstd_turn_off_clock(sdioh_info_t *sd); +/* Extern variable for sdio power save. This is enabled or disabled using the IOCTL call */ +extern uint sd_3_power_save; + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; + timer_list_compat_t tuning_timer; + int tuning_timer_exp; + atomic_t timer_enab; + struct tasklet_struct tuning_tasklet; +}; + +#define SDSTD_WAITBITS_TIMEOUT (5 * HZ) /* seconds * HZ */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) /* XXX Doesn't handle CONFIG_PREEMPT? 
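+ * in_interrupt() only reflects hard/soft IRQ context, so it cannot
+ * see preempt-disabled sections such as code holding a spinlock.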
*/ +#endif + +static void +sdstd_3_ostasklet(ulong data); +static void +sdstd_3_tuning_timer(ulong data); + +/* Interrupt handler */ +static irqreturn_t +sdstd_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + unsigned long flags; + sd = (sdioh_info_t *)dev_id; + sdos = (struct sdos_info *)sd->sdos_info; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + if (sdstd_3_is_retuning_int_set(sd)) { + /* for 3.0 host, retuning request might come in this path */ + /* * disable ISR's */ + local_irq_save(flags); + + if (sdstd_3_check_and_set_retuning(sd)) + tasklet_schedule(&sdos->tuning_tasklet); + + /* * enable back ISR's */ + local_irq_restore(flags); + + /* * disable tuning isr signaling */ + sdstd_3_disable_retuning_int(sd); + /* * note: check_client_intr() checks for intmask also to + wakeup. so be careful to use sd->intmask to disable + re-tuning ISR. + */ + } + ours = check_client_intr(sd); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sd_trace(("INTR->WAKE\n")); +/* sdos = (struct sdos_info *)sd->sdos_info; */ + wake_up_interruptible(&sdos->intr_wait_queue); + } + return IRQ_RETVAL(ours); + } +} + +/* Register with Linux for interrupts */ +int +sdstd_register_irq(sdioh_info_t *sd, uint irq) +{ + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdstd_isr, IRQF_SHARED, "bcmsdstd", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } + return SUCCESS; +} + +/* Free Linux irq */ +void +sdstd_free_irq(uint irq, sdioh_info_t *sd) +{ + free_irq(irq, sd); +} + +/* Map Host controller registers */ + +uint32 * +sdstd_reg_map(osl_t *osh, dmaaddr_t addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +sdstd_reg_unmap(osl_t *osh, dmaaddr_t addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} + +int +sdstd_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + atomic_set(&sdos->timer_enab, FALSE); + init_waitqueue_head(&sdos->intr_wait_queue); + return BCME_OK; +} + +/* initilize tuning related OS structures */ +void +sdstd_3_osinit_tuning(sdioh_info_t *sd) +{ + struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info; + uint8 timer_count = sdstd_3_get_tuning_exp(sdos->sd); + + sd_trace(("%s Enter\n", __FUNCTION__)); + + init_timer_compat(&sdos->tuning_timer, sdstd_3_tuning_timer, sdos); + if (timer_count == CAP3_RETUNING_TC_DISABLED || timer_count > CAP3_RETUNING_TC_1024S) { + sdos->tuning_timer_exp = 0; + } else { + sdos->tuning_timer_exp = 1 << (timer_count - 1); + } + tasklet_init(&sdos->tuning_tasklet, sdstd_3_ostasklet, (ulong)sdos); + if (sdos->tuning_timer_exp) { + timer_expires(&sdos->tuning_timer) = jiffies + sdos->tuning_timer_exp * HZ; + add_timer(&sdos->tuning_timer); + atomic_set(&sdos->timer_enab, TRUE); + } +} + +/* finalize tuning related OS structures */ +void +sdstd_3_osclean_tuning(sdioh_info_t *sd) +{ + struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info; + if (atomic_read(&sdos->timer_enab) == TRUE) { + /* disable timer if it was running */ + del_timer_sync(&sdos->tuning_timer); + atomic_set(&sdos->timer_enab, FALSE); + } + 
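+ /* make sure a tasklet scheduled by a late retuning interrupt has finished */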
tasklet_kill(&sdos->tuning_tasklet); +} + +static void +sdstd_3_ostasklet(ulong data) +{ + struct sdos_info *sdos = (struct sdos_info *)data; + int tune_state = sdstd_3_get_tune_state(sdos->sd); + int data_state = sdstd_3_get_data_state(sdos->sd); + if ((tune_state == TUNING_START) || (tune_state == TUNING_ONGOING) || + (tune_state == TUNING_START_AFTER_DAT)) { + return; + } + else if (data_state == DATA_TRANSFER_IDLE) + sdstd_3_set_tune_state(sdos->sd, TUNING_START); + else if (data_state == DATA_TRANSFER_ONGOING) + sdstd_3_set_tune_state(sdos->sd, TUNING_START_AFTER_DAT); +} + +static void +sdstd_3_tuning_timer(ulong data) +{ + struct sdos_info *sdos = (struct sdos_info *)data; +/* uint8 timeout = 0; */ + unsigned long int_flags; + + sd_trace(("%s: enter\n", __FUNCTION__)); + /* schedule tasklet */ + /* * disable ISR's */ + local_irq_save(int_flags); + if (sdstd_3_check_and_set_retuning(sdos->sd)) + tasklet_schedule(&sdos->tuning_tasklet); + + /* * enable back ISR's */ + local_irq_restore(int_flags); +} + +void sdstd_3_start_tuning(sdioh_info_t *sd) +{ + int tune_state; + unsigned long int_flags = 0; + unsigned int timer_enab; + struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info; + sd_trace(("%s: enter\n", __FUNCTION__)); + /* * disable ISR's */ + local_irq_save(int_flags); + timer_enab = atomic_read(&sdos->timer_enab); + + tune_state = sdstd_3_get_tune_state(sd); + + if (tune_state == TUNING_ONGOING) { + /* do nothing */ + local_irq_restore(int_flags); + goto exit; + } + /* change state */ + sdstd_3_set_tune_state(sd, TUNING_ONGOING); + /* * enable ISR's */ + local_irq_restore(int_flags); + sdstd_3_clk_tuning(sd, sdstd_3_get_uhsi_clkmode(sd)); +#ifdef BCMSDIOH_STD_TUNING_WAR + /* + * Observed intermittent SDIO command error after re-tuning done + * successfully. Re-tuning twice is giving much reliable results. + */ + sdstd_3_clk_tuning(sd, sdstd_3_get_uhsi_clkmode(sd)); +#endif /* BCMSDIOH_STD_TUNING_WAR */ + /* * disable ISR's */ + local_irq_save(int_flags); + sdstd_3_set_tune_state(sd, TUNING_IDLE); + /* * enable ISR's */ + local_irq_restore(int_flags); + + /* enable retuning intrrupt */ + sdstd_3_enable_retuning_int(sd); + + /* start retuning timer if enabled */ + if ((sdos->tuning_timer_exp) && (timer_enab)) { + if (sd->sd3_tuning_reqd) { + timer_expires(&sdos->tuning_timer) = jiffies + sdos->tuning_timer_exp * HZ; + mod_timer(&sdos->tuning_timer, timer_expires(&sdos->tuning_timer)); + } + } +exit: + return; + +} + +void +sdstd_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable && !sd->lockcount) + sdstd_devintr_on(sd); + else + sdstd_devintr_off(sd); + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +sdstd_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + int wait_count = 0; + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + while (sd->lockcount) + { + spin_unlock_irqrestore(&sdos->lock, flags); + yield(); + spin_lock_irqsave(&sdos->lock, flags); + if (++wait_count == 25000) { + if (!(sd->lockcount == 0)) { + sd_err(("%s: ERROR: sd->lockcount == 0\n", __FUNCTION__)); + } + } + } + /* PR86684: Add temporary debugging print */ + if (wait_count) + printk("sdstd_lock: wait count = %d\n", wait_count); + sdstd_devintr_off(sd); + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); + if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3)) + sdstd_turn_on_clock(sd); +} + +/* Enable client interrupt */ +void +sdstd_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { + sdstd_devintr_on(sd); + } + spin_unlock_irqrestore(&sdos->lock, flags); + if (sd_3_power_save) + { + if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && + (sd->version == HOST_CONTR_VER_3)) + sdstd_turn_off_clock(sd); + } +} + +void +sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags) +{ + struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info; + spin_lock_irqsave(&sdos->lock, *flags); +} +void +sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags) +{ + struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info; + spin_unlock_irqrestore(&sdos->lock, *flags); +} + +void +sdstd_waitlockfree(sdioh_info_t *sd) +{ + if (sd->lockcount) { + printk("wait lock free\n"); + while (sd->lockcount) + { + yield(); + } + } +} + +#ifdef BCMQT +void +sdstd_os_yield(sdioh_info_t *sd) +{ +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 29)) +/* + * FC4/11 issue on QT if driver hogs > 10s of CPU causing: + * BUG: soft lockup detected on CPU#0! 
+ * + * XXX Hack: For now, interleave yielding of CPU when we're spinning waiting for + * XXX register status + */ + yield(); +#endif +} +#endif /* BCMQT */ + +/* Returns 0 for success, -1 for interrupted, -2 for timeout */ +int +sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool local_yield, uint16 *bits) +{ + struct sdos_info *sdos; + int rc = 0; + + sdos = (struct sdos_info *)sd->sdos_info; + +#ifndef BCMSDYIELD + ASSERT(!local_yield); +#endif + sd_trace(("%s: int 0x%02x err 0x%02x yield %d canblock %d\n", + __FUNCTION__, norm, err, local_yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + sd->last_intrstatus = 0; + +#ifdef BCMSDYIELD + if (local_yield && BLOCKABLE()) { + /* Enable interrupts, wait for the indication, then disable */ + sdstd_intrs_on(sd, norm, err); + rc = wait_event_interruptible_timeout(sdos->intr_wait_queue, + (sd->got_hcint), + SDSTD_WAITBITS_TIMEOUT); + if (rc < 0) + rc = -1; /* interrupted */ + else if (rc == 0) + rc = -2; /* timeout */ + else + rc = 0; /* success */ + sdstd_intrs_off(sd, norm, err); + } else +#endif /* BCMSDYIELD */ + { + sdstd_spinbits(sd, norm, err); + } + + sd_trace(("%s: last_intrstatus 0x%04x\n", __FUNCTION__, sd->last_intrstatus)); + + *bits = sd->last_intrstatus; + + return rc; +} + +#ifdef DHD_DEBUG +void sdstd_enable_disable_periodic_timer(sdioh_info_t *sd, uint val) +{ + struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info; + + if (val == SD_DHD_ENABLE_PERIODIC_TUNING) { + /* start of tuning timer */ + timer_expires(&sdos->tuning_timer) = jiffies + sdos->tuning_timer_exp * HZ; + mod_timer(&sdos->tuning_timer, timer_expires(&sdos->tuning_timer)); + } + if (val == SD_DHD_DISABLE_PERIODIC_TUNING) { + /* stop periodic timer */ + del_timer_sync(&sdos->tuning_timer); + } +} +#endif /* debugging purpose */ + +/* forward declarations for PCI probe and remove functions. */ +static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev); + +/** + * pci id table + */ +static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = { + { vendor: PCI_ANY_ID, + device: PCI_ANY_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + class: 0, + class_mask: 0, + driver_data: 0, + }, + { 0, 0, 0, 0, 0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid); + +/** + * SDIO Host Controller pci driver info + */ +static struct pci_driver bcmsdh_pci_driver = { + node: {&(bcmsdh_pci_driver.node), &(bcmsdh_pci_driver.node)}, + name: "bcmsdh", + id_table: bcmsdh_pci_devid, + probe: bcmsdh_pci_probe, + remove: bcmsdh_pci_remove, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + save_state: NULL, +#endif + suspend: NULL, + resume: NULL, + }; + +extern uint sd_pci_slot; /* Force detection to a particular PCI */ + /* slot only . Allows for having multiple */ + /* WL devices at once in a PC */ + /* Only one instance of dhd will be */ + /* usable at a time */ + /* Upper word is bus number, */ + /* lower word is slot number */ + /* Default value of 0xffffffff turns this */ + /* off */ +module_param(sd_pci_slot, uint, 0); + +/** + * Detect supported SDIO Host Controller and attach if found. + * + * Determine if the device described by pdev is a supported SDIO Host + * Controller. If so, attach to it and attach to the target device. 
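+ *
+ * Returns 0 on success, -ENODEV for an unsupported or filtered-out
+ * device, and -ENOMEM when attach fails.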
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ osl_t *osh = NULL;
+ sdioh_info_t *sdioh = NULL;
+ int rc;
+
+ if (sd_pci_slot != 0xFFFFffff) {
+ if (pdev->bus->number != (sd_pci_slot>>16) ||
+ PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+ sd_err(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Found compatible SDIOHC"
+ :"Probing unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
+ pdev->device));
+ return -ENODEV;
+ }
+ sd_err(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Using compatible SDIOHC"
+ :"WARNING, forced use of unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
+ }
+
+ if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+ (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+ uint32 config_reg;
+
+ sd_err(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+ if (!(osh = osl_attach(pdev, SDIO_BUS, TRUE))) {
+ sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+ /*
+ * Set MMC_SD_DIS bit in FlashMedia Controller.
+ * Disabling the SD/MMC Controller in the FlashMedia Controller
+ * allows the Standard SD Host Controller to take over control
+ * of the SD Slot.
+ */
+ config_reg |= 0x02;
+ OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+ osl_detach(osh);
+ }
+ /* match this pci device with what we support */
+ /* we can't solely rely on this to believe it is our SDIO Host Controller! */
+ if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+ if (pdev->vendor == VENDOR_BROADCOM) {
+ sd_err(("%s: Unknown Broadcom device (vendor: %#x, device: %#x).\n",
+ __FUNCTION__, pdev->vendor, pdev->device));
+ }
+ return -ENODEV;
+ }
+
+ /* this is a pci device we might support */
+ sd_err(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+ __FUNCTION__,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq));
+
+ /* use bcmsdh_query_device() to get the vendor ID of the target device so
+ * it will eventually appear in the Broadcom string on the console
+ */
+
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(pdev, SDIO_BUS, TRUE))) {
+ sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* map to address where host can access */
+ pci_set_master(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ sd_err(("%s: Cannot enable PCI device\n", __FUNCTION__));
+ goto err;
+ }
+
+ sdioh = sdioh_attach(osh, (void *)(ulong)pci_resource_start(pdev, 0), pdev->irq);
+ if (sdioh == NULL) {
+ sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ sdioh->bcmsdh = bcmsdh_probe(osh, &pdev->dev, sdioh, NULL, PCI_BUS, -1, -1);
+ if (sdioh->bcmsdh == NULL) {
+ sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ pci_set_drvdata(pdev, sdioh);
+ return 0;
+
+err:
+ if (sdioh != NULL)
+ sdioh_detach(osh, sdioh);
+ if (osh != NULL)
+ osl_detach(osh);
+ return -ENOMEM;
+}
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+ sdioh_info_t *sdioh;
+ osl_t *osh;
+
+ sdioh = pci_get_drvdata(pdev);
+ if (sdioh == NULL) {
+ sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+ return;
+ }
+
+ osh = sdioh->osh;
+ 
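+ /* sdioh_detach() below frees sdioh, so its osl handle is saved above */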
bcmsdh_remove(sdioh->bcmsdh); + sdioh_detach(osh, sdioh); + osl_detach(osh); +} + +int bcmsdh_register_client_driver(void) +{ + return pci_module_init(&bcmsdh_pci_driver); +} + +void bcmsdh_unregister_client_driver(void) +{ + pci_unregister_driver(&bcmsdh_pci_driver); +} diff --git a/bcmdhd.101.10.361.x/bcmspibrcm.c b/bcmdhd.101.10.361.x/bcmspibrcm.c new file mode 100755 index 0000000..e58d0cf --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmspibrcm.c @@ -0,0 +1,1922 @@ +/* + * Broadcom BCMSDH to gSPI Protocol Conversion Layer + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#ifdef BCMDONGLEHOST +#define HSMODE +#else +#endif /* BCMDONGLEHOST */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* SDIO device core hardware definitions. */ +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ +#include /* SDIO Device and Protocol Specs */ + +#if defined(linux) +#include +#endif + +/* XXX Quick NDIS hack */ +#ifdef NDIS +#define inline __inline +#define PCI_CFG_VID 0 +#define PCI_CFG_BAR0 0x10 +#endif + +#include +#ifdef BCMSPI_ANDROID +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +#else +#include +#endif /* BCMSPI_ANDROID */ + +/* these are for the older cores... for newer cores we have control for each of them */ +#define F0_RESPONSE_DELAY 16 +#define F1_RESPONSE_DELAY 16 +#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY + +#define GSPI_F0_RESP_DELAY 0 +#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY +#define GSPI_F2_RESP_DELAY 0 +#define GSPI_F3_RESP_DELAY 0 + +#define CMDLEN 4 + +/* Globals */ +#if defined(BCMDBG) || defined(DHD_DEBUG) +uint sd_msglevel = SDH_ERROR_VAL; +#else +uint sd_msglevel = 0; +#endif /* BCMDBG || DHD_DEBUG */ + +uint sd_hiok = FALSE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ + +uint sd_divisor = 2; +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ + +uint8 spi_outbuf[SPI_MAX_PKT_LEN]; +uint8 spi_inbuf[SPI_MAX_PKT_LEN]; + +/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits + * assuming we will not exceed F0 response delay > 100 bytes at 48MHz. 
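+ * (128 = 4 command bytes + up to ~100 response-delay bytes, leaving
+ * headroom for the status bytes.)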
+ */ +#define BUF2_PKT_LEN 128 +uint8 spi_outbuf2[BUF2_PKT_LEN]; +uint8 spi_inbuf2[BUF2_PKT_LEN]; +#ifdef BCMSPI_ANDROID +uint *dhd_spi_lockcount = NULL; +#endif /* BCMSPI_ANDROID */ + +#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) +#define SPISWAP_WD4(x) bcmswap32(x); +#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \ + (bcmswap16((x & 0xffff0000) >> 16) << 16); +#else +/* XXX Some SPI host controller changes endianness when writing/reading +* to/from SPI device TX/RX register in case the bits_per_word is more than 1 byte. +*/ +#define SPISWAP_WD4(x) x; +#define SPISWAP_WD2(x) bcmswap32by16(x); +#endif + +/* Prototypes */ +static bool bcmspi_test_card(sdioh_info_t *sd); +static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd); +static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode); +static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen); +static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, + uint8 *data); +static int bcmspi_driver_init(sdioh_info_t *sd); +static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, + uint32 *data); +static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer); +static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg); + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + if (spi_osinit(sd) != 0) { + sd_err(("%s: spi_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + +#ifndef BCMSPI_ANDROID + sd->bar0 = bar0; +#endif /* !BCMSPI_ANDROID */ + sd->irq = irq; +#ifndef BCMSPI_ANDROID + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + sd->intr_handler_valid = FALSE; +#endif /* !BCMSPI_ANDROID */ + + /* Set defaults */ + sd->use_client_ints = TRUE; + sd->sd_use_dma = FALSE; /* DMA Not supported */ + + /* Spi device default is 16bit mode, change to 4 when device is changed to 32bit + * mode + */ + sd->wordlen = 2; + +#ifdef BCMSPI_ANDROID + dhd_spi_lockcount = &sd->lockcount; +#endif /* BCMSPI_ANDROID */ + +#ifndef BCMSPI_ANDROID + if (!spi_hw_attach(sd)) { + sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__)); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } +#endif /* !BCMSPI_ANDROID */ + + if (bcmspi_driver_init(sd) != SUCCESS) { + sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__)); +#ifndef BCMSPI_ANDROID + spi_hw_detach(sd); +#endif /* !BCMSPI_ANDROID */ + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (spi_register_irq(sd, irq) != SUCCESS) { + sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); +#ifndef BCMSPI_ANDROID + spi_hw_detach(sd); +#endif /* !BCMSPI_ANDROID */ + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return 
(NULL); + } + + sd_trace(("%s: Done\n", __FUNCTION__)); + + return sd; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if (sd) { + sd_err(("%s: detaching from hardware\n", __FUNCTION__)); + spi_free_irq(sd->irq, sd); +#ifndef BCMSPI_ANDROID + spi_hw_detach(sd); +#endif /* !BCMSPI_ANDROID */ + spi_osfree(sd); +#ifdef BCMSPI_ANDROID + dhd_spi_lockcount = NULL; +#endif /* !BCMSPI_ANDROID */ + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ +#ifndef BCMSPI_ANDROID + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; +#endif /* !BCMSPI_ANDROID */ + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) || defined(BCMDBG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return 0; +} +#endif + +/* Provide dstatus bits of spi-transaction for dhd layers. */ +extern uint32 +sdioh_get_dstatus(sdioh_info_t *sd) +{ + return sd->card_dstatus; +} + +extern void +sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev) +{ + sd->chip = chip; + sd->chiprev = chiprev; +} + +extern void +sdioh_dwordmode(sdioh_info_t *sd, bool set) +{ + uint8 reg = 0; + int status; + + if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } + + if (set) { + reg |= DWORD_PKT_LEN_EN; + sd->dwordmode = TRUE; + sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */ + } else { + reg &= ~DWORD_PKT_LEN_EN; + sd->dwordmode = FALSE; + sd->client_block_size[SPI_FUNC_2] = 2048; + } + + if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } +} + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_SPIERRSTATS, + IOV_RESP_DELAY_ALL +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, 
IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, +#ifdef BCMDBG + {"sd_hciregs", IOV_HCIREGS, 0, IOVT_BUFFER, 0 }, +#endif + {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) }, + {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, uint len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + uint val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; +/* + sdioh_regs_t *regs; +*/ + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* XXX Copied from dhd, copied from wl; certainly overkill here? */ + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + +#ifndef BCMSPI_ANDROID + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!spi_start_clock(si, (uint16)sd_divisor)) { + sd_err(("%s: set clock failed\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + } + break; +#endif /* !BCMSPI_ANDROID */ + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + + if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) { + sd_err(("%s: Failed changing highspeed mode 
to %d.\n", + __FUNCTION__, sd_hiok)); + bcmerror = BCME_ERROR; + return ERROR; + } + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + case IOV_GVAL(IOV_DEVREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + /* XXX Should copy for alignment reasons */ + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + +#ifdef BCMDBG + case IOV_GVAL(IOV_HCIREGS): + { + struct bcmstrbuf b; + bcm_binit(&b, arg, len); + + spi_lock(si); + bcm_bprintf(&b, "Unsupported\n"); + spi_unlock(si); + + if (!b.size) + bcmerror = BCME_BUFTOOSHORT; + break; + } +#endif /* BCMDBG */ + + case IOV_GVAL(IOV_SPIERRSTATS): + { + bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t)); + break; + } + + case IOV_SVAL(IOV_SPIERRSTATS): + { + bzero(&si->spierrstats, sizeof(struct spierrstats_t)); + break; + } + + case IOV_GVAL(IOV_RESP_DELAY_ALL): + int_val = (int32)si->resp_delay_all; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RESP_DELAY_ALL): + si->resp_delay_all = (bool)int_val; + int_val = STATUS_ENABLE|INTR_WITH_STATUS; + if (si->resp_delay_all) + int_val |= RESP_DELAY_ALL; + else { + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1, + F1_RESPONSE_DELAY) != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + } + + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val) + != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + /* XXX Remove protective lock after clients all clean... */ + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + + /* WAR for gSPI for PR55208: Read SFC_WF_TERM before write for write to be + * successful on address SBSDIO_FUNC1_FRAMECTRL. 
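 + * The dummy read below implements that workaround: the value read is discarded; + * only the read access itself on the bus matters.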
+ */ + if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) { + uint8 dummy_data; + status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data); + if (status) { + sd_err(("sdioh_cfg_read() failed.\n")); + return status; + } + } + + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 cis_byte; + uint16 *cis = (uint16 *)cisd; + uint bar0 = SI_ENUM_BASE(sd->sih); + int status; + uint8 data; + + sd_trace(("%s: Func %d\n", __FUNCTION__, func)); + + spi_lock(sd); + + /* Set sb window address to 0x18000000 */ + data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data); + if (status == SUCCESS) { + data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + if (status == SUCCESS) { + data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + offset = CC_SROM_OTP; /* OTP offset in chipcommon. */ + for (count = 0; count < length/2; count++) { + if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + *cis = (uint16)cis_byte; + cis++; + offset += 2; + } + + spi_unlock(sd); + + return (BCME_OK); +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + spi_lock(sd); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + if (rw == SDIOH_READ) { + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr)); + } else { + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + } + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) { + spi_unlock(sd); + return status; + } + + if (rw == SDIOH_READ) { + *byte = (uint8)data; + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte)); + } + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + spi_lock(sd); + + if (rw == SDIOH_READ) + status = bcmspi_card_regread(sd, func, addr, nbytes, word); + else + status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word); + + spi_unlock(sd); + return (status == SUCCESS ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + + spi_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks. */ + while (buflen > 0) { + len = MIN(sd->client_block_size[func], buflen); + if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + sd_err(("%s: bcmspi_card_buf %s failed\n", + __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write")); + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +/* This function allows a write to the gspi bus when another rd/wr function is deep down the + * call stack. Its main aim is to keep such spi writes simple rather than recursive. + * e.g. when there is a need to program the response delay on the fly after detecting the + * SPI-func, this call allows the response delay to be programmed. + */ +static int +bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte) +{ + uint32 cmd_arg; + uint32 datalen = 1; + uint32 hostlen; + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + +#ifdef BCMDBG + /* Fill up buffers with a value that generates known dutycycle on MOSI/MISO lines. */ + memset(spi_outbuf2, 0xee, BUF2_PKT_LEN); + memset(spi_inbuf2, 0xee, BUF2_PKT_LEN); +#endif /* BCMDBG */ + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + } else { + sd_err(("%s: Host is %d bit spid, could not create SPI command.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (datalen != 0) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte); + } + } + + /* +4 for cmd, +4 for dstatus */ + hostlen = datalen + 8; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen); + + /* Last 4bytes are dstatus. Device is configured to return status bits.
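 + * The status word follows the command and payload on the wire, so below it is + * extracted from the receive buffer at offset CMDLEN + datalen.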
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +/* Program the response delay corresponding to the spi function */ +static int +bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay) +{ + if (sd->resp_delay_all == FALSE) + return (BCME_OK); + + if (sd->prev_fun == func) + return (BCME_OK); + + if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY) + return (BCME_OK); + + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay); + + /* Remember the function so we can avoid reprogramming the resp-delay on the next iteration */ + sd->prev_fun = func; + + return (BCME_OK); + +} + +#define GSPI_RESYNC_PATTERN 0x0 + +/* A resync pattern is a 32bit MOSI line with all zeros. It's a special command in gSPI. + * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is + * synchronised and all queued requests are cancelled. + */ +static int +bcmspi_resync_f1(sdioh_info_t *sd) +{ + uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0; + +#ifdef BCMDBG + /* Fill up buffers with a value that generates known dutycycle on MOSI/MISO lines. */ + memset(spi_outbuf2, 0xee, BUF2_PKT_LEN); + memset(spi_inbuf2, 0xee, BUF2_PKT_LEN); +#endif /* BCMDBG */ + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + *(uint32 *)spi_outbuf2 = cmd_arg; + + /* for Write, put the data into the output buffer */ + *(uint32 *)&spi_outbuf2[CMDLEN] = data; + + /* +4 for cmd, +4 for dstatus */ + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8); + + /* Last 4bytes are dstatus. Device is configured to return status bits.
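 + * For the resync pattern datalen stays 0 (ROUNDUP of 0 is 0), so the status word + * is picked up at offset CMDLEN, immediately after the command word.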
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +uint32 dstatus_count = 0; + +static int +bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg) +{ + uint32 dstatus = sd->card_dstatus; + struct spierrstats_t *spierrstats = &sd->spierrstats; + int err = SUCCESS; + + sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus)); + + /* Store dstatus of last few gSPI transactions */ + spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus; + spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg; + dstatus_count++; + + if (sd->card_init_done == FALSE) + return err; + + if (dstatus & STATUS_DATA_NOT_AVAILABLE) { + spierrstats->dna++; + sd_trace(("Read data not available on F1 addr = 0x%x\n", + GFIELD(cmd_arg, SPI_REG_ADDR))); + /* Clear dna bit */ + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE); + } + + if (dstatus & STATUS_UNDERFLOW) { + spierrstats->rdunderflow++; + sd_err(("FIFO underflow happened due to current F2 read command.\n")); + } + + if (dstatus & STATUS_OVERFLOW) { + spierrstats->wroverflow++; + sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n")); + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW); + bcmspi_resync_f1(sd); + sd_err(("Recovering from F1 FIFO overflow.\n")); + } + + if (dstatus & STATUS_F2_INTR) { + spierrstats->f2interrupt++; + sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_F3_INTR) { + spierrstats->f3interrupt++; + sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_HOST_CMD_DATA_ERR) { + spierrstats->hostcmddataerr++; + sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n")); + } + + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + spierrstats->f2pktavailable++; + sd_trace(("Packet is available/ready in F2 TX FIFO\n")); + sd_trace(("Packet length = %d\n", sd->dwordmode ? 
+ ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) : + ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT))); + } + + if (dstatus & STATUS_F3_PKT_AVAILABLE) { + spierrstats->f3pktavailable++; + sd_err(("Packet is available/ready in F3 TX FIFO\n")); + sd_err(("Packet length = %d\n", + (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT)); + } + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ + return 0; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return SUCCESS; +} + +#ifdef BCMINTERNAL +extern SDIOH_API_RC +sdioh_test_diag(sdioh_info_t *sd) +{ + sd_err(("%s: Implement me\n", __FUNCTION__)); + return (0); +} +#endif /* BCMINTERNAL */ + +/* + * Private/Static work routines + */ +static int +bcmspi_host_init(sdioh_info_t *sd) +{ + + /* Default power on mode */ + sd->sd_mode = SDIOH_MODE_SPI; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = 1; + + return (SUCCESS); +} + +static int +get_client_blocksize(sdioh_info_t *sd) +{ + uint32 regdata[2]; + int status; + + /* Find F1/F2/F3 max packet size */ + if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG, + 8, regdata)) != SUCCESS) { + return status; + } + + sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n", + regdata[0], regdata[1])); + + sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2; + sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1])); + ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1); + + sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2; + sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2])); + ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2); + + sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2; + sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3])); + ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3); + + return 0; +} + +static int +bcmspi_client_init(sdioh_info_t *sd) +{ + uint32 status_en_reg = 0; + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + +#ifndef BCMSPI_ANDROID +#ifdef HSMODE + if (!spi_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#else + /* Start at ~400KHz clock rate for initialization */ + if (!spi_start_clock(sd, 128)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* HSMODE */ +#endif /* !BCMSPI_ANDROID */ + + if (!bcmspi_host_device_init_adapt(sd)) { + sd_err(("bcmspi_host_device_init_adapt failed\n")); + return ERROR; + } + + if (!bcmspi_test_card(sd)) { + sd_err(("bcmspi_test_card failed\n")); + return ERROR; + } + + sd->num_funcs = SPI_MAX_IOFUNCS; + + get_client_blocksize(sd); + + /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */ + bcmspi_resync_f1(sd); + + sd->dwordmode = FALSE; + + bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg); + + sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__)); + status_en_reg |= INTR_WITH_STATUS; + + if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, + status_en_reg & 0xff) != SUCCESS) { + sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__)); + return ERROR; + } + +#ifndef HSMODE +#ifndef BCMSPI_ANDROID + /* After configuring for High-Speed mode, set the desired clock rate. 
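 + * The divisor of 4 below replaces the conservative divisor of 128 used above for + * the ~400KHz initialization phase.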
*/ + if (!spi_start_clock(sd, 4)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* !BCMSPI_ANDROID */ +#endif /* HSMODE */ + + /* check to see if the response delay needs to be programmed properly */ + { + uint32 f1_respdelay = 0; + bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay); + if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) { + /* older sdiodevice core, which has no separate resp delay for each func */ + sd_err(("older corerev < 4 so use the same resp delay for all funcs\n")); + sd->resp_delay_new = FALSE; + } + else { + /* newer sdiodevice core, which has a separate resp delay for each func */ + int ret_val; + sd->resp_delay_new = TRUE; + sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n")); + sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n", + GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY, + GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY)); + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1, + GSPI_F0_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1, + GSPI_F1_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1, + GSPI_F2_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1, + GSPI_F3_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F3\n", __FUNCTION__)); + return ERROR; + } + } + } + +/* XXX:Cleanup after finding a common place in dhd or bcmsdh layer to do this */ +#ifndef BCMDONGLEHOST + if ((status = bcmspi_card_regwrite(sd, 1, SBSDIO_FUNC1_SBADDRLOW, 4, + SB_ENUM_BASE >> 8)) != SUCCESS) + return FALSE; +#endif + sd->card_init_done = TRUE; + +#ifdef BCMDBG + { + uint8 regbuf[32]; + int j; + bzero(regbuf, 32); + /* Read default F0 registers */ + sd_trace(("Reading default values of first 32(8bit) F0 spid regs again before" + " quitting init.\n")); + bcmspi_card_regread(sd, 0, SPID_CONFIG, 32, (uint32 *)regbuf); + for (j = 0; j < 32; j++) + sd_trace(("regbuf[%d]=0x%x \n", j, regbuf[j])); + sd_trace(("\n")); + } +#endif /* BCMDBG */ + /* get the device rev to program the prop respdelays */ + + return SUCCESS; +} + +/* XXX What is clock rate at high and low speeds ?
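 + * A rough estimate from the comments in this file: if divisor 128 corresponds to + * ~400KHz and the rate scales inversely with the divisor, divisor 4 would come out + * around 12.8MHz. That is inferred from the comments here, not a measured figure.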
*/ +static int +bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG, + 4, &regdata)) != SUCCESS) + return status; + + sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata)); + + if (hsmode == TRUE) { + sd_trace(("Attempting to enable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + sd_trace(("Device is already in High-Speed mode.\n")); + return status; + } else { + regdata |= HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) { + return status; + } + } + } else { + sd_trace(("Attempting to disable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + regdata &= ~HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) + return status; + } + else { + sd_trace(("Device is already in Low-Speed mode.\n")); + return status; + } + } +#ifndef BCMSPI_ANDROID + spi_controller_highspeed_mode(sd, hsmode); +#endif /* !BCMSPI_ANDROID */ + + return TRUE; +} + +#define bcmspi_find_curr_mode(sd) { \ + sd->wordlen = 2; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd->wordlen = 4; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd_trace(("Silicon testability issue: regdata = 0x%x." \ + " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \ + OSL_DELAY(100000); \ +} + +#define INIT_ADAPT_LOOP 100 + +/* Adapt clock-phase-speed-bitwidth between host and device */ +static bool +bcmspi_host_device_init_adapt(sdioh_info_t *sd) +{ + uint32 wrregdata, regdata = 0; + int status; + int i; +#ifdef BCMDBG + int j; + uint8 regbuf[32]; + bzero(regbuf, 32); +#endif /* BCMDBG */ + + /* Due to a silicon testability issue, the first command from the Host + * to the device will get corrupted (first bit will be lost). So the + * Host should poll the device with a safe read request. i.e. the Host + * should try to read F0 addr 0x14 using the Fixed address mode + * (this prevents an unintended write command from being detected by the device). + */ + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + /* If device was not power-cycled it will stay in 32bit mode with + * response-delay-all bit set. Alternate across iterations so that + * the F0 read succeeds either with or without response-delay. + */ + bcmspi_find_curr_mode(sd); + sd->resp_delay_all = (i & 0x1) ?
TRUE : FALSE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = TRUE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = FALSE; + } + + /* Bail out, device not detected */ + if (i == INIT_ADAPT_LOOP) + return FALSE; + + /* Softreset the spid logic */ + if ((sd->dwordmode) || (sd->wordlen == 4)) { + bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI); + bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata); + sd_trace(("reset reg read = 0x%x\n", regdata)); + sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode, + sd->wordlen, sd->resp_delay_all)); + /* Restore default state after softreset */ + sd->wordlen = 2; + sd->dwordmode = FALSE; + } + + if (sd->wordlen == 4) { + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != + SUCCESS) + return FALSE; + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n", + regdata)); + sd_trace(("Spid power was left on.\n")); + } else { + sd_err(("Spid power was left on but signature read failed." + " Value read = 0x%x\n", regdata)); + return FALSE; + } + } else { + sd->wordlen = 2; + +#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */ + + wrregdata = (CTRL_REG_DEFAULT); + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS) + return FALSE; + sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata)); + +#ifndef HSMODE + wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY); + wrregdata &= ~HIGH_SPEED_MODE; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); +#endif /* HSMODE */ + + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) { + sd_trace(("0xfeedbead was left-shifted by 1-bit.\n")); + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, + &regdata)) != SUCCESS) + return FALSE; + } + OSL_DELAY(1000); + } + +#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH) + /* Change to host controller intr-polarity of active-high */ + /* XXX With intr-polarity active-high, host platform does not go into suspend mode + * since the pin is asserted high. + */ + wrregdata |= INTR_POLARITY; +#else + /* Change to host controller intr-polarity of active-low */ + wrregdata &= ~INTR_POLARITY; +#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */ + + sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n", + wrregdata)); + /* Change to 32bit mode */ + wrregdata |= WORD_LENGTH_32; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); + + /* Change command/data packaging in 32bit LE mode */ + sd->wordlen = 4; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS) + return FALSE; + + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Read spid passed. Value read = 0x%x\n", regdata)); + sd_trace(("Spid had power-on cycle OR spi was soft-reset \n")); + } else { + sd_err(("Stale spid reg values read as it was kept powered.
Value read =" + "0x%x\n", regdata)); + return FALSE; + } + } + +#ifdef BCMDBG + /* Read default F0 registers */ + sd_trace(("Reading default values of first 32(8bit) F0 spid regs\n")); + bcmspi_card_regread(sd, 0, SPID_CONFIG, 32, (uint32 *)regbuf); + for (j = 0; j < 32; j++) + sd_trace(("regbuf[%d]=0x%x \n", j, regbuf[j])); + sd_trace(("\n")); +#endif /* BCMDBG */ + + return TRUE; +} + +static bool +bcmspi_test_card(sdioh_info_t *sd) +{ + uint32 regdata; + int status; +#ifdef BCMDBG + uint8 regbuf[32]; + bzero(regbuf, 32); +#endif /* BCMDBG */ + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS) + return FALSE; + + if (regdata == (TEST_RO_DATA_32BIT_LE)) + sd_trace(("32bit LE regdata = 0x%x\n", regdata)); + else { + sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata)); + return FALSE; + } + +#define RW_PATTERN1 0xA0A1A2A3 +#define RW_PATTERN2 0x4B5B6B7B + + regdata = RW_PATTERN1; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN1) { + sd_err(("Write-Read spid failed. Value written = 0x%x, Value read = 0x%x\n", + RW_PATTERN1, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + regdata = RW_PATTERN2; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN2) { + sd_err(("Write-Read spid failed. Value written = 0x%x, Value read = 0x%x\n", + RW_PATTERN2, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + return TRUE; +} + +static int +bcmspi_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if ((bcmspi_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (bcmspi_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +/* Read device reg */ +static int +bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ?
0 : regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +static int +bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + int status; + uint32 cmd_arg; + uint32 dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data)); + + bcmspi_cmd_getdstatus(sd, &dstatus); + sd_trace(("dstatus =0x%x\n", dstatus)); + return SUCCESS; +} + +/* write a device register */ +static int +bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 
0 : regsize); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + return SUCCESS; +} + +/* write a device register - 1 byte */ +static int +bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +void +bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer) +{ + *dstatus_buffer = sd->card_dstatus; +} + +/* 'data' is of type uint32 whereas other buffers are of type uint8 */ +static int +bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen) +{ + uint32 i, j; + uint8 resp_delay = 0; + int err = SUCCESS; + uint32 hostlen; + uint32 spilen = 0; + uint32 dstatus_idx = 0; + uint16 templen, buslen, len, *ptr = NULL; + + sd_trace(("spi cmd = 0x%x\n", cmd_arg)); +#ifdef BCMDBG + /* Fill up buffer with known pattern */ + memset(spi_outbuf, 0xee, SPI_MAX_PKT_LEN); + memset(spi_inbuf, 0xee, SPI_MAX_PKT_LEN); +#endif /* BCMDBG */ + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + if (datalen < 4) + datalen = ROUNDUP(datalen, 4); + } else { + sd_err(("Host is %d bit spid, could not create SPI command.\n", + 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { + /* We send len field of hw-header always a mod16 size, both from host and dongle */ + if (datalen != 0) { + for (i = 0; i < datalen/4; i++) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD4(data[i]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD2(data[i]); + } + } + } + } + + /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */ + if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) { + int func = GFIELD(cmd_arg, SPI_FUNCTION); + switch (func) { + case 0: + if (sd->resp_delay_new) + resp_delay = GSPI_F0_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? 
F0_RESPONSE_DELAY : 0; + break; + case 1: + if (sd->resp_delay_new) + resp_delay = GSPI_F1_RESP_DELAY; + else + resp_delay = F1_RESPONSE_DELAY; + break; + case 2: + if (sd->resp_delay_new) + resp_delay = GSPI_F2_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0; + break; + default: + ASSERT(0); + break; + } + /* Program response delay */ + if (sd->resp_delay_new == FALSE) + bcmspi_prog_resp_delay(sd, func, resp_delay); + } + + /* +4 for cmd and +4 for dstatus */ + hostlen = datalen + 8 + resp_delay; + hostlen += dstatus_idx; +#ifdef BCMSPI_ANDROID + if (hostlen%4) { + sd_err(("Unaligned data len %d, hostlen %d\n", + datalen, hostlen)); +#endif /* BCMSPI_ANDROID */ + hostlen += (4 - (hostlen & 0x3)); +#ifdef BCMSPI_ANDROID + } +#endif /* BCMSPI_ANDROID */ +#ifdef BCMDBG + if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 1) && + (sd->dwordmode) && + (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + sd_trace(("len/~len/spilen/hostlen=0x%x/0x%x/0x%x/0x%x\n", + *ptr, ~*(ptr+1), spilen, hostlen)); + } +#endif /* BCMDBG */ + spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen); + + /* for Read, get the data into the input buffer */ + if (datalen != 0) { + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */ + for (j = 0; j < datalen/4; j++) { + if (sd->wordlen == 4) { /* 32bit spid */ + data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } + } + } + } + + dstatus_idx += (datalen + CMDLEN + resp_delay); + /* Last 4bytes are dstatus. Device is configured to return status bits. */ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else { + sd_err(("Host is %d bit machine, could not read SPI dstatus.\n", + 8 * sd->wordlen)); + return ERROR; + } + if (sd->card_dstatus == 0xffffffff) { + sd_err(("looks like not a GSPI device or device is not powered.\n")); + } + + err = bcmspi_update_stats(sd, cmd_arg); +#ifdef BCMDBG + if (err) + prhex("Overflowing frame", (uint8 *)data, datalen); +#endif /* BCMDBG */ + + return err; + +} + +static int +bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + bool write = rw == SDIOH_READ ? 0 : 1; + uint retries = 0; + + bool enable; + uint32 spilen; + + cmd_arg = 0; + + ASSERT(nbytes); + ASSERT(nbytes <= sd->client_block_size[func]); + + if (write) sd->t_cnt++; else sd->r_cnt++; + + if (func == 2) { + /* Frame len check limited by gSPI. */ + if ((nbytes > 2000) && write) { + sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes)); +#ifdef BCMDBG + prhex("Host for gSPI", (uint8 *)data, 32); +#endif /* BCMDBG */ + } + /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. 
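 + * (2048 matches the F2 block size programmed by sdioh_dwordmode() for the + * non-dwordmode case; dwordmode raises it to 4096.)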
*/ + /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */ + if (write) { + uint32 dstatus; + /* check F2 readiness using the cached dstatus first */ + bcmspi_cmd_getdstatus(sd, &dstatus); + if ((dstatus & STATUS_F2_RX_READY) == 0) { + retries = WAIT_F2RXFIFORDY; + enable = 0; + while (retries-- && !enable) { + OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000); + bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4, + &dstatus); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + if (!enable) { + struct spierrstats_t *spierrstats = &sd->spierrstats; + spierrstats->f2rxnotready++; + sd_err(("F2 FIFO is not ready to receive data.\n")); + return ERROR; + } + sd_trace(("No of retries on F2 ready %d\n", + (WAIT_F2RXFIFORDY - retries))); + } + } + } + + /* F2 transfers happen on 0 addr */ + addr = (func == 2) ? 0 : addr; + + /* In pio mode buffer is read using fixed address fifo in func 1 */ + if ((func == 1) && (fifo)) + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); + else + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); + + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write); + spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes); + if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + /* convert len to mod4 size */ + spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } else + cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen); + + if ((func == 2) && (fifo == 1)) { + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + } + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, + (write ? "write" : "read"))); + return status; + } + + /* gSPI expects that hw-header-len is equal to spi-command-len */ + if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) { + ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff)); + ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16))); + } + + if ((nbytes > 2000) && !write) { + sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes)); +#ifdef BCMDBG + prhex("Host for gSPI", (uint8 *)data, 32); +#endif /* BCMDBG */ + } + + return SUCCESS; +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + si->card_init_done = FALSE; + return bcmspi_client_init(si); +} + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} diff --git a/bcmdhd.101.10.361.x/bcmsrom.c b/bcmdhd.101.10.361.x/bcmsrom.c new file mode 100755 index 0000000..c17a2dc --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmsrom.c @@ -0,0 +1,6365 @@ +/* + * Routines to access SPROM and to parse SROM/CIS variables. + * + * Despite its file name, OTP contents are also parsed in this file.
+ * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +/* + * List of non obvious preprocessor defines used in this file and their meaning: + * DONGLEBUILD : building firmware that runs on the dongle's CPU + * BCM_DONGLEVARS : NVRAM variables can be read from OTP/S(P)ROM. + * When host may supply nvram vars in addition to the ones in OTP/SROM: + * BCMHOSTVARS : full nic / full dongle + * BCMDONGLEHOST : defined when building DHD, code executes on the host in a dongle environment. + * DHD_SPROM : defined when building a DHD that supports reading/writing to SPROM + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef BCMSDIO +#include +#include +#endif +#ifdef BCMSPI +#include +#endif + +#include +#include +#ifndef BCMUSBDEV_COMPOSITE +#define BCMUSBDEV_COMPOSITE +#endif +#if defined(BCMUSBDEV) || defined(BCMSDIO) || defined(BCMSDIODEV) +#include +#include +#include +#endif + +#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) +#include +#endif +#include /* for sprom content groking */ + +#include +#ifdef EVENT_LOG_COMPILE +#include +#endif + +#if defined(EVENT_LOG_COMPILE) && defined(BCMDBG_ERR) && defined(ERR_USE_EVENT_LOG) +#if defined(ERR_USE_EVENT_LOG_RA) +#define BS_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_BSROM_ERROR, args) +#else +#define BS_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_BSROM_ERROR, args) +#endif /* ERR_USE_EVENT_LOG_RA */ +#elif defined(BCMDBG_ERR) || defined(WLTEST) +#define BS_ERROR(args) printf args +#else +#define BS_ERROR(args) +#endif /* defined(BCMDBG_ERR) && defined(ERR_USE_EVENT_LOG) */ + +#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) +static bool BCMATTACHDATA(is_caldata_prsnt) = FALSE; +static uint16 BCMATTACHDATA(caldata_array)[SROM_MAX / 2]; +static uint8 BCMATTACHDATA(srom_sromrev); +#endif + +static const char BCMATTACHDATA(rstr_uuidstr)[] = + "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X"; +static const char BCMATTACHDATA(rstr_paddr)[] = "pa%d=0x%%x"; +static const char BCMATTACHDATA(rstr_pdata)[] = "pd%d=0x%%x"; +static const char BCMATTACHDATA(rstr_pdatah)[] = "pdh%d=0x%%x"; +static const char BCMATTACHDATA(rstr_pdatal)[] = "pdl%d=0x%%x"; +static const char BCMATTACHDATA(rstr_gci_ccreg_entry)[] = "gcr%d=0x%%x"; +static const char BCMATTACHDATA(rstr_hex)[] = "0x%x"; + +/** curmap: contains host start address of PCI BAR0 window */ +static volatile uint8* srom_offset(si_t *sih, volatile void *curmap) +{ + if (sih->ccrev <= 31) + return (volatile uint8*)curmap + PCI_BAR0_SPROM_OFFSET; + if ((sih->cccaps & CC_CAP_SROM) == 0) + return NULL; + + if 
(BUSTYPE(sih->bustype) == SI_BUS) + return (uint8 *)((uintptr)SI_ENUM_BASE(sih) + CC_SROM_OTP); + + return (volatile uint8*)curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP; +} + +#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) +#define WRITE_ENABLE_DELAY 500 /* 500 ms after write enable/disable toggle */ +#define WRITE_WORD_DELAY 20 /* 20 ms between each word write */ +#endif + +srom_info_t *sromh = NULL; + +extern char *_vars; +extern uint _varsz; +#ifdef DONGLEBUILD +char * BCMATTACHDATA(_vars_otp) = NULL; +#define DONGLE_STORE_VARS_OTP_PTR(v) (_vars_otp = (v)) +#else +#define DONGLE_STORE_VARS_OTP_PTR(v) +#endif + +#define SROM_CIS_SINGLE 1 + +#if !defined(BCMDONGLEHOST) +static int initvars_srom_si(si_t *sih, osl_t *osh, volatile void *curmap, char **vars, uint *count); +static void _initvars_srom_pci(uint8 sromrev, uint16 *srom, uint off, varbuf_t *b); +static int initvars_srom_pci(si_t *sih, volatile void *curmap, char **vars, uint *count); +static int initvars_cis_pci(si_t *sih, osl_t *osh, volatile void *curmap, char **vars, uint *count); +#endif /* !defined(BCMDONGLEHOST) */ +#if !defined(BCMUSBDEV_ENABLED) && !defined(BCMSDIODEV_ENABLED) &&\ + !defined(BCMDONGLEHOST) && !defined(BCMPCIEDEV_ENABLED) +static int initvars_flash_si(si_t *sih, char **vars, uint *count); +#endif /* !defined(BCMUSBDEV) && !defined(BCMSDIODEV) && !defined(BCMDONGLEHOST) */ +#ifdef BCMSDIO +#if !defined(BCMDONGLEHOST) +static int initvars_cis_sdio(si_t *sih, osl_t *osh, char **vars, uint *count); +#endif /* !defined(BCMDONGLEHOST) */ +static int sprom_cmd_sdio(osl_t *osh, uint8 cmd); +static int sprom_read_sdio(osl_t *osh, uint16 addr, uint16 *data); +#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) +static int sprom_write_sdio(osl_t *osh, uint16 addr, uint16 data); +#endif /* defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) */ +#endif /* BCMSDIO */ +#if !defined(BCMDONGLEHOST) +#ifdef BCMSPI +static int initvars_cis_spi(si_t *sih, osl_t *osh, char **vars, uint *count); +#endif /* BCMSPI */ +#endif /* !defined(BCMDONGLEHOST) */ +static int sprom_read_pci(osl_t *osh, si_t *sih, volatile uint16 *sprom, uint wordoff, uint16 *buf, + uint nwords, bool check_crc); +#if !defined(BCMDONGLEHOST) +#if defined(BCMNVRAMW) || defined(BCMNVRAMR) +static int otp_read_pci(osl_t *osh, si_t *sih, uint16 *buf, uint bufsz); +#endif /* defined(BCMNVRAMW) || defined(BCMNVRAMR) */ +#endif /* !defined(BCMDONGLEHOST) */ +static uint16 srom_cc_cmd(si_t *sih, osl_t *osh, volatile void *ccregs, uint32 cmd, uint wordoff, + uint16 data); + +#if !defined(BCMDONGLEHOST) +static int initvars_flash(si_t *sih, osl_t *osh, char **vp, uint len); +int dbushost_initvars_flash(si_t *sih, osl_t *osh, char **base, uint len); +static uint get_max_cis_size(si_t *sih); +#endif /* !defined(BCMDONGLEHOST) */ + +#if defined (BCMHOSTVARS) +/* Also used by wl_readconfigdata for vars download */ +char BCMATTACHDATA(mfgsromvars)[VARS_MAX]; +int BCMATTACHDATA(defvarslen) = 0; +#endif /* defined(BCMHOSTVARS) */ + +#if !defined(BCMDONGLEHOST) +#if defined (BCMHOSTVARS) +/* FIXME: Fake 4331 SROM to boot 4331 driver on QT w/o SPROM/OTP */ +static char BCMATTACHDATA(defaultsromvars_4331)[] = + "sromrev=9\0" + "boardrev=0x1104\0" + "boardflags=0x200\0" + "boardflags2=0x0\0" + "boardtype=0x524\0" + "boardvendor=0x14e4\0" + "boardnum=0x2064\0" + "macaddr=00:90:4c:1a:20:64\0" + "ccode=0x0\0" + "regrev=0x0\0" + "opo=0x0\0" + "aa2g=0x7\0" + "aa5g=0x7\0" + "ag0=0x2\0" + "ag1=0x2\0" + "ag2=0x2\0" + "ag3=0xff\0" + "pa0b0=0xfe7f\0" + 
"pa0b1=0x15d9\0" + "pa0b2=0xfac6\0" + "pa0itssit=0x20\0" + "pa0maxpwr=0x48\0" + "pa1b0=0xfe89\0" + "pa1b1=0x14b1\0" + "pa1b2=0xfada\0" + "pa1lob0=0xffff\0" + "pa1lob1=0xffff\0" + "pa1lob2=0xffff\0" + "pa1hib0=0xfe8f\0" + "pa1hib1=0x13df\0" + "pa1hib2=0xfafa\0" + "pa1itssit=0x3e\0" + "pa1maxpwr=0x3c\0" + "pa1lomaxpwr=0x3c\0" + "pa1himaxpwr=0x3c\0" + "bxa2g=0x3\0" + "rssisav2g=0x7\0" + "rssismc2g=0xf\0" + "rssismf2g=0xf\0" + "bxa5g=0x3\0" + "rssisav5g=0x7\0" + "rssismc5g=0xf\0" + "rssismf5g=0xf\0" + "tri2g=0xff\0" + "tri5g=0xff\0" + "tri5gl=0xff\0" + "tri5gh=0xff\0" + "rxpo2g=0xff\0" + "rxpo5g=0xff\0" + "txchain=0x7\0" + "rxchain=0x7\0" + "antswitch=0x0\0" + "tssipos2g=0x1\0" + "extpagain2g=0x2\0" + "pdetrange2g=0x4\0" + "triso2g=0x3\0" + "antswctl2g=0x0\0" + "tssipos5g=0x1\0" + "elna2g=0xff\0" + "extpagain5g=0x2\0" + "pdetrange5g=0x4\0" + "triso5g=0x3\0" + "antswctl5g=0x0\0" + "elna5g=0xff\0" + "cckbw202gpo=0x0\0" + "cckbw20ul2gpo=0x0\0" + "legofdmbw202gpo=0x0\0" + "legofdmbw20ul2gpo=0x0\0" + "legofdmbw205glpo=0x0\0" + "legofdmbw20ul5glpo=0x0\0" + "legofdmbw205gmpo=0x0\0" + "legofdmbw20ul5gmpo=0x0\0" + "legofdmbw205ghpo=0x0\0" + "legofdmbw20ul5ghpo=0x0\0" + "mcsbw202gpo=0x0\0" + "mcsbw20ul2gpo=0x0\0" + "mcsbw402gpo=0x0\0" + "mcsbw205glpo=0x0\0" + "mcsbw20ul5glpo=0x0\0" + "mcsbw405glpo=0x0\0" + "mcsbw205gmpo=0x0\0" + "mcsbw20ul5gmpo=0x0\0" + "mcsbw405gmpo=0x0\0" + "mcsbw205ghpo=0x0\0" + "mcsbw20ul5ghpo=0x0\0" + "mcsbw405ghpo=0x0\0" + "mcs32po=0x0\0" + "legofdm40duppo=0x0\0" + "maxp2ga0=0x48\0" + "itt2ga0=0x20\0" + "itt5ga0=0x3e\0" + "pa2gw0a0=0xfe7f\0" + "pa2gw1a0=0x15d9\0" + "pa2gw2a0=0xfac6\0" + "maxp5ga0=0x3c\0" + "maxp5gha0=0x3c\0" + "maxp5gla0=0x3c\0" + "pa5gw0a0=0xfe89\0" + "pa5gw1a0=0x14b1\0" + "pa5gw2a0=0xfada\0" + "pa5glw0a0=0xffff\0" + "pa5glw1a0=0xffff\0" + "pa5glw2a0=0xffff\0" + "pa5ghw0a0=0xfe8f\0" + "pa5ghw1a0=0x13df\0" + "pa5ghw2a0=0xfafa\0" + "maxp2ga1=0x48\0" + "itt2ga1=0x20\0" + "itt5ga1=0x3e\0" + "pa2gw0a1=0xfe54\0" + "pa2gw1a1=0x1563\0" + "pa2gw2a1=0xfa7f\0" + "maxp5ga1=0x3c\0" + "maxp5gha1=0x3c\0" + "maxp5gla1=0x3c\0" + "pa5gw0a1=0xfe53\0" + "pa5gw1a1=0x14fe\0" + "pa5gw2a1=0xfa94\0" + "pa5glw0a1=0xffff\0" + "pa5glw1a1=0xffff\0" + "pa5glw2a1=0xffff\0" + "pa5ghw0a1=0xfe6e\0" + "pa5ghw1a1=0x1457\0" + "pa5ghw2a1=0xfab9\0" + "END\0"; + +static char BCMATTACHDATA(defaultsromvars_4360)[] = + "sromrev=11\0" + "boardrev=0x1421\0" + "boardflags=0x10401001\0" + "boardflags2=0x0\0" + "boardtype=0x61b\0" + "subvid=0x14e4\0" + "boardflags3=0x1\0" + "boardnum=62526\0" + "macaddr=00:90:4c:0d:f4:3e\0" + "ccode=X0\0" + "regrev=15\0" + "aa2g=7\0" + "aa5g=7\0" + "agbg0=71\0" + "agbg1=71\0" + "agbg2=133\0" + "aga0=71\0" + "aga1=133\0" + "aga2=133\0" + "antswitch=0\0" + "tssiposslope2g=1\0" + "epagain2g=0\0" + "pdgain2g=9\0" + "tworangetssi2g=0\0" + "papdcap2g=0\0" + "femctrl=2\0" + "tssiposslope5g=1\0" + "epagain5g=0\0" + "pdgain5g=9\0" + "tworangetssi5g=0\0" + "papdcap5g=0\0" + "gainctrlsph=0\0" + "tempthresh=255\0" + "tempoffset=255\0" + "rawtempsense=0x1ff\0" + "measpower=0x7f\0" + "tempsense_slope=0xff\0" + "tempcorrx=0x3f\0" + "tempsense_option=0x3\0" + "xtalfreq=65535\0" + "phycal_tempdelta=255\0" + "temps_period=15\0" + "temps_hysteresis=15\0" + "measpower1=0x7f\0" + "measpower2=0x7f\0" + "pdoffset2g40ma0=15\0" + "pdoffset2g40ma1=15\0" + "pdoffset2g40ma2=15\0" + "pdoffset2g40mvalid=1\0" + "pdoffset40ma0=9010\0" + "pdoffset40ma1=12834\0" + "pdoffset40ma2=8994\0" + "pdoffset80ma0=16\0" + "pdoffset80ma1=4096\0" + "pdoffset80ma2=0\0" + "subband5gver=0x4\0" + "cckbw202gpo=0\0" + 
"cckbw20ul2gpo=0\0" + "mcsbw202gpo=2571386880\0" + "mcsbw402gpo=2571386880\0" + "dot11agofdmhrbw202gpo=17408\0" + "ofdmlrbw202gpo=0\0" + "mcsbw205glpo=4001923072\0" + "mcsbw405glpo=4001923072\0" + "mcsbw805glpo=4001923072\0" + "mcsbw1605glpo=0\0" + "mcsbw205gmpo=3431497728\0" + "mcsbw405gmpo=3431497728\0" + "mcsbw805gmpo=3431497728\0" + "mcsbw1605gmpo=0\0" + "mcsbw205ghpo=3431497728\0" + "mcsbw405ghpo=3431497728\0" + "mcsbw805ghpo=3431497728\0" + "mcsbw1605ghpo=0\0" + "mcslr5glpo=0\0" + "mcslr5gmpo=0\0" + "mcslr5ghpo=0\0" + "sb20in40hrpo=0\0" + "sb20in80and160hr5glpo=0\0" + "sb40and80hr5glpo=0\0" + "sb20in80and160hr5gmpo=0\0" + "sb40and80hr5gmpo=0\0" + "sb20in80and160hr5ghpo=0\0" + "sb40and80hr5ghpo=0\0" + "sb20in40lrpo=0\0" + "sb20in80and160lr5glpo=0\0" + "sb40and80lr5glpo=0\0" + "sb20in80and160lr5gmpo=0\0" + "sb40and80lr5gmpo=0\0" + "sb20in80and160lr5ghpo=0\0" + "sb40and80lr5ghpo=0\0" + "dot11agduphrpo=0\0" + "dot11agduplrpo=0\0" + "pcieingress_war=15\0" + "sar2g=18\0" + "sar5g=15\0" + "noiselvl2ga0=31\0" + "noiselvl2ga1=31\0" + "noiselvl2ga2=31\0" + "noiselvl5ga0=31,31,31,31\0" + "noiselvl5ga1=31,31,31,31\0" + "noiselvl5ga2=31,31,31,31\0" + "rxgainerr2ga0=63\0" + "rxgainerr2ga1=31\0" + "rxgainerr2ga2=31\0" + "rxgainerr5ga0=63,63,63,63\0" + "rxgainerr5ga1=31,31,31,31\0" + "rxgainerr5ga2=31,31,31,31\0" + "maxp2ga0=76\0" + "pa2ga0=0xff3c,0x172c,0xfd20\0" + "rxgains5gmelnagaina0=7\0" + "rxgains5gmtrisoa0=15\0" + "rxgains5gmtrelnabypa0=1\0" + "rxgains5ghelnagaina0=7\0" + "rxgains5ghtrisoa0=15\0" + "rxgains5ghtrelnabypa0=1\0" + "rxgains2gelnagaina0=4\0" + "rxgains2gtrisoa0=7\0" + "rxgains2gtrelnabypa0=1\0" + "rxgains5gelnagaina0=3\0" + "rxgains5gtrisoa0=7\0" + "rxgains5gtrelnabypa0=1\0" + "maxp5ga0=76,76,76,76\0" +"pa5ga0=0xff3a,0x14d4,0xfd5f,0xff36,0x1626,0xfd2e,0xff42,0x15bd,0xfd47,0xff39,0x15a3,0xfd3d\0" + "maxp2ga1=76\0" + "pa2ga1=0xff2a,0x16b2,0xfd28\0" + "rxgains5gmelnagaina1=7\0" + "rxgains5gmtrisoa1=15\0" + "rxgains5gmtrelnabypa1=1\0" + "rxgains5ghelnagaina1=7\0" + "rxgains5ghtrisoa1=15\0" + "rxgains5ghtrelnabypa1=1\0" + "rxgains2gelnagaina1=3\0" + "rxgains2gtrisoa1=6\0" + "rxgains2gtrelnabypa1=1\0" + "rxgains5gelnagaina1=3\0" + "rxgains5gtrisoa1=6\0" + "rxgains5gtrelnabypa1=1\0" + "maxp5ga1=76,76,76,76\0" +"pa5ga1=0xff4e,0x1530,0xfd53,0xff58,0x15b4,0xfd4d,0xff58,0x1671,0xfd2f,0xff55,0x15e2,0xfd46\0" + "maxp2ga2=76\0" + "pa2ga2=0xff3c,0x1736,0xfd1f\0" + "rxgains5gmelnagaina2=7\0" + "rxgains5gmtrisoa2=15\0" + "rxgains5gmtrelnabypa2=1\0" + "rxgains5ghelnagaina2=7\0" + "rxgains5ghtrisoa2=15\0" + "rxgains5ghtrelnabypa2=1\0" + "rxgains2gelnagaina2=4\0" + "rxgains2gtrisoa2=7\0" + "rxgains2gtrelnabypa2=1\0" + "rxgains5gelnagaina2=3\0" + "rxgains5gtrisoa2=7\0" + "rxgains5gtrelnabypa2=1\0" + "maxp5ga2=76,76,76,76\0" +"pa5ga2=0xff2d,0x144a,0xfd63,0xff35,0x15d7,0xfd3b,0xff35,0x1668,0xfd2f,0xff31,0x1664,0xfd27\0" + "END\0"; + +#endif /* defined(BCMHOSTVARS) */ +#endif /* !defined(BCMDONGLEHOST) */ + +#if !defined(BCMDONGLEHOST) +#if defined(BCMHOSTVARS) +static char BCMATTACHDATA(defaultsromvars_wltest)[] = + "macaddr=00:90:4c:f8:00:01\0" + "et0macaddr=00:11:22:33:44:52\0" + "et0phyaddr=30\0" + "et0mdcport=0\0" + "gpio2=robo_reset\0" + "boardvendor=0x14e4\0" + "boardflags=0x210\0" + "boardflags2=0\0" + "boardtype=0x04c3\0" + "boardrev=0x1100\0" + "sromrev=8\0" + "devid=0x432c\0" + "ccode=0\0" + "regrev=0\0" + "aa2g=3\0" + "ag0=2\0" + "ag1=2\0" + "aa5g=3\0" + "aa0=2\0" + "aa1=2\0" + "txchain=3\0" + "rxchain=3\0" + "antswitch=0\0" + "itt2ga0=0x20\0" + "maxp2ga0=0x48\0" + "pa2gw0a0=0xfe9e\0" + 
"pa2gw1a0=0x15d5\0" + "pa2gw2a0=0xfae9\0" + "itt2ga1=0x20\0" + "maxp2ga1=0x48\0" + "pa2gw0a1=0xfeb3\0" + "pa2gw1a1=0x15c9\0" + "pa2gw2a1=0xfaf7\0" + "tssipos2g=1\0" + "extpagain2g=0\0" + "pdetrange2g=0\0" + "triso2g=3\0" + "antswctl2g=0\0" + "tssipos5g=1\0" + "extpagain5g=0\0" + "pdetrange5g=0\0" + "triso5g=3\0" + "antswctl5g=0\0" + "cck2gpo=0\0" + "ofdm2gpo=0\0" + "mcs2gpo0=0\0" + "mcs2gpo1=0\0" + "mcs2gpo2=0\0" + "mcs2gpo3=0\0" + "mcs2gpo4=0\0" + "mcs2gpo5=0\0" + "mcs2gpo6=0\0" + "mcs2gpo7=0\0" + "cddpo=0\0" + "stbcpo=0\0" + "bw40po=4\0" + "bwduppo=0\0" + "END\0"; + +/** + * The contents of this array is a first attempt, is likely incorrect for 43602, needs to be + * edited in a later stage. + */ +static char BCMATTACHDATA(defaultsromvars_43602)[] = + "sromrev=11\0" + "boardrev=0x1421\0" + "boardflags=0x10401001\0" + "boardflags2=0x00000002\0" + "boardflags3=0x00000003\0" + "boardtype=0x61b\0" + "subvid=0x14e4\0" + "boardnum=62526\0" + "macaddr=00:90:4c:0d:f4:3e\0" + "ccode=X0\0" + "regrev=15\0" + "aa2g=7\0" + "aa5g=7\0" + "agbg0=71\0" + "agbg1=71\0" + "agbg2=133\0" + "aga0=71\0" + "aga1=133\0" + "aga2=133\0" + "antswitch=0\0" + "tssiposslope2g=1\0" + "epagain2g=0\0" + "pdgain2g=9\0" + "tworangetssi2g=0\0" + "papdcap2g=0\0" + "femctrl=2\0" + "tssiposslope5g=1\0" + "epagain5g=0\0" + "pdgain5g=9\0" + "tworangetssi5g=0\0" + "papdcap5g=0\0" + "gainctrlsph=0\0" + "tempthresh=255\0" + "tempoffset=255\0" + "rawtempsense=0x1ff\0" + "measpower=0x7f\0" + "tempsense_slope=0xff\0" + "tempcorrx=0x3f\0" + "tempsense_option=0x3\0" + "xtalfreq=40000\0" + "phycal_tempdelta=255\0" + "temps_period=15\0" + "temps_hysteresis=15\0" + "measpower1=0x7f\0" + "measpower2=0x7f\0" + "pdoffset2g40ma0=15\0" + "pdoffset2g40ma1=15\0" + "pdoffset2g40ma2=15\0" + "pdoffset2g40mvalid=1\0" + "pdoffset40ma0=9010\0" + "pdoffset40ma1=12834\0" + "pdoffset40ma2=8994\0" + "pdoffset80ma0=16\0" + "pdoffset80ma1=4096\0" + "pdoffset80ma2=0\0" + "subband5gver=0x4\0" + "cckbw202gpo=0\0" + "cckbw20ul2gpo=0\0" + "mcsbw202gpo=2571386880\0" + "mcsbw402gpo=2571386880\0" + "dot11agofdmhrbw202gpo=17408\0" + "ofdmlrbw202gpo=0\0" + "mcsbw205glpo=4001923072\0" + "mcsbw405glpo=4001923072\0" + "mcsbw805glpo=4001923072\0" + "mcsbw1605glpo=0\0" + "mcsbw205gmpo=3431497728\0" + "mcsbw405gmpo=3431497728\0" + "mcsbw805gmpo=3431497728\0" + "mcsbw1605gmpo=0\0" + "mcsbw205ghpo=3431497728\0" + "mcsbw405ghpo=3431497728\0" + "mcsbw805ghpo=3431497728\0" + "mcsbw1605ghpo=0\0" + "mcslr5glpo=0\0" + "mcslr5gmpo=0\0" + "mcslr5ghpo=0\0" + "sb20in40hrpo=0\0" + "sb20in80and160hr5glpo=0\0" + "sb40and80hr5glpo=0\0" + "sb20in80and160hr5gmpo=0\0" + "sb40and80hr5gmpo=0\0" + "sb20in80and160hr5ghpo=0\0" + "sb40and80hr5ghpo=0\0" + "sb20in40lrpo=0\0" + "sb20in80and160lr5glpo=0\0" + "sb40and80lr5glpo=0\0" + "sb20in80and160lr5gmpo=0\0" + "sb40and80lr5gmpo=0\0" + "sb20in80and160lr5ghpo=0\0" + "sb40and80lr5ghpo=0\0" + "dot11agduphrpo=0\0" + "dot11agduplrpo=0\0" + "pcieingress_war=15\0" + "sar2g=18\0" + "sar5g=15\0" + "noiselvl2ga0=31\0" + "noiselvl2ga1=31\0" + "noiselvl2ga2=31\0" + "noiselvl5ga0=31,31,31,31\0" + "noiselvl5ga1=31,31,31,31\0" + "noiselvl5ga2=31,31,31,31\0" + "rxgainerr2ga0=63\0" + "rxgainerr2ga1=31\0" + "rxgainerr2ga2=31\0" + "rxgainerr5ga0=63,63,63,63\0" + "rxgainerr5ga1=31,31,31,31\0" + "rxgainerr5ga2=31,31,31,31\0" + "maxp2ga0=76\0" + "pa2ga0=0xff3c,0x172c,0xfd20\0" + "rxgains5gmelnagaina0=7\0" + "rxgains5gmtrisoa0=15\0" + "rxgains5gmtrelnabypa0=1\0" + "rxgains5ghelnagaina0=7\0" + "rxgains5ghtrisoa0=15\0" + "rxgains5ghtrelnabypa0=1\0" + "rxgains2gelnagaina0=4\0" 
+ "rxgains2gtrisoa0=7\0" + "rxgains2gtrelnabypa0=1\0" + "rxgains5gelnagaina0=3\0" + "rxgains5gtrisoa0=7\0" + "rxgains5gtrelnabypa0=1\0" + "maxp5ga0=76,76,76,76\0" +"pa5ga0=0xff3a,0x14d4,0xfd5f,0xff36,0x1626,0xfd2e,0xff42,0x15bd,0xfd47,0xff39,0x15a3,0xfd3d\0" + "maxp2ga1=76\0" + "pa2ga1=0xff2a,0x16b2,0xfd28\0" + "rxgains5gmelnagaina1=7\0" + "rxgains5gmtrisoa1=15\0" + "rxgains5gmtrelnabypa1=1\0" + "rxgains5ghelnagaina1=7\0" + "rxgains5ghtrisoa1=15\0" + "rxgains5ghtrelnabypa1=1\0" + "rxgains2gelnagaina1=3\0" + "rxgains2gtrisoa1=6\0" + "rxgains2gtrelnabypa1=1\0" + "rxgains5gelnagaina1=3\0" + "rxgains5gtrisoa1=6\0" + "rxgains5gtrelnabypa1=1\0" + "maxp5ga1=76,76,76,76\0" +"pa5ga1=0xff4e,0x1530,0xfd53,0xff58,0x15b4,0xfd4d,0xff58,0x1671,0xfd2f,0xff55,0x15e2,0xfd46\0" + "maxp2ga2=76\0" + "pa2ga2=0xff3c,0x1736,0xfd1f\0" + "rxgains5gmelnagaina2=7\0" + "rxgains5gmtrisoa2=15\0" + "rxgains5gmtrelnabypa2=1\0" + "rxgains5ghelnagaina2=7\0" + "rxgains5ghtrisoa2=15\0" + "rxgains5ghtrelnabypa2=1\0" + "rxgains2gelnagaina2=4\0" + "rxgains2gtrisoa2=7\0" + "rxgains2gtrelnabypa2=1\0" + "rxgains5gelnagaina2=3\0" + "rxgains5gtrisoa2=7\0" + "rxgains5gtrelnabypa2=1\0" + "maxp5ga2=76,76,76,76\0" +"pa5ga2=0xff2d,0x144a,0xfd63,0xff35,0x15d7,0xfd3b,0xff35,0x1668,0xfd2f,0xff31,0x1664,0xfd27\0" + "END\0"; + +/** + * The contents of this array is a first attempt, was copied from 4378, needs to be edited in + * a later stage. + */ +static char BCMATTACHDATA(defaultsromvars_4378)[] = + "cckdigfilttype=4\0" + "sromrev=11\0" + "boardrev=0x1102\0" + "boardtype=0x0771\0" + "boardflags=0x10481201\0" + "boardflags2=0x00000000\0" + "boardflags3=0x04000080\0" + "macaddr=00:90:4c:12:43:47\0" + "ccode=0\0" + "regrev=0\0" + "antswitch=0\0" + "pdgain5g=0\0" + "pdgain2g=0\0" + "tworangetssi2g=0\0" + "tworangetssi5g=0\0" + "femctrl=16\0" + "vendid=0x14e4\0" + "devid=0x4425\0" + "manfid=0x2d0\0" + "nocrc=1\0" + "btc_params82=0x1a0\0" + "otpimagesize=502\0" + "xtalfreq=37400\0" + "rxgains2gelnagaina0=3\0" + "rxgains2gtrisoa0=7\0" + "rxgains2gtrelnabypa0=1\0" + "rxgains5gelnagaina0=3\0" + "rxgains5gtrisoa0=6\0" + "rxgains5gtrelnabypa0=1\0" + "rxgains5gmelnagaina0=3\0" + "rxgains5gmtrisoa0=6\0" + "rxgains5gmtrelnabypa0=1\0" + "rxgains5ghelnagaina0=3\0" + "rxgains5ghtrisoa0=6\0" + "rxgains5ghtrelnabypa0=1\0" + "rxgains2gelnagaina1=3\0" + "rxgains2gtrisoa1=7\0" + "rxgains2gtrelnabypa1=1\0" + "rxgains5gelnagaina1=3\0" + "rxgains5gtrisoa1=6\0" + "rxgains5gtrelnabypa1=1\0" + "rxgains5gmelnagaina1=3\0" + "rxgains5gmtrisoa1=6\0" + "rxgains5gmtrelnabypa1=1\0" + "rxgains5ghelnagaina1=3\0" + "rxgains5ghtrisoa1=6\0" + "rxgains5ghtrelnabypa1=1\0" + "rxchain=3\0" + "txchain=3\0" + "aa2g=3\0" + "aa5g=3\0" + "agbg0=2\0" + "agbg1=2\0" + "aga0=2\0" + "aga1=2\0" + "tssipos2g=1\0" + "tssipos5g=1\0" + "tempthresh=255\0" + "tempoffset=255\0" + "rawtempsense=0x1ff\0" + "pa2gccka0=-200,7392,-897\0" + "pa2gccka1=-198,7522,-907\0" + "pa2ga0=-174,7035,-838\0" + "pa2ga1=-185,6772,-811\0" + "pa5ga0=-175,7296,-887,-164,7553,-910,-155,7801,-936,-149,7908,-951\0" + "pa5ga1=-155,7675,-925,-148,7851,-940,-152,7930,-954,-143,8121,-969\0" + "pa5gbw4080a0=-178,7872,-959,-173,8107,-986,-165,8398,-1019,-150,8809,-1063\0" + "pa5gbw4080a1=-166,8179,-993,-161,8378,-1015,-165,8402,-1019,-155,8757,-1057\0" + "maxp2ga0=66\0" + "maxp2ga1=66\0" + "maxp5ga0=66,66,66,66\0" + "maxp5ga1=66,66,66,66\0" + "subband5gver=0x4\0" + "paparambwver=3\0" + "cckpwroffset0=0\0" + "cckpwroffset1=0\0" + "pdoffset40ma0=0x0000\0" + "pdoffset80ma0=0xeeee\0" + "pdoffset40ma1=0x0000\0" + "pdoffset80ma1=0xeeee\0" 
+ "cckbw202gpo=0\0" + "cckbw20ul2gpo=0\0" + "mcsbw202gpo=0xEC888222\0" + "mcsbw402gpo=0xEC888222\0" + "dot11agofdmhrbw202gpo=0x6622\0" + "ofdmlrbw202gpo=0x0000\0" + "mcsbw205glpo=0xCA666000\0" + "mcsbw405glpo=0xCA666000\0" + "mcsbw805glpo=0xEA666000\0" + "mcsbw1605glpo=0\0" + "mcsbw205gmpo=0xCA666000\0" + "mcsbw405gmpo=0xCA666000\0" + "mcsbw805gmpo=0xEA666000\0" + "mcsbw1605gmpo=0\0" + "mcsbw205ghpo=0xCA666000\0" + "mcsbw405ghpo=0xCA666000\0" + "mcsbw805ghpo=0xEA666000\0" + "mcsbw1605ghpo=0\0" + "mcslr5glpo=0x0000\0" + "mcslr5gmpo=0x0000\0" + "mcslr5ghpo=0x0000\0" + "sb20in40hrpo=0x0\0" + "sb20in80and160hr5glpo=0x0\0" + "sb40and80hr5glpo=0x0\0" + "sb20in80and160hr5gmpo=0x0\0" + "sb40and80hr5gmpo=0x0\0" + "sb20in80and160hr5ghpo=0x0\0" + "sb40and80hr5ghpo=0x0\0" + "sb20in40lrpo=0x0\0" + "sb20in80and160lr5glpo=0x0\0" + "sb40and80lr5glpo=0x0\0" + "sb20in80and160lr5gmpo=0x0\0" + "sb40and80lr5gmpo=0x0\0" + "sb20in80and160lr5ghpo=0x0\0" + "sb40and80lr5ghpo=0x0\0" + "dot11agduphrpo=0x0\0" + "dot11agduplrpo=0x0\0" + "phycal_tempdelta=15\0" + "temps_period=15\0" + "temps_hysteresis=15\0" + "swctrlmap_2g=0x00000404,0x0a0a0000,0x02020000,0x010a02,0x1fe\0" + "swctrlmapext_2g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0" + "swctrlmap_5g=0x00001010,0x60600000,0x40400000,0x000000,0x0f0\0" + "swctrlmapext_5g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0" + "powoffs2gtna0=1,3,3,1,0,0,1,2,2,2,1,1,0,0\0" + "powoffs2gtna1=-1,1,1,1,0,0,1,2,3,2,2,0,0,0\0" + "END\0"; + +/** + * The contents of this array is a first attempt, was copied from 4387, needs to be edited in + * a later stage. + */ +static char BCMATTACHDATA(defaultsromvars_4387)[] = + "cckdigfilttype=4\0" + "sromrev=11\0" + "boardrev=0x1102\0" + "boardtype=0x0771\0" + "boardflags=0x10481201\0" + "boardflags2=0x00000000\0" + "boardflags3=0x04000080\0" + "macaddr=00:90:4c:12:43:47\0" + "ccode=0\0" + "regrev=0\0" + "antswitch=0\0" + "pdgain5g=0\0" + "pdgain2g=0\0" + "tworangetssi2g=0\0" + "tworangetssi5g=0\0" + "femctrl=16\0" + "vendid=0x14e4\0" + "devid=0x4425\0" + "manfid=0x2d0\0" + "nocrc=1\0" + "btc_params82=0x1a0\0" + "otpimagesize=502\0" + "xtalfreq=37400\0" + "rxgains2gelnagaina0=3\0" + "rxgains2gtrisoa0=7\0" + "rxgains2gtrelnabypa0=1\0" + "rxgains5gelnagaina0=3\0" + "rxgains5gtrisoa0=6\0" + "rxgains5gtrelnabypa0=1\0" + "rxgains5gmelnagaina0=3\0" + "rxgains5gmtrisoa0=6\0" + "rxgains5gmtrelnabypa0=1\0" + "rxgains5ghelnagaina0=3\0" + "rxgains5ghtrisoa0=6\0" + "rxgains5ghtrelnabypa0=1\0" + "rxgains2gelnagaina1=3\0" + "rxgains2gtrisoa1=7\0" + "rxgains2gtrelnabypa1=1\0" + "rxgains5gelnagaina1=3\0" + "rxgains5gtrisoa1=6\0" + "rxgains5gtrelnabypa1=1\0" + "rxgains5gmelnagaina1=3\0" + "rxgains5gmtrisoa1=6\0" + "rxgains5gmtrelnabypa1=1\0" + "rxgains5ghelnagaina1=3\0" + "rxgains5ghtrisoa1=6\0" + "rxgains5ghtrelnabypa1=1\0" + "rxchain=3\0" + "txchain=3\0" + "aa2g=3\0" + "aa5g=3\0" + "agbg0=2\0" + "agbg1=2\0" + "aga0=2\0" + "aga1=2\0" + "tssipos2g=1\0" + "tssipos5g=1\0" + "tempthresh=255\0" + "tempoffset=255\0" + "rawtempsense=0x1ff\0" + "pa2gccka0=-200,7392,-897\0" + "pa2gccka1=-198,7522,-907\0" + "pa2ga0=-174,7035,-838\0" + "pa2ga1=-185,6772,-811\0" + "pa5ga0=-175,7296,-887,-164,7553,-910,-155,7801,-936,-149,7908,-951\0" + "pa5ga1=-155,7675,-925,-148,7851,-940,-152,7930,-954,-143,8121,-969\0" + "pa5gbw4080a0=-178,7872,-959,-173,8107,-986,-165,8398,-1019,-150,8809,-1063\0" + "pa5gbw4080a1=-166,8179,-993,-161,8378,-1015,-165,8402,-1019,-155,8757,-1057\0" + "maxp2ga0=66\0" + "maxp2ga1=66\0" + "maxp5ga0=66,66,66,66\0" + "maxp5ga1=66,66,66,66\0" + 
"subband5gver=0x4\0" + "paparambwver=3\0" + "cckpwroffset0=0\0" + "cckpwroffset1=0\0" + "pdoffset40ma0=0x0000\0" + "pdoffset80ma0=0xeeee\0" + "pdoffset40ma1=0x0000\0" + "pdoffset80ma1=0xeeee\0" + "cckbw202gpo=0\0" + "cckbw20ul2gpo=0\0" + "mcsbw202gpo=0xEC888222\0" + "mcsbw402gpo=0xEC888222\0" + "dot11agofdmhrbw202gpo=0x6622\0" + "ofdmlrbw202gpo=0x0000\0" + "mcsbw205glpo=0xCA666000\0" + "mcsbw405glpo=0xCA666000\0" + "mcsbw805glpo=0xEA666000\0" + "mcsbw1605glpo=0\0" + "mcsbw205gmpo=0xCA666000\0" + "mcsbw405gmpo=0xCA666000\0" + "mcsbw805gmpo=0xEA666000\0" + "mcsbw1605gmpo=0\0" + "mcsbw205ghpo=0xCA666000\0" + "mcsbw405ghpo=0xCA666000\0" + "mcsbw805ghpo=0xEA666000\0" + "mcsbw1605ghpo=0\0" + "mcslr5glpo=0x0000\0" + "mcslr5gmpo=0x0000\0" + "mcslr5ghpo=0x0000\0" + "sb20in40hrpo=0x0\0" + "sb20in80and160hr5glpo=0x0\0" + "sb40and80hr5glpo=0x0\0" + "sb20in80and160hr5gmpo=0x0\0" + "sb40and80hr5gmpo=0x0\0" + "sb20in80and160hr5ghpo=0x0\0" + "sb40and80hr5ghpo=0x0\0" + "sb20in40lrpo=0x0\0" + "sb20in80and160lr5glpo=0x0\0" + "sb40and80lr5glpo=0x0\0" + "sb20in80and160lr5gmpo=0x0\0" + "sb40and80lr5gmpo=0x0\0" + "sb20in80and160lr5ghpo=0x0\0" + "sb40and80lr5ghpo=0x0\0" + "dot11agduphrpo=0x0\0" + "dot11agduplrpo=0x0\0" + "phycal_tempdelta=15\0" + "temps_period=15\0" + "temps_hysteresis=15\0" + "swctrlmap_2g=0x00000404,0x0a0a0000,0x02020000,0x010a02,0x1fe\0" + "swctrlmapext_2g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0" + "swctrlmap_5g=0x00001010,0x60600000,0x40400000,0x000000,0x0f0\0" + "swctrlmapext_5g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0" + "powoffs2gtna0=1,3,3,1,0,0,1,2,2,2,1,1,0,0\0" + "powoffs2gtna1=-1,1,1,1,0,0,1,2,3,2,2,0,0,0\0" + "END\0"; + +#endif /* defined(BCMHOSTVARS) */ +#endif /* !defined(BCMDONGLEHOST) */ + +static bool srvars_inited = FALSE; /* Use OTP/SROM as global variables */ + +#if (!defined(BCMDONGLEHOST) && defined(BCMHOSTVARS)) +/* It must end with pattern of "END" */ +static uint +BCMATTACHFN(srom_vars_len)(char *vars) +{ + uint pos = 0; + uint len; + char *s; + char *emark = "END"; + uint emark_len = strlen(emark) + 1; + + for (s = vars; s && *s;) { + + if (strcmp(s, emark) == 0) + break; + + len = strlen(s); + s += strlen(s) + 1; + pos += len + 1; + /* BS_ERROR(("len %d vars[pos] %s\n", pos, s)); */ + if (pos >= (VARS_MAX - emark_len)) { + return 0; + } + } + + return pos + emark_len; /* include the "END\0" */ +} +#endif /* BCMHOSTVARS */ + +#if !defined(BCMDONGLEHOST) +#ifdef BCMNVRAMVARS +static int +BCMATTACHFN(initvars_nvram_vars)(si_t *sih, osl_t *osh, char **vars, uint *vars_sz) +{ + int ret; + + ASSERT(vars != NULL && vars_sz != NULL); + + /* allocate maximum buffer as we don't know big it should be */ + *vars = MALLOC(osh, MAXSZ_NVRAM_VARS); + if (*vars == NULL) { + ret = BCME_NOMEM; + goto fail; + } + *vars_sz = MAXSZ_NVRAM_VARS; + + /* query the name=value pairs */ + if ((ret = nvram_getall(*vars, *vars_sz)) != BCME_OK) { + goto fail; + } + + /* treat empty name=value list as an error so that we can indicate + * the condition up throught error code return... + */ + if (*vars_sz == 0) { + ret = BCME_ERROR; + goto fail; + } + + return BCME_OK; + +fail: + if (*vars != NULL) { + MFREE(osh, *vars, MAXSZ_NVRAM_VARS); + } + *vars = NULL; + *vars_sz = 0; + return ret; +} +#endif /* BCMNVRAMVARS */ + +/** + * Initialize local vars from the right source for this platform. Called from siutils.c. + * + * vars - pointer to a to-be created pointer area for "environment" variables. 
Some callers of this + function set 'vars' to NULL; in that case this function returns prematurely. + * + * Return 0 on success, nonzero on error. + */ +int +BCMATTACHFN(srom_var_init)(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh, + char **vars, uint *count) +{ + ASSERT(bustype == BUSTYPE(bustype)); + if (vars == NULL || count == NULL) + return (0); + + *vars = NULL; + *count = 0; + + switch (BUSTYPE(bustype)) { + case SI_BUS: +#ifdef BCMPCIEDEV + if (BCMPCIEDEV_ENAB()) { + int ret; + + ret = initvars_cis_pci(sih, osh, curmap, vars, count); + +#ifdef BCMPCIEDEV_SROM_FORMAT + if (ret) + ret = initvars_srom_pci(sih, curmap, vars, count); +#endif + if (ret) + ret = initvars_srom_si(sih, osh, curmap, vars, count); + return ret; + } else +#endif /* BCMPCIEDEV */ + { + return initvars_srom_si(sih, osh, curmap, vars, count); + } + case PCI_BUS: { + int ret; + +#ifdef BCMNVRAMVARS + if ((ret = initvars_nvram_vars(sih, osh, vars, count)) == BCME_OK) { + return ret; + } else +#endif + { + ASSERT(curmap != NULL); + if (curmap == NULL) + return (-1); + + /* First check for CIS format. If not CIS, try SROM format */ + if ((ret = initvars_cis_pci(sih, osh, curmap, vars, count))) + return initvars_srom_pci(sih, curmap, vars, count); + return ret; + } + } + +#ifdef BCMSDIO + case SDIO_BUS: + return initvars_cis_sdio(sih, osh, vars, count); +#endif /* BCMSDIO */ + +#ifdef BCMSPI + case SPI_BUS: + return initvars_cis_spi(sih, osh, vars, count); +#endif /* BCMSPI */ + + default: + ASSERT(0); + } + return (-1); +} +#endif /* !defined(BCMDONGLEHOST) */ + +/** support only 16-bit word read from srom */ +int +srom_read(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh, + uint byteoff, uint nbytes, uint16 *buf, bool check_crc) +{ + uint i, off, nw; + + BCM_REFERENCE(i); + + ASSERT(bustype == BUSTYPE(bustype)); + + /* check input - 16-bit access only */ + if (byteoff & 1 || nbytes & 1 || (byteoff + nbytes) > SROM_MAX) + return 1; + + off = byteoff / 2; + nw = nbytes / 2; + +#ifdef BCMPCIEDEV + if ((BUSTYPE(bustype) == SI_BUS) && + (BCM43602_CHIP(sih->chip) || + (BCM4369_CHIP(sih->chip)) || + (BCM4378_CHIP(sih->chip)) || + (BCM4387_CHIP(sih->chip)) || + (BCM4388_CHIP(sih->chip)) || + (BCM4362_CHIP(sih->chip)) || + (BCM4385_CHIP(sih->chip)) || + (BCM4389_CHIP(sih->chip)) || + (BCM4397_CHIP(sih->chip)) || + +#ifdef UNRELEASEDCHIP +#endif + + FALSE)) { /* building firmware for chips with a PCIe interface and internal SI bus */ +#else + if (BUSTYPE(bustype) == PCI_BUS) { +#endif /* BCMPCIEDEV */ + if (!curmap) + return 1; + + if (si_is_sprom_available(sih)) { + volatile uint16 *srom; + + srom = (volatile uint16 *)srom_offset(sih, curmap); + if (srom == NULL) + return 1; + + if (sprom_read_pci(osh, sih, srom, off, buf, nw, check_crc)) + return 1; + } +#if !defined(BCMDONGLEHOST) && (defined(BCMNVRAMW) || defined(BCMNVRAMR)) + else if (!((BUSTYPE(bustype) == SI_BUS) && + (BCM43602_CHIP(sih->chip) || + (BCM4369_CHIP(sih->chip)) || + (BCM4362_CHIP(sih->chip)) || + (BCM4378_CHIP(sih->chip)) || + (BCM4385_CHIP(sih->chip)) || + (BCM4389_CHIP(sih->chip)) || + (BCM4387_CHIP(sih->chip)) || + (BCM4388_CHIP(sih->chip)) || + (BCM4397_CHIP(sih->chip)) || + 0))) { + if (otp_read_pci(osh, sih, buf, nbytes)) + return 1; + } +#endif /* !BCMDONGLEHOST && (BCMNVRAMW||BCMNVRAMR) */ +#ifdef BCMSDIO + } else if (BUSTYPE(bustype) == SDIO_BUS) { + off = byteoff / 2; + nw = nbytes / 2; + for (i = 0; i < nw; i++) { + if (sprom_read_sdio(osh, (uint16)(off + i), (uint16 *)(buf + i))) + return 1; + } +#endif /* BCMSDIO
*/ +#ifdef BCMSPI + } else if (BUSTYPE(bustype) == SPI_BUS) { + if (bcmsdh_cis_read(NULL, SDIO_FUNC_1, (uint8 *)buf, byteoff + nbytes) != 0) + return 1; +#endif /* BCMSPI */ + } else if (BUSTYPE(bustype) == SI_BUS) { + return 1; + } else { + return 1; + } + + return 0; +} + +#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) +/** support only 16-bit word write into srom */ +int +srom_write(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh, + uint byteoff, uint nbytes, uint16 *buf) +{ + uint i, nw, crc_range; + uint16 *old, *new; + uint8 crc; + volatile uint32 val32; + int rc = 1; + + ASSERT(bustype == BUSTYPE(bustype)); + + /* freed in same function */ + old = MALLOC_NOPERSIST(osh, SROM_MAXW * sizeof(uint16)); + new = MALLOC_NOPERSIST(osh, SROM_MAXW * sizeof(uint16)); + + if (old == NULL || new == NULL) + goto done; + + /* check input - 16-bit access only. use byteoff 0x55aa to indicate + * srclear + */ + if ((byteoff != 0x55aa) && ((byteoff & 1) || (nbytes & 1))) + goto done; + + if ((byteoff != 0x55aa) && ((byteoff + nbytes) > SROM_MAX)) + goto done; + + if (FALSE) { + } +#ifdef BCMSDIO + else if (BUSTYPE(bustype) == SDIO_BUS) { + crc_range = SROM_MAX; + } +#endif + else { + crc_range = srom_size(sih, osh); + } + + nw = crc_range / 2; + /* first read a small number of words from srom, then adjust the length and read all */ + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) + goto done; + + BS_ERROR(("srom_write: old[SROM4_SIGN] 0x%x, old[SROM8_SIGN] 0x%x\n", + old[SROM4_SIGN], old[SROM8_SIGN])); + /* Deal with blank srom */ + if (old[0] == 0xffff) { + /* Do nothing to blank srom when it's srclear */ + if (byteoff == 0x55aa) { + rc = 0; + goto done; + } + + /* see if the input buffer is valid SROM image or not */ + if (buf[SROM11_SIGN] == SROM11_SIGNATURE) { + BS_ERROR(("srom_write: buf[SROM11_SIGN] 0x%x\n", + buf[SROM11_SIGN])); + + /* block invalid buffer size */ + if (nbytes < SROM11_WORDS * 2) { + rc = BCME_BUFTOOSHORT; + goto done; + } else if (nbytes > SROM11_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + + nw = SROM11_WORDS; + + } else if (buf[SROM12_SIGN] == SROM12_SIGNATURE) { + BS_ERROR(("srom_write: buf[SROM12_SIGN] 0x%x\n", + buf[SROM12_SIGN])); + + /* block invalid buffer size */ + if (nbytes < SROM12_WORDS * 2) { + rc = BCME_BUFTOOSHORT; + goto done; + } else if (nbytes > SROM12_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + + nw = SROM12_WORDS; + + } else if (buf[SROM13_SIGN] == SROM13_SIGNATURE) { + BS_ERROR(("srom_write: buf[SROM13_SIGN] 0x%x\n", + buf[SROM13_SIGN])); + + /* block invalid buffer size */ + if (nbytes < SROM13_WORDS * 2) { + rc = BCME_BUFTOOSHORT; + goto done; + } else if (nbytes > SROM13_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + + nw = SROM13_WORDS; + + } else if (buf[SROM16_SIGN] == SROM16_SIGNATURE) { + BS_ERROR(("srom_write: buf[SROM16_SIGN] 0x%x\n", + buf[SROM16_SIGN])); + + /* block invalid buffer size */ + if (nbytes < SROM16_WORDS * 2) { + rc = BCME_BUFTOOSHORT; + goto done; + } else if (nbytes > SROM16_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + + nw = SROM16_WORDS; + + } else if (buf[SROM17_SIGN] == SROM17_SIGNATURE) { + BS_ERROR(("srom_write: buf[SROM17_SIGN] 0x%x\n", + buf[SROM17_SIGN])); + + /* block invalid buffer size */ + if (nbytes < SROM17_WORDS * 2) { + rc = BCME_BUFTOOSHORT; + goto done; + } else if (nbytes > SROM17_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + + nw = SROM17_WORDS; + } else if (buf[SROM18_SIGN] == SROM18_SIGNATURE) { +
BS_ERROR(("srom_write: buf[SROM18_SIGN] 0x%x\n", + buf[SROM18_SIGN])); + + /* block invalid buffer size */ + /* nbytes can be < SROM18 bytes since host limits transfer chunk size + * to 1500 Bytes + */ + if (nbytes > SROM18_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + + nw = SROM18_WORDS; + + } else if (buf[SROM11_SIGN] == SROM15_SIGNATURE) { + BS_ERROR(("srom_write: buf[SROM15_SIGN] 0x%x\n", + buf[SROM11_SIGN])); + /* nbytes can be < SROM15 bytes since host limits transfer chunk size + * to 1518 Bytes + */ + if (nbytes > SROM15_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + nw = SROM15_WORDS; + } else if ((buf[SROM4_SIGN] == SROM4_SIGNATURE) || + (buf[SROM8_SIGN] == SROM4_SIGNATURE)) { + BS_ERROR(("srom_write: buf[SROM4_SIGN] 0x%x, buf[SROM8_SIGN] 0x%x\n", + buf[SROM4_SIGN], buf[SROM8_SIGN])); + + /* block invalid buffer size */ + if (nbytes < SROM4_WORDS * 2) { + rc = BCME_BUFTOOSHORT; + goto done; + } else if (nbytes > SROM4_WORDS * 2) { + rc = BCME_BUFTOOLONG; + goto done; + } + + nw = SROM4_WORDS; + } else if (nbytes == SROM_WORDS * 2) { /* the other possible SROM format */ + BS_ERROR(("srom_write: Not SROM4 or SROM8.\n")); + + nw = SROM_WORDS; + } else { + BS_ERROR(("srom_write: Invalid input file signature\n")); + rc = BCME_BADARG; + goto done; + } + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if (old[SROM18_SIGN] == SROM18_SIGNATURE) { + nw = SROM18_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if (old[SROM17_SIGN] == SROM17_SIGNATURE) { + nw = SROM17_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if (old[SROM16_SIGN] == SROM16_SIGNATURE) { + nw = SROM16_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if (old[SROM15_SIGN] == SROM15_SIGNATURE) { + nw = SROM15_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if (old[SROM13_SIGN] == SROM13_SIGNATURE) { + nw = SROM13_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if (old[SROM12_SIGN] == SROM12_SIGNATURE) { + nw = SROM12_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if (old[SROM11_SIGN] == SROM11_SIGNATURE) { + nw = SROM11_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else if ((old[SROM4_SIGN] == SROM4_SIGNATURE) || + (old[SROM8_SIGN] == SROM4_SIGNATURE)) { + nw = SROM4_WORDS; + crc_range = nw * 2; + if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) { + goto done; + } + } else { + /* Assert that we have already read enough for sromrev 2 */ + ASSERT(crc_range >= SROM_WORDS * 2); + nw = SROM_WORDS; + crc_range = nw * 2; + } + + if (byteoff == 0x55aa) { + /* Erase request */ + crc_range = 0; + memset((void *)new, 0xff, nw * 2); + } else { + /* Copy old contents */ + bcopy((void *)old, (void *)new, nw * 2); + /* make changes */ + bcopy((void *)buf, (void *)&new[byteoff / 2], nbytes); + } + + if (crc_range) { + /* calculate crc */ + htol16_buf(new, crc_range); + crc = ~hndcrc8((uint8 *)new, crc_range - 1, CRC8_INIT_VALUE); + ltoh16_buf(new, crc_range); + new[nw - 1] = (crc << 8) | (new[nw
- 1] & 0xff); + } + +#ifdef BCMPCIEDEV + if ((BUSTYPE(bustype) == SI_BUS) && + (BCM43602_CHIP(sih->chip) || + (BCM4369_CHIP(sih->chip)) || + (BCM4362_CHIP(sih->chip)) || + (BCM4378_CHIP(sih->chip)) || + (BCM4387_CHIP(sih->chip)) || + (BCM4388_CHIP(sih->chip)) || + (BCM4385_CHIP(sih->chip)) || + (BCM4389_CHIP(sih->chip)) || + (BCM4397_CHIP(sih->chip)) || + +#ifdef UNRELEASEDCHIP +#endif /* UNRELEASEDCHIP */ + + FALSE)) { +#else + if (BUSTYPE(bustype) == PCI_BUS) { +#endif /* BCMPCIEDEV */ + volatile uint16 *srom = NULL; + volatile void *ccregs = NULL; + uint32 ccval = 0; + + if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM43526_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID) || + BCM43602_CHIP(sih->chip)) { + /* save current control setting */ + ccval = si_chipcontrl_read(sih); + } + + if (BCM43602_CHIP(sih->chip) || + (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) && + (CHIPREV(sih->chiprev) <= 2))) { + si_chipcontrl_srom4360(sih, TRUE); + } + + if (FALSE) { + si_srom_clk_set(sih); /* corrects srom clock frequency */ + } + + /* enable writes to the SPROM */ + if (sih->ccrev > 31) { + if (BUSTYPE(sih->bustype) == SI_BUS) + ccregs = (void *)(uintptr)SI_ENUM_BASE(sih); + else + ccregs = ((volatile uint8 *)curmap + PCI_16KB0_CCREGS_OFFSET); + srom = (volatile uint16 *)((volatile uint8 *)ccregs + CC_SROM_OTP); + (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WREN, 0, 0); + } else { + srom = (volatile uint16 *) + ((volatile uint8 *)curmap + PCI_BAR0_SPROM_OFFSET); + val32 = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32)); + val32 |= SPROM_WRITEEN; + OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32); + } + bcm_mdelay(WRITE_ENABLE_DELAY); + /* write srom */ + for (i = 0; i < nw; i++) { + if (old[i] != new[i]) { + if (sih->ccrev > 31) { + if ((sih->cccaps & CC_CAP_SROM) == 0) { + /* No srom support in this chip */ + BS_ERROR(("srom_write, invalid srom, skip\n")); + } else + (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRITE, + i, new[i]); + } else { + W_REG(osh, &srom[i], new[i]); + } + bcm_mdelay(WRITE_WORD_DELAY); + } + } + /* disable writes to the SPROM */ + if (sih->ccrev > 31) { + (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRDIS, 0, 0); + } else { + OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32 & + ~SPROM_WRITEEN); + } + + if (BCM43602_CHIP(sih->chip) || + (CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) { + /* Restore config after reading SROM */ + si_chipcontrl_restore(sih, ccval); + } +#ifdef BCMSDIO + } else if (BUSTYPE(bustype) == SDIO_BUS) { + /* enable writes to the SPROM */ + if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WEN)) + goto done; + bcm_mdelay(WRITE_ENABLE_DELAY); + /* write srom */ + for (i = 0; i < nw; i++) { + if (old[i] != new[i]) { + sprom_write_sdio(osh, (uint16)(i), new[i]); + bcm_mdelay(WRITE_WORD_DELAY); + } + } + /* disable writes to the SPROM */ + if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WDS)) + goto done; +#endif /* BCMSDIO */ + } else if (BUSTYPE(bustype) == SI_BUS) { + goto done; + } else { + goto done; + } + + bcm_mdelay(WRITE_ENABLE_DELAY); + rc = 0; + +done: + if (old != NULL) + MFREE(osh, old, SROM_MAXW * sizeof(uint16)); + if (new != NULL) + MFREE(osh, new, SROM_MAXW * sizeof(uint16)); + + return rc; +} + +/** support only 16-bit word write into srom */ +int +srom_write_short(si_t *sih, uint 
bustype, volatile void *curmap, osl_t *osh, + uint byteoff, uint16 value) +{ + volatile uint32 val32; + int rc = 1; + + ASSERT(bustype == BUSTYPE(bustype)); + + if (byteoff & 1) + goto done; + +#ifdef BCMPCIEDEV + if ((BUSTYPE(bustype) == SI_BUS) && + (BCM43602_CHIP(sih->chip) || + FALSE)) { +#else + if (BUSTYPE(bustype) == PCI_BUS) { +#endif /* BCMPCIEDEV */ + volatile uint16 *srom = NULL; + volatile void *ccregs = NULL; + uint32 ccval = 0; + + if (BCM43602_CHIP(sih->chip) || + (CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM43526_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) { + /* save current control setting */ + ccval = si_chipcontrl_read(sih); + } + + if (BCM43602_CHIP(sih->chip) || + (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) && + (CHIPREV(sih->chiprev) <= 2))) { + si_chipcontrl_srom4360(sih, TRUE); + } + + if (FALSE) { + si_srom_clk_set(sih); /* corrects srom clock frequency */ + } + + /* enable writes to the SPROM */ + if (sih->ccrev > 31) { + if (BUSTYPE(sih->bustype) == SI_BUS) + ccregs = (void *)(uintptr)SI_ENUM_BASE(sih); + else + ccregs = ((volatile uint8 *)curmap + PCI_16KB0_CCREGS_OFFSET); + srom = (volatile uint16 *)((volatile uint8 *)ccregs + CC_SROM_OTP); + (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WREN, 0, 0); + } else { + srom = (volatile uint16 *) + ((volatile uint8 *)curmap + PCI_BAR0_SPROM_OFFSET); + val32 = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32)); + val32 |= SPROM_WRITEEN; + OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32); + } + bcm_mdelay(WRITE_ENABLE_DELAY); + /* write srom */ + if (sih->ccrev > 31) { + if ((sih->cccaps & CC_CAP_SROM) == 0) { + /* No srom support in this chip */ + BS_ERROR(("srom_write, invalid srom, skip\n")); + } else + (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRITE, + byteoff/2, value); + } else { + W_REG(osh, &srom[byteoff/2], value); + } + bcm_mdelay(WRITE_WORD_DELAY); + + /* disable writes to the SPROM */ + if (sih->ccrev > 31) { + (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRDIS, 0, 0); + } else { + OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32 & + ~SPROM_WRITEEN); + } + + if (BCM43602_CHIP(sih->chip) || + (CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM43526_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) { + /* Restore config after reading SROM */ + si_chipcontrl_restore(sih, ccval); + } +#ifdef BCMSDIO + } else if (BUSTYPE(bustype) == SDIO_BUS) { + /* enable writes to the SPROM */ + if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WEN)) + goto done; + bcm_mdelay(WRITE_ENABLE_DELAY); + /* write srom */ + sprom_write_sdio(osh, (uint16)(byteoff/2), value); + bcm_mdelay(WRITE_WORD_DELAY); + + /* disable writes to the SPROM */ + if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WDS)) + goto done; +#endif /* BCMSDIO */ + } else if (BUSTYPE(bustype) == SI_BUS) { + goto done; + } else { + goto done; + } + + bcm_mdelay(WRITE_ENABLE_DELAY); + rc = 0; + +done: + return rc; +} +#endif /* defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) */ + +/** + * These 'vstr_*' definitions are used to convert from CIS format to a 'NVRAM var=val' format, the + * NVRAM format is used throughout the rest of the firmware. 
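 + * For example, a MAC address tuple ends up as the single list entry + * "macaddr=00:90:4c:12:43:47" (editor's illustration, reusing one of the + * default MAC addresses above).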
+ */ +#if !defined(BCMDONGLEHOST) +static const char BCMATTACHDATA(vstr_manf)[] = "manf=%s"; +static const char BCMATTACHDATA(vstr_productname)[] = "productname=%s"; +static const char BCMATTACHDATA(vstr_manfid)[] = "manfid=0x%x"; +static const char BCMATTACHDATA(vstr_prodid)[] = "prodid=0x%x"; +#ifdef BCMSDIO +static const char BCMATTACHDATA(vstr_sdmaxspeed)[] = "sdmaxspeed=%d"; +static const char BCMATTACHDATA(vstr_sdmaxblk)[][13] = + { "sdmaxblk0=%d", "sdmaxblk1=%d", "sdmaxblk2=%d" }; +#endif +static const char BCMATTACHDATA(vstr_regwindowsz)[] = "regwindowsz=%d"; +static const char BCMATTACHDATA(vstr_sromrev)[] = "sromrev=%d"; +static const char BCMATTACHDATA(vstr_chiprev)[] = "chiprev=%d"; +static const char BCMATTACHDATA(vstr_subvendid)[] = "subvendid=0x%x"; +static const char BCMATTACHDATA(vstr_subdevid)[] = "subdevid=0x%x"; +static const char BCMATTACHDATA(vstr_boardrev)[] = "boardrev=0x%x"; +static const char BCMATTACHDATA(vstr_aa2g)[] = "aa2g=0x%x"; +static const char BCMATTACHDATA(vstr_aa5g)[] = "aa5g=0x%x"; +static const char BCMATTACHDATA(vstr_ag)[] = "ag%d=0x%x"; +static const char BCMATTACHDATA(vstr_cc)[] = "cc=%d"; +static const char BCMATTACHDATA(vstr_opo)[] = "opo=%d"; +static const char BCMATTACHDATA(vstr_pa0b)[][9] = { "pa0b0=%d", "pa0b1=%d", "pa0b2=%d" }; +static const char BCMATTACHDATA(vstr_pa0b_lo)[][12] = + { "pa0b0_lo=%d", "pa0b1_lo=%d", "pa0b2_lo=%d" }; +static const char BCMATTACHDATA(vstr_pa0itssit)[] = "pa0itssit=%d"; +static const char BCMATTACHDATA(vstr_pa0maxpwr)[] = "pa0maxpwr=%d"; +static const char BCMATTACHDATA(vstr_pa1b)[][9] = { "pa1b0=%d", "pa1b1=%d", "pa1b2=%d" }; +static const char BCMATTACHDATA(vstr_pa1lob)[][11] = + { "pa1lob0=%d", "pa1lob1=%d", "pa1lob2=%d" }; +static const char BCMATTACHDATA(vstr_pa1hib)[][11] = + { "pa1hib0=%d", "pa1hib1=%d", "pa1hib2=%d" }; +static const char BCMATTACHDATA(vstr_pa1itssit)[] = "pa1itssit=%d"; +static const char BCMATTACHDATA(vstr_pa1maxpwr)[] = "pa1maxpwr=%d"; +static const char BCMATTACHDATA(vstr_pa1lomaxpwr)[] = "pa1lomaxpwr=%d"; +static const char BCMATTACHDATA(vstr_pa1himaxpwr)[] = "pa1himaxpwr=%d"; +static const char BCMATTACHDATA(vstr_oem)[] = "oem=%02x%02x%02x%02x%02x%02x%02x%02x"; +static const char BCMATTACHDATA(vstr_boardflags)[] = "boardflags=0x%x"; +static const char BCMATTACHDATA(vstr_boardflags2)[] = "boardflags2=0x%x"; +static const char BCMATTACHDATA(vstr_boardflags3)[] = "boardflags3=0x%x"; +static const char BCMATTACHDATA(vstr_boardflags4)[] = "boardflags4=0x%x"; +static const char BCMATTACHDATA(vstr_boardflags5)[] = "boardflags5=0x%x"; +static const char BCMATTACHDATA(vstr_noccode)[] = "ccode=0x0"; +static const char BCMATTACHDATA(vstr_ccode)[] = "ccode=%c%c"; +static const char BCMATTACHDATA(vstr_cctl)[] = "cctl=0x%x"; +static const char BCMATTACHDATA(vstr_cckpo)[] = "cckpo=0x%x"; +static const char BCMATTACHDATA(vstr_ofdmpo)[] = "ofdmpo=0x%x"; +static const char BCMATTACHDATA(vstr_rdlid)[] = "rdlid=0x%x"; +#ifdef BCM_BOOTLOADER +static const char BCMATTACHDATA(vstr_rdlrndis)[] = "rdlrndis=%d"; +static const char BCMATTACHDATA(vstr_rdlrwu)[] = "rdlrwu=%d"; +static const char BCMATTACHDATA(vstr_rdlsn)[] = "rdlsn=%d"; +#endif /* BCM_BOOTLOADER */ +static const char BCMATTACHDATA(vstr_usbfs)[] = "usbfs=%d"; +static const char BCMATTACHDATA(vstr_wpsgpio)[] = "wpsgpio=%d"; +static const char BCMATTACHDATA(vstr_wpsled)[] = "wpsled=%d"; +static const char BCMATTACHDATA(vstr_rssismf2g)[] = "rssismf2g=%d"; +static const char BCMATTACHDATA(vstr_rssismc2g)[] = "rssismc2g=%d"; +static const char 
BCMATTACHDATA(vstr_rssisav2g)[] = "rssisav2g=%d"; +static const char BCMATTACHDATA(vstr_bxa2g)[] = "bxa2g=%d"; +static const char BCMATTACHDATA(vstr_rssismf5g)[] = "rssismf5g=%d"; +static const char BCMATTACHDATA(vstr_rssismc5g)[] = "rssismc5g=%d"; +static const char BCMATTACHDATA(vstr_rssisav5g)[] = "rssisav5g=%d"; +static const char BCMATTACHDATA(vstr_bxa5g)[] = "bxa5g=%d"; +static const char BCMATTACHDATA(vstr_tri2g)[] = "tri2g=%d"; +static const char BCMATTACHDATA(vstr_tri5gl)[] = "tri5gl=%d"; +static const char BCMATTACHDATA(vstr_tri5g)[] = "tri5g=%d"; +static const char BCMATTACHDATA(vstr_tri5gh)[] = "tri5gh=%d"; +static const char BCMATTACHDATA(vstr_rxpo2g)[] = "rxpo2g=%d"; +static const char BCMATTACHDATA(vstr_rxpo5g)[] = "rxpo5g=%d"; +static const char BCMATTACHDATA(vstr_boardtype)[] = "boardtype=0x%x"; +static const char BCMATTACHDATA(vstr_vendid)[] = "vendid=0x%x"; +static const char BCMATTACHDATA(vstr_devid)[] = "devid=0x%x"; +static const char BCMATTACHDATA(vstr_xtalfreq)[] = "xtalfreq=%d"; +static const char BCMATTACHDATA(vstr_txchain)[] = "txchain=0x%x"; +static const char BCMATTACHDATA(vstr_rxchain)[] = "rxchain=0x%x"; +static const char BCMATTACHDATA(vstr_elna2g)[] = "elna2g=0x%x"; +static const char BCMATTACHDATA(vstr_elna5g)[] = "elna5g=0x%x"; +static const char BCMATTACHDATA(vstr_antswitch)[] = "antswitch=0x%x"; +static const char BCMATTACHDATA(vstr_regrev)[] = "regrev=0x%x"; +static const char BCMATTACHDATA(vstr_antswctl2g)[] = "antswctl2g=0x%x"; +static const char BCMATTACHDATA(vstr_triso2g)[] = "triso2g=0x%x"; +static const char BCMATTACHDATA(vstr_pdetrange2g)[] = "pdetrange2g=0x%x"; +static const char BCMATTACHDATA(vstr_extpagain2g)[] = "extpagain2g=0x%x"; +static const char BCMATTACHDATA(vstr_tssipos2g)[] = "tssipos2g=0x%x"; +static const char BCMATTACHDATA(vstr_antswctl5g)[] = "antswctl5g=0x%x"; +static const char BCMATTACHDATA(vstr_triso5g)[] = "triso5g=0x%x"; +static const char BCMATTACHDATA(vstr_pdetrange5g)[] = "pdetrange5g=0x%x"; +static const char BCMATTACHDATA(vstr_extpagain5g)[] = "extpagain5g=0x%x"; +static const char BCMATTACHDATA(vstr_tssipos5g)[] = "tssipos5g=0x%x"; +static const char BCMATTACHDATA(vstr_maxp2ga)[] = "maxp2ga%d=0x%x"; +static const char BCMATTACHDATA(vstr_itt2ga0)[] = "itt2ga0=0x%x"; +static const char BCMATTACHDATA(vstr_pa)[] = "pa%dgw%da%d=0x%x"; +static const char BCMATTACHDATA(vstr_pahl)[] = "pa%dg%cw%da%d=0x%x"; +static const char BCMATTACHDATA(vstr_maxp5ga0)[] = "maxp5ga0=0x%x"; +static const char BCMATTACHDATA(vstr_itt5ga0)[] = "itt5ga0=0x%x"; +static const char BCMATTACHDATA(vstr_maxp5gha0)[] = "maxp5gha0=0x%x"; +static const char BCMATTACHDATA(vstr_maxp5gla0)[] = "maxp5gla0=0x%x"; +static const char BCMATTACHDATA(vstr_itt2ga1)[] = "itt2ga1=0x%x"; +static const char BCMATTACHDATA(vstr_maxp5ga1)[] = "maxp5ga1=0x%x"; +static const char BCMATTACHDATA(vstr_itt5ga1)[] = "itt5ga1=0x%x"; +static const char BCMATTACHDATA(vstr_maxp5gha1)[] = "maxp5gha1=0x%x"; +static const char BCMATTACHDATA(vstr_maxp5gla1)[] = "maxp5gla1=0x%x"; +static const char BCMATTACHDATA(vstr_cck2gpo)[] = "cck2gpo=0x%x"; +static const char BCMATTACHDATA(vstr_ofdm2gpo)[] = "ofdm2gpo=0x%x"; +static const char BCMATTACHDATA(vstr_ofdm5gpo)[] = "ofdm5gpo=0x%x"; +static const char BCMATTACHDATA(vstr_ofdm5glpo)[] = "ofdm5glpo=0x%x"; +static const char BCMATTACHDATA(vstr_ofdm5ghpo)[] = "ofdm5ghpo=0x%x"; +static const char BCMATTACHDATA(vstr_cddpo)[] = "cddpo=0x%x"; +static const char BCMATTACHDATA(vstr_stbcpo)[] = "stbcpo=0x%x"; +static const char 
BCMATTACHDATA(vstr_bw40po)[] = "bw40po=0x%x"; +static const char BCMATTACHDATA(vstr_bwduppo)[] = "bwduppo=0x%x"; +static const char BCMATTACHDATA(vstr_mcspo)[] = "mcs%dgpo%d=0x%x"; +static const char BCMATTACHDATA(vstr_mcspohl)[] = "mcs%dg%cpo%d=0x%x"; +static const char BCMATTACHDATA(vstr_custom)[] = "customvar%d=0x%x"; +static const char BCMATTACHDATA(vstr_cckdigfilttype)[] = "cckdigfilttype=%d"; +static const char BCMATTACHDATA(vstr_usbflags)[] = "usbflags=0x%x"; +#ifdef BCM_BOOTLOADER +static const char BCMATTACHDATA(vstr_mdio)[] = "mdio%d=0x%%x"; +static const char BCMATTACHDATA(vstr_mdioex)[] = "mdioex%d=0x%%x"; +static const char BCMATTACHDATA(vstr_brmin)[] = "brmin=0x%x"; +static const char BCMATTACHDATA(vstr_brmax)[] = "brmax=0x%x"; +static const char BCMATTACHDATA(vstr_pllreg)[] = "pll%d=0x%x"; +static const char BCMATTACHDATA(vstr_ccreg)[] = "chipc%d=0x%x"; +static const char BCMATTACHDATA(vstr_regctrl)[] = "reg%d=0x%x"; +static const char BCMATTACHDATA(vstr_time)[] = "r%dt=0x%x"; +static const char BCMATTACHDATA(vstr_depreg)[] = "r%dd=0x%x"; +static const char BCMATTACHDATA(vstr_usbpredly)[] = "usbpredly=0x%x"; +static const char BCMATTACHDATA(vstr_usbpostdly)[] = "usbpostdly=0x%x"; +static const char BCMATTACHDATA(vstr_usbrdy)[] = "usbrdy=0x%x"; +static const char BCMATTACHDATA(vstr_hsicphyctrl1)[] = "hsicphyctrl1=0x%x"; +static const char BCMATTACHDATA(vstr_hsicphyctrl2)[] = "hsicphyctrl2=0x%x"; +static const char BCMATTACHDATA(vstr_usbdevctrl)[] = "usbdevctrl=0x%x"; +static const char BCMATTACHDATA(vstr_bldr_reset_timeout)[] = "bldr_to=0x%x"; +static const char BCMATTACHDATA(vstr_muxenab)[] = "muxenab=0x%x"; +static const char BCMATTACHDATA(vstr_pubkey)[] = "pubkey=%s"; +#endif /* BCM_BOOTLOADER */ +static const char BCMATTACHDATA(vstr_boardnum)[] = "boardnum=%d"; +static const char BCMATTACHDATA(vstr_macaddr)[] = "macaddr=%s"; +static const char BCMATTACHDATA(vstr_macaddr2)[] = "macaddr2=%s"; +static const char BCMATTACHDATA(vstr_usbepnum)[] = "usbepnum=0x%x"; +#ifdef BCMUSBDEV_COMPOSITE +static const char BCMATTACHDATA(vstr_usbdesc_composite)[] = "usbdesc_composite=0x%x"; +#endif /* BCMUSBDEV_COMPOSITE */ +static const char BCMATTACHDATA(vstr_usbutmi_ctl)[] = "usbutmi_ctl=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_utmi_ctl0)[] = "usbssphy_utmi_ctl0=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_utmi_ctl1)[] = "usbssphy_utmi_ctl1=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_utmi_ctl2)[] = "usbssphy_utmi_ctl2=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_sleep0)[] = "usbssphy_sleep0=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_sleep1)[] = "usbssphy_sleep1=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_sleep2)[] = "usbssphy_sleep2=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_sleep3)[] = "usbssphy_sleep3=0x%x"; +static const char BCMATTACHDATA(vstr_usbssphy_mdio)[] = "usbssmdio%d=0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_usb30phy_noss)[] = "usbnoss=0x%x"; +static const char BCMATTACHDATA(vstr_usb30phy_u1u2)[] = "usb30u1u2=0x%x"; +static const char BCMATTACHDATA(vstr_usb30phy_regs)[] = "usb30regs%d=0x%x,0x%x,0x%x,0x%x"; + +/* Power per rate for SROM V9 */ +static const char BCMATTACHDATA(vstr_cckbw202gpo)[][21] = + { "cckbw202gpo=0x%x", "cckbw20ul2gpo=0x%x", "cckbw20in802gpo=0x%x" }; +static const char BCMATTACHDATA(vstr_legofdmbw202gpo)[][23] = + { "legofdmbw202gpo=0x%x", "legofdmbw20ul2gpo=0x%x" }; +static const char BCMATTACHDATA(vstr_legofdmbw205gpo)[][24] = + { "legofdmbw205glpo=0x%x", 
"legofdmbw20ul5glpo=0x%x", + "legofdmbw205gmpo=0x%x", "legofdmbw20ul5gmpo=0x%x", + "legofdmbw205ghpo=0x%x", "legofdmbw20ul5ghpo=0x%x" }; + +static const char BCMATTACHDATA(vstr_mcs2gpo)[][19] = +{ "mcsbw202gpo=0x%x", "mcsbw20ul2gpo=0x%x", "mcsbw402gpo=0x%x", "mcsbw802gpo=0x%x" }; + +static const char BCMATTACHDATA(vstr_mcs5glpo)[][20] = + { "mcsbw205glpo=0x%x", "mcsbw20ul5glpo=0x%x", "mcsbw405glpo=0x%x" }; + +static const char BCMATTACHDATA(vstr_mcs5gmpo)[][20] = + { "mcsbw205gmpo=0x%x", "mcsbw20ul5gmpo=0x%x", "mcsbw405gmpo=0x%x" }; + +static const char BCMATTACHDATA(vstr_mcs5ghpo)[][20] = + { "mcsbw205ghpo=0x%x", "mcsbw20ul5ghpo=0x%x", "mcsbw405ghpo=0x%x" }; + +static const char BCMATTACHDATA(vstr_mcs32po)[] = "mcs32po=0x%x"; +static const char BCMATTACHDATA(vstr_legofdm40duppo)[] = "legofdm40duppo=0x%x"; + +/* SROM V11 */ +static const char BCMATTACHDATA(vstr_tempthresh)[] = "tempthresh=%d"; /* HNBU_TEMPTHRESH */ +static const char BCMATTACHDATA(vstr_temps_period)[] = "temps_period=%d"; +static const char BCMATTACHDATA(vstr_temps_hysteresis)[] = "temps_hysteresis=%d"; +static const char BCMATTACHDATA(vstr_tempoffset)[] = "tempoffset=%d"; +static const char BCMATTACHDATA(vstr_tempsense_slope)[] = "tempsense_slope=%d"; +static const char BCMATTACHDATA(vstr_temp_corrx)[] = "tempcorrx=%d"; +static const char BCMATTACHDATA(vstr_tempsense_option)[] = "tempsense_option=%d"; +static const char BCMATTACHDATA(vstr_phycal_tempdelta)[] = "phycal_tempdelta=%d"; +static const char BCMATTACHDATA(vstr_tssiposslopeg)[] = "tssiposslope%dg=%d"; /* HNBU_FEM_CFG */ +static const char BCMATTACHDATA(vstr_epagaing)[] = "epagain%dg=%d"; +static const char BCMATTACHDATA(vstr_pdgaing)[] = "pdgain%dg=%d"; +static const char BCMATTACHDATA(vstr_tworangetssi)[] = "tworangetssi%dg=%d"; +static const char BCMATTACHDATA(vstr_papdcap)[] = "papdcap%dg=%d"; +static const char BCMATTACHDATA(vstr_femctrl)[] = "femctrl=%d"; +static const char BCMATTACHDATA(vstr_gainctrlsph)[] = "gainctrlsph=%d"; +static const char BCMATTACHDATA(vstr_subband5gver)[] = "subband5gver=%d"; /* HNBU_ACPA_CX */ +static const char BCMATTACHDATA(vstr_pa2ga)[] = "pa2ga%d=0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_maxp5ga)[] = "maxp5ga%d=0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_pa5ga)[] = "pa5ga%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x," + "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_subband6gver)[] = "subband6gver=%d"; /* HNBU_ACPA_CX */ +static const char BCMATTACHDATA(vstr_maxp6ga)[] = "maxp6ga%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_pa6ga)[] = "pa6ga%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x," + "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_pa2gccka)[] = "pa2gccka%d=0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_pa5gbw40a)[] = "pa5gbw40a%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x," + "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_pa5gbw80a)[] = "pa5gbw80a%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x," + "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_pa5gbw4080a)[] = "pa5gbw4080a%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x," + "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_rxgainsgelnagaina)[] = "rxgains%dgelnagaina%d=%d"; +static const char BCMATTACHDATA(vstr_rxgainsgtrisoa)[] = "rxgains%dgtrisoa%d=%d"; +static const char BCMATTACHDATA(vstr_rxgainsgtrelnabypa)[] = "rxgains%dgtrelnabypa%d=%d"; +static const char BCMATTACHDATA(vstr_rxgainsgxelnagaina)[] = "rxgains%dg%celnagaina%d=%d"; 
+static const char BCMATTACHDATA(vstr_rxgainsgxtrisoa)[] = "rxgains%dg%ctrisoa%d=%d"; +static const char BCMATTACHDATA(vstr_rxgainsgxtrelnabypa)[] = "rxgains%dg%ctrelnabypa%d=%d"; +static const char BCMATTACHDATA(vstr_measpower)[] = "measpower=0x%x"; /* HNBU_MEAS_PWR */ +static const char BCMATTACHDATA(vstr_measpowerX)[] = "measpower%d=0x%x"; +static const char BCMATTACHDATA(vstr_pdoffsetma)[] = "pdoffset%dma%d=0x%x"; /* HNBU_PDOFF */ +static const char BCMATTACHDATA(vstr_pdoffset2gma)[] = "pdoffset2g%dma%d=0x%x"; /* HNBU_PDOFF_2G */ +static const char BCMATTACHDATA(vstr_pdoffset2gmvalid)[] = "pdoffset2g%dmvalid=0x%x"; +static const char BCMATTACHDATA(vstr_rawtempsense)[] = "rawtempsense=0x%x"; +/* HNBU_ACPPR_2GPO */ +static const char BCMATTACHDATA(vstr_dot11agofdmhrbw202gpo)[] = "dot11agofdmhrbw202gpo=0x%x"; +static const char BCMATTACHDATA(vstr_ofdmlrbw202gpo)[] = "ofdmlrbw202gpo=0x%x"; +static const char BCMATTACHDATA(vstr_mcsbw805gpo)[] = "mcsbw805g%cpo=0x%x"; /* HNBU_ACPPR_5GPO */ +static const char BCMATTACHDATA(vstr_mcsbw1605gpo)[] = "mcsbw1605g%cpo=0x%x"; +static const char BCMATTACHDATA(vstr_mcsbw80p805gpo)[] = "mcsbw80p805g%cpo=0x%x"; +static const char BCMATTACHDATA(vstr_mcsbw80p805g1po)[] = "mcsbw80p805g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_mcsbw1605g1po)[] = "mcsbw1605g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_mcsbw805g1po)[] = "mcsbw805g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_mcsbw405g1po)[] = "mcsbw405g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_mcsbw205g1po)[] = "mcsbw205g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_mcslr5gpo)[] = "mcslr5g%cpo=0x%x"; +static const char BCMATTACHDATA(vstr_mcslr5g1po)[] = "mcslr5g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_mcslr5g80p80po)[] = "mcslr5g80p80po=0x%x"; +/* HNBU_ACPPR_SBPO */ +static const char BCMATTACHDATA(vstr_sb20in40rpo)[] = "sb20in40%crpo=0x%x"; +/* HNBU_ACPPR_SBPO */ +static const char BCMATTACHDATA(vstr_sb20in40and80rpo)[] = "sb20in40and80%crpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb20in80and160r5gpo)[] = "sb20in80and160%cr5g%cpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb20in80and160r5g1po)[] = "sb20in80and160%cr5g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_sb2040and80in80p80r5gpo)[] = + "sb2040and80in80p80%cr5g%cpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb2040and80in80p80r5g1po)[] = + "sb2040and80in80p80%cr5g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_sb20in40dot11agofdm2gpo)[] = "sb20in40dot11agofdm2gpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb20in80dot11agofdm2gpo)[] = "sb20in80dot11agofdm2gpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb20in40ofdmlrbw202gpo)[] = "sb20in40ofdmlrbw202gpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb20in80ofdmlrbw202gpo)[] = "sb20in80ofdmlrbw202gpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb20in80p80r5gpo)[] = "sb20in80p80%cr5gpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb40and80r5gpo)[] = "sb40and80%cr5g%cpo=0x%x"; +static const char BCMATTACHDATA(vstr_sb40and80r5g1po)[] = "sb40and80%cr5g%c1po=0x%x"; +static const char BCMATTACHDATA(vstr_dot11agduprpo)[] = "dot11agdup%crpo=0x%x"; +static const char BCMATTACHDATA(vstr_dot11agduppo)[] = "dot11agduppo=0x%x"; +static const char BCMATTACHDATA(vstr_noiselvl2ga)[] = "noiselvl2ga%d=%d"; /* HNBU_NOISELVL */ +static const char BCMATTACHDATA(vstr_noiselvl5ga)[] = "noiselvl5ga%d=%d,%d,%d,%d"; +/* HNBU_RXGAIN_ERR */ +static const char BCMATTACHDATA(vstr_rxgainerr2ga)[] = "rxgainerr2ga%d=0x%x"; +static const char BCMATTACHDATA(vstr_rxgainerr5ga)[] 
= "rxgainerr5ga%d=0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_agbg)[] = "agbg%d=0x%x"; /* HNBU_AGBGA */ +static const char BCMATTACHDATA(vstr_aga)[] = "aga%d=0x%x"; +static const char BCMATTACHDATA(vstr_txduty_ofdm)[] = "tx_duty_cycle_ofdm_%d_5g=%d"; +static const char BCMATTACHDATA(vstr_txduty_thresh)[] = "tx_duty_cycle_thresh_%d_5g=%d"; +static const char BCMATTACHDATA(vstr_paparambwver)[] = "paparambwver=%d"; + +static const char BCMATTACHDATA(vstr_uuid)[] = "uuid=%s"; + +static const char BCMATTACHDATA(vstr_wowlgpio)[] = "wowl_gpio=%d"; +static const char BCMATTACHDATA(vstr_wowlgpiopol)[] = "wowl_gpiopol=%d"; + +static const char BCMATTACHDATA(rstr_ag0)[] = "ag0"; +static const char BCMATTACHDATA(rstr_sromrev)[] = "sromrev"; + +static const char BCMATTACHDATA(vstr_paparamrpcalvars)[][20] = + {"rpcal2g=0x%x", "rpcal5gb0=0x%x", "rpcal5gb1=0x%x", + "rpcal5gb2=0x%x", "rpcal5gb3=0x%x"}; + +static const char BCMATTACHDATA(vstr_gpdn)[] = "gpdn=0x%x"; + +/* SROM V13 PA */ +static const char BCMATTACHDATA(vstr_sr13pa2ga)[] = "pa2ga%d=0x%x,0x%x,0x%x,0x%x"; +static const char BCMATTACHDATA(vstr_maxp5gba)[] = "maxp5gb%da%d=0x%x"; +static const char BCMATTACHDATA(vstr_sr13pa5ga)[] = "pa5ga%d=%s"; +static const char BCMATTACHDATA(vstr_sr13pa5gbwa)[] = "pa5g%da%d=%s"; +static const char BCMATTACHDATA(vstr_pa2g40a)[] = "pa2g40a%d=0x%x,0x%x,0x%x,0x%x"; + +/* RSSI Cal parameters */ +static const char BCMATTACHDATA(vstr_rssicalfrqg)[] = + "rssi_cal_freq_grp_2g=0x%x0x%x0x%x0x%x0x%x0x%x0x%x"; +static const char BCMATTACHDATA(vstr_rssidelta2g)[] = + "rssi_delta_2gb%d=%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d"; +static const char BCMATTACHDATA(vstr_rssidelta5g)[] = + "rssi_delta_5g%s=%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d"; + +uint8 patch_pair = 0; + +/* For dongle HW, accept partial calibration parameters */ +#if defined(BCMSDIODEV) || defined(BCMUSBDEV) || defined(BCMDONGLEHOST) +#define BCMDONGLECASE(n) case n: +#else +#define BCMDONGLECASE(n) +#endif + +#ifdef BCM_BOOTLOADER +/* The format of the PMUREGS OTP Tuple -> + * 1 byte -> Lower 5 bits has the address of the register + * Higher 3 bits has the mode of the register like + * PLL, ChipCtrl, RegCtrl, UpDwn or Dependency mask + * 4 bytes -> Value of the register to be updated. + */ +#define PMUREGS_MODE_MASK 0xE0 +#define PMUREGS_MODE_SHIFT 5 +#define PMUREGS_ADDR_MASK 0x1F +#define PMUREGS_TPL_SIZE 5 + +enum { + PMU_PLLREG_MODE, + PMU_CCREG_MODE, + PMU_VOLTREG_MODE, + PMU_RES_TIME_MODE, + PMU_RESDEPEND_MODE +}; + +#define USBREGS_TPL_SIZE 5 +enum { + USB_DEV_CTRL_REG, + HSIC_PHY_CTRL1_REG, + HSIC_PHY_CTRL2_REG +}; + +#define USBRDY_DLY_TYPE 0x8000 /* Bit indicating if the byte is pre or post delay value */ +#define USBRDY_DLY_MASK 0x7FFF /* Bits indicating the amount of delay */ +#define USBRDY_MAXOTP_SIZE 5 /* Max size of the OTP parameter */ + +#endif /* BCM_BOOTLOADER */ + +static uint +BCMATTACHFN(get_max_cis_size)(si_t *sih) +{ + uint max_cis_size; + void *oh; + + max_cis_size = (sih && sih->ccrev >= 49) ? 
CIS_SIZE_12K : CIS_SIZE; + if (sih && (oh = otp_init(sih)) != NULL) { + max_cis_size -= otp_avsbitslen(oh); + } + return max_cis_size; +} + +#ifndef BCM_BOOTLOADER +static uint32 +BCMATTACHFN(srom_data2value)(uint8 *p, uint8 len) +{ + uint8 pos = 0; + uint32 value = 0; + + ASSERT(len <= 4); + + while (pos < len) { + value += (p[pos] << (pos * 8)); + pos++; + } + + return value; +} +#endif /* BCM_BOOTLOADER */ + +/** + * Both SROM and OTP contain variables in 'CIS' format, whereas the rest of the firmware works with + * 'variable/value' string pairs. + */ +int +BCMATTACHFN(srom_parsecis)(si_t *sih, osl_t *osh, uint8 *pcis[], uint ciscnt, char **vars, + uint *count) +{ + char eabuf[32]; + char eabuf2[32]; + char *base; + varbuf_t b; + uint8 *cis, tup, tlen, sromrev = 1; + uint i; + uint16 j; +#ifndef BCM_BOOTLOADER + bool ag_init = FALSE; +#endif + uint32 w32; + uint funcid; + uint cisnum; + int32 boardnum; + int err; + bool standard_cis; + uint max_cis_size; + uint var_cis_size = 0; + + ASSERT(count != NULL); + + if (vars == NULL) { + ASSERT(0); /* crash debug images for investigation */ + return BCME_BADARG; + } + + boardnum = -1; + + /* freed in same function */ + max_cis_size = get_max_cis_size(sih); + var_cis_size = *count + ((max_cis_size + 2u) * ciscnt); + + ASSERT(var_cis_size <= MAXSZ_NVRAM_VARS); + + base = MALLOC_NOPERSIST(osh, var_cis_size); + ASSERT(base != NULL); + if (!base) + return -2; + + varbuf_init(&b, base, var_cis_size); + bzero(base, var_cis_size); + /* Append from vars if there's already something inside */ + if (*vars && **vars && (*count >= 3)) { + /* back off \0 at the end, leaving only one \0 for the last param */ + while (((*vars)[(*count)-1] == '\0') && ((*vars)[(*count)-2] == '\0')) + (*count)--; + + bcopy(*vars, base, *count); + b.buf += *count; + } + eabuf[0] = '\0'; + eabuf2[0] = '\0'; + for (cisnum = 0; cisnum < ciscnt; cisnum++) { + cis = *pcis++; + i = 0; + funcid = 0; + standard_cis = TRUE; + do { + if (standard_cis) { + tup = cis[i++]; + if (tup == CISTPL_NULL || tup == CISTPL_END) + tlen = 0; + else + tlen = cis[i++]; + } else { + if (cis[i] == CISTPL_NULL || cis[i] == CISTPL_END) { + tlen = 0; + tup = cis[i]; + } else { + tlen = cis[i]; + tup = CISTPL_BRCM_HNBU; + } + ++i; + } + if ((i + tlen) >= max_cis_size) + break; + + switch (tup) { + case CISTPL_VERS_1: + /* assume the strings are good if the version field checks out */ + if (((cis[i + 1] << 8) + cis[i]) >= 0x0008) { + varbuf_append(&b, vstr_manf, &cis[i + 2]); + varbuf_append(&b, vstr_productname, + &cis[i + 3 + strlen((char *)&cis[i + 2])]); + break; + } + + case CISTPL_MANFID: + varbuf_append(&b, vstr_manfid, (cis[i + 1] << 8) + cis[i]); + varbuf_append(&b, vstr_prodid, (cis[i + 3] << 8) + cis[i + 2]); + break; + + case CISTPL_FUNCID: + funcid = cis[i]; + break; + + case CISTPL_FUNCE: + switch (funcid) { + case CISTPL_FID_SDIO: +#ifdef BCMSDIO + if (cis[i] == 0) { + uint8 spd = cis[i + 3]; + static int lbase[] = { + -1, 10, 12, 13, 15, 20, 25, 30, + 35, 40, 45, 50, 55, 60, 70, 80 + }; + static int mult[] = { + 10, 100, 1000, 10000, + -1, -1, -1, -1 + }; + ASSERT((mult[spd & 0x7] != -1) && + (lbase[(spd >> 3) & 0x0f])); + varbuf_append(&b, vstr_sdmaxblk[0], + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_sdmaxspeed, + (mult[spd & 0x7] * + lbase[(spd >> 3) & 0x0f])); + } else if (cis[i] == 1) { + varbuf_append(&b, vstr_sdmaxblk[cisnum], + (cis[i + 13] << 8) | cis[i + 12]); + } +#endif /* BCMSDIO */ + funcid = 0; + break; + default: + /* set macaddr if HNBU_MACADDR not seen yet */ + 
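/* (editor's note) layout assumed by the code below: cis[i] == LAN_NID, + * cis[i + 1] == address length, cis[i + 2..i + 7] == MAC address, whose + * last two bytes double as a default boardnum. */ +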
if (eabuf[0] == '\0' && cis[i] == LAN_NID && + !(ETHER_ISNULLADDR(&cis[i + 2])) && + !(ETHER_ISMULTI(&cis[i + 2]))) { + ASSERT(cis[i + 1] == ETHER_ADDR_LEN); + bcm_ether_ntoa((struct ether_addr *)&cis[i + 2], + eabuf); + + /* set boardnum if HNBU_BOARDNUM not seen yet */ + if (boardnum == -1) + boardnum = (cis[i + 6] << 8) + cis[i + 7]; + } + break; + } + break; + + case CISTPL_CFTABLE: + varbuf_append(&b, vstr_regwindowsz, (cis[i + 7] << 8) | cis[i + 6]); + break; + + case CISTPL_BRCM_HNBU: + switch (cis[i]) { + case HNBU_SROMREV: + sromrev = cis[i + 1]; + varbuf_append(&b, vstr_sromrev, sromrev); + break; + + case HNBU_XTALFREQ: + varbuf_append(&b, vstr_xtalfreq, + (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + cis[i + 1]); + break; + + case HNBU_CHIPID: + varbuf_append(&b, vstr_vendid, (cis[i + 2] << 8) + + cis[i + 1]); + varbuf_append(&b, vstr_devid, (cis[i + 4] << 8) + + cis[i + 3]); + if (tlen >= 7) { + varbuf_append(&b, vstr_chiprev, + (cis[i + 6] << 8) + cis[i + 5]); + } + if (tlen >= 9) { + varbuf_append(&b, vstr_subvendid, + (cis[i + 8] << 8) + cis[i + 7]); + } + if (tlen >= 11) { + varbuf_append(&b, vstr_subdevid, + (cis[i + 10] << 8) + cis[i + 9]); + /* subdevid doubles for boardtype */ + varbuf_append(&b, vstr_boardtype, + (cis[i + 10] << 8) + cis[i + 9]); + } + break; + + case HNBU_BOARDNUM: + boardnum = (cis[i + 2] << 8) + cis[i + 1]; + break; + + case HNBU_PATCH: { + char vstr_paddr[16]; + char vstr_pdata[16]; + + /* retrieve the patch pairs + * from tlen/6; where 6 is + * sizeof(patch addr(2)) + + * sizeof(patch data(4)). + */ + patch_pair = tlen/6; + + for (j = 0; j < patch_pair; j++) { + snprintf(vstr_paddr, sizeof(vstr_paddr), + rstr_paddr, j); + snprintf(vstr_pdata, sizeof(vstr_pdata), + rstr_pdata, j); + + varbuf_append(&b, vstr_paddr, + (cis[i + (j*6) + 2] << 8) | + cis[i + (j*6) + 1]); + + varbuf_append(&b, vstr_pdata, + (cis[i + (j*6) + 6] << 24) | + (cis[i + (j*6) + 5] << 16) | + (cis[i + (j*6) + 4] << 8) | + cis[i + (j*6) + 3]); + } + break; + } + + case HNBU_BOARDREV: + if (tlen == 2) + varbuf_append(&b, vstr_boardrev, cis[i + 1]); + else + varbuf_append(&b, vstr_boardrev, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_BOARDFLAGS: + w32 = (cis[i + 2] << 8) + cis[i + 1]; + if (tlen >= 5) + w32 |= ((cis[i + 4] << 24) + (cis[i + 3] << 16)); + varbuf_append(&b, vstr_boardflags, w32); + + if (tlen >= 7) { + w32 = (cis[i + 6] << 8) + cis[i + 5]; + if (tlen >= 9) + w32 |= ((cis[i + 8] << 24) + + (cis[i + 7] << 16)); + varbuf_append(&b, vstr_boardflags2, w32); + } + if (tlen >= 11) { + w32 = (cis[i + 10] << 8) + cis[i + 9]; + if (tlen >= 13) + w32 |= ((cis[i + 12] << 24) + + (cis[i + 11] << 16)); + varbuf_append(&b, vstr_boardflags3, w32); + } + if (tlen >= 15) { + w32 = (cis[i + 14] << 8) + cis[i + 13]; + if (tlen >= 17) + w32 |= ((cis[i + 16] << 24) + + (cis[i + 15] << 16)); + varbuf_append(&b, vstr_boardflags4, w32); + } + if (tlen >= 19) { + w32 = (cis[i + 18] << 8) + cis[i + 17]; + if (tlen >= 21) + w32 |= ((cis[i + 20] << 24) + + (cis[i + 19] << 16)); + varbuf_append(&b, vstr_boardflags5, w32); + } + break; + + case HNBU_USBFS: + varbuf_append(&b, vstr_usbfs, cis[i + 1]); + break; + + case HNBU_BOARDTYPE: + varbuf_append(&b, vstr_boardtype, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_HNBUCIS: + /* + * what follows is a nonstandard HNBU CIS + * that lacks CISTPL_BRCM_HNBU tags + * + * skip 0xff (end of standard CIS) + * after this tuple + */ + tlen++; + standard_cis = FALSE; + break; + + case HNBU_USBEPNUM: + 
varbuf_append(&b, vstr_usbepnum, + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_PATCH_AUTOINC: { + char vstr_paddr[16]; + char vstr_pdata[16]; + uint32 addr_inc; + uint8 pcnt; + + addr_inc = (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + (cis[i + 1]); + + pcnt = (tlen - 5)/4; + for (j = 0; j < pcnt; j++) { + snprintf(vstr_paddr, sizeof(vstr_paddr), + rstr_paddr, j + patch_pair); + snprintf(vstr_pdata, sizeof(vstr_pdata), + rstr_pdata, j + patch_pair); + + varbuf_append(&b, vstr_paddr, addr_inc); + varbuf_append(&b, vstr_pdata, + (cis[i + (j*4) + 8] << 24) | + (cis[i + (j*4) + 7] << 16) | + (cis[i + (j*4) + 6] << 8) | + cis[i + (j*4) + 5]); + addr_inc += 4; + } + patch_pair += pcnt; + break; + } + case HNBU_PATCH2: { + char vstr_paddr[16]; + char vstr_pdata[16]; + + /* retrieve the patch pairs + * from tlen/8; where 8 is + * sizeof(patch addr(4)) + + * sizeof(patch data(4)). + */ + patch_pair = tlen/8; + + for (j = 0; j < patch_pair; j++) { + snprintf(vstr_paddr, sizeof(vstr_paddr), + rstr_paddr, j); + snprintf(vstr_pdata, sizeof(vstr_pdata), + rstr_pdata, j); + + varbuf_append(&b, vstr_paddr, + (cis[i + (j*8) + 4] << 24) | + (cis[i + (j*8) + 3] << 16) | + (cis[i + (j*8) + 2] << 8) | + cis[i + (j*8) + 1]); + + varbuf_append(&b, vstr_pdata, + (cis[i + (j*8) + 8] << 24) | + (cis[i + (j*8) + 7] << 16) | + (cis[i + (j*8) + 6] << 8) | + cis[i + (j*8) + 5]); + } + break; + } + case HNBU_PATCH_AUTOINC8: { + char vstr_paddr[16]; + char vstr_pdatah[16]; + char vstr_pdatal[16]; + uint32 addr_inc; + uint8 pcnt; + + addr_inc = (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + (cis[i + 1]); + + pcnt = (tlen - 5)/8; + for (j = 0; j < pcnt; j++) { + snprintf(vstr_paddr, sizeof(vstr_paddr), + rstr_paddr, j + patch_pair); + snprintf(vstr_pdatah, sizeof(vstr_pdatah), + rstr_pdatah, j + patch_pair); + snprintf(vstr_pdatal, sizeof(vstr_pdatal), + rstr_pdatal, j + patch_pair); + + varbuf_append(&b, vstr_paddr, addr_inc); + varbuf_append(&b, vstr_pdatal, + (cis[i + (j*8) + 8] << 24) | + (cis[i + (j*8) + 7] << 16) | + (cis[i + (j*8) + 6] << 8) | + cis[i + (j*8) + 5]); + varbuf_append(&b, vstr_pdatah, + (cis[i + (j*8) + 12] << 24) | + (cis[i + (j*8) + 11] << 16) | + (cis[i + (j*8) + 10] << 8) | + cis[i + (j*8) + 9]); + addr_inc += 8; + } + patch_pair += pcnt; + break; + } + case HNBU_PATCH8: { + char vstr_paddr[16]; + char vstr_pdatah[16]; + char vstr_pdatal[16]; + + /* retrieve the patch pairs + * from tlen/8; where 8 is + * sizeof(patch addr(4)) + + * sizeof(patch data(4)). 
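+ * (Note: each entry in this case actually carries a
+ * 4-byte address plus two 4-byte data words, hence
+ * the tlen/12 below rather than tlen/8.)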
+ */ + patch_pair = tlen/12; + + for (j = 0; j < patch_pair; j++) { + snprintf(vstr_paddr, sizeof(vstr_paddr), + rstr_paddr, j); + snprintf(vstr_pdatah, sizeof(vstr_pdatah), + rstr_pdatah, j); + snprintf(vstr_pdatal, sizeof(vstr_pdatal), + rstr_pdatal, j); + + varbuf_append(&b, vstr_paddr, + (cis[i + (j*12) + 4] << 24) | + (cis[i + (j*12) + 3] << 16) | + (cis[i + (j*12) + 2] << 8) | + cis[i + (j*12) + 1]); + + varbuf_append(&b, vstr_pdatal, + (cis[i + (j*12) + 8] << 24) | + (cis[i + (j*12) + 7] << 16) | + (cis[i + (j*12) + 6] << 8) | + cis[i + (j*12) + 5]); + + varbuf_append(&b, vstr_pdatah, + (cis[i + (j*12) + 12] << 24) | + (cis[i + (j*12) + 11] << 16) | + (cis[i + (j*12) + 10] << 8) | + cis[i + (j*12) + 9]); + } + break; + } + case HNBU_USBFLAGS: + varbuf_append(&b, vstr_usbflags, + (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + cis[i + 1]); + break; +#ifdef BCM_BOOTLOADER + case HNBU_MDIOEX_REGLIST: + case HNBU_MDIO_REGLIST: { + /* Format: addr (8 bits) | val (16 bits) */ + const uint8 msize = 3; + char mdiostr[24]; + const char *mdiodesc; + uint8 *st; + + mdiodesc = (cis[i] == HNBU_MDIO_REGLIST) ? + vstr_mdio : vstr_mdioex; + + ASSERT(((tlen - 1) % msize) == 0); + + st = &cis[i + 1]; /* start of reg list */ + for (j = 0; j < (tlen - 1); j += msize, st += msize) { + snprintf(mdiostr, sizeof(mdiostr), + mdiodesc, st[0]); + varbuf_append(&b, mdiostr, (st[2] << 8) | st[1]); + } + break; + } + case HNBU_BRMIN: + varbuf_append(&b, vstr_brmin, + (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + cis[i + 1]); + break; + + case HNBU_BRMAX: + varbuf_append(&b, vstr_brmax, + (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + cis[i + 1]); + break; +#endif /* BCM_BOOTLOADER */ + + case HNBU_RDLID: + varbuf_append(&b, vstr_rdlid, + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_GCI_CCR: { + /* format: + * |0x80| <== brcm + * |len| <== variable, multiple of 5 + * |tup| <== tupletype + * |ccreg_ix0| <== ix of ccreg [1byte] + * |ccreg_val0| <== corr value [4bytes] + * --- + * Multiple registers are possible. for eg: we + * can specify reg_ix3val3 and reg_ix5val5, etc + */ + char vstr_gci_ccreg_entry[16]; + uint8 num_entries = 0; + + /* retrieve the index-value pairs + * from tlen/5; where 5 is + * sizeof(ccreg_ix(1)) + + * sizeof(ccreg_val(4)). 
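+ * For example, tlen == 10 yields two
+ * index/value entries.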
+ */
+ num_entries = tlen/5;
+
+ for (j = 0; j < num_entries; j++) {
+ snprintf(vstr_gci_ccreg_entry,
+ sizeof(vstr_gci_ccreg_entry),
+ rstr_gci_ccreg_entry,
+ cis[i + (j*5) + 1]);
+
+ varbuf_append(&b, vstr_gci_ccreg_entry,
+ (cis[i + (j*5) + 5] << 24) |
+ (cis[i + (j*5) + 4] << 16) |
+ (cis[i + (j*5) + 3] << 8) |
+ cis[i + (j*5) + 2]);
+ }
+ break;
+ }
+
+#ifdef BCM_BOOTLOADER
+ case HNBU_RDLRNDIS:
+ varbuf_append(&b, vstr_rdlrndis, cis[i + 1]);
+ break;
+
+ case HNBU_RDLRWU:
+ varbuf_append(&b, vstr_rdlrwu, cis[i + 1]);
+ break;
+
+ case HNBU_RDLSN:
+ if (tlen >= 5)
+ varbuf_append(&b, vstr_rdlsn,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ else
+ varbuf_append(&b, vstr_rdlsn,
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_PMUREGS: {
+ uint8 offset = 1, mode_addr, mode, addr;
+ const char *fmt;
+
+ do {
+ mode_addr = cis[i+offset];
+
+ mode = (mode_addr & PMUREGS_MODE_MASK)
+ >> PMUREGS_MODE_SHIFT;
+ addr = mode_addr & PMUREGS_ADDR_MASK;
+
+ switch (mode) {
+ case PMU_PLLREG_MODE:
+ fmt = vstr_pllreg;
+ break;
+ case PMU_CCREG_MODE:
+ fmt = vstr_ccreg;
+ break;
+ case PMU_VOLTREG_MODE:
+ fmt = vstr_regctrl;
+ break;
+ case PMU_RES_TIME_MODE:
+ fmt = vstr_time;
+ break;
+ case PMU_RESDEPEND_MODE:
+ fmt = vstr_depreg;
+ break;
+ default:
+ fmt = NULL;
+ break;
+ }
+
+ if (fmt != NULL) {
+ varbuf_append(&b, fmt, addr,
+ (cis[i + offset + 4] << 24) |
+ (cis[i + offset + 3] << 16) |
+ (cis[i + offset + 2] << 8) |
+ cis[i + offset + 1]);
+ }
+
+ offset += PMUREGS_TPL_SIZE;
+ } while (offset < tlen);
+ break;
+ }
+
+ case HNBU_USBREGS: {
+ uint8 offset = 1, usb_reg;
+ const char *fmt;
+
+ do {
+ usb_reg = cis[i+offset];
+
+ switch (usb_reg) {
+ case USB_DEV_CTRL_REG:
+ fmt = vstr_usbdevctrl;
+ break;
+ case HSIC_PHY_CTRL1_REG:
+ fmt = vstr_hsicphyctrl1;
+ break;
+ case HSIC_PHY_CTRL2_REG:
+ fmt = vstr_hsicphyctrl2;
+ break;
+ default:
+ fmt = NULL;
+ break;
+ }
+
+ if (fmt != NULL) {
+ varbuf_append(&b, fmt,
+ (cis[i + offset + 4] << 24) |
+ (cis[i + offset + 3] << 16) |
+ (cis[i + offset + 2] << 8) |
+ cis[i + offset + 1]);
+ }
+
+ offset += USBREGS_TPL_SIZE;
+ } while (offset < tlen);
+ break;
+ }
+
+ case HNBU_USBRDY:
+ /* The first byte of this tuple indicates whether the
+ * host needs to be informed about the readiness of
+ * the HSIC/USB for enumeration, and on which GPIO
+ * the device should assert this event.
+ */
+ varbuf_append(&b, vstr_usbrdy, cis[i + 1]);
+
+ /* The following fields in this OTP are optional.
+ * The remaining bytes indicate the delay required
+ * before and/or after ch_init(). Each delay is a
+ * 16-bit value whose MSB (bit 15 of 15:0) indicates
+ * whether the parameter is a pre- or post-delay.
+ */
+ for (j = 2; j < USBRDY_MAXOTP_SIZE && j < tlen;
+ j += 2) {
+ uint16 usb_delay;
+
+ usb_delay = cis[i + j] | (cis[i + j + 1] << 8);
+
+ /* Bit 15 of the delay field indicates the
+ * type of delay (pre or post).
+ */
+ if (usb_delay & USBRDY_DLY_TYPE) {
+ varbuf_append(&b, vstr_usbpostdly,
+ (usb_delay & USBRDY_DLY_MASK));
+ } else {
+ varbuf_append(&b, vstr_usbpredly,
+ (usb_delay & USBRDY_DLY_MASK));
+ }
+ }
+ break;
+
+ case HNBU_BLDR_TIMEOUT:
+ /* Timeout after USB connect within which the dongle
+ * must receive a get_descriptor request.
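+ * Stored as a 16-bit little-endian value in the
+ * two data bytes below.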
+ */ + varbuf_append(&b, vstr_bldr_reset_timeout, + (cis[i + 1] | (cis[i + 2] << 8))); + break; + + case HNBU_MUXENAB: + varbuf_append(&b, vstr_muxenab, cis[i + 1]); + break; + case HNBU_PUBKEY: { + /* The public key is in binary format in OTP, + * convert to string format before appending + * buffer string. + * public key(12 bytes) + crc (1byte) = 129 + */ + unsigned char a[300]; + int k; + + for (k = 1, j = 0; k < 129; k++) + j += snprintf((char *)(a + j), + sizeof(a) - j, + "%02x", cis[i + k]); + + a[256] = 0; + + varbuf_append(&b, vstr_pubkey, a); + break; + } +#else + case HNBU_AA: + varbuf_append(&b, vstr_aa2g, cis[i + 1]); + if (tlen >= 3) + varbuf_append(&b, vstr_aa5g, cis[i + 2]); + break; + + case HNBU_AG: + varbuf_append(&b, vstr_ag, 0, cis[i + 1]); + if (tlen >= 3) + varbuf_append(&b, vstr_ag, 1, cis[i + 2]); + if (tlen >= 4) + varbuf_append(&b, vstr_ag, 2, cis[i + 3]); + if (tlen >= 5) + varbuf_append(&b, vstr_ag, 3, cis[i + 4]); + ag_init = TRUE; + break; + + case HNBU_ANT5G: + varbuf_append(&b, vstr_aa5g, cis[i + 1]); + varbuf_append(&b, vstr_ag, 1, cis[i + 2]); + break; + + case HNBU_CC: + ASSERT(sromrev == 1); + varbuf_append(&b, vstr_cc, cis[i + 1]); + break; + + case HNBU_PAPARMS: { + uint8 pa0_lo_offset = 0; + switch (tlen) { + case 2: + ASSERT(sromrev == 1); + varbuf_append(&b, vstr_pa0maxpwr, cis[i + 1]); + break; + /* case 16: + ASSERT(sromrev >= 11); + for (j = 0; j < 3; j++) { + varbuf_append(&b, vstr_pa0b_lo[j], + (cis[i + (j * 2) + 11] << 8) + + cis[i + (j * 2) + 10]); + } + FALLTHROUGH + */ + case 10: + case 16: + ASSERT(sromrev >= 2); + varbuf_append(&b, vstr_opo, cis[i + 9]); + if (tlen >= 13 && pa0_lo_offset == 0) + pa0_lo_offset = 9; + /* FALLTHROUGH */ + case 9: + case 15: + varbuf_append(&b, vstr_pa0maxpwr, cis[i + 8]); + if (tlen >= 13 && pa0_lo_offset == 0) + pa0_lo_offset = 8; + /* FALLTHROUGH */ + BCMDONGLECASE(8) + BCMDONGLECASE(14) + varbuf_append(&b, vstr_pa0itssit, cis[i + 7]); + varbuf_append(&b, vstr_maxp2ga, 0, cis[i + 7]); + if (tlen >= 13 && pa0_lo_offset == 0) + pa0_lo_offset = 7; + /* FALLTHROUGH */ + BCMDONGLECASE(7) + BCMDONGLECASE(13) + for (j = 0; j < 3; j++) { + varbuf_append(&b, vstr_pa0b[j], + (cis[i + (j * 2) + 2] << 8) + + cis[i + (j * 2) + 1]); + } + if (tlen >= 13 && pa0_lo_offset == 0) + pa0_lo_offset = 6; + + if (tlen >= 13 && pa0_lo_offset != 0) { + for (j = 0; j < 3; j++) { + varbuf_append(&b, vstr_pa0b_lo[j], + (cis[pa0_lo_offset+i+(j*2)+2]<<8)+ + cis[pa0_lo_offset+i+(j*2)+1]); + } + } + break; + default: + ASSERT((tlen == 2) || (tlen == 9) || (tlen == 10) || + (tlen == 15) || (tlen == 16)); + break; + } + break; + } + case HNBU_PAPARMS5G: + ASSERT((sromrev == 2) || (sromrev == 3)); + switch (tlen) { + case 23: + varbuf_append(&b, vstr_pa1himaxpwr, cis[i + 22]); + varbuf_append(&b, vstr_pa1lomaxpwr, cis[i + 21]); + varbuf_append(&b, vstr_pa1maxpwr, cis[i + 20]); + /* FALLTHROUGH */ + case 20: + varbuf_append(&b, vstr_pa1itssit, cis[i + 19]); + /* FALLTHROUGH */ + case 19: + for (j = 0; j < 3; j++) { + varbuf_append(&b, vstr_pa1b[j], + (cis[i + (j * 2) + 2] << 8) + + cis[i + (j * 2) + 1]); + } + for (j = 3; j < 6; j++) { + varbuf_append(&b, vstr_pa1lob[j - 3], + (cis[i + (j * 2) + 2] << 8) + + cis[i + (j * 2) + 1]); + } + for (j = 6; j < 9; j++) { + varbuf_append(&b, vstr_pa1hib[j - 6], + (cis[i + (j * 2) + 2] << 8) + + cis[i + (j * 2) + 1]); + } + break; + default: + ASSERT((tlen == 19) || + (tlen == 20) || (tlen == 23)); + break; + } + break; + + case HNBU_OEM: + ASSERT(sromrev == 1); + varbuf_append(&b, vstr_oem, + cis[i + 
1], cis[i + 2], + cis[i + 3], cis[i + 4], + cis[i + 5], cis[i + 6], + cis[i + 7], cis[i + 8]); + break; + + case HNBU_CCODE: + ASSERT(sromrev > 1); + if ((cis[i + 1] == 0) || (cis[i + 2] == 0)) + varbuf_append(&b, vstr_noccode); + else + varbuf_append(&b, vstr_ccode, + cis[i + 1], cis[i + 2]); + varbuf_append(&b, vstr_cctl, cis[i + 3]); + break; + + case HNBU_CCKPO: + ASSERT(sromrev > 2); + varbuf_append(&b, vstr_cckpo, + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_OFDMPO: + ASSERT(sromrev > 2); + varbuf_append(&b, vstr_ofdmpo, + (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + cis[i + 1]); + break; + + case HNBU_WPS: + varbuf_append(&b, vstr_wpsgpio, cis[i + 1]); + if (tlen >= 3) + varbuf_append(&b, vstr_wpsled, cis[i + 2]); + break; + + case HNBU_RSSISMBXA2G: + ASSERT(sromrev == 3); + varbuf_append(&b, vstr_rssismf2g, cis[i + 1] & 0xf); + varbuf_append(&b, vstr_rssismc2g, (cis[i + 1] >> 4) & 0xf); + varbuf_append(&b, vstr_rssisav2g, cis[i + 2] & 0x7); + varbuf_append(&b, vstr_bxa2g, (cis[i + 2] >> 3) & 0x3); + break; + + case HNBU_RSSISMBXA5G: + ASSERT(sromrev == 3); + varbuf_append(&b, vstr_rssismf5g, cis[i + 1] & 0xf); + varbuf_append(&b, vstr_rssismc5g, (cis[i + 1] >> 4) & 0xf); + varbuf_append(&b, vstr_rssisav5g, cis[i + 2] & 0x7); + varbuf_append(&b, vstr_bxa5g, (cis[i + 2] >> 3) & 0x3); + break; + + case HNBU_TRI2G: + ASSERT(sromrev == 3); + varbuf_append(&b, vstr_tri2g, cis[i + 1]); + break; + + case HNBU_TRI5G: + ASSERT(sromrev == 3); + varbuf_append(&b, vstr_tri5gl, cis[i + 1]); + varbuf_append(&b, vstr_tri5g, cis[i + 2]); + varbuf_append(&b, vstr_tri5gh, cis[i + 3]); + break; + + case HNBU_RXPO2G: + ASSERT(sromrev == 3); + varbuf_append(&b, vstr_rxpo2g, cis[i + 1]); + break; + + case HNBU_RXPO5G: + ASSERT(sromrev == 3); + varbuf_append(&b, vstr_rxpo5g, cis[i + 1]); + break; + + case HNBU_MACADDR: + if (!(ETHER_ISNULLADDR(&cis[i+1])) && + !(ETHER_ISMULTI(&cis[i+1]))) { + bcm_ether_ntoa((struct ether_addr *)&cis[i + 1], + eabuf); + + /* set boardnum if HNBU_BOARDNUM not seen yet */ + if (boardnum == -1) + boardnum = (cis[i + 5] << 8) + cis[i + 6]; + } + break; + + case HNBU_CHAINSWITCH: + varbuf_append(&b, vstr_txchain, cis[i + 1]); + varbuf_append(&b, vstr_rxchain, cis[i + 2]); + varbuf_append(&b, vstr_antswitch, + (cis[i + 4] << 8) + cis[i + 3]); + break; + + case HNBU_ELNA2G: + varbuf_append(&b, vstr_elna2g, cis[i + 1]); + break; + + case HNBU_ELNA5G: + varbuf_append(&b, vstr_elna5g, cis[i + 1]); + break; + + case HNBU_REGREV: + varbuf_append(&b, vstr_regrev, + srom_data2value(&cis[i + 1], tlen - 1)); + break; + + case HNBU_FEM: { + uint16 fem = (cis[i + 2] << 8) + cis[i + 1]; + varbuf_append(&b, vstr_antswctl2g, (fem & + SROM8_FEM_ANTSWLUT_MASK) >> + SROM8_FEM_ANTSWLUT_SHIFT); + varbuf_append(&b, vstr_triso2g, (fem & + SROM8_FEM_TR_ISO_MASK) >> + SROM8_FEM_TR_ISO_SHIFT); + varbuf_append(&b, vstr_pdetrange2g, (fem & + SROM8_FEM_PDET_RANGE_MASK) >> + SROM8_FEM_PDET_RANGE_SHIFT); + varbuf_append(&b, vstr_extpagain2g, (fem & + SROM8_FEM_EXTPA_GAIN_MASK) >> + SROM8_FEM_EXTPA_GAIN_SHIFT); + varbuf_append(&b, vstr_tssipos2g, (fem & + SROM8_FEM_TSSIPOS_MASK) >> + SROM8_FEM_TSSIPOS_SHIFT); + if (tlen < 5) break; + + fem = (cis[i + 4] << 8) + cis[i + 3]; + varbuf_append(&b, vstr_antswctl5g, (fem & + SROM8_FEM_ANTSWLUT_MASK) >> + SROM8_FEM_ANTSWLUT_SHIFT); + varbuf_append(&b, vstr_triso5g, (fem & + SROM8_FEM_TR_ISO_MASK) >> + SROM8_FEM_TR_ISO_SHIFT); + varbuf_append(&b, vstr_pdetrange5g, (fem & + SROM8_FEM_PDET_RANGE_MASK) >> + SROM8_FEM_PDET_RANGE_SHIFT); 
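+ /* the remaining 5G subfields are pulled from the
+ * same 16-bit FEM word via (fem & MASK) >> SHIFT */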
+ varbuf_append(&b, vstr_extpagain5g, (fem & + SROM8_FEM_EXTPA_GAIN_MASK) >> + SROM8_FEM_EXTPA_GAIN_SHIFT); + varbuf_append(&b, vstr_tssipos5g, (fem & + SROM8_FEM_TSSIPOS_MASK) >> + SROM8_FEM_TSSIPOS_SHIFT); + break; + } + + case HNBU_PAPARMS_C0: + varbuf_append(&b, vstr_maxp2ga, 0, cis[i + 1]); + varbuf_append(&b, vstr_itt2ga0, cis[i + 2]); + varbuf_append(&b, vstr_pa, 2, 0, 0, + (cis[i + 4] << 8) + cis[i + 3]); + varbuf_append(&b, vstr_pa, 2, 1, 0, + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_pa, 2, 2, 0, + (cis[i + 8] << 8) + cis[i + 7]); + if (tlen < 31) break; + + varbuf_append(&b, vstr_maxp5ga0, cis[i + 9]); + varbuf_append(&b, vstr_itt5ga0, cis[i + 10]); + varbuf_append(&b, vstr_maxp5gha0, cis[i + 11]); + varbuf_append(&b, vstr_maxp5gla0, cis[i + 12]); + varbuf_append(&b, vstr_pa, 5, 0, 0, + (cis[i + 14] << 8) + cis[i + 13]); + varbuf_append(&b, vstr_pa, 5, 1, 0, + (cis[i + 16] << 8) + cis[i + 15]); + varbuf_append(&b, vstr_pa, 5, 2, 0, + (cis[i + 18] << 8) + cis[i + 17]); + varbuf_append(&b, vstr_pahl, 5, 'l', 0, 0, + (cis[i + 20] << 8) + cis[i + 19]); + varbuf_append(&b, vstr_pahl, 5, 'l', 1, 0, + (cis[i + 22] << 8) + cis[i + 21]); + varbuf_append(&b, vstr_pahl, 5, 'l', 2, 0, + (cis[i + 24] << 8) + cis[i + 23]); + varbuf_append(&b, vstr_pahl, 5, 'h', 0, 0, + (cis[i + 26] << 8) + cis[i + 25]); + varbuf_append(&b, vstr_pahl, 5, 'h', 1, 0, + (cis[i + 28] << 8) + cis[i + 27]); + varbuf_append(&b, vstr_pahl, 5, 'h', 2, 0, + (cis[i + 30] << 8) + cis[i + 29]); + break; + + case HNBU_PAPARMS_C1: + varbuf_append(&b, vstr_maxp2ga, 1, cis[i + 1]); + varbuf_append(&b, vstr_itt2ga1, cis[i + 2]); + varbuf_append(&b, vstr_pa, 2, 0, 1, + (cis[i + 4] << 8) + cis[i + 3]); + varbuf_append(&b, vstr_pa, 2, 1, 1, + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_pa, 2, 2, 1, + (cis[i + 8] << 8) + cis[i + 7]); + if (tlen < 31) break; + + varbuf_append(&b, vstr_maxp5ga1, cis[i + 9]); + varbuf_append(&b, vstr_itt5ga1, cis[i + 10]); + varbuf_append(&b, vstr_maxp5gha1, cis[i + 11]); + varbuf_append(&b, vstr_maxp5gla1, cis[i + 12]); + varbuf_append(&b, vstr_pa, 5, 0, 1, + (cis[i + 14] << 8) + cis[i + 13]); + varbuf_append(&b, vstr_pa, 5, 1, 1, + (cis[i + 16] << 8) + cis[i + 15]); + varbuf_append(&b, vstr_pa, 5, 2, 1, + (cis[i + 18] << 8) + cis[i + 17]); + varbuf_append(&b, vstr_pahl, 5, 'l', 0, 1, + (cis[i + 20] << 8) + cis[i + 19]); + varbuf_append(&b, vstr_pahl, 5, 'l', 1, 1, + (cis[i + 22] << 8) + cis[i + 21]); + varbuf_append(&b, vstr_pahl, 5, 'l', 2, 1, + (cis[i + 24] << 8) + cis[i + 23]); + varbuf_append(&b, vstr_pahl, 5, 'h', 0, 1, + (cis[i + 26] << 8) + cis[i + 25]); + varbuf_append(&b, vstr_pahl, 5, 'h', 1, 1, + (cis[i + 28] << 8) + cis[i + 27]); + varbuf_append(&b, vstr_pahl, 5, 'h', 2, 1, + (cis[i + 30] << 8) + cis[i + 29]); + break; + + case HNBU_PO_CCKOFDM: + varbuf_append(&b, vstr_cck2gpo, + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_ofdm2gpo, + (cis[i + 6] << 24) + (cis[i + 5] << 16) + + (cis[i + 4] << 8) + cis[i + 3]); + if (tlen < 19) break; + + varbuf_append(&b, vstr_ofdm5gpo, + (cis[i + 10] << 24) + (cis[i + 9] << 16) + + (cis[i + 8] << 8) + cis[i + 7]); + varbuf_append(&b, vstr_ofdm5glpo, + (cis[i + 14] << 24) + (cis[i + 13] << 16) + + (cis[i + 12] << 8) + cis[i + 11]); + varbuf_append(&b, vstr_ofdm5ghpo, + (cis[i + 18] << 24) + (cis[i + 17] << 16) + + (cis[i + 16] << 8) + cis[i + 15]); + break; + + case HNBU_PO_MCS2G: + for (j = 0; j <= (tlen/2); j++) { + varbuf_append(&b, vstr_mcspo, 2, j, + (cis[i + 2 + 2*j] << 8) + cis[i + 1 + 2*j]); + } + 
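+ /* each entry above is a 16-bit little-endian
+ * per-MCS 2G power offset */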
break; + + case HNBU_PO_MCS5GM: + for (j = 0; j <= (tlen/2); j++) { + varbuf_append(&b, vstr_mcspo, 5, j, + (cis[i + 2 + 2*j] << 8) + cis[i + 1 + 2*j]); + } + break; + + case HNBU_PO_MCS5GLH: + for (j = 0; j <= (tlen/4); j++) { + varbuf_append(&b, vstr_mcspohl, 5, 'l', j, + (cis[i + 2 + 2*j] << 8) + cis[i + 1 + 2*j]); + } + + for (j = 0; j <= (tlen/4); j++) { + varbuf_append(&b, vstr_mcspohl, 5, 'h', j, + (cis[i + ((tlen/2)+2) + 2*j] << 8) + + cis[i + ((tlen/2)+1) + 2*j]); + } + + break; + + case HNBU_PO_CDD: + varbuf_append(&b, vstr_cddpo, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_PO_STBC: + varbuf_append(&b, vstr_stbcpo, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_PO_40M: + varbuf_append(&b, vstr_bw40po, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_PO_40MDUP: + varbuf_append(&b, vstr_bwduppo, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_OFDMPO5G: + varbuf_append(&b, vstr_ofdm5gpo, + (cis[i + 4] << 24) + (cis[i + 3] << 16) + + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_ofdm5glpo, + (cis[i + 8] << 24) + (cis[i + 7] << 16) + + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_ofdm5ghpo, + (cis[i + 12] << 24) + (cis[i + 11] << 16) + + (cis[i + 10] << 8) + cis[i + 9]); + break; + /* Power per rate for SROM V9 */ + case HNBU_CCKBW202GPO: + varbuf_append(&b, vstr_cckbw202gpo[0], + ((cis[i + 2] << 8) + cis[i + 1])); + if (tlen > 4) + varbuf_append(&b, vstr_cckbw202gpo[1], + ((cis[i + 4] << 8) + cis[i + 3])); + if (tlen > 6) + varbuf_append(&b, vstr_cckbw202gpo[2], + ((cis[i + 6] << 8) + cis[i + 5])); + break; + + case HNBU_LEGOFDMBW202GPO: + varbuf_append(&b, vstr_legofdmbw202gpo[0], + ((cis[i + 4] << 24) + (cis[i + 3] << 16) + + (cis[i + 2] << 8) + cis[i + 1])); + if (tlen > 6) { + varbuf_append(&b, vstr_legofdmbw202gpo[1], + ((cis[i + 8] << 24) + (cis[i + 7] << 16) + + (cis[i + 6] << 8) + cis[i + 5])); + } + break; + + case HNBU_LEGOFDMBW205GPO: + for (j = 0; j < 6; j++) { + if (tlen < (2 + 4 * j)) + break; + varbuf_append(&b, vstr_legofdmbw205gpo[j], + ((cis[4 * j + i + 4] << 24) + + (cis[4 * j + i + 3] << 16) + + (cis[4 * j + i + 2] << 8) + + cis[4 * j + i + 1])); + } + break; + + case HNBU_MCS2GPO: + for (j = 0; j < 4; j++) { + if (tlen < (2 + 4 * j)) + break; + varbuf_append(&b, vstr_mcs2gpo[j], + ((cis[4 * j + i + 4] << 24) + + (cis[4 * j + i + 3] << 16) + + (cis[4 * j + i + 2] << 8) + + cis[4 * j + i + 1])); + } + break; + + case HNBU_MCS5GLPO: + for (j = 0; j < 3; j++) { + if (tlen < (2 + 4 * j)) + break; + varbuf_append(&b, vstr_mcs5glpo[j], + ((cis[4 * j + i + 4] << 24) + + (cis[4 * j + i + 3] << 16) + + (cis[4 * j + i + 2] << 8) + + cis[4 * j + i + 1])); + } + break; + + case HNBU_MCS5GMPO: + for (j = 0; j < 3; j++) { + if (tlen < (2 + 4 * j)) + break; + varbuf_append(&b, vstr_mcs5gmpo[j], + ((cis[4 * j + i + 4] << 24) + + (cis[4 * j + i + 3] << 16) + + (cis[4 * j + i + 2] << 8) + + cis[4 * j + i + 1])); + } + break; + + case HNBU_MCS5GHPO: + for (j = 0; j < 3; j++) { + if (tlen < (2 + 4 * j)) + break; + varbuf_append(&b, vstr_mcs5ghpo[j], + ((cis[4 * j + i + 4] << 24) + + (cis[4 * j + i + 3] << 16) + + (cis[4 * j + i + 2] << 8) + + cis[4 * j + i + 1])); + } + break; + + case HNBU_MCS32PO: + varbuf_append(&b, vstr_mcs32po, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_LEG40DUPPO: + varbuf_append(&b, vstr_legofdm40duppo, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_CUSTOM1: + varbuf_append(&b, vstr_custom, 1, ((cis[i + 4] << 24) + + (cis[i + 3] << 16) + (cis[i + 2] << 8) + + cis[i + 
1]));
+ break;
+
+#if defined(BCMSDIO) || defined(BCMCCISSR3)
+ case HNBU_SROM3SWRGN:
+ if (tlen >= 73) {
+ uint16 srom[35];
+ uint8 srev = cis[i + 1 + 70];
+ ASSERT(srev == 3);
+ /* make tuple value 16-bit aligned and parse it */
+ bcopy(&cis[i + 1], srom, sizeof(srom));
+ _initvars_srom_pci(srev, srom, SROM3_SWRGN_OFF, &b);
+ /* 2.4G antenna gain is included in SROM */
+ ag_init = TRUE;
+ /* Ethernet MAC address is included in SROM */
+ eabuf[0] = 0;
+ /* why is boardnum not -1? */
+ boardnum = -1;
+ }
+ /* create extra variables */
+ if (tlen >= 75)
+ varbuf_append(&b, vstr_vendid,
+ (cis[i + 1 + 73] << 8) +
+ cis[i + 1 + 72]);
+ if (tlen >= 77)
+ varbuf_append(&b, vstr_devid,
+ (cis[i + 1 + 75] << 8) +
+ cis[i + 1 + 74]);
+ if (tlen >= 79)
+ varbuf_append(&b, vstr_xtalfreq,
+ (cis[i + 1 + 77] << 8) +
+ cis[i + 1 + 76]);
+ break;
+#endif /* BCMSDIO || BCMCCISSR3 */
+
+ case HNBU_CCKFILTTYPE:
+ varbuf_append(&b, vstr_cckdigfilttype,
+ (cis[i + 1]));
+ break;
+
+ case HNBU_TEMPTHRESH:
+ varbuf_append(&b, vstr_tempthresh,
+ (cis[i + 1]));
+ /* period in msb nibble */
+ varbuf_append(&b, vstr_temps_period,
+ (cis[i + 2] & SROM11_TEMPS_PERIOD_MASK) >>
+ SROM11_TEMPS_PERIOD_SHIFT);
+ /* hysteresis in lsb nibble */
+ varbuf_append(&b, vstr_temps_hysteresis,
+ (cis[i + 2] & SROM11_TEMPS_HYSTERESIS_MASK) >>
+ SROM11_TEMPS_HYSTERESIS_SHIFT);
+ if (tlen >= 4) {
+ varbuf_append(&b, vstr_tempoffset,
+ (cis[i + 3]));
+ varbuf_append(&b, vstr_tempsense_slope,
+ (cis[i + 4]));
+ varbuf_append(&b, vstr_temp_corrx,
+ (cis[i + 5] & SROM11_TEMPCORRX_MASK) >>
+ SROM11_TEMPCORRX_SHIFT);
+ varbuf_append(&b, vstr_tempsense_option,
+ (cis[i + 5] & SROM11_TEMPSENSE_OPTION_MASK) >>
+ SROM11_TEMPSENSE_OPTION_SHIFT);
+ varbuf_append(&b, vstr_phycal_tempdelta,
+ (cis[i + 6]));
+ }
+ break;
+
+ case HNBU_FEM_CFG: {
+ /* fem_cfg1 */
+ uint16 fem_cfg = (cis[i + 2] << 8) + cis[i + 1];
+ varbuf_append(&b, vstr_femctrl,
+ (fem_cfg & SROM11_FEMCTRL_MASK) >>
+ SROM11_FEMCTRL_SHIFT);
+ varbuf_append(&b, vstr_papdcap, 2,
+ (fem_cfg & SROM11_PAPDCAP_MASK) >>
+ SROM11_PAPDCAP_SHIFT);
+ varbuf_append(&b, vstr_tworangetssi, 2,
+ (fem_cfg & SROM11_TWORANGETSSI_MASK) >>
+ SROM11_TWORANGETSSI_SHIFT);
+ varbuf_append(&b, vstr_pdgaing, 2,
+ (fem_cfg & SROM11_PDGAIN_MASK) >>
+ SROM11_PDGAIN_SHIFT);
+ varbuf_append(&b, vstr_epagaing, 2,
+ (fem_cfg & SROM11_EPAGAIN_MASK) >>
+ SROM11_EPAGAIN_SHIFT);
+ varbuf_append(&b, vstr_tssiposslopeg, 2,
+ (fem_cfg & SROM11_TSSIPOSSLOPE_MASK) >>
+ SROM11_TSSIPOSSLOPE_SHIFT);
+ /* fem_cfg2 */
+ fem_cfg = (cis[i + 4] << 8) + cis[i + 3];
+ varbuf_append(&b, vstr_gainctrlsph,
+ (fem_cfg & SROM11_GAINCTRLSPH_MASK) >>
+ SROM11_GAINCTRLSPH_SHIFT);
+ varbuf_append(&b, vstr_papdcap, 5,
+ (fem_cfg & SROM11_PAPDCAP_MASK) >>
+ SROM11_PAPDCAP_SHIFT);
+ varbuf_append(&b, vstr_tworangetssi, 5,
+ (fem_cfg & SROM11_TWORANGETSSI_MASK) >>
+ SROM11_TWORANGETSSI_SHIFT);
+ varbuf_append(&b, vstr_pdgaing, 5,
+ (fem_cfg & SROM11_PDGAIN_MASK) >>
+ SROM11_PDGAIN_SHIFT);
+ varbuf_append(&b, vstr_epagaing, 5,
+ (fem_cfg & SROM11_EPAGAIN_MASK) >>
+ SROM11_EPAGAIN_SHIFT);
+ varbuf_append(&b, vstr_tssiposslopeg, 5,
+ (fem_cfg & SROM11_TSSIPOSSLOPE_MASK) >>
+ SROM11_TSSIPOSSLOPE_SHIFT);
+ break;
+ }
+
+ case HNBU_ACPA_C0: {
+ const int a = 0;
+
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ varbuf_append(&b, vstr_subband5gver,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ /* maxp2g */
+ /* Decoupling this tuple to program from NVRAM */
+ varbuf_append(&b, vstr_maxp2ga, a,
+ (cis[i + 4] << 8) + cis[i + 3]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa2g */
+ varbuf_append(&b, vstr_pa2ga, a,
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7],
+ (cis[i + 10] << 8) + cis[i + 9]);
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp5g */
+ varbuf_append(&b, vstr_maxp5ga, a,
+ cis[i + 11],
+ cis[i + 12],
+ cis[i + 13],
+ cis[i + 14]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa5g */
+ varbuf_append(&b, vstr_pa5ga, a,
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35],
+ (cis[i + 38] << 8) + cis[i + 37]);
+ break;
+ }
+
+ case HNBU_ACPA_C1: {
+ const int a = 1;
+
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp2g */
+ /* Decoupling this tuple to program from NVRAM */
+ varbuf_append(&b, vstr_maxp2ga, a,
+ (cis[i + 2] << 8) + cis[i + 1]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa2g */
+ varbuf_append(&b, vstr_pa2ga, a,
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7]);
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp5g */
+ varbuf_append(&b, vstr_maxp5ga, a,
+ cis[i + 9],
+ cis[i + 10],
+ cis[i + 11],
+ cis[i + 12]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa5g */
+ varbuf_append(&b, vstr_pa5ga, a,
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35]);
+ break;
+ }
+
+ case HNBU_ACPA_C2: {
+ const int a = 2;
+
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp2g */
+ /* Decoupling this tuple to program from NVRAM */
+ varbuf_append(&b, vstr_maxp2ga, a,
+ (cis[i + 2] << 8) + cis[i + 1]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa2g */
+ varbuf_append(&b, vstr_pa2ga, a,
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7]);
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp5g */
+ varbuf_append(&b, vstr_maxp5ga, a,
+ cis[i + 9],
+ cis[i + 10],
+ cis[i + 11],
+ cis[i + 12]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa5g */
+ varbuf_append(&b, vstr_pa5ga, a,
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35]);
+ break;
+ }
+
+ case HNBU_MEAS_PWR:
+ varbuf_append(&b, vstr_measpower, cis[i + 1]);
+ varbuf_append(&b, vstr_measpowerX, 1, (cis[i + 2]));
+ varbuf_append(&b, vstr_measpowerX, 2, (cis[i + 3]));
+ varbuf_append(&b, vstr_rawtempsense,
+ ((cis[i + 5] & 0x1) << 8) + cis[i + 4]);
+ break;
+
+ case HNBU_PDOFF:
+ varbuf_append(&b, vstr_pdoffsetma, 40, 0,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_pdoffsetma, 40, 1,
+ (cis[i + 4] << 8) + cis[i + 3]);
+ varbuf_append(&b, vstr_pdoffsetma, 40, 2,
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_pdoffsetma, 80,
0, + (cis[i + 8] << 8) + cis[i + 7]); + varbuf_append(&b, vstr_pdoffsetma, 80, 1, + (cis[i + 10] << 8) + cis[i + 9]); + varbuf_append(&b, vstr_pdoffsetma, 80, 2, + (cis[i + 12] << 8) + cis[i + 11]); + break; + + case HNBU_ACPPR_2GPO: + varbuf_append(&b, vstr_dot11agofdmhrbw202gpo, + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_ofdmlrbw202gpo, + (cis[i + 4] << 8) + cis[i + 3]); + + if (tlen < 13) break; + varbuf_append(&b, vstr_sb20in40dot11agofdm2gpo, + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_sb20in80dot11agofdm2gpo, + (cis[i + 8] << 8) + cis[i + 7]); + varbuf_append(&b, vstr_sb20in40ofdmlrbw202gpo, + (cis[i + 10] << 8) + cis[i + 9]); + varbuf_append(&b, vstr_sb20in80ofdmlrbw202gpo, + (cis[i + 12] << 8) + cis[i + 11]); + break; + + case HNBU_ACPPR_5GPO: + varbuf_append(&b, vstr_mcsbw805gpo, 'l', + (cis[i + 4] << 24) + (cis[i + 3] << 16) + + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_mcsbw1605gpo, 'l', + (cis[i + 8] << 24) + (cis[i + 7] << 16) + + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_mcsbw805gpo, 'm', + (cis[i + 12] << 24) + (cis[i + 11] << 16) + + (cis[i + 10] << 8) + cis[i + 9]); + varbuf_append(&b, vstr_mcsbw1605gpo, 'm', + (cis[i + 16] << 24) + (cis[i + 15] << 16) + + (cis[i + 14] << 8) + cis[i + 13]); + varbuf_append(&b, vstr_mcsbw805gpo, 'h', + (cis[i + 20] << 24) + (cis[i + 19] << 16) + + (cis[i + 18] << 8) + cis[i + 17]); + varbuf_append(&b, vstr_mcsbw1605gpo, 'h', + (cis[i + 24] << 24) + (cis[i + 23] << 16) + + (cis[i + 22] << 8) + cis[i + 21]); + varbuf_append(&b, vstr_mcslr5gpo, 'l', + (cis[i + 26] << 8) + cis[i + 25]); + varbuf_append(&b, vstr_mcslr5gpo, 'm', + (cis[i + 28] << 8) + cis[i + 27]); + varbuf_append(&b, vstr_mcslr5gpo, 'h', + (cis[i + 30] << 8) + cis[i + 29]); + + if (tlen < 51) break; + varbuf_append(&b, vstr_mcsbw80p805gpo, 'l', + (cis[i + 34] << 24) + (cis[i + 33] << 16) + + (cis[i + 32] << 8) + cis[i + 31]); + varbuf_append(&b, vstr_mcsbw80p805gpo, 'm', + (cis[i + 38] << 24) + (cis[i + 37] << 16) + + (cis[i + 36] << 8) + cis[i + 35]); + varbuf_append(&b, vstr_mcsbw80p805gpo, 'h', + (cis[i + 42] << 24) + (cis[i + 41] << 16) + + (cis[i + 40] << 8) + cis[i + 39]); + varbuf_append(&b, vstr_mcsbw80p805g1po, 'x', + (cis[i + 46] << 24) + (cis[i + 45] << 16) + + (cis[i + 44] << 8) + cis[i + 43]); + varbuf_append(&b, vstr_mcslr5g1po, 'x', + (cis[i + 48] << 8) + cis[i + 47]); + varbuf_append(&b, vstr_mcslr5g80p80po, + (cis[i + 50] << 8) + cis[i + 49]); + varbuf_append(&b, vstr_mcsbw805g1po, 'x', + (cis[i + 54] << 24) + (cis[i + 53] << 16) + + (cis[i + 52] << 8) + cis[i + 51]); + varbuf_append(&b, vstr_mcsbw1605g1po, 'x', + (cis[i + 58] << 24) + (cis[i + 57] << 16) + + (cis[i + 56] << 8) + cis[i + 55]); + + break; + + case HNBU_MCS5Gx1PO: + varbuf_append(&b, vstr_mcsbw205g1po, 'x', + (cis[i + 4] << 24) + (cis[i + 3] << 16) + + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_mcsbw405g1po, 'x', + (cis[i + 8] << 24) + (cis[i + 7] << 16) + + (cis[i + 6] << 8) + cis[i + 5]); + break; + + case HNBU_ACPPR_SBPO: + varbuf_append(&b, vstr_sb20in40rpo, 'h', + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_sb20in80and160r5gpo, 'h', 'l', + (cis[i + 4] << 8) + cis[i + 3]); + varbuf_append(&b, vstr_sb40and80r5gpo, 'h', 'l', + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_sb20in80and160r5gpo, 'h', 'm', + (cis[i + 8] << 8) + cis[i + 7]); + varbuf_append(&b, vstr_sb40and80r5gpo, 'h', 'm', + (cis[i + 10] << 8) + cis[i + 9]); + varbuf_append(&b, vstr_sb20in80and160r5gpo, 'h', 'h', + (cis[i + 12] 
<< 8) + cis[i + 11]); + varbuf_append(&b, vstr_sb40and80r5gpo, 'h', 'h', + (cis[i + 14] << 8) + cis[i + 13]); + varbuf_append(&b, vstr_sb20in40rpo, 'l', + (cis[i + 16] << 8) + cis[i + 15]); + varbuf_append(&b, vstr_sb20in80and160r5gpo, 'l', 'l', + (cis[i + 18] << 8) + cis[i + 17]); + varbuf_append(&b, vstr_sb40and80r5gpo, 'l', 'l', + (cis[i + 20] << 8) + cis[i + 19]); + varbuf_append(&b, vstr_sb20in80and160r5gpo, 'l', 'm', + (cis[i + 22] << 8) + cis[i + 21]); + varbuf_append(&b, vstr_sb40and80r5gpo, 'l', 'm', + (cis[i + 24] << 8) + cis[i + 23]); + varbuf_append(&b, vstr_sb20in80and160r5gpo, 'l', 'h', + (cis[i + 26] << 8) + cis[i + 25]); + varbuf_append(&b, vstr_sb40and80r5gpo, 'l', 'h', + (cis[i + 28] << 8) + cis[i + 27]); + varbuf_append(&b, vstr_dot11agduprpo, 'h', + (cis[i + 32] << 24) + (cis[i + 31] << 16) + + (cis[i + 30] << 8) + cis[i + 29]); + varbuf_append(&b, vstr_dot11agduprpo, 'l', + (cis[i + 36] << 24) + (cis[i + 35] << 16) + + (cis[i + 34] << 8) + cis[i + 33]); + + if (tlen < 49) break; + varbuf_append(&b, vstr_sb20in40and80rpo, 'h', + (cis[i + 38] << 8) + cis[i + 37]); + varbuf_append(&b, vstr_sb20in40and80rpo, 'l', + (cis[i + 40] << 8) + cis[i + 39]); + varbuf_append(&b, vstr_sb20in80and160r5g1po, 'h', 'x', + (cis[i + 42] << 8) + cis[i + 41]); + varbuf_append(&b, vstr_sb20in80and160r5g1po, 'l', 'x', + (cis[i + 44] << 8) + cis[i + 43]); + varbuf_append(&b, vstr_sb40and80r5g1po, 'h', 'x', + (cis[i + 46] << 8) + cis[i + 45]); + varbuf_append(&b, vstr_sb40and80r5g1po, 'l', 'x', + (cis[i + 48] << 8) + cis[i + 47]); + break; + + case HNBU_ACPPR_SB8080_PO: + varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'h', 'l', + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'l', 'l', + (cis[i + 4] << 8) + cis[i + 3]); + varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'h', 'm', + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'l', 'm', + (cis[i + 8] << 8) + cis[i + 7]); + varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'h', 'h', + (cis[i + 10] << 8) + cis[i + 9]); + varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'l', 'h', + (cis[i + 12] << 8) + cis[i + 11]); + varbuf_append(&b, vstr_sb2040and80in80p80r5g1po, 'h', 'x', + (cis[i + 14] << 8) + cis[i + 13]); + varbuf_append(&b, vstr_sb2040and80in80p80r5g1po, 'l', 'x', + (cis[i + 16] << 8) + cis[i + 15]); + varbuf_append(&b, vstr_sb20in80p80r5gpo, 'h', + (cis[i + 18] << 8) + cis[i + 17]); + varbuf_append(&b, vstr_sb20in80p80r5gpo, 'l', + (cis[i + 20] << 8) + cis[i + 19]); + varbuf_append(&b, vstr_dot11agduppo, + (cis[i + 22] << 8) + cis[i + 21]); + break; + + case HNBU_NOISELVL: + /* noiselvl2g */ + varbuf_append(&b, vstr_noiselvl2ga, 0, + (cis[i + 1] & 0x1f)); + varbuf_append(&b, vstr_noiselvl2ga, 1, + (cis[i + 2] & 0x1f)); + varbuf_append(&b, vstr_noiselvl2ga, 2, + (cis[i + 3] & 0x1f)); + /* noiselvl5g */ + varbuf_append(&b, vstr_noiselvl5ga, 0, + (cis[i + 4] & 0x1f), + (cis[i + 5] & 0x1f), + (cis[i + 6] & 0x1f), + (cis[i + 7] & 0x1f)); + varbuf_append(&b, vstr_noiselvl5ga, 1, + (cis[i + 8] & 0x1f), + (cis[i + 9] & 0x1f), + (cis[i + 10] & 0x1f), + (cis[i + 11] & 0x1f)); + varbuf_append(&b, vstr_noiselvl5ga, 2, + (cis[i + 12] & 0x1f), + (cis[i + 13] & 0x1f), + (cis[i + 14] & 0x1f), + (cis[i + 15] & 0x1f)); + break; + + case HNBU_RXGAIN_ERR: + varbuf_append(&b, vstr_rxgainerr2ga, 0, + (cis[i + 1] & 0x3f)); + varbuf_append(&b, vstr_rxgainerr2ga, 1, + (cis[i + 2] & 0x1f)); + varbuf_append(&b, vstr_rxgainerr2ga, 2, + (cis[i + 3] & 0x1f)); + varbuf_append(&b, vstr_rxgainerr5ga, 0, 
+ (cis[i + 4] & 0x3f), + (cis[i + 5] & 0x3f), + (cis[i + 6] & 0x3f), + (cis[i + 7] & 0x3f)); + varbuf_append(&b, vstr_rxgainerr5ga, 1, + (cis[i + 8] & 0x1f), + (cis[i + 9] & 0x1f), + (cis[i + 10] & 0x1f), + (cis[i + 11] & 0x1f)); + varbuf_append(&b, vstr_rxgainerr5ga, 2, + (cis[i + 12] & 0x1f), + (cis[i + 13] & 0x1f), + (cis[i + 14] & 0x1f), + (cis[i + 15] & 0x1f)); + break; + + case HNBU_AGBGA: + varbuf_append(&b, vstr_agbg, 0, cis[i + 1]); + varbuf_append(&b, vstr_agbg, 1, cis[i + 2]); + varbuf_append(&b, vstr_agbg, 2, cis[i + 3]); + varbuf_append(&b, vstr_aga, 0, cis[i + 4]); + varbuf_append(&b, vstr_aga, 1, cis[i + 5]); + varbuf_append(&b, vstr_aga, 2, cis[i + 6]); + break; + + case HNBU_ACRXGAINS_C0: { + int a = 0; + + /* rxgains */ + uint16 rxgains = (cis[i + 2] << 8) + cis[i + 1]; + varbuf_append(&b, vstr_rxgainsgtrelnabypa, 5, a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrisoa, 5, a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgelnagaina, 5, a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrelnabypa, 2, a, + (rxgains & SROM11_RXGAINS2GTRELNABYPA_MASK) >> + SROM11_RXGAINS2GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrisoa, 2, a, + (rxgains & SROM11_RXGAINS2GTRISOA_MASK) >> + SROM11_RXGAINS2GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgelnagaina, 2, a, + (rxgains & SROM11_RXGAINS2GELNAGAINA_MASK) >> + SROM11_RXGAINS2GELNAGAINA_SHIFT); + /* rxgains1 */ + rxgains = (cis[i + 4] << 8) + cis[i + 3]; + varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + break; + } + + case HNBU_ACRXGAINS_C1: { + int a = 1; + + /* rxgains */ + uint16 rxgains = (cis[i + 2] << 8) + cis[i + 1]; + varbuf_append(&b, vstr_rxgainsgtrelnabypa, 5, a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrisoa, 5, a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgelnagaina, 5, a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrelnabypa, 2, a, + (rxgains & SROM11_RXGAINS2GTRELNABYPA_MASK) >> + SROM11_RXGAINS2GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrisoa, 2, a, + (rxgains & SROM11_RXGAINS2GTRISOA_MASK) >> + SROM11_RXGAINS2GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgelnagaina, 2, a, + (rxgains & SROM11_RXGAINS2GELNAGAINA_MASK) >> + SROM11_RXGAINS2GELNAGAINA_SHIFT); + /* rxgains1 */ + rxgains = (cis[i + 4] << 8) + cis[i + 3]; + varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'h', a, + (rxgains & 
SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + break; + } + + case HNBU_ACRXGAINS_C2: { + int a = 2; + + /* rxgains */ + uint16 rxgains = (cis[i + 2] << 8) + cis[i + 1]; + varbuf_append(&b, vstr_rxgainsgtrelnabypa, 5, a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrisoa, 5, a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgelnagaina, 5, a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrelnabypa, 2, a, + (rxgains & SROM11_RXGAINS2GTRELNABYPA_MASK) >> + SROM11_RXGAINS2GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgtrisoa, 2, a, + (rxgains & SROM11_RXGAINS2GTRISOA_MASK) >> + SROM11_RXGAINS2GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgelnagaina, 2, a, + (rxgains & SROM11_RXGAINS2GELNAGAINA_MASK) >> + SROM11_RXGAINS2GELNAGAINA_SHIFT); + /* rxgains1 */ + rxgains = (cis[i + 4] << 8) + cis[i + 3]; + varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'h', a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >> + SROM11_RXGAINS5GTRELNABYPA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >> + SROM11_RXGAINS5GTRISOA_SHIFT); + varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'm', a, + (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >> + SROM11_RXGAINS5GELNAGAINA_SHIFT); + break; + } + + case HNBU_TXDUTY: { + varbuf_append(&b, vstr_txduty_ofdm, 40, + (cis[i + 2] << 8) + cis[i + 1]); + varbuf_append(&b, vstr_txduty_thresh, 40, + (cis[i + 4] << 8) + cis[i + 3]); + varbuf_append(&b, vstr_txduty_ofdm, 80, + (cis[i + 6] << 8) + cis[i + 5]); + varbuf_append(&b, vstr_txduty_thresh, 80, + (cis[i + 8] << 8) + cis[i + 7]); + break; + } + + case HNBU_UUID: { + /* uuid format 12345678-1234-5678-1234-567812345678 */ + + char uuidstr[37]; /* 32 ids, 4 '-', 1 Null */ + + snprintf(uuidstr, sizeof(uuidstr), + rstr_uuidstr, + cis[i + 1], cis[i + 2], cis[i + 3], cis[i + 4], + cis[i + 5], cis[i + 6], cis[i + 7], cis[i + 8], + cis[i + 9], cis[i + 10], cis[i + 11], cis[i + 12], + cis[i + 13], cis[i + 14], cis[i + 15], cis[i + 16]); + + varbuf_append(&b, vstr_uuid, uuidstr); + break; + } + + case HNBU_WOWLGPIO: + varbuf_append(&b, vstr_wowlgpio, ((cis[i + 1]) & 0x7F)); + varbuf_append(&b, vstr_wowlgpiopol, + (((cis[i + 1]) >> 7) & 0x1)); + break; + +#endif /* 
!BCM_BOOTLOADER */ +#ifdef BCMUSBDEV_COMPOSITE + case HNBU_USBDESC_COMPOSITE: + varbuf_append(&b, vstr_usbdesc_composite, + (cis[i + 2] << 8) | cis[i + 1]); + break; +#endif /* BCMUSBDEV_COMPOSITE */ + case HNBU_USBUTMI_CTL: + varbuf_append(&b, vstr_usbutmi_ctl, + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_USBSSPHY_UTMI_CTL0: + varbuf_append(&b, vstr_usbssphy_utmi_ctl0, + (cis[i + 4] << 24) | (cis[i + 3] << 16) | + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_USBSSPHY_UTMI_CTL1: + varbuf_append(&b, vstr_usbssphy_utmi_ctl1, + (cis[i + 4] << 24) | (cis[i + 3] << 16) | + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_USBSSPHY_UTMI_CTL2: + varbuf_append(&b, vstr_usbssphy_utmi_ctl2, + (cis[i + 4] << 24) | (cis[i + 3] << 16) | + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_USBSSPHY_SLEEP0: + varbuf_append(&b, vstr_usbssphy_sleep0, + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_USBSSPHY_SLEEP1: + varbuf_append(&b, vstr_usbssphy_sleep1, + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_USBSSPHY_SLEEP2: + varbuf_append(&b, vstr_usbssphy_sleep2, + (cis[i + 2] << 8) | cis[i + 1]); + break; + + case HNBU_USBSSPHY_SLEEP3: + varbuf_append(&b, vstr_usbssphy_sleep3, + (cis[i + 2] << 8) | cis[i + 1]); + break; + case HNBU_USBSSPHY_MDIO: { + uint8 setnum; + uint16 k; + + setnum = (cis[i + 1])/4; + if (setnum == 0) + break; + for (j = 0; j < setnum; j++) { + k = j*12; + varbuf_append(&b, vstr_usbssphy_mdio, j, + (cis[i+4+k]<<16) | (cis[i+3+k]<<8) | cis[i+2+k], + (cis[i+7+k]<<16) | (cis[i+6+k]<<8) | cis[i+5+k], + (cis[i+10+k]<<16) | (cis[i+9+k]<<8) | cis[i+8+k], + (cis[i+13+k]<<16) | (cis[i+12+k]<<8) | cis[i+11+k]); + } + break; + } + case HNBU_USB30PHY_NOSS: + varbuf_append(&b, vstr_usb30phy_noss, cis[i + 1]); + break; + case HNBU_USB30PHY_U1U2: + varbuf_append(&b, vstr_usb30phy_u1u2, cis[i + 1]); + break; + case HNBU_USB30PHY_REGS: + varbuf_append(&b, vstr_usb30phy_regs, 0, + cis[i+4]|cis[i+3]|cis[i+2]|cis[i+1], + cis[i+8]|cis[i+7]|cis[i+6]|cis[i+5], + cis[i+12]|cis[i+11]|cis[i+10]|cis[i+9], + cis[i+16]|cis[i+15]|cis[i+14]|cis[i+13]); + varbuf_append(&b, vstr_usb30phy_regs, 1, + cis[i+20]|cis[i+19]|cis[i+18]|cis[i+17], + cis[i+24]|cis[i+23]|cis[i+22]|cis[i+21], + cis[i+28]|cis[i+27]|cis[i+26]|cis[i+25], + cis[i+32]|cis[i+31]|cis[i+30]|cis[i+29]); + + break; + + case HNBU_PDOFF_2G: { + uint16 pdoff_2g = (cis[i + 2] << 8) + cis[i + 1]; + varbuf_append(&b, vstr_pdoffset2gma, 40, 0, + (pdoff_2g & SROM11_PDOFF_2G_40M_A0_MASK) >> + SROM11_PDOFF_2G_40M_A0_SHIFT); + varbuf_append(&b, vstr_pdoffset2gma, 40, 1, + (pdoff_2g & SROM11_PDOFF_2G_40M_A1_MASK) >> + SROM11_PDOFF_2G_40M_A1_SHIFT); + varbuf_append(&b, vstr_pdoffset2gma, 40, 2, + (pdoff_2g & SROM11_PDOFF_2G_40M_A2_MASK) >> + SROM11_PDOFF_2G_40M_A2_SHIFT); + varbuf_append(&b, vstr_pdoffset2gmvalid, 40, + (pdoff_2g & SROM11_PDOFF_2G_40M_VALID_MASK) >> + SROM11_PDOFF_2G_40M_VALID_SHIFT); + break; + } + + case HNBU_ACPA_CCK_C0: + varbuf_append(&b, vstr_pa2gccka, 0, + (cis[i + 2] << 8) + cis[i + 1], + (cis[i + 4] << 8) + cis[i + 3], + (cis[i + 6] << 8) + cis[i + 5]); + break; + + case HNBU_ACPA_CCK_C1: + varbuf_append(&b, vstr_pa2gccka, 1, + (cis[i + 2] << 8) + cis[i + 1], + (cis[i + 4] << 8) + cis[i + 3], + (cis[i + 6] << 8) + cis[i + 5]); + break; + + case HNBU_ACPA_40: + varbuf_append(&b, vstr_pa5gbw40a, 0, + (cis[i + 2] << 8) + cis[i + 1], + (cis[i + 4] << 8) + cis[i + 3], + (cis[i + 6] << 8) + cis[i + 5], + (cis[i + 8] << 8) + cis[i + 7], + (cis[i + 10] << 8) + cis[i + 9], + (cis[i + 12] << 8) + 
cis[i + 11], + (cis[i + 14] << 8) + cis[i + 13], + (cis[i + 16] << 8) + cis[i + 15], + (cis[i + 18] << 8) + cis[i + 17], + (cis[i + 20] << 8) + cis[i + 19], + (cis[i + 22] << 8) + cis[i + 21], + (cis[i + 24] << 8) + cis[i + 23]); + break; + + case HNBU_ACPA_80: + varbuf_append(&b, vstr_pa5gbw80a, 0, + (cis[i + 2] << 8) + cis[i + 1], + (cis[i + 4] << 8) + cis[i + 3], + (cis[i + 6] << 8) + cis[i + 5], + (cis[i + 8] << 8) + cis[i + 7], + (cis[i + 10] << 8) + cis[i + 9], + (cis[i + 12] << 8) + cis[i + 11], + (cis[i + 14] << 8) + cis[i + 13], + (cis[i + 16] << 8) + cis[i + 15], + (cis[i + 18] << 8) + cis[i + 17], + (cis[i + 20] << 8) + cis[i + 19], + (cis[i + 22] << 8) + cis[i + 21], + (cis[i + 24] << 8) + cis[i + 23]); + break; + + case HNBU_ACPA_4080: + varbuf_append(&b, vstr_pa5gbw4080a, 0, + (cis[i + 2] << 8) + cis[i + 1], + (cis[i + 4] << 8) + cis[i + 3], + (cis[i + 6] << 8) + cis[i + 5], + (cis[i + 8] << 8) + cis[i + 7], + (cis[i + 10] << 8) + cis[i + 9], + (cis[i + 12] << 8) + cis[i + 11], + (cis[i + 14] << 8) + cis[i + 13], + (cis[i + 16] << 8) + cis[i + 15], + (cis[i + 18] << 8) + cis[i + 17], + (cis[i + 20] << 8) + cis[i + 19], + (cis[i + 22] << 8) + cis[i + 21], + (cis[i + 24] << 8) + cis[i + 23]); + varbuf_append(&b, vstr_pa5gbw4080a, 1, + (cis[i + 26] << 8) + cis[i + 25], + (cis[i + 28] << 8) + cis[i + 27], + (cis[i + 30] << 8) + cis[i + 29], + (cis[i + 32] << 8) + cis[i + 31], + (cis[i + 34] << 8) + cis[i + 33], + (cis[i + 36] << 8) + cis[i + 35], + (cis[i + 38] << 8) + cis[i + 37], + (cis[i + 40] << 8) + cis[i + 39], + (cis[i + 42] << 8) + cis[i + 41], + (cis[i + 44] << 8) + cis[i + 43], + (cis[i + 46] << 8) + cis[i + 45], + (cis[i + 48] << 8) + cis[i + 47]); + break; + + case HNBU_ACPA_4X4C0: + case HNBU_ACPA_4X4C1: + case HNBU_ACPA_4X4C2: + case HNBU_ACPA_4X4C3: { + int core_num = 0; + uint8 tuple = cis[i]; + + if (tuple == HNBU_ACPA_4X4C1) { + core_num = 1; + } else if (tuple == HNBU_ACPA_4X4C2) { + core_num = 2; + } else if (tuple == HNBU_ACPA_4X4C3) { + core_num = 3; + } + + varbuf_append(&b, vstr_maxp2ga, core_num, cis[i + 1]); + /* pa2g */ + varbuf_append(&b, vstr_sr13pa2ga, core_num, + (cis[i + 3] << 8) + cis[i + 2], + (cis[i + 5] << 8) + cis[i + 4], + (cis[i + 7] << 8) + cis[i + 6], + (cis[i + 9] << 8) + cis[i + 8]); + /* pa2g40 */ + varbuf_append(&b, vstr_pa2g40a, core_num, + (cis[i + 11] << 8) + cis[i + 10], + (cis[i + 13] << 8) + cis[i + 12], + (cis[i + 15] << 8) + cis[i + 14], + (cis[i + 17] << 8) + cis[i + 16]); + for (j = 0; j < 5; j++) { + varbuf_append(&b, vstr_maxp5gba, j, core_num, + cis[i + j + 18]); + } + break; + } + + case HNBU_ACPA_BW20_4X4C0: + case HNBU_ACPA_BW40_4X4C0: + case HNBU_ACPA_BW80_4X4C0: + case HNBU_ACPA_BW20_4X4C1: + case HNBU_ACPA_BW40_4X4C1: + case HNBU_ACPA_BW80_4X4C1: + case HNBU_ACPA_BW20_4X4C2: + case HNBU_ACPA_BW40_4X4C2: + case HNBU_ACPA_BW80_4X4C2: + case HNBU_ACPA_BW20_4X4C3: + case HNBU_ACPA_BW40_4X4C3: + case HNBU_ACPA_BW80_4X4C3: { + int k = 0; + char pabuf[140]; /* max: 20 '0x????'s + 19 ','s + 1 Null */ + int core_num = 0, buflen = 0; + uint8 tuple = cis[i]; + + if (tuple == HNBU_ACPA_BW20_4X4C1 || + tuple == HNBU_ACPA_BW40_4X4C1 || + tuple == HNBU_ACPA_BW80_4X4C1) { + core_num = 1; + } else if (tuple == HNBU_ACPA_BW20_4X4C2 || + tuple == HNBU_ACPA_BW40_4X4C2 || + tuple == HNBU_ACPA_BW80_4X4C2) { + core_num = 2; + } else if (tuple == HNBU_ACPA_BW20_4X4C3 || + tuple == HNBU_ACPA_BW40_4X4C3 || + tuple == HNBU_ACPA_BW80_4X4C3) { + core_num = 3; + } + + buflen = sizeof(pabuf); + for (j = 0; j < 20; j++) { /* cis[i+1] - cis[i+40] */ 
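+ /* builds a comma-separated hex list in pabuf,
+ * e.g. "0x1234,0x5678,..." (20 entries, assuming
+ * rstr_hex is a "0x%x"-style format) */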
+ k += snprintf(pabuf+k, buflen-k, rstr_hex,
+ ((cis[i + (2*j) + 2] << 8) +
+ cis[i + (2*j) + 1]));
+ if (j < 19) {
+ k += snprintf(pabuf+k, buflen-k,
+ ",");
+ }
+ }
+
+ if (tuple == HNBU_ACPA_BW20_4X4C0 ||
+ tuple == HNBU_ACPA_BW20_4X4C1 ||
+ tuple == HNBU_ACPA_BW20_4X4C2 ||
+ tuple == HNBU_ACPA_BW20_4X4C3) {
+ varbuf_append(&b, vstr_sr13pa5ga, core_num, pabuf);
+ } else {
+ int bw = 40;
+
+ if (tuple == HNBU_ACPA_BW80_4X4C0 ||
+ tuple == HNBU_ACPA_BW80_4X4C1 ||
+ tuple == HNBU_ACPA_BW80_4X4C2 ||
+ tuple == HNBU_ACPA_BW80_4X4C3) {
+ bw = 80;
+ }
+ varbuf_append(&b, vstr_sr13pa5gbwa, bw,
+ core_num, pabuf);
+ }
+ break;
+ }
+
+ case HNBU_RSSI_DELTA_2G_B0:
+ case HNBU_RSSI_DELTA_2G_B1:
+ case HNBU_RSSI_DELTA_2G_B2:
+ case HNBU_RSSI_DELTA_2G_B3:
+ case HNBU_RSSI_DELTA_2G_B4: {
+ uint8 tuple = cis[i];
+ uint8 grp;
+ if (tuple == HNBU_RSSI_DELTA_2G_B0) {
+ grp = 0;
+ } else if (tuple == HNBU_RSSI_DELTA_2G_B1) {
+ grp = 1;
+ } else if (tuple == HNBU_RSSI_DELTA_2G_B2) {
+ grp = 2;
+ } else if (tuple == HNBU_RSSI_DELTA_2G_B3) {
+ grp = 3;
+ } else {
+ grp = 4;
+ }
+ /* 2G Band Group = grp */
+ varbuf_append(&b, vstr_rssidelta2g, grp,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7], cis[i + 8],
+ cis[i + 9], cis[i + 10],
+ cis[i + 11], cis[i + 12],
+ cis[i + 13], cis[i + 14],
+ cis[i + 15], cis[i + 16]);
+ break;
+ }
+
+ case HNBU_RSSI_CAL_FREQ_GRP_2G:
+ /* 2G Band Group Definition */
+ varbuf_append(&b, vstr_rssicalfrqg,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7]);
+ break;
+
+ case HNBU_RSSI_DELTA_5GL:
+ case HNBU_RSSI_DELTA_5GML:
+ case HNBU_RSSI_DELTA_5GMU:
+ case HNBU_RSSI_DELTA_5GH: {
+ uint8 tuple = cis[i];
+ char *band[] = {"l", "ml", "mu", "h"};
+ char *pband;
+ if (tuple == HNBU_RSSI_DELTA_5GL) {
+ pband = band[0];
+ } else if (tuple == HNBU_RSSI_DELTA_5GML) {
+ pband = band[1];
+ } else if (tuple == HNBU_RSSI_DELTA_5GMU) {
+ pband = band[2];
+ } else {
+ pband = band[3];
+ }
+ /* 5G Band = band */
+ varbuf_append(&b, vstr_rssidelta5g, pband,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7], cis[i + 8],
+ cis[i + 9], cis[i + 10],
+ cis[i + 11], cis[i + 12],
+ cis[i + 13], cis[i + 14],
+ cis[i + 15], cis[i + 16],
+ cis[i + 17], cis[i + 18],
+ cis[i + 19], cis[i + 20],
+ cis[i + 21], cis[i + 22],
+ cis[i + 23], cis[i + 24]);
+ break;
+ }
+
+ case HNBU_ACPA_6G_C0: {
+ const int a = 0;
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ varbuf_append(&b, vstr_subband6gver,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ /* maxp6g */
+ varbuf_append(&b, vstr_maxp6ga, a,
+ cis[i + 3],
+ cis[i + 4],
+ cis[i + 5],
+ cis[i + 6],
+ cis[i + 7],
+ cis[i + 8]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa6g */
+ varbuf_append(&b, vstr_pa6ga, a,
+ (cis[i + 10] << 8) + cis[i + 9],
+ (cis[i + 12] << 8) + cis[i + 11],
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35],
+ (cis[i + 38] << 8) + cis[i + 37],
+ (cis[i + 40] << 8) + cis[i + 39],
+ (cis[i + 42] << 8) + cis[i + 41],
+ (cis[i + 44] << 8) + cis[i + 43]);
+ break;
+ }
+
+ case HNBU_ACPA_6G_C1: {
+ const int a = 1;
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp6g */
+
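+ /* one max-power byte per 6G subband (cis[i + 1]
+ * through cis[i + 6]), mirroring the C0 case above */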
varbuf_append(&b, vstr_maxp6ga, a, + cis[i + 1], + cis[i + 2], + cis[i + 3], + cis[i + 4], + cis[i + 5], + cis[i + 6]); +#endif /* OTP_SKIP_MAXP_PAPARAMS */ + /* pa6g */ + varbuf_append(&b, vstr_pa6ga, a, + (cis[i + 8] << 8) + cis[i + 7], + (cis[i + 10] << 8) + cis[i + 9], + (cis[i + 12] << 8) + cis[i + 11], + (cis[i + 14] << 8) + cis[i + 13], + (cis[i + 16] << 8) + cis[i + 15], + (cis[i + 18] << 8) + cis[i + 17], + (cis[i + 20] << 8) + cis[i + 19], + (cis[i + 22] << 8) + cis[i + 21], + (cis[i + 24] << 8) + cis[i + 23], + (cis[i + 26] << 8) + cis[i + 25], + (cis[i + 28] << 8) + cis[i + 27], + (cis[i + 30] << 8) + cis[i + 29], + (cis[i + 32] << 8) + cis[i + 31], + (cis[i + 34] << 8) + cis[i + 33], + (cis[i + 36] << 8) + cis[i + 35], + (cis[i + 38] << 8) + cis[i + 37], + (cis[i + 40] << 8) + cis[i + 39], + (cis[i + 42] << 8) + cis[i + 41]); + break; + } + + case HNBU_ACPA_6G_C2: { + const int a = 2; +#ifndef OTP_SKIP_MAXP_PAPARAMS + /* maxp6g */ + varbuf_append(&b, vstr_maxp6ga, a, + cis[i + 1], + cis[i + 2], + cis[i + 3], + cis[i + 4], + cis[i + 5], + cis[i + 6]); +#endif /* OTP_SKIP_MAXP_PAPARAMS */ + /* pa6g */ + varbuf_append(&b, vstr_pa6ga, a, + (cis[i + 8] << 8) + cis[i + 7], + (cis[i + 10] << 8) + cis[i + 9], + (cis[i + 12] << 8) + cis[i + 11], + (cis[i + 14] << 8) + cis[i + 13], + (cis[i + 16] << 8) + cis[i + 15], + (cis[i + 18] << 8) + cis[i + 17], + (cis[i + 20] << 8) + cis[i + 19], + (cis[i + 22] << 8) + cis[i + 21], + (cis[i + 24] << 8) + cis[i + 23], + (cis[i + 26] << 8) + cis[i + 25], + (cis[i + 28] << 8) + cis[i + 27], + (cis[i + 30] << 8) + cis[i + 29], + (cis[i + 32] << 8) + cis[i + 31], + (cis[i + 34] << 8) + cis[i + 33], + (cis[i + 36] << 8) + cis[i + 35], + (cis[i + 38] << 8) + cis[i + 37], + (cis[i + 40] << 8) + cis[i + 39], + (cis[i + 42] << 8) + cis[i + 41]); + break; + } + + case HNBU_SUBBAND5GVER: + varbuf_append(&b, vstr_subband5gver, + (cis[i + 2] << 8) + cis[i + 1]); + break; + + case HNBU_PAPARAMBWVER: + varbuf_append(&b, vstr_paparambwver, cis[i + 1]); + break; + + case HNBU_TXBFRPCALS: + /* note: all 5 rpcal parameters are expected to be */ + /* inside one tuple record, i.e written with one */ + /* wl wrvar cmd as follows: */ + /* wl wrvar rpcal2g=0x1211 ... 
rpcal5gb3=0x0 */ + if (tlen != 11 ) { /* sanity check */ + BS_ERROR(("srom_parsecis:incorrect length:%d for" + " HNBU_TXBFRPCALS tuple\n", + tlen)); + break; + } + + varbuf_append(&b, vstr_paparamrpcalvars[0], + (cis[i + 1] + (cis[i + 2] << 8))); + varbuf_append(&b, vstr_paparamrpcalvars[1], + (cis[i + 3] + (cis[i + 4] << 8))); + varbuf_append(&b, vstr_paparamrpcalvars[2], + (cis[i + 5] + (cis[i + 6] << 8))); + varbuf_append(&b, vstr_paparamrpcalvars[3], + (cis[i + 7] + (cis[i + 8] << 8))); + varbuf_append(&b, vstr_paparamrpcalvars[4], + (cis[i + 9] + (cis[i + 10] << 8))); + break; + + case HNBU_GPIO_PULL_DOWN: + varbuf_append(&b, vstr_gpdn, + (cis[i + 4] << 24) | + (cis[i + 3] << 16) | + (cis[i + 2] << 8) | + cis[i + 1]); + break; + + case HNBU_MACADDR2: + if (!(ETHER_ISNULLADDR(&cis[i+1])) && + !(ETHER_ISMULTI(&cis[i+1]))) { + bcm_ether_ntoa((struct ether_addr *)&cis[i + 1], + eabuf2); + } + break; + } /* CISTPL_BRCM_HNBU */ + break; + } /* switch (tup) */ + + i += tlen; + } while (tup != CISTPL_END); + } + + if (boardnum != -1) { + varbuf_append(&b, vstr_boardnum, boardnum); + } + + if (eabuf[0]) { + varbuf_append(&b, vstr_macaddr, eabuf); + } + + if (eabuf2[0]) { + varbuf_append(&b, vstr_macaddr2, eabuf2); + } + +#ifndef BCM_BOOTLOADER + /* if there is no antenna gain field, set default */ + sromrev = (sromrev == 1u) ? (uint8)getintvar(NULL, rstr_sromrev) : sromrev; + if (sromrev <= 10u && getvar(NULL, rstr_ag0) == NULL && ag_init == FALSE) { + varbuf_append(&b, vstr_ag, 0, 0xff); + } +#endif + + /* final nullbyte terminator */ + ASSERT(b.size >= 1u); + *b.buf++ = '\0'; + + ASSERT((uint)(b.buf - base) <= var_cis_size); + + /* initvars_table() MALLOCs, copies and assigns the MALLOCed buffer to '*vars' */ + err = initvars_table(osh, base /* start */, b.buf /* end */, vars, count); + + MFREE(osh, base, var_cis_size); + return err; +} +#endif /* !defined(BCMDONGLEHOST) */ + +/** + * In chips with chipcommon rev 32 and later, the srom is in chipcommon, + * not in the bus cores. + */ +static uint16 +srom_cc_cmd(si_t *sih, osl_t *osh, volatile void *ccregs, uint32 cmd, uint wordoff, uint16 data) +{ + chipcregs_t *cc = ccregs; + uint wait_cnt = 1000; + uint32 byteoff = 0, sprom_size = 0; + + BCM_REFERENCE(sih); + byteoff = wordoff * 2; + + sprom_size = R_REG(osh, &cc->sromcontrol); + sprom_size = (sprom_size & SROM_SIZE_MASK) >> SROM_SIZE_SHFT_MASK; + if (sprom_size == SROM_SIZE_2K) + sprom_size = 2048; + else if (sprom_size == SROM_SIZE_512) + sprom_size = 512; + else if (sprom_size == SROM_SIZE_128) + sprom_size = 128; + if (byteoff >= sprom_size) + return 0xffff; + + if ((cmd == SRC_OP_READ) || (cmd == SRC_OP_WRITE)) { + if (sih->ccrev >= 59) + W_REG(osh, &cc->chipcontrol, (byteoff & SROM16K_BANK_SEL_MASK) >> + SROM16K_BANK_SHFT_MASK); + W_REG(osh, &cc->sromaddress, (byteoff & SROM16K_ADDR_SEL_MASK)); + if (cmd == SRC_OP_WRITE) + W_REG(osh, &cc->sromdata, data); + } + + W_REG(osh, &cc->sromcontrol, SRC_START | cmd); + + while (wait_cnt--) { + if ((R_REG(osh, &cc->sromcontrol) & SRC_BUSY) == 0) + break; + } + + if (!wait_cnt) { + BS_ERROR(("srom_cc_cmd: Command 0x%x timed out\n", cmd)); + return 0xffff; + } + if (cmd == SRC_OP_READ) + return (uint16)R_REG(osh, &cc->sromdata); + else + return 0xffff; +} + +#define CC_SROM_SHADOW_WSIZE 512 /* 0x800 - 0xC00 */ + +/** + * Read in and validate sprom. + * Return 0 on success, nonzero on error. + * Returns success on an SPROM containing only ones, unclear if this is intended. 
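+ * Reads 'nwords' 16-bit words starting at word offset
+ * 'wordoff' into 'buf'; when 'check_crc' is set, the
+ * CRC8 over the image is validated.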
+ */ +static int +sprom_read_pci(osl_t *osh, si_t *sih, volatile uint16 *sprom, uint wordoff, + uint16 *buf, uint nwords, bool check_crc) +{ + int err = 0; + uint i; + volatile void *ccregs = NULL; + chipcregs_t *cc = NULL; + uint32 ccval = 0, sprom_size = 0; + uint32 sprom_num_words; + + if (BCM43602_CHIP(sih->chip) || + (CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) { + /* save current control setting */ + ccval = si_chipcontrl_read(sih); + } + + if (BCM43602_CHIP(sih->chip) || + (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) && + (CHIPREV(sih->chiprev) <= 2))) { + si_chipcontrl_srom4360(sih, TRUE); + } + + if (FALSE) { + si_srom_clk_set(sih); /* corrects srom clock frequency */ + } + + ccregs = ((volatile uint8 *)sprom - CC_SROM_OTP); + cc = ccregs; + sprom_size = R_REG(osh, &cc->sromcontrol); + sprom_size = (sprom_size & SROM_SIZE_MASK) >> SROM_SIZE_SHFT_MASK; + if (sprom_size == SROM_SIZE_2K) + sprom_size = 2048; + else if (sprom_size == SROM_SIZE_512) + sprom_size = 512; + else if (sprom_size == SROM_SIZE_128) + sprom_size = 128; + sprom_num_words = sprom_size/2; + + /* read the sprom */ + for (i = 0; i < nwords; i++) { + if (sih->ccrev > 31 && ISSIM_ENAB(sih)) { + /* use indirect since direct is too slow on QT */ + if ((sih->cccaps & CC_CAP_SROM) == 0) { + err = 1; + goto error; + } + + /* hack to get ccregs */ + ccregs = (volatile void *)((volatile uint8 *)sprom - CC_SROM_OTP); + buf[i] = srom_cc_cmd(sih, osh, ccregs, SRC_OP_READ, wordoff + i, 0); + + } else { + /* Because of the slow emulation we need to read twice in QT */ + if (ISSIM_ENAB(sih)) { + buf[i] = R_REG(osh, &sprom[wordoff + i]); + } + + if ((wordoff + i) >= sprom_num_words) { + buf[i] = 0xffff; + } else if ((wordoff + i) >= CC_SROM_SHADOW_WSIZE) { + /* Srom shadow region in chipcommon is only 512 words + * use indirect access for Srom beyond 512 words + */ + buf[i] = srom_cc_cmd(sih, osh, ccregs, SRC_OP_READ, wordoff + i, 0); + } else { + buf[i] = R_REG(osh, &sprom[wordoff + i]); + } + } + if (i == SROM13_SIGN) { + if ((buf[SROM13_SIGN] != SROM13_SIGNATURE) && (nwords == SROM13_WORDS)) { + err = 1; + goto error; + } + } + } + + /* bypass crc checking for simulation to allow srom hack */ + if (ISSIM_ENAB(sih)) { + goto error; + } + + if (check_crc) { + + if (buf[0] == 0xffff) { + /* The hardware thinks that an srom that starts with 0xffff + * is blank, regardless of the rest of the content, so declare + * it bad. 
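+			 * (An erased part reads back as all ones, so a leading 0xffff
+			 * word is taken as a blank device rather than a valid image.)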
+ */ + BS_ERROR(("sprom_read_pci: buf[0] = 0x%x, returning bad-crc\n", buf[0])); + err = 1; + goto error; + } + + /* fixup the endianness so crc8 will pass */ + htol16_buf(buf, nwords * 2); + if (hndcrc8((uint8 *)buf, nwords * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE) { + /* DBG only pci always read srom4 first, then srom8/9 */ + /* BS_ERROR(("sprom_read_pci: bad crc\n")); */ + err = 1; + } + /* now correct the endianness of the byte array */ + ltoh16_buf(buf, nwords * 2); + } + +error: + if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID) || + BCM43602_CHIP(sih->chip)) { + /* Restore config after reading SROM */ + si_chipcontrl_restore(sih, ccval); + } + + return err; +} + +#if !defined(BCMDONGLEHOST) +#if defined(BCMNVRAMW) || defined(BCMNVRAMR) +static int +BCMSROMATTACHFN(otp_read_pci)(osl_t *osh, si_t *sih, uint16 *buf, uint bufsz) +{ + uint8 *otp; + uint sz = OTP_SZ_MAX/2; /* size in words */ + int err = 0; + + if (bufsz > OTP_SZ_MAX) { + return BCME_ERROR; + } + + /* freed in same function */ + if ((otp = MALLOC_NOPERSIST(osh, OTP_SZ_MAX)) == NULL) { + return BCME_ERROR; + } + + bzero(otp, OTP_SZ_MAX); + + err = otp_read_region(sih, OTP_HW_RGN, (uint16 *)otp, &sz); + + if (err) { + MFREE(osh, otp, OTP_SZ_MAX); + return err; + } + + bcopy(otp, buf, bufsz); + + /* Check CRC */ + if (((uint16 *)otp)[0] == 0xffff) { + /* The hardware thinks that an srom that starts with 0xffff + * is blank, regardless of the rest of the content, so declare + * it bad. + */ + BS_ERROR(("otp_read_pci: otp[0] = 0x%x, returning bad-crc\n", + ((uint16 *)otp)[0])); + MFREE(osh, otp, OTP_SZ_MAX); + return 1; + } + + /* fixup the endianness so crc8 will pass */ + htol16_buf(otp, OTP_SZ_MAX); + if (hndcrc8(otp, SROM4_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE && + hndcrc8(otp, SROM10_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE && + hndcrc8(otp, SROM11_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE && + hndcrc8(otp, SROM12_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE && + hndcrc8(otp, SROM13_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE) { + BS_ERROR(("otp_read_pci: bad crc\n")); + err = 1; + } + + MFREE(osh, otp, OTP_SZ_MAX); + + return err; +} +#endif /* defined(BCMNVRAMW) || defined(BCMNVRAMR) */ +#endif /* !defined(BCMDONGLEHOST) */ + +int +srom_otp_write_region_crc(si_t *sih, uint nbytes, uint16* buf16, bool write) +{ +#if defined(WLTEST) || defined(BCMDBG) + int err = 0, crc = 0; +#if !defined(BCMDONGLEHOST) + uint8 *buf8; + + /* Check nbytes is not odd or too big */ + if ((nbytes & 1) || (nbytes > SROM_MAX)) + return 1; + + /* block invalid buffer size */ + if (nbytes < SROM4_WORDS * 2) + return BCME_BUFTOOSHORT; + else if (nbytes > SROM13_WORDS * 2) + return BCME_BUFTOOLONG; + + /* Verify signatures */ + if (!((buf16[SROM4_SIGN] == SROM4_SIGNATURE) || + (buf16[SROM8_SIGN] == SROM4_SIGNATURE) || + (buf16[SROM10_SIGN] == SROM4_SIGNATURE) || + (buf16[SROM11_SIGN] == SROM11_SIGNATURE)|| + (buf16[SROM12_SIGN] == SROM12_SIGNATURE)|| + (buf16[SROM13_SIGN] == SROM13_SIGNATURE))) { + BS_ERROR(("srom_otp_write_region_crc: wrong signature SROM4_SIGN %x SROM8_SIGN %x" + " SROM10_SIGN %x\n", + buf16[SROM4_SIGN], buf16[SROM8_SIGN], buf16[SROM10_SIGN])); + return BCME_ERROR; + } + + /* Check CRC */ + if (buf16[0] == 0xffff) { + /* The hardware thinks that an srom that starts with 0xffff + * is blank, regardless of the rest of the content, so declare + * it bad. 
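+		 * (All-ones content is the erased state and cannot carry a valid CRC.)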
+		 */
+		BS_ERROR(("srom_otp_write_region_crc: invalid buf16[0] = 0x%x\n", buf16[0]));
+		goto out;
+	}
+
+	buf8 = (uint8*)buf16;
+	/* fixup the endianness and then calculate crc */
+	htol16_buf(buf8, nbytes);
+	crc = ~hndcrc8(buf8, nbytes - 1, CRC8_INIT_VALUE);
+	/* now correct the endianness of the byte array */
+	ltoh16_buf(buf8, nbytes);
+
+	if (nbytes == SROM11_WORDS * 2)
+		buf16[SROM11_CRCREV] = (crc << 8) | (buf16[SROM11_CRCREV] & 0xff);
+	else if (nbytes == SROM12_WORDS * 2)
+		buf16[SROM12_CRCREV] = (crc << 8) | (buf16[SROM12_CRCREV] & 0xff);
+	else if (nbytes == SROM13_WORDS * 2)
+		buf16[SROM13_CRCREV] = (crc << 8) | (buf16[SROM13_CRCREV] & 0xff);
+	else if (nbytes == SROM10_WORDS * 2)
+		buf16[SROM10_CRCREV] = (crc << 8) | (buf16[SROM10_CRCREV] & 0xff);
+	else
+		buf16[SROM4_CRCREV] = (crc << 8) | (buf16[SROM4_CRCREV] & 0xff);
+
+#ifdef BCMNVRAMW
+	/* Write the CRC back */
+	if (write)
+		err = otp_write_region(sih, OTP_HW_RGN, buf16, nbytes/2, 0);
+#endif /* BCMNVRAMW */
+
+out:
+#endif /* !defined(BCMDONGLEHOST) */
+	return write ? err : crc;
+#else
+	BCM_REFERENCE(sih);
+	BCM_REFERENCE(nbytes);
+	BCM_REFERENCE(buf16);
+	BCM_REFERENCE(write);
+	return 0;
+#endif /* WLTEST || BCMDBG */
+
+}
+
+#if !defined(BCMDONGLEHOST)
+int
+BCMATTACHFN(dbushost_initvars_flash)(si_t *sih, osl_t *osh, char **base, uint len)
+{
+	return initvars_flash(sih, osh, base, len);
+}
+
+/**
+ * Find variables from flash. 'base' points to the beginning of the table upon
+ * entry and to the end of the table upon successful exit.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_flash)(si_t *sih, osl_t *osh, char **base, uint len)
+{
+	char *vp = *base;
+	char *flash;
+	int err;
+	char *s;
+	uint l, dl, copy_len;
+	char devpath[SI_DEVPATH_BUFSZ], devpath_pcie[SI_DEVPATH_BUFSZ];
+	char coded_name[SI_DEVPATH_BUFSZ] = {0};
+	int path_len, coded_len, devid_len, pcie_path_len;
+
+	/* allocate memory and read in flash */
+	/* freed in same function */
+	if (!(flash = MALLOC_NOPERSIST(osh, MAX_NVRAM_SPACE)))
+		return BCME_NOMEM;
+	if ((err = nvram_getall(flash, MAX_NVRAM_SPACE)))
+		goto exit;
+
+	/* create legacy devpath prefix */
+	si_devpath(sih, devpath, sizeof(devpath));
+	path_len = strlen(devpath);
+
+	if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		si_devpath_pcie(sih, devpath_pcie, sizeof(devpath_pcie));
+		pcie_path_len = strlen(devpath_pcie);
+	} else
+		pcie_path_len = 0;
+
+	/* create coded devpath prefix */
+	si_coded_devpathvar(sih, coded_name, sizeof(coded_name), "devid");
+
+	/* coded_name now is 'xx:devid'; strip the trailing 'devid' */
+	/* so that it becomes 'xx:' */
+	devid_len = strlen("devid");
+	coded_len = strlen(coded_name);
+	if (coded_len > devid_len) {
+		coded_name[coded_len - devid_len] = '\0';
+		coded_len -= devid_len;
+	}
+	else
+		coded_len = 0;
+
+	/* grab vars with the legacy, pcie, or coded prefix in their name */
+	for (s = flash; s && *s; s += l + 1) {
+		l = strlen(s);
+
+		/* skip non-matching variable */
+		if (strncmp(s, devpath, path_len) == 0)
+			dl = path_len;
+		else if (pcie_path_len && strncmp(s, devpath_pcie, pcie_path_len) == 0)
+			dl = pcie_path_len;
+		else if (coded_len && strncmp(s, coded_name, coded_len) == 0)
+			dl = coded_len;
+		else
+			continue;
+
+		/* is there enough room to copy?
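+		 * (copy_len counts the name=value tail after the stripped prefix,
+		 *  plus its terminating NUL)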
+		 */
+		copy_len = l - dl + 1;
+		if (len < copy_len) {
+			err = BCME_BUFTOOSHORT;
+			goto exit;
+		}
+
+		/* no prefix, just the name=value */
+		strlcpy(vp, &s[dl], copy_len);
+		vp += copy_len;
+		len -= copy_len;
+	}
+
+	/* add null string as terminator */
+	if (len < 1) {
+		err = BCME_BUFTOOSHORT;
+		goto exit;
+	}
+	*vp++ = '\0';
+
+	*base = vp;
+
+exit:
+	MFREE(osh, flash, MAX_NVRAM_SPACE);
+	return err;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if !defined(BCMUSBDEV_ENABLED) && !defined(BCMSDIODEV_ENABLED) && \
+	!defined(BCMPCIEDEV_ENABLED)
+#if !defined(BCMDONGLEHOST)
+/**
+ * Initialize nonvolatile variable table from flash.
+ * Return 0 on success, nonzero on error.
+ */
+/* There is no need to load the nvram variables from flash for dongles.
+ * These variables are mainly for supporting SROM-less devices although
+ * we can use the same mechanism to support configuration of multiple
+ * cores of the same type.
+ */
+static int
+BCMATTACHFN(initvars_flash_si)(si_t *sih, char **vars, uint *count)
+{
+	osl_t *osh = si_osh(sih);
+	char *vp, *base;
+	int err;
+
+	ASSERT(vars != NULL);
+	ASSERT(count != NULL);
+
+	/* freed in same function */
+	base = vp = MALLOC_NOPERSIST(osh, MAXSZ_NVRAM_VARS);
+	ASSERT(vp != NULL);
+	if (!vp)
+		return BCME_NOMEM;
+
+	if ((err = initvars_flash(sih, osh, &vp, MAXSZ_NVRAM_VARS)) == 0)
+		err = initvars_table(osh, base, vp, vars, count);
+
+	MFREE(osh, base, MAXSZ_NVRAM_VARS);
+
+	return err;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+#endif /* !BCMUSBDEV && !BCMSDIODEV */
+
+#if !defined(BCMDONGLEHOST)
+
+/** returns position of rightmost bit that was set in caller supplied mask */
+static uint
+mask_shift(uint16 mask)
+{
+	uint i;
+	for (i = 0; i < (sizeof(mask) << 3); i ++) {
+		if (mask & (1 << i))
+			return i;
+	}
+	ASSERT(mask);
+	return 0;
+}
+
+static uint
+mask_width(uint16 mask)
+{
+	int i;
+	for (i = (sizeof(mask) << 3) - 1; i >= 0; i --) {
+		if (mask & (1 << i))
+			return (uint)(i - mask_shift(mask) + 1);
+	}
+	ASSERT(mask);
+	return 0;
+}
+
+#ifdef BCMASSERT_SUPPORT
+static bool
+mask_valid(uint16 mask)
+{
+	uint shift = mask_shift(mask);
+	uint width = mask_width(mask);
+	return mask == ((~0 << shift) & ~(~0 << (shift + width)));
+}
+#endif
+#ifdef NVSRCX
+void
+srom_set_sromvars(char *vars)
+{
+	if (sromh)
+		sromh->_srom_vars = vars;
+}
+char *
+srom_get_sromvars(void)
+{
+	if (sromh)
+		return sromh->_srom_vars;
+	else
+		return NULL;
+}
+
+srom_info_t *
+srom_info_init(osl_t *osh)
+{
+	sromh = (srom_info_t *) MALLOC_NOPERSIST(osh, sizeof(srom_info_t));
+	if (!sromh)
+		return NULL;
+	sromh->_srom_vars = NULL;
+	sromh->is_caldata_prsnt = FALSE;
+	return sromh;
+}
+#endif /* NVSRCX */
+/**
+ * Parses caller supplied SROM contents into name=value pairs. Global array pci_sromvars[] contains
+ * the link between a word offset in SROM and the corresponding NVRAM variable name. 'srom' points
+ * to the SROM word array. 'off' specifies the offset of the first word 'srom' points to, which
+ * should be either 0 or SROM3_SWRG_OFF (full SROM or software region).
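+ * For example, an SRFL_PRHEX entry named "boardtype" with value 0x4d3 is
+ * appended to 'b' as the string "boardtype=0x4d3" (illustrative value).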
+ */ +static void +BCMATTACHFN(_initvars_srom_pci)(uint8 sromrev, uint16 *srom, uint off, varbuf_t *b) +{ + uint16 w; + uint32 val; + const sromvar_t *srv; + uint width; + uint flags; + uint32 sr = (1 << sromrev); + bool in_array = FALSE; + static char array_temp[256]; + uint array_curr = 0; + const char* array_name = NULL; + + varbuf_append(b, "sromrev=%d", sromrev); +#if !defined(SROM15_MEMOPT) && !defined(SROM17_MEMOPT) + if (sromrev == 15) { + srv = pci_srom15vars; + } else if (sromrev == 16) { + srv = pci_srom16vars; + } else if (sromrev == 17) { + srv = pci_srom17vars; + } else if (sromrev == 18) { + srv = pci_srom18vars; + } else { + srv = pci_sromvars; + } +#else +#if defined(SROM15_MEMOPT) + srv = pci_srom15vars; +#endif /* defined(SROM15_MEMOPT) */ +#if defined(SROM17_MEMOPT) + srv = pci_srom17vars; +#endif /* defined(SROM17_MEMOPT) */ +#endif /* !defined(SROM15_MEMOPT) && !defined(SROM17_MEMOPT) */ + + for (; srv->name != NULL; srv ++) { + const char *name; + static bool in_array2 = FALSE; + static char array_temp2[256]; + static uint array_curr2 = 0; + static const char* array_name2 = NULL; + + if ((srv->revmask & sr) == 0) + continue; + + if (srv->off < off) + continue; + + flags = srv->flags; + name = srv->name; + + /* This entry is for mfgc only. Don't generate param for it, */ + if (flags & SRFL_NOVAR) + continue; + + if (flags & SRFL_ETHADDR) { + char eabuf[ETHER_ADDR_STR_LEN]; + struct ether_addr ea; + + ea.octet[0] = (srom[srv->off - off] >> 8) & 0xff; + ea.octet[1] = srom[srv->off - off] & 0xff; + ea.octet[2] = (srom[srv->off + 1 - off] >> 8) & 0xff; + ea.octet[3] = srom[srv->off + 1 - off] & 0xff; + ea.octet[4] = (srom[srv->off + 2 - off] >> 8) & 0xff; + ea.octet[5] = srom[srv->off + 2 - off] & 0xff; + bcm_ether_ntoa(&ea, eabuf); + + varbuf_append(b, "%s=%s", name, eabuf); + } else { + ASSERT(mask_valid(srv->mask)); + ASSERT(mask_width(srv->mask)); + + /* Start of an array */ + if (sromrev >= 10 && (srv->flags & SRFL_ARRAY) && !in_array2) { + array_curr2 = 0; + array_name2 = (const char*)srv->name; + bzero((void*)array_temp2, sizeof(array_temp2)); + in_array2 = TRUE; + } + + w = srom[srv->off - off]; + val = (w & srv->mask) >> mask_shift(srv->mask); + width = mask_width(srv->mask); + + while (srv->flags & SRFL_MORE) { + srv ++; + ASSERT(srv->name != NULL); + + if (srv->off == 0 || srv->off < off) + continue; + + ASSERT(mask_valid(srv->mask)); + ASSERT(mask_width(srv->mask)); + + w = srom[srv->off - off]; + val += ((w & srv->mask) >> mask_shift(srv->mask)) << width; + width += mask_width(srv->mask); + } + + if ((flags & SRFL_NOFFS) && ((int)val == (1 << width) - 1)) + continue; + + /* Array support starts in sromrev 10. Skip arrays for sromrev <= 9 */ + if (sromrev <= 9 && srv->flags & SRFL_ARRAY) { + while (srv->flags & SRFL_ARRAY) + srv ++; + srv ++; + } + + if (in_array2) { + int ret; + + if (flags & SRFL_PRHEX) { + ret = snprintf(array_temp2 + array_curr2, + sizeof(array_temp2) - array_curr2, "0x%x,", val); + } else if ((flags & SRFL_PRSIGN) && + (val & (1 << (width - 1)))) { + ret = snprintf(array_temp2 + array_curr2, + sizeof(array_temp2) - array_curr2, "%d,", + (int)(val | (~0 << width))); + } else { + ret = snprintf(array_temp2 + array_curr2, + sizeof(array_temp2) - array_curr2, "%u,", val); + } + + if (ret > 0) { + array_curr2 += ret; + } else { + BS_ERROR(("_initvars_srom_pci: array %s parsing error." 
+ " buffer too short.\n", + array_name2)); + ASSERT(0); + + /* buffer too small, skip this param */ + while (srv->flags & SRFL_ARRAY) + srv ++; + srv ++; + in_array2 = FALSE; + continue; + } + + if (!(srv->flags & SRFL_ARRAY)) { /* Array ends */ + /* Remove the last ',' */ + array_temp2[array_curr2-1] = '\0'; + in_array2 = FALSE; + varbuf_append(b, "%s=%s", array_name2, array_temp2); + } + } else if (flags & SRFL_CCODE) { + if (val == 0) + varbuf_append(b, "ccode="); + else + varbuf_append(b, "ccode=%c%c", (val >> 8), (val & 0xff)); + } else if (flags & SRFL_PRHEX) { + varbuf_append(b, "%s=0x%x", name, val); + } else if ((flags & SRFL_PRSIGN) && (val & (1 << (width - 1)))) { + varbuf_append(b, "%s=%d", name, (int)(val | (~0 << width))); + } else { + varbuf_append(b, "%s=%u", name, val); + } + } + } + + if ((sromrev >= 4) && (sromrev != 16) && (sromrev != 18)) { + /* Do per-path variables */ + uint p, pb, psz, path_num; + + if ((sromrev == 17) || (sromrev == 15)) { + pb = psz = 0; + path_num = 0; + if (sromh) + sromh->is_caldata_prsnt = TRUE; + } else if (sromrev >= 13) { + pb = SROM13_PATH0; + psz = SROM13_PATH1 - SROM13_PATH0; + path_num = MAX_PATH_SROM_13; + } else if (sromrev >= 12) { + pb = SROM12_PATH0; + psz = SROM12_PATH1 - SROM12_PATH0; + path_num = MAX_PATH_SROM_12; + } else if (sromrev >= 11) { + pb = SROM11_PATH0; + psz = SROM11_PATH1 - SROM11_PATH0; + path_num = MAX_PATH_SROM_11; + } else if (sromrev >= 8) { + pb = SROM8_PATH0; + psz = SROM8_PATH1 - SROM8_PATH0; + path_num = MAX_PATH_SROM; + } else { + pb = SROM4_PATH0; + psz = SROM4_PATH1 - SROM4_PATH0; + path_num = MAX_PATH_SROM; + } + + for (p = 0; p < path_num; p++) { + for (srv = perpath_pci_sromvars; srv->name != NULL; srv ++) { + + if ((srv->revmask & sr) == 0) + continue; + + if (pb + srv->off < off) + continue; + + /* This entry is for mfgc only. Don't generate param for it, */ + if (srv->flags & SRFL_NOVAR) + continue; + + /* Start of an array */ + if (sromrev >= 10 && (srv->flags & SRFL_ARRAY) && !in_array) { + array_curr = 0; + array_name = (const char*)srv->name; + bzero((void*)array_temp, sizeof(array_temp)); + in_array = TRUE; + } + + w = srom[pb + srv->off - off]; + + ASSERT(mask_valid(srv->mask)); + val = (w & srv->mask) >> mask_shift(srv->mask); + width = mask_width(srv->mask); + + flags = srv->flags; + + /* Cheating: no per-path var is more than 1 word */ + + if ((srv->flags & SRFL_NOFFS) && ((int)val == (1 << width) - 1)) + continue; + + if (in_array) { + int ret; + + if (flags & SRFL_PRHEX) { + ret = snprintf(array_temp + array_curr, + sizeof(array_temp) - array_curr, "0x%x,", val); + } else if ((flags & SRFL_PRSIGN) && + (val & (1 << (width - 1)))) { + ret = snprintf(array_temp + array_curr, + sizeof(array_temp) - array_curr, "%d,", + (int)(val | (~0 << width))); + } else { + ret = snprintf(array_temp + array_curr, + sizeof(array_temp) - array_curr, "%u,", val); + } + + if (ret > 0) { + array_curr += ret; + } else { + BS_ERROR( + ("_initvars_srom_pci: array %s parsing error." 
+							" buffer too short.\n",
+							array_name));
+						ASSERT(0);
+
+						/* buffer too small, skip this param */
+						while (srv->flags & SRFL_ARRAY)
+							srv ++;
+						srv ++;
+						in_array = FALSE;
+						continue;
+					}
+
+					if (!(srv->flags & SRFL_ARRAY)) { /* Array ends */
+						/* Remove the last ',' */
+						array_temp[array_curr-1] = '\0';
+						in_array = FALSE;
+						varbuf_append(b, "%s%d=%s",
+							array_name, p, array_temp);
+					}
+				} else if (srv->flags & SRFL_PRHEX)
+					varbuf_append(b, "%s%d=0x%x", srv->name, p, val);
+				else
+					varbuf_append(b, "%s%d=%d", srv->name, p, val);
+			}
+			if (sromrev >= 13 && (p == (MAX_PATH_SROM_13 - 2))) {
+				psz = SROM13_PATH3 - SROM13_PATH2;
+			}
+			pb += psz;
+		}
+	} /* per path variables */
+} /* _initvars_srom_pci */
+
+int
+BCMATTACHFN(get_srom_pci_caldata_size)(uint32 sromrev)
+{
+	uint32 caldata_size;
+
+	switch (sromrev) {
+		case 15:
+			caldata_size = (SROM15_CALDATA_WORDS * 2);
+			break;
+		case 17:
+			caldata_size = (SROM17_CALDATA_WORDS * 2);
+			break;
+		default:
+			caldata_size = 0;
+			break;
+	}
+	return caldata_size;
+}
+
+uint32
+BCMATTACHFN(get_srom_size)(uint32 sromrev)
+{
+	uint32 size;
+
+	switch (sromrev) {
+		case 15:
+			size = (SROM15_WORDS * 2);
+			break;
+		case 17:
+			size = (SROM17_WORDS * 2);
+			break;
+		default:
+			size = 0;
+			break;
+	}
+	return size;
+}
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+
+int
+BCMATTACHFN(_initvars_srom_pci_caldata)(si_t *sih, uint16 *srom, uint32 sromrev)
+{
+	int err = BCME_ERROR;
+
+	if (sromh && (!sromh->is_caldata_prsnt)) {
+		return err;
+	}
+
+	if (si_is_sprom_available(sih)) {
+		uint32 caldata_size;
+
+		caldata_size = get_srom_pci_caldata_size(sromrev);
+		memcpy(srom, caldata_array, caldata_size);
+		err = BCME_OK;
+	}
+	return err;
+}
+#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */
+/**
+ * Initialize nonvolatile variable table from sprom, or OTP when SPROM is not available, or
+ * optionally a set of 'defaultsromvars' (compiled-in) variables when both OTP and SPROM bear no
+ * contents.
+ *
+ * On success, a buffer containing var/val pairs is allocated and returned in params vars and count.
+ *
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_srom_pci)(si_t *sih, volatile void *curmap, char **vars, uint *count)
+{
+	uint16 *srom;
+	volatile uint16 *sromwindow;
+	uint8 sromrev = 0;
+	uint32 sr;
+	varbuf_t b;
+	char *vp, *base = NULL;
+	osl_t *osh = si_osh(sih);
+	bool flash = FALSE;
+	int err = 0;
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+	uint16 cal_wordoffset;
+#endif
+
+	/*
+	 * Apply CRC over the SROM content regardless of whether an SROM is present, and
+	 * use the existence of the variable 'sromrev' in flash to decide whether to return
+	 * an error when the CRC fails or to read the SROM variables from flash.
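+	 * The signature word read below selects the layout: SROM18 through SROM15,
+	 * SROM13 through SROM11, and SROM4/8 each carry their own signature; anything
+	 * else is treated as the legacy rev <= 3 layout, whose top word holds the
+	 * version and CRC8.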
+ */ + + /* freed in same function */ + srom = MALLOC_NOPERSIST(osh, SROM_MAX); + ASSERT(srom != NULL); + if (!srom) + return -2; + + sromwindow = (volatile uint16 *)srom_offset(sih, curmap); + if (si_is_sprom_available(sih)) { + err = sprom_read_pci(osh, sih, sromwindow, 0, srom, SROM_SIGN_MINWORDS + 1, FALSE); + if (err == 0) { + if (srom[SROM18_SIGN] == SROM18_SIGNATURE) { + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM18_WORDS, TRUE); + sromrev = srom[SROM18_CRCREV] & 0xff; + } else if (srom[SROM17_SIGN] == SROM17_SIGNATURE) { + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM17_WORDS, TRUE); + sromrev = srom[SROM17_CRCREV] & 0xff; + } else if (srom[SROM16_SIGN] == SROM16_SIGNATURE) { + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM16_WORDS, TRUE); + sromrev = srom[SROM16_CRCREV] & 0xff; + } else if (srom[SROM15_SIGN] == SROM15_SIGNATURE) { /* srom 15 */ + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM15_WORDS, TRUE); + sromrev = srom[SROM15_CRCREV] & 0xff; + } else if (srom[SROM11_SIGN] == SROM13_SIGNATURE) { + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM13_WORDS, TRUE); + sromrev = srom[SROM13_CRCREV] & 0xff; + } else if (srom[SROM11_SIGN] == SROM12_SIGNATURE) { + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM12_WORDS, TRUE); + sromrev = srom[SROM12_CRCREV] & 0xff; + } else if (srom[SROM11_SIGN] == SROM11_SIGNATURE) { + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM11_WORDS, TRUE); + sromrev = srom[SROM11_CRCREV] & 0xff; + } else if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) || /* srom 4 */ + (srom[SROM8_SIGN] == SROM4_SIGNATURE)) { /* srom 8,9 */ + err = sprom_read_pci(osh, sih, sromwindow, + 0, srom, SROM4_WORDS, TRUE); + sromrev = srom[SROM4_CRCREV] & 0xff; + } else { + err = sprom_read_pci(osh, sih, sromwindow, 0, + srom, SROM_WORDS, TRUE); + if (err == 0) { + /* srom is good and is rev < 4 */ + /* top word of sprom contains version and crc8 */ + sromrev = srom[SROM_CRCREV] & 0xff; + /* bcm4401 sroms misprogrammed */ + if (sromrev == 0x10) + sromrev = 1; + } + } + if (err) + BS_ERROR(("srom read failed\n")); + } + else + BS_ERROR(("srom read failed\n")); + } + +#if defined(BCMNVRAMW) || defined(BCMNVRAMR) + /* Use OTP if SPROM not available */ + else if ((err = otp_read_pci(osh, sih, srom, SROM_MAX)) == 0) { + /* OTP only contain SROM rev8/rev9/rev10/Rev11 for now */ + + if (srom[SROM13_SIGN] == SROM13_SIGNATURE) + sromrev = srom[SROM13_CRCREV] & 0xff; + else if (srom[SROM12_SIGN] == SROM12_SIGNATURE) + sromrev = srom[SROM12_CRCREV] & 0xff; + else if (srom[SROM11_SIGN] == SROM11_SIGNATURE) + sromrev = srom[SROM11_CRCREV] & 0xff; + else if (srom[SROM10_SIGN] == SROM10_SIGNATURE) + sromrev = srom[SROM10_CRCREV] & 0xff; + else + sromrev = srom[SROM4_CRCREV] & 0xff; + } +#endif /* defined(BCMNVRAMW) || defined(BCMNVRAMR) */ + else { + err = 1; + BS_ERROR(("Neither SPROM nor OTP has valid image\n")); + } + + BS_ERROR(("srom rev:%d\n", sromrev)); + + /* We want internal/wltest driver to come up with default sromvars so we can + * program a blank SPROM/OTP. 
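+	 * (The defaultsromvars_* tables used below are compiled-in "name=value"
+	 * strings; srom_vars_len() returns their total length.)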
+ */ + if (err || sromrev == 0) { + char *value; +#if defined(BCMHOSTVARS) + uint32 val; +#endif + + if ((value = si_getdevpathvar(sih, "sromrev"))) { + sromrev = (uint8)bcm_strtoul(value, NULL, 0); + flash = TRUE; + goto varscont; + } + + BS_ERROR(("initvars_srom_pci, SROM CRC Error\n")); + +#if !defined(DONGLEBUILD) || defined(BCMPCIEDEV_SROM_FORMAT) + /* NIC build or PCIe FD using SROM format shouldn't load driver + * default when external nvram exists. + */ + if ((value = getvar(NULL, "sromrev"))) { + BS_ERROR(("initvars_srom_pci, Using external nvram\n")); + err = 0; + goto errout; + } +#endif /* !DONGLEBUILD || BCMPCIEDEV_SROM_FORMAT */ + +#if defined(BCMHOSTVARS) + /* + * CRC failed on srom, so if the device is using OTP + * and if OTP is not programmed use the default variables. + * for 4311 A1 there is no signature to indicate that OTP is + * programmed, so can't really verify the OTP is unprogrammed + * or a bad OTP. + */ + val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32)); + if ((si_is_sprom_available(sih) && srom[0] == 0xffff) || +#ifdef BCMQT + (si_is_sprom_available(sih) && sromrev == 0) || +#endif + (val & SPROM_OTPIN_USE)) { + vp = base = mfgsromvars; + + /* For windows internal/wltest driver, a .nvm file with default + * nvram parameters is downloaded from the file system (in src/wl/sys: + * wl_readconfigdata()). + * Only when we cannot download default vars from the file system, use + * defaultsromvars_wltest as default + */ + if (defvarslen == 0) { + BS_ERROR(("No nvm file, use generic default (for programming" + " SPROM/OTP only)\n")); + + if (BCM43602_CHIP(sih->chip)) { + defvarslen = srom_vars_len(defaultsromvars_43602); + bcopy(defaultsromvars_43602, vp, defvarslen); + } else if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) { + defvarslen = srom_vars_len(defaultsromvars_4360); + bcopy(defaultsromvars_4360, vp, defvarslen); + } else if (BCM4378_CHIP(sih->chip)) { + defvarslen = srom_vars_len(defaultsromvars_4378); + bcopy(defaultsromvars_4378, vp, defvarslen); + } else if (BCM4387_CHIP(sih->chip)) { + defvarslen = srom_vars_len(defaultsromvars_4387); + bcopy(defaultsromvars_4387, vp, defvarslen); + } else { + defvarslen = srom_vars_len(defaultsromvars_wltest); + bcopy(defaultsromvars_wltest, vp, defvarslen); + } + } else { + BS_ERROR(("Use nvm file as default\n")); + } + + vp += defvarslen; + /* add final null terminator */ + *vp++ = '\0'; + + BS_ERROR(("Used %d bytes of defaultsromvars\n", defvarslen)); + goto varsdone; + + } else if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID) || + BCM43602_CHIP(sih->chip)) { + + base = vp = mfgsromvars; + + if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID) || + BCM43602_CHIP(sih->chip)) + BS_ERROR(("4360 BOOT w/o SPROM or OTP\n")); + else + BS_ERROR(("BOOT w/o SPROM or OTP\n")); + + if (defvarslen == 0) { + if (BCM43602_CHIP(sih->chip)) { + defvarslen = srom_vars_len(defaultsromvars_43602); + bcopy(defaultsromvars_43602, vp, defvarslen); + } else if ((sih->chip == BCM4360_CHIP_ID) || + (sih->chip == BCM4352_CHIP_ID)) { + defvarslen = srom_vars_len(defaultsromvars_4360); + bcopy(defaultsromvars_4360, vp, defvarslen); + } else { + defvarslen = srom_vars_len(defaultsromvars_4331); + bcopy(defaultsromvars_4331, vp, defvarslen); + } + } + vp += defvarslen; + *vp++ = '\0'; + goto varsdone; + } else +#endif /* 
defined(BCMHOSTVARS) */ + { + err = -1; + goto errout; + } + } +#if defined(BCM_ONE_NVRAM_SRC) + /* Discard hostvars if SROM parsing is successful, so only one nvram source + * will be used. + * Routers use combined srom/host nvram so shouldn't define BCM_ONE_NVRAM_SRC. + */ + else { + nvram_exit((void *)sih); /* free up global vars */ + } +#endif /* BCM_ONE_NVRAM_SRC */ + +varscont: + /* Bitmask for the sromrev */ + sr = 1 << sromrev; + + /* srom version check: Current valid versions are: + * 1-5, 8-11, 12, 13, 15, 16, 17, 18 SROM_MAXREV + * This is a bit mask of all valid SROM versions. + */ + if ((sr & 0x7bf3e) == 0) { + BS_ERROR(("Invalid SROM rev %d\n", sromrev)); + err = -2; + goto errout; + } + + ASSERT(vars != NULL); + ASSERT(count != NULL); + +#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) + srom_sromrev = sromrev; +#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */ + + /* freed in same function */ + base = vp = MALLOC_NOPERSIST(osh, MAXSZ_NVRAM_VARS); + ASSERT(vp != NULL); + if (!vp) { + err = -2; + goto errout; + } + + /* read variables from flash */ + if (flash) { + if ((err = initvars_flash(sih, osh, &vp, MAXSZ_NVRAM_VARS))) + goto errout; + goto varsdone; + } + + varbuf_init(&b, base, MAXSZ_NVRAM_VARS); + + /* parse SROM into name=value pairs. */ + _initvars_srom_pci(sromrev, srom, 0, &b); + + /* final nullbyte terminator */ + ASSERT(b.size >= 1); + vp = b.buf; + *vp++ = '\0'; + + ASSERT((vp - base) <= MAXSZ_NVRAM_VARS); + +varsdone: + err = initvars_table(osh, base, vp, vars, count); /* allocates buffer in 'vars' */ + +#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) + if (sromrev == 18) { + int caldata_wordoffset = srom[SROM18_CALDATA_OFFSET_LOC] / 2; + + if ((caldata_wordoffset != 0) && + (caldata_wordoffset + SROM_CALDATA_WORDS < SROM18_WORDS)) { + memcpy(caldata_array, srom + caldata_wordoffset, SROM18_CALDATA_WORDS * 2); + is_caldata_prsnt = TRUE; + } + } else if (sromrev == 16) { + int caldata_wordoffset = srom[SROM16_CALDATA_OFFSET_LOC] / 2; + + if ((caldata_wordoffset != 0) && + (caldata_wordoffset + SROM_CALDATA_WORDS < SROM16_WORDS)) { + memcpy(caldata_array, srom + caldata_wordoffset, SROM_CALDATA_WORDS * 2); + is_caldata_prsnt = TRUE; + } + } +#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */ + +#ifdef NVSRCX + if (sromrev != 0) + nvram_append((void *)sih, *vars, *count, VARBUF_PRIO_SROM); +#endif +#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) + if ((sromrev == 15) || (sromrev == 17)) { + uint32 caldata_size = get_srom_pci_caldata_size(sromrev); + + cal_wordoffset = getintvar(NULL, "caldata_offset")/2; + memcpy(caldata_array, srom + cal_wordoffset, caldata_size); + } +#endif +errout: +#if defined(BCMHOSTVARS) + if (base && (base != mfgsromvars)) +#else + if (base) +#endif /* defined(BCMHOSTVARS) */ + MFREE(osh, base, MAXSZ_NVRAM_VARS); + + MFREE(osh, srom, SROM_MAX); + return err; +} + +/** + * initvars_cis_pci() parses OTP CIS. This is specifically for PCIe full dongle that has SROM + * header plus CIS tuples programmed in OTP. + * Return error if the content is not in CIS format or OTP is not present. 
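+ * When compiled-in defaults are present (defvarslen != 0) they are tabled first,
+ * and the OTP CIS tuples are then parsed into a separate buffer by srom_parsecis().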
+ */ +static int +BCMATTACHFN(initvars_cis_pci)(si_t *sih, osl_t *osh, volatile void *curmap, + char **vars, uint *count) +{ + uint wsz = 0, sz = 0, base_len = 0; + void *oh = NULL; + int rc = BCME_OK; + uint16 *cisbuf = NULL; + uint8 *cis = NULL; +#if defined (BCMHOSTVARS) + char *vp = NULL; +#endif /* BCMHOSTVARS */ + char *base = NULL; + bool wasup; + uint32 min_res_mask = 0; + BCM_REFERENCE(curmap); + + /* Bail out if we've dealt with OTP/SPROM before! */ + if (srvars_inited) + goto exit; + + /* Turn on OTP if it's not already on */ + if (!(wasup = si_is_otp_powered(sih))) + si_otp_power(sih, TRUE, &min_res_mask); + + if (si_cis_source(sih) != CIS_OTP) + rc = BCME_NOTFOUND; + else if ((oh = otp_init(sih)) == NULL) + rc = BCME_ERROR; + else if (!(((BUSCORETYPE(sih->buscoretype) == PCIE2_CORE_ID) || otp_newcis(oh)) && + (otp_status(oh) & OTPS_GUP_HW))) { + /* OTP bit CIS format (507) not used by pcie core - only needed for sdio core */ + rc = BCME_NOTFOUND; + } else if ((sz = otp_size(oh)) != 0) { + if ((cisbuf = (uint16*)MALLOC_NOPERSIST(osh, sz))) { + /* otp_size() returns bytes, not words. */ + wsz = sz >> 1; + /* for 4389b0 (CCREV-70) sw region is before the hw region */ + if (CCREV(sih->ccrev) == 70) { + rc = otp_read_region(sih, OTP_SW_RGN, cisbuf, &wsz); + cis = (uint8*)cisbuf; + } else { + rc = otp_read_region(sih, OTP_HW_RGN, cisbuf, &wsz); + /* Bypass the HW header and signature */ + cis = (uint8*)(cisbuf + (otp_pcie_hwhdr_sz(sih) / 2)); + } + BS_ERROR(("initvars_cis_pci: Parsing CIS in OTP.\n")); + } else + rc = BCME_NOMEM; + } + + /* Restore original OTP state */ + if (!wasup) + si_otp_power(sih, FALSE, &min_res_mask); + + if (rc != BCME_OK) { + BS_ERROR(("initvars_cis_pci: Not CIS format\n")); + goto exit; + } + +#if defined (BCMHOSTVARS) + if (defvarslen) { + vp = mfgsromvars; + vp += defvarslen; + + /* allocates buffer in 'vars' */ + rc = initvars_table(osh, mfgsromvars, vp, &base, &base_len); + if (rc) + goto exit; + + *vars = base; + *count = base_len; + + BS_ERROR(("initvars_cis_pci external nvram %d bytes\n", defvarslen)); + } + +#endif /* BCMHOSTVARS */ + + /* Parse the CIS and allocate a(nother) buffer in 'vars' */ + rc = srom_parsecis(sih, osh, &cis, SROM_CIS_SINGLE, vars, count); + + srvars_inited = TRUE; +exit: + /* Clean up */ + if (base) + MFREE(osh, base, base_len); + if (cisbuf) + MFREE(osh, cisbuf, sz); + + /* return OK so the driver will load & use defaults if bad srom/otp */ + return rc; +} +#endif /* !defined(BCMDONGLEHOST) */ + +#ifdef BCMSDIO +#if !defined(BCMDONGLEHOST) +/** + * Read the SDIO cis and call parsecis to allocate and initialize the NVRAM vars buffer. + * Return 0 on success, nonzero on error. 
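+ * One CIS is read per SDIO function (0 through bcmsdh_query_iofnum()) and the
+ * whole set is handed to srom_parsecis() in a single call.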
+ */ +static int +BCMATTACHFN(initvars_cis_sdio)(si_t *sih, osl_t *osh, char **vars, uint *count) +{ + uint8 *cis[SBSDIO_NUM_FUNCTION + 1]; + uint fn, numfn; + int rc = 0; + + /* Using MALLOC here causes the Windows driver to crash Needs Investigating */ +#ifdef NDIS + uint8 cisd[SBSDIO_NUM_FUNCTION + 1][SBSDIO_CIS_SIZE_LIMIT]; +#endif + + numfn = bcmsdh_query_iofnum(NULL); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); + + for (fn = 0; fn <= numfn; fn++) { +#ifdef NDIS + cis[fn] = (uint8*)cisd[fn]; +#else + /* freed in same function */ + if ((cis[fn] = MALLOC_NOPERSIST(osh, SBSDIO_CIS_SIZE_LIMIT)) == NULL) { + rc = -1; + break; + } +#endif /* NDIS */ + + bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT); + + if (bcmsdh_cis_read(NULL, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT) != 0) { +#ifdef NDIS + /* nothing to do */ +#else + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); +#endif + rc = -2; + break; + } + } + + if (!rc) + rc = srom_parsecis(sih, osh, cis, fn, vars, count); + +#ifdef NDIS + /* nothing to do here */ +#else + while (fn-- > 0) + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); +#endif + + return (rc); +} +#endif /* !defined(BCMDONGLEHOST) */ + +/** set SDIO sprom command register */ +static int +BCMATTACHFN(sprom_cmd_sdio)(osl_t *osh, uint8 cmd) +{ + uint8 status = 0; + uint wait_cnt = 1000; + + /* write sprom command register */ + bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_CS, cmd, NULL); + + /* wait status */ + while (wait_cnt--) { + status = bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_CS, NULL); + if (status & SBSDIO_SPROM_DONE) + return 0; + } + + return 1; +} + +/** read a word from the SDIO srom */ +static int +sprom_read_sdio(osl_t *osh, uint16 addr, uint16 *data) +{ + uint8 addr_l, addr_h, data_l, data_h; + + addr_l = (uint8)((addr * 2) & 0xff); + addr_h = (uint8)(((addr * 2) >> 8) & 0xff); + + /* set address */ + bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_HIGH, addr_h, NULL); + bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_LOW, addr_l, NULL); + + /* do read */ + if (sprom_cmd_sdio(osh, SBSDIO_SPROM_READ)) + return 1; + + /* read data */ + data_h = bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_HIGH, NULL); + data_l = bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_LOW, NULL); + + *data = (data_h << 8) | data_l; + return 0; +} + +#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) +/** write a word to the SDIO srom */ +static int +sprom_write_sdio(osl_t *osh, uint16 addr, uint16 data) +{ + uint8 addr_l, addr_h, data_l, data_h; + + addr_l = (uint8)((addr * 2) & 0xff); + addr_h = (uint8)(((addr * 2) >> 8) & 0xff); + data_l = (uint8)(data & 0xff); + data_h = (uint8)((data >> 8) & 0xff); + + /* set address */ + bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_HIGH, addr_h, NULL); + bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_LOW, addr_l, NULL); + + /* write data */ + bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_HIGH, data_h, NULL); + bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_LOW, data_l, NULL); + + /* do write */ + return sprom_cmd_sdio(osh, SBSDIO_SPROM_WRITE); +} +#endif /* defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) */ +#endif /* BCMSDIO */ + +#if !defined(BCMDONGLEHOST) +#ifdef BCMSPI +/** + * Read the SPI cis and call parsecis to allocate and initialize the NVRAM vars buffer. + * Return 0 on success, nonzero on error. 
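+ * Unlike the SDIO path above, only the function-1 CIS is read here.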
+ */
+static int
+BCMATTACHFN(initvars_cis_spi)(si_t *sih, osl_t *osh, char **vars, uint *count)
+{
+	uint8 *cis;
+	int rc;
+
+	/* Using MALLOC here causes the Windows driver to crash; needs investigating */
+#ifdef NDIS
+	uint8 cisd[SBSDIO_CIS_SIZE_LIMIT];
+	cis = (uint8*)cisd;
+#else
+	/* freed in same function */
+	if ((cis = MALLOC_NOPERSIST(osh, SBSDIO_CIS_SIZE_LIMIT)) == NULL) {
+		return -1;
+	}
+#endif /* NDIS */
+
+	bzero(cis, SBSDIO_CIS_SIZE_LIMIT);
+
+	if (bcmsdh_cis_read(NULL, SDIO_FUNC_1, cis, SBSDIO_CIS_SIZE_LIMIT) != 0) {
+#ifdef NDIS
+		/* nothing to do */
+#else
+		MFREE(osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+#endif /* NDIS */
+		return -2;
+	}
+
+	rc = srom_parsecis(sih, osh, &cis, SDIO_FUNC_1, vars, count);
+
+#ifdef NDIS
+	/* nothing to do here */
+#else
+	MFREE(osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+#endif
+
+	return (rc);
+}
+#endif /* BCMSPI */
+#endif /* !defined(BCMDONGLEHOST) */
+
+/** Return sprom size in 16-bit words */
+uint
+srom_size(si_t *sih, osl_t *osh)
+{
+	uint size = (SROM16_SIGN + 1) * 2; /* must be big enough for SROM16 */
+	return size;
+}
+
+/**
+ * initvars are different for BCMUSBDEV and BCMSDIODEV. This is OK when supporting both at
+ * the same time, but only because all of the code is in attach functions and not in ROM.
+ */
+
+#if defined(BCMUSBDEV_ENABLED)
+#ifdef BCM_DONGLEVARS
+/*** reads a CIS structure (so not an SROM-MAP structure) from either OTP or SROM */
+static int
+BCMATTACHFN(initvars_srom_si_bl)(si_t *sih, osl_t *osh, volatile void *curmap,
+	char **vars, uint *varsz)
+{
+	int sel = 0;		/* where to read srom/cis: 0 - none, 1 - otp, 2 - sprom */
+	uint sz = 0;		/* srom size in bytes */
+	void *oh = NULL;
+	int rc = BCME_OK;
+	uint16 prio = VARBUF_PRIO_INVALID;
+
+	if ((oh = otp_init(sih)) != NULL && (otp_status(oh) & OTPS_GUP_SW)) {
+		/* Access OTP if it is present, powered on, and programmed */
+		sz = otp_size(oh);
+		sel = 1;
+	} else if ((sz = srom_size(sih, osh)) != 0) {
+		/* Access the SPROM if it is present */
+		sz <<= 1;
+		sel = 2;
+	}
+
+	/* Read CIS in OTP/SPROM */
+	if (sel != 0) {
+		uint16 *srom;
+		uint8 *body = NULL;
+		uint otpsz = sz;
+
+		ASSERT(sz);
+
+		/* Allocate memory */
+		if ((srom = (uint16 *)MALLOC(osh, sz)) == NULL)
+			return BCME_NOMEM;
+
+		/* Read CIS */
+		switch (sel) {
+		case 1:
+			rc = otp_read_region(sih, OTP_SW_RGN, srom, &otpsz);
+			sz = otpsz;
+			body = (uint8 *)srom;
+			prio = VARBUF_PRIO_OTP;
+			break;
+		case 2:
+			rc = srom_read(sih, SI_BUS, curmap, osh, 0, sz, srom, TRUE);
+			/* sprom has 8 byte h/w header */
+			body = (uint8 *)srom + SBSDIO_SPROM_CIS_OFFSET;
+			prio = VARBUF_PRIO_SROM;
+			break;
+		default:
+			/* impossible to come here */
+			ASSERT(0);
+			break;
+		}
+
+		/* Parse CIS */
+		if (rc == BCME_OK) {
+			/* each word is in host endian */
+			htol16_buf((uint8 *)srom, sz);
+			ASSERT(body);
+			rc = srom_parsecis(sih, osh, &body, SROM_CIS_SINGLE, vars, varsz);
+		}
+
+		MFREE(osh, srom, sz);	/* Clean up */
+
+		/* Make SROM variables global */
+		if (rc == BCME_OK) {
+			nvram_append((void *)sih, *vars, *varsz, prio);
+			DONGLE_STORE_VARS_OTP_PTR(*vars);
+		}
+	}
+
+	return BCME_OK;
+}
+#endif /* #ifdef BCM_DONGLEVARS */
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the 1st variant for chips with
+ * an active USB interface. It is called only for bus types SI_BUS, and only for CIS
+ * format in SPROM and/or OTP. Reads OTP or SPROM (bootloader only) and appends parsed contents to
+ * caller supplied var/value pairs.
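+ * The actual OTP/SPROM read is done by initvars_srom_si_bl() above; this wrapper
+ * mainly guards against repeated initialization via srvars_inited.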
+ */ +static int +BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap, + char **vars, uint *varsz) +{ + +#if defined(BCM_DONGLEVARS) + BCM_REFERENCE(osh); + BCM_REFERENCE(sih); + BCM_REFERENCE(curmap); +#endif + + /* Bail out if we've dealt with OTP/SPROM before! */ + if (srvars_inited) + goto exit; + +#ifdef BCM_DONGLEVARS /* this flag should be defined for usb bootloader, to read OTP or SROM */ + if (BCME_OK != initvars_srom_si_bl(sih, osh, curmap, vars, varsz)) /* CIS format only */ + return BCME_ERROR; +#endif + + /* update static local var to skip for next call */ + srvars_inited = TRUE; + +exit: + /* Tell the caller there is no individual SROM variables */ + *vars = NULL; + *varsz = 0; + + /* return OK so the driver will load & use defaults if bad srom/otp */ + return BCME_OK; +} + +#elif defined(BCMSDIODEV_ENABLED) + +#ifdef BCM_DONGLEVARS +static uint8 BCMATTACHDATA(defcis4369)[] = { 0x20, 0x4, 0xd0, 0x2, 0x64, 0x43, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis43012)[] = { 0x20, 0x4, 0xd0, 0x2, 0x04, 0xA8, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis43013)[] = { 0x20, 0x4, 0xd0, 0x2, 0x05, 0xA8, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis43014)[] = { 0x20, 0x4, 0xd0, 0x2, 0x06, 0xA8, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis4362)[] = { 0x20, 0x4, 0xd0, 0x2, 0x62, 0x43, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis4378)[] = { 0x20, 0x4, 0xd0, 0x2, 0x78, 0x43, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis4385)[] = { 0x20, 0x4, 0xd0, 0x2, 0x85, 0x43, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis4387)[] = { 0x20, 0x4, 0xd0, 0x2, 0x78, 0x43, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis4388)[] = { 0x20, 0x4, 0xd0, 0x2, 0x88, 0x43, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis4389)[] = { 0x20, 0x4, 0xd0, 0x2, 0x89, 0x43, 0xff, 0xff }; +static uint8 BCMATTACHDATA(defcis4397)[] = { 0x20, 0x4, 0xd0, 0x2, 0x97, 0x43, 0xff, 0xff }; + +/** + * initvars_srom_si() is defined multiple times in this file. This is the 2nd variant for chips with + * an active SDIOd interface using DONGLEVARS + */ +static int +BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap, + char **vars, uint *varsz) +{ + int cis_src; + uint msz = 0; + uint sz = 0; + void *oh = NULL; + int rc = BCME_OK; + bool new_cisformat = FALSE; + + uint16 *cisbuf = NULL; + + /* # sdiod fns + common + extra */ + uint8 *cis[SBSDIO_NUM_FUNCTION + 2] = { 0 }; + + uint ciss = 0; + uint8 *defcis; + uint hdrsz; + uint16 prio = VARBUF_PRIO_INVALID; + +#if defined(BCMSDIODEV_ENABLED) && defined(ATE_BUILD) + if (si_chipcap_sdio_ate_only(sih)) { + BS_ERROR(("ATE BUILD: skip cis based var init\n")); + goto exit; + } +#endif /* BCMSDIODEV_ENABLED && ATE_BUILD */ + + /* Bail out if we've dealt with OTP/SPROM before! 
*/ + if (srvars_inited) + goto exit; + + /* Initialize default and cis format count */ + switch (CHIPID(sih->chip)) { + case BCM4369_CHIP_GRPID: ciss = 1; defcis = defcis4369; hdrsz = 4; break; + case BCM4378_CHIP_GRPID: ciss = 1; defcis = defcis4378; hdrsz = 4; break; + case BCM4385_CHIP_GRPID: ciss = 1; defcis = defcis4385; hdrsz = 4; break; + case BCM4387_CHIP_GRPID: ciss = 1; defcis = defcis4387; hdrsz = 4; break; + case BCM4388_CHIP_GRPID: ciss = 1; defcis = defcis4388; hdrsz = 4; break; + case BCM4389_CHIP_GRPID: ciss = 1; defcis = defcis4389; hdrsz = 4; break; + case BCM4397_CHIP_GRPID: ciss = 1; defcis = defcis4397; hdrsz = 4; break; + case BCM43012_CHIP_ID: ciss = 1; defcis = defcis43012; hdrsz = 4; break; + case BCM43013_CHIP_ID: ciss = 1; defcis = defcis43013; hdrsz = 4; break; + case BCM43014_CHIP_ID: ciss = 1; defcis = defcis43014; hdrsz = 4; break; + case BCM4362_CHIP_GRPID: ciss = 1; defcis = defcis4362; hdrsz = 4; break; + default: + BS_ERROR(("initvars_srom_si: Unknown chip 0x%04x\n", CHIPID(sih->chip))); + return BCME_ERROR; + } + if (sih->ccrev >= 36) { + uint32 otplayout; + if (AOB_ENAB(sih)) { + otplayout = si_corereg(sih, si_findcoreidx(sih, GCI_CORE_ID, 0), + OFFSETOF(gciregs_t, otplayout), 0, 0); + } else { + otplayout = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otplayout), + 0, 0); + } + if (otplayout & OTP_CISFORMAT_NEW) { + ciss = 1; + hdrsz = 2; + new_cisformat = TRUE; + } + else { + ciss = 3; + hdrsz = 12; + } + } + + cis_src = si_cis_source(sih); + switch (cis_src) { + case CIS_SROM: + sz = srom_size(sih, osh) << 1; + prio = VARBUF_PRIO_SROM; + break; + case CIS_OTP: + /* Note that for *this* type of OTP -- which otp_read_region() + * can operate on -- otp_size() returns bytes, not words. + */ + if (((oh = otp_init(sih)) != NULL) && (otp_status(oh) & OTPS_GUP_HW)) + sz = otp_size(oh); + prio = VARBUF_PRIO_OTP; + break; + } + + if (sz != 0) { + /* freed in same function */ + if ((cisbuf = (uint16*)MALLOC_NOPERSIST(osh, sz)) == NULL) + return BCME_NOMEM; + msz = sz; + + switch (cis_src) { + case CIS_SROM: + rc = srom_read(sih, SI_BUS, curmap, osh, 0, sz, cisbuf, FALSE); + break; + case CIS_OTP: + sz >>= 1; + rc = otp_read_region(sih, OTP_HW_RGN, cisbuf, &sz); + sz <<= 1; + break; + } + + ASSERT(sz > hdrsz); + if (rc == BCME_OK) { + if ((cisbuf[0] == 0xffff) || (cisbuf[0] == 0)) { + MFREE(osh, cisbuf, msz); + } else if (new_cisformat) { + cis[0] = (uint8*)(cisbuf + hdrsz); + } else { + cis[0] = (uint8*)cisbuf + hdrsz; + cis[1] = (uint8*)cisbuf + hdrsz + + (cisbuf[1] >> 8) + ((cisbuf[2] & 0x00ff) << 8) - + SBSDIO_CIS_BASE_COMMON; + cis[2] = (uint8*)cisbuf + hdrsz + + cisbuf[3] - SBSDIO_CIS_BASE_COMMON; + cis[3] = (uint8*)cisbuf + hdrsz + + cisbuf[4] - SBSDIO_CIS_BASE_COMMON; + ASSERT((cis[1] >= cis[0]) && (cis[1] < (uint8*)cisbuf + sz)); + ASSERT((cis[2] >= cis[0]) && (cis[2] < (uint8*)cisbuf + sz)); + ASSERT(((cis[3] >= cis[0]) && (cis[3] < (uint8*)cisbuf + sz)) || + (ciss <= 3)); + } + } + } + + /* Use default if strapped to, or strapped source empty */ + if (cisbuf == NULL) { + ciss = 1; + cis[0] = defcis; + } + + /* Parse the CIS */ + if (rc == BCME_OK) { + if ((rc = srom_parsecis(sih, osh, cis, ciss, vars, varsz)) == BCME_OK) { + nvram_append((void *)sih, *vars, *varsz, prio); + DONGLE_STORE_VARS_OTP_PTR(*vars); + } + } + + /* Clean up */ + if (cisbuf != NULL) + MFREE(osh, cisbuf, msz); + + srvars_inited = TRUE; +exit: + /* Tell the caller there is no individual SROM variables */ + *vars = NULL; + *varsz = 0; + + /* return OK so the driver will load & 
use defaults if bad srom/otp */
+	return BCME_OK;
+} /* initvars_srom_si */
+#else /* BCM_DONGLEVARS */
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the variant for chips with an
+ * active SDIOd interface but without BCM_DONGLEVARS.
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+	char **vars, uint *varsz)
+{
+	*vars = NULL;
+	*varsz = 0;
+	return BCME_OK;
+}
+#endif /* BCM_DONGLEVARS */
+
+#elif defined(BCMPCIEDEV_ENABLED)
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the variant for chips with an
+ * active PCIe interface *and* that use OTP for NVRAM storage.
+ *
+ * On success, a buffer containing var/val values has been allocated in parameter 'vars'.
+ * An ifdef should be added here to choose whether the host wants the dongle to parse the
+ * sprom or not.
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+	char **vars, uint *varsz)
+{
+#ifdef BCM_DONGLEVARS
+	void *oh = NULL;
+	uint8 *cis;
+	uint sz = 0;
+	int rc;
+
+	if (si_cis_source(sih) != CIS_OTP)
+		return BCME_OK;
+
+	if (((oh = otp_init(sih)) != NULL) && (otp_status(oh) & OTPS_GUP_HW))
+		sz = otp_size(oh);
+	if (sz == 0)
+		return BCME_OK;
+
+	if ((cis = MALLOC(osh, sz)) == NULL)
+		return BCME_NOMEM;
+	sz >>= 1;
+	rc = otp_read_region(sih, OTP_HW_RGN, (uint16 *)cis, &sz);
+	sz <<= 1;
+
+	/* account for the Hardware header */
+	if (sz == 128)
+		return BCME_OK;
+
+	cis += 128;
+
+	/* need to find a better way to identify sprom format content and skip parsing */
+	if (*(uint16 *)cis == SROM11_SIGNATURE) {
+		return BCME_OK;
+	}
+
+	if ((rc = srom_parsecis(sih, osh, &cis, SROM_CIS_SINGLE, vars, varsz)) == BCME_OK)
+		nvram_append((void *)sih, *vars, *varsz, VARBUF_PRIO_OTP);
+
+	return rc;
+#else /* BCM_DONGLEVARS */
+	*vars = NULL;
+	*varsz = 0;
+	return BCME_OK;
+#endif /* BCM_DONGLEVARS */
+}
+#else /* !BCMUSBDEV && !BCMSDIODEV && !BCMPCIEDEV */
+
+#ifndef BCMDONGLEHOST
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the variant for:
+ * !BCMDONGLEHOST && !BCMUSBDEV && !BCMSDIODEV && !BCMPCIEDEV
+ * So this function is defined for PCI (not PCIe) builds that are also non DHD builds.
+ * On success, a buffer containing var/val values has been allocated in parameter 'vars'.
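+ * Variables are pulled from the flash NVRAM section via initvars_flash_si().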
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+	char **vars, uint *varsz)
+{
+	/* Search flash nvram section for srom variables */
+	BCM_REFERENCE(osh);
+	BCM_REFERENCE(curmap);
+	return initvars_flash_si(sih, vars, varsz);
+} /* initvars_srom_si */
+#endif /* !BCMDONGLEHOST */
+#endif /* !BCMUSBDEV && !BCMSDIODEV && !BCMPCIEDEV */
+
+void
+BCMATTACHFN(srom_var_deinit)(si_t *sih)
+{
+	BCM_REFERENCE(sih);
+
+	srvars_inited = FALSE;
+}
+
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+bool
+BCMATTACHFN(srom_caldata_prsnt)(si_t *sih)
+{
+	return is_caldata_prsnt;
+}
+
+int
+BCMATTACHFN(srom_get_caldata)(si_t *sih, uint16 *srom)
+{
+	if (!is_caldata_prsnt) {
+		return BCME_ERROR;
+	}
+	if (srom_sromrev == 18) {
+		memcpy(srom, caldata_array, SROM18_CALDATA_WORDS * 2);
+	} else {
+		memcpy(srom, caldata_array, SROM_CALDATA_WORDS * 2);
+	}
+	return BCME_OK;
+}
+#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */
diff --git a/bcmdhd.101.10.361.x/bcmstdlib.c b/bcmdhd.101.10.361.x/bcmstdlib.c
new file mode 100755
index 0000000..9e4f3a6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmstdlib.c
@@ -0,0 +1,1251 @@
+/*
+ * stdlib support routines for self-contained images.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+/*
+ * bcmstdlib.c should be used only to construct an OSL, or alone without any OSL.
+ * It should not be used with arbitrary OSLs, as there could be a conflict
+ * with some of the routines defined here.
+ */
+
+/*
+ * Define BCMSTDLIB_WIN32_APP if this is a Win32 Application compile
+ */
+#if defined(_WIN32) && !defined(NDIS) && !defined(EFI)
+#define BCMSTDLIB_WIN32_APP 1
+#endif /* _WIN32 && !NDIS && !EFI */
+
+/*
+ * Define BCMSTDLIB_SNPRINTF_ONLY if we only want snprintf & vsnprintf implementations
+ */
+#if defined(_WIN32) && !defined(EFI)
+#define BCMSTDLIB_SNPRINTF_ONLY 1
+#endif /* _WIN32 && !EFI */
+
+#include <typedefs.h>
+#ifdef BCMSTDLIB_WIN32_APP
+/* for size_t definition */
+#include <stddef.h>
+#endif
+#include <stdarg.h>
+#ifndef BCMSTDLIB_WIN32_APP
+#include <bcmstdlib.h>
+#endif
+#include <bcmutils.h>
+
+/* Don't use compiler builtins for stdlib APIs within the implementation of the stdlib itself.
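+ * (With BCM_FORTIFY_SOURCE the headers can map memcpy, snprintf, etc. to checked
+ * builtins; the #undefs below restore the plain symbols this file defines.)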
+ */
+#if defined(BCM_FORTIFY_SOURCE) || defined(BCM_STDLIB_USE_BUILTINS)
+#undef memcpy
+#undef memmove
+#undef memset
+#undef strncpy
+#undef snprintf
+#undef vsnprintf
+#endif /* BCM_FORTIFY_SOURCE || BCM_STDLIB_USE_BUILTINS */
+
+#ifdef HND_PRINTF_THREAD_SAFE
+#include <osl_ext.h>
+#include
+#include
+
+/* mutex macros for thread safety */
+#define HND_PRINTF_MUTEX_DECL(mutex)		static OSL_EXT_MUTEX_DECL(mutex)
+#define HND_PRINTF_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
+#define HND_PRINTF_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
+#define HND_PRINTF_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
+#define HND_PRINTF_MUTEX_RELEASE(mutex)		osl_ext_mutex_release(mutex)
+
+HND_PRINTF_MUTEX_DECL(printf_mutex);
+int in_isr_handler = 0, in_trap_handler = 0, in_fiq_handler = 0;
+
+bool
+printf_lock_init(void)
+{
+	/* create mutex for critical section locking */
+	if (HND_PRINTF_MUTEX_CREATE("printf_mutex", &printf_mutex) != OSL_EXT_SUCCESS)
+		return FALSE;
+	return TRUE;
+}
+
+bool
+printf_lock_cleanup(void)
+{
+	/* delete the mutex used for critical section locking */
+	if (HND_PRINTF_MUTEX_DELETE(&printf_mutex) != OSL_EXT_SUCCESS)
+		return FALSE;
+	return TRUE;
+}
+
+/* returns TRUE if allowed to proceed, FALSE to discard.
+ * printf from an isr hook or fiq hook is not allowed due to the IRQ_MODE and FIQ_MODE
+ * stack size limitation.
+ */
+static bool
+printf_lock(void)
+{
+
+	/* discard for irq or fiq context, we need to keep irq/fiq stack small. */
+	if (in_isr_handler || in_fiq_handler)
+		return FALSE;
+
+	/* allow printf in trap handler, proceed without mutex. */
+	if (in_trap_handler)
+		return TRUE;
+
+	/* if not in isr or trap, then go thread-protection with mutex. */
+	if (HND_PRINTF_MUTEX_ACQUIRE(&printf_mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return FALSE;
+	else
+		return TRUE;
+}
+
+static void
+printf_unlock(void)
+{
+	if (in_isr_handler || in_fiq_handler)
+		return;
+
+	if (in_trap_handler)
+		return;
+
+	if (HND_PRINTF_MUTEX_RELEASE(&printf_mutex) != OSL_EXT_SUCCESS)
+		return;
+}
+
+#else
+#define printf_lock()	(TRUE)
+#define printf_unlock()
+#endif /* HND_PRINTF_THREAD_SAFE */
+
+#ifdef BCMSTDLIB_WIN32_APP
+
+/* for a WIN32 application, use _vsnprintf as basis of vsnprintf/snprintf to
+ * support full set of format specifications.
+ */
+
+int
+BCMPOSTTRAPFN(vsnprintf)(char *buf, size_t bufsize, const char *fmt, va_list ap)
+{
+	int r;
+
+	r = _vsnprintf(buf, bufsize, fmt, ap);
+
+	/* Microsoft _vsnprintf() will not null terminate on overflow,
+	 * so null terminate at buffer end on error
+	 */
+	if (r < 0 && bufsize > 0)
+		buf[bufsize - 1] = '\0';
+
+	return r;
+}
+
+int
+BCMPOSTTRAPFN(snprintf)(char *buf, size_t bufsize, const char *fmt, ...)
+{ + va_list ap; + int r; + + va_start(ap, fmt); + r = vsnprintf(buf, bufsize, fmt, ap); + va_end(ap); + + return r; +} + +#else /* BCMSTDLIB_WIN32_APP */ + +#if !defined(BCMROMOFFLOAD_EXCLUDE_STDLIB_FUNCS) + +static const char hex_upper[17] = "0123456789ABCDEF"; +static const char hex_lower[17] = "0123456789abcdef"; + +static int +BCMPOSTTRAPFN(__atolx)(char *buf, char * end, unsigned long num, unsigned long radix, int width, + const char *digits, int zero_pad) +{ + char buffer[32]; + char *op; + int retval; + + op = &buffer[0]; + retval = 0; + + do { + *op++ = digits[num % radix]; + retval++; + num /= radix; + } while (num != 0); + + if (width && (width > retval) && zero_pad) { + width = width - retval; + while (width) { + *op++ = '0'; + retval++; + width--; + } + } + + while (op != buffer) { + op--; + if (buf <= end) + *buf = *op; + buf++; + } + + return retval; +} + +static int +BCMPOSTTRAPFN(__atox)(char *buf, char * end, unsigned int num, unsigned int radix, int width, + const char *digits, int zero_pad) +{ + char buffer[16]; + char *op; + int retval; + + op = &buffer[0]; + retval = 0; + + do { + *op++ = digits[num % radix]; + retval++; + num /= radix; + } while (num != 0); + + if (width && (width > retval) && zero_pad) { + width = width - retval; + while (width) { + *op++ = '0'; + retval++; + width--; + } + } + + while (op != buffer) { + op--; + if (buf <= end) + *buf = *op; + buf++; + } + + return retval; +} + +int +BCMPOSTTRAPFN(vsnprintf)(char *buf, size_t size, const char *fmt, va_list ap) +{ + char *optr; + char *end; + const char *iptr, *tmpptr; + unsigned int x; + int i; + int islong; + int width; + int width2 = 0; + int hashash = 0; + int zero_pad; + unsigned long ul = 0; + long int li = 0; + + optr = buf; + end = buf + size - 1; + iptr = fmt; + + if (FWSIGN_ENAB()) { + return 0; + } + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + while (*iptr) { + zero_pad = 0; + if (*iptr != '%') { + if (optr <= end) + *optr = *iptr; + ++optr; + ++iptr; + continue; + } + + iptr++; + + if (*iptr == '#') { + hashash = 1; + iptr++; + } + if (*iptr == '-') { + iptr++; + } + + if (*iptr == '0') { + zero_pad = 1; + iptr++; + } + + width = 0; + while (*iptr && bcm_isdigit(*iptr)) { + width += (*iptr - '0'); + iptr++; + if (bcm_isdigit(*iptr)) + width *= 10; + } + if (*iptr == '.') { + iptr++; + width2 = 0; + while (*iptr && bcm_isdigit(*iptr)) { + width2 += (*iptr - '0'); + iptr++; + if (bcm_isdigit(*iptr)) width2 *= 10; + } + } + + islong = 0; + if (*iptr == 'l') { + islong++; + iptr++; + if (*iptr == 'l') { + ++islong; + ++iptr; + } + } + + switch (*iptr) { + case 's': + tmpptr = va_arg(ap, const char *); + if (!tmpptr) + tmpptr = "(null)"; + if ((width == 0) & (width2 == 0)) { + while (*tmpptr) { + if (optr <= end) + *optr = *tmpptr; + ++optr; + ++tmpptr; + } + break; + } + while (width && *tmpptr) { + if (optr <= end) + *optr = *tmpptr; + ++optr; + ++tmpptr; + width--; + } + while (width) { + if (optr <= end) + *optr = ' '; + ++optr; + width--; + } + break; + case 'd': + case 'i': + if (!islong) { + i = va_arg(ap, int); + if (i < 0) { + if (optr <= end) + *optr = '-'; + ++optr; + i = -i; + } + optr += __atox(optr, end, i, 10, width, hex_upper, zero_pad); + } else { + li = va_arg(ap, long int); + if (li < 0) { + if (optr <= end) + *optr = '-'; + ++optr; + li = -li; + } + optr += __atolx(optr, end, li, 10, width, hex_upper, zero_pad); + } + break; + case 'u': + if (!islong) { + x = va_arg(ap, unsigned int); + optr += __atox(optr, end, x, 10, width, hex_upper, 
zero_pad); + } else { + ul = va_arg(ap, unsigned long); + optr += __atolx(optr, end, ul, 10, width, hex_upper, zero_pad); + } + break; + case 'X': + case 'x': + if (hashash) { + *optr++ = '0'; + *optr++ = *iptr; + } + if (!islong) { + x = va_arg(ap, unsigned int); + optr += __atox(optr, end, x, 16, width, + (*iptr == 'X') ? hex_upper : hex_lower, zero_pad); + } else { + ul = va_arg(ap, unsigned long); + optr += __atolx(optr, end, ul, 16, width, + (*iptr == 'X') ? hex_upper : hex_lower, zero_pad); + } + break; + case 'p': + case 'P': + x = va_arg(ap, unsigned int); + optr += __atox(optr, end, x, 16, 8, + (*iptr == 'P') ? hex_upper : hex_lower, zero_pad); + break; + case 'c': + x = va_arg(ap, int); + if (optr <= end) + *optr = x & 0xff; + optr++; + break; + + default: + if (optr <= end) + *optr = *iptr; + optr++; + break; + } + iptr++; + } + + if (optr <= end) { + *optr = '\0'; + return (int)(optr - buf); + } else { + *end = '\0'; + return (int)(end - buf); + } +} + +int +BCMPOSTTRAPFN(snprintf)(char *buf, size_t bufsize, const char *fmt, ...) +{ + va_list ap; + int r; + + if (FWSIGN_ENAB()) { + return 0; + } + + va_start(ap, fmt); + r = vsnprintf(buf, bufsize, fmt, ap); + va_end(ap); + + return r; +} +#endif /* !BCMROMOFFLOAD_EXCLUDE_STDLIB_FUNCS */ + +#endif /* BCMSTDLIB_WIN32_APP */ + +#ifndef BCMSTDLIB_SNPRINTF_ONLY +int +BCMPOSTTRAPFN(vsprintf)(char *buf, const char *fmt, va_list ap) +{ + if (FWSIGN_ENAB()) { + return 0; + } + return (vsnprintf(buf, INT_MAX, fmt, ap)); +} + +int +BCMPOSTTRAPFN(sprintf)(char *buf, const char *fmt, ...) +{ + va_list ap; + int count; + + if (FWSIGN_ENAB()) { + return 0; + } + + va_start(ap, fmt); + count = vsprintf(buf, fmt, ap); + va_end(ap); + + return count; +} + +#if !defined(EFI) || !defined(COMPILER_INTRINSICS_LIB) +void * +memmove(void *dest, const void *src, size_t n) +{ + /* only use memcpy if there is no overlap. 
otherwise copy each byte in a safe sequence */ + if (((const char *)src >= (char *)dest + n) || ((const char *)src + n <= (char *)dest)) { + return memcpy(dest, src, n); + } + + /* Overlapping copy forward or backward */ + if (src < dest) { + unsigned char *d = (unsigned char *)dest + (n - 1); + const unsigned char *s = (const unsigned char *)src + (n - 1); + while (n) { + *d-- = *s--; + n--; + } + } else if (src > dest) { + unsigned char *d = (unsigned char *)dest; + const unsigned char *s = (const unsigned char *)src; + while (n) { + *d++ = *s++; + n--; + } + } + + return dest; +} +#endif /* !EFI || !COMPILER_INTRINSICS_LIB */ + +#ifndef EFI +int +memcmp(const void *s1, const void *s2, size_t n) +{ + const unsigned char *ss1; + const unsigned char *ss2; + + ss1 = (const unsigned char *)s1; + ss2 = (const unsigned char *)s2; + + while (n) { + if (*ss1 < *ss2) + return -1; + if (*ss1 > *ss2) + return 1; + ss1++; + ss2++; + n--; + } + + return 0; +} + +/* Skip over functions that are being used from DriverLibrary to save space */ +char * +strncpy(char *dest, const char *src, size_t n) +{ + char *endp; + char *p; + + p = dest; + endp = p + n; + + while (p != endp && (*p++ = *src++) != '\0') + ; + + /* zero fill remainder */ + bzero(p, (endp - p)); + + return dest; +} + +size_t +BCMPOSTTRAPFN(strlen)(const char *s) +{ + size_t n = 0; + + while (*s) { + s++; + n++; + } + + return n; +} + +int +BCMPOSTTRAPFN(strcmp)(const char *s1, const char *s2) +{ + while (*s2 && *s1) { + if (*s1 < *s2) + return -1; + if (*s1 > *s2) + return 1; + s1++; + s2++; + } + + if (*s1 && !*s2) + return 1; + if (!*s1 && *s2) + return -1; + return 0; +} +#endif /* EFI */ + +int +strncmp(const char *s1, const char *s2, size_t n) +{ + while (*s2 && *s1 && n) { + if (*s1 < *s2) + return -1; + if (*s1 > *s2) + return 1; + s1++; + s2++; + n--; + } + + if (!n) + return 0; + if (*s1 && !*s2) + return 1; + if (!*s1 && *s2) + return -1; + return 0; +} + +char * +strchr(const char *str, int c) +{ + const char *x = str; + + while (*x != (char)c) { + if (*x++ == '\0') + return (NULL); + } + + return DISCARD_QUAL(x, char); +} + +char * +strrchr(const char *str, int c) +{ + const char *save = NULL; + + do { + if (*str == (char)c) + save = str; + } while (*str++ != '\0'); + + return DISCARD_QUAL(save, char); +} + +/* Skip over functions that are being used from DriverLibrary to save space */ +#ifndef EFI +char * +strstr(const char *s, const char *substr) +{ + int substr_len = strlen(substr); + + for (; *s; s++) + if (strncmp(s, substr, substr_len) == 0) + return DISCARD_QUAL(s, char); + + return NULL; +} +#endif /* EFI */ + +size_t +strspn(const char *s, const char *accept) +{ + uint count = 0; + + while (s[count] && strchr(accept, s[count])) + count++; + + return count; +} + +size_t +strcspn(const char *s, const char *reject) +{ + uint count = 0; + + while (s[count] && !strchr(reject, s[count])) + count++; + + return count; +} + +void * +memchr(const void *s, int c, size_t n) +{ + if (n != 0) { + const unsigned char *ptr = s; + + do { + if (*ptr == (unsigned char)c) + return DISCARD_QUAL(ptr, void); + ptr++; + n--; + } while (n != 0); + } + return NULL; +} + +unsigned long +strtoul(const char *cp, char **endp, int base) +{ + ulong result, value; + bool minus; + + minus = FALSE; + + while (bcm_isspace(*cp)) + cp++; + + if (cp[0] == '+') + cp++; + else if (cp[0] == '-') { + minus = TRUE; + cp++; + } + + if (base == 0) { + if (cp[0] == '0') { + if ((cp[1] == 'x') || (cp[1] == 'X')) { + base = 16; + cp = &cp[2]; + } else { + base = 8; + 
cp = &cp[1]; + } + } else + base = 10; + } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { + cp = &cp[2]; + } + + result = 0; + + while (bcm_isxdigit(*cp) && + (value = bcm_isdigit(*cp) ? *cp - '0' : bcm_toupper(*cp) - 'A' + 10) < + (ulong) base) { + result = result * base + value; + cp++; + } + + if (minus) + result = (ulong)(result * -1); + + if (endp) + *endp = DISCARD_QUAL(cp, char); + + return (result); +} + +#ifdef EFI +int +putchar(int c) +{ + return putc(c, stdout); +} + +int +puts(const char *s) +{ + char c; + + while ((c = *s++)) + putchar(c); + + putchar('\n'); + + return 0; +} + +#else /* !EFI */ + +/* memset is not in ROM offload because it is used directly by the compiler in + * structure assignments/character array initialization with "". + */ +void * +BCMPOSTTRAPFN(memset)(void *dest, int c, size_t n) +{ + uint32 w, *dw; + unsigned char *d; + + dw = (uint32 *)dest; + + /* 8 min because we have to create w */ + if ((n >= 8) && (((uintptr)dest & 3) == 0)) { + if (c == 0) + w = 0; + else { + unsigned char ch; + + ch = (unsigned char)(c & 0xff); + w = (ch << 8) | ch; + w |= w << 16; + } + while (n >= 4) { + *dw++ = w; + n -= 4; + } + } + d = (unsigned char *)dw; + + while (n) { + *d++ = (unsigned char)c; + n--; + } + + return dest; +} + +/* memcpy is not in ROM offload because it is used directly by the compiler in + * structure assignments. + */ +#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7A__) +void * +BCMPOSTTRAPFN(memcpy)(void *dest, const void *src, size_t n) +{ + uint32 *dw; + const uint32 *sw; + unsigned char *d; + const unsigned char *s; + + sw = (const uint32 *)src; + dw = (uint32 *)dest; + + if (n >= 4 && ((uintptr)src & 3) == ((uintptr)dest & 3)) { + uint32 t1, t2, t3, t4, t5, t6, t7, t8; + int i = (4 - ((uintptr)src & 3)) % 4; + + n -= i; + + d = (unsigned char *)dw; + s = (const unsigned char *)sw; + while (i--) { + *d++ = *s++; + } + sw = (const uint32 *)s; + dw = (uint32 *)d; + + if (n >= 32) { + const uint32 *sfinal = (const uint32 *)((const uint8 *)sw + (n & ~31)); + + asm volatile("\n1:\t" + "ldmia.w\t%0!, {%3, %4, %5, %6, %7, %8, %9, %10}\n\t" + "stmia.w\t%1!, {%3, %4, %5, %6, %7, %8, %9, %10}\n\t" + "cmp\t%2, %0\n\t" + "bhi.n\t1b\n\t" + : "=r" (sw), "=r" (dw), "=r" (sfinal), "=r" (t1), "=r" (t2), + "=r" (t3), "=r" (t4), "=r" (t5), "=r" (t6), "=r" (t7), + "=r" (t8) + : "0" (sw), "1" (dw), "2" (sfinal)); + + n %= 32; + } + + /* Copy the remaining words */ + switch (n / 4) { + case 0: + break; + case 1: + asm volatile("ldr\t%2, [%0]\n\t" + "str\t%2, [%1]\n\t" + "adds\t%0, #4\n\t" + "adds\t%1, #4\n\t" + : "=r" (sw), "=r" (dw), "=r" (t1) + : "0" (sw), "1" (dw)); + break; + case 2: + asm volatile("ldmia.w\t%0!, {%2, %3}\n\t" + "stmia.w\t%1!, {%2, %3}\n\t" + : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2) + : "0" (sw), "1" (dw)); + break; + case 3: + asm volatile("ldmia.w\t%0!, {%2, %3, %4}\n\t" + "stmia.w\t%1!, {%2, %3, %4}\n\t" + : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2), + "=r" (t3) + : "0" (sw), "1" (dw)); + break; + case 4: + asm volatile("ldmia.w\t%0!, {%2, %3, %4, %5}\n\t" + "stmia.w\t%1!, {%2, %3, %4, %5}\n\t" + : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2), + "=r" (t3), "=r" (t4) + : "0" (sw), "1" (dw)); + break; + case 5: + asm volatile( + "ldmia.w\t%0!, {%2, %3, %4, %5, %6}\n\t" + "stmia.w\t%1!, {%2, %3, %4, %5, %6}\n\t" + : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2), + "=r" (t3), "=r" (t4), "=r" (t5) + : "0" (sw), "1" (dw)); + break; + case 6: + asm volatile( + "ldmia.w\t%0!, {%2, %3, %4, 
%5, %6, %7}\n\t" + "stmia.w\t%1!, {%2, %3, %4, %5, %6, %7}\n\t" + : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2), + "=r" (t3), "=r" (t4), "=r" (t5), "=r" (t6) + : "0" (sw), "1" (dw)); + break; + case 7: + asm volatile( + "ldmia.w\t%0!, {%2, %3, %4, %5, %6, %8, %7}\n\t" + "stmia.w\t%1!, {%2, %3, %4, %5, %6, %8, %7}\n\t" + : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2), + "=r" (t3), "=r" (t4), "=r" (t5), "=r" (t6), + "=r" (t7) + : "0" (sw), "1" (dw)); + break; + default: + ASSERT(0); + break; + } + n = n % 4; + } + + /* Copy the remaining bytes */ + d = (unsigned char *)dw; + s = (const unsigned char *)sw; + while (n--) { + *d++ = *s++; + } + + return dest; +} + +#ifdef __clang__ +/* TODO: remove once toolchain builtin libraries are available */ +/* simulate compiler builtins */ + +/* not aligned */ +void *__aeabi_memcpy(void *dest, const void *src, size_t n); +void * +__aeabi_memcpy(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +/* 4 byte aligned */ +void *__aeabi_memcpy4(void *dest, const void *src, size_t n); +void * +__aeabi_memcpy4(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +/* 8 byte aligned */ +void *__aeabi_memcpy8(void *dest, const void *src, size_t n); +void * +__aeabi_memcpy8(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +/* 8 byte aligned */ +void *__aeabi_memclr8(void *dest, size_t n); +void * +__aeabi_memclr8(void *dest, size_t n) +{ + return memset(dest, 0, n); +} +#endif /* __clang__ */ +#else +void * +memcpy(void *dest, const void *src, size_t n) +{ + uint32 *dw; + const uint32 *sw; + unsigned char *d; + const unsigned char *s; + + sw = (const uint32 *)src; + dw = (uint32 *)dest; + + if ((n >= 4) && (((uintptr)src & 3) == ((uintptr)dest & 3))) { + int i = (4 - ((uintptr)src & 3)) % 4; + n -= i; + d = (unsigned char *)dw; + s = (const unsigned char *)sw; + while (i--) { + *d++ = *s++; + } + + sw = (const uint32 *)s; + dw = (uint32 *)d; + while (n >= 4) { + *dw++ = *sw++; + n -= 4; + } + } + d = (unsigned char *)dw; + s = (const unsigned char *)sw; + while (n--) { + *d++ = *s++; + } + + return dest; +} +#endif /* defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7A__) */ +#endif /* EFI */ + +/* a hook to send printf output to the host */ +static printf_sendup_output_fn_t g_printf_sendup_output_fn = NULL; +static void *g_printf_sendup_output_ctx = NULL; + +#ifdef DONGLEBUILD +static bool _rodata_overwritten = FALSE; + +/* Ensure this string is not const. */ +CONST char BCMPOST_TRAP_RODATA(warn_str)[] = "RO overwritten %p\n"; +CONST char BCMPOST_TRAP_RODATA(time_str)[] = "%06u.%03u "; +#endif /* DONGLEBUILD */ + +void +printf_set_sendup_output_fn(printf_sendup_output_fn_t fn, void *ctx) +{ + g_printf_sendup_output_fn = fn; + g_printf_sendup_output_ctx = ctx; +} + +#ifdef DONGLEBUILD +void +BCMPOSTTRAPFN(printf_set_rodata_invalid)(void) +{ + _rodata_overwritten = TRUE; +} + +bool +printf_get_rodata_invalid(void) +{ + return (_rodata_overwritten); +} +#endif /* DONGLEBUILD */ + +/* Include printf if it has already not been defined as NULL */ +#ifndef printf +static bool last_nl = FALSE; +int +BCMPOSTTRAPFN(printf)(const char *fmt, ...) +{ + va_list ap; + int count = 0, i; + char buffer[PRINTF_BUFLEN + 1]; + + if (FWSIGN_ENAB()) { + return 0; + } + + if (!printf_lock()) + return 0; + +#ifdef DONGLEBUILD + if (_rodata_overwritten == TRUE) { + /* Regular printf will be garbage if ROdata is overwritten. In that case, + * print the caller address. 
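+	 * warn_str is placed in BCMPOST_TRAP_RODATA above, so its format string
+	 * is expected to survive the overwrite.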
+ */ + _rodata_overwritten = FALSE; + count = printf(warn_str, CALL_SITE); + _rodata_overwritten = TRUE; + return count; + } + + if (last_nl) { + /* add the dongle ref time */ + uint32 dongle_time_ms = hnd_get_reftime_ms(); + count = sprintf(buffer, time_str, dongle_time_ms / 1000, dongle_time_ms % 1000); + } +#endif /* DONGLEBUILD */ + + va_start(ap, fmt); + count += vsnprintf(buffer + count, sizeof(buffer) - count, fmt, ap); + va_end(ap); + + for (i = 0; i < count; i++) { + putchar(buffer[i]); + + /* EFI environment requires CR\LF in a printf, etc. + * so unless the string has \r\n, it will not execute CR + * So force it! + */ +#ifdef EFI + if (buffer[i] == '\n') + putchar('\r'); +#endif + } + + /* send the output up to the host */ + if (g_printf_sendup_output_fn != NULL) { + g_printf_sendup_output_fn(g_printf_sendup_output_ctx, buffer, count); + } + + if (buffer[count - 1] == '\n') + last_nl = TRUE; + else + last_nl = FALSE; + + printf_unlock(); + + return count; +} +#endif /* printf */ + +#if !defined(_WIN32) && !defined(EFI) +int +fputs(const char *s, FILE *stream /* UNUSED */) +{ + char c; + + UNUSED_PARAMETER(stream); + while ((c = *s++)) + putchar(c); + return 0; +} + +int +puts(const char *s) +{ + fputs(s, stdout); + putchar('\n'); + return 0; +} + +int +fputc(int c, FILE *stream) +{ + return putc(c, stream); +} + +unsigned long +rand(void) +{ + static unsigned long seed = 1; + long x, hi, lo, t; + + x = seed; + hi = x / 127773; + lo = x % 127773; + t = 16807 * lo - 2836 * hi; + if (t <= 0) t += 0x7fffffff; + seed = t; + return t; +} +#endif /* !_WIN32 && !EFI */ + +#endif /* BCMSTDLIB_SNPRINTF_ONLY */ + +#if !defined(_WIN32) || defined(EFI) +size_t +strnlen(const char *s, size_t maxlen) +{ + const char *b = s; + const char *e = s + maxlen; + + while (s < e && *s) { + s++; + } + + return s - b; +} +#endif /* !_WIN32 || EFI */ + +/* FORTIFY_SOURCE: Implementation of compiler built-in functions for C standard library functions + * that provide run-time buffer overflow detection. + */ +#if defined(BCM_FORTIFY_SOURCE) + +void* +__memcpy_chk(void *dest, const void *src, size_t n, size_t destsz) +{ + if (memcpy_s(dest, destsz, src, n) != 0) { + OSL_SYS_HALT(); + } + + return (dest); +} + +void * +__memmove_chk(void *dest, const void *src, size_t n, size_t destsz) +{ + if (memmove_s(dest, destsz, src, n) != 0) { + OSL_SYS_HALT(); + } + + return (dest); +} + +void * +__memset_chk(void *dest, int c, size_t n, size_t destsz) +{ + if (memset_s(dest, destsz, c, n) != 0) { + OSL_SYS_HALT(); + } + + return (dest); +} + +int +__snprintf_chk(char *str, size_t n, int flag, size_t destsz, const char *fmt, ...) +{ + va_list arg; + int rc; + + if (n > destsz) { + OSL_SYS_HALT(); + } + + va_start(arg, fmt); + rc = vsnprintf(str, n, fmt, arg); + va_end(arg); + + return (rc); +} + +int +__vsnprintf_chk(char *str, size_t n, int flags, size_t destsz, const char *fmt, va_list ap) +{ + if (n > destsz) { + OSL_SYS_HALT(); + } + + return (vsnprintf(str, n, fmt, ap)); +} + +char * +__strncpy_chk(char *dest, const char *src, size_t n, size_t destsz) +{ + if (n > destsz) { + OSL_SYS_HALT(); + } + + return (strncpy(dest, src, n)); +} +#endif /* BCM_FORTIFY_SOURCE */ + +/* Provide stub implementations for xxx_s() APIs that are remapped to compiler builtins. + * This allows the target to link. + * + * This is only intended as a compile-time test, and should be used by compile-only targets. 
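+ *
+ * In other words, the strcpy()/strcat() bodies below are link-time stubs that
+ * only return NULL and are never meant to execute.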
+ */
+#if defined(BCM_STDLIB_S_BUILTINS_TEST)
+#undef strcpy
+char* strcpy(char *dest, const char *src);
+char*
+strcpy(char *dest, const char *src)
+{
+	return (NULL);
+}
+
+#undef strcat
+char* strcat(char *dest, const char *src);
+char*
+strcat(char *dest, const char *src)
+{
+	return (NULL);
+}
+#endif /* BCM_STDLIB_S_BUILTINS_TEST */
diff --git a/bcmdhd.101.10.361.x/bcmstdlib_s.c b/bcmdhd.101.10.361.x/bcmstdlib_s.c
new file mode 100755
index 0000000..fd7e83e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmstdlib_s.c
@@ -0,0 +1,298 @@
+/*
+ * Broadcom Secure Standard Library.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else /* BCMDRIVER */
+#include <stddef.h>
+#include <string.h>
+#endif /* else BCMDRIVER */
+
+#include <bcmstdlib_s.h>
+#include <bcmutils.h>
+
+/* Don't use compiler builtins for stdlib APIs within the implementation of the stdlib itself. */
+#if defined(BCM_STDLIB_S_BUILTINS_TEST)
+	#undef memmove_s
+	#undef memcpy_s
+	#undef memset_s
+	#undef strlcpy
+	#undef strlcat_s
+#endif /* BCM_STDLIB_S_BUILTINS_TEST */
+
+/*
+ * The __SIZE_MAX__ value depends on the platform:
+ * Firmware Dongle: RAMSIZE (Dongle Specific Limit).
+ * LINUX NIC/Windows/MACOSX/Application: OS Native or
+ * 0xFFFFFFFFu if not defined.
+ */
+#ifndef SIZE_MAX
+#ifndef __SIZE_MAX__
+#ifdef DONGLEBUILD
+#define __SIZE_MAX__ RAMSIZE
+#else
+#define __SIZE_MAX__ 0xFFFFFFFFu
+#endif /* DONGLEBUILD */
+#endif /* __SIZE_MAX__ */
+#define SIZE_MAX __SIZE_MAX__
+#endif /* SIZE_MAX */
+#define RSIZE_MAX (SIZE_MAX >> 1u)
+
+#if !defined(__STDC_WANT_SECURE_LIB__) && \
+	!(defined(__STDC_LIB_EXT1__) && defined(__STDC_WANT_LIB_EXT1__))
+/*
+ * memmove_s - secure memmove
+ * dest : pointer to the object to copy to
+ * destsz : size of the destination buffer
+ * src : pointer to the object to copy from
+ * n : number of bytes to copy
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz zero bytes into the dest object.
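+ *
+ * Illustrative call (hypothetical dst/src buffers, not from this file):
+ *	if (memmove_s(dst, sizeof(dst), src, n) != BCME_OK)
+ *		; /* copy was refused with BCME_BADARG or BCME_BADLEN */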
+ */
+int
+memmove_s(void *dest, size_t destsz, const void *src, size_t n)
+{
+	int err = BCME_OK;
+
+	if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	if (destsz > RSIZE_MAX) {
+		err = BCME_BADLEN;
+		goto exit;
+	}
+
+	if (destsz < n) {
+		memset(dest, 0, destsz);
+		err = BCME_BADLEN;
+		goto exit;
+	}
+
+	if ((!src) || (((const char *)src + n) < (const char *)src)) {
+		memset(dest, 0, destsz);
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	memmove(dest, src, n);
+exit:
+	return err;
+}
+
+/*
+ * memcpy_s - secure memcpy
+ * dest : pointer to the object to copy to
+ * destsz : size of the destination buffer
+ * src : pointer to the object to copy from
+ * n : number of bytes to copy
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz zero bytes into the dest object.
+ */
+int
+BCMPOSTTRAPFN(memcpy_s)(void *dest, size_t destsz, const void *src, size_t n)
+{
+	int err = BCME_OK;
+	char *d = dest;
+	const char *s = src;
+
+	if ((!d) || ((d + destsz) < d)) {
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	if (destsz > RSIZE_MAX) {
+		err = BCME_BADLEN;
+		goto exit;
+	}
+
+	if (destsz < n) {
+		memset(dest, 0, destsz);
+		err = BCME_BADLEN;
+		goto exit;
+	}
+
+	if ((!s) || ((s + n) < s)) {
+		memset(dest, 0, destsz);
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	/* overlap checking between dest and src */
+	if (!(((d + destsz) <= s) || (d >= (s + n)))) {
+		memset(dest, 0, destsz);
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	(void)memcpy(dest, src, n);
+exit:
+	return err;
+}
+
+/*
+ * memset_s - secure memset
+ * dest : pointer to the object to be set
+ * destsz : size of the destination buffer
+ * c : byte value
+ * n : number of bytes to be set
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz bytes with value c into the dest object.
+ */
+int
+BCMPOSTTRAPFN(memset_s)(void *dest, size_t destsz, int c, size_t n)
+{
+	int err = BCME_OK;
+	if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	if (destsz > RSIZE_MAX) {
+		err = BCME_BADLEN;
+		goto exit;
+	}
+
+	if (destsz < n) {
+		(void)memset(dest, c, destsz);
+		err = BCME_BADLEN;
+		goto exit;
+	}
+
+	(void)memset(dest, c, n);
+exit:
+	return err;
+}
+#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */
+
+#if !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY)
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ * return: string length of src (which is always < size) on success, size on
+ *	truncation, or 0 if the input parameters are NOK
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
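+ *
+ * Illustrative call (hypothetical buffer, not from this file):
+ *	char name[8];
+ *	if (strlcpy(name, src, sizeof(name)) == sizeof(name))
+ *		; /* src did not fit; name holds a truncated, NUL-terminated copy */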
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+	size_t i;
+
+	if (dest == NULL || size == 0) {
+		return 0;
+	}
+
+	if (src == NULL) {
+		*dest = '\0';
+		return 0;
+	}
+
+	for (i = 0; i < size; i++) {
+		dest[i] = src[i];
+		if (dest[i] == '\0') {
+			/* success - src string copied */
+			return i;
+		}
+	}
+
+	/* NUL terminate since not found in src */
+	dest[size - 1u] = '\0';
+
+	/* fail - src string truncated */
+	return size;
+}
+#endif /* !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY) */
+
+/**
+ * strlcat_s - Concatenate a %NUL terminated string with a sized buffer
+ * @dest: Where to concatenate the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ * return: string length of the created string (i.e. the initial length of dest plus the length
+ *	of src), not including the NUL char, up until size
+ *
+ * Unlike strncat(), strlcat_s() takes the full size of the buffer (not just the number of bytes
+ * to copy) and guarantees to NUL-terminate the result (even when there's nothing to concat).
+ * If the length of the dest string concatenated with the src string is >= size, truncation
+ * occurs.
+ *
+ * Compatible with *BSD: the result is always a valid NUL-terminated string that fits in the
+ * buffer (unless, of course, the buffer size is zero).
+ *
+ * If either src or dest is not NUL-terminated, dest[size-1] will be set to NUL.
+ * If size < strlen(dest) + strlen(src), dest[size-1] will be set to NUL.
+ * If size == 0, dest[0] will be set to NUL.
+ */
+size_t
+strlcat_s(char *dest, const char *src, size_t size)
+{
+	char *d = dest;
+	const char *s = src;	/* point to the start of the src string */
+	size_t n = size;
+	size_t dlen;
+	size_t bytes_to_copy = 0;
+
+	if (dest == NULL) {
+		return 0;
+	}
+
+	/* set d to point to the end of dest string (up to size) */
+	while (n != 0 && *d != '\0') {
+		d++;
+		n--;
+	}
+	dlen = (size_t)(d - dest);
+
+	if (s != NULL) {
+		size_t slen = 0;
+
+		/* calculate src len in case it's not null-terminated */
+		n = size;
+		while (n-- != 0 && *(s + slen) != '\0') {
+			++slen;
+		}
+
+		n = size - dlen;	/* maximum num of chars to copy */
+		if (n != 0) {
+			/* copy relevant chars (until end of src buf or given size is reached) */
+			bytes_to_copy = MIN(slen - (size_t)(s - src), n - 1);
+			(void)memcpy(d, s, bytes_to_copy);
+			d += bytes_to_copy;
+		}
+	}
+	if (n == 0 && dlen != 0) {
+		--d;	/* nothing to copy, but NUL-terminate dest anyway */
+	}
+	*d = '\0';	/* NUL-terminate dest */
+
+	return (dlen + bytes_to_copy);
+}
diff --git a/bcmdhd.101.10.361.x/bcmutils.c b/bcmdhd.101.10.361.x/bcmutils.c
new file mode 100755
index 0000000..929056f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmutils.c
@@ -0,0 +1,6097 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#include +#include +#include +#ifdef BCMDRIVER +#include +#include +#if !defined(BCMDONGLEHOST) || defined(BCMNVRAM) +#include +#endif + +#else /* !BCMDRIVER */ + +#include +#include +#include + +#if defined(BCMEXTSUP) +#include +#endif + +#ifndef ASSERT +#define ASSERT(exp) +#endif + +#endif /* !BCMDRIVER */ + +#ifdef WL_UNITTEST +/* + * Definitions and includes needed during software unit test compilation and execution. + */ +#include +#include +#ifdef ASSERT +#undef ASSERT +#endif /* ASSERT */ +#define ASSERT(exp) +#endif /* WL_UNITTEST */ + +#if defined(_WIN32) || defined(NDIS) +/* Debatable */ +#include +#endif +#include +#include +#include +#include +#include +#include +#include <802.1d.h> +#include <802.11.h> +#include +#include +#include +#ifdef BCMPERFSTATS +#include +#endif +#include + +#define NUMBER_OF_BITS_BYTE 8u + +#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING +#define CUST_IPV4_TOS_PREC_MASK 0x3F +#define DCSP_MAX_VALUE 64 +extern uint dhd_dscpmap_enable; +/* 0:BE,1:BK,2:RESV(BK):,3:EE,:4:CL,5:VI,6:VO,7:NC */ +int dscp2priomap[DCSP_MAX_VALUE]= +{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, /* BK->BE */ + 2, 0, 0, 0, 0, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 4, 0, 0, 0, 0, 0, 0, 0, + 5, 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, 0, + 7, 0, 0, 0, 0, 0, 0, 0 +}; +#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */ + +#ifdef PRIVACY_MASK +struct ether_addr privacy_addrmask; + +/* RAM accessor function to avoid 'privacy_addrmask' in ROM/RAM shared data section. */ +static struct ether_addr * +BCMRAMFN(privacy_addrmask_get)(void) +{ + return &privacy_addrmask; +} +#endif /* PRIVACY_MASK */ + +#ifdef BCMDRIVER + +#ifndef BCM_ARM_BACKTRACE +/* function pointers for firmware stack backtrace utility */ +void (*const print_btrace_int_fn)(int depth, uint32 pc, uint32 lr, uint32 sp) = NULL; +void (*const print_btrace_fn)(int depth) = NULL; +#else +void print_backtrace(int depth); +void (*const print_btrace_fn)(int depth) = print_backtrace; +void print_backtrace_int(int depth, uint32 pc, uint32 lr, uint32 sp); +void (*const print_btrace_int_fn)(int depth, uint32 pc, uint32 lr, uint32 sp) = print_backtrace_int; +#endif + +#if !defined(BCMDONGLEHOST) +/* Forward declarations */ +char * getvar_internal(char *vars, const char *name); +int getintvar_internal(char *vars, const char *name); +int getintvararray_internal(char *vars, const char *name, int index); +int getintvararraysize_internal(char *vars, const char *name); + +#ifndef WL_FWSIGN +/* + * Search the name=value vars for a specific one and return its value. + * Returns NULL if not found. + */ +char * +getvar(char *vars, const char *name) +{ + NVRAM_RECLAIM_CHECK(name); + return getvar_internal(vars, name); +} + +char * +getvar_internal(char *vars, const char *name) +{ + char *s; + uint len; + + if (!name) + return NULL; + + len = strlen(name); + if (len == 0u) { + return NULL; + } + + /* first look in vars[] */ + for (s = vars; s && *s;) { + if ((bcmp(s, name, len) == 0) && (s[len] == '=')) { + return (&s[len+1u]); + } + while (*s++) + ; + } + + /* then query nvram */ + return (nvram_get(name)); +} + +/* + * Search the vars for a specific one and return its value as + * an integer. Returns 0 if not found. 
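+ *
+ * Illustrative lookup (hypothetical NVRAM image; entries are NUL-separated):
+ *	vars = "boardtype=0x062b\0boardnum=77\0\0"
+ *	getintvar(vars, "boardnum") returns 77; getintvar(vars, "nosuchvar") returns 0.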
+ */ +int +getintvar(char *vars, const char *name) +{ + NVRAM_RECLAIM_CHECK(name); + return getintvar_internal(vars, name); +} + +int +getintvar_internal(char *vars, const char *name) +{ + char *val; + + if ((val = getvar_internal(vars, name)) == NULL) + return (0); + + return (bcm_strtoul(val, NULL, 0)); +} + +int +getintvararray(char *vars, const char *name, int index) +{ + NVRAM_RECLAIM_CHECK(name); + return getintvararray_internal(vars, name, index); +} + +int +getintvararray_internal(char *vars, const char *name, int index) +{ + char *buf, *endp; + int i = 0; + int val = 0; + + if ((buf = getvar_internal(vars, name)) == NULL) { + return (0); + } + + /* table values are always separated by "," or " " */ + while (*buf != '\0') { + val = bcm_strtoul(buf, &endp, 0); + if (i == index) { + return val; + } + buf = endp; + /* delimiter is ',' */ + if (*buf == ',') + buf++; + i++; + } + return (0); +} + +int +getintvararraysize(char *vars, const char *name) +{ + NVRAM_RECLAIM_CHECK(name); + return getintvararraysize_internal(vars, name); +} + +int +getintvararraysize_internal(char *vars, const char *name) +{ + char *buf, *endp; + int count = 0; + int val = 0; + + if ((buf = getvar_internal(vars, name)) == NULL) { + return (0); + } + + /* table values are always separated by "," or " " */ + while (*buf != '\0') { + val = bcm_strtoul(buf, &endp, 0); + buf = endp; + /* delimiter is ',' */ + if (*buf == ',') + buf++; + count++; + } + BCM_REFERENCE(val); + return count; +} + +/* Read an array of values from a possibly slice-specific nvram string + * Store the values in either the uint8 dest_array1 or in the int16 dest_array2. + * Pass in NULL for the dest_array[12] that is not to be used. + */ +static int +BCMATTACHFN(getintvararray_slicespecific)(osl_t *osh, char *vars, char *vars_table_accessor, + const char* name, uint8* dest_array1, int16* dest_array2, uint dest_size) +{ + uint i; + uint array_size = 0; + int err = BCME_OK; + uint prefixed_name_sz; + char *prefixed_name = NULL; + const char *new_name; + int val; + + prefixed_name_sz = get_slicespecific_var_name(osh, vars_table_accessor, + name, &prefixed_name); + if (prefixed_name_sz == 0) { + return BCME_NOMEM; + } + + new_name = prefixed_name; + (void) new_name; + if (getvar(vars, new_name) == NULL) { + /* Try again without the slice prefix in the name */ + new_name = name; + if (getvar(vars, name) == NULL) { + err = BCME_NOTFOUND; + goto done; + } + } + + array_size = (uint)getintvararraysize(vars, new_name); + if (array_size > dest_size) { + err = BCME_BUFTOOSHORT; + ASSERT(array_size <= dest_size); + goto done; + } + + /* limit the initialization to the size of the nvram array */ + array_size = MIN(array_size, dest_size); + + /* load the destination array with the nvram array values */ + for (i = 0; i < array_size; i++) { + val = getintvararray(vars, new_name, i); + if (dest_array1) { + dest_array1[i] = (uint8)val; + } else if (dest_array2) { + dest_array2[i] = (int16)val; + } + } +done: + MFREE(osh, prefixed_name, prefixed_name_sz); + return (err < 0) ? 
err : (int)array_size; +} + +int +BCMATTACHFN(get_uint8_vararray_slicespecific)(osl_t *osh, char *vars, char *vars_table_accessor, + const char* name, uint8* dest_array, uint dest_size) +{ + int ret; + + ret = getintvararray_slicespecific(osh, vars, vars_table_accessor, + name, dest_array, NULL, dest_size); + return ret; +} + +int +BCMATTACHFN(get_int16_vararray_slicespecific)(osl_t *osh, char *vars, char *vars_table_accessor, + const char* name, int16* dest_array, uint dest_size) +{ + return getintvararray_slicespecific(osh, vars, vars_table_accessor, + name, NULL, dest_array, dest_size); +} + +/* Prepend a slice-specific accessor to an nvram string name. + * Sets name_out to the allocated string. Returns the allocated size of the name string. + * Caller is responsible for freeing the resulting name string with MFREE. + */ +uint +BCMATTACHFN(get_slicespecific_var_name)(osl_t *osh, char *vars_table_accessor, const char *name, + char **name_out) +{ + char *name_with_prefix = NULL; + uint sz; + uint max_copy_size; + + sz = strlen(name) + strlen(vars_table_accessor) + 1; + name_with_prefix = (char *) MALLOC_NOPERSIST(osh, sz); + if (name_with_prefix == NULL) { + sz = 0; + goto end; + } + name_with_prefix[0] = 0; + name_with_prefix[sz - 1] = 0; + max_copy_size = sz - 1; + + /* if accessor contains a "slice/N/" string */ + if (vars_table_accessor[0] != 0) { + /* prepend accessor to the vars-name */ + bcmstrncat(name_with_prefix, vars_table_accessor, max_copy_size); + max_copy_size -= strlen(name_with_prefix); + } + + /* Append vars-name */ + bcmstrncat(name_with_prefix, name, max_copy_size); +end: + *name_out = name_with_prefix; + return sz; +} +#endif /* WL_FWSIGN */ + +/* Search for token in comma separated token-string */ +static int +findmatch(const char *string, const char *name) +{ + uint len; + char *c; + + len = strlen(name); + while ((c = strchr(string, ',')) != NULL) { + if (len == (uint)(c - string) && !strncmp(string, name, len)) + return 1; + string = c + 1; + } + + return (!strcmp(string, name)); +} + +/* Return gpio pin number assigned to the named pin + * + * Variable should be in format: + * + * gpio=pin_name,pin_name + * + * This format allows multiple features to share the gpio with mutual + * understanding. + * + * 'def_pin' is returned if a specific gpio is not defined for the requested functionality + * and if def_pin is not used by others. + */ +uint +getgpiopin(char *vars, char *pin_name, uint def_pin) +{ + char name[] = "gpioXXXX"; + char *val; + uint pin; + + /* Go thru all possibilities till a match in pin name */ + for (pin = 0; pin < GPIO_NUMPINS; pin ++) { + snprintf(name, sizeof(name), "gpio%d", pin); + val = getvar(vars, name); + if (val && findmatch(val, pin_name)) + return pin; + } + + if (def_pin != GPIO_PIN_NOTDEFINED) { + /* make sure the default pin is not used by someone else */ + snprintf(name, sizeof(name), "gpio%d", def_pin); + if (getvar(vars, name)) { + def_pin = GPIO_PIN_NOTDEFINED; + } + } + return def_pin; +} +#endif /* !BCMDONGLEHOST */ + +/* return total length of buffer chain. In case of CSO, submsdu may have extra tsohdr and if + * pktotlen should not include submsdu tso header, use the API pkttotlen_no_sfhtoe_hdr. 
+ */
+uint
+BCMFASTPATH(pkttotlen)(osl_t *osh, void *p)
+{
+	uint total = 0;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		total += PKTLEN(osh, p);
+
+		if (BCMLFRAG_ENAB() && PKTISFRAG(osh, p)) {
+			total += PKTFRAGTOTLEN(osh, p);
+		}
+	}
+
+	return (total);
+}
+
+#ifdef WLCSO
+/* return total length of buffer chain, but exclude the tso hdr of a submsdu if it is added */
+uint
+BCMFASTPATH(pkttotlen_no_sfhtoe_hdr)(osl_t *osh, void *p, uint toe_hdr_len)
+{
+	uint total = 0;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		total += PKTLEN(osh, p);
+
+		/* exclude toe_hdr_len if it is part of PKTLEN() */
+		if (PKTISSUBMSDUTOEHDR(osh, p)) {
+			total -= toe_hdr_len;
+		}
+
+		if (BCMLFRAG_ENAB() && PKTISFRAG(osh, p)) {
+			total += PKTFRAGTOTLEN(osh, p);
+		}
+	}
+
+	return (total);
+}
+#endif /* WLCSO */
+
+/* return total number of buffers in the chain */
+uint
+BCMFASTPATH(pkttotcnt)(osl_t *osh, void *p)
+{
+	uint cnt = 0;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+	}
+
+	return (cnt);
+}
+
+/* return the last buffer of chained pkt */
+void *
+BCMFASTPATH(pktlast)(osl_t *osh, void *p)
+{
+	for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+		;
+
+	return (p);
+}
+
+/* count segments of a chained packet */
+uint
+BCMFASTPATH(pktsegcnt)(osl_t *osh, void *p)
+{
+	uint cnt;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		if (PKTLEN(osh, p)) {
+			cnt++;
+		}
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB() && PKTISFRAG(osh, p)) {
+			cnt += PKTFRAGTOTNUM(osh, p);
+		}
+#endif /* BCMLFRAG */
+	}
+
+	return cnt;
+}
+
+#ifdef DONGLEBUILD
+/**
+ * Takes an lbuf/lfrag and the number of bytes to be trimmed from the tail.
+ * The bytes to trim may be laid out in one of three ways:
+ * 1. entirely in the dongle
+ * 2. entirely in the host
+ * 3. split between host and dongle
+ */
+void
+BCMFASTPATH(pktfrag_trim_tailbytes)(osl_t *osh, void *p, uint16 trim_len, uint8 type)
+{
+	uint16 tcmseg_len = PKTLEN(osh, p);		/* TCM segment length */
+	uint16 hostseg_len = PKTFRAGUSEDLEN(osh, p);	/* HOST segment length */
+
+	/* return if zero trim length - nothing to do */
+	if (trim_len == 0)
+		return;
+
+	/* if header conv is on, there is no fcs at the end */
+	/* JIRA:SW4349-318 */
+	if (PKTISHDRCONVTD(osh, p))
+		return;
+
+	/* if pktfetched, then it is already trimmed */
+	if (PKTISPKTFETCHED(osh, p))
+		return;
+
+	if (PKTFRAGUSEDLEN(osh, p) >= trim_len) {
+		/* TRIM bytes entirely in host */
+		ASSERT_FP(PKTISRXFRAG(osh, p));
+
+		PKTSETFRAGUSEDLEN(osh, p, (hostseg_len - trim_len));
+	} else {
+		/* trim bytes either in dongle or split between dongle-host */
+		PKTSETLEN(osh, p, (tcmseg_len - (trim_len - hostseg_len)));
+
+		/* No more contents in host; reset length to zero */
+		if (PKTFRAGUSEDLEN(osh, p))
+			PKTSETFRAGUSEDLEN(osh, p, 0);
+	}
+}
+#endif /* DONGLEBUILD */
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, uint len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN(PKTLEN(osh, p) - offset, len);
+		bcopy(PKTDATA(osh, p) + offset, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, uint len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/*
copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN(PKTLEN(osh, p) - offset, len); + bcopy(buf, PKTDATA(osh, p) + offset, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + +#ifdef NOT_YET +/* copy data from one pkt buffer (chain) to another */ +uint +pkt2pktcopy(osl_t *osh, void *p1, uint offs1, void *p2, uint offs2, uint maxlen) +{ + uint8 *dp1, *dp2; + uint len1, len2, copylen, totallen; + + for (; p1 && offs; p1 = PKTNEXT(osh, p1)) { + if (offs1 < (uint)PKTLEN(osh, p1)) + break; + offs1 -= PKTLEN(osh, p1); + } + for (; p2 && offs; p2 = PKTNEXT(osh, p2)) { + if (offs2 < (uint)PKTLEN(osh, p2)) + break; + offs2 -= PKTLEN(osh, p2); + } + + /* Heck w/it, only need the above for now */ +} +#endif /* NOT_YET */ + +uint8 * +BCMFASTPATH(pktdataoffset)(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint pkt_off = 0, len = 0; + uint8 *pdata = (uint8 *) PKTDATA(osh, p); + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + pdata = (uint8 *) PKTDATA(osh, p); + pkt_off = offset - len; + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return (uint8*) (pdata+pkt_off); +} + +/* given a offset in pdata, find the pkt seg hdr */ +void * +pktoffset(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint len = 0; + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return p; +} + +void +bcm_mdelay(uint ms) +{ + uint i; + + for (i = 0; i < ms; i++) { + OSL_DELAY(1000); + } +} + +#if defined(BCMPERFSTATS) || defined(BCMTSTAMPEDLOGS) + +#if defined(__ARM_ARCH_7R__) +#define BCMLOG_CYCLE_OVERHEAD 54 /* Number of CPU cycle overhead due to bcmlog(). + * This is to compensate CPU cycle incurred by + * added bcmlog() function call for profiling. + */ +#else +#define BCMLOG_CYCLE_OVERHEAD 0 +#endif + +#define LOGSIZE 256 /* should be power of 2 to avoid div below */ +static struct { + uint cycles; + const char *fmt; + uint a1; + uint a2; + uchar indent; /* track indent level for nice printing */ +} logtab[LOGSIZE]; + +/* last entry logged */ +static uint logi = 0; +/* next entry to read */ +static uint volatile readi = 0; +#endif /* defined(BCMPERFSTATS) || defined(BCMTSTAMPEDLOGS) */ + +#ifdef BCMPERFSTATS +/* TODO: make the utility configurable (choose between icache, dcache, hits, misses ...) */ +void +bcm_perf_enable() +{ + BCMPERF_ENABLE_INSTRCOUNT(); + BCMPERF_ENABLE_ICACHE_MISS(); + BCMPERF_ENABLE_ICACHE_HIT(); +} + +/* WARNING: This routine uses OSL_GETCYCLES(), which can give unexpected results on + * modern speed stepping CPUs. Use bcmtslog() instead in combination with TSF counter. + */ +void +bcmlog(char *fmt, uint a1, uint a2) +{ + static uint last = 0; + uint cycles, i, elapsed; + OSL_GETCYCLES(cycles); + + i = logi; + + elapsed = cycles - last; + if (elapsed > BCMLOG_CYCLE_OVERHEAD) + logtab[i].cycles = elapsed - BCMLOG_CYCLE_OVERHEAD; + else + logtab[i].cycles = 0; + logtab[i].fmt = fmt; + logtab[i].a1 = a1; + logtab[i].a2 = a2; + + logi = (i + 1) % LOGSIZE; + last = cycles; + + /* if log buffer is overflowing, readi should be advanced. + * Otherwise logi and readi will become out of sync. + */ + if (logi == readi) { + readi = (readi + 1) % LOGSIZE; + } else { + /* This redundant else is to make CPU cycles of bcmlog() function to be uniform, + * so that the cycle compensation with BCMLOG_CYCLE_OVERHEAD is more accurate. 
+ */ + readi = readi % LOGSIZE; + } +} + +/* Same as bcmlog but specializes the use of a1 and a2 to + * store icache misses and instruction count. + * TODO : make this use a configuration array to decide what counter to read. + * We are limited to 2 numbers but it seems it is the most we can get anyway + * since dcache and icache cannot be enabled at the same time. Recording + * both the hits and misses at the same time for a given cache is not that useful either. +*/ + +void +bcmstats(char *fmt) +{ + static uint last = 0; + static uint32 ic_miss = 0; + static uint32 instr_count = 0; + uint32 ic_miss_cur; + uint32 instr_count_cur; + uint cycles, i; + + OSL_GETCYCLES(cycles); + BCMPERF_GETICACHE_MISS(ic_miss_cur); + BCMPERF_GETINSTRCOUNT(instr_count_cur); + + i = logi; + + logtab[i].cycles = cycles - last; + logtab[i].a1 = ic_miss_cur - ic_miss; + logtab[i].a2 = instr_count_cur - instr_count; + logtab[i].fmt = fmt; + + logi = (i + 1) % LOGSIZE; + + last = cycles; + instr_count = instr_count_cur; + ic_miss = ic_miss_cur; + + /* if log buffer is overflowing, readi should be advanced. + * Otherwise logi and readi will become out of sync. + */ + if (logi == readi) { + readi = (readi + 1) % LOGSIZE; + } else { + /* This redundant else is to make CPU cycles of bcmstats() function to be uniform + */ + readi = readi % LOGSIZE; + } +} + +/* + * TODO (linux version): a "proc" version where the log would be dumped + * on the proc file directly. + */ + +void +bcmdumplog(char *buf, int size) +{ + char *limit; + int j = 0; + int num; + + limit = buf + size - 80; + *buf = '\0'; + + num = logi - readi; + + if (num < 0) + num += LOGSIZE; + + /* print in chronological order */ + + for (j = 0; j < num && (buf < limit); readi = (readi + 1) % LOGSIZE, j++) { + if (logtab[readi].fmt == NULL) + continue; + buf += snprintf(buf, (limit - buf), "%d\t", logtab[readi].cycles); + buf += snprintf(buf, (limit - buf), logtab[readi].fmt, logtab[readi].a1, + logtab[readi].a2); + buf += snprintf(buf, (limit - buf), "\n"); + } + +} + +/* + * Dump one log entry at a time. + * Return index of next entry or -1 when no more . + */ +int +bcmdumplogent(char *buf, uint i) +{ + bool hit; + + /* + * If buf is NULL, return the starting index, + * interpreting i as the indicator of last 'i' entries to dump. + */ + if (buf == NULL) { + i = ((i > 0) && (i < (LOGSIZE - 1))) ? i : (LOGSIZE - 1); + return ((logi - i) % LOGSIZE); + } + + *buf = '\0'; + + ASSERT(i < LOGSIZE); + + if (i == logi) + return (-1); + + hit = FALSE; + for (; (i != logi) && !hit; i = (i + 1) % LOGSIZE) { + if (logtab[i].fmt == NULL) + continue; + buf += snprintf(buf, LOGSIZE, "%d: %d\t", i, logtab[i].cycles); + buf += snprintf(buf, LOGSIZE, logtab[i].fmt, logtab[i].a1, logtab[i].a2); + buf += snprintf(buf, LOGSIZE, "\n"); + hit = TRUE; + } + + return (i); +} + +#endif /* BCMPERFSTATS */ + +#if defined(BCMTSTAMPEDLOGS) +/* Store a TSF timestamp and a log line in the log buffer */ +/* + a1 is used to signify entering/exiting a routine. When entering + the indent level is increased. When exiting, the delta since entering + is printed and the indent level is bumped back out. + Nesting can go up to level MAX_TS_INDENTS deep. 
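+
+	Illustrative instrumentation (hypothetical routine name):
+		bcmtslog(tstamp, "scan_work", TS_ENTER, 0);
+		... timed section ...
+		bcmtslog(tstamp, "scan_work", TS_EXIT, 0);
+	The EXIT entry's a2 is then dumped as the elapsed time for the section.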
+*/ +#define MAX_TS_INDENTS 20 +void +bcmtslog(uint32 tstamp, const char *fmt, uint a1, uint a2) +{ + uint i = logi; + bool use_delta = TRUE; + static uint32 last = 0; /* used only when use_delta is true */ + static uchar indent = 0; + static uint32 indents[MAX_TS_INDENTS]; + + logtab[i].cycles = tstamp; + if (use_delta) + logtab[i].cycles -= last; + + logtab[i].a2 = a2; + + if (a1 == TS_EXIT && indent) { + indent--; + logtab[i].a2 = tstamp - indents[indent]; + } + + logtab[i].fmt = fmt; + logtab[i].a1 = a1; + logtab[i].indent = indent; + + if (a1 == TS_ENTER) { + indents[indent] = tstamp; + if (indent < MAX_TS_INDENTS - 1) + indent++; + } + + if (use_delta) + last = tstamp; + logi = (i + 1) % LOGSIZE; +} + +/* Print out a microsecond timestamp as "sec.ms.us " */ +void +bcmprinttstamp(uint32 ticks) +{ + uint us, ms, sec; + + us = (ticks % TSF_TICKS_PER_MS) * 1000 / TSF_TICKS_PER_MS; + ms = ticks / TSF_TICKS_PER_MS; + sec = ms / 1000; + ms -= sec * 1000; + printf("%04u.%03u.%03u ", sec, ms, us); +} + +/* Print out the log buffer with timestamps */ +void +bcmprinttslogs(void) +{ + int j = 0; + int num; + + num = logi - readi; + if (num < 0) + num += LOGSIZE; + + /* Format and print the log entries directly in chronological order */ + for (j = 0; j < num; readi = (readi + 1) % LOGSIZE, j++) { + if (logtab[readi].fmt == NULL) + continue; + bcmprinttstamp(logtab[readi].cycles); + printf(logtab[readi].fmt, logtab[readi].a1, logtab[readi].a2); + printf("\n"); + } +} + +/* + Identical to bcmdumplog, but output is based on tsf instead of cycles. + + a1 is used to signify entering/exiting a routine. When entering + the indent level is increased. When exiting, the delta since entering + is printed and the indent level is bumped back out. +*/ +void +bcmdumptslog(struct bcmstrbuf *b) +{ + char *limit; + int j = 0; + int num; + uint us, ms, sec; + int skip; + char *lines = "| | | | | | | | | | | | | | | | | | | |"; + + limit = BCMSTRBUF_BUF(b) + BCMSTRBUF_LEN(b) - 80; + + num = logi - readi; + + if (num < 0) + num += LOGSIZE; + + /* print in chronological order */ + for (j = 0; j < num && (BCMSTRBUF_BUF(b) < limit); readi = (readi + 1) % LOGSIZE, j++) { + char *last_buf = BCMSTRBUF_BUF(b); + if (logtab[readi].fmt == NULL) + continue; + + us = (logtab[readi].cycles % TSF_TICKS_PER_MS) * 1000 / TSF_TICKS_PER_MS; + ms = logtab[readi].cycles / TSF_TICKS_PER_MS; + sec = ms / 1000; + ms -= sec * 1000; + + bcm_bprintf(b, "%04u.%03u.%03u ", sec, ms, us); + + /* 2 spaces for each indent level */ + bcm_bprintf(b, "%.*s", logtab[readi].indent * 2, lines); + + /* + * The following call to snprintf generates a compiler warning + * due to -Wformat-security. However, the format string is coming + * from internal callers rather than external data input, and is a + * useful debugging tool serving a variety of diagnostics. Rather + * than expand code size by replicating multiple functions with different + * argument lists, or disabling the warning globally, let's consider + * if we can just disable the warning for this one instance. + */ + bcm_bprintf(b, logtab[readi].fmt); + + /* If a1 is ENTER or EXIT, print the + or - */ + skip = 0; + if (logtab[readi].a1 == TS_ENTER) { + bcm_bprintf(b, " +"); + skip++; + } + if (logtab[readi].a1 == TS_EXIT) { + bcm_bprintf(b, " -"); + skip++; + } + + /* else print the real a1 */ + if (logtab[readi].a1 && !skip) + bcm_bprintf(b, " %d", logtab[readi].a1); + + /* + If exiting routine, print a nicely formatted delta since entering. + Otherwise, just print a2 normally. 
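+
+		A rendered EXIT line then looks roughly like (illustrative):
+		0012.345.678 | | scan_work -                               123 usecs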
+ */ + if (logtab[readi].a2) { + if (logtab[readi].a1 == TS_EXIT) { + int num_space = 75 - (BCMSTRBUF_BUF(b) - last_buf); + bcm_bprintf(b, "%*.s", num_space, ""); + bcm_bprintf(b, "%5d usecs", logtab[readi].a2); + } else + bcm_bprintf(b, " %d", logtab[readi].a2); + } + bcm_bprintf(b, "\n"); + last_buf = BCMSTRBUF_BUF(b); + } +} + +#endif /* BCMTSTAMPEDLOGS */ + +#if defined(BCMDBG) || defined(DHD_DEBUG) +/* pretty hex print a pkt buffer chain */ +void +prpkt(const char *msg, osl_t *osh, void *p0) +{ + void *p; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + for (p = p0; p; p = PKTNEXT(osh, p)) + prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); +} +#endif /* BCMDBG || DHD_DEBUG */ + +/* Takes an Ethernet frame and sets out-of-bound PKTPRIO. + * Also updates the inplace vlan tag if requested. + * For debugging, it returns an indication of what it did. + */ +uint +BCMFASTPATH(pktsetprio)(void *pkt, bool update_vtag) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *pktdata; + int priority = 0; + int rc = 0; + + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + ASSERT_FP(ISALIGNED((uintptr)pktdata, sizeof(uint16))); + + eh = (struct ether_header *) pktdata; + + if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) { + uint16 vlan_tag; + int vlan_prio, dscp_prio = 0; + + evh = (struct ethervlan_header *)eh; + + vlan_tag = ntoh16(evh->vlan_tag); + vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; + + if ((evh->ether_type == hton16(ETHER_TYPE_IP)) || + (evh->ether_type == hton16(ETHER_TYPE_IPV6))) { + uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); + uint8 tos_tc = IP_TOS46(ip_body); + dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + } + + /* DSCP priority gets precedence over 802.1P (vlan tag) */ + if (dscp_prio != 0) { + priority = dscp_prio; + rc |= PKTPRIO_VDSCP; + } else { + priority = vlan_prio; + rc |= PKTPRIO_VLAN; + } + /* + * If the DSCP priority is not the same as the VLAN priority, + * then overwrite the priority field in the vlan tag, with the + * DSCP priority value. 
This is required for Linux APs because + * the VLAN driver on Linux, overwrites the skb->priority field + * with the priority value in the vlan tag + */ + if (update_vtag && (priority != vlan_prio)) { + vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); + vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; + evh->vlan_tag = hton16(vlan_tag); + rc |= PKTPRIO_UPD; + } +#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING) + } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + priority = PRIO_8021D_NC; + rc = PKTPRIO_DSCP; +#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */ +#if defined(WLTDLS) + } else if (eh->ether_type == hton16(ETHER_TYPE_89_0D)) { + /* Bump up the priority for TDLS frames */ + priority = PRIO_8021D_VI; + rc = PKTPRIO_DSCP; +#endif /* WLTDLS */ + } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || + (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { + uint8 *ip_body = pktdata + sizeof(struct ether_header); + uint8 tos_tc = IP_TOS46(ip_body); + uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT; + switch (dscp) { + case DSCP_EF: + case DSCP_VA: + priority = PRIO_8021D_VO; + break; + case DSCP_AF31: + case DSCP_AF32: + case DSCP_AF33: + case DSCP_CS3: + priority = PRIO_8021D_CL; + break; + case DSCP_AF21: + case DSCP_AF22: + case DSCP_AF23: + priority = PRIO_8021D_EE; + break; + case DSCP_AF11: + case DSCP_AF12: + case DSCP_AF13: + case DSCP_CS2: + priority = PRIO_8021D_BE; + break; + case DSCP_CS6: + case DSCP_CS7: + priority = PRIO_8021D_NC; + break; + default: +#ifndef CUSTOM_DSCP_TO_PRIO_MAPPING + priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); +#else + if (dhd_dscpmap_enable) { + priority = (int)dscp2priomap[((tos_tc >> IPV4_TOS_DSCP_SHIFT) + & CUST_IPV4_TOS_PREC_MASK)]; + } + else { + priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + } +#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */ + break; + } + + rc |= PKTPRIO_DSCP; + } + + ASSERT_FP(priority >= 0 && priority <= MAXPRIO); + PKTSETPRIO(pkt, priority); + return (rc | priority); +} + +/* lookup user priority for specified DSCP */ +static uint8 +dscp2up(uint8 *up_table, uint8 dscp) +{ + uint8 user_priority = 255; + + /* lookup up from table if parameters valid */ + if (up_table != NULL && dscp < UP_TABLE_MAX) { + user_priority = up_table[dscp]; + } + + /* 255 is unused value so return up from dscp */ + if (user_priority == 255) { + user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT); + } + + return user_priority; +} + +/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */ +uint +BCMFASTPATH(pktsetprio_qms)(void *pkt, uint8* up_table, bool update_vtag) +{ + if (up_table) { + uint8 *pktdata; + uint pktlen; + uint8 dscp; + uint user_priority = 0; + uint rc = 0; + + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + pktlen = PKTLEN(OSH_NULL, pkt); + if (pktgetdscp(pktdata, pktlen, &dscp)) { + rc = PKTPRIO_DSCP; + user_priority = dscp2up(up_table, dscp); + PKTSETPRIO(pkt, user_priority); + } + + return (rc | user_priority); + } else { + return pktsetprio(pkt, update_vtag); + } +} + +/* Returns TRUE and DSCP if IP header found, FALSE otherwise. 
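+ *
+ * Typical use, mirroring pktsetprio_qms() above (names illustrative):
+ *	uint8 dscp;
+ *	if (pktgetdscp(PKTDATA(OSH_NULL, pkt), PKTLEN(OSH_NULL, pkt), &dscp))
+ *		PKTSETPRIO(pkt, dscp2up(up_table, dscp));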
+ */ +bool +BCMFASTPATH(pktgetdscp)(uint8 *pktdata, uint pktlen, uint8 *dscp) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *ip_body; + bool rc = FALSE; + + /* minimum length is ether header and IP header */ + if (pktlen < (sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)) { + return FALSE; + } + + eh = (struct ether_header *) pktdata; + + if ((eh->ether_type == HTON16(ETHER_TYPE_IP)) || + (eh->ether_type == HTON16(ETHER_TYPE_IPV6))) { + ip_body = pktdata + sizeof(struct ether_header); + *dscp = IP_DSCP46(ip_body); + rc = TRUE; + } + else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) { + evh = (struct ethervlan_header *)eh; + + /* minimum length is ethervlan header and IP header */ + if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN && + evh->ether_type == HTON16(ETHER_TYPE_IP)) { + ip_body = pktdata + sizeof(struct ethervlan_header); + *dscp = IP_DSCP46(ip_body); + rc = TRUE; + } + } + + return rc; +} + +/* usr_prio range from low to high with usr_prio value */ +static bool +up_table_set(uint8 *up_table, uint8 usr_prio, uint8 low, uint8 high) +{ + int i; + + if (usr_prio > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) { + return FALSE; + } + + for (i = low; i <= high; i++) { + up_table[i] = usr_prio; + } + + return TRUE; +} + +/* set user priority table */ +int +BCMFASTPATH(wl_set_up_table)(uint8 *up_table, bcm_tlv_t *qos_map_ie) +{ + uint8 len; + + if (up_table == NULL || qos_map_ie == NULL) { + return BCME_ERROR; + } + + /* clear table to check table was set or not */ + memset(up_table, 0xff, UP_TABLE_MAX); + + /* length of QoS Map IE must be 16+n*2, n is number of exceptions */ + if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID && + (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH && + (len % 2) == 0) { + uint8 *except_ptr = (uint8 *)qos_map_ie->data; + uint8 except_len = len - QOS_MAP_FIXED_LENGTH; + uint8 *range_ptr = except_ptr + except_len; + int i; + + /* fill in ranges */ + for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) { + uint8 low = range_ptr[i]; + uint8 high = range_ptr[i + 1]; + if (low == 255 && high == 255) { + continue; + } + + if (!up_table_set(up_table, i / 2, low, high)) { + /* clear the table on failure */ + memset(up_table, 0xff, UP_TABLE_MAX); + return BCME_ERROR; + } + } + + /* update exceptions */ + for (i = 0; i < except_len; i += 2) { + uint8 dscp = except_ptr[i]; + uint8 usr_prio = except_ptr[i+1]; + + /* exceptions with invalid dscp/usr_prio are ignored */ + up_table_set(up_table, usr_prio, dscp, dscp); + } + } + + return BCME_OK; +} + +#ifndef BCM_BOOTLOADER +/* The 0.5KB string table is not removed by compiler even though it's unused */ + +static char bcm_undeferrstr[32]; +static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; + +/* Convert the error codes into related error strings */ +/* BCMRAMFN for BCME_LAST usage */ +const char * +BCMRAMFN(bcmerrorstr)(int bcmerror) +{ + /* check if someone added a bcmerror code but forgot to add errorstring */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); + + if (bcmerror > 0 || bcmerror < BCME_LAST) { + snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); + return bcm_undeferrstr; + } + + ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); + + return bcmerrorstrtable[-bcmerror]; +} + +#endif /* !BCM_BOOTLOADER */ + +#ifdef BCMDBG_PKT /* pkt logging for debugging */ +/* Add a packet to the pktlist */ +static void +_pktlist_add(pktlist_info_t *pktlist, void *pkt, int line, char *file) 
+{ + uint16 i; + char *basename; +#ifdef BCMDBG_PTRACE + uint16 *idx = PKTLIST_IDX(pkt); +#endif /* BCMDBG_PTRACE */ + + ASSERT(pktlist->count < PKTLIST_SIZE); + + /* Verify the packet is not already part of the list */ + for (i = 0; i < pktlist->count; i++) { + if (pktlist->list[i].pkt == pkt) + ASSERT(0); + } + pktlist->list[pktlist->count].pkt = pkt; + pktlist->list[pktlist->count].line = line; + + basename = strrchr(file, '/'); + if (basename) + basename++; + else + basename = file; + pktlist->list[pktlist->count].file = basename; +#ifdef BCMDBG_PTRACE + *idx = pktlist->count; + bzero(pktlist->list[pktlist->count].pkt_trace, PKTTRACE_MAX_BYTES); +#endif /* BCMDBG_PTRACE */ + pktlist->count++; + + return; +} + +void +pktlist_add(pktlist_info_t *pktlist, void *pkt, int line, char *file) +{ + void *p; + for (p = pkt; p != NULL; p = PKTCLINK(p)) + _pktlist_add(pktlist, p, line, file); +} + +/* Remove a packet from the pktlist */ +static void +_pktlist_remove(pktlist_info_t *pktlist, void *pkt) +{ + uint16 i; + uint16 num = pktlist->count; +#ifdef BCMDBG_PTRACE + uint16 *idx = PKTLIST_IDX(pkt); + + ASSERT((*idx) < pktlist->count); +#endif /* BCMDBG_PTRACE */ + + /* find the index where pkt exists */ + for (i = 0; i < num; i++) { + /* check for the existence of pkt in the list */ + if (pktlist->list[i].pkt == pkt) { +#ifdef BCMDBG_PTRACE + ASSERT((*idx) == i); +#endif /* BCMDBG_PTRACE */ + /* replace with the last element */ + pktlist->list[i].pkt = pktlist->list[num-1].pkt; + pktlist->list[i].line = pktlist->list[num-1].line; + pktlist->list[i].file = pktlist->list[num-1].file; +#ifdef BCMDBG_PTRACE + memcpy(pktlist->list[i].pkt_trace, pktlist->list[num-1].pkt_trace, + PKTTRACE_MAX_BYTES); + idx = PKTLIST_IDX(pktlist->list[i].pkt); + *idx = i; +#endif /* BCMDBG_PTRACE */ + pktlist->count--; + return; + } + } + ASSERT(0); +} + +void +pktlist_remove(pktlist_info_t *pktlist, void *pkt) +{ + void *p; + for (p = pkt; p != NULL; p = PKTCLINK(p)) + _pktlist_remove(pktlist, p); +} + +#ifdef BCMDBG_PTRACE +static void +_pktlist_trace(pktlist_info_t *pktlist, void *pkt, uint16 bit) +{ + uint16 *idx = PKTLIST_IDX(pkt); + + ASSERT(((*idx) < pktlist->count) && (bit < PKTTRACE_MAX_BITS)); + ASSERT(pktlist->list[(*idx)].pkt == pkt); + + pktlist->list[(*idx)].pkt_trace[bit/NBBY] |= (1 << ((bit)%NBBY)); + +} +void +pktlist_trace(pktlist_info_t *pktlist, void *pkt, uint16 bit) +{ + void *p; + for (p = pkt; p != NULL; p = PKTCLINK(p)) + _pktlist_trace(pktlist, p, bit); +} +#endif /* BCMDBG_PTRACE */ + +/* Dump the pktlist (and the contents of each packet if 'data' + * is set). 
'buf' should be large enough + */ + +char * +pktlist_dump(pktlist_info_t *pktlist, char *buf) +{ + char *obuf = buf; + uint16 i; + + if (buf != NULL) + buf += sprintf(buf, "Packet list dump:\n"); + else + printf("Packet list dump:\n"); + + for (i = 0; i < (pktlist->count); i++) { + if (buf != NULL) + buf += sprintf(buf, "Pkt_addr: 0x%p Line: %d File: %s\t", + OSL_OBFUSCATE_BUF(pktlist->list[i].pkt), pktlist->list[i].line, + pktlist->list[i].file); + else + printf("Pkt_addr: 0x%p Line: %d File: %s\t", + OSL_OBFUSCATE_BUF(pktlist->list[i].pkt), + pktlist->list[i].line, pktlist->list[i].file); + +/* #ifdef NOTDEF Remove this ifdef to print pkttag and pktdata */ + if (buf != NULL) { + if (PKTTAG(pktlist->list[i].pkt)) { + /* Print pkttag */ + buf += sprintf(buf, "Pkttag(in hex): "); + buf += bcm_format_hex(buf, PKTTAG(pktlist->list[i].pkt), + OSL_PKTTAG_SZ); + } + buf += sprintf(buf, "Pktdata(in hex): "); + buf += bcm_format_hex(buf, PKTDATA(OSH_NULL, pktlist->list[i].pkt), + PKTLEN(OSH_NULL, pktlist->list[i].pkt)); + } else { + void *pkt = pktlist->list[i].pkt, *npkt; + + printf("Pkt[%d] Dump:\n", i); + while (pkt) { + int hroom; + uint pktlen; + uchar *src; +#ifdef BCMDBG_PTRACE + uint16 *idx = PKTLIST_IDX(pkt); + + ASSERT((*idx) < pktlist->count); + prhex("Pkt Trace (in hex):", pktlist->list[(*idx)].pkt_trace, + PKTTRACE_MAX_BYTES); +#endif /* BCMDBG_PTRACE */ + npkt = (void *)PKTNEXT(OSH_NULL, pkt); + PKTSETNEXT(OSH_NULL, pkt, NULL); + + src = (uchar *)(PKTTAG(pkt)); + pktlen = PKTLEN(OSH_NULL, pkt); + hroom = PKTHEADROOM(OSH_NULL, pkt); + + printf("Pkttag_addr: %p\n", OSL_OBFUSCATE_BUF(src)); + if (src) + prhex("Pkttag(in hex): ", src, OSL_PKTTAG_SZ); + src = (uchar *) (PKTDATA(OSH_NULL, pkt)); + printf("Pkthead_addr: %p len: %d\n", + OSL_OBFUSCATE_BUF(src - hroom), hroom); + prhex("Pkt headroom content(in hex): ", src - hroom, hroom); + printf("Pktdata_addr: %p len: %d\n", + OSL_OBFUSCATE_BUF(src), pktlen); + prhex("Pktdata(in hex): ", src, pktlen); + + pkt = npkt; + } + } +/* #endif NOTDEF */ + + if (buf != NULL) + buf += sprintf(buf, "\n"); + else + printf("\n"); + } + return obuf; +} +#endif /* BCMDBG_PKT */ + +/* iovar table lookup */ +/* could mandate sorted tables and do a binary search */ +const bcm_iovar_t* +bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) +{ + const bcm_iovar_t *vi; + const char *lookup_name; + + /* skip any ':' delimited option prefixes */ + lookup_name = strrchr(name, ':'); + if (lookup_name != NULL) + lookup_name++; + else + lookup_name = name; + + ASSERT(table != NULL); + + for (vi = table; vi->name; vi++) { + if (!strcmp(vi->name, lookup_name)) + return vi; + } + /* ran to end of table */ + + return NULL; /* var name not found */ +} + +int +bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, uint len, bool set) +{ + int bcmerror = 0; + BCM_REFERENCE(arg); + + /* length check on io buf */ + switch (vi->type) { + case IOVT_BOOL: + case IOVT_INT8: + case IOVT_INT16: + case IOVT_INT32: + case IOVT_UINT8: + case IOVT_UINT16: + case IOVT_UINT32: + /* all integers are int32 sized args at the ioctl interface */ + if (len < sizeof(int)) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_BUFFER: + /* buffer must meet minimum length requirement */ + if (len < vi->minlen) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_VOID: + if (!set) { + /* Cannot return nil... 
*/ + bcmerror = BCME_UNSUPPORTED; + } + break; + + default: + /* unknown type for length check in iovar info */ + ASSERT(0); + bcmerror = BCME_UNSUPPORTED; + } + + return bcmerror; +} + +/* + * Hierarchical Multiword bitmap based small id allocator. + * + * Multilevel hierarchy bitmap. (maximum 2 levels) + * First hierarchy uses a multiword bitmap to identify 32bit words in the + * second hierarchy that have at least a single bit set. Each bit in a word of + * the second hierarchy represents a unique ID that may be allocated. + * + * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed. + * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word word + * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs. + * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap wordss identifying first non + * non-zero bitmap word carrying at least one free ID. + * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations. + * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID + * + * Design Notes: + * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. A runtime count of how many + * bits are computed each time on allocation and deallocation, requiring 4 + * array indexed access and 3 arithmetic operations. When not defined, a runtime + * count of set bits state is maintained. Upto 32 Bytes per 1024 IDs is needed. + * In a 4K max ID allocator, up to 128Bytes are hence used per instantiation. + * In a memory limited system e.g. dongle builds, a CPU for memory tradeoff may + * be used by defining BCM_MWBMAP_USE_CNTSETBITS. + * + * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array + * size is fixed. No intention to support larger than 4K indice allocation. ID + * allocators for ranges smaller than 4K will have a wastage of only 12Bytes + * with savings in not having to use an indirect access, had it been dynamically + * allocated. + */ +#if defined(DONGLEBUILD) +#define BCM_MWBMAP_USE_CNTSETBITS /* runtime count set bits */ +#if defined(PCIEDEV_HOST_PKTID_AUDIT_ENABLED) +#define BCM_MWBMAP_ITEMS_MAX (38 * 1024) +#else /* ! PCIEDEV_HOST_PKTID_AUDIT_ENABLED */ +#define BCM_MWBMAP_ITEMS_MAX (7 * 1024) +#endif /* PCIEDEV_HOST_PKTID_AUDIT_ENABLED */ +#else /* ! 
DONGLEBUILD */ +#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */ +#endif /* DONGLEBUILD */ + +#define BCM_MWBMAP_BITS_WORD (NBITS(uint32)) +#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD) +#define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD) +#define BCM_MWBMAP_SHIFT_OP (5) +#define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1)) +#define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP) +#define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP) + +/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */ +#define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl)) +#define BCM_MWBMAP_HDL(ptr) ((void *)(ptr)) + +#if defined(BCM_MWBMAP_DEBUG) +#define BCM_MWBMAP_AUDIT(mwb) \ + do { \ + ASSERT((mwb != NULL) && \ + (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \ + bcm_mwbmap_audit(mwb); \ + } while (0) +#define MWBMAP_ASSERT(exp) ASSERT(exp) +#define MWBMAP_DBG(x) printf x +#else /* !BCM_MWBMAP_DEBUG */ +#define BCM_MWBMAP_AUDIT(mwb) do {} while (0) +#define MWBMAP_ASSERT(exp) do {} while (0) +#define MWBMAP_DBG(x) +#endif /* !BCM_MWBMAP_DEBUG */ + +typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */ + uint16 wmaps; /* Total number of words in free wd bitmap */ + uint16 imaps; /* Total number of words in free id bitmap */ + int32 ifree; /* Count of free indices. Used only in audits */ + uint16 total; /* Total indices managed by multiword bitmap */ + + void * magic; /* Audit handle parameter from user */ + + uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of */ +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */ +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + + uint32 id_bitmap[0]; /* Second level bitmap */ +} bcm_mwbmap_t; + +/* Incarnate a hierarchical multiword bitmap based small index allocator. */ +struct bcm_mwbmap * +BCMATTACHFN(bcm_mwbmap_init)(osl_t *osh, uint32 items_max) +{ + struct bcm_mwbmap * mwbmap_p; + uint32 wordix, size, words, extra; + + /* Implementation Constraint: Uses 32bit word bitmap */ + MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U); + MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U); + MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX)); + MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U); + + ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX); + + /* Determine the number of words needed in the multiword bitmap */ + extra = BCM_MWBMAP_MODOP(items_max); + words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U); + + /* Allocate runtime state of multiword bitmap */ + /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */ + size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words); + mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size); + if (mwbmap_p == (bcm_mwbmap_t *)NULL) { + ASSERT(0); + goto error1; + } + memset(mwbmap_p, 0, size); + + /* Initialize runtime multiword bitmap state */ + mwbmap_p->imaps = (uint16)words; + mwbmap_p->ifree = (int32)items_max; + mwbmap_p->total = (uint16)items_max; + + /* Setup magic, for use in audit of handle */ + mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p); + + /* Setup the second level bitmap of free indices */ + /* Mark all indices as available */ + for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) { + mwbmap_p->id_bitmap[wordix] = (uint32)(~0U); +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD; +#endif /* ! 
BCM_MWBMAP_USE_CNTSETBITS */ + } + + /* Ensure that extra indices are tagged as un-available */ + if (extra) { /* fixup the free ids in last bitmap and wd_count */ + uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1]; + *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */ +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + } + + /* Setup the first level bitmap hierarchy */ + extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps); + words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U); + + mwbmap_p->wmaps = (uint16)words; + + for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++) + mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U); + if (extra) { + uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1]; + *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ + } + + return mwbmap_p; + +error1: + return BCM_MWBMAP_INVALID_HDL; +} + +/* Release resources used by multiword bitmap based small index allocator. */ +void +BCMATTACHFN(bcm_mwbmap_fini)(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap) + + (sizeof(uint32) * mwbmap_p->imaps)); + return; +} + +/* Allocate a unique small index using a multiword bitmap index allocator. */ +uint32 +BCMFASTPATH(bcm_mwbmap_alloc)(struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 wordix, bitmap; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + /* Start with the first hierarchy */ + for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) { + + bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */ + + if (bitmap != 0U) { + + uint32 count, bitix, *bitmap_p; + + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + + /* clear all except trailing 1 */ + if (bitmap != (1u << 31u)) { + bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); + } + MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == + bcm_count_leading_zeros(bitmap)); + bitix = (BCM_MWBMAP_BITS_WORD - 1) + - bcm_count_leading_zeros(bitmap); /* use asm clz */ + wordix = BCM_MWBMAP_MULOP(wordix) + bitix; + + /* Clear bit if wd count is 0, without conditional branch */ +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1; +#else /* ! BCM_MWBMAP_USE_CNTSETBITS */ + mwbmap_p->wd_count[wordix]--; + count = mwbmap_p->wd_count[wordix]; + MWBMAP_ASSERT(count == + (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1)); +#endif /* ! 
BCM_MWBMAP_USE_CNTSETBITS */ + MWBMAP_ASSERT(count >= 0); + + /* clear wd_bitmap bit if id_map count is 0 */ + bitmap = (count == 0) << bitix; + + MWBMAP_DBG(( + "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count)); + + *bitmap_p ^= bitmap; + + /* Use bitix in the second hierarchy */ + bitmap_p = &mwbmap_p->id_bitmap[wordix]; + + bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */ + MWBMAP_ASSERT(bitmap != 0U); + + /* clear all except trailing 1 */ + if (bitmap != (1u << 31u)) { + bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); + } + MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == + bcm_count_leading_zeros(bitmap)); + bitix = BCM_MWBMAP_MULOP(wordix) + + (BCM_MWBMAP_BITS_WORD - 1) + - bcm_count_leading_zeros(bitmap); /* use asm clz */ + + mwbmap_p->ifree--; /* decrement system wide free count */ + MWBMAP_ASSERT(mwbmap_p->ifree >= 0); + + MWBMAP_DBG(( + "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, + mwbmap_p->ifree)); + + *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */ + + return bitix; + } + } + + ASSERT(mwbmap_p->ifree == 0); + + return BCM_MWBMAP_INVALID_IDX; +} + +/* Force an index at a specified position to be in use */ +void +bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 count, wordix, bitmap, *bitmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + ASSERT(bitix < mwbmap_p->total); + + /* Start with second hierarchy */ + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix)); + bitmap_p = &mwbmap_p->id_bitmap[wordix]; + + ASSERT((*bitmap_p & bitmap) == bitmap); + + mwbmap_p->ifree--; /* update free count */ + ASSERT(mwbmap_p->ifree >= 0); + + MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, + mwbmap_p->ifree)); + + *bitmap_p ^= bitmap; /* mark as in use */ + + /* Update first hierarchy */ + bitix = wordix; + + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]); +#else /* ! BCM_MWBMAP_USE_CNTSETBITS */ + mwbmap_p->wd_count[bitix]--; + count = mwbmap_p->wd_count[bitix]; + MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix])); +#endif /* ! 
BCM_MWBMAP_USE_CNTSETBITS */
+	MWBMAP_ASSERT(count >= 0);
+
+	bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+	MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+		BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+		(*bitmap_p) ^ bitmap, count));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void
+BCMPOSTTRAPFASTPATH(bcm_mwbmap_free)(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT_FP(bitix < mwbmap_p->total);
+
+	/* Start with second level hierarchy */
+	wordix = BCM_MWBMAP_DIVOP(bitix);
+	bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT_FP((*bitmap_p & bitmap) == 0U);	/* ASSERT not a double free */
+
+	mwbmap_p->ifree++; /* update free count */
+	ASSERT_FP(mwbmap_p->ifree <= mwbmap_p->total);
+
+	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+		bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+		mwbmap_p->ifree));
+
+	*bitmap_p |= bitmap; /* mark as available */
+
+	/* Now update first level hierarchy */
+
+	bitix = wordix;
+
+	wordix = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+	bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+	{
+		uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[bitix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+
+		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+			bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+	}
+#endif /* BCM_MWBMAP_DEBUG */
+
+	*bitmap_p |= bitmap;
+
+	return;
+}
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is in use or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	wordix = BCM_MWBMAP_DIVOP(bitix);
+	bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+
+	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
+
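A hedged usage sketch of the allocator API defined in this file (bcm_mwbmap_init/alloc/free/fini); it assumes a valid osl_t *osh from the surrounding driver context and elides error logging:

```c
static void mwbmap_usage_sketch(osl_t *osh)
{
    struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 1024u); /* manage ids 0..1023 */
    uint32 id;

    if (map == BCM_MWBMAP_INVALID_HDL)
        return;

    id = bcm_mwbmap_alloc(map);              /* grab the lowest free id */
    if (id != BCM_MWBMAP_INVALID_IDX) {
        ASSERT(!bcm_mwbmap_isfree(map, id)); /* now marked in use */
        bcm_mwbmap_free(map, id);            /* return it to the pool */
    }
    bcm_mwbmap_fini(osh, map);
}
```

+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+	uint32 ix, count;
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n",
+		OSL_OBFUSCATE_BUF((void *)mwbmap_p),
+		mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);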
+		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+		printf("\n");
+	}
+	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[ix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+		printf("\n");
+	}
+
+	return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+			if ((*bitmap_p) & (1 << bitix)) {
+				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				count = mwbmap_p->wd_count[idmap_ix];
+				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				ASSERT(count != 0U);
+				free_cnt += count;
+			}
+		}
+	}
+
+	ASSERT((int)free_cnt == mwbmap_p->ifree);
+}
+/* END: Multiword bitmap based unique 32bit id allocator. */
+
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+	uint32	failures;  /* count of failures */
+	void	*dbg;      /* debug placeholder */
+	uint16	total;     /* total number of ids managed by allocator */
+	uint16	start;     /* start value of 16bit ids to be managed */
+	int	stack_idx; /* index into stack of available ids */
+	uint16	stack[0];  /* stack of 16 bit ids */
+} id16_map_t;
+
+#define ID16_MAP_SZ(items)      (sizeof(id16_map_t) + \
+	                             (sizeof(uint16) * (items)))
+
+#if defined(BCM_DBG)
+
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
+
+typedef struct id16_map_dbg {
+	uint16	total;
+	bool	avail[0];
+} id16_map_dbg_t;
+#define	ID16_MAP_DBG_SZ(items)  (sizeof(id16_map_dbg_t) + \
+	                             (sizeof(bool) * (items)))
+#define	ID16_MAP_MSG(x)         printf x
+#else
+#define	ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
+
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+	uint16 idx, val16;
+	id16_map_t * id16_map;
+
+	ASSERT(total_ids > 0);
+
+	/* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+	 * with random values.
+	 */
+	ASSERT((start_val16 == ID16_UNDEFINED) ||
+	       (start_val16 + total_ids) < ID16_INVALID);
+
+	id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+	if (id16_map == NULL) {
+		return NULL;
+	}
+
+	id16_map->total = total_ids;
+	id16_map->start = start_val16;
+	id16_map->failures = 0;
+	id16_map->dbg = NULL;
+
+	/*
+	 * Populate stack with 16bit id values, commencing with start_val16.
+	 * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map.
+ */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids)); + + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return (void *)id16_map; +} + +void * /* Destruct an id16 allocator instance */ +id16_map_fini(osl_t *osh, void * id16_map_hndl) +{ + uint16 total_ids; + id16_map_t * id16_map; + + if (id16_map_hndl == NULL) + return NULL; + + id16_map = (id16_map_t *)id16_map_hndl; + + total_ids = id16_map->total; + ASSERT(total_ids > 0); + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids)); + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + id16_map->total = 0; + MFREE(osh, id16_map, ID16_MAP_SZ(total_ids)); + + return NULL; +} + +void +id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16) +{ + uint16 idx, val16; + id16_map_t * id16_map; + + ASSERT(total_ids > 0); + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. + */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); + + id16_map = (id16_map_t *)id16_map_hndl; + if (id16_map == NULL) { + return; + } + + id16_map->total = total_ids; + id16_map->start = start_val16; + id16_map->failures = 0; + + /* Populate stack with 16bit id values, commencing with start_val16 */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ +} + +uint16 /* Allocate a unique 16bit id */ +BCMFASTPATH(id16_map_alloc)(void * id16_map_hndl) +{ + uint16 val16; + id16_map_t * id16_map; + + ASSERT_FP(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + + ASSERT_FP(id16_map->total > 0); + + if (id16_map->stack_idx < 0) { + id16_map->failures++; + return ID16_INVALID; + } + + val16 = id16_map->stack[id16_map->stack_idx]; + id16_map->stack_idx--; + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + ASSERT_FP((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); + + if (id16_map->dbg) { /* Validate val16 */ + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + ASSERT_FP(id16_map_dbg->avail[val16 - id16_map->start] == TRUE); + id16_map_dbg->avail[val16 - id16_map->start] = FALSE; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return val16; +} + +void /* Free a 16bit id value into the id16 allocator */ +BCMFASTPATH(id16_map_free)(void * id16_map_hndl, uint16 val16) +{ + id16_map_t * id16_map; + + ASSERT_FP(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + +#if defined(BCM_DBG) && 
defined(BCM_DBG_ID16)
+	ASSERT_FP((id16_map->start == ID16_UNDEFINED) ||
+	          (val16 < (id16_map->start + id16_map->total)));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT_FP(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+		id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->stack_idx++;
+	id16_map->stack[id16_map->stack_idx] = val16;
+}
+
+uint32 /* Returns number of failures to allocate a unique id16 */
+id16_map_failures(void * id16_map_hndl)
+{
+	ASSERT(id16_map_hndl != NULL);
+	return ((id16_map_t *)id16_map_hndl)->failures;
+}
+
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+	int idx;
+	int insane = 0;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT(id16_map->stack_idx >= -1);
+	ASSERT(id16_map->stack_idx < (int)id16_map->total);
+
+	if (id16_map->start == ID16_UNDEFINED)
+		goto done;
+
+	for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+		ASSERT(id16_map->stack[idx] >= id16_map->start);
+		ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+		if (id16_map->dbg) {
+			uint16 val16 = id16_map->stack[idx];
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16 - id16_map->start]
+					!= TRUE) {
+				insane |= 1;
+				ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+				              OSL_OBFUSCATE_BUF(id16_map_hndl), idx, val16));
+			}
+		}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+		uint16 avail = 0; /* Audit available ids counts */
+		for (idx = 0; idx < id16_map_dbg->total; idx++) {
+			if (id16_map_dbg->avail[idx] == TRUE)
+				avail++;
+		}
+		if (avail && (avail != (id16_map->stack_idx + 1))) {
+			insane |= 1;
+			ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+			              OSL_OBFUSCATE_BUF(id16_map_hndl),
+			              avail, id16_map->stack_idx));
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+done:
+	/* invoke any other system audits */
+	return (!!insane);
+}
+/* END: Simple id16 allocator */
+
+void
+BCMATTACHFN(dll_pool_detach)(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size;
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+	if (pool)
+		MFREE(osh, pool, mem_size);
+}
+dll_pool_t *
+BCMATTACHFN(dll_pool_init)(void * osh, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size, i;
+	dll_pool_t * dll_pool_p;
+	dll_t * elem_p;
+
+	ASSERT(elem_size > sizeof(dll_t));
+
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+	if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
+		ASSERT(0);
+		return dll_pool_p;
+	}
+
+	dll_init(&dll_pool_p->free_list);
+	dll_pool_p->elems_max = elems_max;
+	dll_pool_p->elem_size = elem_size;
+
+	elem_p = dll_pool_p->elements;
+	for (i = 0; i < elems_max; i++) {
+		dll_append(&dll_pool_p->free_list, elem_p);
+		elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+	}
+
+	dll_pool_p->free_count = elems_max;
+
+	return dll_pool_p;
+}
+
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+	dll_t * elem_p;
+
+	if (dll_pool_p->free_count == 0) {
+		ASSERT(dll_empty(&dll_pool_p->free_list));
+		return NULL;
+	}
+
+	elem_p = dll_head_p(&dll_pool_p->free_list);
+	dll_delete(elem_p);
+	dll_pool_p->free_count -= 1;
+
+	return (void *)elem_p;
+}
+
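A hedged usage sketch of the dll_pool API in this section; my_elem_t is a hypothetical element type whose first member must be the dll_t link (elem_size must exceed sizeof(dll_t), as dll_pool_init asserts), and dll_pool_free() is the routine defined immediately below:

```c
typedef struct my_elem {
    dll_t  node;     /* must be first: the pool links elements through this */
    uint32 payload;
} my_elem_t;

static void dll_pool_usage_sketch(osl_t *osh)
{
    dll_pool_t *pool = dll_pool_init(osh, 16u, sizeof(my_elem_t));
    my_elem_t *e;

    if (pool == NULL)
        return;

    e = (my_elem_t *)dll_pool_alloc(pool);
    if (e != NULL) {
        e->payload = 0xC0FFEEu;  /* use the element */
        dll_pool_free(pool, e);  /* prepend back onto the free list */
    }
    dll_pool_detach(osh, pool, 16u, sizeof(my_elem_t));
}
```

+void
+BCMPOSTTRAPFN(dll_pool_free)(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;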
+	dll_prepend(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;
+	dll_append(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+#ifdef BCMDBG
+void
+dll_pool_dump(dll_pool_t * dll_pool_p, dll_elem_dump elem_dump)
+{
+	dll_t * elem_p;
+	dll_t * next_p;
+	printf("dll_pool<%p> free_count<%u> elems_max<%u> elem_size<%u>\n",
+		OSL_OBFUSCATE_BUF(dll_pool_p), dll_pool_p->free_count,
+		dll_pool_p->elems_max, dll_pool_p->elem_size);
+
+	for (elem_p = dll_head_p(&dll_pool_p->free_list);
+	     !dll_end(&dll_pool_p->free_list, elem_p); elem_p = next_p) {
+
+		next_p = dll_next_p(elem_p);
+		printf("\telem<%p>\n", OSL_OBFUSCATE_BUF(elem_p));
+		if (elem_dump != NULL)
+			elem_dump((void *)elem_p);
+	}
+}
+#endif /* BCMDBG */
+
+#endif /* BCMDRIVER */
+
+#if defined(BCMDRIVER) || defined(WL_UNITTEST)
+
+/* triggers bcm_bprintf to print to kernel log */
+bool bcm_bprintf_bypass = FALSE;
+
+/* Initialization of bcmstrbuf structure */
+void
+BCMPOSTTRAPFN(bcm_binit)(struct bcmstrbuf *b, char *buf, uint size)
+{
+	b->origsize = b->size = size;
+	b->origbuf = b->buf = buf;
+	if (size > 0) {
+		buf[0] = '\0';
+	}
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+BCMPOSTTRAPFN(bcm_bprintf)(struct bcmstrbuf *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+	if (bcm_bprintf_bypass == TRUE) {
+		printf("%s", b->buf);
+		goto exit;
+	}
+
+	/* A non-ANSI-C99-compliant vsnprintf returns -1, a C99-compliant one
+	 * returns r >= b->size, and bcmstdlib returns 0; handle all cases.
+	 * r == 0 also occurs when strlen(fmt) is zero, typically when ""
+	 * is passed as the format string.
+	 */
+	if ((r == -1) || (r >= (int)b->size)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+exit:
+	va_end(ap);
+
+	return r;
+}
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, uint len)
+{
+	uint i;
+
+	if (msg != NULL && msg[0] != '\0')
+		bcm_bprintf(b, "%s", msg);
+	for (i = 0u; i < len; i ++)
+		bcm_bprintf(b, "%02X", buf[i]);
+	if (newline)
+		bcm_bprintf(b, "\n");
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, uint len)
+{
+	uint i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0u; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+
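Before the IE search helpers that follow, here is a standalone sketch (not the driver API) of the basic id/len/data walk that bcm_find_ie() performs, including the truncation check every step needs:

```c
#include <stdint.h>
#include <stddef.h>

typedef struct tlv { uint8_t id; uint8_t len; uint8_t data[]; } tlv_t;

static const tlv_t *tlv_find(const uint8_t *buf, size_t buflen, uint8_t id)
{
    while (buflen >= 2u) {
        const tlv_t *t = (const tlv_t *)buf;
        size_t tl = 2u + t->len;      /* header plus payload */

        if (tl > buflen)
            break;                    /* truncated IE: stop walking */
        if (t->id == id)
            return t;                 /* match */
        buf += tl;
        buflen -= tl;
    }
    return NULL;
}
```

+/* Search for an IE having a specific tag and an OUI type from a buffer.
+ * tlvs: buffer to search for IE
+ * tlvs_len: buffer length
+ * tag: IE tag
+ * oui: Specific OUI to match
+ * oui_len: length of the OUI
+ * type: OUI type
+ * Return the matched IE, else return null.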
+*/
+bcm_tlv_t *
+bcm_find_ie(const uint8* tlvs, uint tlvs_len, uint8 tag, uint8 oui_len,
+	const char *oui, uint8 type)
+{
+	const bcm_tlv_t *ie;
+
+	COV_TAINTED_DATA_SINK(tlvs_len);
+	COV_NEG_SINK(tlvs_len);
+
+	/* Walk through the IEs looking for an OUI match */
+	while ((ie = bcm_parse_tlvs_advance(&tlvs, &tlvs_len, tag,
+			BCM_TLV_ADVANCE_TO))) {
+		if ((ie->len > oui_len) &&
+		    !bcmp(ie->data, oui, oui_len) &&
+		    ie->data[oui_len] == type) {
+
+			COV_TAINTED_DATA_ARG(ie);
+
+			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+			return (bcm_tlv_t *)(ie);	/* a match */
+			GCC_DIAGNOSTIC_POP();
+		}
+		/* Point to the next IE */
+		bcm_tlv_buffer_advance_past(ie, &tlvs, &tlvs_len);
+	}
+
+	return NULL;
+}
+
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui, uint8 *type, uint type_len)
+{
+	const bcm_tlv_t *ie;
+	uint8 ie_len;
+
+	COV_TAINTED_DATA_SINK(tlvs_len);
+	COV_NEG_SINK(tlvs_len);
+
+	ie = (const bcm_tlv_t*)tlvs;
+
+	/* make sure we are looking at a valid IE */
+	if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+		return NULL;
+	}
+
+	/* Walk through the IEs looking for an OUI match */
+	do {
+		ie_len = ie->len;
+		if ((ie->id == DOT11_MNG_VS_ID) &&
+		    (ie_len >= (DOT11_OUI_LEN + type_len)) &&
+		    !bcmp(ie->data, voui, DOT11_OUI_LEN))
+		{
+			/* compare optional type */
+			if (type_len == 0 ||
+			    !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+
+				COV_TAINTED_DATA_ARG(ie);
+
+				GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+				return (bcm_tlv_t *)(ie);	/* a match */
+				GCC_DIAGNOSTIC_POP();
+			}
+		}
+	} while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
+
+	return NULL;
+}
+
+#if defined(WLTINYDUMP) || defined(BCMDBG) || defined(WLMSG_INFORM) || \
+	defined(WLMSG_ASSOC) || defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN	((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif /* WLTINYDUMP || BCMDBG || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT || WLMSG_WSEC */
+
+#endif /* BCMDRIVER || WL_UNITTEST */
+
+/* Mask a few bytes of the MAC address in all prints/eventlogs, per customer requirement. */
+int
+BCMRAMFN(bcm_addrmask_set)(int enable)
+{
+#ifdef PRIVACY_MASK
+	struct ether_addr *privacy = privacy_addrmask_get();
+	if (enable) {
+		/* apply mask as (For SS)
+		 * orig   : 12:34:56:78:90:ab
+		 * masked : 12:xx:xx:xx:x0:ab
+		 */
+		privacy->octet[1] = privacy->octet[2] =
+			privacy->octet[3] = 0;
+		privacy->octet[0] = privacy->octet[5] = 0xff;
+		privacy->octet[4] = 0x0f;
+	} else
+	{
+		/* No masking. All are 0xff. */
+		memcpy(privacy, &ether_bcast, sizeof(struct ether_addr));
+	}
+
+	return BCME_OK;
+#else
+	BCM_REFERENCE(enable);
+	return BCME_UNSUPPORTED;
+#endif /* PRIVACY_MASK */
+
+}
+
+int
+bcm_addrmask_get(int *val)
+{
+#ifdef PRIVACY_MASK
+	struct ether_addr *privacy = privacy_addrmask_get();
+	if (!eacmp(&ether_bcast, privacy)) {
+		*val = FALSE;
+	} else {
+		*val = TRUE;
+	}
+
+	return BCME_OK;
+#else
+	BCM_REFERENCE(val);
+	return BCME_UNSUPPORTED;
+#endif
+}
+
+uint64
+BCMRAMFN(bcm_ether_ntou64)(const struct ether_addr *ea)
+{
+	uint64 mac;
+	struct ether_addr addr;
+
+	memcpy(&addr, ea, sizeof(struct ether_addr));
+
+#ifdef PRIVACY_MASK
+	struct ether_addr *privacy = privacy_addrmask_get();
+	if (!ETHER_ISMULTI(ea)) {
+		*(uint32*)(&addr.octet[0]) &= *((uint32*)&privacy->octet[0]);
+		*(uint16*)(&addr.octet[4]) &= *((uint16*)&privacy->octet[4]);
+	}
+#endif /* PRIVACY_MASK */
+
+	mac = ((uint64)HTON16(*((const uint16*)&addr.octet[4]))) << 32 |
+		HTON32(*((const uint32*)&addr.octet[0]));
+	return (mac);
+}
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+	static const char hex[] =
+	{
+		'0', '1', '2', '3', '4', '5', '6', '7',
+		'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+	};
+	const uint8 *octet = ea->octet;
+	char *p = buf;
+	int i;
+
+	for (i = 0; i < 6; i++, octet++) {
+		*p++ = hex[(*octet >> 4) & 0xf];
+		*p++ = hex[*octet & 0xf];
+		*p++ = ':';
+	}
+
+	*(p-1) = '\0';
+
+	return (buf);
+}
+
+/* Find the position of the first bit set
+ * in the given number.
+ */
+int
+bcm_find_fsb(uint32 num)
+{
+	uint8 pos = 0;
+	if (!num)
+		return pos;
+	while (!(num & 1)) {
+		num >>= 1;
+		pos++;
+	}
+	return (pos+1);
+}
+
+/* TODO: need to pass in the buffer length for validation check */
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+	snprintf(buf, 16, "%d.%d.%d.%d",
+	         ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+	return (buf);
+}
+
+/* TODO: need to pass in the buffer length for validation check */
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+	/* Implementing RFC 5952 Sections 4 + 5 */
+	/* Not thoroughly tested */
+	uint16 tmp[8];
+	uint16 *a = &tmp[0];
+	char *p = buf;
+	int i, i_max = -1, cnt = 0, cnt_max = 1;
+	uint8 *a4 = NULL;
+	memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if (a[i]) {
+			if (cnt > cnt_max) {
+				cnt_max = cnt;
+				i_max = i - cnt;
+			}
+			cnt = 0;
+		} else
+			cnt++;
+	}
+	if (cnt > cnt_max) {
+		cnt_max = cnt;
+		i_max = i - cnt;
+	}
+	if (i_max == 0 &&
+		/* IPv4-translated: ::ffff:0:a.b.c.d */
+		((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+		/* IPv4-mapped: ::ffff:a.b.c.d */
+		(cnt_max == 5 && a[5] == 0xffff)))
+		a4 = (uint8*) (a + 6);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if ((uint8*) (a + i) == a4) {
+			snprintf(p, 17, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+			break;
+		} else if (i == i_max) {
+			*p++ = ':';
+			i += cnt_max - 1;
+			p[0] = ':';
+			p[1] = '\0';
+		} else {
+			if (i)
+				*p++ = ':';
+			p += snprintf(p, 8, "%x", ntoh16(a[i]));
+		}
+	}
+
+	return buf;
+}
+
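A worked check of bcm_find_fsb() above: it returns the 1-based position of the least-significant set bit, or 0 when no bit is set. The extern declaration is a stand-in for including the proper driver header.

```c
#include <assert.h>

extern int bcm_find_fsb(unsigned int num);   /* uint32 in the driver */

static void fsb_examples(void)
{
    assert(bcm_find_fsb(0x0u)  == 0);   /* no bit set */
    assert(bcm_find_fsb(0x1u)  == 1);   /* bit 0 -> position 1 */
    assert(bcm_find_fsb(0x28u) == 4);   /* 0b101000: bit 3 -> position 4 */
}
```

+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+const unsigned char bcm_ctype[256] = {
+
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,	/* 0-7 */
+	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+	_BCM_C,	/* 8-15 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,	/* 16-23 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,	/* 24-31 */
+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,	/* 32-39 */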
_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ + _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ + _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ + _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, + _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ + _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, + _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ + _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ +}; + +uint64 +bcm_strtoull(const char *cp, char **endp, uint base) +{ + uint64 result, last_result = 0, value; + bool minus; + + minus = FALSE; + + while (bcm_isspace(*cp)) + cp++; + + if (cp[0] == '+') + cp++; + else if (cp[0] == '-') { + minus = TRUE; + cp++; + } + + if (base == 0) { + if (cp[0] == '0') { + if ((cp[1] == 'x') || (cp[1] == 'X')) { + base = 16; + cp = &cp[2]; + } else { + base = 8; + cp = &cp[1]; + } + } else + base = 10; + } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { + cp = &cp[2]; + } + + result = 0; + + while (bcm_isxdigit(*cp) && + (value = bcm_isdigit(*cp) ? 
*cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus) {
+			if (endp) {
+				/* Go to the end of current number */
+				while (bcm_isxdigit(*cp)) {
+					cp++;
+				}
+				*endp = DISCARD_QUAL(cp, char);
+			}
+			return (ulong)-1;
+		}
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = DISCARD_QUAL(cp, char);
+
+	return (result);
+}
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+	return (ulong) bcm_strtoull(cp, endp, base);
+}
+
+int
+bcm_atoi(const char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+	uint len, nlen;
+	uint i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return DISCARD_QUAL(haystack, char);
+
+	nlen = (uint)strlen(needle);
+	if (strlen(haystack) < nlen) {
+		return NULL;
+	}
+	len = (uint)strlen(haystack) - nlen + 1u;
+
+	for (i = 0u; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return DISCARD_QUAL(&haystack[i], char);
+	return (NULL);
+}
+
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
+{
+	for (; s_len >= substr_len; s++, s_len--)
+		if (strncmp(s, substr, substr_len) == 0)
+			return DISCARD_QUAL(s, char);
+
+	return NULL;
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+*  but allows bcmstrtok() to be used by different strings or callers at the same
+*  time. Each call modifies '*string' by substituting a null character for the
+*  first delimiter that is encountered, and updates 'string' to point to the char
+*  after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+ */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare two strings case-insensitively, up to a max of 'cnt'
+*             characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+	int i = 0;
+	char *ep;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+		p = ep;
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
+
+/* parse a nnn.nnn.nnn.nnn format IPV4 address */
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
+
+	int i = 0;
+	char *c;
+	for (;;) {
+		ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+		if (*c++ != '.' || i == IPV4_ADDR_LEN)
+			break;
+		p = c;
+	}
+	return (i == IPV4_ADDR_LEN);
+}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
+
+int
+ether_isbcast(const void *ea)
+{
+	return (memcmp(ea, &ether_bcast, sizeof(struct ether_addr)) == 0);
+}
+
+int
+BCMPOSTTRAPFN(ether_isnulladdr)(const void *ea)
+{
+	const uint8 *ea8 = (const uint8 *)ea;
+	return !(ea8[5] || ea8[4] || ea8[3] || ea8[2] || ea8[1] || ea8[0]);
+}
+
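A hedged round-trip sketch for the address parsers above, paired with the formatters defined earlier in this file:

```c
static void addr_parse_sketch(void)
{
    struct ether_addr ea;
    struct ipv4_addr ip;
    char buf[18]; /* 17 bytes for "xx:xx:xx:xx:xx:xx" plus NUL */

    if (bcm_ether_atoe("00:90:4c:aa:bb:cc", &ea))
        printf("mac: %s\n", bcm_ether_ntoa(&ea, buf));

    if (bcm_atoipv4("192.168.1.10", &ip))
        printf("ip: %s\n", bcm_ip_ntoa(&ip, buf)); /* buf must be >= 16 bytes */
}
```

+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strlcpy, but returns count
+ * of bytes copied.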
Minimum bytes copied is null char(1)/wchar(2) + */ +ulong +wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) +{ + ulong copyct = 1; + ushort i; + + if (abuflen == 0) + return 0; + + /* wbuflen is in bytes */ + wbuflen /= sizeof(ushort); + + for (i = 0; i < wbuflen; ++i) { + if (--abuflen == 0) + break; + *abuf++ = (char) *wbuf++; + ++copyct; + } + *abuf = '\0'; + + return copyct; +} +#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ + +#ifdef BCM_OBJECT_TRACE + +#define BCM_OBJECT_MERGE_SAME_OBJ 0 + +/* some place may add / remove the object to trace list for Linux: */ +/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */ +/* remove: osl_pktfree dev_kfree_skb netif_rx */ + +#if defined(__linux__) +#define BCM_OBJDBG_COUNT (1024 * 100) +static spinlock_t dbgobj_lock; +#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock) +#define BCM_OBJDBG_LOCK_DESTROY() +#define BCM_OBJDBG_LOCK spin_lock_irqsave +#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore +#else +#define BCM_OBJDBG_COUNT (256) +#define BCM_OBJDBG_LOCK_INIT() +#define BCM_OBJDBG_LOCK_DESTROY() +#define BCM_OBJDBG_LOCK(x, y) +#define BCM_OBJDBG_UNLOCK(x, y) +#endif /* else OS */ + +#define BCM_OBJDBG_ADDTOHEAD 0 +#define BCM_OBJDBG_ADDTOTAIL 1 + +#define BCM_OBJDBG_CALLER_LEN 32 +struct bcm_dbgobj { + struct bcm_dbgobj *prior; + struct bcm_dbgobj *next; + uint32 flag; + void *obj; + uint32 obj_sn; + uint32 obj_state; + uint32 line; + char caller[BCM_OBJDBG_CALLER_LEN]; +}; + +static struct bcm_dbgobj *dbgobj_freehead = NULL; +static struct bcm_dbgobj *dbgobj_freetail = NULL; +static struct bcm_dbgobj *dbgobj_objhead = NULL; +static struct bcm_dbgobj *dbgobj_objtail = NULL; + +static uint32 dbgobj_sn = 0; +static int dbgobj_count = 0; +static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT]; + +void +bcm_object_trace_init(void) +{ + int i = 0; + BCM_OBJDBG_LOCK_INIT(); + memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT); + dbgobj_freehead = &bcm_dbg_objs[0]; + dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1]; + + for (i = 0; i < BCM_OBJDBG_COUNT; ++i) { + bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ? + dbgobj_freehead : &bcm_dbg_objs[i + 1]; + bcm_dbg_objs[i].prior = (i == 0) ? 
+ dbgobj_freetail : &bcm_dbg_objs[i - 1]; + } +} + +void +bcm_object_trace_deinit(void) +{ + if (dbgobj_objhead || dbgobj_objtail) { + printf("bcm_object_trace_deinit: not all objects are released\n"); + ASSERT(0); + } + BCM_OBJDBG_LOCK_DESTROY(); +} + +static void +bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj) +{ + if ((dbgobj == *head) && (dbgobj == *tail)) { + *head = NULL; + *tail = NULL; + } else if (dbgobj == *head) { + *head = (*head)->next; + } else if (dbgobj == *tail) { + *tail = (*tail)->prior; + } + dbgobj->next->prior = dbgobj->prior; + dbgobj->prior->next = dbgobj->next; +} + +static void +bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int addtotail) +{ + if (!(*head) && !(*tail)) { + *head = dbgobj; + *tail = dbgobj; + dbgobj->next = dbgobj; + dbgobj->prior = dbgobj; + } else if ((*head) && (*tail)) { + (*tail)->next = dbgobj; + (*head)->prior = dbgobj; + dbgobj->next = *head; + dbgobj->prior = *tail; + if (addtotail == BCM_OBJDBG_ADDTOTAIL) + *tail = dbgobj; + else + *head = dbgobj; + } else { + ASSERT(0); /* can't be this case */ + } +} + +static INLINE void +bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int movetotail) +{ + if ((*head) && (*tail)) { + if (movetotail == BCM_OBJDBG_ADDTOTAIL) { + if (dbgobj != (*tail)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } else { + if (dbgobj != (*head)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } + } else { + ASSERT(0); /* can't be this case */ + } +} + +void +bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + if (opt == BCM_OBJDBG_ADD_PKT || + opt == BCM_OBJDBG_ADD) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + printf("bcm_object_trace_opr: obj %p allocated from %s(%d)," + " allocate again from %s(%d)\n", + dbgobj->obj, + dbgobj->caller, dbgobj->line, + caller, line); + ASSERT(0); + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +#if BCM_OBJECT_MERGE_SAME_OBJ + dbgobj = dbgobj_freetail; + while (dbgobj) { + if (dbgobj->obj == obj) { + goto FREED_ENTRY_FOUND; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + + dbgobj = dbgobj_freehead; +#if BCM_OBJECT_MERGE_SAME_OBJ +FREED_ENTRY_FOUND: +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + if (!dbgobj) { + printf("bcm_object_trace_opr: already got %d objects ?????????????????\n", + BCM_OBJDBG_COUNT); + ASSERT(0); + goto EXIT; + } + + bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj); + dbgobj->obj = obj; + strlcpy(dbgobj->caller, caller, sizeof(dbgobj->caller)); + dbgobj->line = line; + dbgobj->flag = 0; + if (opt == BCM_OBJDBG_ADD_PKT) { + dbgobj->obj_sn = dbgobj_sn++; + dbgobj->obj_state = 0; + /* first 4 bytes is pkt sn */ + if (((unsigned long)PKTTAG(obj)) & 0x3) + printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj)); + *(uint32*)PKTTAG(obj) = dbgobj->obj_sn; + } + bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + + dbgobj_count++; + + } else if (opt == BCM_OBJDBG_REMOVE) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if 
(dbgobj->flag) {
+				printf("bcm_object_trace_opr: rm flagged obj %p"
+					" flag 0x%08x from %s(%d)\n",
+					obj, dbgobj->flag, caller, line);
+			}
+			bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj);
+			bzero(dbgobj->caller, sizeof(dbgobj->caller));
+			strlcpy(dbgobj->caller, caller, sizeof(dbgobj->caller));
+			dbgobj->line = line;
+			bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj,
+				BCM_OBJDBG_ADDTOTAIL);
+			dbgobj_count--;
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	dbgobj = dbgobj_freetail;
+	while (dbgobj && dbgobj->obj) {
+		if (dbgobj->obj == obj) {
+			printf("bcm_object_trace_opr: obj %p already freed"
+				" from %s(%d),"
+				" try free again from %s(%d)\n",
+				obj,
+				dbgobj->caller, dbgobj->line,
+				caller, line);
+			//ASSERT(0); /* release same obj more than one time? */
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_freetail)
+			break;
+	}
+
+	printf("bcm_object_trace_opr: ################### release non-existing"
+		" obj %p from %s(%d)\n",
+		obj, caller, line);
+	//ASSERT(0); /* release same obj more than one time? */
+
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_trace_upd(void *obj, void *obj_new)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if (dbgobj->obj == obj) {
+			dbgobj->obj = obj_new;
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+	const char *caller, int line)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if ((dbgobj->obj == obj) &&
+		    ((!chksn) || (dbgobj->obj_sn == sn))) {
+#if 0
+			printf("bcm_object_trace_chk: (%s:%d) obj %p was allocated from %s(%d)\n",
+				caller, line,
+				dbgobj->obj, dbgobj->caller, dbgobj->line);
+#endif /* #if 0 */
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	dbgobj = dbgobj_freetail;
+	while (dbgobj) {
+		if ((dbgobj->obj == obj) &&
+		    ((!chksn) || (dbgobj->obj_sn == sn))) {
+			printf("bcm_object_trace_chk: (%s:%d) obj %p (sn %d state %d)"
+				" was freed from %s(%d)\n",
+				caller, line,
+				dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state,
+				dbgobj->caller, dbgobj->line);
+			goto EXIT;
+		}
+		else if (dbgobj->obj == NULL) {
+			break;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_freetail)
+			break;
+	}
+
+	printf("bcm_object_trace_chk: obj %p not found, check from %s(%d), chksn %s, sn %d\n",
+		obj, caller, line, chksn ? "yes" : "no", sn);
"yes" : "no", sn); + dbgobj = dbgobj_objtail; + while (dbgobj) { + printf("bcm_object_trace_chk: (%s:%d) obj %p sn %d was allocated from %s(%d)\n", + caller, line, + dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line); + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_feature_set(void *obj, uint32 type, uint32 value) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + if (value & BCM_OBJECT_FEATURE_CLEAR) + dbgobj->flag &= ~(value); + else + dbgobj->flag |= (value); + } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) { + dbgobj->obj_state = value; + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("bcm_object_feature_set: obj %p not found in active list\n", obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +int +bcm_object_feature_get(void *obj, uint32 type, uint32 value) +{ + int rtn = 0; + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR); + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("bcm_object_feature_get: obj %p not found in active list\n", obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return rtn; +} + +#endif /* BCM_OBJECT_TRACE */ + +uint8 * +BCMPOSTTRAPFN(bcm_write_tlv)(int type, const void *data, uint datalen, uint8 *dst) +{ + uint8 *new_dst = dst; + bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst; + + /* dst buffer should always be valid */ + ASSERT(dst); + + /* data len must be within valid range */ + ASSERT((datalen <= BCM_TLV_MAX_DATA_SIZE)); + + /* source data buffer pointer should be valid, unless datalen is 0 + * meaning no data with this TLV + */ + ASSERT((data != NULL) || (datalen == 0)); + + /* only do work if the inputs are valid + * - must have a dst to write to AND + * - datalen must be within range AND + * - the source data pointer must be non-NULL if datalen is non-zero + * (this last condition detects datalen > 0 with a NULL data pointer) + */ + if ((dst != NULL) && + ((datalen <= BCM_TLV_MAX_DATA_SIZE)) && + ((data != NULL) || (datalen == 0u))) { + + /* write type, len fields */ + dst_tlv->id = (uint8)type; + dst_tlv->len = (uint8)datalen; + + /* if data is present, copy to the output buffer and update + * pointer to output buffer + */ + if (datalen > 0u) { + + memcpy(dst_tlv->data, data, datalen); + } + + /* update the output destination poitner to point past + * the TLV written + */ + new_dst = dst + BCM_TLV_HDR_SIZE + datalen; + } + + return (new_dst); +} + +uint8 * +bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst) +{ + uint8 *new_dst = dst; + bcm_tlv_ext_t *dst_tlv = (bcm_tlv_ext_t *)dst; + + /* dst buffer should always be valid */ + ASSERT(dst); + + /* data len must be within valid 
+uint8 *
+bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	bcm_tlv_ext_t *dst_tlv = (bcm_tlv_ext_t *)dst;
+
+	/* dst buffer should always be valid */
+	ASSERT(dst);
+
+	/* data len must be within valid range */
+	ASSERT(datalen <= BCM_TLV_EXT_MAX_DATA_SIZE);
+
+	/* source data buffer pointer should be valid, unless datalen is 0
+	 * meaning no data with this TLV
+	 */
+	ASSERT((data != NULL) || (datalen == 0));
+
+	/* only do work if the inputs are valid
+	 * - must have a dst to write to AND
+	 * - datalen must be within range AND
+	 * - the source data pointer must be non-NULL if datalen is non-zero
+	 * (this last condition detects datalen > 0 with a NULL data pointer)
+	 */
+	if ((dst != NULL) &&
+	    (datalen <= BCM_TLV_EXT_MAX_DATA_SIZE) &&
+	    ((data != NULL) || (datalen == 0))) {
+
+		/* write type, len fields */
+		dst_tlv->id = (uint8)type;
+		dst_tlv->ext = ext;
+		dst_tlv->len = 1 + (uint8)datalen;
+
+		/* if data is present, copy to the output buffer and update
+		 * pointer to output buffer
+		 */
+		if (datalen > 0) {
+			memcpy(dst_tlv->data, data, datalen);
+		}
+
+		/* update the output destination pointer to point past
+		 * the TLV written
+		 */
+		new_dst = dst + BCM_TLV_EXT_HDR_SIZE + datalen;
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+BCMPOSTTRAPFN(bcm_write_tlv_safe)(int type, const void *data, uint datalen, uint8 *dst,
+	uint dst_maxlen)
+{
+	uint8 *new_dst = dst;
+
+	if ((datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+
+		/* if len + tlv hdr len is more than destlen, don't do anything
+		 * just return the buffer untouched
+		 */
+		if ((datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+
+			new_dst = bcm_write_tlv(type, data, datalen, dst);
+		}
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+	uint totlen;
+
+	ASSERT(dst && src);
+	if (dst && src) {
+
+		totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+		memcpy(dst, src_tlv, totlen);
+		new_dst = dst + totlen;
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_copy_tlv_safe(const void *src, uint8 *dst, uint dst_maxlen)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+
+	ASSERT(src);
+	if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+		new_dst = bcm_copy_tlv(src, dst);
+	}
+
+	return (new_dst);
+}
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ *       x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
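+ *
+ * Usage sketch (illustrative, not from the original source; CRC8_INIT_VALUE
+ * and CRC8_GOOD_VALUE come from bcmutils.h):
+ *
+ *   crc = hndcrc8(data, len, CRC8_INIT_VALUE);
+ *   data[len] = ~crc;                 (append complemented CRC)
+ *   valid = (hndcrc8(data, len + 1, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE);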
+ * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint8 crc8_table[256] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F +}; + +#define CRC_INNER_LOOP(n, c, x) \ + (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] + +uint8 +hndcrc8( + const uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint8 crc /* either CRC8_INIT_VALUE or previous return value */ +) +{ + /* hard code the crc loop instead of using CRC_INNER_LOOP macro + * to avoid the undefined and unnecessary (uint8 >> 8) operation. + */ + while (nbytes-- > 0) + crc = crc8_table[(crc ^ *pdata++) & 0xff]; + + return crc; +} + +/******************************************************************************* + * crc16 + * + * Computes a crc16 over the input data using the polynomial: + * + * x^16 + x^12 +x^5 + 1 + * + * The caller provides the initial value (either CRC16_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC16_GOOD_VALUE indicates a valid CRC. 
+ * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint16 crc16_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, + 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, + 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, + 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, + 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, + 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, + 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, + 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, + 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, + 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, + 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, + 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, + 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, + 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, + 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, + 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, + 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, + 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, + 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, + 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, + 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, + 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, + 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, + 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, + 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, + 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, + 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, + 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, + 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, + 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, + 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, + 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 +}; + +uint16 +hndcrc16( + const uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint16 crc /* either CRC16_INIT_VALUE or previous return value */ +) +{ + while (nbytes-- > 0) + CRC_INNER_LOOP(16, crc, *pdata++); + return crc; +} + +static const uint32 crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +/* + * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if + * accumulating over multiple pieces. 
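+ *
+ * Usage sketch (illustrative, not from the original source):
+ *   crc = hndcrc32(hdr, hdr_len, CRC32_INIT_VALUE);
+ *   crc = hndcrc32(payload, payload_len, crc);   (accumulate the next piece)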
+ */
+uint32
+hndcrc32(const uint8 *pdata, uint nbytes, uint32 crc)
+{
+	const uint8 *pend;
+	pend = pdata + nbytes;
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+
+	return crc;
+}
+
+#ifdef NOT_YET
+#define CLEN		1499	/* CRC Length */
+#define CBUFSIZ		(CLEN+4)
+#define CNBUFS		5	/* # of bufs */
+
+void
+testcrc32(void)
+{
+	uint j, k, l;
+	uint8 *buf;
+	uint len[CNBUFS];
+	uint32 crcr;
+	uint32 crc32tv[CNBUFS] =
+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+	/* step through all possible alignments */
+	for (l = 0; l <= 4; l++) {
+		for (j = 0; j < CNBUFS; j++) {
+			len[j] = CLEN;
+			for (k = 0; k < len[j]; k++)
+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+		}
+
+		for (j = 0; j < CNBUFS; j++) {
+			crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+			ASSERT(crcr == crc32tv[j]);
+		}
+	}
+
+	MFREE(buf, CBUFSIZ*CNBUFS);
+	return;
+}
+#endif /* NOT_YET */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen)
+{
+	uint len;
+
+	COV_TAINTED_DATA_SINK(buflen);
+	COV_NEG_SINK(buflen);
+
+	/* validate current elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	/* advance to next elt */
+	len = TLV_HDR_LEN + elt->len;
+	elt = (const bcm_tlv_t*)((const uint8 *)elt + len);
+
+#if defined(__COVERITY__)
+	/* The 'len' value is tainted in Coverity because it is read from the tainted data pointed
+	 * to by 'elt'. However, bcm_valid_tlv() verifies that the elt pointer is a valid element,
+	 * so its length, len = (TLV_HDR_LEN + elt->len), is in the bounds of the buffer.
+	 * Clearing the tainted attribute of 'len' for Coverity.
+	 */
+	__coverity_tainted_data_sanitize__(len);
+	if (len > *buflen) {
+		return NULL;
+	}
+#endif /* __COVERITY__ */
+
+	*buflen -= len;
+
+	/* validate next elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	COV_TAINTED_DATA_ARG(elt);
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	return (bcm_tlv_t *)(elt);
+	GCC_DIAGNOSTIC_POP();
+}
+
+/**
+ * Advance a const tlv buffer pointer and length up to the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length.
+ *
+ * @param elt      pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer   pointer to a tlv buffer
+ * @param buflen   length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, the *buffer parameter
+ * will be set to NULL and *buflen parameter will be set to zero. Otherwise,
+ * *buffer will point to elt, and *buflen will have been adjusted by the
+ * difference between *buffer and elt.
+ */
+void
+bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+	uint new_buflen;
+	const uint8 *new_buffer;
+
+	/* model the input length value as a tainted and negative sink so
+	 * Coverity will complain about unvalidated or possibly negative length values
+	 */
+	COV_TAINTED_DATA_SINK(*buflen);
+	COV_NEG_SINK(*buflen);
+
+	new_buffer = (const uint8*)elt;
+
+	/* make sure the input buffer pointer is non-null, that (buffer + buflen) does not wrap,
+	 * and that the elt pointer is in the range of [buffer, buffer + buflen]
+	 */
+	if ((*buffer != NULL) &&
+	    ((uintptr)*buffer < ((uintptr)*buffer + *buflen)) &&
+	    (new_buffer >= *buffer) &&
+	    (new_buffer < (*buffer + *buflen))) {
+		/* delta between buffer and new_buffer is <= *buflen, so truncating cast to uint
+		 * from ptrdiff is ok
+		 */
+		uint delta = (uint)(new_buffer - *buffer);
+
+		/* New buffer length is old len minus the delta from the buffer start to elt.
+		 * The check just above guarantees that the subtraction does not underflow.
+		 */
+		new_buflen = *buflen - delta;
+
+		/* validate current elt */
+		if (bcm_valid_tlv(elt, new_buflen)) {
+			/* All good, so update the input/output parameters */
+			*buffer = new_buffer;
+			*buflen = new_buflen;
+			return;
+		}
+	}
+
+	/* something did not check out, clear out the buffer info */
+	*buffer = NULL;
+	*buflen = 0;
+
+	return;
+}
+
+/**
+ * Advance a const tlv buffer pointer and length past the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length. The function also checks that the
+ * remaining buffer starts with a valid tlv.
+ *
+ * @param elt      pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer   pointer to a tlv buffer
+ * @param buflen   length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, or the remaining buffer
+ * following the elt does not begin with a tlv in the buffer bounds, the *buffer
+ * parameter will be set to NULL and *buflen parameter will be set to zero.
+ * Otherwise, *buffer will point to the first byte past elt, and *buflen will
+ * have the remaining buffer length.
+ */
+void
+bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+	/* Start by advancing the buffer up to the given elt */
+	bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+	/* if that did not work, bail out */
+	if (*buflen == 0) {
+		return;
+	}
+
+#if defined(__COVERITY__)
+	/* The elt has been verified by bcm_tlv_buffer_advance_to() to be a valid element,
+	 * so its elt->len is in the bounds of the buffer. The following check prevents
+	 * Coverity from flagging the (elt->data + elt->len) statement below as using a
+	 * tainted elt->len to index into array 'elt->data'.
+	 */
+	if (elt->len > *buflen) {
+		return;
+	}
+#endif /* __COVERITY__ */
+
+	/* We know we are advanced up to a good tlv.
+	 * Now just advance to the following tlv.
+	 */
+	elt = (const bcm_tlv_t*)(elt->data + elt->len);
+
+	bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+	return;
+}
+
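+/* Illustrative sketch (not part of the original patch): walking every TLV
+ * of one id with bcm_parse_tlvs_advance() (defined below, declared in
+ * bcmutils.h). BCM_TLV_ADVANCE_PAST moves the parse cursor past each match,
+ * so the loop is guaranteed to make progress. The helper name and the key
+ * value are made up.
+ */
+#if 0
+static uint
+count_tlvs_example(const uint8 *buf, uint buflen, uint key)
+{
+	const uint8 *p = buf;
+	uint plen = buflen;
+	uint count = 0;
+	const bcm_tlv_t *elt;
+
+	while ((elt = bcm_parse_tlvs_advance(&p, &plen, key,
+			BCM_TLV_ADVANCE_PAST)) != NULL) {
+		count++;
+	}
+	return count;
+}
+#endif /* 0 */
+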
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(const void *buf, uint buflen, uint key)
+{
+	const bcm_tlv_t *elt;
+	uint totlen;
+
+	COV_TAINTED_DATA_SINK(buflen);
+	COV_NEG_SINK(buflen);
+
+	if ((elt = (const bcm_tlv_t*)buf) == NULL) {
+		return NULL;
+	}
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		uint len = elt->len;
+
+		/* check if elt overruns buffer */
+		if (totlen < (len + TLV_HDR_LEN)) {
+			break;
+		}
+		/* did we find the ID? */
+		if ((elt->id == key)) {
+			COV_TAINTED_DATA_ARG(elt);
+
+			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+			return (bcm_tlv_t *)(elt);
+			GCC_DIAGNOSTIC_POP();
+		}
+		elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+
+	return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.
+ * The 'advance' parameter specifies what to do to the parse buf/buflen values if a
+ * matching tlv is found:
+ *	BCM_TLV_ADVANCE_NONE - do nothing
+ *	BCM_TLV_ADVANCE_TO   - move the buf up to the discovered tlv, and adjust buflen.
+ *	BCM_TLV_ADVANCE_PAST - move the buf past the discovered tlv, and adjust buflen.
+ * If a tlv is not found, no changes are made to buf/buflen
+ *
+ */
+const bcm_tlv_t *
+bcm_parse_tlvs_advance(const uint8 **buf, uint *buflen, uint key, bcm_tlv_advance_mode_t advance)
+{
+	const bcm_tlv_t *elt;
+
+	elt = bcm_parse_tlvs(*buf, *buflen, key);
+
+	if (elt == NULL) {
+		return elt;
+	}
+
+	if (advance == BCM_TLV_ADVANCE_TO) {
+		bcm_tlv_buffer_advance_to(elt, buf, buflen);
+	} else if (advance == BCM_TLV_ADVANCE_PAST) {
+		bcm_tlv_buffer_advance_past(elt, buf, buflen);
+	} else if (advance == BCM_TLV_ADVANCE_NONE) {
+		/* nothing to do */
+	} else {
+		/* there are only 3 modes, but just in case, zero the parse buffer pointer and
+		 * length to prevent infinite loops in callers that expect progress.
+ */ + ASSERT(0); + *buf = NULL; + *buflen = 0; + } + + return elt; +} + +bcm_tlv_t * +bcm_parse_tlvs_dot11(const void *buf, uint buflen, uint key, bool id_ext) +{ + bcm_tlv_t *elt; + uint totlen; + + COV_TAINTED_DATA_SINK(buflen); + COV_NEG_SINK(buflen); + + /* + ideally, we don't want to do that, but returning a const pointer + from these parse function spreads casting everywhere in the code + */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + elt = (bcm_tlv_t*)buf; + GCC_DIAGNOSTIC_POP(); + + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + uint len = elt->len; + + /* validate remaining totlen */ + if (totlen < (len + TLV_HDR_LEN)) { + break; + } + + do { + if (id_ext) { + if (!DOT11_MNG_IE_ID_EXT_MATCH(elt, key)) + break; + } else if (elt->id != key) { + break; + } + + COV_TAINTED_DATA_ARG(elt); + + return (bcm_tlv_t *)(elt); /* a match */ + } while (0); + + elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + * return NULL if not found or length field < min_varlen + */ +bcm_tlv_t * +bcm_parse_tlvs_min_bodylen(const void *buf, uint buflen, uint key, uint min_bodylen) +{ + bcm_tlv_t * ret; + ret = bcm_parse_tlvs(buf, buflen, key); + if (ret == NULL || ret->len < min_bodylen) { + return NULL; + } + return ret; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + * return NULL if not found or tlv size > max_len or < min_len + */ +bcm_tlv_t * +bcm_parse_tlvs_minmax_len(const void *buf, uint buflen, uint key, + uint min_len, uint max_len) +{ + bcm_tlv_t * ret; + ret = bcm_parse_tlvs(buf, buflen, key); + if (ret == NULL || + (BCM_TLV_SIZE(ret) > max_len) || + (BCM_TLV_SIZE(ret) < min_len)) { + return NULL; + } + return ret; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag. Stop parsing when we see an element whose ID is greater + * than the target key. 
+ */
+const bcm_tlv_t *
+bcm_parse_ordered_tlvs(const void *buf, uint buflen, uint key)
+{
+	const bcm_tlv_t *elt;
+	uint totlen;
+
+	COV_TAINTED_DATA_SINK(buflen);
+	COV_NEG_SINK(buflen);
+
+	elt = (const bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		uint id = elt->id;
+		uint len = elt->len;
+
+		/* Punt if we start seeing IDs > than target key */
+		if (id > key) {
+			return (NULL);
+		}
+
+		/* validate remaining totlen */
+		if (totlen < (len + TLV_HDR_LEN)) {
+			break;
+		}
+		if (id == key) {
+			COV_TAINTED_DATA_ARG(elt);
+			return (elt);
+		}
+
+		elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+	return NULL;
+}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+uint
+bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, uint len)
+{
+	uint i, slen = 0;
+	uint32 bit, mask;
+	const char *name;
+	mask = bd->mask;
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) {
+		bit = bd->bitfield[i].bit;
+		if ((flags & mask) == bit) {
+			slen = (int)strlen(name);
+			if (memcpy_s(buf, len, name, slen + 1) != BCME_OK) {
+				slen = 0;
+			}
+			break;
+		}
+	}
+	return slen;
+}
+
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, uint len)
+{
+	uint i;
+	char *p = buf;
+	char *end = (buf + len);
+	char hexstr[16];
+	uint32 bit;
+	const char* name;
+	bool err = FALSE;
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; flags != 0; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (bit == 0 && flags != 0) {
+			/* print any unnamed bits */
+			snprintf(hexstr, sizeof(hexstr), "0x%X", flags);
+			name = hexstr;
+			flags = 0; /* exit loop */
+		} else if ((flags & bit) == 0) {
+			continue;
+		}
+		flags &= ~bit;
+
+		/* Print named bit. */
+		p += strlcpy(p, name, (end - p));
+		if (p == end) {
+			/* Truncation error. */
+			err = TRUE;
+			break;
+		}
+
+		/* Add space delimiter if there are more bits. */
+		if (flags != 0) {
+			p += strlcpy(p, " ", (end - p));
+			if (p == end) {
+				/* Truncation error. */
+				err = TRUE;
+				break;
+			}
+		}
+	}
+
+	/* indicate the str was too short */
+	if (err) {
+		ASSERT(len >= 2u);
+		buf[len - 2u] = '>';
+	}
+
+	return (int)(p - buf);
+}
+
+/* print out which bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset. */
+int
+bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz,
+	const uint8 *addr, uint size, char *buf, uint len)
+{
+	uint i;
+	char *p = buf;
+	uint slen = 0, nlen = 0;
+	uint32 bit;
+	const char* name;
+	bool more = FALSE;
+
+	BCM_REFERENCE(size);
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; i < bdsz; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (isset(addr, bit)) {
+			nlen = (int)strlen(name);
+			slen += nlen;
+			/* need SPACE - for simplicity */
+			slen += 1;
+			/* need NULL as well */
+			if (len < slen + 1) {
+				more = TRUE;
+				break;
+			}
+			memcpy(p, name, nlen);
+			p += nlen;
+			p[0] = ' ';
+			p += 1;
+			p[0] = '\0';
+		}
+	}
+
+	if (more) {
+		p[0] = '>';
+		p += 1;
+		p[0] = '\0';
+	}
+
+	return (int)(p - buf);
+}
+
+/* Transform a hexadecimal string into binary.
+ * Output is limited to 64K.
+ * hex : string
+ * hex_len : string length
+ * buf : allocated output buffer
+ * buf_len : allocated size
+ * return : copied length, if successful, 0 if error.
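+ *
+ * Example (illustrative, not from the original source):
+ * bcmhex2bin((const uint8 *)"0x0A1B", 6, buf, 2) returns 2 and stores
+ * 0x0A, 0x1B into buf.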
+ */ +uint16 +bcmhex2bin(const uint8* hex, uint hex_len, uint8 *buf, uint buf_len) +{ + uint i = 0; + uint16 out_len; + char tmp[] = "XX"; + if (hex_len % 2) { + /* hex_len not even */ + return 0; + } + /* check for hex radix */ + if ((hex[0] == '0') && ((hex[1] == 'x') || (hex[1] == 'X'))) { + hex += 2; + hex_len -= 2; + } + if (hex_len/2 > 0xFFFF) { + /* exceed 64K buffer capacity */ + return 0; + } + if ((out_len = hex_len/2) > buf_len) { + /* buf too short */ + return 0; + } + do { + tmp[0] = *hex++; + tmp[1] = *hex++; + if (!bcm_isxdigit(tmp[0]) || !bcm_isxdigit(tmp[1])) { + /* char is not a 256-bit hex number */ + return 0; + } + /* okay so far; make this piece a number */ + buf[i] = (uint8) bcm_strtoul(tmp, NULL, 16); + } while (++i < out_len); + return out_len; +} + +/* print bytes formatted as hex to a string. return the resulting string length */ +int +bcm_format_hex(char *str, const void *bytes, uint len) +{ + uint i; + char *p = str; + const uint8 *src = (const uint8*)bytes; + + for (i = 0; i < len; i++) { + p += snprintf(p, 3, "%02X", *src); + src++; + } + return (int)(p - str); +} + +/* pretty hex print a contiguous buffer */ +void +prhex(const char *msg, const uchar *buf, uint nbytes) +{ + char line[128], *p; + uint len = sizeof(line); + int nchar; + uint i; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + nchar = snprintf(p, len, " %04x: ", i); /* line prefix */ + p += nchar; + len -= nchar; + } + if (len > 0) { + nchar = snprintf(p, len, "%02x ", buf[i]); + p += nchar; + len -= nchar; + } + + if (i % 16 == 15) { + printf("%s\n", line); /* flush line */ + p = line; + len = sizeof(line); + } + } + + /* flush last partial line */ + if (p != line) + printf("%s\n", line); +} + +static const char *crypto_algo_names[] = { + "NONE", + "WEP1", + "TKIP", + "WEP128", + "AES_CCM", + "AES_OCB_MSDU", + "AES_OCB_MPDU", + "NALG", + "UNDEF", + "UNDEF", + "UNDEF", + +#ifdef BCMWAPI_WAI + "WAPI", +#endif /* BCMWAPI_WAI */ + +#ifndef BCMWAPI_WAI + "UNDEF", +#endif + "PMK", + "BIP", + "AES_GCM", + "AES_CCM256", + "AES_GCM256", + "BIP_CMAC256", + "BIP_GMAC", + "BIP_GMAC256", + "UNDEF" +}; + +const char * +bcm_crypto_algo_name(uint algo) +{ + return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR"; +} + +#ifdef BCMDBG +void +deadbeef(void *p, uint len) +{ + static uint8 meat[] = { 0xde, 0xad, 0xbe, 0xef }; + + while (len-- > 0) { + *(uint8*)p = meat[((uintptr)p) & 3]; + p = (uint8*)p + 1; + } +} +#endif /* BCMDBG */ + +char * +bcm_chipname(uint chipid, char *buf, uint len) +{ + const char *fmt; + + fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; + snprintf(buf, len, fmt, chipid); + return buf; +} + +/* Produce a human-readable string for boardrev */ +char * +bcm_brev_str(uint32 brev, char *buf) +{ + if (brev < 0x100) + snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); + else + snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 
'P' : 'A', brev & 0xfff); + + return (buf); +} + +#define BUFSIZE_TODUMP_ATONCE 128 /* Buffer size */ + +/* dump large strings to console */ +void +printbig(char *buf) +{ + uint len, max_len; + char c; + + len = (uint)strlen(buf); + + max_len = BUFSIZE_TODUMP_ATONCE; + + while (len > max_len) { + c = buf[max_len]; + buf[max_len] = '\0'; + printf("%s", buf); + buf[max_len] = c; + + buf += max_len; + len -= max_len; + } + /* print the remaining string */ + printf("%s\n", buf); + return; +} + +/* routine to dump fields in a fileddesc structure */ +uint +bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, + char *buf, uint32 bufsize) +{ + uint filled_len; + int len; + struct fielddesc *cur_ptr; + + filled_len = 0; + cur_ptr = fielddesc_array; + + while (bufsize > 1) { + if (cur_ptr->nameandfmt == NULL) + break; + len = snprintf(buf, bufsize, cur_ptr->nameandfmt, + read_rtn(arg0, arg1, cur_ptr->offset)); + /* check for snprintf overflow or error */ + if (len < 0 || (uint32)len >= bufsize) + len = bufsize - 1; + buf += len; + bufsize -= len; + filled_len += len; + cur_ptr++; + } + return filled_len; +} + +uint +bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint buflen) +{ + uint len; + + len = (uint)strlen(name) + 1; + + if ((len + datalen) > buflen) + return 0; + + strlcpy(buf, name, buflen); + + /* append data onto the end of the name string */ + if (data && datalen != 0) { + memcpy(&buf[len], data, datalen); + len += datalen; + } + + return len; +} + +/* Quarter dBm units to mW + * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 + * Table is offset so the last entry is largest mW value that fits in + * a uint16. + */ + +#define QDBM_OFFSET 153 /* Offset for first entry */ +#define QDBM_TABLE_LEN 40 /* Table size */ + +/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. + * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 + */ +#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ + +/* Largest mW value that will round down to the last table entry, + * QDBM_OFFSET + QDBM_TABLE_LEN-1. + * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. + */ +#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ + +static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { +/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ +/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, +/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, +/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, +/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, +/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 +}; + +uint16 +bcm_qdbm_to_mw(uint8 qdbm) +{ + uint factor = 1; + int idx = qdbm - QDBM_OFFSET; + + if (idx >= QDBM_TABLE_LEN) { + /* clamp to max uint16 mW value */ + return 0xFFFF; + } + + /* scale the qdBm index up to the range of the table 0-40 + * where an offset of 40 qdBm equals a factor of 10 mW. + */ + while (idx < 0) { + idx += 40; + factor *= 10; + } + + /* return the mW value scaled down to the correct factor of 10, + * adding in factor/2 to get proper rounding. 
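+ *
+ * Worked example (illustrative, not from the original source): qdbm = 113
+ * (28.25 dBm) gives idx = -40, which is scaled to idx = 0 with factor = 10,
+ * so the function returns (6683 + 5) / 10 = 668 mW.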
+ */
+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+	uint8 qdbm;
+	int offset;
+	uint mw_uint = mw;
+	uint boundary;
+
+	/* handle boundary case */
+	if (mw_uint <= 1)
+		return 0;
+
+	offset = QDBM_OFFSET;
+
+	/* move mw into the range of the table */
+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+		mw_uint *= 10;
+		offset -= 40;
+	}
+
+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+			nqdBm_to_mW_map[qdbm])/2;
+		if (mw_uint < boundary) break;
+	}
+
+	qdbm += (uint8)offset;
+
+	return (qdbm);
+}
+
+uint
+BCMPOSTTRAPFN(bcm_bitcount)(const uint8 *bitmap, uint length)
+{
+	uint bitcount = 0, i;
+	uint8 tmp;
+	for (i = 0; i < length; i++) {
+		tmp = bitmap[i];
+		while (tmp) {
+			bitcount++;
+			tmp &= (tmp - 1);
+		}
+	}
+	return bitcount;
+}
+
+void
+dump_nvram(char *varbuf, int column, unsigned int n, unsigned int len)
+{
+	unsigned int m;
+	char vars[128];
+
+	if (((n==0) && (varbuf[0]=='#')) ||
+		((column==0) && (android_msg_level & ANDROID_INFO_LEVEL))) {
+		memset(vars, 0x00, sizeof(vars));
+		for (m=n; m<len && (m-n)<(sizeof(vars)-1); m++) {
+			if (varbuf[m] == '\n')
+				break;
+			vars[m-n] = varbuf[m];
+		}
+		printf("%s\n", vars);
+	}
+}
+
+/* Process variables:
+ * Text file with lines of the form "<var>=<value>\n" read from a file and ending in a NUL.
+ * also accepts nvram files which are already in the format of <var>=<value>\0\<var>=<value>\0
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs.
+*/
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+	char *dp;
+	bool findNewline;
+	int column;
+	unsigned int buf_len, n;
+	unsigned int pad = 0;
+
+	dp = varbuf;
+
+	findNewline = FALSE;
+	column = 0;
+
+	dump_nvram(varbuf, 0, 0, len);
+	for (n = 0; n < len; n++) {
+		if (varbuf[n] == '\r')
+			continue;
+		if (findNewline && varbuf[n] != '\n')
+			continue;
+		findNewline = FALSE;
+		if (varbuf[n] == '#') {
+			findNewline = TRUE;
+			continue;
+		}
+		if (varbuf[n] == '\n') {
+			if (column == 0)
+				continue;
+			*dp++ = 0;
+			column = 0;
+			continue;
+		}
+		dump_nvram(varbuf, column, n, len);
+		*dp++ = varbuf[n];
+		column++;
+	}
+	buf_len = (unsigned int)(dp - varbuf);
+	if (buf_len % 4) {
+		pad = 4 - buf_len % 4;
+		if (pad && (buf_len + pad <= len)) {
+			buf_len += pad;
+		}
+	}
+
+	while (dp < varbuf + n)
+		*dp++ = 0;
+
+	return buf_len;
+}
+
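+/* Illustrative sketch (not part of the original patch): feeding a small
+ * text nvram image through process_nvram_vars(). Comment and blank lines
+ * are dropped and each "name=value\n" line becomes a NUL-terminated record.
+ * The helper name and the image contents are made up.
+ */
+#if 0
+static void
+process_nvram_example(void)
+{
+	char image[] = "# comment line\nssid=test\n\nccode=US\n";
+	unsigned int used = process_nvram_vars(image, sizeof(image));
+
+	/* image now holds "ssid=test\0ccode=US\0" followed by NUL padding;
+	 * used is the (padded) length of the packed records.
+	 */
+	BCM_REFERENCE(used);
+}
+#endif /* 0 */
+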
+#ifndef setbit /* As in the header file */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+/* Set bit in byte array. */
+void
+setbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
+}
+
+/* Clear bit in byte array. */
+void
+clrbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+}
+
+/* Test if bit is set in byte array. */
+bool
+isset(const void *array, uint bit)
+{
+	return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+}
+
+/* Test if bit is clear in byte array. */
+bool
+isclr(const void *array, uint bit)
+{
+	return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+}
+#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
+#endif /* setbit */
+
+void
+BCMPOSTTRAPFN(set_bitrange)(void *array, uint start, uint end, uint maxbit)
+{
+	uint startbyte = start/NBBY;
+	uint endbyte = end/NBBY;
+	uint i, startbytelastbit, endbytestartbit;
+
+	if (end >= start) {
+		if (endbyte - startbyte > 1) {
+			startbytelastbit = ((startbyte + 1) * NBBY) - 1;
+			endbytestartbit = endbyte * NBBY;
+			for (i = startbyte + 1; i < endbyte; i++)
+				((uint8 *)array)[i] = 0xFF;
+			for (i = start; i <= startbytelastbit; i++)
+				setbit(array, i);
+			for (i = endbytestartbit; i <= end; i++)
+				setbit(array, i);
+		} else {
+			for (i = start; i <= end; i++)
+				setbit(array, i);
+		}
+	} else {
+		set_bitrange(array, start, maxbit, maxbit);
+		set_bitrange(array, 0, end, maxbit);
+	}
+}
+
+void
+clr_bitrange(void *array, uint start, uint end, uint maxbit)
+{
+	uint startbyte = start/NBBY;
+	uint endbyte = end/NBBY;
+	uint i, startbytelastbit, endbytestartbit;
+
+	if (end >= start) {
+		if (endbyte - startbyte > 1) {
+			startbytelastbit = ((startbyte + 1) * NBBY) - 1;
+			endbytestartbit = endbyte * NBBY;
+			for (i = startbyte + 1; i < endbyte; i++)
+				((uint8 *)array)[i] = 0x0;
+			for (i = start; i <= startbytelastbit; i++)
+				clrbit(array, i);
+			for (i = endbytestartbit; i <= end; i++)
+				clrbit(array, i);
+		} else {
+			for (i = start; i <= end; i++)
+				clrbit(array, i);
+		}
+	} else {
+		clr_bitrange(array, start, maxbit, maxbit);
+		clr_bitrange(array, 0, end, maxbit);
+	}
+}
+
+/*
+ * This API (set_bitrange_u32) is the same as set_bitrange but uses 32-bit (word) accesses.
+ * It can be used in place of set_bitrange, but the array must be word (32-bit) aligned.
+ * It has to be used when the memory being accessed cannot be used in
+ * 8-bit (byte) mode and needs 32-bit (word) mode.
+ */
+void
+set_bitrange_u32(void *array, uint start, uint end, uint maxbit)
+{
+	uint startword = start/SIZE_BITS32(uint32);
+	uint endword = end/SIZE_BITS32(uint32);
+	uint startwordstartbit = start % SIZE_BITS32(uint32);
+	uint endwordlastbit = end % SIZE_BITS32(uint32);
+	/* Used to calculate bit number from MSB */
+	uint u32msbnum = SIZE_BITS32(uint32) - 1U;
+	uint i;
+	uint32 setbitsword;
+	uint32 u32max = ~0U;
+
+	ASSERT(ISALIGNED(array, sizeof(uint32))); /* array should be aligned for this API */
+
+	if (start > end) {
+		set_bitrange_u32(array, start, maxbit, maxbit);
+		set_bitrange_u32(array, 0U, end, maxbit);
+		return;
+	}
+
+	if (endword - startword) {
+		/* Setting MSB bits including startwordstartbit */
+		setbitsword = u32max << startwordstartbit;
+		((uint32 *)array)[startword] |= setbitsword;
+
+		/* Setting all bits in 'startword + 1' to 'endword - 1' */
+		for (i = startword + 1U; i <= endword - 1U; i++) {
+			((uint32 *)array)[i] = u32max;
+		}
+
+		/* Setting LSB bits including endwordlastbit */
+		setbitsword = u32max >> (u32msbnum - endwordlastbit);
+		((uint32 *)array)[endword] |= setbitsword;
+	} else { /* start and end are in same word */
+		/* Setting start bit to end bit including start and end bits */
+		setbitsword =
+			(u32max << startwordstartbit) & (u32max >> (u32msbnum - endwordlastbit));
+		((uint32 *)array)[startword] |= setbitsword;
+	}
+}
+
+/*
+ * This API (clr_bitrange_u32) is the same as clr_bitrange but uses 32-bit (word) accesses.
+ * It can be used in place of clr_bitrange, but the array must be word (32-bit) aligned.
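+ * For example (illustrative, not from the original source),
+ * clr_bitrange_u32(arr, 4, 35, 63) clears bits 4..31 of arr[0] and
+ * bits 0..3 of arr[1].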
+ * This api has to be used when the memory being accessed has restrictions of + * not using them in 8bit (byte) mode and needing 32bit (word) mode. + */ +void +clr_bitrange_u32(void *array, uint start, uint end, uint maxbit) +{ + uint startword = start/SIZE_BITS32(uint32); + uint endword = end/SIZE_BITS32(uint32); + uint startwordstartbit = start % SIZE_BITS32(uint32); + uint endwordlastbit = end % SIZE_BITS32(uint32); + /* Used to caluculate bit number from MSB */ + uint u32msbnum = SIZE_BITS32(uint32) - 1U; + uint i; + uint32 clrbitsword; + uint32 u32max = ~0U; + + ASSERT(ISALIGNED(array, sizeof(uint32))); /* array should be alligned for this API */ + + if (start > end) { + clr_bitrange_u32(array, start, maxbit, maxbit); + clr_bitrange_u32(array, 0U, end, maxbit); + return; + } + + if (endword - startword) { + /* Clearing MSB bits including startwordstartbit */ + clrbitsword = ~(u32max << startwordstartbit); + ((uint32 *)array)[startword] &= clrbitsword; + + /* Clearing all bits in 'startword + 1' to 'endword - 1' */ + for (i = startword + 1U; i <= endword - 1U; i++) { + ((uint32 *)array)[i] = 0U; + } + + /* Clearing LSB bits including endwordlastbit */ + clrbitsword = ~(u32max >> (u32msbnum - endwordlastbit)); + ((uint32 *)array)[endword] &= clrbitsword; + } else { /* start and end are in same word */ + /* Clearing start bit to end bit including start and end bits */ + clrbitsword = + ~(u32max << startwordstartbit) | ~(u32max >> (u32msbnum - endwordlastbit)); + ((uint32 *)array)[startword] &= clrbitsword; + } +} + +void +bcm_bitprint32(const uint32 u32arg) +{ + int i; + for (i = NBITS(uint32) - 1; i >= 0; i--) { + if (isbitset(u32arg, i)) { + printf("1"); + } else { + printf("0"); + } + + if ((i % NBBY) == 0) printf(" "); + } + printf("\n"); +} + +/* calculate checksum for ip header, tcp / udp header / data */ +uint16 +bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) +{ + while (len > 1) { + sum += (buf[0] << 8) | buf[1]; + buf += 2; + len -= 2; + } + + if (len > 0) { + sum += (*buf) << 8; + } + + while (sum >> 16) { + sum = (sum & 0xffff) + (sum >> 16); + } + + return ((uint16)~sum); +} + +/* calculate a + b where a is a 64 bit number and b is a 32 bit number */ +void +bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset) +{ + uint32 r1_lo = *r_lo; + (*r_lo) += offset; + if (*r_lo < r1_lo) + (*r_hi) ++; +} + +/* calculate a - b where a is a 64 bit number and b is a 32 bit number */ +void +bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset) +{ + uint32 r1_lo = *r_lo; + (*r_lo) -= offset; + if (*r_lo > r1_lo) + (*r_hi) --; +} + +int +BCMRAMFN(valid_bcmerror)(int e) +{ + return ((e <= 0) && (e >= BCME_LAST)); +} + +#ifdef DEBUG_COUNTER +#if (OSL_SYSUPTIME_SUPPORT == TRUE) +void counter_printlog(counter_tbl_t *ctr_tbl) +{ + uint32 now; + + if (!ctr_tbl->enabled) + return; + + now = OSL_SYSUPTIME(); + + if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) { + uint8 i = 0; + printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print); + + for (i = 0; i < ctr_tbl->needed_cnt; i++) { + printf(" %u", ctr_tbl->cnt[i]); + } + printf("\n"); + + ctr_tbl->prev_log_print = now; + bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint)); + } +} +#else +/* OSL_SYSUPTIME is not supported so no way to get time */ +#define counter_printlog(a) do {} while (0) +#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */ +#endif /* DEBUG_COUNTER */ + +/* calculate partial checksum */ +static uint32 +ip_cksum_partial(uint32 sum, uint8 *val8, uint32 count) +{ + uint32 i; + uint16 *val16 = (uint16 
*)val8; + + ASSERT(val8 != NULL); + /* partial chksum calculated on 16-bit values */ + ASSERT((count % 2) == 0); + + count /= 2; + + for (i = 0; i < count; i++) { + sum += *val16++; + } + return sum; +} + +/* calculate IP checksum */ +static uint16 +ip_cksum(uint32 sum, uint8 *val8, uint32 count) +{ + uint16 *val16 = (uint16 *)val8; + + ASSERT(val8 != NULL); + + while (count > 1) { + sum += *val16++; + count -= 2; + } + /* add left-over byte, if any */ + if (count > 0) { + sum += (*(uint8 *)val16); + } + + /* fold 32-bit sum to 16 bits */ + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); + return ((uint16)~sum); +} + +/* calculate IPv4 header checksum + * - input ip points to IP header in network order + * - output cksum is in network order + */ +uint16 +ipv4_hdr_cksum(uint8 *ip, uint ip_len) +{ + uint32 sum = 0; + uint8 *ptr = ip; + + ASSERT(ip != NULL); + ASSERT(ip_len >= IPV4_MIN_HEADER_LEN); + + if (ip_len < IPV4_MIN_HEADER_LEN) { + return 0; + } + + /* partial cksum skipping the hdr_chksum field */ + sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct ipv4_hdr, hdr_chksum)); + ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2; + + /* return calculated chksum */ + return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip)); +} + +/* calculate TCP header checksum using partial sum */ +static uint16 +tcp_hdr_chksum(uint32 sum, uint8 *tcp_hdr, uint16 tcp_len) +{ + uint8 *ptr = tcp_hdr; + + ASSERT(tcp_hdr != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* partial TCP cksum skipping the chksum field */ + sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct bcmtcp_hdr, chksum)); + ptr += OFFSETOF(struct bcmtcp_hdr, chksum) + 2; + + /* return calculated chksum */ + return ip_cksum(sum, ptr, tcp_len - OFFSETOF(struct bcmtcp_hdr, urg_ptr)); +} + +struct tcp_pseudo_hdr { + uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ + uint8 zero; + uint8 prot; + uint16 tcp_size; +}; + +/* calculate IPv4 TCP header checksum + * - input ip and tcp points to IP and TCP header in network order + * - output cksum is in network order + */ +uint16 +ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len) +{ + struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)ip; + struct tcp_pseudo_hdr tcp_ps; + uint32 sum = 0; + + ASSERT(ip != NULL); + ASSERT(tcp != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* pseudo header cksum */ + memset(&tcp_ps, 0, sizeof(tcp_ps)); + memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN); + memcpy(&tcp_ps.src_ip, ip_hdr->src_ip, IPV4_ADDR_LEN); + tcp_ps.zero = 0; + tcp_ps.prot = ip_hdr->prot; + tcp_ps.tcp_size = hton16(tcp_len); + sum = ip_cksum_partial(sum, (uint8 *)&tcp_ps, sizeof(tcp_ps)); + + /* return calculated TCP header chksum */ + return tcp_hdr_chksum(sum, tcp, tcp_len); +} + +struct ipv6_pseudo_hdr { + uint8 saddr[IPV6_ADDR_LEN]; + uint8 daddr[IPV6_ADDR_LEN]; + uint16 payload_len; + uint8 zero; + uint8 next_hdr; +}; + +/* calculate IPv6 TCP header checksum + * - input ipv6 and tcp points to IPv6 and TCP header in network order + * - output cksum is in network order + */ +uint16 +ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len) +{ + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)ipv6; + struct ipv6_pseudo_hdr ipv6_pseudo; + uint32 sum = 0; + + ASSERT(ipv6 != NULL); + ASSERT(tcp != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* pseudo header cksum */ + memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo)); + memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr, + 
sizeof(ipv6_pseudo.saddr)); + memcpy((char *)ipv6_pseudo.daddr, (char *)ipv6_hdr->daddr.addr, + sizeof(ipv6_pseudo.daddr)); + ipv6_pseudo.payload_len = ipv6_hdr->payload_len; + ipv6_pseudo.next_hdr = ipv6_hdr->nexthdr; + sum = ip_cksum_partial(sum, (uint8 *)&ipv6_pseudo, sizeof(ipv6_pseudo)); + + /* return calculated TCP header chksum */ + return tcp_hdr_chksum(sum, tcp, tcp_len); +} + +void *_bcmutils_dummy_fn = NULL; + +/* GROUP 1 --- start + * These function under GROUP 1 are general purpose functions to do complex number + * calculations and square root calculation. + */ + +uint32 sqrt_int(uint32 value) +{ + uint32 root = 0, shift = 0; + + /* Compute integer nearest to square root of input integer value */ + for (shift = 0; shift < 32; shift += 2) { + if (((0x40000000 >> shift) + root) <= value) { + value -= ((0x40000000 >> shift) + root); + root = (root >> 1) | (0x40000000 >> shift); + } + else { + root = root >> 1; + } + } + + /* round to the nearest integer */ + if (root < value) ++root; + + return root; +} +/* GROUP 1 --- end */ + +/* read/write field in a consecutive bits in an octet array. + * 'addr' is the octet array's start byte address + * 'size' is the octet array's byte size + * 'stbit' is the value's start bit offset + * 'nbits' is the value's bit size + * This set of utilities are for convenience. Don't use them + * in time critical/data path as there's a great overhead in them. + */ +void +setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val) +{ + uint fbyte = stbit >> 3; /* first byte */ + uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */ + uint fbit = stbit & 7; /* first bit in the first byte */ + uint rbits = (nbits > 8 - fbit ? + nbits - (8 - fbit) : + 0) & 7; /* remaining bits of the last byte when not 0 */ + uint8 mask; + uint byte; + + BCM_REFERENCE(size); + + ASSERT(fbyte < size); + ASSERT(lbyte < size); + ASSERT(nbits <= (sizeof(val) << 3)); + + /* all bits are in the same byte */ + if (fbyte == lbyte) { + mask = ((1 << nbits) - 1) << fbit; + addr[fbyte] &= ~mask; + addr[fbyte] |= (uint8)(val << fbit); + return; + } + + /* first partial byte */ + if (fbit > 0) { + mask = (0xff << fbit); + addr[fbyte] &= ~mask; + addr[fbyte] |= (uint8)(val << fbit); + val >>= (8 - fbit); + nbits -= (8 - fbit); + fbyte ++; /* first full byte */ + } + + /* last partial byte */ + if (rbits > 0) { + mask = (1 << rbits) - 1; + addr[lbyte] &= ~mask; + addr[lbyte] |= (uint8)(val >> (nbits - rbits)); + lbyte --; /* last full byte */ + } + + /* remaining full byte(s) */ + for (byte = fbyte; byte <= lbyte; byte ++) { + addr[byte] = (uint8)val; + val >>= 8; + } +} + +uint32 +getbits(const uint8 *addr, uint size, uint stbit, uint nbits) +{ + uint fbyte = stbit >> 3; /* first byte */ + uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */ + uint fbit = stbit & 7; /* first bit in the first byte */ + uint rbits = (nbits > 8 - fbit ? 
+ nbits - (8 - fbit) : + 0) & 7; /* remaining bits of the last byte when not 0 */ + uint32 val = 0; + uint bits = 0; /* bits in first partial byte */ + uint8 mask; + uint byte; + + BCM_REFERENCE(size); + + ASSERT(fbyte < size); + ASSERT(lbyte < size); + ASSERT(nbits <= (sizeof(val) << 3)); + + /* all bits are in the same byte */ + if (fbyte == lbyte) { + mask = ((1 << nbits) - 1) << fbit; + val = (addr[fbyte] & mask) >> fbit; + return val; + } + + /* first partial byte */ + if (fbit > 0) { + bits = 8 - fbit; + mask = (0xff << fbit); + val |= (addr[fbyte] & mask) >> fbit; + fbyte ++; /* first full byte */ + } + + /* last partial byte */ + if (rbits > 0) { + mask = (1 << rbits) - 1; + val |= (addr[lbyte] & mask) << (nbits - rbits); + lbyte --; /* last full byte */ + } + + /* remaining full byte(s) */ + for (byte = fbyte; byte <= lbyte; byte ++) { + val |= (addr[byte] << (((byte - fbyte) << 3) + bits)); + } + + return val; +} + +#if defined(BCMDBG) || defined(WLMSG_ASSOC) +/* support for getting 802.11 frame type/name based on frame kind */ +#define FK_NAME_DECL(x) {FC_##x, #x} +static const struct { + uint fk; + const char *name; +} bcm_80211_fk_names[] = { + FK_NAME_DECL(ASSOC_REQ), + FK_NAME_DECL(ASSOC_RESP), + FK_NAME_DECL(REASSOC_REQ), + FK_NAME_DECL(REASSOC_RESP), + FK_NAME_DECL(PROBE_REQ), + FK_NAME_DECL(PROBE_RESP), + FK_NAME_DECL(BEACON), + FK_NAME_DECL(ATIM), + FK_NAME_DECL(DISASSOC), + FK_NAME_DECL(AUTH), + FK_NAME_DECL(DEAUTH), + FK_NAME_DECL(ACTION), + FK_NAME_DECL(ACTION_NOACK), + FK_NAME_DECL(CTL_TRIGGER), + FK_NAME_DECL(CTL_WRAPPER), + FK_NAME_DECL(BLOCKACK_REQ), + FK_NAME_DECL(BLOCKACK), + FK_NAME_DECL(PS_POLL), + FK_NAME_DECL(RTS), + FK_NAME_DECL(CTS), + FK_NAME_DECL(ACK), + FK_NAME_DECL(CF_END), + FK_NAME_DECL(CF_END_ACK), + FK_NAME_DECL(DATA), + FK_NAME_DECL(NULL_DATA), + FK_NAME_DECL(DATA_CF_ACK), + FK_NAME_DECL(QOS_DATA), + FK_NAME_DECL(QOS_NULL) +}; +static const uint n_bcm_80211_fk_names = ARRAYSIZE(bcm_80211_fk_names); + +const char *bcm_80211_fk_name(uint fk) +{ + uint i; + for (i = 0; i < n_bcm_80211_fk_names; ++i) { + if (bcm_80211_fk_names[i].fk == fk) { + return bcm_80211_fk_names[i].name; + } + } + return "unknown"; +} +#endif /* BCMDBG || WLMSG_ASSOC */ + +#ifdef BCMDRIVER + +/** allocate variable sized data with 'size' bytes. note: vld should NOT be null. + */ +int +bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size) +{ + int ret = BCME_ERROR; + uint8 *dat = NULL; + + if (vld == NULL) { + ASSERT(0); + goto done; + } + + /* trying to allocate twice? */ + if (vld->vdata != NULL) { + ASSERT(0); + goto done; + } + + /* trying to allocate 0 size? */ + if (size == 0) { + ASSERT(0); + ret = BCME_BADARG; + goto done; + } + + dat = MALLOCZ(osh, size); + if (dat == NULL) { + ret = BCME_NOMEM; + goto done; + } + vld->vlen = size; + vld->vdata = dat; + ret = BCME_OK; +done: + return ret; +} + +/** free memory associated with variable sized data. note: vld should NOT be null. + */ +int +bcm_vdata_free(osl_t *osh, var_len_data_t *vld) +{ + int ret = BCME_ERROR; + + if (vld == NULL) { + ASSERT(0); + goto done; + } + + if (vld->vdata) { + MFREE(osh, vld->vdata, vld->vlen); + vld->vlen = 0; + ret = BCME_OK; + } +done: + return ret; +} + +/* return TRUE if : + * - both buffers are of length 0 + * OR + * - both buffers are NULL + * OR + * lengths and contents are the same. 
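+ *
+ * For example (illustrative, not from the original source): with two
+ * non-NULL 4-byte buffers, bcm_match_buffers(b1, 4, b2, 4) is TRUE exactly
+ * when memcmp(b1, b2, 4) == 0; a NULL/non-NULL pair returns FALSE.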
+ */ +bool +bcm_match_buffers(const uint8 *b1, uint b1_len, const uint8 *b2, uint b2_len) + +{ + if (b1_len == 0 && b2_len == 0) { + return TRUE; + } + + if (b1 == NULL && b2 == NULL) { + return TRUE; + } + + /* If they are not both NULL, neither can be */ + if (b1 == NULL || b2 == NULL) { + return FALSE; + } + + if ((b1_len == b2_len) && !memcmp(b1, b2, b1_len)) { + return TRUE; + } + return FALSE; +} + +#ifdef PRIVACY_MASK +/* applies privacy mask on the input address itself */ +void +BCMRAMFN(bcm_ether_privacy_mask)(struct ether_addr *addr) +{ + struct ether_addr *privacy = privacy_addrmask_get(); + if (addr && !ETHER_ISMULTI(addr)) { + *(uint32*)(&(addr->octet[0])) &= *((uint32*)&privacy->octet[0]); + *(uint16*)(&(addr->octet[4])) &= *((uint16*)&privacy->octet[4]); + } +} +#endif /* PRIVACY_MASK */ +#endif /* BCMDRIVER */ + +/* Count the number of elements not matching a given value in a null terminated array */ +int +BCMATTACHFN(array_value_mismatch_count)(uint8 value, uint8 *array, int array_size) +{ + int i; + int count = 0; + + for (i = 0; i < array_size; i++) { + /* exit if a null terminator is found */ + if (array[i] == 0) { + break; + } + if (array[i] != value) { + count++; + } + } + return count; +} + +/* Count the number of non-zero elements in an uint8 array */ +int +BCMATTACHFN(array_nonzero_count)(uint8 *array, int array_size) +{ + return array_value_mismatch_count(0, array, array_size); +} + +/* Count the number of non-zero elements in an int16 array */ +int +BCMATTACHFN(array_nonzero_count_int16)(int16 *array, int array_size) +{ + int i; + int count = 0; + + for (i = 0; i < array_size; i++) { + if (array[i] != 0) { + count++; + } + } + return count; +} + +/* Count the number of zero elements in an uint8 array */ +int +BCMATTACHFN(array_zero_count)(uint8 *array, int array_size) +{ + int i; + int count = 0; + + for (i = 0; i < array_size; i++) { + if (array[i] == 0) { + count++; + } + } + return count; +} + +/* Validate an array that can be 1 of 2 data types. + * One of array1 or array2 should be non-NULL. The other should be NULL. + */ +static int +BCMATTACHFN(verify_ordered_array)(uint8 *array1, int16 *array2, int array_size, + int range_lo, int range_hi, bool err_if_no_zero_term, bool is_ordered) +{ + int ret; + int i; + int val = 0; + int prev_val = 0; + + ret = err_if_no_zero_term ? BCME_NOTFOUND : BCME_OK; + + /* Check that: + * - values are in descending order. + * - values are within the valid range. 
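+ * For example (illustrative, not from the original source), an ordered
+ * array {48, 36, 24, 0} with range 1..54 yields BCME_OK, while {36, 48, 0}
+ * fails the ordering check with BCME_BADOPTION.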
+	 */
+	for (i = 0; i < array_size; i++) {
+		if (array1) {
+			val = (int)array1[i];
+		} else if (array2) {
+			val = (int)array2[i];
+		} else {
+			/* both array parameters are NULL */
+			return BCME_NOTFOUND;
+		}
+		if (val == 0) {
+			/* array is zero-terminated */
+			ret = BCME_OK;
+			break;
+		}
+
+		if (is_ordered && i > 0 && val > prev_val) {
+			/* array is not in descending order */
+			ret = BCME_BADOPTION;
+			break;
+		}
+		prev_val = val;
+
+		if (val < range_lo || val > range_hi) {
+			/* array value out of range */
+			ret = BCME_RANGE;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Validate an ordered uint8 configuration array */
+int
+BCMATTACHFN(verify_ordered_array_uint8)(uint8 *array, int array_size,
+	uint8 range_lo, uint8 range_hi)
+{
+	return verify_ordered_array(array, NULL, array_size, (int)range_lo, (int)range_hi,
+		TRUE, TRUE);
+}
+
+/* Validate an ordered int16 non-zero-terminated configuration array */
+int
+BCMATTACHFN(verify_ordered_array_int16)(int16 *array, int array_size,
+	int16 range_lo, int16 range_hi)
+{
+	return verify_ordered_array(NULL, array, array_size, (int)range_lo, (int)range_hi,
+		FALSE, TRUE);
+}
+
+/* Validate all values in an array are in range */
+int
+BCMATTACHFN(verify_array_values)(uint8 *array, int array_size,
+	int range_lo, int range_hi, bool zero_terminated)
+{
+	int ret = BCME_OK;
+	int i;
+	int val = 0;
+
+	/* Check that values are within the valid range; stop early at a
+	 * zero terminator when zero_terminated is TRUE.
+	 */
+	for (i = 0; i < array_size; i++) {
+		val = (int)array[i];
+		if (val == 0 && zero_terminated) {
+			ret = BCME_OK;
+			break;
+		}
+		if (val < range_lo || val > range_hi) {
+			/* array value out of range */
+			ret = BCME_RANGE;
+			break;
+		}
+	}
+	return ret;
+}
+
+/* Adds/replaces NVRAM variable with given value
+ * varbuf[in,out] - Buffer with NVRAM variables (sequence of zero-terminated 'name=value' records,
+ *	terminated with additional zero)
+ * buflen[in] - Length of buffer (may, even should, have some unused space)
+ * variable[in] - Variable to add/replace in 'name=value' form
+ * datalen[out,opt] - Optional output parameter - resulting length of data in buffer
+ * Returns TRUE on success, FALSE if buffer too short or variable specified incorrectly
+ */
+bool
+replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable,
+	unsigned int *datalen)
+{
+	char *p;
+	int variable_heading_len, record_len, variable_record_len = (int)strlen(variable) + 1;
+	char *buf_end = varbuf + buflen;
+	p = strchr(variable, '=');
+	if (!p) {
+		return FALSE;
+	}
+	/* Length of given variable name, followed by '=' */
+	variable_heading_len = (int)((const char *)(p + 1) - variable);
+	/* Scanning NVRAM, record by record up to trailing 0 */
+	for (p = varbuf; *p; p += strlen(p) + 1) {
+		/* If given variable found - remove it */
+		if (!strncmp(p, variable, variable_heading_len)) {
+			record_len = (int)strlen(p) + 1;
+			memmove_s(p, buf_end - p, p + record_len, buf_end - (p + record_len));
+		}
+	}
+	/* If buffer does not have space for given variable - return FALSE */
+	if ((p + variable_record_len + 1) > buf_end) {
+		return FALSE;
+	}
+	/* Copy given variable to end of buffer */
+	memmove_s(p, buf_end - p, variable, variable_record_len);
+	/* Adding trailing 0 */
+	p[variable_record_len] = 0;
+	/* Setting optional output parameter - length of data in buffer */
+	if (datalen) {
+		*datalen = (unsigned int)(p + variable_record_len + 1 - varbuf);
+	}
+	return TRUE;
+}
+
+/*
+ * Gets the bit position of the nearest power of 2 that is >= val
+ * val[in] - value for which the nearest power-of-2 bit position is returned
+ * Returns that bit position (the ceiling of log2(val))
+ */
+uint8
+bcm_get_ceil_pow_2(uint val)
+{
+	uint8 bitpos = 0;
+	ASSERT(val);
+	if (val & (val-1)) {
+		/* val is not a power of 2;
+		 * pad it, so that allocation will be aligned to
+		 * the next immediate power of 2.
+		 */
+		bitpos = 1;
+	}
+	while (val >>= 1) {
+		bitpos ++;
+	}
+	return (bitpos);
+}
+
+#if !defined(BCMDONGLEHOST)
+/** Initialization of varbuf structure */
+void
+BCMATTACHFN(varbuf_init)(varbuf_t *b, char *buf, uint size)
+{
+	b->size = size;
+	b->base = b->buf = buf;
+}
+
+/** append a null terminated var=value string */
+int
+BCMATTACHFN(varbuf_append)(varbuf_t *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+	size_t len;
+	char *s;
+
+	if (b->size < 2)
+		return 0;
+
+	va_start(ap, fmt);
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+	va_end(ap);
+
+	/* C99 snprintf behavior returns r >= size on overflow,
+	 * others return -1 on overflow.
+	 * All return -1 on format error.
+	 * We need to leave room for 2 null terminations, one for the current var
+	 * string, and one for final null of the var table. So check that the
+	 * strlen written, r, leaves room for 2 chars.
+	 */
+	if ((r == -1) || (r > (int)(b->size - 2))) {
+		b->size = 0;
+		return 0;
+	}
+
+	/* Remove any earlier occurrence of the same variable */
+	if ((s = strchr(b->buf, '=')) != NULL) {
+		len = (size_t)(s - b->buf);
+		for (s = b->base; s < b->buf;) {
+			if ((memcmp(s, b->buf, len) == 0) && s[len] == '=') {
+				len = strlen(s) + 1;
+				memmove(s, (s + len), ((b->buf + r + 1) - (s + len)));
+				b->buf -= len;
+				b->size += (unsigned int)len;
+				break;
+			}
+
+			while (*s++)
+				;
+		}
+	}
+
+	/* skip over this string's null termination */
+	r++;
+	b->size -= r;
+	b->buf += r;
+
+	return r;
+}
+
+#if defined(BCMDRIVER)
+/**
+ * Create variable table from memory.
+ * Return 0 on success, nonzero on error.
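+ *
+ * Editorial note: on success, *vars is either NULL (empty table) or a
+ * MALLOC'd copy of [start, end) with *count set to its length in bytes;
+ * the caller is expected to release it with MFREE(osh, *vars, *count).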
+ */
+int
+BCMATTACHFN(initvars_table)(osl_t *osh, char *start, char *end, char **vars,
+	uint *count)
+{
+	int c = (int)(end - start);
+
+	/* do it only when there is more than just the null string */
+	if (c > 1) {
+		char *vp = MALLOC(osh, c);
+		ASSERT(vp != NULL);
+		if (!vp)
+			return BCME_NOMEM;
+		bcopy(start, vp, c);
+		*vars = vp;
+		*count = c;
+	}
+	else {
+		*vars = NULL;
+		*count = 0;
+	}
+
+	return 0;
+}
+#endif /* BCMDRIVER */
+
+#endif /* !BCMDONGLEHOST */
+
+/* Shift the contents of a serialized buffer right by 'bits' bits (0 < bits < 8) */
+int buf_shift_right(uint8 *buf, uint16 len, uint8 bits)
+{
+	uint16 i;
+
+	if (len == 0 || (bits == 0) || (bits >= NBBY)) {
+		return BCME_BADARG;
+	}
+
+	for (i = len - 1u; i > 0; i--) {
+		buf[i] = (buf[i - 1u] << (NBBY - bits)) | (buf[i] >> bits);
+	}
+	buf[0] >>= bits;
+
+	return BCME_OK;
+}
+
+/* print the content of the 'buf' in hex string format */
+void
+prhexstr(const char *prefix, const uint8 *buf, uint len, bool newline)
+{
+	if (len > 0) {
+		uint i;
+
+		if (prefix != NULL) {
+			printf("%s", prefix);
+		}
+		for (i = 0; i < len; i ++) {
+			printf("%02X", buf[i]);
+		}
+		if (newline) {
+			printf("\n");
+		}
+	}
+}
+
+/* Adjust the priority of an 802.1x packet */
+void
+pktset8021xprio(void *pkt, int prio)
+{
+	struct ether_header *eh;
+	uint8 *pktdata;
+	if (prio == PKTPRIO(pkt))
+		return;
+	pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+	eh = (struct ether_header *) pktdata;
+	if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+		ASSERT(prio >= 0 && prio <= MAXPRIO);
+		PKTSETPRIO(pkt, prio);
+	}
+}
diff --git a/bcmdhd.101.10.361.x/bcmwifi_channels.c b/bcmdhd.101.10.361.x/bcmwifi_channels.c
new file mode 100755
index 0000000..9387207
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmwifi_channels.c
@@ -0,0 +1,3000 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>	/* For wlexe/Makefile.wlm_dll */
+#endif
+
+#include <802.11.h>
+
+/* Definitions for D11AC capable (80MHz+) Chanspec type */
+
+/* Chanspec ASCII representation:
+ *
+ *	[<band>'g']<channel>['/'<bandwidth>[<primary-sideband>]
+ *	['/'<1st-channel-segment>'-'<2nd-channel-segment>]]
+ *
+ * <band>:
+ *	(optional) 2, 4, 5, 6 for 2.4GHz, 4GHz, 5GHz, and 6GHz respectively.
+ *	Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ *	channel number of the 20MHz channel,
+ *	or primary 20 MHz channel of 40MHz, 80MHz, 160MHz, 80+80MHz,
+ *	240MHz, 320MHz, or 160+160MHz channels.
+ * <bandwidth>:
+ *	(optional) 20, 40, 80, 160, 80+80, 240, 320, or 160+160. Default value is 20.
+ * <primary-sideband>:
+ *	'u' or 'l' (only for 2.4GHz band 40MHz)
+ *
+ * For 2.4GHz band 40MHz channels, the same primary channel may be the
+ * upper sideband for one 40MHz channel, and the lower sideband for an
+ * overlapping 40MHz channel. The {u: upper, l: lower} primary sideband
+ * indication disambiguates which 40MHz channel is being specified.
+ *
+ * For 40MHz in the 5GHz or 6GHz band and all channel bandwidths greater than
+ * 40MHz, the U/L specification is not necessary or allowed since the channels are
+ * non-overlapping and the primary 20MHz channel position is derived from its
+ * position in the wide bandwidth channel.
+ *
+ * <1st-channel-segment>
+ * <2nd-channel-segment>:
+ *	Required for 80+80 or 160+160, otherwise not allowed.
+ *	These fields specify the center channel of the first and the second 80MHz
+ *	or 160MHz channels.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><primary-sideband>
+ *
+ * <channel>:
+ *	primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <primary-sideband>:
+ *	"U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ *	Chanspec		BW		Center Ch	Channel Range	Primary Ch
+ *	5g8			20MHz		8		-		-
+ *	52			20MHz		52		-		-
+ *	52/40			40MHz		54		52-56		52
+ *	56/40			40MHz		54		52-56		56
+ *	52/80			80MHz		58		52-64		52
+ *	56/80			80MHz		58		52-64		56
+ *	60/80			80MHz		58		52-64		60
+ *	64/80			80MHz		58		52-64		64
+ *	52/160			160MHz		50		36-64		52
+ *	36/160			160MHz		50		36-64		36
+ *	36/80+80/42-106		80+80MHz	42,106		36-48,100-112	36
+ *
+ * 2 GHz Examples:
+ *	Chanspec	BW		Center Ch	Channel Range	Primary Ch
+ *	2g8		20MHz		8		-		-
+ *	8		20MHz		8		-		-
+ *	6		20MHz		6		-		-
+ *	6/40l		40MHz		8		6-10		6
+ *	6l		40MHz		8		6-10		6
+ *	6/40u		40MHz		4		2-6		6
+ *	6u		40MHz		4		2-6		6
+ */
+
+/* bandwidth ASCII string */
+static const char *wf_chspec_bw_str[] =
+{
+	"320",
+	"160+160",
+	"20",
+	"40",
+	"80",
+	"160",
+	"80+80",
+	"240"
+};
+
+static const uint16 wf_chspec_bw_mhz[] = {
+	320, 320, 20, 40, 80, 160, 160, 240
+};
+#define WF_NUM_BW ARRAYSIZE(wf_chspec_bw_mhz)
+
+/* 40MHz channels in 2.4GHz band */
+static const uint8 wf_2g_40m_chans[] = {
+	3, 4, 5, 6, 7, 8, 9, 10, 11
+};
+#define WF_NUM_2G_40M_CHANS ARRAYSIZE(wf_2g_40m_chans)
+
+/* 40MHz channels in 5GHz band */
+static const uint8 wf_5g_40m_chans[] = {
+	38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159, 167, 175
+};
+#define WF_NUM_5G_40M_CHANS ARRAYSIZE(wf_5g_40m_chans)
+
+/* 80MHz channels in 5GHz band */
+static const uint8 wf_5g_80m_chans[] = {
+	42, 58, 106, 122, 138, 155, 171
+};
+#define WF_NUM_5G_80M_CHANS ARRAYSIZE(wf_5g_80m_chans)
+
+/* 160MHz channels in 5GHz band */
+static const uint8 wf_5g_160m_chans[] = {
+	50, 114, 163
+};
+#define WF_NUM_5G_160M_CHANS ARRAYSIZE(wf_5g_160m_chans)
+
+/** 80MHz channels in 6GHz band */
+#define WF_NUM_6G_80M_CHANS 14
+
+/** 160MHz channels in 6GHz band */
+#define WF_NUM_6G_160M_CHANS 7	/* TBD */
+
+/** 240MHz channels in 6GHz band */
+#define WF_NUM_6G_240M_CHANS 4	/* TBD */
+
+/** 320MHz channels in 6GHz band */
+#define WF_NUM_6G_320M_CHANS 3	/* TBD */
+
+/* Define the conditional macro to help with reducing the code size bloat
+ * in other branches and in trunk targets that don't need 11BE features...
+ */
+#define WFC_2VALS_EQ(var, val)	((var) == (val))
+
+/* compare bandwidth unconditionally for 11be related stuff */
+#ifdef WL11BE
+#define WFC_BW_EQ(bw, val)	WFC_2VALS_EQ(bw, val)
+#else
+#define WFC_BW_EQ(bw, val)	(FALSE)
+#endif
+
+static void wf_chanspec_iter_firstchan(wf_chanspec_iter_t *iter);
+static chanspec_bw_t wf_iter_next_bw(chanspec_bw_t bw);
+static bool wf_chanspec_iter_next_2g(wf_chanspec_iter_t *iter);
+static bool wf_chanspec_iter_next_5g(wf_chanspec_iter_t *iter);
+static int wf_chanspec_iter_next_5g_range(wf_chanspec_iter_t *iter, chanspec_bw_t bw);
+static void wf_chanspec_iter_6g_range_init(wf_chanspec_iter_t *iter, chanspec_bw_t bw);
+static bool wf_chanspec_iter_next_6g(wf_chanspec_iter_t *iter);
+
+/**
+ * Return the chanspec bandwidth in MHz
+ * Bandwidth of 160 MHz will be returned for 80+80MHz chanspecs.
+ *
+ * @param	chspec		chanspec_t
+ *
+ * @return	bandwidth of chspec in MHz units
+ */
+uint
+wf_bw_chspec_to_mhz(chanspec_t chspec)
+{
+	uint bw;
+
+	bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+	return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+/* bw in MHz, return the channel count from the center channel to
+ * the channel at the edge of the band
+ */
+static uint
+center_chan_to_edge(chanspec_bw_t bw)
+{
+	uint delta = 0;
+
+	/* edge channels separated by BW - 10MHz on each side
+	 * delta from cf to edge is half of that,
+	 */
+	if (bw == WL_CHANSPEC_BW_40) {
+		/* 10 MHz */
+		delta = 2;
+	} else if (bw == WL_CHANSPEC_BW_80) {
+		/* 30 MHz */
+		delta = 6;
+	} else if (bw == WL_CHANSPEC_BW_160) {
+		/* 70 MHz */
+		delta = 14;
+	} else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240)) {
+		/* 110 MHz */
+		delta = 22;
+	} else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) {
+		/* 150 MHz */
+		delta = 30;
+	}
+	return delta;
+}
+
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
+static uint
+channel_low_edge(uint center_ch, chanspec_bw_t bw)
+{
+	return (center_ch - center_chan_to_edge(bw));
+}
+
+/* return side band number given center channel and primary20 channel
+ * return -1 on error
+ */
+static int
+channel_to_sb(uint center_ch, uint primary_ch, chanspec_bw_t bw)
+{
+	uint lowest = channel_low_edge(center_ch, bw);
+	uint sb;
+
+	if (primary_ch < lowest ||
+	    (primary_ch - lowest) % 4) {
+		/* bad primary channel lower than the low edge of the channel,
+		 * or not mult 4.
+		 */
+		return -1;
+	}
+
+	sb = ((primary_ch - lowest) / 4);
+
+	/* sb must be an index to a 20MHz channel in range */
+	if ((bw == WL_CHANSPEC_BW_20 && sb >= 1) ||
+	    (bw == WL_CHANSPEC_BW_40 && sb >= 2) ||
+	    (bw == WL_CHANSPEC_BW_80 && sb >= 4) ||
+	    (bw == WL_CHANSPEC_BW_160 && sb >= 8) ||
+	    (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240) && sb >= 12) ||
+	    (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320) && sb >= 16)) {
+		/* primary_ch must have been too high for the center_ch */
+		return -1;
+	}
+
+	return sb;
+}
+
+/* return primary20 channel given center channel and side band */
+static uint
+channel_to_primary20_chan(uint center_ch, chanspec_bw_t bw, uint sb)
+{
+	return (channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+	uint i;
+	for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+		if (ch == wf_5g_80m_chans[i])
+			return i;
+	}
+
+	return -1;
+}
+
+/* return index of the 6G 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_80mhz_to_id(uint ch)
+{
+	/* The 6GHz center channels start at 7, and have a spacing of 16 */
+	if (ch >= CH_MIN_6G_80M_CHANNEL &&
+	    ch <= CH_MAX_6G_80M_CHANNEL &&
+	    ((ch - CH_MIN_6G_80M_CHANNEL) % 16) == 0) {	// even multiple of 16
+		return (ch - CH_MIN_6G_80M_CHANNEL) / 16;
+	}
+
+	return -1;
+}
+
+/* return index of the 5G 160MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_5g_160mhz_to_id(uint ch)
+{
+	uint i;
+	for (i = 0; i < WF_NUM_5G_160M_CHANS; i ++) {
+		if (ch == wf_5g_160m_chans[i]) {
+			return i;
+		}
+	}
+
+	return -1;
+}
+
+/* return index of the 6G 160MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_160mhz_to_id(uint ch)
+{
+	/* The 6GHz center channels start at 15, and have a spacing of 32 */
+	if (ch >= CH_MIN_6G_160M_CHANNEL &&
+	    ch <= CH_MAX_6G_160M_CHANNEL &&
+	    ((ch - CH_MIN_6G_160M_CHANNEL) % 32) == 0) {
+		return (ch - CH_MIN_6G_160M_CHANNEL) / 32;
+	}
+
+	return -1;
+}
+
+/* return index of the 6G 240MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_240mhz_to_id(uint ch)
+{
+	/* The 6GHz center channels start at 23, and have a spacing of 48 */
+	if (ch >= CH_MIN_6G_240M_CHANNEL &&
+	    ch <= CH_MAX_6G_240M_CHANNEL &&
+	    ((ch - CH_MIN_6G_240M_CHANNEL) % 48) == 0) {
+		return (ch - CH_MIN_6G_240M_CHANNEL) / 48;
+	}
+
+	return -1;
+}
+
+/* return index of the 6G 320MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_320mhz_to_id(uint ch)
+{
+	/* The 6GHz center channels start at 31, and have a spacing of 64 */
+	if (ch >= CH_MIN_6G_320M_CHANNEL &&
+	    ch <= CH_MAX_6G_320M_CHANNEL &&
+	    ((ch - CH_MIN_6G_320M_CHANNEL) % 64) == 0) {
+		return (ch - CH_MIN_6G_320M_CHANNEL) / 64;
+	}
+
+	return -1;
+}
+
+/**
+ * This function returns the 6GHz 240MHz center channel for the given chanspec 240MHz ID
+ *
+ * @param	chan_240MHz_id	240MHz chanspec ID
+ *
+ * @return	Return the center channel number, or 0 on error.
+ *
+ */
+static uint8
+wf_chspec_6G_id240_to_ch(uint8 chan_240MHz_id)
+{
+	uint8 ch = 0;
+
+	if (chan_240MHz_id < WF_NUM_6G_240M_CHANS) {
+		/* The 6GHz center channels have a spacing of 48
+		 * starting from the first 240MHz center
+		 */
+		ch = CH_MIN_6G_240M_CHANNEL + (chan_240MHz_id * 48);
+	}
+
+	return ch;
+}
+
+/* Retrieve the chan_id and convert it to center channel */
+uint8
+wf_chspec_240_id2cch(chanspec_t chanspec)
+{
+	if (CHSPEC_BAND(chanspec) == WL_CHANSPEC_BAND_6G &&
+	    CHSPEC_BW(chanspec) == WL_CHANSPEC_BW_240) {
+		uint8 ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+		return wf_chspec_6G_id240_to_ch(ch_id);
+	}
+	return 0;
+}
+
+/**
+ * This function returns the 6GHz 320MHz center channel for the given chanspec 320MHz ID
+ *
+ * @param	chan_320MHz_id	320MHz chanspec ID
+ *
+ * @return	Return the center channel number, or 0 on error.
+ *
+ */
+static uint8
+wf_chspec_6G_id320_to_ch(uint8 chan_320MHz_id)
+{
+	uint8 ch = 0;
+
+	if (chan_320MHz_id < WF_NUM_6G_320M_CHANS) {
+		/* The 6GHz center channels have a spacing of 64
+		 * starting from the first 320MHz center
+		 */
+		ch = CH_MIN_6G_320M_CHANNEL + (chan_320MHz_id * 64);
+	}
+
+	return ch;
+}
+
+/* Retrieve the chan_id and convert it to center channel */
+uint8
+wf_chspec_320_id2cch(chanspec_t chanspec)
+{
+	if (CHSPEC_BAND(chanspec) == WL_CHANSPEC_BAND_6G &&
+	    CHSPEC_BW(chanspec) == WL_CHANSPEC_BW_320) {
+		uint8 ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+		return wf_chspec_6G_id320_to_ch(ch_id);
+	}
+	return 0;
+}
+
+/**
+ * Convert chanspec to an ascii string, or format the hex value of an invalid chanspec.
+ *
+ * @param	chspec	chanspec to format
+ * @param	buf	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *
+ * @return	Returns pointer to passed in buf. The buffer will have the ascii
+ *		representation of the given chspec, or "invalid 0xHHHH" where
+ *		0xHHHH is the hex representation of the invalid chanspec.
+ *
+ * @see CHANSPEC_STR_LEN
+ *
+ * Wrapper function for wf_chspec_ntoa. In case of an error it puts
+ * the original chanspec in the output buffer, prepended with "invalid".
+ * Can be directly used in print routines as it takes care of the NULL
+ * error return.
+ */
+char *
+wf_chspec_ntoa_ex(chanspec_t chspec, char *buf)
+{
+	if (wf_chspec_ntoa(chspec, buf) == NULL)
+		snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec);
+	return buf;
+}
+
+/**
+ * Convert chanspec to ascii string, or return NULL on error.
+ *
+ * @param	chspec	chanspec to format
+ * @param	buf	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *
+ * @return	Returns pointer to passed in buf or NULL on error. On success, the buffer
+ *		will have the ascii representation of the given chspec.
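+ *
+ * For example (illustrative): a 5GHz 80MHz chanspec with center channel 42
+ * and primary channel 36 is formatted as "36/80".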
+ * + * @see CHANSPEC_STR_LEN + * + * Given a chanspec and a string buffer, format the chanspec as a + * string, and return the original pointer buf. + * Min buffer length must be CHANSPEC_STR_LEN. + * On error return NULL. + */ +char * +wf_chspec_ntoa(chanspec_t chspec, char *buf) +{ + const char *band; + uint pri_chan; + + if (wf_chspec_malformed(chspec)) + return NULL; + + band = ""; + + /* check for non-default band spec */ + if (CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) { + band = "2g"; + } else if (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL) { + band = "5g"; + } else if (CHSPEC_IS6G(chspec)) { + band = "6g"; + } + + /* primary20 channel */ + pri_chan = wf_chspec_primary20_chan(chspec); + + /* bandwidth and primary20 sideband */ + if (CHSPEC_IS20(chspec)) { + snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, pri_chan); + } else if (CHSPEC_IS240(chspec)) { + /* 240 */ + const char *bw; + + bw = wf_chspec_to_bw_str(chspec); + + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw); + } else if (CHSPEC_IS320(chspec)) { + /* 320 */ + const char *bw; + + bw = wf_chspec_to_bw_str(chspec); + + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw); + } else { + const char *bw; + const char *sb = ""; + + bw = wf_chspec_to_bw_str(chspec); + +#ifdef CHANSPEC_NEW_40MHZ_FORMAT + /* primary20 sideband string if needed for 2g 40MHz */ + if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) { + sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l"; + } + + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, pri_chan, bw, sb); +#else + /* primary20 sideband string instead of BW for 40MHz */ + if (CHSPEC_IS40(chspec) && !CHSPEC_IS6G(chspec)) { + sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l"; + snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, pri_chan, sb); + } else { + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw); + } +#endif /* CHANSPEC_NEW_40MHZ_FORMAT */ + } + + return (buf); +} + +static int +read_uint(const char **p, unsigned int *num) +{ + unsigned long val; + char *endp = NULL; + + val = strtoul(*p, &endp, 10); + /* if endp is the initial pointer value, then a number was not read */ + if (endp == *p) + return 0; + + /* advance the buffer pointer to the end of the integer string */ + *p = endp; + /* return the parsed integer */ + *num = (unsigned int)val; + + return 1; +} + +/** + * Convert ascii string to chanspec + * + * @param a pointer to input string + * + * @return Return > 0 if successful or 0 otherwise + */ +chanspec_t +wf_chspec_aton(const char *a) +{ + chanspec_t chspec; + chanspec_band_t chspec_band; + chanspec_subband_t chspec_sb; + chanspec_bw_t chspec_bw; + uint bw; + uint num, pri_ch; + char c, sb_ul = '\0'; + + bw = 20; + chspec_sb = 0; + + /* parse channel num or band */ + if (!read_uint(&a, &num)) + return 0; + /* if we are looking at a 'g', then the first number was a band */ + c = tolower((int)a[0]); + if (c == 'g') { + a++; /* consume the char */ + + /* band must be "2", "5", or "6" */ + if (num == 2) + chspec_band = WL_CHANSPEC_BAND_2G; + else if (num == 5) + chspec_band = WL_CHANSPEC_BAND_5G; + else if (num == 6) + chspec_band = WL_CHANSPEC_BAND_6G; + else + return 0; + + /* read the channel number */ + if (!read_uint(&a, &pri_ch)) + return 0; + + c = tolower((int)a[0]); + } else { + /* first number is channel, use default for band */ + pri_ch = num; + chspec_band = ((pri_ch <= CH_MAX_2G_CHANNEL) ? 
+			WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+	}
+
+	if (c == '\0') {
+		/* default BW of 20MHz */
+		chspec_bw = WL_CHANSPEC_BW_20;
+		goto done_read;
+	}
+
+	a ++; /* consume the 'u','l', or '/' */
+
+	/* check 'u'/'l' */
+	if (c == 'u' || c == 'l') {
+		sb_ul = c;
+		chspec_bw = WL_CHANSPEC_BW_40;
+		goto done_read;
+	}
+
+	/* next letter must be '/' */
+	if (c != '/')
+		return 0;
+
+	/* read bandwidth */
+	if (!read_uint(&a, &bw))
+		return 0;
+
+	/* convert to chspec value */
+	if (bw == 20) {
+		chspec_bw = WL_CHANSPEC_BW_20;
+	} else if (bw == 40) {
+		chspec_bw = WL_CHANSPEC_BW_40;
+	} else if (bw == 80) {
+		chspec_bw = WL_CHANSPEC_BW_80;
+	} else if (bw == 160) {
+		chspec_bw = WL_CHANSPEC_BW_160;
+	} else if (WFC_BW_EQ(bw, 240)) {
+		chspec_bw = WL_CHANSPEC_BW_240;
+	} else if (WFC_BW_EQ(bw, 320)) {
+		chspec_bw = WL_CHANSPEC_BW_320;
+	} else {
+		return 0;
+	}
+
+	/* So far we have <band>g<channel>/<bandwidth>,
+	 * which can now be followed by u/l if bw = 40.
+	 */
+
+	c = tolower((int)a[0]);
+
+	/* if we have a 2g/40 channel, we should have a l/u spec now */
+	if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+		if (c == 'u' || c == 'l') {
+			a ++; /* consume the u/l char */
+			sb_ul = c;
+			goto done_read;
+		}
+	}
+
+	/* 80+80 and 160+160 ('+' forms) are not supported here */
+	if (c == '+') {
+		return 0;
+	}
+
+done_read:
+	/* skip trailing white space */
+	while (a[0] == ' ') {
+		a ++;
+	}
+
+	/* must be end of string */
+	if (a[0] != '\0')
+		return 0;
+
+	/* Now have all the chanspec string parts read;
+	 * chspec_band, pri_ch, chspec_bw, sb_ul.
+	 * chspec_band and chspec_bw are chanspec values.
+	 * Need to convert pri_ch, and sb_ul into
+	 * a center channel (or two) and sideband.
+	 */
+
+	/* if a sb u/l string was given, just use that,
+	 * guaranteed to be bw = 40 by string parse.
+	 */
+	if (sb_ul != '\0') {
+		if (sb_ul == 'l') {
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+		} else if (sb_ul == 'u') {
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+		}
+		chspec = wf_create_40MHz_chspec_primary_sb(pri_ch, chspec_sb, chspec_band);
+	} else if (chspec_bw == WL_CHANSPEC_BW_20) {
+		/* if the bw is 20, only need the primary channel and band */
+		chspec = wf_create_20MHz_chspec(pri_ch, chspec_band);
+	} else {
+		/* If the bw is 40/80/160/240/320 (and not 40MHz 2G), the channels are
+		 * non-overlapping in 5G or 6G bands. Each primary channel is contained
+		 * in only one higher bandwidth channel. The wf_create_chspec_from_primary()
+		 * will create the chanspec. 2G 40MHz is handled just above, assuming a {u,l}
+		 * sub-band spec was given.
+		 */
+		chspec = wf_create_chspec_from_primary(pri_ch, chspec_bw, chspec_band);
+	}
+
+	if (wf_chspec_malformed(chspec))
+		return 0;
+
+	return chspec;
+}
+
+/**
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, pri_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ *
+ * @param	chanspec	the chanspec to check
+ *
+ * @return	Returns TRUE if the chanspec is malformed, FALSE if it looks good.
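+ *
+ * Note that this is a purely structural check; wf_chspec_valid() below
+ * additionally verifies that the channel numbers are valid per 802.11.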
+ */
+bool
+#ifdef BCMPOSTTRAPFN
+BCMPOSTTRAPFN(wf_chspec_malformed)(chanspec_t chanspec)
+#else
+wf_chspec_malformed(chanspec_t chanspec)
+#endif
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_sb;
+
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth for 2G */
+		if (!BW_LE40(chspec_bw)) {
+			return TRUE;
+		}
+
+		/* check for invalid channel number */
+		if (CHSPEC_CHANNEL(chanspec) == INVCHANNEL) {
+			return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec) || CHSPEC_IS6G(chanspec)) {
+		if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) {
+			uint ch_id;
+
+			ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+			/* channel IDs in 240 must be in range */
+			if (CHSPEC_IS6G(chanspec)) {
+				if (ch_id >= WF_NUM_6G_240M_CHANS) {
+					/* bad 240MHz channel ID for the band */
+					return TRUE;
+				}
+			} else {
+				return TRUE;
+			}
+		} else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) {
+			uint ch_id;
+
+			ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+			/* channel IDs in 320 must be in range */
+			if (CHSPEC_IS6G(chanspec)) {
+				if (ch_id >= WF_NUM_6G_320M_CHANS) {
+					/* bad 320MHz channel ID for the band */
+					return TRUE;
+				}
+			} else {
+				return TRUE;
+			}
+		} else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
+			chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
+
+			/* check for invalid channel number */
+			if (CHSPEC_CHANNEL(chanspec) == INVCHANNEL) {
+				return TRUE;
+			}
+		} else {
+			/* invalid bandwidth */
+			return TRUE;
+		}
+	} else {
+		/* must be a valid band */
+		return TRUE;
+	}
+
+	/* retrieve sideband */
+	if ((WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) ||
+	    (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320))) {
+		chspec_sb = CHSPEC_GE240_SB(chanspec);
+	} else {
+		chspec_sb = CHSPEC_CTL_SB(chanspec);
+	}
+
+	/* side band needs to be consistent with bandwidth */
+	if (chspec_bw == WL_CHANSPEC_BW_20) {
+		if (chspec_sb != WL_CHANSPEC_CTL_SB_LLL)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+		if (chspec_sb > WL_CHANSPEC_CTL_SB_LLU)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+		/* both 80MHz and 80+80MHz use 80MHz side bands.
+		 * 80+80 SB info is relative to the primary 80MHz sub-band.
+		 */
+		if (chspec_sb > WL_CHANSPEC_CTL_SB_LUU)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+		ASSERT(chspec_sb <= WL_CHANSPEC_CTL_SB_UUU);
+	} else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) {
+		/* FIXME: define the max sideband index */
+		ASSERT((chspec_sb >> WL_CHANSPEC_GE240_SB_SHIFT) <= 11);
+	} else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) {
+		/* FIXME: define the max sideband index */
+		ASSERT((chspec_sb >> WL_CHANSPEC_GE240_SB_SHIFT) <= 15);
+	}
+
+	return FALSE;
+}
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ * + * @param chanspec the chanspec to check + * + * @return Returns TRUE if the chanspec is a valid 802.11 channel + */ +bool +wf_chspec_valid(chanspec_t chanspec) +{ + chanspec_band_t chspec_band = CHSPEC_BAND(chanspec); + chanspec_bw_t chspec_bw = CHSPEC_BW(chanspec); + uint chspec_ch = -1; + + if (wf_chspec_malformed(chanspec)) { + return FALSE; + } + + if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) { + if (CHSPEC_IS6G(chanspec)) { + chspec_ch = wf_chspec_6G_id240_to_ch(CHSPEC_GE240_CHAN(chanspec)); + } else { + return FALSE; + } + } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) { + if (CHSPEC_IS6G(chanspec)) { + chspec_ch = wf_chspec_6G_id320_to_ch(CHSPEC_GE240_CHAN(chanspec)); + } else { + return FALSE; + } + } else { + chspec_ch = CHSPEC_CHANNEL(chanspec); + } + + /* After the malformed check, we know that we have + * a valid band field, + * a valid bandwidth for the band, + * and a valid sub-band value for the bandwidth. + * + * Since all sub-band specs are valid for any channel, the only thing remaining to + * check is that + * the 20MHz channel, + * or the center channel for higher BW, + * or both center channels for an 80+80MHz channel, + * are valid for the specified band. + * Also, 80+80MHz channels need to be non-contiguous. + */ + + if (chspec_bw == WL_CHANSPEC_BW_20) { + + return wf_valid_20MHz_chan(chspec_ch, chspec_band); + + } else if (chspec_bw == WL_CHANSPEC_BW_40) { + + return wf_valid_40MHz_center_chan(chspec_ch, chspec_band); + + } else if (chspec_bw == WL_CHANSPEC_BW_80) { + + return wf_valid_80MHz_center_chan(chspec_ch, chspec_band); + + } else if (chspec_bw == WL_CHANSPEC_BW_160) { + + return wf_valid_160MHz_center_chan(chspec_ch, chspec_band); + + } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) { + + return wf_valid_240MHz_center_chan(chspec_ch, chspec_band); + + } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) { + + return wf_valid_320MHz_center_chan(chspec_ch, chspec_band); + + } + + return FALSE; +} + +/* 5G band 20MHz channel ranges with even (+4) channel spacing */ +static const struct wf_iter_range wf_5g_iter_ranges[] = { + {36, 64}, + {100, 144}, + {149, 165} +}; + +#define RANGE_ID_INVAL 0xFFu +enum wf_iter_state { + WF_ITER_INIT = 0, + WF_ITER_RUN = 1, + WF_ITER_DONE = 2 +}; + +/** + * @brief Initialize a chanspec iteration structure. + */ +bool +wf_chanspec_iter_init(wf_chanspec_iter_t *iter, chanspec_band_t band, chanspec_bw_t bw) +{ + if (iter == NULL) { + return FALSE; + } + + /* Initialize the iter structure to the "DONE" state + * in case the parameter validation fails. + * If the validation fails then the iterator will return INVCHANSPEC as the current + * chanspec, and wf_chanspec_iter_next() will return FALSE. + */ + memset(iter, 0, sizeof(*iter)); + iter->state = WF_ITER_DONE; + iter->chanspec = INVCHANSPEC; + + if (band != WL_CHANSPEC_BAND_2G && + band != WL_CHANSPEC_BAND_5G && + band != WL_CHANSPEC_BAND_6G) { + ASSERT(0); + return FALSE; + } + + /* make sure the BW is unspecified (INVCHANSPEC), 20/40, + * or (not 2g and 80/160) + */ + if (!(bw == INVCHANSPEC || + bw == WL_CHANSPEC_BW_20 || + bw == WL_CHANSPEC_BW_40 || + (band != WL_CHANSPEC_BAND_2G && + (bw == WL_CHANSPEC_BW_80 || + bw == WL_CHANSPEC_BW_160 || + WFC_BW_EQ(bw, WL_CHANSPEC_BW_240) || + WFC_BW_EQ(bw, WL_CHANSPEC_BW_320))))) { + + ASSERT(0); + return FALSE; + } + + /* Validation of the params is successful so move to the "INIT" state to + * allow the first wf_chanspec_iter_next() move the iteration to the first + * chanspec in the set. 
+ */ + iter->state = WF_ITER_INIT; + iter->band = band; + iter->bw = bw; + iter->range_id = RANGE_ID_INVAL; + + return TRUE; +} + +/** + * Start the iterator off from the 'init' state. + * The internal state is set up and advanced to the first chanspec. + */ +static void +wf_chanspec_iter_firstchan(wf_chanspec_iter_t *iter) +{ + chanspec_band_t band = iter->band; + chanspec_bw_t bw = iter->bw; + chanspec_t chspec; + + /* if BW unspecified (INVCHANSPEC), start with 20 MHz */ + if (bw == INVCHANSPEC) { + bw = WL_CHANSPEC_BW_20; + } + + /* calc the initial channel based on band */ + if (band == WL_CHANSPEC_BAND_2G) { + /* 2g has overlapping 40MHz channels, so cannot just use the + * wf_create_chspec_from_primary() fn. + */ + if (bw == WL_CHANSPEC_BW_20) { + chspec = wf_create_20MHz_chspec(CH_MIN_2G_CHANNEL, band); + } else { + chspec = (WL_CHANSPEC_BAND_2G | bw | WL_CHANSPEC_CTL_SB_L | + CH_MIN_2G_40M_CHANNEL); + } + } else { + if (band == WL_CHANSPEC_BAND_5G) { + wf_chanspec_iter_next_5g_range(iter, bw); + } else { + wf_chanspec_iter_6g_range_init(iter, bw); + } + chspec = wf_create_chspec_from_primary(iter->range.start, bw, band); + } + + iter->chanspec = chspec; +} + +/** + * @brief Return the current chanspec of the iteration. + */ +chanspec_t +wf_chanspec_iter_current(wf_chanspec_iter_t *iter) +{ + return iter->chanspec; +} + +/** + * @brief Advance the iteration to the next chanspec in the set. + */ +bool +wf_chanspec_iter_next(wf_chanspec_iter_t *iter, chanspec_t *chspec) +{ + bool ok = FALSE; + chanspec_band_t band = iter->band; + + /* Handle the INIT and DONE states. Otherwise, we are in the RUN state + * and will dispatch to the 'next' function for the appropriate band. + */ + if (iter->state == WF_ITER_INIT) { + iter->state = WF_ITER_RUN; + wf_chanspec_iter_firstchan(iter); + ok = TRUE; + } else if (iter->state == WF_ITER_DONE) { + ok = FALSE; + } else if (band == WL_CHANSPEC_BAND_2G) { + ok = wf_chanspec_iter_next_2g(iter); + } else if (band == WL_CHANSPEC_BAND_5G) { + ok = wf_chanspec_iter_next_5g(iter); + } else if (band == WL_CHANSPEC_BAND_6G) { + ok = wf_chanspec_iter_next_6g(iter); + } + + /* Return the new chanspec if a pointer was provided. + * In case the iteration is done, the return will be INVCHANSPEC. + */ + if (chspec != NULL) { + *chspec = iter->chanspec; + } + + return ok; +} + +/** + * When the iterator completes a particular bandwidth, this function + * returns the next BW, or INVCHANSPEC when done. + * + * Internal iterator helper. + */ +static chanspec_bw_t +wf_iter_next_bw(chanspec_bw_t bw) +{ + switch (bw) { + case WL_CHANSPEC_BW_20: + bw = WL_CHANSPEC_BW_40; + break; + case WL_CHANSPEC_BW_40: + bw = WL_CHANSPEC_BW_80; + break; + case WL_CHANSPEC_BW_80: + bw = WL_CHANSPEC_BW_160; + break; +#ifdef WL11BE + case WL_CHANSPEC_BW_160: + bw = WL_CHANSPEC_BW_240; + break; + case WL_CHANSPEC_BW_240: + bw = WL_CHANSPEC_BW_320; + break; +#endif + default: + bw = INVCHANSPEC; + break; + } + return bw; +} + +/** + * This is the _iter_next() helper for 2g band chanspec iteration. 
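+ *
+ * A typical caller-side loop over the public iterator API (an illustrative
+ * sketch only, not part of this patch):
+ *
+ *	wf_chanspec_iter_t iter;
+ *	chanspec_t chspec;
+ *
+ *	if (wf_chanspec_iter_init(&iter, WL_CHANSPEC_BAND_2G, WL_CHANSPEC_BW_20)) {
+ *		while (wf_chanspec_iter_next(&iter, &chspec)) {
+ *			... use chspec ...
+ *		}
+ *	}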
+ */ +static bool +wf_chanspec_iter_next_2g(wf_chanspec_iter_t *iter) +{ + chanspec_t chspec = iter->chanspec; + uint8 ch = CHSPEC_CHANNEL(chspec); + + if (CHSPEC_IS20(chspec)) { + if (ch < CH_MAX_2G_CHANNEL) { + ch++; + chspec = wf_create_20MHz_chspec(ch, WL_CHANSPEC_BAND_2G); + } else if (iter->bw == INVCHANSPEC) { + /* hit the end of 20M channels, go to 40M if bw was unspecified */ + ch = CH_MIN_2G_40M_CHANNEL; + chspec = wf_create_40MHz_chspec(LOWER_20_SB(ch), ch, WL_CHANSPEC_BAND_2G); + } else { + /* done */ + iter->state = WF_ITER_DONE; + chspec = INVCHANSPEC; + } + } else { + /* step through low then high primary sideband, then next 40 center channel */ + if (CHSPEC_SB_LOWER(iter->chanspec)) { + /* move from lower primary 20 to upper */ + chspec = wf_create_40MHz_chspec(UPPER_20_SB(ch), + ch, WL_CHANSPEC_BAND_2G); + } else if (ch < CH_MAX_2G_40M_CHANNEL) { + /* move to next 40M center and lower primary 20 */ + ch++; + chspec = wf_create_40MHz_chspec(LOWER_20_SB(ch), + ch, WL_CHANSPEC_BAND_2G); + } else { + /* done */ + iter->state = WF_ITER_DONE; + chspec = INVCHANSPEC; + } + } + + iter->chanspec = chspec; + + return (chspec != INVCHANSPEC); +} + +/** + * This is the _iter_next() helper for 5g band chanspec iteration. + * The 5g iterator uses ranges of primary 20MHz channels, and the current BW, to create + * each chanspec in the set. + * When a 5g range is exhausted, wf_chanspec_iter_next_5g_range() is called to get the next + * range appropriate to the current BW. + */ +static bool +wf_chanspec_iter_next_5g(wf_chanspec_iter_t *iter) +{ + chanspec_t chspec = iter->chanspec; + chanspec_bw_t bw = CHSPEC_BW(chspec); + uint8 ch = wf_chspec_primary20_chan(chspec); + uint8 end = iter->range.end; + + if (ch < end) { + /* not at the end of the current range, so + * step to the next 20MHz channel and create the current BW + * channel with that new primary 20MHz. + */ + ch += CH_20MHZ_APART; + } else if (wf_chanspec_iter_next_5g_range(iter, bw)) { + /* there was a new range in the current BW, so start at the beginning */ + ch = iter->range.start; + } else if (iter->bw == INVCHANSPEC) { + /* hit the end of current bw, so move to the next bw */ + bw = wf_iter_next_bw(bw); + if (bw != INVCHANSPEC) { + /* initialize the first range */ + iter->range_id = RANGE_ID_INVAL; + wf_chanspec_iter_next_5g_range(iter, bw); + ch = iter->range.start; + } else { + /* no more BWs */ + chspec = INVCHANSPEC; + } + } else { + /* no more channels, ranges, or BWs */ + chspec = INVCHANSPEC; + } + + /* if we are not at the end of the iteration, calc the next chanspec from components */ + if (chspec != INVCHANSPEC) { + chspec = wf_create_chspec_from_primary(ch, bw, WL_CHANSPEC_BAND_5G); + } + + iter->chanspec = chspec; + if (chspec != INVCHANSPEC) { + return TRUE; + } else { + iter->state = WF_ITER_DONE; + return FALSE; + } +} + +/** + * Helper function to set up the next range of primary 20MHz channels to + * iterate over for the current BW. This will advance + * iter->range_id + * and set up + * iter->range.start + * iter->range.end + * for the new range. + * Returns FALSE if there are no more ranges in the current BW. 
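+ *
+ * For 20MHz, for example, the ranges walked are 36-64, 100-144 and 149-165
+ * (wf_5g_iter_ranges above); for 40/80/160MHz a range is derived from each
+ * valid center channel in the corresponding lookup table.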
+ */
+static int
+wf_chanspec_iter_next_5g_range(wf_chanspec_iter_t *iter, chanspec_bw_t bw)
+{
+	uint8 range_id = iter->range_id;
+	const uint8 *channels;
+	uint count;
+
+	if (bw == WL_CHANSPEC_BW_20) {
+		if (range_id == RANGE_ID_INVAL) {
+			range_id = 0;
+		} else {
+			range_id++;
+		}
+
+		if (range_id < ARRAYSIZE(wf_5g_iter_ranges)) {
+			iter->range_id = range_id;
+			iter->range = wf_5g_iter_ranges[range_id];
+			return TRUE;
+		}
+
+		return FALSE;
+	}
+
+	if (bw == WL_CHANSPEC_BW_40) {
+		channels = wf_5g_40m_chans;
+		count = WF_NUM_5G_40M_CHANS;
+	} else if (bw == WL_CHANSPEC_BW_80) {
+		channels = wf_5g_80m_chans;
+		count = WF_NUM_5G_80M_CHANS;
+	} else if (bw == WL_CHANSPEC_BW_160) {
+		channels = wf_5g_160m_chans;
+		count = WF_NUM_5G_160M_CHANS;
+	} else {
+		return FALSE;
+	}
+
+	if (range_id == RANGE_ID_INVAL) {
+		range_id = 0;
+	} else {
+		range_id++;
+	}
+	if (range_id < count) {
+		uint8 ch = channels[range_id];
+		uint offset = center_chan_to_edge(bw);
+
+		iter->range_id = range_id;
+		iter->range.start = ch - offset;
+		iter->range.end = ch + offset;
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/**
+ * This is the _iter_next() helper for 6g band chanspec iteration.
+ * The 6g iterator uses ranges of primary 20MHz channels, and the current BW, to create
+ * each chanspec in the set.
+ * Each BW in 6g has one contiguous range of primary 20MHz channels. When a range is
+ * exhausted, the iterator moves to the next BW.
+ */
+static bool
+wf_chanspec_iter_next_6g(wf_chanspec_iter_t *iter)
+{
+	chanspec_t chspec = iter->chanspec;
+	chanspec_bw_t bw = CHSPEC_BW(chspec);
+	uint8 ch = wf_chspec_primary20_chan(chspec);
+	uint8 end = iter->range.end;
+
+	if (ch < end) {
+		/* not at the end of the current range, so
+		 * step to the next 20MHz channel and create the current BW
+		 * channel with that new primary 20MHz.
+		 */
+		ch += CH_20MHZ_APART;
+
+		/* try to create a valid channel of the current BW
+		 * with a primary20 'ch'
+		 */
+		chspec = wf_create_chspec_from_primary(ch, bw, WL_CHANSPEC_BAND_6G);
+
+		/* if chspec is INVCHANSPEC, then we hit the end
+		 * of the valid channels in the range.
+		 */
+	} else {
+		/* hit the end of the current range */
+		chspec = INVCHANSPEC;
+	}
+
+	/* if we are at the end of the current channel range
+	 * check if there is another BW to iterate
+	 * Note: (iter->bw == INVCHANSPEC) indicates an unspecified BW for the iteration,
+	 * so it will iterate over all BWs.
+	 */
+	if (chspec == INVCHANSPEC &&
+	    iter->bw == INVCHANSPEC &&
+	    (bw = wf_iter_next_bw(bw)) != INVCHANSPEC) {
+		/* start the new bw with the first primary20 */
+		ch = iter->range.start;
+		chspec = wf_create_chspec_from_primary(ch, bw, WL_CHANSPEC_BAND_6G);
+	}
+
+	iter->chanspec = chspec;
+	if (chspec != INVCHANSPEC) {
+		return TRUE;
+	} else {
+		iter->state = WF_ITER_DONE;
+		return FALSE;
+	}
+}
+
+/**
+ * Helper used by wf_chanspec_iter_firstchan() to set up the first range of
+ * primary channels for the 6g band and for the BW being iterated.
+ */
+static void
+wf_chanspec_iter_6g_range_init(wf_chanspec_iter_t *iter, chanspec_bw_t bw)
+{
+	switch (bw) {
+	case WL_CHANSPEC_BW_20:
+	case WL_CHANSPEC_BW_40:
+	case WL_CHANSPEC_BW_80:
+	case WL_CHANSPEC_BW_160:
+#ifdef WL11BE
+	case WL_CHANSPEC_BW_240:
+	case WL_CHANSPEC_BW_320:
+#endif
+		iter->range.start = CH_MIN_6G_CHANNEL;
+		iter->range.end = CH_MAX_6G_CHANNEL;
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+}
+
+/**
+ * Verify that the channel is a valid 20MHz channel according to 802.11.
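+ *
+ * For example (illustrative): channel 36 is valid in WL_CHANSPEC_BAND_5G,
+ * while channel 15 is not valid in WL_CHANSPEC_BAND_2G.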
+ * + * @param channel 20MHz channel number to validate + * @param band chanspec band + * + * @return Return TRUE if valid + */ +bool +wf_valid_20MHz_chan(uint channel, chanspec_band_t band) +{ + if (band == WL_CHANSPEC_BAND_2G) { + /* simple range check for 2GHz */ + return (channel >= CH_MIN_2G_CHANNEL && + channel <= CH_MAX_2G_CHANNEL); + } else if (band == WL_CHANSPEC_BAND_5G) { + const uint8 *center_ch = wf_5g_40m_chans; + uint num_ch = WF_NUM_5G_40M_CHANS; + uint i; + + /* We don't have an array of legal 20MHz 5G channels, but they are + * each side of the legal 40MHz channels. Check the chanspec + * channel against either side of the 40MHz channels. + */ + for (i = 0; i < num_ch; i ++) { + if (channel == (uint)LOWER_20_SB(center_ch[i]) || + channel == (uint)UPPER_20_SB(center_ch[i])) { + break; /* match found */ + } + } + + if (i == num_ch) { + /* check for channel 165 which is not the side band + * of 40MHz 5G channel + */ + if (channel == 165) { + i = 0; + } + + /* check for legacy JP channels on failure */ + if (channel == 34 || channel == 38 || + channel == 42 || channel == 46) { + i = 0; + } + } + + if (i < num_ch) { + /* match found */ + return TRUE; + } + } + + else if (band == WL_CHANSPEC_BAND_6G) { + /* Use the simple pattern of 6GHz 20MHz channels for validity check */ + if ((channel >= CH_MIN_6G_CHANNEL && + channel <= CH_MAX_6G_CHANNEL) && + ((((channel - CH_MIN_6G_CHANNEL) % 4) == 0) || // even multiple of 4 + channel == 2)) { // Or the oddball channel 2 + return TRUE; + } + } + + return FALSE; +} + +/** + * Verify that the center channel is a valid 40MHz center channel according to 802.11. + * + * @param center_channel 40MHz center channel to validate + * @param band chanspec band + * + * @return Return TRUE if valid + */ +bool +wf_valid_40MHz_center_chan(uint center_channel, chanspec_band_t band) +{ + if (band == WL_CHANSPEC_BAND_2G) { + /* simple range check for 2GHz */ + return (center_channel >= CH_MIN_2G_40M_CHANNEL && + center_channel <= CH_MAX_2G_40M_CHANNEL); + } else if (band == WL_CHANSPEC_BAND_5G) { + uint i; + + /* use the 5GHz lookup of 40MHz channels */ + for (i = 0; i < WF_NUM_5G_40M_CHANS; i++) { + if (center_channel == wf_5g_40m_chans[i]) { + return TRUE; + } + } + } + else if (band == WL_CHANSPEC_BAND_6G) { + /* Use the simple pattern of 6GHz center channels */ + if ((center_channel >= CH_MIN_6G_40M_CHANNEL && + center_channel <= CH_MAX_6G_40M_CHANNEL) && + ((center_channel - CH_MIN_6G_40M_CHANNEL) % 8) == 0) { // even multiple of 8 + return TRUE; + } + } + + return FALSE; +} + +/** + * Verify that the center channel is a valid 80MHz center channel according to 802.11. + * + * @param center_channel 80MHz center channel to validate + * @param band chanspec band + * + * @return Return TRUE if valid + */ +bool +wf_valid_80MHz_center_chan(uint center_channel, chanspec_band_t band) +{ + if (band == WL_CHANSPEC_BAND_5G) { + /* use the 80MHz ID lookup to validate the center channel */ + if (channel_80mhz_to_id(center_channel) >= 0) { + return TRUE; + } + } else if (band == WL_CHANSPEC_BAND_6G) { + /* use the 80MHz ID lookup to validate the center channel */ + if (channel_6g_80mhz_to_id(center_channel) >= 0) { + return TRUE; + } + } + + return FALSE; +} + +/** + * Verify that the center channel is a valid 160MHz center channel according to 802.11. 
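+ *
+ * For example, in the 5GHz band only the center channels listed in
+ * wf_5g_160m_chans (50, 114 and 163) are accepted.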
+ *
+ * @param	center_channel	160MHz center channel to validate
+ * @param	band		chanspec band
+ *
+ * @return	Return TRUE if valid
+ */
+bool
+wf_valid_160MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+	if (band == WL_CHANSPEC_BAND_5G) {
+		uint i;
+
+		/* use the 5GHz lookup of 160MHz channels */
+		for (i = 0; i < WF_NUM_5G_160M_CHANS; i++) {
+			if (center_channel == wf_5g_160m_chans[i]) {
+				return TRUE;
+			}
+		}
+	} else if (band == WL_CHANSPEC_BAND_6G) {
+		/* Use the simple pattern of 6GHz center channels */
+		if ((center_channel >= CH_MIN_6G_160M_CHANNEL &&
+		     center_channel <= CH_MAX_6G_160M_CHANNEL) &&
+		    ((center_channel - CH_MIN_6G_160M_CHANNEL) % 32) == 0) {	// even multiple of 32
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+/**
+ * Verify that the center channel is a valid 240MHz center channel according to 802.11.
+ *
+ * @param	center_channel	240MHz center channel to validate
+ * @param	band		chanspec band
+ *
+ * @return	Return TRUE if valid
+ */
+bool
+wf_valid_240MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+	if (band == WL_CHANSPEC_BAND_6G) {
+		/* Use the simple pattern of 6GHz center channels */
+		if ((center_channel >= CH_MIN_6G_240M_CHANNEL &&
+		     center_channel <= CH_MAX_6G_240M_CHANNEL) &&
+		    ((center_channel - CH_MIN_6G_240M_CHANNEL) % 48) == 0) {	// even multiple of 48
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+/**
+ * Verify that the center channel is a valid 320MHz center channel according to 802.11.
+ *
+ * @param	center_channel	320MHz center channel to validate
+ * @param	band		chanspec band
+ *
+ * @return	Return TRUE if valid
+ */
+bool
+wf_valid_320MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+	if (band == WL_CHANSPEC_BAND_6G) {
+		/* Use the simple pattern of 6GHz center channels */
+		if ((center_channel >= CH_MIN_6G_320M_CHANNEL &&
+		     center_channel <= CH_MAX_6G_320M_CHANNEL) &&
+		    ((center_channel - CH_MIN_6G_320M_CHANNEL) % 64) == 0) {	// even multiple of 64
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+/*
+ * This function returns TRUE if both chanspecs can co-exist in the PHY.
+ * In addition to the primary20 channel, it checks the sideband for 2g 40MHz channels.
+ */
+bool
+wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2)
+{
+	bool same_primary;
+
+	same_primary = (wf_chspec_primary20_chan(chspec1) == wf_chspec_primary20_chan(chspec2));
+
+	if (same_primary && CHSPEC_IS2G(chspec1)) {
+		if (CHSPEC_IS40(chspec1) && CHSPEC_IS40(chspec2)) {
+			return (CHSPEC_CTL_SB(chspec1) == CHSPEC_CTL_SB(chspec2));
+		}
+	}
+	return same_primary;
+}
+
+/**
+ * Create a 20MHz chanspec for the given band.
+ *
+ * This function returns a 20MHz chanspec in the given band.
+ *
+ * @param	channel	20MHz channel number
+ * @param	band	a chanspec band (e.g. WL_CHANSPEC_BAND_2G)
+ *
+ * @return	Returns a 20MHz chanspec, or INVCHANSPEC in case of error.
+ */
+chanspec_t
+wf_create_20MHz_chspec(uint channel, chanspec_band_t band)
+{
+	chanspec_t chspec;
+
+	if (channel <= WL_CHANSPEC_CHAN_MASK &&
+	    (band == WL_CHANSPEC_BAND_2G ||
+	     band == WL_CHANSPEC_BAND_5G ||
+	     band == WL_CHANSPEC_BAND_6G)) {
+		chspec = band | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE | channel;
+		if (!wf_chspec_valid(chspec)) {
+			chspec = INVCHANSPEC;
+		}
+	} else {
+		chspec = INVCHANSPEC;
+	}
+
+	return chspec;
+}
+
+/**
+ * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
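+ *
+ * For example (illustrative): wf_create_40MHz_chspec(36, 38, WL_CHANSPEC_BAND_5G)
+ * builds the 40MHz channel centered on 38 with primary 20MHz channel 36.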
+ *
+ * @param	primary_channel	primary 20Mhz channel
+ * @param	center_channel	center channel of the 40MHz channel
+ * @param	band		band of the 40MHz channel (chanspec_band_t value)
+ *
+ * The center_channel can be one of the 802.11 spec valid 40MHz center channels
+ * in the given band.
+ *
+ * @return	returns a 40MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_40MHz_chspec(uint primary_channel, uint center_channel,
+	chanspec_band_t band)
+{
+	int sb;
+
+	/* Calculate the sideband value for the center and primary channel.
+	 * Will return -1 if not a valid pair for 40MHz
+	 */
+	sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_40);
+
+	/* return err if the sideband was bad or the center channel is not
+	 * valid for the given band.
+	 */
+	if (sb < 0 || !wf_valid_40MHz_center_chan(center_channel, band)) {
+		return INVCHANSPEC;
+	}
+
+	/* otherwise construct and return the valid 40MHz chanspec */
+	return (chanspec_t)(center_channel | WL_CHANSPEC_BW_40 | band |
+		((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT));
+}
+
+/**
+ * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number,
+ * the sub-band for the primary 20MHz channel, and the band.
+ *
+ * @param	primary_channel	primary 20Mhz channel
+ * @param	primary_subband	sub-band of the 20MHz primary channel (chanspec_subband_t value)
+ * @param	band		band of the 40MHz channel (chanspec_band_t value)
+ *
+ * The primary channel and sub-band should describe one of the 802.11 spec valid
+ * 40MHz channels in the given band.
+ *
+ * @return	returns a 40MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_40MHz_chspec_primary_sb(uint primary_channel, chanspec_subband_t primary_subband,
+	chanspec_band_t band)
+{
+	uint center_channel;
+
+	/* find the center channel */
+	if (primary_subband == WL_CHANSPEC_CTL_SB_L) {
+		center_channel = primary_channel + CH_10MHZ_APART;
+	} else if (primary_subband == WL_CHANSPEC_CTL_SB_U) {
+		center_channel = primary_channel - CH_10MHZ_APART;
+	} else {
+		return INVCHANSPEC;
+	}
+
+	return wf_create_40MHz_chspec(primary_channel, center_channel, band);
+}
+
+/**
+ * Returns the chanspec for an 80MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param	primary_channel	primary 20Mhz channel
+ * @param	center_channel	center channel of the 80MHz channel
+ * @param	band		band of the 80MHz channel (chanspec_band_t value)
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155, 171} for 5G,
+ * or {7 + 16*X for 0 <= X <= 13} for 6G.
+ *
+ * @return	returns an 80MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_80MHz_chspec(uint primary_channel, uint center_channel,
+	chanspec_band_t band)
+{
+	int sb;
+
+	/* Calculate the sideband value for the center and primary channel.
+	 * Will return -1 if not a valid pair for 80MHz
+	 */
+	sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_80);
+
+	/* return err if the sideband was bad or the center channel is not
+	 * valid for the given band.
+	 */
+	if (sb < 0 || !wf_valid_80MHz_center_chan(center_channel, band)) {
+		return INVCHANSPEC;
+	}
+
+	/* otherwise construct and return the valid 80MHz chanspec */
+	return (chanspec_t)(center_channel | WL_CHANSPEC_BW_80 | band |
+		((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT));
+}
+
+/**
+ * Returns the chanspec for a 160MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
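+ *
+ * For example (illustrative): wf_create_160MHz_chspec(36, 50, WL_CHANSPEC_BAND_5G)
+ * builds the 160MHz channel centered on 50 (channels 36-64) with primary 36.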
+ *
+ * @param	primary_channel	primary 20Mhz channel
+ * @param	center_channel	center channel of the 160MHz channel
+ * @param	band		band of the 160MHz channel (chanspec_band_t value)
+ *
+ * The center_channel can be one of {50, 114, 163} for 5G,
+ * or {15 + 32*X for 0 <= X <= 7} for 6G.
+ *
+ * @return	returns a 160MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_160MHz_chspec(uint primary_channel, uint center_channel, chanspec_band_t band)
+{
+	int sb;
+
+	/* Calculate the sideband value for the center and primary channel.
+	 * Will return -1 if not a valid pair for 160MHz
+	 */
+	sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_160);
+
+	/* return err if the sideband was bad or the center channel is not
+	 * valid for the given band.
+	 */
+	if (sb < 0 || !wf_valid_160MHz_center_chan(center_channel, band)) {
+		return INVCHANSPEC;
+	}
+
+	/* otherwise construct and return the valid 160MHz chanspec */
+	return (chanspec_t)(center_channel | WL_CHANSPEC_BW_160 | band |
+		((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT));
+}
+
+/**
+ * Returns the chanspec for an 80+80MHz channel given the primary 20MHz channel number,
+ * the center channel numbers for each frequency segment, and the band.
+ *
+ * @param	primary_channel	primary 20 Mhz channel
+ * @param	chan0		center channel number of one frequency segment
+ * @param	chan1		center channel number of the other frequency segment
+ * @param	band		band of the 80+80 MHz channel (chanspec_band_t value)
+ *
+ * Parameters chan0 and chan1 are valid 80 MHz center channel numbers for the given band.
+ * The primary channel must be contained in one of the 80 MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * @return	returns an 80+80 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to 802.11-2016 section 21.3.14 "Channelization".
+ */
+chanspec_t
+wf_create_8080MHz_chspec(uint primary_channel, uint chan0, uint chan1,
+	chanspec_band_t band)
+{
+	int sb = 0;
+	chanspec_t chanspec = 0;
+	int chan0_id = -1, chan1_id = -1;
+	int seg0, seg1;
+
+	/* frequency segments need to be non-contiguous, so the channel separation needs
+	 * to be greater than 80MHz
+	 */
+	if ((uint)ABS((int)(chan0 - chan1)) <= CH_80MHZ_APART) {
+		return INVCHANSPEC;
+	}
+
+	if (band == WL_CHANSPEC_BAND_5G) {
+		chan0_id = channel_80mhz_to_id(chan0);
+		chan1_id = channel_80mhz_to_id(chan1);
+	} else if (band == WL_CHANSPEC_BAND_6G) {
+		chan0_id = channel_6g_80mhz_to_id(chan0);
+		chan1_id = channel_6g_80mhz_to_id(chan1);
+	}
+
+	/* make sure the channel numbers were valid */
+	if (chan0_id == -1 || chan1_id == -1) {
+		return INVCHANSPEC;
+	}
+
+	/* does the primary channel fit with the 1st 80MHz channel ? */
+	sb = channel_to_sb(chan0, primary_channel, WL_CHANSPEC_BW_80);
+	if (sb >= 0) {
+		/* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+		seg0 = chan0_id;
+		seg1 = chan1_id;
+	} else {
+		/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+		sb = channel_to_sb(chan1, primary_channel, WL_CHANSPEC_BW_80);
+		if (sb < 0) {
+			/* no match for pri_ch to either 80MHz center channel */
+			return INVCHANSPEC;
+		}
+		/* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+		seg0 = chan1_id;
+		seg1 = chan0_id;
+	}
+
+	chanspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) |
+		(seg1 << WL_CHANSPEC_CHAN1_SHIFT) |
+		(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+		WL_CHANSPEC_BW_8080 |
+		band);
+
+	return chanspec;
+}
+
+/**
+ * Returns the chanspec for a 160+160MHz channel given the primary 20MHz channel number,
+ * the center channel numbers for each frequency segment, and the band.
+ *
+ * @param	primary_channel	primary 20 Mhz channel
+ * @param	chan0		center channel number of one frequency segment
+ * @param	chan1		center channel number of the other frequency segment
+ * @param	band		band of the 160+160 MHz channel (chanspec_band_t value)
+ *
+ * Parameters chan0 and chan1 are valid 160 MHz center channel numbers for the given band.
+ * The primary channel must be contained in one of the 160 MHz channels. This routine
+ * will determine which frequency segment is the primary 160 MHz segment.
+ *
+ * @return	returns a 160+160 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to "Channelization".
+ */
+chanspec_t
+wf_create_160160MHz_chspec(uint primary_channel, uint chan0, uint chan1,
+	chanspec_band_t band)
+{
+	int sb = 0;
+	chanspec_t chanspec = 0;
+	int chan0_id = -1, chan1_id = -1;
+	int seg0, seg1;
+
+	/* frequency segments need to be non-contiguous, so the channel separation needs
+	 * to be greater than 160MHz
+	 */
+	if ((uint)ABS((int)(chan0 - chan1)) <= CH_160MHZ_APART) {
+		return INVCHANSPEC;
+	}
+
+	if (band == WL_CHANSPEC_BAND_5G) {
+		chan0_id = channel_5g_160mhz_to_id(chan0);
+		chan1_id = channel_5g_160mhz_to_id(chan1);
+	} else if (band == WL_CHANSPEC_BAND_6G) {
+		chan0_id = channel_6g_160mhz_to_id(chan0);
+		chan1_id = channel_6g_160mhz_to_id(chan1);
+	}
+
+	/* make sure the channel numbers were valid */
+	if (chan0_id == -1 || chan1_id == -1) {
+		return INVCHANSPEC;
+	}
+
+	/* does the primary channel fit with the 1st 160MHz channel ? */
+	sb = channel_to_sb(chan0, primary_channel, WL_CHANSPEC_BW_160);
+	if (sb >= 0) {
+		/* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+		seg0 = chan0_id;
+		seg1 = chan1_id;
+	} else {
+		/* no, so does the primary channel fit with the 2nd 160MHz channel ? */
+		sb = channel_to_sb(chan1, primary_channel, WL_CHANSPEC_BW_160);
+		if (sb < 0) {
+			/* no match for pri_ch to either 160MHz center channel */
+			return INVCHANSPEC;
+		}
+		/* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+		seg0 = chan1_id;
+		seg1 = chan0_id;
+	}
+
+	chanspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) |
+		(seg1 << WL_CHANSPEC_CHAN1_SHIFT) |
+		(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+		WL_CHANSPEC_BW_160160 |
+		band);
+
+	return chanspec;
+}
+
+/**
+ * Returns the chanspec for a 240MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param	primary_channel	primary 20 Mhz channel
+ * @param	center_channel	center channel number
+ * @param	band		band of the 240 MHz channel (chanspec_band_t value)
+ *
+ * @return	returns a 240 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to "Channelization".
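+ *
+ * As an illustration (derived from channel_6g_240mhz_to_id() above), the
+ * valid 6GHz 240MHz center channels are CH_MIN_6G_240M_CHANNEL + 48 * id
+ * for id < WF_NUM_6G_240M_CHANS.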
+ */
+chanspec_t
+wf_create_240MHz_chspec(uint primary_channel, uint center_channel, chanspec_band_t band)
+{
+	int sb = 0;
+	chanspec_t chanspec = 0;
+	int chan_id = -1;
+
+	if (band == WL_CHANSPEC_BAND_6G) {
+		chan_id = channel_6g_240mhz_to_id(center_channel);
+	}
+
+	/* make sure the channel number was valid */
+	if (chan_id == -1) {
+		return INVCHANSPEC;
+	}
+
+	/* Calculate the sideband value for the center and primary channel.
+	 * Will return -1 if not a valid pair for 240MHz
+	 */
+	sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_240);
+
+	/* return err if the sideband was bad or the center channel is not
+	 * valid for the given band.
+	 */
+	if (sb < 0 || !wf_valid_240MHz_center_chan(center_channel, band)) {
+		return INVCHANSPEC;
+	}
+
+	chanspec = ((chan_id << WL_CHANSPEC_GE240_CHAN_SHIFT) |
+		(sb << WL_CHANSPEC_GE240_SB_SHIFT) |
+		WL_CHANSPEC_BW_240 |
+		band);
+
+	return chanspec;
+}
+
+/**
+ * Returns the chanspec for a 320MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param primary_channel primary 20 MHz channel
+ * @param center_channel center channel number
+ * @param band band of the 320 MHz channel (chanspec_band_t value)
+ *
+ * Parameter center_channel is a valid 320 MHz center channel number for the given band.
+ * The primary channel must be contained in the 320 MHz channel.
+ *
+ * @return returns a 320 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to "Channelization".
+ */
+chanspec_t
+wf_create_320MHz_chspec(uint primary_channel, uint center_channel, chanspec_band_t band)
+{
+	int sb = 0;
+	chanspec_t chanspec = 0;
+	int chan_id = -1;
+
+	if (band == WL_CHANSPEC_BAND_6G) {
+		chan_id = channel_6g_320mhz_to_id(center_channel);
+	}
+
+	/* make sure the channel number was valid */
+	if (chan_id == -1) {
+		return INVCHANSPEC;
+	}
+
+	/* Calculate the sideband value for the center and primary channel.
+	 * Will return -1 if not a valid pair for 320MHz
+	 */
+	sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_320);
+
+	/* return err if the sideband was bad or the center channel is not
+	 * valid for the given band.
+	 */
+	if (sb < 0 || !wf_valid_320MHz_center_chan(center_channel, band)) {
+		return INVCHANSPEC;
+	}
+
+	chanspec = ((chan_id << WL_CHANSPEC_GE240_CHAN_SHIFT) |
+		(sb << WL_CHANSPEC_GE240_SB_SHIFT) |
+		WL_CHANSPEC_BW_320 |
+		band);
+
+	return chanspec;
+}
+
+/**
+ * Returns the chanspec given the primary 20MHz channel number,
+ * the center channel number, channel width, and the band. The channel width
+ * must be 20, 40, 80, 160, 240 or 320 MHz.
+ * 80+80 or 160+160 MHz chanspec creation is not handled by this function,
+ * use wf_create_8080MHz_chspec() or wf_create_160160MHz_chspec() instead.
+ *
+ * @param primary_channel primary 20MHz channel
+ * @param center_channel center channel of the channel
+ * @param bw width of the channel (chanspec_bw_t)
+ * @param band chanspec band of channel (chanspec_band_t)
+ *
+ * The center_channel can be one of the 802.11 spec valid center channels
+ * for the given bandwidth in the given band.
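+ *
+ * Usage sketch (values assume standard 5G channelization):
+ *   wf_create_chspec(36, 42, WL_CHANSPEC_BW_80, WL_CHANSPEC_BAND_5G)
+ *   yields the 80MHz chanspec centered on channel 42 whose primary 20MHz
+ *   channel is 36; a pair that does not nest, e.g. (36, 58), yields
+ *   INVCHANSPEC.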
+ *
+ * @return returns a chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_chspec(uint primary_channel, uint center_channel,
+	chanspec_bw_t bw, chanspec_band_t band)
+{
+	chanspec_t chspec = INVCHANSPEC;
+	int sb = -1;
+	uint sb_shift;
+
+	/* 20MHz channels have matching center and primary channels */
+	if (bw == WL_CHANSPEC_BW_20 && primary_channel == center_channel) {
+
+		sb = 0;
+
+	} else if (bw == WL_CHANSPEC_BW_40 ||
+		bw == WL_CHANSPEC_BW_80 ||
+		bw == WL_CHANSPEC_BW_160 ||
+		WFC_BW_EQ(bw, WL_CHANSPEC_BW_240) ||
+		WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) {
+
+		/* calculate the sub-band index */
+		sb = channel_to_sb(center_channel, primary_channel, bw);
+	}
+
+	/* if we have a good sub-band, assemble the chanspec, and use wf_chspec_valid()
+	 * to check it for correctness
+	 */
+	if (sb >= 0) {
+		if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240)) {
+			if (band == WL_CHANSPEC_BAND_6G) {
+				center_channel = channel_6g_240mhz_to_id(center_channel);
+				sb_shift = WL_CHANSPEC_GE240_SB_SHIFT;
+			} else {
+				return INVCHANSPEC;
+			}
+		} else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) {
+			if (band == WL_CHANSPEC_BAND_6G) {
+				center_channel = channel_6g_320mhz_to_id(center_channel);
+				sb_shift = WL_CHANSPEC_GE240_SB_SHIFT;
+			} else {
+				return INVCHANSPEC;
+			}
+		} else {
+			sb_shift = WL_CHANSPEC_CTL_SB_SHIFT;
+		}
+		chspec = center_channel | band | bw |
+			((uint)sb << sb_shift);
+		if (!wf_chspec_valid(chspec)) {
+			chspec = INVCHANSPEC;
+		}
+	}
+
+	return chspec;
+}
+
+/**
+ * Returns the chanspec given the primary 20MHz channel number,
+ * channel width, and the band.
+ *
+ * @param primary_channel primary 20MHz channel
+ * @param bw width of the channel (chanspec_bw_t)
+ * @param band chanspec band of channel (chanspec_band_t)
+ *
+ * @return returns a chanspec, or INVCHANSPEC in case of error
+ *
+ * This function is similar to wf_create_chspec() but does not require the
+ * center_channel parameter.
+ *
+ * This function supports creating 20MHz bandwidth chanspecs on any band.
+ *
+ * For the 2GHz band, 40MHz channels overlap, so two 40MHz channels may
+ * have the same primary 20MHz channel. In that case this function uses the
+ * first compatible center channel found in the 2GHz 40MHz lookup table.
+ *
+ * 5GHz and 6GHz bands have non-overlapping 40/80/160 MHz channels, so a
+ * 20MHz primary channel uniquely specifies a wider channel in a given band.
+ *
+ * 80+80MHz channels cannot be uniquely defined by a primary channel. This
+ * function will return INVCHANSPEC whenever called with a bandwidth of
+ * WL_CHANSPEC_BW_8080.
+ */
+chanspec_t
+wf_create_chspec_from_primary(uint primary_channel, chanspec_bw_t bw, chanspec_band_t band)
+{
+	chanspec_t chspec = INVCHANSPEC;
+
+	if (bw == WL_CHANSPEC_BW_20) {
+		chspec = wf_create_20MHz_chspec(primary_channel, band);
+	} else if (band == WL_CHANSPEC_BAND_2G || band == WL_CHANSPEC_BAND_5G) {
+		/* For 2GHz and 5GHz, use the lookup tables of valid 40/80/160 center channels
+		 * and search for a center channel compatible with the given primary channel.
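+		 * For instance, primary channel 36 at 80MHz should match center
+		 * channel 42 from wf_5g_80m_chans (assuming the standard table).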
+ */ + const uint8 *center_ch = NULL; + uint num_ch, i; + + if (band == WL_CHANSPEC_BAND_2G && bw == WL_CHANSPEC_BW_40) { + center_ch = wf_2g_40m_chans; + num_ch = WF_NUM_2G_40M_CHANS; + } else + if (bw == WL_CHANSPEC_BW_40) { + center_ch = wf_5g_40m_chans; + num_ch = WF_NUM_5G_40M_CHANS; + } else if (bw == WL_CHANSPEC_BW_80) { + center_ch = wf_5g_80m_chans; + num_ch = WF_NUM_5G_80M_CHANS; + } else if (bw == WL_CHANSPEC_BW_160) { + center_ch = wf_5g_160m_chans; + num_ch = WF_NUM_5G_160M_CHANS; + } else { + num_ch = 0; + } + + for (i = 0; i < num_ch; i ++) { + chspec = wf_create_chspec(primary_channel, center_ch[i], bw, band); + if (chspec != INVCHANSPEC) { + break; + } + } + } + else if (band == WL_CHANSPEC_BAND_6G) { + /* For 6GHz, use a formula to calculate the valid 40/80/160 center channel from + * the primary channel. + */ + uint ch_per_block; + uint mask; + uint base, center; + + if (bw == WL_CHANSPEC_BW_40) { + ch_per_block = 8; + } else if (bw == WL_CHANSPEC_BW_80) { + ch_per_block = 16; + } else if (bw == WL_CHANSPEC_BW_160) { + ch_per_block = 32; + } else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240)) { + ch_per_block = 48; + } else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) { + ch_per_block = 64; + } else { + ch_per_block = 0; + } + + if (ch_per_block) { + /* calculate the base of the block of channel numbers + * covered by the given bw + */ + mask = ~(ch_per_block - 1); + base = 1 + ((primary_channel - 1) & mask); + + /* calculate the center channel from the base channel */ + center = base + center_chan_to_edge(bw); + + chspec = wf_create_chspec(primary_channel, center, bw, band); + } + } + + return chspec; +} + +/** + * Return the primary 20MHz channel. + * + * This function returns the channel number of the primary 20MHz channel. For + * 20MHz channels this is just the channel number. For 40MHz or wider channels + * it is the primary 20MHz channel specified by the chanspec. + * + * @param chspec input chanspec + * + * @return Returns the channel number of the primary 20MHz channel + */ +uint8 +wf_chspec_primary20_chan(chanspec_t chspec) +{ + uint center_chan = INVCHANNEL; + chanspec_bw_t bw; + uint sb; + + ASSERT(!wf_chspec_malformed(chspec)); + + /* Is there a sideband ? */ + if (CHSPEC_IS20(chspec)) { + return CHSPEC_CHANNEL(chspec); + } else { + if ((CHSPEC_IS240(chspec)) || (CHSPEC_IS320(chspec))) { + sb = CHSPEC_GE240_SB(chspec) >> WL_CHANSPEC_GE240_SB_SHIFT; + } else { + sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT; + } + + if (CHSPEC_IS240(chspec)) { + /* use bw 240MHz for the primary channel lookup */ + bw = WL_CHANSPEC_BW_240; + + /* convert from channel index to channel number */ + if (CHSPEC_IS6G(chspec)) { + center_chan = wf_chspec_6G_id240_to_ch(CHSPEC_GE240_CHAN(chspec)); + } + } else if (CHSPEC_IS320(chspec)) { + /* use bw 320MHz for the primary channel lookup */ + bw = WL_CHANSPEC_BW_320; + + /* convert from channel index to channel number */ + if (CHSPEC_IS6G(chspec)) { + center_chan = wf_chspec_6G_id320_to_ch(CHSPEC_GE240_CHAN(chspec)); + } + /* What to return otherwise? */ + } + else { + bw = CHSPEC_BW(chspec); + center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT; + } + + return (uint8)(channel_to_primary20_chan((uint8)center_chan, bw, sb)); + } +} + +/** + * Return the bandwidth string for a given chanspec + * + * This function returns the bandwidth string for the passed chanspec. 
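+ * The string is indexed by the raw chanspec BW field, so the
+ * wf_chspec_bw_str[] table is assumed to be ordered to match the
+ * WL_CHANSPEC_BW_* encoding rather than ascending bandwidth.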
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the bandwidth string:
+ *         "320", "160+160", "20", "40", "80", "160", "80+80", "240"
+ */
+const char *
+BCMRAMFN(wf_chspec_to_bw_str)(chanspec_t chspec)
+{
+	return wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+}
+
+/**
+ * Return the primary 20MHz chanspec of a given chanspec
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHz channel specified by the chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+chanspec_t
+wf_chspec_primary20_chspec(chanspec_t chspec)
+{
+	chanspec_t pri_chspec = chspec;
+	uint8 pri_chan;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (!CHSPEC_IS20(chspec)) {
+		pri_chan = wf_chspec_primary20_chan(chspec);
+		pri_chspec = pri_chan | WL_CHANSPEC_BW_20;
+		pri_chspec |= CHSPEC_BAND(chspec);
+	}
+	return pri_chspec;
+}
+
+/* return chanspec given primary 20MHz channel and bandwidth
+ * return 0 on error
+ * does not support 6G
+ */
+uint16
+wf_channel2chspec(uint pri_ch, uint bw)
+{
+	uint16 chspec;
+	const uint8 *center_ch = NULL;
+	int num_ch = 0;
+	int sb = -1;
+	int i = 0;
+
+	chspec = ((pri_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+
+	chspec |= bw;
+
+	if (bw == WL_CHANSPEC_BW_40) {
+		if (pri_ch <= CH_MAX_2G_CHANNEL) {
+			center_ch = wf_2g_40m_chans;
+			num_ch = WF_NUM_2G_40M_CHANS;
+		} else {
+			center_ch = wf_5g_40m_chans;
+			num_ch = WF_NUM_5G_40M_CHANS;
+		}
+	} else if (bw == WL_CHANSPEC_BW_80) {
+		center_ch = wf_5g_80m_chans;
+		num_ch = WF_NUM_5G_80M_CHANS;
+	} else if (bw == WL_CHANSPEC_BW_160) {
+		center_ch = wf_5g_160m_chans;
+		num_ch = WF_NUM_5G_160M_CHANS;
+	} else if (bw == WL_CHANSPEC_BW_20) {
+		chspec |= pri_ch;
+		return chspec;
+	} else {
+		return 0;
+	}
+
+	for (i = 0; i < num_ch; i++) {
+		sb = channel_to_sb(center_ch[i], pri_ch, (chanspec_bw_t)bw);
+		if (sb >= 0) {
+			chspec |= center_ch[i];
+			chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
+			break;
+		}
+	}
+
+	/* check for no matching sb/center */
+	if (sb < 0) {
+		return 0;
+	}
+
+	return chspec;
+}
+
+/**
+ * Return the primary 40MHz chanspec of a 40MHz or wider channel
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz or wider channel.
+ * The primary 40MHz channel is the 40MHz sub-band that contains the primary 20MHz channel.
+ * The primary 20MHz channel of the returned 40MHz chanspec is the same as the primary 20MHz
+ * channel of the input chanspec.
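+ *
+ * Example (assuming standard 5G numbering): for the 80MHz chanspec
+ * centered on channel 42 with primary 20MHz channel 36, the primary
+ * 40MHz chanspec is the one centered on channel 38.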
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the chanspec of the primary 40MHz channel
+ */
+chanspec_t
+wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec40 = chspec;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */
+	if (CHSPEC_IS160(chspec)) {
+		chspec = wf_chspec_primary80_chspec(chspec);
+	}
+
+	/* determine primary 40 MHz sub-channel of an 80 MHz chanspec */
+	if (CHSPEC_IS80(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_UL) {
+			/* Primary 40MHz is on lower side */
+			center_chan -= CH_20MHZ_APART;
+			/* sideband bits are the same for LL/LU and L/U */
+		} else {
+			/* Primary 40MHz is on upper side */
+			center_chan += CH_20MHZ_APART;
+			/* sideband bits need to be adjusted by UL offset */
+			sb -= WL_CHANSPEC_CTL_SB_UL;
+		}
+
+		/* Create primary 40MHz chanspec */
+		chspec40 = (CHSPEC_BAND(chspec) | WL_CHANSPEC_BW_40 |
+			sb | center_chan);
+	}
+
+	return chspec40;
+}
+
+/**
+ * Return the channel number for a given frequency and base frequency.
+ *
+ * @param freq frequency in MHz of the channel center
+ * @param start_factor starting base frequency in 500 kHz units
+ *
+ * @return Returns a channel number > 0, or -1 on error
+ *
+ * The returned channel number is relative to the given base frequency.
+ *
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G, and WF_CHAN_FACTOR_6_G are
+ * defined for 2.4 GHz, 5 GHz, and 6 GHz bands.
+ *
+ * If the given base frequency is zero these base frequencies are assumed:
+ *
+ *              freq (GHz)      -> assumed base freq (GHz)
+ *   2G band    2.4   - 2.5        2.407
+ *   5G band    5.0   - 5.940      5.000
+ *   6G band    5.940 - 7.205      5.940
+ *
+ * It is an error if the start_factor is zero and the freq is not in one of
+ * these ranges.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band,
+ * [1, 253] for the 6 GHz band, or [1, 200] otherwise.
+ *
+ * It is an error if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel. For any other start factor the frequency
+ * must be an even 5 MHz multiple greater than the base frequency.
+ *
+ * For a start_factor WF_CHAN_FACTOR_6_G, the frequency may be up to 7.205 GHz
+ * (channel 253). For any other start_factor, the frequency can be up to
+ * 1 GHz from the base frequency (channel 200).
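+ *
+ * Example: wf_mhz2channel(5220, 0) assumes the 5 GHz base from the
+ * frequency range and returns (5220 - 5000) / 5 = 44.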
+ *
+ * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+	int ch = -1;
+	uint base;
+	int offset;
+
+	/* take the default channel start frequency */
+	if (start_factor == 0) {
+		if (freq >= 2400 && freq <= 2500) {
+			start_factor = WF_CHAN_FACTOR_2_4_G;
+		} else if (freq >= 5000 && freq < 5935) {
+			start_factor = WF_CHAN_FACTOR_5_G;
+		} else if (freq >= 5935 && freq <= 7205) {
+			start_factor = WF_CHAN_FACTOR_6_G;
+		}
+	}
+
+	if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G) {
+		return 14;
+	} else if (freq == 5935 && start_factor == WF_CHAN_FACTOR_6_G) {
+		/* channel #2 is an oddball, 10MHz below chan #1 */
+		return 2;
+	} else if (freq == 5960 && start_factor == WF_CHAN_FACTOR_6_G) {
+		/* do not return ch #2 for the conventional location where #2 would appear */
+		return -1;
+	}
+
+	base = start_factor / 2;
+
+	if (freq < base) {
+		return -1;
+	}
+
+	offset = freq - base;
+	ch = offset / 5;
+
+	/* check that frequency is a 5MHz multiple from the base */
+	if (offset != (ch * 5))
+		return -1;
+
+	/* channel range checks */
+	if (start_factor == WF_CHAN_FACTOR_2_4_G) {
+		/* 2G should only be up to 13 here as 14 is
+		 * handled above as it is a non-5MHz offset
+		 */
+		if (ch > 13) {
+			ch = -1;
+		}
+	} else if (start_factor == WF_CHAN_FACTOR_6_G) {
+		/* 6G has a higher channel range than the [1, 200] the 5G channelization specifies */
+		if ((uint)ch > CH_MAX_6G_CHANNEL) {
+			ch = -1;
+		}
+	} else if (ch > 200) {
+		ch = -1;
+	}
+
+	return ch;
+}
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band, [1, 253] in the 6 GHz
+ * band, and [1, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G, and WF_CHAN_FACTOR_6_G are
+ * defined for 2.4 GHz, 5 GHz, and 6 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ *
+ * @param channel input channel number
+ * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see WF_CHAN_FACTOR_2_4_G
+ * @see WF_CHAN_FACTOR_5_G
+ * @see WF_CHAN_FACTOR_6_G
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+	int freq;
+
+	if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+		(start_factor == WF_CHAN_FACTOR_6_G && (ch < 1 || ch > 253)) ||
+		(start_factor != WF_CHAN_FACTOR_6_G && (ch < 1 || ch > 200))) {
+		freq = -1;
+	} else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) {
+		freq = 2484;
+	} else if ((start_factor == WF_CHAN_FACTOR_6_G) && (ch == 2)) {
+		freq = 5935;
+	} else {
+		freq = ch * 5 + start_factor / 2;
+	}
+
+	return freq;
+}
+
+static const uint16 sidebands[] = {
+	WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU,
+	WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU,
+	WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU,
+	WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU
+};
+
+/*
+ * Returns the 80MHz chanspec corresponding to the following input parameters
+ *
+ *	primary_channel - primary 20MHz channel
+ *	center_channel  - center frequency of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ *
+ * does not support 6G
+ */
+chanspec_t
+wf_chspec_80(uint8 center_channel, uint8 primary_channel)
+{
+
+	chanspec_t chanspec = INVCHANSPEC;
+	chanspec_t chanspec_cur;
+	uint i;
+
+	for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
+		chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
+		if (primary_channel == wf_chspec_primary20_chan(chanspec_cur)) {
+			chanspec = chanspec_cur;
+			break;
+		}
+	}
+	/* If the loop ended early, we are good, otherwise we did not
+	 * find an 80MHz chanspec with the given center_channel that had a primary channel
+	 * matching the given primary_channel.
+	 */
+	return chanspec;
+}
+
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ *	primary_20mhz - Primary 20 MHz channel
+ *	chan0 - center channel number of one frequency segment
+ *	chan1 - center channel number of the other frequency segment
+ *
+ * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to 802.11-2016 section 22.3.14 "Channelization".
+ *
+ * does not support 6G
+ */
+chanspec_t
+wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1)
+{
+	int sb = 0;
+	uint16 chanspec = 0;
+	int chan0_id = 0, chan1_id = 0;
+	int seg0, seg1;
+
+	chan0_id = channel_80mhz_to_id(chan0);
+	chan1_id = channel_80mhz_to_id(chan1);
+
+	/* make sure the channel numbers were valid */
+	if (chan0_id == -1 || chan1_id == -1)
+		return INVCHANSPEC;
+
+	/* does the primary channel fit with the 1st 80MHz channel ? */
+	sb = channel_to_sb(chan0, primary_20mhz, WL_CHANSPEC_BW_80);
+	if (sb >= 0) {
+		/* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+		seg0 = chan0_id;
+		seg1 = chan1_id;
+	} else {
+		/* no, so does the primary channel fit with the 2nd 80MHz channel ?
+		 */
+		sb = channel_to_sb(chan1, primary_20mhz, WL_CHANSPEC_BW_80);
+		if (sb < 0) {
+			/* no match for pri_ch to either 80MHz center channel */
+			return INVCHANSPEC;
+		}
+		/* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+		seg0 = chan1_id;
+		seg1 = chan0_id;
+	}
+
+	chanspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) |
+		(seg1 << WL_CHANSPEC_CHAN1_SHIFT) |
+		(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+		WL_CHANSPEC_BW_8080 |
+		WL_CHANSPEC_BAND_5G);
+
+	return chanspec;
+}
+
+/*
+ * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec
+ */
+uint8
+wf_chspec_primary80_channel(chanspec_t chanspec)
+{
+	chanspec_t primary80_chspec;
+	uint8 primary80_chan;
+
+	primary80_chspec = wf_chspec_primary80_chspec(chanspec);
+
+	if (primary80_chspec == INVCHANSPEC) {
+		primary80_chan = INVCHANNEL;
+	} else {
+		primary80_chan = CHSPEC_CHANNEL(primary80_chspec);
+	}
+
+	return primary80_chan;
+}
+
+/*
+ * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec
+ */
+uint8
+wf_chspec_secondary80_channel(chanspec_t chanspec)
+{
+	chanspec_t secondary80_chspec;
+	uint8 secondary80_chan;
+
+	secondary80_chspec = wf_chspec_secondary80_chspec(chanspec);
+
+	if (secondary80_chspec == INVCHANSPEC) {
+		secondary80_chan = INVCHANNEL;
+	} else {
+		secondary80_chan = CHSPEC_CHANNEL(secondary80_chspec);
+	}
+
+	return secondary80_chan;
+}
+
+/*
+ * Returns the chanspec for the primary 80MHz sub-band of a 160MHz or 80+80 channel
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec80;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	if (CHSPEC_IS80(chspec)) {
+		chspec80 = chspec;
+	} else if (CHSPEC_IS160(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+			/* Primary 80MHz is on lower side */
+			center_chan -= CH_40MHZ_APART;
+		} else {
+			/* Primary 80MHz is on upper side */
+			center_chan += CH_40MHZ_APART;
+			sb -= WL_CHANSPEC_CTL_SB_ULL;
+		}
+
+		/* Create primary 80MHz chanspec */
+		chspec80 = (CHSPEC_BAND(chspec) | WL_CHANSPEC_BW_80 | sb | center_chan);
+	} else {
+		chspec80 = INVCHANSPEC;
+	}
+
+	return chspec80;
+}
+
+/*
+ * Returns the chanspec for the secondary 80MHz sub-band of a 160MHz or 80+80 channel
+ */
+chanspec_t
+wf_chspec_secondary80_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec80;
+	uint center_chan;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	if (CHSPEC_IS160(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+
+		if (CHSPEC_CTL_SB(chspec) < WL_CHANSPEC_CTL_SB_ULL) {
+			/* Primary 80MHz is on lower side, so the secondary is on
+			 * the upper side
+			 */
+			center_chan += CH_40MHZ_APART;
+		} else {
+			/* Primary 80MHz is on upper side, so the secondary is on
+			 * the lower side
+			 */
+			center_chan -= CH_40MHZ_APART;
+		}
+
+		/* Create secondary 80MHz chanspec */
+		chspec80 = (CHSPEC_BAND(chspec) |
+			WL_CHANSPEC_BW_80 |
+			WL_CHANSPEC_CTL_SB_LL |
+			center_chan);
+	} else {
+		chspec80 = INVCHANSPEC;
+	}
+
+	return chspec80;
+}
+
+/*
+ * For 160MHz or 80P80 chanspec, set ch[0]/ch[1] to be the low/high 80 MHz channels
+ *
+ * For 20/40/80MHz chanspec, set ch[0] to be the center freq, and ch[1] = -1
+ */
+void
+wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch)
+{
+
+	if (CHSPEC_IS160(chspec)) {
+		uint8 center_chan = CHSPEC_CHANNEL(chspec);
+		ch[0] = center_chan - CH_40MHZ_APART;
+		ch[1] = center_chan + CH_40MHZ_APART;
+	} else {
+		/* for 20, 40, and 80 MHz */
+		ch[0] = CHSPEC_CHANNEL(chspec);
+		ch[1] = -1;
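+		/* ch is uint8, so the -1 marker is stored as 0xff; callers are
+		 * assumed to treat 0xff as "no second segment"
+		 */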
+	}
+	return;
+
+}
+
+/*
+ * Returns the center channel of the primary 160MHz sub-band of the provided chanspec
+ */
+uint8
+wf_chspec_primary160_channel(chanspec_t chanspec)
+{
+	chanspec_t primary160_chspec;
+	uint8 primary160_chan;
+
+	primary160_chspec = wf_chspec_primary160_chspec(chanspec);
+
+	if (primary160_chspec == INVCHANSPEC) {
+		primary160_chan = INVCHANNEL;
+	} else {
+		primary160_chan = CHSPEC_CHANNEL(primary160_chspec);
+	}
+
+	return primary160_chan;
+}
+
+/*
+ * Returns the chanspec for the primary 160MHz sub-band of a 240/320MHz or 160+160 channel
+ */
+chanspec_t
+wf_chspec_primary160_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec160;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	if (CHSPEC_IS160(chspec)) {
+		chspec160 = chspec;
+	} else if (CHSPEC_IS240(chspec)) {
+		uint8 ch_id = CHSPEC_GE240_CHAN(chspec);
+		center_chan = wf_chspec_240_id2cch(chspec);
+		sb = CHSPEC_GE240_SB(chspec) >> WL_CHANSPEC_GE240_SB_SHIFT;
+		/*
+		 * Identify whether the chanspec is of the form 160+80 or 80+160 from the
+		 * channel ID. Channel ID : even for 160+80 and odd for 80+160
+		 */
+		if ((!(ch_id & 0x1u)) && (sb < 8u)) {
+			/* Primary 160MHz is on lower side */
+			center_chan -= CH_40MHZ_APART;
+		} else if ((ch_id & 0x1u) && (sb >= 4u)) {
+			/* Primary 160MHz is on upper side */
+			center_chan += CH_40MHZ_APART;
+			sb -= 4u;
+		} else {
+			chspec160 = INVCHANSPEC;
+			goto done;
+		}
+
+		/* Create primary 160MHz chanspec */
+		chspec160 = (CHSPEC_BAND(chspec) |
+			WL_CHANSPEC_BW_160 |
+			(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+			center_chan);
+	} else if (CHSPEC_IS320(chspec)) {
+		center_chan = wf_chspec_320_id2cch(chspec);
+		sb = CHSPEC_GE240_SB(chspec) >> WL_CHANSPEC_GE240_SB_SHIFT;
+
+		if (sb < 8u) {
+			/* Primary 160MHz is on lower side */
+			center_chan -= CH_80MHZ_APART;
+		} else {
+			/* Primary 160MHz is on upper side */
+			center_chan += CH_80MHZ_APART;
+			sb -= 8u;
+		}
+
+		/* Create primary 160MHz chanspec */
+		chspec160 = (CHSPEC_BAND(chspec) |
+			WL_CHANSPEC_BW_160 |
+			(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+			center_chan);
+	} else {
+		chspec160 = INVCHANSPEC;
+	}
+done:
+	return chspec160;
+}
+
+/* Populates array with all 20MHz side bands of a given chanspec_t in the following order:
+ *		primary20, secondary20, two secondary40s, four secondary80s.
+ * 'chspec' is the chanspec of interest
+ * 'pext' must point to a uint8 array long enough to hold all side bands of the given chspec
+ *
+ * Works with 20, 40, 80, and 160MHz chspec
+ */
+void
+wf_get_all_ext(chanspec_t chspec, uint8 *pext)
+{
+	chanspec_t t = (CHSPEC_IS160(chspec)) ?		/* if bw > 80MHz */
+		wf_chspec_primary80_chspec(chspec) : (chspec);	/* extract primary 80 */
+	/* primary20 channel as first element */
+	uint8 pri_ch = (pext)[0] = wf_chspec_primary20_chan(t);
+
+	if (CHSPEC_IS20(chspec)) {
+		return; /* nothing more to do since 20MHz chspec */
+	}
+	/* 20MHz EXT */
+	(pext)[1] = (IS_CTL_IN_L20(t) ? pri_ch + CH_20MHZ_APART : pri_ch - CH_20MHZ_APART);
+
+	if (CHSPEC_IS40(chspec)) {
+		return; /* nothing more to do since 40MHz chspec */
+	}
+	/* center 40MHz EXT */
+	t = wf_channel2chspec((IS_CTL_IN_L40(chspec) ?
+ pri_ch + CH_40MHZ_APART : pri_ch - CH_40MHZ_APART), WL_CHANSPEC_BW_40); + GET_ALL_SB(t, &((pext)[2])); /* get the 20MHz side bands in 40MHz EXT */ + + if (CHSPEC_IS80(chspec)) { + return; /* nothing more to do since 80MHz chspec */ + } + t = CH80MHZ_CHSPEC(wf_chspec_secondary80_channel(chspec), WL_CHANSPEC_CTL_SB_LLL); + /* get the 20MHz side bands in 80MHz EXT (secondary) */ + GET_ALL_SB(t, &((pext)[4])); +} + +/* + * Given two chanspecs, returns true if they overlap. + * (Overlap: At least one 20MHz subband is common between the two chanspecs provided) + */ +bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1) +{ + uint8 ch0, ch1; + + if (CHSPEC_BAND(chspec0) != CHSPEC_BAND(chspec1)) { + return FALSE; + } + + FOREACH_20_SB(chspec0, ch0) { + FOREACH_20_SB(chspec1, ch1) { + if ((uint)ABS(ch0 - ch1) < CH_20MHZ_APART) { + return TRUE; + } + } + } + + return FALSE; +} + +uint8 +channel_bw_to_width(chanspec_t chspec) +{ + uint8 channel_width; + + if (CHSPEC_IS80(chspec)) + channel_width = VHT_OP_CHAN_WIDTH_80; + else if (CHSPEC_IS160(chspec)) + channel_width = VHT_OP_CHAN_WIDTH_160; + else + channel_width = VHT_OP_CHAN_WIDTH_20_40; + + return channel_width; +} + +uint wf_chspec_first_20_sb(chanspec_t chspec) +{ +#if defined(WL_BW160MHZ) + if (CHSPEC_IS160(chspec)) { + return LLL_20_SB_160(CHSPEC_CHANNEL(chspec)); + } else +#endif + if (CHSPEC_IS80(chspec)) { + return LL_20_SB(CHSPEC_CHANNEL(chspec)); + } else if (CHSPEC_IS40(chspec)) { + return LOWER_20_SB(CHSPEC_CHANNEL(chspec)); + } else { + return CHSPEC_CHANNEL(chspec); + } +} + +chanspec_t +wf_create_chspec_sb(uint sb, uint center_channel, chanspec_bw_t bw, chanspec_band_t band) +{ + chanspec_t chspec; + if (sb > (WL_CHANSPEC_CTL_SB_MASK >> WL_CHANSPEC_CTL_SB_SHIFT)) { + return INVCHANSPEC; + } + chspec = center_channel | band | bw | ((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT); + return wf_chspec_valid(chspec) ? chspec : INVCHANSPEC; +} + +chanspec_t +wf_create_160160MHz_chspec_sb(uint sb, uint chan0, uint chan1, chanspec_band_t band) +{ + int chan0_id, chan1_id, seg0, seg1; + chanspec_t chspec; + + if (sb > (WL_CHANSPEC_CTL_SB_UUU >> WL_CHANSPEC_CTL_SB_SHIFT)) { + return INVCHANSPEC; + } + /* From here on sb is not an index, but value for SB field */ + sb <<= WL_CHANSPEC_CTL_SB_SHIFT; + + /* frequency segments need to be non-contiguous, so the channel + * separation needs to be greater than 160MHz + */ + if ((uint)ABS((int)(chan0 - chan1)) <= CH_160MHZ_APART) { + return INVCHANSPEC; + } + + if (band == WL_CHANSPEC_BAND_5G) { + chan0_id = channel_5g_160mhz_to_id(chan0); + chan1_id = channel_5g_160mhz_to_id(chan1); + } else if (band == WL_CHANSPEC_BAND_6G) { + chan0_id = channel_6g_160mhz_to_id(chan0); + chan1_id = channel_6g_160mhz_to_id(chan1); + } else { + return INVCHANSPEC; + } + + /* make sure the channel numbers were valid */ + if ((chan0_id == -1) || (chan1_id == -1)) { + return INVCHANSPEC; + } + /* Optionally swapping channel IDs to make sure that control subchannel + * is in chan0 + */ + if (sb < WL_CHANSPEC_CTL_SB_ULL) { + seg0 = chan0_id; + seg1 = chan1_id; + } else { + seg0 = chan1_id; + seg1 = chan0_id; + sb -= WL_CHANSPEC_CTL_SB_ULL; + } + chspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) | + (seg1 << WL_CHANSPEC_CHAN1_SHIFT) | + sb | WL_CHANSPEC_BW_160160 | band); + return wf_chspec_valid(chspec) ? 
chspec : INVCHANSPEC; +} diff --git a/bcmdhd.101.10.361.x/bcmwifi_monitor.c b/bcmdhd.101.10.361.x/bcmwifi_monitor.c new file mode 100755 index 0000000..1b606eb --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmwifi_monitor.c @@ -0,0 +1,1071 @@ +/* + * Monitor Mode routines. + * This header file housing the Monitor Mode routines implementation. + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. + * + * + * <> + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct monitor_info { + ratespec_t ampdu_rspec; /* spec value for AMPDU sniffing */ + uint16 ampdu_counter; + uint16 amsdu_len; + uint8* amsdu_pkt; + int8 headroom; + d11_info_t *d11_info; + uint8 ampdu_plcp[D11_PHY_HDR_LEN]; +}; + +struct he_ltf_gi_info { + uint8 gi; + uint8 ltf_size; + uint8 num_ltf; +}; + +struct he_mu_ltf_mp_info { + uint8 num_ltf; + uint8 mid_per; +}; + +/* + * su ppdu - mapping of ltf and gi values from plcp to rtap data format + * https://www.radiotap.org/fields/HE.html + */ +static const struct he_ltf_gi_info he_plcp2ltf_gi[4] = { + {3, 0, 7}, /* reserved, reserved, reserved */ + {0, 2, 1}, /* 0.8us, 2x, 2x */ + {1, 2, 1}, /* 1.6us, 2x, 2x */ + {2, 3, 2} /* 3.2us, 4x, 4x */ +}; + +/* + * mu ppdu - mapping of ru type value from phy rxstatus to rtap data format + * https://www.radiotap.org/fields/HE.html + */ +static const uint8 he_mu_phyrxs2ru_type[7] = { + 4, /* 26-tone RU */ + 5, /* 52-tone RU */ + 6, /* 106-tone RU */ + 7, /* 242-tone RU */ + 8, /* 484-tone RU */ + 9, /* 996-tone RU */ + 10 /* 2x996-tone RU */ +}; + +/* + * mu ppdu - doppler:1, mapping of ltf and midamble periodicity values from plcp to rtap data format + * https://www.radiotap.org/fields/HE.html + */ +static const struct he_mu_ltf_mp_info he_mu_plcp2ltf_mp[8] = { + {0, 0}, /* 1x, 10 */ + {1, 0}, /* 2x, 10 */ + {2, 0}, /* 4x, 10 */ + {7, 0}, /* reserved, reserved */ + {0, 1}, /* 1x, 20 */ + {1, 1}, /* 2x, 20 */ + {2, 1}, /* 4x, 20 */ + {7, 0} /* reserved, reserved */ +}; + +/* + * mu ppdu - doppler:0, mapping of ltf value from plcp to rtap data format + * https://www.radiotap.org/fields/HE.html + */ +static const uint8 he_mu_plcp2ltf[8] = { + 0, /* 1x */ + 1, /* 2x */ + 2, /* 4x */ + 3, /* 6x */ + 4, /* 8x */ + 7, /* reserved */ + 7, /* reserved */ + 7 /* reserved */ +}; + +/** Calculate the rate of a received frame and return it as a ratespec (monitor mode) */ +static ratespec_t +BCMFASTPATH(wlc_recv_mon_compute_rspec)(monitor_info_t* info, wlc_d11rxhdr_t *wrxh, uint8 *plcp) +{ + d11rxhdr_t *rxh = &wrxh->rxhdr; + ratespec_t rspec = 0; + uint16 phy_ft; + uint corerev = info->d11_info->major_revid; + uint corerev_minor = info->d11_info->minor_revid; + BCM_REFERENCE(corerev_minor); + + phy_ft = D11PPDU_FT(rxh, corerev); + switch (phy_ft) { + case FT_CCK: + rspec = CCK_RSPEC(CCK_PHY2MAC_RATE(((cck_phy_hdr_t *)plcp)->signal)); + rspec |= WL_RSPEC_BW_20MHZ; + break; + case FT_OFDM: + rspec = OFDM_RSPEC(OFDM_PHY2MAC_RATE(((ofdm_phy_hdr_t *)plcp)->rlpt[0])); + rspec |= WL_RSPEC_BW_20MHZ; + break; + case FT_HT: { + uint ht_sig1, ht_sig2; + uint8 stbc; + + ht_sig1 = plcp[0]; /* only interested in low 8 bits */ + ht_sig2 = plcp[3] | (plcp[4] << 8); /* only interested in low 10 bits */ + + rspec = HT_RSPEC((ht_sig1 & 
HT_SIG1_MCS_MASK)); + if (ht_sig1 & HT_SIG1_CBW) { + /* indicate rspec is for 40 MHz mode */ + rspec |= WL_RSPEC_BW_40MHZ; + } else { + /* indicate rspec is for 20 MHz mode */ + rspec |= WL_RSPEC_BW_20MHZ; + } + if (ht_sig2 & HT_SIG2_SHORT_GI) + rspec |= WL_RSPEC_SGI; + if (ht_sig2 & HT_SIG2_FEC_CODING) + rspec |= WL_RSPEC_LDPC; + stbc = ((ht_sig2 & HT_SIG2_STBC_MASK) >> HT_SIG2_STBC_SHIFT); + if (stbc != 0) { + rspec |= WL_RSPEC_STBC; + } + break; + } + case FT_VHT: + rspec = wf_vht_plcp_to_rspec(plcp); + break; +#ifdef WL11AX + case FT_HE: + rspec = wf_he_plcp_to_rspec(plcp); + break; +#endif /* WL11AX */ +#ifdef WL11BE + case FT_EHT: + rspec = wf_eht_plcp_to_rspec(plcp); + break; +#endif + default: + /* return a valid rspec if not a debug/assert build */ + rspec = OFDM_RSPEC(6) | WL_RSPEC_BW_20MHZ; + break; + } + + return rspec; +} /* wlc_recv_compute_rspec */ + +static void +wlc_he_su_fill_rtap_data(struct wl_rxsts *sts, uint8 *plcp) +{ + ASSERT(plcp); + + /* he ppdu format */ + sts->data1 |= WL_RXS_HEF_SIGA_PPDU_SU; + + /* bss color */ + sts->data1 |= WL_RXS_HEF_SIGA_BSS_COLOR; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, BSS_COLOR); + + /* beam change */ + sts->data1 |= WL_RXS_HEF_SIGA_BEAM_CHANGE; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, BEAM_CHANGE); + + /* ul/dl */ + sts->data1 |= WL_RXS_HEF_SIGA_DL_UL; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DL_UL); + + /* data mcs */ + sts->data1 |= WL_RXS_HEF_SIGA_MCS; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, MCS); + + /* data dcm */ + sts->data1 |= WL_RXS_HEF_SIGA_DCM; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DCM); + + /* coding */ + sts->data1 |= WL_RXS_HEF_SIGA_CODING; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, CODING); + + /* ldpc extra symbol segment */ + sts->data1 |= WL_RXS_HEF_SIGA_LDPC; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, LDPC); + + /* stbc */ + sts->data1 |= WL_RXS_HEF_SIGA_STBC; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, STBC); + + /* spatial reuse */ + sts->data1 |= WL_RXS_HEF_SIGA_SPATIAL_REUSE; + sts->data4 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, SR); + + /* data bw */ + sts->data1 |= WL_RXS_HEF_SIGA_BW; + sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, BW); + + /* gi */ + sts->data2 |= WL_RXS_HEF_SIGA_GI; + sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, SU, GI, gi); + + /* ltf symbol size */ + sts->data2 |= WL_RXS_HEF_SIGA_LTF_SIZE; + sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, SU, LTF_SIZE, ltf_size); + + /* number of ltf symbols */ + sts->data2 |= WL_RXS_HEF_SIGA_NUM_LTF; + sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, SU, NUM_LTF, num_ltf); + + /* pre-fec padding factor */ + sts->data2 |= WL_RXS_HEF_SIGA_PADDING; + sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, PADDING); + + /* txbf */ + sts->data2 |= WL_RXS_HEF_SIGA_TXBF; + sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, TXBF); + + /* pe disambiguity */ + sts->data2 |= WL_RXS_HEF_SIGA_PE; + sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, PE); + + /* + * if doppler (bit:41) is set in plcp to 1 then, + * - bit:25 indicates 'midamble periodicity' + * - bit:23-24 indicate 'nsts' + * + * if doppler (bit:41) is set to 0 then, + * - bit:23-25 indicate 'nsts' + */ + if (HE_EXTRACT_FROM_PLCP(plcp, SU, DOPPLER)) { + /* doppler */ + sts->data1 |= WL_RXS_HEF_SIGA_DOPPLER; + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DOPPLER); + + /* midamble periodicity */ + sts->data2 |= WL_RXS_HEF_SIGA_MIDAMBLE; + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, MIDAMBLE); + + /* nsts */ + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, 
SU, DOPPLER_SET_NSTS); + } else { + /* nsts */ + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DOPPLER_NOTSET_NSTS); + } + + /* txop */ + sts->data2 |= WL_RXS_HEF_SIGA_TXOP; + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, TXOP); +} + +static void +wlc_he_dl_ofdma_fill_rtap_data(struct wl_rxsts *sts, d11rxhdr_t *rxh, + uint8 *plcp, uint32 corerev, uint32 corerev_minor) +{ + uint8 doppler, midamble, val; + ASSERT(rxh); + ASSERT(plcp); + + /* he ppdu format */ + sts->data1 |= WL_RXS_HEF_SIGA_PPDU_MU; + + /* bss color */ + sts->data1 |= WL_RXS_HEF_SIGA_BSS_COLOR; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, BSS_COLOR); + + /* beam change (doesn't apply to mu ppdu) */ + sts->data1 &= ~WL_RXS_HEF_SIGA_BEAM_CHANGE; + + /* ul/dl */ + sts->data1 |= WL_RXS_HEF_SIGA_DL_UL; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, DL_UL); + + /* data mcs */ + sts->data1 |= WL_RXS_HEF_SIGA_MCS; + sts->data3 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, MCS); + + /* data dcm */ + sts->data1 |= WL_RXS_HEF_SIGA_DCM; + sts->data3 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, DCM); + + /* coding */ + sts->data1 |= WL_RXS_HEF_SIGA_CODING; + sts->data3 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, CODING); + + /* ldpc extra symbol segment */ + sts->data1 |= WL_RXS_HEF_SIGA_LDPC; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, LDPC); + + /* stbc */ + sts->data1 |= WL_RXS_HEF_SIGA_STBC; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, STBC); + + /* spatial reuse */ + sts->data1 |= WL_RXS_HEF_SIGA_SPATIAL_REUSE; + sts->data4 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SR); + + /* sta-id */ + sts->data1 |= WL_RXS_HEF_SIGA_STA_ID; + sts->data4 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, STAID); + + /* ru allocation */ + val = he_mu_phyrxs2ru_type[D11PPDU_RU_TYPE(rxh, corerev, corerev_minor)]; + sts->data1 |= WL_RXS_HEF_SIGA_RU_ALLOC; + sts->data5 |= HE_PACK_RTAP_FROM_VAL(val, RU_ALLOC); + + /* doppler */ + sts->data1 |= WL_RXS_HEF_SIGA_DOPPLER; + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, DOPPLER); + + doppler = HE_EXTRACT_FROM_PLCP(plcp, MU, DOPPLER); + midamble = HE_EXTRACT_FROM_PLCP(plcp, MU, MIDAMBLE); + if (doppler) { + /* number of ltf symbols */ + val = he_mu_plcp2ltf_mp[midamble].num_ltf; + sts->data2 |= WL_RXS_HEF_SIGA_NUM_LTF; + sts->data5 |= HE_PACK_RTAP_FROM_VAL(val, NUM_LTF); + + /* midamble periodicity */ + val = he_mu_plcp2ltf_mp[midamble].mid_per; + sts->data2 |= WL_RXS_HEF_SIGA_MIDAMBLE; + sts->data6 |= HE_PACK_RTAP_FROM_VAL(val, MIDAMBLE); + } else { + /* number of ltf symbols */ + val = he_mu_plcp2ltf[midamble]; + sts->data2 |= WL_RXS_HEF_SIGA_NUM_LTF; + sts->data5 |= HE_PACK_RTAP_FROM_VAL(val, NUM_LTF); + } + + /* nsts */ + sts->data6 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, NSTS); + + /* gi */ + sts->data2 |= WL_RXS_HEF_SIGA_GI; + sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, MU, GI, gi); + + /* ltf symbol size */ + sts->data2 |= WL_RXS_HEF_SIGA_LTF_SIZE; + sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, MU, LTF_SIZE, ltf_size); + + /* pre-fec padding factor */ + sts->data2 |= WL_RXS_HEF_SIGA_PADDING; + sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, PADDING); + + /* txbf */ + sts->data2 |= WL_RXS_HEF_SIGA_TXBF; + sts->data5 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, TXBF); + + /* pe disambiguity */ + sts->data2 |= WL_RXS_HEF_SIGA_PE; + sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, PE); + + /* txop */ + sts->data2 |= WL_RXS_HEF_SIGA_TXOP; + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, TXOP); +} + +static void 
+wlc_he_dl_ofdma_fill_rtap_flag(struct wl_rxsts *sts, uint8 *plcp, uint32 corerev) +{ + ASSERT(plcp); + + /* sig-b mcs */ + sts->flag1 |= WL_RXS_HEF_SIGB_MCS_KNOWN; + sts->flag1 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_MCS); + + /* sig-b dcm */ + sts->flag1 |= WL_RXS_HEF_SIGB_DCM_KNOWN; + sts->flag1 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_DCM); + + /* sig-b compression */ + sts->flag1 |= WL_RXS_HEF_SIGB_COMP_KNOWN; + sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_COMP); + + /* # of he-sig-b symbols/mu-mimo users */ + sts->flag1 |= WL_RXS_HEF_NUM_SIGB_SYMB_KNOWN; + sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_SYM_MU_MIMO_USER); + + /* bandwidth from bandwidth field in he-sig-a */ + sts->flag2 |= WL_RXS_HEF_BW_SIGA_KNOWN; + sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, BW_SIGA); + + /* preamble puncturing from bandwidth field in he-sig-a */ + sts->flag2 |= WL_RXS_HEF_PREPUNCR_SIGA_KNOWN; + sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, PRE_PUNCR_SIGA); +} + +static void +wlc_he_ul_ofdma_fill_rtap_data(struct wl_rxsts *sts, d11rxhdr_t *rxh, uint8 *plcp, + uint32 corerev) +{ + ASSERT(rxh); + ASSERT(plcp); + + BCM_REFERENCE(rxh); + + /* he ppdu format */ + sts->data1 |= WL_RXS_HEF_SIGA_PPDU_TRIG; + + /* bss color */ + sts->data1 |= WL_RXS_HEF_SIGA_BSS_COLOR; + sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, TRIG, BSS_COLOR); + + /* beam change (doesn't apply to mu ppdu) */ + sts->data1 &= ~WL_RXS_HEF_SIGA_BEAM_CHANGE; + + /* ul/dl */ + sts->data1 |= WL_RXS_HEF_SIGA_DL_UL; + sts->data3 |= HE_PACK_RTAP_FROM_VAL(1, DL_UL); + + /* txop */ + sts->data2 |= WL_RXS_HEF_SIGA_TXOP; + sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, TRIG, TXOP); +} + +/* recover 32bit TSF value from the 16bit TSF value */ +/* assumption is time in rxh is within 65ms of the current tsf */ +/* local TSF inserted in the rxh is at RxStart which is before 802.11 header */ +static uint32 +wlc_recover_tsf32(uint16 rxh_tsf, uint32 ts_tsf) +{ + uint16 rfdly; + + /* adjust rx dly added in RxTSFTime */ + /* comment in d11.h: + * BWL_PRE_PACKED_STRUCT struct d11rxhdr { + * ... + * uint16 RxTSFTime; RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY + * ... + * } + */ + + /* TODO: add PHY type specific value here... 
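+	 *
+	 * Recovery sketch (illustrative values): with rxh_tsf = 0x1234 and
+	 * ts_tsf = 0xABCD2000, ((ts_tsf - rxh_tsf) & 0xFFFF0000) | rxh_tsf
+	 * = 0xABCD1234; the borrow in the subtraction corrects the high
+	 * half when the local TSF crossed a 16-bit boundary after RxStart.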
*/ + rfdly = M_BPHY_PLCPRX_DLY; + + rxh_tsf -= rfdly; + + return (((ts_tsf - rxh_tsf) & 0xFFFF0000) | rxh_tsf); +} + +static uint8 +wlc_vht_get_gid(uint8 *plcp) +{ + uint32 plcp0 = plcp[0] | (plcp[1] << 8); + return (plcp0 & VHT_SIGA1_GID_MASK) >> VHT_SIGA1_GID_SHIFT; +} + +static uint16 +wlc_vht_get_aid(uint8 *plcp) +{ + uint32 plcp0 = plcp[0] | (plcp[1] << 8) | (plcp[2] << 16); + return (plcp0 & VHT_SIGA1_PARTIAL_AID_MASK) >> VHT_SIGA1_PARTIAL_AID_SHIFT; +} + +static bool +wlc_vht_get_txop_ps_not_allowed(uint8 *plcp) +{ + return !!(plcp[2] & (VHT_SIGA1_TXOP_PS_NOT_ALLOWED >> 16)); +} + +static bool +wlc_vht_get_sgi_nsym_da(uint8 *plcp) +{ + return !!(plcp[3] & VHT_SIGA2_GI_W_MOD10); +} + +static bool +wlc_vht_get_ldpc_extra_symbol(uint8 *plcp) +{ + return !!(plcp[3] & VHT_SIGA2_LDPC_EXTRA_OFDM_SYM); +} + +static bool +wlc_vht_get_beamformed(uint8 *plcp) +{ + return !!(plcp[4] & (VHT_SIGA2_BEAMFORM_ENABLE >> 8)); +} +/* Convert htflags and mcs values to +* rate in units of 500kbps +*/ +static uint16 +wlc_ht_phy_get_rate(uint8 htflags, uint8 mcs) +{ + + ratespec_t rspec = HT_RSPEC(mcs); + + if (htflags & WL_RXS_HTF_40) + rspec |= WL_RSPEC_BW_40MHZ; + + if (htflags & WL_RXS_HTF_SGI) + rspec |= WL_RSPEC_SGI; + + return RSPEC2KBPS(rspec)/500; +} + +static void +bcmwifi_update_rxpwr_per_ant(monitor_pkt_rxsts_t *pkt_rxsts, wlc_d11rxhdr_t *wrxh) +{ + int i = 0; + wlc_d11rxhdr_ext_t *wrxh_ext = (wlc_d11rxhdr_ext_t *)((uint8 *)wrxh - WLC_SWRXHDR_EXT_LEN); + + BCM_REFERENCE(wrxh_ext); + + pkt_rxsts->corenum = 0; + + for (i = 0; i < WL_RSSI_ANT_MAX; i++) { +#ifdef BCM_MON_QDBM_RSSI + pkt_rxsts->rxpwr[i].dBm = wrxh_ext->rxpwr[i].dBm; + pkt_rxsts->rxpwr[i].decidBm = wrxh_ext->rxpwr[i].decidBm; +#else + pkt_rxsts->rxpwr[i].dBm = wrxh->rxpwr[i]; + pkt_rxsts->rxpwr[i].decidBm = 0; +#endif + if (pkt_rxsts->rxpwr[i].dBm == 0) { + break; + } + pkt_rxsts->corenum ++; + } +} + +static void +bcmwifi_parse_ampdu(monitor_info_t *info, d11rxhdr_t *rxh, uint16 subtype, ratespec_t rspec, + uint8 *plcp, struct wl_rxsts *sts) +{ + uint32 corerev = info->d11_info->major_revid; + uint32 corerev_minor = info->d11_info->minor_revid; + uint32 ft = D11PPDU_FT(rxh, corerev); + uint8 plcp_len = D11_PHY_RXPLCP_LEN(corerev); + BCM_REFERENCE(corerev_minor); + if ((subtype == FC_SUBTYPE_QOS_DATA) || (subtype == FC_SUBTYPE_QOS_NULL)) { + /* A-MPDU parsing */ + switch (ft) { + case FT_HT: + if (WLC_IS_MIMO_PLCP_AMPDU(plcp)) { + sts->nfrmtype |= WL_RXS_NFRM_AMPDU_FIRST; + /* Save the rspec & plcp for later */ + info->ampdu_rspec = rspec; + /* src & dst len are same */ + (void)memcpy_s(info->ampdu_plcp, plcp_len, plcp, plcp_len); + } else if (!PLCP_VALID(plcp)) { + sts->nfrmtype |= WL_RXS_NFRM_AMPDU_SUB; + /* Use the saved rspec & plcp */ + rspec = info->ampdu_rspec; + /* src & dst len are same */ + (void)memcpy_s(plcp, plcp_len, info->ampdu_plcp, plcp_len); + } + break; + + case FT_VHT: + case FT_HE: + case FT_EHT: + if (PLCP_VALID(plcp) && + !IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor)) { + /* First MPDU: + * PLCP header is valid, Phy RxStatus is not valid + */ + sts->nfrmtype |= WL_RXS_NFRM_AMPDU_FIRST; + /* Save the rspec & plcp for later */ + info->ampdu_rspec = rspec; + /* src & dst len are same */ + (void)memcpy_s(info->ampdu_plcp, plcp_len, plcp, plcp_len); + info->ampdu_counter++; + } else if (!PLCP_VALID(plcp) && + !IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor)) { + /* Sub MPDU: * PLCP header is not valid, + * Phy RxStatus is not valid + */ + sts->nfrmtype |= WL_RXS_NFRM_AMPDU_SUB; + /* Use the saved rspec & plcp */ + 
rspec = info->ampdu_rspec; + /* src & dst len are same */ + (void)memcpy_s(plcp, plcp_len, info->ampdu_plcp, plcp_len); + } else if (PLCP_VALID(plcp) && + IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor)) { + /* MPDU is not a part of A-MPDU: + * PLCP header is valid and Phy RxStatus is valid + */ + info->ampdu_counter++; + } else { + /* Last MPDU */ + /* done to take care of the last MPDU in A-mpdu + * VHT packets are considered A-mpdu + * Use the saved rspec + */ + rspec = info->ampdu_rspec; + /* src & dst len are same */ + (void)memcpy_s(plcp, plcp_len, info->ampdu_plcp, plcp_len); + } + + sts->ampdu_counter = info->ampdu_counter; + break; + + case FT_OFDM: + break; + default: + printf("invalid frame type: %d\n", ft); + break; + } + } +} + +static void +bcmwifi_update_rate_modulation_info(monitor_info_t *info, d11rxhdr_t *rxh, d11rxhdr_t *rxh_last, + ratespec_t rspec, uint8* plcp, struct wl_rxsts *sts) +{ + uint32 corerev = info->d11_info->major_revid; + uint32 corerev_minor = info->d11_info->minor_revid; + + /* prepare rate/modulation info */ + if (RSPEC_ISVHT(rspec)) { + uint32 bw = RSPEC_BW(rspec); + /* prepare VHT rate/modulation info */ + sts->nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT; + sts->mcs = (rspec & WL_RSPEC_VHT_MCS_MASK); + + if (CHSPEC_IS80(sts->chanspec)) { + if (bw == WL_RSPEC_BW_20MHZ) { + switch (CHSPEC_CTL_SB(sts->chanspec)) { + default: + case WL_CHANSPEC_CTL_SB_LL: + sts->bw = WL_RXS_VHT_BW_20LL; + break; + case WL_CHANSPEC_CTL_SB_LU: + sts->bw = WL_RXS_VHT_BW_20LU; + break; + case WL_CHANSPEC_CTL_SB_UL: + sts->bw = WL_RXS_VHT_BW_20UL; + break; + case WL_CHANSPEC_CTL_SB_UU: + sts->bw = WL_RXS_VHT_BW_20UU; + break; + } + } else if (bw == WL_RSPEC_BW_40MHZ) { + switch (CHSPEC_CTL_SB(sts->chanspec)) { + default: + case WL_CHANSPEC_CTL_SB_L: + sts->bw = WL_RXS_VHT_BW_40L; + break; + case WL_CHANSPEC_CTL_SB_U: + sts->bw = WL_RXS_VHT_BW_40U; + break; + } + } else { + sts->bw = WL_RXS_VHT_BW_80; + } + } else if (CHSPEC_IS40(sts->chanspec)) { + if (bw == WL_RSPEC_BW_20MHZ) { + switch (CHSPEC_CTL_SB(sts->chanspec)) { + default: + case WL_CHANSPEC_CTL_SB_L: + sts->bw = WL_RXS_VHT_BW_20L; + break; + case WL_CHANSPEC_CTL_SB_U: + sts->bw = WL_RXS_VHT_BW_20U; + break; + } + } else if (bw == WL_RSPEC_BW_40MHZ) { + sts->bw = WL_RXS_VHT_BW_40; + } + } else { + sts->bw = WL_RXS_VHT_BW_20; + } + + if (RSPEC_ISSTBC(rspec)) + sts->vhtflags |= WL_RXS_VHTF_STBC; + if (wlc_vht_get_txop_ps_not_allowed(plcp)) + sts->vhtflags |= WL_RXS_VHTF_TXOP_PS; + if (RSPEC_ISSGI(rspec)) { + sts->vhtflags |= WL_RXS_VHTF_SGI; + if (wlc_vht_get_sgi_nsym_da(plcp)) + sts->vhtflags |= WL_RXS_VHTF_SGI_NSYM_DA; + } + if (RSPEC_ISLDPC(rspec)) { + sts->coding = WL_RXS_VHTF_CODING_LDCP; + if (wlc_vht_get_ldpc_extra_symbol(plcp)) { + /* need to un-set for MU-MIMO */ + sts->vhtflags |= WL_RXS_VHTF_LDPC_EXTRA; + } + } + if (wlc_vht_get_beamformed(plcp)) + sts->vhtflags |= WL_RXS_VHTF_BF; + + sts->gid = wlc_vht_get_gid(plcp); + sts->aid = wlc_vht_get_aid(plcp); + sts->datarate = RSPEC2KBPS(rspec)/500; + } else if (RSPEC_ISHT(rspec)) { + /* prepare HT rate/modulation info */ + sts->mcs = (rspec & WL_RSPEC_HT_MCS_MASK); + + if (CHSPEC_IS40(sts->chanspec) || CHSPEC_IS80(sts->chanspec)) { + uint32 bw = RSPEC_BW(rspec); + + if (bw == WL_RSPEC_BW_20MHZ) { + if (CHSPEC_CTL_SB(sts->chanspec) == WL_CHANSPEC_CTL_SB_L) { + sts->htflags = WL_RXS_HTF_20L; + } else { + sts->htflags = WL_RXS_HTF_20U; + } + } else if (bw == WL_RSPEC_BW_40MHZ) { + sts->htflags = WL_RXS_HTF_40; + } + } + + if (RSPEC_ISSGI(rspec)) + 
sts->htflags |= WL_RXS_HTF_SGI;
+		if (RSPEC_ISLDPC(rspec))
+			sts->htflags |= WL_RXS_HTF_LDPC;
+		if (RSPEC_ISSTBC(rspec))
+			sts->htflags |= (1 << WL_RXS_HTF_STBC_SHIFT);
+
+		sts->datarate = wlc_ht_phy_get_rate(sts->htflags, sts->mcs);
+	} else if (FALSE ||
+#ifdef WL11BE
+		RSPEC_ISHEEXT(rspec) ||
+#else
+		RSPEC_ISHE(rspec) ||
+#endif
+		FALSE) {
+		sts->nss = (rspec & WL_RSPEC_NSS_MASK) >> WL_RSPEC_NSS_SHIFT;
+		sts->mcs = (rspec & WL_RSPEC_MCS_MASK);
+
+		if (D11PPDU_ISMU_REV80(rxh_last, corerev, corerev_minor)) {
+			if (IS_PHYRXHDR_VALID(rxh_last, corerev, corerev_minor)) {
+				uint16 ff_type = D11PPDU_FF_TYPE(rxh_last,
+					corerev, corerev_minor);
+
+				switch (ff_type) {
+				case HE_MU_PPDU:
+					wlc_he_dl_ofdma_fill_rtap_data(sts, rxh_last,
+						plcp, corerev, corerev_minor);
+					wlc_he_dl_ofdma_fill_rtap_flag(sts, plcp, corerev);
+					break;
+				case HE_TRIG_PPDU:
+					wlc_he_ul_ofdma_fill_rtap_data(sts, rxh_last,
+						plcp, corerev);
+					break;
+				default:
+					/* should not have come here */
+					ASSERT(0);
+					break;
+				}
+			}
+		} else {
+			/* frame format is either SU or SU_RE (assumption only SU is supported) */
+			wlc_he_su_fill_rtap_data(sts, plcp);
+		}
+	} else {
+		/* round non-HT data rate to nearest 500kbps unit */
+		sts->datarate = RSPEC2KBPS(rspec)/500;
+	}
+}
+
+/* Convert RX hardware status to standard format and send to wl_monitor
+ * assume p points to plcp header
+ */
+static uint16
+wl_d11rx_to_rxsts(monitor_info_t* info, monitor_pkt_info_t* pkt_info, wlc_d11rxhdr_t *wrxh,
+	wlc_d11rxhdr_t *wrxh_last, void *pkt, uint16 len, void* pout, uint16 pad_req)
+{
+	struct wl_rxsts sts;
+	monitor_pkt_rxsts_t pkt_rxsts;
+	ratespec_t rspec;
+	uint16 chan_num;
+	uint8 *plcp;
+	uint8 *p = (uint8*)pkt;
+	uint8 hwrxoff = 0;
+	uint32 corerev = 0;
+	uint32 corerev_minor = 0;
+	struct dot11_header *h;
+	uint16 subtype;
+	d11rxhdr_t *rxh = &(wrxh->rxhdr);
+	d11rxhdr_t *rxh_last = &(wrxh_last->rxhdr);
+	d11_info_t* d11i = info->d11_info;
+	uint8 plcp_len = 0;
+
+	BCM_REFERENCE(chan_num);
+
+	ASSERT(p);
+	ASSERT(info);
+	pkt_rxsts.rxsts = &sts;
+
+	hwrxoff = (pkt_info->marker >> 16) & 0xff;
+	corerev = d11i->major_revid;
+	corerev_minor = d11i->minor_revid;
+	BCM_REFERENCE(corerev_minor);
+
+	plcp = (uint8*)p + hwrxoff;
+	plcp_len = D11_PHY_RXPLCP_LEN(corerev);
+
+	/* only non short rxstatus is expected */
+	if (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor)) {
+		printf("short rxstatus is not expected here!\n");
+		ASSERT(0);
+		return 0;
+	}
+
+	if (RXHDR_GET_PAD_PRES(rxh, corerev, corerev_minor)) {
+		plcp += 2;
+	}
+
+	bzero((void *)&sts, sizeof(wl_rxsts_t));
+
+	sts.mactime = wlc_recover_tsf32(pkt_info->ts.ts_high, pkt_info->ts.ts_low);
+
+	/* update rxpwr per antenna */
+	bcmwifi_update_rxpwr_per_ant(&pkt_rxsts, wrxh);
+
+	/* calculate rspec based on ppdu frame type */
+	rspec = wlc_recv_mon_compute_rspec(info, wrxh, plcp);
+
+	h = (struct dot11_header *)(plcp + plcp_len);
+	subtype = (ltoh16(h->fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT;
+
+	/* parse & cache rspec for ampdu */
+	bcmwifi_parse_ampdu(info, rxh, subtype, rspec, plcp, &sts);
+
+	/* A-MSDU parsing */
+	if (RXHDR_GET_AMSDU(rxh, corerev, corerev_minor)) {
+		/* it's chained buffer, break it if necessary */
+		sts.nfrmtype |= WL_RXS_NFRM_AMSDU_FIRST | WL_RXS_NFRM_AMSDU_SUB;
+	}
+
+	sts.signal = (pkt_info->marker >> 8) & 0xff;
+	sts.noise = (int8)pkt_info->marker;
+	sts.chanspec = D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxChan);
+
+	if (wf_chspec_malformed(sts.chanspec)) {
+		printf("Malformed chspec, %x\n", sts.chanspec);
+		return 0;
+	}
+
+	/* 4360: is chan_num supposed to be primary or CF channel? */
+	chan_num = CHSPEC_CHANNEL(sts.chanspec);
+
+	if (PRXS5_ACPHY_DYNBWINNONHT(rxh))
+		sts.vhtflags |= WL_RXS_VHTF_DYN_BW_NONHT;
+	else
+		sts.vhtflags &= ~WL_RXS_VHTF_DYN_BW_NONHT;
+
+	switch (PRXS5_ACPHY_CHBWINNONHT(rxh)) {
+	default: case PRXS5_ACPHY_CHBWINNONHT_20MHZ:
+		sts.bw_nonht = WLC_20_MHZ;
+		break;
+	case PRXS5_ACPHY_CHBWINNONHT_40MHZ:
+		sts.bw_nonht = WLC_40_MHZ;
+		break;
+	case PRXS5_ACPHY_CHBWINNONHT_80MHZ:
+		sts.bw_nonht = WLC_80_MHZ;
+		break;
+	case PRXS5_ACPHY_CHBWINNONHT_160MHZ:
+		sts.bw_nonht = WLC_160_MHZ;
+		break;
+	}
+
+	/* update rate and modulation info */
+	bcmwifi_update_rate_modulation_info(info, rxh, rxh_last, rspec, plcp, &sts);
+
+	sts.pktlength = FRAMELEN(corerev, corerev_minor, rxh) - plcp_len;
+
+	sts.phytype = WL_RXS_PHY_N;
+
+	if (RSPEC_ISCCK(rspec)) {
+		sts.encoding = WL_RXS_ENCODING_DSSS_CCK;
+		sts.preamble = (PRXS_SHORTH(rxh, corerev, corerev_minor) ?
+			WL_RXS_PREAMBLE_SHORT : WL_RXS_PREAMBLE_LONG);
+	} else if (RSPEC_ISOFDM(rspec)) {
+		sts.encoding = WL_RXS_ENCODING_OFDM;
+		sts.preamble = WL_RXS_PREAMBLE_SHORT;
+	} else if (RSPEC_ISVHT(rspec)) {
+		sts.encoding = WL_RXS_ENCODING_VHT;
+	} else if (RSPEC_ISHE(rspec)) {
+		sts.encoding = WL_RXS_ENCODING_HE;
+	} else if (RSPEC_ISEHT(rspec)) {
+		sts.encoding = WL_RXS_ENCODING_EHT;
+	} else {	/* MCS rate */
+		sts.encoding = WL_RXS_ENCODING_HT;
+		sts.preamble = (uint32)((D11HT_MMPLCPLen(rxh) != 0) ?
+			WL_RXS_PREAMBLE_HT_MM : WL_RXS_PREAMBLE_HT_GF);
+	}
+
+	/* translate error code */
+	if (D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxStatus1) & RXS_DECERR)
+		sts.pkterror |= WL_RXS_DECRYPT_ERR;
+	if (D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxStatus1) & RXS_FCSERR)
+		sts.pkterror |= WL_RXS_CRC_ERROR;
+
+	if (RXHDR_GET_PAD_PRES(rxh, corerev, corerev_minor)) {
+		p += 2; len -= 2;
+	}
+
+	p += (hwrxoff + D11_PHY_RXPLCP_LEN(corerev));
+	len -= (hwrxoff + D11_PHY_RXPLCP_LEN(corerev));
+	return (wl_rxsts_to_rtap(&pkt_rxsts, p, len, pout, pad_req));
+}
+
+#ifndef MONITOR_DNGL_CONV
+/* Collect AMSDU subframe packets */
+static uint16
+wl_monitor_amsdu(monitor_info_t* info, monitor_pkt_info_t* pkt_info, wlc_d11rxhdr_t *wrxh,
+	wlc_d11rxhdr_t *wrxh_last, void *pkt, uint16 len, void* pout, uint16* offset)
+{
+	uint8 *p = pkt;
+	uint8 hwrxoff = (pkt_info->marker >> 16) & 0xff;
+	uint16 frame_len = 0;
+	uint16 aggtype = (wrxh->rxhdr.lt80.RxStatus2 & RXS_AGGTYPE_MASK) >> RXS_AGGTYPE_SHIFT;
+	/* d11 core major rev, needed below for the PLCP length */
+	uint32 corerev = info->d11_info->major_revid;
+
+	switch (aggtype) {
+	case RXS_AMSDU_FIRST:
+	case RXS_AMSDU_N_ONE:
+		/* Flush any previously collected */
+		if (info->amsdu_len) {
+			info->amsdu_len = 0;
+		}
+
+		info->headroom = MAX_RADIOTAP_SIZE - D11_PHY_RXPLCP_LEN(corerev) - hwrxoff;
+		info->headroom -= (wrxh->rxhdr.lt80.RxStatus1 & RXS_PBPRES) ? 2 : 0;
+
+		/* Save the new starting AMSDU subframe */
+		info->amsdu_len = len;
+		info->amsdu_pkt = (uint8*)pout + (info->headroom > 0 ?
+ info->headroom : 0); + + memcpy(info->amsdu_pkt, p, len); + + if (aggtype == RXS_AMSDU_N_ONE) { + /* all-in-one AMSDU subframe */ + frame_len = wl_d11rx_to_rxsts(info, pkt_info, wrxh, wrxh, p, + len, info->amsdu_pkt - info->headroom, 0); + + *offset = ABS(info->headroom); + frame_len += *offset; + + info->amsdu_len = 0; + } + break; + + case RXS_AMSDU_INTERMEDIATE: + case RXS_AMSDU_LAST: + default: + /* Check for previously collected */ + if (info->amsdu_len) { + /* Append next AMSDU subframe */ + p += hwrxoff; len -= hwrxoff; + + if (wrxh->rxhdr.lt80.RxStatus1 & RXS_PBPRES) { + p += 2; len -= 2; + } + + memcpy(info->amsdu_pkt + info->amsdu_len, p, len); + info->amsdu_len += len; + + /* complete AMSDU frame */ + if (aggtype == RXS_AMSDU_LAST) { + frame_len = wl_d11rx_to_rxsts(info, pkt_info, wrxh, wrxh, + info->amsdu_pkt, info->amsdu_len, + info->amsdu_pkt - info->headroom, 0); + + *offset = ABS(info->headroom); + frame_len += *offset; + + info->amsdu_len = 0; + } + } + break; + } + + return frame_len; +} +#endif /* MONITOR_DNGL_CONV */ + +uint16 bcmwifi_monitor_create(monitor_info_t** info) +{ + *info = MALLOCZ(NULL, sizeof(struct monitor_info)); + if ((*info) == NULL) { + return FALSE; + } + + (*info)->d11_info = MALLOCZ(NULL, sizeof(struct d11_info)); + if ((*info)->d11_info == NULL) { + goto fail; + } + + return TRUE; + +fail: + bcmwifi_monitor_delete(*info); + + return FALSE; +} + +void +bcmwifi_set_corerev_major(monitor_info_t* info, int8 corerev) +{ + d11_info_t* d11i = info->d11_info; + d11i->major_revid = corerev; +} + +void +bcmwifi_set_corerev_minor(monitor_info_t* info, int8 corerev) +{ + d11_info_t* d11i = info->d11_info; + d11i->minor_revid = corerev; +} + +void +bcmwifi_monitor_delete(monitor_info_t* info) +{ + if (info == NULL) { + return; + } + + if (info->d11_info != NULL) { + MFREE(NULL, info->d11_info, sizeof(struct d11_info)); + } + + MFREE(NULL, info, sizeof(struct monitor_info)); +} + +uint16 +bcmwifi_monitor(monitor_info_t* info, monitor_pkt_info_t* pkt_info, void *pkt, uint16 len, + void* pout, uint16* offset, uint16 pad_req, void *wrxh_in, void *wrxh_last) +{ + wlc_d11rxhdr_t *wrxh; + int hdr_ext_offset = 0; + +#ifdef MONITOR_DNGL_CONV + wrxh = (wlc_d11rxhdr_t *)wrxh_in; + if (info == NULL) { + return 0; + } +#else + +#ifdef BCM_MON_QDBM_RSSI + hdr_ext_offset = WLC_SWRXHDR_EXT_LEN; +#endif + /* move beyond the extension, if any */ + pkt = (void *)((uint8 *)pkt + hdr_ext_offset); + wrxh = (wlc_d11rxhdr_t *)pkt; + + if ((wrxh->rxhdr.lt80.RxStatus2 & htol16(RXS_AMSDU_MASK))) { + /* Need to add support for AMSDU */ + return wl_monitor_amsdu(info, pkt_info, wrxh, wrxh_last, pkt, len, pout, offset); + } else +#endif /* NO MONITOR_DNGL_CONV */ + { + info->amsdu_len = 0; /* reset amsdu */ + *offset = 0; + return wl_d11rx_to_rxsts(info, pkt_info, wrxh, wrxh_last, + pkt, len - hdr_ext_offset, pout, pad_req); + } +} diff --git a/bcmdhd.101.10.361.x/bcmwifi_radiotap.c b/bcmdhd.101.10.361.x/bcmwifi_radiotap.c new file mode 100755 index 0000000..7832981 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmwifi_radiotap.c @@ -0,0 +1,1035 @@ +/* + * RadioTap utility routines for WL + * This file housing the functions use by + * wl driver. + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#include +#include +#include +#include +#include + +const struct rtap_field rtap_parse_info[] = { + {8, 8}, /* 0: IEEE80211_RADIOTAP_TSFT */ + {1, 1}, /* 1: IEEE80211_RADIOTAP_FLAGS */ + {1, 1}, /* 2: IEEE80211_RADIOTAP_RATE */ + {4, 2}, /* 3: IEEE80211_RADIOTAP_CHANNEL */ + {2, 2}, /* 4: IEEE80211_RADIOTAP_FHSS */ + {1, 1}, /* 5: IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ + {1, 1}, /* 6: IEEE80211_RADIOTAP_DBM_ANTNOISE */ + {2, 2}, /* 7: IEEE80211_RADIOTAP_LOCK_QUALITY */ + {2, 2}, /* 8: IEEE80211_RADIOTAP_TX_ATTENUATION */ + {2, 2}, /* 9: IEEE80211_RADIOTAP_DB_TX_ATTENUATION */ + {1, 1}, /* 10: IEEE80211_RADIOTAP_DBM_TX_POWER */ + {1, 1}, /* 11: IEEE80211_RADIOTAP_ANTENNA */ + {1, 1}, /* 12: IEEE80211_RADIOTAP_DB_ANTSIGNAL */ + {1, 1}, /* 13: IEEE80211_RADIOTAP_DB_ANTNOISE */ + {0, 0}, /* 14: netbsd */ + {2, 2}, /* 15: IEEE80211_RADIOTAP_TXFLAGS */ + {0, 0}, /* 16: missing */ + {1, 1}, /* 17: IEEE80211_RADIOTAP_RETRIES */ + {8, 4}, /* 18: IEEE80211_RADIOTAP_XCHANNEL */ + {3, 1}, /* 19: IEEE80211_RADIOTAP_MCS */ + {8, 4}, /* 20: IEEE80211_RADIOTAP_AMPDU_STATUS */ + {12, 2}, /* 21: IEEE80211_RADIOTAP_VHT */ + {0, 0}, /* 22: */ + {0, 0}, /* 23: */ + {0, 0}, /* 24: */ + {0, 0}, /* 25: */ + {0, 0}, /* 26: */ + {0, 0}, /* 27: */ + {0, 0}, /* 28: */ + {0, 0}, /* 29: IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE */ + {6, 2}, /* 30: IEEE80211_RADIOTAP_VENDOR_NAMESPACE */ + {0, 0} /* 31: IEEE80211_RADIOTAP_EXT */ +}; + +static int bitmap = 0; + +void +radiotap_add_vendor_ns(ieee80211_radiotap_header_t *hdr); + +void +radiotap_encode_multi_rssi(monitor_pkt_rxsts_t* rxsts, ieee80211_radiotap_header_t *hdr); +void +radiotap_encode_bw_signaling(uint16 mask, struct wl_rxsts* rxsts, ieee80211_radiotap_header_t *hdr); +#ifdef MONITOR_DNGL_CONV +void radiotap_encode_alignpad(ieee80211_radiotap_header_t *hdr, uint16 pad_req); +#endif + +static const uint8 brcm_oui[] = {0x00, 0x10, 0x18}; + +static void +wl_rtapParseReset(radiotap_parse_t *rtap) +{ + rtap->idx = 0; /* reset parse index */ + rtap->offset = 0; /* reset current field pointer */ +} + +static void* +wl_rtapParseFindField(radiotap_parse_t *rtap, uint search_idx) +{ + uint idx; /* first bit index to parse */ + uint32 btmap; /* presence bitmap */ + uint offset, field_offset; + uint align, len; + void *ptr = NULL; + + if (search_idx > IEEE80211_RADIOTAP_EXT) + return ptr; + + if (search_idx < rtap->idx) + wl_rtapParseReset(rtap); + + btmap = rtap->hdr->it_present; + idx = rtap->idx; + offset = rtap->offset; + + /* loop through each field index until we get to the target idx */ + while (idx <= search_idx) { + /* if field 'idx' is present, update the offset and check for a match */ + if ((1 << idx) & btmap) 
{ + /* if we hit a field for which we have no parse info + * we need to just bail out + */ + if (rtap_parse_info[idx].align == 0) + break; + + /* step past any alignment padding */ + align = rtap_parse_info[idx].align; + len = rtap_parse_info[idx].len; + + /* ROUNDUP */ + field_offset = ((offset + (align - 1)) / align) * align; + + /* if this field is not in the bounds of the header + * just bail out + */ + if (field_offset + len > rtap->fields_len) + break; + + /* did we find the field? */ + if (idx == search_idx) + ptr = (uint8*)rtap->fields + field_offset; + + /* step past this field */ + offset = field_offset + len; + } + + idx++; + } + + rtap->idx = idx; + rtap->offset = offset; + + return ptr; +} + +ratespec_t +wl_calcRspecFromRTap(uint8 *rtap_header) +{ + ratespec_t rspec = 0; + radiotap_parse_t rtap; + uint8 rate = 0; + uint8 flags = 0; + int flags_present = FALSE; + uint8 mcs = 0; + uint8 mcs_flags = 0; + uint8 mcs_known = 0; + int mcs_present = FALSE; + void *p; + + wl_rtapParseInit(&rtap, rtap_header); + + p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_FLAGS); + if (p != NULL) { + flags_present = TRUE; + flags = ((uint8*)p)[0]; + } + + p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_RATE); + if (p != NULL) + rate = ((uint8*)p)[0]; + + p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_MCS); + if (p != NULL) { + mcs_present = TRUE; + mcs_known = ((uint8*)p)[0]; + mcs_flags = ((uint8*)p)[1]; + mcs = ((uint8*)p)[2]; + } + + if (rate != 0) { + /* validate the DSSS rates 1,2,5.5,11 */ + if (rate == 2 || rate == 4 || rate == 11 || rate == 22) { + rspec = LEGACY_RSPEC(rate) | WL_RSPEC_OVERRIDE_RATE; + if (flags_present && (flags & IEEE80211_RADIOTAP_F_SHORTPRE)) { + rspec |= WL_RSPEC_OVERRIDE_MODE | WL_RSPEC_SHORT_PREAMBLE; + } + } + } else if (mcs_present) { + /* validate the MCS value */ + if (mcs <= 23 || mcs == 32) { + uint32 override = 0; + if (mcs_known & + (IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_FMT | + IEEE80211_RADIOTAP_MCS_HAVE_FEC)) { + override = WL_RSPEC_OVERRIDE_MODE; + } + + rspec = HT_RSPEC(mcs) | WL_RSPEC_OVERRIDE_RATE; + + if ((mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI) && + (mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)) + rspec |= WL_RSPEC_SGI; + if ((mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FMT) && + (mcs_flags & IEEE80211_RADIOTAP_MCS_FMT_GF)) + rspec |= WL_RSPEC_SHORT_PREAMBLE; + if ((mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC) && + (mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC)) + rspec |= WL_RSPEC_LDPC; + + rspec |= override; + } + } + + return rspec; +} + +bool +wl_rtapFlags(uint8 *rtap_header, uint8* flags) +{ + radiotap_parse_t rtap; + void *p; + + wl_rtapParseInit(&rtap, rtap_header); + + p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_FLAGS); + if (p != NULL) { + *flags = ((uint8*)p)[0]; + } + + return (p != NULL); +} + +void +wl_rtapParseInit(radiotap_parse_t *rtap, uint8 *rtap_header) +{ + uint rlen; + uint32 *present_word; + struct ieee80211_radiotap_header *hdr = (struct ieee80211_radiotap_header*)rtap_header; + + bzero(rtap, sizeof(radiotap_parse_t)); + + rlen = hdr->it_len; /* total space in rtap_header */ + + /* If a presence word has the IEEE80211_RADIOTAP_EXT bit set it indicates + * that there is another presence word.
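+ * (For example, when radiotap_add_vendor_ns() at the bottom of this file
+ * chains a second namespace it sets bit 31, IEEE80211_RADIOTAP_EXT, in the
+ * current it_present word and starts another 32-bit presence word after it.)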
+ * Step over the presence words until we find the end of the list + */ + present_word = &hdr->it_present; + /* remaining length in header past it_present */ + rlen -= sizeof(struct ieee80211_radiotap_header); + + while ((*present_word & (1 << IEEE80211_RADIOTAP_EXT)) && rlen >= 4) { + present_word++; + rlen -= 4; /* account for 4 bytes of present_word */ + } + + rtap->hdr = hdr; + rtap->fields = (uint8*)(present_word + 1); + rtap->fields_len = rlen; + wl_rtapParseReset(rtap); +} + +uint +wl_radiotap_rx(struct dot11_header *mac_header, wl_rxsts_t *rxsts, bsd_header_rx_t *bsd_header) +{ + int channel_frequency; + uint32 channel_flags; + uint8 flags; + uint8 *cp; + uint pad_len; + uint32 field_map; + uint16 fc; + uint bsd_header_len; + uint16 ampdu_flags = 0; + + fc = LTOH16(mac_header->fc); + pad_len = 3; + field_map = WL_RADIOTAP_PRESENT_BASIC; + + if (CHSPEC_IS2G(rxsts->chanspec)) { + channel_flags = IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN; + channel_frequency = wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec), + WF_CHAN_FACTOR_2_4_G); + } else if (CHSPEC_IS5G(rxsts->chanspec)) { + channel_flags = IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM; + channel_frequency = wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec), + WF_CHAN_FACTOR_5_G); + } else { + channel_flags = IEEE80211_CHAN_OFDM; + channel_frequency = wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec), + WF_CHAN_FACTOR_6_G); + } + + if ((rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) || + (rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) { + + ampdu_flags = IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; + } + + flags = IEEE80211_RADIOTAP_F_FCS; + + if (rxsts->preamble == WL_RXS_PREAMBLE_SHORT) + flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + if ((fc & FC_WEP) == FC_WEP) + flags |= IEEE80211_RADIOTAP_F_WEP; + + if ((fc & FC_MOREFRAG) == FC_MOREFRAG) + flags |= IEEE80211_RADIOTAP_F_FRAG; + + if (rxsts->pkterror & WL_RXS_CRC_ERROR) + flags |= IEEE80211_RADIOTAP_F_BADFCS; + + if (rxsts->encoding == WL_RXS_ENCODING_HT) + field_map = WL_RADIOTAP_PRESENT_HT; + else if (rxsts->encoding == WL_RXS_ENCODING_VHT) + field_map = WL_RADIOTAP_PRESENT_VHT; + + bsd_header_len = sizeof(struct wl_radiotap_sna); /* start with sna size */ + /* Test for signal/noise values and update length and field bitmap */ + if (rxsts->signal == 0) { + field_map &= ~(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + pad_len = (pad_len - 1); + bsd_header_len--; + } + + if (rxsts->noise == 0) { + field_map &= ~(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE); + pad_len = (pad_len - 1); + bsd_header_len--; + } + + if (rxsts->encoding == WL_RXS_ENCODING_HT || + rxsts->encoding == WL_RXS_ENCODING_VHT) { + struct wl_radiotap_hdr *rtht = &bsd_header->hdr; + struct wl_radiotap_ht_tail *tail; + + /* + * Header length is complicated due to dynamic + * presence of signal and noise fields + * and padding for xchannel following + * signal/noise/ant.
+ * Start with length of wl_radiotap_ht plus + * signal/noise/ant + */ + bsd_header_len += sizeof(struct wl_radiotap_hdr) + pad_len; + bsd_header_len += sizeof(struct wl_radiotap_xchan); + if (rxsts->nfrmtype == WL_RXS_NFRM_AMPDU_FIRST || + rxsts->nfrmtype == WL_RXS_NFRM_AMPDU_SUB) { + bsd_header_len += sizeof(struct wl_radiotap_ampdu); + } + /* add the length of the tail end of the structure */ + if (rxsts->encoding == WL_RXS_ENCODING_HT) + bsd_header_len += sizeof(struct wl_htmcs); + else if (rxsts->encoding == WL_RXS_ENCODING_VHT) + bsd_header_len += sizeof(struct wl_vhtmcs); + bzero((uint8 *)rtht, sizeof(*rtht)); + + rtht->ieee_radiotap.it_version = 0; + rtht->ieee_radiotap.it_pad = 0; + rtht->ieee_radiotap.it_len = (uint16)HTOL16(bsd_header_len); + rtht->ieee_radiotap.it_present = HTOL32(field_map); + + rtht->tsft = HTOL64((uint64)rxsts->mactime); + rtht->flags = flags; + rtht->channel_freq = (uint16)HTOL16(channel_frequency); + rtht->channel_flags = (uint16)HTOL16(channel_flags); + + cp = bsd_header->pad; + /* add in signal/noise/ant */ + if (rxsts->signal != 0) { + *cp++ = (int8)rxsts->signal; + pad_len--; + } + if (rxsts->noise != 0) { + *cp++ = (int8)rxsts->noise; + pad_len--; + } + *cp++ = (int8)rxsts->antenna; + pad_len--; + + tail = (struct wl_radiotap_ht_tail *)(bsd_header->ht); + /* Fill in XCHANNEL */ + if (CHSPEC_IS40(rxsts->chanspec)) { + if (CHSPEC_SB_UPPER(rxsts->chanspec)) + channel_flags |= IEEE80211_CHAN_HT40D; + else + channel_flags |= IEEE80211_CHAN_HT40U; + } else + channel_flags |= IEEE80211_CHAN_HT20; + + tail->xc.xchannel_flags = HTOL32(channel_flags); + tail->xc.xchannel_freq = (uint16)HTOL16(channel_frequency); + tail->xc.xchannel_channel = wf_chspec_ctlchan(rxsts->chanspec); + tail->xc.xchannel_maxpower = (17*2); + /* fill in A-mpdu Status */ + tail->ampdu.ref_num = mac_header->seq; + tail->ampdu.flags = ampdu_flags; + tail->ampdu.delimiter_crc = 0; + tail->ampdu.reserved = 0; + + if (rxsts->encoding == WL_RXS_ENCODING_HT) { + tail->u.ht.mcs_index = rxsts->mcs; + tail->u.ht.mcs_known = (IEEE80211_RADIOTAP_MCS_HAVE_BW | + IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_FEC | + IEEE80211_RADIOTAP_MCS_HAVE_FMT); + tail->u.ht.mcs_flags = 0; + + switch (rxsts->htflags & WL_RXS_HTF_BW_MASK) { + case WL_RXS_HTF_20L: + tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20L; + break; + case WL_RXS_HTF_20U: + tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20U; + break; + case WL_RXS_HTF_40: + tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_40; + break; + default: + tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20; + break; + } + + if (rxsts->htflags & WL_RXS_HTF_SGI) + tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_SGI; + if (rxsts->preamble & WL_RXS_PREAMBLE_HT_GF) + tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_FMT_GF; + if (rxsts->htflags & WL_RXS_HTF_LDPC) + tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; + } else if (rxsts->encoding == WL_RXS_ENCODING_VHT) { + tail->u.vht.vht_known = (IEEE80211_RADIOTAP_VHT_HAVE_STBC | + IEEE80211_RADIOTAP_VHT_HAVE_TXOP_PS | + IEEE80211_RADIOTAP_VHT_HAVE_GI | + IEEE80211_RADIOTAP_VHT_HAVE_SGI_NSYM_DA | + IEEE80211_RADIOTAP_VHT_HAVE_LDPC_EXTRA | + IEEE80211_RADIOTAP_VHT_HAVE_BF | + IEEE80211_RADIOTAP_VHT_HAVE_BW | + IEEE80211_RADIOTAP_VHT_HAVE_GID | + IEEE80211_RADIOTAP_VHT_HAVE_PAID); + + tail->u.vht.vht_flags = (uint8)HTOL16(rxsts->vhtflags); + + switch (rxsts->bw) { + case WL_RXS_VHT_BW_20: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20; + break; + case 
WL_RXS_VHT_BW_40: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_40; + break; + case WL_RXS_VHT_BW_20L: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20L; + break; + case WL_RXS_VHT_BW_20U: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20U; + break; + case WL_RXS_VHT_BW_80: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_80; + break; + case WL_RXS_VHT_BW_40L: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_40L; + break; + case WL_RXS_VHT_BW_40U: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_40U; + break; + case WL_RXS_VHT_BW_20LL: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20LL; + break; + case WL_RXS_VHT_BW_20LU: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20LU; + break; + case WL_RXS_VHT_BW_20UL: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20UL; + break; + case WL_RXS_VHT_BW_20UU: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20UU; + break; + default: + tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20; + break; + } + + tail->u.vht.vht_mcs_nss[0] = (rxsts->mcs << 4) | + (rxsts->nss & IEEE80211_RADIOTAP_VHT_NSS); + tail->u.vht.vht_mcs_nss[1] = 0; + tail->u.vht.vht_mcs_nss[2] = 0; + tail->u.vht.vht_mcs_nss[3] = 0; + + tail->u.vht.vht_coding = rxsts->coding; + tail->u.vht.vht_group_id = rxsts->gid; + tail->u.vht.vht_partial_aid = HTOL16(rxsts->aid); + } + } else { + struct wl_radiotap_hdr *rtl = &bsd_header->hdr; + + /* + * Header length is complicated due to dynamic presence of signal and noise fields + * Start with length of wl_radiotap_legacy plus signal/noise/ant + */ + bsd_header_len = sizeof(struct wl_radiotap_hdr) + pad_len; + bzero((uint8 *)rtl, sizeof(*rtl)); + + rtl->ieee_radiotap.it_version = 0; + rtl->ieee_radiotap.it_pad = 0; + rtl->ieee_radiotap.it_len = (uint16)HTOL16(bsd_header_len); + rtl->ieee_radiotap.it_present = HTOL32(field_map); + + rtl->tsft = HTOL64((uint64)rxsts->mactime); + rtl->flags = flags; + rtl->u.rate = (uint8)rxsts->datarate; + rtl->channel_freq = (uint16)HTOL16(channel_frequency); + rtl->channel_flags = (uint16)HTOL16(channel_flags); + + /* add in signal/noise/ant */ + cp = bsd_header->pad; + if (rxsts->signal != 0) + *cp++ = (int8)rxsts->signal; + if (rxsts->noise != 0) + *cp++ = (int8)rxsts->noise; + *cp++ = (int8)rxsts->antenna; + } + return bsd_header_len; +} + +static int +wl_radiotap_rx_channel_frequency(wl_rxsts_t *rxsts) +{ + if (CHSPEC_IS2G(rxsts->chanspec)) { + return wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec), + WF_CHAN_FACTOR_2_4_G); + } else if (CHSPEC_IS5G(rxsts->chanspec)) { + return wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec), + WF_CHAN_FACTOR_5_G); + } else { + return wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec), + WF_CHAN_FACTOR_6_G); + } +} + +static uint16 +wl_radiotap_rx_channel_flags(wl_rxsts_t *rxsts) +{ + if (CHSPEC_IS2G(rxsts->chanspec)) { + return (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN); + } else if (CHSPEC_IS5G(rxsts->chanspec)) { + return (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM); + } else { + return (IEEE80211_CHAN_OFDM); + } +} + +static uint8 +wl_radiotap_rx_flags(struct dot11_header *mac_header, wl_rxsts_t *rxsts) +{ + uint8 flags; + uint16 fc; + + fc = ltoh16(mac_header->fc); + + flags = IEEE80211_RADIOTAP_F_FCS; + + if (rxsts->preamble == WL_RXS_PREAMBLE_SHORT) + flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + if (fc & FC_WEP) + flags |= IEEE80211_RADIOTAP_F_WEP; + + if (fc & FC_MOREFRAG) + flags |= IEEE80211_RADIOTAP_F_FRAG; + + return flags; +} + +uint +wl_radiotap_rx_legacy(struct dot11_header *mac_header, + wl_rxsts_t *rxsts, ieee80211_radiotap_header_t *rtap_hdr) +{ + 
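+	/*
+	 * Append the fixed legacy-rate radiotap fields directly after the
+	 * current end of the radiotap header, then grow it_len and set the
+	 * WL_RADIOTAP_PRESENT_LEGACY bits in it_present so parsers can find
+	 * the new fields.
+	 */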
int channel_frequency; + uint16 channel_flags; + uint8 flags; + uint16 rtap_len = LTOH16(rtap_hdr->it_len); + wl_radiotap_legacy_t *rtl = (wl_radiotap_legacy_t *)((uint8*)rtap_hdr + rtap_len); + + rtap_len += sizeof(wl_radiotap_legacy_t); + rtap_hdr->it_len = HTOL16(rtap_len); + rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_LEGACY); + + channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts); + channel_flags = wl_radiotap_rx_channel_flags(rxsts); + flags = wl_radiotap_rx_flags(mac_header, rxsts); + + rtl->basic.tsft_l = HTOL32(rxsts->mactime); + rtl->basic.tsft_h = 0; + rtl->basic.flags = flags; + rtl->basic.rate = (uint8)rxsts->datarate; + rtl->basic.channel_freq = (uint16)HTOL16(channel_frequency); + rtl->basic.channel_flags = HTOL16(channel_flags); + rtl->basic.signal = (int8)rxsts->signal; + rtl->basic.noise = (int8)rxsts->noise; + rtl->basic.antenna = (int8)rxsts->antenna; + + return 0; +} + +uint +wl_radiotap_rx_ht(struct dot11_header *mac_header, + wl_rxsts_t *rxsts, ieee80211_radiotap_header_t *rtap_hdr) +{ + int channel_frequency; + uint16 channel_flags; + uint32 xchannel_flags; + uint8 flags; + + uint16 rtap_len = LTOH16(rtap_hdr->it_len); + wl_radiotap_ht_t *rtht = (wl_radiotap_ht_t *)((uint8*)rtap_hdr + rtap_len); + + rtap_len += sizeof(wl_radiotap_ht_t); + rtap_hdr->it_len = HTOL16(rtap_len); + rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_HT); + + channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts); + channel_flags = wl_radiotap_rx_channel_flags(rxsts); + flags = wl_radiotap_rx_flags(mac_header, rxsts); + + rtht->basic.tsft_l = HTOL32(rxsts->mactime); + rtht->basic.tsft_h = 0; + rtht->basic.flags = flags; + rtht->basic.channel_freq = (uint16)HTOL16(channel_frequency); + rtht->basic.channel_flags = HTOL16(channel_flags); + rtht->basic.signal = (int8)rxsts->signal; + rtht->basic.noise = (int8)rxsts->noise; + rtht->basic.antenna = (uint8)rxsts->antenna; + + /* xchannel */ + xchannel_flags = (uint32)channel_flags; + if (CHSPEC_IS40(rxsts->chanspec)) { + if (CHSPEC_SB_UPPER(rxsts->chanspec)) + xchannel_flags |= IEEE80211_CHAN_HT40D; + else { + xchannel_flags |= IEEE80211_CHAN_HT40U; + } + } else { + xchannel_flags |= IEEE80211_CHAN_HT20; + } + + rtht->xchannel_flags = HTOL32(xchannel_flags); + rtht->xchannel_freq = (uint16)HTOL16(channel_frequency); + rtht->xchannel_channel = wf_chspec_ctlchan(rxsts->chanspec); + rtht->xchannel_maxpower = (17*2); + + /* add standard MCS */ + rtht->mcs_known = (IEEE80211_RADIOTAP_MCS_HAVE_BW | + IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_FEC | + IEEE80211_RADIOTAP_MCS_HAVE_FMT); + + rtht->mcs_flags = 0; + switch (rxsts->htflags & WL_RXS_HTF_BW_MASK) { + case WL_RXS_HTF_20L: + rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20L; + break; + case WL_RXS_HTF_20U: + rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20U; + break; + case WL_RXS_HTF_40: + rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_40; + break; + default: + rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20; + } + + if (rxsts->htflags & WL_RXS_HTF_SGI) { + rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_SGI; + } + if (rxsts->preamble & WL_RXS_PREAMBLE_HT_GF) { + rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_FMT_GF; + } + if (rxsts->htflags & WL_RXS_HTF_LDPC) { + rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; + } + rtht->mcs_index = rxsts->mcs; + rtht->ampdu_flags = 0; + rtht->ampdu_delim_crc = 0; + + rtht->ampdu_ref_num = rxsts->ampdu_counter; + + if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) && + 
!(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) { + rtht->ampdu_flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; + } else { + rtht->ampdu_flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; + } + return 0; +} + +uint +wl_radiotap_rx_vht(struct dot11_header *mac_header, + wl_rxsts_t *rxsts, ieee80211_radiotap_header_t *rtap_hdr) +{ + int channel_frequency; + uint16 channel_flags; + uint8 flags; + + uint16 rtap_len = LTOH16(rtap_hdr->it_len); + wl_radiotap_vht_t *rtvht = (wl_radiotap_vht_t *)((uint8*)rtap_hdr + rtap_len); + + rtap_len += sizeof(wl_radiotap_vht_t); + rtap_hdr->it_len = HTOL16(rtap_len); + rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_VHT); + + channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts); + channel_flags = wl_radiotap_rx_channel_flags(rxsts); + flags = wl_radiotap_rx_flags(mac_header, rxsts); + + rtvht->basic.tsft_l = HTOL32(rxsts->mactime); + rtvht->basic.tsft_h = 0; + rtvht->basic.flags = flags; + rtvht->basic.channel_freq = (uint16)HTOL16(channel_frequency); + rtvht->basic.channel_flags = HTOL16(channel_flags); + rtvht->basic.signal = (int8)rxsts->signal; + rtvht->basic.noise = (int8)rxsts->noise; + rtvht->basic.antenna = (uint8)rxsts->antenna; + + rtvht->vht_known = (IEEE80211_RADIOTAP_VHT_HAVE_STBC | + IEEE80211_RADIOTAP_VHT_HAVE_TXOP_PS | + IEEE80211_RADIOTAP_VHT_HAVE_GI | + IEEE80211_RADIOTAP_VHT_HAVE_SGI_NSYM_DA | + IEEE80211_RADIOTAP_VHT_HAVE_LDPC_EXTRA | + IEEE80211_RADIOTAP_VHT_HAVE_BF | + IEEE80211_RADIOTAP_VHT_HAVE_BW | + IEEE80211_RADIOTAP_VHT_HAVE_GID | + IEEE80211_RADIOTAP_VHT_HAVE_PAID); + + STATIC_ASSERT(WL_RXS_VHTF_STBC == + IEEE80211_RADIOTAP_VHT_STBC); + STATIC_ASSERT(WL_RXS_VHTF_TXOP_PS == + IEEE80211_RADIOTAP_VHT_TXOP_PS); + STATIC_ASSERT(WL_RXS_VHTF_SGI == + IEEE80211_RADIOTAP_VHT_SGI); + STATIC_ASSERT(WL_RXS_VHTF_SGI_NSYM_DA == + IEEE80211_RADIOTAP_VHT_SGI_NSYM_DA); + STATIC_ASSERT(WL_RXS_VHTF_LDPC_EXTRA == + IEEE80211_RADIOTAP_VHT_LDPC_EXTRA); + STATIC_ASSERT(WL_RXS_VHTF_BF == + IEEE80211_RADIOTAP_VHT_BF); + + rtvht->vht_flags = (uint8)HTOL16(rxsts->vhtflags); + + STATIC_ASSERT(WL_RXS_VHT_BW_20 == + IEEE80211_RADIOTAP_VHT_BW_20); + STATIC_ASSERT(WL_RXS_VHT_BW_40 == + IEEE80211_RADIOTAP_VHT_BW_40); + STATIC_ASSERT(WL_RXS_VHT_BW_20L == + IEEE80211_RADIOTAP_VHT_BW_20L); + STATIC_ASSERT(WL_RXS_VHT_BW_20U == + IEEE80211_RADIOTAP_VHT_BW_20U); + STATIC_ASSERT(WL_RXS_VHT_BW_80 == + IEEE80211_RADIOTAP_VHT_BW_80); + STATIC_ASSERT(WL_RXS_VHT_BW_40L == + IEEE80211_RADIOTAP_VHT_BW_40L); + STATIC_ASSERT(WL_RXS_VHT_BW_40U == + IEEE80211_RADIOTAP_VHT_BW_40U); + STATIC_ASSERT(WL_RXS_VHT_BW_20LL == + IEEE80211_RADIOTAP_VHT_BW_20LL); + STATIC_ASSERT(WL_RXS_VHT_BW_20LU == + IEEE80211_RADIOTAP_VHT_BW_20LU); + STATIC_ASSERT(WL_RXS_VHT_BW_20UL == + IEEE80211_RADIOTAP_VHT_BW_20UL); + STATIC_ASSERT(WL_RXS_VHT_BW_20UU == + IEEE80211_RADIOTAP_VHT_BW_20UU); + + rtvht->vht_bw = rxsts->bw; + + rtvht->vht_mcs_nss[0] = (rxsts->mcs << 4) | + (rxsts->nss & IEEE80211_RADIOTAP_VHT_NSS); + rtvht->vht_mcs_nss[1] = 0; + rtvht->vht_mcs_nss[2] = 0; + rtvht->vht_mcs_nss[3] = 0; + + STATIC_ASSERT(WL_RXS_VHTF_CODING_LDCP == + IEEE80211_RADIOTAP_VHT_CODING_LDPC); + + rtvht->vht_coding = rxsts->coding; + rtvht->vht_group_id = rxsts->gid; + rtvht->vht_partial_aid = HTOL16(rxsts->aid); + + rtvht->ampdu_flags = 0; + rtvht->ampdu_delim_crc = 0; + rtvht->ampdu_ref_num = HTOL32(rxsts->ampdu_counter); + if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) && + !(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) { + rtvht->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_IS_LAST); + } else { + 
rtvht->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN); + } + + return 0; +} + +/* Rx status to radiotap conversion of HE type */ +uint +wl_radiotap_rx_he(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + ieee80211_radiotap_header_t *rtap_hdr) +{ + int channel_frequency; + uint16 channel_flags; + uint8 flags; + uint16 rtap_len = LTOH16(rtap_hdr->it_len); + wl_radiotap_he_t *rthe = (wl_radiotap_he_t *)((uint8*)rtap_hdr + rtap_len); + + rtap_len += sizeof(wl_radiotap_he_t); + rtap_hdr->it_len = HTOL16(rtap_len); + rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_HE); + + channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts); + channel_flags = wl_radiotap_rx_channel_flags(rxsts); + flags = wl_radiotap_rx_flags(mac_header, rxsts); + + rthe->basic.tsft_l = HTOL32(rxsts->mactime); + rthe->basic.tsft_h = 0; + rthe->basic.flags = flags; + rthe->basic.channel_freq = (uint16)HTOL16(channel_frequency); + rthe->basic.channel_flags = HTOL16(channel_flags); + rthe->basic.signal = (int8)rxsts->signal; + rthe->basic.noise = (int8)rxsts->noise; + rthe->basic.antenna = (uint8)rxsts->antenna; + + rthe->ampdu_flags = 0; + rthe->ampdu_delim_crc = 0; + rthe->ampdu_ref_num = HTOL32(rxsts->ampdu_counter); + if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) && + !(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) { + rthe->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_IS_LAST); + } else { + rthe->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN); + } + + rthe->data1 = HTOL16(rxsts->data1); + rthe->data2 = HTOL16(rxsts->data2); + rthe->data3 = HTOL16(rxsts->data3); + rthe->data4 = HTOL16(rxsts->data4); + rthe->data5 = HTOL16(rxsts->data5); + rthe->data6 = HTOL16(rxsts->data6); + + return 0; +} + +/* Rx status to radiotap conversion of EHT type */ +uint +wl_radiotap_rx_eht(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + ieee80211_radiotap_header_t *rtap_hdr) +{ + ASSERT(!"wl_radiotap_rx_eht: not implemented!"); + return 0; +} + +uint16 +wl_rxsts_to_rtap(monitor_pkt_rxsts_t *pkt_rxsts, void *payload, + uint16 len, void* pout, uint16 pad_req) +{ + uint16 rtap_len = 0; + struct dot11_header* mac_header; + uint8* p = payload; + ieee80211_radiotap_header_t* rtap_hdr = (ieee80211_radiotap_header_t*)pout; + wl_rxsts_t* rxsts; + + ASSERT(p && pkt_rxsts); + rxsts = pkt_rxsts->rxsts; + rtap_hdr->it_version = 0; + rtap_hdr->it_pad = 0; + rtap_hdr->it_len = HTOL16(sizeof(*rtap_hdr)); + rtap_hdr->it_present = 0; + bitmap = 0; + +#ifdef MONITOR_DNGL_CONV + if (pad_req) { + radiotap_add_vendor_ns(rtap_hdr); + } +#endif + +#ifdef BCM_MON_QDBM_RSSI + /* if per-core RSSI is present, add vendor element */ + if (pkt_rxsts->corenum != 0) { + radiotap_add_vendor_ns(rtap_hdr); + } +#endif + mac_header = (struct dot11_header *)(p); + + if (rxsts->encoding == WL_RXS_ENCODING_EHT) { + wl_radiotap_rx_eht(mac_header, rxsts, rtap_hdr); + } else if (rxsts->encoding == WL_RXS_ENCODING_HE) { + wl_radiotap_rx_he(mac_header, rxsts, rtap_hdr); + } else if (rxsts->encoding == WL_RXS_ENCODING_VHT) { + wl_radiotap_rx_vht(mac_header, rxsts, rtap_hdr); + } else if (rxsts->encoding == WL_RXS_ENCODING_HT) { + wl_radiotap_rx_ht(mac_header, rxsts, rtap_hdr); + } else { + uint16 mask = ltoh16(mac_header->fc) & FC_KIND_MASK; + if (mask == FC_RTS || mask == FC_CTS) { + radiotap_add_vendor_ns(rtap_hdr); + } + wl_radiotap_rx_legacy(mac_header, rxsts, rtap_hdr); + if (mask == FC_RTS || mask == FC_CTS) { + radiotap_encode_bw_signaling(mask, rxsts, rtap_hdr); + } + } +#ifdef BCM_MON_QDBM_RSSI + /* if per-core RSSI is present, add 
vendor element */ + if (pkt_rxsts->corenum != 0) { + radiotap_encode_multi_rssi(pkt_rxsts, rtap_hdr); + } +#endif +#ifdef MONITOR_DNGL_CONV + if (pad_req) { + radiotap_encode_alignpad(rtap_hdr, pad_req); + } +#endif + rtap_len = LTOH16(rtap_hdr->it_len); + len += rtap_len; + +#ifndef MONITOR_DNGL_CONV + if (len > MAX_MON_PKT_SIZE) { + return 0; + } + /* copy payload */ + if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMSDU_FIRST) && + !(rxsts->nfrmtype & WL_RXS_NFRM_AMSDU_SUB)) { + memcpy((uint8*)pout + rtap_len, (uint8*)p, len - rtap_len); + } +#endif +#ifdef MONITOR_DNGL_CONV + return rtap_len; +#else + return len; +#endif +} + +#ifdef BCM_MON_QDBM_RSSI +void +radiotap_encode_multi_rssi(monitor_pkt_rxsts_t* rxsts, ieee80211_radiotap_header_t *hdr) +{ + uint16 cur_len = LTOH16(hdr->it_len); + uint16 len = ROUNDUP(1 + rxsts->corenum * sizeof(monitor_pkt_rssi_t), 4); + int i = 0; + uint8 *vend_p = (uint8 *)hdr + cur_len; + radiotap_vendor_ns_t *vendor_ns = (radiotap_vendor_ns_t*)vend_p; + memcpy(vendor_ns->vend_oui, brcm_oui, sizeof(vendor_ns->vend_oui)); + vendor_ns->sns = 1; + vendor_ns->skip_len = HTOL16(len); + vend_p += sizeof(*vendor_ns); + vend_p[0] = rxsts->corenum; + for (i = 0; i < rxsts->corenum; i++) { + vend_p[2*i + 1] = rxsts->rxpwr[i].dBm; + vend_p[2*i + 2] = rxsts->rxpwr[i].decidBm; + } + hdr->it_len = HTOL16(cur_len + sizeof(radiotap_vendor_ns_t) + len); +} +#endif /* BCM_CORE_RSSI */ + +#ifdef MONITOR_DNGL_CONV +#define AILIGN_4BYTES (4u) +void +radiotap_encode_alignpad(ieee80211_radiotap_header_t *hdr, uint16 pad_req) +{ + uint16 cur_len = LTOH16(hdr->it_len); + uint8 *vend_p = (uint8 *)hdr + cur_len; + radiotap_vendor_ns_t *vendor_ns = (radiotap_vendor_ns_t*)vend_p; + uint16 len; + uint16 align_pad = 0; + + memcpy(vendor_ns->vend_oui, brcm_oui, sizeof(vendor_ns->vend_oui)); + vendor_ns->sns = WL_RADIOTAP_BRCM_PAD_SNS; + len = cur_len + sizeof(radiotap_vendor_ns_t); + if (len % AILIGN_4BYTES != 0) { + align_pad = (AILIGN_4BYTES - (len % AILIGN_4BYTES)); + } + hdr->it_len = HTOL16(len + pad_req + align_pad); + vendor_ns->skip_len = HTOL16(pad_req + align_pad); +} +#endif /* MONITOR_DNGL_CONV */ + +void +radiotap_encode_bw_signaling(uint16 mask, + struct wl_rxsts* rxsts, ieee80211_radiotap_header_t *hdr) +{ + uint16 cur_len = LTOH16(hdr->it_len); + uint8 *vend_p = (uint8 *)hdr + cur_len; + radiotap_vendor_ns_t *vendor_ns = (radiotap_vendor_ns_t *)vend_p; + wl_radiotap_nonht_vht_t* nonht_vht; + + memcpy(vendor_ns->vend_oui, brcm_oui, sizeof(vendor_ns->vend_oui)); + vendor_ns->sns = 0; + vendor_ns->skip_len = sizeof(wl_radiotap_nonht_vht_t); + nonht_vht = (wl_radiotap_nonht_vht_t *)(vend_p + sizeof(*vendor_ns)); + + /* VHT b/w signalling */ + bzero((uint8 *)nonht_vht, sizeof(wl_radiotap_nonht_vht_t)); + nonht_vht->len = WL_RADIOTAP_NONHT_VHT_LEN; + nonht_vht->flags |= WL_RADIOTAP_F_NONHT_VHT_BW; + nonht_vht->bw = (uint8)rxsts->bw_nonht; + + if (mask == FC_RTS) { + if (rxsts->vhtflags & WL_RXS_VHTF_DYN_BW_NONHT) { + nonht_vht->flags |= WL_RADIOTAP_F_NONHT_VHT_DYN_BW; + } + } + hdr->it_len = HTOL16(cur_len + sizeof(radiotap_vendor_ns_t) + + sizeof(wl_radiotap_nonht_vht_t)); +} + +void +radiotap_add_vendor_ns(ieee80211_radiotap_header_t *hdr) +{ + + uint32 * it_present = &hdr->it_present; + uint16 len = LTOH16(hdr->it_len); + + /* if the last bitmap has a vendor ns, add a new one */ + if (it_present[bitmap] & (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) { + it_present[bitmap] |= 1 << IEEE80211_RADIOTAP_EXT; + bitmap++; + /* align to 8 bytes */ + if (bitmap%2) { + hdr->it_len = HTOL16(len + 
8); + } + it_present[bitmap] = 1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE; + } else { + it_present[bitmap] |= 1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE; + } +} diff --git a/bcmdhd.101.10.361.x/bcmwifi_rates.c b/bcmdhd.101.10.361.x/bcmwifi_rates.c new file mode 100755 index 0000000..c5cebbf --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmwifi_rates.c @@ -0,0 +1,607 @@ +/* + * Common [OS-independent] rate management + * 802.11 Networking Adapter Device Driver. + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. + * + * + * <> + */ + +#include +#ifdef BCMDRIVER +#include +#else +#include +#ifndef ASSERT +#define ASSERT(e) assert(e) +#endif +#ifndef ASSERT_FP +#define ASSERT_FP(e) assert(e) +#endif +#endif /* BCMDRIVER */ +#include <802.11.h> +#include <802.11ax.h> +#include + +#include +#include + +/* TODO: Consolidate rate utility functions from wlc_rate.c and bcmwifi_monitor.c + * into here if they're shared by non wl layer as well... + */ + +/* ============================================ */ +/* Moved from wlc_rate.c */ +/* ============================================ */ + +/* HE mcs info */ +struct ieee_80211_mcs_rate_info { + uint8 constellation_bits; + uint8 coding_q; + uint8 coding_d; + uint8 dcm_capable; /* 1 if dcm capable */ +}; + +static const struct ieee_80211_mcs_rate_info wlc_mcs_info[] = { + { 1, 1, 2, 1 }, /* MCS 0: MOD: BPSK, CR 1/2, dcm capable */ + { 2, 1, 2, 1 }, /* MCS 1: MOD: QPSK, CR 1/2, dcm capable */ + { 2, 3, 4, 0 }, /* MCS 2: MOD: QPSK, CR 3/4, NOT dcm capable */ + { 4, 1, 2, 1 }, /* MCS 3: MOD: 16QAM, CR 1/2, dcm capable */ + { 4, 3, 4, 1 }, /* MCS 4: MOD: 16QAM, CR 3/4, dcm capable */ + { 6, 2, 3, 0 }, /* MCS 5: MOD: 64QAM, CR 2/3, NOT dcm capable */ + { 6, 3, 4, 0 }, /* MCS 6: MOD: 64QAM, CR 3/4, NOT dcm capable */ + { 6, 5, 6, 0 }, /* MCS 7: MOD: 64QAM, CR 5/6, NOT dcm capable */ + { 8, 3, 4, 0 }, /* MCS 8: MOD: 256QAM, CR 3/4, NOT dcm capable */ + { 8, 5, 6, 0 }, /* MCS 9: MOD: 256QAM, CR 5/6, NOT dcm capable */ + { 10, 3, 4, 0 }, /* MCS 10: MOD: 1024QAM, CR 3/4, NOT dcm capable */ + { 10, 5, 6, 0 }, /* MCS 11: MOD: 1024QAM, CR 5/6, NOT dcm capable */ +#ifdef WL11BE + /* TODO: for now EHT shares this table with HE, + * create a new table if needed once we know more + * about EHT rate calculation... + */ + { 12, 3, 4, 0 }, /* MCS 12: MOD: 4096QAM, CR 3/4, NOT dcm capable */ + { 12, 5, 6, 0 }, /* MCS 13: MOD: 4096QAM, CR 5/6, NOT dcm capable */ +#endif +}; + +/* Nsd values Draft0.4 Table 26.63 onwards */ +static const uint wlc_he_nsd[] = { + 234, /* BW20 */ + 468, /* BW40 */ + 980, /* BW80 */ + 1960, /* BW160 */ +#ifdef WL11BE + /* TODO: for now EHT shares this table with HE, + * create a new table if needed once we know more + * about EHT rate calculation... + */ + 2940, /* BW240 */ + 3920 /* BW320 */ +#endif +}; + +/* Nsd values Draft3.3 Table 28-15 */ +static const uint wlc_he_ru_nsd[] = { + 24, /* 26T */ + 48, /* 52T */ + 102, /* 106T */ + 234, /* 242T/BW20 */ + 468, /* 484T/BW40 */ + 980, /* 996T/BW80 */ + 1960, /* 2*996T/BW160 */ +#ifdef WL11BE + /* TODO: for now EHT shares this table with HE, + * create a new table if needed once we know more + * about EHT rate calculation... 
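+ *
+ * As a sanity check of these Nsd tables against the rate math further down
+ * (example values assumed here, not quoted from the spec): a 2-stream HE
+ * BW80 MCS 11 frame has Nsd = 980, 10 constellation bits and 5/6 coding, so
+ * with a 0.8us GI wf_he_mcs_to_rate() works out to ~1200950 Kbps, i.e. the
+ * nominal 1201 Mbps entry of the 11ax rate tables modulo integer rounding.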
+ */ + 2940, /* 3*996T/BW240 */ + 3920 /* 4*996T/BW320 */ +#endif +}; + +#define HE_RU_TO_NSD(ru_idx) \ + (ru_idx < ARRAYSIZE(wlc_he_ru_nsd)) ? \ + wlc_he_ru_nsd[ru_idx] : 0 + +/* sym_len = 12.8 us. For calculation purpose, *10 */ +#define HE_SYM_LEN_FACTOR (128) + +/* GI values = 0.8 , 1.6 or 3.2 us. For calculation purpose, *10 */ +#define HE_GI_800us_FACTOR (8) +#define HE_GI_1600us_FACTOR (16) +#define HE_GI_3200us_FACTOR (32) + +/* To avoid ROM invalidation use the old macro as is... */ +#ifdef WL11BE +#define HE_BW_TO_NSD(bwi) \ + ((bwi) > 0u && (bwi) <= ARRAYSIZE(wlc_he_nsd)) ? \ + wlc_he_nsd[(bwi) - 1u] : 0u +#else +#define HE_BW_TO_NSD(bwi) \ + ((bwi) > 0 && ((bwi) << WL_RSPEC_BW_SHIFT) <= WL_RSPEC_BW_160MHZ) ? \ + wlc_he_nsd[(bwi)-1] : 0 +#endif /* WL11BE */ + +#define ksps 250 /* kilo symbols per sec, 4 us sym */ + +#ifdef WL11BE +/* Table "wlc_nsd" is derived from HT and VHT #defines below, but extended for HE + * for rate calculation purpose at a given NSS and bandwidth combination. + * + * It should only be used where the caller wants to know the relative rate in kbps + * for a different NSS and bandwidth combination at a given mcs, e.g. in fallback + * rate search. It must not be used to calculate the absolute rate, as the result + * would not agree with what the spec says. + * + * See Std 802.11-2016 "Table 21-61 VHT-MCSs for optional 160 MHz and 80+80 MHz, NSS = 8" + * for VHT, and P802.11ax/D6.0 "Table 27-111 HE-MCSs for 2x996-tone RU, NSS = 8" for HE, + * for the 160MHz bandwidth rate comparison. + * + * It's again extended for EHT 240/320MHz bandwidth, for the same purpose. + */ +static const uint16 wlc_nsd[] = { + 52, /* 20MHz */ + 108, /* 40MHz */ + 234, /* 80MHz */ + 468, /* 160MHz */ + 702, /* 240MHz */ + 936, /* 320MHz */ +}; + +#define BW_TO_NSD(bwi) \ + ((bwi) > 0u && (bwi) <= ARRAYSIZE(wlc_nsd)) ? \ + wlc_nsd[(bwi) - 1u] : 0u + +static uint +wf_nsd2ndbps(uint mcs, uint nss, uint nsd, bool dcm) +{ + uint Ndbps; + + /* multiply number of spatial streams, + * bits per number from the constellation, + * and coding quotient + */ + Ndbps = nsd * nss * + wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits; + + /* adjust for the coding rate divisor */ + Ndbps = Ndbps / wlc_mcs_info[mcs].coding_d; + + /* take care of dcm: dcm divides R by 2. If not dcm mcs, ignore */ + if (dcm) { + if (wlc_mcs_info[mcs].dcm_capable) { + Ndbps >>= 1u; + } + } + + return Ndbps; +} +#else +/* for HT and VHT? */ +#define Nsd_20MHz 52 +#define Nsd_40MHz 108 +#define Nsd_80MHz 234 +#define Nsd_160MHz 468 +#endif /* WL11BE */ + +uint +wf_he_mcs_to_Ndbps(uint mcs, uint nss, uint bw, bool dcm) +{ + uint Nsd; + uint Ndbps; + + /* find the number of complex numbers per symbol */ + Nsd = HE_BW_TO_NSD(bw >> WL_RSPEC_BW_SHIFT); + +#ifdef WL11BE + Ndbps = wf_nsd2ndbps(mcs, nss, Nsd, dcm); +#else + /* multiply number of spatial streams, + * bits per number from the constellation, + * and coding quotient + */ + Ndbps = Nsd * nss * + wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits; + + /* adjust for the coding rate divisor */ + Ndbps = Ndbps / wlc_mcs_info[mcs].coding_d; + + /* take care of dcm: dcm divides R by 2.
If not dcm mcs, ignore */ + if (dcm) { + if (wlc_mcs_info[mcs].dcm_capable) { + Ndbps >>= 1; + } + } +#endif /* WL11BE */ + + return Ndbps; +} + +uint32 +wf_he_mcs_ru_to_ndbps(uint8 mcs, uint8 nss, bool dcm, uint8 ru_index) +{ + uint32 nsd; + uint32 ndbps; + + /* find the number of complex numbers per symbol */ + nsd = HE_RU_TO_NSD(ru_index); + +#ifdef WL11BE + ndbps = wf_nsd2ndbps(mcs, nss, nsd, dcm); +#else + /* multiply number of spatial streams, + * bits per number from the constellation, + * and coding quotient + * Ndbps = Nss x Nsd x (Nbpscs x R) x (DCM/2) + */ + ndbps = nsd * nss * + wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits; + + /* adjust for the coding rate divisor */ + ndbps = ndbps / wlc_mcs_info[mcs].coding_d; + + /* take care of dcm: dcm divides R by 2. If not dcm mcs, ignore */ + if (dcm && wlc_mcs_info[mcs].dcm_capable) { + ndbps >>= 1; + } +#endif /* WL11BE */ + return ndbps; +} + +/** + * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi/dcm combination. + * 'mcs' : a *single* spatial stream MCS (11ax) + * formula as per http: + * WLAN&preview=/323036249/344457953/11ax_rate_table.xlsx + * Symbol length = 12.8 usec [given as sym_len/10 below] + * GI value = 0.8 or 1.6 or 3.2 usec [given as GI_value/10 below] + * rate (Kbps) = (Nsd * Nbpscs * nss * (coding_q/coding_d) * 1000) / ((sym_len/10) + (GI_value/10)) + * Note that, for calculation purpose, following is used. [to be careful with overflows] + * rate (Kbps) = (Nsd * Nbpscs * nss * (coding_q/coding_d) * 1000) / ((sym_len + GI_value) / 10) + * rate (Kbps) = (Nsd * Nbpscs * nss * (coding_q/coding_d) * 1000) / (sym_len + GI_value) * 10 + */ +uint +wf_he_mcs_to_rate(uint mcs, uint nss, uint bw, uint gi, bool dcm) +{ + uint rate; + uint rate_deno; + + rate = HE_BW_TO_NSD(bw >> WL_RSPEC_BW_SHIFT); + +#ifdef WL11BE + rate = wf_nsd2ndbps(mcs, nss, rate, dcm); +#else + /* Nbpscs: multiply by bits per number from the constellation in use */ + rate = rate * wlc_mcs_info[mcs].constellation_bits; + + /* Nss: adjust for the number of spatial streams */ + rate = rate * nss; + + /* R: adjust for the coding rate given as a quotient and divisor */ + rate = (rate * wlc_mcs_info[mcs].coding_q) / wlc_mcs_info[mcs].coding_d; + + /* take care of dcm: dcm divides R by 2. If not dcm mcs, ignore */ + if (dcm) { + if (wlc_mcs_info[mcs].dcm_capable) { + rate >>= 1; + } + } +#endif /* WL11BE */ + + /* add sym len factor */ + rate_deno = HE_SYM_LEN_FACTOR; + + /* get GI for denominator */ + if (HE_IS_GI_3_2us(gi)) { + rate_deno += HE_GI_3200us_FACTOR; + } else if (HE_IS_GI_1_6us(gi)) { + rate_deno += HE_GI_1600us_FACTOR; + } else { + /* assuming HE_GI_0_8us */ + rate_deno += HE_GI_800us_FACTOR; + } + + /* as per above formula */ + rate *= 1000; /* factor of 10. *100 to accommodate 2 places */ + rate /= rate_deno; + rate *= 10; /* *100 was already done above. Splitting is done to avoid overflow. */ + + return rate; +} + +uint +wf_mcs_to_Ndbps(uint mcs, uint nss, uint bw) +{ + uint Nsd; + uint Ndbps; + + /* This calculation works for 11n HT and 11ac VHT if the HT mcs values + * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8. 
+ * That is, HT MCS 23 is a base MCS = 7, Nss = 3 + */ + + /* find the number of complex numbers per symbol */ +#ifdef WL11BE + Nsd = BW_TO_NSD(bw >> WL_RSPEC_BW_SHIFT); + + Ndbps = wf_nsd2ndbps(mcs, nss, Nsd, FALSE); +#else + if (bw == WL_RSPEC_BW_20MHZ) { + Nsd = Nsd_20MHz; + } else if (bw == WL_RSPEC_BW_40MHZ) { + Nsd = Nsd_40MHz; + } else if (bw == WL_RSPEC_BW_80MHZ) { + Nsd = Nsd_80MHz; + } else if (bw == WL_RSPEC_BW_160MHZ) { + Nsd = Nsd_160MHz; + } else { + Nsd = 0; + } + + /* multiply number of spatial streams, + * bits per number from the constellation, + * and coding quotient + */ + Ndbps = Nsd * nss * + wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits; + + /* adjust for the coding rate divisor */ + Ndbps = Ndbps / wlc_mcs_info[mcs].coding_d; +#endif /* WL11BE */ + + return Ndbps; +} + +/** + * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi combination. + * 'mcs' : a *single* spatial stream MCS (11n or 11ac) + */ +uint +wf_mcs_to_rate(uint mcs, uint nss, uint bw, int sgi) +{ + uint rate; + + if (mcs == 32) { + /* just return fixed values for mcs32 instead of trying to parametrize */ + rate = (sgi == 0) ? 6000 : 6778; + } else { + /* This calculation works for 11n HT, 11ac VHT and 11ax HE if the HT mcs values + * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8. + * That is, HT MCS 23 is a base MCS = 7, Nss = 3 + */ + +#if defined(WLPROPRIETARY_11N_RATES) + switch (mcs) { + case 87: + mcs = 8; /* MCS 8: MOD: 256QAM, CR 3/4 */ + break; + case 88: + mcs = 9; /* MCS 9: MOD: 256QAM, CR 5/6 */ + break; + default: + break; + } +#endif /* WLPROPRIETARY_11N_RATES */ + +#ifdef WL11BE + rate = wf_mcs_to_Ndbps(mcs, nss, bw); +#else + /* find the number of complex numbers per symbol */ + if (RSPEC_IS20MHZ(bw)) { + /* 4360 TODO: eliminate Phy const in rspec bw, then just compare + * as in 80 and 160 case below instead of RSPEC_IS20MHZ(bw) + */ + rate = Nsd_20MHz; + } else if (RSPEC_IS40MHZ(bw)) { + /* 4360 TODO: eliminate Phy const in rspec bw, then just compare + * as in 80 and 160 case below instead of RSPEC_IS40MHZ(bw) + */ + rate = Nsd_40MHz; + } else if (bw == WL_RSPEC_BW_80MHZ) { + rate = Nsd_80MHz; + } else if (bw == WL_RSPEC_BW_160MHZ) { + rate = Nsd_160MHz; + } else { + rate = 0; + } + + /* multiply by bits per number from the constellation in use */ + rate = rate * wlc_mcs_info[mcs].constellation_bits; + + /* adjust for the number of spatial streams */ + rate = rate * nss; + + /* adjust for the coding rate given as a quotient and divisor */ + rate = (rate * wlc_mcs_info[mcs].coding_q) / wlc_mcs_info[mcs].coding_d; +#endif /* WL11BE */ + + /* multiply by Kilo symbols per sec to get Kbps */ + rate = rate * ksps; + + /* adjust the symbols per sec for SGI + * symbol duration is 4 us without SGI, and 3.6 us with SGI, + * so ratio is 10 / 9 + */ + if (sgi) { + /* add 4 for rounding of division by 9 */ + rate = ((rate * 10) + 4) / 9; + } + } + + return rate; +} /* wf_mcs_to_rate */ + +/* This function needs update to handle MU frame PLCP as well (MCS is conveyed via VHT-SIGB + * field in case of MU frames). Currently this support needs to be added in uCode to communicate + * MCS information for an MU frame + * + * For VHT frame: + * bit 0-3 mcs index + * bit 6-4 nsts for VHT + * bit 7: 1 for VHT + * Note: bit 7 is used to indicate to the rate sel the mcs is a non HT mcs! 
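+ * (Worked example with assumed field values: an SU VHT PLCP carrying MCS 7
+ * and NSTS 2 makes this return 7 | (2 << WL_RSPEC_VHT_NSS_SHIFT) |
+ * WF_NON_HT_MCS, i.e. the NSS 2 / MCS 7 pair with the bit-7 marker set.)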
+ * + * Essentially it's the NSS:MCS portions of the rspec + */ +uint8 +wf_vht_plcp_to_rate(uint8 *plcp) +{ + uint8 rate, gid; + uint nss; + uint32 plcp0 = plcp[0] + (plcp[1] << 8); /* don't need plcp[2] */ + + gid = (plcp0 & VHT_SIGA1_GID_MASK) >> VHT_SIGA1_GID_SHIFT; + if (gid > VHT_SIGA1_GID_TO_AP && gid < VHT_SIGA1_GID_NOT_TO_AP) { + /* for MU packet we hacked Signal Tail field in VHT-SIG-A2 to save nss and mcs, + * copy from murate in d11 rx header. + * nss = bit 18:19 (for 11ac 2 bits to indicate maximum 4 nss) + * mcs = 20:23 + */ + rate = (plcp[5] & 0xF0) >> 4; + nss = ((plcp[5] & 0x0C) >> 2) + 1; + } else { + rate = (plcp[3] >> VHT_SIGA2_MCS_SHIFT); + nss = ((plcp0 & VHT_SIGA1_NSTS_SHIFT_MASK_USER0) >> + VHT_SIGA1_NSTS_SHIFT) + 1; + if (plcp0 & VHT_SIGA1_STBC) + nss = nss >> 1; + } + rate |= ((nss << WL_RSPEC_VHT_NSS_SHIFT) | WF_NON_HT_MCS); + + return rate; +} + +/** + * Function for computing NSS:MCS from HE SU PLCP or + * MCS:LTF-GI from HE MU PLCP + * + * based on rev3.10 : + * https://docs.google.com/spreadsheets/d/ + * 1eP6ZCRrtnF924ds1R-XmbcH0IdQ0WNJpS1-FHmWeb9g/edit#gid=1492656555 + * + * For HE SU frame: + * bit 0-3 mcs index + * bit 6-4 nsts for HE + * bit 7: 1 for HE + * Note: bit 7 is used to indicate to the rate sel the mcs is a non HT mcs! + * Essentially it's the NSS:MCS portions of the rspec + * + * For HE MU frame: + * bit 0-3 mcs index + * bit 4-5 LTF-GI value + * bit 6 STBC + * Essentially it's the MCS and LTF-GI portion of the rspec + */ +/* Macros to be used for calculating rate from PLCP */ +#define HE_SU_PLCP2RATE_MCS_MASK 0x0F +#define HE_SU_PLCP2RATE_MCS_SHIFT 0 +#define HE_SU_PLCP2RATE_NSS_MASK 0x70 +#define HE_SU_PLCP2RATE_NSS_SHIFT 4 +#define HE_MU_PLCP2RATE_LTF_GI_MASK 0x30 +#define HE_MU_PLCP2RATE_LTF_GI_SHIFT 4 +#define HE_MU_PLCP2RATE_STBC_MASK 0x40 +#define HE_MU_PLCP2RATE_STBC_SHIFT 6 + +uint8 +wf_he_plcp_to_rate(uint8 *plcp, bool is_mu) +{ + uint8 rate = 0; + uint8 nss = 0; + uint32 plcp0 = 0; + uint32 plcp1 = 0; + uint8 he_ltf_gi; + uint8 stbc; + + ASSERT(plcp); + + BCM_REFERENCE(nss); + BCM_REFERENCE(he_ltf_gi); + + plcp0 = ((plcp[3] << 24) | (plcp[2] << 16) | (plcp[1] << 8) | plcp[0]); + plcp1 = ((plcp[5] << 8) | plcp[4]); + + if (!is_mu) { + /* For SU frames return rate in MCS:NSS format */ + rate = ((plcp0 & HE_SU_RE_SIGA_MCS_MASK) >> HE_SU_RE_SIGA_MCS_SHIFT); + nss = ((plcp0 & HE_SU_RE_SIGA_NSTS_MASK) >> HE_SU_RE_SIGA_NSTS_SHIFT) + 1; + rate |= ((nss << HE_SU_PLCP2RATE_NSS_SHIFT) | WF_NON_HT_MCS); + } else { + /* For MU frames return rate in MCS:LTF-GI format */ + rate = (plcp0 & HE_MU_SIGA_SIGB_MCS_MASK) >> HE_MU_SIGA_SIGB_MCS_SHIFT; + he_ltf_gi = (plcp0 & HE_MU_SIGA_GI_LTF_MASK) >> HE_MU_SIGA_GI_LTF_SHIFT; + stbc = (plcp1 & HE_MU_SIGA_STBC_MASK) >> HE_MU_SIGA_STBC_SHIFT; + + /* LTF-GI shall take the same position as NSS */ + rate |= (he_ltf_gi << HE_MU_PLCP2RATE_LTF_GI_SHIFT); + + /* STBC needs to be filled in bit 6 */ + rate |= (stbc << HE_MU_PLCP2RATE_STBC_SHIFT); + } + + return rate; +} + +/** + * Function for computing NSS:MCS from EHT SU PLCP or + * MCS:LTF-GI from EHT MU PLCP + * + * TODO: add link to the HW spec. + * FIXME: do we really need to support mu? 
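+ * (Note: the body below is still a stub that asserts and returns 0, so EHT
+ * rx paths should not rely on it yet; the HE helper above is the working
+ * reference for how the returned rate byte is packed.)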
+ */ +uint8 +wf_eht_plcp_to_rate(uint8 *plcp, bool is_mu) +{ + BCM_REFERENCE(plcp); + BCM_REFERENCE(is_mu); + ASSERT(!"wf_eht_plcp_to_rate: not implemented!"); + return 0; +} + +/* ============================================ */ +/* Moved from wlc_rate_def.c */ +/* ============================================ */ + +/** + * Some functions require a single stream MCS as an input parameter. Given an MCS, this function + * returns the single spatial stream MCS equivalent. + */ +uint8 +wf_get_single_stream_mcs(uint mcs) +{ + if (mcs < 32) { + return mcs % 8; + } + switch (mcs) { + case 32: + return 32; + case 87: + case 99: + case 101: + return 87; /* MCS 87: SS 1, MOD: 256QAM, CR 3/4 */ + default: + return 88; /* MCS 88: SS 1, MOD: 256QAM, CR 5/6 */ + } +} + +/* ============================================ */ +/* Moved from wlc_phy_iovar.c */ +/* ============================================ */ + +const uint8 plcp_ofdm_rate_tbl[] = { + DOT11_RATE_48M, /* 8: 48Mbps */ + DOT11_RATE_24M, /* 9: 24Mbps */ + DOT11_RATE_12M, /* A: 12Mbps */ + DOT11_RATE_6M, /* B: 6Mbps */ + DOT11_RATE_54M, /* C: 54Mbps */ + DOT11_RATE_36M, /* D: 36Mbps */ + DOT11_RATE_18M, /* E: 18Mbps */ + DOT11_RATE_9M /* F: 9Mbps */ +}; diff --git a/bcmdhd.101.10.361.x/bcmwifi_rspec.c b/bcmdhd.101.10.361.x/bcmwifi_rspec.c new file mode 100755 index 0000000..e3c5957 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmwifi_rspec.c @@ -0,0 +1,274 @@ +/* + * Common [OS-independent] rate management + * 802.11 Networking Adapter Device Driver. + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. + * + * + * <> + */ + +#include +#include +#include +#include <802.11ax.h> + +#include +#include + +/* TODO: Consolidate rspec utility functions from wlc_rate.c and bcmwifi_monitor.c + * into here if they're shared by non wl layer as well... + */ + +/* ============================================ */ +/* Moved from wlc_rate.c */ +/* ============================================ */ + +/** + * Returns the rate in [Kbps] units. + */ +static uint +wf_he_rspec_to_rate(ratespec_t rspec, uint max_mcs, uint max_nss) +{ + uint mcs = (rspec & WL_RSPEC_HE_MCS_MASK); + uint nss = (rspec & WL_RSPEC_HE_NSS_MASK) >> WL_RSPEC_HE_NSS_SHIFT; + bool dcm = (rspec & WL_RSPEC_DCM) != 0; + uint bw = RSPEC_BW(rspec); + uint gi = RSPEC_HE_LTF_GI(rspec); + + ASSERT(mcs <= max_mcs); + ASSERT(nss <= max_nss); + + if (mcs > max_mcs) { + return 0; + } + BCM_REFERENCE(max_nss); + + return wf_he_mcs_to_rate(mcs, nss, bw, gi, dcm); +} /* wf_he_rspec_to_rate */ + +/* take a well formed ratespec_t arg and return phy rate in [Kbps] units. + * 'rsel' indicates if the call comes from rate selection. 
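+ * A worked example (hypothetical rspec): an HT rspec carrying MCS 15 is
+ * decomposed below into nss = 1 + (15 / 8) = 2 and mcs = 15 % 8 = 7, so at
+ * 40MHz with SGI it resolves to wf_mcs_to_rate(7, 2, WL_RSPEC_BW_40MHZ, 1)
+ * = 300000 Kbps.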
+ */ +static uint +_wf_rspec_to_rate(ratespec_t rspec, bool rsel) +{ + uint rate = (uint)(-1); + + if (RSPEC_ISLEGACY(rspec)) { + rate = 500 * RSPEC2RATE(rspec); + } else if (RSPEC_ISHT(rspec)) { + uint mcs = (rspec & WL_RSPEC_HT_MCS_MASK); + + ASSERT_FP(mcs <= 32 || IS_PROPRIETARY_11N_MCS(mcs)); + + if (mcs == 32) { + rate = wf_mcs_to_rate(mcs, 1, WL_RSPEC_BW_40MHZ, RSPEC_ISSGI(rspec)); + } else { +#if defined(WLPROPRIETARY_11N_RATES) + uint nss = GET_11N_MCS_NSS(mcs); + mcs = wf_get_single_stream_mcs(mcs); +#else /* this ifdef prevents ROM abandons */ + uint nss = 1 + (mcs / 8); + mcs = mcs % 8; +#endif /* WLPROPRIETARY_11N_RATES */ + + rate = wf_mcs_to_rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec)); + } + } else if (RSPEC_ISVHT(rspec)) { + uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK); + uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT; + + if (rsel) { + rate = wf_mcs_to_rate(mcs, nss, RSPEC_BW(rspec), 0); + } else { + ASSERT_FP(mcs <= WLC_MAX_VHT_MCS); + ASSERT_FP(nss <= 8); + + rate = wf_mcs_to_rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec)); + } + } else if (RSPEC_ISHE(rspec)) { + rate = wf_he_rspec_to_rate(rspec, WLC_MAX_HE_MCS, 8); + } else if (RSPEC_ISEHT(rspec)) { + rate = wf_he_rspec_to_rate(rspec, WLC_MAX_EHT_MCS, 16); + } else { + ASSERT(0); + } + + return (rate == 0) ? (uint)(-1) : rate; +} + +/* take a well formed ratespec_t 'rspec' and return phy rate in [Kbps] units */ +uint +wf_rspec_to_rate(ratespec_t rspec) +{ + return _wf_rspec_to_rate(rspec, FALSE); +} + +/* take a well formed ratespec_t 'rspec' and return phy rate in [Kbps] units, + * FOR RATE SELECTION ONLY, WHICH USES LEGACY, HT, AND VHT RATES, AND VHT MCS + * COULD BE BIGGER THAN WLC_MAX_VHT_MCS! + */ +uint +wf_rspec_to_rate_rsel(ratespec_t rspec) +{ + return _wf_rspec_to_rate(rspec, TRUE); +} + +#ifdef BCMDBG +/* Return the rate in 500Kbps units if the rspec is legacy rate, assert otherwise */ +uint +wf_rspec_to_rate_legacy(ratespec_t rspec) +{ + ASSERT(RSPEC_ISLEGACY(rspec)); + + return rspec & WL_RSPEC_LEGACY_RATE_MASK; +} +#endif + +/** + * Function for computing RSPEC from EHT PLCP + * + * TODO: add link to the HW spec. 
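+ * (Until then this is a stub that asserts. For HE frames the working chain
+ * is rspec = wf_he_plcp_to_rspec(plcp) followed by wf_rspec_to_rate(rspec)
+ * for the rate in Kbps; a sketch of intended usage, since the callers live
+ * outside this file.)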
+ */ +ratespec_t +wf_eht_plcp_to_rspec(uint8 *plcp) +{ + ASSERT(!"wf_eht_plcp_to_rspec: not implemented!"); + return 0; +} + +/** + * Function for computing RSPEC from HE PLCP + * + * based on rev3.10 : + * https://docs.google.com/spreadsheets/d/ + * 1eP6ZCRrtnF924ds1R-XmbcH0IdQ0WNJpS1-FHmWeb9g/edit#gid=1492656555 + */ +ratespec_t +wf_he_plcp_to_rspec(uint8 *plcp) +{ + uint8 rate; + uint8 nss; + uint8 bw; + uint8 gi; + ratespec_t rspec; + + /* HE plcp - 6 B */ + uint32 plcp0; + uint16 plcp1; + + ASSERT(plcp); + + plcp0 = ((plcp[3] << 24) | (plcp[2] << 16) | (plcp[1] << 8) | plcp[0]); + plcp1 = ((plcp[5] << 8) | plcp[4]); + + /* TBD: only SU supported now */ + rate = (plcp0 & HE_SU_RE_SIGA_MCS_MASK) >> HE_SU_RE_SIGA_MCS_SHIFT; + /* PLCP contains (NSTS - 1) while RSPEC stores NSTS */ + nss = ((plcp0 & HE_SU_RE_SIGA_NSTS_MASK) >> HE_SU_RE_SIGA_NSTS_SHIFT) + 1; + rspec = HE_RSPEC(rate, nss); + + /* GI info comes from CP/LTF */ + gi = (plcp0 & HE_SU_RE_SIGA_GI_LTF_MASK) >> HE_SU_RE_SIGA_GI_LTF_SHIFT; + rspec |= HE_GI_TO_RSPEC(gi); + + /* b19-b20 of plcp indicate bandwidth in the format (2-bit): + * 0 for 20M, 1 for 40M, 2 for 80M, and 3 for 80p80/160M + * SW store this BW in rspec format (3-bit): + * 1 for 20M, 2 for 40M, 3 for 80M, and 4 for 80p80/160M + */ + bw = ((plcp0 & HE_SU_SIGA_BW_MASK) >> HE_SU_SIGA_BW_SHIFT) + 1; + rspec |= (bw << WL_RSPEC_BW_SHIFT); + + if (plcp1 & HE_SU_RE_SIGA_BEAMFORM_MASK) + rspec |= WL_RSPEC_TXBF; + if (plcp1 & HE_SU_RE_SIGA_CODING_MASK) + rspec |= WL_RSPEC_LDPC; + if (plcp1 & HE_SU_RE_SIGA_STBC_MASK) + rspec |= WL_RSPEC_STBC; + if (plcp0 & HE_SU_RE_SIGA_DCM_MASK) + rspec |= WL_RSPEC_DCM; + + return rspec; +} + +ratespec_t +wf_vht_plcp_to_rspec(uint8 *plcp) +{ + uint8 rate; + uint vht_sig_a1, vht_sig_a2; + ratespec_t rspec; + + ASSERT(plcp); + + rate = wf_vht_plcp_to_rate(plcp) & ~WF_NON_HT_MCS; + + vht_sig_a1 = plcp[0] | (plcp[1] << 8); + vht_sig_a2 = plcp[3] | (plcp[4] << 8); + + rspec = VHT_RSPEC((rate & WL_RSPEC_VHT_MCS_MASK), + (rate >> WL_RSPEC_VHT_NSS_SHIFT)); +#if ((((VHT_SIGA1_20MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_20MHZ) || \ + (((VHT_SIGA1_40MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_40MHZ) || \ + (((VHT_SIGA1_80MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_80MHZ) || \ + (((VHT_SIGA1_160MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_160MHZ)) +#error "VHT SIGA BW mapping to RSPEC BW needs correction" +#endif + rspec |= ((vht_sig_a1 & VHT_SIGA1_160MHZ_VAL) + 1) << WL_RSPEC_BW_SHIFT; + if (vht_sig_a1 & VHT_SIGA1_STBC) + rspec |= WL_RSPEC_STBC; + if (vht_sig_a2 & VHT_SIGA2_GI_SHORT) + rspec |= WL_RSPEC_SGI; + if (vht_sig_a2 & VHT_SIGA2_CODING_LDPC) + rspec |= WL_RSPEC_LDPC; + + return rspec; +} + +ratespec_t +wf_ht_plcp_to_rspec(uint8 *plcp) +{ + return HT_RSPEC(plcp[0] & MIMO_PLCP_MCS_MASK); +} + +/* ============================================ */ +/* Moved from wlc_rate_def.c */ +/* ============================================ */ + +/** + * Rate info per rate: tells for *pre* 802.11n rates whether a given rate is OFDM or not and its + * phy_rate value. Table index is a rate in [500Kbps] units, from 0 to 54Mbps. + * Contents of a table element: + * d[7] : 1=OFDM rate, 0=DSSS/CCK rate + * d[3:0] if DSSS/CCK rate: + * index into the 'M_RATE_TABLE_B' table maintained by ucode in shm + * d[3:0] if OFDM rate: encode rate per 802.11a-1999 sec 17.3.4.1, with lsb transmitted first. 
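+ *	(e.g. in the table below rate_info[12] = 0x8b: bit 7 marks OFDM and the
+ *	low nibble 0xb is the 802.11a rate code for 6Mbps, while rate_info[11] =
+ *	0x37 is the DSSS/CCK 5.5Mbps entry)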
+ * index into the 'M_RATE_TABLE_A' table maintained by ucode in shm + */ +/* Note: make this table 128 elements so the result of (rspec & 0x7f) can be safely + * used as the index into this table... + */ +const uint8 rate_info[128] = { + /* 0 1 2 3 4 5 6 7 8 9 */ +/* 0 */ 0x00, 0x00, 0x0a, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 10 */ 0x00, 0x37, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x00, +/* 20 */ 0x00, 0x00, 0x6e, 0x00, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00, +/* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x00, +/* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 70 */ 0x00, 0x00, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 80 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 90 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, +/* 100 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, +/* ------------- guard ------------ */ 0x00, +/* 110 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; diff --git a/bcmdhd.101.10.361.x/bcmwpa.c b/bcmdhd.101.10.361.x/bcmwpa.c new file mode 100755 index 0000000..62738c1 --- /dev/null +++ b/bcmdhd.101.10.361.x/bcmwpa.c @@ -0,0 +1,2648 @@ +/* + * bcmwpa.c - shared WPA-related functions + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. 
+ * + * + * <> + */ + +/* include wl driver config file if this file is compiled for driver */ +#ifdef BCMDRIVER +#include +/* HACK: this case for external supplicant use */ +#else +#include +#if defined(BCMEXTSUP) +#include +#else +#ifndef ASSERT +#define ASSERT(exp) +#endif +#endif /* BCMEXTSUP */ +#endif /* BCMDRIVER */ + +#include +#include +#include <802.11.h> +#include +#include <802.11r.h> + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#ifdef WL_OCV +#include +#endif /* WL_OCV */ + +#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) || \ + defined(WL_OKC) || defined(WLTDLS) || defined(GTKOE) || defined(WLHOSTFBT) +#ifdef WLHOSTFBT +#include +#endif +#endif /* defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) || + * defined(WL_OKC) || defined(WLTDLS) || defined(GTKOE) || defined(WLHOSTFBT) + */ + +/* prefix strings */ +#define PMK_NAME_PFX "PMK Name" +#define FT_PTK_PFX "FT-PTK" +#define FT_R0_PFX "FT-R0" +#define FT_R0N_PFX "FT-R0N" +#define FT_R1_PFX "FT-R1" +#define FT_R1N_PFX "FT-R1N" +#define WPA_PTK_PFX "Pairwise key expansion" +#define TDLS_PMK_PFX "TDLS PMK" +/* end prefix strings */ + +#ifndef BIT +#define BIT(x) (1 << (x)) +#endif + +#define PRF_PREFIXES_NUM 5u + +typedef struct key_length_entry { + uint8 suite; + uint8 len; +} key_length_entry_t; + +/* EAPOL key(PMK/KCK/KEK/TK) length lookup tables */ +static const key_length_entry_t eapol_pmk_len[] = { + {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_PMK_SHA384_LEN}, + {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_PMK_SHA384_LEN}, + {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_PMK_SHA384_LEN}, + {0u, EAPOL_WPA_PMK_DEFAULT_LEN} /* default */ +}; + +static const key_length_entry_t eapol_kck_mic_len[] = { + {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_KCK_MIC_SHA384_LEN}, + {RSN_AKM_FILS_SHA256, 0u}, + {RSN_AKM_FILS_SHA384, 0u}, + {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_KCK_MIC_DEFAULT_LEN}, + {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_KCK2_SHA384_LEN}, + {RSN_AKM_OWE, EAPOL_WPA_KCK_MIC_DEFAULT_LEN}, + {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_KCK_MIC_SHA384_LEN}, + {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_KCK_MIC_SHA384_LEN}, + {0u, EAPOL_WPA_KCK_MIC_DEFAULT_LEN} /* default */ +}; + +static const key_length_entry_t eapol_kck_len[] = { + {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_KCK_SHA384_LEN}, + {RSN_AKM_FILS_SHA256, 0u}, + {RSN_AKM_FILS_SHA384, 0u}, + {RSN_AKM_FBT_SHA256_FILS, 0u}, + {RSN_AKM_FBT_SHA384_FILS, 0u}, + {RSN_AKM_OWE, EAPOL_WPA_KCK_DEFAULT_LEN}, + {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_KCK_SHA384_LEN}, + {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_KCK_SHA384_LEN}, + {0u, EAPOL_WPA_KCK_DEFAULT_LEN} /* default */ +}; + +static const key_length_entry_t eapol_kek_len[] = { + {RSN_AKM_FILS_SHA384, EAPOL_WPA_ENCR_KEY_MAX_LEN}, + {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_ENCR_KEY_MAX_LEN}, + {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2}, + {RSN_AKM_FILS_SHA256, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2}, + {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2}, + {RSN_AKM_OWE, EAPOL_WPA_ENCR_KEY_DEFAULT_LEN}, + {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2}, + {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2}, + {0u, EAPOL_WPA_ENCR_KEY_DEFAULT_LEN} /* default */ +}; + +static const key_length_entry_t eapol_tk_len[] = { + {WPA_CIPHER_CCMP_256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN}, + {WPA_CIPHER_AES_GCM256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN}, + {WPA_CIPHER_BIP_GMAC_256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN}, + {WPA_CIPHER_BIP_CMAC_256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN}, + {WPA_CIPHER_AES_CCM, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN / 2}, 
+ {WPA_CIPHER_AES_GCM, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN / 2}, + {WPA_CIPHER_BIP_GMAC_128, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN / 2}, + {WPA_CIPHER_TKIP, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN}, + {0u, 0u} /* default */ +}; + +#if defined(WL_FILS) && defined(WLFBT) +static const key_length_entry_t eapol_kck2_len[] = { + {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_KCK2_SHA256_LEN}, + {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_KCK2_SHA384_LEN}, + {0u, 0u} /* default */ +}; + +static const key_length_entry_t eapol_kek2_len[] = { + {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_KEK2_SHA256_LEN}, + {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_KEK2_SHA384_LEN}, + {0u, 0u} /* default */ +}; +#endif /* WL_FILS && WLFBT */ + +typedef struct key_length_lookup { + const eapol_key_type_t key; + const key_length_entry_t *key_entry; +} key_length_lookup_t; + +static const key_length_lookup_t eapol_key_lookup_tbl[] = { + {EAPOL_KEY_PMK, eapol_pmk_len}, + {EAPOL_KEY_KCK_MIC, eapol_kck_mic_len}, + {EAPOL_KEY_KCK, eapol_kck_len}, + {EAPOL_KEY_KEK, eapol_kek_len}, + {EAPOL_KEY_TK, eapol_tk_len}, +#if defined(WL_FILS) && defined(WLFBT) + {EAPOL_KEY_KCK2, eapol_kck2_len}, + {EAPOL_KEY_KEK2, eapol_kek2_len}, +#endif /* WL_FILS && WLFBT */ +}; + +typedef struct rsn_akm_lookup_entry { + const rsn_akm_t rsn_akm; + const sha2_hash_type_t hash_type; +} rsn_akm_lookup_entry_t; + +static const rsn_akm_lookup_entry_t rsn_akm_lookup_tbl[] = { + {RSN_AKM_NONE, HASH_SHA1}, + {RSN_AKM_UNSPECIFIED, HASH_SHA1}, + {RSN_AKM_PSK, HASH_SHA1}, + {RSN_AKM_FBT_1X, HASH_SHA256}, + {RSN_AKM_FBT_PSK, HASH_SHA256}, + {RSN_AKM_MFP_1X, HASH_SHA256}, + {RSN_AKM_MFP_PSK, HASH_SHA256}, + {RSN_AKM_SHA256_1X, HASH_SHA256}, + {RSN_AKM_SHA256_PSK, HASH_SHA256}, + {RSN_AKM_TPK, HASH_SHA256}, + {RSN_AKM_SAE_PSK, HASH_SHA256}, + {RSN_AKM_SAE_FBT, HASH_SHA256}, + {RSN_AKM_SUITEB_SHA256_1X, HASH_SHA256}, + {RSN_AKM_SUITEB_SHA384_1X, HASH_SHA384}, + {RSN_AKM_FBT_SHA384_1X, HASH_SHA384}, + {RSN_AKM_FILS_SHA256, HASH_SHA256}, + {RSN_AKM_FILS_SHA384, HASH_SHA384}, + {RSN_AKM_FBT_SHA256_FILS, HASH_SHA256}, + {RSN_AKM_FBT_SHA384_FILS, HASH_SHA384}, + {RSN_AKM_OWE, HASH_SHA256}, + {RSN_AKM_FBT_SHA384_PSK, HASH_SHA384}, + {RSN_AKM_PSK_SHA384, HASH_SHA384}, +}; + +typedef struct rsn_akm_cipher_match_entry { + uint16 akm_type; + uint32 u_cast; /* BITMAP */ + uint32 m_cast; /* BITMAP */ + uint32 g_mgmt; /* BITMAP */ +} rsn_akm_cipher_match_entry_t; + +/* list only explicit cipher restriction for given AKM (e.g SuiteB) + * refer to 802.11 spec 9.4.2.24.3 + * If not listed here, it means no restriction in using any ciphers. 
+ */
+static const rsn_akm_cipher_match_entry_t rsn_akm_cipher_match_table[] = {
+	{RSN_AKM_SUITEB_SHA256_1X,
+	BCM_BIT(WPA_CIPHER_AES_GCM),
+	BCM_BIT(WPA_CIPHER_AES_GCM),
+	BCM_BIT(WPA_CIPHER_BIP_GMAC_128)},
+	{RSN_AKM_SUITEB_SHA384_1X,
+	BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+	BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+	BCM_BIT(WPA_CIPHER_BIP_GMAC_256) | BCM_BIT(WPA_CIPHER_BIP_CMAC_256)},
+	{RSN_AKM_FBT_SHA384_1X,
+	BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+	BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+	BCM_BIT(WPA_CIPHER_BIP_GMAC_256) | BCM_BIT(WPA_CIPHER_BIP_CMAC_256)}
+};
+
+#if defined(WL_BAND6G)
+static const rsn_akm_mask_t rsn_akm_6g_inval_mask =
+	BCM_BIT(RSN_AKM_PSK) |
+	BCM_BIT(RSN_AKM_FBT_PSK) |
+	BCM_BIT(RSN_AKM_SHA256_PSK) |
+	BCM_BIT(RSN_AKM_FBT_SHA384_PSK) |
+	BCM_BIT(RSN_AKM_PSK_SHA384);
+
+static const rsn_ciphers_t cipher_6g_inval_mask =
+	BCM_BIT(WPA_CIPHER_NONE) |
+	BCM_BIT(WPA_CIPHER_WEP_40) |
+	BCM_BIT(WPA_CIPHER_TKIP) |
+	BCM_BIT(WPA_CIPHER_WEP_104);
+#endif /* WL_BAND6G */
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL)
+typedef struct group_cipher_algo_entry {
+	rsn_cipher_t g_mgmt_cipher;
+	uint8 bip_algo;
+} group_cipher_algo_entry_t;
+
+static const group_cipher_algo_entry_t group_mgmt_cipher_algo[] = {
+	{WPA_CIPHER_BIP_GMAC_256, CRYPTO_ALGO_BIP_GMAC256},
+	{WPA_CIPHER_BIP_CMAC_256, CRYPTO_ALGO_BIP_CMAC256},
+	{WPA_CIPHER_BIP_GMAC_128, CRYPTO_ALGO_BIP_GMAC},
+	{WPA_CIPHER_BIP, CRYPTO_ALGO_BIP},
+};
+#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) */
+
+static uint16 wlc_calc_rsn_desc_version(const rsn_ie_info_t *rsn_info);
+static int bcmwpa_is_valid_akm(const rsn_akm_t akm);
+#if defined(BCMSUP_PSK) || defined(BCMAUTH_PSK) || defined(WLFBT) || defined(GTKOE)
+static sha2_hash_type_t bcmwpa_rsn_akm_to_hash(const rsn_akm_t akm);
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+static int bcmwpa_decode_cipher_suite(rsn_ie_info_t *info, const uint8 **ptr, uint ie_len, uint
+	*remain_len, uint16 *p_count);
+#endif
+#endif /* defined(BCMSUP_PSK) || defined(BCMAUTH_PSK) || defined(WLFBT) || defined(GTKOE) */
+#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(WL_OKC) || defined(WLHOSTFBT)
+#include
+
+/* calculate wpa PMKID: HMAC-SHA1-128(PMK, "PMK Name" | AA | SPA) */
+static void
+wpa_calc_pmkid_impl(sha2_hash_type_t hash_type,
+	const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+	const uint8 *pmk, uint pmk_len, uint8 *pmkid)
+{
+	int err;
+	hmac_sha2_ctx_t ctx;
+
+	err = hmac_sha2_init(&ctx, hash_type, pmk, pmk_len);
+	if (err != BCME_OK)
+		goto done;
+	hmac_sha2_update(&ctx, (const uint8 *)PMK_NAME_PFX, sizeof(PMK_NAME_PFX) - 1);
+	hmac_sha2_update(&ctx, (const uint8 *)auth_ea, ETHER_ADDR_LEN);
+	hmac_sha2_update(&ctx, (const uint8 *)sta_ea, ETHER_ADDR_LEN);
+	hmac_sha2_final(&ctx, pmkid, WPA2_PMKID_LEN);
+done:;
+}
+
+void
+wpa_calc_pmkid(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+	const uint8 *pmk, uint pmk_len, uint8 *pmkid)
+{
+	wpa_calc_pmkid_impl(HASH_SHA1, auth_ea, sta_ea, pmk, pmk_len, pmkid);
+}
+
+void
+kdf_calc_pmkid(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+	const uint8 *key, uint key_len, uint8 *pmkid, rsn_ie_info_t *rsn_info)
+{
+	sha2_hash_type_t hash_type;
+
+	if (rsn_info->sta_akm == RSN_AKM_SUITEB_SHA384_1X) {
+		hash_type = HASH_SHA384;
+	} else {
+		hash_type = HASH_SHA256;
+	}
+
+	wpa_calc_pmkid_impl(hash_type, auth_ea, sta_ea, key, key_len, pmkid);
+}
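+
+/* Illustrative sketch, not part of the original driver: computing the WPA2
+ * PMKID for a (PMK, authenticator address, supplicant address) triple with
+ * the helper above, i.e. HMAC-SHA1-128(PMK, "PMK Name" | AA | SPA). The
+ * guard macro and the example_* name are hypothetical.
+ */
+#ifdef BCMWPA_EXAMPLE /* hypothetical guard; never defined by this driver */
+static void
+example_cache_pmkid(const struct ether_addr *aa, const struct ether_addr *spa,
+	const uint8 *pmk, uint pmk_len)
+{
+	uint8 pmkid[WPA2_PMKID_LEN]; /* 16-byte (128-bit) identifier */
+
+	wpa_calc_pmkid(aa, spa, pmk, pmk_len, pmkid);
+	/* a real caller would now store pmkid in its PMKSA cache */
+}
+#endif /* BCMWPA_EXAMPLE */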
+#if defined(WLFBT) || defined(WLHOSTFBT)
+void
+wpa_calc_pmkR0(sha2_hash_type_t hash_type, const uint8 *ssid, uint ssid_len,
+	uint16 mdid, const uint8 *r0kh, uint r0kh_len, const struct ether_addr *sta_ea,
+	const uint8 *pmk, uint pmk_len, uint8 *pmkr0, uint8 *pmkr0name)
+{
+	uint8 out[FBT_R0KH_ID_LEN + WPA2_PMKID_LEN - 1];
+	int out_len = FBT_R0KH_ID_LEN - 1;
+	bcm_const_xlvp_t pfx[7];
+	bcm_const_xlvp_t pfx2[2];
+	int npfx = 0;
+	int npfx2 = 0;
+	uint8 mdid_le[2];
+	uint8 pfx_ssid_len;
+	uint8 pfx_r0kh_len;
+
+	if (hash_type == HASH_SHA384) {
+		out_len += WPA2_PMKID_LEN;
+	}
+
+	/* create prefixes for pmkr0 */
+	pfx[npfx].len = sizeof(FT_R0_PFX) - 1;
+	pfx[npfx++].data = (uint8 *)FT_R0_PFX;
+
+	/* ssid length and ssid */
+	pfx_ssid_len = ssid_len & 0xff;
+	pfx[npfx].len = (uint16)sizeof(pfx_ssid_len);
+	pfx[npfx++].data = &pfx_ssid_len;
+
+	pfx[npfx].len = (uint16)(ssid_len & 0xffff);
+	pfx[npfx++].data = ssid;
+
+	/* mdid */
+	htol16_ua_store(mdid, mdid_le);
+	pfx[npfx].len = sizeof(mdid_le);
+	pfx[npfx++].data = mdid_le;
+
+	/* r0kh len and r0kh */
+	pfx_r0kh_len = r0kh_len & 0xff;
+	pfx[npfx].len = sizeof(pfx_r0kh_len);
+	pfx[npfx++].data = &pfx_r0kh_len;
+
+	pfx[npfx].len = (uint16)(r0kh_len & 0xffff);
+	pfx[npfx++].data = r0kh;
+
+	/* sta addr */
+	pfx[npfx].len = ETHER_ADDR_LEN;
+	pfx[npfx++].data = (const uint8 *)sta_ea;
+
+	hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0, out, out_len);
+	(void)memcpy_s(pmkr0, pmk_len, out, pmk_len);
+
+	/* coverity checks overflow if pfx size changes */
+
+	/* create prefixes for pmkr0 name */
+	pfx2[npfx2].len = sizeof(FT_R0N_PFX) - 1;
+	pfx2[npfx2++].data = (uint8 *)FT_R0N_PFX;
+	pfx2[npfx2].len = WPA2_PMKID_LEN;
+	pfx2[npfx2++].data = &out[pmk_len];
+
+	(void)sha2(hash_type, pfx2, npfx2, NULL, 0, pmkr0name, WPA2_PMKID_LEN);
+}
+
+void
+wpa_calc_pmkR1(sha2_hash_type_t hash_type, const struct ether_addr *r1kh,
+	const struct ether_addr *sta_ea, const uint8 *pmk, uint pmk_len, const uint8 *pmkr0name,
+	uint8 *pmkr1, uint8 *pmkr1name)
+{
+	bcm_const_xlvp_t pfx[3];
+	bcm_const_xlvp_t pfx2[4];
+	int npfx = 0;
+	int npfx2 = 0;
+
+	if (!pmkr1 && !pmkr1name)
+		goto done;
+	else if (!pmkr1)
+		goto calc_r1name;
+
+	/* create prefixes for pmkr1 */
+	pfx[npfx].len = sizeof(FT_R1_PFX) - 1;
+	pfx[npfx++].data = (uint8 *)FT_R1_PFX;
+
+	pfx[npfx].len = ETHER_ADDR_LEN;
+	pfx[npfx++].data = (const uint8 *)r1kh;
+
+	pfx[npfx].len = ETHER_ADDR_LEN;
+	pfx[npfx++].data = (const uint8 *)sta_ea;
+
+	hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0,
+		pmkr1, sha2_digest_len(hash_type));
+
+calc_r1name:
+	/* create prefixes for pmkr1 name */
+	pfx2[npfx2].len = sizeof(FT_R1N_PFX) - 1;
+	pfx2[npfx2++].data = (uint8 *)FT_R1N_PFX;
+
+	pfx2[npfx2].len = WPA2_PMKID_LEN;
+	pfx2[npfx2++].data = pmkr0name;
+
+	pfx2[npfx2].len = ETHER_ADDR_LEN;
+	pfx2[npfx2++].data = (const uint8 *)r1kh;
+
+	pfx2[npfx2].len = ETHER_ADDR_LEN;
+	pfx2[npfx2++].data = (const uint8 *)sta_ea;
+
+	sha2(hash_type, pfx2, npfx2, NULL, 0, pmkr1name, WPA2_PMKID_LEN);
+done:;
+}
+
+void
+wpa_calc_ft_ptk(sha2_hash_type_t hash_type,
+	const struct ether_addr *bssid, const struct ether_addr *sta_ea,
+	const uint8 *anonce, const uint8* snonce,
+	const uint8 *pmk, uint pmk_len, uint8 *ptk, uint ptk_len)
+{
+	bcm_const_xlvp_t pfx[5];
+	int npfx = 0;
+
+	/* FT-PTK||SNONCE||ANONCE||BSSID||STA Addr */
+
+	pfx[npfx].len = sizeof(FT_PTK_PFX) - 1;
+	pfx[npfx++].data = (uint8 *)FT_PTK_PFX;
+
+	pfx[npfx].len = EAPOL_WPA_KEY_NONCE_LEN;
+	pfx[npfx++].data = snonce;
+
+	pfx[npfx].len = EAPOL_WPA_KEY_NONCE_LEN;
+	pfx[npfx++].data
= anonce; + + pfx[npfx].len = ETHER_ADDR_LEN; + pfx[npfx++].data = (const uint8 *)bssid; + + pfx[npfx].len = ETHER_ADDR_LEN; + pfx[npfx++].data = (const uint8 *)sta_ea; + + hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0, ptk, ptk_len); +} + +void +wpa_derive_pmkR1_name(sha2_hash_type_t hash_type, + struct ether_addr *r1kh, struct ether_addr *sta_ea, + uint8 *pmkr0name, uint8 *pmkr1name) +{ + wpa_calc_pmkR1(hash_type, r1kh, sta_ea, NULL /* pmk */, 0, + pmkr0name, NULL /* pmkr1 */, pmkr1name); +} +#endif /* WLFBT || WLHOSTFBT */ +#endif /* BCMSUP_PSK || WLFBT || WL_OKC */ + +#if defined(BCMSUP_PSK) || defined(GTKOE) || defined(BCMAUTH_PSK) || defined(WLFBT) +/* Decrypt a key data from a WPA key message */ +int +wpa_decr_key_data(eapol_wpa_key_header_t *body, uint16 key_info, uint8 *ekey, + uint8 *encrkey, rc4_ks_t *rc4key, const rsn_ie_info_t *rsn_info, uint16 *dec_len) +{ + uint16 len; + int err = BCME_OK; + uint8 *key_data; + + switch (key_info & (WPA_KEY_DESC_V1 | WPA_KEY_DESC_V2)) { + case WPA_KEY_DESC_V1: + err = memcpy_s(encrkey, EAPOL_WPA_KEY_IV_LEN + EAPOL_WPA_ENCR_KEY_MAX_LEN, + body->iv, EAPOL_WPA_KEY_IV_LEN); + if (err) { + ASSERT(0); + return err; + } + err = memcpy_s(&encrkey[EAPOL_WPA_KEY_IV_LEN], EAPOL_WPA_ENCR_KEY_MAX_LEN, + ekey, rsn_info->kek_len); + if (err) { + ASSERT(0); + return err; + } + /* decrypt the key data */ + prepare_key(encrkey, EAPOL_WPA_KEY_IV_LEN + rsn_info->kek_len, rc4key); + rc4(NULL, WPA_KEY_DATA_LEN_256, rc4key); /* dump 256 bytes */ + len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body, rsn_info->kck_mic_len)); + key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len); + rc4(key_data, len, rc4key); + break; + + case WPA_KEY_DESC_V2: + case WPA_KEY_DESC_V3: + case WPA_KEY_DESC_V0: + /* fallthrough */ + len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body, rsn_info->kck_mic_len)); + if (!len) { + *dec_len = 0; + break; /* ignore zero length */ + } + key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len); + if (aes_unwrap(rsn_info->kek_len, ekey, len, key_data, key_data)) { + *dec_len = 0; + err = BCME_DECERR; + break; + } + *dec_len = (len > AKW_BLOCK_LEN) ? 
(len - AKW_BLOCK_LEN) : 0;
+		break;
+
+	default:
+		*dec_len = 0;
+		err = BCME_UNSUPPORTED; /* may need revisiting - see 802.11-2016 */
+		break;
+	}
+
+	return err;
+}
+
+/* internal function - assumes enough space is allocated; returns the number of prefixes written */
+static int
+wpa_calc_ptk_prefixes(const uint8 *prefix, uint prefix_len,
+	const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+	const uint8 *anonce, uint8 anonce_len, const uint8 *snonce, uint8 snonce_len,
+	bcm_const_xlvp_t *pfx)
+{
+	int npfx = 0;
+	const uint8 *nonce;
+
+	/* prefix || min ea || max ea || min nonce || max nonce */
+	pfx[npfx].len = (uint16)(prefix_len & 0xffff);
+	pfx[npfx++].data = prefix;
+
+	pfx[npfx].len = ETHER_ADDR_LEN;
+	pfx[npfx++].data = (const uint8 *) wpa_array_cmp(MIN_ARRAY,
+		(const uint8 *)auth_ea, (const uint8 *)sta_ea, ETHER_ADDR_LEN);
+
+	pfx[npfx].len = ETHER_ADDR_LEN;
+	pfx[npfx++].data = (const uint8 *) wpa_array_cmp(MAX_ARRAY,
+		(const uint8 *)auth_ea, (const uint8 *)sta_ea, ETHER_ADDR_LEN);
+
+	nonce = (const uint8 *)wpa_array_cmp(MIN_ARRAY, snonce, anonce, snonce_len);
+
+	if (nonce == snonce) {
+		pfx[npfx].len = snonce_len;
+		pfx[npfx++].data = snonce;
+		pfx[npfx].len = anonce_len;
+		pfx[npfx++].data = anonce;
+	} else {
+		pfx[npfx].len = anonce_len;
+		pfx[npfx++].data = anonce;
+		pfx[npfx].len = snonce_len;
+		pfx[npfx++].data = snonce;
+	}
+
+	return npfx;
+}
+
+void
+kdf_calc_ptk(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+	const uint8 *anonce, const uint8* snonce,
+	const uint8 *pmk, uint pmk_len, uint8 *ptk, uint ptk_len)
+{
+	bcm_const_xlvp_t pfx[5];
+	int npfx;
+
+	/* note: kdf omits trailing NULL in prefix */
+	npfx = wpa_calc_ptk_prefixes((uint8 *)WPA_PTK_PFX, sizeof(WPA_PTK_PFX) - 1,
+		auth_ea, sta_ea, anonce, EAPOL_WPA_KEY_NONCE_LEN, snonce,
+		EAPOL_WPA_KEY_NONCE_LEN, pfx);
+	hmac_sha2_n(HASH_SHA256, pmk, pmk_len, pfx, npfx, NULL, 0, ptk, ptk_len);
+}
+#endif /* BCMSUP_PSK || GTKOE || BCMAUTH_PSK || WLFBT */
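+
+/* Illustrative sketch, not part of the original driver: deriving the
+ * pairwise transient key for a SHA256-based (KDF) AKM with kdf_calc_ptk()
+ * above. The PTK comes back as KCK || KEK || TK; 48 bytes covers the
+ * CCMP-128 default split of 16 + 16 + 16. The guard macro and example_*
+ * name are hypothetical.
+ */
+#ifdef BCMWPA_EXAMPLE /* hypothetical guard; never defined by this driver */
+static void
+example_derive_ptk(const struct ether_addr *aa, const struct ether_addr *spa,
+	const uint8 *anonce, const uint8 *snonce,
+	const uint8 *pmk, uint pmk_len)
+{
+	uint8 ptk[48];
+
+	/* the "Pairwise key expansion" label and the min/max ordering of the
+	 * addresses and nonces are handled inside the helper
+	 */
+	kdf_calc_ptk(aa, spa, anonce, snonce, pmk, pmk_len, ptk, sizeof(ptk));
+}
+#endif /* BCMWPA_EXAMPLE */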
+
+#if defined(BCMSUP_PSK) || defined(BCMAUTH_PSK) || defined(WLFBT) || defined(GTKOE)
+/* Compute Message Integrity Code (MIC) over EAPOL message */
+int
+wpa_make_mic(eapol_header_t *eapol, uint key_desc, uint8 *mic_key,
+	rsn_ie_info_t *rsn_info, uchar *mic, uint mic_len)
+{
+	uint data_len;
+	int err = BCME_OK;
+	sha2_hash_type_t type = HASH_NONE;
+
+	/* length of eapol pkt from the version field on */
+	data_len = 4 + ntoh16_ua((uint8 *)&eapol->length);
+
+	/* Create the MIC for the pkt */
+	switch (key_desc) {
+	case WPA_KEY_DESC_V1:
+		type = HASH_MD5;
+		break;
+	case WPA_KEY_DESC_V2:
+		/* note: transparent truncation to mic_len */
+		type = HASH_SHA1;
+		break;
+	case WPA_KEY_DESC_V3:
+		aes_cmac_calc(NULL, 0, &eapol->version, data_len, mic_key,
+			mic_len, mic, AES_BLOCK_SZ);
+		goto exit;
+	case WPA_KEY_DESC_V0:
+		ASSERT(rsn_info != NULL);
+		if (rsn_info == NULL) {
+			return BCME_BADARG;
+		}
+		if (IS_SAE_AKM(rsn_info->sta_akm)) {
+			aes_cmac_calc(NULL, 0, &eapol->version, data_len, mic_key,
+				mic_len, mic, AES_BLOCK_SZ);
+			goto exit;
+		}
+		type = bcmwpa_rsn_akm_to_hash(rsn_info->sta_akm);
+		break;
+	default:
+		/* 11mc D8.0 some AKMs use descriptor version 0 */
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (type) {
+		err = hmac_sha2(type, mic_key, mic_len, NULL, 0, (uint8 *)&eapol->version, data_len,
+			mic, mic_len);
+	}
+exit:
+	return err;
+}
+
+int
+wpa_calc_ptk(rsn_akm_t akm, const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+	const uint8 *anonce, uint8 anon_len, const uint8 *snonce, uint8 snon_len, const uint8 *pmk,
+	uint pmk_len, uint8 *ptk, uint ptk_len)
+{
+	bcm_const_xlvp_t pfx[PRF_PREFIXES_NUM];
+	int npfx;
+	int ret = BCME_OK;
+	sha2_hash_type_t hash_type;
+	uint label_len;
+
+	if (RSN_AKM_USE_KDF(akm)) {
+		label_len = sizeof(WPA_PTK_PFX) - 1u;
+	} else { /* WPA AKMs */
+		label_len = sizeof(WPA_PTK_PFX); /* note: wpa needs trailing NULL in prefix */
+	}
+
+	hash_type = bcmwpa_rsn_akm_to_hash(akm);
+
+	npfx = wpa_calc_ptk_prefixes((uint8 *)WPA_PTK_PFX, label_len,
+		auth_ea, sta_ea, anonce, anon_len, snonce, snon_len, pfx);
+	ret = hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0, ptk, ptk_len);
+	return ret;
+}
+
+bool
+wpa_encr_key_data(eapol_wpa_key_header_t *body, uint16 key_info, uint8 *ekey,
+	uint8 *gtk, uint8 *data, uint8 *encrkey, rc4_ks_t *rc4key, const rsn_ie_info_t *rsn_info)
+{
+	uint16 len;
+	uint8 *key_data;
+
+	switch (key_info & (WPA_KEY_DESC_V1 | WPA_KEY_DESC_V2)) {
+	case WPA_KEY_DESC_V1:
+		if (gtk) {
+			len = ntoh16_ua((uint8 *)&body->key_len);
+		} else {
+			len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body,
+				rsn_info->kck_mic_len));
+		}
+
+		/* create the iv/ptk key */
+		if (memcpy_s(encrkey, EAPOL_WPA_KEY_IV_LEN, body->iv, sizeof(body->iv))) {
+			return FALSE;
+		}
+		if (memcpy_s(&encrkey[EAPOL_WPA_KEY_IV_LEN], EAPOL_WPA_ENCR_KEY_DEFAULT_LEN,
+			ekey, EAPOL_WPA_ENCR_KEY_DEFAULT_LEN)) {
+			return FALSE;
+		}
+		/* encrypt the key data */
+		prepare_key(encrkey, EAPOL_WPA_KEY_IV_LEN + EAPOL_WPA_ENCR_KEY_DEFAULT_LEN,
+			rc4key);
+		rc4(data, WPA_KEY_DATA_LEN_256, rc4key); /* dump 256 bytes */
+		key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len);
+		rc4(key_data, len, rc4key);
+		break;
+	case WPA_KEY_DESC_V2: /* fall through */
+	case WPA_KEY_DESC_V3:
+	case WPA_KEY_DESC_V0:
+		len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body,
+			rsn_info->kck_mic_len));
+		/* FIXME: data_len is the length to encrypt, but we need to make sure
+		 * the buffer is big enough for expansion. how? problem for caller?
+		 */
+		key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len);
+		/* pad if needed - min.
16 bytes, 8 byte aligned */ + /* padding is 0xdd followed by 0's */ + if (len < 2u *AKW_BLOCK_LEN) { + key_data[len] = WPA2_KEY_DATA_PAD; + bzero(&key_data[len + 1u], 2u * AKW_BLOCK_LEN - (len + 1u)); + len = 2u *AKW_BLOCK_LEN; + } else if (len % AKW_BLOCK_LEN) { + key_data[len] = WPA2_KEY_DATA_PAD; + bzero(&key_data[len + 1u], + AKW_BLOCK_LEN - ((len + 1u) % AKW_BLOCK_LEN)); + len += AKW_BLOCK_LEN - (len % AKW_BLOCK_LEN); + } + if (aes_wrap(rsn_info->kek_len, ekey, len, key_data, key_data)) { + return FALSE; + } + len += AKW_BLOCK_LEN; + hton16_ua_store(len, + (uint8 *)EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body, + rsn_info->kck_mic_len)); + break; + default: + /* 11mc D8.0 key descriptor version 0 used */ + return FALSE; + } + + return TRUE; +} + +/* Check MIC of EAPOL message */ +bool +wpa_check_mic(eapol_header_t *eapol, uint key_desc, uint8 *mic_key, rsn_ie_info_t *rsn_info) +{ + eapol_wpa_key_header_t *body = NULL; + uchar digest[SHA2_MAX_DIGEST_LEN]; + uchar mic[EAPOL_WPA_KEY_MAX_MIC_LEN]; + + if (!mic_key || !rsn_info || !eapol) { + return FALSE; + } + + body = (eapol_wpa_key_header_t *)eapol->body; + +#ifndef EAPOL_KEY_HDR_VER_V2 + if (rsn_info->kck_mic_len != EAPOL_WPA_KCK_DEFAULT_LEN) +#else + if (rsn_info->kck_mic_len > EAPOL_WPA_KEY_MAX_MIC_LEN) +#endif /* EAPOL_KEY_HDR_VER_V2 */ + { + ASSERT(0); + return FALSE; + } + /* save MIC and clear its space in message */ + if (memcpy_s(mic, sizeof(mic), EAPOL_WPA_KEY_HDR_MIC_PTR(body), + rsn_info->kck_mic_len)) { + return FALSE; + } + bzero(EAPOL_WPA_KEY_HDR_MIC_PTR(body), rsn_info->kck_mic_len); + if (wpa_make_mic(eapol, key_desc, mic_key, rsn_info, digest, rsn_info->kck_mic_len) + != BCME_OK) { + return FALSE; + } + return !memcmp(digest, mic, rsn_info->kck_mic_len); +} + +static sha2_hash_type_t bcmwpa_rsn_akm_to_hash(const rsn_akm_t akm) +{ + uint i = 0; + sha2_hash_type_t type = HASH_NONE; + + for (i = 0; i < ARRAYSIZE(rsn_akm_lookup_tbl); i++) { + if (akm == rsn_akm_lookup_tbl[i].rsn_akm) { + type = rsn_akm_lookup_tbl[i].hash_type; + break; + } + } + return type; +} +#endif /* BCMSUP_PSK || BCMAUTH_PSK || WLFBT || GTKOE */ + +#ifdef WLTDLS +void +wpa_calc_tpk(const struct ether_addr *init_ea, const struct ether_addr *resp_ea, + const struct ether_addr *bssid, const uint8 *anonce, const uint8* snonce, + uint8 *tpk, uint tpk_len) +{ + uint8 pmk[SHA2_MAX_DIGEST_LEN]; + uint pmk_len; + bcm_const_xlvp_t ikpfx[2]; + int nikpfx = 0; + bcm_const_xlvp_t tpkpfx[4]; + int ntpkpfx = 0; + + pmk_len = sha2_digest_len(HASH_SHA256); + + /* compute pmk to use - using anonce and snonce - min and then max */ + ikpfx[nikpfx].len = EAPOL_WPA_KEY_NONCE_LEN; + ikpfx[nikpfx++].data = wpa_array_cmp(MIN_ARRAY, snonce, anonce, + EAPOL_WPA_KEY_NONCE_LEN), + + ikpfx[nikpfx].len = EAPOL_WPA_KEY_NONCE_LEN; + ikpfx[nikpfx++].data = wpa_array_cmp(MAX_ARRAY, snonce, anonce, + EAPOL_WPA_KEY_NONCE_LEN), + + (void)sha2(HASH_SHA256, ikpfx, nikpfx, NULL, 0, pmk, SHA2_SHA256_DIGEST_LEN); + + /* compute the tpk - using prefix, min ea, max ea, bssid */ + tpkpfx[ntpkpfx].len = sizeof(TDLS_PMK_PFX) - 1; + tpkpfx[ntpkpfx++].data = (const uint8 *)TDLS_PMK_PFX; + + tpkpfx[ntpkpfx].len = ETHER_ADDR_LEN; + tpkpfx[ntpkpfx++].data = wpa_array_cmp(MIN_ARRAY, (const uint8 *)init_ea, + (const uint8 *)resp_ea, ETHER_ADDR_LEN), + + tpkpfx[ntpkpfx].len = ETHER_ADDR_LEN; + tpkpfx[ntpkpfx++].data = wpa_array_cmp(MAX_ARRAY, (const uint8 *)init_ea, + (const uint8 *)resp_ea, ETHER_ADDR_LEN), + + tpkpfx[ntpkpfx].len = ETHER_ADDR_LEN; + tpkpfx[ntpkpfx++].data = (const uint8 *)bssid; + + 
(void)hmac_sha2_n(HASH_SHA256, pmk, pmk_len, tpkpfx, ntpkpfx, NULL, 0, tpk, tpk_len);
+}
+#endif /* WLTDLS */
+
+/* Convert WPA/WPA2 IE cipher suite to locally used value */
+static bool
+rsn_cipher(wpa_suite_t *suite, ushort *cipher, const uint8 *std_oui, bool wep_ok)
+{
+	bool ret = TRUE;
+
+	if (!memcmp((const char *)suite->oui, std_oui, DOT11_OUI_LEN)) {
+		switch (suite->type) {
+		case WPA_CIPHER_TKIP:
+			*cipher = CRYPTO_ALGO_TKIP;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			*cipher = CRYPTO_ALGO_AES_CCM;
+			break;
+		case WPA_CIPHER_AES_GCM:
+			*cipher = CRYPTO_ALGO_AES_GCM;
+			break;
+		case WPA_CIPHER_AES_GCM256:
+			*cipher = CRYPTO_ALGO_AES_GCM256;
+			break;
+		case WPA_CIPHER_WEP_40:
+			if (wep_ok)
+				*cipher = CRYPTO_ALGO_WEP1;
+			else
+				ret = FALSE;
+			break;
+		case WPA_CIPHER_WEP_104:
+			if (wep_ok)
+				*cipher = CRYPTO_ALGO_WEP128;
+			else
+				ret = FALSE;
+			break;
+		default:
+			ret = FALSE;
+			break;
+		}
+		return ret;
+	}
+
+	return FALSE;
+}
+
+bool
+wpa_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok)
+{
+	return rsn_cipher(suite, cipher, (const uchar*)WPA_OUI, wep_ok);
+}
+
+bool
+wpa2_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok)
+{
+	return rsn_cipher(suite, cipher, (const uchar*)WPA2_OUI, wep_ok);
+}
+
+/* Is this TLV the expected vendor IE? If not, update the tlvs
+ * buffer pointer/length so the caller can move on to the next IE.
+ */
+bool
+bcm_has_ie(uint8 *ie, uint8 **tlvs, uint *tlvs_len, const uint8 *oui, uint oui_len, uint8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+	    !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+	    type == ie[TLV_BODY_OFF + oui_len]) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (uint)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return FALSE;
+}
+
+wpa_ie_fixed_t *
+bcm_find_wpaie(uint8 *parse, uint len)
+{
+	return (wpa_ie_fixed_t *) bcm_find_ie(parse, len, DOT11_MNG_VS_ID,
+		WPA_OUI_LEN, (const char*) WPA_OUI, WPA_OUI_TYPE);
+}
+
+int
+bcm_find_security_ies(uint8 *buf, uint buflen, void **wpa_ie,
+	void **rsn_ie)
+{
+	bcm_tlv_t *tlv = NULL;
+	uint totlen = 0;
+	uint8 *end = NULL;
+	uint len = 0;
+	uint tlvs_len = 0;
+	uint8 *tlvs = NULL;
+
+	if ((tlv = (bcm_tlv_t*)buf) == NULL ||
+	    !wpa_ie || !rsn_ie || buflen == 0) {
+		return BCME_BADARG;
+	}
+
+	totlen = buflen;
+	*rsn_ie = *wpa_ie = NULL;
+	end = buf;
+	end += buflen;
+
+	/* find rsn ie and wpa ie */
+	while (totlen >= TLV_HDR_LEN) {
+		len = tlv->len;
+		tlvs_len = buflen;
+		tlvs = buf;
+
+		/* check if tlv overruns buffer */
+		if (totlen < (len + TLV_HDR_LEN)) {
+			return BCME_BUFTOOSHORT;
+		}
+
+		/* validate remaining totlen */
+		if (totlen >= (len + TLV_HDR_LEN)) {
+			if ((*rsn_ie == NULL) && (tlv->id == DOT11_MNG_RSN_ID)) {
+				*rsn_ie = tlv;
+			} else if ((*wpa_ie == NULL) && (tlv->id == DOT11_MNG_VS_ID)) {
+				/* if vendor ie, check if its wpa ie */
+				if (bcm_is_wpa_ie((uint8 *)tlv, &tlvs, &tlvs_len))
+					*wpa_ie = tlv;
+			}
+		}
+
+		if (*rsn_ie && *wpa_ie)
+			break;
+
+		tlv = (bcm_tlv_t*)((uint8*)tlv + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+
+		if (totlen > buflen) {
+			return BCME_BUFTOOLONG;
+		}
+
+		if ((uint8 *)tlv > end) {
+			return BCME_BUFTOOSHORT;
+		}
+
+	}
+
+	if (*wpa_ie || *rsn_ie)
+		return BCME_OK;
+	else
+		return BCME_NOTFOUND;
+}
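+
+/* Illustrative sketch, not part of the original driver: scanning a frame's
+ * tagged-parameter area for the WPA and RSN IEs with bcm_find_security_ies()
+ * above. The guard macro and the example_* name are hypothetical.
+ */
+#ifdef BCMWPA_EXAMPLE /* hypothetical guard; never defined by this driver */
+static void
+example_scan_security_ies(uint8 *tlvs, uint tlvs_len)
+{
+	void *wpa_ie = NULL;
+	void *rsn_ie = NULL;
+
+	if (bcm_find_security_ies(tlvs, tlvs_len, &wpa_ie, &rsn_ie) == BCME_OK) {
+		/* wpa_ie / rsn_ie now point at the vendor-specific WPA IE and
+		 * the RSN IE; either may still be NULL if only one is present
+		 */
+	}
+}
+#endif /* BCMWPA_EXAMPLE */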
+
+bcm_tlv_t *
+bcm_find_wmeie(uint8 *parse, uint len, uint8 subtype, uint8 subtype_len)
+{
+	bcm_tlv_t *ie;
+	while ((ie = bcm_find_ie(parse, len, DOT11_MNG_VS_ID, WME_OUI_LEN,
+		(const char*) WME_OUI, WME_OUI_TYPE))) {
+		uint ie_len = TLV_HDR_LEN + ie->len;
+		wme_ie_t *ie_data = (wme_ie_t *)ie->data;
+		/* the subtype_len must include OUI+type+subtype */
+		if (subtype_len > WME_OUI_LEN + 1 &&
+			ie_len == (uint)TLV_HDR_LEN + subtype_len &&
+			ie_data->subtype == subtype) {
+			return ie;
+		}
+		/* move to next IE */
+		len -= (uint)((uint8 *)ie + ie_len - parse);
+		parse = (uint8 *)ie + ie_len;
+	}
+	return NULL;
+}
+
+wps_ie_fixed_t *
+bcm_find_wpsie(const uint8 *parse, uint len)
+{
+	uint8 type = WPS_OUI_TYPE;
+
+	return (wps_ie_fixed_t *)bcm_find_vendor_ie(parse, len, WPS_OUI, &type, sizeof(type));
+}
+
+/* locate the Attribute in the WPS IE */
+/* assume the caller has validated the WPS IE tag and length */
+wps_at_fixed_t *
+bcm_wps_find_at(wps_at_fixed_t *at, uint len, uint16 id)
+{
+	while ((int)len >= WPS_AT_FIXED_LEN) {
+		uint alen = WPS_AT_FIXED_LEN + ntoh16_ua(((wps_at_fixed_t *)at)->len);
+		if (ntoh16_ua(((wps_at_fixed_t *)at)->at) == id && alen <= len)
+			return at;
+		at = (wps_at_fixed_t *)((uint8 *)at + alen);
+		len -= alen;
+	}
+	return NULL;
+}
+
+#ifdef WLP2P
+wifi_p2p_ie_t *
+bcm_find_p2pie(const uint8 *parse, uint len)
+{
+	uint8 type = P2P_OUI_TYPE;
+
+	return (wifi_p2p_ie_t *)bcm_find_vendor_ie(parse, len, P2P_OUI, &type, sizeof(type));
+}
+#endif
+
+bcm_tlv_t *
+bcm_find_hs20ie(uint8 *parse, uint len)
+{
+	return bcm_find_ie(parse, len, DOT11_MNG_VS_ID, WFA_OUI_LEN,
+		(const char *)WFA_OUI, WFA_OUI_TYPE_HS20);
+}
+
+bcm_tlv_t *
+bcm_find_osenie(uint8 *parse, uint len)
+{
+	return bcm_find_ie(parse, len, DOT11_MNG_VS_ID, WFA_OUI_LEN,
+		(const char *) WFA_OUI, WFA_OUI_TYPE_OSEN);
+}
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS)
+#define wpa_is_kde(ie, tlvs, len, type) bcm_has_ie(ie, tlvs, len, \
+	(const uint8 *)WPA2_OUI, WPA2_OUI_LEN, type)
+
+eapol_wpa2_encap_data_t *
+wpa_find_kde(const uint8 *parse, uint len, uint8 type)
+{
+	return (eapol_wpa2_encap_data_t *) bcm_find_ie(parse, len,
+		DOT11_MNG_PROPR_ID, WPA2_OUI_LEN, (const char *) WPA2_OUI, type);
+}
+
+bool
+wpa_is_gtk_encap(uint8 *ie, uint8 **tlvs, uint *tlvs_len)
+{
+	return wpa_is_kde(ie, tlvs, tlvs_len, WPA2_KEY_DATA_SUBTYPE_GTK);
+}
+
+eapol_wpa2_encap_data_t *
+wpa_find_gtk_encap(uint8 *parse, uint len)
+{
+	eapol_wpa2_encap_data_t *data;
+
+	/* minimum length includes kde up to the gtk field in eapol_wpa2_key_gtk_encap_t */
+	data = wpa_find_kde(parse, len, WPA2_KEY_DATA_SUBTYPE_GTK);
+	if (data && (data->length < EAPOL_WPA2_GTK_ENCAP_MIN_LEN)) {
+		data = NULL;
+	}
+
+	return data;
+}
+
+int
+wpa_find_eapol_kde_data(eapol_header_t* eapol, uint8 eapol_mic_len,
+	uint8 subtype, eapol_wpa2_encap_data_t **out_data)
+{
+	eapol_wpa_key_header_t *body;
+	uint8 *parse;
+	uint16 body_len;
+	uint16 data_len;
+
+	if (!eapol) {
+		return BCME_BADARG;
+	}
+
+	body = (eapol_wpa_key_header_t *)eapol->body;
+	body_len = ntoh16_ua(&eapol->length);
+
+	data_len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body,
+		eapol_mic_len));
+
+	parse = EAPOL_WPA_KEY_HDR_DATA_PTR(body, eapol_mic_len);
+
+	if (((uint8 *)body + body_len) < ((uint8 *)parse + data_len)) {
+		return BCME_BUFTOOSHORT;
+	}
+
+	return wpa_find_kde_data(parse, data_len, subtype, out_data);
+}
+
+int
+wpa_find_kde_data(const uint8 *kde_buf, uint16 buf_len,
+	uint8 subtype, eapol_wpa2_encap_data_t **out_data)
+{
+	eapol_wpa2_encap_data_t *data;
+	uint8 min_len;
+
+	if (!kde_buf) {
+		return BCME_BADARG;
+	}
+
+	/* minimum length includes kde up to the gtk field in eapol_wpa2_key_gtk_encap_t */
+	data = wpa_find_kde(kde_buf,
buf_len, subtype); + if (!data) { + return BCME_IE_NOTFOUND; + } + + switch (subtype) { + case WPA2_KEY_DATA_SUBTYPE_GTK: + min_len = EAPOL_WPA2_GTK_ENCAP_MIN_LEN; + break; + case WPA2_KEY_DATA_SUBTYPE_IGTK: + min_len = EAPOL_WPA2_BIGTK_ENCAP_MIN_LEN; + break; + case WPA2_KEY_DATA_SUBTYPE_BIGTK: + min_len = EAPOL_WPA2_IGTK_ENCAP_MIN_LEN; + break; +#ifdef WL_OCV + case WPA2_KEY_DATA_SUBTYPE_OCI: + min_len = EAPOL_WPA2_OCI_ENCAP_MIN_LEN; + break; +#endif /* WL_OCV */ + default: + return BCME_UNSUPPORTED; + } + + if (data->length < min_len) { + return BCME_BADLEN; + } + + *out_data = data; + + return BCME_OK; +} + +#ifdef WL_OCV +bool +wpa_check_ocv_caps(uint16 local_caps, uint16 peer_caps) +{ + bool ocv_enabled = + ((local_caps & RSN_CAP_OCVC) && + (peer_caps & RSN_CAP_OCVC)); + bool mfp_enabled = + ((peer_caps & RSN_CAP_MFPC) || + (peer_caps & RSN_CAP_MFPR)); + + return (ocv_enabled && mfp_enabled); +} + +int +wpa_add_oci_encap(chanspec_t chspec, uint8* buf, uint buf_len) +{ + int retval = BCME_OK; + eapol_wpa2_encap_data_t* oci_kde; + uint len = buf_len; + + if (buf_len < WPA_OCV_OCI_KDE_SIZE) { + retval = BCME_BUFTOOSHORT; + goto done; + } + + oci_kde = (eapol_wpa2_encap_data_t*)buf; + + oci_kde->type = DOT11_MNG_WPA_ID; + oci_kde->subtype = WPA2_KEY_DATA_SUBTYPE_OCI; + oci_kde->length = (WPA_OCV_OCI_KDE_SIZE - TLV_HDR_LEN); + + oci_kde->oui[0u] = WPA2_OUI[0u]; + oci_kde->oui[1u] = WPA2_OUI[1u]; + oci_kde->oui[2u] = WPA2_OUI[2u]; + + buf += EAPOL_WPA2_ENCAP_DATA_HDR_LEN; + len -= EAPOL_WPA2_ENCAP_DATA_HDR_LEN; + + retval = bcm_ocv_write_oci(chspec, buf, len); + if (retval != BCME_OK) { + goto done; + } + +done: + return retval; +} + +int +wpa_add_oci_ie(chanspec_t chspec, uint8* buf, uint buf_len) +{ + int retval = BCME_OK; + uint8* oci_buf = buf + BCM_TLV_EXT_HDR_SIZE; + + if (buf_len < (bcm_ocv_get_oci_len() + BCM_TLV_EXT_HDR_SIZE)) { + retval = BCME_BUFTOOSHORT; + goto done; + } + + retval = bcm_ocv_write_oci(chspec, oci_buf, bcm_ocv_get_oci_len()); + if (retval != BCME_OK) { + goto done; + } + + (void)bcm_write_tlv_ext(DOT11_MNG_ID_EXT_ID, + OCV_EXTID_MNG_OCI_ID, oci_buf, bcm_ocv_get_oci_len(), buf); + +done: + return retval; +} + +int +wpa_add_oci_ft_subelem(chanspec_t chspec, uint8* buf, uint buf_len) +{ + int retval = BCME_OK; + uint8* oci_buf = buf + BCM_TLV_HDR_SIZE; + + if (buf_len < (bcm_ocv_get_oci_len() + BCM_TLV_HDR_SIZE)) { + retval = BCME_BUFTOOSHORT; + goto done; + } + + retval = bcm_ocv_write_oci(chspec, oci_buf, bcm_ocv_get_oci_len()); + if (retval != BCME_OK) { + goto done; + } + + bcm_write_tlv_safe(DOT11_FBT_SUBELEM_ID_OCI, + oci_buf, bcm_ocv_get_oci_len(), buf, buf_len); + +done: + return retval; +} + +int wpa_validate_oci_encap(chanspec_t chspec, const uint8* buf, uint buf_len) +{ + int retval = BCME_OK; + eapol_wpa2_encap_data_t *encap = NULL; + + retval = wpa_find_kde_data(buf, buf_len, WPA2_KEY_DATA_SUBTYPE_OCI, &encap); + if (retval != BCME_OK) { + retval = BCME_NOTFOUND; + goto done; + } + + retval = bcm_ocv_validate_oci(chspec, + encap->data, encap->length); + if (retval != BCME_OK) { + goto done; + } + +done: + return retval; +} + +int wpa_validate_oci_ie(chanspec_t chspec, const uint8* buf, uint buf_len) +{ + int retval = BCME_OK; + bcm_tlv_ext_t *oci_ie; + + oci_ie = (bcm_tlv_ext_t *)bcm_parse_tlvs_dot11(buf, buf_len, + OCV_EXTID_MNG_OCI_ID, TRUE); + + if (!oci_ie) { + retval = BCME_NOTFOUND; + goto done; + } + + retval = bcm_ocv_validate_oci(chspec, oci_ie->data, oci_ie->len); + if (retval != BCME_OK) { + goto done; + } + +done: + return retval; +} + +int 
wpa_validate_oci_ft_subelem(chanspec_t chspec, const uint8* buf, uint buf_len) +{ + int retval = BCME_OK; + bcm_tlv_t *oci_ie; + + oci_ie = (bcm_tlv_t *)bcm_parse_tlvs_dot11(buf, buf_len, + DOT11_FBT_SUBELEM_ID_OCI, FALSE); + + if (!oci_ie) { + retval = BCME_NOTFOUND; + goto done; + } + + retval = bcm_ocv_validate_oci(chspec, oci_ie->data, oci_ie->len); + if (retval != BCME_OK) { + goto done; + } + +done: + return retval; +} +#endif /* WL_OCV */ +#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS) */ + +const uint8 * +wpa_array_cmp(int max_array, const uint8 *x, const uint8 *y, uint len) +{ + uint i; + const uint8 *ret = x; + + for (i = 0; i < len; i++) + if (x[i] != y[i]) + break; + + if (i == len) { + /* returning null will cause crash, return value used for copying */ + /* return first param in this case to close security loophole */ + return x; + } + if (max_array && (y[i] > x[i])) + ret = y; + if (!max_array && (y[i] < x[i])) + ret = y; + + return (ret); +} + +void +wpa_incr_array(uint8 *array, uint len) +{ + int i; + + for (i = (len-1); i >= 0; i--) + if (array[i]++ != 0xff) { + break; + } +} + +bool +bcmwpa_akm2WPAauth(uint8 *akm, uint32 *auth, bool sta_iswpa) +{ + uint i; + oui_akm_wpa_tbl_t wpa_auth_tbl_match[] = { + {WPA2_OUI, RSN_AKM_NONE, WPA_AUTH_NONE}, + {WPA2_OUI, RSN_AKM_UNSPECIFIED, WPA2_AUTH_UNSPECIFIED}, + {WPA2_OUI, RSN_AKM_PSK, WPA2_AUTH_PSK}, + {WPA2_OUI, RSN_AKM_FBT_1X, WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT}, + {WPA2_OUI, RSN_AKM_FBT_PSK, WPA2_AUTH_PSK | WPA2_AUTH_FT}, + {WPA2_OUI, RSN_AKM_SHA256_1X, WPA2_AUTH_1X_SHA256}, + {WPA2_OUI, RSN_AKM_SHA256_PSK, WPA2_AUTH_PSK_SHA256}, + {WPA2_OUI, RSN_AKM_FILS_SHA256, WPA2_AUTH_FILS_SHA256}, + {WPA2_OUI, RSN_AKM_FILS_SHA384, WPA2_AUTH_FILS_SHA384}, + {WPA2_OUI, RSN_AKM_FBT_SHA256_FILS, WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT}, + {WPA2_OUI, RSN_AKM_FBT_SHA384_FILS, WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT}, + {WPA2_OUI, RSN_AKM_SAE_PSK, WPA3_AUTH_SAE_PSK}, + {WPA2_OUI, RSN_AKM_SAE_FBT, WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT}, + {WPA2_OUI, RSN_AKM_OWE, WPA3_AUTH_OWE}, + {WPA2_OUI, RSN_AKM_SUITEB_SHA256_1X, WPA3_AUTH_1X_SUITE_B_SHA256}, + {WPA2_OUI, RSN_AKM_SUITEB_SHA384_1X, WPA3_AUTH_1X_SUITE_B_SHA384}, + {WFA_OUI, OSEN_AKM_UNSPECIFIED, WPA2_AUTH_UNSPECIFIED}, + {WFA_OUI, RSN_AKM_FBT_SHA256_FILS, WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT}, + {WFA_OUI, RSN_AKM_FBT_SHA384_FILS, WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT}, + {WFA_OUI, RSN_AKM_DPP, WPA3_AUTH_DPP_AKM}, + +#ifdef BCMWAPI_WAI + {WAPI_OUI, RSN_AKM_NONE, WAPI_AUTH_NONE}, + {WAPI_OUI, RSN_AKM_UNSPECIFIED, WAPI_AUTH_UNSPECIFIED}, + {WAPI_OUI, RSN_AKM_PSK, WAPI_AUTH_PSK}, +#endif /* BCMWAPI_WAI */ + + {WPA_OUI, RSN_AKM_NONE, WPA_AUTH_NONE}, + {WPA_OUI, RSN_AKM_UNSPECIFIED, WPA_AUTH_UNSPECIFIED}, + {WPA_OUI, RSN_AKM_PSK, WPA_AUTH_PSK}, + }; + + BCM_REFERENCE(sta_iswpa); + + for (i = 0; i < ARRAYSIZE(wpa_auth_tbl_match); i++) { + if (!memcmp(akm, wpa_auth_tbl_match[i].oui, DOT11_OUI_LEN)) { + if (wpa_auth_tbl_match[i].rsn_akm == akm[DOT11_OUI_LEN]) { + *auth = wpa_auth_tbl_match[i].wpa_auth; + return TRUE; + } + } + } + return FALSE; +} + +/* map cipher suite to internal WSEC_XXXX */ +/* cs points 4 byte cipher suite, and only the type is used for non CCX ciphers */ +bool +bcmwpa_cipher2wsec(uint8 *cipher, uint32 *wsec) +{ + +#ifdef BCMWAPI_WAI + if (!memcmp(cipher, WAPI_OUI, DOT11_OUI_LEN)) { + switch (WAPI_CSE_WPI_2_CIPHER(cipher[DOT11_OUI_LEN])) { + case WAPI_CIPHER_NONE: + *wsec = 0; + break; + case WAPI_CIPHER_SMS4: + *wsec = SMS4_ENABLED; + break; + 
default: + return FALSE; + } + return TRUE; + } +#endif /* BCMWAPI_WAI */ + + switch (cipher[DOT11_OUI_LEN]) { + case WPA_CIPHER_NONE: + *wsec = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + *wsec = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + *wsec = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + /* fall through */ + case WPA_CIPHER_AES_GCM: + /* fall through */ + case WPA_CIPHER_AES_GCM256: + *wsec = AES_ENABLED; + break; + +#ifdef BCMWAPI_WAI + case WAPI_CIPHER_SMS4: + *wsec = SMS4_ENABLED; + break; +#endif /* BCMWAPI_WAI */ + + default: + return FALSE; + } + return TRUE; +} + +#ifdef RSN_IE_INFO_STRUCT_RELOCATED +/* map WPA/RSN cipher to internal WSEC */ +uint32 +bcmwpa_wpaciphers2wsec(uint32 wpacipher) +{ + uint32 wsec = 0; + + switch (wpacipher) { + case BCM_BIT(WPA_CIPHER_WEP_40): + case BCM_BIT(WPA_CIPHER_WEP_104): + wsec = WEP_ENABLED; + break; + case BCM_BIT(WPA_CIPHER_TKIP): + wsec = TKIP_ENABLED; + break; + case BCM_BIT(WPA_CIPHER_AES_OCB): + /* fall through */ + case BCM_BIT(WPA_CIPHER_AES_CCM): + wsec = AES_ENABLED; + break; + case BCM_BIT(WPA_CIPHER_AES_GCM): + /* fall through */ + case BCM_BIT(WPA_CIPHER_AES_GCM256): + wsec = AES_ENABLED; + break; + +#ifdef BCMWAPI_WAI + case BCM_BIT(WAPI_CIPHER_SMS4): + wsec = SMS4_ENABLED; + break; +#endif /* BCMWAPI_WAI */ + + default: + break; + } + + return wsec; +} + +uint32 +wlc_convert_rsn_to_wsec_bitmap(uint32 ap_cipher_mask) +{ + + uint32 ap_wsec = 0; + uint32 tmp_mask = ap_cipher_mask; + uint32 c; + + FOREACH_BIT(c, tmp_mask) { + ap_wsec |= bcmwpa_wpaciphers2wsec(c); + } + + return ap_wsec; +} + +#else /* Not RSN_IE_INFO_STRUCT_RELOCATED */ +uint32 +bcmwpa_wpaciphers2wsec(uint8 wpacipher) +{ + uint32 wsec = 0; + + switch (wpacipher) { + case WPA_CIPHER_NONE: + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + wsec = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + wsec = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_OCB: + /* fall through */ + case WPA_CIPHER_AES_CCM: + wsec = AES_ENABLED; + break; + case WPA_CIPHER_AES_GCM: + /* fall through */ + case WPA_CIPHER_AES_GCM256: + wsec = AES_ENABLED; + break; + +#ifdef BCMWAPI_WAI + case WAPI_CIPHER_SMS4: + wsec = SMS4_ENABLED; + break; +#endif /* BCMWAPI_WAI */ + + default: + break; + } + + return wsec; +} +#endif /* RSN_IE_INFO_STRUCT_RELOCATED */ + +bool +bcmwpa_is_wpa_auth(uint32 auth) +{ + if ((auth == WPA_AUTH_NONE) || + (auth == WPA_AUTH_UNSPECIFIED) || + (auth == WPA_AUTH_PSK)) + return TRUE; + else + return FALSE; +} + +bool +bcmwpa_includes_wpa_auth(uint32 auth) +{ + if (auth & (WPA_AUTH_NONE | + WPA_AUTH_UNSPECIFIED | + WPA_AUTH_PSK)) + return TRUE; + else + return FALSE; +} + +bool +bcmwpa_is_rsn_auth(uint32 auth) +{ + auth = auth & ~WPA2_AUTH_FT; + + if ((auth == WPA2_AUTH_UNSPECIFIED) || + (auth == WPA2_AUTH_PSK) || + (auth == BRCM_AUTH_PSK) || + (auth == WPA2_AUTH_1X_SHA256) || + (auth == WPA2_AUTH_PSK_SHA256) || + (auth == WPA3_AUTH_SAE_PSK) || + (auth == WPA3_AUTH_OWE) || + WPA2_AUTH_IS_FILS(auth) || + (auth == WPA3_AUTH_1X_SUITE_B_SHA256) || + (auth == WPA3_AUTH_1X_SUITE_B_SHA384) || + (auth == WPA3_AUTH_PSK_SHA384) || + (auth == WPA3_AUTH_DPP_AKM)) { + return TRUE; + } else { + return FALSE; + } +} + +bool +bcmwpa_includes_rsn_auth(uint32 auth) +{ + if (auth & (WPA2_AUTH_UNSPECIFIED | + WPA2_AUTH_PSK | + BRCM_AUTH_PSK | WPA2_AUTH_1X_SHA256 | WPA2_AUTH_PSK_SHA256 | + WPA2_AUTH_IS_FILS(auth) | WPA3_AUTH_SAE_PSK | WPA3_AUTH_OWE | + WPA3_AUTH_1X_SUITE_B_SHA256 | WPA3_AUTH_1X_SUITE_B_SHA384 | + WPA3_AUTH_PSK_SHA384 | 
WPA3_AUTH_DPP_AKM))
+		return TRUE;
+	else
+		return FALSE;
+}
+
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+/* decode unicast/multicast cipher in RSNIE */
+static int
+bcmwpa_decode_cipher_suite(rsn_ie_info_t *info, const uint8 **ptr_inc, uint ie_len, uint
+	*remain_len, uint16 *p_count)
+{
+	const wpa_suite_ucast_t *ucast;
+	const wpa_suite_mcast_t *mcast;
+	uint i;
+
+	if (!(*remain_len)) {
+		info->g_cipher = WPA_CIPHER_UNSPECIFIED;
+		info->p_ciphers = WPA_P_CIPHERS_UNSPECIFIED;
+		goto done; /* only have up to ver */
+	}
+	*ptr_inc += ie_len - *remain_len;
+
+	if (*remain_len < sizeof(wpa_suite_mcast_t)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+	mcast = (const wpa_suite_mcast_t *)*ptr_inc;
+
+	if (IS_WPA_CIPHER(mcast->type)) {
+		info->g_cipher = mcast->type;
+	} else {
+		info->parse_status = BCME_BAD_IE_DATA;
+		goto done;
+	}
+
+	/* for rsn pairwise cipher suite */
+	*ptr_inc += sizeof(wpa_suite_mcast_t);
+	*remain_len -= sizeof(wpa_suite_mcast_t);
+
+	if (!(*remain_len)) {
+		info->p_ciphers = WPA_P_CIPHERS_UNSPECIFIED;
+		info->sta_cipher = WPA_CIPHER_UNSPECIFIED;
+		goto done;
+	}
+
+	ucast = (const wpa_suite_ucast_t *)*ptr_inc;
+
+	if ((*remain_len) < sizeof(ucast->count)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	if (!ucast->count.low && !ucast->count.high) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	*p_count = ltoh16_ua(&ucast->count);
+	if (info->dev_type == DEV_STA && *p_count != 1u) {
+		info->parse_status = BCME_BAD_IE_DATA;
+		goto done;
+	}
+	if ((*remain_len) < (*p_count * WPA_SUITE_LEN + sizeof(ucast->count))) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	if (info->dev_type == DEV_STA) {
+		if (IS_WPA_CIPHER(ucast->list[0].type)) {
+			/* update the pairwise cipher */
+			info->sta_cipher = ucast->list[0].type;
+		} else {
+			info->parse_status = BCME_BAD_IE_DATA;
+			goto done;
+		}
+	} else {
+		for (i = 0; i < *p_count; i++) {
+			if (IS_WPA_CIPHER(ucast->list[i].type)) {
+				info->p_ciphers |= BIT(ucast->list[i].type);
+				info->rsn_p_ciphers = info->p_ciphers;
+			} else {
+				info->parse_status = BCME_BAD_IE_DATA;
+				goto done;
+			}
+		}
+	}
+
+	/* update buffer ptr and remaining length */
+	*ptr_inc += (*p_count * WPA_SUITE_LEN) + sizeof(ucast->count);
+	*remain_len -= (*p_count * WPA_SUITE_LEN) + sizeof(ucast->count);
+
+done:
+
+	if (info->parse_status == BCME_OK) {
+		if (info->g_cipher == WPA_CIPHER_UNSPECIFIED) {
+			info->g_cipher = WPA_CIPHER_AES_CCM;
+		}
+		if (info->p_ciphers == WPA_P_CIPHERS_UNSPECIFIED) {
+			info->p_ciphers = BIT(WPA_CIPHER_AES_CCM);
+			info->rsn_p_ciphers = info->p_ciphers;
+		}
+	}
+
+	return info->parse_status;
+}
+
+/* sta_akm/sta_cipher must be set before this call */
+int
+bcmwpa_rsnie_eapol_key_len(rsn_ie_info_t *info)
+{
+	info->pmk_len = bcmwpa_eapol_key_length(EAPOL_KEY_PMK, info->sta_akm, 0);
+	info->kck_mic_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK_MIC, info->sta_akm, 0);
+	info->kck_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK, info->sta_akm, 0);
+	info->kek_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK, info->sta_akm, 0);
+	info->tk_len = bcmwpa_eapol_key_length(EAPOL_KEY_TK, 0, info->sta_cipher);
+	info->ptk_len = info->kck_len + info->kek_len + info->tk_len;
+#if defined(WL_FILS) && defined(WLFBT)
+	info->kck2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK2, info->sta_akm, 0);
+	info->kek2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK2, info->sta_akm, 0);
+	if (WPA_IS_FILS_FT_AKM(info->sta_akm)) {
+		info->ptk_len += (info->kck2_len + info->kek2_len);
+	}
+#endif /* WL_FILS && WLFBT */
+	return BCME_OK;
+}
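+
+/* Illustrative sketch, not part of the original driver: how the per-key
+ * lengths filled in by bcmwpa_rsnie_eapol_key_len() partition the PTK.
+ * For a PSK association negotiating CCMP-128 the defaults come out to
+ * KCK 16 + KEK 16 + TK 16 = PTK 48 bytes. The guard macro and example_*
+ * name are hypothetical.
+ */
+#ifdef BCMWPA_EXAMPLE /* hypothetical guard; never defined by this driver */
+static void
+example_ptk_partition(rsn_ie_info_t *info)
+{
+	uint base_len;
+
+	(void)bcmwpa_rsnie_eapol_key_len(info);
+	base_len = info->kck_len + info->kek_len + info->tk_len;
+
+	/* PTK layout: [0 .. kck_len) = KCK (MIC key),
+	 * [kck_len .. kck_len + kek_len) = KEK,
+	 * [kck_len + kek_len .. base_len) = TK.
+	 * For non-FILS-FT AKMs ptk_len equals this base split; the
+	 * FILS-FT AKMs additionally append KCK2 and KEK2.
+	 */
+	BCM_REFERENCE(base_len);
+}
+#endif /* BCMWPA_EXAMPLE */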
+/* Extract and store information from WPA or RSN IEs
+ *
+ * called after either
+ * - an association request has been built (STA),
+ * - an association was received (AP)
+ * - a probe request has been built (AP)
+ * - a probe response was received (STA)
+ *
+ * All available information is extracted to be used for subsequent
+ * bss pruning, association request validation, key descriptor computation etc.
+ *
+ * To be expanded as needed.
+ *
+ * ie: RSN IE input
+ * rsn_info: parsed information. Placed in either bsscfg for self, or scb for peer.
+ * dev_type: STA_RSN or AP_RSN
+ *
+ * Return : parse status.
+ * NOTE: the parse status is also saved in the parse_status field.
+ * NOTE 2 : the IE itself is copied at the end of the structure. Since there is
+ * no reference to the osh available here, the allocation has to happen outside
+ * and so the structure cannot be zeroed in this function.
+ * For the STA, it should happen every time.
+ * For the AP, it should happen right after a new beacon/probe has been acquired.
+ */
+
+int
+bcmwpa_parse_rsnie(const bcm_tlv_t *ie, rsn_ie_info_t *info, device_type_t dev_type)
+{
+
+	const uint8 *ptr_inc = NULL;
+	const wpa_suite_mcast_t *mcast;
+	const wpa_suite_auth_key_mgmt_t *mgmt;
+	const wpa_pmkid_list_t *pmkid_list;
+	uint32 remain_len = 0, i;
+	uint8 auth_ie_type;
+	uint16 p_count = 0;
+	uint16 akm_count;
+
+	ASSERT(info != NULL);
+
+	/* this function might be called from a place where there
+	 * is no error detection,
+	 * e.g. from the iem callback. Store status here.
+	 */
+
+	info->parse_status = BCME_OK;
+
+	if (!ie) {
+		info->parse_status = BCME_BADARG;
+		goto done;
+	}
+
+	/* For AP, do not zero this structure since there could be multiple
+	 * IEs. In that case, add to the existing
+	 * bits in field (ciphers, akms) as necessary.
+	 */
+	if (dev_type == DEV_AP) {
+		/* if already created, check device type */
+		if (info->dev_type != DEV_NONE) {
+			if (info->dev_type != DEV_AP) {
+				info->parse_status = BCME_BADARG;
+				goto done;
+			}
+		}
+	}
+	info->dev_type = dev_type;
+	ptr_inc = ie->data;
+
+	/* decode auth IE (WPA vs RSN). Fill in the auth_ie_type and version.
+	 * Modify remain_len to indicate the position of the pointer.
+ */ + /* NOTE the status field will be updated in this call */ + if (bcmwpa_decode_ie_type(ie, info, &remain_len, &auth_ie_type) != BCME_OK) { + goto done; + } + + /* decode multicast, unicast ciphers */ + if (bcmwpa_decode_cipher_suite(info, &ptr_inc, ie->len, &remain_len, &p_count) != BCME_OK) { + goto done; + } + + if (!(remain_len)) { + info->akms = BIT(RSN_AKM_UNSPECIFIED); + goto done; + } + + mgmt = (const wpa_suite_auth_key_mgmt_t *)ptr_inc; + + if (remain_len < sizeof(mgmt->count)) { + info->parse_status = BCME_BADLEN; + goto done; + } + + akm_count = ltoh16_ua(&mgmt->count); + + if (!akm_count) { + info->parse_status = BCME_BADARG; + goto done; + } + + if (dev_type == DEV_STA && akm_count != 1) { + info->parse_status = BCME_BADARG; + goto done; + } + + if ((remain_len) < (akm_count * WPA_SUITE_LEN + sizeof(mgmt->count))) { + info->parse_status = BCME_BADLEN; + goto done; + } + + if (dev_type == DEV_STA) { + info->sta_akm = mgmt->list[0].type; + } + for (i = 0; i < akm_count; i++) { + if (bcmwpa_is_valid_akm(mgmt->list[i].type) == BCME_OK) { + ASSERT((mgmt->list[i].type) < + (sizeof(info->akms) * NBBY)); + info->akms |= BIT(mgmt->list[i].type); + } + } + + /* save IE dependent values in their respective fields */ + if (dev_type == DEV_AP) { + if (auth_ie_type == RSN_AUTH_IE) { + info->rsn_akms = info->akms; + } else if (auth_ie_type == WPA_AUTH_IE) { + info->wpa_akms = info->akms; + info->wpa_p_ciphers = info->p_ciphers; + } + } + + /* as a STA, at this point, we can compute the key descriptor version */ + if (dev_type == DEV_STA) { + info->key_desc = wlc_calc_rsn_desc_version(info); + /* For STA, we can set the auth ie */ + if (auth_ie_type == RSN_AUTH_IE) { + info->auth_ie = info->rsn_ie; + info->auth_ie_len = info->rsn_ie_len; + } else { + info->auth_ie = info->wpa_ie; + info->auth_ie_len = info->wpa_ie_len; + } + } + + /* RSN AKM/cipher suite related EAPOL key length update */ + bcmwpa_rsnie_eapol_key_len(info); + + /* for rsn capabilities */ + ptr_inc += akm_count * WPA_SUITE_LEN + sizeof(mgmt->count); + remain_len -= akm_count * WPA_SUITE_LEN + sizeof(mgmt->count); + + if (!(remain_len)) { + goto done; + } + if (remain_len < RSN_CAP_LEN) { + info->parse_status = BCME_BADLEN; + goto done; + } + + if (ie->id == DOT11_MNG_RSN_ID) { + info->caps = ltoh16_ua(ptr_inc); + } + + /* check if AKMs require MFP capable to be set */ + if ((info->akms & RSN_MFPC_AKM_MASK) && !(info->caps & RSN_CAP_MFPC)) { + /* NOTE: Acting as WPA3 CTT testbed device, it requires to send assoc request frame + with user provided mfp value as is. So should not return error here. + */ +#ifndef WPA3_CTT + info->parse_status = BCME_EPERM; + goto done; +#endif /* WPA3_CTT */ + } + + /* for rsn PMKID */ + ptr_inc += RSN_CAP_LEN; + remain_len -= RSN_CAP_LEN; + + if (!(remain_len)) { + goto done; + } + + /* here's possible cases after RSN_CAP parsed + * a) pmkid_count 2B(00 00) + * b) pmkid_count 2B(00 00) + BIP 4B + * c) pmkid_count 2B(non zero) + pmkid_count * 16B + * d) pmkid_count 2B(non zero) + pmkid_count * 16B + BIP 4B + */ + + /* pmkids_offset set to + * 1) if pmkid_count field(2B) present, point to first PMKID offset in the RSN ID + * no matter what pmkid_count value is. (true, even if pmkid_count == 00 00) + * 2) if pmkid_count field(2B) not present, it shall be zero. 
+ */
+
+	pmkid_list = (const wpa_pmkid_list_t*)ptr_inc;
+
+	if ((remain_len) < sizeof(pmkid_list->count)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	info->pmkid_count = (uint8)ltoh16_ua(&pmkid_list->count);
+	ptr_inc += sizeof(pmkid_list->count);
+	remain_len -= sizeof(pmkid_list->count);
+
+	if (remain_len < (uint32)(info->pmkid_count * WPA2_PMKID_LEN)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	info->pmkids_offset = ie->len + TLV_HDR_LEN - remain_len;
+	/* for rsn group management cipher suite */
+	ptr_inc += info->pmkid_count * WPA2_PMKID_LEN;
+	remain_len -= info->pmkid_count * WPA2_PMKID_LEN;
+
+	if (!(remain_len)) {
+		goto done;
+	}
+	/*
+	 * from WPA2_Security_Improvements_Test_Plan_v1.0
+	 * 4.2.4 APUT RSNE bounds verification using WPA2-PSK
+	 * May contain an RSNE extensible element at this point
+	 */
+	if (remain_len < sizeof(wpa_suite_mcast_t)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	mcast = (const wpa_suite_mcast_t *)ptr_inc;
+	if (IS_VALID_BIP_CIPHER((rsn_cipher_t)mcast->type)) {
+		info->g_mgmt_cipher = (rsn_cipher_t)mcast->type;
+	}
+
+done:
+	return info->parse_status;
+}
+
+/* Determine if the IE is of WPA or RSN type. Decode
+ * up to the version field. Modify the remaining len parameter to
+ * indicate where the next field is.
+ * Store and return error status.
+ */
+
+int
+bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info, uint32 *remaining,
+	uint8 *type)
+{
+	const uint8 * ptr_inc = (const uint8 *)ie->data;
+	uint32 remain_len = ie->len;
+	uint8 version, version_len;
+
+	if (ie->id == DOT11_MNG_WPA_ID) {
+		/* min len check */
+		if (remain_len < WPA_IE_FIXED_LEN) {
+			info->parse_status = BCME_BADLEN;
+			goto done;
+		}
+		/* WPA IE */
+		if (memcmp(WPA_OUI, ie->data, WPA_OUI_LEN)) {
+			/* bad OUI */
+			info->parse_status = BCME_BADARG;
+			goto done;
+		}
+		ptr_inc += WPA_OUI_LEN;
+		if (*ptr_inc == WPA_OUI_TYPE) {
+			*type = WPA_AUTH_IE;
+		} else if (*ptr_inc == WFA_OUI_TYPE_OSEN) {
+			*type = OSEN_AUTH_IE;
+		} else {
+			/* wrong type */
+			info->parse_status = BCME_BADARG;
+			goto done;
+		}
+
+		ptr_inc++;
+		remain_len -= WPA_OUI_LEN + 1u;
+		version_len = WPA_VERSION_LEN;
+	}
+	else if (ie->id == DOT11_MNG_RSN_ID) {
+		if (remain_len < WPA2_VERSION_LEN) {
+			info->parse_status = BCME_BADLEN;
+			goto done;
+		}
+		/* RSN IE */
+		*type = RSN_AUTH_IE;
+		version_len = WPA2_VERSION_LEN;
+	} else {
+		printf("IE ID %d\n", ie->id);
+		/* TODO : add support for CCX, WAPI ? */
+		info->parse_status = BCME_UNSUPPORTED;
+		goto done;
+	}
+	info->auth_ie_type |= *type;
+	/* mask down to uint8 for Windows build */
+	version = 0xff & ltoh16_ua(ptr_inc);
+	if (version > MAX_RSNE_SUPPORTED_VERSION) {
+		info->parse_status = BCME_UNSUPPORTED;
+		goto done;
+	}
+
+	info->version = (uint8)version;
+	*remaining = remain_len - version_len;
+done:
+	return info->parse_status;
+}
+
+/* rsn info allocation management.
+ *
+ * In some cases, the rsn ie info structures are embedded in the scan results
+ * which can be shared by different lists.
+ * To keep track of their allocation, we use a reference counter.
+ * The counter is incremented on demand by rsn_ie_info_add_ref()
+ * at the time the reference is shared.
+ * It is decremented in rsn_ie_info_rel_ref.
+ * When ref_count gets to 0, bcmwpa_rsn_ie_info_free_mem
+ * is called to free the whole structure.
+ */ + +/* free rsn_ie and wpa_ie, if any, and zero the rsn_info */ +void +bcmwpa_rsn_ie_info_reset(rsn_ie_info_t *rsn_info, osl_t *osh) +{ + uint8 ref_count; + if (rsn_info == NULL) { + return; + } + ref_count = rsn_info->ref_count; + MFREE(osh, rsn_info->rsn_ie, rsn_info->rsn_ie_len); + MFREE(osh, rsn_info->wpa_ie, rsn_info->wpa_ie_len); + MFREE(osh, rsn_info->rsnxe, rsn_info->rsnxe_len); + bzero(rsn_info, sizeof(*rsn_info)); + rsn_info->ref_count = ref_count; + +} + +static +void bcmwpa_rsn_ie_info_free_mem(rsn_ie_info_t **rsn_info, osl_t *osh) +{ + bcmwpa_rsn_ie_info_reset(*rsn_info, osh); + MFREE(osh, *rsn_info, sizeof(**rsn_info)); + *rsn_info = NULL; +} + +void bcmwpa_rsn_ie_info_rel_ref(rsn_ie_info_t **rsn_info, osl_t *osh) +{ + + if (rsn_info == NULL || *rsn_info == NULL) { + return; + } + + /* already freed ? */ + if ((*rsn_info)->ref_count == 0) { + ASSERT(0); + return; + } + /* decrement ref count */ + (*rsn_info)->ref_count -= 1; + /* clear reference. */ + if ((*rsn_info)->ref_count > 0) { + *rsn_info = NULL; + return; + } + /* free memory and clear reference */ + bcmwpa_rsn_ie_info_free_mem(rsn_info, osh); +} + +int +bcmwpa_rsn_ie_info_add_ref(rsn_ie_info_t *rsn_info) +{ + int status = BCME_OK; + if (rsn_info == NULL) { + goto done; + } + if (rsn_info->ref_count == 0) { + /* don't increase from 0, which means this structure has been freed earlier. + * That reference should not exist anymore. + */ + ASSERT(0); + status = BCME_BADARG; + goto done; + } + rsn_info->ref_count++; +done: + return status; +} + +#else /* Not RSN_IE_INFO_STRUCT_RELOCATED */ + +int +bcmwpa_parse_rsnie(const bcm_tlv_t *ie, rsn_ie_info_t *info, device_type_t dev_type) +{ + + const uint8 *ptr_inc = NULL; + const wpa_suite_ucast_t *ucast; + const wpa_suite_mcast_t *mcast; + const wpa_suite_auth_key_mgmt_t *mgmt; + const wpa_pmkid_list_t *pmkid_list; + uint32 remain_len = 0, i; + + ASSERT(info != NULL); + + /* this function might be called from place where there + * is no error detection. + * e.g. fron the iem callback. Store status here. + */ + + info->parse_status = BCME_OK; + + if (!ie) { + info->parse_status = BCME_BADARG; + goto done; + } + + /* For AP, do not zero this structure since there could be multiple + * IEs. In that case, add to the existing + * bits in field (ciphers, akms) as necessary. + */ + if (dev_type != DEV_AP) { + bzero(info, sizeof(*info)); + } else { + /* if already created, check device type */ + if (info->dev_type != DEV_NONE) { + if (info->dev_type != DEV_AP) { + info->parse_status = BCME_BADARG; + goto done; + } + } + } + info->dev_type = dev_type; + ptr_inc = ie->data; + + /* decode auth IE (WPA vs RSN). Fill in the auth_ie_type and version. + * Modify remain_len to indicate the position of the pointer. 
+#else /* Not RSN_IE_INFO_STRUCT_RELOCATED */
+
+int
+bcmwpa_parse_rsnie(const bcm_tlv_t *ie, rsn_ie_info_t *info, device_type_t dev_type)
+{
+	const uint8 *ptr_inc = NULL;
+	const wpa_suite_ucast_t *ucast;
+	const wpa_suite_mcast_t *mcast;
+	const wpa_suite_auth_key_mgmt_t *mgmt;
+	const wpa_pmkid_list_t *pmkid_list;
+	uint32 remain_len = 0, i;
+
+	ASSERT(info != NULL);
+
+	/* this function might be called from a place where there
+	 * is no error detection,
+	 * e.g. from the IE management callback. Store status here.
+	 */
+	info->parse_status = BCME_OK;
+
+	if (!ie) {
+		info->parse_status = BCME_BADARG;
+		goto done;
+	}
+
+	/* For AP, do not zero this structure since there could be multiple
+	 * IEs. In that case, add to the existing
+	 * bits in field (ciphers, akms) as necessary.
+	 */
+	if (dev_type != DEV_AP) {
+		bzero(info, sizeof(*info));
+	} else {
+		/* if already created, check device type */
+		if (info->dev_type != DEV_NONE) {
+			if (info->dev_type != DEV_AP) {
+				info->parse_status = BCME_BADARG;
+				goto done;
+			}
+		}
+	}
+	info->dev_type = dev_type;
+	ptr_inc = ie->data;
+
+	/* decode auth IE (WPA vs RSN). Fill in the auth_ie_type and version.
+	 * Modify remain_len to indicate the position of the pointer.
+	 */
+	/* NOTE the status field will be updated in this call */
+	if (bcmwpa_decode_ie_type(ie, info, &remain_len) != BCME_OK) {
+		goto done;
+	}
+
+	if (!(remain_len)) {
+		info->g_cipher = WPA_CIPHER_NONE;
+		goto done; /* only have up to the version field */
+	}
+	ptr_inc += ie->len - remain_len;
+
+	if (remain_len < sizeof(wpa_suite_mcast_t)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+	mcast = (const wpa_suite_mcast_t *)ptr_inc;
+
+	if (IS_WPA_CIPHER(mcast->type)) {
+		info->g_cipher = mcast->type;
+	}
+
+	/* for rsn pairwise cipher suite */
+	ptr_inc += sizeof(wpa_suite_mcast_t);
+	remain_len -= sizeof(wpa_suite_mcast_t);
+
+	if (!(remain_len)) {
+		goto done;
+	}
+
+	ucast = (const wpa_suite_ucast_t *)ptr_inc;
+
+	if ((remain_len) < sizeof(ucast->count)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	if (!ucast->count.low && !ucast->count.high) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	info->p_count = (uint8)ltoh16_ua(&ucast->count);
+
+	if (dev_type == DEV_STA && info->p_count != 1) {
+		info->parse_status = BCME_BADARG;
+		goto done;
+	}
+	if ((remain_len) < (info->p_count * WPA_SUITE_LEN + sizeof(ucast->count))) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	if (IS_WPA_CIPHER(ucast->list[0].type)) {
+		/* update the pairwise cipher */
+		if (dev_type == DEV_STA) {
+			info->sta_cipher = ucast->list[0].type;
+		} else {
+			for (i = 0; i < info->p_count; i++) {
+				if (IS_WPA_CIPHER(ucast->list[i].type)) {
+					info->p_ciphers |= BIT(ucast->list[i].type);
+				} else {
+					info->parse_status = BCME_BAD_IE_DATA;
+					goto done;
+				}
+			}
+		}
+	} else {
+		info->parse_status = BCME_BAD_IE_DATA;
+		goto done;
+	}
+
+	/* for rsn AKM authentication */
+	ptr_inc += info->p_count * WPA_SUITE_LEN + sizeof(ucast->count);
+	remain_len -= (info->p_count * WPA_SUITE_LEN + sizeof(ucast->count));
+
+	mgmt = (const wpa_suite_auth_key_mgmt_t *)ptr_inc;
+
+	if (remain_len < sizeof(mgmt->count)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	info->akm_count = (uint8)ltoh16_ua(&mgmt->count);
+
+	if (!info->akm_count) {
+		info->parse_status = BCME_BADARG;
+		goto done;
+	}
+
+	if (dev_type == DEV_STA && info->akm_count != 1) {
+		info->parse_status = BCME_BADARG;
+		goto done;
+	}
+
+	if ((remain_len) < (info->akm_count * WPA_SUITE_LEN + sizeof(mgmt->count))) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	if (dev_type == DEV_STA) {
+		info->sta_akm = mgmt->list[0].type;
+	}
+	for (i = 0; i < info->akm_count; i++) {
+		if (bcmwpa_is_valid_akm(mgmt->list[i].type) == BCME_OK) {
+			ASSERT((mgmt->list[i].type) <
+				(sizeof(info->akms) * NBBY));
+			info->akms |= BIT(mgmt->list[i].type);
+		}
+	}
+
+	/* RSN AKM/cipher suite related EAPOL key length update */
+	info->pmk_len = bcmwpa_eapol_key_length(EAPOL_KEY_PMK, info->sta_akm, 0);
+	info->kck_mic_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK_MIC, info->sta_akm, 0);
+	info->kck_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK, info->sta_akm, 0);
+	info->kek_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK, info->sta_akm, 0);
+	info->tk_len = bcmwpa_eapol_key_length(EAPOL_KEY_TK, 0, info->sta_cipher);
+	info->ptk_len = info->kck_mic_len + info->kek_len + info->tk_len;
+#if defined(WL_FILS) && defined(WLFBT)
+	info->kck2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK2, info->sta_akm, 0);
+	info->kek2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK2, info->sta_akm, 0);
+#endif /* WL_FILS && WLFBT */
+
+	/* for rsn capabilities */
+	ptr_inc += info->akm_count * WPA_SUITE_LEN + sizeof(mgmt->count);
+	remain_len -= info->akm_count * WPA_SUITE_LEN + sizeof(mgmt->count);
+
+	/* as a STA, at this point, we can compute the key descriptor version */
+	if (dev_type == DEV_STA) {
+		info->key_desc = wlc_calc_rsn_desc_version(info);
+	}
+
+	if (!(remain_len)) {
+		goto done;
+	}
+	if (remain_len < RSN_CAP_LEN) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	if (ie->id == DOT11_MNG_RSN_ID) {
+		info->caps = ltoh16_ua(ptr_inc);
+	}
+
+	/* for WFA: if MFP is required, check that we are using a SHA256 AKM
+	 * or higher and nothing else.
+	 * In case of MFP Required and MFP Capable, do not enforce the AKM check.
+	 */
+	if ((info->caps & RSN_CAP_MFPR) && !(info->akms & (1u << RSN_AKM_PSK))) {
+		if ((info->akms & (AKM_SHA256_MASK | AKM_SHA384_MASK)) == 0 ||
+			(info->akms & ~(AKM_SHA256_MASK | AKM_SHA384_MASK))) {
+			info->parse_status = BCME_EPERM;
+			goto done;
+		}
+	}
+
+	/* check if AKMs require MFP capable to be set */
+	if ((info->akms & RSN_MFPC_AKM_MASK) && !(info->caps & RSN_CAP_MFPC)) {
+		info->parse_status = BCME_EPERM;
+		goto done;
+	}
+
+	/* for rsn PMKID */
+	ptr_inc += RSN_CAP_LEN;
+	remain_len -= RSN_CAP_LEN;
+
+	if (!(remain_len)) {
+		goto done;
+	}
+
+	pmkid_list = (const wpa_pmkid_list_t*)ptr_inc;
+
+	if ((remain_len) < sizeof(pmkid_list->count)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	info->pmkid_count = (uint8)ltoh16_ua(&pmkid_list->count);
+	ptr_inc += sizeof(pmkid_list->count);
+	remain_len -= sizeof(pmkid_list->count);
+
+	if (info->pmkid_count) {
+		if (remain_len < (uint32)(info->pmkid_count * WPA2_PMKID_LEN)) {
+			info->parse_status = BCME_BADLEN;
+			goto done;
+		}
+		info->pmkids_offset = ie->len + TLV_HDR_LEN - remain_len;
+		/* for rsn group management cipher suite */
+		ptr_inc += info->pmkid_count * WPA2_PMKID_LEN;
+		remain_len -= info->pmkid_count * WPA2_PMKID_LEN;
+	}
+
+	if (!(remain_len)) {
+		goto done;
+	}
+	/*
+	 * from WPA2_Security_Improvements_Test_Plan_v1.0
+	 * 4.2.4 APUT RSNE bounds verification using WPA2-PSK
+	 * May contain an RSNE extensible element at this point
+	 */
+	if (remain_len < sizeof(wpa_suite_mcast_t)) {
+		info->parse_status = BCME_BADLEN;
+		goto done;
+	}
+
+	mcast = (const wpa_suite_mcast_t *)ptr_inc;
+	if (IS_VALID_BIP_CIPHER((rsn_cipher_t)mcast->type)) {
+		info->g_mgmt_cipher = (rsn_cipher_t)mcast->type;
+	}
+
+done:
+	return info->parse_status;
+}
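The key-length fields filled in near the end of the parser above drive all later EAPOL-key handling. The actual numbers come from eapol_key_lookup_tbl, which is defined elsewhere in this file; for the common WPA2-PSK with CCMP-128 case the standard 802.11 values work out as in this small sketch:

#include <stdio.h>

/* standard WPA2-PSK (AKM 00-0F-AC:2) with CCMP-128 lengths per 802.11;
 * the driver derives the same numbers from its eapol_key_lookup_tbl */
enum { KCK_LEN = 16, KEK_LEN = 16, TK_CCMP_LEN = 16 };

int main(void)
{
	int ptk_len = KCK_LEN + KEK_LEN + TK_CCMP_LEN; /* mirrors info->ptk_len above */
	printf("PTK = KCK(%d) + KEK(%d) + TK(%d) = %d bytes\n",
		KCK_LEN, KEK_LEN, TK_CCMP_LEN, ptk_len);
	return 0;
}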
+/* Determine if the IE is of WPA or RSN type. Decode
+ * up to the version field. Modify the remaining len parameter to
+ * indicate where the next field is.
+ * Store and return error status.
+ */
+
+int
+bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info, uint32 *remaining)
+{
+	const uint8 *ptr_inc = (const uint8 *)ie->data;
+	uint32 remain_len = ie->len;
+	uint8 version, version_len;
+
+	if (ie->id == DOT11_MNG_WPA_ID) {
+		/* min len check */
+		if (remain_len < WPA_IE_FIXED_LEN) {
+			info->parse_status = BCME_BADLEN;
+			goto done;
+		}
+		/* WPA IE */
+		if (memcmp(WPA_OUI, ie->data, WPA_OUI_LEN)) {
+			/* bad OUI */
+			info->parse_status = BCME_BADARG;
+			goto done;
+		}
+		ptr_inc += WPA_OUI_LEN;
+		if (*ptr_inc != WPA_OUI_TYPE) {
+			/* wrong type */
+			info->parse_status = BCME_BADARG;
+			goto done;
+		}
+		ptr_inc++;
+		remain_len -= WPA_OUI_LEN + 1u;
+		info->auth_ie_type |= WPA_AUTH_IE;
+		version_len = WPA_VERSION_LEN;
+	} else if (ie->id == DOT11_MNG_RSN_ID) {
+		if (remain_len < WPA2_VERSION_LEN) {
+			info->parse_status = BCME_BADLEN;
+			goto done;
+		}
+		/* RSN IE */
+		info->auth_ie_type |= RSN_AUTH_IE;
+		version_len = WPA2_VERSION_LEN;
+	} else {
+		/* TODO : add support for CCX, WAPI ? */
+		info->parse_status = BCME_UNSUPPORTED;
+		goto done;
+	}
+
+	/* mask down to uint8 for Windows build */
+	version = 0xff & ltoh16_ua(ptr_inc);
+	if (version > MAX_RSNE_SUPPORTED_VERSION) {
+		info->parse_status = BCME_UNSUPPORTED;
+		goto done;
+	}
+
+	info->version = (uint8)version;
+	*remaining = remain_len - version_len;
+done:
+	return info->parse_status;
+}
+
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
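Both variants of bcmwpa_decode_ie_type() hinge on the same wire-format difference: the WPA IE is a vendor-specific element that carries an OUI and OUI type before its version, while the RSN IE starts with the version immediately. A standalone illustration (hypothetical bytes, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* vendor-specific WPA IE: id 221 (0xdd), Microsoft OUI 00:50:F2, OUI type 1, then version */
	const uint8_t wpa_ie[] = { 0xdd, 0x16, 0x00, 0x50, 0xf2, 0x01, 0x01, 0x00 };
	/* RSN IE: id 48 (0x30); the version follows the length byte immediately */
	const uint8_t rsn_ie[] = { 0x30, 0x14, 0x01, 0x00 };

	/* this offset difference is exactly why the decoder advances ptr_inc
	 * past OUI and type for WPA before reading the version */
	printf("WPA version (body offset 4): %u\n", wpa_ie[6] | (wpa_ie[7] << 8));
	printf("RSN version (body offset 0): %u\n", rsn_ie[2] | (rsn_ie[3] << 8));
	return 0;
}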
+/* return the key descriptor version based on the AKM suite
+ * applicable only for STA with RSN
+ */
+static uint16
+wlc_calc_rsn_desc_version(const rsn_ie_info_t *rsn_info)
+{
+	uint16 key_desc_ver = WPA_KEY_DESC_V0;
+	uint8 akm;
+
+	ASSERT(rsn_info != NULL);
+	ASSERT(rsn_info->dev_type == DEV_STA);
+	akm = rsn_info->sta_akm;
+
+	/* Refer Draft 802.11REVmd_D1.0.pdf Section 12.7.2 */
+	if ((akm == RSN_AKM_UNSPECIFIED) ||
+		(akm == RSN_AKM_PSK)) {
+		if ((rsn_info->sta_cipher == WPA_CIPHER_TKIP) ||
+			(rsn_info->sta_cipher == WPA_CIPHER_NONE)) {
+			key_desc_ver = WPA_KEY_DESC_V1;
+		} else if ((rsn_info->sta_cipher != WPA_CIPHER_TKIP) ||
+			(rsn_info->g_cipher != WPA_CIPHER_TKIP)) {
+			key_desc_ver = WPA_KEY_DESC_V2;
+		}
+	} else if ((akm == RSN_AKM_FBT_1X) ||
+		(akm == RSN_AKM_FBT_PSK) ||
+		(akm == RSN_AKM_SHA256_1X) ||
+		(akm == RSN_AKM_SHA256_PSK)) {
+		key_desc_ver = WPA_KEY_DESC_V3;
+	}
+	return key_desc_ver;
+}
+
+/* get EAPOL key length based on RSN IE AKM/Cipher(unicast) suite
+ * key: EAPOL key type
+ * akm: RSN AKM suite selector
+ * cipher: RSN unicast cipher suite selector
+ * return: key length found in matching key_length_entry table
+ */
+uint8
+bcmwpa_eapol_key_length(eapol_key_type_t key, rsn_akm_t akm, rsn_cipher_t cipher)
+{
+	uint i;
+	uint8 key_length = 0;
+	uint8 suite;
+	const key_length_entry_t *key_entry = NULL;
+
+	if (key == EAPOL_KEY_TK) {
+		suite = cipher;
+	} else {
+		suite = akm;
+	}
+	for (i = 0; i < ARRAYSIZE(eapol_key_lookup_tbl); i++) {
+		if (eapol_key_lookup_tbl[i].key == key) {
+			key_entry = eapol_key_lookup_tbl[i].key_entry;
+			break;
+		}
+	}
+
+	if (key_entry) {
+		i = 0;
+		do {
+			if (key_entry[i].suite == suite || key_entry[i].suite == 0) {
+				key_length = key_entry[i].len;
+				break;
+			}
+			i++;
+		} while (i > 0);
+	}
+
+	return key_length;
+}
+
+/* check if RSN AKM suite is valid */
+static int bcmwpa_is_valid_akm(const rsn_akm_t akm)
+{
+	uint i = 0;
+	for (i = 0; i < ARRAYSIZE(rsn_akm_lookup_tbl); i++) {
+		if (akm == rsn_akm_lookup_tbl[i].rsn_akm) {
+			return BCME_OK;
+		}
+	}
+	return BCME_ERROR;
+}
+
+/* checking cipher suite selector restriction based on AKM */
+int
+bcmwpa_rsn_akm_cipher_match(rsn_ie_info_t *rsn_info)
+{
+	uint i;
+	const rsn_akm_cipher_match_entry_t *p_entry = NULL;
+
+	for (i = 0; i < ARRAYSIZE(rsn_akm_cipher_match_table); i++) {
+		/* akm match */
+		if (rsn_info->sta_akm == rsn_akm_cipher_match_table[i].akm_type) {
+			p_entry = &rsn_akm_cipher_match_table[i];
+			break;
+		}
+	}
+
+	if (p_entry) {
+		/* unicast cipher match */
+		if (!(rsn_info->p_ciphers & p_entry->u_cast)) {
+			return BCME_UNSUPPORTED;
+		}
+		/* multicast cipher match */
+		if (!(BCM_BIT(rsn_info->g_cipher) & p_entry->m_cast)) {
+			return BCME_UNSUPPORTED;
+		}
+		/* group management cipher match */
+		if (!(BCM_BIT(rsn_info->g_mgmt_cipher) & p_entry->g_mgmt)) {
+			return BCME_UNSUPPORTED;
+		}
+	}
+	return BCME_OK;
+}
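wlc_calc_rsn_desc_version() above implements the descriptor-version table of 802.11 (12.7.2). A condensed standalone model of that mapping, with local stand-in constants rather than the driver's RSN_AKM_* and WPA_CIPHER_* values:

#include <stdio.h>

/* stand-in constants for the AKM/cipher selectors used above */
enum { AKM_PSK, AKM_1X, AKM_SHA256_PSK, AKM_FT_PSK };
enum { CIPHER_TKIP, CIPHER_CCMP };

static int key_desc_version(int akm, int pairwise)
{
	if (akm == AKM_SHA256_PSK || akm == AKM_FT_PSK)
		return 3;	/* AES-128-CMAC MIC */
	if (pairwise == CIPHER_TKIP)
		return 1;	/* ARC4 key wrap, HMAC-MD5 MIC */
	return 2;		/* NIST AES key wrap, HMAC-SHA1 MIC */
}

int main(void)
{
	printf("PSK + TKIP -> V%d\n", key_desc_version(AKM_PSK, CIPHER_TKIP));
	printf("PSK + CCMP -> V%d\n", key_desc_version(AKM_PSK, CIPHER_CCMP));
	printf("SHA256-PSK -> V%d\n", key_desc_version(AKM_SHA256_PSK, CIPHER_CCMP));
	return 0;
}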
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL)
+uint8 bcmwpa_find_group_mgmt_algo(rsn_cipher_t g_mgmt_cipher)
+{
+	uint8 i;
+	uint8 algo = CRYPTO_ALGO_BIP;
+
+	for (i = 0; i < ARRAYSIZE(group_mgmt_cipher_algo); i++) {
+		if ((group_mgmt_cipher_algo[i].g_mgmt_cipher == g_mgmt_cipher)) {
+			algo = group_mgmt_cipher_algo[i].bip_algo;
+			break;
+		}
+	}
+
+	return algo;
+}
+#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) */
+
+#if defined(WL_BAND6G)
+bool
+bcmwpa_is_invalid_6g_akm(const rsn_akm_mask_t akms_bmp)
+{
+	if (akms_bmp & rsn_akm_6g_inval_mask) {
+		return TRUE;
+	}
+	return FALSE;
+}
+
+bool
+bcmwpa_is_invalid_6g_cipher(const rsn_ciphers_t ciphers_bmp)
+{
+	if (ciphers_bmp & cipher_6g_inval_mask) {
+		return TRUE;
+	}
+	return FALSE;
+}
+#endif /* WL_BAND6G */
+
+/*
+ * bcmwpa_get_algo_key_len: return the key length for a crypto algorithm.
+ * input:  algo - the crypto algorithm (CRYPTO_ALGO_xxx).
+ * output: key_len - the key length for that algorithm.
+ * return: BCME_OK if the algorithm is supported, else BCME_BADARG/BCME_UNSUPPORTED.
+ */
+int
+bcmwpa_get_algo_key_len(uint8 algo, uint16 *key_len)
+{
+	int err = BCME_OK;
+
+	if (key_len == NULL) {
+		return BCME_BADARG;
+	}
+
+	switch (algo) {
+	case CRYPTO_ALGO_WEP1:
+		*key_len = WEP1_KEY_SIZE;
+		break;
+
+	case CRYPTO_ALGO_TKIP:
+		*key_len = TKIP_KEY_SIZE;
+		break;
+
+	case CRYPTO_ALGO_WEP128:
+		*key_len = WEP128_KEY_SIZE;
+		break;
+
+	case CRYPTO_ALGO_AES_CCM: /* fall through */
+	case CRYPTO_ALGO_AES_GCM: /* fall through */
+	case CRYPTO_ALGO_AES_OCB_MSDU: /* fall through */
+	case CRYPTO_ALGO_AES_OCB_MPDU:
+		*key_len = AES_KEY_SIZE;
+		break;
+
+#ifdef BCMWAPI_WPI
+	/* TODO: Need to double check */
+	case CRYPTO_ALGO_SMS4:
+		*key_len = SMS4_KEY_LEN + SMS4_WPI_CBC_MAC_LEN;
+		break;
+#endif /* BCMWAPI_WPI */
+
+	case CRYPTO_ALGO_BIP: /* fall through */
+	case CRYPTO_ALGO_BIP_GMAC:
+		*key_len = BIP_KEY_SIZE;
+		break;
+
+	case CRYPTO_ALGO_AES_CCM256: /* fall through */
+	case CRYPTO_ALGO_AES_GCM256: /* fall through */
+	case CRYPTO_ALGO_BIP_CMAC256: /* fall through */
+	case CRYPTO_ALGO_BIP_GMAC256:
+		*key_len = AES256_KEY_SIZE;
+		break;
+
+	case CRYPTO_ALGO_OFF:
+		*key_len = 0;
+		break;
+
+#if !defined(BCMCCX) && !defined(BCMEXTCCX)
+	case CRYPTO_ALGO_NALG: /* fall through */
+#else
+	case CRYPTO_ALGO_CKIP: /* fall through */
+	case CRYPTO_ALGO_CKIP_MMH: /* fall through */
+	case CRYPTO_ALGO_WEP_MMH: /* fall through */
+	case CRYPTO_ALGO_PMK: /* fall through default */
+#endif /* !defined(BCMCCX) && !defined(BCMEXTCCX) */
+	default:
+		*key_len = 0;
+		err = BCME_UNSUPPORTED;
+		break;
+	}
+	return err;
+}
diff --git a/bcmdhd.101.10.361.x/bcmxtlv.c b/bcmdhd.101.10.361.x/bcmxtlv.c
new file mode 100755
index 0000000..ddc6351
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmxtlv.c
@@ -0,0 +1,647 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + */ + +#include +#include + +#include + +#ifdef BCMDRIVER +#include +#else /* !BCMDRIVER */ +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +#endif /* !BCMDRIVER */ + +#include +#include +#include + +int +BCMPOSTTRAPFN(bcm_xtlv_hdr_size)(bcm_xtlv_opts_t opts) +{ + int len = (int)OFFSETOF(bcm_xtlv_t, data); /* nominal */ + if (opts & BCM_XTLV_OPTION_LENU8) --len; + if (opts & BCM_XTLV_OPTION_IDU8) --len; + + return len; +} + +bool +bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts) +{ + return elt != NULL && + buf_len >= bcm_xtlv_hdr_size(opts) && + buf_len >= bcm_xtlv_size(elt, opts); +} + +int +BCMPOSTTRAPFN(bcm_xtlv_size_for_data)(int dlen, bcm_xtlv_opts_t opts) +{ + int hsz; + + hsz = bcm_xtlv_hdr_size(opts); + return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + hsz, 4) + : (dlen + hsz)); +} + +int +bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + int size; /* size including header, data, and any pad */ + int len; /* length wthout padding */ + + len = BCM_XTLV_LEN_EX(elt, opts); + size = bcm_xtlv_size_for_data(len, opts); + return size; +} + +int +bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + const uint8 *lenp; + int len; + + lenp = (const uint8 *)elt + OFFSETOF(bcm_xtlv_t, len); /* nominal */ + if (opts & BCM_XTLV_OPTION_IDU8) { + --lenp; + } + + if (opts & BCM_XTLV_OPTION_LENU8) { + len = *lenp; + } else if (opts & BCM_XTLV_OPTION_LENBE) { + len = (uint32)hton16(elt->len); + } else { + len = ltoh16_ua(lenp); + } + + return len; +} + +int +bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + int id = 0; + if (opts & BCM_XTLV_OPTION_IDU8) { + id = *(const uint8 *)elt; + } else if (opts & BCM_XTLV_OPTION_IDBE) { + id = (uint32)hton16(elt->id); + } else { + id = ltoh16_ua((const uint8 *)elt); + } + + return id; +} + +bcm_xtlv_t * +bcm_next_xtlv(const bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts) +{ + uint sz; + + COV_TAINTED_DATA_SINK(buflen); + COV_NEG_SINK(buflen); + + /* validate current elt */ + if (!bcm_valid_xtlv(elt, *buflen, opts)) + return NULL; + + /* advance to next elt */ + sz = BCM_XTLV_SIZE_EX(elt, opts); + elt = (const bcm_xtlv_t*)((const uint8 *)elt + sz); + +#if defined(__COVERITY__) + /* The 'sz' value is tainted in Coverity because it is read from the tainted data pointed + * to by 'elt'. However, bcm_valid_xtlv() verifies that the elt pointer is a valid element, + * so its size, sz = BCM_XTLV_SIZE_EX(elt, opts), is in the bounds of the buffer. + * Clearing the tainted attribute of 'sz' for Coverity. 
+ */ + __coverity_tainted_data_sanitize__(sz); + if (sz > *buflen) { + return NULL; + } +#endif /* __COVERITY__ */ + + *buflen -= sz; + + /* validate next elt */ + if (!bcm_valid_xtlv(elt, *buflen, opts)) + return NULL; + + COV_TAINTED_DATA_ARG(elt); + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() + return (bcm_xtlv_t *)(elt); + GCC_DIAGNOSTIC_POP() +} + +int +bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts) +{ + if (!tlv_buf || !buf || !len) + return BCME_BADARG; + + tlv_buf->opts = opts; + tlv_buf->size = len; + tlv_buf->head = buf; + tlv_buf->buf = buf; + return BCME_OK; +} + +uint16 +bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf) +{ + uint16 len; + + if (tbuf) + len = (uint16)(tbuf->buf - tbuf->head); + else + len = 0; + + return len; +} + +uint16 +bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf) +{ + uint16 rlen; + if (tbuf) + rlen = tbuf->size - bcm_xtlv_buf_len(tbuf); + else + rlen = 0; + + return rlen; +} + +uint8 * +bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf) +{ + return tbuf ? tbuf->buf : NULL; +} + +uint8 * +bcm_xtlv_head(bcm_xtlvbuf_t *tbuf) +{ + return tbuf ? tbuf->head : NULL; +} + +void +BCMPOSTTRAPFN(bcm_xtlv_pack_xtlv)(bcm_xtlv_t *xtlv, uint16 type, uint16 len, const uint8 *data, + bcm_xtlv_opts_t opts) +{ + uint8 *data_buf; + bcm_xtlv_opts_t mask = BCM_XTLV_OPTION_IDU8 | BCM_XTLV_OPTION_LENU8; + + if (!(opts & mask)) { /* default */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + sizeof(xtlv->id); + htol16_ua_store(type, idp); + htol16_ua_store(len, lenp); + data_buf = lenp + sizeof(uint16); + } else if ((opts & mask) == mask) { /* u8 id and u8 len */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + 1; + *idp = (uint8)type; + *lenp = (uint8)len; + data_buf = lenp + sizeof(uint8); + } else if (opts & BCM_XTLV_OPTION_IDU8) { /* u8 id, u16 len */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + 1; + *idp = (uint8)type; + htol16_ua_store(len, lenp); + data_buf = lenp + sizeof(uint16); + } else if (opts & BCM_XTLV_OPTION_LENU8) { /* u16 id, u8 len */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + sizeof(uint16); + htol16_ua_store(type, idp); + *lenp = (uint8)len; + data_buf = lenp + sizeof(uint8); + } else { + ASSERT(!"Unexpected xtlv option"); + return; + } + + if (opts & BCM_XTLV_OPTION_LENU8) { + ASSERT(len <= 0x00ff); + len &= 0xff; + } + + if (data != NULL) { + memcpy(data_buf, data, len); + } +} + +/* xtlv header is always packed in LE order */ +void +bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len, + const uint8 **data, bcm_xtlv_opts_t opts) +{ + if (type) + *type = (uint16)bcm_xtlv_id(xtlv, opts); + if (len) + *len = (uint16)bcm_xtlv_len(xtlv, opts); + if (data) + *data = (const uint8 *)xtlv + BCM_XTLV_HDR_SIZE_EX(opts); +} + +int +bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + + size = bcm_xtlv_size_for_data(n, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + bcm_xtlv_pack_xtlv(xtlv, type, (uint16)n, data, tbuf->opts); + tbuf->buf += size; /* note: data may be NULL, reserves space */ + return BCME_OK; +} + +static int +bcm_xtlv_put_int(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n, int int_sz) +{ + bcm_xtlv_t *xtlv; + int xtlv_len; + uint8 *xtlv_data; + int err = BCME_OK; + + if (tbuf == NULL) { + err = BCME_BADARG; + goto done; + } + + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + + /* put type and length in xtlv and reserve data space */ 
+	xtlv_len = n * int_sz;
+	err = bcm_xtlv_put_data(tbuf, type, NULL, xtlv_len);
+	if (err != BCME_OK)
+		goto done;
+
+	xtlv_data = (uint8 *)xtlv + bcm_xtlv_hdr_size(tbuf->opts);
+
+	/* write data w/ little-endianness into buffer - single loop, aligned access */
+	for (; n != 0; --n, xtlv_data += int_sz, data += int_sz) {
+		switch (int_sz) {
+		case sizeof(uint8):
+			break;
+		case sizeof(uint16):
+		{
+			uint16 v = load16_ua(data);
+			htol16_ua_store(v, xtlv_data);
+			break;
+		}
+		case sizeof(uint32):
+		{
+			uint32 v = load32_ua(data);
+			htol32_ua_store(v, xtlv_data);
+			break;
+		}
+		case sizeof(uint64):
+		{
+			uint64 v = load64_ua(data);
+			htol64_ua_store(v, xtlv_data);
+			break;
+		}
+		default:
+			err = BCME_UNSUPPORTED;
+			goto done;
+		}
+	}
+
+done:
+	return err;
+}
+
+int
+bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n)
+{
+	return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint16));
+}
+
+int
+bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n)
+{
+	return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint32));
+}
+
+int
+bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n)
+{
+	return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint64));
+}
+
+/*
+ * unpacks an xtlv record from buf, checks the type,
+ * copies the data to the caller's buffer and
+ * advances the tlv pointer to the next record.
+ * The caller is responsible for the dst space check.
+ */
+int
+bcm_unpack_xtlv_entry(const uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len,
+	uint8 *dst_data, bcm_xtlv_opts_t opts)
+{
+	const bcm_xtlv_t *ptlv = (const bcm_xtlv_t *)*tlv_buf;
+	uint16 len;
+	uint16 type;
+	const uint8 *data;
+
+	ASSERT(ptlv);
+
+	bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
+	if (len) {
+		if ((type != xpct_type) || (len > xpct_len))
+			return BCME_BADARG;
+		if (dst_data && data)
+			memcpy(dst_data, data, len); /* copy data to dst */
+	}
+
+	*tlv_buf += BCM_XTLV_SIZE_EX(ptlv, opts);
+	return BCME_OK;
+}
+
+/*
+ * packs user data into a tlv record and advances the tlv pointer to the next xtlv slot.
+ * buflen is used for the tlv_buf space check.
+ */
+int
+bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len,
+	const uint8 *src_data, bcm_xtlv_opts_t opts)
+{
+	bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
+	int size;
+
+	ASSERT(ptlv);
+
+	size = bcm_xtlv_size_for_data(len, opts);
+
+	/* fail if the record does not fit into the caller's buffer */
+	if (size > *buflen) {
+		return BCME_BADLEN;
+	}
+
+	bcm_xtlv_pack_xtlv(ptlv, type, len, src_data, opts);
+
+	/* advance the caller's pointer into the tlv buffer */
+	*tlv_buf = (uint8*)(*tlv_buf) + size;
+	/* decrement the len */
+	*buflen -= (uint16)size;
+	return BCME_OK;
+}
+
+/*
+ * unpacks all xtlv records from the buffer and issues a callback
+ * to the set function, one call per found tlv record
+ */
+int
+bcm_unpack_xtlv_buf(void *ctx, const uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+	bcm_xtlv_unpack_cbfn_t *cbfn)
+{
+	uint16 len;
+	uint16 type;
+	int res = BCME_OK;
+	int size;
+	const bcm_xtlv_t *ptlv;
+	int sbuflen = buflen;
+	const uint8 *data;
+	int hdr_size;
+
+	ASSERT(!buflen || tlv_buf);
+	ASSERT(!buflen || cbfn);
+
+	hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
+	while (sbuflen >= hdr_size) {
+		ptlv = (const bcm_xtlv_t *)tlv_buf;
+
+		bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
+		size = bcm_xtlv_size_for_data(len, opts);
+
+		sbuflen -= size;
+
+		/* check for buffer overrun */
+		if (sbuflen < 0) {
+			break;
+		}
+
+		if ((res = cbfn(ctx, data, type, len)) != BCME_OK) {
+			break;
+		}
+		tlv_buf += size;
+	}
+	return res;
+}
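To see the wire format these pack/unpack helpers agree on, the following self-contained sketch emits two records in the default option set (little-endian u16 id, u16 length, no alignment padding) and walks them back with the same loop shape as bcm_unpack_xtlv_buf(). It is a model, not a use of the driver API; the ids and payloads are made up:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* model of the default xtlv record: u16 id (LE), u16 len (LE), len bytes of data */
static uint8_t *put_tlv(uint8_t *p, uint16_t id, const void *data, uint16_t len)
{
	p[0] = (uint8_t)id;  p[1] = (uint8_t)(id >> 8);
	p[2] = (uint8_t)len; p[3] = (uint8_t)(len >> 8);
	memcpy(p + 4, data, len);
	return p + 4 + len;	/* next record slot */
}

int main(void)
{
	uint8_t buf[64], *end;
	uint32_t chan = 36;

	end = put_tlv(buf, 0x0001, "dongle", 6);
	end = put_tlv(end, 0x0002, &chan, sizeof(chan));

	/* walk the records back, same loop shape as bcm_unpack_xtlv_buf() */
	for (uint8_t *p = buf; p + 4 <= end; ) {
		uint16_t id  = (uint16_t)(p[0] | (p[1] << 8));
		uint16_t len = (uint16_t)(p[2] | (p[3] << 8));
		printf("id=0x%04x len=%u\n", id, len);
		p += 4 + len;
	}
	return 0;
}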
+int
+bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+	bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next,
+	int *outlen)
+{
+	int res = BCME_OK;
+	uint16 tlv_id;
+	uint16 tlv_len;
+	uint8 *startp;
+	uint8 *endp;
+	uint8 *buf;
+	bool more;
+	int size;
+	int hdr_size;
+
+	ASSERT(get_next && pack_next);
+
+	buf = tlv_buf;
+	startp = buf;
+	endp = (uint8 *)buf + buflen;
+	more = TRUE;
+	hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
+
+	while (more && (buf < endp)) {
+		more = get_next(ctx, &tlv_id, &tlv_len);
+		size = bcm_xtlv_size_for_data(tlv_len, opts);
+		if ((buf + size) > endp) {
+			res = BCME_BUFTOOSHORT;
+			goto done;
+		}
+
+		bcm_xtlv_pack_xtlv((bcm_xtlv_t *)buf, tlv_id, tlv_len, NULL, opts);
+		pack_next(ctx, tlv_id, tlv_len, buf + hdr_size);
+		buf += size;
+	}
+
+	if (more)
+		res = BCME_BUFTOOSHORT;
+
+done:
+	if (outlen) {
+		*outlen = (int)(buf - startp);
+	}
+	return res;
+}
+
+/*
+ * pack xtlv buffer from memory according to xtlv_desc_t
+ */
+int
+bcm_pack_xtlv_buf_from_mem(uint8 **tlv_buf, uint16 *buflen, const xtlv_desc_t *items,
+	bcm_xtlv_opts_t opts)
+{
+	int res = BCME_OK;
+	uint8 *ptlv = *tlv_buf;
+
+	while (items->type != 0) {
+		if (items->len && items->ptr) {
+			res = bcm_pack_xtlv_entry(&ptlv, buflen, items->type,
+				items->len, items->ptr, opts);
+			if (res != BCME_OK)
+				break;
+		}
+		items++;
+	}
+
+	*tlv_buf = ptlv; /* update the external pointer */
+	return res;
+}
+
+/*
+ * unpack xtlv buffer to memory according to xtlv_desc_t
+ */
+int
+bcm_unpack_xtlv_buf_to_mem(const uint8 *tlv_buf, int *buflen, xtlv_desc_t *items,
+	bcm_xtlv_opts_t opts)
+{
+	int res = BCME_OK;
+	const bcm_xtlv_t *elt;
+
+	elt = bcm_valid_xtlv((const bcm_xtlv_t *)tlv_buf, *buflen, opts) ?
+		(const bcm_xtlv_t *)tlv_buf : NULL;
+	if (!elt || !items) {
+		res = BCME_BADARG;
+		return res;
+	}
+
+	for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) {
+		/* find matches in desc_t items */
+		xtlv_desc_t *dst_desc = items;
+		uint16 len, type;
+		const uint8 *data;
+
+		bcm_xtlv_unpack_xtlv(elt, &type, &len, &data, opts);
+		while (dst_desc->type != 0) {
+			if (type == dst_desc->type) {
+				if (len != dst_desc->len) {
+					res = BCME_BADLEN;
+				} else {
+					memcpy(dst_desc->ptr, data, len);
+				}
+				break;
+			}
+			dst_desc++;
+		}
+	}
+
+	if (res == BCME_OK && *buflen != 0) /* this does not look right */
+		res = BCME_BUFTOOSHORT;
+
+	return res;
+}
+
+/*
+ * return the data pointer of a given ID from the xtlv buffer.
+ * If the specified xTLV ID is found, on return *datalen will contain
+ * the data length of the xTLV ID.
+ */
+const uint8*
+bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen, uint16 id,
+	uint16 *datalen, bcm_xtlv_opts_t opts)
+{
+	const uint8 *retptr = NULL;
+	uint16 type, len;
+	int size;
+	const bcm_xtlv_t *ptlv;
+	int sbuflen = buflen;
+	const uint8 *data;
+	int hdr_size;
+
+	COV_TAINTED_DATA_SINK(buflen);
+	COV_NEG_SINK(buflen);
+
+	hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
+
+	/* Init the datalength */
+	if (datalen) {
+		*datalen = 0;
+	}
+	while (sbuflen >= hdr_size) {
+		ptlv = (const bcm_xtlv_t *)tlv_buf;
+		bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
+
+		size = bcm_xtlv_size_for_data(len, opts);
+		sbuflen -= size;
+		if (sbuflen < 0) /* buffer overrun?
*/ + break; + + if (id == type) { + retptr = data; + if (datalen) + *datalen = len; + break; + } + + tlv_buf += size; + } + + COV_TAINTED_DATA_ARG(retptr); + + return retptr; +} + +bcm_xtlv_t* +bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst, + int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *dst_next = NULL; + src = (src && bcm_valid_xtlv(src, src_buf_len, opts)) ? src : NULL; + if (src && dst) { + uint16 type; + uint16 len; + const uint8 *data; + int size; + bcm_xtlv_unpack_xtlv(src, &type, &len, &data, opts); + size = bcm_xtlv_size_for_data(len, opts); + if (size <= dst_buf_len) { + bcm_xtlv_pack_xtlv(dst, type, len, data, opts); + dst_next = (bcm_xtlv_t *)((uint8 *)dst + size); + } + } + + return dst_next; +} diff --git a/bcmdhd.101.10.361.x/dbus.c b/bcmdhd.101.10.361.x/dbus.c new file mode 100755 index 0000000..f86a864 --- /dev/null +++ b/bcmdhd.101.10.361.x/dbus.c @@ -0,0 +1,2928 @@ +/** @file dbus.c + * + * Hides details of USB / SDIO / SPI interfaces and OS details. It is intended to shield details and + * provide the caller with one common bus interface for all dongle devices. In practice, it is only + * used for USB interfaces. DBUS is not a protocol, but an abstraction layer. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dbus.c 553311 2015-04-29 10:23:08Z $ + */ + + +#include "osl.h" +#include "dbus.h" +#include +#include +#include +#include +#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */ +#include +#endif +#include + +#if defined(BCM_REQUEST_FW) +#include +#include +#include +#include +#include +#include +#include +#endif + + + +#if defined(BCM_REQUEST_FW) +#ifndef VARS_MAX +#define VARS_MAX 8192 +#endif +#endif + +#ifdef DBUS_USB_LOOPBACK +extern bool is_loopback_pkt(void *buf); +extern int matches_loopback_pkt(void *buf); +#endif + +/** General info for all BUS types */ +typedef struct dbus_irbq { + dbus_irb_t *head; + dbus_irb_t *tail; + int cnt; +} dbus_irbq_t; + +/** + * This private structure dhd_bus_t is also declared in dbus_usb_linux.c. + * All the fields must be consistent in both declarations. + */ +typedef struct dhd_bus { + dbus_pub_t pub; /* MUST BE FIRST */ + dhd_pub_t *dhd; + + void *cbarg; + dbus_callbacks_t *cbs; /* callbacks to higher level, e.g. dhd_linux.c */ + void *bus_info; + dbus_intf_t *drvintf; /* callbacks to lower level, e.g. 
dbus_usb.c or dbus_usb_linux.c */ + uint8 *fw; + int fwlen; + uint32 errmask; + int rx_low_watermark; /* avoid rx overflow by filling rx with free IRBs */ + int tx_low_watermark; + bool txoff; + bool txoverride; /* flow control related */ + bool rxoff; + bool tx_timer_ticking; + + + dbus_irbq_t *rx_q; + dbus_irbq_t *tx_q; + + uint8 *nvram; + int nvram_len; + uint8 *image; /* buffer for combine fw and nvram */ + int image_len; + uint8 *orig_fw; + int origfw_len; + int decomp_memsize; + dbus_extdl_t extdl; + int nvram_nontxt; +#if defined(BCM_REQUEST_FW) + void *firmware; + void *nvfile; +#endif + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ +} dhd_bus_t; + +struct exec_parms { + union { + /* Can consolidate same params, if need be, but this shows + * group of parameters per function + */ + struct { + dbus_irbq_t *q; + dbus_irb_t *b; + } qenq; + + struct { + dbus_irbq_t *q; + } qdeq; + }; +}; + +#define EXEC_RXLOCK(info, fn, a) \ + info->drvintf->exec_rxlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a)) + +#define EXEC_TXLOCK(info, fn, a) \ + info->drvintf->exec_txlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a)) + +/* + * Callbacks common for all BUS + */ +static void dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb); +static void dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status); +static void dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status); +static void dbus_if_errhandler(void *handle, int err); +static void dbus_if_ctl_complete(void *handle, int type, int status); +static void dbus_if_state_change(void *handle, int state); +static void *dbus_if_pktget(void *handle, uint len, bool send); +static void dbus_if_pktfree(void *handle, void *p, bool send); +static struct dbus_irb *dbus_if_getirb(void *cbarg, bool send); +static void dbus_if_rxerr_indicate(void *handle, bool on); + +void * dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen); +void dhd_dbus_disconnect_cb(void *arg); +void dbus_detach(dhd_bus_t *pub); + +/** functions in this file that are called by lower DBUS levels, e.g. 
dbus_usb.c */ +static dbus_intf_callbacks_t dbus_intf_cbs = { + dbus_if_send_irb_timeout, + dbus_if_send_irb_complete, + dbus_if_recv_irb_complete, + dbus_if_errhandler, + dbus_if_ctl_complete, + dbus_if_state_change, + NULL, /* isr */ + NULL, /* dpc */ + NULL, /* watchdog */ + dbus_if_pktget, + dbus_if_pktfree, + dbus_if_getirb, + dbus_if_rxerr_indicate +}; + +/* + * Need global for probe() and disconnect() since + * attach() is not called at probe and detach() + * can be called inside disconnect() + */ +static dbus_intf_t *g_busintf = NULL; +static probe_cb_t probe_cb = NULL; +static disconnect_cb_t disconnect_cb = NULL; +static void *probe_arg = NULL; +static void *disc_arg = NULL; + +#if defined(BCM_REQUEST_FW) +int8 *nonfwnvram = NULL; /* stand-alone multi-nvram given with driver load */ +int nonfwnvramlen = 0; +#endif /* #if defined(BCM_REQUEST_FW) */ + +static void* q_enq(dbus_irbq_t *q, dbus_irb_t *b); +static void* q_enq_exec(struct exec_parms *args); +static dbus_irb_t*q_deq(dbus_irbq_t *q); +static void* q_deq_exec(struct exec_parms *args); +static int dbus_tx_timer_init(dhd_bus_t *dhd_bus); +static int dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout); +static int dbus_tx_timer_stop(dhd_bus_t *dhd_bus); +static int dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb); +static int dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb); +static int dbus_rxirbs_fill(dhd_bus_t *dhd_bus); +static int dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info); +static void dbus_disconnect(void *handle); +static void *dbus_probe(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen); + +#if defined(BCM_REQUEST_FW) +extern char * dngl_firmware; +extern unsigned int dngl_fwlen; +#ifndef EXTERNAL_FW_PATH +static int dbus_get_nvram(dhd_bus_t *dhd_bus); +static int dbus_jumbo_nvram(dhd_bus_t *dhd_bus); +static int dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev); +static int dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen, +uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len); +#endif /* !EXTERNAL_FW_PATH */ +extern int dbus_zlib_decomp(dhd_bus_t *dhd_bus); +extern void *dbus_zlib_calloc(int num, int size); +extern void dbus_zlib_free(void *ptr); +#endif + +/* function */ +void +dbus_flowctrl_tx(void *dbi, bool on) +{ + dhd_bus_t *dhd_bus = dbi; + + if (dhd_bus == NULL) + return; + + DBUSTRACE(("%s on %d\n", __FUNCTION__, on)); + + if (dhd_bus->txoff == on) + return; + + dhd_bus->txoff = on; + + if (dhd_bus->cbs && dhd_bus->cbs->txflowcontrol) + dhd_bus->cbs->txflowcontrol(dhd_bus->cbarg, on); +} + +/** + * if lower level DBUS signaled a rx error, more free rx IRBs should be allocated or flow control + * should kick in to make more free rx IRBs available. 
+ */ +static void +dbus_if_rxerr_indicate(void *handle, bool on) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) handle; + + DBUSTRACE(("%s, on %d\n", __FUNCTION__, on)); + + if (dhd_bus == NULL) + return; + + if (dhd_bus->txoverride == on) + return; + + dhd_bus->txoverride = on; /* flow control */ + + if (!on) + dbus_rxirbs_fill(dhd_bus); + +} + +/** q_enq()/q_deq() are executed with protection via exec_rxlock()/exec_txlock() */ +static void* +q_enq(dbus_irbq_t *q, dbus_irb_t *b) +{ + ASSERT(q->tail != b); + ASSERT(b->next == NULL); + b->next = NULL; + if (q->tail) { + q->tail->next = b; + q->tail = b; + } else + q->head = q->tail = b; + + q->cnt++; + + return b; +} + +static void* +q_enq_exec(struct exec_parms *args) +{ + return q_enq(args->qenq.q, args->qenq.b); +} + +static dbus_irb_t* +q_deq(dbus_irbq_t *q) +{ + dbus_irb_t *b; + + b = q->head; + if (b) { + q->head = q->head->next; + b->next = NULL; + + if (q->head == NULL) + q->tail = q->head; + + q->cnt--; + } + return b; +} + +static void* +q_deq_exec(struct exec_parms *args) +{ + return q_deq(args->qdeq.q); +} + +/** + * called during attach phase. Status @ Dec 2012: this function does nothing since for all of the + * lower DBUS levels dhd_bus->drvintf->tx_timer_init is NULL. + */ +static int +dbus_tx_timer_init(dhd_bus_t *dhd_bus) +{ + if (dhd_bus && dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_init) + return dhd_bus->drvintf->tx_timer_init(dhd_bus->bus_info); + else + return DBUS_ERR; +} + +static int +dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout) +{ + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->tx_timer_ticking) + return DBUS_OK; + + if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_start) { + if (dhd_bus->drvintf->tx_timer_start(dhd_bus->bus_info, timeout) == DBUS_OK) { + dhd_bus->tx_timer_ticking = TRUE; + return DBUS_OK; + } + } + + return DBUS_ERR; +} + +static int +dbus_tx_timer_stop(dhd_bus_t *dhd_bus) +{ + if (dhd_bus == NULL) + return DBUS_ERR; + + if (!dhd_bus->tx_timer_ticking) + return DBUS_OK; + + if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_stop) { + if (dhd_bus->drvintf->tx_timer_stop(dhd_bus->bus_info) == DBUS_OK) { + dhd_bus->tx_timer_ticking = FALSE; + return DBUS_OK; + } + } + + return DBUS_ERR; +} + +/** called during attach phase. 
*/ +static int +dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb) +{ + int i; + dbus_irb_t *irb; + + ASSERT(q); + ASSERT(dhd_bus); + + for (i = 0; i < nq; i++) { + /* MALLOC dbus_irb_tx or dbus_irb_rx, but cast to simple dbus_irb_t linkedlist */ + irb = (dbus_irb_t *) MALLOC(dhd_bus->pub.osh, size_irb); + if (irb == NULL) { + ASSERT(irb); + return DBUS_ERR; + } + bzero(irb, size_irb); + + /* q_enq() does not need to go through EXEC_xxLOCK() during init() */ + q_enq(q, irb); + } + + return DBUS_OK; +} + +/** called during detach phase or when attach failed */ +static int +dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb) +{ + dbus_irb_t *irb; + + ASSERT(q); + ASSERT(dhd_bus); + + /* q_deq() does not need to go through EXEC_xxLOCK() + * during deinit(); all callbacks are stopped by this time + */ + while ((irb = q_deq(q)) != NULL) { + MFREE(dhd_bus->pub.osh, irb, size_irb); + } + + if (q->cnt) + DBUSERR(("deinit: q->cnt=%d > 0\n", q->cnt)); + return DBUS_OK; +} + +/** multiple code paths require the rx queue to be filled with more free IRBs */ +static int +dbus_rxirbs_fill(dhd_bus_t *dhd_bus) +{ + int err = DBUS_OK; + + + dbus_irb_rx_t *rxirb; + struct exec_parms args; + + ASSERT(dhd_bus); + if (dhd_bus->pub.busstate != DBUS_STATE_UP) { + DBUSERR(("dbus_rxirbs_fill: DBUS not up \n")); + return DBUS_ERR; + } else if (!dhd_bus->drvintf || (dhd_bus->drvintf->recv_irb == NULL)) { + /* Lower edge bus interface does not support recv_irb(). + * No need to pre-submit IRBs in this case. + */ + return DBUS_ERR; + } + + /* The dongle recv callback is freerunning without lock. So multiple callbacks(and this + * refill) can run in parallel. While the rxoff condition is triggered outside, + * below while loop has to check and abort posting more to avoid RPC rxq overflow. + */ + args.qdeq.q = dhd_bus->rx_q; + while ((!dhd_bus->rxoff) && + (rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) { + err = dhd_bus->drvintf->recv_irb(dhd_bus->bus_info, rxirb); + if (err == DBUS_ERR_RXDROP || err == DBUS_ERR_RXFAIL) { + /* Add the the free rxirb back to the queue + * and wait till later + */ + bzero(rxirb, sizeof(dbus_irb_rx_t)); + args.qenq.q = dhd_bus->rx_q; + args.qenq.b = (dbus_irb_t *) rxirb; + EXEC_RXLOCK(dhd_bus, q_enq_exec, &args); + break; + } else if (err != DBUS_OK) { + int i = 0; + while (i++ < 100) { + DBUSERR(("%s :: memory leak for rxirb note?\n", __FUNCTION__)); + } + } + } + return err; +} /* dbus_rxirbs_fill */ + +/** called when the DBUS interface state changed. */ +void +dbus_flowctrl_rx(dbus_pub_t *pub, bool on) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if (dhd_bus == NULL) + return; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus->rxoff == on) + return; + + dhd_bus->rxoff = on; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + if (!on) { + /* post more irbs, resume rx if necessary */ + dbus_rxirbs_fill(dhd_bus); + if (dhd_bus && dhd_bus->drvintf->recv_resume) { + dhd_bus->drvintf->recv_resume(dhd_bus->bus_info); + } + } else { + /* ??? cancell posted irbs first */ + + if (dhd_bus && dhd_bus->drvintf->recv_stop) { + dhd_bus->drvintf->recv_stop(dhd_bus->bus_info); + } + } + } +} + +/** + * Several code paths in this file want to send a buffer to the dongle. This function handles both + * sending of a buffer or a pkt. 
+ */ +static int +dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + dbus_irb_tx_t *txirb = NULL; + int txirb_pending; + struct exec_parms args; + + if (dhd_bus == NULL) + return DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + args.qdeq.q = dhd_bus->tx_q; + if (dhd_bus->drvintf) + txirb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args); + + if (txirb == NULL) { + DBUSERR(("Out of tx dbus_bufs\n")); + return DBUS_ERR; + } + + if (pkt != NULL) { + txirb->pkt = pkt; + txirb->buf = NULL; + txirb->len = 0; + } else if (buf != NULL) { + txirb->pkt = NULL; + txirb->buf = buf; + txirb->len = len; + } else { + ASSERT(0); /* Should not happen */ + } + txirb->info = info; + txirb->arg = NULL; + txirb->retry_count = 0; + + if (dhd_bus->drvintf && dhd_bus->drvintf->send_irb) { + /* call lower DBUS level send_irb function */ + err = dhd_bus->drvintf->send_irb(dhd_bus->bus_info, txirb); + if (err == DBUS_ERR_TXDROP) { + /* tx fail and no completion routine to clean up, reclaim irb NOW */ + DBUSERR(("%s: send_irb failed, status = %d\n", __FUNCTION__, err)); + bzero(txirb, sizeof(dbus_irb_tx_t)); + args.qenq.q = dhd_bus->tx_q; + args.qenq.b = (dbus_irb_t *) txirb; + EXEC_TXLOCK(dhd_bus, q_enq_exec, &args); + } else { + dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL); + txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt; + if (txirb_pending > (dhd_bus->tx_low_watermark * 3)) { + dbus_flowctrl_tx(dhd_bus, TRUE); + } + } + } + } else { + err = DBUS_ERR_TXFAIL; + DBUSTRACE(("%s: bus down, send_irb failed\n", __FUNCTION__)); + } + + return err; +} /* dbus_send_irb */ + +#if defined(BCM_REQUEST_FW) + +/** + * Before downloading a firmware image into the dongle, the validity of the image must be checked. 
+ */ +static int +check_file(osl_t *osh, unsigned char *headers) +{ + struct trx_header *trx; + int actual_len = -1; + + /* Extract trx header */ + trx = (struct trx_header *)headers; + if (ltoh32(trx->magic) != TRX_MAGIC) { + printf("Error: trx bad hdr %x\n", ltoh32(trx->magic)); + return -1; + } + + headers += SIZEOF_TRX(trx); + + /* TRX V1: get firmware len */ + /* TRX V2: get firmware len and DSG/CFG lengths */ + if (ltoh32(trx->flag_version) & TRX_UNCOMP_IMAGE) { + actual_len = ltoh32(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]) + + SIZEOF_TRX(trx); +#ifdef BCMTRXV2 + if (ISTRX_V2(trx)) { + actual_len += ltoh32(trx->offsets[TRX_OFFSETS_DSG_LEN_IDX]) + + ltoh32(trx->offsets[TRX_OFFSETS_CFG_LEN_IDX]); + } +#endif + return actual_len; + } else { + printf("compressed image\n"); + } + + return -1; +} + +#ifdef EXTERNAL_FW_PATH +static int +dbus_get_fw_nvram(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path) +{ + int bcmerror = -1, i; + uint len, total_len; + void *nv_image = NULL, *fw_image = NULL; + char *nv_memblock = NULL, *fw_memblock = NULL; + char *bufp; + bool file_exists; + uint8 nvram_words_pad = 0; + uint memblock_size = 2048; + uint8 *memptr; + int actual_fwlen; + struct trx_header *hdr; + uint32 img_offset = 0; + int offset = 0; + + /* For Get nvram */ + file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + if (file_exists) { + nv_image = dhd_os_open_image1(dhd_bus->dhd, pnv_path); + if (nv_image == NULL) { + printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path); + goto err; + } + } + nv_memblock = MALLOC(dhd_bus->pub.osh, MAX_NVRAMBUF_SIZE); + if (nv_memblock == NULL) { + DBUSERR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + len = dhd_os_get_image_block(nv_memblock, MAX_NVRAMBUF_SIZE, nv_image); + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { + bufp = (char *)nv_memblock; + bufp[len] = 0; + dhd_bus->nvram_len = process_nvram_vars(bufp, len); + if (dhd_bus->nvram_len % 4) + nvram_words_pad = 4 - dhd_bus->nvram_len % 4; + } else { + DBUSERR(("%s: error reading nvram file: %d\n", __FUNCTION__, len)); + bcmerror = DBUS_ERR_NVRAM; + goto err; + } + if (nv_image) { + dhd_os_close_image1(dhd_bus->dhd, nv_image); + nv_image = NULL; + } + + /* For Get first block of fw to calculate total_len */ + file_exists = ((pfw_path != NULL) && (pfw_path[0] != '\0')); + if (file_exists) { + fw_image = dhd_os_open_image1(dhd_bus->dhd, pfw_path); + if (fw_image == NULL) { + printf("%s: Open fw file failed %s\n", __FUNCTION__, pfw_path); + goto err; + } + } + memptr = fw_memblock = MALLOC(dhd_bus->pub.osh, memblock_size); + if (fw_memblock == NULL) { + DBUSERR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + memblock_size)); + goto err; + } + len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image); + if ((actual_fwlen = check_file(dhd_bus->pub.osh, memptr)) <= 0) { + DBUSERR(("%s: bad firmware format!\n", __FUNCTION__)); + goto err; + } + + total_len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad; +#if defined(CONFIG_DHD_USE_STATIC_BUF) + dhd_bus->image = (uint8*)DHD_OS_PREALLOC(dhd_bus->dhd, + DHD_PREALLOC_MEMDUMP_RAM, total_len); +#else + dhd_bus->image = MALLOC(dhd_bus->pub.osh, total_len); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + dhd_bus->image_len = total_len; + if (dhd_bus->image == NULL) { + DBUSERR(("%s: malloc failed! 
size=%d\n", __FUNCTION__, total_len)); + goto err; + } + + /* Step1: Copy trx header + firmwre */ + memptr = fw_memblock; + do { + if (len < 0) { + DBUSERR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + bcopy(memptr, dhd_bus->image+offset, len); + offset += len; + } while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image))); + /* Step2: Copy NVRAM + pad */ + hdr = (struct trx_header *)dhd_bus->image; + img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX]; + bcopy(nv_memblock, (uint8 *)(dhd_bus->image + img_offset), + dhd_bus->nvram_len); + img_offset += dhd_bus->nvram_len; + if (nvram_words_pad) { + bzero(&dhd_bus->image[img_offset], nvram_words_pad); + img_offset += nvram_words_pad; + } +#ifdef BCMTRXV2 + /* Step3: Copy DSG/CFG for V2 */ + if (ISTRX_V2(hdr) && + (hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] || + hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) { + DBUSERR(("%s: fix me\n", __FUNCTION__)); + } +#endif /* BCMTRXV2 */ + /* Step4: update TRX header for nvram size */ + hdr = (struct trx_header *)dhd_bus->image; + hdr->len = htol32(total_len); + /* Pass the actual fw len */ + hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] = + htol32(dhd_bus->nvram_len + nvram_words_pad); + /* Calculate CRC over header */ + hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version, + SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version), + CRC32_INIT_VALUE); + + /* Calculate CRC over data */ + for (i = SIZEOF_TRX(hdr); i < total_len; ++i) + hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32); + hdr->crc32 = htol32(hdr->crc32); + + bcmerror = DBUS_OK; + +err: + if (fw_memblock) + MFREE(dhd_bus->pub.osh, fw_memblock, MAX_NVRAMBUF_SIZE); + if (fw_image) + dhd_os_close_image1(dhd_bus->dhd, fw_image); + if (nv_memblock) + MFREE(dhd_bus->pub.osh, nv_memblock, MAX_NVRAMBUF_SIZE); + if (nv_image) + dhd_os_close_image1(dhd_bus->dhd, nv_image); + + return bcmerror; +} + +/** + * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into + * the dongle + */ +static int +dbus_do_download(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path) +{ + int err = DBUS_OK; + + err = dbus_get_fw_nvram(dhd_bus, pfw_path, pnv_path); + if (err) { + DBUSERR(("dbus_do_download: fail to get nvram %d\n", err)); + return err; + } + + if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) { + err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info, + dhd_bus->image, dhd_bus->image_len); + if (err == DBUS_OK) { + err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info); + } + } else + err = DBUS_ERR; + + if (dhd_bus->image) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) + DHD_OS_PREFREE(dhd_bus->dhd, dhd_bus->image, dhd_bus->image_len); +#else + MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + dhd_bus->image = NULL; + dhd_bus->image_len = 0; + } + + return err; +} /* dbus_do_download */ +#else + +/** + * It is easy for the user to pass one jumbo nvram file to the driver than a set of smaller files. + * The 'jumbo nvram' file format is essentially a set of nvram files. Before commencing firmware + * download, the dongle needs to be probed so that the correct nvram contents within the jumbo nvram + * file is selected. 
+/**
+ * It is easier for the user to pass one jumbo nvram file to the driver than a set of smaller
+ * files. The 'jumbo nvram' file format is essentially a set of nvram files. Before commencing
+ * firmware download, the dongle needs to be probed so that the correct nvram contents within
+ * the jumbo nvram file are selected.
+ */
+static int
+dbus_jumbo_nvram(dhd_bus_t *dhd_bus)
+{
+	int8 *nvram = NULL;
+	int nvram_len = 0;
+	int ret = DBUS_OK;
+	uint16 boardrev = 0xFFFF;
+	uint16 boardtype = 0xFFFF;
+
+	/* read the otp for boardrev & boardtype;
+	 * if boardtype/rev are present in otp,
+	 * select nvram data for that boardtype/rev
+	 */
+	dbus_otp(dhd_bus, &boardtype, &boardrev);
+
+	ret = dbus_select_nvram(dhd_bus, dhd_bus->extdl.vars, dhd_bus->extdl.varslen,
+		boardtype, boardrev, &nvram, &nvram_len);
+
+	if (ret == DBUS_JUMBO_BAD_FORMAT)
+		return DBUS_ERR_NVRAM;
+	else if (ret == DBUS_JUMBO_NOMATCH &&
+		(boardtype != 0xFFFF || boardrev != 0xFFFF)) {
+		DBUSERR(("No matching NVRAM for boardtype 0x%02x boardrev 0x%02x\n",
+			boardtype, boardrev));
+		return DBUS_ERR_NVRAM;
+	}
+	dhd_bus->nvram = nvram;
+	dhd_bus->nvram_len = nvram_len;
+
+	return DBUS_OK;
+}
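dbus_get_nvram() below relies on process_nvram_vars() to turn host-side .txt nvram into the packed form the dongle expects. That function is defined elsewhere in the driver; this standalone sketch only models the usual transformation (comments and blank lines dropped, settings NUL-separated, double NUL at the end), with made-up variable names:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *text = "# board nvram\nboardtype=0x062b\n\nmacaddr=00:90:4c:c5:12:38\n";
	char packed[128];
	size_t n = 0;

	for (const char *p = text; *p; ) {
		const char *nl = strchr(p, '\n');
		size_t len = nl ? (size_t)(nl - p) : strlen(p);
		if (len && *p != '#') {		/* skip comments and blank lines */
			memcpy(packed + n, p, len);
			packed[n + len] = '\0';	/* NUL-separated settings */
			n += len + 1;
		}
		p += len + (nl ? 1 : 0);
	}
	packed[n++] = '\0';			/* double NUL terminates the block */
	printf("packed %zu bytes\n", n);
	return 0;
}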
+/** before commencing fw download, the correct NVRAM image to download has to be picked */
+static int
+dbus_get_nvram(dhd_bus_t *dhd_bus)
+{
+	int len, i;
+	struct trx_header *hdr;
+	int actual_fwlen;
+	uint32 img_offset = 0;
+
+	dhd_bus->nvram_len = 0;
+	if (dhd_bus->extdl.varslen) {
+		if (DBUS_OK != dbus_jumbo_nvram(dhd_bus))
+			return DBUS_ERR_NVRAM;
+		DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
+	}
+#if defined(BCM_REQUEST_FW)
+	else if (nonfwnvram) {
+		dhd_bus->nvram = nonfwnvram;
+		dhd_bus->nvram_len = nonfwnvramlen;
+		DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
+	}
+#endif
+	if (dhd_bus->nvram) {
+		uint8 nvram_words_pad = 0;
+		/* Validate the format/length etc of the file */
+		if ((actual_fwlen = check_file(dhd_bus->pub.osh, dhd_bus->fw)) <= 0) {
+			DBUSERR(("%s: bad firmware format!\n", __FUNCTION__));
+			return DBUS_ERR_NVRAM;
+		}
+
+		if (!dhd_bus->nvram_nontxt) {
+			/* host supplied nvram could be in .txt format
+			 * with all the comments etc.
+			 */
+			dhd_bus->nvram_len = process_nvram_vars(dhd_bus->nvram,
+				dhd_bus->nvram_len);
+		}
+		if (dhd_bus->nvram_len % 4)
+			nvram_words_pad = 4 - dhd_bus->nvram_len % 4;
+
+		len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad;
+		dhd_bus->image = MALLOC(dhd_bus->pub.osh, len);
+		dhd_bus->image_len = len;
+		if (dhd_bus->image == NULL) {
+			DBUSERR(("%s: malloc failed!\n", __FUNCTION__));
+			return DBUS_ERR_NVRAM;
+		}
+		hdr = (struct trx_header *)dhd_bus->fw;
+		/* Step1: Copy trx header + firmware */
+		img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX];
+		bcopy(dhd_bus->fw, dhd_bus->image, img_offset);
+		/* Step2: Copy NVRAM + pad */
+		bcopy(dhd_bus->nvram, (uint8 *)(dhd_bus->image + img_offset),
+			dhd_bus->nvram_len);
+		img_offset += dhd_bus->nvram_len;
+		if (nvram_words_pad) {
+			bzero(&dhd_bus->image[img_offset],
+				nvram_words_pad);
+			img_offset += nvram_words_pad;
+		}
+#ifdef BCMTRXV2
+		/* Step3: Copy DSG/CFG for V2 */
+		if (ISTRX_V2(hdr) &&
+			(hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] ||
+			hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) {
+
+			bcopy(dhd_bus->fw + SIZEOF_TRX(hdr) +
+				hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX] +
+				hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX],
+				dhd_bus->image + img_offset,
+				hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
+				hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX]);
+
+			img_offset += hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
+				hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX];
+		}
+#endif /* BCMTRXV2 */
+		/* Step4: update TRX header for nvram size */
+		hdr = (struct trx_header *)dhd_bus->image;
+		hdr->len = htol32(len);
+		/* Pass the actual fw len */
+		hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] =
+			htol32(dhd_bus->nvram_len + nvram_words_pad);
+		/* Calculate CRC over header */
+		hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version,
+			SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version),
+			CRC32_INIT_VALUE);
+
+		/* Calculate CRC over data */
+		for (i = SIZEOF_TRX(hdr); i < len; ++i)
+			hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32);
+		hdr->crc32 = htol32(hdr->crc32);
+	} else {
+		dhd_bus->image = dhd_bus->fw;
+		dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+	}
+
+	return DBUS_OK;
+} /* dbus_get_nvram */
+
+/**
+ * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into
+ * the dongle
+ */
+static int
+dbus_do_download(dhd_bus_t *dhd_bus)
+{
+	int err = DBUS_OK;
+#ifndef BCM_REQUEST_FW
+	int decomp_override = 0;
+#endif
+#ifdef BCM_REQUEST_FW
+	uint16 boardrev = 0xFFFF, boardtype = 0xFFFF;
+	int8 *temp_nvram;
+	int temp_len;
+#endif
+
+#if defined(BCM_REQUEST_FW)
+	dhd_bus->firmware = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
+		dhd_bus->pub.attrib.chiprev, &dhd_bus->fw, &dhd_bus->fwlen,
+		DBUS_FIRMWARE, 0, 0);
+	if (!dhd_bus->firmware)
+		return DBUS_ERR;
+#endif
+
+	dhd_bus->image = dhd_bus->fw;
+	dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+
+#ifndef BCM_REQUEST_FW
+	if (UNZIP_ENAB(dhd_bus) && !decomp_override) {
+		err = dbus_zlib_decomp(dhd_bus);
+		if (err) {
+			DBUSERR(("dbus_attach: fw decompress fail %d\n", err));
+			return err;
+		}
+	}
+#endif
+
+#if defined(BCM_REQUEST_FW)
+	/* check if firmware is appended with nvram file */
+	err = dbus_otp(dhd_bus, &boardtype, &boardrev);
+	/* check if nvram is provided as a separate file */
+	nonfwnvram = NULL;
+	nonfwnvramlen = 0;
+	dhd_bus->nvfile = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
+		dhd_bus->pub.attrib.chiprev, (void *)&temp_nvram, &temp_len,
+		DBUS_NVFILE, boardtype, boardrev);
+	if (dhd_bus->nvfile) {
+		int8 *tmp = MALLOC(dhd_bus->pub.osh, temp_len);
+		if (tmp) {
+
bcopy(temp_nvram, tmp, temp_len); + nonfwnvram = tmp; + nonfwnvramlen = temp_len; + } else { + err = DBUS_ERR; + goto fail; + } + } +#endif /* defined(BCM_REQUEST_FW) */ + + err = dbus_get_nvram(dhd_bus); + if (err) { + DBUSERR(("dbus_do_download: fail to get nvram %d\n", err)); + return err; + } + + + if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) { + err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info, + dhd_bus->image, dhd_bus->image_len); + + if (err == DBUS_OK) + err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info); + } else + err = DBUS_ERR; + + if (dhd_bus->nvram) { + MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len); + dhd_bus->image = dhd_bus->fw; + dhd_bus->image_len = (uint32)dhd_bus->fwlen; + } + +#ifndef BCM_REQUEST_FW + if (UNZIP_ENAB(dhd_bus) && (!decomp_override) && dhd_bus->orig_fw) { + MFREE(dhd_bus->pub.osh, dhd_bus->fw, dhd_bus->decomp_memsize); + dhd_bus->image = dhd_bus->fw = dhd_bus->orig_fw; + dhd_bus->image_len = dhd_bus->fwlen = dhd_bus->origfw_len; + } +#endif + +#if defined(BCM_REQUEST_FW) +fail: + if (dhd_bus->firmware) { + dbus_release_fw_nvfile(dhd_bus->firmware); + dhd_bus->firmware = NULL; + } + if (dhd_bus->nvfile) { + dbus_release_fw_nvfile(dhd_bus->nvfile); + dhd_bus->nvfile = NULL; + } + if (nonfwnvram) { + MFREE(dhd_bus->pub.osh, nonfwnvram, nonfwnvramlen); + nonfwnvram = NULL; + nonfwnvramlen = 0; + } +#endif + return err; +} /* dbus_do_download */ +#endif /* EXTERNAL_FW_PATH */ +#endif + +/** required for DBUS deregistration */ +static void +dbus_disconnect(void *handle) +{ + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (disconnect_cb) + disconnect_cb(disc_arg); +} + +/** + * This function is called when the sent irb times out without a tx response status. + * DBUS adds reliability by resending timed out IRBs DBUS_TX_RETRY_LIMIT times. + */ +static void +dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) handle; + + if ((dhd_bus == NULL) || (dhd_bus->drvintf == NULL) || (txirb == NULL)) { + return; + } + + DBUSTRACE(("%s\n", __FUNCTION__)); + + return; + +} /* dbus_if_send_irb_timeout */ + +/** + * When lower DBUS level signals that a send IRB completed, either successful or not, the higher + * level (e.g. dhd_linux.c) has to be notified, and transmit flow control has to be evaluated. + */ +static void +dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) handle; + int txirb_pending; + struct exec_parms args; + void *pktinfo; + + if ((dhd_bus == NULL) || (txirb == NULL)) { + return; + } + + DBUSTRACE(("%s: status = %d\n", __FUNCTION__, status)); + + dbus_tx_timer_stop(dhd_bus); + + /* re-queue BEFORE calling send_complete which will assume that this irb + is now available. 
+ */
+	pktinfo = txirb->info;
+	bzero(txirb, sizeof(dbus_irb_tx_t));
+	args.qenq.q = dhd_bus->tx_q;
+	args.qenq.b = (dbus_irb_t *) txirb;
+	EXEC_TXLOCK(dhd_bus, q_enq_exec, &args);
+
+	if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) {
+		if ((status == DBUS_OK) || (status == DBUS_ERR_NODEVICE)) {
+			if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+				dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+					status);
+
+			if (status == DBUS_OK) {
+				txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt;
+				if (txirb_pending)
+					dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL);
+				if ((txirb_pending < dhd_bus->tx_low_watermark) &&
+					dhd_bus->txoff && !dhd_bus->txoverride) {
+					dbus_flowctrl_tx(dhd_bus, OFF);
+				}
+			}
+		} else {
+			DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__,
+				pktinfo));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC)
+			if (pktinfo)
+				if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+					dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+						status);
+#else
+			dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE);
+#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) */
+		}
+	} else {
+		DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__,
+			pktinfo));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC)
+		if (pktinfo)
+			if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+				dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+					status);
+#else
+		dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE);
+#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) */
+	}
+} /* dbus_if_send_irb_complete */
+
+/**
+ * When lower DBUS level signals that a receive IRB completed, either successful or not, the higher
+ * level (e.g. dhd_linux.c) has to be notified, and fresh free receive IRBs may have to be given
+ * to lower levels.
+ */
+static void
+dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+	int rxirb_pending;
+	struct exec_parms args;
+
+	if ((dhd_bus == NULL) || (rxirb == NULL)) {
+		return;
+	}
+	DBUSTRACE(("%s\n", __FUNCTION__));
+	if (dhd_bus->pub.busstate != DBUS_STATE_DOWN &&
+		dhd_bus->pub.busstate != DBUS_STATE_SLEEP) {
+		if (status == DBUS_OK) {
+			if ((rxirb->buf != NULL) && (rxirb->actual_len > 0)) {
+#ifdef DBUS_USB_LOOPBACK
+				if (is_loopback_pkt(rxirb->buf)) {
+					matches_loopback_pkt(rxirb->buf);
+				} else
+#endif
+				if (dhd_bus->cbs && dhd_bus->cbs->recv_buf) {
+					dhd_bus->cbs->recv_buf(dhd_bus->cbarg, rxirb->buf,
+						rxirb->actual_len);
+				}
+			} else if (rxirb->pkt != NULL) {
+				if (dhd_bus->cbs && dhd_bus->cbs->recv_pkt)
+					dhd_bus->cbs->recv_pkt(dhd_bus->cbarg, rxirb->pkt);
+			} else {
+				ASSERT(0); /* Should not happen */
+			}
+
+			rxirb_pending = dhd_bus->pub.nrxq - dhd_bus->rx_q->cnt - 1;
+			if ((rxirb_pending <= dhd_bus->rx_low_watermark) &&
+				!dhd_bus->rxoff) {
+				DBUSTRACE(("Low watermark so submit more %d <= %d \n",
+					rxirb_pending, dhd_bus->rx_low_watermark));
+				dbus_rxirbs_fill(dhd_bus);
+			} else if (dhd_bus->rxoff)
+				DBUSTRACE(("rx flow controlled. not filling more. cur_rxq=%d\n",
+					dhd_bus->rx_q->cnt));
+		} else if (status == DBUS_ERR_NODEVICE) {
+			DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__, status,
+				rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+			if (rxirb->buf) {
+				PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+				PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+			}
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+		} else {
+			if (status != DBUS_ERR_RXZLP)
+				DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__,
+					status, rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+			if (rxirb->buf) {
+				PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+				PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+			}
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+		}
+	} else {
+		DBUSTRACE(("%s: DBUS down, ignoring recv callback. buf %p\n", __FUNCTION__,
+			rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+		if (rxirb->buf) {
+			PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+			PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+		}
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+	}
+	if (dhd_bus->rx_q != NULL) {
+		bzero(rxirb, sizeof(dbus_irb_rx_t));
+		args.qenq.q = dhd_bus->rx_q;
+		args.qenq.b = (dbus_irb_t *) rxirb;
+		EXEC_RXLOCK(dhd_bus, q_enq_exec, &args);
+	} else
+		MFREE(dhd_bus->pub.osh, rxirb, sizeof(dbus_irb_rx_t));
+} /* dbus_if_recv_irb_complete */
+
+/**
+ * Accumulate errors signaled by lower DBUS levels and signal them to the higher
+ * (e.g. dhd_linux.c) level.
+ */
+static void
+dbus_if_errhandler(void *handle, int err)
+{
+	dhd_bus_t *dhd_bus = handle;
+	uint32 mask = 0;
+
+	if (dhd_bus == NULL)
+		return;
+
+	switch (err) {
+	case DBUS_ERR_TXFAIL:
+		dhd_bus->pub.stats.tx_errors++;
+		mask |= ERR_CBMASK_TXFAIL;
+		break;
+	case DBUS_ERR_TXDROP:
+		dhd_bus->pub.stats.tx_dropped++;
+		mask |= ERR_CBMASK_TXFAIL;
+		break;
+	case DBUS_ERR_RXFAIL:
+		dhd_bus->pub.stats.rx_errors++;
+		mask |= ERR_CBMASK_RXFAIL;
+		break;
+	case DBUS_ERR_RXDROP:
+		dhd_bus->pub.stats.rx_dropped++;
+		mask |= ERR_CBMASK_RXFAIL;
+		break;
+	default:
+		break;
+	}
+
+	if (dhd_bus->cbs && dhd_bus->cbs->errhandler && (dhd_bus->errmask & mask))
+		dhd_bus->cbs->errhandler(dhd_bus->cbarg, err);
+}
+
+/**
+ * When lower DBUS level signals control IRB completed, higher level (e.g. dhd_linux.c) has to be
+ * notified.
+ */
+static void
+dbus_if_ctl_complete(void *handle, int type, int status)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL) {
+		DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) {
+		if (dhd_bus->cbs && dhd_bus->cbs->ctl_complete)
+			dhd_bus->cbs->ctl_complete(dhd_bus->cbarg, type, status);
+	}
+}
+
+/**
+ * Rx related functionality (flow control, posting of free IRBs to rx queue) is dependent upon the
+ * bus state. When lower DBUS level signals a change in the interface state, take appropriate action
+ * and forward the signaling to the higher (e.g. dhd_linux.c) level.
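+ * Concretely, in the code below: DBUS_STATE_PNP_FWDL leaves busstate
+ * untouched and re-enables rx flow; DBUS_STATE_SLEEP throttles rx;
+ * DBUS_STATE_UP refills the free rx IRB pool and unthrottles rx; and a
+ * SLEEP indication is ignored unless the bus was previously UP.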
+ */
+static void
+dbus_if_state_change(void *handle, int state)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+	int old_state;
+
+	if (dhd_bus == NULL)
+		return;
+
+	if (dhd_bus->pub.busstate == state)
+		return;
+	old_state = dhd_bus->pub.busstate;
+	if (state == DBUS_STATE_DISCONNECT) {
+		DBUSERR(("DBUS disconnected\n"));
+	}
+
+	/* Ignore USB SUSPEND while not up yet */
+	if (state == DBUS_STATE_SLEEP && old_state != DBUS_STATE_UP)
+		return;
+
+	DBUSTRACE(("dbus state change from %d to %d\n", old_state, state));
+
+	/* Don't update state if it's PnP firmware re-download */
+	if (state != DBUS_STATE_PNP_FWDL)
+		dhd_bus->pub.busstate = state;
+	else
+		dbus_flowctrl_rx(handle, FALSE);
+	if (state == DBUS_STATE_SLEEP)
+		dbus_flowctrl_rx(handle, TRUE);
+	if (state == DBUS_STATE_UP) {
+		dbus_rxirbs_fill(dhd_bus);
+		dbus_flowctrl_rx(handle, FALSE);
+	}
+
+	if (dhd_bus->cbs && dhd_bus->cbs->state_change)
+		dhd_bus->cbs->state_change(dhd_bus->cbarg, state);
+}
+
+/** Forward request for packet from lower DBUS layer to higher layer (e.g. dhd_linux.c) */
+static void *
+dbus_if_pktget(void *handle, uint len, bool send)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+	void *p = NULL;
+
+	if (dhd_bus == NULL)
+		return NULL;
+
+	if (dhd_bus->cbs && dhd_bus->cbs->pktget)
+		p = dhd_bus->cbs->pktget(dhd_bus->cbarg, len, send);
+	else
+		ASSERT(0);
+
+	return p;
+}
+
+/** Forward request to free packet from lower DBUS layer to higher layer (e.g. dhd_linux.c) */
+static void
+dbus_if_pktfree(void *handle, void *p, bool send)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+	if (dhd_bus == NULL)
+		return;
+
+	if (dhd_bus->cbs && dhd_bus->cbs->pktfree)
+		dhd_bus->cbs->pktfree(dhd_bus->cbarg, p, send);
+	else
+		ASSERT(0);
+}
+
+/** Lower DBUS level requests either a send or receive IRB */
+static struct dbus_irb*
+dbus_if_getirb(void *cbarg, bool send)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) cbarg;
+	struct exec_parms args;
+	struct dbus_irb *irb;
+
+	if ((dhd_bus == NULL) || (dhd_bus->pub.busstate != DBUS_STATE_UP))
+		return NULL;
+
+	if (send == TRUE) {
+		args.qdeq.q = dhd_bus->tx_q;
+		irb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args);
+	} else {
+		args.qdeq.q = dhd_bus->rx_q;
+		irb = EXEC_RXLOCK(dhd_bus, q_deq_exec, &args);
+	}
+
+	return irb;
+}
+
+/**
+ * Called as part of DBUS bus registration. Calls back into higher level (e.g. dhd_linux.c) probe
+ * function.
+ */
+static void *
+dbus_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no,
+	uint16 slot, uint32 hdrlen)
+{
+	DBUSTRACE(("%s\n", __FUNCTION__));
+	if (probe_cb) {
+		disc_arg = probe_cb(probe_arg, desc, bustype, bus_no, slot, hdrlen);
+		return disc_arg;
+	}
+
+	return (void *)DBUS_ERR;
+}
+
+/**
+ * As part of initialization, higher level (e.g. dhd_linux.c) requests DBUS to prepare for
+ * action.
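+ * Registration below binds dhd_dbus_probe_cb()/dhd_dbus_disconnect_cb() and
+ * passes vendor/product ids 0xa5c/0x48f (0x0a5c is the Broadcom USB vendor
+ * id) to the lower-level dbus_bus_register(). DBUS_ERR_NODEVICE is mapped
+ * to DBUS_OK so that registration does not fail when no dongle has
+ * enumerated yet.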
+ */ +int +dhd_bus_register(void) +{ + int err; + + DBUSTRACE(("%s: Enter\n", __FUNCTION__)); + + probe_cb = dhd_dbus_probe_cb; + disconnect_cb = dhd_dbus_disconnect_cb; + probe_arg = NULL; + + err = dbus_bus_register(0xa5c, 0x48f, dbus_probe, /* call lower DBUS level register function */ + dbus_disconnect, NULL, &g_busintf, NULL, NULL); + + /* Device not detected */ + if (err == DBUS_ERR_NODEVICE) + err = DBUS_OK; + + return err; +} + +dhd_pub_t *g_pub = NULL; +void +dhd_bus_unregister(void) +{ + int ret; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + DHD_MUTEX_LOCK(); + if (g_pub) { + g_pub->dhd_remove = TRUE; + if (!g_pub->bus) { + dhd_dbus_disconnect_cb(g_pub->bus); + } + } + probe_cb = NULL; + DHD_MUTEX_UNLOCK(); + ret = dbus_bus_deregister(); + disconnect_cb = NULL; + probe_arg = NULL; +} + +/** As part of initialization, data structures have to be allocated and initialized */ +dhd_bus_t * +dbus_attach(osl_t *osh, int rxsize, int nrxq, int ntxq, dhd_pub_t *pub, + dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh) +{ + dhd_bus_t *dhd_bus; + int err; + + if ((g_busintf == NULL) || (g_busintf->attach == NULL) || (cbs == NULL)) + return NULL; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if ((nrxq <= 0) || (ntxq <= 0)) + return NULL; + + dhd_bus = MALLOC(osh, sizeof(dhd_bus_t)); + if (dhd_bus == NULL) { + DBUSERR(("%s: malloc failed %zu\n", __FUNCTION__, sizeof(dhd_bus_t))); + return NULL; + } + + bzero(dhd_bus, sizeof(dhd_bus_t)); + + /* BUS-specific driver interface (at a lower DBUS level) */ + dhd_bus->drvintf = g_busintf; + dhd_bus->cbarg = pub; + dhd_bus->cbs = cbs; + + dhd_bus->pub.sh = sh; + dhd_bus->pub.osh = osh; + dhd_bus->pub.rxsize = rxsize; + + dhd_bus->pub.nrxq = nrxq; + dhd_bus->rx_low_watermark = nrxq / 2; /* keep enough posted rx urbs */ + dhd_bus->pub.ntxq = ntxq; + dhd_bus->tx_low_watermark = ntxq / 4; /* flow control when too many tx urbs posted */ + + dhd_bus->tx_q = MALLOC(osh, sizeof(dbus_irbq_t)); + if (dhd_bus->tx_q == NULL) + goto error; + else { + bzero(dhd_bus->tx_q, sizeof(dbus_irbq_t)); + err = dbus_irbq_init(dhd_bus, dhd_bus->tx_q, ntxq, sizeof(dbus_irb_tx_t)); + if (err != DBUS_OK) + goto error; + } + + dhd_bus->rx_q = MALLOC(osh, sizeof(dbus_irbq_t)); + if (dhd_bus->rx_q == NULL) + goto error; + else { + bzero(dhd_bus->rx_q, sizeof(dbus_irbq_t)); + err = dbus_irbq_init(dhd_bus, dhd_bus->rx_q, nrxq, sizeof(dbus_irb_rx_t)); + if (err != DBUS_OK) + goto error; + } + + + dhd_bus->bus_info = (void *)g_busintf->attach(&dhd_bus->pub, + dhd_bus, &dbus_intf_cbs); + if (dhd_bus->bus_info == NULL) + goto error; + + dbus_tx_timer_init(dhd_bus); + +#if defined(BCM_REQUEST_FW) + /* Need to copy external image for re-download */ + if (extdl && extdl->fw && (extdl->fwlen > 0)) { + dhd_bus->extdl.fw = MALLOC(osh, extdl->fwlen); + if (dhd_bus->extdl.fw) { + bcopy(extdl->fw, dhd_bus->extdl.fw, extdl->fwlen); + dhd_bus->extdl.fwlen = extdl->fwlen; + } + } + + if (extdl && extdl->vars && (extdl->varslen > 0)) { + dhd_bus->extdl.vars = MALLOC(osh, extdl->varslen); + if (dhd_bus->extdl.vars) { + bcopy(extdl->vars, dhd_bus->extdl.vars, extdl->varslen); + dhd_bus->extdl.varslen = extdl->varslen; + } + } +#endif + + return (dhd_bus_t *)dhd_bus; + +error: + DBUSERR(("%s: Failed\n", __FUNCTION__)); + dbus_detach(dhd_bus); + return NULL; +} /* dbus_attach */ + +void +dbus_detach(dhd_bus_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + osl_t *osh; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return; + + dbus_tx_timer_stop(dhd_bus); + + osh = 
pub->pub.osh; + + if (dhd_bus->drvintf && dhd_bus->drvintf->detach) + dhd_bus->drvintf->detach((dbus_pub_t *)dhd_bus, dhd_bus->bus_info); + + if (dhd_bus->tx_q) { + dbus_irbq_deinit(dhd_bus, dhd_bus->tx_q, sizeof(dbus_irb_tx_t)); + MFREE(osh, dhd_bus->tx_q, sizeof(dbus_irbq_t)); + dhd_bus->tx_q = NULL; + } + + if (dhd_bus->rx_q) { + dbus_irbq_deinit(dhd_bus, dhd_bus->rx_q, sizeof(dbus_irb_rx_t)); + MFREE(osh, dhd_bus->rx_q, sizeof(dbus_irbq_t)); + dhd_bus->rx_q = NULL; + } + + + if (dhd_bus->extdl.fw && (dhd_bus->extdl.fwlen > 0)) { + MFREE(osh, dhd_bus->extdl.fw, dhd_bus->extdl.fwlen); + dhd_bus->extdl.fw = NULL; + dhd_bus->extdl.fwlen = 0; + } + + if (dhd_bus->extdl.vars && (dhd_bus->extdl.varslen > 0)) { + MFREE(osh, dhd_bus->extdl.vars, dhd_bus->extdl.varslen); + dhd_bus->extdl.vars = NULL; + dhd_bus->extdl.varslen = 0; + } + + MFREE(osh, dhd_bus, sizeof(dhd_bus_t)); +} /* dbus_detach */ + +int dbus_dlneeded(dhd_bus_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int dlneeded = DBUS_ERR; + + if (!dhd_bus) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate)); + + if (dhd_bus->drvintf->dlneeded) { + dlneeded = dhd_bus->drvintf->dlneeded(dhd_bus->bus_info); + } + printf("%s: dlneeded=%d\n", __FUNCTION__, dlneeded); + + /* dlneeded > 0: need to download + * dlneeded = 0: downloaded + * dlneeded < 0: bus error*/ + return dlneeded; +} + +#if defined(BCM_REQUEST_FW) +int dbus_download_firmware(dhd_bus_t *pub, char *pfw_path, char *pnv_path) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + + if (!dhd_bus) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate)); + + dhd_bus->pub.busstate = DBUS_STATE_DL_PENDING; +#ifdef EXTERNAL_FW_PATH + err = dbus_do_download(dhd_bus, pfw_path, pnv_path); +#else + err = dbus_do_download(dhd_bus); +#endif /* EXTERNAL_FW_PATH */ + if (err == DBUS_OK) { + dhd_bus->pub.busstate = DBUS_STATE_DL_DONE; + } else { + DBUSERR(("%s: download failed (%d)\n", __FUNCTION__, err)); + } + + return err; +} +#endif + +/** + * higher layer requests us to 'up' the interface to the dongle. Prerequisite is that firmware (not + * bootloader) must be active in the dongle. + */ +int +dbus_up(struct dhd_bus *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + if ((dhd_bus->pub.busstate == DBUS_STATE_DL_DONE) || + (dhd_bus->pub.busstate == DBUS_STATE_DOWN) || + (dhd_bus->pub.busstate == DBUS_STATE_SLEEP)) { + if (dhd_bus->drvintf && dhd_bus->drvintf->up) { + err = dhd_bus->drvintf->up(dhd_bus->bus_info); + + if (err == DBUS_OK) { + dbus_rxirbs_fill(dhd_bus); + } + } + } else + err = DBUS_ERR; + + return err; +} + +/** higher layer requests us to 'down' the interface to the dongle. 
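+ * Only honored while the bus is UP or SLEEP; any other state returns
+ * DBUS_ERR. The tx timeout timer is stopped unconditionally first.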
*/ +int +dbus_down(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + dbus_tx_timer_stop(dhd_bus); + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->down) + return dhd_bus->drvintf->down(dhd_bus->bus_info); + } + + return DBUS_ERR; +} + +int +dbus_shutdown(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->shutdown) + return dhd_bus->drvintf->shutdown(dhd_bus->bus_info); + + return DBUS_OK; +} + +int +dbus_stop(struct dhd_bus *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->stop) + return dhd_bus->drvintf->stop(dhd_bus->bus_info); + } + + return DBUS_ERR; +} + +int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf) +{ + return dbus_send_pkt(dbus, pktbuf, pktbuf /* pktinfo */); +} + +int +dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info) +{ + return dbus_send_irb(pub, buf, len, NULL, info); +} + +int +dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info) +{ + return dbus_send_irb(pub, NULL, 0, pkt, info); +} + +int +dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if (dhd_bus == NULL) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->send_ctl) + return dhd_bus->drvintf->send_ctl(dhd_bus->bus_info, buf, len); + } else { + DBUSERR(("%s: bustate=%d\n", __FUNCTION__, dhd_bus->pub.busstate)); + } + + return DBUS_ERR; +} + +int +dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (buf == NULL)) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->recv_ctl) + return dhd_bus->drvintf->recv_ctl(dhd_bus->bus_info, buf, len); + } + + return DBUS_ERR; +} + +/** Only called via RPC (Dec 2012) */ +int +dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + dbus_irb_rx_t *rxirb; + struct exec_parms args; + int status; + + + if (dhd_bus == NULL) + return DBUS_ERR; + + args.qdeq.q = dhd_bus->rx_q; + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) { + if ((rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) { + status = dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info, + rxirb, ep_idx); + if (status == DBUS_ERR_RXDROP) { + bzero(rxirb, sizeof(dbus_irb_rx_t)); + args.qenq.q = dhd_bus->rx_q; + args.qenq.b = (dbus_irb_t *) rxirb; + EXEC_RXLOCK(dhd_bus, q_enq_exec, &args); + } + } + } + } + + return DBUS_ERR; +} + +/** only called by dhd_cdc.c (Dec 2012) */ +int +dbus_poll_intr(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + int status = DBUS_ERR; + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) { + status = 
dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info, + NULL, 0xff); + } + } + return status; +} + +/** called by nobody (Dec 2012) */ +void * +dbus_pktget(dbus_pub_t *pub, int len) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (len < 0)) + return NULL; + + return PKTGET(dhd_bus->pub.osh, len, TRUE); +} + +/** called by nobody (Dec 2012) */ +void +dbus_pktfree(dbus_pub_t *pub, void* pkt) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (pkt == NULL)) + return; + + PKTFREE(dhd_bus->pub.osh, pkt, TRUE); +} + +/** called by nobody (Dec 2012) */ +int +dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (stats == NULL)) + return DBUS_ERR; + + bcopy(&dhd_bus->pub.stats, stats, sizeof(dbus_stats_t)); + + return DBUS_OK; +} + +int +dbus_get_attrib(dhd_bus_t *pub, dbus_attrib_t *attrib) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + if ((dhd_bus == NULL) || (attrib == NULL)) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->get_attrib) { + err = dhd_bus->drvintf->get_attrib(dhd_bus->bus_info, + &dhd_bus->pub.attrib); + } + + bcopy(&dhd_bus->pub.attrib, attrib, sizeof(dbus_attrib_t)); + return err; +} + +int +dbus_get_device_speed(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if (dhd_bus == NULL) + return INVALID_SPEED; + + return (dhd_bus->pub.device_speed); +} + +int +dbus_set_config(dbus_pub_t *pub, dbus_config_t *config) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + if ((dhd_bus == NULL) || (config == NULL)) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->set_config) { + err = dhd_bus->drvintf->set_config(dhd_bus->bus_info, + config); + + if ((config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) && + (!err) && + (dhd_bus->pub.busstate == DBUS_STATE_UP)) { + dbus_rxirbs_fill(dhd_bus); + } + } + + return err; +} + +int +dbus_get_config(dbus_pub_t *pub, dbus_config_t *config) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + if ((dhd_bus == NULL) || (config == NULL)) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->get_config) { + err = dhd_bus->drvintf->get_config(dhd_bus->bus_info, + config); + } + + return err; +} + +int +dbus_set_errmask(dbus_pub_t *pub, uint32 mask) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + + if (dhd_bus == NULL) + return DBUS_ERR; + + dhd_bus->errmask = mask; + return err; +} + +int +dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + bool fwdl = FALSE; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + return DBUS_OK; + } + + + + if (dhd_bus->drvintf->pnp) { + err = dhd_bus->drvintf->pnp(dhd_bus->bus_info, + DBUS_PNP_RESUME); + } + + if (dhd_bus->drvintf->recv_needed) { + if (dhd_bus->drvintf->recv_needed(dhd_bus->bus_info)) { + /* Refill after sleep/hibernate */ + dbus_rxirbs_fill(dhd_bus); + } + } + + + if (fw_reload) + *fw_reload = fwdl; + + return err; +} /* dbus_pnp_resume */ + +int +dbus_pnp_sleep(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + dbus_tx_timer_stop(dhd_bus); + + if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) { + err = dhd_bus->drvintf->pnp(dhd_bus->bus_info, + DBUS_PNP_SLEEP); + } + + return err; +} + 
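+/*
+ * Illustrative pairing of the PnP entry points above and below (a sketch,
+ * not code from this driver; 'pub' is the dbus_pub_t and 'fwdl' an int):
+ *
+ *	dbus_pnp_sleep(pub);		// stop tx timer, pnp(DBUS_PNP_SLEEP)
+ *	// ... host suspended ...
+ *	dbus_pnp_resume(pub, &fwdl);	// pnp(DBUS_PNP_RESUME), refill rx IRBs
+ *	if (fwdl)			// re-download if firmware was lost
+ *		dbus_download_firmware(bus, fw_path, nv_path);
+ */
+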
+int +dbus_pnp_disconnect(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + dbus_tx_timer_stop(dhd_bus); + + if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) { + err = dhd_bus->drvintf->pnp(dhd_bus->bus_info, + DBUS_PNP_DISCONNECT); + } + + return err; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) dhdp->bus; + int err = DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->iovar_op) { + err = dhd_bus->drvintf->iovar_op(dhd_bus->bus_info, + name, params, plen, arg, len, set); + } + + return err; +} + + +void * +dhd_dbus_txq(const dbus_pub_t *pub) +{ + return NULL; +} + +uint +dhd_dbus_hdrlen(const dbus_pub_t *pub) +{ + return 0; +} + +void * +dbus_get_devinfo(dbus_pub_t *pub) +{ + return pub->dev_info; +} + +#if defined(BCM_REQUEST_FW) && !defined(EXTERNAL_FW_PATH) +static int +dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev) +{ + uint32 value = 0; + uint8 *cis; + uint16 *otpinfo; + uint32 i; + bool standard_cis = TRUE; + uint8 tup, tlen; + bool btype_present = FALSE; + bool brev_present = FALSE; + int ret; + int devid; + uint16 btype = 0; + uint16 brev = 0; + uint32 otp_size = 0, otp_addr = 0, otp_sw_rgn = 0; + + if (dhd_bus == NULL || dhd_bus->drvintf == NULL || + dhd_bus->drvintf->readreg == NULL) + return DBUS_ERR; + + devid = dhd_bus->pub.attrib.devid; + + if ((devid == BCM43234_CHIP_ID) || (devid == BCM43235_CHIP_ID) || + (devid == BCM43236_CHIP_ID)) { + + otp_size = BCM_OTP_SIZE_43236; + otp_sw_rgn = BCM_OTP_SW_RGN_43236; + otp_addr = BCM_OTP_ADDR_43236; + + } else { + return DBUS_ERR_NVRAM; + } + + cis = MALLOC(dhd_bus->pub.osh, otp_size * 2); + if (cis == NULL) + return DBUS_ERR; + + otpinfo = (uint16 *) cis; + + for (i = 0; i < otp_size; i++) { + + ret = dhd_bus->drvintf->readreg(dhd_bus->bus_info, + otp_addr + ((otp_sw_rgn + i) << 1), 2, &value); + + if (ret != DBUS_OK) { + MFREE(dhd_bus->pub.osh, cis, otp_size * 2); + return ret; + } + otpinfo[i] = (uint16) value; + } + + for (i = 0; i < (otp_size << 1); ) { + + if (standard_cis) { + tup = cis[i++]; + if (tup == CISTPL_NULL || tup == CISTPL_END) + tlen = 0; + else + tlen = cis[i++]; + } else { + if (cis[i] == CISTPL_NULL || cis[i] == CISTPL_END) { + tlen = 0; + tup = cis[i]; + } else { + tlen = cis[i]; + tup = CISTPL_BRCM_HNBU; + } + ++i; + } + + if (tup == CISTPL_END || (i + tlen) >= (otp_size << 1)) { + break; + } + + switch (tup) { + + case CISTPL_BRCM_HNBU: + + switch (cis[i]) { + + case HNBU_BOARDTYPE: + + btype = (uint16) ((cis[i + 2] << 8) + cis[i + 1]); + btype_present = TRUE; + DBUSTRACE(("%s: HNBU_BOARDTYPE = 0x%2x\n", __FUNCTION__, + (uint32)btype)); + break; + + case HNBU_BOARDREV: + + if (tlen == 2) + brev = (uint16) cis[i + 1]; + else + brev = (uint16) ((cis[i + 2] << 8) + cis[i + 1]); + brev_present = TRUE; + DBUSTRACE(("%s: HNBU_BOARDREV = 0x%2x\n", __FUNCTION__, + (uint32)*boardrev)); + break; + + case HNBU_HNBUCIS: + DBUSTRACE(("%s: HNBU_HNBUCIS\n", __FUNCTION__)); + tlen++; + standard_cis = FALSE; + break; + } + break; + } + + i += tlen; + } + + MFREE(dhd_bus->pub.osh, cis, otp_size * 2); + + if (btype_present == TRUE && brev_present == TRUE) { + *boardtype = btype; + *boardrev = brev; + DBUSERR(("otp boardtype = 0x%2x boardrev = 0x%2x\n", + *boardtype, *boardrev)); + + return 
DBUS_OK;
+	}
+	else
+		return DBUS_ERR;
+} /* dbus_otp */
+
+static int
+dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen,
+	uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len)
+{
+	/* The multi-board NVRAM file format is a concatenation of NVRAM config
+	 * sets, each terminated with '\r'. The format for concatenated sets is
+	 * \nBroadcom Jumbo Nvram file\nfirst_set\nsecond_set\nthird_set\n
+	 */
+	uint8 *nvram_start = NULL, *nvram_end = NULL;
+	uint8 *nvram_start_prev = NULL, *nvram_end_prev = NULL;
+	uint16 btype = 0, brev = 0;
+	int len = 0;
+	char *field;
+
+	*nvram = NULL;
+	*nvram_len = 0;
+
+	if (strncmp(BCM_JUMBO_START, jumbonvram, strlen(BCM_JUMBO_START))) {
+		/* single nvram file in the native format */
+		DBUSTRACE(("%s: Non-Jumbo NVRAM File \n", __FUNCTION__));
+		*nvram = jumbonvram;
+		*nvram_len = jumbolen;
+		return DBUS_OK;
+	} else {
+		DBUSTRACE(("%s: Jumbo NVRAM File \n", __FUNCTION__));
+	}
+
+	/* sanity test the end of the config sets for proper ending */
+	if (jumbonvram[jumbolen - 1] != BCM_JUMBO_NVRAM_DELIMIT ||
+		jumbonvram[jumbolen - 2] != '\0') {
+		DBUSERR(("%s: Bad Jumbo NVRAM file format\n", __FUNCTION__));
+		return DBUS_JUMBO_BAD_FORMAT;
+	}
+
+	dhd_bus->nvram_nontxt = DBUS_NVRAM_NONTXT;
+
+	nvram_start = jumbonvram;
+
+	while (*nvram_start != BCM_JUMBO_NVRAM_DELIMIT && len < jumbolen) {
+
+		/* consume the first file info line
+		 * \nBroadcom Jumbo Nvram file\nfile1\n ...
+		 */
+		len++;
+		nvram_start++;
+	}
+
+	nvram_end = nvram_start;
+
+	/* search for "boardrev=0xabcd" and "boardtype=0x1234" information in
+	 * the concatenated nvram config files / sets
+	 */
+
+	while (len < jumbolen) {
+
+		if (*nvram_end == '\0') {
+			/* end of a config set is marked by multiple null characters */
+			len++;
+			nvram_end++;
+			DBUSTRACE(("%s: NULL chr len = %d char = 0x%x\n", __FUNCTION__,
+				len, *nvram_end));
+			continue;
+
+		} else if (*nvram_end == BCM_JUMBO_NVRAM_DELIMIT) {
+
+			/* config set delimiter is reached */
+			/* check if next config set is present or not
+			 * return if next config is not present
+			 */
+
+			/* start searching the next config set */
+			nvram_start_prev = nvram_start;
+			nvram_end_prev = nvram_end;
+
+			nvram_end++;
+			nvram_start = nvram_end;
+			btype = brev = 0;
+			DBUSTRACE(("%s: going to next record len = %d "
+				"char = 0x%x \n", __FUNCTION__, len, *nvram_end));
+			len++;
+			if (len >= jumbolen) {
+
+				*nvram = nvram_start_prev;
+				*nvram_len = (int)(nvram_end_prev - nvram_start_prev);
+
+				DBUSTRACE(("%s: no more len = %d nvram_end = 0x%p",
+					__FUNCTION__, len, nvram_end));
+
+				return DBUS_JUMBO_NOMATCH;
+
+			} else {
+				continue;
+			}
+
+		} else {
+
+			DBUSTRACE(("%s: config str = %s\n", __FUNCTION__, nvram_end));
+
+			if (bcmp(nvram_end, "boardtype", strlen("boardtype")) == 0) {
+
+				field = strchr(nvram_end, '=');
+				field++;
+				btype = (uint16)bcm_strtoul(field, NULL, 0);
+
+				DBUSTRACE(("%s: btype = 0x%x boardtype = 0x%x \n", __FUNCTION__,
+					btype, boardtype));
+			}
+
+			if (bcmp(nvram_end, "boardrev", strlen("boardrev")) == 0) {
+
+				field = strchr(nvram_end, '=');
+				field++;
+				brev = (uint16)bcm_strtoul(field, NULL, 0);
+
+				DBUSTRACE(("%s: brev = 0x%x boardrev = 0x%x \n", __FUNCTION__,
+					brev, boardrev));
+			}
+			if (btype == boardtype && brev == boardrev) {
+				/* locate nvram config set end - i.e. find the '\r' char */
+				while (*nvram_end != BCM_JUMBO_NVRAM_DELIMIT)
+					nvram_end++;
+				*nvram = nvram_start;
+				*nvram_len = (int)(nvram_end - nvram_start);
+				DBUSTRACE(("found len = %d nvram_start = 0x%p "
+					"nvram_end = 0x%p\n", *nvram_len, nvram_start, nvram_end));
+				return
DBUS_OK; + } + + len += (strlen(nvram_end) + 1); + nvram_end += (strlen(nvram_end) + 1); + } + } + return DBUS_JUMBO_NOMATCH; +} /* dbus_select_nvram */ + +#endif + +#define DBUS_NRXQ 50 +#define DBUS_NTXQ 100 + +static void +dhd_dbus_send_complete(void *handle, void *info, int status) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + void *pkt = info; + + if ((dhd == NULL) || (pkt == NULL)) { + DBUSERR(("dhd or pkt is NULL\n")); + return; + } + + if (status == DBUS_OK) { + dhd->dstats.tx_packets++; + } else { + DBUSERR(("TX error=%d\n", status)); + dhd->dstats.tx_errors++; + } +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) && + (dhd_wlfc_txcomplete(dhd, pkt, status == 0) != WLFC_UNSUPPORTED)) { + return; + } else +#endif /* PROP_TXSTATUS */ + dhd_txcomplete(dhd, pkt, status == 0); + PKTFREE(dhd->osh, pkt, TRUE); +} + +static void +dhd_dbus_recv_pkt(void *handle, void *pkt) +{ + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + uint pkt_count; + dhd_pub_t *dhd = (dhd_pub_t *)handle; + int ifidx = 0; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + /* If the protocol uses a data header, check and remove it */ + if (dhd_prot_hdrpull(dhd, &ifidx, pkt, reorder_info_buf, + &reorder_info_len) != 0) { + DBUSERR(("rx protocol error\n")); + PKTFREE(dhd->osh, pkt, FALSE); + dhd->rx_errors++; + return; + } + + if (reorder_info_len) { + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(dhd, reorder_info_buf, reorder_info_len, + &pkt, &pkt_count); + if (pkt_count == 0) + return; + } + else { + pkt_count = 1; + } + dhd_rx_frame(dhd, ifidx, pkt, pkt_count, 0); +} + +static void +dhd_dbus_recv_buf(void *handle, uint8 *buf, int len) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + void *pkt; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if ((pkt = PKTGET(dhd->osh, len, FALSE)) == NULL) { + DBUSERR(("PKTGET (rx) failed=%d\n", len)); + return; + } + + bcopy(buf, PKTDATA(dhd->osh, pkt), len); + dhd_dbus_recv_pkt(dhd, pkt); +} + +static void +dhd_dbus_txflowcontrol(void *handle, bool onoff) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + bool wlfc_enabled = FALSE; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, onoff, !onoff) != WLFC_UNSUPPORTED); +#endif + + if (!wlfc_enabled) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, onoff); + } +} + +static void +dhd_dbus_errhandler(void *handle, int err) +{ +} + +static void +dhd_dbus_ctl_complete(void *handle, int type, int status) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (type == DBUS_CBCTL_READ) { + if (status == DBUS_OK) + dhd->rx_ctlpkts++; + else + dhd->rx_ctlerrs++; + } else if (type == DBUS_CBCTL_WRITE) { + if (status == DBUS_OK) + dhd->tx_ctlpkts++; + else + dhd->tx_ctlerrs++; + } + + dhd_prot_ctl_complete(dhd); +} + +static void +dhd_dbus_state_change(void *handle, int state) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + unsigned long flags; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + switch (state) { + + case DBUS_STATE_DL_NEEDED: + DBUSERR(("%s: firmware request cannot be handled\n", __FUNCTION__)); + break; + case DBUS_STATE_DOWN: + DBUSTRACE(("%s: DBUS is down\n", __FUNCTION__)); + DHD_LINUX_GENERAL_LOCK(dhd, flags); + dhd_txflowcontrol(dhd, ALL_INTERFACES, ON); + dhd->busstate = 
DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(dhd, flags); + break; + case DBUS_STATE_UP: + DBUSTRACE(("%s: DBUS is up\n", __FUNCTION__)); + DHD_LINUX_GENERAL_LOCK(dhd, flags); + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + dhd->busstate = DHD_BUS_DATA; + DHD_LINUX_GENERAL_UNLOCK(dhd, flags); + break; + case DBUS_STATE_SLEEP: + DBUSTRACE(("%s: DBUS is suspend\n", __FUNCTION__)); + DHD_LINUX_GENERAL_LOCK(dhd, flags); + dhd_txflowcontrol(dhd, ALL_INTERFACES, ON); + dhd->busstate = DHD_BUS_SUSPEND; + DHD_LINUX_GENERAL_UNLOCK(dhd, flags); + break; + default: + break; + } + + DBUSERR(("%s: DBUS current state=%d\n", __FUNCTION__, state)); +} + +static void * +dhd_dbus_pktget(void *handle, uint len, bool send) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + void *p = NULL; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return NULL; + } + + if (send == TRUE) { + dhd_os_sdlock_txq(dhd); + p = PKTGET(dhd->osh, len, TRUE); + dhd_os_sdunlock_txq(dhd); + } else { + dhd_os_sdlock_rxq(dhd); + p = PKTGET(dhd->osh, len, FALSE); + dhd_os_sdunlock_rxq(dhd); + } + + return p; +} + +static void +dhd_dbus_pktfree(void *handle, void *p, bool send) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (send == TRUE) { +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && + (dhd_wlfc_txcomplete(dhd, p, FALSE) != WLFC_UNSUPPORTED)) { + return; + } +#endif /* PROP_TXSTATUS */ + + dhd_os_sdlock_txq(dhd); + PKTFREE(dhd->osh, p, TRUE); + dhd_os_sdunlock_txq(dhd); + } else { + dhd_os_sdlock_rxq(dhd); + PKTFREE(dhd->osh, p, FALSE); + dhd_os_sdunlock_rxq(dhd); + } +} + + +static dbus_callbacks_t dhd_dbus_cbs = { + dhd_dbus_send_complete, + dhd_dbus_recv_buf, + dhd_dbus_recv_pkt, + dhd_dbus_txflowcontrol, + dhd_dbus_errhandler, + dhd_dbus_ctl_complete, + dhd_dbus_state_change, + dhd_dbus_pktget, + dhd_dbus_pktfree +}; + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus != NULL); + return bus->pub.attrib.devid; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus != NULL); + return bus->pub.attrib.chiprev; +} + +void +dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + bcm_bprintf(strbuf, "Bus USB\n"); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ +} + +int +dhd_bus_txdata(struct dhd_bus *bus, void *pktbuf) +{ + DBUSTRACE(("%s\n", __FUNCTION__)); + if (bus->txoff) { + DBUSTRACE(("txoff\n")); + return BCME_EPERM; + } + return dbus_send_txdata(&bus->pub, pktbuf); +} + +static void +dhd_dbus_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhd_dbus_advertise_bus_remove(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_REMOVE; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) 
+{ + int bcmerror = 0; + unsigned long flags; + wifi_adapter_info_t *adapter = (wifi_adapter_info_t *)dhdp->adapter; + + if (flag == TRUE) { + if (!dhdp->dongle_reset) { + DBUSERR(("%s: == Power OFF ==\n", __FUNCTION__)); + dhd_dbus_advertise_bus_cleanup(dhdp); + dhd_os_wd_timer(dhdp, 0); +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(dhdp, ALL_INTERFACES, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + dbus_stop(dhdp->bus); + + dhdp->dongle_reset = TRUE; + dhdp->up = FALSE; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + wifi_clr_adapter_status(adapter, WIFI_STATUS_FW_READY); + + printf("%s: WLAN OFF DONE\n", __FUNCTION__); + /* App can now remove power from device */ + } else + bcmerror = BCME_ERROR; + } else { + /* App must have restored power to device before calling */ + printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__); + if (dhdp->dongle_reset) { + /* Turn on WLAN */ + DHD_MUTEX_UNLOCK(); + wait_event_interruptible_timeout(adapter->status_event, + wifi_get_adapter_status(adapter, WIFI_STATUS_FW_READY), + msecs_to_jiffies(DHD_FW_READY_TIMEOUT)); + DHD_MUTEX_LOCK(); + bcmerror = dbus_up(dhdp->bus); + if (bcmerror == BCME_OK) { + dhdp->dongle_reset = FALSE; + dhdp->up = TRUE; +#if !defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF); +#endif + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + + DBUSTRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + } else { + DBUSERR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, bcmerror)); + } + } + } + + return bcmerror; +} + +void +dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, + char *pnv_path, char *pclm_path, char *pconf_path) +{ + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (bus == NULL) { + DBUSERR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; + + dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path); + +} + +/* + * hdrlen is space to reserve in pkt headroom for DBUS + */ +void * +dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen) +{ + osl_t *osh = NULL; + dhd_bus_t *bus = NULL; + dhd_pub_t *pub = NULL; + uint rxsz; + int dlneeded = 0; + wifi_adapter_info_t *adapter = NULL; + + DBUSTRACE(("%s: Enter\n", __FUNCTION__)); + + adapter = dhd_wifi_platform_get_adapter(bustype, bus_no, slot); + + if (!g_pub) { + /* Ask the OS interface part for an OSL handle */ + if (!(osh = osl_attach(NULL, bustype, TRUE))) { + DBUSERR(("%s: OSL attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Attach to the dhd/OS interface */ + if (!(pub = dhd_attach(osh, bus, hdrlen, adapter))) { + DBUSERR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + } else { + pub = g_pub; + osh = pub->osh; + } + + if (pub->bus) { + DBUSERR(("%s: wrong probe\n", __FUNCTION__)); + goto fail; + } + + rxsz = dhd_get_rxsz(pub); + bus = dbus_attach(osh, rxsz, DBUS_NRXQ, DBUS_NTXQ, pub, &dhd_dbus_cbs, NULL, NULL); + if (bus) { + pub->bus = bus; + bus->dhd = pub; + + dlneeded = dbus_dlneeded(bus); + if (dlneeded >= 0) { + if (!g_pub) { + dhd_conf_reset(pub); + dhd_conf_set_chiprev(pub, bus->pub.attrib.devid, bus->pub.attrib.chiprev); + dhd_conf_preinit(pub); + } + } + + if (g_pub || dhd_download_fw_on_driverload) { + if (dlneeded == 0) { + wifi_set_adapter_status(adapter, 
WIFI_STATUS_FW_READY); +#ifdef BCM_REQUEST_FW + } else if (dlneeded > 0) { + wifi_clr_adapter_status(adapter, WIFI_STATUS_FW_READY); + dhd_set_path(bus->dhd); + if (dbus_download_firmware(bus, bus->fw_path, bus->nv_path) != DBUS_OK) + goto fail; + bus->dhd->busstate = DHD_BUS_LOAD; +#endif + } else { + goto fail; + } + } + } else { + DBUSERR(("%s: dbus_attach failed\n", __FUNCTION__)); + } + + if (!g_pub) { + /* Ok, have the per-port tell the stack we're open for business */ + if (dhd_attach_net(bus->dhd, TRUE) != 0) + { + DBUSERR(("%s: Net attach failed!!\n", __FUNCTION__)); + goto fail; + } + pub->hang_report = TRUE; +#if defined(MULTIPLE_SUPPLICANT) + wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe +#endif + g_pub = pub; + } + + DBUSTRACE(("%s: Exit\n", __FUNCTION__)); + wifi_clr_adapter_status(adapter, WIFI_STATUS_DETTACH); + wifi_set_adapter_status(adapter, WIFI_STATUS_ATTACH); + wake_up_interruptible(&adapter->status_event); + /* This is passed to dhd_dbus_disconnect_cb */ + return bus; + +fail: + if (pub && pub->bus) { + dbus_detach(pub->bus); + pub->bus = NULL; + } + /* Release resources in reverse order */ + if (!g_pub) { + if (pub) { + dhd_detach(pub); + dhd_free(pub); + } + if (osh) { + osl_detach(osh); + } + } + + printf("%s: Failed\n", __FUNCTION__); + return NULL; +} + +void +dhd_dbus_disconnect_cb(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t *)arg; + dhd_pub_t *pub = g_pub; + osl_t *osh; + wifi_adapter_info_t *adapter = NULL; + + adapter = (wifi_adapter_info_t *)pub->adapter; + + if (pub && !pub->dhd_remove && bus == NULL) { + DBUSERR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + if (!adapter) { + DBUSERR(("%s: adapter is NULL\n", __FUNCTION__)); + return; + } + + printf("%s: Enter dhd_remove=%d on %s\n", __FUNCTION__, + pub->dhd_remove, adapter->name); + if (!pub->dhd_remove) { + /* Advertise bus remove during rmmod */ + dhd_dbus_advertise_bus_remove(bus->dhd); + dbus_detach(pub->bus); + pub->bus = NULL; + wifi_clr_adapter_status(adapter, WIFI_STATUS_ATTACH); + wifi_set_adapter_status(adapter, WIFI_STATUS_DETTACH); + wake_up_interruptible(&adapter->status_event); + } else { + osh = pub->osh; + dhd_detach(pub); + if (pub->bus) { + dbus_detach(pub->bus); + pub->bus = NULL; + } + dhd_free(pub); + g_pub = NULL; + if (MALLOCED(osh)) { + DBUSERR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh))); + } + osl_detach(osh); + } + + DBUSTRACE(("%s: Exit\n", __FUNCTION__)); +} + +#ifdef LINUX_EXTERNAL_MODULE_DBUS + +static int __init +bcm_dbus_module_init(void) +{ + printf("Inserting bcm_dbus module \n"); + return 0; +} + +static void __exit +bcm_dbus_module_exit(void) +{ + printf("Removing bcm_dbus module \n"); + return; +} + +EXPORT_SYMBOL(dbus_pnp_sleep); +EXPORT_SYMBOL(dbus_get_devinfo); +EXPORT_SYMBOL(dbus_detach); +EXPORT_SYMBOL(dbus_get_attrib); +EXPORT_SYMBOL(dbus_down); +EXPORT_SYMBOL(dbus_pnp_resume); +EXPORT_SYMBOL(dbus_set_config); +EXPORT_SYMBOL(dbus_flowctrl_rx); +EXPORT_SYMBOL(dbus_up); +EXPORT_SYMBOL(dbus_get_device_speed); +EXPORT_SYMBOL(dbus_send_pkt); +EXPORT_SYMBOL(dbus_recv_ctl); +EXPORT_SYMBOL(dbus_attach); + +MODULE_LICENSE("GPL"); + +module_init(bcm_dbus_module_init); +module_exit(bcm_dbus_module_exit); + +#endif /* #ifdef LINUX_EXTERNAL_MODULE_DBUS */ diff --git a/bcmdhd.101.10.361.x/dbus_usb.c b/bcmdhd.101.10.361.x/dbus_usb.c new file mode 100755 index 0000000..5cee418 --- /dev/null +++ b/bcmdhd.101.10.361.x/dbus_usb.c @@ -0,0 +1,1173 @@ +/* + * Dongle BUS interface for USB, OS independent + * + * 
Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dbus_usb.c 565557 2015-06-22 19:29:44Z $ + */ + +/** + * @file @brief + * This file contains DBUS code that is USB, but not OS specific. DBUS is a Broadcom proprietary + * host specific abstraction layer. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +uint dbus_msglevel = DBUS_ERROR_VAL; +module_param(dbus_msglevel, int, 0); + + +#define USB_DLIMAGE_RETRY_TIMEOUT 3000 /* retry Timeout */ +#define USB_SFLASH_DLIMAGE_SPINWAIT 150 /* in unit of ms */ +#define USB_SFLASH_DLIMAGE_LIMIT 2000 /* spinwait limit (ms) */ +#define POSTBOOT_ID 0xA123 /* ID to detect if dongle has boot up */ +#define USB_RESETCFG_SPINWAIT 1 /* wait after resetcfg (ms) */ +#define USB_DEV_ISBAD(u) (u->pub->attrib.devid == 0xDEAD) +#define USB_DLGO_SPINWAIT 100 /* wait after DL_GO (ms) */ +#define TEST_CHIP 0x4328 + +typedef struct { + dbus_pub_t *pub; + + void *cbarg; + dbus_intf_callbacks_t *cbs; /** callbacks into higher DBUS level (dbus.c) */ + dbus_intf_t *drvintf; + void *usbosl_info; + uint32 rdlram_base_addr; + uint32 rdlram_size; +} usb_info_t; + +/* + * Callbacks common to all USB + */ +static void dbus_usb_disconnect(void *handle); +static void dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb); +static void dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status); +static void dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status); +static void dbus_usb_errhandler(void *handle, int err); +static void dbus_usb_ctl_complete(void *handle, int type, int status); +static void dbus_usb_state_change(void *handle, int state); +static struct dbus_irb* dbus_usb_getirb(void *handle, bool send); +static void dbus_usb_rxerr_indicate(void *handle, bool on); +#if !defined(BCM_REQUEST_FW) +static int dbus_usb_resetcfg(usb_info_t *usbinfo); +#endif +static int dbus_usb_iovar_op(void *bus, const char *name, + void *params, int plen, void *arg, int len, bool set); +static int dbus_iovar_process(usb_info_t* usbinfo, const char *name, + void *params, int plen, void *arg, int len, bool set); +static int dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid, + const char *name, void *params, int plen, void *arg, int len, int val_size); +static int dhdusb_downloadvars(usb_info_t *bus, void *arg, int len); + +static int dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen); 
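+
+/*
+ * Download sequencing, as driven from dbus.c: dbus_usb_dlneeded() decides
+ * whether the dongle is still running its boot loader and needs an image;
+ * dbus_usb_dlstart() then transfers the image (via dbus_usb_dl_writeimage()),
+ * and dbus_usb_dlrun() starts it executing.
+ */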
+static int dbus_usb_dlstart(void *bus, uint8 *fw, int len);
+static int dbus_usb_dlneeded(void *bus);
+static int dbus_usb_dlrun(void *bus);
+static int dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo);
+
+/* OS specific */
+extern bool dbus_usbos_dl_cmd(void *info, uint8 cmd, void *buffer, int buflen);
+extern int dbus_usbos_wait(void *info, uint16 ms);
+extern int dbus_write_membytes(usb_info_t *usbinfo, bool set, uint32 address,
+	uint8 *data, uint size);
+extern bool dbus_usbos_dl_send_bulk(void *info, void *buffer, int len);
+extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size);
+
+/**
+ * These functions are called by the lower DBUS level (dbus_usb_os.c) to notify this DBUS level
+ * (dbus_usb.c) of an event.
+ */
+static dbus_intf_callbacks_t dbus_usb_intf_cbs = {
+	dbus_usb_send_irb_timeout,
+	dbus_usb_send_irb_complete,
+	dbus_usb_recv_irb_complete,
+	dbus_usb_errhandler,
+	dbus_usb_ctl_complete,
+	dbus_usb_state_change,
+	NULL,			/* isr */
+	NULL,			/* dpc */
+	NULL,			/* watchdog */
+	NULL,			/* dbus_if_pktget */
+	NULL,			/* dbus_if_pktfree */
+	dbus_usb_getirb,
+	dbus_usb_rxerr_indicate
+};
+
+/* IOVar table */
+enum {
+	IOV_SET_DOWNLOAD_STATE = 1,
+	IOV_DBUS_MSGLEVEL,
+	IOV_MEMBYTES,
+	IOV_VARS,
+	IOV_LOOPBACK_TX
+};
+
+const bcm_iovar_t dhdusb_iovars[] = {
+	{"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
+	{"dbus_msglevel", IOV_DBUS_MSGLEVEL, 0, IOVT_UINT32, 0 },
+	{"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
+	{"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
+	{"usb_lb_txfer", IOV_LOOPBACK_TX, 0, IOVT_BUFFER, 2 * sizeof(int) },
+	{NULL, 0, 0, 0, 0 }
+};
+
+/*
+ * Need global for probe() and disconnect() since
+ * attach() is not called at probe and detach()
+ * can be called inside disconnect()
+ */
+static probe_cb_t probe_cb = NULL;
+static disconnect_cb_t disconnect_cb = NULL;
+static void *probe_arg = NULL;
+static void *disc_arg = NULL;
+static dbus_intf_t *g_dbusintf = NULL;
+static dbus_intf_t dbus_usb_intf;	/** functions called by higher layer DBUS into lower layer */
+
+/*
+ * dbus_intf_t common to all USB
+ * These functions override dbus_usb_<os>.c.
+ */
+static void *dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs);
+static void dbus_usb_detach(dbus_pub_t *pub, void *info);
+static void * dbus_usb_probe(void *arg, const char *desc, uint32 bustype,
+	uint16 bus_no, uint16 slot, uint32 hdrlen);
+
+/* functions */
+
+/**
+ * As part of DBUS initialization/registration, the higher level DBUS (dbus.c) needs to know what
+ * lower level DBUS functions to call (in both dbus_usb.c and dbus_usb_os.c).
+ */
+static void *
+dbus_usb_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no,
+	uint16 slot, uint32 hdrlen)
+{
+	DBUSTRACE(("%s(): \n", __FUNCTION__));
+	if (probe_cb) {
+
+		if (g_dbusintf != NULL) {
+			/* First, initialize all lower-level functions as default
+			 * so that dbus.c simply calls directly to dbus_usb_os.c.
+			 */
+			bcopy(g_dbusintf, &dbus_usb_intf, sizeof(dbus_intf_t));
+
+			/* Second, selectively override functions we need, if any.
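+			 * Overridden here: attach, detach, iovar_op and the
+			 * dlstart/dlneeded/dlrun firmware download ops; all
+			 * other entry points fall through to dbus_usb_os.c.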
*/ + dbus_usb_intf.attach = dbus_usb_attach; + dbus_usb_intf.detach = dbus_usb_detach; + dbus_usb_intf.iovar_op = dbus_usb_iovar_op; + dbus_usb_intf.dlstart = dbus_usb_dlstart; + dbus_usb_intf.dlneeded = dbus_usb_dlneeded; + dbus_usb_intf.dlrun = dbus_usb_dlrun; + } + + disc_arg = probe_cb(probe_arg, "DBUS USB", USB_BUS, bus_no, slot, hdrlen); + return disc_arg; + } + + return NULL; +} + +/** + * On return, *intf contains this or lower-level DBUS functions to be called by higher + * level (dbus.c) + */ +int +dbus_bus_register(int vid, int pid, probe_cb_t prcb, + disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2) +{ + int err; + + DBUSTRACE(("%s(): \n", __FUNCTION__)); + probe_cb = prcb; + disconnect_cb = discb; + probe_arg = prarg; + + *intf = &dbus_usb_intf; + + err = dbus_bus_osl_register(vid, pid, dbus_usb_probe, + dbus_usb_disconnect, NULL, &g_dbusintf, param1, param2); + + ASSERT(g_dbusintf); + return err; +} + +int +dbus_bus_deregister() +{ + DBUSTRACE(("%s(): \n", __FUNCTION__)); + return dbus_bus_osl_deregister(); +} + +/** initialization consists of registration followed by 'attach'. */ +void * +dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs) +{ + usb_info_t *usb_info; + + DBUSTRACE(("%s(): \n", __FUNCTION__)); + + if ((g_dbusintf == NULL) || (g_dbusintf->attach == NULL)) + return NULL; + + /* Sanity check for BUS_INFO() */ + ASSERT(OFFSETOF(usb_info_t, pub) == 0); + + usb_info = MALLOC(pub->osh, sizeof(usb_info_t)); + if (usb_info == NULL) + return NULL; + + bzero(usb_info, sizeof(usb_info_t)); + + usb_info->pub = pub; + usb_info->cbarg = cbarg; + usb_info->cbs = cbs; + + usb_info->usbosl_info = (dbus_pub_t *)g_dbusintf->attach(pub, + usb_info, &dbus_usb_intf_cbs); + if (usb_info->usbosl_info == NULL) { + MFREE(pub->osh, usb_info, sizeof(usb_info_t)); + return NULL; + } + + /* Save USB OS-specific driver entry points */ + usb_info->drvintf = g_dbusintf; + + pub->bus = usb_info; +#if !defined(BCM_REQUEST_FW) + if (!dbus_usb_resetcfg(usb_info)) { + usb_info->pub->busstate = DBUS_STATE_DL_DONE; + } +#endif + /* Return Lower layer info */ + return (void *) usb_info->usbosl_info; +} + +void +dbus_usb_detach(dbus_pub_t *pub, void *info) +{ + usb_info_t *usb_info = (usb_info_t *) pub->bus; + osl_t *osh = pub->osh; + + if (usb_info == NULL) + return; + + if (usb_info->drvintf && usb_info->drvintf->detach) + usb_info->drvintf->detach(pub, usb_info->usbosl_info); + + MFREE(osh, usb_info, sizeof(usb_info_t)); +} + +void +dbus_usb_disconnect(void *handle) +{ + DBUSTRACE(("%s(): \n", __FUNCTION__)); + if (disconnect_cb) + disconnect_cb(disc_arg); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->send_irb_timeout) + usb_info->cbs->send_irb_timeout(usb_info->cbarg, txirb); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. 
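+ * As with the other dbus_usb_* callbacks in this block, this is a thin
+ * relay: dbus_usb.c sits between dbus_usb_os.c and dbus.c, so the event is
+ * simply forwarded upward through usb_info->cbs.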
+ */ +static void +dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->send_irb_complete) + usb_info->cbs->send_irb_complete(usb_info->cbarg, txirb, status); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->recv_irb_complete) + usb_info->cbs->recv_irb_complete(usb_info->cbarg, rxirb, status); +} + +/** Lower DBUS level (dbus_usb_os.c) requests a free IRB. Pass this on to the higher DBUS level. */ +static struct dbus_irb* +dbus_usb_getirb(void *handle, bool send) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return NULL; + + if (usb_info->cbs && usb_info->cbs->getirb) + return usb_info->cbs->getirb(usb_info->cbarg, send); + + return NULL; +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_rxerr_indicate(void *handle, bool on) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->rxerr_indicate) + usb_info->cbs->rxerr_indicate(usb_info->cbarg, on); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_errhandler(void *handle, int err) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->errhandler) + usb_info->cbs->errhandler(usb_info->cbarg, err); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_ctl_complete(void *handle, int type, int status) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usb_info == NULL) { + DBUSERR(("%s: usb_info is NULL\n", __FUNCTION__)); + return; + } + + if (usb_info->cbs && usb_info->cbs->ctl_complete) + usb_info->cbs->ctl_complete(usb_info->cbarg, type, status); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. 
+ */
+static void
+dbus_usb_state_change(void *handle, int state)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->cbs && usb_info->cbs->state_change)
+		usb_info->cbs->state_change(usb_info->cbarg, state);
+}
+
+/** called by higher DBUS level (dbus.c) */
+static int
+dbus_usb_iovar_op(void *bus, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	int err = DBUS_OK;
+
+	err = dbus_iovar_process((usb_info_t*)bus, name, params, plen, arg, len, set);
+	return err;
+}
+
+/** process iovar request from higher DBUS level */
+static int
+dbus_iovar_process(usb_info_t* usbinfo, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DBUSTRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdusb_iovars, name)) == NULL) {
+		/* Not Supported */
+		bcmerror = BCME_UNSUPPORTED;
+		DBUSTRACE(("%s: IOVAR %s is not supported\n", __FUNCTION__, name));
+		goto exit;
+
+	}
+
+	DBUSTRACE(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dbus_usb_doiovar(usbinfo, vi, actionid,
+		name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+} /* dbus_iovar_process */
+
+static int
+dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+	void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	int32 int_val2 = 0;
+	bool bool_val = 0;
+
+	DBUSTRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+		__FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	if (plen >= (int)sizeof(int_val) * 2)
+		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	switch (actionid) {
+
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		BCM_REFERENCE(address);
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DBUSTRACE(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+				__FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		DBUSTRACE(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+			(set ? "write" : "read"), size, address));
"write" : "read"), size, address)); + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dbus_usb_dl_writeimage(BUS_INFO(bus, usb_info_t), data, size); + } + break; + + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + + if (bool_val == TRUE) { + bcmerror = dbus_usb_dlneeded(bus); + dbus_usb_rdl_dwnld_state(BUS_INFO(bus, usb_info_t)); + } else { + usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t); + bcmerror = dbus_usb_dlrun(bus); + usbinfo->pub->busstate = DBUS_STATE_DL_DONE; + } + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdusb_downloadvars(BUS_INFO(bus, usb_info_t), arg, len); + break; + + case IOV_GVAL(IOV_DBUS_MSGLEVEL): + int_val = (int32)dbus_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DBUS_MSGLEVEL): + dbus_msglevel = int_val; + break; + +#ifdef DBUS_USB_LOOPBACK + case IOV_SVAL(IOV_LOOPBACK_TX): + bcmerror = dbus_usbos_loopback_tx(BUS_INFO(bus, usb_info_t), int_val, + int_val2); + break; +#endif + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + return bcmerror; +} /* dbus_usb_doiovar */ + +/** higher DBUS level (dbus.c) wants to set NVRAM variables in dongle */ +static int +dhdusb_downloadvars(usb_info_t *bus, void *arg, int len) +{ + int bcmerror = 0; + uint32 varsize; + uint32 varaddr; + uint32 varsizew; + + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* RAM size is not set. Set it at dbus_usb_dlneeded */ + if (!bus->rdlram_size) + bcmerror = BCME_ERROR; + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = len ? ROUNDUP(len, 4) : 0; + varaddr = (bus->rdlram_size - 4) - varsize; + + /* Write the vars list */ + DBUSTRACE(("WriteVars: @%x varsize=%d\n", varaddr, varsize)); + bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, (varaddr + bus->rdlram_base_addr), + arg, varsize); + + /* adjust to the user specified RAM */ + DBUSTRACE(("Usable memory size: %d\n", bus->rdlram_size)); + DBUSTRACE(("Vars are at %d, orig varsize is %d\n", varaddr, varsize)); + + varsize = ((bus->rdlram_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DBUSTRACE(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, ((bus->rdlram_size - 4) + + bus->rdlram_base_addr), (uint8*)&varsizew, 4); +err: + return bcmerror; +} /* dbus_usb_doiovar */ + +#if !defined(BCM_REQUEST_FW) +/** + * After downloading firmware into dongle and starting it, we need to know if the firmware is + * indeed up and running. 
+
+#if !defined(BCM_REQUEST_FW)
+/**
+ * After downloading firmware into dongle and starting it, we need to know if the firmware is
+ * indeed up and running.
+ */
+static int
+dbus_usb_resetcfg(usb_info_t *usbinfo)
+{
+    void *osinfo;
+    bootrom_id_t id;
+    uint16 waittime = 0;
+
+    uint32 starttime = 0;
+    uint32 endtime = 0;
+
+    DBUSTRACE(("%s\n", __FUNCTION__));
+
+    if (usbinfo == NULL)
+        return DBUS_ERR;
+
+    osinfo = usbinfo->usbosl_info;
+    ASSERT(osinfo);
+
+    /* Give the dongle a chance to boot */
+    dbus_usbos_wait(osinfo, USB_SFLASH_DLIMAGE_SPINWAIT);
+    waittime = USB_SFLASH_DLIMAGE_SPINWAIT;
+    while (waittime < USB_DLIMAGE_RETRY_TIMEOUT) {
+
+        starttime = OSL_SYSUPTIME();
+
+        id.chip = 0xDEAD;    /* Get the ID */
+        dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t));
+        id.chip = ltoh32(id.chip);
+
+        endtime = OSL_SYSUPTIME();
+        waittime += (endtime - starttime);
+
+        if (id.chip == POSTBOOT_ID)
+            break;
+    }
+
+    if (id.chip == POSTBOOT_ID) {
+        DBUSERR(("%s: download done. Bootup time = %d ms postboot chip 0x%x/rev 0x%x\n",
+            __FUNCTION__, waittime, id.chip, id.chiprev));
+
+        dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t));
+
+        dbus_usbos_wait(osinfo, USB_RESETCFG_SPINWAIT);
+        return DBUS_OK;
+    } else {
+        DBUSERR(("%s: Cannot talk to dongle. Wait time = %d ms. Firmware is not up\n",
+            __FUNCTION__, waittime));
+        return DBUS_ERR;
+    }
+}
+#endif
+
+/** before firmware download, the dongle has to be prepared to receive the fw image */
+static int
+dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo)
+{
+    void *osinfo = usbinfo->usbosl_info;
+    rdl_state_t state;
+    int err = DBUS_OK;
+
+    /* 1) Prepare USB boot loader for runtime image */
+    dbus_usbos_dl_cmd(osinfo, DL_START, &state, sizeof(rdl_state_t));
+
+    state.state = ltoh32(state.state);
+    state.bytes = ltoh32(state.bytes);
+
+    /* 2) Check we are in the Waiting state */
+    if (state.state != DL_WAITING) {
+        DBUSERR(("%s: Failed to DL_START\n", __FUNCTION__));
+        err = DBUS_ERR;
+        goto fail;
+    }
+
+fail:
+    return err;
+}
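dbus_usb_resetcfg() polls DL_GETVER until the dongle reports POSTBOOT_ID, accumulating real elapsed time per iteration via OSL_SYSUPTIME() instead of counting iterations, so slow control transfers cannot stretch the effective timeout. A generic sketch of that bounded-wait pattern, assuming caller-supplied ready() and uptime_ms() callbacks (both hypothetical; like the driver's loop, it assumes each probe takes nonzero time):

/* Poll until ready() succeeds or 'timeout_ms' of wall-clock time has passed.
 * Each probe is timed individually, so a slow probe consumes its real cost.
 */
static int poll_until_ready(int (*ready)(void), unsigned int (*uptime_ms)(void),
    unsigned int timeout_ms)
{
    unsigned int waited = 0;

    while (waited < timeout_ms) {
        unsigned int start = uptime_ms();
        if (ready())
            return 1;    /* device is up */
        waited += uptime_ms() - start;
    }
    return 0;            /* timed out */
}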
+
+/**
+ * Dongle contains bootcode in ROM but firmware is (partially) contained in dongle RAM. Therefore,
+ * firmware has to be downloaded into dongle RAM.
+ */
+static int
+dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen)
+{
+    osl_t *osh = usbinfo->pub->osh;
+    void *osinfo = usbinfo->usbosl_info;
+    unsigned int sendlen, sent, dllen;
+    char *bulkchunk = NULL, *dlpos;
+    rdl_state_t state;
+    int err = DBUS_OK;
+    bootrom_id_t id;
+    uint16 wait, wait_time;
+    uint32 dl_trunk_size = RDL_CHUNK;
+
+    if (BCM4350_CHIP(usbinfo->pub->attrib.devid))
+        dl_trunk_size = RDL_CHUNK_MAX;
+
+    /* Try the largest chunk size first; halve it on allocation failure,
+     * but never go below the minimum RDL_CHUNK.
+     */
+    while (!bulkchunk) {
+        bulkchunk = MALLOC(osh, dl_trunk_size);
+        if (dl_trunk_size == RDL_CHUNK)
+            break;
+        if (!bulkchunk) {
+            dl_trunk_size /= 2;
+            if (dl_trunk_size < RDL_CHUNK)
+                dl_trunk_size = RDL_CHUNK;
+        }
+    }
+
+    if (bulkchunk == NULL) {
+        err = DBUS_ERR;
+        goto fail;
+    }
+
+    sent = 0;
+    dlpos = fw;
+    dllen = fwlen;
+
+    /* Get chip id and rev */
+    id.chip = usbinfo->pub->attrib.devid;
+    id.chiprev = usbinfo->pub->attrib.chiprev;
+
+    DBUSTRACE(("enter %s: fwlen=%d\n", __FUNCTION__, fwlen));
+
+    dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t));
+
+    /* 3) Load the image */
+    while (sent < dllen) {
+        /* Wait until the usb device reports it received all the bytes we sent */
+
+        if (sent < dllen) {
+            if ((dllen - sent) < dl_trunk_size)
+                sendlen = dllen - sent;
+            else
+                sendlen = dl_trunk_size;
+
+            /* simply avoid having to send a ZLP by ensuring we never have an even
+             * multiple of 64
+             */
+            if (!(sendlen % 64))
+                sendlen -= 4;
+
+            /* send data */
+            memcpy(bulkchunk, dlpos, sendlen);
+            if (!dbus_usbos_dl_send_bulk(osinfo, bulkchunk, sendlen)) {
+                err = DBUS_ERR;
+                goto fail;
+            }
+
+            dlpos += sendlen;
+            sent += sendlen;
+            DBUSTRACE(("%s: sendlen %d\n", __FUNCTION__, sendlen));
+        }
+
+        wait = 0;
+        wait_time = USB_SFLASH_DLIMAGE_SPINWAIT;
+        while (!dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state,
+            sizeof(rdl_state_t))) {
+            if ((id.chip == 43236) && (id.chiprev == 0)) {
+                DBUSERR(("%s: 43236a0 SFlash delay, waiting for dongle crc check "
+                    "completion!!!\n", __FUNCTION__));
+                dbus_usbos_wait(osinfo, wait_time);
+                wait += wait_time;
+                if (wait >= USB_SFLASH_DLIMAGE_LIMIT) {
+                    DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__));
+                    err = DBUS_ERR;
+                    goto fail;
+                }
+            } else {
+                DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__));
+                err = DBUS_ERR;
+                goto fail;
+            }
+        }
+
+        state.state = ltoh32(state.state);
+        state.bytes = ltoh32(state.bytes);
+
+        /* restart if an error is reported */
+        if ((state.state == DL_BAD_HDR) || (state.state == DL_BAD_CRC)) {
+            DBUSERR(("%s: Bad Hdr or Bad CRC\n", __FUNCTION__));
+            err = DBUS_ERR;
+            goto fail;
+        }
+    }
+
+fail:
+    if (bulkchunk)
+        MFREE(osh, bulkchunk, dl_trunk_size);
+
+    return err;
+} /* dbus_usb_dl_writeimage */
+
+/** Higher level DBUS layer (dbus.c) requests this layer to download image into dongle */
+static int
+dbus_usb_dlstart(void *bus, uint8 *fw, int len)
+{
+    usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+    int err;
+
+    DBUSTRACE(("%s\n", __FUNCTION__));
+
+    if (usbinfo == NULL)
+        return DBUS_ERR;
+
+    if (USB_DEV_ISBAD(usbinfo))
+        return DBUS_ERR;
+
+    err = dbus_usb_rdl_dwnld_state(usbinfo);
+
+    if (DBUS_OK == err) {
+        err = dbus_usb_dl_writeimage(usbinfo, fw, len);
+        if (err == DBUS_OK)
+            usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+        else
+            usbinfo->pub->busstate = DBUS_STATE_DL_PENDING;
+    } else
+        usbinfo->pub->busstate = DBUS_STATE_DL_PENDING;
+
+    return err;
+}
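dbus_usb_dl_writeimage() never lets a bulk transfer length be an exact multiple of 64 bytes: it holds 4 bytes back for the next round so the final USB packet of each transfer is always short, and the bootloader never needs a zero-length packet (ZLP) to detect end-of-transfer. Note the driver applies the rule with 64 regardless of bus speed. A small sketch of the chunk-length computation (hypothetical helper, not driver code):

/* Pick the next download chunk length: at most 'chunk' bytes, and never an
 * exact multiple of 64 so the transfer always ends on a short USB packet.
 */
static unsigned int next_sendlen(unsigned int remaining, unsigned int chunk)
{
    unsigned int sendlen = (remaining < chunk) ? remaining : chunk;

    if ((sendlen % 64) == 0)
        sendlen -= 4;    /* hold 4 bytes back for the next iteration */
    return sendlen;
}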
+
+static bool
+dbus_usb_update_chipinfo(usb_info_t *usbinfo, uint32 chip)
+{
+    bool retval = TRUE;
+
+    /* based on the CHIP Id, store the RAM size which is needed for NVRAM download. */
+    switch (chip) {
+
+    case 0x4319:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_4319;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_4319;
+        break;
+
+    case 0x4329:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_4329;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_4329;
+        break;
+
+    case 43234:
+    case 43235:
+    case 43236:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_43236;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_43236;
+        break;
+
+    case 0x4328:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_4328;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_4328;
+        break;
+
+    case 0x4322:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_4322;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_4322;
+        break;
+
+    case 0x4360:
+    case 0xAA06:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_4360;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_4360;
+        break;
+
+    case 43242:
+    case 43243:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_43242;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_43242;
+        break;
+
+    case 43143:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_43143;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_43143;
+        break;
+
+    case 0x4350:
+    case 43556:
+    case 43558:
+    case 43569:
+        usbinfo->rdlram_size = RDL_RAM_SIZE_4350;
+        usbinfo->rdlram_base_addr = RDL_RAM_BASE_4350;
+        break;
+
+    case POSTBOOT_ID:
+        break;
+
+    default:
+        DBUSERR(("%s: Chip 0x%x RAM size is not known\n", __FUNCTION__, chip));
+        retval = FALSE;
+        break;
+    }
+
+    return retval;
+} /* dbus_usb_update_chipinfo */
+
+/** higher DBUS level (dbus.c) wants to know if firmware download is required. */
+static int
+dbus_usb_dlneeded(void *bus)
+{
+    usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+    void *osinfo;
+    bootrom_id_t id;
+    int dl_needed = 1;
+
+    DBUSTRACE(("%s\n", __FUNCTION__));
+
+    if (usbinfo == NULL)
+        return DBUS_ERR;
+
+    osinfo = usbinfo->usbosl_info;
+    ASSERT(osinfo);
+
+    /* Check if firmware downloaded already by querying runtime ID */
+    id.chip = 0xDEAD;
+    dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t));
+
+    id.chip = ltoh32(id.chip);
+    id.chiprev = ltoh32(id.chiprev);
+
+    if (FALSE == dbus_usb_update_chipinfo(usbinfo, id.chip)) {
+        dl_needed = DBUS_ERR;
+        goto exit;
+    }
+
+    DBUSERR(("%s: chip 0x%x rev 0x%x\n", __FUNCTION__, id.chip, id.chiprev));
+    if (id.chip == POSTBOOT_ID) {
+        /* This code is needed to support two enumerations on the USB1.1 scenario */
+        DBUSERR(("%s: Firmware already downloaded\n", __FUNCTION__));
+
+        dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t));
+        dl_needed = DBUS_OK;
+        if (usbinfo->pub->busstate == DBUS_STATE_DL_PENDING)
+            usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+    } else {
+        usbinfo->pub->attrib.devid = id.chip;
+        usbinfo->pub->attrib.chiprev = id.chiprev;
+    }
+
+exit:
+    return dl_needed;
+}
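The chip-to-RAM-geometry switch in dbus_usb_update_chipinfo() could equally be kept as a lookup table, which puts each chip's size/base pair on one line and makes additions mechanical. A hedged sketch of that alternative (illustrative only, not the driver's code; assumes the RDL_RAM_* constants and the driver's uint32/bool typedefs are in scope):

typedef struct {
    uint32 chip;      /* chip id as reported by DL_GETVER */
    uint32 ram_size;  /* dongle RAM size for NVRAM download */
    uint32 ram_base;  /* dongle RAM base address */
} rdl_ram_info_t;

static const rdl_ram_info_t rdl_ram_tbl[] = {
    { 0x4319, RDL_RAM_SIZE_4319, RDL_RAM_BASE_4319 },
    { 0x4329, RDL_RAM_SIZE_4329, RDL_RAM_BASE_4329 },
    { 43236,  RDL_RAM_SIZE_43236, RDL_RAM_BASE_43236 },
    /* ... remaining chips as in the switch above ... */
};

static bool lookup_rdl_ram(uint32 chip, uint32 *size, uint32 *base)
{
    uint i;

    for (i = 0; i < sizeof(rdl_ram_tbl) / sizeof(rdl_ram_tbl[0]); i++) {
        if (rdl_ram_tbl[i].chip == chip) {
            *size = rdl_ram_tbl[i].ram_size;
            *base = rdl_ram_tbl[i].ram_base;
            return TRUE;
        }
    }
    return FALSE;
}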
+
+/** After issuing firmware download, higher DBUS level (dbus.c) wants to start the firmware. */
+static int
+dbus_usb_dlrun(void *bus)
+{
+    usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+    void *osinfo;
+    rdl_state_t state;
+    int err = DBUS_OK;
+
+    DBUSTRACE(("%s\n", __FUNCTION__));
+
+    if (usbinfo == NULL)
+        return DBUS_ERR;
+
+    if (USB_DEV_ISBAD(usbinfo))
+        return DBUS_ERR;
+
+    osinfo = usbinfo->usbosl_info;
+    ASSERT(osinfo);
+
+    /* Check we are runnable */
+    dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t));
+
+    state.state = ltoh32(state.state);
+    state.bytes = ltoh32(state.bytes);
+
+    /* Start the image */
+    if (state.state == DL_RUNNABLE) {
+        DBUSTRACE(("%s: Issue DL_GO\n", __FUNCTION__));
+        dbus_usbos_dl_cmd(osinfo, DL_GO, &state, sizeof(rdl_state_t));
+
+        if (usbinfo->pub->attrib.devid == TEST_CHIP)
+            dbus_usbos_wait(osinfo, USB_DLGO_SPINWAIT);
+
+        /* dbus_usb_resetcfg(usbinfo); */
+        /* The dongle may go for re-enumeration. */
+    } else {
+        DBUSERR(("%s: Dongle not runnable\n", __FUNCTION__));
+        err = DBUS_ERR;
+    }
+
+    return err;
+}
+
+/**
+ * As preparation for firmware download, higher DBUS level (dbus.c) requests the firmware image
+ * to be used for the type of dongle detected. Directly called by dbus.c (so not via a callback
+ * construction)
+ */
+void
+dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp)
+{
+    usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+    unsigned int devid;
+    unsigned int crev;
+
+    devid = usbinfo->pub->attrib.devid;
+    crev = usbinfo->pub->attrib.chiprev;
+
+    *fw = NULL;
+    *fwlen = 0;
+
+    switch (devid) {
+    case BCM43236_CHIP_ID:
+    case BCM43235_CHIP_ID:
+    case BCM43234_CHIP_ID:
+    case BCM43238_CHIP_ID: {
+        if (crev == 3 || crev == 2 || crev == 1) {
+#ifdef EMBED_IMAGE_43236b
+            *fw = (uint8 *)dlarray_43236b;
+            *fwlen = sizeof(dlarray_43236b);
+#endif
+        }
+    } break;
+    case BCM4360_CHIP_ID:
+    case BCM4352_CHIP_ID:
+    case BCM43526_CHIP_ID:
+#ifdef EMBED_IMAGE_43526a
+        if (crev <= 2) {
+            *fw = (uint8 *)dlarray_43526a;
+            *fwlen = sizeof(dlarray_43526a);
+        }
+#endif
+#ifdef EMBED_IMAGE_43526b
+        if (crev > 2) {
+            *fw = (uint8 *)dlarray_43526b;
+            *fwlen = sizeof(dlarray_43526b);
+        }
+#endif
+        break;
+
+    case BCM43242_CHIP_ID:
+#ifdef EMBED_IMAGE_43242a0
+        *fw = (uint8 *)dlarray_43242a0;
+        *fwlen = sizeof(dlarray_43242a0);
+#endif
+        break;
+
+    case BCM43143_CHIP_ID:
+#ifdef EMBED_IMAGE_43143a0
+        *fw = (uint8 *)dlarray_43143a0;
+        *fwlen = sizeof(dlarray_43143a0);
+#endif
+#ifdef EMBED_IMAGE_43143b0
+        *fw = (uint8 *)dlarray_43143b0;
+        *fwlen = sizeof(dlarray_43143b0);
+#endif
+        break;
+
+    case BCM4350_CHIP_ID:
+    case BCM4354_CHIP_ID:
+    case BCM43556_CHIP_ID:
+    case BCM43558_CHIP_ID:
+    case BCM43566_CHIP_ID:
+    case BCM43568_CHIP_ID:
+    case BCM43570_CHIP_ID:
+    case BCM4358_CHIP_ID:
+#ifdef EMBED_IMAGE_4350a0
+        if (crev == 0) {
+            *fw = (uint8 *)dlarray_4350a0;
+            *fwlen = sizeof(dlarray_4350a0);
+        }
+#endif
+#ifdef EMBED_IMAGE_4350b0
+        if (crev == 1) {
+            *fw = (uint8 *)dlarray_4350b0;
+            *fwlen = sizeof(dlarray_4350b0);
+        }
+#endif
+#ifdef EMBED_IMAGE_4350b1
+        if (crev == 2) {
+            *fw = (uint8 *)dlarray_4350b1;
+            *fwlen = sizeof(dlarray_4350b1);
+        }
+#endif
+#ifdef EMBED_IMAGE_43556b1
+        if (crev == 2) {
+            *fw = (uint8 *)dlarray_43556b1;
+            *fwlen = sizeof(dlarray_43556b1);
+        }
+#endif
+#ifdef EMBED_IMAGE_4350c0
+        if (crev == 3) {
+            *fw = (uint8 *)dlarray_4350c0;
+            *fwlen = sizeof(dlarray_4350c0);
+        }
+#endif /* EMBED_IMAGE_4350c0 */
+#ifdef EMBED_IMAGE_4350c1
+        if (crev == 4) {
+            *fw = (uint8 *)dlarray_4350c1;
+            *fwlen = sizeof(dlarray_4350c1);
+        }
+#endif /* EMBED_IMAGE_4350c1 */
+        break;
+    case BCM43569_CHIP_ID:
+#ifdef EMBED_IMAGE_43569a0
+        if (crev == 0) {
+            *fw = (uint8 *)dlarray_43569a0;
+            *fwlen = sizeof(dlarray_43569a0);
+        }
+#endif /* EMBED_IMAGE_43569a0 */
+        break;
+    default:
+#ifdef EMBED_IMAGE_GENERIC
+        *fw = (uint8 *)dlarray;
+        *fwlen = sizeof(dlarray);
+#endif
+        break;
+    }
+} /* dbus_bus_fw_get */
diff --git a/bcmdhd.101.10.361.x/dbus_usb_linux.c b/bcmdhd.101.10.361.x/dbus_usb_linux.c
new file mode 100755
index 0000000..0bd7181
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dbus_usb_linux.c
@@ -0,0 +1,3405 @@
+/*
+ * Dongle BUS interface
+ * USB Linux Implementation
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: dbus_usb_linux.c 564663 2015-06-18 02:34:42Z $
+ */
+
+/**
+ * @file @brief
+ * This file contains DBUS code that is USB *and* OS (Linux) specific. DBUS is a Broadcom
+ * proprietary host specific abstraction layer.
+ */
+
+#include
+#include
+
+/**
+ * DBUS_LINUX_RXDPC is created for router platform performance tuning. A separate thread is
+ * created to handle USB RX, avoid the call chain getting too long, and enhance the cache hit
+ * rate.
+ *
+ * The DBUS_LINUX_RXDPC setting is in the wlconfig file.
+ */
+
+/*
+ * If DBUS_LINUX_RXDPC is off, spin_lock_bh() for CTFPOOL in
+ * linux_osl.c has to be changed to spin_lock_irqsave() because
+ * PKTGET/PKTFREE are no longer in bottom half.
+ *
+ * Right now we have another queue rpcq in wl_linux.c. Maybe we
+ * can eliminate that one to reduce the overhead.
+ *
+ * Enabling the 2nd EP together with DBUS_LINUX_RXDPC causes traffic from
+ * both EPs to be queued in the same rx queue. If we want RXDPC to work
+ * with the 2nd EP, the EP for RPC call returns should bypass the dpc and
+ * go directly up.
+ */
+
+/* #define DBUS_LINUX_RXDPC */
+
+/* Dbus histogram for ntxq, nrxq, dpc parameter tuning */
+/* #define DBUS_LINUX_HIST */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if defined(USBOS_THREAD) || defined(USBOS_TX_THREAD)
+
+/**
+ * The usb-thread is designed to provide concurrency on multiprocessor and SMP Linux kernels. On
+ * a dual core platform, the WLAN driver, without threads, executed only on CPU0. The driver
+ * consumed almost 100% of CPU0, while CPU1 remained idle. The behavior was observed on Broadcom's STB.
+ *
+ * The WLAN driver consumed most of CPU0 and not CPU1 because tasklets/queues, software irqs, and
+ * hardware irqs all execute on CPU0 only. CPU0 became the system's bottleneck: throughput was
+ * lower and the system's responsiveness was slower.
+ *
+ * To improve system responsiveness and throughput, the usb-thread was implemented. The system's
+ * threads can be scheduled to run on any core. One core can be processing data in the usb-layer
+ * while the other core processes data in the wl-layer.
+ *
+ * For further info see [WlThreadAndUsbThread] Twiki.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#endif /* USBOS_THREAD || USBOS_TX_THREAD */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+/**
+ * Starting with the 3.10 kernel release, dynamic PM support for USB is present whenever
+ * the kernel was built with CONFIG_PM_RUNTIME enabled. The CONFIG_USB_SUSPEND option has
+ * been eliminated.
+ */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)) && defined(CONFIG_USB_SUSPEND)) \
+    || ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) && defined(CONFIG_PM_RUNTIME)) \
+    || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+/* For USB power management support, see Linux kernel: Documentation/usb/power-management.txt */
+#define USB_SUSPEND_AVAILABLE
+#endif
+
+/* Define alternate fw/nvram paths used in Android */
+#ifdef OEM_ANDROID
+#define CONFIG_ANDROID_BCMDHD_FW_PATH "broadcom/dhd/firmware/fw.bin.trx"
+#define CONFIG_ANDROID_BCMDHD_NVRAM_PATH "broadcom/dhd/nvrams/nvm.txt"
+#endif /* OEM_ANDROID */
+
+static inline int usb_submit_urb_linux(struct urb *urb)
+{
+#ifdef BCM_MAX_URB_LEN
+    if (urb && (urb->transfer_buffer_length > BCM_MAX_URB_LEN)) {
+        DBUSERR(("URB transfer length=%d exceeded %d ra=%p\n", urb->transfer_buffer_length,
+            BCM_MAX_URB_LEN, __builtin_return_address(0)));
+        return DBUS_ERR;
+    }
+#endif
+
+#ifdef KERNEL26
+    return usb_submit_urb(urb, GFP_ATOMIC);
+#else
+    return usb_submit_urb(urb);
+#endif
+}
+
+#define USB_SUBMIT_URB(urb) usb_submit_urb_linux(urb)
+
+#ifdef KERNEL26
+
+#define USB_ALLOC_URB()     usb_alloc_urb(0, GFP_ATOMIC)
+#define USB_UNLINK_URB(urb) (usb_kill_urb(urb))
+#define USB_FREE_URB(urb)   (usb_free_urb(urb))
+#define USB_REGISTER()      usb_register(&dbus_usbdev)
+#define USB_DEREGISTER()    usb_deregister(&dbus_usbdev)
+
+#ifdef USB_SUSPEND_AVAILABLE
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#define USB_AUTOPM_SET_INTERFACE(intf) usb_autopm_set_interface(intf)
+#else
+#define USB_ENABLE_AUTOSUSPEND(udev)   usb_enable_autosuspend(udev)
+#define USB_DISABLE_AUTOSUSPEND(udev)  usb_disable_autosuspend(udev)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) */
+
+#define USB_AUTOPM_GET_INTERFACE(intf)       usb_autopm_get_interface(intf)
+#define USB_AUTOPM_PUT_INTERFACE(intf)       usb_autopm_put_interface(intf)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) usb_autopm_get_interface_async(intf)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) usb_autopm_put_interface_async(intf)
+#define USB_MARK_LAST_BUSY(dev)              usb_mark_last_busy(dev)
+
+#else /* USB_SUSPEND_AVAILABLE */
+
+#define USB_AUTOPM_GET_INTERFACE(intf)       do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE(intf)       do {} while (0)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_MARK_LAST_BUSY(dev)              do {} while (0)
+#endif /* USB_SUSPEND_AVAILABLE */
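The USB_AUTOPM_* wrappers above map onto the kernel's runtime-PM reference counting: usb_autopm_get_interface() resumes a suspended device and takes a PM reference, and every successful get must be balanced by a put, or autosuspend is blocked permanently (this is why the completion handlers later in this file call USB_AUTOPM_PUT_INTERFACE_ASYNC()). A minimal sketch of the pairing around a transfer, with error handling reduced to the essentials:

/* Sketch: balanced runtime-PM usage around synchronous USB I/O. */
static int do_io_with_pm(struct usb_interface *intf)
{
    int err;

    err = usb_autopm_get_interface(intf);  /* resumes device if suspended */
    if (err)
        return err;                        /* device could not be resumed */

    /* ... perform the USB transfer here ... */

    usb_autopm_put_interface(intf);        /* drop ref, allow autosuspend */
    return 0;
}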
+
+#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \
+    usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \
+        (data), (size), (timeout))
+#define USB_BULK_MSG(dev, pipe, data, len, actual_length, timeout) \
+    usb_bulk_msg((dev), (pipe), (data), (len), (actual_length), (timeout))
+#define USB_BUFFER_ALLOC(dev, size, mem, dma) usb_buffer_alloc(dev, size, mem, dma)
+#define USB_BUFFER_FREE(dev, size, data, dma) usb_buffer_free(dev, size, data, dma)
+
+#ifdef WL_URB_ZPKT
+#define URB_QUEUE_BULK URB_ZERO_PACKET
+#else
+#define URB_QUEUE_BULK 0
+#endif /* WL_URB_ZPKT */
+
+#define CALLBACK_ARGS          struct urb *urb, struct pt_regs *regs
+#define CALLBACK_ARGS_DATA     urb, regs
+#define CONFIGDESC(usb)        (&((usb)->actconfig)->desc)
+#define IFPTR(usb, idx)        ((usb)->actconfig->interface[idx])
+#define IFALTS(usb, idx)       (IFPTR((usb), (idx))->altsetting[0])
+#define IFDESC(usb, idx)       IFALTS((usb), (idx)).desc
+#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[ep]).desc
+
+#else /* KERNEL26 */
+
+#define USB_ALLOC_URB()     usb_alloc_urb(0)
+#define USB_UNLINK_URB(urb) usb_unlink_urb(urb)
+#define USB_FREE_URB(urb)   (usb_free_urb(urb))
+#define USB_REGISTER()      usb_register(&dbus_usbdev)
+#define USB_DEREGISTER()    usb_deregister(&dbus_usbdev)
+#define USB_AUTOPM_GET_INTERFACE(intf)       do {} while (0)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_MARK_LAST_BUSY(dev)              do {} while (0)
+
+#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \
+    usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \
+        (data), (size), (timeout))
+#define USB_BUFFER_ALLOC(dev, size, mem, dma) kmalloc(size, mem)
+#define USB_BUFFER_FREE(dev, size, data, dma) kfree(data)
+
+#ifdef WL_URB_ZPKT
+#define URB_QUEUE_BULK USB_QUEUE_BULK|URB_ZERO_PACKET
+#else
+#define URB_QUEUE_BULK 0
+#endif /* WL_URB_ZPKT */
+
+#define CALLBACK_ARGS          struct urb *urb
+#define CALLBACK_ARGS_DATA     urb
+#define CONFIGDESC(usb)        ((usb)->actconfig)
+#define IFPTR(usb, idx)        (&(usb)->actconfig->interface[idx])
+#define IFALTS(usb, idx)       ((usb)->actconfig->interface[idx].altsetting[0])
+#define IFDESC(usb, idx)       IFALTS((usb), (idx))
+#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[ep])
+
+#endif /* KERNEL26 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+#define USB_SPEED_SUPER 5
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+#define CONTROL_IF 0
+#define BULK_IF    0
+
+#ifdef BCMUSBDEV_COMPOSITE
+#define USB_COMPIF_MAX      4
+
+#define USB_CLASS_WIRELESS  0xe0
+#define USB_CLASS_MISC      0xef
+#define USB_SUBCLASS_COMMON 0x02
+#define USB_PROTO_IAD       0x01
+#define USB_PROTO_VENDOR    0xff
+
+#define USB_QUIRK_NO_SET_INTF 0x04 /* device does not support set_interface */
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#define USB_SYNC_WAIT_TIMEOUT 300 /* ms */
+
+/* Private data kept in skb */
+#define SKB_PRIV(skb, idx) (&((void **)skb->cb)[idx])
+#define SKB_PRIV_URB(skb)  (*(struct urb **)SKB_PRIV(skb, 0))
+
+#ifndef DBUS_USB_RXQUEUE_BATCH_ADD
+/* items to add each time within limit */
+#define DBUS_USB_RXQUEUE_BATCH_ADD 8
+#endif
+
+#ifndef DBUS_USB_RXQUEUE_LOWER_WATERMARK
+/* add a new batch req to rx queue when waiting item count reduces to this number */
+#define DBUS_USB_RXQUEUE_LOWER_WATERMARK 4
+#endif
+
+enum usbos_suspend_state {
+    USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow suspend */
+    USBOS_SUSPEND_STATE_SUSPEND_PENDING,   /* Device is idle, can be suspended;
+                                            * waiting for PM to suspend
+                                            */
USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */ +}; + +enum usbos_request_state { + USBOS_REQUEST_STATE_UNSCHEDULED = 0, /* USB TX request not scheduled */ + USBOS_REQUEST_STATE_SCHEDULED, /* USB TX request given to TX thread */ + USBOS_REQUEST_STATE_SUBMITTED /* USB TX request submitted */ +}; + +typedef struct { + uint32 notification; + uint32 reserved; +} intr_t; + +typedef struct { + dbus_pub_t *pub; + + void *cbarg; + dbus_intf_callbacks_t *cbs; + + /* Imported */ + struct usb_device *usb; /* USB device pointer from OS */ + struct urb *intr_urb; /* URB for interrupt endpoint */ + struct list_head req_rxfreeq; + struct list_head req_txfreeq; + struct list_head req_rxpostedq; /* Posted down to USB driver for RX */ + struct list_head req_txpostedq; /* Posted down to USB driver for TX */ + spinlock_t rxfree_lock; /* Lock for rx free list */ + spinlock_t txfree_lock; /* Lock for tx free list */ + spinlock_t rxposted_lock; /* Lock for rx posted list */ + spinlock_t txposted_lock; /* Lock for tx posted list */ + uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2; /* Pipe numbers for USB I/O */ + uint rxbuf_len; + + struct list_head req_rxpendingq; /* RXDPC: Pending for dpc to send up */ + spinlock_t rxpending_lock; /* RXDPC: Lock for rx pending list */ + long dpc_pid; + struct semaphore dpc_sem; + struct completion dpc_exited; + int rxpending; + + struct urb *ctl_urb; + int ctl_in_pipe, ctl_out_pipe; + struct usb_ctrlrequest ctl_write; + struct usb_ctrlrequest ctl_read; + struct semaphore ctl_lock; /* Lock for CTRL transfers via tx_thread */ +#ifdef USBOS_TX_THREAD + enum usbos_request_state ctl_state; +#endif /* USBOS_TX_THREAD */ + + spinlock_t rxlock; /* Lock for rxq management */ + spinlock_t txlock; /* Lock for txq management */ + + int intr_size; /* Size of interrupt message */ + int interval; /* Interrupt polling interval */ + intr_t intr; /* Data buffer for interrupt endpoint */ + + int maxps; + atomic_t txposted; + atomic_t rxposted; + atomic_t txallocated; + atomic_t rxallocated; + bool rxctl_deferrespok; /* Get a response for setup from dongle */ + + wait_queue_head_t wait; + bool waitdone; + int sync_urb_status; + + struct urb *blk_urb; /* Used for downloading embedded image */ + +#ifdef USBOS_THREAD + spinlock_t ctrl_lock; + spinlock_t usbos_list_lock; + struct list_head usbos_list; + struct list_head usbos_free_list; + atomic_t usbos_list_cnt; + wait_queue_head_t usbos_queue_head; + struct task_struct *usbos_kt; +#endif /* USBOS_THREAD */ + +#ifdef USBOS_TX_THREAD + spinlock_t usbos_tx_list_lock; + struct list_head usbos_tx_list; + wait_queue_head_t usbos_tx_queue_head; + struct task_struct *usbos_tx_kt; +#endif /* USBOS_TX_THREAD */ + + struct dma_pool *qtd_pool; /* QTD pool for USB optimization only */ + int tx_ep, rx_ep, rx2_ep; /* EPs for USB optimization */ + struct usb_device *usb_device; /* USB device for optimization */ +} usbos_info_t; + +typedef struct urb_req { + void *pkt; + int buf_len; + struct urb *urb; + void *arg; + usbos_info_t *usbinfo; + struct list_head urb_list; +} urb_req_t; + +#ifdef USBOS_THREAD +typedef struct usbos_list_entry { + struct list_head list; /* must be first */ + void *urb_context; + int urb_length; + int urb_status; +} usbos_list_entry_t; + +static void* dbus_usbos_thread_init(usbos_info_t *usbos_info); +static void dbus_usbos_thread_deinit(usbos_info_t *usbos_info); +static void dbus_usbos_dispatch_schedule(CALLBACK_ARGS); +static int dbus_usbos_thread_func(void *data); +#endif /* USBOS_THREAD */ + +#ifdef USBOS_TX_THREAD +void* 
dbus_usbos_tx_thread_init(usbos_info_t *usbos_info); +void dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info); +int dbus_usbos_tx_thread_func(void *data); +#endif /* USBOS_TX_THREAD */ + +/* Shared Function prototypes */ +bool dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen); +int dbus_usbos_wait(usbos_info_t *usbinfo, uint16 ms); +bool dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len); +int dbus_write_membytes(usbos_info_t *usbinfo, bool set, uint32 address, uint8 *data, uint size); + +/* Local function prototypes */ +static void dbus_usbos_send_complete(CALLBACK_ARGS); +static void dbus_usbos_recv_complete(CALLBACK_ARGS); +static int dbus_usbos_errhandler(void *bus, int err); +static int dbus_usbos_state_change(void *bus, int state); +static void dbusos_stop(usbos_info_t *usbos_info); + +#ifdef KERNEL26 +static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id); +static void dbus_usbos_disconnect(struct usb_interface *intf); +#if defined(USB_SUSPEND_AVAILABLE) +static int dbus_usbos_resume(struct usb_interface *intf); +static int dbus_usbos_suspend(struct usb_interface *intf, pm_message_t message); +/* at the moment, used for full dongle host driver only */ +static int dbus_usbos_reset_resume(struct usb_interface *intf); +#endif /* USB_SUSPEND_AVAILABLE */ +#else /* KERNEL26 */ +static void *dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum, + const struct usb_device_id *id); +static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr); +#endif /* KERNEL26 */ + + +/** + * have to disable missing-field-initializers warning as last element {} triggers it + * and different versions of kernel have different number of members so it is impossible + * to specify the initializer. 
+ * Note that issuing the warning here is a GCC bug: the universal zero initializer {0} is
+ * specified in the C99 standard as the correct way to initialize a struct to all zeros.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+    4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+
+static struct usb_device_id devid_table[] = {
+    { USB_DEVICE(BCM_DNGL_VID, 0x0000) }, /* Configurable via register() */
+#if defined(BCM_REQUEST_FW)
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4328) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4322) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4319) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43236) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43143) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43242) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4360) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4350) },
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43569) },
+#endif
+#ifdef EXTENDED_VID_PID
+    EXTENDED_VID_PID,
+#endif /* EXTENDED_VID_PID */
+    { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BDC_PID) }, /* Default BDC */
+    { }
+};
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+    4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic pop
+#endif
+
+MODULE_DEVICE_TABLE(usb, devid_table);
+
+/** functions called by the Linux kernel USB subsystem */
+static struct usb_driver dbus_usbdev = {
+    name: "dbus_usbdev",
+    probe: dbus_usbos_probe,
+    disconnect: dbus_usbos_disconnect,
+    id_table: devid_table,
+#if defined(USB_SUSPEND_AVAILABLE)
+    suspend: dbus_usbos_suspend,
+    resume: dbus_usbos_resume,
+    reset_resume: dbus_usbos_reset_resume,
+    /* Linux USB core will allow autosuspend for devices bound to this driver */
+    supports_autosuspend: 1
+#endif /* USB_SUSPEND_AVAILABLE */
+};
+
+/**
+ * This stores USB info during the Linux probe callback, since attach() is not called yet at that
+ * point
+ */
+typedef struct {
+    void *usbos_info;
+    struct usb_device *usb; /* USB device pointer from OS */
+    uint rx_pipe;           /* Pipe numbers for USB I/O */
+    uint tx_pipe;           /* Pipe numbers for USB I/O */
+    uint intr_pipe;         /* Pipe numbers for USB I/O */
+    uint rx_pipe2;          /* Pipe numbers for USB I/O */
+    int intr_size;          /* Size of interrupt message */
+    int interval;           /* Interrupt polling interval */
+    bool dldone;
+    int vid;
+    int pid;
+    bool dereged;
+    bool disc_cb_done;
+    DEVICE_SPEED device_speed;
+    enum usbos_suspend_state suspend_state;
+    struct usb_interface *intf;
+} probe_info_t;
+
+/*
+ * USB Linux dbus_intf_t
+ */
+static void *dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs);
+static void dbus_usbos_intf_detach(dbus_pub_t *pub, void *info);
+static int dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb);
+static int dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb);
+static int dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx);
+static int dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb);
+static int dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len);
+static int dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len);
+static int dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib);
+static int dbus_usbos_intf_up(void *bus);
+static int dbus_usbos_intf_down(void *bus);
+static int dbus_usbos_intf_stop(void *bus);
+static int dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value);
+extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size);
+int dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data);
+static int dbus_usbos_intf_set_config(void *bus, dbus_config_t *config);
+static bool dbus_usbos_intf_recv_needed(void *bus);
+static void *dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args);
+static void *dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args);
+#ifdef BCMUSBDEV_COMPOSITE
+static int dbus_usbos_intf_wlan(struct usb_device *usb);
+#endif /* BCMUSBDEV_COMPOSITE */
+
+/** functions called by dbus_usb.c */
+static dbus_intf_t dbus_usbos_intf = {
+    .attach = dbus_usbos_intf_attach,
+    .detach = dbus_usbos_intf_detach,
+    .up = dbus_usbos_intf_up,
+    .down = dbus_usbos_intf_down,
+    .send_irb = dbus_usbos_intf_send_irb,
+    .recv_irb = dbus_usbos_intf_recv_irb,
+    .cancel_irb = dbus_usbos_intf_cancel_irb,
+    .send_ctl = dbus_usbos_intf_send_ctl,
+    .recv_ctl = dbus_usbos_intf_recv_ctl,
+    .get_stats = NULL,
+    .get_attrib = dbus_usbos_intf_get_attrib,
+    .remove = NULL,
+    .resume = NULL,
+    .suspend = NULL,
+    .stop = dbus_usbos_intf_stop,
+    .reset = NULL,
+    .pktget = NULL,
+    .pktfree = NULL,
+    .iovar_op = NULL,
+    .dump = NULL,
+    .set_config = dbus_usbos_intf_set_config,
+    .get_config = NULL,
+    .device_exists = NULL,
+    .dlneeded = NULL,
+    .dlstart = NULL,
+    .dlrun = NULL,
+    .recv_needed = dbus_usbos_intf_recv_needed,
+    .exec_rxlock = dbus_usbos_intf_exec_rxlock,
+    .exec_txlock = dbus_usbos_intf_exec_txlock,
+
+    .tx_timer_init = NULL,
+    .tx_timer_start = NULL,
+    .tx_timer_stop = NULL,
+
+    .sched_dpc = NULL,
+    .lock = NULL,
+    .unlock = NULL,
+    .sched_probe_cb = NULL,
+
+    .shutdown = NULL,
+
+    .recv_stop = NULL,
+    .recv_resume = NULL,
+
+    .recv_irb_from_ep = dbus_usbos_intf_recv_irb_from_ep,
+    .readreg = dbus_usbos_readreg
+};
+
+static probe_info_t g_probe_info;
+static probe_cb_t probe_cb = NULL;
+static disconnect_cb_t disconnect_cb = NULL;
+static void *probe_arg = NULL;
+static void *disc_arg = NULL;
+
+static volatile int loopback_rx_cnt, loopback_tx_cnt;
+int loopback_size;
+bool is_loopback_pkt(void *buf);
+int matches_loopback_pkt(void *buf);
+
+/**
+ * Multiple code paths in this file dequeue a URB request; this function makes sure that it
+ * happens in a concurrency-safe manner. Don't call this from a sleepable process context.
+ */
+static urb_req_t *
+dbus_usbos_qdeq(struct list_head *urbreq_q, spinlock_t *lock)
+{
+    unsigned long flags;
+    urb_req_t *req;
+
+    ASSERT(urbreq_q != NULL);
+
+    spin_lock_irqsave(lock, flags);
+
+    if (list_empty(urbreq_q)) {
+        req = NULL;
+    } else {
+        ASSERT(urbreq_q->next != NULL);
+        ASSERT(urbreq_q->next != urbreq_q);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+        req = list_entry(urbreq_q->next, urb_req_t, urb_list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+        list_del_init(&req->urb_list);
+    }
+
+    spin_unlock_irqrestore(lock, flags);
+
+    return req;
+}
+
+static void
+dbus_usbos_qenq(struct list_head *urbreq_q, urb_req_t *req, spinlock_t *lock)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(lock, flags);
+
+    list_add_tail(&req->urb_list, urbreq_q);
+
+    spin_unlock_irqrestore(lock, flags);
+}
+
+/**
+ * Multiple code paths in this file remove a URB request from a list; this function makes sure
+ * that it happens in a concurrency-safe manner. Don't call this from a sleepable process context.
+ * Is quite similar to dbus_usbos_qdeq(), I wonder why this function is needed. + */ +static void +dbus_usbos_req_del(urb_req_t *req, spinlock_t *lock) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + list_del_init(&req->urb_list); + + spin_unlock_irqrestore(lock, flags); +} + + +/** + * Driver requires a pool of URBs to operate. This function is called during + * initialization (attach phase), allocates a number of URBs, and puts them + * on the free (req_rxfreeq and req_txfreeq) queue + */ +static int +dbus_usbos_urbreqs_alloc(usbos_info_t *usbos_info, uint32 count, bool is_rx) +{ + int i; + int allocated = 0; + int err = DBUS_OK; + + for (i = 0; i < count; i++) { + urb_req_t *req; + + req = MALLOC(usbos_info->pub->osh, sizeof(urb_req_t)); + if (req == NULL) { + DBUSERR(("%s: MALLOC req failed\n", __FUNCTION__)); + err = DBUS_ERR_NOMEM; + goto fail; + } + bzero(req, sizeof(urb_req_t)); + + req->urb = USB_ALLOC_URB(); + if (req->urb == NULL) { + DBUSERR(("%s: USB_ALLOC_URB req->urb failed\n", __FUNCTION__)); + err = DBUS_ERR_NOMEM; + goto fail; + } + + INIT_LIST_HEAD(&req->urb_list); + + if (is_rx) { +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* don't allocate now. Do it on demand */ + req->pkt = NULL; +#else + /* pre-allocate buffers never to be released */ + req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len); + if (req->pkt == NULL) { + DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__)); + err = DBUS_ERR_NOMEM; + goto fail; + } +#endif + req->buf_len = usbos_info->rxbuf_len; + dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock); + } else { + req->buf_len = 0; + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); + } + allocated++; + continue; + +fail: + if (req) { + if (is_rx && req->pkt) { +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* req->pkt is NULL in "NOCOPY" mode */ +#else + MFREE(usbos_info->pub->osh, req->pkt, req->buf_len); +#endif + } + if (req->urb) { + USB_FREE_URB(req->urb); + } + MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t)); + } + break; + } + + atomic_add(allocated, is_rx ? &usbos_info->rxallocated : &usbos_info->txallocated); + + if (is_rx) { + DBUSTRACE(("%s: add %d (total %d) rx buf, each has %d bytes\n", __FUNCTION__, + allocated, atomic_read(&usbos_info->rxallocated), usbos_info->rxbuf_len)); + } else { + DBUSTRACE(("%s: add %d (total %d) tx req\n", __FUNCTION__, + allocated, atomic_read(&usbos_info->txallocated))); + } + + return err; +} /* dbus_usbos_urbreqs_alloc */ + +/** Typically called during detach or when attach failed. 
Don't call until all URBs unlinked */ +static int +dbus_usbos_urbreqs_free(usbos_info_t *usbos_info, bool is_rx) +{ + int rtn = 0; + urb_req_t *req; + struct list_head *req_q; + spinlock_t *lock; + + if (is_rx) { + req_q = &usbos_info->req_rxfreeq; + lock = &usbos_info->rxfree_lock; + } else { + req_q = &usbos_info->req_txfreeq; + lock = &usbos_info->txfree_lock; + } + while ((req = dbus_usbos_qdeq(req_q, lock)) != NULL) { + + if (is_rx) { + if (req->pkt) { + /* We do MFREE instead of PKTFREE because the pkt has been + * converted to native already + */ + MFREE(usbos_info->pub->osh, req->pkt, req->buf_len); + req->pkt = NULL; + req->buf_len = 0; + } + } else { + /* sending req should not be assigned pkt buffer */ + ASSERT(req->pkt == NULL); + } + + if (req->urb) { + USB_FREE_URB(req->urb); + req->urb = NULL; + } + MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t)); + + rtn++; + } + return rtn; +} /* dbus_usbos_urbreqs_free */ + +/** + * called by Linux kernel on URB completion. Upper DBUS layer (dbus_usb.c) has to be notified of + * send completion. + */ +void +dbus_usbos_send_complete(CALLBACK_ARGS) +{ + urb_req_t *req = urb->context; + dbus_irb_tx_t *txirb = req->arg; + usbos_info_t *usbos_info = req->usbinfo; + unsigned long flags; + int status = DBUS_OK; + int txposted; + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + + spin_lock_irqsave(&usbos_info->txlock, flags); + + dbus_usbos_req_del(req, &usbos_info->txposted_lock); + txposted = atomic_dec_return(&usbos_info->txposted); + if (unlikely (txposted < 0)) { + DBUSERR(("%s ERROR: txposted is negative (%d)!!\n", __FUNCTION__, txposted)); + } + spin_unlock_irqrestore(&usbos_info->txlock, flags); + + if (unlikely (urb->status)) { + status = DBUS_ERR_TXFAIL; + DBUSTRACE(("txfail status %d\n", urb->status)); + } + +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* sending req should not be assigned pkt buffer */ + ASSERT(req->pkt == NULL); +#endif + /* txirb should always be set, except for ZLP. ZLP is reusing this callback function. */ + if (txirb != NULL) { + if (txirb->send_buf != NULL) { + MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len); + txirb->send_buf = NULL; + req->buf_len = 0; + } + if (likely (usbos_info->cbarg && usbos_info->cbs)) { + if (likely (usbos_info->cbs->send_irb_complete != NULL)) + usbos_info->cbs->send_irb_complete(usbos_info->cbarg, txirb, status); + } + } + + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); +} /* dbus_usbos_send_complete */ + +/** + * In order to receive USB traffic from the dongle, we need to supply the Linux kernel with a free + * URB that is going to contain received data. 
+ */ +static int +dbus_usbos_recv_urb_submit(usbos_info_t *usbos_info, dbus_irb_rx_t *rxirb, uint32 ep_idx) +{ + urb_req_t *req; + int ret = DBUS_OK; + unsigned long flags; + void *p; + uint rx_pipe; + int rxposted; + + BCM_REFERENCE(rxposted); + + if (!(req = dbus_usbos_qdeq(&usbos_info->req_rxfreeq, &usbos_info->rxfree_lock))) { + DBUSTRACE(("%s No free URB!\n", __FUNCTION__)); + return DBUS_ERR_RXDROP; + } + + spin_lock_irqsave(&usbos_info->rxlock, flags); + +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + req->pkt = rxirb->pkt = PKTGET(usbos_info->pub->osh, req->buf_len, FALSE); + if (!rxirb->pkt) { + DBUSERR(("%s: PKTGET failed\n", __FUNCTION__)); + dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock); + ret = DBUS_ERR_RXDROP; + goto fail; + } + /* consider the packet "native" so we don't count it as MALLOCED in the osl */ + PKTTONATIVE(usbos_info->pub->osh, req->pkt); + rxirb->buf = NULL; + p = PKTDATA(usbos_info->pub->osh, req->pkt); +#else + if (req->buf_len != usbos_info->rxbuf_len) { + ASSERT(req->pkt); + MFREE(usbos_info->pub->osh, req->pkt, req->buf_len); + DBUSTRACE(("%s: replace rx buff: old len %d, new len %d\n", __FUNCTION__, + req->buf_len, usbos_info->rxbuf_len)); + req->buf_len = 0; + req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len); + if (req->pkt == NULL) { + DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__)); + ret = DBUS_ERR_NOMEM; + goto fail; + } + req->buf_len = usbos_info->rxbuf_len; + } + rxirb->buf = req->pkt; + p = rxirb->buf; +#endif /* defined(BCM_RPC_NOCOPY) */ + rxirb->buf_len = req->buf_len; + req->usbinfo = usbos_info; + req->arg = rxirb; + if (ep_idx == 0) { + rx_pipe = usbos_info->rx_pipe; + } else { + rx_pipe = usbos_info->rx_pipe2; + ASSERT(usbos_info->rx_pipe2); + } + /* Prepare the URB */ + usb_fill_bulk_urb(req->urb, usbos_info->usb, rx_pipe, + p, + rxirb->buf_len, + (usb_complete_t)dbus_usbos_recv_complete, req); + req->urb->transfer_flags |= URB_QUEUE_BULK; + + if ((ret = USB_SUBMIT_URB(req->urb))) { + DBUSERR(("%s USB_SUBMIT_URB failed. status %d\n", __FUNCTION__, ret)); + dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock); + ret = DBUS_ERR_RXFAIL; + goto fail; + } + rxposted = atomic_inc_return(&usbos_info->rxposted); + + dbus_usbos_qenq(&usbos_info->req_rxpostedq, req, &usbos_info->rxposted_lock); +fail: + spin_unlock_irqrestore(&usbos_info->rxlock, flags); + return ret; +} /* dbus_usbos_recv_urb_submit */ + + +/** + * Called by worked thread when a 'receive URB' completed or Linux kernel when it returns a URB to + * this driver. + */ +static void +dbus_usbos_recv_complete_handle(urb_req_t *req, int len, int status) +{ + dbus_irb_rx_t *rxirb = req->arg; + usbos_info_t *usbos_info = req->usbinfo; + unsigned long flags; + int rxallocated, rxposted; + int dbus_status = DBUS_OK; + bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 
1 : 0; + + spin_lock_irqsave(&usbos_info->rxlock, flags); + dbus_usbos_req_del(req, &usbos_info->rxposted_lock); + rxposted = atomic_dec_return(&usbos_info->rxposted); + rxallocated = atomic_read(&usbos_info->rxallocated); + spin_unlock_irqrestore(&usbos_info->rxlock, flags); + + if ((rxallocated < usbos_info->pub->nrxq) && (!status) && + (rxposted == DBUS_USB_RXQUEUE_LOWER_WATERMARK)) { + DBUSTRACE(("%s: need more rx buf: rxallocated %d rxposted %d!\n", + __FUNCTION__, rxallocated, rxposted)); + dbus_usbos_urbreqs_alloc(usbos_info, + MIN(DBUS_USB_RXQUEUE_BATCH_ADD, + usbos_info->pub->nrxq - rxallocated), TRUE); + } + + /* Handle errors */ + if (status) { + /* + * Linux 2.4 disconnect: -ENOENT or -EILSEQ for CRC error; rmmod: -ENOENT + * Linux 2.6 disconnect: -EPROTO, rmmod: -ESHUTDOWN + */ + if ((status == -ENOENT && (!killed))|| status == -ESHUTDOWN) { + /* NOTE: unlink() can not be called from URB callback(). + * Do not call dbusos_stop() here. + */ + DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status)); + dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN); + } else if (status == -EPROTO) { + DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status)); + } else if (killed && (status == -EHOSTUNREACH || status == -ENOENT)) { + /* Device is suspended */ + } else { + DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status)); + dbus_usbos_errhandler(usbos_info, DBUS_ERR_RXFAIL); + } + + /* On error, don't submit more URBs yet */ + rxirb->buf = NULL; + rxirb->actual_len = 0; + dbus_status = DBUS_ERR_RXFAIL; + goto fail; + } + + /* Make the skb represent the received urb */ + rxirb->actual_len = len; + + if (rxirb->actual_len < sizeof(uint32)) { + DBUSTRACE(("small pkt len %d, process as ZLP\n", rxirb->actual_len)); + dbus_status = DBUS_ERR_RXZLP; + } + +fail: +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* detach the packet from the queue */ + req->pkt = NULL; +#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */ + + if (usbos_info->cbarg && usbos_info->cbs) { + if (usbos_info->cbs->recv_irb_complete) { + usbos_info->cbs->recv_irb_complete(usbos_info->cbarg, rxirb, dbus_status); + } + } + + dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock); + + /* Mark the interface as busy to reset USB autosuspend timer */ + USB_MARK_LAST_BUSY(usbos_info->usb); +} /* dbus_usbos_recv_complete_handle */ + +/** called by Linux kernel when it returns a URB to this driver */ +static void +dbus_usbos_recv_complete(CALLBACK_ARGS) +{ +#ifdef USBOS_THREAD + dbus_usbos_dispatch_schedule(CALLBACK_ARGS_DATA); +#else /* !USBOS_THREAD */ + dbus_usbos_recv_complete_handle(urb->context, urb->actual_length, urb->status); +#endif /* USBOS_THREAD */ +} + + +/** + * If Linux notifies our driver that a control read or write URB has completed, we should notify + * the DBUS layer above us (dbus_usb.c in this case). 
+ */
+static void
+dbus_usbos_ctl_complete(usbos_info_t *usbos_info, int type, int urbstatus)
+{
+    int status = DBUS_ERR;
+
+    if (usbos_info == NULL)
+        return;
+
+    switch (urbstatus) {
+    case 0:
+        status = DBUS_OK;
+        break;
+    case -EINPROGRESS:
+    case -ENOENT:
+    default:
+#ifdef INTR_EP_ENABLE
+        DBUSERR(("%s:%d fail status %d bus:%d susp:%d intr:%d ctli:%d ctlo:%d\n",
+            __FUNCTION__, type, urbstatus,
+            usbos_info->pub->busstate, g_probe_info.suspend_state,
+            usbos_info->intr_urb_submitted, usbos_info->ctlin_urb_submitted,
+            usbos_info->ctlout_urb_submitted));
+#else
+        DBUSERR(("%s: failed with status %d\n", __FUNCTION__, urbstatus));
+#endif /* INTR_EP_ENABLE */
+        break;
+    }
+
+    if (usbos_info->cbarg && usbos_info->cbs) {
+        if (usbos_info->cbs->ctl_complete)
+            usbos_info->cbs->ctl_complete(usbos_info->cbarg, type, status);
+    }
+}
+
+/** called by Linux */
+static void
+dbus_usbos_ctlread_complete(CALLBACK_ARGS)
+{
+    usbos_info_t *usbos_info;
+
+    ASSERT(urb);
+    usbos_info = (usbos_info_t *)urb->context;
+
+    dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_READ, urb->status);
+
+#ifdef USBOS_THREAD
+    if (usbos_info->rxctl_deferrespok) {
+        usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS |
+            USB_RECIP_INTERFACE;
+        usbos_info->ctl_read.bRequest = 1;
+    }
+#endif
+
+    up(&usbos_info->ctl_lock);
+
+    USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+}
+
+/** called by Linux */
+static void
+dbus_usbos_ctlwrite_complete(CALLBACK_ARGS)
+{
+    usbos_info_t *usbos_info;
+
+    ASSERT(urb);
+    usbos_info = (usbos_info_t *)urb->context;
+
+    dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_WRITE, urb->status);
+
+#ifdef USBOS_TX_THREAD
+    usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+#endif /* USBOS_TX_THREAD */
+
+    up(&usbos_info->ctl_lock);
+
+    USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+}
+
+#ifdef INTR_EP_ENABLE
+/** called by Linux */
+static void
+dbus_usbos_intr_complete(CALLBACK_ARGS)
+{
+    usbos_info_t *usbos_info = (usbos_info_t *)urb->context;
+    bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 1 : 0;
+
+    if (usbos_info == NULL || usbos_info->pub == NULL)
+        return;
+    if ((urb->status == -ENOENT && (!killed)) || urb->status == -ESHUTDOWN ||
+        urb->status == -ENODEV) {
+        dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+    }
+
+    if (usbos_info->pub->busstate == DBUS_STATE_DOWN) {
+        DBUSERR(("%s: intr cb when DBUS down, ignoring\n", __FUNCTION__));
+        return;
+    }
+    dbus_usbos_ctl_complete(usbos_info, DBUS_CBINTR_POLL, urb->status);
+}
+#endif /* INTR_EP_ENABLE */
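The control path is serialized by the ctl_lock semaphore: the submitting side takes it before handing ctl_urb to the kernel, and the completion handlers above release it with up(), so at most one control URB is ever in flight. A condensed, illustrative sketch of that handshake (hypothetical function, not driver code; the real submit paths also take an autopm reference and handle timeouts):

/* Sketch: one control URB in flight at a time, released from the completion. */
static int submit_ctl_locked(usbos_info_t *usbos_info, struct urb *urb)
{
    if (down_interruptible(&usbos_info->ctl_lock))
        return DBUS_ERR;                 /* interrupted while waiting */

    if (USB_SUBMIT_URB(urb)) {
        up(&usbos_info->ctl_lock);       /* submit failed: release now */
        return DBUS_ERR;
    }
    /* success: dbus_usbos_ctl{read,write}_complete() will call up() */
    return DBUS_OK;
}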
+
+/**
+ * When the bus is going to sleep or halt, the Linux kernel requires us to take ownership of our
+ * URBs again. Multiple code paths in this file require a list of URBs to be cancelled in a
+ * concurrency-safe manner.
+ */
+static void
+dbus_usbos_unlink(struct list_head *urbreq_q, spinlock_t *lock)
+{
+    urb_req_t *req;
+
+    /* dbus_usbos_recv_complete() adds req back to req_freeq */
+    while ((req = dbus_usbos_qdeq(urbreq_q, lock)) != NULL) {
+        ASSERT(req->urb != NULL);
+        USB_UNLINK_URB(req->urb);
+    }
+}
+
+/** multiple code paths in this file require the bus to stop */
+static void
+dbus_usbos_cancel_all_urbs(usbos_info_t *usbos_info)
+{
+    int rxposted, txposted;
+
+    DBUSTRACE(("%s: unlink all URBs\n", __FUNCTION__));
+
+#ifdef USBOS_TX_THREAD
+    usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+
+    /* Yield the CPU to the TX thread so all pending requests are submitted */
+    while (!list_empty(&usbos_info->usbos_tx_list)) {
+        wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+        OSL_SLEEP(10);
+    }
+#endif /* USBOS_TX_THREAD */
+
+    /* tell the Linux kernel to cancel a single intr, ctl and blk URB */
+    if (usbos_info->intr_urb)
+        USB_UNLINK_URB(usbos_info->intr_urb);
+    if (usbos_info->ctl_urb)
+        USB_UNLINK_URB(usbos_info->ctl_urb);
+    if (usbos_info->blk_urb)
+        USB_UNLINK_URB(usbos_info->blk_urb);
+
+    dbus_usbos_unlink(&usbos_info->req_txpostedq, &usbos_info->txposted_lock);
+    dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock);
+
+    /* Wait until the callbacks for all submitted URBs have been called, because the
+     * handler needs to know whether a USB suspend is in progress.
+     */
+    SPINWAIT((atomic_read(&usbos_info->txposted) != 0 ||
+        atomic_read(&usbos_info->rxposted) != 0), 10000);
+
+    txposted = atomic_read(&usbos_info->txposted);
+    rxposted = atomic_read(&usbos_info->rxposted);
+    if (txposted != 0 || rxposted != 0) {
+        DBUSERR(("%s ERROR: REQs posted, rx=%d tx=%d!\n",
+            __FUNCTION__, rxposted, txposted));
+    }
+} /* dbus_usbos_cancel_all_urbs */
+
+/** multiple code paths require the bus to stop */
+static void
+dbusos_stop(usbos_info_t *usbos_info)
+{
+    urb_req_t *req;
+    int rxposted;
+
+    req = NULL;
+    BCM_REFERENCE(req);
+
+    ASSERT(usbos_info);
+
+    dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+
+    dbus_usbos_cancel_all_urbs(usbos_info);
+
+#ifdef USBOS_THREAD
+    /* yield the CPU to the rx packet thread */
+    while (1) {
+        if (atomic_read(&usbos_info->usbos_list_cnt) <= 0)
+            break;
+        wake_up_interruptible(&usbos_info->usbos_queue_head);
+        OSL_SLEEP(3);
+    }
+#endif /* USBOS_THREAD */
+
+    rxposted = atomic_read(&usbos_info->rxposted);
+    if (rxposted > 0) {
+        DBUSERR(("%s ERROR: rx REQs posted=%d in stop!\n", __FUNCTION__,
+            rxposted));
+    }
+
+    ASSERT(atomic_read(&usbos_info->txposted) == 0 && rxposted == 0);
+} /* dbusos_stop */
+
+#if defined(USB_SUSPEND_AVAILABLE)
+
+/**
+ * The Linux kernel sports a 'USB auto suspend' feature. See: http://lwn.net/Articles/373550/
+ * The suspend method is called by the Linux kernel to warn the driver that the device is going to
+ * be suspended. If the driver returns a negative error code, the suspend will be aborted. If the
+ * driver returns 0, it must cancel all outstanding URBs (usb_kill_urb()) and not submit any more.
+ */ +static int +dbus_usbos_suspend(struct usb_interface *intf, + pm_message_t message) +{ + DBUSERR(("%s suspend state: %d\n", __FUNCTION__, g_probe_info.suspend_state)); + /* DHD for full dongle model */ + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPEND_PENDING; + dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_SLEEP); + dbus_usbos_cancel_all_urbs((usbos_info_t*)g_probe_info.usbos_info); + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPENDED; + + return 0; +} + +/** + * The resume method is called to tell the driver that the device has been resumed and the driver + * can return to normal operation. URBs may once more be submitted. + */ +static int dbus_usbos_resume(struct usb_interface *intf) +{ + DBUSERR(("%s Device resumed\n", __FUNCTION__)); + + dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_UP); + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE; + return 0; +} + +/** +* This function is directly called by the Linux kernel, when the suspended device has been reset +* instead of being resumed +*/ +static int dbus_usbos_reset_resume(struct usb_interface *intf) +{ + DBUSERR(("%s Device reset resumed\n", __FUNCTION__)); + + /* The device may have lost power, so a firmware download may be required */ + dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_DL_NEEDED); + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE; + return 0; +} + +#endif /* USB_SUSPEND_AVAILABLE */ + +/** + * Called by Linux kernel at initialization time, kernel wants to know if our driver will accept the + * caller supplied USB interface. Note that USB drivers are bound to interfaces, and not to USB + * devices. + */ +#ifdef KERNEL26 +#define DBUS_USBOS_PROBE() static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id) +#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_interface *intf) +#else +#define DBUS_USBOS_PROBE() static void * dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum, const struct usb_device_id *id) +#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr) +#endif /* KERNEL26 */ + +DBUS_USBOS_PROBE() +{ + int ep; + struct usb_endpoint_descriptor *endpoint; + int ret = 0; +#ifdef KERNEL26 + struct usb_device *usb = interface_to_usbdev(intf); +#else + int claimed = 0; +#endif + int num_of_eps; +#ifdef BCMUSBDEV_COMPOSITE + int wlan_if = -1; + bool intr_ep = FALSE; +#endif /* BCMUSBDEV_COMPOSITE */ + wifi_adapter_info_t *adapter; + + DHD_MUTEX_LOCK(); + + DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__, + usb->bus->busnum, usb->portnum)); + adapter = dhd_wifi_platform_attach_adapter(USB_BUS, usb->bus->busnum, + usb->portnum, WIFI_STATUS_POWER_ON); + if (adapter == NULL) { + DBUSERR(("%s: can't find adapter info for this chip\n", __FUNCTION__)); + goto fail; + } + +#ifdef BCMUSBDEV_COMPOSITE + wlan_if = dbus_usbos_intf_wlan(usb); +#ifdef KERNEL26 + if ((wlan_if >= 0) && (IFPTR(usb, wlan_if) == intf)) +#else + if (wlan_if == ifnum) +#endif /* KERNEL26 */ + { +#endif /* BCMUSBDEV_COMPOSITE */ + g_probe_info.usb = usb; + g_probe_info.dldone = TRUE; +#ifdef BCMUSBDEV_COMPOSITE + } else { + DBUSTRACE(("dbus_usbos_probe: skip probe for non WLAN interface\n")); + ret = BCME_UNSUPPORTED; + goto fail; + } +#endif /* BCMUSBDEV_COMPOSITE */ + +#ifdef KERNEL26 + g_probe_info.intf = intf; +#endif /* KERNEL26 */ + +#ifdef BCMUSBDEV_COMPOSITE + if (IFDESC(usb, 
wlan_if).bInterfaceNumber > USB_COMPIF_MAX) +#else + if (IFDESC(usb, CONTROL_IF).bInterfaceNumber) +#endif /* BCMUSBDEV_COMPOSITE */ + { + ret = -1; + goto fail; + } + if (id != NULL) { + g_probe_info.vid = id->idVendor; + g_probe_info.pid = id->idProduct; + } + +#ifdef KERNEL26 + usb_set_intfdata(intf, &g_probe_info); +#endif + + /* Check that the device supports only one configuration */ + if (usb->descriptor.bNumConfigurations != 1) { + ret = -1; + goto fail; + } + + if (usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) { +#ifdef BCMUSBDEV_COMPOSITE + if ((usb->descriptor.bDeviceClass != USB_CLASS_MISC) && + (usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS)) { +#endif /* BCMUSBDEV_COMPOSITE */ + ret = -1; + goto fail; +#ifdef BCMUSBDEV_COMPOSITE + } +#endif /* BCMUSBDEV_COMPOSITE */ + } + + /* + * Only the BDC interface configuration is supported: + * Device class: USB_CLASS_VENDOR_SPEC + * if0 class: USB_CLASS_VENDOR_SPEC + * if0/ep0: control + * if0/ep1: bulk in + * if0/ep2: bulk out (ok if swapped with bulk in) + */ + if (CONFIGDESC(usb)->bNumInterfaces != 1) { +#ifdef BCMUSBDEV_COMPOSITE + if (CONFIGDESC(usb)->bNumInterfaces > USB_COMPIF_MAX) { +#endif /* BCMUSBDEV_COMPOSITE */ + ret = -1; + goto fail; +#ifdef BCMUSBDEV_COMPOSITE + } +#endif /* BCMUSBDEV_COMPOSITE */ + } + + /* Check interface */ +#ifndef KERNEL26 +#ifdef BCMUSBDEV_COMPOSITE + if (usb_interface_claimed(IFPTR(usb, wlan_if))) +#else + if (usb_interface_claimed(IFPTR(usb, CONTROL_IF))) +#endif /* BCMUSBDEV_COMPOSITE */ + { + ret = -1; + goto fail; + } +#endif /* !KERNEL26 */ + +#ifdef BCMUSBDEV_COMPOSITE + if ((IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_VENDOR_SPEC || + IFDESC(usb, wlan_if).bInterfaceSubClass != 2 || + IFDESC(usb, wlan_if).bInterfaceProtocol != 0xff) && + (IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_MISC || + IFDESC(usb, wlan_if).bInterfaceSubClass != USB_SUBCLASS_COMMON || + IFDESC(usb, wlan_if).bInterfaceProtocol != USB_PROTO_IAD)) +#else + if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC || + IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 || + IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) +#endif /* BCMUSBDEV_COMPOSITE */ + { +#ifdef BCMUSBDEV_COMPOSITE + DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n", + __FUNCTION__, + IFDESC(usb, wlan_if).bInterfaceClass, + IFDESC(usb, wlan_if).bInterfaceSubClass, + IFDESC(usb, wlan_if).bInterfaceProtocol)); +#else + DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n", + __FUNCTION__, + IFDESC(usb, CONTROL_IF).bInterfaceClass, + IFDESC(usb, CONTROL_IF).bInterfaceSubClass, + IFDESC(usb, CONTROL_IF).bInterfaceProtocol)); +#endif /* BCMUSBDEV_COMPOSITE */ + ret = -1; + goto fail; + } + + /* Check control endpoint */ +#ifdef BCMUSBDEV_COMPOSITE + endpoint = &IFEPDESC(usb, wlan_if, 0); +#else + endpoint = &IFEPDESC(usb, CONTROL_IF, 0); +#endif /* BCMUSBDEV_COMPOSITE */ + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) { +#ifdef BCMUSBDEV_COMPOSITE + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != + USB_ENDPOINT_XFER_BULK) { +#endif /* BCMUSBDEV_COMPOSITE */ + DBUSERR(("%s: invalid control endpoint %d\n", + __FUNCTION__, endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)); + ret = -1; + goto fail; +#ifdef BCMUSBDEV_COMPOSITE + } +#endif /* BCMUSBDEV_COMPOSITE */ + } + +#ifdef BCMUSBDEV_COMPOSITE + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { +#endif /* BCMUSBDEV_COMPOSITE */ + 
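/* record the interrupt IN pipe; dbus_usbos_intf_up() later submits the interrupt URB on it */ +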
g_probe_info.intr_pipe = + usb_rcvintpipe(usb, endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); +#ifdef BCMUSBDEV_COMPOSITE + intr_ep = TRUE; + } +#endif /* BCMUSBDEV_COMPOSITE */ + +#ifndef KERNEL26 + /* Claim interface */ +#ifdef BCMUSBDEV_COMPOSITE + usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, wlan_if), &g_probe_info); +#else + usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF), &g_probe_info); +#endif /* BCMUSBDEV_COMPOSITE */ + claimed = 1; +#endif /* !KERNEL26 */ + g_probe_info.rx_pipe = 0; + g_probe_info.rx_pipe2 = 0; + g_probe_info.tx_pipe = 0; +#ifdef BCMUSBDEV_COMPOSITE + if (intr_ep) + ep = 1; + else + ep = 0; + num_of_eps = IFDESC(usb, wlan_if).bNumEndpoints - 1; +#else + num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1; +#endif /* BCMUSBDEV_COMPOSITE */ + + if ((num_of_eps != 2) && (num_of_eps != 3)) { +#ifdef BCMUSBDEV_COMPOSITE + if (num_of_eps > 7) +#endif /* BCMUSBDEV_COMPOSITE */ + ASSERT(0); + } + /* Check data endpoints and get pipes */ +#ifdef BCMUSBDEV_COMPOSITE + for (; ep <= num_of_eps; ep++) +#else + for (ep = 1; ep <= num_of_eps; ep++) +#endif /* BCMUSBDEV_COMPOSITE */ + { +#ifdef BCMUSBDEV_COMPOSITE + endpoint = &IFEPDESC(usb, wlan_if, ep); +#else + endpoint = &IFEPDESC(usb, BULK_IF, ep); +#endif /* BCMUSBDEV_COMPOSITE */ + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != + USB_ENDPOINT_XFER_BULK) { + DBUSERR(("%s: invalid data endpoint %d\n", + __FUNCTION__, ep)); + ret = -1; + goto fail; + } + + if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) { + /* direction: dongle->host */ + if (!g_probe_info.rx_pipe) { + g_probe_info.rx_pipe = usb_rcvbulkpipe(usb, + (endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)); + } else { + g_probe_info.rx_pipe2 = usb_rcvbulkpipe(usb, + (endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)); + } + + } else + g_probe_info.tx_pipe = usb_sndbulkpipe(usb, (endpoint->bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK)); + } + + /* Allocate interrupt URB and data buffer */ + /* RNDIS says 8-byte intr, our old drivers used 4-byte */ +#ifdef BCMUSBDEV_COMPOSITE + g_probe_info.intr_size = (IFEPDESC(usb, wlan_if, 0).wMaxPacketSize == 16) ? 8 : 4; + g_probe_info.interval = IFEPDESC(usb, wlan_if, 0).bInterval; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)) + usb->quirks |= USB_QUIRK_NO_SET_INTF; +#endif +#else + g_probe_info.intr_size = (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == 16) ? 
8 : 4; + g_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval; +#endif /* BCMUSBDEV_COMPOSITE */ + +#ifndef KERNEL26 + /* usb_fill_int_urb does the interval decoding in 2.6 */ + if (usb->speed == USB_SPEED_HIGH) + g_probe_info.interval = 1 << (g_probe_info.interval - 1); +#endif + if (usb->speed == USB_SPEED_SUPER) { + g_probe_info.device_speed = SUPER_SPEED; + DBUSERR(("super speed device detected\n")); + } else if (usb->speed == USB_SPEED_HIGH) { + g_probe_info.device_speed = HIGH_SPEED; + DBUSERR(("high speed device detected\n")); + } else { + g_probe_info.device_speed = FULL_SPEED; + DBUSERR(("full speed device detected\n")); + } + if (g_probe_info.dereged == FALSE && probe_cb) { + disc_arg = probe_cb(probe_arg, "", USB_BUS, usb->bus->busnum, usb->portnum, 0); + } + + g_probe_info.disc_cb_done = FALSE; + +#ifdef KERNEL26 + intf->needs_remote_wakeup = 1; +#endif /* KERNEL26 */ + DHD_MUTEX_UNLOCK(); + + /* Success */ +#ifdef KERNEL26 + return DBUS_OK; +#else + usb_inc_dev_use(usb); + return &g_probe_info; +#endif + +fail: + printf("%s: Exit ret=%d\n", __FUNCTION__, ret); +#ifdef BCMUSBDEV_COMPOSITE + if (ret != BCME_UNSUPPORTED) +#endif /* BCMUSBDEV_COMPOSITE */ + DBUSERR(("%s: failed with errno %d\n", __FUNCTION__, ret)); +#ifndef KERNEL26 + if (claimed) +#ifdef BCMUSBDEV_COMPOSITE + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if)); +#else + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF)); +#endif /* BCMUSBDEV_COMPOSITE */ +#endif /* !KERNEL26 */ + + DHD_MUTEX_UNLOCK(); +#ifdef KERNEL26 + usb_set_intfdata(intf, NULL); + return ret; +#else + return NULL; +#endif +} /* dbus_usbos_probe */ + +/** Called by Linux kernel, is the counter part of dbus_usbos_probe() */ +DBUS_USBOS_DISCONNECT() +{ +#ifdef KERNEL26 + struct usb_device *usb = interface_to_usbdev(intf); + probe_info_t *probe_usb_init_data = usb_get_intfdata(intf); +#else + probe_info_t *probe_usb_init_data = (probe_info_t *) ptr; +#endif + usbos_info_t *usbos_info; + + DHD_MUTEX_LOCK(); + + DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__, + usb->bus->busnum, usb->portnum)); + + if (probe_usb_init_data) { + usbos_info = (usbos_info_t *) probe_usb_init_data->usbos_info; + if (usbos_info) { + if ((probe_usb_init_data->dereged == FALSE) && disconnect_cb && disc_arg) { + disconnect_cb(disc_arg); + disc_arg = NULL; + probe_usb_init_data->disc_cb_done = TRUE; + } + } + } + + if (usb) { +#ifndef KERNEL26 +#ifdef BCMUSBDEV_COMPOSITE + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if)); +#else + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF)); +#endif /* BCMUSBDEV_COMPOSITE */ + usb_dec_dev_use(usb); +#endif /* !KERNEL26 */ + } + DHD_MUTEX_UNLOCK(); +} /* dbus_usbos_disconnect */ + +#define LOOPBACK_PKT_START 0xBABE1234 + +bool is_loopback_pkt(void *buf) +{ + + uint32 *buf_ptr = (uint32 *) buf; + + if (*buf_ptr == LOOPBACK_PKT_START) + return TRUE; + return FALSE; + +} + +int matches_loopback_pkt(void *buf) +{ + int i, j; + unsigned char *cbuf = (unsigned char *) buf; + + for (i = 4; i < loopback_size; i++) { + if (cbuf[i] != (i % 256)) { + printf("%s: mismatch at i=%d %d : ", __FUNCTION__, i, cbuf[i]); + for (j = i; ((j < i+ 16) && (j < loopback_size)); j++) { + printf("%d ", cbuf[j]); + } + printf("\n"); + return 0; + } + } + loopback_rx_cnt++; + return 1; +} + +int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size) +{ + usbos_info_t *usbos_info = (usbos_info_t *) usbos_info_ptr; + unsigned char *buf; + int j; + void* p = NULL; 
+ int rc, last_rx_cnt; + int tx_failed_cnt; + int max_size = 1650; + int usb_packet_size = 512; + int min_packet_size = 10; + + if (size % usb_packet_size == 0) { + size = size - 1; + DBUSERR(("%s: overriding size=%d \n", __FUNCTION__, size)); + } + + if (size < min_packet_size) { + size = min_packet_size; + DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, min_packet_size)); + } + if (size > max_size) { + size = max_size; + DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, max_size)); + } + + loopback_tx_cnt = 0; + loopback_rx_cnt = 0; + tx_failed_cnt = 0; + loopback_size = size; + + while (loopback_tx_cnt < cnt) { + uint32 *x; + int pkt_size = loopback_size; + + p = PKTGET(usbos_info->pub->osh, pkt_size, TRUE); + if (p == NULL) { + DBUSERR(("%s:%d Failed to allocate packet sz=%d\n", + __FUNCTION__, __LINE__, pkt_size)); + return BCME_ERROR; + } + x = (uint32*) PKTDATA(usbos_info->pub->osh, p); + *x = LOOPBACK_PKT_START; + buf = (unsigned char*) x; + for (j = 4; j < pkt_size; j++) { + buf[j] = j % 256; + } + rc = dbus_send_buf(usbos_info->pub, buf, pkt_size, p); + if (rc != BCME_OK) { + DBUSERR(("%s:%d Freeing packet \n", __FUNCTION__, __LINE__)); + PKTFREE(usbos_info->pub->osh, p, TRUE); + dbus_usbos_wait(usbos_info, 1); + tx_failed_cnt++; + } else { + loopback_tx_cnt++; + tx_failed_cnt = 0; + } + if (tx_failed_cnt == 5) { + DBUSERR(("%s : Failed to send loopback packets cnt=%d loopback_tx_cnt=%d\n", + __FUNCTION__, cnt, loopback_tx_cnt)); + break; + } + } + printf("Transmitted %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size); + + last_rx_cnt = loopback_rx_cnt; + while (loopback_rx_cnt < loopback_tx_cnt) { + dbus_usbos_wait(usbos_info, 1); + if (loopback_rx_cnt <= last_rx_cnt) { + DBUSERR(("%s: Matched rx cnt stuck at %d \n", __FUNCTION__, last_rx_cnt)); + return BCME_ERROR; + } + last_rx_cnt = loopback_rx_cnt; + } + printf("Received %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size); + + return BCME_OK; +} /* dbus_usbos_loopback_tx */ + +/** + * Higher layer (dbus_usb.c) wants to transmit an I/O Request Block + * @param[in] txirb txirb->pkt, if non-zero, contains a single or a chain of packets + */ +static int +dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + urb_req_t *req, *req_zlp = NULL; + int ret = DBUS_OK; + unsigned long flags; + void *pkt; + uint32 buffer_length; + uint8 *buf; + + if ((usbos_info == NULL) || !usbos_info->tx_pipe) { + return DBUS_ERR; + } + + if (txirb->pkt != NULL) { + buffer_length = pkttotlen(usbos_info->pub->osh, txirb->pkt); + /* In case of multiple packets the values below may be overwritten */ + txirb->send_buf = NULL; + buf = PKTDATA(usbos_info->pub->osh, txirb->pkt); + } else { /* txirb->buf != NULL */ + ASSERT(txirb->buf != NULL); + ASSERT(txirb->send_buf == NULL); + buffer_length = txirb->len; + buf = txirb->buf; + } + + if (!(req = dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) { + DBUSERR(("%s No free URB!\n", __FUNCTION__)); + return DBUS_ERR_TXDROP; + } + + /* If not using standard Linux kernel functionality for handling Zero Length Packet(ZLP), + * the dbus needs to generate ZLP when length is multiple of MaxPacketSize. 
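+ * A ZLP is a zero-length bulk transfer that terminates a transfer whose total length
+ * is an exact multiple of the endpoint's wMaxPacketSize; without it the receiver keeps
+ * waiting for more data.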
+ */ +#ifndef WL_URB_ZPKT + if (!(buffer_length % usbos_info->maxps)) { + if (!(req_zlp = + dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) { + DBUSERR(("%s No free URB for ZLP!\n", __FUNCTION__)); + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); + return DBUS_ERR_TXDROP; + } + + /* No txirb, so that dbus_usbos_send_complete can differentiate between + * DATA and ZLP. + */ + req_zlp->arg = NULL; + req_zlp->usbinfo = usbos_info; + req_zlp->buf_len = 0; + + usb_fill_bulk_urb(req_zlp->urb, usbos_info->usb, usbos_info->tx_pipe, NULL, + 0, (usb_complete_t)dbus_usbos_send_complete, req_zlp); + + req_zlp->urb->transfer_flags |= URB_QUEUE_BULK; + } +#endif /* !WL_URB_ZPKT */ + +#ifndef USBOS_TX_THREAD + /* Disable USB autosuspend until this request completes, request USB resume if needed. + * Because this call runs asynchronously, there is no guarantee the bus is resumed before + * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid + * this. + */ + USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf); +#endif /* !USBOS_TX_THREAD */ + + spin_lock_irqsave(&usbos_info->txlock, flags); + + req->arg = txirb; + req->usbinfo = usbos_info; + req->buf_len = 0; + + /* Prepare the URB */ + if (txirb->pkt != NULL) { + uint32 pktlen; + uint8 *transfer_buf; + + /* For multiple packets, allocate contiguous buffer and copy packet data to it */ + if (PKTNEXT(usbos_info->pub->osh, txirb->pkt)) { + transfer_buf = MALLOC(usbos_info->pub->osh, buffer_length); + if (!transfer_buf) { + ret = DBUS_ERR_TXDROP; + DBUSERR(("fail to alloc to usb buffer\n")); + goto fail; + } + + pkt = txirb->pkt; + txirb->send_buf = transfer_buf; + req->buf_len = buffer_length; + + while (pkt) { + pktlen = PKTLEN(usbos_info->pub->osh, pkt); + bcopy(PKTDATA(usbos_info->pub->osh, pkt), transfer_buf, pktlen); + transfer_buf += pktlen; + pkt = PKTNEXT(usbos_info->pub->osh, pkt); + } + + ASSERT(((uint8 *) txirb->send_buf + buffer_length) == transfer_buf); + + /* Overwrite buf pointer with pointer to allocated contiguous transfer_buf + */ + buf = txirb->send_buf; + } + } + + usb_fill_bulk_urb(req->urb, usbos_info->usb, usbos_info->tx_pipe, buf, + buffer_length, (usb_complete_t)dbus_usbos_send_complete, req); + + req->urb->transfer_flags |= URB_QUEUE_BULK; + +#ifdef USBOS_TX_THREAD + /* Enqueue TX request, the TX thread will resume the bus if needed and submit + * it asynchronously + */ + dbus_usbos_qenq(&usbos_info->usbos_tx_list, req, &usbos_info->usbos_tx_list_lock); + if (req_zlp != NULL) { + dbus_usbos_qenq(&usbos_info->usbos_tx_list, req_zlp, + &usbos_info->usbos_tx_list_lock); + } + spin_unlock_irqrestore(&usbos_info->txlock, flags); + + wake_up_interruptible(&usbos_info->usbos_tx_queue_head); + return DBUS_OK; +#else + if ((ret = USB_SUBMIT_URB(req->urb))) { + ret = DBUS_ERR_TXDROP; + goto fail; + } + + dbus_usbos_qenq(&usbos_info->req_txpostedq, req, &usbos_info->txposted_lock); + atomic_inc(&usbos_info->txposted); + + if (req_zlp != NULL) { + if ((ret = USB_SUBMIT_URB(req_zlp->urb))) { + DBUSERR(("failed to submit ZLP URB!\n")); + ASSERT(0); + ret = DBUS_ERR_TXDROP; + goto fail2; + } + + dbus_usbos_qenq(&usbos_info->req_txpostedq, req_zlp, &usbos_info->txposted_lock); + /* Also increment txposted for zlp packet, as it will be decremented in + * dbus_usbos_send_complete() + */ + atomic_inc(&usbos_info->txposted); + } + + spin_unlock_irqrestore(&usbos_info->txlock, flags); + return DBUS_OK; +#endif /* USBOS_TX_THREAD */ + +fail: + if (txirb->send_buf != NULL) { + 
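/* free the contiguous copy buffer allocated for chained packets */ +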
MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len); + txirb->send_buf = NULL; + req->buf_len = 0; + } + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); +#ifndef USBOS_TX_THREAD +fail2: +#endif + if (req_zlp != NULL) { + dbus_usbos_qenq(&usbos_info->req_txfreeq, req_zlp, &usbos_info->txfree_lock); + } + + spin_unlock_irqrestore(&usbos_info->txlock, flags); + +#ifndef USBOS_TX_THREAD + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); +#endif /* !USBOS_TX_THREAD */ + + return ret; +} /* dbus_usbos_intf_send_irb */ + +/** Higher layer (dbus_usb.c) recycles a received (and used) packet. */ +static int +dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + int ret = DBUS_OK; + + if (usbos_info == NULL) + return DBUS_ERR; + + ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, 0); + return ret; +} + +static int +dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + int ret = DBUS_OK; + + if (usbos_info == NULL) + return DBUS_ERR; + +#ifdef INTR_EP_ENABLE + /* By specifying the ep_idx value of 0xff, the cdc layer is asking to + * submit an interrupt URB + */ + if (rxirb == NULL && ep_idx == 0xff) { + /* submit intr URB */ + if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb)) < 0) { + DBUSERR(("%s intr USB_SUBMIT_URB failed, status %d\n", + __FUNCTION__, ret)); + } + return ret; + } +#else + if (rxirb == NULL) { + return DBUS_ERR; + } +#endif /* INTR_EP_ENABLE */ + + ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, ep_idx); + return ret; +} + +/** Higher layer (dbus_usb.c) want to cancel an IRB */ +static int +dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if (usbos_info == NULL) + return DBUS_ERR; + + return DBUS_ERR; +} + +/** Only one CTL transfer can be pending at any time. This function may block. */ +static int +dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + uint16 size; +#ifndef USBOS_TX_THREAD + int status; +#endif /* USBOS_TX_THREAD */ + + if ((usbos_info == NULL) || (buf == NULL) || (len == 0)) + return DBUS_ERR; + + if (usbos_info->ctl_urb == NULL) + return DBUS_ERR; + + /* Block until a pending CTL transfer has completed */ + if (down_interruptible(&usbos_info->ctl_lock) != 0) { + return DBUS_ERR_TXCTLFAIL; + } + +#ifdef USBOS_TX_THREAD + ASSERT(usbos_info->ctl_state == USBOS_REQUEST_STATE_UNSCHEDULED); +#else + /* Disable USB autosuspend until this request completes, request USB resume if needed. + * Because this call runs asynchronously, there is no guarantee the bus is resumed before + * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid + * this. + */ + USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf); +#endif /* USBOS_TX_THREAD */ + + size = len; + usbos_info->ctl_write.wLength = cpu_to_le16p(&size); + usbos_info->ctl_urb->transfer_buffer_length = size; + + usb_fill_control_urb(usbos_info->ctl_urb, + usbos_info->usb, + usb_sndctrlpipe(usbos_info->usb, 0), + (unsigned char *) &usbos_info->ctl_write, + buf, size, (usb_complete_t)dbus_usbos_ctlwrite_complete, usbos_info); + +#ifdef USBOS_TX_THREAD + /* Enqueue CTRL request for transmission by the TX thread. The + * USB bus will first be resumed if needed. 
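+ * Because only one CTL URB exists, ctl_lock (taken above) keeps the URB
+ * exclusively owned until the transfer completes.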
+ */ + usbos_info->ctl_state = USBOS_REQUEST_STATE_SCHEDULED; + wake_up_interruptible(&usbos_info->usbos_tx_queue_head); +#else + status = USB_SUBMIT_URB(usbos_info->ctl_urb); + if (status < 0) { + DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status)); + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + + return DBUS_ERR_TXCTLFAIL; + } +#endif /* USBOS_TX_THREAD */ + + return DBUS_OK; +} /* dbus_usbos_intf_send_ctl */ + +/** This function does not seem to be called by anyone, including dbus_usb.c */ +static int +dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + int status; + uint16 size; + + if ((usbos_info == NULL) || (buf == NULL) || (len == 0)) + return DBUS_ERR; + + if (usbos_info->ctl_urb == NULL) + return DBUS_ERR; + + /* Block until a pending CTRL transfer has completed */ + if (down_interruptible(&usbos_info->ctl_lock) != 0) { + return DBUS_ERR_TXCTLFAIL; + } + + /* Disable USB autosuspend until this request completes, request USB resume if needed. */ + USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf); + + size = len; + usbos_info->ctl_read.wLength = cpu_to_le16p(&size); + usbos_info->ctl_urb->transfer_buffer_length = size; + + if (usbos_info->rxctl_deferrespok) { + /* BMAC model */ + usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE; + usbos_info->ctl_read.bRequest = DL_DEFER_RESP_OK; + } else { + /* full dongle model */ + usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS | + USB_RECIP_INTERFACE; + usbos_info->ctl_read.bRequest = 1; + } + + usb_fill_control_urb(usbos_info->ctl_urb, + usbos_info->usb, + usb_rcvctrlpipe(usbos_info->usb, 0), + (unsigned char *) &usbos_info->ctl_read, + buf, size, (usb_complete_t)dbus_usbos_ctlread_complete, usbos_info); + + status = USB_SUBMIT_URB(usbos_info->ctl_urb); + if (status < 0) { + DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status)); + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + + return DBUS_ERR_RXCTLFAIL; + } + + return DBUS_OK; +} + +static int +dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if ((usbos_info == NULL) || (attrib == NULL)) + return DBUS_ERR; + + attrib->bustype = DBUS_USB; + attrib->vid = g_probe_info.vid; + attrib->pid = g_probe_info.pid; + attrib->devid = 0x4322; + + attrib->nchan = 1; + + /* MaxPacketSize for USB hi-speed bulk out is 512 bytes + * and 64-bytes for full-speed. + * When sending pkt > MaxPacketSize, Host SW breaks it + * up into multiple packets. 
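+ * The mtu reported below is therefore the bulk endpoint's maximum packet size,
+ * not an upper bound on the frame size.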
+ */ + attrib->mtu = usbos_info->maxps; + + return DBUS_OK; +} + +/** Called by higher layer (dbus_usb.c) when it wants to 'up' the USB interface to the dongle */ +static int +dbus_usbos_intf_up(void *bus) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + uint16 ifnum; +#ifdef BCMUSBDEV_COMPOSITE + int wlan_if = 0; +#endif + if (usbos_info == NULL) + return DBUS_ERR; + + if (usbos_info->usb == NULL) + return DBUS_ERR; + +#if defined(INTR_EP_ENABLE) + /* full dongle use intr EP, bmac doesn't use it */ + if (usbos_info->intr_urb) { + int ret; + + usb_fill_int_urb(usbos_info->intr_urb, usbos_info->usb, + usbos_info->intr_pipe, &usbos_info->intr, + usbos_info->intr_size, (usb_complete_t)dbus_usbos_intr_complete, + usbos_info, usbos_info->interval); + + if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb))) { + DBUSERR(("%s USB_SUBMIT_URB failed with status %d\n", __FUNCTION__, ret)); + return DBUS_ERR; + } + } +#endif + + if (usbos_info->ctl_urb) { + usbos_info->ctl_in_pipe = usb_rcvctrlpipe(usbos_info->usb, 0); + usbos_info->ctl_out_pipe = usb_sndctrlpipe(usbos_info->usb, 0); + +#ifdef BCMUSBDEV_COMPOSITE + wlan_if = dbus_usbos_intf_wlan(usbos_info->usb); + ifnum = cpu_to_le16(IFDESC(usbos_info->usb, wlan_if).bInterfaceNumber); +#else + ifnum = cpu_to_le16(IFDESC(usbos_info->usb, CONTROL_IF).bInterfaceNumber); +#endif /* BCMUSBDEV_COMPOSITE */ + /* CTL Write */ + usbos_info->ctl_write.bRequestType = + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + usbos_info->ctl_write.bRequest = 0; + usbos_info->ctl_write.wValue = cpu_to_le16(0); + usbos_info->ctl_write.wIndex = cpu_to_le16p(&ifnum); + + /* CTL Read */ + usbos_info->ctl_read.bRequestType = + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + usbos_info->ctl_read.bRequest = 1; + usbos_info->ctl_read.wValue = cpu_to_le16(0); + usbos_info->ctl_read.wIndex = cpu_to_le16p(&ifnum); + } + + /* Success, indicate usbos_info is fully up */ + dbus_usbos_state_change(usbos_info, DBUS_STATE_UP); + + return DBUS_OK; +} /* dbus_usbos_intf_up */ + +static int +dbus_usbos_intf_down(void *bus) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if (usbos_info == NULL) + return DBUS_ERR; + + dbusos_stop(usbos_info); + return DBUS_OK; +} + +static int +dbus_usbos_intf_stop(void *bus) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if (usbos_info == NULL) + return DBUS_ERR; + + dbusos_stop(usbos_info); + return DBUS_OK; +} + + +/** Called by higher layer (dbus_usb.c) */ +static int +dbus_usbos_intf_set_config(void *bus, dbus_config_t *config) +{ + int err = DBUS_ERR; + usbos_info_t* usbos_info = bus; + + if (config->config_id == DBUS_CONFIG_ID_RXCTL_DEFERRES) { + usbos_info->rxctl_deferrespok = config->rxctl_deferrespok; + err = DBUS_OK; + } else if (config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) { + /* DBUS_CONFIG_ID_AGGR_LIMIT shouldn't be called after probe stage */ + ASSERT(disc_arg == NULL); + ASSERT(config->aggr_param.maxrxsf > 0); + ASSERT(config->aggr_param.maxrxsize > 0); + if (config->aggr_param.maxrxsize > usbos_info->rxbuf_len) { + int state = usbos_info->pub->busstate; + dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock); + while (atomic_read(&usbos_info->rxposted)) { + DBUSTRACE(("%s rxposted is %d, delay 1 ms\n", __FUNCTION__, + atomic_read(&usbos_info->rxposted))); + dbus_usbos_wait(usbos_info, 1); + } + usbos_info->rxbuf_len = config->aggr_param.maxrxsize; + dbus_usbos_state_change(usbos_info, state); + } + err = DBUS_OK; + } + + return err; +} + + +/** Called by dbus_usb.c when it wants to 
download firmware into the dongle */ +bool +dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen) +{ + int transferred; + int index = 0; + char *tmpbuf; + + if ((usbinfo == NULL) || (buffer == NULL) || (buflen == 0)) + return FALSE; + + tmpbuf = (char *) MALLOC(usbinfo->pub->osh, buflen); + if (!tmpbuf) { + DBUSERR(("%s: Unable to allocate memory \n", __FUNCTION__)); + return FALSE; + } + +#ifdef BCM_REQUEST_FW + if (cmd == DL_GO) { + index = 1; + } +#endif + + /* Disable USB autosuspend until this request completes, request USB resume if needed. */ + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0), + cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE), + 0, index, + (void*) tmpbuf, buflen, USB_CTRL_EP_TIMEOUT); + if (transferred == buflen) { + memcpy(buffer, tmpbuf, buflen); + } else { + DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred)); + } + + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + MFREE(usbinfo->pub->osh, tmpbuf, buflen); + return (transferred == buflen); +} + +/** + * Called by dbus_usb.c when it wants to download a buffer into the dongle (e.g. as part of the + * download process, when writing nvram variables). + */ +int +dbus_write_membytes(usbos_info_t* usbinfo, bool set, uint32 address, uint8 *data, uint size) +{ + hwacc_t hwacc; + int write_bytes = 4; + int status; + int retval = 0; + + DBUSTRACE(("Enter:%s\n", __FUNCTION__)); + + /* Read is not supported */ + if (set == 0) { + DBUSERR(("Currently read is not supported!!\n")); + return -1; + } + + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + hwacc.cmd = DL_CMD_WRHW; + hwacc.addr = address; + + DBUSTRACE(("Address:%x size:%d", hwacc.addr, size)); + do { + if (size >= 4) { + write_bytes = 4; + } else if (size >= 2) { + write_bytes = 2; + } else { + write_bytes = 1; + } + + hwacc.len = write_bytes; + + while (size >= write_bytes) { + hwacc.data = *((unsigned int*)data); + + status = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0), + DL_WRHW, (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE), + 1, 0, (char *)&hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT); + + if (status < 0) { + retval = -1; + DBUSERR((" Ctrl write hwacc failed w/status %d @ address:%x \n", + status, hwacc.addr)); + goto err; + } + + hwacc.addr += write_bytes; + data += write_bytes; + size -= write_bytes; + } + } while (size > 0); + +err: + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + return retval; +} + +int +dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value) +{ + usbos_info_t *usbinfo = (usbos_info_t *) bus; + int ret = DBUS_OK; + int transferred; + uint32 cmd; + hwacc_t hwacc; + + if (usbinfo == NULL) + return DBUS_ERR; + + if (datalen == 1) + cmd = DL_RDHW8; + else if (datalen == 2) + cmd = DL_RDHW16; + else + cmd = DL_RDHW32; + + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0), + cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE), + (uint16)(regaddr), (uint16)(regaddr >> 16), + (void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT); + + if (transferred >= sizeof(hwacc_t)) { + *value = hwacc.data; + } else { + DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred)); + ret = DBUS_ERR; + } + + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + return ret; +} + +int +dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data) +{ + usbos_info_t *usbinfo = (usbos_info_t *) 
bus;
+	int ret = DBUS_OK;
+	int transferred;
+	uint32 cmd = DL_WRHW;
+	hwacc_t hwacc;
+
+	if (usbinfo == NULL)
+		return DBUS_ERR;
+
+	USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+	hwacc.cmd = DL_WRHW;
+	hwacc.addr = regaddr;
+	hwacc.data = data;
+	hwacc.len = datalen;
+
+	transferred = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0),
+		cmd, (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+		1, 0,
+		(void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT);
+
+	if (transferred != sizeof(hwacc_t)) {
+		DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred));
+		ret = DBUS_ERR;
+	}
+
+	USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+	return ret;
+}
+
+int
+dbus_usbos_wait(usbos_info_t *usbinfo, uint16 ms)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	if (in_interrupt())
+		mdelay(ms);
+	else
+		msleep_interruptible(ms);
+#else
+	wait_ms(ms);
+#endif
+	return DBUS_OK;
+}
+
+/** Called by dbus_usb.c as part of the firmware download process */
+bool
+dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len)
+{
+	bool ret = TRUE;
+	int status;
+	int transferred = 0;
+
+	if (usbinfo == NULL)
+		return FALSE;
+
+	USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+	status = USB_BULK_MSG(usbinfo->usb, usbinfo->tx_pipe,
+		buffer, len,
+		&transferred, USB_BULK_EP_TIMEOUT);
+
+	if (status < 0) {
+		DBUSERR(("%s: usb_bulk_msg failed %d\n", __FUNCTION__, status));
+		ret = FALSE;
+	}
+
+	USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+	return ret;
+}
+
+static bool
+dbus_usbos_intf_recv_needed(void *bus)
+{
+	return FALSE;
+}
+
+/**
+ * Higher layer (dbus_usb.c) wants to execute a function on the condition that the rx spin lock has
+ * been acquired.
+ */
+static void*
+dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	void *ret;
+	unsigned long flags;
+
+	if (usbos_info == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&usbos_info->rxlock, flags);
+	ret = cb(args);
+	spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+
+	return ret;
+}
+
+static void*
+dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	void *ret;
+	unsigned long flags;
+
+	if (usbos_info == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&usbos_info->txlock, flags);
+	ret = cb(args);
+	spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+	return ret;
+}
+
+/**
+ * If an error condition was detected in this module, the higher DBUS layer (dbus_usb.c) has to
+ * be notified.
+ */
+int
+dbus_usbos_errhandler(void *bus, int err)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	if (usbos_info->cbarg && usbos_info->cbs) {
+		if (usbos_info->cbs->errhandler)
+			usbos_info->cbs->errhandler(usbos_info->cbarg, err);
+	}
+
+	return DBUS_OK;
+}
+
+/**
+ * If a change in bus state was detected in this module, the higher DBUS layer (dbus_usb.c) has to
+ * be notified.
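+ * The new state is additionally mirrored into pub->busstate so other layers can
+ * inspect it without a callback.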
+ */
+int
+dbus_usbos_state_change(void *bus, int state)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	if (usbos_info->cbarg && usbos_info->cbs) {
+		if (usbos_info->cbs->state_change)
+			usbos_info->cbs->state_change(usbos_info->cbarg, state);
+	}
+
+	usbos_info->pub->busstate = state;
+	return DBUS_OK;
+}
+
+int
+dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb,
+	disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2)
+{
+	bzero(&g_probe_info, sizeof(probe_info_t));
+
+	probe_cb = prcb;
+	disconnect_cb = discb;
+	probe_arg = prarg;
+
+	devid_table[0].idVendor = vid;
+	devid_table[0].idProduct = pid;
+
+	*intf = &dbus_usbos_intf;
+
+	USB_REGISTER();
+
+	return DBUS_ERR_NODEVICE;
+}
+
+int
+dbus_bus_osl_deregister()
+{
+	g_probe_info.dereged = TRUE;
+
+	DHD_MUTEX_LOCK();
+	if (disconnect_cb && disc_arg && (g_probe_info.disc_cb_done == FALSE)) {
+		disconnect_cb(disc_arg);
+		disc_arg = NULL;
+	}
+	DHD_MUTEX_UNLOCK();
+
+	USB_DEREGISTER();
+
+	return DBUS_OK;
+}
+
+void *
+dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs)
+{
+	usbos_info_t *usbos_info;
+
+	if (g_probe_info.dldone == FALSE) {
+		DBUSERR(("%s: err device not downloaded!\n", __FUNCTION__));
+		return NULL;
+	}
+
+	/* Sanity check for BUS_INFO() */
+	ASSERT(OFFSETOF(usbos_info_t, pub) == 0);
+
+	usbos_info = MALLOC(pub->osh, sizeof(usbos_info_t));
+	if (usbos_info == NULL)
+		return NULL;
+
+	bzero(usbos_info, sizeof(usbos_info_t));
+
+	usbos_info->pub = pub;
+	usbos_info->cbarg = cbarg;
+	usbos_info->cbs = cbs;
+
+	/* Needed for disconnect() */
+	g_probe_info.usbos_info = usbos_info;
+
+	/* Update USB Info */
+	usbos_info->usb = g_probe_info.usb;
+	usbos_info->rx_pipe = g_probe_info.rx_pipe;
+	usbos_info->rx_pipe2 = g_probe_info.rx_pipe2;
+	usbos_info->tx_pipe = g_probe_info.tx_pipe;
+	usbos_info->intr_pipe = g_probe_info.intr_pipe;
+	usbos_info->intr_size = g_probe_info.intr_size;
+	usbos_info->interval = g_probe_info.interval;
+	usbos_info->pub->device_speed = g_probe_info.device_speed;
+	if (usbos_info->rx_pipe2) {
+		usbos_info->pub->attrib.has_2nd_bulk_in_ep = 1;
+	} else {
+		usbos_info->pub->attrib.has_2nd_bulk_in_ep = 0;
+	}
+
+	if (usbos_info->tx_pipe)
+		usbos_info->maxps = usb_maxpacket(usbos_info->usb,
+			usbos_info->tx_pipe, usb_pipeout(usbos_info->tx_pipe));
+
+	INIT_LIST_HEAD(&usbos_info->req_rxfreeq);
+	INIT_LIST_HEAD(&usbos_info->req_txfreeq);
+	INIT_LIST_HEAD(&usbos_info->req_rxpostedq);
+	INIT_LIST_HEAD(&usbos_info->req_txpostedq);
+	spin_lock_init(&usbos_info->rxfree_lock);
+	spin_lock_init(&usbos_info->txfree_lock);
+	spin_lock_init(&usbos_info->rxposted_lock);
+	spin_lock_init(&usbos_info->txposted_lock);
+	spin_lock_init(&usbos_info->rxlock);
+	spin_lock_init(&usbos_info->txlock);
+
+	atomic_set(&usbos_info->rxposted, 0);
+	atomic_set(&usbos_info->txposted, 0);
+
+#ifdef USB_DISABLE_INT_EP
+	usbos_info->intr_urb = NULL;
+#else
+	if (!(usbos_info->intr_urb = USB_ALLOC_URB())) {
+		DBUSERR(("%s: usb_alloc_urb (intr) failed\n", __FUNCTION__));
+		goto fail;
+	}
+#endif
+
+	if (!(usbos_info->ctl_urb = USB_ALLOC_URB())) {
+		DBUSERR(("%s: usb_alloc_urb (ctl) failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	init_waitqueue_head(&usbos_info->wait);
+
+	if (!(usbos_info->blk_urb = USB_ALLOC_URB())) { /* for embedded image downloading */
+		DBUSERR(("%s: usb_alloc_urb (blk) failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	usbos_info->rxbuf_len = (uint)usbos_info->pub->rxsize;
+
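+	/* Pre-allocate the URB request pools. The TX pool is allocated in full here;
+	 * only an initial batch (DBUS_USB_RXQUEUE_BATCH_ADD) of the RX pool is.
+	 */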
atomic_set(&usbos_info->txallocated, 0); + if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info, + usbos_info->pub->ntxq, FALSE)) { + goto fail; + } + + atomic_set(&usbos_info->rxallocated, 0); + if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info, + MIN(DBUS_USB_RXQUEUE_BATCH_ADD, usbos_info->pub->nrxq), + TRUE)) { + goto fail; + } + + sema_init(&usbos_info->ctl_lock, 1); + +#ifdef USBOS_THREAD + if (dbus_usbos_thread_init(usbos_info) == NULL) + goto fail; +#endif /* USBOS_THREAD */ + +#ifdef USBOS_TX_THREAD + if (dbus_usbos_tx_thread_init(usbos_info) == NULL) + goto fail; +#endif /* USBOS_TX_THREAD */ + + pub->dev_info = g_probe_info.usb; + + + return (void *) usbos_info; +fail: + if (usbos_info->intr_urb) { + USB_FREE_URB(usbos_info->intr_urb); + usbos_info->intr_urb = NULL; + } + + if (usbos_info->ctl_urb) { + USB_FREE_URB(usbos_info->ctl_urb); + usbos_info->ctl_urb = NULL; + } + +#if defined(BCM_REQUEST_FW) + if (usbos_info->blk_urb) { + USB_FREE_URB(usbos_info->blk_urb); + usbos_info->blk_urb = NULL; + } +#endif + + dbus_usbos_urbreqs_free(usbos_info, TRUE); + atomic_set(&usbos_info->rxallocated, 0); + dbus_usbos_urbreqs_free(usbos_info, FALSE); + atomic_set(&usbos_info->txallocated, 0); + + g_probe_info.usbos_info = NULL; + + MFREE(pub->osh, usbos_info, sizeof(usbos_info_t)); + return NULL; +} /* dbus_usbos_intf_attach */ + +void +dbus_usbos_intf_detach(dbus_pub_t *pub, void *info) +{ + usbos_info_t *usbos_info = (usbos_info_t *) info; + osl_t *osh = pub->osh; + + if (usbos_info == NULL) { + return; + } + +#ifdef USBOS_TX_THREAD + dbus_usbos_tx_thread_deinit(usbos_info); +#endif /* USBOS_TX_THREAD */ + + /* Must unlink all URBs prior to driver unload; + * otherwise an URB callback can occur after driver + * has been de-allocated and rmmod'd + */ + dbusos_stop(usbos_info); + + if (usbos_info->intr_urb) { + USB_FREE_URB(usbos_info->intr_urb); + usbos_info->intr_urb = NULL; + } + + if (usbos_info->ctl_urb) { + USB_FREE_URB(usbos_info->ctl_urb); + usbos_info->ctl_urb = NULL; + } + + if (usbos_info->blk_urb) { + USB_FREE_URB(usbos_info->blk_urb); + usbos_info->blk_urb = NULL; + } + + dbus_usbos_urbreqs_free(usbos_info, TRUE); + atomic_set(&usbos_info->rxallocated, 0); + dbus_usbos_urbreqs_free(usbos_info, FALSE); + atomic_set(&usbos_info->txallocated, 0); + +#ifdef USBOS_THREAD + dbus_usbos_thread_deinit(usbos_info); +#endif /* USBOS_THREAD */ + + g_probe_info.usbos_info = NULL; + MFREE(osh, usbos_info, sizeof(usbos_info_t)); +} /* dbus_usbos_intf_detach */ + + +#ifdef USBOS_TX_THREAD + +void* +dbus_usbos_tx_thread_init(usbos_info_t *usbos_info) +{ + spin_lock_init(&usbos_info->usbos_tx_list_lock); + INIT_LIST_HEAD(&usbos_info->usbos_tx_list); + init_waitqueue_head(&usbos_info->usbos_tx_queue_head); + + usbos_info->usbos_tx_kt = kthread_create(dbus_usbos_tx_thread_func, + usbos_info, "usb-tx-thread"); + + if (IS_ERR(usbos_info->usbos_tx_kt)) { + DBUSERR(("Thread Creation failed\n")); + return (NULL); + } + + usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED; + wake_up_process(usbos_info->usbos_tx_kt); + + return (usbos_info->usbos_tx_kt); +} + +void +dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info) +{ + urb_req_t *req; + + if (usbos_info->usbos_tx_kt) { + wake_up_interruptible(&usbos_info->usbos_tx_queue_head); + kthread_stop(usbos_info->usbos_tx_kt); + } + + /* Move pending requests to free queue so they can be freed */ + while ((req = dbus_usbos_qdeq( + &usbos_info->usbos_tx_list, &usbos_info->usbos_tx_list_lock)) != NULL) { + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, 
&usbos_info->txfree_lock); + } +} + +/** + * Allow USB in-band resume to block by submitting CTRL and DATA URBs on a separate thread. + */ +int +dbus_usbos_tx_thread_func(void *data) +{ + usbos_info_t *usbos_info = (usbos_info_t *)data; + urb_req_t *req; + dbus_irb_tx_t *txirb; + int ret; + unsigned long flags; + +#ifdef WL_THREADNICE + set_user_nice(current, WL_THREADNICE); +#endif + + while (1) { + /* Wait until there are URBs to submit */ + wait_event_interruptible_timeout( + usbos_info->usbos_tx_queue_head, + !list_empty(&usbos_info->usbos_tx_list) || + usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED, + 100); + + if (kthread_should_stop()) + break; + + /* Submit CTRL URB if needed */ + if (usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED) { + + /* Disable USB autosuspend until this request completes. If the + * interface was suspended, this call blocks until it has been resumed. + */ + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + usbos_info->ctl_state = USBOS_REQUEST_STATE_SUBMITTED; + + ret = USB_SUBMIT_URB(usbos_info->ctl_urb); + if (ret != 0) { + DBUSERR(("%s CTRL USB_SUBMIT_URB failed, status %d\n", + __FUNCTION__, ret)); + + usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED; + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + } + } + + /* Submit all available TX URBs */ + while ((req = dbus_usbos_qdeq(&usbos_info->usbos_tx_list, + &usbos_info->usbos_tx_list_lock)) != NULL) { + + /* Disable USB autosuspend until this request completes. If the + * interface was suspended, this call blocks until it has been resumed. + */ + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + spin_lock_irqsave(&usbos_info->txlock, flags); + + ret = USB_SUBMIT_URB(req->urb); + if (ret == 0) { + /* URB submitted successfully */ + dbus_usbos_qenq(&usbos_info->req_txpostedq, req, + &usbos_info->txposted_lock); + atomic_inc(&usbos_info->txposted); + } else { + /* Submitting the URB failed. */ + DBUSERR(("%s TX USB_SUBMIT_URB failed, status %d\n", + __FUNCTION__, ret)); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + } + + spin_unlock_irqrestore(&usbos_info->txlock, flags); + + if (ret != 0) { + /* Cleanup and notify higher layers */ + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, + &usbos_info->txfree_lock); + + txirb = req->arg; + if (txirb->send_buf) { + MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len); + txirb->send_buf = NULL; + req->buf_len = 0; + } + + if (likely (usbos_info->cbarg && usbos_info->cbs)) { + if (likely (usbos_info->cbs->send_irb_complete != NULL)) + usbos_info->cbs->send_irb_complete( + usbos_info->cbarg, txirb, DBUS_ERR_TXDROP); + } + } + } + } + + return 0; +} /* dbus_usbos_tx_thread_func */ + +#endif /* USBOS_TX_THREAD */ + +#ifdef USBOS_THREAD + +/** + * Increase system performance by creating a USB thread that runs parallel to other system + * activity. 
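+ * URB completion callbacks merely queue work items (dbus_usbos_dispatch_schedule());
+ * the actual receive processing then runs in this thread's context.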
+ */ +static void* +dbus_usbos_thread_init(usbos_info_t *usbos_info) +{ + usbos_list_entry_t *entry; + unsigned long flags, ii; + + spin_lock_init(&usbos_info->usbos_list_lock); + spin_lock_init(&usbos_info->ctrl_lock); + INIT_LIST_HEAD(&usbos_info->usbos_list); + INIT_LIST_HEAD(&usbos_info->usbos_free_list); + init_waitqueue_head(&usbos_info->usbos_queue_head); + atomic_set(&usbos_info->usbos_list_cnt, 0); + + + for (ii = 0; ii < (usbos_info->pub->nrxq + usbos_info->pub->ntxq); ii++) { + entry = MALLOC(usbos_info->pub->osh, sizeof(usbos_list_entry_t)); + if (entry) { + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + list_add_tail((struct list_head*) entry, &usbos_info->usbos_free_list); + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + } else { + DBUSERR(("Failed to create list\n")); + } + } + + usbos_info->usbos_kt = kthread_create(dbus_usbos_thread_func, + usbos_info, "usb-thread"); + + if (IS_ERR(usbos_info->usbos_kt)) { + DBUSERR(("Thread Creation failed\n")); + return (NULL); + } + + wake_up_process(usbos_info->usbos_kt); + + return (usbos_info->usbos_kt); +} + +static void +dbus_usbos_thread_deinit(usbos_info_t *usbos_info) +{ + struct list_head *cur, *next; + usbos_list_entry_t *entry; + unsigned long flags; + + if (usbos_info->usbos_kt) { + wake_up_interruptible(&usbos_info->usbos_queue_head); + kthread_stop(usbos_info->usbos_kt); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_safe(cur, next, &usbos_info->usbos_list) + { + entry = list_entry(cur, struct usbos_list_entry, list); + /* detach this entry from the list and then free the entry */ + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + list_del(cur); + MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t)); + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + } + + list_for_each_safe(cur, next, &usbos_info->usbos_free_list) + { + entry = list_entry(cur, struct usbos_list_entry, list); + /* detach this entry from the list and then free the entry */ + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + list_del(cur); + MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t)); + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif +} + +/** Process completed URBs in a worker thread */ +static int +dbus_usbos_thread_func(void *data) +{ + usbos_info_t *usbos_info = (usbos_info_t *)data; + usbos_list_entry_t *entry; + struct list_head *cur, *next; + unsigned long flags; + +#ifdef WL_THREADNICE + set_user_nice(current, WL_THREADNICE); +#endif + + while (1) { + /* If the list is empty, then go to sleep */ + wait_event_interruptible_timeout + (usbos_info->usbos_queue_head, + atomic_read(&usbos_info->usbos_list_cnt) > 0, + 100); + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + + /* For each entry on the list, process it. Remove the entry from + * the list when done. 
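+ * The list lock is dropped while the completion handler runs so that new
+ * completions can be queued concurrently.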
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		list_for_each_safe(cur, next, &usbos_info->usbos_list)
+		{
+			urb_req_t *req;
+			int len;
+			int stat;
+			usbos_info_t *usbos_info_local;
+
+			entry = list_entry(cur, struct usbos_list_entry, list);
+			if (entry == NULL)
+				break;
+
+			req = entry->urb_context;
+			len = entry->urb_length;
+			stat = entry->urb_status;
+			usbos_info_local = req->usbinfo;
+
+			/* detach this entry from the list and attach it to the free list */
+			list_del_init(cur);
+			spin_unlock_irqrestore(&usbos_info_local->usbos_list_lock, flags);
+
+			dbus_usbos_recv_complete_handle(req, len, stat);
+
+			spin_lock_irqsave(&usbos_info_local->usbos_list_lock, flags);
+
+			list_add_tail(cur, &usbos_info_local->usbos_free_list);
+
+			atomic_dec(&usbos_info_local->usbos_list_cnt);
+		}
+
+		spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	return 0;
+} /* dbus_usbos_thread_func */
+
+/** Called when Linux invokes the URB completion callback, see dbus_usbos_recv_complete() */
+static void
+dbus_usbos_dispatch_schedule(CALLBACK_ARGS)
+{
+	urb_req_t *req = urb->context;
+	usbos_info_t *usbos_info = req->usbinfo;
+	usbos_list_entry_t *entry;
+	unsigned long flags;
+	struct list_head *cur;
+
+	spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+
+	cur = usbos_info->usbos_free_list.next;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	entry = list_entry(cur, struct usbos_list_entry, list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	/* detach this entry from the free list and prepare to insert it into the in-use list */
+	list_del_init(cur);
+
+	if (entry) {
+		entry->urb_context = urb->context;
+		entry->urb_length = urb->actual_length;
+		entry->urb_status = urb->status;
+
+		atomic_inc(&usbos_info->usbos_list_cnt);
+		list_add_tail(cur, &usbos_info->usbos_list);
+	} else {
+		DBUSERR(("!!!!!!OUT OF MEMORY!!!!!!!\n"));
+	}
+
+	spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+
+	/* wake the worker thread */
+	wake_up_interruptible(&usbos_info->usbos_queue_head);
+} /* dbus_usbos_dispatch_schedule */
+
+#endif /* USBOS_THREAD */
+
+#ifdef BCM_REQUEST_FW
+
+struct request_fw_context {
+	const struct firmware *firmware;
+	struct semaphore lock;
+};
+
+/*
+ * Callback for dbus_request_firmware().
+ */
+static void
+dbus_request_firmware_done(const struct firmware *firmware, void *ctx)
+{
+	struct request_fw_context *context = (struct request_fw_context*)ctx;
+
+	/* Store the received firmware handle in the context and wake the requester */
+	context->firmware = firmware;
+	up(&context->lock);
+}
+
+/*
+ * Send a firmware request and wait for completion.
+ *
+ * The use of the asynchronous version of request_firmware() is needed to avoid
+ * kernel oopses when we have just come out of system hibernate.
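+ * A semaphore initialised to 0 turns the asynchronous completion callback back
+ * into a blocking wait for the caller.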
+ */ +static int +dbus_request_firmware(const char *name, const struct firmware **firmware) +{ + struct request_fw_context *context; + int ret; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + sema_init(&context->lock, 0); + + ret = request_firmware_nowait(THIS_MODULE, true, name, &g_probe_info.usb->dev, + GFP_KERNEL, context, dbus_request_firmware_done); + if (ret) { + kfree(context); + return ret; + } + + /* Wait for completion */ + if (down_interruptible(&context->lock) != 0) { + kfree(context); + return -ERESTARTSYS; + } + + *firmware = context->firmware; + kfree(context); + + return *firmware != NULL ? 0 : -ENOENT; +} + +static void * +dbus_get_fwfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev) +{ + const struct firmware *firmware = NULL; +#ifndef OEM_ANDROID + s8 *device_id = NULL; + s8 *chip_rev = ""; +#endif /* OEM_ANDROID */ + s8 file_name[64]; + int ret; + +#ifndef OEM_ANDROID + switch (devid) { + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + device_id = "4350"; + break; + case BCM43143_CHIP_ID: + device_id = "43143"; + break; + case BCM43234_CHIP_ID: + case BCM43235_CHIP_ID: + case BCM43236_CHIP_ID: + device_id = "43236"; + break; + case BCM43242_CHIP_ID: + device_id = "43242"; + break; + case BCM43238_CHIP_ID: + device_id = "43238"; + break; + case BCM43526_CHIP_ID: + device_id = "43526"; + break; + case BCM43569_CHIP_ID: + device_id = "43569"; + switch (chiprev) { + case 0: + chip_rev = "a0"; + break; + case 2: + chip_rev = "a2"; + break; + default: + break; + } + break; + default: + DBUSERR(("unsupported device %x\n", devid)); + return NULL; + } + + /* Load firmware */ + snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-firmware.bin", device_id, chip_rev); +#else + snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_FW_PATH); +#endif /* OEM_ANDROID */ + + ret = dbus_request_firmware(file_name, &firmware); + if (ret) { + DBUSERR(("fail to request firmware %s\n", file_name)); + return NULL; + } + + *fwlen = firmware->size; + *fw = (uint8 *)firmware->data; + return (void *)firmware; + +} + +static void * +dbus_get_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev) +{ + const struct firmware *firmware = NULL; +#ifndef OEM_ANDROID + s8 *device_id = NULL; + s8 *chip_rev = ""; +#endif /* OEM_ANDROID */ + s8 file_name[64]; + int ret; + +#ifndef OEM_ANDROID + switch (devid) { + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + device_id = "4350"; + break; + case BCM43143_CHIP_ID: + device_id = "43143"; + break; + case BCM43234_CHIP_ID: + device_id = "43234"; + break; + case BCM43235_CHIP_ID: + device_id = "43235"; + break; + case BCM43236_CHIP_ID: + device_id = "43236"; + break; + case BCM43238_CHIP_ID: + device_id = "43238"; + break; + case BCM43242_CHIP_ID: + device_id = "43242"; + break; + case BCM43526_CHIP_ID: + device_id = "43526"; + break; + case BCM43569_CHIP_ID: + device_id = "43569"; + switch (chiprev) { + case 0: + chip_rev = "a0"; + break; + case 2: + chip_rev = "a2"; + break; + default: + break; + } + break; + default: + DBUSERR(("unsupported device %x\n", devid)); + return NULL; + } + + /* Load board specific nvram file */ + 
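/* the "-%2x-%2x" suffix encodes boardtype and boardrev in hex */ +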
snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-%2x-%2x.nvm", + device_id, chip_rev, boardtype, boardrev); +#else + snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_NVRAM_PATH); +#endif /* OEM_ANDROID */ + + ret = dbus_request_firmware(file_name, &firmware); + if (ret) { + DBUSERR(("fail to request nvram %s\n", file_name)); + +#ifndef OEM_ANDROID + /* Load generic nvram file */ + snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s.nvm", + device_id, chip_rev); + + ret = dbus_request_firmware(file_name, &firmware); +#endif /* OEM_ANDROID */ + + if (ret) { + DBUSERR(("fail to request nvram %s\n", file_name)); + return NULL; + } + } + + *fwlen = firmware->size; + *fw = (uint8 *)firmware->data; + return (void *)firmware; +} + +void * +dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type, uint16 boardtype, + uint16 boardrev) +{ + switch (type) { + case DBUS_FIRMWARE: + return dbus_get_fwfile(devid, chiprev, fw, fwlen, boardtype, boardrev); + case DBUS_NVFILE: + return dbus_get_nvfile(devid, chiprev, fw, fwlen, boardtype, boardrev); + default: + return NULL; + } +} + +void +dbus_release_fw_nvfile(void *firmware) +{ + release_firmware((struct firmware *)firmware); +} +#endif /* BCM_REQUEST_FW */ + +#ifdef BCMUSBDEV_COMPOSITE +/** + * For a composite device the interface order is not guaranteed, scan the device struct for the WLAN + * interface. + */ +static int +dbus_usbos_intf_wlan(struct usb_device *usb) +{ + int i, num_of_eps, ep, intf_wlan = -1; + int num_intf = CONFIGDESC(usb)->bNumInterfaces; + struct usb_endpoint_descriptor *endpoint; + + for (i = 0; i < num_intf; i++) { + if (IFDESC(usb, i).bInterfaceClass != USB_CLASS_VENDOR_SPEC) + continue; + num_of_eps = IFDESC(usb, i).bNumEndpoints; + + for (ep = 0; ep < num_of_eps; ep++) { + endpoint = &IFEPDESC(usb, i, ep); + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK) { + intf_wlan = i; + break; + } + } + if (ep < num_of_eps) + break; + } + + return intf_wlan; +} +#endif /* BCMUSBDEV_COMPOSITE */ diff --git a/bcmdhd.101.10.361.x/dhd.h b/bcmdhd.101.10.361.x/dhd.h new file mode 100755 index 0000000..fd53811 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd.h @@ -0,0 +1,4655 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#if defined(LINUX)
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+#include
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+#ifdef DHD_BUZZZ_LOG_ENABLED
+#include
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+#if defined(BT_OVER_SDIO)
+#include
+#endif /* defined (BT_OVER_SDIO) */
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+int get_scheduler_policy(struct task_struct *p);
+#else /* LINUX */
+#define ENOMEM 1
+#define EFAULT 2
+#define EINVAL 3
+#define EIO 4
+#define ETIMEDOUT 5
+#define ENODATA 6
+#define EREMOTEIO 7
+#define ENODEV 8
+#define ERESTARTSYS 512
+#endif /* LINUX */
+#define MAX_EVENT 16
+
+#define ALL_INTERFACES 0xff
+
+/* H2D and D2H ring dump is enabled by default */
+#ifdef PCIE_FULL_DONGLE
+#define DHD_DUMP_PCIE_RINGS
+#endif /* PCIE_FULL_DONGLE */
+
+#include
+
+#include
+#include
+#include
+#include
+#if defined(DUMP_IOCTL_IOV_LIST) || defined(DHD_DEBUG)
+#include
+#endif /* DUMP_IOCTL_IOV_LIST || DHD_DEBUG */
+
+#if defined(BCMWDF)
+#include
+#include
+#endif /* (BCMWDF) */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+#include
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+#ifdef DHD_ERPOM
+#include
+#ifdef PCIE_OOB
+/*
+ * Both ERPOM and PCIE_OOB depend on ftdi to programme GPIOs.
+ * Running both features in parallel makes the GPIOs go out of sync.
+ * So only one feature is expected to be present at a time.
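+ * The #error directive below enforces this at build time.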
+ */
+#error "PCIE_OOB enabled"
+#endif /* PCIE_OOB */
+#endif /* DHD_ERPOM */
+
+#include
+#include
+
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+#define MAX_RESCHED_CNT 600
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+
+#if defined(LINUX) || defined(linux)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < \
+	KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT))
+#define WL_VENDOR_EXT_SUPPORT
+#endif /* 3.18 > KERNEL_VER >= 3.14 || defined(CONFIG_BCMDHD_VENDOR_EXT) */
+#endif /* defined (LINUX) || defined(linux) */
+
+#if defined(KEEP_ALIVE)
+/* The default KEEP_ALIVE period is 55 sec, to prevent the AP from sending keep-alive probe frames */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR "null_pkt"
+#endif /* KEEP_ALIVE */
+
+/* Enabled by default from here on; the WQ-based code will be removed later */
+#define DHD_USE_KTHREAD_FOR_LOGTRACE
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_ioctl;
+struct dhd_dbg;
+struct dhd_ts;
+#ifdef DNGL_AXI_ERROR_LOGGING
+struct dhd_axi_error_dump;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+	DHD_BUS_DOWN,		/* Not ready for frame transfers */
+	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
+	DHD_BUS_DATA,		/* Ready for frame transfers */
+	DHD_BUS_SUSPEND,	/* Bus has been suspended */
+	DHD_BUS_DOWN_IN_PROGRESS,	/* Bus going down */
+	DHD_BUS_REMOVE,		/* Bus has been removed */
+};
+
+/* The type of device reset to apply to the dongle */
+enum dhd_bus_devreset_type {
+	DHD_BUS_DEVRESET_ON = 0,		/* ON */
+	DHD_BUS_DEVRESET_OFF = 1,		/* OFF */
+	DHD_BUS_DEVRESET_FLR = 2,		/* FLR */
+	DHD_BUS_DEVRESET_FLR_FORCE_FAIL = 3,	/* FLR FORCE FAIL */
+	DHD_BUS_DEVRESET_QUIESCE = 4,		/* QUIESCE */
+};
+
+/*
+ * Bit fields to indicate the cleanup processes that must be waited on until they finish.
+ * Future synchronizable processes can add their bit filed below and update + * their functionalities accordingly + */ +#define DHD_BUS_BUSY_IN_TX 0x01 +#define DHD_BUS_BUSY_IN_SEND_PKT 0x02 +#define DHD_BUS_BUSY_IN_DPC 0x04 +#define DHD_BUS_BUSY_IN_WD 0x08 +#define DHD_BUS_BUSY_IN_IOVAR 0x10 +#define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20 +#define DHD_BUS_BUSY_SUSPEND_IN_PROGRESS 0x40 +#define DHD_BUS_BUSY_RESUME_IN_PROGRESS 0x80 +#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100 +#define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200 +#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400 +#define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \ + DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \ + DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_IN_CHECKDIED 0x800 +#define DHD_BUS_BUSY_IN_MEMDUMP 0x1000 +#define DHD_BUS_BUSY_IN_SSSRDUMP 0x2000 +#define DHD_BUS_BUSY_IN_LOGDUMP 0x4000 +#define DHD_BUS_BUSY_IN_HALDUMP 0x8000 +#define DHD_BUS_BUSY_IN_NAPI 0x10000 +#define DHD_BUS_BUSY_IN_DS_DEASSERT 0x20000 + +#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX +#define DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT +#define DHD_BUS_BUSY_SET_IN_DPC(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC +#define DHD_BUS_BUSY_SET_IN_WD(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD +#define DHD_BUS_BUSY_SET_IN_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR +#define DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR +#define DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE +#define DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED +#define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP +#define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP +#define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP +#define DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_HALDUMP +#define DHD_BUS_BUSY_SET_IN_NAPI(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_NAPI +#define DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DS_DEASSERT + +#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX +#define DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT +#define DHD_BUS_BUSY_CLEAR_IN_DPC(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC +#define DHD_BUS_BUSY_CLEAR_IN_WD(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD +#define DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR +#define DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR +#define 
DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE +#define DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_CHECKDIED +#define DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_MEMDUMP +#define DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP +#define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP +#define DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_HALDUMP +#define DHD_BUS_BUSY_CLEAR_IN_NAPI(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_NAPI +#define DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DS_DEASSERT + +#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) +#define DHD_BUS_BUSY_CHECK_IN_SEND_PKT(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SEND_PKT) +#define DHD_BUS_BUSY_CHECK_IN_DPC(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DPC) +#define DHD_BUS_BUSY_CHECK_IN_WD(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_WD) +#define DHD_BUS_BUSY_CHECK_IN_IOVAR(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_IOVAR) +#define DHD_BUS_BUSY_CHECK_IN_DHD_IOVAR(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DHD_IOVAR) +#define DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_SUSPEND_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RESUME_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) +#define DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_ALL(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) +#define DHD_BUS_BUSY_CHECK_IN_CHECKDIED(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_CHECKDIED) +#define DHD_BUS_BUSY_CHECK_IN_MEMDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_MEMDUMP) +#define DHD_BUS_BUSY_CHECK_IN_SSSRDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP) +#define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP) +#define DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) +#define DHD_BUS_BUSY_CHECK_IN_DS_DEASSERT(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DS_DEASSERT) +#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \ + ((dhdp)->dhd_bus_busy_state == 0) + +#define DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp)) + +#define 
DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp) \ + (DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \ + DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp)) + +#define DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp)) + +#define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS || \ + (dhdp)->busstate == DHD_BUS_REMOVE) + +#define DHD_BUS_CHECK_REMOVE(dhdp) \ + ((dhdp)->busstate == DHD_BUS_REMOVE) + +/* IOVar flags for common error checks */ +#define DHD_IOVF_PWRREQ_BYPASS (1<<0) /* flags to prevent bp access during host sleep state */ + +#define MAX_MTU_SZ (1600u) + +#ifdef PCIE_INB_DW +#define DHD_CHECK_CFG_IN_PROGRESS(dhdp) \ + ((INBAND_DW_ENAB((dhdp)->bus)) ? dhd_check_cfg_in_progress(dhdp) : FALSE) +#else +#define DHD_CHECK_CFG_IN_PROGRESS(dhdp) FALSE +#endif /* PCIE_INB_DW */ + +#ifndef USEC_PER_SEC +#define USEC_PER_SEC (1000 * 1000) +#endif +#if (defined (LINUX) || defined(linux)) +/* (u64)result = (u64)dividend / (u64)divisor */ +#define DIV_U64_BY_U64(dividend, divisor) div64_u64(dividend, divisor) + +/* (u64)result = (u64)dividend / (u32)divisor */ +#define DIV_U64_BY_U32(dividend, divisor) div_u64(dividend, divisor) + +/* Be careful while using this, as it divides dividend also + * (u32)remainder = (u64)dividend % (u32)divisor + * (u64)dividend = (u64)dividend / (u32)divisor + */ +#define DIV_AND_MOD_U64_BY_U32(dividend, divisor) do_div(dividend, divisor) + +/* (u32)remainder = (u64)dividend % (u32)divisor */ +#define MOD_U64_BY_U32(dividend, divisor) ({ \ + uint64 temp_dividend = (dividend); \ + uint32 rem = DIV_AND_MOD_U64_BY_U32(temp_dividend, (divisor)); \ + rem; \ +}) + +#define SEC_USEC_FMT \ + "%5llu.%06u" +#else +/* (u64)result = (u64)dividend / (u64)divisor */ +#define DIV_U64_BY_U64(dividend, divisor) (uint64)(dividend) / (uint64)(divisor) + +/* (u64)result = (u64)dividend / (u32)divisor */ +#define DIV_U64_BY_U32(dividend, divisor) (uint64)(dividend) / (uint32)(divisor) + +/* Be careful while using this, as it divides dividend also + * (u32)remainder = (u64)dividend % (u32)divisor + * (u64)dividend = (u64)dividend / (u32)divisor + */ +#define DIV_AND_MOD_U64_BY_U32(dividend, divisor) ({ \ + uint32 rem = (uint64)(dividend) % (uint32)(divisor); \ + (dividend) = (uint64)(dividend) / (uint32)(divisor); \ + rem; \ +}) + +/* (u32)remainder = (u64)dividend % (u32)divisor */ +#define MOD_U64_BY_U32(dividend, divisor) (uint32)((uint64)(dividend) % (uint32)(divisor)) + +#define SEC_USEC_FMT \ + "%015llu.%06u" +#endif /* LINUX || linux */ + +/* t: time in nano second */ +#define GET_SEC_USEC(t) \ + DIV_U64_BY_U32(t, NSEC_PER_SEC), \ + ((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC)) + +/* Download Types */ +typedef enum download_type { + FW, + NVRAM, + CLM_BLOB, + TXCAP_BLOB +} download_type_t; + +#if defined(NDIS) +/* Firmware requested operation mode */ +#define STA_MASK 0x0001 +#define HOSTAPD_MASK 0x0002 +#define WFD_MASK 0x0004 +#define SOFTAP_FW_MASK 0x0008 +#define P2P_GO_ENABLED 0x0010 +#define P2P_GC_ENABLED 0x0020 +#define CONCURENT_MASK 0x00F0 +#endif /* #if defined(NDIS) */ + +/* For supporting multiple interfaces */ +#define DHD_MAX_IFS 16 +#ifndef DHD_MAX_STATIC_IFS +#define DHD_MAX_STATIC_IFS 1 +#endif +#define DHD_DEL_IF -0xE +#define DHD_BAD_IF -0xF +#define DHD_DUMMY_INFO_IF 0xDEAF /* Hack i/f to handle events from INFO Ring */ +/* XXX to avoid build error for NDIS for 
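[Editor's aside] The 64-bit division helpers above are easy to misuse (note the caveat that DIV_AND_MOD_U64_BY_U32 modifies its dividend), so here is a minimal userspace sketch, not part of the patch, of how GET_SEC_USEC splits a nanosecond timestamp into the seconds/microseconds pair consumed by SEC_USEC_FMT. It reuses the non-Linux fallback definitions; the timestamp value is arbitrary.

#include <stdio.h>

typedef unsigned long long uint64;
typedef unsigned int uint32;

#define NSEC_PER_SEC  1000000000u
#define NSEC_PER_USEC 1000u

/* Non-Linux fallback variants, as defined above */
#define DIV_U64_BY_U32(dividend, divisor) ((uint64)(dividend) / (uint32)(divisor))
#define MOD_U64_BY_U32(dividend, divisor) ((uint32)((uint64)(dividend) % (uint32)(divisor)))
#define SEC_USEC_FMT "%015llu.%06u"
/* t: time in nanoseconds; expands to two printf arguments */
#define GET_SEC_USEC(t) \
	DIV_U64_BY_U32(t, NSEC_PER_SEC), \
	((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC))

int main(void)
{
	uint64 ts_ns = 1234567891234567ull; /* arbitrary nanosecond timestamp */
	/* prints "000000001234567.891234": whole seconds, then microseconds */
	printf(SEC_USEC_FMT "\n", GET_SEC_USEC(ts_ns));
	return 0;
}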
timebeing */ +#define DHD_EVENT_IF DHD_DUMMY_INFO_IF + +#if defined(LINUX) || defined(linux) +enum dhd_op_flags { +/* Firmware requested operation mode */ + DHD_FLAG_STA_MODE = (1 << (0)), /* STA only */ + DHD_FLAG_HOSTAP_MODE = (1 << (1)), /* SOFTAP only */ + DHD_FLAG_P2P_MODE = (1 << (2)), /* P2P Only */ + /* STA + P2P */ + DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE), + /* STA + SoftAP */ + DHD_FLAG_CONCURR_STA_HOSTAP_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_HOSTAP_MODE), + /* XXX MULTI_CHAN mode is meaningful only if it is conccurncy mode */ + DHD_FLAG_CONCURR_MULTI_CHAN_MODE = (1 << (4)), /* STA + P2P */ + /* Current P2P mode for P2P connection */ + DHD_FLAG_P2P_GC_MODE = (1 << (5)), + DHD_FLAG_P2P_GO_MODE = (1 << (6)), + DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */ + DHD_FLAG_IBSS_MODE = (1 << (8)), + DHD_FLAG_MFG_MODE = (1 << (9)), + DHD_FLAG_RSDB_MODE = (1 << (10)), + DHD_FLAG_MP2P_MODE = (1 << (11)) +}; +#endif /* defined (LINUX) || defined(linux) */ + +#if defined(BCMDONGLEHOST) +#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \ + (dhd ? ((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1) +#define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) \ + (dhd ? (((((dhd_pub_t *)dhd)->op_mode) & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) == \ + DHD_FLAG_CONCURR_STA_HOSTAP_MODE) : 0) +#else +#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) -1 +#define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) 0 +#endif /* defined (BCMDONGLEHOST) */ + +/* Max sequential TX/RX Control timeouts to set HANG event */ +#ifndef MAX_CNTL_TX_TIMEOUT +#define MAX_CNTL_TX_TIMEOUT 2 +#endif /* MAX_CNTL_TX_TIMEOUT */ +#ifndef MAX_CNTL_RX_TIMEOUT +#define MAX_CNTL_RX_TIMEOUT 1 +#endif /* MAX_CNTL_RX_TIMEOUT */ + +#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */ +#ifndef CUSTOM_SCAN_UNASSOC_ACTIVE_TIME +#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */ +#else +#define DHD_SCAN_UNASSOC_ACTIVE_TIME CUSTOM_SCAN_UNASSOC_ACTIVE_TIME +#endif /* CUSTOM_SCAN_UNASSOC_ACTIVE_TIME */ +#define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */ +#define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */ +#ifndef CUSTOM_SCAN_PASSIVE_TIME +#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */ +#else +#define DHD_SCAN_PASSIVE_TIME CUSTOM_SCAN_PASSIVE_TIME /* ms: Custom Passive setting from DHD */ +#endif /* CUSTOM_SCAN_PASSIVE_TIME */ + +#ifndef POWERUP_MAX_RETRY +#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */ +#endif +#ifndef POWERUP_WAIT_MS +#define POWERUP_WAIT_MS 2000 /* ms: time out in waiting wifi to come up */ +#endif +/* + * MAX_NVRAMBUF_SIZE determines the size of the Buffer in the DHD that holds + * the NVRAM data. 
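[Editor's aside] On the op-mode flags above: DHD_FLAG_CONCURR_STA_HOSTAP_MODE is a composite of two bits, so testing it requires mask-and-compare rather than a simple AND, which is what the DHD_OPMODE_STA_SOFTAP_CONCURR macro does. A standalone sketch, not part of the patch; the helper name is hypothetical.

#include <stdio.h>

#define DHD_FLAG_STA_MODE    (1 << 0)
#define DHD_FLAG_HOSTAP_MODE (1 << 1)
#define DHD_FLAG_P2P_MODE    (1 << 2)
#define DHD_FLAG_CONCURR_STA_HOSTAP_MODE (DHD_FLAG_STA_MODE | DHD_FLAG_HOSTAP_MODE)

/* Hypothetical helper: true only when BOTH the STA and SoftAP bits are set */
static int is_sta_softap_concurrent(int op_mode)
{
	return (op_mode & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) ==
	        DHD_FLAG_CONCURR_STA_HOSTAP_MODE;
}

int main(void)
{
	/* STA alone does not qualify */
	printf("%d\n", is_sta_softap_concurrent(DHD_FLAG_STA_MODE));                    /* 0 */
	/* STA + SoftAP qualifies */
	printf("%d\n", is_sta_softap_concurrent(DHD_FLAG_STA_MODE |
	                                        DHD_FLAG_HOSTAP_MODE));                /* 1 */
	/* extra bits do not break the check */
	printf("%d\n", is_sta_softap_concurrent(DHD_FLAG_STA_MODE |
	                                        DHD_FLAG_HOSTAP_MODE |
	                                        DHD_FLAG_P2P_MODE));                   /* 1 */
	return 0;
}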
That is the size of the buffer pointed by bus->vars + * This also needs to be increased to 24K to support NVRAM size higher than 16K + */ +#define MAX_NVRAMBUF_SIZE (24 * 1024) /* max nvram buf size */ +#define MAX_CLM_BUF_SIZE (48 * 1024) /* max clm blob size */ +#define MAX_TXCAP_BUF_SIZE (16 * 1024) /* max txcap blob size */ +#ifdef DHD_DEBUG +#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */ +#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */ +#endif /* DHD_DEBUG */ + +#ifndef CONFIG_BCMDHD_CLM_PATH +#ifdef OEM_ANDROID +#if defined(CUSTOMER_HW4) && defined(PLATFORM_SLP) +#define CONFIG_BCMDHD_CLM_PATH "/lib/firmware/bcmdhd_clm.blob" +#else +#define CONFIG_BCMDHD_CLM_PATH "/etc/wifi/bcmdhd_clm.blob" +#endif /* CUSTOMER_HW4 && PLATFORM_SLP */ +#elif defined(LINUX) || defined(linux) +#define CONFIG_BCMDHD_CLM_PATH "/var/run/bcmdhd_clm.blob" +#else +/* clm download will fail on empty path */ +#define CONFIG_BCMDHD_CLM_PATH "" +#endif /* OEM_ANDROID */ +#endif /* CONFIG_BCMDHD_CLM_PATH */ +#define WL_CCODE_NULL_COUNTRY "#n" + +#ifdef DHD_EFI +#define FW_VER_STR_LEN 256 +#else +#define FW_VER_STR_LEN 128 +#endif +#define FWID_STR_LEN 256 +#define CLM_VER_STR_LEN 128 +#define BUS_API_REV_STR_LEN 128 +#define FW_VER_STR "Version" +#define FWID_STR_1 "FWID: 01-" +#define FWID_STR_2 "FWID=01-" +extern char bus_api_revision[]; + +enum dhd_bus_wake_state { + WAKE_LOCK_OFF = 0, + WAKE_LOCK_PRIV = 1, + WAKE_LOCK_DPC = 2, + WAKE_LOCK_IOCTL = 3, + WAKE_LOCK_DOWNLOAD = 4, + WAKE_LOCK_TMOUT = 5, + WAKE_LOCK_WATCHDOG = 6, + WAKE_LOCK_LINK_DOWN_TMOUT = 7, + WAKE_LOCK_PNO_FIND_TMOUT = 8, + WAKE_LOCK_SOFTAP_SET = 9, + WAKE_LOCK_SOFTAP_STOP = 10, + WAKE_LOCK_SOFTAP_START = 11, + WAKE_LOCK_SOFTAP_THREAD = 12 +}; + +enum { + EVENT_BUF_POOL_LOW = 32, + EVENT_BUF_POOL_MEDIUM = 64, + EVENT_BUF_POOL_HIGH = 128, + EVENT_BUF_POOL_HIGHEST = 256 +}; + +#ifdef PCIE_INB_DW +enum dhd_bus_ds_state { + DW_DEVICE_DS_INVALID = -1, + DW_DEVICE_DS_DEV_SLEEP = 0, + DW_DEVICE_DS_DEV_SLEEP_PEND = 1, + DW_DEVICE_DS_DISABLED_WAIT = 2, + DW_DEVICE_DS_DEV_WAKE = 3, + DW_DEVICE_DS_ACTIVE = 4, + DW_DEVICE_HOST_SLEEP_WAIT = 5, + DW_DEVICE_HOST_SLEEP = 6, + DW_DEVICE_HOST_WAKE_WAIT = 7, + DW_DEVICE_DS_D3_INFORM_WAIT = 8 +}; +#endif /* PCIE_INB_DW */ + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF = 1, + DHD_PREALLOC_DATABUF = 2, + DHD_PREALLOC_OSL_BUF = 3, + DHD_PREALLOC_SKB_BUF = 4, + DHD_PREALLOC_WIPHY_ESCAN0 = 5, + DHD_PREALLOC_WIPHY_ESCAN1 = 6, + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, + DHD_PREALLOC_IF_FLOW_LKUP = 9, + /* 10 */ + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_STAT_REPORT_BUF = 18, + DHD_PREALLOC_WL_ESCAN = 19, + DHD_PREALLOC_FW_VERBOSE_RING = 20, + DHD_PREALLOC_FW_EVENT_RING = 21, + DHD_PREALLOC_DHD_EVENT_RING = 22, + DHD_PREALLOC_NAN_EVENT_RING = 23 +}; + +enum dhd_dongledump_mode { + DUMP_DISABLED = 0, + DUMP_MEMONLY = 1, + DUMP_MEMFILE = 2, + DUMP_MEMFILE_BUGON = 3, + DUMP_MEMFILE_MAX = 4 +}; + +enum dhd_dongledump_type { + DUMP_TYPE_RESUMED_ON_TIMEOUT = 1, + DUMP_TYPE_D3_ACK_TIMEOUT = 2, + DUMP_TYPE_DONGLE_TRAP = 3, + DUMP_TYPE_MEMORY_CORRUPTION = 4, + DUMP_TYPE_PKTID_AUDIT_FAILURE = 5, + DUMP_TYPE_PKTID_INVALID = 6, + DUMP_TYPE_SCAN_TIMEOUT = 7, + DUMP_TYPE_SCAN_BUSY = 8, + 
DUMP_TYPE_BY_SYSDUMP = 9, + DUMP_TYPE_BY_LIVELOCK = 10, + DUMP_TYPE_AP_LINKUP_FAILURE = 11, + DUMP_TYPE_AP_ABNORMAL_ACCESS = 12, + DUMP_TYPE_CFG_VENDOR_TRIGGERED = 13, + DUMP_TYPE_RESUMED_ON_TIMEOUT_TX = 14, + DUMP_TYPE_RESUMED_ON_TIMEOUT_RX = 15, + DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR = 16, + DUMP_TYPE_TRANS_ID_MISMATCH = 17, + DUMP_TYPE_IFACE_OP_FAILURE = 18, + DUMP_TYPE_DONGLE_INIT_FAILURE = 19, + DUMP_TYPE_READ_SHM_FAIL = 20, + DUMP_TYPE_DONGLE_HOST_EVENT = 21, + DUMP_TYPE_SMMU_FAULT = 22, + DUMP_TYPE_RESUMED_UNKNOWN = 23, + DUMP_TYPE_DUE_TO_BT = 24, + DUMP_TYPE_LOGSET_BEYOND_RANGE = 25, + DUMP_TYPE_BY_USER = 26, + DUMP_TYPE_CTO_RECOVERY = 27, + DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR = 28, + DUMP_TYPE_PROXD_TIMEOUT = 29, + DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE = 30, + DUMP_TYPE_PKTID_POOL_DEPLETED = 31, + DUMP_TYPE_ESCAN_SYNCID_MISMATCH = 32, + DUMP_TYPE_INVALID_SHINFO_NRFRAGS = 33 +}; + +enum dhd_hang_reason { + HANG_REASON_MASK = 0x8000, + HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001, + HANG_REASON_DONGLE_TRAP = 0x8002, + HANG_REASON_D3_ACK_TIMEOUT = 0x8003, + HANG_REASON_BUS_DOWN = 0x8004, + HANG_REASON_MSGBUF_LIVELOCK = 0x8006, + HANG_REASON_IFACE_DEL_FAILURE = 0x8007, + HANG_REASON_HT_AVAIL_ERROR = 0x8008, + HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, + HANG_REASON_PCIE_PKTID_ERROR = 0x800A, + HANG_REASON_IFACE_ADD_FAILURE = 0x800B, + HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR = 0x800C, + HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR = 0x800D, + HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR = 0x800E, + HANG_REASON_SCAN_BUSY = 0x800F, + HANG_REASON_BSS_UP_FAILURE = 0x8010, + HANG_REASON_BSS_DOWN_FAILURE = 0x8011, + HANG_REASON_IOCTL_SUSPEND_ERROR = 0x8012, + HANG_REASON_ESCAN_SYNCID_MISMATCH = 0x8013, + HANG_REASON_PCIE_LINK_DOWN_RC_DETECT = 0x8805, + HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806, + HANG_REASON_UNKNOWN = 0x8807, + HANG_REASON_PCIE_LINK_DOWN_EP_DETECT = 0x8808, + HANG_REASON_PCIE_CTO_DETECT = 0x8809, + HANG_REASON_MAX = 0x880A +}; + +#define WLC_E_DEAUTH_MAX_REASON 0x0FFF + +enum dhd_rsdb_scan_features { + /* Downgraded scan feature for AP active */ + RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01, + /* Downgraded scan feature for P2P Discovery */ + RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02, + /* Enable channel pruning for ROAM SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10, + /* Enable channel pruning for any SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20 +}; + +#define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1) +#ifdef DHD_EWPR_VER2 +#define VENDOR_SEND_HANG_EXT_INFO_VER 20181111 +#else +#define VENDOR_SEND_HANG_EXT_INFO_VER 20170905 +#endif /* DHD_EWPR_VER2 */ + +#define HANG_INFO_TRAP_T_NAME_MAX 6 +#define HANG_INFO_TRAP_T_REASON_IDX 0 +#define HANG_INFO_TRAP_T_SUBTYPE_IDX 2 +#define HANG_INFO_TRAP_T_OFFSET_IDX 3 +#define HANG_INFO_TRAP_T_EPC_IDX 4 +#define HANG_FIELD_STR_MAX_LEN 9 +#define HANG_FIELD_CNT_MAX 69 +#define HANG_FIELD_IF_FAILURE_CNT 10 +#define HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT 8 +#define HANG_FIELD_TRAP_T_STACK_CNT_MAX 16 +#define HANG_FIELD_MISMATCH_CNT 10 +#define HANG_INFO_BIGDATA_KEY_STACK_CNT 4 + +#define DEBUG_DUMP_TIME_BUF_LEN (16 + 1) +/* delimiter between values */ +#define HANG_KEY_DEL ' ' +#define HANG_RAW_DEL '_' + +#ifdef DHD_EWPR_VER2 +#define HANG_INFO_BIGDATA_EXTRA_KEY 4 +#define HANG_INFO_TRAP_T_EXTRA_KEY_IDX 5 +#endif + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif + +#define DHD_TX_CONTEXT_MASK 0xff +#define DHD_TX_START_XMIT 0x01 +#define DHD_TX_SEND_PKT 0x02 +#define 
DHD_IF_SET_TX_ACTIVE(ifp, context) \ + ifp->tx_paths_active |= context; +#define DHD_IF_CLR_TX_ACTIVE(ifp, context) \ + ifp->tx_paths_active &= ~context; +#define DHD_IF_IS_TX_ACTIVE(ifp) \ + (ifp->tx_paths_active) +/** + * DMA-able buffer parameters + * - dmaaddr_t is 32bits on a 32bit host. + * dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr + * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer. + */ +typedef struct dhd_dma_buf { + void *va; /* virtual address of buffer */ + uint32 len; /* user requested buffer length */ + dmaaddr_t pa; /* physical address of buffer */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ + uint32 _alloced; /* actual size of buffer allocated with align and pad */ +} dhd_dma_buf_t; + +/* host reordering packts logic */ +/* followed the structure to hold the reorder buffers (void **p) */ +typedef struct reorder_info { + void **p; + uint8 flow_id; + uint8 cur_idx; + uint8 exp_idx; + uint8 max_idx; + uint8 pend_pkts; +} reorder_info_t; + +/* throughput test packet format */ +typedef struct tput_pkt { + /* header */ + uint8 mac_sta[ETHER_ADDR_LEN]; + uint8 mac_ap[ETHER_ADDR_LEN]; + uint16 pkt_type; + uint8 PAD[2]; + /* data */ + uint32 crc32; + uint32 pkt_id; + uint32 num_pkts; +} tput_pkt_t; + +typedef enum { + TPUT_PKT_TYPE_NORMAL, + TPUT_PKT_TYPE_STOP +} tput_pkt_type_t; + +#define TPUT_TEST_MAX_PAYLOAD 1500 +#define TPUT_TEST_WAIT_TIMEOUT_DEFAULT 5000 + +#ifdef DHDTCPACK_SUPPRESS + +enum { + /* TCPACK suppress off */ + TCPACK_SUP_OFF, + /* Replace TCPACK in txq when new coming one has higher ACK number. */ + TCPACK_SUP_REPLACE, + /* TCPACK_SUP_REPLACE + delayed TCPACK TX unless ACK to PSH DATA. + * This will give benefits to Half-Duplex bus interface(e.g. SDIO) that + * 1. we are able to read TCP DATA packets first from the bus + * 2. TCPACKs that don't need to hurry delivered remains longer in TXQ so can be suppressed. 
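[Editor's aside] For the suppress modes listed here, a minimal sketch, not part of the patch, of the replacement idea behind TCPACK_SUP_REPLACE: a queued pure ACK is overwritten when a newer cumulative ACK for the same flow arrives, so only the most recent ACK is sent. The structure and helper names are hypothetical; the comparison is wraparound-safe.

#include <stdio.h>

struct queued_ack {
	int          valid;
	unsigned int flow_hash; /* identifies the TCP flow (addresses/ports) */
	unsigned int ack_no;    /* TCP cumulative acknowledgement number */
};

/* Returns 1 if the new ACK replaced the queued one (hypothetical helper). */
static int tcpack_try_replace(struct queued_ack *q, unsigned int flow, unsigned int ack)
{
	/* signed difference handles 32-bit sequence-number wraparound */
	if (q->valid && q->flow_hash == flow && (int)(ack - q->ack_no) > 0) {
		q->ack_no = ack; /* newer cumulative ACK supersedes the old one */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct queued_ack q = { 1, 0xabcd, 1000 };
	printf("replaced=%d ack=%u\n", tcpack_try_replace(&q, 0xabcd, 1500), q.ack_no);
	return 0;
}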
+ */
+	TCPACK_SUP_DELAYTX,
+	TCPACK_SUP_HOLD,
+	TCPACK_SUP_LAST_MODE
+};
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(BCM_ROUTER_DHD)
+#define DHD_DWM_TBL_SIZE 57
+/* DSCP to WMM AC mapping macros and structures */
+#define DHD_TRF_MGMT_DWM_FILTER_BIT 0x8
+#define DHD_TRF_MGMT_DWM_PRIO_BITS 0x7
+#define DHD_TRF_MGMT_DWM_FAVORED_BIT 0x10
+#define DHD_TRF_MGMT_DWM_PRIO(dwm_tbl_entry) ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_PRIO_BITS)
+#define DHD_TRF_MGMT_DWM_IS_FAVORED_SET(dwm_tbl_entry) \
+	((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry) \
+	((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_tbl_entry) \
+	((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FILTER_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry) \
+	((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FILTER_BIT)
+
+typedef struct {
+	uint8 dhd_dwm_enabled;
+	uint8 dhd_dwm_tbl[DHD_DWM_TBL_SIZE];
+} dhd_trf_mgmt_dwm_tbl_t;
+#endif /* BCM_ROUTER_DHD */
+
+#define DHD_NULL_CHK_AND_RET(cond) \
+	if (!cond) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		return; \
+	}
+
+#define DHD_NULL_CHK_AND_RET_VAL(cond, value) \
+	if (!cond) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		return value; \
+	}
+
+#define DHD_NULL_CHK_AND_GOTO(cond, label) \
+	if (!cond) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		goto label; \
+	}
+
+/*
+ * Accumulate the queue lengths of all flowring queues in a parent object, and
+ * assert flow control when the cumulative queue length crosses an upper
+ * threshold defined on the parent object. The upper threshold may be maintained
+ * per station, per interface, or per DHD instance.
+ *
+ * cumm_ctr_t abstraction:
+ * The cumm_ctr_t abstraction may be enhanced to use an object with a hysteresis
+ * pause on/off threshold callback.
+ * All macros take the address of the cumulative length in the parent object.
+ *
+ * Cumulative counters in parent objects may be updated without spinlocks.
+ *
+ * If a cumulative queue length is desired across all flows belonging to a
+ * station, an interface, or a DHD instance, then atomic updates are required,
+ * using atomic_t cumulative counters or a spinlock. BCM_ROUTER_DHD uses the
+ * Linux atomic_t construct.
+ */
+#if defined(BCM_ROUTER_DHD)
+
+typedef atomic_t cumm_ctr_t; /* BCM_ROUTER_DHD Linux: atomic operations */
+#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen))
+#define DHD_CUMM_CTR(clen) DHD_CUMM_CTR_PTR(clen) /* atomic accessor */
+#define DHD_CUMM_CTR_READ(clen) atomic_read(DHD_CUMM_CTR(clen)) /* read */
+#define DHD_CUMM_CTR_INIT(clen) \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); \
+	atomic_set(DHD_CUMM_CTR(clen), 0);
+#define DHD_CUMM_CTR_INCR(clen) \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); \
+	atomic_add(1, DHD_CUMM_CTR(clen)); \
+	ASSERT(DHD_CUMM_CTR_READ(clen) != 0); /* ensure it does not wrap */
+#define DHD_CUMM_CTR_DECR(clen) \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); \
+	ASSERT(DHD_CUMM_CTR_READ(clen) > 0); \
+	atomic_sub(1, DHD_CUMM_CTR(clen));
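[Editor's aside] A userspace analogue, not part of the patch, of the cumulative-counter pattern above, with C11 atomics standing in for the kernel's atomic_t: every enqueue/dequeue on a child queue also bumps/drops the shared parent counter, which a flow-control check can read lock-free. The threshold and helper names are hypothetical.

#include <stdio.h>
#include <stdatomic.h>

typedef atomic_uint cumm_ctr_t;

static cumm_ctr_t if_cumm_len;   /* per-interface cumulative queue length */
#define PAUSE_THRESHOLD 2048u    /* hypothetical upper threshold */

static void on_enqueue(void) { atomic_fetch_add(&if_cumm_len, 1); }
static void on_dequeue(void) { atomic_fetch_sub(&if_cumm_len, 1); }
/* lock-free flow-control check against the parent's cumulative length */
static int  should_pause(void) { return atomic_load(&if_cumm_len) >= PAUSE_THRESHOLD; }

int main(void)
{
	on_enqueue();
	on_enqueue();
	on_dequeue();
	printf("cumm=%u pause=%d\n", atomic_load(&if_cumm_len), should_pause());
	return 0;
}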
+#else /* ! BCM_ROUTER_DHD */
+
+/* Cumulative length is not supported. */
+typedef uint32 cumm_ctr_t;
+#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen))
+#define DHD_CUMM_CTR(clen) *(DHD_CUMM_CTR_PTR(clen)) /* accessor */
+#define DHD_CUMM_CTR_READ(clen) DHD_CUMM_CTR(clen) /* read access */
+#define DHD_CUMM_CTR_INIT(clen) \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_INCR(clen) \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_DECR(clen) \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+
+#endif /* ! BCM_ROUTER_DHD */
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+struct tdls_peer_node {
+	uint8 addr[ETHER_ADDR_LEN];
+	struct tdls_peer_node *next;
+};
+typedef struct tdls_peer_node tdls_peer_node_t;
+typedef struct {
+	tdls_peer_node_t *node;
+	uint8 tdls_peer_count;
+} tdls_peer_tbl_t;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
+typedef enum dhd_ring_id {
+	DEBUG_RING_ID_INVALID = 0x1,
+	FW_VERBOSE_RING_ID = 0x2,
+	DHD_EVENT_RING_ID = 0x3,
+	DRIVER_LOG_RING_ID = 0x4,
+	ROAM_STATS_RING_ID = 0x5,
+	BT_LOG_RING_ID = 0x6,
+	DEBUG_RING_ID_MAX = 0x7
+} dhd_ring_id_t;
+
+#ifdef DHD_LOG_DUMP
+#define DUMP_SSSR_ATTR_START 2
+#define DUMP_SSSR_ATTR_COUNT 10
+
+typedef enum {
+	SSSR_C0_D11_BEFORE = 0,
+	SSSR_C0_D11_AFTER = 1,
+	SSSR_C1_D11_BEFORE = 2,
+	SSSR_C1_D11_AFTER = 3,
+	SSSR_C2_D11_BEFORE = 4,
+	SSSR_C2_D11_AFTER = 5,
+	SSSR_DIG_BEFORE = 6,
+	SSSR_DIG_AFTER = 7
+} EWP_SSSR_DUMP;
+
+typedef enum {
+	DLD_BUF_TYPE_GENERAL = 0,
+	DLD_BUF_TYPE_PRESERVE = 1,
+	DLD_BUF_TYPE_SPECIAL = 2,
+	DLD_BUF_TYPE_ECNTRS = 3,
+	DLD_BUF_TYPE_FILTER = 4,
+	DLD_BUF_TYPE_ALL = 5
+} log_dump_type_t;
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+struct dhd_dbg_ring_buf
+{
+	void *dhd_pub;
+};
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+#define LOG_DUMP_MAGIC 0xDEB3DEB3
+#define HEALTH_CHK_BUF_SIZE 256
+#ifdef EWP_ECNTRS_LOGGING
+#define ECNTR_RING_ID 0xECDB
+#define ECNTR_RING_NAME "ewp_ecntr_ring"
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+#define RTT_RING_ID 0xADCD
+#define RTT_RING_NAME "ewp_rtt_ring"
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef EWP_BCM_TRACE
+#define BCM_TRACE_RING_ID 0xBCBC
+#define BCM_TRACE_RING_NAME "ewp_bcm_trace_ring"
+#endif /* EWP_BCM_TRACE */
+
+/*
+ * XXX: Always add new enums at the end to stay compatible with the parser;
+ * also add a new section in split_ret of EWP_config.py
+ */
+typedef enum {
+	LOG_DUMP_SECTION_GENERAL = 0,
+	LOG_DUMP_SECTION_ECNTRS,
+	LOG_DUMP_SECTION_SPECIAL,
+	LOG_DUMP_SECTION_DHD_DUMP,
+	LOG_DUMP_SECTION_EXT_TRAP,
+	LOG_DUMP_SECTION_HEALTH_CHK,
+	LOG_DUMP_SECTION_PRESERVE,
+	LOG_DUMP_SECTION_COOKIE,
+	LOG_DUMP_SECTION_FLOWRING,
+	LOG_DUMP_SECTION_STATUS,
+	LOG_DUMP_SECTION_RTT,
+	LOG_DUMP_SECTION_BCM_TRACE
+} log_dump_section_type_t;
+
+/* Each section in the debug_dump log file shall begin with a header */
+typedef struct {
+	uint32 magic;	/* 0xDEB3DEB3 */
+	uint32 type;	/* of type log_dump_section_type_t */
+	uint64 timestamp;
+	uint32 length;	/* length of the section that follows */
+} log_dump_section_hdr_t;
+
+/* The structure below describes the ring buffer.
*/ +struct dhd_log_dump_buf +{ +#if defined(LINUX) || defined(linux) || defined(ANDROID) || defined(OEM_ANDROID) + spinlock_t lock; +#endif + void *dhd_pub; + unsigned int enable; + unsigned int wraparound; + unsigned long max; + unsigned int remain; + char* present; + char* front; + char* buffer; +}; + +#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256 +#define DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE (80 * 1024) + +extern void dhd_log_dump_write(int type, char *binary_data, + int binary_len, const char *fmt, ...); +#endif /* DHD_LOG_DUMP */ + +/* DEBUG_DUMP SUB COMMAND */ +enum { + CMD_DEFAULT, + CMD_UNWANTED, + CMD_DISCONNECTED, + CMD_MAX +}; + +#define DHD_LOG_DUMP_TS_MULTIPLIER_VALUE 60 +#define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS "%02d%02d%02d%02d%02d%02d%04d" +#define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS "%02d%02d%02d%02d%02d%02d" +#define DHD_DEBUG_DUMP_TYPE "debug_dump" +#define DHD_DUMP_SUBSTR_UNWANTED "_unwanted" +#define DHD_DUMP_SUBSTR_DISCONNECTED "_disconnected" + +#ifdef DNGL_AXI_ERROR_LOGGING +#define DHD_DUMP_AXI_ERROR_FILENAME "axi_error" +#define DHD_DUMP_HAL_FILENAME_SUFFIX "_hal" +#endif /* DNGL_AXI_ERROR_LOGGING */ + +extern void get_debug_dump_time(char *str); +extern void clear_debug_dump_time(char *str); +#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING) +extern void copy_debug_dump_time(char *dest, char *src); +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */ + +#define FW_LOGSET_MASK_ALL 0xFFFFu + +#if defined(CUSTOMER_HW4) +#ifndef DHD_COMMON_DUMP_PATH +#define DHD_COMMON_DUMP_PATH "/data/log/wifi/" +#endif /* !DHD_COMMON_DUMP_PATH */ +#elif defined(CUSTOMER_HW2_DEBUG) +#define DHD_COMMON_DUMP_PATH PLATFORM_PATH +#elif defined(BOARD_HIKEY) +#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/" +#elif defined(CUSTOMER_HW_AMLOGIC) +#define DHD_COMMON_DUMP_PATH "/data/vendor/misc/wifi/" +#elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__) +#define DHD_COMMON_DUMP_PATH "/data/vendor/wifi/" +#elif defined(OEM_ANDROID) /* For Brix Live Image */ +#define DHD_COMMON_DUMP_PATH "/installmedia/" +#else /* Default */ +#define DHD_COMMON_DUMP_PATH "/root/" +#endif /* CUSTOMER_HW4 */ + +#define DHD_MEMDUMP_LONGSTR_LEN 180 + +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; + +#ifdef DHD_PKTTS +#if defined(linux) || defined(LINUX) +extern uint dhd_msgbuf_get_ipv6_id(void *pkt); +#else +static INLINE uint dhd_msgbuf_get_ipv6_id(void *pkt) { return 0; } +#endif /* linux || LINUX */ +int dhd_send_msg_to_ts(struct sk_buff *skb, void *data, int size); +#endif /* DHD_PKTTS */ + +#if defined(LINUX) || defined(linux) +int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size); +#endif /* LINUX || linux */ +#ifdef REPORT_FATAL_TIMEOUTS +typedef struct timeout_info { + void *scan_timer_lock; + void *join_timer_lock; + void *cmd_timer_lock; + void *bus_timer_lock; + uint32 scan_timeout_val; + uint32 join_timeout_val; + uint32 cmd_timeout_val; + uint32 bus_timeout_val; + bool scan_timer_active; + bool join_timer_active; + bool cmd_timer_active; + bool bus_timer_active; + osl_timer_t *scan_timer; + osl_timer_t *join_timer; + osl_timer_t *cmd_timer; + osl_timer_t *bus_timer; + uint32 cmd_request_id; + uint32 cmd; + uint32 cmd_join_error; + uint16 escan_syncid; + bool escan_aborted; + uint16 abort_syncid; +} timeout_info_t; +#endif /* REPORT_FATAL_TIMEOUTS */ + +#ifdef 
DMAMAP_STATS +typedef struct dmamap_stats { + uint64 txdata; + uint64 txdata_sz; + uint64 rxdata; + uint64 rxdata_sz; + uint64 ioctl_rx; + uint64 ioctl_rx_sz; + uint64 event_rx; + uint64 event_rx_sz; + uint64 info_rx; + uint64 info_rx_sz; + uint64 tsbuf_rx; + uint64 tsbuf_rx_sz; +} dma_stats_t; +#endif /* DMAMAP_STATS */ + +#ifdef BT_OVER_PCIE +enum dhd_bus_quiesce_state { + DHD_QUIESCE_INIT = 0, + REQUEST_BT_QUIESCE = 1, + RESPONSE_BT_QUIESCE = 2, + REQUEST_BT_RESUME = 3, + RESPONSE_BT_RESUME = 4 +}; +#endif /* BT_OVER_PCIE */ + +/* see wlfc_proto.h for tx status details */ +#define DHD_MAX_TX_STATUS_MSGS 9u + +#ifdef TX_STATUS_LATENCY_STATS +typedef struct dhd_if_tx_status_latency { + /* total number of tx_status received on this interface */ + uint64 num_tx_status; + /* cumulative tx_status latency for this interface */ + uint64 cum_tx_status_latency; +} dhd_if_tx_status_latency_t; +#endif /* TX_STATUS_LATENCY_STATS */ + +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) +#define AWDL_NUM_SLOTS 16u /* 0 to 15 are the AWDL slots FW operates on */ +#define AWDL_SLOT_MULT 4u /* AWDL slot information sent by FW is in multiples of 4 */ +typedef struct dhd_awdl_statistics { + uint64 slot_start_time; /* AWDL slot start time in us */ + uint64 cum_slot_time; /* Cumulative time for which this AWDL slot was active */ + uint64 num_slots; /* Number of times this AWDL slot was active */ + uint64 cum_tx_status_latency; /* cum tx_status latency while this AWDL slot is active */ + uint64 num_tx_status; /* Num of AWDL(flowring with role as AWDL) tx status received */ + uint64 fw_cum_slot_time; /* Cumulative FW time for which this AWDL slot was active */ + uint32 fw_slot_start_time; /* AWDL slot start time sent by FW in us */ +#if defined(BCMDBG) + uint32 tx_status[DHD_MAX_TX_STATUS_MSGS]; /* Dongle return val wrt TX packet sent out */ +#endif /* BCMDBG */ +} dhd_awdl_stats_t; +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ + +/* Bit in dhd_pub_t::gdb_proxy_stop_count set when firmware is stopped by GDB */ +#define GDB_PROXY_STOP_MASK 1 + +/* Enable Reserve STA flowrings only for Android */ +#if defined(OEM_ANDROID) +#define DHD_LIMIT_MULTI_CLIENT_FLOWRINGS +#endif /* OEM_ANDROID */ + +typedef enum { + FW_UNLOADED = 0, + FW_DOWNLOAD_IN_PROGRESS = 1, + FW_DOWNLOAD_DONE = 2 +} fw_download_status_t; + +#define PCIE_DB7_MAGIC_NUMBER_ISR_TRAP 0xdead0001 +#define PCIE_DB7_MAGIC_NUMBER_DPC_TRAP 0xdead0002 + +typedef struct dhd_db7_info { + bool fw_db7w_trap; + bool fw_db7w_trap_inprogress; + uint32 db7_magic_number; + + uint32 debug_db7_send_cnt; + uint32 debug_db7_trap_cnt; + uint32 debug_db7_timing_error_cnt; + uint64 debug_db7_send_time; + uint64 debug_db7_trap_time; + uint64 debug_max_db7_dur; + uint64 debug_max_db7_send_time; + uint64 debug_max_db7_trap_time; +} dhd_db7_info_t; + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE +typedef struct fwtrace_info fwtrace_info_t; /* forward declaration */ +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +typedef enum dhd_induce_error_states +{ + DHD_INDUCE_ERROR_CLEAR = 0x0, + DHD_INDUCE_IOCTL_TIMEOUT = 0x1, + DHD_INDUCE_D3_ACK_TIMEOUT = 0x2, + DHD_INDUCE_LIVELOCK = 0x3, + DHD_INDUCE_DROP_OOB_IRQ = 0x4, + DHD_INDUCE_DROP_AXI_SIG = 0x5, + DHD_INDUCE_TX_BIG_PKT = 0x6, + DHD_INDUCE_IOCTL_SUSPEND_ERROR = 0x7, + /* Big hammer induction */ + DHD_INDUCE_BH_ON_FAIL_ONCE = 0x10, + DHD_INDUCE_BH_ON_FAIL_ALWAYS = 0x11, + DHD_INDUCE_BH_CBP_HANG = 0x12, + DHD_INDUCE_ERROR_MAX +} dhd_induce_error_states_t; + +#ifdef DHD_HP2P +#define MAX_TX_HIST_BIN 16 +#define MAX_RX_HIST_BIN 10 +#define 
MAX_HP2P_FLOWS 16 +#define HP2P_PRIO 7 +#define HP2P_PKT_THRESH 48 +#define HP2P_TIME_THRESH 200 +#define HP2P_PKT_EXPIRY 40 +#define HP2P_TIME_SCALE 32 + +typedef struct hp2p_info { + void *dhd_pub; + uint16 flowid; + bool hrtimer_init; + void *ring; + struct hrtimer timer; + uint64 num_pkt_limit; + uint64 num_timer_limit; + uint64 num_timer_start; + uint64 tx_t0[MAX_TX_HIST_BIN]; + uint64 tx_t1[MAX_TX_HIST_BIN]; + uint64 rx_t0[MAX_RX_HIST_BIN]; +} hp2p_info_t; +#endif /* DHD_HP2P */ + +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) +/* Timestamps to trace dhd_logtrace_thread() */ +struct dhd_logtrace_thr_ts { + uint64 entry_time; + uint64 sem_down_time; + uint64 flush_time; + uint64 unexpected_break_time; + uint64 complete_time; +}; +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ + +/** + * Common structure for module and instance linkage. + * Instantiated once per hardware (dongle) instance that this DHD manages. + */ +typedef struct dhd_pub { + /* Linkage ponters */ + osl_t *osh; /* OSL handle */ + struct dhd_bus *bus; /* Bus module handle */ + struct dhd_prot *prot; /* Protocol module handle */ + struct dhd_info *info; /* Info module handle */ + struct dhd_dbg *dbg; /* Debugability module handle */ +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) + struct dhd_logtrace_thr_ts logtrace_thr_ts; +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ + + /* to NDIS developer, the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!! + */ + +#ifdef BCMDBUS + struct dbus_pub *dbus; +#endif /* BCMDBUS */ + + /* Internal dhd items */ + bool up; /* Driver up/down (to OS) */ +#ifdef WL_CFG80211 + spinlock_t up_lock; /* Synchronization with CFG80211 down */ +#endif /* WL_CFG80211 */ + bool txoff; /* Transmit flow-controlled */ + bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ + enum dhd_bus_state busstate; + uint dhd_bus_busy_state; /* Bus busy state */ + uint hdrlen; /* Total DHD header length (proto + bus) */ + uint maxctl; /* Max size rxctl request from proto to bus */ + uint rxsz; /* Rx buffer size bus module should use */ + uint8 wme_dp; /* wme discard priority */ +#ifdef DNGL_AXI_ERROR_LOGGING + uint32 axierror_logbuf_addr; + bool axi_error; + struct dhd_axi_error_dump *axi_err_dump; +#endif /* DNGL_AXI_ERROR_LOGGING */ + /* Dongle media info */ + bool iswl; /* Dongle-resident driver is wl */ + ulong drv_version; /* Version of dongle-resident driver */ + struct ether_addr mac; /* MAC address obtained from dongle */ + dngl_stats_t dstats; /* Stats for dongle-based data */ + + /* Additional stats for the bus level */ + ulong tx_packets; /* Data packets sent to dongle */ + ulong actual_tx_pkts; /* Actual data packets sent to dongle */ + ulong tot_txcpl; /* Total Tx completion received */ + ulong tx_dropped; /* Data packets dropped in dhd */ + ulong tx_multicast; /* Multicast data packets sent to dongle */ + ulong tx_errors; /* Errors in sending data to dongle */ + ulong tx_ctlpkts; /* Control packets sent to dongle */ + ulong tx_ctlerrs; /* Errors sending control frames to dongle */ + ulong rx_packets; /* Packets sent up the network interface */ + ulong rx_multicast; /* Multicast packets sent up the network interface */ + ulong rx_errors; /* Errors processing rx data packets */ + ulong rx_ctlpkts; /* Control frames processed from dongle */ + ulong rx_ctlerrs; /* Errors in processing rx control frames */ + ulong rx_dropped; /* Packets dropped locally (no memory) */ + ulong rx_flushed; 
/* Packets flushed due to unscheduled sendup thread */ + ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */ + ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */ + ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */ + ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */ + ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ + ulong fc_packets; /* Number of flow control pkts recvd */ + ulong tx_big_packets; /* Dropped data packets that are larger than MAX_MTU_SZ */ +#ifdef DMAMAP_STATS + /* DMA Mapping statistics */ + dma_stats_t dma_stats; +#endif /* DMAMAP_STATS */ +#ifdef WL_MONITOR + bool monitor_enable; +#endif /* WL_MONITOR */ + /* Last error return */ + int bcmerror; + uint tickcnt; + + /* Last error from dongle */ + int dongle_error; + + uint8 country_code[WLC_CNTRY_BUF_SZ]; + + /* Suspend disable flag and "in suspend" flag */ + int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */ + int in_suspend; /* flag set to 1 when early suspend called */ +#ifdef PNO_SUPPORT + int pno_enable; /* pno status : "1" is pno enable */ + int pno_suspend; /* pno suspend status : "1" is pno suspended */ +#endif /* PNO_SUPPORT */ + /* DTIM skip value, default 0(or 1) means wake each DTIM + * 3 means skip 2 DTIMs and wake up 3rd DTIM(9th beacon when AP DTIM is 3) + */ + int suspend_bcn_li_dtim; /* bcn_li_dtim value in suspend mode */ + int early_suspended; /* Early suspend status */ +#ifdef PKT_FILTER_SUPPORT + int dhcp_in_progress; /* DHCP period */ +#endif + + /* Pkt filter defination */ + char * pktfilter[100]; + int pktfilter_count; + + wl_country_t dhd_cspec; /* Current Locale info */ +#ifdef CUSTOM_COUNTRY_CODE + uint dhd_cflags; +#endif /* CUSTOM_COUNTRY_CODE */ +#if defined(DHD_BLOB_EXISTENCE_CHECK) + bool is_blob; /* Checking for existance of Blob file */ +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + bool force_country_change; + int op_mode; /* STA, HostAPD, WFD, SoftAP */ + +#if defined(LINUX) || defined(linux) +#if defined(OEM_ANDROID) + struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ + struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ +#endif /* defined(OEM_ANDROID) */ +#endif /* defined (LINUX) || defined(linux) */ + +#ifdef NDIS + PDEVICE_OBJECT pdo; + PDEVICE_OBJECT fdo; + PDEVICE_OBJECT nextDeviceObj; +#if defined(BCMWDF) + WDFDEVICE wdfDevice; +#endif /* (BCMWDF) */ +#endif /* NDIS */ +#ifdef PROP_TXSTATUS + bool wlfc_enabled; + int wlfc_mode; + void* wlfc_state; + /* + Mode in which the dhd flow control shall operate. Must be set before + traffic starts to the device. + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + 3 - Only AMPDU hostreorder used. no wlfc. + */ + uint8 proptxstatus_mode; + bool proptxstatus_txoff; + bool proptxstatus_module_ignore; + bool proptxstatus_credit_ignore; + bool proptxstatus_txstatus_ignore; + + bool wlfc_rxpkt_chk; +#ifdef LIMIT_BORROW + bool wlfc_borrow_allowed; +#endif /* LIMIT_BORROW */ + /* + * implement below functions in each platform if needed. 
+ */
+	/* platform-specific function that decides whether to skip flow control */
+	bool (*skip_fc)(void * dhdp, uint8 ifx);
+	/* platform-specific functions for wlfc_enable and wlfc_deinit */
+	void (*plat_init)(void *dhd);
+	void (*plat_deinit)(void *dhd);
+#ifdef DHD_WLFC_THREAD
+	bool wlfc_thread_go;
+#if defined(LINUX)
+	struct task_struct* wlfc_thread;
+	wait_queue_head_t wlfc_wqhead;
+#else
+	#error "wlfc thread not enabled"
+#endif /* LINUX */
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+	void *pno_state;
+#endif
+#ifdef RTT_SUPPORT
+	void *rtt_state;
+	bool rtt_supported;
+#endif
+#ifdef ROAM_AP_ENV_DETECTION
+	bool roam_env_detection;
+#endif
+	bool dongle_isolation;
+	bool is_pcie_watchdog_reset;
+
+/* Begin - Variables to track Bus Errors */
+	bool dongle_trap_occured; /* flag for sending HANG event to upper layer */
+#ifdef BT_OVER_PCIE
+	bool dongle_trap_due_to_bt; /* flag to indicate that the dongle has trapped due to BT */
+#endif /* BT_OVER_PCIE */
+	bool iovar_timeout_occured; /* flag to indicate an iovar resumed on timeout */
+	bool invalid_shinfo_nrfrags; /* flag to indicate invalid shinfo nrfrags */
+	bool is_sched_error; /* flag to indicate a timeout due to a scheduling issue */
+#ifdef PCIE_FULL_DONGLE
+	bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */
+	bool livelock_occured; /* flag to indicate a livelock occurred */
+	bool pktid_audit_failed; /* flag to indicate a pktid audit failure */
+#endif /* PCIE_FULL_DONGLE */
+	bool iface_op_failed; /* flag to indicate an interface operation failed */
+	bool scan_timeout_occurred; /* flag to indicate a scan has timed out */
+	bool scan_busy_occurred; /* flag to indicate scan busy occurred */
+#ifdef BT_OVER_SDIO
+	bool is_bt_recovery_required;
+#endif
+	bool smmu_fault_occurred; /* flag to indicate an SMMU fault */
+/*
+ * Add any new variables to track Bus errors above
+ * this line. Also ensure that the variable is
+ * cleared from dhd_clear_bus_errors
+ */
+/* End - Variables to track Bus Errors */
+
+	int hang_was_sent;
+	int hang_was_pending;
+	int rxcnt_timeout; /* counter of rxcnt timeouts, to send HANG */
+	int txcnt_timeout; /* counter of txcnt timeouts, to send HANG */
+#ifdef BCMPCIE
+	int d3ackcnt_timeout; /* counter of d3ack timeouts, to send HANG */
+#endif /* BCMPCIE */
+	bool hang_report; /* enable hang report by default */
+	uint16 hang_reason; /* reason codes for HANG event */
+#if defined(DHD_HANG_SEND_UP_TEST)
+	uint req_hang_type;
+#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+	uint hang_count;
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+#ifdef WLTDLS
+	bool tdls_enable;
+#endif
+	struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+	#define WLC_IOCTL_MAXBUF_FWCAP 1024
+	char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP];
+	#define DHD_IOCTL_MAXBUF_DHDCAP 1024
+	char dhd_capabilities[DHD_IOCTL_MAXBUF_DHDCAP];
+	#define MAXSKBPEND 1024
+	void *skbbuf[MAXSKBPEND];
+	uint32 store_idx;
+	uint32 sent_idx;
+#ifdef DHDTCPACK_SUPPRESS
+	uint8 tcpack_sup_mode; /* TCPACK suppress mode */
+	void *tcpack_sup_module; /* TCPACK suppress module */
+	uint32 tcpack_sup_ratio;
+	uint32 tcpack_sup_delay;
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(ARP_OFFLOAD_SUPPORT)
+	uint32 arp_version;
+	bool hmac_updated;
+#endif
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+	bool fw_4way_handshake; /* Whether the firmware will do the 4-way handshake.
*/ +#endif +#ifdef BCMINTERNAL + bool loopback; /* 1- enable loopback of tx packets, 0 - disable */ +#endif /* BCMINTERNAL */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool dhd_bug_on; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ +#ifdef CUSTOM_SET_CPUCORE + struct task_struct * current_dpc; + struct task_struct * current_rxf; + int chan_isvht80; +#endif /* CUSTOM_SET_CPUCORE */ + void *sta_pool; /* pre-allocated pool of sta objects */ + void *staid_allocator; /* allocator of sta indexes */ +#ifdef PCIE_FULL_DONGLE + bool flow_rings_inited; /* set this flag after initializing flow rings */ +#endif /* PCIE_FULL_DONGLE */ + void *flowid_allocator; /* unique flowid allocator */ +#if defined(DHD_HTPUT_TUNABLES) + void *htput_flowid_allocator; /* unique htput flowid allocator */ + uint8 htput_client_flow_rings; /* current number of htput client flowrings */ + uint8 htput_flow_ring_start; /* start index of htput flow rings */ +#endif /* DHD_HTPUT_TUNABLES */ + void *flow_ring_table; /* flow ring table, include prot and bus info */ + void *if_flow_lkup; /* per interface flowid lkup hash table */ + void *flowid_lock; /* per os lock for flowid info protection */ + void *flowring_list_lock; /* per os lock for flowring list protection */ + uint8 max_multi_client_flow_rings; + uint8 multi_client_flow_rings; + uint32 num_h2d_rings; /* Max h2d rings including static and dynamic rings */ + uint32 max_tx_flowid; /* used to validate flowid */ + cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */ + cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */ + uint32 d2h_sync_mode; /* D2H DMA completion sync mode */ + uint8 flow_prio_map[NUMPRIO]; + uint8 flow_prio_map_type; + char enable_log[MAX_EVENT]; + bool dma_d2h_ring_upd_support; + bool dma_h2d_ring_upd_support; + bool dma_ring_upd_overwrite; /* host overwrites support setting */ + + bool idma_enable; + uint idma_inited; + + bool ifrm_enable; /* implicit frm enable */ + uint ifrm_inited; /* implicit frm init */ + + bool dar_enable; /* use DAR registers */ + uint dar_inited; + + bool fast_delete_ring_support; /* fast delete ring supported */ + +#ifdef DHD_WMF + bool wmf_ucast_igmp; +#ifdef DHD_IGMP_UCQUERY + bool wmf_ucast_igmp_query; +#endif +#ifdef DHD_UCAST_UPNP + bool wmf_ucast_upnp; +#endif +#endif /* DHD_WMF */ +#if defined(BCM_ROUTER_DHD) + dhd_trf_mgmt_dwm_tbl_t dhd_tm_dwm_tbl; +#endif /* BCM_ROUTER_DHD */ +#ifdef DHD_L2_FILTER + unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */ +#endif /* DHD_L2_FILTER */ +#ifdef DHD_SSSR_DUMP + bool sssr_inited; + bool sssr_dump_collected; /* Flag to indicate sssr dump is collected */ + sssr_reg_info_cmn_t *sssr_reg_info; + uint8 *sssr_mempool; +#ifdef DHD_SSSR_DUMP_BEFORE_SR + uint *sssr_d11_before[MAX_NUM_D11_CORES_WITH_SCAN]; + uint *sssr_dig_buf_before; +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + uint *sssr_d11_after[MAX_NUM_D11_CORES_WITH_SCAN]; + bool sssr_d11_outofreset[MAX_NUM_D11_CORES_WITH_SCAN]; + uint *sssr_dig_buf_after; + uint32 sssr_dump_mode; + bool collect_sssr; /* Flag to indicate SSSR dump is required */ + bool fis_triggered; +#endif /* DHD_SSSR_DUMP */ +#ifdef DHD_SDTC_ETB_DUMP + etb_addr_info_t etb_addr_info; + uint8 *sdtc_etb_mempool; + bool sdtc_etb_inited; + bool collect_sdtc; /* Flag to indicate SDTC dump is required */ +#endif /* DHD_SDTC_ETB_DUMP */ + uint8 *soc_ram; + uint32 soc_ram_length; + uint32 memdump_type; +#ifdef DHD_COREDUMP + char memdump_str[DHD_MEMDUMP_LONGSTR_LEN]; +#endif /* DHD_COREDUMP */ +#ifdef DHD_RND_DEBUG + uint8 *rnd_buf; + uint32 rnd_len; 
+#endif /* DHD_RND_DEBUG */ +#ifdef DHD_FW_COREDUMP + uint32 memdump_enabled; +#ifdef DHD_DEBUG_UART + bool memdump_success; +#endif /* DHD_DEBUG_UART */ +#endif /* DHD_FW_COREDUMP */ +#ifdef PCIE_FULL_DONGLE +#ifdef WLTDLS + tdls_peer_tbl_t peer_tbl; +#endif /* WLTDLS */ +#if defined(LINUX) || defined(linux) + uint8 tx_in_progress; +#endif /* LINUX || linux */ +#endif /* PCIE_FULL_DONGLE */ +#ifdef CACHE_FW_IMAGES + char *cached_fw; + int cached_fw_length; + char *cached_nvram; + int cached_nvram_length; + char *cached_clm; + int cached_clm_length; + char *cached_txcap; + int cached_txcap_length; +#endif +#ifdef KEEP_JP_REGREV +/* XXX Needed by customer's request */ + char vars_ccode[WLC_CNTRY_BUF_SZ]; + uint vars_regrev; +#endif /* KEEP_JP_REGREV */ +#ifdef WLTDLS + uint32 tdls_mode; +#endif +#ifdef GSCAN_SUPPORT + bool lazy_roam_enable; +#endif +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + bool apf_set; +#endif /* PKT_FILTER_SUPPORT && APF */ + void *macdbg_info; +#ifdef DHD_WET + void *wet_info; +#endif + bool h2d_phase_supported; + bool force_dongletrap_on_bad_h2d_phase; + uint32 dongle_trap_data; + fw_download_status_t fw_download_status; + trap_t last_trap_info; /* trap info from the last trap */ + uint8 rand_mac_oui[DOT11_OUI_LEN]; +#ifdef DHD_LOSSLESS_ROAMING + uint8 dequeue_prec_map; + uint8 prio_8021x; +#endif +#ifdef WL_NATOE + struct dhd_nfct_info *nfct; + spinlock_t nfct_lock; +#endif /* WL_NATOE */ + /* timesync link */ + struct dhd_ts *ts; + bool d2h_hostrdy_supported; +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_t block_bus; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING) + bool d11_tx_status; +#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */ + uint16 ndo_version; /* ND offload version supported */ +#ifdef NDO_CONFIG_SUPPORT + bool ndo_enable; /* ND offload feature enable */ + bool ndo_host_ip_overflow; /* # of host ip addr exceed FW capacity */ + uint32 ndo_max_host_ip; /* # of host ip addr supported by FW */ +#endif /* NDO_CONFIG_SUPPORT */ +#if defined(DHD_LOG_DUMP) +#if defined(DHD_EFI) + uint8 log_capture_enable; +#endif /* DHD_EFI */ + /* buffer to hold 'dhd dump' data before dumping to file */ + uint8 *concise_dbg_buf; + uint64 last_file_posn; + int logdump_periodic_flush; +#ifdef EWP_ECNTRS_LOGGING + void *ecntr_dbg_ring; +#endif +#ifdef EWP_RTT_LOGGING + void *rtt_dbg_ring; +#endif +#ifdef EWP_BCM_TRACE + void *bcm_trace_dbg_ring; +#endif +#ifdef DNGL_EVENT_SUPPORT + uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE]; +#endif + void *logdump_cookie; +#endif /* DHD_LOG_DUMP */ + uint32 dhd_console_ms; /** interval for polling the dongle for console (log) messages */ + bool ext_trap_data_supported; + uint32 *extended_trap_data; +#ifdef DUMP_IOCTL_IOV_LIST + /* dump iovar list */ + dll_t dump_iovlist_head; + uint8 dump_iovlist_len; +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef REPORT_FATAL_TIMEOUTS + timeout_info_t *timeout_info; + uint16 esync_id; /* used to track escans */ + osl_atomic_t set_ssid_rcvd; /* to track if WLC_E_SET_SSID is received during join IOVAR */ + bool secure_join; /* field to note that the join is secure or not */ +#endif /* REPORT_FATAL_TIMEOUTS */ +#ifdef CUSTOM_SET_ANTNPM + uint32 mimo_ant_set; +#endif /* CUSTOM_SET_ANTNPM */ +#ifdef CUSTOM_SET_OCLOFF + bool ocl_off; +#endif /* CUSTOM_SET_OCLOFF */ +#ifdef DHD_DEBUG + /* memwaste feature */ + dll_t mw_list_head; /* memwaste list head */ + uint32 mw_id; /* memwaste list unique id */ +#endif /* DHD_DEBUG */ +#ifdef WLTDLS + spinlock_t tdls_lock; +#endif /* 
WLTDLS */ + uint pcie_txs_metadata_enable; +#ifdef BTLOG + bool bt_logging; + bool submit_count_WAR; /* submission count WAR */ + bool bt_logging_enabled; +#endif /* BTLOG */ + uint wbtext_policy; /* wbtext policy of dongle */ + bool wbtext_support; /* for product policy only */ +#ifdef PCIE_OOB + bool d2h_no_oob_dw; +#endif /* PCIE_OOB */ +#ifdef PCIE_INB_DW + bool d2h_inband_dw; + enum dhd_bus_ds_state ds_state; +#endif /* PCIE_INB_DW */ + bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */ +#ifdef SNAPSHOT_UPLOAD + bool snapshot_upload; +#endif /* SNAPSHOT_UPLOAD */ + tput_test_t tput_data; + uint64 tput_start_ts; + uint64 tput_stop_ts; + uint dhd_watchdog_ms_backup; + bool wl_event_enabled; + bool logtrace_pkt_sendup; +#ifdef GDB_PROXY + /* True if firmware runs under gdb control (this may cause timeouts at any point) */ + bool gdb_proxy_active; + /* True if deadman_to shall be forced to 0 */ + bool gdb_proxy_nodeadman; + /* Counter incremented at each firmware stop/go transition. LSB (GDB_PROXY_STOP_MASK) + * is set when firmwar eis stopped, clear when running + */ + uint32 gdb_proxy_stop_count; +#endif /* GDB_PROXY */ + int debug_dump_subcmd; + uint64 debug_dump_time_sec; + bool hscb_enable; +#if defined(DHD_AWDL) +#if defined(AWDL_SLOT_STATS) + dhd_awdl_stats_t awdl_stats[AWDL_NUM_SLOTS]; + uint8 awdl_tx_status_slot; /* Slot in which AWDL is active right now */ + void *awdl_stats_lock; /* Lock to protect against parallel AWDL stats updates */ + uint16 awdl_aw_counter; + uint32 pkt_latency; +#endif /* AWDL_SLOT_STATS */ + uint32 awdl_ifidx; + uint16 awdl_seq; + uint8 awdl_minext; + uint8 awdl_presmode; + bool awdl_llc_enabled; +#endif /* DHD_AWDL */ + uint32 logset_prsrv_mask; +#ifdef DHD_PKT_LOGGING + struct dhd_pktlog *pktlog; + char debug_dump_time_pktlog_str[DEBUG_DUMP_TIME_BUF_LEN]; + bool pktlog_debug; +#endif /* DHD_PKT_LOGGING */ +#ifdef EWP_EDL + bool dongle_edl_support; + dhd_dma_buf_t edl_ring_mem; +#endif /* EWP_EDL */ +#if defined (LINUX) || defined(linux) + struct mutex ndev_op_sync; +#endif /* defined (LINUX) || defined(linux) */ + bool debug_buf_dest_support; + uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX]; +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT + char *hang_info; + int hang_info_cnt; + char debug_dump_time_hang_str[DEBUG_DUMP_TIME_BUF_LEN]; +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */ + char debug_dump_time_str[DEBUG_DUMP_TIME_BUF_LEN]; + void *event_log_filter; + uint tput_test_done; +#if defined(LINUX) || defined(linux) + wait_queue_head_t tx_tput_test_wait; + wait_queue_head_t tx_completion_wait; +#ifdef WL_NANHO + void *nanhoi; /* NANHO instance */ +#endif /* WL_NANHO */ +#endif /* defined(LINUX) || defined(linux) */ +#ifdef DHD_ERPOM + bool enable_erpom; + pom_func_handler_t pom_wlan_handler; + int (*pom_func_register)(pom_func_handler_t *func); + int (*pom_func_deregister)(pom_func_handler_t *func); + int (*pom_toggle_reg_on)(uchar func_id, uchar reason); +#endif /* DHD_ERPOM */ +#if defined(DHD_H2D_LOG_TIME_SYNC) +#define DHD_H2D_LOG_TIME_STAMP_MATCH (10000) /* 10 Seconds */ + /* + * Interval for updating the dongle console message time stamp with the Host (DHD) + * time stamp + */ + uint32 dhd_rte_time_sync_ms; +#endif /* DHD_H2D_LOG_TIME_SYNC */ + uint32 batch_tx_pkts_cmpl; + uint32 batch_tx_num_pkts; +#ifdef DHD_EFI + bool insert_random_mac; + /* threshold # of pkts Tx'd/Rx'd after which efi dhd + * will switch intr poll period to 100us + */ + uint64 npkts_thresh; + /* the period of time in which if no pkt is Tx'd/Rx'd + * efi dhd will 
+	uint64 pkt_intvl_thresh_us;
+	/* time stamp of last Tx'd pkt */
+	uint64 tx_last_pkt_ts;
+	/* time stamp of last Rx'd pkt */
+	uint64 rx_last_pkt_ts;
+	/* used to temporarily store the current intr poll period
+	 * during EFI DHD iovar execution, so as to restore it back
+	 * once the iovar completes
+	 */
+	uint32 cur_intr_poll_period;
+	/* the intr poll period set by the user through the dhd iovar */
+	uint32 iovar_intr_poll_period;
+	bool pcie_readshared_done;
+#endif /* DHD_EFI */
+#ifdef DHD_DUMP_MNGR
+	struct _dhd_dump_file_manage *dump_file_manage;
+#endif /* DHD_DUMP_MNGR */
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+	fwtrace_info_t *fwtrace_info;	/* f/w trace information */
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+	bool event_log_max_sets_queried;
+	uint32 event_log_max_sets;
+#ifdef DHD_STATUS_LOGGING
+	void *statlog;
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_HP2P
+	/* whether enabled from host by user iovar */
+	bool hp2p_enable;
+	bool hp2p_infra_enable;
+	/* whether fw supports it */
+	bool hp2p_capable;
+	bool hp2p_mf_enable;
+	bool hp2p_ts_capable;
+	uint16 pkt_thresh;
+	uint16 time_thresh;
+	uint16 pkt_expiry;
+	hp2p_info_t hp2p_info[MAX_HP2P_FLOWS];
+	/* Flag to allow more hp2p ring creation */
+	bool hp2p_ring_more;
+#endif /* DHD_HP2P */
+#ifdef DHD_DB0TS
+	bool db0ts_capable;
+#endif /* DHD_DB0TS */
+	bool extdtxs_in_txcpl;
+	bool hostrdy_after_init;
+	uint16 dhd_induce_error;
+	uint16 dhd_induce_bh_error;
+	int wlc_ver_major;
+	int wlc_ver_minor;
+#ifdef DHD_PKTTS
+	/* stores the packet meta data buffer length queried via iovar */
+	uint16 pkt_metadata_version;
+	uint16 pkt_metadata_buflen;
+#endif
+#ifdef SUPPORT_SET_TID
+	uint8 tid_mode;
+	uint32 target_uid;
+	uint8 target_tid;
+#endif /* SUPPORT_SET_TID */
+#ifdef CONFIG_SILENT_ROAM
+	bool sroam_turn_on;	/* Silent roam monitor enable flag */
+	bool sroamed;		/* Silent roam monitor check flag */
+#endif /* CONFIG_SILENT_ROAM */
+#ifdef DHD_PKTDUMP_ROAM
+	void *pktcnts;
+#endif /* DHD_PKTDUMP_ROAM */
+	dhd_db7_info_t db7_trap;
+	bool fw_preinit;
+	bool ring_attached;
+#ifdef DHD_PCIE_RUNTIMEPM
+	bool rx_pending_due_to_rpm;
+#endif /* DHD_PCIE_RUNTIMEPM */
+	bool disable_dtim_in_suspend;	/* Disable set bcn_li_dtim in suspend */
+	union {
+		wl_roam_stats_v1_t v1;
+	} roam_evt;
+	bool arpoe_enable;
+	bool arpol_configured;
+#ifdef DHD_TX_PROFILE
+	bool tx_profile_enab;
+	uint8 num_profiles;
+	dhd_tx_profile_protocol_t *protocol_filters;
+#endif /* defined(DHD_TX_PROFILE) */
+#ifdef DHD_MEM_STATS
+	void *mem_stats_lock;
+	uint64 txpath_mem;
+	uint64 rxpath_mem;
+#endif /* DHD_MEM_STATS */
+#ifdef DHD_LB_RXP
+	atomic_t lb_rxp_flow_ctrl;
+	uint32 lb_rxp_stop_thr;
+	uint32 lb_rxp_strt_thr;
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_STATS
+	uint64 lb_rxp_stop_thr_hitcnt;
+	uint64 lb_rxp_strt_thr_hitcnt;
+	uint64 lb_rxp_napi_sched_cnt;
+	uint64 lb_rxp_napi_complete_cnt;
+#endif /* DHD_LB_STATS */
+	bool check_trap_rot;
+	/* if FW supports host insertion of SFH LLC */
+	bool host_sfhllc_supported;
+#ifdef DHD_GRO_ENABLE_HOST_CTRL
+	bool permitted_gro;
+#endif /* DHD_GRO_ENABLE_HOST_CTRL */
+#ifdef CSI_SUPPORT
+	struct list_head csi_list;
+	int csi_count;
+#endif /* CSI_SUPPORT */
+	char *clm_path;		/* module_param: path to clm vars file */
+	char *conf_path;	/* module_param: path to config vars file */
+	struct dhd_conf *conf;	/* Bus module handle */
+	void *adapter;		/* adapter information, interrupt, fw path, etc. */
+	void *event_params;
+#ifdef BCMDBUS
+	bool dhd_remove;
+#endif /* BCMDBUS */
+#ifdef WL_ESCAN
+	struct wl_escan_info *escan;
+#endif
+#if defined(WL_WIRELESS_EXT)
+	void *wext_info;
+#endif
+#ifdef WL_EXT_IAPSTA
+	void *iapsta_params;
+#endif
+	int hostsleep;
+#ifdef SENDPROB
+	bool recv_probereq;
+#endif
+#ifdef DHD_NOTIFY_MAC_CHANGED
+	bool skip_dhd_stop;
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+#ifdef WL_EXT_GENL
+	void *zconf;
+#endif
+} dhd_pub_t;
+
+#if defined(__linux__)
+int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on);
+#else
+static INLINE int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on) { return 0; }
+#endif /* __linux__ */
+
+typedef struct {
+	uint rxwake;
+	uint rcwake;
+#ifdef DHD_WAKE_RX_STATUS
+	uint rx_bcast;
+	uint rx_arp;
+	uint rx_mcast;
+	uint rx_multi_ipv6;
+	uint rx_icmpv6;
+	uint rx_icmpv6_ra;
+	uint rx_icmpv6_na;
+	uint rx_icmpv6_ns;
+	uint rx_multi_ipv4;
+	uint rx_multi_other;
+	uint rx_ucast;
+#endif /* DHD_WAKE_RX_STATUS */
+#ifdef DHD_WAKE_EVENT_STATUS
+	uint rc_event[WLC_E_LAST];
+#endif /* DHD_WAKE_EVENT_STATUS */
+} wake_counts_t;
+
+#if defined(PCIE_FULL_DONGLE)
+/*
+ * XXX: WARNING: dhd_wlfc.h also defines a dhd_pkttag_t,
+ * making wlfc incompatible with PCIE_FULL_DONGLE
+ */
+
+/* Packet Tag for PCIE Full Dongle DHD */
+typedef struct dhd_pkttag_fd {
+	uint16 flowid;	/* Flowring Id */
+	uint16 ifid;
+#ifdef DHD_SBN
+	uint8 pkt_udr;
+	uint8 pad;
+#endif /* DHD_SBN */
+#if defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)
+	uint16 dataoff;	/* start of packet */
+#endif /* BCM_ROUTER_DHD && BCM_GMAC3 */
+#ifndef DHD_PCIE_PKTID
+	uint16 dma_len;	/* pkt len for DMA_MAP/UNMAP */
+	dmaaddr_t pa;	/* physical address */
+	void *dmah;	/* dma mapper handle */
+	void *secdma;	/* secure dma sec_cma_info handle */
+#endif /* !DHD_PCIE_PKTID */
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_PKTTS)
+	uint64 q_time_us;	/* time when tx pkt queued to flowring */
+#endif /* TX_STATUS_LATENCY_STATS || DHD_PKTTS */
+} dhd_pkttag_fd_t;
+
+/* Packet Tag for DHD PCIE Full Dongle */
+#define DHD_PKTTAG_FD(pkt)	((dhd_pkttag_fd_t *)(PKTTAG(pkt)))
+
+#define DHD_PKT_GET_FLOWID(pkt)	((DHD_PKTTAG_FD(pkt))->flowid)
+#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \
+	DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid)
+
+#define DHD_PKT_GET_DATAOFF(pkt)	((DHD_PKTTAG_FD(pkt))->dataoff)
+#define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \
+	DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff)
+
+#define DHD_PKT_GET_DMA_LEN(pkt)	((DHD_PKTTAG_FD(pkt))->dma_len)
+#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \
+	DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len)
+
+#define DHD_PKT_GET_PA(pkt)	((DHD_PKTTAG_FD(pkt))->pa)
+#define DHD_PKT_SET_PA(pkt, pkt_pa) \
+	DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa)
+
+#define DHD_PKT_GET_DMAH(pkt)	((DHD_PKTTAG_FD(pkt))->dmah)
+#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \
+	DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah)
+
+#define DHD_PKT_GET_SECDMA(pkt)	((DHD_PKTTAG_FD(pkt))->secdma)
+#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
+	DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
+
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_PKTTS)
+#define DHD_PKT_GET_QTIME(pkt)	((DHD_PKTTAG_FD(pkt))->q_time_us)
+#define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \
+	DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us)
+#endif /* TX_STATUS_LATENCY_STATS || DHD_PKTTS */
+#endif /* PCIE_FULL_DONGLE */
+
+#if defined(BCMWDF)
+typedef struct {
+	dhd_pub_t *dhd_pub;
+} dhd_workitem_context_t;
+
+WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
+#endif /* (BCMWDF) */
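To make the packet-tag accessors above concrete, here is a minimal, hypothetical usage sketch (not part of the patch): a transmit path stamps the flowring id and enqueue time onto a packet, and the completion path reads the tag back. The function names are illustrative, and OSL_SYSUPTIME_US() is assumed to be the OSL's microsecond uptime clock.

static void example_tag_tx_pkt(dhd_pub_t *dhdp, void *pkt, uint16 flowid)
{
	/* Stamp the flowring id so the Tx-status path can locate the ring */
	DHD_PKT_SET_FLOWID(pkt, flowid);
#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_PKTTS)
	/* Record the enqueue time; the completion path subtracts this from
	 * the current time to compute per-packet Tx latency.
	 */
	DHD_PKT_SET_QTIME(pkt, OSL_SYSUPTIME_US());
#endif
}

static uint16 example_flowid_at_completion(void *pkt)
{
	/* Read the same tag back when the Tx status arrives */
	return DHD_PKT_GET_FLOWID(pkt);
}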
+#if defined(LINUX) || defined(linux)
+#if defined(CONFIG_PM_SLEEP)
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)	DECLARE_WAIT_QUEUE_HEAD(a);
+	#define _DHD_PM_RESUME_WAIT(a, b) do {\
+			int retry = 0; \
+			SMP_RD_BARRIER_DEPENDS(); \
+			while (dhd_mmc_suspend && retry++ != b) { \
+				SMP_RD_BARRIER_DEPENDS(); \
+				wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \
+			} \
+		} while (0)
+	#define DHD_PM_RESUME_WAIT(a)		_DHD_PM_RESUME_WAIT(a, 200)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)	_DHD_PM_RESUME_WAIT(a, ~0)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)	do { \
+			if (dhd_mmc_suspend) { \
+				printf("%s[%d]: mmc is still in suspend state!!!\n", \
+					__FUNCTION__, __LINE__); \
+				return a; \
+			} \
+		} while (0)
+	#define DHD_PM_RESUME_RETURN	do { if (dhd_mmc_suspend) return; } while (0)
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)	DECLARE_WAIT_QUEUE_HEAD(a);
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+			uint countdown = (us) + 9999; \
+			while ((exp) && (countdown >= 10000)) { \
+				wait_event_interruptible_timeout(a, FALSE, 1); \
+				countdown -= 10000; \
+			} \
+		} while (0)
+
+#else
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)
+	#define DHD_PM_RESUME_WAIT(a)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)
+	#define DHD_PM_RESUME_RETURN
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+			uint countdown = (us) + 9; \
+			while ((exp) && (countdown >= 10)) { \
+				OSL_DELAY(10); \
+				countdown -= 10; \
+			} \
+		} while (0)
+
+#endif /* CONFIG_PM_SLEEP */
+#else
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+			uint countdown = (us) + 9; \
+			while ((exp) && (countdown >= 10)) { \
+				OSL_DELAY(10); \
+				countdown -= 10; \
+			} \
+		} while (0)
+#endif /* defined(LINUX) || defined(linux) */
+
+#define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
+
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
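A short illustrative sketch of how the resume-wait and sleep-poll helpers above are typically combined. Assumptions: dhd_mmc_suspend is visible to the caller as the macros require, bus_busy is a stand-in condition, BCME_* are the usual Broadcom status codes, and the function name is hypothetical.

static int example_wait_for_bus_idle(volatile bool *bus_busy)
{
	DHD_PM_RESUME_WAIT_INIT(pm_wq);		/* declares a wait queue head */
	DHD_SPINWAIT_SLEEP_INIT(spin_wq);

	DHD_PM_RESUME_WAIT(pm_wq);		/* poll out of MMC suspend, ~200 tries */
	DHD_PM_RESUME_RETURN_ERROR(BCME_ERROR);	/* still suspended: give up */

	SPINWAIT_SLEEP(spin_wq, *bus_busy, 100000);	/* sleep-poll up to ~100 ms */
	return (*bus_busy) ? BCME_BUSY : BCME_OK;
}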
+
+/*
+ * Wake locks are an Android power management concept. They are used by applications and
+ * services to request CPU resources.
+ */
+#if defined(linux) && defined(OEM_ANDROID)
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
+extern void dhd_event_wake_lock(dhd_pub_t *pub);
+extern void dhd_event_wake_unlock(dhd_pub_t *pub);
+extern void dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_pm_wake_unlock(dhd_pub_t *pub);
+extern void dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_txfl_wake_unlock(dhd_pub_t *pub);
+extern void dhd_nan_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_nan_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+extern void dhd_os_wake_lock_init(struct dhd_info *dhd);
+extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd);
+#ifdef DHD_USE_SCAN_WAKELOCK
+extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub);
+#endif /* DHD_USE_SCAN_WAKELOCK */
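The declarations above come in matched pairs. A hypothetical sketch of the expected discipline, using the waive/restore pair to bracket work that should not hold off suspend; the function name and the placement of the work are illustrative only.

static void example_rx_with_wakelock(dhd_pub_t *pub)
{
	dhd_os_wake_lock(pub);		/* refcount up: keep the CPU awake */
	/* ... process receive traffic ... */
	dhd_os_wake_lock_waive(pub);	/* temporarily drop the hold */
	/* ... work that may safely race with suspend ... */
	dhd_os_wake_lock_restore(pub);	/* re-acquire what was waived */
	dhd_os_wake_unlock(pub);	/* must balance the initial lock */
}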
+
+#ifdef WLEASYMESH
+extern int dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+extern int dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+#endif /* WLEASYMESH */
+
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if defined(OEM_ANDROID)
+	mutex_init(&dhdp->wl_softap_lock);
+#endif /* OEM_ANDROID */
+}
+
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if defined(OEM_ANDROID)
+	mutex_lock(&dhdp->wl_softap_lock);
+#endif /* OEM_ANDROID */
+}
+
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if defined(OEM_ANDROID)
+	mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* OEM_ANDROID */
+}
+
+#ifdef DHD_DEBUG_WAKE_LOCK
+#define DHD_OS_WAKE_LOCK(pub) \
+	do { \
+		printf("call wake_lock: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_lock(pub); \
+	} while (0)
+#define DHD_OS_WAKE_UNLOCK(pub) \
+	do { \
+		printf("call wake_unlock: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_unlock(pub); \
+	} while (0)
+#define DHD_EVENT_WAKE_LOCK(pub) \
+	do { \
+		printf("call event wake_lock: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_event_wake_lock(pub); \
+	} while (0)
+#define DHD_EVENT_WAKE_UNLOCK(pub) \
+	do { \
+		printf("call event wake_unlock: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_event_wake_unlock(pub); \
+	} while (0)
+#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) \
+	do { \
+		printf("call pm_wake_timeout enable\n"); \
+		dhd_pm_wake_lock_timeout(pub, val); \
+	} while (0)
+#define DHD_PM_WAKE_UNLOCK(pub) \
+	do { \
+		printf("call pm_wake unlock\n"); \
+		dhd_pm_wake_unlock(pub); \
+	} while (0)
+#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) \
+	do { \
+		printf("call txfl_wake_timeout enable\n"); \
+		dhd_txfl_wake_lock_timeout(pub, val); \
+	} while (0)
+#define DHD_TXFL_WAKE_UNLOCK(pub) \
+	do { \
+		printf("call txfl_wake unlock\n"); \
+		dhd_txfl_wake_unlock(pub); \
+	} while (0)
+#define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val) \
+	do { \
+		printf("call nan_wake_timeout enable\n"); \
+		dhd_nan_wake_lock_timeout(pub, val); \
+	} while (0)
+#define DHD_NAN_WAKE_UNLOCK(pub) \
+	do { \
+		printf("call nan_wake unlock\n"); \
+		dhd_nan_wake_unlock(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) \
+	do { \
+		printf("call wake_lock_timeout: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_timeout(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+	do { \
+		printf("call dhd_wake_lock_rx_timeout_enable[%d]: %s %d\n", \
+			val, __FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_rx_timeout_enable(pub, val); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+	do { \
+		printf("call dhd_wake_lock_ctrl_timeout_enable[%d]: %s %d\n", \
+			val, __FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_ctrl_timeout_enable(pub, val); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+	do { \
+		printf("call dhd_wake_lock_ctrl_timeout_cancel: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_ctrl_timeout_cancel(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub) \
+	do { \
+		printf("call dhd_wake_lock_waive: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_waive(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub) \
+	do { \
+		printf("call dhd_wake_lock_restore: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_restore(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_INIT(dhd) \
+	do { \
+		printf("call dhd_wake_lock_init: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_init(dhd); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd) \
+	do { \
+		printf("call dhd_wake_lock_destroy: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_wake_lock_destroy(dhd); \
+	} while (0)
+#else
+#define DHD_OS_WAKE_LOCK(pub)			dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)			dhd_os_wake_unlock(pub)
+#define DHD_EVENT_WAKE_LOCK(pub)		dhd_event_wake_lock(pub)
+#define DHD_EVENT_WAKE_UNLOCK(pub)		dhd_event_wake_unlock(pub)
+#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val)	dhd_pm_wake_lock_timeout(pub, val)
+#define DHD_PM_WAKE_UNLOCK(pub)			dhd_pm_wake_unlock(pub)
+#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val)	dhd_txfl_wake_lock_timeout(pub, val)
+#define DHD_TXFL_WAKE_UNLOCK(pub)		dhd_txfl_wake_unlock(pub)
+#define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val)	dhd_nan_wake_lock_timeout(pub, val)
+#define DHD_NAN_WAKE_UNLOCK(pub)		dhd_nan_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)		dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_rx_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+	dhd_os_wake_lock_ctrl_timeout_cancel(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub)		dhd_os_wake_lock_waive(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub)		dhd_os_wake_lock_restore(pub)
+#define DHD_OS_WAKE_LOCK_INIT(dhd)		dhd_os_wake_lock_init(dhd)
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd)		dhd_os_wake_lock_destroy(dhd)
+#endif /* DHD_DEBUG_WAKE_LOCK */
+
+#define DHD_OS_WD_WAKE_LOCK(pub)		dhd_os_wd_wake_lock(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub)		dhd_os_wd_wake_unlock(pub)
+
+#ifdef DHD_USE_SCAN_WAKELOCK
+#ifdef DHD_DEBUG_SCAN_WAKELOCK
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \
+	do { \
+		printf("call wake_lock_scan: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_scan_wake_lock_timeout(pub, val); \
+	} while (0)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \
+	do { \
+		printf("call wake_unlock_scan: %s %d\n", \
+			__FUNCTION__, __LINE__); \
+		dhd_os_scan_wake_unlock(pub); \
+	} while (0)
+#else
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)	dhd_os_scan_wake_lock_timeout(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub)		dhd_os_scan_wake_unlock(pub)
+#endif /* DHD_DEBUG_SCAN_WAKELOCK */
+#else
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub)
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
+#else
+
+/* Wake locks are used in Android only (until the Linux community accepts them) */
+#define DHD_OS_WAKE_LOCK(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)
+#define DHD_EVENT_WAKE_LOCK(pub)
+#define DHD_EVENT_WAKE_UNLOCK(pub)
+#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_PM_WAKE_UNLOCK(pub)
+#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_TXFL_WAKE_UNLOCK(pub)
+#define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_NAN_WAKE_UNLOCK(pub)
+#define DHD_OS_WD_WAKE_LOCK(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val)	UNUSED_PARAMETER(val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val)	UNUSED_PARAMETER(val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub)
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub)
+#define DHD_OS_WAKE_LOCK_INIT(dhd)
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd)
+
+#endif /* defined(linux) && defined(OEM_ANDROID) */
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+#define OOB_WAKE_LOCK_TIMEOUT 500
+extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
+
+#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val)	dhd_os_oob_irq_wake_lock_timeout(pub, val)
+#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub)		dhd_os_oob_irq_wake_unlock(pub)
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#define DHD_PACKET_TIMEOUT_MS	500
+#define DHD_EVENT_TIMEOUT_MS	1500
+#define SCAN_WAKE_LOCK_TIMEOUT	10000
+#define MAX_TX_TIMEOUT		500
+
+/* Enum for IOCTL received status */
+typedef enum dhd_ioctl_recieved_status
+{
+	IOCTL_WAIT = 0,
+	IOCTL_RETURN_ON_SUCCESS,
+	IOCTL_RETURN_ON_TRAP,
+	IOCTL_RETURN_ON_BUS_STOP,
+	IOCTL_RETURN_ON_ERROR
+} dhd_ioctl_recieved_status_t;
+
+/* Interface operations (register, remove) should be atomic. Use this lock to prevent race
+ * conditions between wifi on/off and the interface operation functions.
+ */
+#if defined(LINUX)
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+#endif /* LINUX */
+
+#if defined(LINUX) || defined(linux)
+#if defined(MULTIPLE_SUPPLICANT)
+extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif /* MULTIPLE_SUPPLICANT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(MULTIPLE_SUPPLICANT)
+extern struct mutex _dhd_mutex_lock_;
+#define DHD_MUTEX_IS_LOCK_RETURN() \
+	if (mutex_is_locked(&_dhd_mutex_lock_) != 0) { \
+		printf("%s : probe is already running! return.\n", __FUNCTION__); \
+		return -EBUSY; \
+	}
+#define DHD_MUTEX_LOCK() \
+	do { \
+		if (mutex_is_locked(&_dhd_mutex_lock_) == 0) { \
+			printf("%s : no mutex held\n", __FUNCTION__); \
+		} else { \
+			printf("%s : mutex is locked! wait for unlocking\n", __FUNCTION__); \
wait for unlocking\n", __FUNCTION__); \ + } \ + mutex_lock(&_dhd_mutex_lock_); \ + printf("%s : set mutex lock\n", __FUNCTION__); \ + } while (0) +#define DHD_MUTEX_UNLOCK() \ + do { \ + printf("%s : mutex is released.\n", __FUNCTION__); \ + mutex_unlock(&_dhd_mutex_lock_); \ + } while (0) +#else +#define DHD_MUTEX_IS_LOCK_RETURN(a) do {} while (0) +#define DHD_MUTEX_LOCK(a) do {} while (0) +#define DHD_MUTEX_UNLOCK(a) do {} while (0) +#endif +#endif /* defined (LINUX) || defined(linux) */ + +typedef enum dhd_attach_states +{ + DHD_ATTACH_STATE_INIT = 0x0, + DHD_ATTACH_STATE_NET_ALLOC = 0x1, + DHD_ATTACH_STATE_DHD_ALLOC = 0x2, + DHD_ATTACH_STATE_ADD_IF = 0x4, + DHD_ATTACH_STATE_PROT_ATTACH = 0x8, + DHD_ATTACH_STATE_WL_ATTACH = 0x10, + DHD_ATTACH_STATE_THREADS_CREATED = 0x20, + DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40, + DHD_ATTACH_STATE_CFG80211 = 0x80, + DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100, + DHD_ATTACH_TIMESYNC_ATTACH_DONE = 0x200, + DHD_ATTACH_LOGTRACE_INIT = 0x400, + DHD_ATTACH_STATE_LB_ATTACH_DONE = 0x800, + DHD_ATTACH_STATE_DONE = 0x1000 +} dhd_attach_states_t; + +/* Value -1 means we are unsuccessful in creating the kthread. */ +#define DHD_PID_KT_INVALID -1 +/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */ +#define DHD_PID_KT_TL_INVALID -2 + +/* default reporting period */ +#define ECOUNTERS_DEFAULT_PERIOD 0 + +/* default number of reports. '0' indicates forever */ +#define ECOUNTERS_NUM_REPORTS 0 + +typedef struct ecounters_cfg { + uint16 type; + uint16 if_slice_idx; + uint16 stats_rep; +} ecounters_cfg_t; + +typedef struct event_ecounters_cfg { + uint16 event_id; + uint16 type; + uint16 if_slice_idx; + uint16 stats_rep; +} event_ecounters_cfg_t; + +typedef struct ecountersv2_xtlv_list_elt { + /* Not quite the exact bcm_xtlv_t type as data could be pointing to other pieces in + * memory at the time of parsing arguments. + */ + uint16 id; + uint16 len; + uint8 *data; + struct ecountersv2_xtlv_list_elt *next; +} ecountersv2_xtlv_list_elt_t; + +typedef struct ecountersv2_processed_xtlv_list_elt { + uint8 *data; + struct ecountersv2_processed_xtlv_list_elt *next; +} ecountersv2_processed_xtlv_list_elt; + +/* + * Exported from dhd OS modules (dhd_linux/dhd_ndis) + */ + +/* Indication from bus module regarding presence/insertion of dongle. + * Return dhd_pub_t pointer, used as handle to OS module in later calls. + * Returned structure should have bus and prot pointers filled in. + * bus_hdrlen specifies required headroom for bus module header. 
+ */ +extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen +#ifdef BCMDBUS + , void *adapter +#endif +); +extern int dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock); +#if defined(WLP2P) && defined(WL_CFG80211) +/* To allow attach/detach calls corresponding to p2p0 interface */ +extern int dhd_attach_p2p(dhd_pub_t *); +extern int dhd_detach_p2p(dhd_pub_t *); +#endif /* WLP2P && WL_CFG80211 */ +extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock); + +/* Indication from bus module regarding removal/absence of dongle */ +extern void dhd_detach(dhd_pub_t *dhdp); +extern void dhd_free(dhd_pub_t *dhdp); +extern void dhd_clear(dhd_pub_t *dhdp); + +/* Indication from bus module to change flow-control state */ +extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on); + +#ifdef BCMDONGLEHOST +/* Store the status of a connection attempt for later retrieval by an iovar */ +extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason); +#endif /* BCMDONGLEHOST */ + +extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec); + +extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan); + +/* Return pointer to interface name */ +extern char *dhd_ifname(dhd_pub_t *dhdp, int idx); + +#ifdef DHD_UCODE_DOWNLOAD +/* Returns the ucode path */ +extern char *dhd_get_ucode_path(dhd_pub_t *dhdp); +#endif /* DHD_UCODE_DOWNLOAD */ + +/* Request scheduling of the bus dpc */ +extern void dhd_sched_dpc(dhd_pub_t *dhdp); + +/* Notify tx completion */ +extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +extern void dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx); +extern void dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */ +#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */ +#define WIFI_FEATURE_HOTSPOT 0x0004 /* Support for GAS/ANQP */ +#define WIFI_FEATURE_P2P 0x0008 /* Wifi-Direct */ +#define WIFI_FEATURE_SOFT_AP 0x0010 /* Soft AP */ +#define WIFI_FEATURE_GSCAN 0x0020 /* Google-Scan APIs */ +#define WIFI_FEATURE_NAN 0x0040 /* Neighbor Awareness Networking */ +#define WIFI_FEATURE_D2D_RTT 0x0080 /* Device-to-device RTT */ +#define WIFI_FEATURE_D2AP_RTT 0x0100 /* Device-to-AP RTT */ +#define WIFI_FEATURE_BATCH_SCAN 0x0200 /* Batched Scan (legacy) */ +#define WIFI_FEATURE_PNO 0x0400 /* Preferred network offload */ +#define WIFI_FEATURE_ADDITIONAL_STA 0x0800 /* Support for two STAs */ +#define WIFI_FEATURE_TDLS 0x1000 /* Tunnel directed link setup */ +#define WIFI_FEATURE_TDLS_OFFCHANNEL 0x2000 /* Support for TDLS off channel */ +#define WIFI_FEATURE_EPR 0x4000 /* Enhanced power reporting */ +#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */ +#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */ +#define WIFI_FEATURE_LOGGER 0x20000 /* WiFi Logger */ +#define WIFI_FEATURE_HAL_EPNO 0x40000 /* WiFi PNO enhanced */ +#define WIFI_FEATURE_RSSI_MONITOR 0x80000 /* RSSI Monitor */ +#define WIFI_FEATURE_MKEEP_ALIVE 0x100000 /* WiFi mkeep_alive */ +#define WIFI_FEATURE_CONFIG_NDO 0x200000 /* ND offload configure */ +#define WIFI_FEATURE_TX_TRANSMIT_POWER 0x400000 /* Capture Tx transmit power levels */ +#define WIFI_FEATURE_CONTROL_ROAMING 
0x800000 /* Enable/Disable firmware roaming */ +#define WIFI_FEATURE_FILTER_IE 0x1000000 /* Probe req ie filter */ +#define WIFI_FEATURE_SCAN_RAND 0x2000000 /* MAC & Prb SN randomization */ +#define WIFI_FEATURE_SET_TX_POWER_LIMIT 0x4000000 /* Support Tx Power Limit setting */ +#define WIFI_FEATURE_USE_BODY_HEAD_SAR 0x8000000 /* Support Body/Head Proximity SAR */ +#define WIFI_FEATURE_SET_LATENCY_MODE 0x40000000 /* Support Latency mode setting */ +#define WIFI_FEATURE_P2P_RAND_MAC 0x80000000 /* Support P2P MAC randomization */ +#define WIFI_FEATURE_INVALID 0xFFFFFFFF /* Invalid Feature */ + +#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3 + +#if defined(linux) || defined(LINUX) || defined(OEM_ANDROID) +extern int dhd_dev_get_feature_set(struct net_device *dev); +extern int dhd_dev_get_feature_set_matrix(struct net_device *dev, int num); +extern int dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui); +extern int dhd_update_rand_mac_addr(dhd_pub_t *dhd); +#ifdef CUSTOM_FORCE_NODFS_FLAG +extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs); +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef NDO_CONFIG_SUPPORT +#ifndef NDO_MAX_HOST_IP_ENTRIES +#define NDO_MAX_HOST_IP_ENTRIES 10 +#endif /* NDO_MAX_HOST_IP_ENTRIES */ + +extern int dhd_dev_ndo_cfg(struct net_device *dev, u8 enable); +extern int dhd_dev_ndo_update_inet6addr(struct net_device * dev); +#endif /* NDO_CONFIG_SUPPORT */ +#endif /* (linux) || (LINUX) || (OEM_ANDROID) */ +extern int dhd_set_rand_mac_oui(dhd_pub_t *dhd); +#ifdef GSCAN_SUPPORT +extern int dhd_dev_set_lazy_roam_cfg(struct net_device *dev, + wlc_roam_exp_params_t *roam_param); +extern int dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable); +extern int dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev, + wl_bssid_pref_cfg_t *bssid_pref, uint32 flush); +#endif /* GSCAN_SUPPORT */ +#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT) +extern int dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist, + uint32 len, uint32 flush); +extern int dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *whitelist, + uint32 len, uint32 flush); +#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */ + +/* OS independent layer functions */ +extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub); +extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub); +void dhd_os_logdump_lock(dhd_pub_t *pub); +void dhd_os_logdump_unlock(dhd_pub_t *pub); +extern int dhd_os_proto_block(dhd_pub_t * pub); +extern int dhd_os_proto_unblock(dhd_pub_t * pub); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub); +extern unsigned int dhd_os_get_ioctl_resp_timeout(void); +extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); +extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub); +extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub); +#ifdef PCIE_FULL_DONGLE +extern void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason); +#else +static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason) +{ printf("%s is NOT implemented for SDIO", __FUNCTION__); return; } +#endif +#ifdef SHOW_LOGTRACE +/* Bound and delay are fine tuned after several experiments and these + * are the best case values to handle bombarding of console logs. 
+ */
+#define DHD_EVENT_LOGTRACE_BOUND		10u
+#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS	10u
+extern int dhd_os_read_file(void *file, char *buf, uint32 size);
+extern int dhd_os_seek_file(void *file, int64 offset);
+#endif /* SHOW_LOGTRACE */
+int dhd_os_write_file_posn(void *fp, unsigned long *posn,
+	void *buf, unsigned long buflen);
+int dhd_msix_message_set(dhd_pub_t *dhdp, uint table_entry,
+	uint message_number, bool unmask);
+
+#if defined(DHD_EFI)
+void dhd_os_set_intr_poll_period(struct dhd_bus *bus, unsigned int period_us);
+unsigned int dhd_os_get_intr_poll_period(void);
+int dhd_intr_poll(struct dhd_bus *bus, char *arg, int len, int set);
+#define INTR_POLL_PERIOD_CRITICAL	100		/* 100 us, in us */
+#define INTR_POLL_NPKTS_THRESH		1
+#define INTR_POLL_PKT_INTERVAL_THRESH	2000000		/* 2000 ms, in us */
+#if defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+void dhd_intr_poll_pkt_thresholds(dhd_pub_t *dhd);
+#endif /* DHD_INTR_POLL_PERIOD_DYNAMIC */
+#endif /* DHD_EFI */
+
+extern void
+dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr);
+extern void wl_dhdpcie_dump_regs(void * context);
+
+#define DHD_OS_IOCTL_RESP_LOCK(x)
+#define DHD_OS_IOCTL_RESP_UNLOCK(x)
+
+#if defined(NDIS)
+#define dhd_os_open_image(a)		wl_os_open_image(a)
+#define dhd_os_close_image(a)		wl_os_close_image(a)
+#define dhd_os_get_image_block(a, b, c)	wl_os_get_image_block(a, b, c)
+#define dhd_os_get_image_size(a)	wl_os_get_image_size(a)
+extern void dhd_os_wakeind(dhd_pub_t * pub, uint32 *val);
+extern void dhd_bus_check_died(void *bus);
+extern void pci_save_state(osl_t *osh, uint32 *buffer);
+extern void pci_restore_state(osl_t *osh, uint32 *buffer);
+#endif /* #if defined(NDIS) */
+
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern int dhd_os_get_image_size(void * image);
+#if defined(BT_OVER_SDIO)
+extern int dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image);
+extern void dhdsdio_bus_usr_cnt_inc(dhd_pub_t *pub);
+extern void dhdsdio_bus_usr_cnt_dec(dhd_pub_t *pub);
+#endif /* (BT_OVER_SDIO) */
+extern void *dhd_os_open_image1(dhd_pub_t *pub, char *filename); /* rev1 function signature */
+extern void dhd_os_close_image1(dhd_pub_t *pub, void *image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern void dhd_os_runtimepm_timer(void *bus, uint tick);
+#endif /* DHD_PCIE_RUNTIMEPM */
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern unsigned long dhd_os_sdlock_txoff(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txoff(dhd_pub_t * pub, unsigned long flags);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_tracelog(const char *format, ...);
+#ifdef DHDTCPACK_SUPPRESS
+extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub);
+extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags);
+#endif /* DHDTCPACK_SUPPRESS */
+
+extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
+extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
+extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
+#if defined(CUSTOM_COUNTRY_CODE)
+extern void get_customized_country_code(void *adapter, char *country_iso_code,
+	wl_country_t *cspec, u32 flags);
+#else +extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec); +#endif /* CUSTOM_COUNTRY_CODE */ +extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); +extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret); +extern int dhd_os_send_hang_message(dhd_pub_t *dhdp); +extern void dhd_set_version_info(dhd_pub_t *pub, char *fw); +extern bool dhd_os_check_if_up(dhd_pub_t *pub); +extern int dhd_os_check_wakelock(dhd_pub_t *pub); +extern int dhd_os_check_wakelock_all(dhd_pub_t *pub); +extern int dhd_get_instance(dhd_pub_t *pub); +#ifdef CUSTOM_SET_CPUCORE +extern void dhd_set_cpucore(dhd_pub_t *dhd, int set); +#endif /* CUSTOM_SET_CPUCORE */ + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +#if defined(DHD_FW_COREDUMP) +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size); +#endif /* DHD_FW_COREDUMP */ + +#if defined(linux) || defined(LINUX) +#if defined(DHD_SSSR_DUMP) +void dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode); +#endif /* DHD_SSSR_DUMP */ +#ifdef DNGL_AXI_ERROR_LOGGING +void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type); +#endif /* DNGL_AXI_ERROR_LOGGING */ +#ifdef BCMPCIE +void dhd_schedule_cto_recovery(dhd_pub_t *dhdp); +#endif /* BCMPCIE */ +#else +#if defined(DHD_SSSR_DUMP) +static INLINE void dhd_write_sssr_dump(dhd_pub_t *dhd, uint32 dump_mode) { return; } +#endif /* DHD_SSSR_DUMP */ +#ifdef DNGL_AXI_ERROR_LOGGING +static INLINE void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type) { return; } +#endif /* DNGL_AXI_ERROR_LOGGING */ +/* For non-linux map dhd_schedule_cto_recovery to dhdpcie_cto_recovery_handler */ +#ifdef BCMPCIE +#define dhd_schedule_cto_recovery(dhdp) dhdpcie_cto_recovery_handler(dhdp) +#endif /* BCMPCIE */ +#endif /* linux || LINUX */ + +#ifdef EWP_EDL +#define EDL_SCHEDULE_DELAY 500 /* 500ms */ +#if defined(linux) || defined(LINUX) +void dhd_schedule_edl_work(dhd_pub_t *dhdp, uint delay_ms); +#else +static INLINE void dhd_schedule_edl_work(dhd_pub_t *dhd, uint delay_ms) { return; } +#endif /* linux || LINUX */ +#endif /* EWP_EDL */ + +#ifdef SUPPORT_AP_POWERSAVE +extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); +#endif /* SUPPORT_AP_POWERSAVE */ + +#ifdef PKT_FILTER_SUPPORT +#define DHD_UNICAST_FILTER_NUM 0 +#define DHD_BROADCAST_FILTER_NUM 1 +#define DHD_MULTICAST4_FILTER_NUM 2 +#define DHD_MULTICAST6_FILTER_NUM 3 +#define DHD_MDNS_FILTER_NUM 4 +#define DHD_ARP_FILTER_NUM 5 +#define DHD_BROADCAST_ARP_FILTER_NUM 6 +#define DHD_IP4BCAST_DROP_FILTER_NUM 7 +#define DHD_LLC_STP_DROP_FILTER_NUM 8 +#define DHD_LLC_XID_DROP_FILTER_NUM 9 +#define DHD_UDPNETBIOS_DROP_FILTER_NUM 10 +#define DISCARD_IPV4_MCAST "102 1 6 IP4_H:16 0xf0 0xe0" +#define DISCARD_IPV6_MCAST "103 1 6 IP6_H:24 0xff 0xff" +#define DISCARD_IPV4_BCAST "107 1 6 IP4_H:16 0xffffffff 0xffffffff" +#define DISCARD_LLC_STP "108 1 6 ETH_H:14 0xFFFFFFFFFFFF 0xAAAA0300000C" +#define DISCARD_LLC_XID "109 1 6 ETH_H:14 0xFFFFFF 0x0001AF" +#define DISCARD_UDPNETBIOS "110 1 6 UDP_H:2 0xffff 0x0089" +extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); +extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd); +extern int dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num); +extern int net_os_enable_packet_filter(struct net_device *dev, int val); +extern int net_os_rxfilter_add_remove(struct 
net_device *dev, int val, int num); +extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val); + +#define MAX_PKTFLT_BUF_SIZE 2048 +#define MAX_PKTFLT_FIXED_PATTERN_SIZE 32 +#define MAX_PKTFLT_FIXED_BUF_SIZE \ + (WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2) +#define MAXPKT_ARG 16 +#endif /* PKT_FILTER_SUPPORT */ + +#if defined(OEM_ANDROID) && defined(BCMPCIE) +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval); +#else +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); +#endif /* OEM_ANDROID && BCMPCIE */ + +extern bool dhd_support_sta_mode(dhd_pub_t *dhd); +extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); + +#ifdef RSSI_MONITOR_SUPPORT +extern int dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start, + int8 max_rssi, int8 min_rssi); +#endif /* RSSI_MONITOR_SUPPORT */ + +#ifdef DHDTCPACK_SUPPRESS +int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable); +#endif /* DHDTCPACK_SUPPRESS */ + +#define DHD_RSSI_MONITOR_EVT_VERSION 1 +typedef struct { + uint8 version; + int8 cur_rssi; + struct ether_addr BSSID; +} dhd_rssi_monitor_evt_t; + +typedef struct { + uint32 limit; /* Expiration time (usec) */ + uint32 increment; /* Current expiration increment (usec) */ + uint32 elapsed; /* Current elapsed time (usec) */ + uint32 tick; /* O/S tick time (usec) */ +} dhd_timeout_t; + +#ifdef SHOW_LOGTRACE +typedef struct { + uint num_fmts; + char **fmts; + char *raw_fmts; + char *raw_sstr; + uint32 fmts_size; + uint32 raw_fmts_size; + uint32 raw_sstr_size; + uint32 ramstart; + uint32 rodata_start; + uint32 rodata_end; + char *rom_raw_sstr; + uint32 rom_raw_sstr_size; + uint32 rom_ramstart; + uint32 rom_rodata_start; + uint32 rom_rodata_end; +} dhd_event_log_t; +#endif /* SHOW_LOGTRACE */ + +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +/* + * As per Google's current implementation, there will be only one APF filter. + * Therefore, userspace doesn't bother about filter id and because of that + * DHD has to manage the filter id. 
+ */ +#define PKT_FILTER_APF_ID 200 +#define DHD_APF_LOCK(ndev) dhd_apf_lock(ndev) +#define DHD_APF_UNLOCK(ndev) dhd_apf_unlock(ndev) + +extern void dhd_apf_lock(struct net_device *dev); +extern void dhd_apf_unlock(struct net_device *dev); +extern int dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version); +extern int dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len); +extern int dhd_dev_apf_add_filter(struct net_device *ndev, u8* program, + uint32 program_len); +extern int dhd_dev_apf_enable_filter(struct net_device *ndev); +extern int dhd_dev_apf_disable_filter(struct net_device *ndev); +extern int dhd_dev_apf_delete_filter(struct net_device *ndev); +#endif /* PKT_FILTER_SUPPORT && APF */ + +extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); +extern int dhd_timeout_expired(dhd_timeout_t *tmo); + +extern int dhd_ifname2idx(struct dhd_info *dhd, char *name); +#ifdef LINUX +extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net); +extern struct net_device * dhd_idx2net(void *pub, int ifidx); +extern int net_os_send_hang_message(struct net_device *dev); +extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num); +#endif +extern bool dhd_wowl_cap(void *bus); +extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen, + wl_event_msg_t *, void **data_ptr, void *); +extern int wl_process_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen, + wl_event_msg_t *, void **data_ptr, void *); +extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern int wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu); +extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len); +extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, + int ifindex); +extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx); +extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx); +extern void dhd_common_init(osl_t *osh); + +#if defined(linux) || defined(LINUX) || defined(OEM_ANDROID) +extern int dhd_do_driver_init(struct net_device *net); +#endif +extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern int dhd_event_ifchange(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +#ifdef DHD_UPDATE_INTF_MAC +extern int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx); +#endif /* DHD_UPDATE_INTF_MAC */ +extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name); +extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock); +#ifdef WL_STATIC_IF +extern s32 dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx, + uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state); +#endif /* WL_STATIC_IF */ +extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); +extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); +extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx); +extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len); + +#ifdef WL_NATOE +extern int dhd_natoe_ct_event(dhd_pub_t *dhd, char *data); 
+#endif /* WL_NATOE */
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+#ifdef LOG_INTO_TCPDUMP
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
+#endif /* LOG_INTO_TCPDUMP */
+#if defined(SHOW_LOGTRACE) && defined(EWP_EDL)
+void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
+#endif
+#if defined(WIFI_TURNON_USE_HALINIT)
+extern int dhd_open(struct net_device *net);
+#endif /* WIFI_TURNON_USE_HALINIT */
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
+extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval);
+#if defined(BCMSDIO) || defined(BCMPCIE)
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
+#endif /* defined(BCMSDIO) || defined(BCMPCIE) */
+#if defined(LINUX) || defined(linux)
+int dhd_bus_get_fw_mode(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_bus_get_fw_mode(dhd_pub_t *dhdp) { return 0; }
+#endif /* LINUX || linux */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+/* linux is defined for DHD EFI builds also,
+ * since it's cross-compiled for EFI from linux.
+ * The dbgring_lock APIs are meant for linux only,
+ * to use mutexes; other OSes will continue to
+ * use osl_spin_lock.
+ */
+#if (defined(LINUX) || defined(linux)) && !defined(DHD_EFI)
+void *dhd_os_dbgring_lock_init(osl_t *osh);
+void dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx);
+unsigned long dhd_os_dbgring_lock(void *lock);
+void dhd_os_dbgring_unlock(void *lock, unsigned long flags);
+#endif /* (LINUX || linux) && !DHD_EFI */
+
+#ifdef PCIE_INB_DW
+#ifdef DHD_EFI
+extern int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_enter_wake(dhd_pub_t * pub);
+#else
+static INLINE int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition)
+{ return 1; }
+static INLINE int dhd_os_ds_enter_wake(dhd_pub_t * pub)
+{ return 0; }
+#endif /* DHD_EFI */
+#endif /* PCIE_INB_DW */
+
+#if defined(LINUX) || defined(linux) || defined(DHD_EFI)
+extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_busbusy_wake(dhd_pub_t * pub);
+extern void dhd_os_tx_completion_wake(dhd_pub_t *dhd);
+extern int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition);
+extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_d3ack_wake(dhd_pub_t * pub);
+extern int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition);
+extern int dhd_os_dmaxfer_wake(dhd_pub_t *pub);
+int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
+	uint bitmask, uint condition);
+#ifdef PCIE_INB_DW
+extern int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_exit_wake(dhd_pub_t * pub);
+#endif /* PCIE_INB_DW */
+int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition, uint timeout_ms);
+int dhd_os_tput_test_wake(dhd_pub_t * pub);
+#else
+static INLINE int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
+	uint timeout_ms)
+{ return 0; }
+static INLINE int dhd_os_tput_test_wake(dhd_pub_t * pub)
+{ return 0; }
+static INLINE int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition)
+{ return dhd_os_ioctl_resp_wait(pub, condition); }
+static INLINE int dhd_os_d3ack_wake(dhd_pub_t * pub)
+{ return dhd_os_ioctl_resp_wake(pub); }
+#ifdef PCIE_INB_DW
+static INLINE int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition)
+{ DHD_ERROR(("%s is not supported for this platform", __FUNCTION__)); return 0; }
+static INLINE int dhd_os_ds_exit_wake(dhd_pub_t * pub)
+{ DHD_ERROR(("%s is not supported for this platform", __FUNCTION__)); return 0; }
+#endif /* PCIE_INB_DW */
+static INLINE int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition)
+{ return 1; }
+static INLINE int dhd_os_busbusy_wake(dhd_pub_t * pub)
+{ return 0; }
+static INLINE int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
+{ return 0; }
+static INLINE int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
+{ return 0; }
+static INLINE int dhd_os_dmaxfer_wake(dhd_pub_t *pub)
+{ return 0; }
+static INLINE int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
+	uint bitmask, uint condition)
+{ return 0; }
+#endif /* LINUX || DHD_EFI */
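A hedged sketch of the wait/wake pairing these declarations imply: one context blocks on a shared state word, another updates the word and wakes the waiter. The return-value handling assumes wait_event_timeout()-style semantics (a return of 0 meaning timeout); the function names and the state variable are stand-ins.

static int example_wait_until_idle(dhd_pub_t *pub, uint *state_var)
{
	/* Block until *state_var reaches 0 (the "idle" condition here) */
	if (dhd_os_busbusy_wait_condition(pub, state_var, 0) == 0) {
		return BCME_BUSY;	/* assumed: 0 indicates a timeout */
	}
	return BCME_OK;
}

static void example_mark_idle(dhd_pub_t *pub, uint *state_var)
{
	*state_var = 0;			/* publish the new state... */
	dhd_os_busbusy_wake(pub);	/* ...then wake any waiters */
}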
+
+#if defined(LINUX) || defined(linux)
+/*
+ * Manage sta objects in an interface. An interface is identified by an ifindex, and
+ * the sta(s) within an interface are managed using the MAC address of the sta.
+ */
+struct dhd_sta;
+extern bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac);
+extern struct dhd_sta *dhd_find_sta(void *pub, int ifidx, void *ea);
+extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea);
+extern void dhd_del_all_sta(void *pub, int ifidx);
+extern void dhd_del_sta(void *pub, int ifidx, void *ea);
+extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
+extern struct net_device *dhd_linux_get_primary_netdev(dhd_pub_t *dhdp);
+#else /* LINUX */
+static INLINE bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
+{ return FALSE; }
+static INLINE void* dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
+static INLINE void *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+static INLINE void dhd_del_all_sta(void *pub, int ifidx) { }
+static INLINE void dhd_del_sta(void *pub, int ifidx, void *ea) { }
+static INLINE int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) { return 0; }
+static INLINE int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) { return 0; }
+static INLINE int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) { return 0; }
+#endif /* LINUX */
+
+extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
+int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len,
+	char *res_buf, uint res_len, bool set);
+extern int dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
+	uint cmd_len, char **resptr, uint resp_len);
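A hypothetical example of driving dhd_iovar() as declared above: a set passes the parameter in param_buf with no result buffer, while a get returns the value through res_buf. The "mpc" iovar name is used purely as a familiar illustration, and TRUE/FALSE are the usual Broadcom typedefs.

static int example_set_and_get_mpc(dhd_pub_t *pub)
{
	int mpc = 1;
	int cur = 0;
	int err;

	/* set: parameter in, no result buffer */
	err = dhd_iovar(pub, 0, "mpc", (char *)&mpc, sizeof(mpc), NULL, 0, TRUE);
	if (err != BCME_OK) {
		return err;
	}
	/* get: result comes back through res_buf */
	return dhd_iovar(pub, 0, "mpc", NULL, 0, (char *)&cur, sizeof(cur), FALSE);
}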
+
+#ifdef DHD_MCAST_REGEN
+extern int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif
+typedef enum cust_gpio_modes {
+	WLAN_RESET_ON,
+	WLAN_RESET_OFF,
+	WLAN_POWER_ON,
+	WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+typedef struct dmaxref_mem_map {
+	dhd_dma_buf_t *srcmem;
+	dhd_dma_buf_t *dstmem;
+} dmaxref_mem_map_t;
+
+#if defined(OEM_ANDROID)
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+#endif /* defined(OEM_ANDROID) */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+extern void dhd_flush_rx_tx_wq(dhd_pub_t *dhdp);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+extern bool dhd_os_wd_timer_enabled(void *bus);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern uint dhd_runtimepm_ms;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+/** Default console output poll interval */
+extern uint dhd_console_ms;
+
+extern uint android_msg_level;
+extern uint config_msg_level;
+extern uint sd_msglevel;
+extern uint dump_msg_level;
+#ifdef BCMDBUS
+extern uint dbus_msglevel;
+#endif /* BCMDBUS */
+#ifdef WL_WIRELESS_EXT
+extern uint iw_msg_level;
+#endif
+#ifdef WL_CFG80211
+extern uint wl_dbg_level;
+#endif
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio state control */
+extern uint dhd_radio_up;
+
+/* TCM verification control */
+extern uint dhd_tcm_test_enable;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#ifdef DHD_USE_IDLECOUNT
+#define DHD_IDLETIME_TICKS 5
+#else
+#define DHD_IDLETIME_TICKS 1
+#endif /* DHD_USE_IDLECOUNT */
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* triggers bcm_bprintf to print to kernel log */
+extern bool bcm_bprintf_bypass;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+
+/* Default bcn_timeout value is 4 */
+#define DEFAULT_BCN_TIMEOUT_VALUE	4
+#ifndef CUSTOM_BCN_TIMEOUT_SETTING
+#define CUSTOM_BCN_TIMEOUT_SETTING	DEFAULT_BCN_TIMEOUT_VALUE
+#endif
+
+/* Default KEEP_ALIVE period is 55 sec, to prevent the AP from sending keep-alive probe frames */
+#define DEFAULT_KEEP_ALIVE_VALUE	55000	/* msec */
+#ifndef CUSTOM_KEEP_ALIVE_SETTING
+#define CUSTOM_KEEP_ALIVE_SETTING	DEFAULT_KEEP_ALIVE_VALUE
+#endif /* DEFAULT_KEEP_ALIVE_VALUE */
+
+#define NULL_PKT_STR	"null_pkt"
+
+/* hooks for custom glom setting option via Makefile */
+#define DEFAULT_GLOM_VALUE	-1
+#ifndef CUSTOM_GLOM_SETTING
+#define CUSTOM_GLOM_SETTING	DEFAULT_GLOM_VALUE
+#endif
+#define WL_AUTO_ROAM_TRIGGER -75
+/* hooks for custom Roaming Trigger setting via Makefile */
+#define DEFAULT_ROAM_TRIGGER_VALUE	-75	/* dBm default roam trigger all band */
+#define DEFAULT_ROAM_TRIGGER_SETTING	-1
+#ifndef CUSTOM_ROAM_TRIGGER_SETTING
+#define CUSTOM_ROAM_TRIGGER_SETTING	DEFAULT_ROAM_TRIGGER_VALUE
+#endif
+
+/* hooks for custom Roam Delta setting via Makefile */
+#define DEFAULT_ROAM_DELTA_VALUE	10	/* dBm default roam delta all band */
+#define DEFAULT_ROAM_DELTA_SETTING	-1
+#ifndef CUSTOM_ROAM_DELTA_SETTING
+#define CUSTOM_ROAM_DELTA_SETTING	DEFAULT_ROAM_DELTA_VALUE
+#endif
+
+/* hooks for a custom PNO event wake lock, to guarantee the platform enough time
+ * to detect the event before the system is suspended
+ */
+#define DEFAULT_PNO_EVENT_LOCK_xTIME	2	/* multiple of DHD_PACKET_TIMEOUT_MS */
+#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
+#define CUSTOM_PNO_EVENT_LOCK_xTIME	DEFAULT_PNO_EVENT_LOCK_xTIME
+#endif
+/* hooks for custom dhd_dpc_prio setting option via Makefile */
+#define DEFAULT_DHP_DPC_PRIO	1
+#ifndef CUSTOM_DPC_PRIO_SETTING
+#define CUSTOM_DPC_PRIO_SETTING	DEFAULT_DHP_DPC_PRIO
+#endif
+
+#ifndef CUSTOM_LISTEN_INTERVAL
+#define CUSTOM_LISTEN_INTERVAL	LISTEN_INTERVAL
+#endif /* CUSTOM_LISTEN_INTERVAL */
+
+#define DEFAULT_SUSPEND_BCN_LI_DTIM	3
+#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
+#define CUSTOM_SUSPEND_BCN_LI_DTIM	DEFAULT_SUSPEND_BCN_LI_DTIM
+#endif
+
+#ifdef OEM_ANDROID
+#ifndef BCN_TIMEOUT_IN_SUSPEND
+#define BCN_TIMEOUT_IN_SUSPEND	6	/* bcn timeout value in suspend mode */
+#endif
+#endif /* OEM_ANDROID */
+
+#ifndef CUSTOM_RXF_PRIO_SETTING
+#define CUSTOM_RXF_PRIO_SETTING	MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
+#endif
+
+#define DEFAULT_WIFI_TURNOFF_DELAY	0
+#ifndef WIFI_TURNOFF_DELAY
+#define WIFI_TURNOFF_DELAY	DEFAULT_WIFI_TURNOFF_DELAY
+#endif /* WIFI_TURNOFF_DELAY */
+
+#define DEFAULT_WIFI_TURNON_DELAY	200
+#ifndef WIFI_TURNON_DELAY
+#define WIFI_TURNON_DELAY	DEFAULT_WIFI_TURNON_DELAY
+#endif /* WIFI_TURNON_DELAY */
+
+#ifdef BCMSDIO
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	10	/* msec */
+#else
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	0	/* msec */
+#endif
+#ifndef CUSTOM_DHD_WATCHDOG_MS
+#define CUSTOM_DHD_WATCHDOG_MS	DEFAULT_DHD_WATCHDOG_INTERVAL_MS
+#endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */
+
+#define DHD_INB_DW_DEASSERT_MS	250
+
+#define DEFAULT_ASSOC_RETRY_MAX	3
+#ifndef CUSTOM_ASSOC_RETRY_MAX
+#define CUSTOM_ASSOC_RETRY_MAX	DEFAULT_ASSOC_RETRY_MAX
+#endif /* DEFAULT_ASSOC_RETRY_MAX */
+
+#if defined(BCMSDIO) || defined(DISABLE_FRAMEBURST)
+#define DEFAULT_FRAMEBURST_SET	0
+#else
+#define DEFAULT_FRAMEBURST_SET	1
+#endif /* BCMSDIO */
+
+#ifndef CUSTOM_FRAMEBURST_SET
+#define CUSTOM_FRAMEBURST_SET	DEFAULT_FRAMEBURST_SET
+#endif /* CUSTOM_FRAMEBURST_SET */
+
+#ifdef WLTDLS
+#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
+#define CUSTOM_TDLS_IDLE_MODE_SETTING	60000	/* 60 sec; tear down TDLS if not active */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
+#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH	-70	/* rssi threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
+#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW	-80	/* rssi threshold for tearing down TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH
+#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH	100	/* pkt/sec threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW
+#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW	10	/* pkt/sec threshold for tearing down TDLS link */
+#endif
+#endif /* WLTDLS */
+
+#if defined(VSDB) || defined(ROAM_ENABLE)
+#define DEFAULT_BCN_TIMEOUT	6
+#else
+#define DEFAULT_BCN_TIMEOUT	4
+#endif /* VSDB || ROAM_ENABLE */
+
+#ifndef CUSTOM_BCN_TIMEOUT
+#define CUSTOM_BCN_TIMEOUT	DEFAULT_BCN_TIMEOUT
+#endif
+
+#define DEFAULT_BCN_TIMEOUT_IN_SUSPEND	10
+#ifndef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+#define CUSTOM_BCN_TIMEOUT_IN_SUSPEND	DEFAULT_BCN_TIMEOUT_IN_SUSPEND
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+
+#define MAX_DTIM_SKIP_BEACON_INTERVAL	100	/* max allowed associated AP beacon for DTIM skip */
+#ifndef MAX_DTIM_ALLOWED_INTERVAL
+#define MAX_DTIM_ALLOWED_INTERVAL	600	/* max allowed total beacon interval for DTIM skip */
+#endif
+
+#ifdef OEM_ANDROID
+#ifndef MIN_DTIM_FOR_ROAM_THRES_EXTEND
+#define MIN_DTIM_FOR_ROAM_THRES_EXTEND	600	/* minimum dtim interval to extend roam threshold */
+#endif
+#endif /* OEM_ANDROID */
+
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+extern int dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g);
+extern int dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g);
+#ifndef CUSTOM_ROAMRSSI_2G
+#define CUSTOM_ROAMRSSI_2G	ROAMRSSI_2G_DEFAULT
+#endif /* CUSTOM_ROAMRSSI_2G */
+#ifndef CUSTOM_ROAMRSSI_5G
+#define CUSTOM_ROAMRSSI_5G	ROAMRSSI_5G_DEFAULT
+#endif /* CUSTOM_ROAMRSSI_5G */
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+#ifdef CONFIG_ROAM_MIN_DELTA
+extern int dhd_roam_min_delta_get(dhd_pub_t *dhd, uint32 *dt2g, uint32 *dt5g);
+extern int dhd_roam_min_delta_set(dhd_pub_t *dhd, uint32 dt2g, uint32 dt5g);
+#ifndef CUSTOM_ROAM_MIN_DELTA
+#define CUSTOM_ROAM_MIN_DELTA	ROAM_MIN_DELTA_DEFAULT
+#endif /* CUSTOM_ROAM_MIN_DELTA */
+#endif /* CONFIG_ROAM_MIN_DELTA */
+
+#define NO_DTIM_SKIP 1
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+#ifdef BCMSLTGT
+/* Account for slow hardware (QT) */
+extern uint htclkratio;
+extern int dngl_xtalfreq;
+#endif
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN	2048
+#define MOD_PARAM_INFOLEN	512
+#define MOD_PARAM_SRLEN		64
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+#if defined(CUSTOMER_HW4)
+#define VENDOR_PATH "/vendor"
+#else
+#define VENDOR_PATH ""
+#endif /* CUSTOMER_HW4 */
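A sketch of how the DTIM-skip bounds above might interact when computing a suspend-mode bcn_li_dtim. This mirrors the general approach the driver takes but is illustrative only; the AP's dtim_period and beacon_interval are stand-in inputs, and the function name is hypothetical.

static int example_suspend_bcn_li_dtim(int dtim_period, int beacon_interval)
{
	int bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;

	if (dtim_period <= 0 || beacon_interval <= 0) {
		return NO_DTIM_SKIP;	/* defined above as 1 */
	}
	/* Skipping is only worthwhile for short AP beacon intervals */
	if (beacon_interval > MAX_DTIM_SKIP_BEACON_INTERVAL) {
		return NO_DTIM_SKIP;
	}
	/* Cap the total listen time: dtim-skip * dtim_period * beacon_interval */
	while (bcn_li_dtim > 1 &&
	       (bcn_li_dtim * dtim_period * beacon_interval) > MAX_DTIM_ALLOWED_INTERVAL) {
		bcn_li_dtim--;
	}
	return bcn_li_dtim;
}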
+ * 1) If the Platform Makefile mentions from where it should be + * picked from use it. + * 2) If Platform Makefile does not mention anything,use the + * scheme as mapped below + */ +#if !defined(PLATFORM_PATH) +/* First Overrides */ +#if defined(DHD_LEGACY_FILE_PATH) +/* If Legacy file path is to be used */ +#define PLATFORM_PATH "/data/" +#elif defined(PLATFORM_SLP) +/* Path Name for SLP */ +#define PLATFORM_PATH "/opt/etc/" +#else +/* End of Overrides, rely on what is dictated by Android */ +#if defined(CUSTOMER_HW4) +#define PLATFORM_PATH "/data/vendor/conn/" +#else +#define PLATFORM_PATH "/data/misc/conn/" +#endif /* CUSTOMER_HW4 */ +#define DHD_MAC_ADDR_EXPORT +#define DHD_ADPS_BAM_EXPORT +#define DHD_EXPORT_CNTL_FILE +#define DHD_SOFTAP_DUAL_IF_INFO +#define DHD_SEND_HANG_PRIVCMD_ERRORS +#endif /* DHD_LEGACY_FILE_PATH */ +#endif /* !PLATFORM_PATH */ + +#ifdef DHD_MAC_ADDR_EXPORT +extern struct ether_addr sysfs_mac_addr; +#endif /* DHD_MAC_ADDR_EXPORT */ + +#if defined(LINUX) || defined(linux) +/* Flag to indicate if we should download firmware on driver load */ +extern uint dhd_download_fw_on_driverload; +#ifndef BCMDBUS +extern int allow_delay_fwdl; +#endif /* !BCMDBUS */ + +extern int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); +extern int dhd_write_file(const char *filepath, char *buf, int buf_len); +extern int dhd_read_file(const char *filepath, char *buf, int buf_len); +extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len); +extern int dhd_file_delete(char *path); + +#ifdef READ_MACADDR +extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp); +#else +static INLINE int dhd_set_macaddr_from_file(dhd_pub_t *dhdp) { return 0; } +#endif /* READ_MACADDR */ +#ifdef WRITE_MACADDR +extern int dhd_write_macaddr(struct ether_addr *mac); +#else +static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; } +#endif /* WRITE_MACADDR */ + +#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG) +#if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF) +#define DHD_USE_CISINFO_FROM_OTP +/* For COB, can't check CID/MAC in OTP, so, define it here */ +#define DHD_READ_CIS_FROM_BP +#endif /* CONFIG_BCM4361 || CONFIG_BCM4375 || CONFIG_BCM4389_DEF */ +#define MAX_VNAME_LEN 64 +#define MAX_VID_LEN 8 +#define MODULE_NAME_INDEX_MAX 3 +#define MAX_EXTENSION 20 +typedef struct { + char cid_ext[MAX_EXTENSION]; + char nvram_ext[MAX_EXTENSION]; + char fw_ext[MAX_EXTENSION]; +} naming_info_t; +#ifdef DHD_EXPORT_CNTL_FILE +extern char cidinfostr[MAX_VNAME_LEN]; +#endif /* DHD_EXPORT_CNTL_FILE */ +extern int dhd_check_module_cid(dhd_pub_t *dhdp); +#else +static INLINE int dhd_check_module_cid(dhd_pub_t *dhdp) { return 0; } +#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */ +#ifdef USE_CID_CHECK +extern char *dhd_get_cid_info(unsigned char *vid, int vid_length); +#endif /* USE_CID_CHECK */ +#ifdef GET_MAC_FROM_OTP +extern int dhd_check_module_mac(dhd_pub_t *dhdp); +#else +static INLINE int dhd_check_module_mac(dhd_pub_t *dhdp) { return 0; } +#endif /* GET_MAC_FROM_OTP */ + +#if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(USE_CID_CHECK) || \ + defined(GET_MAC_FROM_OTP) +#define DHD_USE_CISINFO +#endif /* READ_MACADDR || WRITE_MACADDR || USE_CID_CHECK || GET_MAC_FROM_OTP */ + +#ifdef DHD_USE_CISINFO +int dhd_read_cis(dhd_pub_t *dhdp); +int dhd_read_otp_sw_rgn(dhd_pub_t *dhdp); +void dhd_clear_cis(dhd_pub_t *dhdp); +int dhd_alloc_cis(dhd_pub_t *dhdp); +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) +extern 
int dhd_check_module_b85a(void);
+extern int dhd_check_module_b90(void);
+#define BCM4359_MODULE_TYPE_B90B 1
+#define BCM4359_MODULE_TYPE_B90S 2
+#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
+#if defined(USE_CID_CHECK)
+extern int dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem);
+extern naming_info_t *
+dhd_find_naming_info(dhd_pub_t *dhdp, char *module_type);
+extern naming_info_t * dhd_find_naming_info_by_chip_rev(dhd_pub_t *dhdp, bool *is_murata_fem);
+#endif /* defined(USE_CID_CHECK) */
+#ifdef USE_DIRECT_VID_TAG
+#define VENDOR_OFF 1
+#define MD_REV_OFF 0
+#define A0_REV "_a0"
+#define B0_REV "_b0"
+extern int dhd_check_stored_module_info(char *vid);
+extern int concate_nvram_by_vid(dhd_pub_t *dhdp, char *nv_path, char *chipstr);
+#endif /* USE_DIRECT_VID_TAG */
+#if defined(USE_CID_CHECK) && defined(USE_DIRECT_VID_TAG)
+#error Please use USE_CID_CHECK/USE_DIRECT_VID_TAG exclusively
+#endif /* USE_CID_CHECK && USE_DIRECT_VID_TAG */
+#else
+static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_read_otp_sw_rgn(dhd_pub_t *dhdp) { return 0; }
+static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { }
+static INLINE int dhd_alloc_cis(dhd_pub_t *dhdp) { return 0; }
+#endif /* DHD_USE_CISINFO */
+
+#else /* LINUX || linux */
+static INLINE int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) { return 0; }
+#endif /* LINUX || linux */
+
+#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
+/* Flags to indicate whether we distinguish the power-off policy when
+ * the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
+ */
+extern int trigger_deep_sleep;
+int dhd_deepsleep(struct net_device *dev, int flag);
+#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t *dhd);
+
+#define IFLOCK_INIT(lock) *lock = 0
+#define IFLOCK(lock) while (InterlockedCompareExchange((lock), 1, 0)) \
+	NdisStallExecution(1);
+#define IFUNLOCK(lock) InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL))
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES 8
+void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef WLTDLS
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode);
+#ifdef PCIE_FULL_DONGLE
+int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event);
+int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event);
+int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
+
+/* Neighbor Discovery Offload Support */
+extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
+int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
+int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
+
+/* Enhanced ND offload support */
+uint16 dhd_ndo_get_version(dhd_pub_t *dhdp);
+int dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx);
+int dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx);
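FW_SUPPORTED() above leans on two details: `#capa` stringizes the bare token at the call site, and the single-space padding on both sides makes strstr() match only whole capability words, assuming fw_capabilities is stored with surrounding spaces (which the space-padded search pattern implies). An illustrative check with a stand-in structure:

    #include <stdio.h>
    #include <string.h>

    struct fake_dhd { const char *fw_capabilities; };   /* stand-in for dhd_pub_t */
    #define FW_SUPPORTED(dhd, capa) \
        ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL))

    int main(void)
    {
        struct fake_dhd d = { " sta p2ps mchan " };
        struct fake_dhd *dhd = &d;
        printf("p2ps=%d p2p=%d\n",
            FW_SUPPORTED(dhd, p2ps),   /* 1: whole token present */
            FW_SUPPORTED(dhd, p2p));   /* 0: "p2p" only occurs inside "p2ps" */
        return 0;
    }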
+int dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx);
+int dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable);
+
+/* ioctl processing for nl80211 */
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+extern int
+concate_revision(struct dhd_bus *bus, char *fwpath, char *nvpath);
+#endif /* SUPPORT_MULTIPLE_REVISION */
+void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
+	char *pclm_path, char *pconf_path);
+void dhd_set_bus_state(void *bus, uint32 state);
+
+/* Remove proper pkts (either one non-fragmented pkt or a whole set of fragmented pkts) */
+typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
+extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
+
+#ifdef PROP_TXSTATUS
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+extern const uint8 prio2fifo[];
+#endif /* PROP_TXSTATUS */
+
+int dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size);
+int dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size);
+int dhd_common_socram_dump(dhd_pub_t *dhdp);
+
+int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen);
+
+int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size);
+void dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname);
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
+#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
+#else
+#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
+#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+#ifdef USE_WFA_CERT_CONF
+enum {
+	SET_PARAM_BUS_TXGLOM_MODE,
+	SET_PARAM_ROAMOFF,
+#ifdef USE_WL_FRAMEBURST
+	SET_PARAM_FRAMEBURST,
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+	SET_PARAM_TXBF,
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+	SET_PARAM_PROPTX,
+	SET_PARAM_PROPTXMODE,
+#endif /* PROP_TXSTATUS */
+	PARAM_LAST_VALUE
+};
+extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
+#ifdef DHD_EXPORT_CNTL_FILE
+#define VALUENOTSET 0xFFFFFFFFu
+extern uint32 bus_txglom;
+extern uint32 roam_off;
+#ifdef USE_WL_FRAMEBURST
+extern uint32 frameburst;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+extern uint32 txbf;
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+extern uint32 proptx;
+#endif /* PROP_TXSTATUS */
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* USE_WFA_CERT_CONF */
+
+#if defined(BCM_ROUTER_DHD)
+#if defined(HNDCTF)
+bool dhd_ctf_hotbrc_check(dhd_pub_t *dhdp, uint8 *eh, int ifidx);
+void dhd_ctf_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+bool dhd_l2_filter_chainable(dhd_pub_t *dhdp, uint8 *eh, int ifidx);
+bool dhd_wet_chainable(dhd_pub_t *dhdp);
+bool dhd_rx_pkt_chainable(dhd_pub_t *dhdp, int ifidx);
+#endif /* HNDCTF */
+extern void dhd_schedule_trap_log_dump(dhd_pub_t *dhdp,
+	uint8 *buf, uint32 size);
+/* When a new flowid is allocated/deallocated, inform dhd. */
+extern void dhd_add_flowid(dhd_pub_t * dhdp, int ifidx,
+	uint8 ac_prio, void * ea, uint16 flowid);
+extern void dhd_del_flowid(dhd_pub_t * dhdp, int ifidx, uint16 flowid);
+#else /* ! BCM_ROUTER_DHD */
+#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0)
+#define dhd_del_flowid(pub, ifidx, flowid) do {} while (0)
+bool dhd_wet_chainable(dhd_pub_t *dhdp);
+#endif /* ! BCM_ROUTER_DHD */
+
+extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/** Miscellaneous DHD Spin Locks */
+
+/* Enable DHD general spin lock/unlock */
+#define DHD_GENERAL_LOCK(dhdp, flags) \
+	(flags) = dhd_os_general_spin_lock(dhdp)
+#define DHD_GENERAL_UNLOCK(dhdp, flags) \
+	dhd_os_general_spin_unlock((dhdp), (flags))
+
+/* Enable DHD timer spin lock/unlock */
+#define DHD_TIMER_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_TIMER_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+
+/* Enable DHD flowring spin lock/unlock */
+#define DHD_FLOWRING_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_FLOWRING_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring info spin lock/unlock */
+#define DHD_FLOWID_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_FLOWID_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring list spin lock/unlock */
+#define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_FLOWRING_LIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_RING_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_RING_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BUS_LP_STATE_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_LP_STATE_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BAR1_SWITCH_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BAR1_SWITCH_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BUS_PWR_REQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_PWR_REQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#ifdef PCIE_INB_DW
+#define DHD_BUS_DONGLE_DS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_DONGLE_DS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+#endif /* PCIE_INB_DW */
+
+/* Enable DHD backplane spin lock/unlock */
+#define DHD_BACKPLANE_ACCESS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BACKPLANE_ACCESS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_INB_DW_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/* Enable DHD TDLS peer list spin lock/unlock */
+#ifdef WLTDLS
+#define DHD_TDLS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_TDLS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+#endif /* WLTDLS */
+
+#ifdef DBG_PKT_MON
+/* Enable DHD PKT MON spin lock/unlock */
+#define DHD_PKT_MON_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKT_MON_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+#endif /* DBG_PKT_MON */
+
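All the lock macros above and below share one calling convention: the caller provides an unsigned long flags lvalue, the LOCK macro stores the saved lock state into it, and the matching UNLOCK macro consumes it. A minimal usage sketch (hypothetical caller, not driver code):

    static void example_critical_section(dhd_pub_t *dhdp)
    {
        unsigned long flags;

        DHD_GENERAL_LOCK(dhdp, flags);    /* flags = dhd_os_general_spin_lock(dhdp) */
        /* ... touch state shared with other execution contexts ... */
        DHD_GENERAL_UNLOCK(dhdp, flags);  /* dhd_os_general_spin_unlock(dhdp, flags) */
    }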
+#ifdef DHD_PKT_LOGGING
+/* Enable DHD PKT LOG spin lock/unlock */
+#define DHD_PKT_LOG_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKT_LOG_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+#endif /* DHD_PKT_LOGGING */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+#define DHD_AWDL_STATS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_AWDL_STATS_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+#if defined(linux) || defined(LINUX)
+#define DHD_LINUX_GENERAL_LOCK(dhdp, flags) DHD_GENERAL_LOCK(dhdp, flags)
+#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) DHD_GENERAL_UNLOCK(dhdp, flags)
+#else
+#define DHD_LINUX_GENERAL_LOCK(dhdp, flags) do {BCM_REFERENCE(flags);} while (0)
+#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) do {BCM_REFERENCE(flags);} while (0)
+#endif
+
+#define DHD_RX_NAPI_QUEUE_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_RX_NAPI_QUEUE_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_UP_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_UP_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_WAKE_SPIN_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_WAKE_SPIN_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/*
+ * Temporarily change the log dump lock to spin_lock_irqsave, as DHD_ERROR/DHD_LOG_MEM
+ * are being called from dhdpcie_bus_isr.
+ * This will be reverted after a proper solution is implemented to handle ISR prints.
+ */
+#define DHD_LOG_DUMP_BUF_LOCK(lock, flags) (flags) = osl_spin_lock_irq(lock)
+#define DHD_LOG_DUMP_BUF_UNLOCK(lock, flags) osl_spin_unlock_irq((lock), (flags))
+
+#define DHD_PKT_WAKE_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKT_WAKE_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_OOB_IRQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_OOB_IRQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_IF_STA_LIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_IF_STA_LIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_DBG_RING_LOCK_INIT(osh) osl_spin_lock_init(osh)
+#define DHD_DBG_RING_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, (lock))
+#define DHD_DBG_RING_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_DBG_RING_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#ifdef DHD_MEM_STATS
+/* memory stats lock/unlock */
+#define DHD_MEM_STATS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_MEM_STATS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+#endif /* DHD_MEM_STATS */
+
+extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp);
+
+#if defined(LINUX) || defined(linux)
+extern void dhd_print_tasklet_status(dhd_pub_t *dhd);
+#ifdef PCIE_INB_DW
+extern bool dhd_check_cfg_in_progress(dhd_pub_t *dhdp);
+#endif
+#else
+static INLINE void dhd_print_tasklet_status(dhd_pub_t *dhd) { }
+static INLINE bool dhd_check_cfg_in_progress(dhd_pub_t *dhdp)
+{ return FALSE; }
+#endif /* LINUX || linux */
+
+#ifdef BCMDBUS
+extern uint dhd_get_rxsz(dhd_pub_t *pub);
+extern void dhd_set_path(dhd_pub_t *pub);
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+#endif /* BCMDBUS */
+
+#ifdef DHD_L2_FILTER
+extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_dhcp_unicast_status(dhd_pub_t
*dhdp, uint32 idx, int val); +extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val); +#endif /* DHD_L2_FILTER */ + +#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)) +extern int dhd_set_qosmap_up_table(dhd_pub_t *dhdp, uint32 idx, bcm_tlv_t *qos_map_ie); +#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */ + +typedef struct wl_io_pport { + dhd_pub_t *dhd_pub; + uint ifidx; +} wl_io_pport_t; + +typedef struct wl_evt_pport { + dhd_pub_t *dhd_pub; + int *ifidx; + void *pktdata; + uint data_len; + void **data_ptr; + void *raw_event; +} wl_evt_pport_t; + +extern void *dhd_pub_shim(dhd_pub_t *dhd_pub); +#ifdef DHD_FW_COREDUMP +void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length); +#endif /* DHD_FW_COREDUMP */ + +#if defined(SET_XPS_CPUS) +int dhd_xps_cpus_enable(struct net_device *net, int enable); +int custom_xps_map_set(struct net_device *net, char *buf, size_t len); +void custom_xps_map_clear(struct net_device *net); +#endif + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable); +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len); +void custom_rps_map_clear(struct netdev_rx_queue *queue); +#define PRIMARY_INF 0 +#define VIRTUAL_INF 1 +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) +#define RPS_CPUS_MASK "10" +#define RPS_CPUS_MASK_P2P "10" +#define RPS_CPUS_MASK_IBSS "10" +#define RPS_CPUS_WLAN_CORE_ID 4 +#else +#if defined(DHD_TPUT_PATCH) +#define RPS_CPUS_MASK "f" +#define RPS_CPUS_MASK_P2P "f" +#define RPS_CPUS_MASK_IBSS "f" +#else +#define RPS_CPUS_MASK "6" +#define RPS_CPUS_MASK_P2P "6" +#define RPS_CPUS_MASK_IBSS "6" +#endif +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */ +#endif // endif + +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length); + +void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length); + +int dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf, + uint32 len, char *iovar); + +int dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path, + uint32 len, char *iovar); + +int dhd_apply_default_txcap(dhd_pub_t *dhd, char *txcap_path); +int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path); + +#ifdef SHOW_LOGTRACE +int dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, + dhd_event_log_t *event_log); +int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, + uint32 *rodata_start, uint32 *rodata_end); +#ifdef PCIE_FULL_DONGLE +int dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf, + dhd_event_log_t *event_data); +#endif /* PCIE_FULL_DONGLE */ +#endif /* SHOW_LOGTRACE */ + +/* + * control_logtrace: + * "0" -> do not print event log messages in any form + * "1" -> print event log messages as EL + * "2" -> print event log messages as formatted CONSOLE_E if logstrs.bin etc. 
files are available + */ +typedef enum logtrace_ctrl { + LOGTRACE_DISABLE = 0, + LOGTRACE_RAW_FMT = 1, + LOGTRACE_PARSED_FMT = 2 +} logtrace_ctrl_t; + +#define DEFAULT_CONTROL_LOGTRACE LOGTRACE_PARSED_FMT +#ifndef CUSTOM_CONTROL_LOGTRACE +#define CUSTOM_CONTROL_LOGTRACE DEFAULT_CONTROL_LOGTRACE +#endif + +extern uint8 control_logtrace; + +#ifdef BTLOG +int dhd_bt_log_pkt_process(dhd_pub_t *dhdp, void *pktbuf); +#endif /* BTLOG */ + +#if defined(NDIS) +bool dhd_is_device_removed(dhd_pub_t *dhd); +#else +#define dhd_is_device_removed(x) FALSE +#define dhd_os_ind_firmware_stall(x) +#endif /* defined(NDIS) */ + +#if defined(DHD_FW_COREDUMP) +#if defined(linux) || defined(LINUX) +extern void dhd_get_memdump_info(dhd_pub_t *dhd); +#else +static INLINE void dhd_get_memdump_info(dhd_pub_t *dhd) +{ return; } +#endif /* linux || LINUX */ +#endif /* defined(DHD_FW_COREDUMP) */ +#ifdef BCMASSERT_LOG +extern void dhd_get_assert_info(dhd_pub_t *dhd); +#else +static INLINE void dhd_get_assert_info(dhd_pub_t *dhd) { } +#endif /* BCMASSERT_LOG */ + +#if defined(LINUX) || defined(linux) +#define DMAXFER_FREE(dhdp, dmap) dhd_schedule_dmaxfer_free(dhdp, dmap); +#else /* !(LINUX || linux) */ +#define DMAXFER_FREE(dhdp, dmmap) dmaxfer_free_prev_dmaaddr(dhdp, dmmap); +#endif /* linux || LINUX */ + +#if defined(PCIE_FULL_DONGLE) +extern void dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap); +void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap); +#endif /* PCIE_FULL_DONGLE */ + +#define DHD_LB_STATS_NOOP do { /* noop */ } while (0) +#if defined(DHD_LB_STATS) +#include +extern void dhd_lb_stats_init(dhd_pub_t *dhd); +extern void dhd_lb_stats_deinit(dhd_pub_t *dhd); +extern void dhd_lb_stats_reset(dhd_pub_t *dhd); +#ifdef DHD_MEM_STATS +extern uint64 dhd_lb_mem_usage(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +#endif /* DHD_MEM_STATS */ +extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp); +extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); +#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_DEINIT(dhdp) dhd_lb_stats_deinit(dhdp) +/* Reset is called from common layer so it takes dhd_pub_t as argument */ +#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_reset(dhdp) +#define DHD_LB_STATS_CLR(x) (x) = 0U +#define DHD_LB_STATS_INCR(x) (x) = (x) + 1 +#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c) +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \ + { \ + int cpu = get_cpu(); put_cpu(); \ + DHD_LB_STATS_INCR(x[cpu]); \ + } +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x) +#else /* !DHD_LB_STATS */ +#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_DEINIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP +#endif /* !DHD_LB_STATS */ + +#ifdef BCMDBG +extern void dhd_schedule_macdbg_dump(dhd_pub_t *dhdp); +#endif /* BCMDBG */ + +#ifdef DHD_SSSR_DUMP +#ifdef DHD_SSSR_DUMP_BEFORE_SR +#define 
DHD_SSSR_MEMPOOL_SIZE (2 * 1024 * 1024) /* 2MB size */ +#else +#define DHD_SSSR_MEMPOOL_SIZE (1 * 1024 * 1024) /* 1MB size */ +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + +/* used in sssr_dump_mode */ +#define SSSR_DUMP_MODE_SSSR 0 /* dump both *before* and *after* files */ +#define SSSR_DUMP_MODE_FIS 1 /* dump *after* files only */ + +extern int dhd_sssr_mempool_init(dhd_pub_t *dhd); +extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd); +extern int dhd_sssr_dump_init(dhd_pub_t *dhd); +extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd); +extern int dhdpcie_sssr_dump(dhd_pub_t *dhd); +extern void dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path); +extern int dhd_sssr_reg_info_init(dhd_pub_t *dhd); +extern void dhd_sssr_reg_info_deinit(dhd_pub_t *dhd); +extern uint dhd_sssr_dig_buf_size(dhd_pub_t *dhdp); +extern uint dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp); +extern uint dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx); +extern uint dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx); +extern uint dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx); + +#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp) +#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp) +#define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp) +#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp) +#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) dhd_sssr_print_filepath(dhdp, path) +#define DHD_SSSR_REG_INFO_INIT(dhdp) dhd_sssr_reg_info_init(dhdp) +#define DHD_SSSR_REG_INFO_DEINIT(dhdp) dhd_sssr_reg_info_deinit(dhdp) +#else +#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) do { /* noop */ } while (0) +#define DHD_SSSR_REG_INFO_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_REG_INFO_DEINIT(dhdp) do { /* noop */ } while (0) +#endif /* DHD_SSSR_DUMP */ + +#ifdef BCMPCIE +extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); +extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp); +extern void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd); +extern bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd); +#else +#define dhd_prot_debug_info_print(x) +static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub) +{ return 0; } +#endif /* BCMPCIE */ + +#if defined(LINUX) || defined(linux) +void dhd_show_kirqstats(dhd_pub_t *dhd); +#else +static INLINE void dhd_show_kirqstats(dhd_pub_t *dhd) +{ return; } +#endif /* defined(LINUX) || defined(linux) */ + +/* Bitmask used for Join Timeout */ +#define WLC_SSID_MASK 0x01 +#define WLC_WPA_MASK 0x02 + +#if defined(LINUX) || defined(linux) || defined(DHD_EFI) +fw_download_status_t dhd_fw_download_status(dhd_pub_t *dhd_pub); +extern int dhd_start_join_timer(dhd_pub_t *pub); +extern int dhd_stop_join_timer(dhd_pub_t *pub); +extern int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan); +extern int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id); +extern int dhd_start_cmd_timer(dhd_pub_t *pub); +extern int dhd_stop_cmd_timer(dhd_pub_t *pub); +extern int dhd_start_bus_timer(dhd_pub_t *pub); +extern int dhd_stop_bus_timer(dhd_pub_t *pub); +extern uint16 dhd_get_request_id(dhd_pub_t *pub); +extern int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd); +extern void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask); +extern void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void 
dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val);
+extern int dhd_start_timesync_timer(dhd_pub_t *pub);
+extern int dhd_stop_timesync_timer(dhd_pub_t *pub);
+#else
+static INLINE fw_download_status_t dhd_fw_download_status(dhd_pub_t *dhd_pub)
+{ return FW_UNLOADED; }
+static INLINE int dhd_start_join_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_join_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan) { return 0; }
+static INLINE int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id) { return 0; }
+static INLINE int dhd_start_cmd_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_cmd_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_start_bus_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_bus_timer(dhd_pub_t *pub) { return 0; }
+static INLINE uint16 dhd_get_request_id(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd) { return 0; }
+static INLINE void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask) { return; }
+static INLINE void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE int dhd_start_timesync_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_timesync_timer(dhd_pub_t *pub) { return 0; }
+#endif /* LINUX || linux || DHD_EFI */
+
+#ifdef DHD_PKTID_AUDIT_ENABLED
+#if defined(LINUX) || defined(linux)
+void dhd_pktid_error_handler(dhd_pub_t *dhdp);
+#else /* !(LINUX || linux) */
+static INLINE void dhd_pktid_error_handler(dhd_pub_t *dhdp) { ASSERT(0); }
+#endif /* LINUX || linux */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+#if defined(LINUX) || defined(linux)
+extern void dhd_pktid_logging_dump(dhd_pub_t *dhdp);
+#else /* !(LINUX || linux) */
+static INLINE void dhd_pktid_logging_dump(dhd_pub_t *dhdp) { }
+#endif /* LINUX || linux */
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+#define DEFAULT_DHD_RUNTIME_MS 100
+#ifndef CUSTOM_DHD_RUNTIME_MS
+#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
+#endif /* CUSTOM_DHD_RUNTIME_MS */
+
+#ifndef MAX_IDLE_COUNT
+#define MAX_IDLE_COUNT 11
+#endif /* MAX_IDLE_COUNT */
+
+extern bool dhd_runtimepm_state(dhd_pub_t *dhd);
+extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr);
+extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr);
+extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp);
+extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp);
+extern void
dhd_runtime_pm_disable(dhd_pub_t *dhdp); +extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp); +/* Disable the Runtime PM thread and wake up if the bus is already in suspend */ +#define DHD_DISABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_disable(dhdp); \ +} while (0); + +/* Enable the Runtime PM thread */ +#define DHD_ENABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_enable(dhdp); \ +} while (0); + +/* Stop the timer and disable RPM thread */ +#define DHD_STOP_RPM_TIMER(dhdp) \ +do { \ + dhd_os_runtimepm_timer(dhdp, 0); \ + DHD_DISABLE_RUNTIME_PM(dhdp) \ +} while (0); + +/* Start the timer and enable RPM thread */ +#define DHD_START_RPM_TIMER(dhdp) \ +do { \ + dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms); \ + DHD_ENABLE_RUNTIME_PM(dhdp) \ +} while (0); +#else +#define DHD_DISABLE_RUNTIME_PM(dhdp) +#define DHD_ENABLE_RUNTIME_PM(dhdp) +#define DHD_STOP_RPM_TIMER(dhdp) +#define DHD_START_RPM_TIMER(dhdp) +#endif /* DHD_PCIE_RUNTIMEPM */ + +extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info); +extern void dhd_prot_dump_ring_ptrs(void *prot_info); + +#if defined(LINUX) || defined(linux) +#if defined(DHD_TRACE_WAKE_LOCK) +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); +#endif +#endif /* LINUX || linux */ + +extern bool dhd_query_bus_erros(dhd_pub_t *dhdp); +void dhd_clear_bus_errors(dhd_pub_t *dhdp); + +#if (defined(linux) || defined(LINUX)) && defined(CONFIG_64BIT) +#define DHD_SUPPORT_64BIT +#elif defined(DHD_EFI) +#define DHD_SUPPORT_64BIT +/* by default disabled for other platforms, can enable appropriate macro to enable 64 bit support */ +#endif /* (linux || LINUX) && CONFIG_64BIT */ + +#if defined(DHD_EFI) || defined(DHD_ERPOM) +extern void dhd_schedule_reset(dhd_pub_t *dhdp); +#else +static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) {;} +#endif /* DHD_EFI || DHD_ERPOM */ + +extern void init_dhd_timeouts(dhd_pub_t *pub); +extern void deinit_dhd_timeouts(dhd_pub_t *pub); + +typedef enum timeout_resons { + DHD_REASON_COMMAND_TO, + DHD_REASON_JOIN_TO, + DHD_REASON_SCAN_TO, + DHD_REASON_OQS_TO +} timeout_reasons_t; + +#ifdef REPORT_FATAL_TIMEOUTS +void dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason); +#endif +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) +extern int dhd_bus_set_device_wake(struct dhd_bus *bus, bool val); +extern void dhd_bus_dw_deassert(dhd_pub_t *dhd); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ +extern void dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level); +int dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data); +void dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt); +#ifdef DHD_EFI +int dhd_get_max_txbufs(dhd_pub_t *dhdp); +#else +static INLINE int dhd_get_max_txbufs(dhd_pub_t *dhdp) +{ return -1; } +#endif + +#ifdef FILTER_IE +int dhd_read_from_file(dhd_pub_t *dhd); +int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf); +int dhd_get_filter_ie_count(dhd_pub_t *dhd, uint8 *buf); +int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len); +int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8 *buf, int len); +#endif /* FILTER_IE */ + +uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp); + +#ifdef SET_PCIE_IRQ_CPU_CORE +enum { + DHD_AFFINITY_OFF = 0, + DHD_AFFINITY_TPUT_150MBPS, + DHD_AFFINITY_TPUT_300MBPS, + DHD_AFFINITY_LAST +}; + +extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd); +#endif /* SET_PCIE_IRQ_CPU_CORE */ +#if defined(DHD_HANG_SEND_UP_TEST) +extern void dhd_make_hang_with_reason(struct net_device *dev, const char *string_num); +#endif /* 
DHD_HANG_SEND_UP_TEST */ +#ifdef BTLOG +extern void dhd_rx_bt_log(dhd_pub_t *dhdp, void *pkt); +#endif /* BTLOG */ + +#ifdef DHD_RND_DEBUG +int dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len); +int dhd_get_rnd_info(dhd_pub_t *dhd); +#endif /* DHD_RND_DEBUG */ + +#ifdef DHD_WAKE_STATUS +wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp); +#endif /* DHD_WAKE_STATUS */ +extern int dhd_get_random_bytes(uint8 *buf, uint len); + +#if defined(DHD_BLOB_EXISTENCE_CHECK) +extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + +int dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask); +#ifdef DHD_LOG_DUMP +void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type); +void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd); +#endif + +#ifdef DHD_LOG_DUMP +int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, + unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, char *text_hdr, + uint32 sec_type); +int dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf, + log_dump_section_hdr_t *sec_hdr, char *text_hdr, int buflen, uint32 sec_type); +int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, + const void *user_buf, unsigned long *f_pos); +int dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf); +uint32 dhd_log_dump_cookie_len(dhd_pub_t *dhdp); +int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size); +void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp); +void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type); +int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size); +int dhd_logdump_cookie_count(dhd_pub_t *dhdp); +int dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, void *fp, + uint32 len, int type, void *pos); +#if defined(BCMPCIE) +int dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +uint32 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp); +#endif /* BCMPCIE */ +int dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos); +#ifdef DHD_DUMP_PCIE_RINGS +int dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +uint32 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp); +#endif /* DHD_DUMP_PCIE_RINGS */ +#ifdef DHD_STATUS_LOGGING +extern int dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, + const void *user_buf, void *fp, uint32 len, void *pos); +extern uint32 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp); +#endif /* DHD_STATUS_LOGGING */ +int dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, + char *dump_path, int size); +uint32 dhd_get_time_str_len(void); +uint32 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp); +uint32 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp); +uint32 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp); +uint32 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp); 
+uint32 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_dld_len(int log_type);
+void dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr);
+extern char *dhd_log_dump_get_timestamp(void);
+bool dhd_log_dump_ecntr_enabled(void);
+bool dhd_log_dump_rtt_enabled(void);
+void dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len);
+int dhd_get_debug_dump(void *dev, const void *user_buf, uint32 len, int type);
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+int
+dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len);
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+int
+dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len);
+#ifdef DHD_PKT_LOGGING
+extern int dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len);
+extern uint32 dhd_os_get_pktlog_dump_size(struct net_device *dev);
+extern void dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len);
+#endif /* DHD_PKT_LOGGING */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern int dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len);
+extern int dhd_os_get_axi_error_dump_size(struct net_device *dev);
+extern void dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#endif /* DHD_LOG_DUMP */
+
+#define DHD_WORD_TO_LEN_SHIFT (2u) /* WORD to BYTES SHIFT */
+
+#if defined(linux) || defined(LINUX) || defined(DHD_EFI)
+int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, uint32 buf_len, void *pos);
+#else
+static int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf,
+	uint32 buf_len, void *pos)
+{ return 0; }
+#endif /* linux || LINUX || DHD_EFI */
+#if defined(linux) || defined(LINUX)
+#define DHD_PCIE_CONFIG_SAVE(bus) pci_save_state(bus->dev)
+#define DHD_PCIE_CONFIG_RESTORE(bus) pci_restore_state(bus->dev)
+#elif defined(DHD_EFI) || defined(NDIS)
+/* For EFI, the PCIe config space saved during init
+* is the one that should always be restored, so NOP for save
+*/
+#define DHD_PCIE_CONFIG_SAVE(bus)
+#define DHD_PCIE_CONFIG_RESTORE(bus) dhdpcie_config_restore(bus, TRUE)
+#else
+#define DHD_PCIE_CONFIG_SAVE(bus) do { /* noop */ } while (0)
+#define DHD_PCIE_CONFIG_RESTORE(bus) do { /* noop */ } while (0)
+#endif /* linux || LINUX */
+
+typedef struct dhd_pkt_parse {
+	uint32 proto; /* Network layer protocol */
+	uint32 t1; /* n-tuple */
+	uint32 t2;
+} dhd_pkt_parse_t;
+
+/* ========= RING API functions : exposed to others ============= */
+#define DHD_RING_TYPE_FIXED 1
+#define DHD_RING_TYPE_SINGLE_IDX 2
+uint32 dhd_ring_get_hdr_size(void);
+void *dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
+	uint32 elem_cnt, uint32 type);
+void dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring);
+void *dhd_ring_get_first(void *_ring);
+void dhd_ring_free_first(void *_ring);
+void dhd_ring_set_read_idx(void *_ring, uint32 read_idx);
+void dhd_ring_set_write_idx(void *_ring, uint32 write_idx);
+uint32 dhd_ring_get_read_idx(void *_ring);
+uint32 dhd_ring_get_write_idx(void *_ring);
+void *dhd_ring_get_last(void *_ring);
+void *dhd_ring_get_next(void *_ring, void *cur);
+void *dhd_ring_get_prev(void *_ring, void *cur);
+void *dhd_ring_get_empty(void *_ring);
+int dhd_ring_get_cur_size(void *_ring);
+void dhd_ring_lock(void *ring, void *first_ptr, void *last_ptr);
+void dhd_ring_lock_free(void
*ring); +void *dhd_ring_lock_get_first(void *_ring); +void *dhd_ring_lock_get_last(void *_ring); +int dhd_ring_lock_get_count(void *_ring); +void dhd_ring_lock_free_first(void *ring); +void dhd_ring_whole_lock(void *ring); +void dhd_ring_whole_unlock(void *ring); + +#ifdef GDB_PROXY +/** Firmware loaded and GDB proxy may access memory and registers */ +#define DHD_GDB_PROXY_PROBE_ACCESS_ENABLED 0x00000001 +/** Firmware loaded, access to it is enabled but it is not running yet */ +#define DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING 0x00000002 +/** Firmware is running */ +#define DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING 0x00000004 +/** Firmware was started in bootloader mode */ +#define DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE 0x00000008 +/** Host memory code offload present */ +#define DHD_GDB_PROXY_PROBE_HOSTMEM_CODE 0x00000010 + +/* Data structure, returned by "gdb_proxy_probe" iovar */ +typedef struct dhd_gdb_proxy_probe_data { + uint32 data_len; /* Length of data in structure */ + uint32 magic; /* Must contain DHD_IOCTL_MAGIC */ + uint32 flags; /* Set of DHD_GDB_PROXY_PROBE_... bits */ + uint32 last_id; /* 0 or proxy ID last set */ + uint32 hostmem_code_win_base; /* Hostmem code window start in ARM physical address space + */ + uint32 hostmem_code_win_length; /* Hostmem code window length */ +} dhd_gdb_proxy_probe_data_t; +#endif /* GDB_PROXY */ + +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) +void dhd_clear_awdl_stats(dhd_pub_t *dhd); +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ +#ifdef DHD_EFI +extern void dhd_insert_random_mac_addr(dhd_pub_t *dhd, char *nvram_mem, uint *len); +#endif /* DHD_EFI */ + +#ifdef PKT_FILTER_SUPPORT +extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); +#endif + +#ifdef DHD_DUMP_PCIE_RINGS +extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf, + unsigned long *file_posn, bool file_write); +#endif /* DHD_DUMP_PCIE_RINGS */ + +#ifdef EWP_EDL +#define DHD_EDL_RING_SIZE (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_ITEMSIZE) +int dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data, + void *evt_decode_data); +int dhd_edl_mem_init(dhd_pub_t *dhd); +void dhd_edl_mem_deinit(dhd_pub_t *dhd); +void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd); +#define DHD_EDL_MEM_INIT(dhdp) dhd_edl_mem_init(dhdp) +#define DHD_EDL_MEM_DEINIT(dhdp) dhd_edl_mem_deinit(dhdp) +#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) \ + dhd_prot_edl_ring_tcm_rd_update(dhdp) +#else +#define DHD_EDL_MEM_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_EDL_MEM_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) do { /* noop */ } while (0) +#endif /* EWP_EDL */ + +#ifdef BIGDATA_SOFTAP +void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e); +#endif /* BIGDATA_SOFTAP */ + +#ifdef DHD_PKTTS +int dhd_get_pktts_enab(dhd_pub_t *dhdp); +int dhd_set_pktts_enab(dhd_pub_t *dhdp, bool val); + +int dhd_get_pktts_flow(dhd_pub_t *dhdp, void *args, int len); +int dhd_set_pktts_flow(dhd_pub_t *dhdp, void *params, int plen); +pktts_flow_t *dhd_match_pktts_flow(dhd_pub_t *dhdp, uint32 checksum, + uint32 *idx, uint32 *num_config); +#endif /* DHD_PKTTS */ + +#if defined(DHD_H2D_LOG_TIME_SYNC) +void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp); +void dhd_h2d_log_time_sync(dhd_pub_t *dhdp); +#endif /* DHD_H2D_LOG_TIME_SYNC */ +extern void 
dhd_cleanup_if(struct net_device *net);
+
+void dhd_schedule_logtrace(void *dhd_info);
+int dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath);
+
+#if defined(LINUX) || defined(linux)
+/* Configuration of ecounters; APIs to start/stop. Currently supported only for Linux. */
+extern int dhd_ecounter_configure(dhd_pub_t *dhd, bool enable);
+extern int dhd_start_ecounters(dhd_pub_t *dhd);
+extern int dhd_stop_ecounters(dhd_pub_t *dhd);
+extern int dhd_start_event_ecounters(dhd_pub_t *dhd);
+extern int dhd_stop_event_ecounters(dhd_pub_t *dhd);
+#endif /* LINUX || linux */
+
+#define DHD_DUMP_TYPE_NAME_SIZE 32
+#define DHD_DUMP_FILE_PATH_SIZE 256
+#define DHD_DUMP_FILE_COUNT_MAX 5
+#define DHD_DUMP_TYPE_COUNT_MAX 10
+
+#ifdef DHD_DUMP_MNGR
+typedef struct _DFM_elem {
+	char type_name[DHD_DUMP_TYPE_NAME_SIZE];
+	char file_path[DHD_DUMP_FILE_COUNT_MAX][DHD_DUMP_FILE_PATH_SIZE];
+	int file_idx;
+} DFM_elem_t;
+
+typedef struct _dhd_dump_file_manage {
+	DFM_elem_t elems[DHD_DUMP_TYPE_COUNT_MAX];
+} dhd_dump_file_manage_t;
+
+extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname);
+#endif /* DHD_DUMP_MNGR */
+
+#define HD_PREFIX_SIZE 2 /* hexadecimal prefix size */
+#define HD_BYTE_SIZE 2 /* hexadecimal byte size */
+
+#ifdef DHD_HP2P
+extern unsigned long dhd_os_hp2plock(dhd_pub_t *pub);
+extern void dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags);
+#endif /* DHD_HP2P */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern void dhd_axi_error(dhd_pub_t *dhd);
+#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
+extern void dhd_axi_error_dispatch(dhd_pub_t *dhdp);
+#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+#include <dhd_statlog.h>
+#else
+#define ST(x) 0
+#define STDIR(x) 0
+#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
+	do { /* noop */ } while (0)
+#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
+	do { BCM_REFERENCE(cond); } while (0)
+#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
+	do { /* noop */ } while (0)
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef SUPPORT_SET_TID
+enum dhd_set_tid_mode {
+	/* Disable changing TID */
+	SET_TID_OFF = 0,
+	/* Change TID for all UDP frames */
+	SET_TID_ALL_UDP,
+	/* Change TID for UDP frames based on UID */
+	SET_TID_BASED_ON_UID
+};
+#if defined(linux) || defined(LINUX)
+extern void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt);
+#else
+static INLINE void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt) { return; }
+#endif /* linux || LINUX */
+#endif /* SUPPORT_SET_TID */
+
+#ifdef CONFIG_SILENT_ROAM
+extern int dhd_sroam_set_mon(dhd_pub_t *dhd, bool set);
+typedef wlc_sroam_info_v1_t wlc_sroam_info_t;
+#endif /* CONFIG_SILENT_ROAM */
+
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+#define FILE_NAME_HAL_TAG ""
+#else
+#define FILE_NAME_HAL_TAG "_hal" /* The tag name concatenated by HAL */
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+/* Given a number 'n', returns the smallest power of 2 that is >= n */
+static inline uint32 next_larger_power2(uint32 num)
+{
+	if (num) {
+		num--;
+		num |= (num >> 1);
+		num |= (num >> 2);
+		num |= (num >> 4);
+		num |= (num >> 8);
+		num |= (num >> 16);
+	}
+	return (num + 1);
+}
+
+extern struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+uint8 dhd_d11_slices_num_get(dhd_pub_t *dhdp);
+#ifdef WL_AUTO_QOS
+extern void dhd_wl_sock_qos_set_status(dhd_pub_t *dhdp, unsigned long on_off);
+#endif /* WL_AUTO_QOS */
+
+void *dhd_get_roam_evt(dhd_pub_t *dhdp);
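next_larger_power2() above rounds up by bit-smearing: after num--, the cascade of shifted ORs copies the highest set bit into every lower position, leaving all ones below the top bit, so num + 1 is a power of 2. Illustrative values (note that an exact power of 2 maps to itself):

    #include <assert.h>

    static void next_larger_power2_examples(void)
    {
        assert(next_larger_power2(0) == 1);
        assert(next_larger_power2(5) == 8);
        assert(next_larger_power2(8) == 8);      /* exact power of 2 unchanged */
        assert(next_larger_power2(1025) == 2048);
    }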
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+extern int dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab);
+extern uint8 control_he_enab;
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#ifdef DHD_SDTC_ETB_DUMP
+
+#define DHD_SDTC_ETB_MEMPOOL_SIZE (33 * 1024)
+extern int dhd_sdtc_etb_mempool_init(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_mempool_deinit(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_init(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_deinit(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_dump(dhd_pub_t *dhd);
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef DHD_TX_PROFILE
+int dhd_tx_profile_attach(dhd_pub_t *dhdp);
+int dhd_tx_profile_detach(dhd_pub_t *dhdp);
+#endif /* defined (DHD_TX_PROFILE) */
+#if defined(DHD_LB_RXP)
+uint32 dhd_lb_rxp_process_qlen(dhd_pub_t *dhdp);
+/*
+ * To avoid OOM, flow control kicks in when the queued packet size in process_queue
+ * crosses LB_RXP_STOP_THR * rcpl ring size * 1500 (pkt size), and stops
+ * when it drops below LB_RXP_STRT_THR * rcpl ring size * 1500 (pkt size)
+ */
+#define LB_RXP_STOP_THR 200 /* 200 * 1024 * 1500 = 300MB */
+#define LB_RXP_STRT_THR 199 /* 199 * 1024 * 1500 = 291MB */
+#endif /* DHD_LB_RXP */
+#ifdef DHD_SUPPORT_HDM
+extern bool hdm_trigger_init;
+extern int dhd_module_init_hdm(void);
+extern void dhd_hdm_wlan_sysfs_init(void);
+extern void dhd_hdm_wlan_sysfs_deinit(struct work_struct *);
+#define SYSFS_DEINIT_MS 10
+#endif /* DHD_SUPPORT_HDM */
+
+#if defined(linux) || defined(LINUX)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
+void dhd_ctrl_tcp_limit_output_bytes(int level);
+#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
+#endif /* linux || LINUX */
+
+#if defined(__linux__)
+extern void dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay);
+extern void dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata,
+	uint32 pktid, uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake,
+	bool pkt_log);
+#else
+static INLINE void dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay)
+	{ return; }
+static INLINE void dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata,
+	uint32 pktid, uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake,
+	bool pkt_log) { return; }
+#endif /* __linux__ */
+
+#if defined(BCMPCIE) && defined(__linux__)
+extern int dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf, dmaaddr_t *pa, uint32 pktid);
+#else
+static INLINE int dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf, dmaaddr_t *pa,
+	uint32 pktid) { return BCME_OK; }
+#endif /* BCMPCIE && __linux__ */
+
+#ifdef HOST_SFH_LLC
+int dhd_ether_to_8023_hdr(osl_t *osh, struct ether_header *eh, void *p);
+int dhd_8023_llc_to_ether_hdr(osl_t *osh, struct ether_header *eh8023, void *p);
+#endif
+int dhd_schedule_socram_dump(dhd_pub_t *dhdp);
+
+#ifdef DHD_AWDL
+int dhd_ether_to_awdl_llc_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p);
+int dhd_awdl_llc_to_eth_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p);
+#endif /* DHD_AWDL */
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+#ifndef DEBUGABILITY
+#error "DHD_DEBUGABILITY_LOG_DUMP_RING without DEBUGABILITY"
+#endif /* DEBUGABILITY */
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+#ifdef WL_MONITOR
+void dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val);
+#endif /* WL_MONITOR */
+#endif /* _dhd_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_bitpack.c b/bcmdhd.101.10.361.x/dhd_bitpack.c
new file mode 100755
index 0000000..c3aecaf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_bitpack.c
@@ -0,0 +1,228 @@
+/*
+ * Bit packing and Base64 utils for EWP
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <dhd_bitpack.h>
+
+#define BIT_PACK_OVERFLOW 0xFFFFFFFFu
+
+const char* base64_table = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+#define BASE64_MAX_VALUE 63u
+
+#define BASE64_UNIT_LEN 6u
+
+#define BASE64_OFFSET0 0u
+#define BASE64_OFFSET1 6u
+#define BASE64_OFFSET2 12u
+
+#define MASK_UPPER_6BIT 0xfc
+#define MASK_LOWER_6BIT 0x3f
+
+#define MASK_UPPER_4BIT 0xf0
+#define MASK_LOWER_4BIT 0x0f
+
+#define MASK_UPPER_2BIT 0xc0
+#define MASK_LOWER_2BIT 0x03
+
+#define SHIFT_2BIT 2u
+#define SHIFT_4BIT 4u
+#define SHIFT_6BIT 6u
+
+#define BASE64_PADDING_MARGIN 4u
+
+/*
+ * Function: dhd_bit_pack
+ *
+ * Purpose: bit data packing to given buffer
+ *
+ * Input Parameters:
+ * buf		buffer to pack bit data
+ * buf_len	total buffer length
+ * bit_offset	offset in buffer (bitwise)
+ * data		data to pack (max 32 bit)
+ * bit_length	bit length to pack
+ *
+ * Output:
+ * Updated bit offset in buf
+ */
+int32
+dhd_bit_pack(char *buf, int buf_len, int bit_offset, uint32 data, int32 bit_length)
+{
+
+	int32 byte_shift = (bit_offset / 8);
+	int32 local_bit_offset = bit_offset % 8;
+	int32 available_bit = 8 - local_bit_offset;
+	int32 remain_bit = bit_length;
+	uint32 cropped_data;
+	int32 idx;
+	int32 total_byte = BYTE_SIZE(local_bit_offset + bit_length);
+
+	if (bit_length > 32) {
+		/* exceeded max bit length, do nothing */
+		return bit_offset;
+	}
+	if (BYTE_SIZE(bit_offset + bit_length) > buf_len) {
+		/* can't pack more bits if the expected offset
+		 * exceeds the buffer size
+		 */
+		return bit_offset;
+	}
+	if (bit_length < 32 && data >= 1<<bit_length) {
+		/* value too large for the given bit length: clamp to all ones */
+		cropped_data = BIT_PACK_OVERFLOW >> (32 - bit_length);
+	} else {
+		cropped_data = data << (32 - bit_length);
+		cropped_data = cropped_data >> (32 - bit_length);
+	}
+
+	buf += byte_shift;
+
+	remain_bit = bit_length;
+	if (total_byte > 10) {
+		return bit_offset;
+	}
+	for (idx = 0; idx < total_byte; idx++) {
+		char temp_byte = 0x00;
+		if (idx == 0) {
+			local_bit_offset = bit_offset % 8;
+		} else {
+			local_bit_offset = 0;
+		}
+
+		available_bit = 8 - local_bit_offset;
+		remain_bit -= available_bit;
+		if (remain_bit >= 0) {
+			temp_byte = cropped_data >> remain_bit;
+		} else {
+			temp_byte = cropped_data << (-1 * remain_bit);
+		}
+		*buf = *buf | temp_byte;
+		buf++;
+	}
+	bit_offset += bit_length;
+
+	return bit_offset;
+}
+
+static char
+dhd_base64_get_code(char input)
+{
+	if (input > BASE64_MAX_VALUE) {
+		return '=';
+	}
+	return base64_table[(int)input];
+}
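A short worked example of dhd_bit_pack() as defined above (an illustrative caller, not part of the patch): fields are packed MSB-first, so packing 0x5 in 3 bits and then 0xA in 4 bits leaves the bit pattern 101 1010 0 in the first byte.

    #include <stdio.h>

    int main(void)
    {
        char buf[4] = {0};
        int off = 0;

        off = dhd_bit_pack(buf, (int)sizeof(buf), off, 0x5u, 3); /* off = 3 */
        off = dhd_bit_pack(buf, (int)sizeof(buf), off, 0xAu, 4); /* off = 7 */
        /* 101 (3 bits) then 1010 (4 bits), MSB-first: 1011 0100 = 0xB4 */
        printf("buf[0]=0x%02X next_bit=%d\n", (unsigned char)buf[0], off);
        return 0;
    }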
+
+/*
+ * Function: dhd_base64_encode
+ *
+ * Purpose: base64 encoding module which converts 8-bit data to
+ * 6-bit-based base64 format using base64_table
+ * eg: input: hex-123456
+ *	bin-0001|0010|0011|0100|0101|0110
+ * encode every 6 bit :
+ *	bin-000100|100011|010001|010110
+ * base64 code :
+ *	base64-EjRW
+ *
+ * Input Parameters:
+ * in_buf	input buffer
+ * in_buf_len	length of input buffer
+ * out_buf	output buffer
+ * out_buf_len	length of output buffer
+ *
+ * Output:
+ * length of encoded base64 string
+ */
+int32
+dhd_base64_encode(char* in_buf, int32 in_buf_len, char* out_buf, int32 out_buf_len)
+{
+	char* input_pos;
+	char* input_end;
+	char* base64_out;
+	char* base64_out_pos;
+	char* base64_output_end;
+	char current_byte = 0;
+	char masked_byte = 0;
+	int32 estimated_out_len = 0;
+	int32 offset = 0;
+
+	if (!in_buf || !out_buf || in_buf_len == 0 || out_buf_len == 0) {
+		/* wrong input parameters */
+		return 0;
+	}
+
+	input_pos = in_buf;
+	input_end = in_buf + in_buf_len;
+	base64_out = out_buf;
+	base64_out_pos = base64_out;
+	base64_output_end = out_buf + out_buf_len - BASE64_PADDING_MARGIN;
+	estimated_out_len = in_buf_len / 3 * 4;
+
+	if (estimated_out_len > out_buf_len) {
+		/* estimated output length is
+		 * larger than output buffer size
+		 */
+		return 0;
+	}
+
+	while (input_pos != input_end) {
+		if (base64_out_pos > base64_output_end) {
+			/* output buffer size exceeded, finish encoding */
+			break;
+		}
+		if (offset == BASE64_OFFSET0) {
+			current_byte = *input_pos++;
+			masked_byte = (current_byte & MASK_UPPER_6BIT) >> SHIFT_2BIT;
+			*base64_out_pos++ = dhd_base64_get_code(masked_byte);
+			masked_byte = (current_byte & MASK_LOWER_2BIT) << SHIFT_4BIT;
+			offset += BASE64_UNIT_LEN;
+		} else if (offset == BASE64_OFFSET1) {
+			current_byte = *input_pos++;
+			masked_byte |= (current_byte & MASK_UPPER_4BIT) >> SHIFT_4BIT;
+			*base64_out_pos++ = dhd_base64_get_code(masked_byte);
+			masked_byte = (current_byte & MASK_LOWER_4BIT) << SHIFT_2BIT;
+			offset += BASE64_UNIT_LEN;
+		} else if (offset == BASE64_OFFSET2) {
+			current_byte = *input_pos++;
+			masked_byte |= (current_byte & MASK_UPPER_2BIT) >> SHIFT_6BIT;
+			*base64_out_pos++ = dhd_base64_get_code(masked_byte);
+			offset += BASE64_UNIT_LEN;
+			masked_byte = (current_byte & MASK_LOWER_6BIT);
+			*base64_out_pos++ = dhd_base64_get_code(masked_byte);
+			offset = BASE64_OFFSET0;
+		}
+	}
+	if (offset == BASE64_OFFSET1) {
+		*base64_out_pos++ = dhd_base64_get_code(masked_byte);
+		*base64_out_pos++ = '=';
+		*base64_out_pos++ = '=';
+	} else if (offset == BASE64_OFFSET2) {
+		*base64_out_pos++ = dhd_base64_get_code(masked_byte);
+		*base64_out_pos++ = '=';
+	}
+
+	return base64_out_pos - base64_out;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_bitpack.h b/bcmdhd.101.10.361.x/dhd_bitpack.h
new file mode 100755
index 0000000..74bebf3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_bitpack.h
@@ -0,0 +1,33 @@
+/*
+ * Bit packing and Base64 utils for EWP
+ *
+ * Copyright (C) 2020, Broadcom.
diff --git a/bcmdhd.101.10.361.x/dhd_bitpack.h b/bcmdhd.101.10.361.x/dhd_bitpack.h
new file mode 100755
index 0000000..74bebf3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_bitpack.h
@@ -0,0 +1,33 @@
+/*
+ * Bit packing and Base64 utils for EWP
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef __BITPACK_H_
+#define __BITPACK_H_
+
+#define BYTE_SIZE(a) (((a) + 7)/8)
+
+extern int32 dhd_bit_pack(char *buf, int32 buf_len, int bit_offset, uint32 data, int32 bit_length);
+extern int32 dhd_base64_encode(char* in_buf, int32 in_buf_len, char* out_buf, int32 out_buf_len);
+#endif /* __BITPACK_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_bus.h b/bcmdhd.101.10.361.x/dhd_bus.h
new file mode 100755
index 0000000..5618b02
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_bus.h
@@ -0,0 +1,424 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+extern int dbus_up(struct dhd_bus *pub);
+extern int dbus_stop(struct dhd_bus *pub);
+extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* global variable for the bus */
+extern struct dhd_bus *g_dhd_bus;
+
+/* Indicate (dis)interest in finding dongles.
*/ +extern int dhd_bus_register(void); +extern void dhd_bus_unregister(void); + +/* Download firmware image and nvram image */ +extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *fw_path, char *nv_path, char *clm_path, char *conf_path); +#if defined(BT_OVER_SDIO) +extern int dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, char *btfw_path); +#endif /* defined (BT_OVER_SDIO) */ + +/* Stop bus module: clear pending frames, disable data flow */ +extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); + +/* Initialize bus module: prepare for communication w/dongle */ +extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex); + +/* Get the Bus Idle Time */ +extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime); + +/* Set the Bus Idle Time */ +extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time); + +/* Send a data frame to the dongle. Callee disposes of txp. */ +#ifdef BCMPCIE +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx); +#else +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); +#endif + +#ifdef BCMPCIE +extern uint16 dhd_prot_get_rxbufpost_sz(dhd_pub_t *dhd); +extern uint16 dhd_prot_get_h2d_rx_post_active(dhd_pub_t *dhd); +extern uint16 dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t *dhd); +extern void dhdpcie_cto_recovery_handler(dhd_pub_t *dhd); +#endif /* BCMPCIE */ + +/* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. + */ +extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen); +extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen); + +/* Watchdog timer function */ +extern bool dhd_bus_watchdog(dhd_pub_t *dhd); + +extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable); +extern int dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp); +extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub); +extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub); +extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub); + +/* Device console input function */ +extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen); +#ifdef CONSOLE_DPC +extern int dhd_bus_txcons(dhd_pub_t *dhd, uchar *msg, uint msglen); +#endif + +/* Deferred processing for the bus, return TRUE requests reschedule */ +extern bool dhd_bus_dpc(struct dhd_bus *bus); +extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, uint plen, void *arg, uint len, bool set); + +/* Add bus dump output to a buffer */ +extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Clear any bus counters */ +extern void dhd_bus_clearcounts(dhd_pub_t *dhdp); + +/* return the dongle chipid */ +extern uint dhd_bus_chip(struct dhd_bus *bus); + +/* return the dongle chiprev */ +extern uint dhd_bus_chiprev(struct dhd_bus *bus); + +/* Set user-specified nvram parameters. 
*/
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern void *dhd_bus_sih(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+#ifdef BCMSDIO
+extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+/* return sdio io status */
+extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus);
+#else
+#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
+#endif
+
+#define DHD_SET_BUS_STATE_DOWN(_bus) do { \
+    (_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable);
+extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
+    uint32 *slot_num);
+
+#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO))
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+extern int dhd_bus_get_mem_dump(dhd_pub_t *dhdp);
+#else
+#define dhd_bus_mem_dump(x)
+#define dhd_bus_get_mem_dump(x)
+#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */
+
+#ifdef BCMPCIE
+enum {
+    /* Scratch buffer configuration update */
+    D2H_DMA_SCRATCH_BUF,
+    D2H_DMA_SCRATCH_BUF_LEN,
+
+    /* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */
+    H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */
+    H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */
+    D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */
+    D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */
+
+    /* DHD sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */
+    H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */
+    H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */
+    D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */
+    D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */
+
+    /* DHD Indices array buffers and update for: H2D flow ring WR */
+    H2D_IFRM_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */
+    H2D_IFRM_INDX_WR_UPD, /* update H2D WR dma indices buf base addr to dongle */
+
+    /* H2D and D2H Mailbox data update */
+    H2D_MB_DATA,
+    D2H_MB_DATA,
+
+    /* (Common) MsgBuf Ring configuration update */
+    RING_BUF_ADDR,  /* update ring base address to dongle */
+    RING_ITEM_LEN,  /* update ring item size to dongle */
+    RING_MAX_ITEMS, /* update ring max items to dongle */
+
+    /* Update of WR or RD index, for a MsgBuf Ring */
+    RING_RD_UPD, /* update ring read index from/to dongle */
+    RING_WR_UPD, /* update ring write index from/to dongle */
+
+    TOTAL_LFRAG_PACKET_CNT,
+    MAX_HOST_RXBUFS,
+    HOST_API_VERSION,
+#ifdef D2H_MINIDUMP
+    DNGL_TO_HOST_TRAP_ADDR_LEN,
+#endif /* D2H_MINIDUMP */
+    DNGL_TO_HOST_TRAP_ADDR,
+    HOST_SCB_ADDR, /* update host scb base address to dongle */
+};
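The enum values above act as type selectors for the shared-area accessors declared just below: after posting work items to an H2D ring, the host publishes the new write index with RING_WR_UPD and then rings the doorbell. A hedged sketch of that calling pattern — the wrapper function, its name, and the doorbell value are illustrative, not from this patch; only the two declarations and the RING_WR_UPD selector come from this header, and it compiles only inside the driver tree:

```c
#include <typedefs.h>
#include <dhd_bus.h>

/* Illustrative only (assumes a BCMPCIE build): publish a flow ring's new
 * write index to the dongle-visible shared area, then ring the H2D doorbell
 * so the dongle fetches the update.
 */
static void example_publish_wr_index(struct dhd_bus *bus, uint16 ringid,
                                     uint16 new_wr_idx)
{
	/* write the WR index for this ring into the shared area */
	dhd_bus_cmn_writeshared(bus, &new_wr_idx, sizeof(new_wr_idx),
	                        RING_WR_UPD, ringid);

	/* doorbell; the value's meaning is bus-specific */
	dhd_bus_ringbell(bus, 1);
}
```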
+
+typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
+typedef void (*dhd_mb_ring_2_t) (struct dhd_bus *, uint32, bool);
+extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type,
+    uint16 ringid);
+extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value);
+extern void dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake);
+extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid);
+extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus);
+extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
+extern void dhd_bus_start_queue(struct dhd_bus *bus);
+extern void dhd_bus_stop_queue(struct dhd_bus *bus);
+extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
+extern dhd_mb_ring_2_t dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus);
+extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus,
+    void * data, uint16 flowid);
+extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus,
+    void * data, uint8 flowid);
+extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status);
+extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus);
+extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef BCMDBG
+extern void
+dhd_bus_flow_ring_cnt_update(struct dhd_bus *bus, uint16 flowid, uint32 txstatus);
+#endif
+
+#if defined(LINUX) || defined(linux)
+extern int dhdpcie_bus_start_host_dev(struct dhd_bus *bus);
+extern int dhdpcie_bus_stop_host_dev(struct dhd_bus *bus);
+extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_disable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus);
+extern void dhdpcie_bus_free_resource(struct dhd_bus *bus);
+extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus);
+extern int dhd_bus_release_dongle(struct dhd_bus *bus);
+extern int dhd_bus_request_irq(struct dhd_bus *bus);
+extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq);
+extern void dhd_bus_aer_config(struct dhd_bus *bus);
+#else
+static INLINE void dhd_bus_aer_config(struct dhd_bus *bus) { }
+#endif /* LINUX || linux */
+
+extern struct device * dhd_bus_to_dev(struct dhd_bus *bus);
+
+extern int dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
+extern int dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable);
+
+extern void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus);
+
+#ifdef DHD_FW_COREDUMP
+extern int dhd_dongle_mem_dump(void);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern void dhd_bus_idle_tx_ring_suspend(dhd_pub_t *dhd, uint16 flow_ring_id);
+#endif /* IDLE_TX_FLOW_MGMT */
+extern void dhd_bus_handle_mb_data(struct dhd_bus *bus, uint32 d2h_mb_data);
+#endif /* BCMPCIE */
+
+/* dump the device trap information */
+extern void dhd_bus_dump_trap_info(struct dhd_bus *bus, struct bcmstrbuf *b);
+extern void dhd_bus_copy_trap_sig(struct dhd_bus *bus, trap_t *tr);
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+extern void dhd_dump_pcie_rc_regs_for_linkdown(dhd_pub_t *dhd, int *bytes_written);
+void copy_hang_info_linkdown(dhd_pub_t *dhd);
+void copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr);
+void copy_hang_info_trap(dhd_pub_t *dhd);
+#endif /*
WL_CFGVENDOR_SEND_HANG_EVENT */ + +/* Function to set default min res mask */ +extern bool dhd_bus_set_default_min_res_mask(struct dhd_bus *bus); + +/* Function to reset PMU registers */ +extern void dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp); + +extern void dhd_bus_ucode_download(struct dhd_bus *bus); + +extern int dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read); +extern int dhd_get_idletime(dhd_pub_t *dhd); +extern bool dhd_get_rpm_state(dhd_pub_t *dhd); +extern void dhd_set_rpm_state(dhd_pub_t *dhd, bool state); +#ifdef BCMPCIE +extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus); +extern void dhd_bus_intr_count_dump(dhd_pub_t *dhdp); +extern bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp); +extern int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type); +extern bool dhd_bus_check_driver_up(void); +extern int dhd_bus_get_cto(dhd_pub_t *dhdp); +extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); +extern int dhd_bus_get_linkdown(dhd_pub_t *dhdp); +#ifdef CONFIG_ARCH_MSM +extern void dhd_bus_inform_ep_loaded_to_rc(dhd_pub_t *dhdp, bool up); +#endif /* CONFIG_ARCH_MSM */ +extern int dhd_bus_checkdied(struct dhd_bus *bus, char *data, uint size); +#else +#define dhd_bus_dump_console_buffer(x) +static INLINE void dhd_bus_intr_count_dump(dhd_pub_t *dhdp) { UNUSED_PARAMETER(dhdp); } +static INLINE bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) { return 0; } +static INLINE int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) { return 0; } +static INLINE bool dhd_bus_check_driver_up(void) { return FALSE; } +static INLINE void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) { } +static INLINE int dhd_bus_get_linkdown(dhd_pub_t *dhdp) { return 0; } +static INLINE int dhd_bus_get_cto(dhd_pub_t *dhdp) { return 0; } +extern INLINE int dhd_bus_checkdied(struct dhd_bus *bus, char *data, uint size) { return 0; } +#endif /* BCMPCIE */ + +#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS) +void dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, uint8 *ext_trap_data, + void *event_decode_data); +#endif + +extern uint16 dhd_get_chipid(struct dhd_bus *bus); + +#ifdef BTLOG +extern void dhd_bus_rx_bt_log(struct dhd_bus *bus, void* pkt); +#endif /* BTLOG */ + +#ifdef DHD_WAKE_STATUS +extern wake_counts_t* dhd_bus_get_wakecount(dhd_pub_t *dhd); +extern int dhd_bus_get_bus_wake(dhd_pub_t * dhd); +#endif /* DHD_WAKE_STATUS */ + +#ifdef BT_OVER_SDIO +/* + * SDIO layer clock control functions exposed to be called from other layers. + * This is required especially in the case where the BUS is shared between + * BT and SDIO and we have to control the clock. 
The callers of this function
+ * are expected to hold the sdlock
+ */
+int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait);
+int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait);
+void dhdsdio_reset_bt_use_count(struct dhd_bus *bus);
+#endif /* BT_OVER_SDIO */
+
+int dhd_bus_perform_flr(struct dhd_bus *bus, bool force_fail);
+extern bool dhd_bus_get_flr_force_fail(struct dhd_bus *bus);
+
+extern bool dhd_bus_aspm_enable_rc_ep(struct dhd_bus *bus, bool enable);
+extern void dhd_bus_l1ss_enable_rc_ep(struct dhd_bus *bus, bool enable);
+
+bool dhd_bus_is_multibp_capable(struct dhd_bus *bus);
+
+#ifdef BT_OVER_PCIE
+int dhd_bus_pwr_off(dhd_pub_t *dhdp, int reason);
+int dhd_bus_pwr_on(dhd_pub_t *dhdp, int reason);
+int dhd_bus_pwr_toggle(dhd_pub_t *dhdp, int reason);
+bool dhdpcie_is_btop_chip(struct dhd_bus *bus);
+bool dhdpcie_is_bt_loaded(struct dhd_bus *bus);
+int dhdpcie_redownload_fw(dhd_pub_t *dhdp);
+extern void dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus);
+int dhd_bus_perform_flr_with_quiesce(dhd_pub_t *dhdp, struct dhd_bus *bus,
+    bool init_deinit_path);
+#endif /* BT_OVER_PCIE */
+
+#ifdef BCMPCIE
+extern void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp);
+extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd);
+extern void dhdpcie_induce_cbp_hang(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
+extern bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus);
+extern void dhd_bwm_bt_quiesce(struct dhd_bus *bus);
+extern void dhd_bwm_bt_resume(struct dhd_bus *bus);
+
+#ifdef DHD_SSSR_DUMP
+extern int dhd_bus_fis_trigger(dhd_pub_t *dhd);
+extern int dhd_bus_fis_dump(dhd_pub_t *dhd);
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef PCIE_FULL_DONGLE
+extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef D2H_MINIDUMP
+#ifndef DHD_FW_COREDUMP
+/* Minidump depends on DHD_FW_COREDUMP to dump the minidump.
+ * This dependency is intentional, to avoid multiple work queues
+ * dumping the SOCRAM, minidump, etc.
+ */
+#error "Minidump does not work as DHD_FW_COREDUMP is not defined"
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCM_BUZZZ
+/*
+ * In pciedev_shared_t buzz_dbg_ptr and device_trap_debug_buffer_len
+ * are overloaded. So when BCM_BUZZZ is defined MINIDUMP should not be defined or
+ * vice versa.
+ */ +#error "Minidump doesnot work as BCM_BUZZZ is defined" +#endif /* BCM_BUZZZ */ +extern bool dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp); +dhd_dma_buf_t* dhd_prot_get_minidump_buf(dhd_pub_t *dhd); +#endif /* D2H_MINIDUMP */ + +#ifdef DHD_CFG80211_SUSPEND_RESUME +extern void dhd_cfg80211_suspend(dhd_pub_t *dhdp); +extern void dhd_cfg80211_resume(dhd_pub_t *dhdp); +#endif /* DHD_CFG80211_SUSPEND_RESUME */ + +#ifdef DHD_SDTC_ETB_DUMP +extern int dhd_bus_get_etb_info(dhd_pub_t *dhd, uint32 etb_info_addr, etb_info_t *etb_info); +extern int dhd_bus_get_sdtc_etb(dhd_pub_t *dhd, uint8 *sdtc_etb_mempool, + uint addr, uint read_bytes); +#endif /* DHD_SDTC_ETB_DUMP */ + +extern int dhd_socram_dump(struct dhd_bus *bus); + +extern int dhdpcie_get_max_eventbufpost(struct dhd_bus *bus); + +#ifdef DHD_FLOW_RING_STATUS_TRACE +extern void dhd_bus_flow_ring_status_isr_trace(dhd_pub_t *dhd); +extern void dhd_bus_flow_ring_status_dpc_trace(dhd_pub_t *dhd); +#endif /* DHD_FLOW_RING_STATUS_TRACE */ +#endif /* _dhd_bus_h_ */ diff --git a/bcmdhd.101.10.361.x/dhd_buzzz.h b/bcmdhd.101.10.361.x/dhd_buzzz.h new file mode 100755 index 0000000..0e04c75 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_buzzz.h @@ -0,0 +1,224 @@ +#ifndef _DHD_BUZZZ_H_INCLUDED_ +#define _DHD_BUZZZ_H_INCLUDED_ + +*/ + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#if defined(DHD_BUZZZ_LOG_ENABLED) +/* + * Broadcom proprietary logging system. Deleted performance counters. 
+ */ +void dhd_buzzz_attach(void); +void dhd_buzzz_detach(void); +void dhd_buzzz_panic(uint32 crash); +void dhd_buzzz_dump(void); +void dhd_buzzz_log_disable(void); +void dhd_buzzz_crash(void); + +void dhd_buzzz_log0(uint32 evt_id); +void dhd_buzzz_log1(uint32 evt_id, uint32 arg1); +void dhd_buzzz_log2(uint32 evt_id, uint32 arg1, uintptr arg2); + +void dhd_buzzz_fmt_reg(uint32 id, char * fmt); + +extern void* dhd_os_create_buzzz_thread(void); +extern void dhd_os_destroy_buzzz_thread(void *thr_hdl); +extern void dhd_os_sched_buzzz_thread(void *thr_hdl); + +#undef BUZZZ_EVT +#define BUZZZ_EVT(ID) BUZZZ_EVT__## ID, + +#undef BUZZZ_FMT +#define BUZZZ_FMT(ID, format) \ + dhd_buzzz_fmt_reg(BUZZZ_EVT__## ID, "\t" format); + +typedef enum buzzz_evt_id +{ + BUZZZ_EVT__DHD = 100, /* BUZZZ_EVT(DHD) */ + BUZZZ_EVT(GENERAL_LOCK) + BUZZZ_EVT(GENERAL_UNLOCK) + BUZZZ_EVT(FLOWRING_LOCK) + BUZZZ_EVT(FLOWRING_UNLOCK) + BUZZZ_EVT(FLOWID_LOCK) + BUZZZ_EVT(FLOWID_UNLOCK) + + BUZZZ_EVT(START_XMIT_BGN) + BUZZZ_EVT(START_XMIT_END) + BUZZZ_EVT(PROCESS_CTRL_BGN) + BUZZZ_EVT(PROCESS_CTRL_END) + BUZZZ_EVT(UPDATE_TXFLOWRINGS_BGN) + BUZZZ_EVT(UPDATE_TXFLOWRINGS_END) + BUZZZ_EVT(PROCESS_TXCPL_BGN) + BUZZZ_EVT(PROCESS_TXCPL_END) + BUZZZ_EVT(PROCESS_RXCPL_BGN) + BUZZZ_EVT(PROCESS_RXCPL_END) + + BUZZZ_EVT(GET_SRC_ADDR) + BUZZZ_EVT(WRITE_COMPLETE) + BUZZZ_EVT(ALLOC_RING_SPACE) + BUZZZ_EVT(ALLOC_RING_SPACE_RET) + BUZZZ_EVT(ALLOC_RING_SPACE_FAIL) + + BUZZZ_EVT(PKTID_MAP_CLEAR) + BUZZZ_EVT(PKTID_NOT_AVAILABLE) + BUZZZ_EVT(PKTID_MAP_RSV) + BUZZZ_EVT(PKTID_MAP_SAVE) + BUZZZ_EVT(PKTID_MAP_ALLOC) + BUZZZ_EVT(PKTID_MAP_FREE) + BUZZZ_EVT(LOCKER_INUSE_ABORT) + BUZZZ_EVT(BUFFER_TYPE_ABORT1) + BUZZZ_EVT(BUFFER_TYPE_ABORT2) + + BUZZZ_EVT(UPD_READ_IDX) + BUZZZ_EVT(STORE_RXCPLN_RD) + BUZZZ_EVT(EARLY_UPD_RXCPLN_RD) + + BUZZZ_EVT(POST_TXDATA) + BUZZZ_EVT(RETURN_RXBUF) + BUZZZ_EVT(RXBUF_POST) + BUZZZ_EVT(RXBUF_POST_EVENT) + BUZZZ_EVT(RXBUF_POST_IOCTL) + BUZZZ_EVT(RXBUF_POST_CTRL_PKTGET_FAIL) + BUZZZ_EVT(RXBUF_POST_PKTGET_FAIL) + BUZZZ_EVT(RXBUF_POST_PKTID_FAIL) + + BUZZZ_EVT(DHD_DUPLICATE_ALLOC) + BUZZZ_EVT(DHD_DUPLICATE_FREE) + BUZZZ_EVT(DHD_TEST_IS_ALLOC) + BUZZZ_EVT(DHD_TEST_IS_FREE) + + BUZZZ_EVT(DHD_PROT_IOCT_BGN) + BUZZZ_EVT(DHDMSGBUF_CMPLT_BGN) + BUZZZ_EVT(DHDMSGBUF_CMPLT_END) + BUZZZ_EVT(DHD_PROT_IOCT_END) + BUZZZ_EVT(DHD_FILLUP_IOCT_REQST_BGN) + BUZZZ_EVT(DHD_FILLUP_IOCT_REQST_END) + BUZZZ_EVT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_BGN) + BUZZZ_EVT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_END) + BUZZZ_EVT(DHD_PROT_IOCTCMPLT_PROCESS_ONE) + BUZZZ_EVT(DHD_PROT_IOCTCMPLT_PROCESS_TWO) + BUZZZ_EVT(DHD_PROT_EVENT_PROCESS_BGN) + BUZZZ_EVT(DHD_PROT_EVENT_PROCESS_END) + BUZZZ_EVT(DHD_PROT_D2H_SYNC_LIVELOCK) + BUZZZ_EVT(DHD_IOCTL_BUFPOST) + BUZZZ_EVT(DHD_EVENT_BUFPOST) + BUZZZ_EVT(DHD_PROC_MSG_TYPE) + BUZZZ_EVT(DHD_BUS_RXCTL_ONE) + BUZZZ_EVT(DHD_BUS_RXCTL_TWO) +} buzzz_evt_id_t; + +static inline void dhd_buzzz_fmt_init(void) +{ + BUZZZ_FMT(DHD, "DHD events") + BUZZZ_FMT(GENERAL_LOCK, "+++LOCK GENERAL flags<0x%08x>") + BUZZZ_FMT(GENERAL_UNLOCK, "---UNLK GENERAL flags<0x%08x>") + BUZZZ_FMT(FLOWRING_LOCK, "+++LOCK FLOWRING flags<0x%08x>") + BUZZZ_FMT(FLOWRING_UNLOCK, "---UNLK FLOWRING flags<0x%08x>") + BUZZZ_FMT(FLOWID_LOCK, "+++LOCK FLOWID flags<0x%08x>") + BUZZZ_FMT(FLOWID_UNLOCK, "---UNLK FLOWID flags<0x%08x>") + + BUZZZ_FMT(START_XMIT_BGN, "{ dhd_start_xmit() ifidx<%u> skb<0x%p>") + BUZZZ_FMT(START_XMIT_END, "} dhd_start_xmit()") + BUZZZ_FMT(PROCESS_CTRL_BGN, "{ dhd_prot_process_ctrlbuf()") + BUZZZ_FMT(PROCESS_CTRL_END, "} 
dhd_prot_process_ctrlbuf()") + BUZZZ_FMT(UPDATE_TXFLOWRINGS_BGN, "{ dhd_update_txflowrings()"); + BUZZZ_FMT(UPDATE_TXFLOWRINGS_END, "} dhd_update_txflowrings()"); + BUZZZ_FMT(PROCESS_TXCPL_BGN, "{ dhd_prot_process_msgbuf_txcpl()") + BUZZZ_FMT(PROCESS_TXCPL_END, "} dhd_prot_process_msgbuf_txcpl()") + BUZZZ_FMT(PROCESS_RXCPL_BGN, "{ dhd_prot_process_msgbuf_rxcpl()") + BUZZZ_FMT(PROCESS_RXCPL_END, "} dhd_prot_process_msgbuf_rxcpl()") + + BUZZZ_FMT(GET_SRC_ADDR, "bytes<%u> @<0x%p> prot_get_src_addr()") + BUZZZ_FMT(WRITE_COMPLETE, "WR<%u> prot_ring_write_complete") + BUZZZ_FMT(ALLOC_RING_SPACE, "{ dhd_alloc_ring_space nitems<%d>") + BUZZZ_FMT(ALLOC_RING_SPACE_RET, "} dhd_alloc_ring_space() alloc<%d> @<0x%p>") + BUZZZ_FMT(ALLOC_RING_SPACE_FAIL, "FAILURE } dhd_alloc_ring_space() alloc<%d>") + + BUZZZ_FMT(PKTID_MAP_CLEAR, "pktid map clear") + BUZZZ_FMT(PKTID_NOT_AVAILABLE, "FAILURE pktid pool depletion failures<%u>") + BUZZZ_FMT(PKTID_MAP_RSV, "pktid<%u> pkt<0x%p> dhd_pktid_map_reserve()") + BUZZZ_FMT(PKTID_MAP_SAVE, "pktid<%u> pkt<0x%p> dhd_pktid_map_save()") + BUZZZ_FMT(PKTID_MAP_ALLOC, "pktid<%u> pkt<0x%p> dhd_pktid_map_alloc()") + BUZZZ_FMT(PKTID_MAP_FREE, "pktid<%u> pkt<0x%p> dhd_pktid_map_free()") + BUZZZ_FMT(LOCKER_INUSE_ABORT, "ASSERT pktid<%u> pkt<0x%p> locker->inuse") + BUZZZ_FMT(BUFFER_TYPE_ABORT1, "ASSERT pktid<%u> pkt<0x%p> locker->dma") + BUZZZ_FMT(BUFFER_TYPE_ABORT2, "ASSERT locker->dma<%u> buf_type<%u>") + + BUZZZ_FMT(UPD_READ_IDX, "RD<%u> prot_upd_read_idx()") + BUZZZ_FMT(STORE_RXCPLN_RD, "RD<%u> prot_store_rxcpln_read_idx()") + BUZZZ_FMT(EARLY_UPD_RXCPLN_RD, "RD<%u> prot_early_upd_rxcpln_read_idx()") + + BUZZZ_FMT(POST_TXDATA, "flr<%u> pkt<0x%p> dhd_prot_txdata()") + BUZZZ_FMT(RETURN_RXBUF, "cnt<%u> dhd_prot_return_rxbuf()"); + BUZZZ_FMT(RXBUF_POST, "cnt<%u> dhd_prot_rxbufpost()"); + BUZZZ_FMT(RXBUF_POST_EVENT, "event dhd_prot_rxbufpost_ctrl()"); + BUZZZ_FMT(RXBUF_POST_IOCTL, "ioctl dhd_prot_rxbufpost_ctrl()"); + BUZZZ_FMT(RXBUF_POST_CTRL_PKTGET_FAIL, "FAILURE pktget dhd_prot_rxbufpost_ctrl()"); + BUZZZ_FMT(RXBUF_POST_PKTGET_FAIL, "FAILURE pktget loop<%u> dhd_prot_rxbufpost()") + BUZZZ_FMT(RXBUF_POST_PKTID_FAIL, "FAILURE pktid loop<%u> dhd_prot_rxbufpost()") + + BUZZZ_FMT(DHD_DUPLICATE_ALLOC, "ASSERT dhd_pktid_audit(%u) DHD_DUPLICATE_ALLOC") + BUZZZ_FMT(DHD_DUPLICATE_FREE, "ASSERT dhd_pktid_audit(%u) DHD_DUPLICATE_FREE") + BUZZZ_FMT(DHD_TEST_IS_ALLOC, "ASSERT dhd_pktid_audit(%u) DHD_TEST_IS_ALLOC") + BUZZZ_FMT(DHD_TEST_IS_FREE, "ASSERT dhd_pktid_audit(%u) DHD_TEST_IS_FREE") + + BUZZZ_FMT(DHD_PROT_IOCT_BGN, "{ dhd_prot_ioct pending<%u> thread<0x%p>") + BUZZZ_FMT(DHDMSGBUF_CMPLT_BGN, "{ dhdmsgbuf_cmplt bus::retlen<%u> bus::pktid<%u>") + BUZZZ_FMT(DHDMSGBUF_CMPLT_END, "} dhdmsgbuf_cmplt resp_len<%d> pktid<%u>") + BUZZZ_FMT(DHD_PROT_IOCT_END, "} dhd_prot_ioct pending<%u> thread<0x%p>") + BUZZZ_FMT(DHD_FILLUP_IOCT_REQST_BGN, "{ dhd_fillup_ioct_reqst_ptrbased cmd<%u> transid<%u>") + BUZZZ_FMT(DHD_FILLUP_IOCT_REQST_END, + "} dhd_fillup_ioct_reqst_ptrbased transid<%u> bus::pktid<%u>") + BUZZZ_FMT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_BGN, + "{ dhd_msgbuf_rxbuf_post_ioctlresp_bufs cur_posted<%u> bus::pktid<%u>") + BUZZZ_FMT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_END, + "} dhd_msgbuf_rxbuf_post_ioctlresp_bufs cur_posted<%u> bus::pktid<%u>") + BUZZZ_FMT(DHD_PROT_IOCTCMPLT_PROCESS_ONE, + "{ dhd_prot_ioctlcmplt_process cmd<%d> transid<%d>") + BUZZZ_FMT(DHD_PROT_IOCTCMPLT_PROCESS_TWO, + "} dhd_prot_ioctlcmplt_process resplen<%u> pktid<%u>") + BUZZZ_FMT(DHD_PROT_EVENT_PROCESS_BGN, "{ 
dhd_prot_event_process pktid<%u>") + BUZZZ_FMT(DHD_PROT_EVENT_PROCESS_END, "} dhd_prot_event_process buflen<%u> pkt<0x%p>") + BUZZZ_FMT(DHD_PROT_D2H_SYNC_LIVELOCK, " dhd_prot_d2h_sync_livelock seqnum<%u>") + BUZZZ_FMT(DHD_IOCTL_BUFPOST, " dhd_prot_rxbufpost_ctrl ioctl pktid<%u> phyaddr<0x%x>") + BUZZZ_FMT(DHD_EVENT_BUFPOST, " dhd_prot_rxbufpost_ctrl event pktid<%u> phyaddr<0x%x>") + BUZZZ_FMT(DHD_PROC_MSG_TYPE, " dhd_process_msgtype msg<0x%x> epoch<%u>") + BUZZZ_FMT(DHD_BUS_RXCTL_ONE, "dhd_bus_rxctl prev resplen<%u> pktid<%u>") + BUZZZ_FMT(DHD_BUS_RXCTL_TWO, "dhd_bus_rxctl cur resplen<%u> pktid<%u>") +} + +#define BUZZZ_LOG(ID, N, ARG...) dhd_buzzz_log ##N(BUZZZ_EVT__ ##ID, ##ARG) + +#else /* DHD_BUZZZ_LOG_ENABLED */ +/* + * Broadcom logging system - Empty implementaiton + */ + +#define dhd_buzzz_attach() do { /* noop */ } while (0) +#define dhd_buzzz_detach() do { /* noop */ } while (0) +#define dhd_buzzz_panic(x) do { /* noop */ } while (0) +#define BUZZZ_LOG(ID, N, ARG...) do { /* noop */ } while (0) + +#endif /* DHD_BUZZZ_LOG_ENABLED */ + +#endif /* _DHD_BUZZZ_H_INCLUDED_ */ diff --git a/bcmdhd.101.10.361.x/dhd_ccode.c b/bcmdhd.101.10.361.x/dhd_ccode.c new file mode 100755 index 0000000..62b9eec --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_ccode.c @@ -0,0 +1,274 @@ + +#ifdef CCODE_LIST +#ifdef CONFIG_COMPAT +#include +#endif /* COMFIG_COMPAT */ +#include +#include + +#ifdef BCMSDIO +#define CCODE_43438 +#define CCODE_43436 +#define CCODE_43455C0 +#endif +#if defined(BCMSDIO) || defined(BCMPCIE) +#define CCODE_4356A2 +#define CCODE_4359C0 +#endif +#if defined(BCMPCIE) +#define CCODE_4375B4 +#endif +#ifdef BCMDBUS +#define CCODE_4358U +#endif + +#ifdef BCMSDIO +#ifdef CCODE_43438 +const char ccode_43438[] = "RU/13"; +#else +const char ccode_43438[] = ""; +#endif + +#ifdef CCODE_43436 +const char ccode_43436[] = \ +"AE/1 AR/1 AT/1 AU/2 "\ +"BE/1 BG/1 BN/1 "\ +"CA/2 CH/1 CN/38 CY/1 CZ/1 "\ +"DE/3 DK/1 "\ +"EE/1 ES/1 "\ +"FI/1 FR/1 "\ +"GB/1 GR/1 "\ +"HR/1 HU/1 "\ +"ID/5 IE/1 IS/1 IT/1 "\ +"JP/3 "\ +"KR/4 KW/1 "\ +"LI/1 LT/1 LU/1 LV/1 "\ +"MA/1 MT/1 MX/1 "\ +"NL/1 NO/1 "\ +"PL/1 PT/1 PY/1 "\ +"RO/1 RU/5 "\ +"SE/1 SI/1 SK/1 "\ +"TR/7 TW/2 "\ +"US/26 "\ +"XZ/11"; +#else +const char ccode_43436[] = ""; +#endif + +#ifdef CCODE_43455C0 +const char ccode_43455c0[] = \ +"AE/6 AG/2 AI/1 AL/2 AS/12 AT/4 AU/6 AW/2 AZ/2 "\ +"BA/2 BD/1 BE/4 BG/4 BH/4 BM/12 BN/4 BR/2 BS/2 BY/3 "\ +"CA/2 CH/4 CN/38 CO/17 CR/17 CY/4 CZ/4 "\ +"DE/7 DK/4 "\ +"EC/21 EE/4 EG/13 ES/4 ET/2 "\ +"FI/4 FR/5 "\ +"GB/6 GD/2 GF/2 GP/2 GR/4 GT/1 GU/30 "\ +"HK/2 HR/4 HU/4 "\ +"ID/1 IE/5 IL/14 IN/3 IS/4 IT/4 "\ +"JO/3 JP/45 "\ +"KH/2 KR/96 KW/5 KY/3 "\ +"LA/2 LB/5 LI/4 LK/1 LS/2 LT/4 LU/3 LV/4 "\ +"MA/2 MC/1 MD/2 ME/2 MK/2 MN/1 MQ/2 MR/2 MT/4 MU/2 MV/3 MW/1 MX/44 MY/3 "\ +"NI/2 NL/4 NO/4 NZ/4 "\ +"OM/4 "\ +"PA/17 PE/20 PH/5 PL/4 PR/38 PT/4 PY/2 "\ +"Q2/993 "\ +"RE/2 RO/4 RS/2 RU/13 "\ +"SE/4 SI/4 SK/4 SV/25 "\ +"TH/5 TN/1 TR/7 TT/3 TW/65 "\ +"UA/8 US/988 "\ +"VA/2 VE/3 VG/2 VN/4 "\ +"XZ/11 "\ +"YT/2 "\ +"ZA/6"; +#else +const char ccode_43455c0[] = ""; +#endif +#endif + +#ifdef CCODE_4356A2 +const char ccode_4356a2[] = \ +"AE/6 AG/2 AI/1 AL/2 AN/2 AR/21 AS/12 AT/4 AU/6 AW/2 AZ/2 "\ +"BA/2 BD/2 BE/4 BG/4 BH/4 BM/12 BN/4 BR/4 BS/2 BY/3 "\ +"CA/31 CH/4 CN/38 CO/17 CR/17 CY/4 CZ/4 "\ +"DE/7 DK/4 DZ/1 "\ +"EC/21 EE/4 ES/4 ET/2 "\ +"FI/4 FR/5 "\ +"GB/6 GD/2 GF/2 GP/2 GR/4 GT/1 GU/12 "\ +"HK/2 HR/4 HU/4 "\ +"ID/13 IE/5 IL/7 IN/28 IS/4 IT/4 "\ +"JO/3 JP/45 "\ +"KH/2 KR/57 KW/5 KY/3 "\ +"LA/2 LB/5 LI/4 LK/1 LS/2 LT/4 LU/3 LV/4 "\ +"MA/2 MC/1 MD/2 
ME/2 MK/2 MN/1 MO/2 MR/2 MT/4 MQ/2 MU/2 MV/3 MW/1 MX/20 MY/16 "\ +"NI/2 NL/4 NO/4 NP/3 NZ/4 "\ +"OM/4 "\ +"PA/17 PE/20 PG/2 PH/5 PL/4 PR/20 PT/4 PY/2 "\ +"RE/2 RO/4 RS/2 RU/986 "\ +"SE/4 SG/19 SI/4 SK/4 SN/2 SV/19 "\ +"TH/9 TN/1 TR/7 TT/3 TW/1 "\ +"UA/8 UG/2 US/1 UY/1 "\ +"VA/2 VE/3 VG/2 VI/13 VN/4 "\ +"XZ/11 "\ +"YT/2 "\ +"ZM/2 "\ +"E0/32"; +#else +const char ccode_4356a2[] = ""; +#endif + +#ifdef CCODE_4359C0 +const char ccode_4359c0[] = \ +"AD/1 AE/6 AG/2 AI/1 AL/3 AS/12 AT/21 AU/6 AW/2 AZ/8 "\ +"BA/4 BD/1 BE/19 BG/18 BH/4 BM/12 BN/4 BR/2 BS/2 BY/3 "\ +"CA/2 CN/38 CO/17 CR/17 CY/18 CZ/18 "\ +"DE/30 DK/19 "\ +"E0/32 EC/21 EE/18 EG/13 ES/21 ET/2 "\ +"FI/19 FR/21 "\ +"GB/996 GD/2 GE/1 GF/2 GP/2 GR/18 GT/1 GU/30 "\ +"HK/2 HR/18 HU/18 "\ +"ID/1 IE/21 IL/276 IN/3 IS/17 IT/20 "\ +"JO/3 JP/967 "\ +"KH/2 KR/70 KW/5 KY/3 "\ +"LA/2 LB/5 LI/17 LK/1 LS/2 LT/18 LU/18 LV/18 "\ +"MA/2 MC/2 MD/3 ME/5 MK/4 MN/1 MQ/2 MR/2 MT/18 MU/2 MV/3 MW/1 MX/44 MY/3 "\ +"NI/2 NL/19 NO/18 NZ/4 "\ +"OM/4 "\ +"PA/17 PE/20 PH/5 PL/18 PR/38 PT/20 PY/2 "\ +"Q1/947 Q2/993 "\ +"RE/2 RO/18 RS/4 RU/986 "\ +"SE/19 SI/18 SK/18 SM/1 SV/25 "\ +"TH/5 TN/1 TR/18 TT/3 TW/980 "\ +"UA/16 US/988 "\ +"VA/3 VE/3 VG/2 VN/4 "\ +"XZ/11 "\ +"YT/2 "\ +"ZA/6"; +#else +const char ccode_4359c0[] = ""; +#endif + +#ifdef CCODE_4375B4 +const char ccode_4375b4[] = \ +"AE/6 AL/2 AM/1 AN/5 AR/21 AT/4 AU/6 AZ/2 "\ +"BA/2 BE/4 BG/4 BH/4 BN/4 BO/5 BR/17 BY/3 "\ +"CA/2 CH/4 CL/7 CN/38 CO/17 CR/17 CY/4 CZ/4 "\ +"DE/7 DK/4 DZ/2 EC/18 EE/4 EG/13 ES/4 "\ +"FI/4 FR/5 "\ +"GB/6 GR/4 "\ +"HK/999 HN/8 HR/4 HU/4 "\ +"ID/5 IE/5 IL/7 IN/3 IS/4 IT/4 "\ +"JO/3 JP/72 "\ +"KE/1 KR/96 KW/5 KZ/5 "\ +"LA/2 LB/5 LI/4 LK/2 LT/4 LU/4 LV/4 "\ +"MA/7 MC/1 ME/2 MK/2 MO/4 MT/4 MX/20 MY/19 "\ +"NL/4 NO/4 NZ/4 "\ +"OM/4 "\ +"PA/17 PE/20 PH/5 PK/2 PL/4 PR/20 PT/4 "\ +"RO/4 RU/62 "\ +"SA/5 SE/4 SG/12 SI/4 SK/4 SV/17 "\ +"TH/5 TN/1 TR/7 TT/3 TW/65 "\ +"UA/16 US/140 UY/10 "\ +"VE/3 VN/4 "\ +"XZ/11 "\ +"ZA/19"; +#else +const char ccode_4375b4[] = ""; +#endif + +#ifdef CCODE_4358U +const char ccode_4358u[] = \ +"BE/4 BR/4 "\ +"CA/2 CH/4 CN/38 CY/4 "\ +"DE/7 DK/4 "\ +"ES/4 "\ +"FI/4 FR/5 "\ +"GB/6 GR/4 "\ +"HK/2 HU/4 "\ +"IE/5 IL/7 IS/4 IT/4 "\ +"JP/72 "\ +"KE/0 KR/4 "\ +"MY/3 "\ +"NL/4 "\ +"PT/4 "\ +"SA/5 SE/4 SG/0 SZ/0 "\ +"TH/5 TR/7 TW/230 "\ +"US/0 "\ +"VN/4"; +#else +const char ccode_4358u[] = ""; +#endif + +typedef struct ccode_list_map_t { + uint chip; + uint chiprev; + const char *ccode_list; + const char *ccode_ww; +} ccode_list_map_t; + +extern const char ccode_43438[]; +extern const char ccode_43455c0[]; +extern const char ccode_4356a2[]; +extern const char ccode_4359c0[]; +extern const char ccode_4358u[]; + +const ccode_list_map_t ccode_list_map[] = { + /* ChipID Chiprev ccode */ +#ifdef BCMSDIO + {BCM43430_CHIP_ID, 0, ccode_43438, ""}, + {BCM43430_CHIP_ID, 1, ccode_43438, ""}, + {BCM43430_CHIP_ID, 2, ccode_43436, ""}, + {BCM4345_CHIP_ID, 6, ccode_43455c0, "XZ/11"}, + {BCM43454_CHIP_ID, 6, ccode_43455c0, "XZ/11"}, + {BCM4345_CHIP_ID, 9, ccode_43455c0, "XZ/11"}, + {BCM43454_CHIP_ID, 9, ccode_43455c0, "XZ/11"}, + {BCM4354_CHIP_ID, 2, ccode_4356a2, "XZ/11"}, + {BCM4356_CHIP_ID, 2, ccode_4356a2, "XZ/11"}, + {BCM4371_CHIP_ID, 2, ccode_4356a2, "XZ/11"}, + {BCM4359_CHIP_ID, 9, ccode_4359c0, "XZ/11"}, +#endif +#ifdef BCMPCIE + {BCM4354_CHIP_ID, 2, ccode_4356a2, "XZ/11"}, + {BCM4356_CHIP_ID, 2, ccode_4356a2, "XZ/11"}, + {BCM4359_CHIP_ID, 9, ccode_4359c0, "XZ/11"}, + {BCM4375_CHIP_ID, 5, ccode_4375b4, "XZ/11"}, +#endif +#ifdef BCMDBUS + {BCM43569_CHIP_ID, 2, ccode_4358u, "XW/0"}, +#endif 
+};
+
+int
+dhd_ccode_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+    int bcmerror = -1, i;
+    uint chip = dhd->conf->chip, chiprev = dhd->conf->chiprev;
+    const char *ccode_list = NULL, *ccode_ww = NULL;
+    char *pch;
+
+    for (i = 0; i < (int)(sizeof(ccode_list_map)/sizeof(ccode_list_map[0])); i++) {
+        const ccode_list_map_t *row = &ccode_list_map[i];
+        if (row->chip == chip && row->chiprev == chiprev) {
+            ccode_list = row->ccode_list;
+            ccode_ww = row->ccode_ww;
+            break;
+        }
+    }
+
+    if (ccode_list) {
+        pch = strstr(ccode_list, cspec->ccode);
+        if (pch) {
+            cspec->rev = (int)simple_strtol(pch+strlen(cspec->ccode)+1, NULL, 0);
+            bcmerror = 0;
+        }
+    }
+
+    if (bcmerror && ccode_ww && strlen(ccode_ww) >= 4) {
+        memcpy(cspec->ccode, ccode_ww, 2);
+        cspec->rev = (int)simple_strtol(ccode_ww+3, NULL, 0);
+    }
+
+    return bcmerror;
+}
+#endif /* CCODE_LIST */
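The lookup above leans on the "CC/rev" token format of the tables: `strstr()` finds the two-letter country code and `simple_strtol()` parses the revision after the `/`. A stand-alone illustration of the same parse — not part of the patch, in userspace C with `strtol()` standing in for the kernel's `simple_strtol()` and a made-up list:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *ccode_list = "GB/6 RU/13 US/988 XZ/11";
	const char ccode[] = "RU";

	/* find the "RU" token, then parse the revision after "RU/" */
	const char *pch = strstr(ccode_list, ccode);
	if (pch) {
		int rev = (int)strtol(pch + strlen(ccode) + 1, NULL, 0);
		printf("%s -> rev %d\n", ccode, rev);  /* prints: RU -> rev 13 */
	}
	return 0;
}
```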
diff --git a/bcmdhd.101.10.361.x/dhd_cdc.c b/bcmdhd.101.10.361.x/dhd_cdc.c
new file mode 100755
index 0000000..0152c18
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_cdc.c
@@ -0,0 +1,1035 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#ifdef EXT_STA
+#include <siutils.h>
+#include <wlc_cfg.h>
+#include <wlc_pub.h>
+#endif /* EXT_STA */
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+#ifdef BCMDBUS
+#include <dbus.h>
+#endif /* BCMDBUS */
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN (24+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE
+                 * defined in dhd_sdio.c (amount of header that might be added)
+                 * plus any space that might be needed for alignment padding.
+                 */
+#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+                 * round off at the end of buffer
+                 */
+
+/* This value is from Legacy chipsets */
+#define DEFAULT_WLC_API_VERSION_MAJOR 3
+#define DEFAULT_WLC_API_VERSION_MINOR 0
+
+typedef struct dhd_prot {
+    uint16 reqid;
+    uint8 pending;
+    uint32 lastcmd;
+#ifdef BCMDBUS
+    uint ctl_completed;
+#endif /* BCMDBUS */
+    uint8 bus_header[BUS_HEADER_LEN];
+    cdc_ioctl_t msg;
+    unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+uint16
+dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
+{
+    /* SDIO does not have ioctl_trans_id yet, so return -1 */
+    return -1;
+}
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+#ifdef BCMDBUS
+    int timeout = 0;
+#endif /* BCMDBUS */
+    int err = 0;
+    dhd_prot_t *prot = dhd->prot;
+    int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    DHD_OS_WAKE_LOCK(dhd);
+
+    /* NOTE : cdc->msg.len holds the desired length of the buffer to be
+     * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+     * is actually sent to the dongle
+     */
+    if (len > CDC_MAX_MSG_SIZE)
+        len = CDC_MAX_MSG_SIZE;
+
+    /* Send request */
+#ifdef BCMDBUS
+    prot->ctl_completed = FALSE;
+    err = dbus_send_ctl(dhd->bus, (void *)&prot->msg, len);
+    if (err) {
+        DHD_ERROR(("dbus_send_ctl error=0x%x\n", err));
+        DHD_OS_WAKE_UNLOCK(dhd);
+        return err;
+    }
+#else
+    err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+#endif /* BCMDBUS */
+
+#ifdef BCMDBUS
+    timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+    if ((!timeout) || (!prot->ctl_completed)) {
+        DHD_ERROR(("Txctl timeout %d ctl_completed %d\n",
+            timeout, prot->ctl_completed));
+        DHD_ERROR(("Txctl wait timed out\n"));
+        err = -1;
+    }
+#endif /* BCMDBUS */
+#if defined(BCMDBUS) && defined(INTR_EP_ENABLE)
+    /* If the ctl write is successfully completed, wait for an acknowledgement
+     * that indicates that it is now ok to do ctl read from the dongle
+     */
+    if (err != -1) {
+        prot->ctl_completed = FALSE;
+        if (dbus_poll_intr(dhd->dbus)) {
+            DHD_ERROR(("dbus_poll_intr not submitted\n"));
+        } else {
+            /* interrupt polling is successfully submitted. Wait for dongle to send
+             * interrupt
+             */
+            timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+            if (!timeout) {
+                DHD_ERROR(("intr poll wait timed out\n"));
+            }
+        }
+    }
+#endif /* defined(BCMDBUS) && defined(INTR_EP_ENABLE) */
+    DHD_OS_WAKE_UNLOCK(dhd);
+    return err;
+}
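The request/response matching used by this protocol layer hangs off a 16-bit request id packed into the ioctl `flags` word: the sender stores `++prot->reqid` there, and the completion path loops until a response carrying the same id arrives. A stand-alone sketch of that id round-trip — not from the patch; the mask/shift values are reproduced on the assumption that they match `bcmcdc.h`:

```c
#include <stdio.h>
#include <stdint.h>

/* assumed to match bcmcdc.h */
#define CDCF_IOC_ID_MASK  0xFFFF0000u
#define CDCF_IOC_ID_SHIFT 16
#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)

int main(void)
{
	uint16_t reqid = 41;   /* prot->reqid before the pre-increment */
	uint32_t flags;

	/* sender: msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT) */
	flags = (uint32_t)(++reqid) << CDCF_IOC_ID_SHIFT;

	/* receiver: dhdcdc_cmplt() loops until the response id matches */
	printf("sent id %u, parsed id %lu, match: %s\n",
	       (unsigned)reqid, (unsigned long)CDC_IOC_ID(flags),
	       CDC_IOC_ID(flags) == reqid ? "yes" : "no");
	return 0;
}
```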
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+#ifdef BCMDBUS
+    int timeout = 0;
+#endif /* BCMDBUS */
+    int ret;
+    int cdc_len = len + sizeof(cdc_ioctl_t);
+    dhd_prot_t *prot = dhd->prot;
+
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    do {
+#ifdef BCMDBUS
+        prot->ctl_completed = FALSE;
+        ret = dbus_recv_ctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+        if (ret) {
+            DHD_ERROR(("dbus_recv_ctl error=0x%x(%d)\n", ret, ret));
+            goto done;
+        }
+        timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+        if ((!timeout) || (!prot->ctl_completed)) {
+            DHD_ERROR(("Rxctl timeout %d ctl_completed %d\n",
+                timeout, prot->ctl_completed));
+            ret = -ETIMEDOUT;
+            goto done;
+        }
+
+        /* XXX FIX: Must return cdc_len, not len, because after query_ioctl()
+         * it subtracts sizeof(cdc_ioctl_t); The other approach is
+         * to have dbus_recv_ctl() return actual len.
+         */
+        ret = cdc_len;
+#else
+        ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+#endif /* BCMDBUS */
+        if (ret < 0)
+            break;
+    } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+    /* update ret to len on success */
+    if (ret == cdc_len) {
+        ret = len;
+    }
+
+#ifdef BCMDBUS
+done:
+#endif /* BCMDBUS */
+    return ret;
+}
+
+/* XXX: due to overlays this should not be called directly; call dhd_wl_ioctl_cmd() instead */
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+    dhd_prot_t *prot = dhd->prot;
+    cdc_ioctl_t *msg = &prot->msg;
+    int ret = 0, retries = 0;
+    uint32 id, flags = 0;
+
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+    DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+    /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+    if (cmd == WLC_GET_VAR && buf)
+    {
+        if (!strcmp((char *)buf, "bcmerrorstr"))
+        {
+            strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), len);
+            goto done;
+        }
+        else if (!strcmp((char *)buf, "bcmerror"))
+        {
+            *(int *)buf = dhd->dongle_error;
+            goto done;
+        }
+    }
+
+    memset(msg, 0, sizeof(cdc_ioctl_t));
+
+#ifdef BCMSPI
+    /* The 11-bit gSPI bus allows at most 2048 bytes of data per transfer, so
+     * restrict 'len', which can be up to 8KB for various 'get' commands, to
+     * 2000 bytes; 48 bytes are left for sw headers and misc.
+     */
+    if (len > 2000) {
+        DHD_ERROR(("dhdcdc_query_ioctl: len is truncated to 2000 bytes\n"));
+        len = 2000;
+    }
+#endif /* BCMSPI */
+    msg->cmd = htol32(cmd);
+    msg->len = htol32(len);
+    msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+    CDC_SET_IF_IDX(msg, ifidx);
+    /* add additional action bits */
+    action &= WL_IOCTL_ACTION_MASK;
+    msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+    msg->flags = htol32(msg->flags);
+
+    if (buf)
+        memcpy(prot->buf, buf, len);
+
+    if ((ret = dhdcdc_msg(dhd)) < 0) {
+        if (!dhd->hang_was_sent)
+            DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+        goto done;
+    }
+
+retry:
+    /* wait for interrupt and get first fragment */
+    if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+        goto done;
+
+    flags = ltoh32(msg->flags);
+    id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+    if ((id < prot->reqid) && (++retries < RETRIES))
+        goto retry;
+    if (id != prot->reqid) {
+        DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+            dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+        ret = -EINVAL;
+        goto done;
+    }
+
+    /* Copy info buffer */
+    if (buf)
+    {
+        if (ret < (int)len)
+            len = ret;
+        memcpy(buf, (void*) prot->buf, len);
+    }
+
+    /* Check the ERROR flag */
+    if (flags & CDCF_IOC_ERROR)
+    {
+        ret = ltoh32(msg->status);
+        /* Cache error from dongle */
+        dhd->dongle_error = ret;
+    }
+
+done:
+    return ret;
+}
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+/* XXX: due to overlays this should not be called directly; call dhd_wl_ioctl_cmd() instead */
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+    dhd_prot_t *prot = dhd->prot;
+    cdc_ioctl_t *msg = &prot->msg;
+    int ret = 0;
+    uint32 flags, id;
+
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+    DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+    if (dhd->busstate == DHD_BUS_DOWN) {
+        DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+        return -EIO;
+    }
+
+    /* don't talk to the dongle if fw is about to be reloaded */
+    if (dhd->hang_was_sent) {
+        DHD_ERROR(("%s: HANG was sent up earlier.
Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + if (cmd == WLC_SET_PM) { +#ifdef DHD_PM_CONTROL_FROM_FILE + if (g_pm_control == TRUE) { + DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n", + __FUNCTION__, buf ? *(char *)buf : 0)); + goto done; + } +#endif /* DHD_PM_CONTROL_FROM_FILE */ +#ifdef DHD_PM_OVERRIDE + { + extern bool g_pm_override; + if (g_pm_override == TRUE) { + DHD_ERROR(("%s: PM override SET PM ignored!(Requested:%d)\n", + __FUNCTION__, buf ? *(char *)buf : 0)); + goto done; + } + } +#endif /* DHD_PM_OVERRIDE */ +#if defined(WLAIBSS) + if (dhd->op_mode == DHD_FLAG_IBSS_MODE) { + DHD_ERROR(("%s: SET PM ignored for IBSS!(Requested:%d)\n", + __FUNCTION__, buf ? *(char *)buf : 0)); + goto done; + } +#endif /* WLAIBSS */ + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0)); + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET; + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret)); + goto done; + } + + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Copy fw response to buf */ + if (buf) { + ASSERT(ret == len); + memcpy(buf, (void*) prot->buf, len); + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + +#ifdef BCMDBUS +int +dhd_prot_ctl_complete(dhd_pub_t *dhd) +{ + dhd_prot_t *prot; + + if (dhd == NULL) + return BCME_ERROR; + + prot = dhd->prot; + + ASSERT(prot); + prot->ctl_completed = TRUE; + dhd_os_ioctl_resp_wake(dhd); + return 0; +} +#endif /* BCMDBUS */ + +/* XXX: due to overlays this should not be called directly; call dhd_wl_ioctl() instead */ +int +dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + dhd_prot_t *prot = dhd->prot; + int ret = -1; + uint8 action; + static int error_cnt = 0; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n", + __FUNCTION__, dhd->busstate, dhd->hang_was_sent)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + if (prot->pending == TRUE) { + DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n", + ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, + (unsigned long)prot->lastcmd)); + if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { + DHD_TRACE(("iovar cmd=%s\n", buf ? 
(char*)buf : "\0")); + } + goto done; + } + + prot->pending = TRUE; + prot->lastcmd = ioc->cmd; + action = ioc->set; + if (action & WL_IOCTL_ACTION_SET) + ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + else { + ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) + ioc->used = ret - sizeof(cdc_ioctl_t); + } + // terence 20130805: send hang event to wpa_supplicant + if (ret == -EIO) { + error_cnt++; + if (error_cnt > 2) + ret = -ETIMEDOUT; + } else + error_cnt = 0; + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) + ret = 0; + else { + cdc_ioctl_t *msg = &prot->msg; + ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */ + } + + /* Intercept the wme_dp ioctl here */ + if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + + prot->pending = FALSE; + +done: + + return ret; +} + +int +dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +void +dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + if (!dhdp || !dhdp->prot) { + return; + } + + bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); +#ifdef PROP_TXSTATUS + dhd_wlfc_dump(dhdp, strbuf); +#endif +} + +/* The FreeBSD PKTPUSH could change the packet buf pinter + so we need to make it changable +*/ +#define PKTBUF pktbuf +void +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ +#ifdef BDC + struct bdc_header *h; +#endif /* BDC */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Push BDC header used to convey priority for buses that don't */ + + PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN); + + h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF); + + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(PKTBUF)) + h->flags |= BDC_FLAG_SUM_NEEDED; + +#ifdef EXT_STA + /* save pkt encryption exemption info for dongle */ + h->flags &= ~BDC_FLAG_EXEMPT; + h->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(pktbuf)) & BDC_FLAG_EXEMPT); +#endif /* EXT_STA */ + + h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = 0; +#endif /* BDC */ + BDC_SET_IF_IDX(h, ifidx); +} +#undef PKTBUF /* Only defined in the above routine */ + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + uint hdrlen = 0; +#ifdef BDC + /* Length of BDC(+WLFC) headers pushed */ + hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4); +#endif + return hdrlen; +} + +int +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info, + uint *reorder_info_len) +{ +#ifdef BDC + struct bdc_header *h; +#endif + uint8 data_offset = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + if (reorder_info_len) + *reorder_info_len = 0; + /* Pop BDC header used to convey priority for buses that don't */ + + if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + if (!ifidx) { + /* for tx packet, skip the analysis */ + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); + goto exit; + } + + *ifidx = BDC_GET_IF_IDX(h); + + if (((h->flags & 
BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { + DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1) + h->dataOffset = 0; + else + return BCME_ERROR; + } + + if (h->flags & BDC_FLAG_SUM_GOOD) { + DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + PKTSETSUMGOOD(pktbuf, TRUE); + } + + PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK)); + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); +#endif /* BDC */ + +#ifdef PROP_TXSTATUS + if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) { + /* + - parse txstatus only for packets that came from the firmware + */ + dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2), + reorder_buf_info, reorder_info_len); + +#ifdef BCMDBUS +#ifndef DHD_WLFC_THREAD + dhd_wlfc_commit_packets(dhd, + (f_commitpkt_t)dhd_bus_txdata, dhd->bus, NULL, FALSE); +#endif /* DHD_WLFC_THREAD */ +#endif /* BCMDBUS */ + } +#endif /* PROP_TXSTATUS */ + +exit: + PKTPULL(dhd->osh, pktbuf, (data_offset << 2)); + return 0; +} + +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + dhd_prot_t *cdc; + + if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(cdc, 0, sizeof(dhd_prot_t)); + + /* ensure that the msg buf directly follows the cdc msg struct */ + if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) { + DHD_ERROR(("dhd_prot_t is not correctly defined\n")); + goto fail; + } + + dhd->prot = cdc; +#ifdef BDC + dhd->hdrlen += BDC_HEADER_LEN; +#endif + dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN; + return 0; + +fail: + if (cdc != NULL) + DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t)); + return BCME_NOMEM; +} + +/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? 
*/ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ +#ifdef PROP_TXSTATUS + dhd_wlfc_deinit(dhd); +#endif + DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t)); + dhd->prot = NULL; +} + +void +dhd_prot_dstats(dhd_pub_t *dhd) +{ + /* copy bus stats */ + + dhd->dstats.tx_packets = dhd->tx_packets; + dhd->dstats.tx_errors = dhd->tx_errors; + dhd->dstats.rx_packets = dhd->rx_packets; + dhd->dstats.rx_errors = dhd->rx_errors; + dhd->dstats.rx_dropped = dhd->rx_dropped; + dhd->dstats.multicast = dhd->rx_multicast; + return; +} + +int +dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + char buf[128]; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifndef OEM_ANDROID + /* Get the device MAC address */ + strcpy(buf, "cur_etheraddr"); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0); + if (ret < 0) + goto done; + memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); +#endif /* OEM_ANDROID */ +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(dhd); +#endif /* DHD_FW_COREDUMP */ + +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) + goto done; +#if defined(BCMDBUS) + if (dhd_download_fw_on_driverload) { + dhd_conf_reset(dhd); + dhd_conf_set_chiprev(dhd, revinfo.chipnum, revinfo.chiprev); + dhd_conf_preinit(dhd); + dhd_conf_read_config(dhd, dhd->conf_path); + } +#endif /* BCMDBUS */ + + /* query for 'wlc_ver' to get version info from firmware */ + /* memsetting to zero */ + bzero(buf, sizeof(buf)); + ret = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf)); + if (ret == 0) { + ret = BCME_BUFTOOSHORT; + goto done; + } + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0); + if (ret == BCME_UNSUPPORTED) { + dhd->wlc_ver_major = DEFAULT_WLC_API_VERSION_MAJOR; + dhd->wlc_ver_minor = DEFAULT_WLC_API_VERSION_MINOR; + } else if (ret < 0) { + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + goto done; + } else { + dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major; + dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor; + } + DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor)); + +#if defined(BCMDBUS) && defined(BCMDHDUSB) + /* dbus_set_revinfo(dhd->dbus, revinfo.chipnum, revinfo.chiprev); */ +#endif /* BCMDBUS && BCMDHDUSB */ + + DHD_SSSR_DUMP_INIT(dhd); + + dhd_process_cid_mac(dhd, TRUE); + ret = dhd_preinit_ioctls(dhd); + dhd_process_cid_mac(dhd, FALSE); + + /* Always assumes wl for now */ + dhd->iswl = TRUE; + + /* XXX Could use WLC_GET_REVINFO to get driver version? 
*/
+done:
+    return ret;
+}
+
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+    return BCME_OK;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+/* Nothing to do for CDC */
+}
+
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+    uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+    void *plast = NULL, *p;
+    uint32 pkt_cnt = 0;
+
+    if (ptr->pend_pkts == 0) {
+        DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+        *pplast = NULL;
+        *pkt_count = 0;
+        *pkt = NULL;
+        return;
+    }
+    do {
+        p = (void *)(ptr->p[start]);
+        ptr->p[start] = NULL;
+
+        if (p != NULL) {
+            if (plast == NULL)
+                *pkt = p;
+            else
+                PKTSETNEXT(osh, plast, p);
+
+            plast = p;
+            pkt_cnt++;
+        }
+        start++;
+        if (start > ptr->max_idx)
+            start = 0;
+    } while (start != end);
+    *pplast = plast;
+    *pkt_count = pkt_cnt;
+    ptr->pend_pkts -= (uint8)pkt_cnt;
+}
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+    void **pkt, uint32 *pkt_count)
+{
+    uint8 flow_id, max_idx, cur_idx, exp_idx;
+    struct reorder_info *ptr;
+    uint8 flags;
+    void *cur_pkt, *plast = NULL;
+    uint32 cnt = 0;
+
+    if (pkt == NULL) {
+        if (pkt_count != NULL)
+            *pkt_count = 0;
+        return 0;
+    }
+
+    flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+    flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+    DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+        reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+        reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+        reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+    /* validate flags and flow id */
+    if (flags == 0xFF) {
+        DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+        *pkt_count = 1;
+        return 0;
+    }
+
+    cur_pkt = *pkt;
+    *pkt = NULL;
+
+    ptr = dhd->reorder_bufs[flow_id];
+    if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+        uint32 buf_size = sizeof(struct reorder_info);
+
+        DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+            __FUNCTION__, flow_id));
+
+        if (ptr == NULL) {
+            DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n",
+                __FUNCTION__, flow_id));
+            *pkt_count = 1;
+            *pkt = cur_pkt;
+            return 0;
+        }
+
+        dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+            ptr->exp_idx, ptr->exp_idx);
+        /* set it to the last packet */
+        if (plast) {
+            PKTSETNEXT(dhd->osh, plast, cur_pkt);
+            cnt++;
+        }
+        else {
+            if (cnt != 0) {
+                DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+                    __FUNCTION__, cnt));
+            }
+            *pkt = cur_pkt;
+            cnt = 1;
+        }
+        buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+        MFREE(dhd->osh, ptr, buf_size);
+        dhd->reorder_bufs[flow_id] = NULL;
+        *pkt_count = cnt;
+        return 0;
+    }
+    /* all the other cases depend on the existence of the reorder struct for that flow id */
+    if (ptr == NULL) {
+        uint32 buf_size_alloc = sizeof(reorder_info_t);
+        max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+        buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+        /* allocate space to hold the buffers, index etc */
+
+        DHD_REORDER(("%s: alloc buffer of size %d size, reorder info id %d, maxidx %d\n",
+            __FUNCTION__, buf_size_alloc, flow_id, max_idx));
+        ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+        if (ptr == NULL) {
+            DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+            *pkt_count = 1;
+            return 0;
+        }
+        bzero(ptr, buf_size_alloc);
+        dhd->reorder_bufs[flow_id] = ptr;
+        ptr->p = (void *)(ptr+1);
+        ptr->max_idx = max_idx;
+    }
+    /* XXX: validate cur, exp indices */
+    if (flags &
WLHOST_REORDERDATA_NEW_HOLE) { + DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__)); + if (ptr->pend_pkts) { + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, ptr->exp_idx); + ptr->pend_pkts = 0; + } + ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]; + ptr->p[ptr->cur_idx] = cur_pkt; + ptr->pend_pkts++; + *pkt_count = cnt; + } + else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) { + cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) { + /* still in the current hole */ + /* enqueue the current on the buffer chain */ + if (ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + ptr->cur_idx = cur_idx; + DHD_REORDER(("%s: fill up a hole..pending packets is %d\n", + __FUNCTION__, ptr->pend_pkts)); + *pkt_count = 0; + *pkt = NULL; + } + else if (ptr->exp_idx == cur_idx) { + /* got the right one ..flush from cur to exp and update exp */ + DHD_REORDER(("%s: got the right one now, cur_idx is %d\n", + __FUNCTION__, cur_idx)); + if (ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: Error buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + + ptr->cur_idx = cur_idx; + ptr->exp_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + cur_idx, exp_idx); + *pkt_count = cnt; + DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n", + __FUNCTION__, cnt, ptr->pend_pkts)); + } + else { + uint8 end_idx; + bool flush_current = FALSE; + /* both cur and exp are moved now .. 
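+ flush the pending run between the old and new exp_idx (or the whole
+ window if FLUSH_ALL is set); if the new exp_idx lands right after
+ cur_idx the current packet joins the flushed chain, otherwise it is
+ parked at cur_idx.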
*/ + DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n", + __FUNCTION__, flow_id, ptr->cur_idx, cur_idx, + ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + /* flush pkts first */ + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, end_idx); + + if (cur_idx == ptr->max_idx) { + if (exp_idx == 0) + flush_current = TRUE; + } else { + if (exp_idx == cur_idx + 1) + flush_current = TRUE; + } + if (flush_current) { + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + } + else { + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + } + ptr->exp_idx = exp_idx; + ptr->cur_idx = cur_idx; + *pkt_count = cnt; + } + } + else { + uint8 end_idx; + /* no real packet but update to exp_seq...that means explicit window move */ + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n", + __FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx); + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + *pkt_count = cnt; + /* set the new expected idx */ + ptr->exp_idx = exp_idx; + } + return 0; +} diff --git a/bcmdhd.101.10.361.x/dhd_cfg80211.c b/bcmdhd.101.10.361.x/dhd_cfg80211.c new file mode 100755 index 0000000..59258ec --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_cfg80211.c @@ -0,0 +1,597 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +#include +#include + +#include +#include +#include +#include + +#ifdef PKT_FILTER_SUPPORT +#include +#include +#endif + +#ifdef PKT_FILTER_SUPPORT +extern uint dhd_pkt_filter_enable; +extern uint dhd_master_mode; +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +#endif + +static int dhd_dongle_up = FALSE; +#define PKT_FILTER_BUF_SIZE 64 + +#if defined(BCMDONGLEHOST) +#include +#include +#include +#include +#include +#include +#endif /* defined(BCMDONGLEHOST) */ + +static s32 wl_dongle_up(struct net_device *ndev); +static s32 wl_dongle_down(struct net_device *ndev); +#ifndef OEM_ANDROID +#ifndef CUSTOMER_HW6 +static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode); +#ifdef BCMSDIO /* glomming is a sdio specific feature */ +static s32 wl_dongle_glom(struct net_device *ndev, s32 glom, u32 dongle_align); +#endif +static s32 wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, s32 scan_unassoc_time); +static s32 wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol); +static s32 wl_pattern_atoh(s8 *src, s8 *dst); +static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode); +#endif /* !CUSTOMER_HW6 */ +#endif /* !OEM_ANDROID */ + +/** + * Function implementations + */ + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg) +{ + struct net_device *ndev; + s32 err = 0; + + WL_TRACE(("In\n")); + if (!dhd_dongle_up) { + WL_INFORM_MEM(("Dongle is already down\n")); + err = 0; + goto done; + } + ndev = bcmcfg_to_prmry_ndev(cfg); + wl_dongle_down(ndev); +done: + return err; +} + +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode |= val; + WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode)); + + return 0; +} + +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE); + WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode)); + + return 0; +} +#ifdef WL_STATIC_IF +int32 +wl_cfg80211_update_iflist_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, + int ifidx, uint8 *addr, int bssidx, char *name, int if_state) +{ + return dhd_update_iflist_info(cfg->pub, ndev, ifidx, addr, bssidx, name, if_state); +} +#endif /* WL_STATIC_IF */ +struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, const char *dngl_name) +{ + return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name); +} + +int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, + int ifidx, struct net_device* ndev, bool rtnl_lock_reqd) +{ + return dhd_register_if(cfg->pub, ifidx, rtnl_lock_reqd); +} + +int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, + int ifidx, struct net_device* ndev, bool rtnl_lock_reqd) +{ +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(cfg->pub, CAN_SLEEP(), __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + return dhd_remove_if(cfg->pub, ifidx, rtnl_lock_reqd); +} + +void wl_cfg80211_cleanup_if(struct net_device *net) +{ + struct bcm_cfg80211 *cfg = wl_get_cfg(net); +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(cfg->pub, CAN_SLEEP(), __builtin_return_address(0)); +#else + BCM_REFERENCE(cfg); +#endif /* 
DHD_PCIE_RUNTIMEPM */ + dhd_cleanup_if(net); +} + +struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev) +{ + struct bcm_cfg80211 *cfg; + + if (ndev) { + cfg = wl_get_cfg(ndev); + if (ndev->ieee80211_ptr) { + MFREE(cfg->osh, ndev->ieee80211_ptr, sizeof(struct wireless_dev)); + ndev->ieee80211_ptr = NULL; + } + free_netdev(ndev); + return NULL; + } + + return ndev; +} + +void dhd_netdev_free(struct net_device *ndev) +{ +#ifdef WL_CFG80211 + ndev = dhd_cfg80211_netdev_free(ndev); +#endif + if (ndev) + free_netdev(ndev); +} + +static s32 +wl_dongle_up(struct net_device *ndev) +{ + s32 err = 0; + u32 local_up = 0; +#ifdef WLAN_ACCEL_BOOT + u32 bus_host_access = 1; + err = wldev_iovar_setint(ndev, "bus:host_access", bus_host_access); + if (unlikely(err)) { + WL_ERR(("bus:host_access(%d) error (%d)\n", bus_host_access, err)); + } +#endif /* WLAN_ACCEL_BOOT */ + err = wldev_ioctl_set(ndev, WLC_UP, &local_up, sizeof(local_up)); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + } else { + WL_INFORM_MEM(("wl up\n")); + dhd_dongle_up = TRUE; + } + return err; +} + +static s32 +wl_dongle_down(struct net_device *ndev) +{ + s32 err = 0; + u32 local_down = 0; +#ifdef WLAN_ACCEL_BOOT + u32 bus_host_access = 0; +#endif /* WLAN_ACCEL_BOOT */ + + err = wldev_ioctl_set(ndev, WLC_DOWN, &local_down, sizeof(local_down)); + if (unlikely(err)) { + WL_ERR(("WLC_DOWN error (%d)\n", err)); + } +#ifdef WLAN_ACCEL_BOOT + err = wldev_iovar_setint(ndev, "bus:host_access", bus_host_access); + if (unlikely(err)) { + WL_ERR(("bus:host_access(%d) error (%d)\n", bus_host_access, err)); + } +#endif /* WLAN_ACCEL_BOOT */ + WL_INFORM_MEM(("wl down\n")); + dhd_dongle_up = FALSE; + + return err; +} + +#ifndef OEM_ANDROID +#ifndef CUSTOMER_HW6 +static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode) +{ + s32 err = 0; + + WL_TRACE(("In\n")); + err = wldev_ioctl_set(ndev, WLC_SET_PM, &power_mode, sizeof(power_mode)); + if (unlikely(err)) { + WL_ERR(("WLC_SET_PM error (%d)\n", err)); + } + return err; +} + +#ifdef BCMSDIO +static s32 +wl_dongle_glom(struct net_device *ndev, s32 glom, u32 dongle_align) +{ + s32 err = 0; + + /* Match Host and Dongle rx alignment */ + err = wldev_iovar_setint(ndev, "bus:txglomalign", dongle_align); + if (unlikely(err)) { + WL_ERR(("txglomalign error (%d)\n", err)); + goto dongle_glom_out; + } + /* disable glom option per default */ + if (glom != DEFAULT_GLOM_VALUE) { + err = wldev_iovar_setint(ndev, "bus:txglom", glom); + if (unlikely(err)) { + WL_ERR(("txglom error (%d)\n", err)); + goto dongle_glom_out; + } + } +dongle_glom_out: + return err; +} + +#endif /* BCMSDIO */ +#endif /* !CUSTOMER_HW6 */ +#endif /* !OEM_ANDROID */ + +s32 +wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout) +{ + s32 err = 0; + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + if (roamvar) { + err = wldev_iovar_setint(ndev, "bcn_timeout", bcn_timeout); + if (unlikely(err)) { + WL_ERR(("bcn_timeout error (%d)\n", err)); + goto dongle_rom_out; + } + } + /* Enable/Disable built-in roaming to allow supplicant to take care of roaming */ + err = wldev_iovar_setint(ndev, "roam_off", roamvar); + if (unlikely(err)) { + WL_ERR(("roam_off error (%d)\n", err)); + goto dongle_rom_out; + } +dongle_rom_out: + return err; +} + +#ifndef OEM_ANDROID +#ifndef CUSTOMER_HW6 +static s32 +wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, + s32 scan_unassoc_time) +{ + s32 err = 0; + + err = wldev_ioctl_set(ndev, WLC_SET_SCAN_CHANNEL_TIME, 
&scan_assoc_time, + sizeof(scan_assoc_time)); + if (err) { + if (err == -EOPNOTSUPP) { + WL_INFORM(("Scan assoc time is not supported\n")); + } else { + WL_ERR(("Scan assoc time error (%d)\n", err)); + } + goto dongle_scantime_out; + } + err = wldev_ioctl_set(ndev, WLC_SET_SCAN_UNASSOC_TIME, &scan_unassoc_time, + sizeof(scan_unassoc_time)); + if (err) { + if (err == -EOPNOTSUPP) { + WL_INFORM(("Scan unassoc time is not supported\n")); + } else { + WL_ERR(("Scan unassoc time error (%d)\n", err)); + } + goto dongle_scantime_out; + } + +dongle_scantime_out: + return err; +} + +static s32 +wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol) +{ + s8 iovbuf[WLC_IOCTL_SMLEN]; + s32 err = 0; + s32 len; + struct bcm_cfg80211 *cfg = wl_get_cfg(ndev); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + /* Set ARP offload */ + len = bcm_mkiovar("arpoe", (char *)&arpoe, sizeof(arpoe), iovbuf, sizeof(iovbuf)); + if (!len) { + WL_ERR(("%s: bcm_mkiovar failed:%d\n", __FUNCTION__, len)); + return BCME_BADARG; + } + err = wldev_ioctl_set(ndev, WLC_SET_VAR, iovbuf, len); + if (err) { + if (err == -EOPNOTSUPP) + WL_INFORM(("arpoe is not supported\n")); + else + WL_ERR(("arpoe error (%d)\n", err)); + + goto dongle_offload_out; + } + len = bcm_mkiovar("arp_ol", (char *)&arp_ol, sizeof(arp_ol), iovbuf, sizeof(iovbuf)); + if (!len) { + WL_ERR(("%s: bcm_mkiovar failed:%d\n", __FUNCTION__, len)); + return BCME_BADARG; + } + err = wldev_ioctl_set(ndev, WLC_SET_VAR, iovbuf, len); + if (err) { + if (err == -EOPNOTSUPP) + WL_INFORM(("arp_ol is not supported\n")); + else + WL_ERR(("arp_ol error (%d)\n", err)); + + goto dongle_offload_out; + } + + dhd->arpoe_enable = TRUE; + dhd->arpol_configured = TRUE; + WL_ERR(("arpoe:%d arpol:%d\n", + dhd->arpoe_enable, dhd->arpol_configured)); + +dongle_offload_out: + return err; +} + +static s32 wl_pattern_atoh(s8 *src, s8 *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) { + WL_ERR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + WL_ERR(("Mask invalid format. Needs to be of even length\n")); + return -1; + } + + for (i = 0; *src != '\0'; i++) { + char num[3]; + if ((num[0] = src[0]) != '\0') { + num[1] = src[1]; + } + num[2] = '\0'; + dst[i] = (u8) simple_strtoul(num, NULL, 16); + src += 2; + } + + return i; +} + +static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode) +{ + const s8 *str; + struct wl_pkt_filter pkt_filter; + struct wl_pkt_filter *pkt_filterp; + s32 buf_len; + s32 str_len; + u32 mask_size; + u32 pattern_size; + s8 buf[PKT_FILTER_BUF_SIZE] = {0}; + s32 err = 0; + + /* add a default packet filter pattern */ + str = "pkt_filter_add"; + str_len = strlen(str); + strlcpy(buf, str, sizeof(buf)); + buf_len = str_len + 1; + + pkt_filterp = (struct wl_pkt_filter *)(buf + str_len + 1); + + /* Parse packet filter id. */ + pkt_filter.id = htod32(100); + + /* Parse filter polarity. */ + pkt_filter.negate_match = htod32(0); + + /* Parse filter type. */ + pkt_filter.type = htod32(0); + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(0); + + /* Parse pattern filter mask. */ + mask_size = htod32(wl_pattern_atoh("0xff", + (char *)pkt_filterp->u.pattern. 
+ mask_and_pattern));
+
+ if (mask_size == (typeof(mask_size))-1 ||
+ (mask_size > (PKT_FILTER_BUF_SIZE - (buf_len) +
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN))) {
+ /* mask_size has to be equal to pattern_size */
+ err = -EINVAL;
+ goto dongle_filter_out;
+ }
+ /* Parse pattern filter pattern. */
+ pattern_size = htod32(wl_pattern_atoh("0x00",
+ (char *)&pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+ if (mask_size != pattern_size) {
+ WL_ERR(("Mask and pattern not the same size\n"));
+ err = -EINVAL;
+ goto dongle_filter_out;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+ /* Filter attributes are set in a local variable (pkt_filter) and
+ * then memcpy'ed into the ioctl buffer (pkt_filterp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp, &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ err = wldev_ioctl_set(ndev, WLC_SET_VAR, buf, buf_len);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFORM(("filter not supported\n"));
+ } else {
+ WL_ERR(("filter (%d)\n", err));
+ }
+ goto dongle_filter_out;
+ }
+
+ /* set mode to allow pattern */
+ err = wldev_iovar_setint(ndev, "pkt_filter_mode", filter_mode);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFORM(("filter_mode not supported\n"));
+ } else {
+ WL_ERR(("filter_mode (%d)\n", err));
+ }
+ goto dongle_filter_out;
+ }
+
+dongle_filter_out:
+ return err;
+}
+#endif /* !CUSTOMER_HW6 */
+#endif /* !OEM_ANDROID */
+
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+ struct net_device *ndev;
+ s32 err = 0;
+ dhd_pub_t *dhd = NULL;
+#if !defined(OEM_ANDROID) && defined(BCMSDIO)
+ s32 glom = CUSTOM_GLOM_SETTING;
+ BCM_REFERENCE(glom);
+#endif
+
+ WL_TRACE(("In\n"));
+
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+ err = wl_dongle_up(ndev);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_up failed\n"));
+ goto default_conf_out;
+ }
+
+ if (dhd && dhd->fw_preinit) {
+ /* Init config will be done by fw preinit context */
+ return BCME_OK;
+ }
+
+#ifndef OEM_ANDROID
+#ifndef CUSTOMER_HW6
+ err = wl_dongle_power(ndev, PM_FAST);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_power failed\n"));
+ goto default_conf_out;
+ }
+#ifdef BCMSDIO
+ err = wl_dongle_glom(ndev, glom, DHD_SDALIGN);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_glom failed\n"));
+ goto default_conf_out;
+ }
+#endif /* BCMSDIO */
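+ /* roamvar is 1 when cfg->roam_on is false: firmware roaming is then
+ * disabled and wl_dongle_roam() (above) installs bcn_timeout 3 so that
+ * beacon loss still reports link down.
+ */
+ err = wl_dongle_roam(ndev, (cfg->roam_on ? 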
0 : 1), 3);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_roam failed\n"));
+ goto default_conf_out;
+ }
+ wl_dongle_scantime(ndev, 40, 80);
+ wl_dongle_offload(ndev, 1, 0xf);
+ wl_dongle_filter(ndev, 1);
+#endif /* !CUSTOMER_HW6 */
+#endif /* !OEM_ANDROID */
+
+default_conf_out:
+
+ return err;
+
+}
+
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+ const struct bcm_nlmsg_hdr *nlioc, void *buf)
+{
+ struct net_device *ndev = NULL;
+ dhd_pub_t *dhd;
+ dhd_ioctl_t ioc = { 0, NULL, 0, 0, 0, 0, 0};
+ int ret = 0;
+ int8 index;
+
+ WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+
+ dhd = cfg->pub;
+ DHD_OS_WAKE_LOCK(dhd);
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ index = dhd_net2idx(dhd->info, ndev);
+ if (index == DHD_BAD_IF) {
+ WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ ioc.cmd = nlioc->cmd;
+ ioc.len = nlioc->len;
+ ioc.set = nlioc->set;
+ ioc.driver = nlioc->magic;
+ ioc.buf = buf;
+ ret = dhd_ioctl_process(dhd, index, &ioc, buf);
+ if (ret) {
+ WL_TRACE(("dhd_ioctl_process return err %d\n", ret));
+ ret = OSL_ERROR(ret);
+ goto done;
+ }
+
+done:
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return ret;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_cfg80211.h b/bcmdhd.101.10.361.x/dhd_cfg80211.h
new file mode 100755
index 0000000..1abf42b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_cfg80211.h
@@ -0,0 +1,49 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include
+#include
+#include
+
+#ifndef WL_ERR
+#define WL_ERR CFG80211_ERR
+#endif
+#ifndef WL_TRACE
+#define WL_TRACE CFG80211_TRACE
+#endif
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data);
+s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout);
+#endif /* __DHD_CFG80211__ */
diff --git a/bcmdhd.101.10.361.x/dhd_common.c b/bcmdhd.101.10.361.x/dhd_common.c
new file mode 100755
index 0000000..a8f8ef6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_common.c
@@ -0,0 +1,11596 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef DHD_SDTC_ETB_DUMP
+#include
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef BCMDBG
+#include
+#endif /* BCMDBG */
+
+#ifdef PCIE_FULL_DONGLE
+#include
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef SHOW_LOGTRACE
+#include
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMPCIE
+#include
+#endif
+
+#include
+#include
+#include
+#include
+#include <802.1d.h>
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef WL_CFG80211
+#include
+#include
+#endif
+#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
+#include
+#endif /* (OEM_ANDROID) && (PNO_SUPPORT) */
+#ifdef RTT_SUPPORT
+#include
+#endif
+
+#ifdef DNGL_EVENT_SUPPORT
+#include
+#endif
+
+#ifdef IL_BIGENDIAN
+#include
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* IL_BIGENDIAN */
+
+#ifdef PROP_TXSTATUS
+#include
+#include
+#endif
+
+#if defined(__linux__)
+#include
+#endif /* __linux__ */
+
+#ifdef DHD_WMF
+#include
+#endif /* DHD_WMF */
+
+#ifdef DHD_L2_FILTER
+#include
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_PSTA
+#include
+#endif /* DHD_PSTA */
+#ifdef DHD_TIMESYNC
+#include
+#endif /* DHD_TIMESYNC */
+
+#ifdef DHD_WET
+#include
+#endif /* DHD_WET */
+#if defined(NDIS)
+#include
+#endif
+
+#ifdef DHD_LOG_DUMP
+#include
+#ifdef DHD_PKT_LOGGING
+#include
+#endif
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+int log_print_threshold = 0;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+int dbgring_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_INFO_VAL
+ | DHD_EVENT_VAL | DHD_PKT_MON_VAL | DHD_IOVAR_MEM_VAL;
+int dhd_msg_level = DHD_ERROR_VAL;
+#else
+int dbgring_msg_level = 0;
+/* For CUSTOMER_HW4/Hikey do not enable DHD_ERROR_MEM_VAL by default */
+int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+#ifdef NDIS
+extern uint wl_msg_level;
+#endif
+
+#if defined(WL_WLC_SHIM)
+#include
+#else
+#if defined(NDIS)
+#include
+#endif
+#endif /* WL_WLC_SHIM */
+
+#ifdef DHD_DEBUG
+#include
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#include
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef CSI_SUPPORT
+#include
+#endif /* CSI_SUPPORT */
+
+#if defined(BTLOG) && !defined(BCMPCIE)
+#error "BT logging supported only with PCIe"
+#endif /* defined(BTLOG) && !defined(BCMPCIE) */
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+#ifdef PROP_TXSTATUS
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS */
+
+#ifdef REPORT_FATAL_TIMEOUTS
+#ifdef BCMINTERNAL
+/*
+ * Internal builds are used by DVT.
+ * The timeouts are not required for DVT builds, since they use IOVARs like
+ * SROM programming etc. that take a long time, so the timeout values are
+ * set to 0 here. If DVT needs this feature they can enable it via IOVAR.
+ *
+ * SVT uses external builds anyway.
+ */
+#define SCAN_TIMEOUT_DEFAULT 0
+#define JOIN_TIMEOUT_DEFAULT 0
+#define BUS_TIMEOUT_DEFAULT 0
+#define CMD_TIMEOUT_DEFAULT 0
+#else
+/* Default timeout value in ms */
+#ifdef DHD_EFI
+#define BUS_TIMEOUT_DEFAULT 800 /* 800ms */
+#define CMD_TIMEOUT_DEFAULT 1500 /* 1.5s */
+#define SCAN_TIMEOUT_DEFAULT 0
+#define JOIN_TIMEOUT_DEFAULT 0
+#else
+#define BUS_TIMEOUT_DEFAULT 800
+#define CMD_TIMEOUT_DEFAULT 1200
+#define SCAN_TIMEOUT_DEFAULT 17000
+#define JOIN_TIMEOUT_DEFAULT 7500
+#endif /* DHD_EFI */
+#endif /* BCMINTERNAL */
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef SHOW_LOGTRACE
+#define BYTES_AHEAD_NUM 10 /* address in map file is before these many bytes */
+#define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
+#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
+static char *ramstart_str = " text_start"; /* string in mapfile has addr ramstart */
+static char *rodata_start_str = " rodata_start"; /* string in mapfile has addr rodata start */
+static char *rodata_end_str = " rodata_end"; /* string in mapfile has addr rodata end */
+#define RAMSTART_BIT 0x01
+#define RDSTART_BIT 0x02
+#define RDEND_BIT 0x04
+#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
+#endif /* SHOW_LOGTRACE */
+
+#ifdef SHOW_LOGTRACE
+#if defined(LINUX) || defined(linux)
+/* the fw file path is taken from either the module parameter at
+ * insmod time or is defined as a constant of different values
+ * for different platforms
+ */
+extern char *st_str_file_path;
+#else
+static char *st_str_file_path = "rtecdc.bin";
+#endif /* LINUX */
+#endif /* SHOW_LOGTRACE */
+
+#ifdef EWP_EDL
+typedef struct msg_hdr_edl {
+ uint32 infobuf_ver;
+ info_buf_payload_hdr_t pyld_hdr;
+ msgtrace_hdr_t trace_hdr;
+} msg_hdr_edl_t;
+#endif /* EWP_EDL */
+
+#define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
+extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
+#endif
+
+extern int dhd_socram_dump(struct dhd_bus *bus);
+extern void dhd_set_packet_filter(dhd_pub_t *dhd);
+
+#ifdef DNGL_EVENT_SUPPORT
+static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
+ bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
+static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
+ size_t pktlen);
+#endif /* DNGL_EVENT_SUPPORT */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+#ifdef REPORT_FATAL_TIMEOUTS
+static void dhd_set_join_error(dhd_pub_t *pub, uint32 mask);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
+#define MAX_IOCTL_SUSPEND_ERROR 10
+static int ioctl_suspend_error = 0;
+#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
+
+/* Should ideally read this from target (taken from wlu) */
+#define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
+
+#if defined(OEM_ANDROID)
+/* note these variables will be used with wext */
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+#endif /* OEM_ANDROID */
+
+#ifdef WLEASYMESH
+extern int dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+extern int dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+#endif /* WLEASYMESH */
+
+#define CHIPID_MISMATCH 8
+
+#define DHD_VERSION "Dongle Host Driver, version " EPI_VERSION_STR "\n"
+
+#if defined(DHD_DEBUG) && defined(DHD_COMPILED)
+const char dhd_version[] = DHD_VERSION DHD_COMPILED " compiled on "
+ __DATE__ " at " __TIME__ "\n\0";
+#else
+const char dhd_version[] = DHD_VERSION;
+#endif /* DHD_DEBUG && DHD_COMPILED */
+
+char fw_version[FW_VER_STR_LEN] = "\0";
+char clm_version[CLM_VER_STR_LEN] = "\0";
+
+char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+#if defined(BCM_ROUTER_DHD)
+static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
+ trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len);
+#endif
+
+static char* ioctl2str(uint32 ioctl);
+
+/* IOVar table */
+enum {
+ IOV_VERSION = 1,
+ IOV_WLMSGLEVEL,
+ IOV_MSGLEVEL,
+ IOV_BCMERRORSTR,
+ IOV_BCMERROR,
+ IOV_WDTICK,
+ IOV_DUMP,
+ IOV_CLEARCOUNTS,
+ IOV_LOGDUMP,
+ IOV_LOGCAL,
+ IOV_LOGSTAMP,
+ IOV_GPIOOB,
+ IOV_IOCTLTIMEOUT,
+ IOV_CONS,
+ IOV_DCONSOLE_POLL,
+#if defined(DHD_DEBUG)
+ IOV_DHD_JOIN_TIMEOUT_DBG,
+ IOV_SCAN_TIMEOUT,
+ IOV_MEM_DEBUG,
+#ifdef BCMPCIE
+ IOV_FLOW_RING_DEBUG,
+#endif /* BCMPCIE */
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+ IOV_PROPTXSTATUS_ENABLE,
+ IOV_PROPTXSTATUS_MODE,
+ IOV_PROPTXSTATUS_OPT,
+#ifdef QMONITOR
+ IOV_QMON_TIME_THRES,
+ IOV_QMON_TIME_PERCENT,
+#endif /* QMONITOR */
+ IOV_PROPTXSTATUS_MODULE_IGNORE,
+ IOV_PROPTXSTATUS_CREDIT_IGNORE,
+ IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
+ IOV_PROPTXSTATUS_RXPKT_CHK,
+#endif /* PROP_TXSTATUS */
+ IOV_BUS_TYPE,
+ IOV_CHANGEMTU,
+ IOV_HOSTREORDER_FLOWS,
+#ifdef DHDTCPACK_SUPPRESS
+ IOV_TCPACK_SUPPRESS,
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ IOV_WMF_BSS_ENAB,
+ IOV_WMF_UCAST_IGMP,
+ IOV_WMF_MCAST_DATA_SENDUP,
+#ifdef WL_IGMP_UCQUERY
+ IOV_WMF_UCAST_IGMP_QUERY,
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ IOV_WMF_UCAST_UPNP,
+#endif /* DHD_UCAST_UPNP */
+ IOV_WMF_PSTA_DISABLE,
+#endif /* DHD_WMF */
+#if defined(BCM_ROUTER_DHD)
+ IOV_TRAFFIC_MGMT_DWM,
+#endif /* BCM_ROUTER_DHD */
+ IOV_AP_ISOLATE,
+#ifdef DHD_L2_FILTER
+ IOV_DHCP_UNICAST,
+ IOV_BLOCK_PING,
+ IOV_PROXY_ARP,
+ IOV_GRAT_ARP,
+ IOV_BLOCK_TDLS,
+#endif /* DHD_L2_FILTER */
+ IOV_DHD_IE,
+#ifdef DHD_PSTA
+ IOV_PSTA,
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ IOV_WET,
+ IOV_WET_HOST_IPV4,
+ IOV_WET_HOST_MAC,
+#endif /* DHD_WET */
+ IOV_CFG80211_OPMODE,
+ IOV_ASSERT_TYPE,
+#if defined(NDIS)
+ IOV_WAKEIND,
+#endif /* NDIS */
+#if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
+ IOV_LMTEST,
+#endif
+#ifdef DHD_MCAST_REGEN
+ IOV_MCAST_REGEN_BSS_ENABLE,
+#endif
+#ifdef BCMDBG
+ IOV_MACDBG_PD11REGS,
+ IOV_MACDBG_REGLIST,
+ IOV_MACDBG_PSVMPMEMS,
+#endif /* BCMDBG */
+#ifdef SHOW_LOGTRACE
+ IOV_DUMP_TRACE_LOG,
+#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+ IOV_SCAN_TO,
+ IOV_JOIN_TO,
+ IOV_CMD_TO,
+ IOV_OQS_TO,
+#endif /* REPORT_FATAL_TIMEOUTS */
+ IOV_DONGLE_TRAP_TYPE,
+ IOV_DONGLE_TRAP_INFO,
+ IOV_BPADDR,
+ IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
+#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI)
+ IOV_LOG_CAPTURE_ENABLE,
+#endif
+ IOV_LOG_DUMP,
+#endif /* DHD_LOG_DUMP */
+#ifdef BTLOG
+ IOV_DUMP_BT_LOG,
+ IOV_BTLOG,
+#endif /* BTLOG */
+#ifdef SNAPSHOT_UPLOAD
+ IOV_BT_MEM_DUMP,
+ IOV_BT_UPLOAD,
+#endif /* SNAPSHOT_UPLOAD */
+ IOV_TPUT_TEST,
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ IOV_PKT_LATENCY,
+#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
+ IOV_DEBUG_BUF_DEST_STAT,
+#ifdef DHD_PKTTS
+ IOV_PKTTS_ENAB,
+ IOV_PKTTS_FLOW,
+#endif /* DHD_PKTTS */
+#ifdef DHD_DEBUG
+ IOV_INDUCE_ERROR,
+#endif /* DHD_DEBUG */
+#if defined(DHD_EFI)
+ IOV_INTR_POLL,
+#endif
+ IOV_FIS_TRIGGER,
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ IOV_CONC_DISC,
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ IOV_IFACE_POLICY,
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined (RTT_SUPPORT) && defined (WL_NAN)
+ IOV_RTT_GEOFENCE_TYPE_OVRD,
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
+ IOV_FW_VBS,
+#ifdef DHD_TX_PROFILE
+ IOV_TX_PROFILE_TAG,
+ IOV_TX_PROFILE_ENABLE,
+ IOV_TX_PROFILE_DUMP,
+#endif /* defined(DHD_TX_PROFILE) */
+ IOV_CHECK_TRAP_ROT,
+#if defined(DHD_AWDL)
+ IOV_AWDL_LLC_ENABLE,
+#endif
+#ifdef WLEASYMESH
+ IOV_1905_AL_UCAST,
+ IOV_1905_AL_MCAST,
+#endif /* WLEASYMESH */
+ IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+ /* name varid flags flags2 type minlen */
+ {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, 0},
+ {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_DEBUG
+ {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
+ {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
+#ifdef BCMPCIE
+ {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
+#endif /* BCMPCIE */
+#ifdef NDIS
+ {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0},
+#endif /* NDIS */
+#endif /* DHD_DEBUG */
+ {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
+ {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
+ {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
+ {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN_32K},
+ {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
+ {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
+ {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
+#ifdef BCMPERFSTATS
+ {"logdump", IOV_LOGDUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
+ {"logcal", IOV_LOGCAL, 0, 0, IOVT_UINT32, 0},
+ {"logstamp", IOV_LOGSTAMP, 0, 0, IOVT_BUFFER, 0},
+#endif
+ {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
+ {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
+#ifdef PROP_TXSTATUS
+ {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
+ /*
+ set the proptxstatus operation mode:
+ 0 - Do not do any proptxstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ */
+ {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 },
+ {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
+#ifdef QMONITOR
+ {"qtime_thres", IOV_QMON_TIME_THRES, 0, 0, IOVT_UINT32, 0 },
+ {"qtime_percent", IOV_QMON_TIME_PERCENT, 0, 0, IOVT_UINT32, 0 },
+#endif /* QMONITOR */
+ {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
+ {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
+ {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
+ {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
+#endif /* PROP_TXSTATUS */
+ {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
+ {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
+ {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
+ (WLHOST_REORDERDATA_MAXFLOWS + 1) },
+#ifdef DHDTCPACK_SUPPRESS
+ {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, 0, IOVT_BOOL, 0 },
+ {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, 0, IOVT_BOOL, 0 },
+ {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, 0, IOVT_BOOL, 0 },
+#ifdef WL_IGMP_UCQUERY
+ {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), 0, IOVT_BOOL, 0 },
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_UCAST_UPNP */
+ {"wmf_psta_disable", IOV_WMF_PSTA_DISABLE, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_WMF */
+#if defined(BCM_ROUTER_DHD)
+ {"trf_mgmt_filters_add", IOV_TRAFFIC_MGMT_DWM, (0), 0, IOVT_BUFFER, 0},
+#endif /* BCM_ROUTER_DHD */
+#ifdef DHD_L2_FILTER
+ {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_L2_FILTER */
+ {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
+#ifdef DHD_L2_FILTER
+ {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
+ {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
+ {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
+ {"block_tdls", IOV_BLOCK_TDLS, (0), 0, IOVT_BOOL, 0},
+#endif /* DHD_L2_FILTER */
+ {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
+#ifdef DHD_PSTA
+ /* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
+ {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ /* WET Mode configuration. 0: DISABLED 1: WET */
+ {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
+ {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
+ {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
+#endif /* DHD_WET */
+ {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 },
+ {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
+#if defined(NDIS)
+ { "wowl_wakeind", IOV_WAKEIND, 0, 0, IOVT_UINT32, 0 },
+#endif /* NDIS */
+#if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
+ {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
+#endif
+#ifdef DHD_MCAST_REGEN
+ {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
+#endif
+#ifdef BCMDBG
+ {"pd11regs", IOV_MACDBG_PD11REGS, 0, 0, IOVT_BUFFER, 0},
+ {"mreglist", IOV_MACDBG_REGLIST, 0, 0, IOVT_BUFFER, 0},
+ {"psvmpmems", IOV_MACDBG_PSVMPMEMS, 0, 0, IOVT_BUFFER, 0},
+#endif /* BCMDBG */
+#ifdef SHOW_LOGTRACE
+ {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
+#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+ {"scan_timeout", IOV_SCAN_TO, 0, 0, IOVT_UINT32, 0 },
+ {"join_timeout", IOV_JOIN_TO, 0, 0, IOVT_UINT32, 0 },
+ {"cmd_timeout", IOV_CMD_TO, 0, 0, IOVT_UINT32, 0 },
+ {"oqs_timeout", IOV_OQS_TO, 0, 0, IOVT_UINT32, 0 },
+#endif /* REPORT_FATAL_TIMEOUTS */
+ {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
+ {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
+#ifdef DHD_DEBUG
+ {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+#endif /* DHD_DEBUG */
+ {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
+ MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
+#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI)
+ {"log_capture_enable", IOV_LOG_CAPTURE_ENABLE, 0, 0, IOVT_UINT8, 0},
+#endif
+ {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
+#endif /* DHD_LOG_DUMP */
+#ifdef BTLOG
+ {"dump_bt_log", IOV_DUMP_BT_LOG, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
+ {"btlog", IOV_BTLOG, 0, 0, IOVT_UINT32, 0 },
+#endif /* BTLOG */
+#ifdef SNAPSHOT_UPLOAD
+ {"bt_mem_dump", IOV_BT_MEM_DUMP, 0, 0, IOVT_UINT32, 0},
+ {"bt_upload", IOV_BT_UPLOAD, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
+#endif /* SNAPSHOT_UPLOAD */
+ {"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
+ {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PKTTS
+ {"pktts_enab", IOV_PKTTS_ENAB, (0), 0, IOVT_BOOL, 0 },
+ {"pktts_flow", IOV_PKTTS_FLOW, (0), 0, IOVT_BUFFER, sizeof(tput_test_t) },
+#endif /* DHD_PKTTS */
+#if defined(DHD_EFI)
+ {"intr_poll", IOV_INTR_POLL, 0, 0, IOVT_BUFFER, sizeof(intr_poll_t)},
+#endif
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ {"pkt_latency", IOV_PKT_LATENCY, 0, 0, IOVT_UINT32, 0 },
+#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
+#if defined(DHD_SSSR_DUMP)
+ {"fis_trigger", IOV_FIS_TRIGGER, 0, 0, IOVT_UINT32, 0},
+#endif
+#ifdef DHD_DEBUG
+ {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
+#endif /* DHD_DEBUG */
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined (RTT_SUPPORT) && defined (WL_NAN)
+ {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
+ 
{"fw_verbose", IOV_FW_VBS, 0, 0, IOVT_UINT32, 0}, +#ifdef DHD_TX_PROFILE + {"tx_profile_tag", IOV_TX_PROFILE_TAG, 0, 0, IOVT_BUFFER, + sizeof(dhd_tx_profile_protocol_t)}, + {"tx_profile_enable", IOV_TX_PROFILE_ENABLE, 0, 0, IOVT_BOOL, 0}, + {"tx_profile_dump", IOV_TX_PROFILE_DUMP, 0, 0, IOVT_UINT32, 0}, +#endif /* defined(DHD_TX_PROFILE) */ + {"check_trap_rot", IOV_CHECK_TRAP_ROT, (0), 0, IOVT_BOOL, 0}, +#if defined(DHD_AWDL) + {"awdl_llc_enable", IOV_AWDL_LLC_ENABLE, 0, 0, IOVT_BOOL, 0}, +#endif + /* --- add new iovars *ABOVE* this line --- */ +#ifdef WLEASYMESH + {"1905_al_ucast", IOV_1905_AL_UCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN}, + {"1905_al_mcast", IOV_1905_AL_MCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN}, +#endif /* WLEASYMESH */ + {NULL, 0, 0, 0, 0, 0 } +}; + +#define DHD_IOVAR_BUF_SIZE 128 + +#if defined(LINUX) || defined(linux) || defined(DHD_EFI) +fw_download_status_t +dhd_fw_download_status(dhd_pub_t * dhd_pub) +{ + return dhd_pub->fw_download_status; +} +#endif /* defined(LINUX) || defined(linux) || defined(DHD_EFI) */ + +bool +dhd_query_bus_erros(dhd_pub_t *dhdp) +{ + bool ret = FALSE; + + if (dhdp->dongle_reset) { + DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->dongle_trap_occured) { + DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; +#ifdef OEM_ANDROID + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; + dhd_os_send_hang_message(dhdp); +#endif /* OEM_ANDROID */ + } + + if (dhdp->iovar_timeout_occured) { + DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + +#ifdef PCIE_FULL_DONGLE + if (dhdp->d3ack_timeout_occured) { + DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + if (dhdp->livelock_occured) { + DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->pktid_audit_failed) { + DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } +#endif /* PCIE_FULL_DONGLE */ + + if (dhdp->iface_op_failed) { + DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->scan_timeout_occurred) { + DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->scan_busy_occurred) { + DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + +#ifdef DNGL_AXI_ERROR_LOGGING + if (dhdp->axi_error) { + DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#if defined(BCMPCIE) + if (dhd_bus_get_linkdown(dhdp)) { + DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhd_bus_get_cto(dhdp)) { + DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } +#endif + + return ret; +} + +void +dhd_clear_bus_errors(dhd_pub_t *dhdp) +{ + if (!dhdp) + return; + + dhdp->dongle_reset = FALSE; + dhdp->dongle_trap_occured = FALSE; + dhdp->iovar_timeout_occured = FALSE; +#ifdef PCIE_FULL_DONGLE + dhdp->d3ack_timeout_occured = FALSE; + dhdp->livelock_occured = FALSE; + dhdp->pktid_audit_failed = FALSE; +#endif + dhdp->iface_op_failed = FALSE; + dhdp->scan_timeout_occurred = FALSE; + dhdp->scan_busy_occurred = FALSE; +#ifdef BT_OVER_PCIE + 
dhdp->dongle_trap_due_to_bt = FALSE; +#endif +} + +#ifdef DHD_SSSR_DUMP + +/* This can be overwritten by module parameter defined in dhd_linux.c */ +uint sssr_enab = TRUE; + +#ifdef DHD_FIS_DUMP +uint fis_enab = TRUE; +#else +uint fis_enab = FALSE; +#endif /* DHD_FIS_DUMP */ + +int +dhd_sssr_mempool_init(dhd_pub_t *dhd) +{ + dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE); + if (dhd->sssr_mempool == NULL) { + DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} + +void +dhd_sssr_mempool_deinit(dhd_pub_t *dhd) +{ + if (dhd->sssr_mempool) { + MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE); + dhd->sssr_mempool = NULL; + } +} + +int +dhd_sssr_reg_info_init(dhd_pub_t *dhd) +{ + dhd->sssr_reg_info = (sssr_reg_info_cmn_t *) MALLOCZ(dhd->osh, sizeof(sssr_reg_info_cmn_t)); + if (dhd->sssr_reg_info == NULL) { + DHD_ERROR(("%s: MALLOC of sssr_reg_info failed\n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} + +void +dhd_sssr_reg_info_deinit(dhd_pub_t *dhd) +{ + if (dhd->sssr_reg_info) { + MFREE(dhd->osh, dhd->sssr_reg_info, sizeof(sssr_reg_info_cmn_t)); + dhd->sssr_reg_info = NULL; + } +} + +#ifdef DHD_PCIE_REG_ACCESS +static void +dhd_dump_sssr_reg_info_v2(dhd_pub_t *dhd) +{ + sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info; + sssr_reg_info_v2_t *sssr_reg_info = (sssr_reg_info_v2_t *)&sssr_reg_info_cmn->rev2; + int i, j; + uint8 num_d11cores = dhd_d11_slices_num_get(dhd); + DHD_ERROR(("pmu_regs\n")); + DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x " + "macresreqtimer=0x%x macresreqtimer1=0x%x\n", + sssr_reg_info->pmu_regs.base_regs.pmuintmask0, + sssr_reg_info->pmu_regs.base_regs.pmuintmask1, + sssr_reg_info->pmu_regs.base_regs.resreqtimer, + sssr_reg_info->pmu_regs.base_regs.macresreqtimer, + sssr_reg_info->pmu_regs.base_regs.macresreqtimer1)); + DHD_ERROR(("chipcommon_regs\n")); + DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n", + sssr_reg_info->chipcommon_regs.base_regs.intmask, + sssr_reg_info->chipcommon_regs.base_regs.powerctrl, + sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus, + sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask)); + DHD_ERROR(("arm_regs\n")); + DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x" + " resetctrl=0x%x extrsrcreq=0x%x\n", + sssr_reg_info->arm_regs.base_regs.clockcontrolstatus, + sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val, + sssr_reg_info->arm_regs.wrapper_regs.resetctrl, + sssr_reg_info->arm_regs.wrapper_regs.extrsrcreq)); + DHD_ERROR(("pcie_regs\n")); + DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x " + "clockcontrolstatus_val=0x%x extrsrcreq=0x%x\n", + sssr_reg_info->pcie_regs.base_regs.ltrstate, + sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus, + sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val, + sssr_reg_info->pcie_regs.wrapper_regs.extrsrcreq)); + + for (i = 0; i < num_d11cores; i++) { + DHD_ERROR(("mac_regs core[%d]\n", i)); + DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x " + "clockcontrolstatus_val=0x%x\n", + sssr_reg_info->mac_regs[i].base_regs.xmtaddress, + sssr_reg_info->mac_regs[i].base_regs.xmtdata, + sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus, + sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val)); + DHD_ERROR(("resetctrl=0x%x extrsrcreq=0x%x ioctrl=0x%x\n", + sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl, + sssr_reg_info->mac_regs[i].wrapper_regs.extrsrcreq, + 
sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl)); + for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) { + DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j, + sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j])); + } + DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size)); + } + DHD_ERROR(("dig_regs\n")); + DHD_ERROR(("dig_sr_addr=0x%x dig_sr_size=0x%x\n", + sssr_reg_info->dig_mem_info.dig_sr_addr, + sssr_reg_info->dig_mem_info.dig_sr_size)); +} + +static void +dhd_dump_sssr_reg_info_v3(dhd_pub_t *dhd) +{ + sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info; + sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3; + int i; + + dhd_dump_sssr_reg_info_v2(dhd); + + DHD_ERROR(("FIS Enab in fw : %d\n", sssr_reg_info->fis_enab)); + + DHD_ERROR(("HWA regs for reset \n")); + DHD_ERROR(("clkenable 0x%x, clkgatingenable 0x%x, clkext 0x%x, " + "clkctlstatus 0x%x, ioctrl 0x%x, resetctrl 0x%x\n", + sssr_reg_info->hwa_regs.base_regs.clkenable, + sssr_reg_info->hwa_regs.base_regs.clkgatingenable, + sssr_reg_info->hwa_regs.base_regs.clkext, + sssr_reg_info->hwa_regs.base_regs.clkctlstatus, + sssr_reg_info->hwa_regs.wrapper_regs.ioctrl, + sssr_reg_info->hwa_regs.wrapper_regs.resetctrl)); + DHD_ERROR(("HWA regs value seq for reset \n")); + for (i = 0; i < SSSR_HWA_RESET_SEQ_STEPS; i++) { + DHD_ERROR(("hwa_resetseq_val[%d] 0x%x", i, + sssr_reg_info->hwa_regs.hwa_resetseq_val[i])); + } +} + +static void +dhd_dump_sssr_reg_info_v1(dhd_pub_t *dhd) +{ + sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info; + sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1; + int i, j; + uint8 num_d11cores = dhd_d11_slices_num_get(dhd); + + DHD_ERROR(("pmu_regs\n")); + DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x " + "macresreqtimer=0x%x macresreqtimer1=0x%x\n", + sssr_reg_info->pmu_regs.base_regs.pmuintmask0, + sssr_reg_info->pmu_regs.base_regs.pmuintmask1, + sssr_reg_info->pmu_regs.base_regs.resreqtimer, + sssr_reg_info->pmu_regs.base_regs.macresreqtimer, + sssr_reg_info->pmu_regs.base_regs.macresreqtimer1)); + DHD_ERROR(("chipcommon_regs\n")); + DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n", + sssr_reg_info->chipcommon_regs.base_regs.intmask, + sssr_reg_info->chipcommon_regs.base_regs.powerctrl, + sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus, + sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask)); + DHD_ERROR(("arm_regs\n")); + DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x" + " resetctrl=0x%x itopoobb=0x%x\n", + sssr_reg_info->arm_regs.base_regs.clockcontrolstatus, + sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val, + sssr_reg_info->arm_regs.wrapper_regs.resetctrl, + sssr_reg_info->arm_regs.wrapper_regs.itopoobb)); + DHD_ERROR(("pcie_regs\n")); + DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x " + "clockcontrolstatus_val=0x%x itopoobb=0x%x\n", + sssr_reg_info->pcie_regs.base_regs.ltrstate, + sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus, + sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val, + sssr_reg_info->pcie_regs.wrapper_regs.itopoobb)); + DHD_ERROR(("vasip_regs\n")); + DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n", + sssr_reg_info->vasip_regs.wrapper_regs.ioctrl, + sssr_reg_info->vasip_regs.vasip_sr_addr, + sssr_reg_info->vasip_regs.vasip_sr_size)); + + for (i = 0; i < num_d11cores; i++) { + DHD_ERROR(("mac_regs core[%d]\n", i)); + DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x 
clockcontrolstatus=0x%x " + "clockcontrolstatus_val=0x%x\n", + sssr_reg_info->mac_regs[i].base_regs.xmtaddress, + sssr_reg_info->mac_regs[i].base_regs.xmtdata, + sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus, + sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val)); + DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n", + sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl, + sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb, + sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl)); + for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) { + DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j, + sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j])); + } + DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size)); + } +} + +#endif /* DHD_PCIE_REG_ACCESS */ + +void +dhd_dump_sssr_reg_info(dhd_pub_t *dhd) +{ +#ifdef DHD_PCIE_REG_ACCESS + sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info; + sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1; + + DHD_ERROR(("************** SSSR REG INFO start version:%d ****************\n", + sssr_reg_info->version)); + switch (sssr_reg_info->version) { + case SSSR_REG_INFO_VER_3 : + dhd_dump_sssr_reg_info_v3(dhd); + break; + case SSSR_REG_INFO_VER_2 : + dhd_dump_sssr_reg_info_v2(dhd); + break; + default: + dhd_dump_sssr_reg_info_v1(dhd); + break; + } + DHD_ERROR(("************** SSSR REG INFO end ****************\n")); +#endif /* DHD_PCIE_REG_ACCESS */ +} + +int +dhd_get_sssr_reg_info(dhd_pub_t *dhd) +{ + int ret; + /* get sssr_reg_info from firmware */ + ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)dhd->sssr_reg_info, + sizeof(sssr_reg_info_cmn_t), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n", + __FUNCTION__, ret)); + return BCME_ERROR; + } + + dhd_dump_sssr_reg_info(dhd); + return BCME_OK; +} + +uint32 +dhd_get_sssr_bufsize(dhd_pub_t *dhd) +{ + int i; + uint32 sssr_bufsize = 0; + uint8 num_d11cores; + + num_d11cores = dhd_d11_slices_num_get(dhd); + + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + for (i = 0; i < num_d11cores; i++) { + sssr_bufsize += dhd->sssr_reg_info->rev2.mac_regs[i].sr_size; + } + if ((dhd->sssr_reg_info->rev2.length > + OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) && + dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) { + sssr_bufsize += 0; /* TBD */ + } + break; + case SSSR_REG_INFO_VER_1 : + for (i = 0; i < num_d11cores; i++) { + sssr_bufsize += dhd->sssr_reg_info->rev1.mac_regs[i].sr_size; + } + if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) { + sssr_bufsize += dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size; + } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t, + dig_mem_info)) && dhd->sssr_reg_info->rev1. 
+ dig_mem_info.dig_sr_addr) {
+ sssr_bufsize += dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ for (i = 0; i < num_d11cores; i++) {
+ sssr_bufsize += dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
+ }
+ if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
+ sssr_bufsize += dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ /* Double the size as different dumps will be saved before and after SR */
+ sssr_bufsize = 2 * sssr_bufsize;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ return sssr_bufsize;
+}
+
+int
+dhd_sssr_dump_init(dhd_pub_t *dhd)
+{
+ int i;
+ uint32 sssr_bufsize;
+ uint32 mempool_used = 0;
+ uint8 num_d11cores = 0;
+ bool alloc_sssr = FALSE;
+ uint32 sr_size = 0;
+
+ dhd->sssr_inited = FALSE;
+ if (!sssr_enab) {
+ DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
+ return BCME_OK;
+ }
+
+ /* check if sssr mempool is allocated */
+ if (dhd->sssr_mempool == NULL) {
+ DHD_ERROR(("%s: sssr_mempool is not allocated\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* check if sssr reg info is allocated */
+ if (dhd->sssr_reg_info == NULL) {
+ DHD_ERROR(("%s: sssr_reg_info is not allocated\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Get SSSR reg info */
+ if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
+ printf("DEBUG_SSSr: %s: dhd_get_sssr_reg_info failed\n", __FUNCTION__);
+ return BCME_ERROR;
+ }
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+ /* Validate structure version and length */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ if (dhd->sssr_reg_info->rev3.length != sizeof(sssr_reg_info_v3_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev3.length (%d : %d)"
+ " mismatch on rev3\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev3.length,
+ (int)sizeof(sssr_reg_info_v3_t)));
+ return BCME_ERROR;
+ }
+ break;
+ case SSSR_REG_INFO_VER_2 :
+ if (dhd->sssr_reg_info->rev2.length != sizeof(sssr_reg_info_v2_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
+ " mismatch on rev2\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev2.length,
+ (int)sizeof(sssr_reg_info_v2_t)));
+ return BCME_ERROR;
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ if (dhd->sssr_reg_info->rev1.length != sizeof(sssr_reg_info_v1_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev1.length (%d : %d)"
+ " mismatch on rev1\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev1.length,
+ (int)sizeof(sssr_reg_info_v1_t)));
+ return BCME_ERROR;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ if (dhd->sssr_reg_info->rev0.length != sizeof(sssr_reg_info_v0_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev0.length (%d : %d)"
+ " mismatch on rev0\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev0.length,
+ (int)sizeof(sssr_reg_info_v0_t)));
+ return BCME_ERROR;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* validate fifo size */
+ sssr_bufsize = dhd_get_sssr_bufsize(dhd);
+ if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
+ DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
+ __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
+ return BCME_ERROR;
+ }
+
+ /* init all pointers to NULL */
+ for (i = 0; i < num_d11cores; i++) {
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd->sssr_d11_before[i] = NULL;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ 
dhd->sssr_d11_after[i] = NULL; + } + +#ifdef DHD_SSSR_DUMP_BEFORE_SR + dhd->sssr_dig_buf_before = NULL; +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + dhd->sssr_dig_buf_after = NULL; + + /* Allocate memory */ + for (i = 0; i < num_d11cores; i++) { + alloc_sssr = FALSE; + sr_size = 0; + + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + if (dhd->sssr_reg_info->rev2.mac_regs[i].sr_size) { + alloc_sssr = TRUE; + sr_size = dhd->sssr_reg_info->rev2.mac_regs[i].sr_size; + } + break; + case SSSR_REG_INFO_VER_1 : + if (dhd->sssr_reg_info->rev1.mac_regs[i].sr_size) { + alloc_sssr = TRUE; + sr_size = dhd->sssr_reg_info->rev1.mac_regs[i].sr_size; + } + break; + case SSSR_REG_INFO_VER_0 : + if (dhd->sssr_reg_info->rev0.mac_regs[i].sr_size) { + alloc_sssr = TRUE; + sr_size = dhd->sssr_reg_info->rev0.mac_regs[i].sr_size; + } + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + if (alloc_sssr) { +#ifdef DHD_SSSR_DUMP_BEFORE_SR + dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += sr_size; +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + + dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += sr_size; + } + } + + /* Allocate dump memory for VASIP (version 0 or 1) or the digital core (version 1, 2, or 3) */ + alloc_sssr = FALSE; + sr_size = 0; + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + if ((dhd->sssr_reg_info->rev2.length > + OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) && + dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) { + alloc_sssr = TRUE; + sr_size = dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size; + } + break; + case SSSR_REG_INFO_VER_1 : + if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) { + alloc_sssr = TRUE; + sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size; + } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t, + dig_mem_info)) && dhd->sssr_reg_info->rev1. + dig_mem_info.dig_sr_addr) { + alloc_sssr = TRUE; + sr_size = dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size; + } + break; + case SSSR_REG_INFO_VER_0 : + if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) { + alloc_sssr = TRUE; + sr_size = dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size; + } + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + if (alloc_sssr) { + dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += sr_size; + +#ifdef DHD_SSSR_DUMP_BEFORE_SR + /* DIG dump before suspend is not applicable.
*/ + dhd->sssr_dig_buf_before = NULL; +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + } + + dhd->sssr_inited = TRUE; + + return BCME_OK; + +} + +void +dhd_sssr_dump_deinit(dhd_pub_t *dhd) +{ + int i; + + dhd->sssr_inited = FALSE; + /* init all pointers to NULL */ + for (i = 0; i < MAX_NUM_D11_CORES_WITH_SCAN; i++) { +#ifdef DHD_SSSR_DUMP_BEFORE_SR + dhd->sssr_d11_before[i] = NULL; +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + dhd->sssr_d11_after[i] = NULL; + } +#ifdef DHD_SSSR_DUMP_BEFORE_SR + dhd->sssr_dig_buf_before = NULL; +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + dhd->sssr_dig_buf_after = NULL; + + return; +} + +void +dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path) +{ + bool print_info = FALSE; + int dump_mode; + + if (!dhd || !path) { + DHD_ERROR(("%s: dhd or memdump_path is NULL\n", + __FUNCTION__)); + return; + } + + if (!dhd->sssr_dump_collected) { + /* SSSR dump is not collected */ + return; + } + + dump_mode = dhd->sssr_dump_mode; + + if (bcmstrstr(path, "core_0_before")) { + if (dhd->sssr_d11_outofreset[0] && + dump_mode == SSSR_DUMP_MODE_SSSR) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_0_after")) { + if (dhd->sssr_d11_outofreset[0]) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_1_before")) { + if (dhd->sssr_d11_outofreset[1] && + dump_mode == SSSR_DUMP_MODE_SSSR) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_1_after")) { + if (dhd->sssr_d11_outofreset[1]) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_2_before")) { + if (dhd->sssr_d11_outofreset[2] && + dump_mode == SSSR_DUMP_MODE_SSSR) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_2_after")) { + if (dhd->sssr_d11_outofreset[2]) { + print_info = TRUE; + } + } else { + print_info = TRUE; + } + + if (print_info) { + DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__, + path, FILE_NAME_HAL_TAG)); + } +} +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_SDTC_ETB_DUMP +/* + * sdtc: system debug trace controller + * etb: embedded trace buffer + */ +void +dhd_sdtc_etb_init(dhd_pub_t *dhd) +{ + bcm_iov_buf_t *iov_req = NULL; + etb_addr_info_t *p_etb_addr_info = NULL; + bcm_iov_buf_t *iov_resp = NULL; + uint8 *buf = NULL; + int ret = 0; + uint16 iovlen = 0; + uint16 version = 0; + + BCM_REFERENCE(p_etb_addr_info); + dhd->sdtc_etb_inited = FALSE; + + iov_req = MALLOCZ(dhd->osh, WLC_IOCTL_SMLEN); + if (iov_req == NULL) { + DHD_ERROR(("%s: Failed to alloc buffer for iovar request\n", __FUNCTION__)); + goto exit; + } + + buf = MALLOCZ(dhd->osh, WLC_IOCTL_MAXLEN); + if (buf == NULL) { + DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__)); + goto exit; + } + + /* fill header */ + iov_req->version = WL_SDTC_IOV_VERSION; + iov_req->id = WL_SDTC_CMD_ETB_INFO; + iov_req->len = sizeof(etb_addr_info_t); + iovlen = OFFSETOF(bcm_iov_buf_t, data) + iov_req->len; + + ret = dhd_iovar(dhd, 0, "sdtc", (char *)iov_req, iovlen, + (char *)buf, WLC_IOCTL_MAXLEN, FALSE); + if (ret < 0) { + DHD_ERROR(("%s failed to get sdtc etb_info %d\n", __FUNCTION__, ret)); + goto exit; + } + + version = dtoh16(*(uint16 *)buf); + /* Check for version */ + if (version != WL_SDTC_IOV_VERSION) { + DHD_ERROR(("%s WL_SDTC_IOV_VERSION mismatch\n", __FUNCTION__)); + goto exit; + } + iov_resp = (bcm_iov_buf_t *)buf; + if (iov_resp->id == iov_req->id) { + p_etb_addr_info = (etb_addr_info_t*)iov_resp->data; + dhd->etb_addr_info.version = p_etb_addr_info->version; + dhd->etb_addr_info.len = p_etb_addr_info->len; + dhd->etb_addr_info.etbinfo_addr = p_etb_addr_info->etbinfo_addr; + + DHD_ERROR(("%s
etb_addr_info: ver:%d, len:%d, addr:0x%x\n", __FUNCTION__, + dhd->etb_addr_info.version, dhd->etb_addr_info.len, + dhd->etb_addr_info.etbinfo_addr)); + } else { + DHD_ERROR(("%s Unknown CMD-ID (%d) as response for request ID %d\n", + __FUNCTION__, iov_resp->id, iov_req->id)); + goto exit; + } + + /* since all the requirements for SDTC and ETB are met, mark the capability as TRUE */ + dhd->sdtc_etb_inited = TRUE; + DHD_ERROR(("%s sdtc_etb_inited: %d\n", __FUNCTION__, dhd->sdtc_etb_inited)); +exit: + if (iov_req) { + MFREE(dhd->osh, iov_req, WLC_IOCTL_SMLEN); + } + if (buf) { + MFREE(dhd->osh, buf, WLC_IOCTL_MAXLEN); + } + return; +} + +void +dhd_sdtc_etb_deinit(dhd_pub_t *dhd) +{ + dhd->sdtc_etb_inited = FALSE; +} + +int +dhd_sdtc_etb_mempool_init(dhd_pub_t *dhd) +{ + dhd->sdtc_etb_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SDTC_ETB_MEMPOOL_SIZE); + if (dhd->sdtc_etb_mempool == NULL) { + DHD_ERROR(("%s: MALLOC of sdtc_etb_mempool failed\n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} + +void +dhd_sdtc_etb_mempool_deinit(dhd_pub_t *dhd) +{ + if (dhd->sdtc_etb_mempool) { + MFREE(dhd->osh, dhd->sdtc_etb_mempool, DHD_SDTC_ETB_MEMPOOL_SIZE); + dhd->sdtc_etb_mempool = NULL; + } +} +#endif /* DHD_SDTC_ETB_DUMP */ + +#ifdef DHD_FW_COREDUMP +void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length) +{ + if (!dhd_pub->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub, + DHD_PREALLOC_MEMDUMP_RAM, length); +#else + dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length); + + if ((dhd_pub->soc_ram == NULL) && CAN_SLEEP()) { + DHD_ERROR(("%s: Try to allocate virtual memory for fw crash snapshot.\n", + __FUNCTION__)); + dhd_pub->soc_ram = (uint8*) VMALLOC(dhd_pub->osh, length); + } +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + } + + if (dhd_pub->soc_ram == NULL) { + DHD_ERROR(("%s: Failed to allocate memory for fw crash snapshot.\n", + __FUNCTION__)); + dhd_pub->soc_ram_length = 0; + } else { + memset(dhd_pub->soc_ram, 0, length); + dhd_pub->soc_ram_length = length; + } + + /* soc_ram free handled in dhd_{free,clear} */ + return dhd_pub->soc_ram; +} +#endif /* DHD_FW_COREDUMP */ + +/* To NDIS developers: the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!!
+ */ + +int +dhd_common_socram_dump(dhd_pub_t *dhdp) +{ +#ifdef BCMDBUS + return 0; +#else + return dhd_socram_dump(dhdp->bus); +#endif /* BCMDBUS */ +} + +int +dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) +{ + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; +#ifdef DHD_MEM_STATS + uint64 malloc_mem = 0; + uint64 total_txpath_mem = 0; + uint64 txpath_bkpq_len = 0; + uint64 txpath_bkpq_mem = 0; + uint64 total_dhd_mem = 0; +#endif /* DHD_MEM_STATS */ + + if (!dhdp || !dhdp->prot || !buf) { + return BCME_ERROR; + } + + bcm_binit(strbuf, buf, buflen); + + /* Base DHD info */ + bcm_bprintf(strbuf, "%s\n", dhd_version); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n", + dhdp->up, dhdp->txoff, dhdp->busstate); + bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n", + dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz); + bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n", + dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac)); + bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt); + + bcm_bprintf(strbuf, "dongle stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n", + dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes, + dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped); + bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n", + dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes, + dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped); + bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast); + + bcm_bprintf(strbuf, "bus stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n", + dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors); + bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n", + dhdp->tx_ctlpkts, dhdp->tx_ctlerrs); + bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n", + dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors); + bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n", + dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped); + bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n", + dhdp->rx_readahead_cnt, dhdp->tx_realloc); + bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n", + dhdp->tx_pktgetfail, dhdp->rx_pktgetfail); + bcm_bprintf(strbuf, "tx_big_packets %lu\n", + dhdp->tx_big_packets); + bcm_bprintf(strbuf, "\n"); +#ifdef DMAMAP_STATS + /* Add DMA MAP info */ + bcm_bprintf(strbuf, "DMA MAP stats: \n"); + bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n", + dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz), + dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz)); +#ifndef IOCTLRESP_USE_CONSTMEM + bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,", + dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz)); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, " + "TSBUF RX: %lu size %luK\n", + dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz), + dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz), + dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz)); + bcm_bprintf(strbuf, "Total : %luK \n", + KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz + + dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz + + dhdp->dma_stats.tsbuf_rx_sz)); +#endif /* DMAMAP_STATS */ + bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error); + /* Add any prot info */ + 
dhd_prot_dump(dhdp, strbuf); + bcm_bprintf(strbuf, "\n"); + + /* Add any bus info */ + dhd_bus_dump(dhdp, strbuf); +#if defined(BCM_ROUTER_DHD) && defined(HNDCTF) + /* Add ctf info */ + dhd_ctf_dump(dhdp, strbuf); +#endif /* BCM_ROUTER_DHD && HNDCTF */ + +#if defined(DHD_LB_STATS) + dhd_lb_stats_dump(dhdp, strbuf); +#endif /* DHD_LB_STATS */ + +#ifdef DHD_MEM_STATS + + malloc_mem = MALLOCED(dhdp->osh); + + txpath_bkpq_len = dhd_active_tx_flowring_bkpq_len(dhdp); + /* + * Instead of traversing the entire queue to find each skb's length, + * assume MAX_MTU_SZ as the length of each skb. + */ + txpath_bkpq_mem = (txpath_bkpq_len * MAX_MTU_SZ); + total_txpath_mem = dhdp->txpath_mem + txpath_bkpq_mem; + + bcm_bprintf(strbuf, "\nDHD malloc memory_usage: %llubytes %lluKB\n", + malloc_mem, (malloc_mem / 1024)); + + bcm_bprintf(strbuf, "\nDHD tx-bkpq len: %llu memory_usage: %llubytes %lluKB\n", + txpath_bkpq_len, txpath_bkpq_mem, (txpath_bkpq_mem / 1024)); + bcm_bprintf(strbuf, "DHD tx-path memory_usage: %llubytes %lluKB\n", + total_txpath_mem, (total_txpath_mem / 1024)); + + total_dhd_mem = malloc_mem + total_txpath_mem; +#if defined(DHD_LB_STATS) + total_dhd_mem += dhd_lb_mem_usage(dhdp, strbuf); +#endif /* DHD_LB_STATS */ + bcm_bprintf(strbuf, "\nDHD Total memory_usage: %llubytes %lluKB\n", + total_dhd_mem, (total_dhd_mem / 1024)); +#endif /* DHD_MEM_STATS */ +#if defined(DHD_LB_STATS) + bcm_bprintf(strbuf, "\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n", + dhdp->lb_rxp_stop_thr_hitcnt, dhdp->lb_rxp_strt_thr_hitcnt); + bcm_bprintf(strbuf, "\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n", + dhdp->lb_rxp_napi_sched_cnt, dhdp->lb_rxp_napi_complete_cnt); +#endif /* DHD_LB_STATS */ + +#if defined(DHD_MQ) && defined(DHD_MQ_STATS) + dhd_mqstats_dump(dhdp, strbuf); +#endif + +#ifdef DHD_WET + if (dhd_get_wet_mode(dhdp)) { + bcm_bprintf(strbuf, "Wet Dump:\n"); + dhd_wet_dump(dhdp, strbuf); + } +#endif /* DHD_WET */ + + DHD_ERROR(("%s bufsize: %d free: %d\n", __FUNCTION__, buflen, strbuf->size)); + /* return remaining buffer length */ + return (!strbuf->size ?
BCME_BUFTOOSHORT : strbuf->size); +} + +void +dhd_dump_to_kernelog(dhd_pub_t *dhdp) +{ + char buf[512]; + + DHD_ERROR(("F/W version: %s\n", fw_version)); + bcm_bprintf_bypass = TRUE; + dhd_dump(dhdp, buf, sizeof(buf)); + bcm_bprintf_bypass = FALSE; +} + +int +dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx) +{ + wl_ioctl_t ioc; + + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len); +} + +int +dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + memset(iovbuf, 0, sizeof(iovbuf)); + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (!ret) { + *pval = ltoh32(*((uint*)iovbuf)); + } else { + DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +int +dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + int lval = htol32(val); + uint len; + + len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf)); + + if (len) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx); + if (ret) { + DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +static struct ioctl2str_s { + uint32 ioctl; + char *name; +} ioctl2str_array[] = { + {WLC_UP, "UP"}, + {WLC_DOWN, "DOWN"}, + {WLC_SET_PROMISC, "SET_PROMISC"}, + {WLC_SET_INFRA, "SET_INFRA"}, + {WLC_SET_AUTH, "SET_AUTH"}, + {WLC_SET_SSID, "SET_SSID"}, + {WLC_RESTART, "RESTART"}, + {WLC_SET_CHANNEL, "SET_CHANNEL"}, + {WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"}, + {WLC_SET_KEY, "SET_KEY"}, + {WLC_SCAN, "SCAN"}, + {WLC_DISASSOC, "DISASSOC"}, + {WLC_REASSOC, "REASSOC"}, + {WLC_SET_COUNTRY, "SET_COUNTRY"}, + {WLC_SET_WAKE, "SET_WAKE"}, + {WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"}, + {WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"}, + {WLC_SET_WSEC, "SET_WSEC"}, + {WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"}, + {WLC_SET_RADAR, "SET_RADAR"}, + {0, NULL} +}; + +static char * +ioctl2str(uint32 ioctl) +{ + struct ioctl2str_s *p = ioctl2str_array; + + while (p->name != NULL) { + if (p->ioctl == ioctl) { + return p->name; + } + p++; + } + + return ""; +} + +/** + * @param ioc IO control struct, members are partially used by this function. + * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return. + * @param len Maximum number of bytes that dongle is allowed to write into 'buf'. + */ +int +dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) +{ + int ret = BCME_ERROR; + unsigned long flags; +#ifdef DUMP_IOCTL_IOV_LIST + dhd_iov_li_t *iov_li; +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef REPORT_FATAL_TIMEOUTS + wl_escan_params_t *eparams; + uint8 *buf_ptr = (uint8 *)buf; + uint16 action = 0; +#endif /* REPORT_FATAL_TIMEOUTS */ + int hostsleep_set = 0; + int hostsleep_val = 0; + + if (dhd_query_bus_erros(dhd_pub)) { + return -ENODEV; + } + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + DHD_OS_WAKE_LOCK(dhd_pub); + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) { + DHD_RPM(("%s: pm_runtime_get_sync error. 
\n", __FUNCTION__)); + DHD_OS_WAKE_UNLOCK(dhd_pub); + return BCME_ERROR; + } +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef KEEPIF_ON_DEVICE_RESET + if (ioc->cmd == WLC_GET_VAR) { + dbus_config_t config; + config.general_param = 0; + if (buf) { + if (!strcmp(buf, "wowl_activate")) { + /* 1 (TRUE) after decreased by 1 */ + config.general_param = 2; + } else if (!strcmp(buf, "wowl_clear")) { + /* 0 (FALSE) after decreased by 1 */ + config.general_param = 1; + } + } + if (config.general_param) { + config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET; + config.general_param--; + dbus_set_config(dhd_pub->dbus, &config); + } + } +#endif /* KEEPIF_ON_DEVICE_RESET */ + + if (dhd_os_proto_block(dhd_pub)) + { +#ifdef DHD_LOG_DUMP + int slen, val, lval, min_len; + char *msg, tmp[64]; + + /* WLC_GET_VAR */ + if (ioc->cmd == WLC_GET_VAR && buf) { + min_len = MIN(sizeof(tmp) - 1, strlen(buf)); + memset(tmp, 0, sizeof(tmp)); + bcopy(buf, tmp, min_len); + tmp[min_len] = '\0'; + } +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_DISCONNECT_TRACE + if (WLC_DISASSOC == ioc->cmd || WLC_DOWN == ioc->cmd || + WLC_DISASSOC_MYAP == ioc->cmd) { + DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd)); + } +#endif /* HW_DISCONNECT_TRACE */ + /* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */ + if (ioc->set == TRUE) { + char *pars = (char *)buf; // points at user buffer + if (ioc->cmd == WLC_SET_VAR && buf) { + DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars)); + if (ioc->len > 1 + sizeof(uint32)) { + // skip iovar name: + pars += strnlen(pars, ioc->len - 1 - sizeof(uint32)); + pars++; // skip NULL character + } + } else { + DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s", + ifidx, ioc->cmd, ioctl2str(ioc->cmd))); + } + if (pars != NULL) { + DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars)); + } else { + DHD_DNGL_IOVAR_SET((" NULL\n")); + } + } + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) { +#ifdef DHD_EFI + DHD_INFO(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); +#else + DHD_INFO(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); +#endif /* DHD_EFI */ + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } + DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl); +#endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd_pub) || + dhd_pub->dhd_induce_error == DHD_INDUCE_IOCTL_SUSPEND_ERROR) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state)); +#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR + ioctl_suspend_error++; + if (ioctl_suspend_error > MAX_IOCTL_SUSPEND_ERROR) { + dhd_pub->hang_reason = HANG_REASON_IOCTL_SUSPEND_ERROR; + dhd_os_send_hang_message(dhd_pub); + ioctl_suspend_error = 0; + } +#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */ + DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } +#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR + ioctl_suspend_error = 0; +#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */ + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + +#if defined(WL_WLC_SHIM) + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); + + wl_io_pport_t io_pport; + io_pport.dhd_pub = 
dhd_pub; + io_pport.ifidx = ifidx; + + ret = wl_shim_ioctl(shim, ioc, len, &io_pport); + if (ret != BCME_OK) { + DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n", + __FUNCTION__, ioc->cmd, ret)); + } + } +#else +#ifdef DUMP_IOCTL_IOV_LIST + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) { + if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) { + DHD_ERROR(("iovar dump list item allocation Failed\n")); + } else { + iov_li->cmd = ioc->cmd; + if (buf) + bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1); + dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head, + &iov_li->list); + } + } +#endif /* DUMP_IOCTL_IOV_LIST */ + +#ifdef REPORT_FATAL_TIMEOUTS + /* fill in the sync_id to ensure that the scan timeout is always for the + * current running escan in the FW - the wl app does not fill in an + * incrementing number for sync_id, it only fills in a random number which + * increases the chance of 2 consecutive escans having the same sync id + * This should happen here after dhd_proto_block() + * is called, so that sync_id does not + * get incremented if 2 consecutive escans are fired in quick succession + */ + if ((ioc->cmd == WLC_SET_VAR && + buf != NULL && + strcmp("escan", buf) == 0)) { + eparams = (wl_escan_params_t *) (buf_ptr + strlen("escan") + 1); + action = dtoh16(eparams->action); + if (action == WL_SCAN_ACTION_START) { + ++dhd_pub->esync_id; + /* sync id of 0 is not used for escan, + * it is used to indicate + * a normal scan timer is running, so as + * to ensure that escan abort event + * does not cancel a normal scan timeout + */ + if (dhd_pub->esync_id == 0) + ++dhd_pub->esync_id; + DHD_INFO(("%s:escan sync id set to = %u \n", + __FUNCTION__, dhd_pub->esync_id)); + eparams->sync_id = htod16(dhd_pub->esync_id); + } + } +#endif /* REPORT_FATAL_TIMEOUTS */ + + if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len, + &hostsleep_set, &hostsleep_val, &ret)) + goto exit; + ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len); + dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret); + +#ifdef DUMP_IOCTL_IOV_LIST + if (ret == -ETIMEDOUT) { + DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n", + IOV_LIST_MAX_LEN)); + dhd_iov_li_print(&dhd_pub->dump_iovlist_head); + } +#endif /* DUMP_IOCTL_IOV_LIST */ +#endif /* defined(WL_WLC_SHIM) */ +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT + if (ret == -ETIMEDOUT) { + copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc); + } +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */ +#ifdef DHD_LOG_DUMP + if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) && + buf != NULL) { + if (buf) { + lval = 0; + slen = strlen(buf) + 1; + msg = (char*)buf; + if (len >= slen + sizeof(lval)) { + if (ioc->cmd == WLC_GET_VAR) { + msg = tmp; + lval = *(int*)buf; + } else { + min_len = MIN(ioc->len - slen, sizeof(int)); + bcopy((msg + slen), &lval, min_len); + } + if (!strncmp(msg, "cur_etheraddr", + strlen("cur_etheraddr"))) { + lval = 0; + } + } + DHD_IOVAR_MEM(( + "%s: cmd: %d, msg: %s val: 0x%x," + " len: %d, set: %d, txn-id: %d\n", + ioc->cmd == WLC_GET_VAR ? + "WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, msg, lval, ioc->len, ioc->set, + dhd_prot_get_ioctl_trans_id(dhd_pub))); + } else { + DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n", + ioc->cmd == WLC_GET_VAR ? 
"WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, ioc->len, ioc->set, + dhd_prot_get_ioctl_trans_id(dhd_pub))); + } + } else { + slen = ioc->len; + if (buf != NULL && slen != 0) { + if (slen >= 4) { + val = *(int*)buf; + } else if (slen >= 2) { + val = *(short*)buf; + } else { + val = *(char*)buf; + } + /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */ + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) { + DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, " + "set: %d\n", ioc->cmd, val, ioc->len, ioc->set)); + } + } else { + DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd)); + } + } +#endif /* DHD_LOG_DUMP */ +#if defined(OEM_ANDROID) + if (ret && dhd_pub->up) { + /* Send hang event only if dhd_open() was success */ + dhd_os_check_hang(dhd_pub, ifidx, ret); + } + + if (ret == -ETIMEDOUT && !dhd_pub->up) { + DHD_ERROR(("%s: 'resumed on timeout' error is " + "occurred before the interface does not" + " bring up\n", __FUNCTION__)); + } +#endif /* defined(OEM_ANDROID) */ + +exit: + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + +#ifdef REPORT_FATAL_TIMEOUTS + if ((ret == BCME_OK && ioc->cmd == WLC_SET_VAR && + buf != NULL && + strcmp("escan", buf) == 0)) { + if (action == WL_SCAN_ACTION_START) + dhd_start_scan_timer(dhd_pub, TRUE); + } +#endif /* REPORT_FATAL_TIMEOUTS */ + + dhd_os_proto_unblock(dhd_pub); + +#ifdef DETAIL_DEBUG_LOG_FOR_IOCTL + if (ret < 0) { + if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) && + buf != NULL) { + if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) { + DHD_ERROR_MEM(("%s: %s: %s, %s\n", + __FUNCTION__, ioc->cmd == WLC_GET_VAR ? + "WLC_GET_VAR" : "WLC_SET_VAR", + buf? (char *)buf:"NO MESSAGE", + ret == BCME_UNSUPPORTED ? "UNSUPPORTED" + : "NOT ASSOCIATED")); + } else { + DHD_ERROR_MEM(("%s: %s: %s, ret = %d\n", + __FUNCTION__, ioc->cmd == WLC_GET_VAR ? + "WLC_GET_VAR" : "WLC_SET_VAR", + (char *)buf, ret)); + } + } else { + if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) { + DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, %s\n", + __FUNCTION__, ioc->cmd, + ret == BCME_UNSUPPORTED ? 
"UNSUPPORTED" : + "NOT ASSOCIATED")); + } else { + DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, ret = %d\n", + __FUNCTION__, ioc->cmd, ret)); + } + } + } +#endif /* DETAIL_DEBUG_LOG_FOR_IOCTL */ + } + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus)); + + DHD_OS_WAKE_UNLOCK(dhd_pub); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef WL_MONITOR + /* Intercept monitor ioctl here, add/del monitor if */ + if (ret == BCME_OK && ioc->cmd == WLC_SET_MONITOR) { + int val = 0; + if (buf != NULL && len != 0) { + if (len >= 4) { + val = *(int*)buf; + } else if (len >= 2) { + val = *(short*)buf; + } else { + val = *(char*)buf; + } + } + dhd_set_monitor(dhd_pub, ifidx, val); + } +#endif /* WL_MONITOR */ + + return ret; +} + +uint wl_get_port_num(wl_io_pport_t *io_pport) +{ + return 0; +} + +/* Get bssidx from iovar params + * Input: dhd_pub - pointer to dhd_pub_t + * params - IOVAR params + * Output: idx - BSS index + * val - ponter to the IOVAR arguments + */ +static int +dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val) +{ + char *prefix = "bsscfg:"; + uint32 bssidx; + + if (!(strncmp(params, prefix, strlen(prefix)))) { + /* per bss setting should be prefixed with 'bsscfg:' */ + const char *p = params + strlen(prefix); + + /* Skip Name */ + while (*p != '\0') + p++; + /* consider null */ + p = p + 1; + bcopy(p, &bssidx, sizeof(uint32)); + /* Get corresponding dhd index */ + bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx)); + + if (bssidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* skip bss idx */ + p += sizeof(uint32); + *val = p; + *idx = bssidx; + } else { + DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#if defined(DHD_DEBUG) && defined(BCMDBUS) +/* USB Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + DHD_TRACE(("%s \n", __FUNCTION__)); + + return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE); + +} +#endif /* DHD_DEBUG && BCMDBUS */ + +#ifdef DHD_DEBUG +int +dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + unsigned long int_arg = 0; + char *p; + char *end_ptr = NULL; + dhd_dbg_mwli_t *mw_li; + dll_t *item, *next; + /* check if mwalloc, mwquery or mwfree was supplied arguement with space */ + p = bcmstrstr((char *)msg, " "); + if (p != NULL) { + /* space should be converted to null as separation flag for firmware */ + *p = '\0'; + /* store the argument in int_arg */ + int_arg = bcm_strtoul(p+1, &end_ptr, 10); + } + + if (!p && !strcmp(msg, "query")) { + /* lets query the list inetrnally */ + if (dll_empty(dll_head_p(&dhd->mw_list_head))) { + DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n")); + } else { + for (item = dll_head_p(&dhd->mw_list_head); + !dll_end(&dhd->mw_list_head, item); item = next) { + next = dll_next_p(item); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + DHD_ERROR(("item: \n", mw_li->id, mw_li->size)); + } + } + } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) { + int32 alloc_handle; + /* convert size into KB and append as integer */ + *((int32 *)(p+1)) = int_arg*1024; + *(p+1+sizeof(int32)) = '\0'; + + /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size + + * 1 bytes for null caracter + */ + msglen = strlen(msg) + sizeof(int32) + 1; + if 
(dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) { + DHD_ERROR(("IOCTL failed for memdebug alloc\n")); + } + + /* handle returned from the dongle, basically the address of the allocated unit */ + alloc_handle = *((int32 *)msg); + + /* add a node in the list with the <id, handle, size> tuple */ + if (alloc_handle == 0) { + DHD_ERROR(("Requested size could not be allocated\n")); + } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) { + DHD_ERROR(("mw list item allocation Failed\n")); + } else { + mw_li->id = dhd->mw_id++; + mw_li->handle = alloc_handle; + mw_li->size = int_arg; + /* append the node in the list */ + dll_append(&dhd->mw_list_head, &mw_li->list); + } + } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) { + /* inform dongle to free wasted chunk */ + int handle = 0; + int size = 0; + for (item = dll_head_p(&dhd->mw_list_head); + !dll_end(&dhd->mw_list_head, item); item = next) { + next = dll_next_p(item); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + + if (mw_li->id == (int)int_arg) { + handle = mw_li->handle; + size = mw_li->size; + dll_delete(item); + MFREE(dhd->osh, mw_li, sizeof(*mw_li)); + if (dll_empty(dll_head_p(&dhd->mw_list_head))) { + /* reset the id */ + dhd->mw_id = 0; + } + } + } + if (handle) { + int len; + /* append the free handle and the chunk size in first 8 bytes + * after the command and null character + */ + *((int32 *)(p+1)) = handle; + *((int32 *)((p+1)+sizeof(int32))) = size; + /* append null as terminator */ + *(p+1+2*sizeof(int32)) = '\0'; + /* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size + * + 1 byte for the null character + */ + len = strlen(msg) + 2*sizeof(int32) + 1; + /* send iovar to free the chunk */ + if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) { + DHD_ERROR(("IOCTL failed for memdebug free\n")); + } + } else { + DHD_ERROR(("specified id does not exist\n")); + } + } else { + /* for all the wrong argument formats */ + return BCME_BADARG; + } + return 0; +} +extern void +dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head) +{ + dll_t *item; + dhd_dbg_mwli_t *mw_li; + while (!(dll_empty(list_head))) { + item = dll_head_p(list_head); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + dll_delete(item); + MFREE(dhd->osh, mw_li, sizeof(*mw_li)); + } +} +#ifdef BCMPCIE +int +dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen) +{ + flow_ring_table_t *flow_ring_table; + char *cmd; + char *end_ptr = NULL; + uint8 prio; + uint16 flowid; + int i; + int ret = 0; + cmd = bcmstrstr(msg, " "); + BCM_REFERENCE(prio); + if (cmd != NULL) { + /* in order to use string operations append null */ + *cmd = '\0'; + } else { + DHD_ERROR(("missing: create/delete args\n")); + return BCME_ERROR; + } + if (cmd && !strcmp(msg, "create")) { + /* extract <"source address", "destination address", "priority"> */ + uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN]; + BCM_REFERENCE(sa); + BCM_REFERENCE(da); + msg = msg + strlen("create") + 1; + /* fill ethernet source address */ + for (i = 0; i < ETHER_ADDR_LEN; i++) { + sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16); + if (*end_ptr == ':') { + msg = (end_ptr + 1); + } else if (i != 5) { + DHD_ERROR(("not a valid source mac addr\n")); + return BCME_ERROR; + } + } + if (*end_ptr != ' ') { + DHD_ERROR(("missing: destination mac address\n")); + return BCME_ERROR; + } else { + /* skip space */ + msg = end_ptr + 1; + } + /* fill ethernet destination address */ + for (i = 0; i < ETHER_ADDR_LEN; i++) { + da[i] = (uint8)bcm_strtoul(msg,
&end_ptr, 16); + if (*end_ptr == ':') { + msg = (end_ptr + 1); + } else if (i != 5) { + DHD_ERROR(("not a valid destination mac addr\n")); + return BCME_ERROR; + } + } + if (*end_ptr != ' ') { + DHD_ERROR(("missing: priority\n")); + return BCME_ERROR; + } else { + msg = end_ptr + 1; + } + /* parse priority */ + prio = (uint8)bcm_strtoul(msg, &end_ptr, 10); + if (prio > MAXPRIO) { + DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n", + __FUNCTION__)); + return BCME_ERROR; + } + + if (*end_ptr != '\0') { + DHD_ERROR(("msg not terminated with NULL character\n")); + return BCME_ERROR; + } + ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret)); + return BCME_ERROR; + } + return BCME_OK; + + } else if (cmd && !strcmp(msg, "delete")) { + msg = msg + strlen("delete") + 1; + /* parse flowid */ + flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10); + if (*end_ptr != '\0') { + DHD_ERROR(("msg not terminated with NULL character\n")); + return BCME_ERROR; + } + + /* Find flowid from ifidx 0 since this IOVAR creates flowrings with ifidx 0 */ + if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK) + { + DHD_ERROR(("%s: cannot delete flowid %u that was not created\n", __FUNCTION__, flowid)); + return BCME_ERROR; + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]); + if (ret != BCME_OK) { + DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret)); + return BCME_ERROR; + } + return BCME_OK; + } + DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__)); + return BCME_ERROR; +} +#endif /* BCMPCIE */ +#endif /* DHD_DEBUG */ + +static int +dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, uint len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + uint32 dhd_ver_len, bus_api_rev_len; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + switch (actionid) { + case IOV_GVAL(IOV_VERSION): + /* Need to have checked buffer length */ + dhd_ver_len = sizeof(dhd_version) - 1; + bus_api_rev_len = strlen(bus_api_revision); + if (len > dhd_ver_len + bus_api_rev_len) { + bcmerror = memcpy_s((char *)arg, len, dhd_version, dhd_ver_len); + if (bcmerror != BCME_OK) { + break; + } + bcmerror = memcpy_s((char *)arg + dhd_ver_len, len - dhd_ver_len, + bus_api_revision, bus_api_rev_len); + if (bcmerror != BCME_OK) { + break; + } + *((char *)arg + dhd_ver_len + bus_api_rev_len) = '\0'; + } + break; + + case IOV_GVAL(IOV_WLMSGLEVEL): + printf("android_msg_level=0x%x\n", android_msg_level); + printf("config_msg_level=0x%x\n", config_msg_level); +#if defined(WL_WIRELESS_EXT) + int_val = (int32)iw_msg_level; + bcopy(&int_val, arg, val_size); + printf("iw_msg_level=0x%x\n", iw_msg_level); +#endif +#ifdef WL_CFG80211 + int_val = (int32)wl_dbg_level; + bcopy(&int_val, arg, val_size); + printf("cfg_msg_level=0x%x\n", wl_dbg_level); +#endif + break; + + case IOV_SVAL(IOV_WLMSGLEVEL): + if (int_val & DHD_ANDROID_VAL) { + android_msg_level = (uint)(int_val & 0xFFFF); + printf("android_msg_level=0x%x\n", android_msg_level); + } + if (int_val & DHD_CONFIG_VAL) { +
config_msg_level = (uint)(int_val & 0xFFFF); + printf("config_msg_level=0x%x\n", config_msg_level); + } +#if defined(WL_WIRELESS_EXT) + if (int_val & DHD_IW_VAL) { + iw_msg_level = (uint)(int_val & 0xFFFF); + printf("iw_msg_level=0x%x\n", iw_msg_level); + } +#endif +#ifdef WL_CFG80211 + if (int_val & DHD_CFG_VAL) { + wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF)); + } +#endif + break; + + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)dhd_msg_level; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + dhd_msg_level = int_val; + break; + + case IOV_GVAL(IOV_BCMERRORSTR): + bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN); + ((char *)arg)[BCME_STRLEN - 1] = 0x00; + break; + + case IOV_GVAL(IOV_BCMERROR): + int_val = (int32)dhd_pub->bcmerror; + bcopy(&int_val, arg, val_size); + break; + +#ifndef BCMDBUS + case IOV_GVAL(IOV_WDTICK): + int_val = (int32)dhd_watchdog_ms; + bcopy(&int_val, arg, val_size); + break; +#endif /* !BCMDBUS */ + + case IOV_SVAL(IOV_WDTICK): + if (!dhd_pub->up) { + bcmerror = BCME_NOTUP; + break; + } + + dhd_watchdog_ms = (uint)int_val; + + dhd_os_wd_timer(dhd_pub, (uint)int_val); + break; + + case IOV_GVAL(IOV_DUMP): + if (dhd_dump(dhd_pub, arg, len) <= 0) + bcmerror = BCME_ERROR; + else + bcmerror = BCME_OK; + break; + +#ifndef BCMDBUS + case IOV_GVAL(IOV_DCONSOLE_POLL): + int_val = (int32)dhd_pub->dhd_console_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DCONSOLE_POLL): + dhd_pub->dhd_console_ms = (uint)int_val; + break; + +#if defined(DHD_DEBUG) + case IOV_SVAL(IOV_CONS): + if (len > 0) { +#ifdef CONSOLE_DPC + bcmerror = dhd_bus_txcons(dhd_pub, arg, len - 1); +#else + bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1); +#endif + } + break; +#endif /* DHD_DEBUG */ +#endif /* !BCMDBUS */ + + case IOV_SVAL(IOV_CLEARCOUNTS): + dhd_pub->tx_packets = dhd_pub->rx_packets = 0; + dhd_pub->tx_errors = dhd_pub->rx_errors = 0; + dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0; + dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0; + dhd_pub->tx_dropped = 0; + dhd_pub->rx_dropped = 0; + dhd_pub->tx_pktgetfail = 0; + dhd_pub->rx_pktgetfail = 0; + dhd_pub->rx_readahead_cnt = 0; + dhd_pub->tx_realloc = 0; + dhd_pub->wd_dpc_sched = 0; + dhd_pub->tx_big_packets = 0; + memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats)); + dhd_bus_clearcounts(dhd_pub); +#ifdef PROP_TXSTATUS + /* clear proptxstatus related counters */ + dhd_wlfc_clear_counts(dhd_pub); +#endif /* PROP_TXSTATUS */ +#if defined(DHD_LB_STATS) + DHD_LB_STATS_RESET(dhd_pub); +#endif /* DHD_LB_STATS */ + break; + +#ifdef BCMPERFSTATS + case IOV_GVAL(IOV_LOGDUMP): { + bcmdumplog((char*)arg, len); + break; + } + + case IOV_SVAL(IOV_LOGCAL): { + bcmlog("Starting OSL_DELAY (%d usecs)", (uint)int_val, 0); + OSL_DELAY((uint)int_val); + bcmlog("Finished OSL_DELAY (%d usecs)", (uint)int_val, 0); + break; + } + + case IOV_SVAL(IOV_LOGSTAMP): { + int int_val2; + + if (plen >= 2 * sizeof(int)) { + bcopy((char *)params + sizeof(int_val), &int_val2, sizeof(int_val2)); + bcmlog("User message %d %d", (uint)int_val, (uint)int_val2); + } else if (plen >= sizeof(int)) { + bcmlog("User message %d", (uint)int_val, 0); + } else { + bcmlog("User message", 0, 0); + } + break; + } +#endif /* BCMPERFSTATS */ + + case IOV_GVAL(IOV_IOCTLTIMEOUT): { + int_val = (int32)dhd_os_get_ioctl_resp_timeout(); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_IOCTLTIMEOUT): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + 
dhd_os_set_ioctl_resp_timeout((unsigned int)int_val); + break; + } + +#ifdef PROP_TXSTATUS + case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + int_val = wlfc_enab ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + + /* wlfc is already set as desired */ + if (wlfc_enab == (int_val == 0 ? FALSE : TRUE)) + goto exit; + + if (int_val == TRUE && disable_proptx) { + disable_proptx = 0; + } + + if (int_val == TRUE) + bcmerror = dhd_wlfc_init(dhd_pub); + else + bcmerror = dhd_wlfc_deinit(dhd_pub); + + break; + } + case IOV_GVAL(IOV_PROPTXSTATUS_MODE): + bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODE): + dhd_wlfc_set_mode(dhd_pub, int_val); + break; +#ifdef QMONITOR + case IOV_GVAL(IOV_QMON_TIME_THRES): { + int_val = dhd_qmon_thres(dhd_pub, FALSE, 0); + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_QMON_TIME_THRES): { + dhd_qmon_thres(dhd_pub, TRUE, int_val); + break; + } + + case IOV_GVAL(IOV_QMON_TIME_PERCENT): { + int_val = dhd_qmon_getpercent(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + } +#endif /* QMONITOR */ + + case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + dhd_wlfc_set_module_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + dhd_wlfc_set_credit_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val); + break; + +#endif /* PROP_TXSTATUS */ + + case IOV_GVAL(IOV_BUS_TYPE): + /* The dhd application queries the driver to check if it's USB, SDIO, or PCIe.
*/ +#ifdef BCMDBUS + int_val = BUS_TYPE_USB; +#endif +#ifdef BCMSDIO + int_val = BUS_TYPE_SDIO; +#endif +#ifdef PCIE_FULL_DONGLE + int_val = BUS_TYPE_PCIE; +#endif + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CHANGEMTU): + int_val &= 0xffff; + bcmerror = dhd_change_mtu(dhd_pub, int_val, 0); + break; + + case IOV_GVAL(IOV_HOSTREORDER_FLOWS): + { + uint i = 0; + uint8 *ptr = (uint8 *)arg; + uint8 count = 0; + + ptr++; + for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) { + if (dhd_pub->reorder_bufs[i] != NULL) { + *ptr = dhd_pub->reorder_bufs[i]->flow_id; + ptr++; + count++; + } + } + ptr = (uint8 *)arg; + *ptr = count; + break; + } +#ifdef DHDTCPACK_SUPPRESS + case IOV_GVAL(IOV_TCPACK_SUPPRESS): { + int_val = (uint32)dhd_pub->tcpack_sup_mode; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_TCPACK_SUPPRESS): { + bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val); + break; + } +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + case IOV_GVAL(IOV_WMF_BSS_ENAB): { + uint32 bssidx; + dhd_wmf_t *wmf; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + wmf = dhd_wmf_conf(dhd_pub, bssidx); + int_val = wmf->wmf_enable ? 1 :0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_WMF_BSS_ENAB): { + /* Enable/Disable WMF */ + uint32 bssidx; + dhd_wmf_t *wmf; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + wmf = dhd_wmf_conf(dhd_pub, bssidx); + if (wmf->wmf_enable == int_val) + break; + if (int_val) { + /* Enable WMF */ + if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) { + DHD_ERROR(("%s: Error in creating WMF instance\n", + __FUNCTION__)); + break; + } + if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) { + DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__)); + break; + } + wmf->wmf_enable = TRUE; + } else { + /* Disable WMF */ + wmf->wmf_enable = FALSE; + dhd_wmf_stop(dhd_pub, bssidx); + dhd_wmf_instance_del(dhd_pub, bssidx); + } + break; + } + case IOV_GVAL(IOV_WMF_UCAST_IGMP): + int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_IGMP): + if (dhd_pub->wmf_ucast_igmp == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_igmp = int_val; + else + bcmerror = BCME_RANGE; + break; + case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP): + int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP): + dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val); + break; + +#ifdef WL_IGMP_UCQUERY + case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY): + int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY): + if (dhd_pub->wmf_ucast_igmp_query == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_igmp_query = int_val; + else + bcmerror = BCME_RANGE; + break; +#endif /* WL_IGMP_UCQUERY */ +#ifdef DHD_UCAST_UPNP + case IOV_GVAL(IOV_WMF_UCAST_UPNP): + int_val = dhd_pub->wmf_ucast_upnp ? 
1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_UPNP): + if (dhd_pub->wmf_ucast_upnp == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_upnp = int_val; + else + bcmerror = BCME_RANGE; + break; +#endif /* DHD_UCAST_UPNP */ + + case IOV_GVAL(IOV_WMF_PSTA_DISABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_wmf_psta_disable(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_WMF_PSTA_DISABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_wmf_psta_disable(dhd_pub, bssidx, int_val); + break; + } +#endif /* DHD_WMF */ + +#if defined(BCM_ROUTER_DHD) + case IOV_SVAL(IOV_TRAFFIC_MGMT_DWM): { + trf_mgmt_filter_list_t *trf_mgmt_filter_list = + (trf_mgmt_filter_list_t *)(arg); + bcmerror = traffic_mgmt_add_dwm_filter(dhd_pub, trf_mgmt_filter_list, len); + } + break; +#endif /* BCM_ROUTER_DHD */ + +#ifdef DHD_L2_FILTER + case IOV_GVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + const char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + const char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } + case IOV_GVAL(IOV_BLOCK_PING): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_block_ping_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_BLOCK_PING): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ?
1 : 0); + break; + } + case IOV_GVAL(IOV_PROXY_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_parp_status(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROXY_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + bcopy(val, &int_val, sizeof(int_val)); + + /* Issue an iovar request to WL to update the proxy arp capability bit + * in the Extended Capability IE of beacons/probe responses. + */ + bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val), + NULL, 0, TRUE); + if (bcmerror == BCME_OK) { + dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0); + } + break; + } + case IOV_GVAL(IOV_GRAT_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_grat_arp_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_GRAT_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } + case IOV_GVAL(IOV_BLOCK_TDLS): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_block_tdls_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_BLOCK_TDLS): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ?
1 : 0); + break; + } +#endif /* DHD_L2_FILTER */ + case IOV_SVAL(IOV_DHD_IE): { + uint32 bssidx; + const char *val; +#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)) + uint8 ie_type; + bcm_tlv_t *qos_map_ie = NULL; + ie_setbuf_t *ie_getbufp = (ie_setbuf_t *)(arg+4); + ie_type = ie_getbufp->ie_buffer.ie_list[0].ie_data.id; +#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */ + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + +#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)) + qos_map_ie = (bcm_tlv_t *)(&(ie_getbufp->ie_buffer.ie_list[0].ie_data)); + if (qos_map_ie != NULL && (ie_type == DOT11_MNG_QOS_MAP_ID)) { + bcmerror = dhd_set_qosmap_up_table(dhd_pub, bssidx, qos_map_ie); + } +#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */ + break; + } + case IOV_GVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_ap_isolate(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_ap_isolate(dhd_pub, bssidx, int_val); + break; + } +#ifdef DHD_PSTA + case IOV_GVAL(IOV_PSTA): { + int_val = dhd_get_psta_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PSTA): { + if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) { + dhd_set_psta_mode(dhd_pub, int_val); + } else { + bcmerror = BCME_RANGE; + } + break; + } +#endif /* DHD_PSTA */ +#ifdef DHD_WET + case IOV_GVAL(IOV_WET): + int_val = dhd_get_wet_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WET): + if (int_val == 0 || int_val == 1) { + dhd_set_wet_mode(dhd_pub, int_val); + /* Delete the WET DB when disabled */ + if (!int_val) { + dhd_wet_sta_delete_list(dhd_pub); + } + } else { + bcmerror = BCME_RANGE; + } + break; + case IOV_SVAL(IOV_WET_HOST_IPV4): + dhd_set_wet_host_ipv4(dhd_pub, params, plen); + break; + case IOV_SVAL(IOV_WET_HOST_MAC): + dhd_set_wet_host_mac(dhd_pub, params, plen); + break; +#endif /* DHD_WET */ +#ifdef DHD_MCAST_REGEN + case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val); + break; + } +#endif /* DHD_MCAST_REGEN */ + + case IOV_GVAL(IOV_CFG80211_OPMODE): { + int_val = (int32)dhd_pub->op_mode; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CFG80211_OPMODE): { + if
(int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_pub->op_mode = int_val; + break; + } + + case IOV_GVAL(IOV_ASSERT_TYPE): + int_val = g_assert_type; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ASSERT_TYPE): + g_assert_type = (uint32)int_val; + break; + +#if defined(NDIS) + case IOV_GVAL(IOV_WAKEIND): + dhd_os_wakeind(dhd_pub, &int_val); + bcopy(&int_val, arg, val_size); + break; +#endif /* NDIS */ + +#if !defined(NDIS) && !defined(BCM_ROUTER_DHD) + case IOV_GVAL(IOV_LMTEST): { + *(uint32 *)arg = (uint32)lmtest; + break; + } + + case IOV_SVAL(IOV_LMTEST): { + uint32 val = *(uint32 *)arg; + if (val > 50) + bcmerror = BCME_BADARG; + else { + lmtest = (uint)val; + DHD_ERROR(("%s: lmtest %s\n", + __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON")); + } + break; + } +#endif /* !NDIS && !BCM_ROUTER_DHD */ +#ifdef BCMDBG + case IOV_GVAL(IOV_MACDBG_PD11REGS): + bcmerror = dhd_macdbg_pd11regs(dhd_pub, params, plen, arg, len); + break; + case IOV_GVAL(IOV_MACDBG_REGLIST): + bcmerror = dhd_macdbg_reglist(dhd_pub, arg, len); + break; + case IOV_GVAL(IOV_MACDBG_PSVMPMEMS): + bcmerror = dhd_macdbg_psvmpmems(dhd_pub, params, plen, arg, len); + break; +#endif /* BCMDBG */ + +#ifdef SHOW_LOGTRACE + case IOV_GVAL(IOV_DUMP_TRACE_LOG): { + trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg; + dhd_dbg_ring_t *dbg_verbose_ring = NULL; + + dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID); + if (dbg_verbose_ring == NULL) { + DHD_ERROR(("dbg_verbose_ring is NULL\n")); + bcmerror = BCME_UNSUPPORTED; + break; + } + + if (trace_buf_info != NULL) { + bzero(trace_buf_info, sizeof(trace_buf_info_t)); + dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info); + } else { + DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__)); + bcmerror = BCME_NOMEM; + } + break; + } +#endif /* SHOW_LOGTRACE */ +#ifdef BTLOG + case IOV_GVAL(IOV_DUMP_BT_LOG): { + bt_log_buf_info_t *bt_log_buf_info = (bt_log_buf_info_t *)arg; + uint32 rlen; + + rlen = dhd_dbg_pull_single_from_ring(dhd_pub, BT_LOG_RING_ID, bt_log_buf_info->buf, + BT_LOG_BUF_MAX_SIZE, TRUE); + bt_log_buf_info->size = rlen; + bt_log_buf_info->availability = BT_LOG_NEXT_BUF_NOT_AVAIL; + if (rlen == 0) { + bt_log_buf_info->availability = BT_LOG_BUF_NOT_AVAILABLE; + } else { + dhd_dbg_ring_status_t ring_status; + dhd_dbg_get_ring_status(dhd_pub, BT_LOG_RING_ID, &ring_status); + if (ring_status.written_bytes != ring_status.read_bytes) { + bt_log_buf_info->availability = BT_LOG_NEXT_BUF_AVAIL; + } + } + break; + } + case IOV_GVAL(IOV_BTLOG): + { + uint32 btlog_val = dhd_pub->bt_logging_enabled ? 
1 : 0; + bcopy(&btlog_val, arg, val_size); + } + break; + case IOV_SVAL(IOV_BTLOG): + { + if (dhd_pub->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + if (int_val) + dhd_pub->bt_logging_enabled = TRUE; + else + dhd_pub->bt_logging_enabled = FALSE; + } + break; + +#endif /* BTLOG */ +#ifdef SNAPSHOT_UPLOAD + case IOV_SVAL(IOV_BT_MEM_DUMP): { + dhd_prot_send_snapshot_request(dhd_pub, SNAPSHOT_TYPE_BT, int_val); + break; + } + case IOV_GVAL(IOV_BT_UPLOAD): { + int status; + bt_mem_req_t req; + bt_log_buf_info_t *mem_info = (bt_log_buf_info_t *)arg; + uint32 size; + bool is_more; + + memcpy(&req, params, sizeof(req)); + + status = dhd_prot_get_snapshot(dhd_pub, SNAPSHOT_TYPE_BT, req.offset, + req.buf_size, mem_info->buf, &size, &is_more); + if (status == BCME_OK) { + mem_info->size = size; + mem_info->availability = is_more ? + BT_LOG_NEXT_BUF_AVAIL : BT_LOG_NEXT_BUF_NOT_AVAIL; + } else if (status == BCME_NOTREADY) { + mem_info->size = 0; + mem_info->availability = BT_LOG_NOT_READY; + } else { + mem_info->size = 0; + mem_info->availability = BT_LOG_BUF_NOT_AVAILABLE; + } + break; + } +#endif /* SNAPSHOT_UPLOAD */ +#ifdef REPORT_FATAL_TIMEOUTS + case IOV_GVAL(IOV_SCAN_TO): { + dhd_get_scan_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_SCAN_TO): { + dhd_set_scan_to_val(dhd_pub, (uint32)int_val); + break; + } + case IOV_GVAL(IOV_JOIN_TO): { + dhd_get_join_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_JOIN_TO): { + dhd_set_join_to_val(dhd_pub, (uint32)int_val); + break; + } + case IOV_GVAL(IOV_CMD_TO): { + dhd_get_cmd_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_CMD_TO): { + dhd_set_cmd_to_val(dhd_pub, (uint32)int_val); + break; + } + case IOV_GVAL(IOV_OQS_TO): { + dhd_get_bus_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_OQS_TO): { + dhd_set_bus_to_val(dhd_pub, (uint32)int_val); + break; + } +#endif /* REPORT_FATAL_TIMEOUTS */ + case IOV_GVAL(IOV_DONGLE_TRAP_TYPE): + if (dhd_pub->dongle_trap_occured) + int_val = ltoh32(dhd_pub->last_trap_info.type); + else + int_val = 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DONGLE_TRAP_INFO): + { + struct bcmstrbuf strbuf; + bcm_binit(&strbuf, arg, len); + if (dhd_pub->dongle_trap_occured == FALSE) { + bcm_bprintf(&strbuf, "no trap recorded\n"); + break; + } +#ifndef BCMDBUS + dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf); +#endif /* BCMDBUS */ + break; + } +#ifdef DHD_DEBUG +#if defined(BCMSDIO) || defined(BCMPCIE) + + case IOV_GVAL(IOV_BPADDR): + { + sdreg_t sdreg; + uint32 addr, size; + + memcpy(&sdreg, params, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size, + (uint *)&int_val, TRUE); + + memcpy(arg, &int_val, sizeof(int32)); + + break; + } + + case IOV_SVAL(IOV_BPADDR): + { + sdreg_t sdreg; + uint32 addr, size; + + memcpy(&sdreg, params, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size, + (uint *)&sdreg.value, + FALSE); + + break; + } +#endif /* BCMSDIO || BCMPCIE */ +#ifdef BCMPCIE + case IOV_SVAL(IOV_FLOW_RING_DEBUG): + { + bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len); + break; + } +#endif /* BCMPCIE */ + case IOV_SVAL(IOV_MEM_DEBUG): + 
if (len > 0) { + bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1); + } + break; +#endif /* DHD_DEBUG */ +#if defined(DHD_LOG_DUMP) +#if defined(DHD_EFI) + case IOV_GVAL(IOV_LOG_CAPTURE_ENABLE): + { + int_val = dhd_pub->log_capture_enable; + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_LOG_CAPTURE_ENABLE): + { + dhd_pub->log_capture_enable = (uint8)int_val; + break; + } +#endif /* DHD_EFI */ + case IOV_GVAL(IOV_LOG_DUMP): + { + dhd_prot_debug_info_print(dhd_pub); + dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT); + break; + } +#endif /* DHD_LOG_DUMP */ + + case IOV_GVAL(IOV_TPUT_TEST): + { + tput_test_t *tput_data = NULL; + if (params && plen >= sizeof(tput_test_t)) { + tput_data = (tput_test_t *)params; + bcmerror = dhd_tput_test(dhd_pub, tput_data); + } else { + DHD_ERROR(("%s: tput test - no input params ! \n", __FUNCTION__)); + bcmerror = BCME_BADARG; + } + break; + } +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) + case IOV_SVAL(IOV_PKT_LATENCY): + dhd_pub->pkt_latency = (uint32)int_val; + break; + case IOV_GVAL(IOV_PKT_LATENCY): + int_val = (int32)dhd_pub->pkt_latency; + bcopy(&int_val, arg, val_size); + break; +#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */ + case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT): + { + if (dhd_pub->debug_buf_dest_support) { + debug_buf_dest_stat_t *debug_buf_dest_stat = + (debug_buf_dest_stat_t *)arg; + memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat, + sizeof(dhd_pub->debug_buf_dest_stat)); + } else { + bcmerror = BCME_DISABLED; + } + break; + } + +#ifdef DHD_PKTTS + case IOV_GVAL(IOV_PKTTS_ENAB): { + int_val = dhd_get_pktts_enab(dhd_pub); + (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_PKTTS_ENAB): { + dhd_set_pktts_enab(dhd_pub, !!int_val); + break; + } + + case IOV_GVAL(IOV_PKTTS_FLOW): { + bcmerror = dhd_get_pktts_flow(dhd_pub, arg, len); + break; + } + case IOV_SVAL(IOV_PKTTS_FLOW): { + bcmerror = dhd_set_pktts_flow(dhd_pub, params, plen); + break; + } +#endif /* DHD_PKTTS */ + +#if defined(DHD_EFI) + case IOV_SVAL(IOV_INTR_POLL): + bcmerror = dhd_intr_poll(dhd_pub->bus, arg, len, TRUE); + break; + + case IOV_GVAL(IOV_INTR_POLL): + bcmerror = dhd_intr_poll(dhd_pub->bus, params, plen, FALSE); + break; +#endif /* DHD_EFI */ + +#if defined(DHD_SSSR_DUMP) + case IOV_GVAL(IOV_FIS_TRIGGER): + bcmerror = dhd_bus_fis_trigger(dhd_pub); + + if (bcmerror == BCME_OK) { + bcmerror = dhd_bus_fis_dump(dhd_pub); + } + + int_val = bcmerror; + bcopy(&int_val, arg, val_size); + break; +#endif /* defined(DHD_SSSR_DUMP) */ + +#ifdef DHD_DEBUG + case IOV_SVAL(IOV_INDUCE_ERROR): { + if (int_val >= DHD_INDUCE_ERROR_MAX) { + DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val)); + } else { + dhd_pub->dhd_induce_error = (uint16)int_val; +#ifdef BCMPCIE + if (dhd_pub->dhd_induce_error == DHD_INDUCE_BH_CBP_HANG) { + dhdpcie_induce_cbp_hang(dhd_pub); + } +#endif /* BCMPCIE */ + } + break; + } +#endif /* DHD_DEBUG */ +#ifdef WL_IFACE_MGMT_CONF +#ifdef WL_CFG80211 +#ifdef WL_NANP2P + case IOV_GVAL(IOV_CONC_DISC): { + int_val = wl_cfg80211_get_iface_conc_disc( + dhd_linux_get_primary_netdev(dhd_pub)); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CONC_DISC): { + bcmerror = wl_cfg80211_set_iface_conc_disc( + dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val); + break; + } +#endif /* WL_NANP2P */ +#ifdef WL_IFACE_MGMT + case IOV_GVAL(IOV_IFACE_POLICY): { + int_val = wl_cfg80211_get_iface_policy( + dhd_linux_get_primary_netdev(dhd_pub)); + bcopy(&int_val, 
arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_IFACE_POLICY): { + bcmerror = wl_cfg80211_set_iface_policy( + dhd_linux_get_primary_netdev(dhd_pub), + arg, len); + break; + } +#endif /* WL_IFACE_MGMT */ +#endif /* WL_CFG80211 */ +#endif /* WL_IFACE_MGMT_CONF */ +#ifdef RTT_GEOFENCE_CONT +#if defined (RTT_SUPPORT) && defined (WL_NAN) + case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): { + bool enable = 0; + dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable); + int_val = enable ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): { + bool enable = *(bool *)arg; + dhd_rtt_set_geofence_cont_ind(dhd_pub, enable); + break; + } +#endif /* RTT_SUPPORT && WL_NAN */ +#endif /* RTT_GEOFENCE_CONT */ + case IOV_GVAL(IOV_FW_VBS): { + *(uint32 *)arg = (uint32)dhd_dbg_get_fwverbose(dhd_pub); + break; + } + + case IOV_SVAL(IOV_FW_VBS): { + if (int_val < 0) { + int_val = 0; + } + dhd_dbg_set_fwverbose(dhd_pub, (uint32)int_val); + break; + } + +#ifdef DHD_TX_PROFILE + case IOV_SVAL(IOV_TX_PROFILE_TAG): + { + /* note: under the current implementation only one type of packet may be + * tagged per profile + */ + const dhd_tx_profile_protocol_t *protocol = NULL; + /* for example, we might have a profile of profile_index 6, but at + * offset 2 from dhd_pub->protocol_filters. + */ + uint8 offset; + + if (params == NULL) { + bcmerror = BCME_ERROR; + break; + } + + protocol = (dhd_tx_profile_protocol_t *)params; + + /* validate */ + if (protocol->version != DHD_TX_PROFILE_VERSION) { + bcmerror = BCME_VERSION; + break; + } + if (protocol->profile_index > DHD_MAX_PROFILE_INDEX) { + DHD_ERROR(("%s:\tprofile index must be between 0 and %d\n", + __FUNCTION__, DHD_MAX_PROFILE_INDEX)); + bcmerror = BCME_RANGE; + break; + } + if (protocol->layer != DHD_TX_PROFILE_DATA_LINK_LAYER && protocol->layer + != DHD_TX_PROFILE_NETWORK_LAYER) { + DHD_ERROR(("%s:\tlayer must be %d or %d\n", __FUNCTION__, + DHD_TX_PROFILE_DATA_LINK_LAYER, + DHD_TX_PROFILE_NETWORK_LAYER)); + bcmerror = BCME_BADARG; + break; + } + if (protocol->protocol_number > __UINT16_MAX__) { + DHD_ERROR(("%s:\tprotocol number must be <= %d\n", __FUNCTION__, + __UINT16_MAX__)); + bcmerror = BCME_BADLEN; + break; + } + + /* find the dhd_tx_profile_protocol_t */ + for (offset = 0; offset < dhd_pub->num_profiles; offset++) { + if (dhd_pub->protocol_filters[offset].profile_index == + protocol->profile_index) { + break; + } + } + + if (offset >= DHD_MAX_PROFILES) { +#if DHD_MAX_PROFILES > 1 + DHD_ERROR(("%s:\tonly %d profiles supported at present\n", + __FUNCTION__, DHD_MAX_PROFILES)); +#else /* DHD_MAX_PROFILES > 1 */ + DHD_ERROR(("%s:\tonly %d profile supported at present\n", + __FUNCTION__, DHD_MAX_PROFILES)); + DHD_ERROR(("%s:\tthere is a profile of index %d\n", __FUNCTION__, + dhd_pub->protocol_filters->profile_index)); +#endif /* DHD_MAX_PROFILES > 1 */ + bcmerror = BCME_NOMEM; + break; + } + + /* memory already allocated in dhd_attach; just assign the value */ + dhd_pub->protocol_filters[offset] = *protocol; + + if (offset >= dhd_pub->num_profiles) { + dhd_pub->num_profiles = offset + 1; + } + + break; + } + + case IOV_SVAL(IOV_TX_PROFILE_ENABLE): + dhd_pub->tx_profile_enab = int_val ? 
TRUE : FALSE; + break; + + case IOV_GVAL(IOV_TX_PROFILE_ENABLE): + int_val = dhd_pub->tx_profile_enab; + bcmerror = memcpy_s(arg, val_size, &int_val, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_TX_PROFILE_DUMP): + { + const dhd_tx_profile_protocol_t *protocol = NULL; + uint8 offset; + char *format = "%s:\ttx_profile %s: %d\n"; + + for (offset = 0; offset < dhd_pub->num_profiles; offset++) { + if (dhd_pub->protocol_filters[offset].profile_index == int_val) { + protocol = &(dhd_pub->protocol_filters[offset]); + break; + } + } + + if (protocol == NULL) { + DHD_ERROR(("%s:\tno profile with index %d\n", __FUNCTION__, + int_val)); + bcmerror = BCME_ERROR; + break; + } + + printf(format, __FUNCTION__, "profile_index", protocol->profile_index); + printf(format, __FUNCTION__, "layer", protocol->layer); + printf(format, __FUNCTION__, "protocol_number", protocol->protocol_number); + printf(format, __FUNCTION__, "src_port", protocol->src_port); + printf(format, __FUNCTION__, "dest_port", protocol->dest_port); + + break; + } +#endif /* defined(DHD_TX_PROFILE) */ + + case IOV_GVAL(IOV_CHECK_TRAP_ROT): { + int_val = dhd_pub->check_trap_rot? 1 : 0; + (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CHECK_TRAP_ROT): { + dhd_pub->check_trap_rot = *(bool *)arg; + break; + } + +#if defined(DHD_AWDL) + case IOV_SVAL(IOV_AWDL_LLC_ENABLE): { + bool bval = *(bool *)arg; + if (bval != 0 && bval != 1) + bcmerror = BCME_ERROR; + else + dhd_pub->awdl_llc_enabled = bval; + break; + } + case IOV_GVAL(IOV_AWDL_LLC_ENABLE): + int_val = dhd_pub->awdl_llc_enabled; + (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val)); + break; +#endif +#ifdef WLEASYMESH + case IOV_SVAL(IOV_1905_AL_UCAST): { + uint32 bssidx; + const char *val; + uint8 ea[6] = {0}; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + bcopy(val, ea, ETHER_ADDR_LEN); + printf("IOV_1905_AL_UCAST:" MACDBG "\n", MAC2STRDBG(ea)); + bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, FALSE); + break; + } + case IOV_GVAL(IOV_1905_AL_UCAST): { + uint32 bssidx; + const char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, FALSE); + break; + } + case IOV_SVAL(IOV_1905_AL_MCAST): { + uint32 bssidx; + const char *val; + uint8 ea[6] = {0}; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + bcopy(val, ea, ETHER_ADDR_LEN); + printf("IOV_1905_AL_MCAST:" MACDBG "\n", MAC2STRDBG(ea)); + bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, TRUE); + break; + } + case IOV_GVAL(IOV_1905_AL_MCAST): { + uint32 bssidx; + const char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, TRUE); + break; + } +#endif /* WLEASYMESH */ + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror)); + return bcmerror; +} + +#ifdef BCMDONGLEHOST +/* Store the status of a connection attempt for later 
retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+		dhd_conn_event == WLC_E_PRUNE)) {
+		dhd_conn_event = event;
+		dhd_conn_status = status;
+		dhd_conn_reason = reason;
+	}
+}
+#else
+#error "BCMDONGLEHOST not defined"
+#endif /* BCMDONGLEHOST */
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	void *p;
+	int eprec = -1;		/* precedence to evict from */
+	bool discard_oldest;
+
+	/* Fast case: the precedence queue is not full and we are also not
+	 * exceeding the total queue length
+	 */
+	if (!pktqprec_full(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktqprec_full(q, prec))
+		eprec = prec;
+	else if (pktq_full(q)) {
+		p = pktq_peek_tail(q, &eprec);
+		ASSERT(p);
+		if (eprec > prec || eprec < 0)
+			return FALSE;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktqprec_empty(q, eprec));
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+		if (eprec == prec && !discard_oldest)
+			return FALSE;	/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+		ASSERT(p);
+#ifdef DHDTCPACK_SUPPRESS
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+		PKTFREE(dhdp->osh, p, TRUE);
+	}
+
+	/* Enqueue */
+	p = pktq_penq(q, prec, pkt);
+	ASSERT(p);
+
+	return TRUE;
+}
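In short, dhd_prec_enq() enqueues directly when neither the per-precedence queue nor the whole pktq is full; otherwise it evicts from the incoming packet's own precedence only when the per-AC bit in dhdp->wme_dp allows discarding the oldest, and evicts the tail packet only when that tail sits at a lower or equal precedence. A minimal, self-contained sketch of that decision follows; the counters, limits, and enq_decision() helper are illustrative stand-ins, not driver APIs.

/* Standalone sketch of the dhd_prec_enq() eviction decision. */
#include <stdbool.h>
#include <stdio.h>

#define NPREC		4	/* illustrative number of precedences */
#define QLEN_PER_PREC	2	/* illustrative per-precedence limit */
#define QLEN_TOTAL	6	/* illustrative total limit */

static int qlen[NPREC];		/* per-precedence queue depth */
static int qtot;		/* total queued packets */

static const char *
enq_decision(int prec, int tail_prec, bool discard_oldest)
{
	/* fast path: neither the precedence queue nor the whole pktq is full */
	if (qlen[prec] < QLEN_PER_PREC && qtot < QLEN_TOTAL)
		return "enqueue";
	if (qlen[prec] >= QLEN_PER_PREC)	/* own precedence is full */
		return discard_oldest ?
			"evict oldest of same precedence, then enqueue" :
			"refuse the incoming packet";
	/* total limit hit: the tail packet is the eviction candidate, but
	 * never evict from a higher precedence than the incoming packet
	 */
	return (tail_prec > prec) ? "refuse the incoming packet" :
		"evict tail of lower/equal precedence, then enqueue";
}

int main(void)
{
	qlen[1] = QLEN_PER_PREC;	/* precedence 1 is full */
	qtot = 3;
	printf("%s\n", enq_decision(1, 1, true));
	printf("%s\n", enq_decision(1, 1, false));
	return 0;
}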
+
+/*
+ * Drop the right packets from a queue:
+ * if one packet in the queue is non-fragmented, drop only the first non-fragmented packet;
+ * if all packets in the queue are fragmented, find and drop one whole set of fragments;
+ * if neither case matches, drop the first packet anyway.
+ */
+bool
+dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
+{
+	struct pktq_prec *q = NULL;
+	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
+	pkt_frag_t frag_info;
+
+	ASSERT(dhdp && pq);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	if (p == NULL)
+		return FALSE;
+
+	while (p) {
+		frag_info = pkt_frag_info(dhdp->osh, p);
+		if (frag_info == DHD_PKT_FRAG_NONE) {
+			break;
+		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
+			if (first) {
+				/* No last frag pkt, use prev as last */
+				last = prev;
+				break;
+			} else {
+				first = p;
+				prev_first = prev;
+			}
+		} else if (frag_info == DHD_PKT_FRAG_LAST) {
+			if (first) {
+				last = p;
+				break;
+			}
+		}
+
+		prev = p;
+		p = PKTLINK(p);
+	}
+
+	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
+		/* No matching pkts found, use the oldest */
+		prev = NULL;
+		p = q->head;
+		frag_info = 0;
+	}
+
+	if (frag_info == DHD_PKT_FRAG_NONE) {
+		first = last = p;
+		prev_first = prev;
+	}
+
+	p = first;
+	while (p) {
+		next = PKTLINK(p);
+		q->n_pkts--;
+		pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+		q->dequeue_count++;
+#endif
+
+		PKTSETLINK(p, NULL);
+
+		if (fn)
+			fn(dhdp, prec, p, TRUE);
+
+		if (p == last)
+			break;
+
+		p = next;
+	}
+
+	if (prev_first == NULL) {
+		if ((q->head = next) == NULL)
+			q->tail = NULL;
+	} else {
+		PKTSETLINK(prev_first, next);
+		if (!next)
+			q->tail = prev_first;
+	}
+
+	return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+	void *params, int plen, void *arg, uint len, bool set)
+{
+	int bcmerror = 0;
+	uint val_size;
+	const bcm_iovar_t *vi = NULL;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
+{
+	int bcmerror = 0;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) {
+		return BCME_BADARG;
+	}
+
+	dhd_os_dhdiovar_lock(dhd_pub);
+	switch (ioc->cmd) {
+	case DHD_GET_MAGIC:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_MAGIC;
+		break;
+
+	case DHD_GET_VERSION:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_VERSION;
+		break;
+
+	case DHD_GET_VAR:
+	case DHD_SET_VAR:
+	{
+		char *arg;
+		uint arglen;
+
+		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
+			bcmstricmp((char *)buf, "devreset")) {
+			/* On platforms like FC19 the FW download is done via IOCTL,
+			 * so do not return an error for IOCTLs fired before the FW
+			 * download is done
+			 */
+			if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
+				DHD_ERROR(("%s: return as fw_download_status=%d\n",
+					__FUNCTION__,
+					dhd_fw_download_status(dhd_pub)));
+				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+				dhd_os_dhdiovar_unlock(dhd_pub);
+				return -ENODEV;
+			}
+		}
+		DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
+		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+		dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+		if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
+			/* If Suspend/Resume is tested via the pcie_suspend IOVAR,
+			 * continue to execute the IOVAR; return from here for other
+			 * IOVARs. pciecfgreg and devreset are also allowed through.
+			 */
+#ifdef DHD_EFI
+			if (bcmstricmp((char *)buf, "pcie_suspend") &&
+				bcmstricmp((char *)buf, "pciecfgreg") &&
+				bcmstricmp((char *)buf, "devreset") &&
+				bcmstricmp((char *)buf, "sdio_suspend") &&
+				bcmstricmp((char *)buf, "control_signal"))
+#else
+			if (bcmstricmp((char *)buf, "pcie_suspend") &&
+				bcmstricmp((char *)buf, "pciecfgreg") &&
+				bcmstricmp((char *)buf, "devreset") &&
+				bcmstricmp((char *)buf, "sdio_suspend"))
+#endif /* DHD_EFI */
+			{
+				DHD_ERROR(("%s: bus is in suspend(%d)"
+					" or suspending(0x%x) state\n",
+					__FUNCTION__, dhd_pub->busstate,
+					dhd_pub->dhd_bus_busy_state));
+				DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+				dhd_os_busbusy_wake(dhd_pub);
+				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+				dhd_os_dhdiovar_unlock(dhd_pub);
+				return -ENODEV;
+			}
+		}
+		/* During a devreset ioctl we call dhdpcie_advertise_bus_cleanup,
+		 * which waits a bounded time for all busy contexts to finish and
+		 * calls ASSERT on timeout. Since this path already set
+		 * DHD_BUS_BUSY_SET_IN_DHD_IOVAR, clear the IOCTL busy state here
+		 * to avoid that ASSERT. The "devreset" ioctl is not used on
+		 * production platforms, only in FC19 setups.
+		 */
+		if (!bcmstricmp((char *)buf, "devreset") ||
+#ifdef BCMPCIE
+			(dhd_bus_is_multibp_capable(dhd_pub->bus) &&
+			!bcmstricmp((char *)buf, "dwnldstate")) ||
+#endif /* BCMPCIE */
+#if defined(DHD_EFI) && defined (BT_OVER_PCIE)
+			!bcmstricmp((char *)buf, "btop_test") ||
+			!bcmstricmp((char *)buf, "control_signal") ||
+#endif /* DHD_EFI && BT_OVER_PCIE */
+			FALSE)
+		{
+			DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+		}
+		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+		/* scan past the name to any arguments */
+		for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+			;
+
+		if (arglen == 0 || *arg) {
+			bcmerror = BCME_BUFTOOSHORT;
+			goto unlock_exit;
+		}
+
+		/* account for the NUL terminator */
+		arg++, arglen--;
+		/* call with the appropriate arguments */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+				buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
+				arg, arglen, IOV_SET);
+		}
+		if (bcmerror != BCME_UNSUPPORTED) {
+			goto unlock_exit;
+		}
+
+		/* not in generic table, try protocol module */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+				arglen, buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		}
+		if (bcmerror != BCME_UNSUPPORTED) {
+			goto unlock_exit;
+		}
+
+		/* if still not found, try bus module */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				arg, arglen, buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		}
+		if (bcmerror != BCME_UNSUPPORTED) {
+			goto unlock_exit;
+		}
+
+#ifdef DHD_TIMESYNC
+		/* check TS module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, arg,
+				arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+#endif /* DHD_TIMESYNC */
+	}
+	goto unlock_exit;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+	}
+	dhd_os_dhdiovar_unlock(dhd_pub);
+	return bcmerror;
+
+unlock_exit:
+	DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+	DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+	dhd_os_busbusy_wake(dhd_pub);
+	DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+	dhd_os_dhdiovar_unlock(dhd_pub);
+	return bcmerror;
+}
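The DHD_GET_VAR/DHD_SET_VAR path above relies on one buffer convention: the NUL-terminated iovar name followed immediately by the value bytes, with a lookup that falls back from the generic table to the protocol, bus, and (optionally) timesync modules until one of them stops returning BCME_UNSUPPORTED. A minimal sketch of building and re-parsing such a buffer follows; mkiovar() is illustrative only, not a driver API.

/* Sketch of the "name\0value" iovar buffer layout parsed by dhd_ioctl(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int
mkiovar(const char *name, uint32_t val, char *buf, size_t buflen)
{
	size_t namelen = strlen(name) + 1;	/* keep the NUL terminator */

	if (namelen + sizeof(val) > buflen)
		return -1;
	memcpy(buf, name, namelen);
	memcpy(buf + namelen, &val, sizeof(val));	/* value follows the NUL */
	return (int)(namelen + sizeof(val));
}

int main(void)
{
	char buf[64];
	char *arg = buf;
	uint32_t val;
	int len = mkiovar("ap_isolate", 1, buf, sizeof(buf));

	/* same scan as dhd_ioctl(): step past the name, then past the NUL */
	while (*arg)
		arg++;
	arg++;
	memcpy(&val, arg, sizeof(val));	/* memcpy avoids unaligned reads */
	printf("len=%d value=%u\n", len, val);
	return (len > 0) ? 0 : 1;
}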
+
+#ifdef SHOW_EVENTS
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+static void
+dhd_update_awdl_stats(dhd_pub_t *dhd_pub, const awdl_aws_event_data_t *aw)
+{
+	dhd_awdl_stats_t *awdl_stats;
+	unsigned long lock_flags;
+
+	/* AWDL stats are read and cleared together, so take the lock while
+	 * updating to protect against a concurrent clear
+	 */
+	DHD_AWDL_STATS_LOCK(dhd_pub->awdl_stats_lock, lock_flags);
+	/* Start of AWDL slot */
+	if (!(aw->flags & AWDL_AW_LAST_EXT)) {
+		dhd_pub->awdl_tx_status_slot =
+			((aw->aw_counter/AWDL_SLOT_MULT) % AWDL_NUM_SLOTS);
+		awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
+		awdl_stats->slot_start_time = OSL_SYSUPTIME_US();
+		awdl_stats->fw_slot_start_time = ntoh32_ua(&aw->fw_time);
+		awdl_stats->num_slots++;
+	} else {
+		/* End of AWDL slot */
+		awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
+		if (awdl_stats->slot_start_time) {
+			awdl_stats->cum_slot_time +=
+				OSL_SYSUPTIME_US() - awdl_stats->slot_start_time;
+			/* FW reports time in us as a 32-bit number, which wraps
+			 * around roughly every 72 minutes. The logic below
+			 * handles the wrap-around as well.
+			 */
+			awdl_stats->fw_cum_slot_time +=
+				((ntoh32_ua(&aw->fw_time) - awdl_stats->fw_slot_start_time) &
+				(UINT_MAX));
+
+		}
+	}
+	DHD_AWDL_STATS_UNLOCK(dhd_pub->awdl_stats_lock, lock_flags);
+}
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
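The masked subtraction above works because unsigned 32-bit arithmetic is modulo 2^32: the difference between two timestamps stays correct across a wrap as long as the real elapsed time is under one wrap period (2^32 us, about 71.6 minutes). A small standalone demonstration:

/* Demonstrates why the unsigned 32-bit subtraction above survives the
 * firmware timestamp wrap. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start = 0xFFFFFF00u;	/* shortly before the wrap */
	uint32_t now   = 0x00000100u;	/* shortly after the wrap */
	uint32_t delta = now - start;	/* modulo-2^32 arithmetic */

	printf("delta = %u us\n", delta);	/* 512, not a huge bogus value */
	return 0;
}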
+
+static void
+wl_show_roam_event(dhd_pub_t *dhd_pub, uint status, uint datalen,
+	const char *event_name, char *eabuf, void *event_data)
+{
+#ifdef REPORT_FATAL_TIMEOUTS
+	OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
+	dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+	if (status == WLC_E_STATUS_SUCCESS) {
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+	} else {
+#ifdef REPORT_FATAL_TIMEOUTS
+		/*
+		 * For a secure join, if WLC_E_SET_SSID returns any failure case,
+		 * do not expect WLC_E_PSK_SUP, so clear the mask.
+		 */
+		dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+		if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
+		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			if (datalen) {
+				uint8 id = *((uint8 *)event_data);
+				if (id != DOT11_MNG_PROPR_ID) {
+					wl_roam_event_t *roam_data =
+						(wl_roam_event_t *)event_data;
+					bcm_xtlv_t *tlv = (bcm_xtlv_t *)roam_data->xtlvs;
+					if (tlv->id == WLC_ROAM_NO_NETWORKS_TLV_ID) {
+						uint32 *fail_reason = (uint32 *)tlv->data;
+						switch (*fail_reason) {
+						case WLC_E_REASON_NO_NETWORKS:
+							DHD_EVENT(("MACEVENT: %s,"
+								" no networks found\n",
+								event_name));
+							break;
+						case WLC_E_REASON_NO_NETWORKS_BY_SCORE:
+							DHD_EVENT(("MACEVENT: %s,"
+								" no networks found by score\n",
+								event_name));
+							break;
+						default:
+							DHD_ERROR(("MACEVENT: %s,"
+								" unknown fail reason 0x%x\n",
+								event_name,
+								*fail_reason));
+							ASSERT(0);
+						}
+					} else {
+						DHD_EVENT(("MACEVENT: %s,"
+							" no networks found\n",
+							event_name));
+					}
+				} else {
+					DHD_EVENT(("MACEVENT: %s,"
+						" no networks found\n",
+						event_name));
+				}
+			} else {
+				DHD_EVENT(("MACEVENT: %s, no networks found\n",
+					event_name));
+			}
+		} else {
+			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+				event_name, (int)status));
+		}
+	}
+}
+
+static void
+wl_show_roam_cache_update_event(const char *name, uint status,
+	uint reason, uint datalen, void *event_data)
+{
+	wlc_roam_cache_update_event_t *cache_update;
+	uint16 len_of_tlvs;
+	void *val_tlv_ptr;
+	bcm_xtlv_t *val_xtlv;
+	char ntoa_buf[ETHER_ADDR_STR_LEN];
+	uint idx;
+	const char* reason_name = NULL;
+	const char* status_name = NULL;
+	static struct {
+		uint event;
+		const char *event_name;
+	} reason_names[] = {
+		{WLC_E_REASON_INITIAL_ASSOC, "INITIAL ASSOCIATION"},
+		{WLC_E_REASON_LOW_RSSI, "LOW_RSSI"},
+		{WLC_E_REASON_DEAUTH, "RECEIVED DEAUTHENTICATION"},
+		{WLC_E_REASON_DISASSOC, "RECEIVED DISASSOCIATION"},
+		{WLC_E_REASON_BCNS_LOST, "BEACONS LOST"},
+		{WLC_E_REASON_BETTER_AP, "BETTER AP FOUND"},
+		{WLC_E_REASON_MINTXRATE, "STUCK AT MIN TX RATE"},
+		{WLC_E_REASON_BSSTRANS_REQ, "REQUESTED ROAM"},
+		{WLC_E_REASON_TXFAIL, "TOO MANY TXFAILURES"}
+	};
+
+	static struct {
+		uint event;
+		const char *event_name;
+	} status_names[] = {
+		{WLC_E_STATUS_SUCCESS, "operation was successful"},
+		{WLC_E_STATUS_FAIL, "operation failed"},
+		{WLC_E_STATUS_TIMEOUT, "operation timed out"},
+		{WLC_E_STATUS_NO_NETWORKS, "failed due to no matching network found"},
+		{WLC_E_STATUS_ABORT, "operation was aborted"},
+		{WLC_E_STATUS_NO_ACK, "protocol failure: packet not ack'd"},
+		{WLC_E_STATUS_UNSOLICITED, "AUTH or ASSOC packet was unsolicited"},
+		{WLC_E_STATUS_ATTEMPT, "attempt to assoc to an auto auth configuration"},
+		{WLC_E_STATUS_PARTIAL, "scan results are incomplete"},
+		{WLC_E_STATUS_NEWSCAN, "scan aborted by another scan"},
+		{WLC_E_STATUS_NEWASSOC, "scan aborted due to assoc in progress"},
+		{WLC_E_STATUS_11HQUIET, "802.11h quiet period started"},
+		{WLC_E_STATUS_SUPPRESS, "user disabled scanning"},
+		{WLC_E_STATUS_NOCHANS, "no allowable channels to scan"},
+		{WLC_E_STATUS_CS_ABORT, "abort channel select"},
+		{WLC_E_STATUS_ERROR, "request failed due to error"},
+		{WLC_E_STATUS_INVALID, "Invalid status code"}
+	};
+
+	switch (reason) {
+	case WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE:
+		DHD_EVENT(("Current roam cache status %d, "
+			"reason for cache update is new roam cache\n", status));
+		break;
+	case WLC_ROAM_CACHE_UPDATE_JOIN:
+		DHD_EVENT(("Current roam cache status %d, "
+			"reason for cache update is start of
join\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_RSSI_DELTA: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is delta in rssi\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is motion delta in rssi\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is missed channel\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is start of split scan\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is start of full scan\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_INIT_ASSOC: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is init association\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is failure in full scan\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is empty scan result\n", status)); + break; + case WLC_ROAM_CACHE_UPDATE_MISSING_AP: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is missed ap\n", status)); + break; + default: + DHD_EVENT(("Current roam cache status %d, " + "reason for cache update is unknown %d\n", status, reason)); + break; + } + + if (datalen < sizeof(wlc_roam_cache_update_event_t)) { + DHD_ERROR(("MACEVENT: %s, missing event data\n", name)); + return; + } + + cache_update = (wlc_roam_cache_update_event_t *)event_data; + val_tlv_ptr = (void *)cache_update->xtlvs; + len_of_tlvs = datalen - sizeof(wlc_roam_cache_update_event_t); + val_xtlv = (bcm_xtlv_t *)val_tlv_ptr; + if (val_xtlv->id != WL_RMC_RPT_CMD_DATA) { + DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n", + name, val_xtlv->id)); + return; + } + val_tlv_ptr = (uint8 *)val_tlv_ptr + BCM_XTLV_HDR_SIZE; + len_of_tlvs = val_xtlv->len; + + while (len_of_tlvs && len_of_tlvs > BCM_XTLV_HDR_SIZE) { + val_xtlv = (bcm_xtlv_t *)val_tlv_ptr; + switch (val_xtlv->id) { + case WL_RMC_RPT_XTLV_BSS_INFO: + { + rmc_bss_info_v1_t *bss_info = (rmc_bss_info_v1_t *)(val_xtlv->data); + DHD_EVENT(("\t Current BSS INFO:\n")); + DHD_EVENT(("\t\tRSSI: %d\n", bss_info->rssi)); + DHD_EVENT(("\t\tNumber of full scans performed " + "on current BSS: %d\n", bss_info->fullscan_count)); + for (idx = 0; idx < ARRAYSIZE(reason_names); idx++) { + if (reason_names[idx].event == bss_info->reason) { + reason_name = reason_names[idx].event_name; + } + } + DHD_EVENT(("\t\tReason code for last full scan: %s(%d)\n", + reason_name, bss_info->reason)); + DHD_EVENT(("\t\tDelta between current time and " + "last full scan: %d\n", bss_info->time_full_scan)); + for (idx = 0; idx < ARRAYSIZE(status_names); idx++) { + if (status_names[idx].event == bss_info->status) + status_name = status_names[idx].event_name; + } + DHD_EVENT(("\t\tLast status code for not roaming: %s(%d)\n", + status_name, bss_info->status)); + + } + break; + case WL_RMC_RPT_XTLV_CANDIDATE_INFO: + case WL_RMC_RPT_XTLV_USER_CACHE_INFO: + { + rmc_candidate_info_v1_t *candidate_info = + (rmc_candidate_info_v1_t *)(val_xtlv->data); + if (val_xtlv->id == WL_RMC_RPT_XTLV_CANDIDATE_INFO) { + DHD_EVENT(("\t Candidate INFO:\n")); + } else { + DHD_EVENT(("\t User Candidate 
INFO:\n")); + } + DHD_EVENT(("\t\tBSSID: %s\n", + bcm_ether_ntoa((const struct ether_addr *) + &candidate_info->bssid, ntoa_buf))); + DHD_EVENT(("\t\tRSSI: %d\n", candidate_info->rssi)); + DHD_EVENT(("\t\tChannel: %d\n", candidate_info->ctl_channel)); + DHD_EVENT(("\t\tDelta between current time and last " + "seen time: %d\n", candidate_info->time_last_seen)); + DHD_EVENT(("\t\tBSS load: %d\n", candidate_info->bss_load)); + } + break; + default: + DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n", + name, val_xtlv->id)); + return; + } + val_tlv_ptr = (uint8 *)val_tlv_ptr + bcm_xtlv_size(val_xtlv, + BCM_XTLV_OPTION_NONE); + len_of_tlvs -= (uint16)bcm_xtlv_size(val_xtlv, BCM_XTLV_OPTION_NONE); + } +} + +static void +wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, + void *raw_event_ptr, char *eventmask) +{ + uint i, status, reason; + bool group = FALSE, flush_txq = FALSE, link = FALSE; + bool host_data = FALSE; /* prints event data after the case when set */ + const char *auth_str; + const char *event_name; + const uchar *buf; + char err_msg[256], eabuf[ETHER_ADDR_STR_LEN]; + uint event_type, flags, auth_type, datalen; + + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + reason = ntoh32(event->reason); + BCM_REFERENCE(reason); + auth_type = ntoh32(event->auth_type); + datalen = (event_data != NULL) ? ntoh32(event->datalen) : 0; + + /* debug dump of event messages */ + snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet)); + + event_name = bcmevent_get_name(event_type); + BCM_REFERENCE(event_name); + + if (flags & WLC_EVENT_MSG_LINK) + link = TRUE; + if (flags & WLC_EVENT_MSG_GROUP) + group = TRUE; + if (flags & WLC_EVENT_MSG_FLUSHTXQ) + flush_txq = TRUE; + + switch (event_type) { + case WLC_E_START: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + case WLC_E_DEAUTH: + case WLC_E_DISASSOC: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); +#ifdef REPORT_FATAL_TIMEOUTS + dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK); +#endif /* REPORT_FATAL_TIMEOUTS */ + break; + + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); +#ifdef REPORT_FATAL_TIMEOUTS + if (status != WLC_E_STATUS_SUCCESS) { + dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK); + } +#endif /* REPORT_FATAL_TIMEOUTS */ + + break; + + case WLC_E_ASSOC: + case WLC_E_REASSOC: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n", + event_name, eabuf, (int)status, (int)reason)); + } else if (status == WLC_E_STATUS_SUPPRESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, SUPPRESS\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_NO_ACK) { + DHD_EVENT(("MACEVENT: %s, MAC %s, NOACK\n", event_name, eabuf)); + } else { + DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n", + event_name, eabuf, (int)status)); + } +#ifdef REPORT_FATAL_TIMEOUTS + if (status != WLC_E_STATUS_SUCCESS) { + dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK); + } +#endif /* REPORT_FATAL_TIMEOUTS */ + + break; + + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: +#ifdef REPORT_FATAL_TIMEOUTS + dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK); 
+#endif /* REPORT_FATAL_TIMEOUTS */
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else if (auth_type == DOT11_SAE)
+			auth_str = "SAE";
+		else {
+			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
+				event_name, eabuf, auth_str, (int)status, (int)reason));
+		} else if (status == WLC_E_STATUS_SUPPRESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUPPRESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_NO_ACK) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
+				event_name, eabuf, auth_str));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
+				event_name, eabuf, auth_str, (int)status, (int)reason));
+		}
+		BCM_REFERENCE(auth_str);
+#ifdef REPORT_FATAL_TIMEOUTS
+		if (status != WLC_E_STATUS_SUCCESS) {
+			dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+		}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+		break;
+
+	case WLC_E_ROAM:
+		wl_show_roam_event(dhd_pub, status, datalen,
+			event_name, eabuf, event_data);
+		break;
+	case WLC_E_ROAM_START:
+		if (datalen >= sizeof(wlc_roam_start_event_t)) {
+			const wlc_roam_start_event_t *roam_start =
+				(wlc_roam_start_event_t *)event_data;
+			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
+				" reason %d, auth %d, current bss rssi %d\n",
+				event_name, event_type, eabuf, (int)status, (int)reason,
+				(int)auth_type, (int)roam_start->rssi));
+		} else {
+			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+				event_name, event_type, eabuf, (int)status, (int)reason,
+				(int)auth_type));
+		}
+		break;
+	case WLC_E_ROAM_PREP:
+		if (datalen >= sizeof(wlc_roam_prep_event_t)) {
+			const wlc_roam_prep_event_t *roam_prep =
+				(wlc_roam_prep_event_t *)event_data;
+			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
+				" reason %d, auth %d, target bss rssi %d\n",
+				event_name, event_type, eabuf, (int)status, (int)reason,
+				(int)auth_type, (int)roam_prep->rssi));
+		} else {
+			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+				event_name, event_type, eabuf, (int)status, (int)reason,
+				(int)auth_type));
+		}
+		break;
+	case WLC_E_ROAM_CACHE_UPDATE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		wl_show_roam_cache_update_event(event_name, status,
+			reason, datalen, event_data);
+		break;
+	case WLC_E_JOIN:
+	case WLC_E_SET_SSID:
+#ifdef REPORT_FATAL_TIMEOUTS
+		OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
+		dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		} else {
+#ifdef REPORT_FATAL_TIMEOUTS
+			/*
+			 * For a secure join, if WLC_E_SET_SSID returns any failure case,
+			 * do not expect WLC_E_PSK_SUP, so clear the mask.
+			 */
+			dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+			if (status == WLC_E_STATUS_FAIL) {
+				DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
+			} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+				DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+			} else {
+				DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+					event_name, (int)status));
+			}
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d reason:%d\n",
+			event_name, link?"UP":"DOWN", flags, status, reason));
+#ifdef PCIE_FULL_DONGLE
+#ifdef REPORT_FATAL_TIMEOUTS
+		{
+			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
+			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
+			if ((role == WLC_E_IF_ROLE_STA) && (!link)) {
+				dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+			}
+		}
+#endif /* REPORT_FATAL_TIMEOUTS */
+#endif /* PCIE_FULL_DONGLE */
+		BCM_REFERENCE(link);
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+			event_name, eabuf, group, flush_txq));
+		BCM_REFERENCE(group);
+		BCM_REFERENCE(flush_txq);
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+			event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
+		break;
+
+	case WLC_E_ASSOC_REQ_IE:
+	case WLC_E_ASSOC_RESP_IE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+#ifdef REPORT_FATAL_TIMEOUTS
+		dhd_stop_scan_timer(dhd_pub, FALSE, 0);
+#endif /* REPORT_FATAL_TIMEOUTS */
+		break;
+	case WLC_E_RSSI_LQM:
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+	case WLC_E_PFN_SCAN_NONE:
+	case WLC_E_PFN_SCAN_ALLGONE:
+	case WLC_E_PFN_GSCAN_FULL_RESULT:
+	case WLC_E_PFN_SSID_EXT:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PFN_SCAN_BACKOFF:
+	case WLC_E_PFN_BSSID_SCAN_BACKOFF:
+		DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
+			event_name, (int)status, (int)reason));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+			event_name, (int)status, (int)reason));
+#ifdef REPORT_FATAL_TIMEOUTS
+		dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+		break;
+
+#ifdef WIFI_ACT_FRAME
+	case WLC_E_ACTION_FRAME:
+		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+		break;
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		if (datalen >= sizeof(uint32)) {
+			const uint32 *pktid = event_data;
+			BCM_REFERENCE(pktid);
+			DHD_EVENT(("MACEVENT: %s status %d, reason %d, pktid 0x%x\n",
+				event_name, (int)status, (int)reason, *pktid));
+		}
+		break;
+#endif /* WIFI_ACT_FRAME */
+
+#ifdef SHOW_LOGTRACE
+	case WLC_E_TRACE:
+	{
+		dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
+		break;
+	}
+#endif /* SHOW_LOGTRACE */
+
+	case WLC_E_RSSI:
+		if (datalen >= sizeof(int)) {
+			DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		}
+		break;
+
+	case WLC_E_SERVICE_FOUND:
+	case
WLC_E_P2PO_ADD_DEVICE: + case WLC_E_P2PO_DEL_DEVICE: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + +#ifdef BT_WIFI_HANDOBER + case WLC_E_BT_WIFI_HANDOVER_REQ: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; +#endif +#ifdef DHD_AWDL + case WLC_E_AWDL_AW: + if (datalen >= sizeof(awdl_aws_event_data_t)) { + const awdl_aws_event_data_t *aw = + (awdl_aws_event_data_t *)event_data; + BCM_REFERENCE(aw); + DHD_EVENT(("MACEVENT: %s, MAC %s aw_cnt %u ext_cnt %u flags %u " + "aw_ch %u\n", event_name, eabuf, aw->aw_counter, + aw->aw_ext_count, aw->flags, CHSPEC_CHANNEL(aw->aw_chan))); + host_data = TRUE; + +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) + dhd_update_awdl_stats(dhd_pub, aw); + /* Store last received aw counter */ + dhd_pub->awdl_aw_counter = aw->aw_counter; +#endif /* DHD_AWDL */ + } + break; + case WLC_E_AWDL_ROLE: + DHD_EVENT(("MACEVENT: %s, MAC %s ROLE %d\n", event_name, eabuf, (int)status)); + break; + case WLC_E_AWDL_EVENT: + DHD_EVENT(("MACEVENT: %s, MAC %s status %d reason %d\n", + event_name, eabuf, (int)status, (int)reason)); + if (datalen >= OFFSETOF(awdl_scan_event_data_t, chan_list)) { + const awdl_scan_event_data_t *scan_evt = + (awdl_scan_event_data_t *)event_data; + BCM_REFERENCE(scan_evt); + DHD_EVENT(("scan_usage %d, nscan_chans %d, ncached_chans %d, " + "iscan_flags 0x%x\n", scan_evt->scan_usage, + scan_evt->nscan_chans, scan_evt->ncached_chans, + scan_evt->flags)); + host_data = TRUE; + } + break; +#endif /* DHD_AWDL */ + + case WLC_E_CCA_CHAN_QUAL: + /* I would like to check here that datalen >= sizeof(cca_chan_qual_event_t) + * but since definition of cca_chan_qual_event_t is different + * between blazar and legacy firmware, I will + * check only that datalen is bigger than 0. 
+	 */
+		if (datalen > 0) {
+			const cca_chan_qual_event_t *cca_event =
+				(cca_chan_qual_event_t *)event_data;
+			if ((cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) ||
+				(cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE)) {
+				const cca_only_chan_qual_event_t *cca_only_event =
+					(const cca_only_chan_qual_event_t *)cca_event;
+				BCM_REFERENCE(cca_only_event);
+				DHD_EVENT((
+					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+					" channel 0x%02x\n",
+					event_name, event_type, eabuf, (int)status,
+					(int)reason, (int)auth_type, cca_event->chanspec));
+				DHD_EVENT((
+					"\tTOTAL (dur %dms me %dms notme %dms interf %dms"
+					" ts 0x%08x)\n",
+					cca_only_event->cca_busy_ext.duration,
+					cca_only_event->cca_busy_ext.congest_ibss,
+					cca_only_event->cca_busy_ext.congest_obss,
+					cca_only_event->cca_busy_ext.interference,
+					cca_only_event->cca_busy_ext.timestamp));
+				DHD_EVENT((
+					"\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
+					cca_only_event->cca_busy_nopm.duration,
+					cca_only_event->cca_busy_nopm.congest_ibss,
+					cca_only_event->cca_busy_nopm.congest_obss,
+					cca_only_event->cca_busy_nopm.interference));
+				DHD_EVENT((
+					"\t PM (dur %dms me %dms notme %dms interf %dms)\n",
+					cca_only_event->cca_busy_pm.duration,
+					cca_only_event->cca_busy_pm.congest_ibss,
+					cca_only_event->cca_busy_pm.congest_obss,
+					cca_only_event->cca_busy_pm.interference));
+				if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE) {
+					DHD_EVENT(("\t OFDM desense %d\n",
+						((const cca_only_chan_qual_event_v2_t *)
+						cca_only_event)->ofdm_desense));
+				}
+			} else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
+				DHD_EVENT((
+					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+					" channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
+					" ts 0x%08x)\n",
+					event_name, event_type, eabuf, (int)status,
+					(int)reason, (int)auth_type, cca_event->chanspec,
+					cca_event->cca_busy_ext.duration,
+					cca_event->cca_busy_ext.congest_ibss,
+					cca_event->cca_busy_ext.congest_obss,
+					cca_event->cca_busy_ext.interference,
+					cca_event->cca_busy_ext.timestamp));
+			} else if (cca_event->id == WL_CHAN_QUAL_CCA) {
+				DHD_EVENT((
+					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+					" channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
+					event_name, event_type, eabuf, (int)status,
+					(int)reason, (int)auth_type, cca_event->chanspec,
+					cca_event->cca_busy.duration,
+					cca_event->cca_busy.congest,
+					cca_event->cca_busy.timestamp));
+			} else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
+				(cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
+				DHD_EVENT((
+					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+					" channel 0x%02x (NF[%d] %ddB)\n",
+					event_name, event_type, eabuf, (int)status,
+					(int)reason, (int)auth_type, cca_event->chanspec,
+					cca_event->id, cca_event->noise));
+			} else {
+				DHD_EVENT((
+					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+					" channel 0x%02x (unknown ID %d)\n",
+					event_name, event_type, eabuf, (int)status,
+					(int)reason, (int)auth_type, cca_event->chanspec,
+					cca_event->id));
+			}
+		}
+		break;
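Each case in this switch follows the same defensive pattern before touching event_data: verify datalen against the expected payload size, then cast or copy. A minimal standalone version of the pattern follows; my_event_t is a made-up stand-in for the per-event payload structs.

/* The length-check-before-cast pattern used by the event cases, in isolation. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint32_t field; } my_event_t;

static void handle_event(const void *event_data, size_t datalen)
{
	my_event_t ev;

	if (datalen < sizeof(my_event_t)) {
		printf("short event payload (%zu bytes), ignored\n", datalen);
		return;	/* never cast a buffer shorter than the struct */
	}
	memcpy(&ev, event_data, sizeof(ev));	/* copy avoids alignment traps too */
	printf("field = %u\n", ev.field);
}

int main(void)
{
	uint32_t one = 1;
	uint8_t buf[8] = {0};

	memcpy(buf, &one, sizeof(one));
	handle_event(buf, sizeof(buf));	/* long enough: parsed */
	handle_event(buf, 2);		/* too short: rejected */
	return 0;
}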
+	case WLC_E_ESCAN_RESULT:
+		if (datalen >= sizeof(wl_escan_result_v2_t)) {
+			const wl_escan_result_v2_t *escan_result =
+				(wl_escan_result_v2_t *)event_data;
+			BCM_REFERENCE(escan_result);
+#ifdef OEM_ANDROID
+			/* Because WLC_E_ESCAN_RESULT event logs are printed too
+			 * often, DHD_EVENT() is changed to DHD_TRACE() on the HW4
+			 * platform.
+			 */
+			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d\n",
+				event_name, event_type, eabuf, (int)status));
+#else
+			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
+				event_name, event_type, eabuf,
+				(int)status, dtoh16(escan_result->sync_id)));
+#endif /* OEM_ANDROID */
+#ifdef REPORT_FATAL_TIMEOUTS
+			/* a 'partial' status means the escan is still in progress;
+			 * any other status implies the escan has either finished or aborted
+			 */
+			if (status != WLC_E_STATUS_PARTIAL) {
+				unsigned long timeout_flags = 0;
+				uint16 syncid = dtoh16(escan_result->sync_id);
+				/* this handles the specific case where the escan abort
+				 * event is processed by dhd before the escan iovar has
+				 * returned. If the iovar then returns success, we would
+				 * start a timeout even though the escan has already been
+				 * aborted, so the flag below is checked before starting
+				 * the escan timeout
+				 */
+				if (dhd_pub->timeout_info) {
+					DHD_TIMER_LOCK(dhd_pub->timeout_info->scan_timer_lock,
+						timeout_flags);
+					if (!dhd_pub->timeout_info->scan_timer_active &&
+						syncid == dhd_pub->esync_id) {
+						dhd_pub->timeout_info->escan_aborted = TRUE;
+						dhd_pub->timeout_info->abort_syncid = syncid;
+						DHD_TIMER_UNLOCK(
+							dhd_pub->timeout_info->scan_timer_lock,
+							timeout_flags);
+						break;
+					} else {
+						dhd_pub->timeout_info->escan_aborted = FALSE;
+					}
+					DHD_TIMER_UNLOCK(dhd_pub->timeout_info->scan_timer_lock,
+						timeout_flags);
+				}
+				dhd_stop_scan_timer(dhd_pub, TRUE, dtoh16(escan_result->sync_id));
+			}
+#endif /* REPORT_FATAL_TIMEOUTS */
+		}
+		break;
+	case WLC_E_IF:
+		if (datalen >= sizeof(struct wl_event_data_if)) {
+			const struct wl_event_data_if *ifevent =
+				(struct wl_event_data_if *)event_data;
+			BCM_REFERENCE(ifevent);
+
+			DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
+				event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
+		}
+		break;
+#ifdef SHOW_LOGTRACE
+	case WLC_E_MSCH:
+	{
+		wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
+		break;
+	}
+#endif /* SHOW_LOGTRACE */
+
+	case WLC_E_PSK_AUTH:
+		DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
+			event_name, eabuf, status, reason));
+		break;
+	case WLC_E_AGGR_EVENT:
+		if (datalen >= sizeof(event_aggr_data_t)) {
+			const event_aggr_data_t *aggrbuf = event_data;
+			int j = 0, len = 0;
+			const uint8 *data = aggrbuf->data;
+			DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
+				event_name, aggrbuf->num_events, aggrbuf->len));
+			for (j = 0; j < aggrbuf->num_events; j++)
+			{
+				const wl_event_msg_t * sub_event = (const wl_event_msg_t *)data;
+				if (len > aggrbuf->len) {
+					DHD_ERROR(("%s: Aggr events corrupted!",
+						__FUNCTION__));
+					break;
+				}
+				DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
+				len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
+					sizeof(wl_event_msg_t)), sizeof(uint64));
+				buf = (const uchar *)(data + sizeof(wl_event_msg_t));
+				BCM_REFERENCE(buf);
+				DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
+				for (i = 0; i < ntoh32(sub_event->datalen); i++) {
+					DHD_EVENT((" 0x%02x ", buf[i]));
+				}
+				data = aggrbuf->data + len;
+			}
+			DHD_EVENT(("\n"));
+		}
+		break;
+	case WLC_E_PHY_CAL:
+	{
+		DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
+		break;
+	}
+	case WLC_E_NAN_CRITICAL:
+	{
+		DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
+		break;
+	}
+	case WLC_E_NAN_NON_CRITICAL:
+	{
+		DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
+		break;
+	}
+	case WLC_E_PROXD:
+		if (datalen >=
sizeof(wl_proxd_event_t)) { + const wl_proxd_event_t *proxd = + (wl_proxd_event_t*)event_data; + DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n", + event_name, proxd->type, reason)); + } + break; + case WLC_E_RPSNOA: + if (datalen >= sizeof(rpsnoa_stats_t)) { + const rpsnoa_stats_t *stat = event_data; + if (datalen == sizeof(*stat)) { + DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name, + (stat->band == WLC_BAND_2G) ? "2G":"5G", + stat->state, stat->last_pps)); + } + } + break; + case WLC_E_WA_LQM: + if (datalen >= sizeof(wl_event_wa_lqm_t)) { + const wl_event_wa_lqm_t *event_wa_lqm = + (wl_event_wa_lqm_t *)event_data; + const bcm_xtlv_t *subevent; + const wl_event_wa_lqm_basic_t *elqm_basic; + + if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) || + (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) { + DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n", + event_name, event_wa_lqm->ver, event_wa_lqm->len)); + break; + } + + subevent = (const bcm_xtlv_t *)event_wa_lqm->subevent; + if ((subevent->id != WL_EVENT_WA_LQM_BASIC) || + (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) { + DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n", + event_name, subevent->id, subevent->len)); + break; + } + + elqm_basic = (const wl_event_wa_lqm_basic_t *)subevent->data; + BCM_REFERENCE(elqm_basic); + DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n", + event_name, elqm_basic->rssi, elqm_basic->snr, + elqm_basic->tx_rate, elqm_basic->rx_rate)); + } + break; + + case WLC_E_OBSS_DETECTION: + { + DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason)); + break; + } + + case WLC_E_AP_BCN_MUTE: + if (datalen >= sizeof(wlc_bcn_mute_miti_event_data_v1_t)) { + const wlc_bcn_mute_miti_event_data_v1_t + *bcn_mute_miti_evnt_data = event_data; + DHD_EVENT(("MACEVENT: %s, reason :%d uatbtt_count: %d\n", + event_name, reason, bcn_mute_miti_evnt_data->uatbtt_count)); + } + break; + + case WLC_E_TWT_SETUP: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + case WLC_E_TWT_TEARDOWN: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + case WLC_E_TWT_INFO_FRM: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + default: + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n", + event_name, event_type, eabuf, (int)status, (int)reason, + (int)auth_type)); + break; + } + + /* show any appended data if message level is set to bytes or host_data is set */ + if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) { + buf = (uchar *) event_data; + BCM_REFERENCE(buf); + DHD_EVENT((" data (%d) : ", datalen)); + for (i = 0; i < datalen; i++) { + DHD_EVENT((" 0x%02x ", buf[i])); + } + DHD_EVENT(("\n")); + } +} /* wl_show_host_event */ +#endif /* SHOW_EVENTS */ + +#ifdef DNGL_EVENT_SUPPORT +/* Check whether packet is a BRCM dngl event pkt. If it is, process event data. 
*/ +int +dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen) +{ + bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata; + + dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen); + return BCME_OK; +} + +#ifdef PARSE_DONGLE_HOST_EVENT +typedef struct hck_id_to_str_s { + uint32 id; + char *name; +} hck_id_to_str_t; + +hck_id_to_str_t hck_sw_id_to_str[] = { + {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"}, + {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"}, + {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"}, + {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"}, + {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"}, + {WL_HC_DD_PHY, "WL_HC_DD_PHY"}, + {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"}, + {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"}, + {0, NULL} +}; + +hck_id_to_str_t hck_pcie_module_to_str[] = { + {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"}, + {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"}, + {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"}, + {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"}, + {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"}, + {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"}, + {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"}, + {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"}, + {0, NULL} +}; + +hck_id_to_str_t hck_rx_stall_v2_to_str[] = { + {BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"}, + {BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"}, + {BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"}, + {BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"}, + {BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"}, + {BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"}, + {BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"}, + {0, NULL} +}; + +static void +dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck) +{ + while (hck->name != NULL) { + if (hck->id == id) { + DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name)); + return; + } + hck++; + } +} + +void +dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc) +{ + + wl_rx_hc_info_v2_t *hck_rx_stall_v2; + uint16 id; + + id = ltoh16(wl_hc->id); + + if (id == WL_HC_DD_RX_STALL_V2) { + /* map the hck_rx_stall_v2 structure to the value of the XTLV */ + hck_rx_stall_v2 = + (wl_rx_hc_info_v2_t*)wl_hc; + DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d" + " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n", + hck_rx_stall_v2->type, + hck_rx_stall_v2->length, + hck_rx_stall_v2->if_idx, + hck_rx_stall_v2->ac, + hck_rx_stall_v2->rx_hc_pkts, + hck_rx_stall_v2->rx_hc_dropped_all, + hck_rx_stall_v2->rx_hc_alert_th, + hck_rx_stall_v2->reason, + ETHER_TO_MACF(hck_rx_stall_v2->peer_ea))); + dhd_print_dongle_hck_id( + ltoh32(hck_rx_stall_v2->reason), + hck_rx_stall_v2_to_str); + } else { + dhd_print_dongle_hck_id(ltoh16(wl_hc->id), + hck_sw_id_to_str); + } + +} + +#endif /* PARSE_DONGLE_HOST_EVENT */ + +void +dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, + bcm_dngl_event_msg_t *dngl_event, size_t pktlen) +{ + uint8 *p = (uint8 *)(event + 1); + uint16 type = ntoh16_ua((void *)&dngl_event->event_type); + uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen); + uint16 version = ntoh16_ua((void *)&dngl_event->version); + + DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen)); + if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) { + return; + } + if (version != BCM_DNGL_EVENT_MSG_VERSION) { + DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__, + 
version, BCM_DNGL_EVENT_MSG_VERSION)); + return; + } + switch (type) { + case DNGL_E_SOCRAM_IND: + { + bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p; + uint16 tag = ltoh32(socramind_ptr->tag); + uint16 taglen = ltoh32(socramind_ptr->length); + p = (uint8 *)socramind_ptr->value; + DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen)); + switch (tag) { + case SOCRAM_IND_ASSERT_TAG: + { + /* + * The payload consists of - + * null terminated function name padded till 32 bit boundary + + * Line number - (32 bits) + * Caller address (32 bits) + */ + char *fnname = (char *)p; + if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) + + sizeof(uint32) * 2)) { + DHD_ERROR(("Wrong length:%d\n", datalen)); + return; + } + DHD_EVENT(("ASSRT Function:%s ", p)); + p += ROUNDUP(strlen(p) + 1, sizeof(uint32)); + DHD_EVENT(("Line:%d ", *(uint32 *)p)); + p += sizeof(uint32); + DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p)); +#ifdef PARSE_DONGLE_HOST_EVENT + DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n")); +#endif /* PARSE_DONGLE_HOST_EVENT */ + break; + } + case SOCRAM_IND_TAG_HEALTH_CHECK: + { + bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p; + DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n", + ltoh32(dngl_hc->top_module_tag), + ltoh32(dngl_hc->top_module_len), + datalen)); + if (DHD_EVENT_ON()) { + prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len) + + BCM_XTLV_HDR_SIZE, datalen)); + } +#ifdef DHD_LOG_DUMP + memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE); + memcpy(dhdp->health_chk_event_data, p, + MIN(ltoh32(dngl_hc->top_module_len), + HEALTH_CHK_BUF_SIZE)); +#endif /* DHD_LOG_DUMP */ + p = (uint8 *)dngl_hc->value; + + switch (ltoh32(dngl_hc->top_module_tag)) { + case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE: + { + bcm_dngl_pcie_hc_t *pcie_hc; + pcie_hc = (bcm_dngl_pcie_hc_t *)p; + BCM_REFERENCE(pcie_hc); + if (ltoh32(dngl_hc->top_module_len) < + sizeof(bcm_dngl_pcie_hc_t)) { + DHD_ERROR(("Wrong length:%d\n", + ltoh32(dngl_hc->top_module_len))); + return; + } + DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x," + " control:0x%x\n", + ltoh32(pcie_hc->version), + ltoh32(pcie_hc->pcie_err_ind_type), + ltoh32(pcie_hc->pcie_flag), + ltoh32(pcie_hc->pcie_control_reg))); +#ifdef PARSE_DONGLE_HOST_EVENT + dhd_print_dongle_hck_id( + ltoh32(pcie_hc->pcie_err_ind_type), + hck_pcie_module_to_str); +#endif /* PARSE_DONGLE_HOST_EVENT */ + break; + } +#ifdef HCHK_COMMON_SW_EVENT + case HCHK_SW_ENTITY_WL_PRIMARY: + case HCHK_SW_ENTITY_WL_SECONDARY: + { + bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p; + + if (ltoh32(dngl_hc->top_module_len) < + sizeof(bcm_xtlv_t)) { + DHD_ERROR(("WL SW HC Wrong length:%d\n", + ltoh32(dngl_hc->top_module_len))); + return; + } + BCM_REFERENCE(wl_hc); + DHD_EVENT(("WL SW HC type %d len %d\n", + ltoh16(wl_hc->id), ltoh16(wl_hc->len))); + +#ifdef PARSE_DONGLE_HOST_EVENT + dhd_parse_hck_common_sw_event(wl_hc); +#endif /* PARSE_DONGLE_HOST_EVENT */ + break; + + } +#endif /* HCHK_COMMON_SW_EVENT */ + default: + { + DHD_ERROR(("%s:Unknown module TAG:%d\n", + __FUNCTION__, + ltoh32(dngl_hc->top_module_tag))); + break; + } + } + break; + } + default: + DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__)); + if (p && DHD_EVENT_ON()) { + prhex("SOCRAMIND", p, taglen); + } + break; + } + break; + } + default: + DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type)); + if (p && DHD_EVENT_ON()) { + prhex("SOCRAMIND", p, datalen); + } + break; + } +#ifndef BCMDBUS +#ifdef DHD_FW_COREDUMP + if (dhdp->memdump_enabled) { + 
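+#if 0
+/*
+ * [Editor's aside -- illustrative sketch, not part of this patch.]
+ * The SOCRAM_IND_ASSERT_TAG payload parsed above is laid out as a
+ * NUL-terminated function name padded to a 32-bit boundary, followed by a
+ * 32-bit line number and a 32-bit caller address (read host-endian, as the
+ * handler above does). A minimal standalone parser of that layout; all
+ * names here are hypothetical, not driver APIs:
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#define RNDUP32(x) (((x) + 3u) & ~3u)
+
+static int parse_assert_blob(const uint8_t *p, size_t len)
+{
+    size_t name_len = strnlen((const char *)p, len);
+    size_t fixed = RNDUP32(name_len + 1) + 2 * sizeof(uint32_t);
+    uint32_t line, caller;
+
+    if (name_len == len || len < fixed)
+        return -1;  /* no NUL found, or payload truncated */
+    memcpy(&line, p + RNDUP32(name_len + 1), sizeof(line));
+    memcpy(&caller, p + RNDUP32(name_len + 1) + sizeof(line), sizeof(caller));
+    printf("ASSERT %s:%u caller 0x%x\n",
+        (const char *)p, (unsigned)line, (unsigned)caller);
+    return 0;
+}
+#endif /* editor's aside */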
dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT; + if ( +#ifdef GDB_PROXY + !dhdp->gdb_proxy_active && +#endif /* GDB_PROXY */ + dhd_schedule_socram_dump(dhdp)) { + DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__)); + } + } +#else + dhd_dbg_send_urgent_evt(dhdp, p, datalen); +#endif /* DHD_FW_COREDUMP */ +#endif /* !BCMDBUS */ +} + +#endif /* DNGL_EVENT_SUPPORT */ + +/* Stub for now. Will become real function as soon as shim + * is being integrated to Android, Linux etc. + */ +#if !defined(NDIS) +int +wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport) +{ + return BCME_OK; +} +#endif + +int +wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, + uint pktlen, void **data_ptr, void *raw_event) +{ + wl_evt_pport_t evt_pport; + wl_event_msg_t event; + bcm_event_msg_u_t evu; + int ret; + + /* make sure it is a BRCM event pkt and record event data */ + ret = wl_host_event_get_data(pktdata, pktlen, &evu); + if (ret != BCME_OK) { + return ret; + } + + memcpy(&event, &evu.event, sizeof(wl_event_msg_t)); + + /* convert event from network order to host order */ + wl_event_to_host_order(&event); + + /* record event params to evt_pport */ + evt_pport.dhd_pub = dhd_pub; + evt_pport.ifidx = ifidx; + evt_pport.pktdata = pktdata; + evt_pport.data_ptr = data_ptr; + evt_pport.raw_event = raw_event; + evt_pport.data_len = pktlen; + +#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS) + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); + if (shim) { + ret = wl_shim_event_process(shim, &event, &evt_pport); + } else { + /* events can come even before shim is initialized + (when waiting for "wlc_ver" response) + * handle them in a non-shim way. + */ + DHD_ERROR(("%s: Events coming before shim initialization!\n", + __FUNCTION__)); + ret = wl_event_process_default(&event, &evt_pport); + } + } +#else + ret = wl_event_process_default(&event, &evt_pport); +#endif /* WL_WLC_SHIM && WL_WLC_SHIM_EVENTS */ + + return ret; +} /* wl_event_process */ + +/* Check whether packet is a BRCM event pkt. If it is, record event data. 
*/
+int
+wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
+{
+    int ret;
+
+    ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
+    if (ret != BCME_OK) {
+        DHD_ERROR(("%s: Invalid event frame, err = %d\n",
+            __FUNCTION__, ret));
+    }
+
+    return ret;
+}
+
+int
+wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
+    wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+    bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+    bcm_event_msg_u_t evu;
+    uint8 *event_data;
+    uint32 type, status, datalen, reason;
+    uint16 flags;
+    uint evlen;
+    int ret;
+    uint16 usr_subtype;
+#if defined(__linux__)
+    dhd_if_t *ifp = NULL;
+    BCM_REFERENCE(ifp);
+#endif /* __linux__ */
+
+    ret = wl_host_event_get_data(pktdata, pktlen, &evu);
+    if (ret != BCME_OK) {
+        return ret;
+    }
+
+    usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
+    switch (usr_subtype) {
+    case BCMILCP_BCM_SUBTYPE_EVENT:
+        memcpy(event, &evu.event, sizeof(wl_event_msg_t));
+        *data_ptr = &pvt_data[1];
+        break;
+    case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
+#ifdef DNGL_EVENT_SUPPORT
+        /* If it is a DNGL event, process it first */
+        if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
+            /*
+             * Return error purposely to prevent the DNGL event being processed
+             * as a BRCM event
+             */
+            return BCME_ERROR;
+        }
+#endif /* DNGL_EVENT_SUPPORT */
+        return BCME_NOTFOUND;
+    default:
+        return BCME_NOTFOUND;
+    }
+
+    /* start wl_event_msg process */
+    event_data = *data_ptr;
+    type = ntoh32_ua((void *)&event->event_type);
+    flags = ntoh16_ua((void *)&event->flags);
+    status = ntoh32_ua((void *)&event->status);
+    reason = ntoh32_ua((void *)&event->reason);
+    datalen = ntoh32_ua((void *)&event->datalen);
+    evlen = datalen + sizeof(bcm_event_t);
+
+    switch (type) {
+#ifdef PROP_TXSTATUS
+    case WLC_E_FIFO_CREDIT_MAP:
+        dhd_wlfc_enable(dhd_pub);
+        dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
+        WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+            "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+            event_data[2],
+            event_data[3], event_data[4], event_data[5]));
+        break;
+
+    case WLC_E_BCMC_CREDIT_SUPPORT:
+        dhd_wlfc_BCMCCredit_support_event(dhd_pub);
+        break;
+#ifdef LIMIT_BORROW
+    case WLC_E_ALLOW_CREDIT_BORROW:
+        dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
+        break;
+#endif /* LIMIT_BORROW */
+#endif /* PROP_TXSTATUS */
+
+    case WLC_E_ULP:
+        break;
+    case WLC_E_TDLS_PEER_EVENT:
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+        {
+            dhd_tdls_event_handler(dhd_pub, event);
+        }
+#endif
+        break;
+
+    case WLC_E_IF:
+    {
+        struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
+
+        /* Ignore the event if NOIF is set */
+        if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
+            DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
+            return (BCME_UNSUPPORTED);
+        }
+#ifdef PCIE_FULL_DONGLE
+        dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
+            ifevent->opcode, ifevent->role);
+#endif
+#ifdef PROP_TXSTATUS
+        {
+            uint8* ea = pvt_data->eth.ether_dhost;
+            WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n",
+                ifevent->ifidx,
+                ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
+                ((ifevent->role == 0) ? "STA":"AP "),
+                MAC2STRDBG(ea)));
+            (void)ea;
+
+            if (ifevent->opcode == WLC_E_IF_CHANGE)
+                dhd_wlfc_interface_event(dhd_pub,
+                    eWLFC_MAC_ENTRY_ACTION_UPDATE,
+                    ifevent->ifidx, ifevent->role, ea);
+            else
+                dhd_wlfc_interface_event(dhd_pub,
+                    ((ifevent->opcode == WLC_E_IF_ADD) ?
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL), + ifevent->ifidx, ifevent->role, ea); + + /* dhd already has created an interface by default, for 0 */ + if (ifevent->ifidx == 0) + break; + } +#endif /* PROP_TXSTATUS */ + + if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) { + if (ifevent->opcode == WLC_E_IF_ADD) { + if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname, + event->addr.octet)) { + + DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); + return (BCME_ERROR); + } + } else if (ifevent->opcode == WLC_E_IF_DEL) { +#ifdef PCIE_FULL_DONGLE + dhd_flow_rings_delete(dhd_pub, + (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname)); +#endif /* PCIE_FULL_DONGLE */ + dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname, + event->addr.octet); + } else if (ifevent->opcode == WLC_E_IF_CHANGE) { +#ifdef WL_CFG80211 + dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname, + event->addr.octet); +#endif /* WL_CFG80211 */ + } + } else { +#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211) + DHD_INFO(("%s: Invalid ifidx %d for %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); +#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */ + } + /* send up the if event: btamp user needs it */ + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + break; + } + + case WLC_E_NDIS_LINK: + break; + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */ + case WLC_E_PFN_NET_LOST: + break; +#if defined(OEM_ANDROID) && defined(PNO_SUPPORT) + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BEST_BATCHING: + dhd_pno_event_handler(dhd_pub, event, (void *)event_data); + break; +#endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */ +#if defined(RTT_SUPPORT) + case WLC_E_PROXD: +#ifndef WL_CFG80211 + dhd_rtt_event_handler(dhd_pub, event, (void *)event_data); +#endif /* WL_CFG80211 */ + break; +#endif /* RTT_SUPPORT */ + /* These are what external supplicant/authenticator wants */ + case WLC_E_ASSOC_IND: + case WLC_E_AUTH_IND: + case WLC_E_REASSOC_IND: + dhd_findadd_sta(dhd_pub, + dhd_ifname2idx(dhd_pub->info, event->ifname), + &event->addr.octet); + break; +#if !defined(BCMDBUS) && defined(DHD_FW_COREDUMP) + case WLC_E_PSM_WATCHDOG: + DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__)); + if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) { + DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__)); + } + break; +#endif +#ifdef DHD_WMF + case WLC_E_PSTA_PRIMARY_INTF_IND: + dhd_update_psta_interface_for_sta(dhd_pub, event->ifname, + (void *)(event->addr.octet), (void*) event_data); + break; +#endif +#ifdef BCM_ROUTER_DHD + case WLC_E_DPSTA_INTF_IND: + dhd_update_dpsta_interface_for_sta(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname), (void*) event_data); + break; +#endif /* BCM_ROUTER_DHD */ +#ifdef BCMDBG + case WLC_E_MACDBG: + dhd_macdbg_event_handler(dhd_pub, reason, event_data, datalen); + break; +#endif /* BCMDBG */ + case WLC_E_NATOE_NFCT: +#ifdef WL_NATOE + DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__)); + dhd_natoe_ct_event(dhd_pub, event_data); +#endif /* WL_NATOE */ + break; + case WLC_E_SLOTTED_BSS_PEER_OP: + DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: " + "" MACDBG ", status = %d\n", + __FUNCTION__, MAC2STRDBG(event->addr.octet), status)); + if (status == 
WLC_E_STATUS_SLOTTED_PEER_ADD) { + dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); + } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) { + uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); + BCM_REFERENCE(ifindex); + dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); +#ifdef PCIE_FULL_DONGLE + dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, + (char *)&event->addr.octet[0]); +#endif + } else { + DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n", + __FUNCTION__, status)); + } + break; +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + case WLC_E_REASSOC: + ifp = dhd_get_ifp(dhd_pub, event->ifidx); + + if (!ifp) + break; + + /* Consider STA role only since roam is disabled on P2P GC. + * Drop EAPOL M1 frame only if roam is done to same BSS. + */ + if ((status == WLC_E_STATUS_SUCCESS) && + IS_STA_IFACE(ndev_to_wdev(ifp->net)) && + wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) { + ifp->recv_reassoc_evt = TRUE; + } + break; +#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ +#if defined(CSI_SUPPORT) + case WLC_E_CSI: + dhd_csi_event_handler(dhd_pub, event, (void *)event_data); + break; +#endif /* CSI_SUPPORT */ + case WLC_E_LINK: +#ifdef PCIE_FULL_DONGLE + if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname), (uint8)flags) != BCME_OK) { + DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n", + __FUNCTION__)); + break; + } + if (!flags) { + DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n", + __FUNCTION__)); + /* Delete all sta and flowrings */ + dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname)); + dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname)); + } + /* fall through */ +#endif /* PCIE_FULL_DONGLE */ + case WLC_E_DEAUTH: + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC: + case WLC_E_DISASSOC_IND: +#ifdef PCIE_FULL_DONGLE + if (type != WLC_E_LINK) { + uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); + uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex); + uint8 del_sta = TRUE; +#ifdef WL_CFG80211 + if (role == WLC_E_IF_ROLE_STA && + !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) && + !wl_cfg80211_is_event_from_connected_bssid( + dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) { + del_sta = FALSE; + } +#endif /* WL_CFG80211 */ + DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n", + __FUNCTION__, type, flags, status, role, del_sta)); + + if (del_sta) { + DHD_EVENT(("%s: Deleting STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(event->addr.octet))); + + dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); + /* Delete all flowrings for STA and P2P Client */ + if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) { + dhd_flow_rings_delete(dhd_pub, ifindex); + } else { + dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, + (char *)&event->addr.octet[0]); + } + } + } +#endif /* PCIE_FULL_DONGLE */ +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + /* fall through */ + ifp = dhd_get_ifp(dhd_pub, event->ifidx); + if (ifp) { + ifp->recv_reassoc_evt = FALSE; + ifp->post_roam_evt = FALSE; + } +#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ + /* fall through */ + default: + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); +#ifdef DHD_UPDATE_INTF_MAC + if ((WLC_E_LINK==type)&&(WLC_EVENT_MSG_LINK&flags)) { + 
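+#if 0
+/*
+ * [Editor's aside -- illustrative sketch, not part of this patch.]
+ * The deauth/disassoc handling above deletes either every flowring on the
+ * interface (STA and P2P-client roles, where the only peer is the AP/GO) or
+ * just the rings belonging to the departed peer (AP/GO roles). A condensed,
+ * standalone restatement of that decision; the enum here is hypothetical:
+ */
+#include <stdbool.h>
+
+enum role { ROLE_STA, ROLE_P2P_CLIENT, ROLE_AP, ROLE_P2P_GO };
+
+/* Returns true when the whole interface loses its rings on disconnect,
+ * false when only the single peer's rings should be removed.
+ */
+static bool delete_all_rings_on_disconnect(enum role r)
+{
+    /* In infrastructure-client roles the only peer is the AP/GO, so a
+     * disconnect invalidates every ring on the interface.
+     */
+    return (r == ROLE_STA) || (r == ROLE_P2P_CLIENT);
+}
+#endif /* editor's aside */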
dhd_event_ifchange(dhd_pub->info, + (struct wl_event_data_if *)event, + event->ifname, + event->addr.octet); + } +#endif /* DHD_UPDATE_INTF_MAC */ + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); + BCM_REFERENCE(flags); + BCM_REFERENCE(status); + BCM_REFERENCE(reason); + + break; + } +#if defined(BCM_ROUTER_DHD) || defined(STBAP) + /* For routers, EAPD will be working on these events. + * Overwrite interface name to that event is pushed + * to host with its registered interface name + */ + memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ); +#endif + +#ifdef DHD_STATUS_LOGGING + if (dhd_pub->statlog) { + dhd_statlog_process_event(dhd_pub, type, *ifidx, + status, reason, flags); + } +#endif /* DHD_STATUS_LOGGING */ + +#ifdef SHOW_EVENTS + if (DHD_FWLOG_ON() || DHD_EVENT_ON()) { + wl_show_host_event(dhd_pub, event, + (void *)event_data, raw_event, dhd_pub->enable_log); + } +#endif /* SHOW_EVENTS */ + + return (BCME_OK); +} /* wl_process_host_event */ + +int +wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen, + wl_event_msg_t *event, void **data_ptr, void *raw_event) +{ + return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr, + raw_event); +} + +void +dhd_print_buf(void *pbuf, int len, int bytes_per_line) +{ +#ifdef DHD_DEBUG + int i, j = 0; + unsigned char *buf = pbuf; + + if (bytes_per_line == 0) { + bytes_per_line = len; + } + + for (i = 0; i < len; i++) { + printf("%2.2x", *buf++); + j++; + if (j == bytes_per_line) { + printf("\n"); + j = 0; + } else { + printf(":"); + } + } + printf("\n"); +#endif /* DHD_DEBUG */ +} +#ifndef strtoul +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#endif + +/* Convert user's input in hex pattern to byte-size mask */ +int +wl_pattern_atoh(char *src, char *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && + strncmp(src, "0X", 2) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + DHD_ERROR(("Mask invalid format. Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[3]; + bcm_strncpy_s(num, sizeof(num), src, 2); + num[2] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += 2; + } + return i; +} + +#if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING) +int +pattern_atoh_len(char *src, char *dst, int len) +{ + int i; + if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 && + strncmp(src, "0X", HD_PREFIX_SIZE) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + HD_PREFIX_SIZE; /* Skip past 0x */ + if (strlen(src) % HD_BYTE_SIZE != 0) { + DHD_ERROR(("Mask invalid format. 
Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[HD_BYTE_SIZE + 1]; + + if (i > len - 1) { + DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len)); + return -1; + } + bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE); + num[HD_BYTE_SIZE] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += HD_BYTE_SIZE; + } + return i; +} +#endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */ + +#ifdef PKT_FILTER_SUPPORT +void +dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode) +{ + char *argv[8]; + int i = 0; + const char *str; + int buf_len; + int str_len; + char *arg_save = 0, *arg_org = 0; + int rc; + char buf[32] = {0}; + wl_pkt_filter_enable_t enable_parm; + wl_pkt_filter_enable_t * pkt_filterp; + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + arg_org = arg_save; + memcpy(arg_save, arg, strlen(arg) + 1); + + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_enable"; + str_len = strlen(str); + bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1); + buf[ sizeof(buf) - 1 ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1); + + /* Parse packet filter id. */ + enable_parm.id = htod32(strtoul(argv[i], NULL, 0)); + if (dhd_conf_del_pkt_filter(dhd, enable_parm.id)) + goto fail; + + /* Parse enable/disable value. */ + enable_parm.enable = htod32(enable); + + buf_len += sizeof(enable_parm); + memcpy((char *)pkt_filterp, + &enable_parm, + sizeof(enable_parm)); + + /* Enable/disable the specified filter. */ + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) { + DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n", + __FUNCTION__, enable?"enable":"disable", arg, rc)); + dhd_set_packet_filter(dhd); + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) { + DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + } else { + DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n", + __FUNCTION__, arg)); + } + } + else + DHD_TRACE(("%s: successfully %s pktfilter %s\n", + __FUNCTION__, enable?"enable":"disable", arg)); + + /* Contorl the master mode */ + rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode", + master_mode, WLC_SET_VAR, TRUE, 0); + rc = rc >= 0 ? 
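+#if 0
+/*
+ * [Editor's aside -- illustrative sketch, not part of this patch.]
+ * The WLC_SET_VAR buffers built in this region follow one convention: the
+ * iovar name, a NUL, then the payload copied in with memcpy (the buffer is
+ * not guaranteed to be aligned for a struct cast). A minimal standalone
+ * packer with hypothetical names, mirroring what bcm_mkiovar does here:
+ */
+#include <stddef.h>
+#include <string.h>
+
+static size_t pack_iovar(char *buf, size_t bufsz,
+    const char *name, const void *payload, size_t payload_len)
+{
+    size_t name_len = strlen(name) + 1; /* include the NUL */
+
+    if (name_len + payload_len > bufsz)
+        return 0;   /* caller treats 0 as "buffer too short" */
+    memcpy(buf, name, name_len);
+    memcpy(buf + name_len, payload, payload_len);
+    return name_len + payload_len;  /* total ioctl length */
+}
+#endif /* editor's aside */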
0 : rc; + if (rc) + DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n", + __FUNCTION__, master_mode, rc)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); +} + +/* Packet filter section: extended filters have named offsets, add table here */ +typedef struct { + char *name; + uint16 base; +} wl_pfbase_t; + +static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES }; + +static int +wl_pkt_filter_base_parse(char *name) +{ + uint i; + char *bname, *uname; + + for (i = 0; i < ARRAYSIZE(basenames); i++) { + bname = basenames[i].name; + for (uname = name; *uname; bname++, uname++) { + if (*bname != bcm_toupper(*uname)) { + break; + } + } + if (!*uname && !*bname) { + break; + } + } + + if (i < ARRAYSIZE(basenames)) { + return basenames[i].base; + } else { + return -1; + } +} + +void +dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) +{ + const char *str; + wl_pkt_filter_t pkt_filter; + wl_pkt_filter_t *pkt_filterp; + int buf_len; + int str_len; + int rc = -1; + uint32 mask_size; + uint32 pattern_size; + char *argv[MAXPKT_ARG] = {0}, * buf = 0; + int i = 0; + char *arg_save = 0, *arg_org = 0; + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + arg_org = arg_save; + + if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + memset(buf, 0, MAX_PKTFLT_BUF_SIZE); + memcpy(arg_save, arg, strlen(arg) + 1); + + if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) { + DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf))); + goto fail; + } + + argv[i] = bcmstrtok(&arg_save, " ", 0); + while (argv[i++]) { + if (i >= MAXPKT_ARG) { + DHD_ERROR(("Invalid args provided\n")); + goto fail; + } + argv[i] = bcmstrtok(&arg_save, " ", 0); + } + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_add"; + str_len = strlen(str); + bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len); + buf[ str_len ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1); + + /* Parse packet filter id. */ + pkt_filter.id = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Polarity not provided\n")); + goto fail; + } + + /* Parse filter polarity. */ + pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Filter type not provided\n")); + goto fail; + } + + /* Parse filter type. */ + pkt_filter.type = htod32(strtoul(argv[i], NULL, 0)); + + if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) { + if (argv[++i] == NULL) { + DHD_ERROR(("Offset not provided\n")); + goto fail; + } + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Bitmask not provided\n")); + goto fail; + } + + /* Parse pattern filter mask. */ + rc = wl_pattern_atoh(argv[i], + (char *) pkt_filterp->u.pattern.mask_and_pattern); + + if (rc == -1) { + DHD_ERROR(("Rejecting: %s\n", argv[i])); + goto fail; + } + mask_size = htod32(rc); + if (argv[++i] == NULL) { + DHD_ERROR(("Pattern not provided\n")); + goto fail; + } + + /* Parse pattern filter pattern. 
*/ + rc = wl_pattern_atoh(argv[i], + (char *) &pkt_filterp->u.pattern.mask_and_pattern[rc]); + + if (rc == -1) { + DHD_ERROR(("Rejecting: %s\n", argv[i])); + goto fail; + } + pattern_size = htod32(rc); + if (mask_size != pattern_size) { + DHD_ERROR(("Mask and pattern not the same size\n")); + goto fail; + } + + pkt_filter.u.pattern.size_bytes = mask_size; + buf_len += WL_PKT_FILTER_FIXED_LEN; + buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * rc); + + /* Keep-alive attributes are set in local variable (keep_alive_pkt), and + * then memcpy'ed into buffer (keep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)pkt_filterp, + &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); + } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) { + int list_cnt = 0; + char *endptr = NULL; + wl_pkt_filter_pattern_listel_t *pf_el = + (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0]; + + while (argv[++i] != NULL) { + /* Check valid buffer size. */ + if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) { + DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n")); + goto fail; + } + + /* Parse pattern filter base and offset. */ + if (bcm_isdigit(*argv[i])) { + /* Numeric base */ + rc = strtoul(argv[i], &endptr, 0); + } else { + endptr = strchr(argv[i], ':'); + if (endptr) { + *endptr = '\0'; + rc = wl_pkt_filter_base_parse(argv[i]); + if (rc == -1) { + printf("Invalid base %s\n", argv[i]); + goto fail; + } + *endptr = ':'; + } + } + + if (endptr == NULL) { + printf("Invalid [base:]offset format: %s\n", argv[i]); + goto fail; + } + + if (*endptr == ':') { + pf_el->base_offs = htod16(rc); + rc = strtoul(endptr + 1, &endptr, 0); + } else { + /* Must have had a numeric offset only */ + pf_el->base_offs = htod16(0); + } + + if (*endptr) { + printf("Invalid [base:]offset format: %s\n", argv[i]); + goto fail; + } + if (rc > 0x0000FFFF) { + printf("Offset too large\n"); + goto fail; + } + pf_el->rel_offs = htod16(rc); + + /* Clear match_flag (may be set in parsing which follows) */ + pf_el->match_flags = htod16(0); + + /* Parse pattern filter mask and pattern directly into ioctl buffer */ + if (argv[++i] == NULL) { + printf("Bitmask not provided\n"); + goto fail; + } + rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data); + if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) { + printf("Rejecting: %s\n", argv[i]); + goto fail; + } + mask_size = htod16(rc); + + if (argv[++i] == NULL) { + printf("Pattern not provided\n"); + goto fail; + } + + endptr = argv[i]; + if (*endptr == '!') { + pf_el->match_flags = + htod16(WL_PKT_FILTER_MFLAG_NEG); + if (*(++endptr) == '\0') { + printf("Pattern not provided\n"); + goto fail; + } + } + rc = wl_pattern_atoh(endptr, (char*)&pf_el->mask_and_data[rc]); + if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) { + printf("Rejecting: %s\n", argv[i]); + goto fail; + } + pattern_size = htod16(rc); + + if (mask_size != pattern_size) { + printf("Mask and pattern not the same size\n"); + goto fail; + } + + pf_el->size_bytes = mask_size; + + /* Account for the size of this pattern element */ + buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc; + + /* Move to next element location in ioctl buffer */ + pf_el = (wl_pkt_filter_pattern_listel_t*) + ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc); + + /* Count list element */ + list_cnt++; + } + + /* Account for initial fixed size, and copy initial fixed fields */ + buf_len += 
WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN; + + if (buf_len > MAX_PKTFLT_BUF_SIZE) { + DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n")); + goto fail; + } + + /* Update list count and total size */ + pkt_filter.u.patlist.list_cnt = list_cnt; + pkt_filter.u.patlist.PAD1[0] = 0; + pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp; + pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN; + + memcpy((char *)pkt_filterp, &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN); + } else { + DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type)); + goto fail; + } + + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + + if (rc) + DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); + + if (buf) + MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE); +} + +void +dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) +{ + int ret; + + ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete", + id, WLC_SET_VAR, TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n", + __FUNCTION__, id, ret)); + } + else + DHD_TRACE(("%s: successfully deleted pktfilter %d\n", + __FUNCTION__, id)); +} +#endif /* PKT_FILTER_SUPPORT */ + +/* ========================== */ +/* ==== ARP OFFLOAD SUPPORT = */ +/* ========================== */ +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) +{ + int retcode; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol", + arp_mode, WLC_SET_VAR, TRUE, 0); + + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) { + DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n", + __FUNCTION__, arp_mode, retcode)); + } else { + DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n", + __FUNCTION__, arp_mode)); + dhd->arpol_configured = TRUE; + } +} + +void +dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) +{ + int retcode; + + if (!dhd->arpol_configured) { + /* If arpol is not applied, apply it */ + dhd_arp_offload_set(dhd, dhd_arp_mode); + } + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe", + arp_enable, WLC_SET_VAR, TRUE, 0); + retcode = retcode >= 0 ? 
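+#if 0
+/*
+ * [Editor's aside -- illustrative sketch, not part of this patch.]
+ * The pattern-list branch above sizes the ioctl buffer incrementally: each
+ * list element costs its fixed header plus 2*n bytes (n mask bytes followed
+ * by n pattern bytes), and the running total is checked against the buffer
+ * limit before each element is written. The arithmetic, restated standalone
+ * with a hypothetical constant standing in for the driver's define:
+ */
+#include <stddef.h>
+
+#define EL_FIXED_LEN 8u /* stand-in for WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN */
+
+static size_t patlist_bytes(const size_t *mask_lens, size_t count)
+{
+    size_t total = 0, i;
+
+    for (i = 0; i < count; i++)
+        total += EL_FIXED_LEN + 2 * mask_lens[i]; /* mask + pattern */
+    return total;
+}
+#endif /* editor's aside */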
0 : retcode; + if (retcode) + DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n", + __FUNCTION__, arp_enable, retcode)); + else +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n", + __FUNCTION__, arp_enable)); +#else + DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n", + __FUNCTION__, arp_enable)); +#endif /* DHD_LOG_DUMP */ + if (arp_enable) { + uint32 version; + retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version", + &version, WLC_GET_VAR, FALSE, 0); + if (retcode) { + DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n", + __FUNCTION__, retcode)); + dhd->arp_version = 1; + } + else { + DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version)); + dhd->arp_version = version; + } + } +} + +/* XXX ANDREY: clear AOE arp_table */ +void +dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + else { +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__)); +#else + DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__)); +#endif /* DHD_LOG_DUMP */ + } + /* mac address isn't cleared here but it will be cleared after dongle off */ + dhd->hmac_updated = 0; +} + +/* XXX ANDREY: clear hostip table */ +void +dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + else { +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__)); +#else + DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__)); +#endif /* DHD_LOG_DUMP */ + } +} + +void +dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx) +{ + int ret; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret)); + else { + /* mac address is updated in the dongle */ + dhd->hmac_updated = 1; +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__)); +#else + DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__)); +#endif /* DHD_LOG_DUMP */ + } +} + +int +dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx) +{ + int ret, i; + uint32 *ptr32 = buf; + bool clr_bottom = FALSE; + + if (!buf) + return -1; + if (dhd == NULL) return -1; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen, + FALSE); + if (ret) { + DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n", + __FUNCTION__, ret)); + + return -1; + } + + /* clean up the buf, ascii reminder */ + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (!clr_bottom) { + if (*ptr32 == 0) + clr_bottom = TRUE; + } else { + *ptr32 = 0; + } + ptr32++; + } + + return 0; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* + * Neighbor Discovery Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up/goes down + */ +int +dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable) +{ + int retcode; + + if (dhd == NULL) + return -1; + +#if defined(WL_CFG80211) && defined(WL_NAN) + if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) { + /* If nan dp is active, skip 
NDO */ + DHD_INFO(("Active NAN DP, skip NDO\n")); + return 0; + } +#endif /* WL_CFG80211 && WL_NAN */ +#ifdef WL_CFG80211 + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + /* NDO disable on STA+SOFTAP mode */ + ndo_enable = FALSE; + } +#endif /* WL_CFG80211 */ + retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe", + ndo_enable, WLC_SET_VAR, TRUE, 0); + if (retcode) + DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n", + __FUNCTION__, ndo_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabed ndo offload to %d\n", + __FUNCTION__, ndo_enable)); + + return retcode; +} + +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up + */ +int +dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr, + IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry added \n", + __FUNCTION__)); + + return retcode; +} + +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface goes down + */ +int +dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip_clear", NULL, + 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry removed \n", + __FUNCTION__)); + + return retcode; +} +/* Enhanced ND offload */ +uint16 +dhd_ndo_get_version(dhd_pub_t *dhdp) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_get_ver; + int iov_len; + int retcode; + uint16 ver = 0; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + memset(&iovbuf, 0, sizeof(iovbuf)); + ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER); + ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16)); + ndo_get_ver.u.version = 0; + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver, + WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0); + + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + /* ver iovar not supported. 
NDO version is 0 */ + ver = 0; + } else { + wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf; + + if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) && + (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) && + (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN + + sizeof(uint16))) { + /* nd_hostip iovar version */ + ver = dtoh16(ndo_ver_ret->u.version); + } + + DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver)); + } + + return ver; +} + +int +dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_add_addr; + int iov_len; + int retcode; + + if (dhdp == NULL || ipv6addr == 0) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD); + ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN); + /* wl_nd_host_ip_addr_t param for add */ + memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN); + ndo_add_addr.u.host_ip.type = type; + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr, + WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); +#ifdef NDO_CONFIG_SUPPORT + if (retcode == BCME_NORESOURCE) { + /* number of host ip addr exceeds FW capacity, Deactivate ND offload */ + DHD_INFO(("%s: Host IP count exceed device capacity," + "ND offload deactivated\n", __FUNCTION__)); + dhdp->ndo_host_ip_overflow = TRUE; + dhd_ndo_enable(dhdp, FALSE); + } +#endif /* NDO_CONFIG_SUPPORT */ + } else { + DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_del_addr; + int iov_len; + int retcode; + + if (dhdp == NULL || ipv6addr == 0) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL); + ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN); + /* wl_nd_host_ip_addr_t param for del */ + memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN); + ndo_del_addr.u.host_ip.type = 0; /* don't care */ + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, + WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + } else { + DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_del_addr; + int iov_len; + int retcode; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) { + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC); + } else if (type == 
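+#if 0
+/*
+ * [Editor's aside -- illustrative sketch, not part of this patch.]
+ * dhd_ndo_get_version above probes a versioned iovar: it sends a fixed
+ * header whose op_type requests the version and treats an ioctl failure as
+ * "feature level 0". That probe-then-fallback shape, reduced to standalone
+ * form; the names and the stub below are hypothetical:
+ */
+#include <stdint.h>
+
+/* Stand-in for the firmware query; returns nonzero on ioctl failure. */
+static int query_fw_version(uint16_t *ver)
+{
+    *ver = 2;   /* pretend the firmware reported nd_hostip v2 */
+    return 0;
+}
+
+static uint16_t probe_feature_version(void)
+{
+    uint16_t ver;
+
+    if (query_fw_version(&ver) != 0)
+        return 0;   /* iovar unsupported: oldest feature level */
+    return ver;
+}
+#endif /* editor's aside */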
WL_ND_IPV6_ADDR_TYPE_ANYCAST) { + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC); + } else { + return BCME_BADARG; + } + ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN); + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN, + iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + } else { + DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int iov_len; + int retcode; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int), + iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0); + if (retcode) + DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n", + __FUNCTION__, enable, retcode)); + else { + DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n", + __FUNCTION__, enable)); + } + + return retcode; +} +#ifdef SIMPLE_ISCAN + +uint iscan_thread_id = 0; +iscan_buf_t * iscan_chain = 0; + +iscan_buf_t * +dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf) +{ + iscan_buf_t *iscanbuf_alloc = 0; + iscan_buf_t *iscanbuf_head; + + DHD_ISCAN(("%s: Entered\n", __FUNCTION__)); + dhd_iscan_lock(); + + iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t)); + if (iscanbuf_alloc == NULL) + goto fail; + + iscanbuf_alloc->next = NULL; + iscanbuf_head = *iscanbuf; + + DHD_ISCAN(("%s: addr of allocated node = 0x%X" + "addr of iscanbuf_head = 0x%X dhd = 0x%X\n", + __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd)); + + if (iscanbuf_head == NULL) { + *iscanbuf = iscanbuf_alloc; + DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__)); + goto fail; + } + + while (iscanbuf_head->next) + iscanbuf_head = iscanbuf_head->next; + + iscanbuf_head->next = iscanbuf_alloc; + +fail: + dhd_iscan_unlock(); + return iscanbuf_alloc; +} + +void +dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete) +{ + iscan_buf_t *iscanbuf_free = 0; + iscan_buf_t *iscanbuf_prv = 0; + iscan_buf_t *iscanbuf_cur; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + DHD_ISCAN(("%s: Entered\n", __FUNCTION__)); + + dhd_iscan_lock(); + + iscanbuf_cur = iscan_chain; + + /* If iscan_delete is null then delete the entire + * chain or else delete specific one provided + */ + if (!iscan_delete) { + while (iscanbuf_cur) { + iscanbuf_free = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + iscanbuf_free->next = 0; + MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t)); + } + iscan_chain = 0; + } else { + while (iscanbuf_cur) { + if (iscanbuf_cur == iscan_delete) + break; + iscanbuf_prv = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + } + if (iscanbuf_prv) + iscanbuf_prv->next = iscan_delete->next; + + iscan_delete->next = 0; + MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t)); + + if (!iscanbuf_prv) + iscan_chain = 0; + } + dhd_iscan_unlock(); +} + +iscan_buf_t * +dhd_iscan_result_buf(void) +{ + return iscan_chain; +} + +int +dhd_iscan_issue_request(void * dhdp, 
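+#if 0
+/*
+ * [Editor's aside -- illustrative sketch, not part of this patch.]
+ * The allocation below guards against unsigned wrap-around: after computing
+ * size + strlen(name) + 1 it verifies the sum is not smaller than either
+ * operand, which can only happen if the addition overflowed. The same check
+ * in isolation, with hypothetical names:
+ */
+#include <stdint.h>
+#include <stdbool.h>
+
+static bool add_overflows_u32(uint32_t a, uint32_t b, uint32_t *out)
+{
+    uint32_t sum = a + b;   /* wraps modulo 2^32 on overflow */
+
+    if (sum < a)            /* wrapped: sum is smaller than an operand */
+        return true;
+    *out = sum;
+    return false;
+}
+#endif /* editor's aside */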
wl_iscan_params_t *pParams, uint32 size)
+{
+    int rc = -1;
+    dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+    char *buf = NULL;
+    char iovar[] = "iscan";
+    uint32 allocSize = 0;
+    wl_ioctl_t ioctl;
+    int len;
+
+    if (pParams) {
+        allocSize = (size + strlen(iovar) + 1);
+        if ((allocSize < size) || (allocSize < strlen(iovar)))
+        {
+            DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
+                __FUNCTION__, allocSize, size, strlen(iovar)));
+            goto cleanUp;
+        }
+        buf = MALLOC(dhd->osh, allocSize);
+
+        if (buf == NULL)
+        {
+            DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
+            goto cleanUp;
+        }
+        ioctl.cmd = WLC_SET_VAR;
+        len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
+        if (len == 0) {
+            rc = BCME_BUFTOOSHORT;
+            goto cleanUp;
+        }
+        rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
+    }
+
+cleanUp:
+    if (buf) {
+        MFREE(dhd->osh, buf, allocSize);
+    }
+
+    return rc;
+}
+
+static int
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
+{
+    wl_iscan_results_t *list_buf;
+    wl_iscan_results_t list;
+    wl_scan_results_t *results;
+    iscan_buf_t *iscan_cur;
+    int status = -1;
+    dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+    int rc;
+    wl_ioctl_t ioctl;
+    int len;
+
+    DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
+
+    iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
+    if (!iscan_cur) {
+        DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
+        dhd_iscan_free_buf(dhdp, 0);
+        dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+        dhd_ind_scan_confirm(dhdp, FALSE);
+        goto fail;
+    }
+
+    dhd_iscan_lock();
+
+    memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+    list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
+    results = &list_buf->results;
+    results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+    results->version = 0;
+    results->count = 0;
+
+    memset(&list, 0, sizeof(list));
+    list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+    len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
+        iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+    if (len == 0) {
+        /* drop the iscan lock before dhd_iscan_free_buf(), which takes it again */
+        dhd_iscan_unlock();
+        dhd_iscan_free_buf(dhdp, 0);
+        dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+        dhd_ind_scan_confirm(dhdp, FALSE);
+        status = BCME_BUFTOOSHORT;
+        goto fail;
+    }
+    ioctl.cmd = WLC_GET_VAR;
+    ioctl.set = FALSE;
+    rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+
+    results->buflen = dtoh32(results->buflen);
+    results->version = dtoh32(results->version);
+    *scan_count = results->count = dtoh32(results->count);
+    status = dtoh32(list_buf->status);
+    DHD_ISCAN(("%s: Got %d results, status = (%x)\n", __FUNCTION__, results->count, status));
+
+    dhd_iscan_unlock();
+
+    if (!(*scan_count)) {
+        /* TODO: race condition when FLUSH already called */
+        dhd_iscan_free_buf(dhdp, 0);
+    }
+fail:
+    return status;
+}
+
+#ifdef NDIS
+/* XXXX The following code has a bit of OS dependency.
+ * Cleanup to move the OS dependency to other + * per port code so that iscan logic here can be + * leveraged across all OS's + */ +NDIS_EVENT iscan_event; +HANDLE tHandle; +NDIS_SPIN_LOCK dhd_iscan_queue_lock; + +void +dhd_iscan_lock(void) +{ + NdisAcquireSpinLock(&dhd_iscan_queue_lock); +} + +void +dhd_iscan_unlock(void) +{ + NdisReleaseSpinLock(&dhd_iscan_queue_lock); +} + +void +dhd_iscan_notify(void) +{ + DHD_ISCAN(("%s: Entered\n", __FUNCTION__)); + NdisSetEvent(&iscan_event); +} + +static void +dhd_iscan_func(void *h) +{ + int status; + uint scan_count; + dhd_pub_t *dhd = dhd_bus_pub(h); + + /* Read the priority from registry */ + CeSetThreadPriority(GetCurrentThread(), 128); + DHD_ISCAN(("%s: thread created\n", __FUNCTION__)); + + while (TRUE) { + NdisWaitEvent(&iscan_event, 0); /* wait forever */ + NdisResetEvent(&iscan_event); /* reset the event */ + DHD_ISCAN(("%s: thread scheduled\n", __FUNCTION__)); + + status = dhd_iscan_get_partial_result(h, &scan_count); + + if (status == WL_SCAN_RESULTS_PARTIAL) { + dhd_iscan_request(h, WL_SCAN_ACTION_CONTINUE); + } else if (status == WL_SCAN_RESULTS_SUCCESS) { + if (dhd_iscan_in_progress(h)) { + dhd_ind_scan_confirm(h, TRUE); + } + } else if (status == WL_SCAN_RESULTS_ABORTED || + status == WL_SCAN_RESULTS_NO_MEM) { + dhd_iscan_request(h, WL_SCAN_ACTION_ABORT); + dhd_ind_scan_confirm(h, FALSE); + } else { + dhd_iscan_request(h, WL_SCAN_ACTION_ABORT); + dhd_ind_scan_confirm(h, FALSE); + } + } +} + +int +dhd_iscan_attach(void *dhdp) +{ + DHD_ISCAN(("%s: dhdp = 0x%x\n", __FUNCTION__, dhdp)); + + NdisInitializeEvent(&iscan_event); + NdisResetEvent(&iscan_event); + NdisAllocateSpinLock(&dhd_iscan_queue_lock); + + /* XXX - should move to ndishared sublayer */ + tHandle = CreateThread(NULL, + 0, + (LPTHREAD_START_ROUTINE)dhd_iscan_func, + (void *)dhdp, + 0, + &iscan_thread_id); + + if (!iscan_thread_id) + return NDIS_STATUS_FAILURE; + + return NDIS_STATUS_SUCCESS; +} + +void +dhd_iscan_deattach(void *dhdp) +{ + if (iscan_thread_id) + { + NdisFreeEvent(&iscan_event); + NdisFreeSpinLock(&dhd_iscan_queue_lock); + CloseHandle(tHandle); + iscan_thread_id = 0; + } +} +#endif /* NDIS */ +#endif /* SIMPLE_ISCAN */ + +/* + * returns = TRUE if associated, FALSE if not associated + */ +bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval) +{ + char bssid[6], zbuf[6]; + int ret = -1; + + bzero(bssid, 6); + bzero(zbuf, 6); + + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, + ETHER_ADDR_LEN, FALSE, ifidx); + /* XXX:AS!!! 
res can be: -17(BCME_NOTASSOCIATED), -22(BCME_NORESOURCE), and 0(OK)
+      OK does not mean associated yet; the returned bssid
+      still needs to be checked for a non-zero array
+     */
+    DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+    if (ret == BCME_NOTASSOCIATED) {
+        DHD_ERROR(("%s: WLC_GET_BSSID, NOT ASSOCIATED\n", __FUNCTION__));
+    }
+
+    if (retval)
+        *retval = ret;
+
+    if (ret < 0)
+        return FALSE;
+
+    if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
+        DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+        return FALSE;
+    }
+    return TRUE;
+}
+
+/* Function to estimate a possible DTIM_SKIP value */
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
+{
+    int bcn_li_dtim = 1; /* default: no dtim skip setting */
+    int ret = -1;
+    int allowed_skip_dtim_cnt = 0;
+
+    if (dhd->disable_dtim_in_suspend) {
+        DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+        bcn_li_dtim = 0;
+        return bcn_li_dtim;
+    }
+
+    /* Check if associated */
+    if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+        DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+        return bcn_li_dtim;
+    }
+
+    if (dtim_period == NULL || bcn_interval == NULL)
+        return bcn_li_dtim;
+
+    /* read associated AP beacon interval */
+    if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+        bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
+        DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+        return bcn_li_dtim;
+    }
+
+    /* read associated AP dtim setup */
+    if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+        dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
+        DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+        return bcn_li_dtim;
+    }
+
+    /* if not associated, just return */
+    if (*dtim_period == 0) {
+        return bcn_li_dtim;
+    }
+
+    if (dhd->max_dtim_enable) {
+        bcn_li_dtim =
+            (int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
+        if (bcn_li_dtim == 0) {
+            bcn_li_dtim = 1;
+        }
+    } else {
+        /* attempt to use the platform-defined dtim skip interval */
+        bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+        /* check if the STA listen interval fits into the AP dtim */
+        if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
+            /* AP DTIM too big for our Listen Interval: no dtim skipping */
+            bcn_li_dtim = NO_DTIM_SKIP;
+            DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+                __FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
+            return bcn_li_dtim;
+        }
+
+        if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+            allowed_skip_dtim_cnt =
+                MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
+            bcn_li_dtim =
+                (allowed_skip_dtim_cnt != 0) ?
+                    allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+        }
+
+        if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
+            /* Round up dtim_skip to fit into the STA's Listen Interval */
+            bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
+            DHD_TRACE(("%s adjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+        }
+    }
+
+    if (dhd->conf->suspend_bcn_li_dtim >= 0)
+        bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
+    DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+        __FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+    return bcn_li_dtim;
+}
+#else /* OEM_ANDROID && BCMPCIE */
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
+{
+    int bcn_li_dtim = 1; /* default: no dtim skip setting */
+    int ret = -1;
+    int dtim_period = 0;
+    int ap_beacon = 0;
+    int allowed_skip_dtim_cnt = 0;
+
+    if (dhd->disable_dtim_in_suspend) {
+        DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+        bcn_li_dtim = 0;
+        goto exit;
+    }
+
+    /* Check if associated */
+    if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+        DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+        goto exit;
+    }
+
+    /* read associated AP beacon interval */
+    if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+        &ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
+        DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+        goto exit;
+    }
+
+    /* read the associated AP's dtim setup */
+    if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+        &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
+        DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+        goto exit;
+    }
+
+    /* if not associated, just exit */
+    if (dtim_period == 0) {
+        goto exit;
+    }
+
+    if (dhd->max_dtim_enable) {
+        bcn_li_dtim =
+            (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
+        if (bcn_li_dtim == 0) {
+            bcn_li_dtim = 1;
+        }
+    } else {
+        /* attempt to use the platform-defined dtim skip interval */
+        bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+        /* check if the STA listen interval fits into the AP dtim */
+        if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
+            /* AP DTIM too big for our Listen Interval: no dtim skipping */
+            bcn_li_dtim = NO_DTIM_SKIP;
+            DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+                __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
+            goto exit;
+        }
+
+        if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+            allowed_skip_dtim_cnt =
+                MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
+            bcn_li_dtim =
+                (allowed_skip_dtim_cnt != 0) ?
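+#if 0
+/*
+ * [Editor's aside -- illustrative sketch, not part of this patch.]
+ * Both variants above compute the same quantity: how many DTIM beacons the
+ * STA may skip while suspended without sleeping longer than
+ * MAX_DTIM_ALLOWED_INTERVAL ms, given the AP's beacon period (TU, roughly
+ * ms) and DTIM period. The core arithmetic, standalone:
+ */
+static int dtim_skip_count(int beacon_ms, int dtim_period, int max_sleep_ms)
+{
+    int skip = max_sleep_ms / (beacon_ms * dtim_period);
+
+    return (skip > 0) ? skip : 1;   /* never skip below every DTIM */
+}
+/* Example: beacon 100ms, DTIM 2, cap 900ms -> 900/200 = 4 DTIMs skipped. */
+#endif /* editor's aside */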
allowed_skip_dtim_cnt : NO_DTIM_SKIP; + } + + if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) { + /* Round up dtim_skip to fit into STAs Listen Interval */ + bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period); + DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim)); + } + } + + if (dhd->conf->suspend_bcn_li_dtim >= 0) + bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim; + DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n", + __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL)); + +exit: + return bcn_li_dtim; +} +#endif /* OEM_ANDROID && BCMPCIE */ + +#ifdef CONFIG_SILENT_ROAM +int +dhd_sroam_set_mon(dhd_pub_t *dhd, bool set) +{ + int ret = BCME_OK; + wlc_sroam_t *psroam; + wlc_sroam_info_t *sroam; + uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN; + + /* Check if associated */ + if (dhd_is_associated(dhd, 0, NULL) == FALSE) { + DHD_TRACE(("%s NOT assoc\n", __FUNCTION__)); + return ret; + } + + if (set && (dhd->op_mode & + (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) { + DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode)); + return ret; + } + + if (!dhd->sroam_turn_on) { + DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on)); + return ret; + } + psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen); + if (!psroam) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } + + ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret)); + goto done; + } + + if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) { + ret = BCME_VERSION; + goto done; + } + + sroam = (wlc_sroam_info_t *)psroam->data; + sroam->sroam_on = set; + DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off")); + + ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret)); + } + +done: + if (psroam) { + MFREE(dhd->osh, psroam, sroamlen); + } + + return ret; +} +#endif /* CONFIG_SILENT_ROAM */ + +/* Check if the mode supports STA MODE */ +bool dhd_support_sta_mode(dhd_pub_t *dhd) +{ + +#ifdef WL_CFG80211 + if (!(dhd->op_mode & DHD_FLAG_STA_MODE)) + return FALSE; + else +#endif /* WL_CFG80211 */ + return TRUE; +} + +#if defined(KEEP_ALIVE) +int dhd_keep_alive_onoff(dhd_pub_t *dhd) +{ + char buf[32] = {0}; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}}; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int buf_len; + int str_len; + int res = -1; + + if (!dhd_support_sta_mode(dhd)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + str = "mkeep_alive"; + str_len = strlen(str); + strlcpy(buf, str, sizeof(buf)); + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); + mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period; + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + /* Setup keep alive zero for null packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data)); + /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. 
+	 */
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+	return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+#if defined(OEM_ANDROID)
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+/*
+ * SSIDs list parsing from cscan tlv list
+ */
+int
+wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+	uint8 len;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return BCME_BADARG;
+	}
+	str = *list_str;
+	while (*bytes_left > 0) {
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		if (idx >= max) {
+			DHD_ERROR(("%s number of SSIDs exceeds %d\n", __FUNCTION__, idx));
+			return BCME_BADARG;
+		}
+
+		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
+		*bytes_left -= 1;
+		if (*bytes_left == 0) {
+			DHD_ERROR(("%s no length field.\n", __FUNCTION__));
+			return BCME_BADARG;
+		}
+		str += 1;
+		ssid[idx].rssi_thresh = 0;
+		ssid[idx].flags = 0;
+		len = str[0];
+		if (len == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
+		} else if (len <= DOT11_MAX_SSID_LEN) {
+			/* Get proper SSID size */
+			ssid[idx].SSID_len = len;
+			*bytes_left -= 1;
+			/* Get SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s SSID out of buffer range: len=%d but left=%d\n",
+					__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return BCME_BADARG;
+			}
+			str += 1;
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+			ssid[idx].hidden = TRUE;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		} else {
+			DHD_ERROR(("### SSID size more than %d\n", str[0]));
+			return BCME_BADARG;
+		}
+		idx++;
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+#if defined(WL_WIRELESS_EXT)
+/* Android ComboSCAN support */
+
+/*
+ * data parsing from ComboScan tlv list
+ */
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+	int input_size, int *bytes_left)
+{
+	char* str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	/* Clean all dest bytes */
+	memset(dst, 0, dst_size);
+	if (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			/* copy into an aligned temporary, fix endianness, then store */
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ * channel list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+	int channel_num, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + str = *list_str; + + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) { + *list_str = str; + DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */ + *bytes_left -= 1; + str += 1; + + if (str[0] == 0) { + /* All channels */ + channel_list[idx] = 0x0; + } + else { + channel_list[idx] = (uint16)str[0]; + DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx])); + } + *bytes_left -= 1; + str += 1; + + if (idx++ > 255) { + DHD_ERROR(("%s Too many channels \n", __FUNCTION__)); + return -1; + } + } + + *list_str = str; + return idx; +} + +/* Parse a comma-separated list from list_str into ssid array, starting + * at index idx. Max specifies size of the ssid array. Parses ssids + * and returns updated idx; if idx >= max not all fit, the excess have + * not been copied. Returns -1 on empty string, or on ssid too long. + */ +int +wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max) +{ + char* str, *ptr; + + if ((list_str == NULL) || (*list_str == NULL)) + return -1; + + for (str = *list_str; str != NULL; str = ptr) { + + /* check for next TAG */ + if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) { + *list_str = str + strlen(GET_CHANNEL); + return idx; + } + + if ((ptr = strchr(str, ',')) != NULL) { + *ptr++ = '\0'; + } + + if (strlen(str) > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN)); + return -1; + } + + if (strlen(str) == 0) + ssid[idx].SSID_len = 0; + + if (idx < max) { + bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID)); + strlcpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID)); + ssid[idx].SSID_len = sizeof(ssid[idx].SSID); + } + idx++; + } + return idx; +} + +/* + * Parse channel list from iwpriv CSCAN + */ +int +wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num) +{ + int num; + int val; + char* str; + char* endptr = NULL; + + if ((list_str == NULL)||(*list_str == NULL)) + return -1; + + str = *list_str; + num = 0; + while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) { + val = (int)strtoul(str, &endptr, 0); + if (endptr == str) { + printf("could not parse channel number starting at" + " substring \"%s\" in list:\n%s\n", + str, *list_str); + return -1; + } + str = endptr + strspn(endptr, " ,"); + + if (num == channel_num) { + DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n", + channel_num, *list_str)); + return -1; + } + + channel_list[num++] = (uint16)val; + } + *list_str = str; + return num; +} +#endif +#endif /* defined(OEM_ANDROID) */ + +#if defined(BCM_ROUTER_DHD) +static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd, + trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len) +{ + int ret = 0; + uint32 i; + trf_mgmt_filter_t *trf_mgmt_filter; + uint8 dwm_tbl_entry; + uint32 dscp = 0; + uint16 dwm_filter_enabled = 0; + + /* Check parameter length is adequate */ + if (len < (OFFSETOF(trf_mgmt_filter_list_t, filter) + + trf_mgmt_filter_list->num_filters * sizeof(trf_mgmt_filter_t))) { + ret = BCME_BUFTOOSHORT; + return ret; + } + + bzero(&dhd->dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t)); + + for (i = 0; i < trf_mgmt_filter_list->num_filters; i++) { + trf_mgmt_filter = &trf_mgmt_filter_list->filter[i]; + + dwm_filter_enabled = (trf_mgmt_filter->flags & TRF_FILTER_DWM); + + if (dwm_filter_enabled) { + dscp = trf_mgmt_filter->dscp; + if (dscp >= DHD_DWM_TBL_SIZE) { + ret = BCME_BADARG; + 
return ret; + } + } + + dhd->dhd_tm_dwm_tbl.dhd_dwm_enabled = 1; + /* set WMM AC bits */ + dwm_tbl_entry = (uint8) trf_mgmt_filter->priority; + DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry); + + /* set favored bits */ + if (trf_mgmt_filter->flags & TRF_FILTER_FAVORED) + DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry); + + dhd->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp] = dwm_tbl_entry; + } + return ret; +} +#endif /* BCM_ROUTER_DHD */ + +/* Given filename and download type, returns a buffer pointer and length +* for download to f/w. Type can be FW or NVRAM. +* +*/ +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length) + +{ + int ret = BCME_ERROR; + int len = 0; + int file_len; + void *image = NULL; + uint8 *buf = NULL; + + /* Point to cache if available. */ +#ifdef CACHE_FW_IMAGES + if (component == FW) { + if (dhd->cached_fw_length) { + len = dhd->cached_fw_length; + buf = dhd->cached_fw; + } + } else if (component == NVRAM) { + if (dhd->cached_nvram_length) { + len = dhd->cached_nvram_length; + buf = dhd->cached_nvram; + } + } else if (component == CLM_BLOB) { + if (dhd->cached_clm_length) { + len = dhd->cached_clm_length; + buf = dhd->cached_clm; + } + } else if (component == TXCAP_BLOB) { + if (dhd->cached_txcap_length) { + len = dhd->cached_txcap_length; + buf = dhd->cached_txcap; + } + } else { + DHD_ERROR(("%s: Invalid component arg %d\n", + __FUNCTION__, component)); + ret = BCME_BADARG; + return ret; + } +#endif /* CACHE_FW_IMAGES */ + /* No Valid cache found on this call */ + if (!len) { + file_len = *length; + *length = 0; + + if (file_path) { + image = dhd_os_open_image1(dhd, file_path); + if (image == NULL) { + printf("%s: Open image file failed %s\n", __FUNCTION__, file_path); + goto err; + } + } + + buf = MALLOCZ(dhd->osh, file_len); + if (buf == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, file_len)); + goto err; + } + + /* Download image */ +#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI) + if (!image) { + memcpy(buf, nvram_arr, sizeof(nvram_arr)); + len = sizeof(nvram_arr); + } else { + len = dhd_os_get_image_block((char *)buf, file_len, image); + if ((len <= 0 || len > file_len)) { + MFREE(dhd->osh, buf, file_len); + goto err; + } + } +#else + len = dhd_os_get_image_block((char *)buf, file_len, image); + if ((len <= 0 || len > file_len)) { + MFREE(dhd->osh, buf, file_len); + goto err; + } +#endif /* DHD_EFI */ + } + + ret = BCME_OK; + *length = len; + *buffer = (char *)buf; + + /* Cache if first call. 
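+	 * (Once cached, subsequent calls return the same buffer and length,
+	 * which is also why dhd_free_download_buffer() below is a no-op when
+	 * CACHE_FW_IMAGES is defined.)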
*/ +#ifdef CACHE_FW_IMAGES + if (component == FW) { + if (!dhd->cached_fw_length) { + dhd->cached_fw = buf; + dhd->cached_fw_length = len; + } + } else if (component == NVRAM) { + if (!dhd->cached_nvram_length) { + dhd->cached_nvram = buf; + dhd->cached_nvram_length = len; + } + } else if (component == CLM_BLOB) { + if (!dhd->cached_clm_length) { + dhd->cached_clm = buf; + dhd->cached_clm_length = len; + } + } else if (component == TXCAP_BLOB) { + if (!dhd->cached_txcap_length) { + dhd->cached_txcap = buf; + dhd->cached_txcap_length = len; + } + } +#endif /* CACHE_FW_IMAGES */ + +err: + if (image) + dhd_os_close_image1(dhd, image); + + return ret; +} + +int +dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type, + unsigned char *dload_buf, int len) +{ + struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf; + int err = 0; + int dload_data_offset; + static char iovar_buf[WLC_IOCTL_MEDLEN]; + int iovar_len; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + + dload_data_offset = OFFSETOF(wl_dload_data_t, data); + dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag; + dload_ptr->dload_type = dload_type; + dload_ptr->len = htod32(len - dload_data_offset); + dload_ptr->crc = 0; + len = ROUNDUP(len, 8); + + iovar_len = bcm_mkiovar(iovar, (char *)dload_buf, + (uint)len, iovar_buf, sizeof(iovar_buf)); + if (iovar_len == 0) { + DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n", + __FUNCTION__, iovar)); + return BCME_BUFTOOSHORT; + } + + err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf, + iovar_len, IOV_SET, 0); + + return err; +} + +int +dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf, + uint32 len, char *iovar) + +{ + int chunk_len; +#if !defined(LINUX) && !defined(linux) + int cumulative_len = 0; +#endif /* !LINUX && !linux */ + int size2alloc; + unsigned char *new_buf; + int err = 0, data_offset; + uint16 dl_flag = DL_BEGIN; + + data_offset = OFFSETOF(wl_dload_data_t, data); + size2alloc = data_offset + MAX_CHUNK_LEN; + size2alloc = ROUNDUP(size2alloc, 8); + + if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) { + do { +#if !defined(LINUX) && !defined(linux) + if (len >= MAX_CHUNK_LEN) + chunk_len = MAX_CHUNK_LEN; + else + chunk_len = len; + + memcpy(new_buf + data_offset, buf + cumulative_len, chunk_len); + cumulative_len += chunk_len; +#else + chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset), + MAX_CHUNK_LEN, buf); + if (chunk_len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", + __FUNCTION__, chunk_len)); + err = BCME_ERROR; + goto exit; + } +#endif /* !LINUX && !linux */ + if (len - chunk_len == 0) + dl_flag |= DL_END; + + err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM, + new_buf, data_offset + chunk_len); + + dl_flag &= ~DL_BEGIN; + + len = len - chunk_len; + } while ((len > 0) && (err == 0)); +#if !defined(LINUX) && !defined(linux) + MFREE(dhd->osh, new_buf, size2alloc); +#endif /* !LINUX && !linux */ + } else { + err = BCME_NOMEM; + } +#if defined(LINUX) || defined(linux) +exit: + if (new_buf) { + MFREE(dhd->osh, new_buf, size2alloc); + } +#endif /* LINUX || linux */ + return err; +} + +#if defined(CACHE_FW_IMAGES) +int +dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path, + uint32 len, char *iovar) +{ + int ret = BCME_ERROR; + uint chunk_len, size2alloc, data_offset, file_offset; + unsigned char *pay_load, *dnld_buf; + char *memblock; + uint16 dl_flag = DL_BEGIN; + download_type_t dl_type; + + data_offset = 
OFFSETOF(wl_dload_data_t, data); + size2alloc = data_offset + MAX_CHUNK_LEN; + size2alloc = ROUNDUP(size2alloc, 8); + file_offset = 0; + + if ((dnld_buf = MALLOCZ(dhd->osh, size2alloc)) == NULL) { + ret = BCME_NOMEM; + goto exit; + } + pay_load = (dnld_buf + data_offset); + + if (!memcmp("clmload", iovar, strlen("clmload"))) { + dl_type = CLM_BLOB; + } else if (!memcmp("txcapload", iovar, strlen("txcapload"))) { + dl_type = TXCAP_BLOB; + } else { + DHD_ERROR(("%s Invalid iovar :%s \n", __FUNCTION__, iovar)); + ret = BCME_BADARG; + goto exit; + } + + ret = dhd_get_download_buffer(dhd, file_path, dl_type, &memblock, (int *)&len); + if (ret != BCME_OK) { + DHD_ERROR(("%s: error getting buffer for %s, %s \n", __FUNCTION__, + file_path, bcmerrorstr(ret))); + goto exit; + } + + do { + chunk_len = MIN(len, MAX_CHUNK_LEN); + memcpy(pay_load, memblock + file_offset, chunk_len); + if (len - chunk_len == 0) { + dl_flag |= DL_END; + } + + ret = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM, + dnld_buf, data_offset + chunk_len); + + dl_flag &= ~DL_BEGIN; + len = len - chunk_len; + file_offset += chunk_len; + } while ((len > 0) && (ret == 0)); + +exit: + if (dnld_buf) { + MFREE(dhd->osh, dnld_buf, size2alloc); + } + + return ret; +} + +int +dhd_apply_default_txcap(dhd_pub_t *dhd, char *path) +{ + int ret = BCME_ERROR; + ret = dhd_download_blob_cached(dhd, path, MAX_TXCAP_BUF_SIZE, "txcapload"); + if (ret) { + DHD_ERROR(("%s: error downloading blob: %s \n", __FUNCTION__, bcmerrorstr(ret))); + } + return ret; +} + +int +dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path) +{ + char *clm_blob_path; + int len; + unsigned char *imgbuf = NULL; + int err = BCME_OK; + char iovbuf[WLC_IOCTL_SMLEN]; + wl_country_t *cspec; + + if (clm_path[0] != '\0') { + if (strlen(clm_path) > MOD_PARAM_PATHLEN) { + DHD_ERROR(("clm path exceeds max len\n")); + return BCME_ERROR; + } + clm_blob_path = clm_path; + DHD_TRACE(("clm path from module param:%s\n", clm_path)); + } else { + clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH; + } + + /* If CLM blob file is found on the filesystem, download the file. + * After CLM file download or If the blob file is not present, + * validate the country code before proceeding with the initialization. + * If country code is not valid, fail the initialization. + */ + + imgbuf = dhd_os_open_image((char *)clm_blob_path); + if (imgbuf == NULL) { + goto exit; + } + + len = dhd_os_get_image_size(imgbuf); + + if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) { + len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf)); + if (len == 0) { + err = BCME_BUFTOOSHORT; + goto exit; + } + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); + if (err) { + DHD_ERROR(("%s: country code get failed\n", __FUNCTION__)); + goto exit; + } + + cspec = (wl_country_t *)iovbuf; + if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) != 0) { + DHD_ERROR(("%s: CLM already exist in F/W, " + "new CLM data will be added to the end of existing CLM data!\n", + __FUNCTION__)); + } + + /* Found blob file. 
Download the file */
+		DHD_ERROR(("clm file download from %s \n", clm_blob_path));
+		if (imgbuf) {
+			dhd_os_close_image(imgbuf);
+			imgbuf = NULL;
+		}
+		err = dhd_download_blob_cached(dhd, clm_blob_path, MAX_CLM_BUF_SIZE, "clmload");
+		if (err) {
+			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
+			if (!dhd_bus_skip_clm(dhd)) {
+				/* Retrieve clmload_status and print */
+				len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf,
+					sizeof(iovbuf));
+				if (len == 0) {
+					err = BCME_BUFTOOSHORT;
+					goto exit;
+				}
+				err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf,
+					sizeof(iovbuf), FALSE, 0);
+				if (err) {
+					DHD_ERROR(("%s: clmload_status get failed err=%d \n",
+						__FUNCTION__, err));
+				} else {
+					DHD_ERROR(("%s: clmload_status: %d \n",
+						__FUNCTION__, *((int *)iovbuf)));
+					if (*((int *)iovbuf) == CHIPID_MISMATCH) {
+						DHD_ERROR(("Chip ID mismatch error \n"));
+					}
+				}
+				err = BCME_ERROR;
+				goto exit;
+			}
+		} else {
+			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
+		}
+	} else {
+		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf));
+#ifdef DHD_USE_CLMINFO_PARSER
+		err = BCME_ERROR;
+		goto exit;
+#endif /* DHD_USE_CLMINFO_PARSER */
+	}
+
+	/* Verify country code */
+	len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
+	if (len == 0) {
+		err = BCME_BUFTOOSHORT;
+		goto exit;
+	}
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	if (err) {
+		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
+		goto exit;
+	}
+
+	cspec = (wl_country_t *)iovbuf;
+	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
+		/* Country code not initialized or CLM download not proper */
+		DHD_ERROR(("country code not initialized\n"));
+		err = BCME_ERROR;
+	}
+exit:
+
+	if (imgbuf) {
+		dhd_os_close_image(imgbuf);
+	}
+
+	return err;
+}
+#else
+
+int
+dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
+{
+	return 0;
+}
+
+int
+dhd_check_current_clm_data(dhd_pub_t *dhd)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	wl_country_t *cspec;
+	int err = BCME_OK;
+
+	memset(iovbuf, 0, sizeof(iovbuf));
+	err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
+	if (err == 0) {
+		err = BCME_BUFTOOSHORT;
+		DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
+		return err;
+	}
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	if (err) {
+		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
+		return err;
+	}
+	cspec = (wl_country_t *)iovbuf;
+	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
+		DHD_ERROR(("%s: ----- This FW does not include CLM data -----\n",
+			__FUNCTION__));
+		return FALSE;
+	}
+	DHD_ERROR(("%s: ----- This FW includes CLM data -----\n",
+		__FUNCTION__));
+	return TRUE;
+}
+
+int
+dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
+{
+	char *clm_blob_path;
+	int len;
+	char *memblock = NULL;
+	int err = BCME_OK;
+	char iovbuf[WLC_IOCTL_SMLEN];
+	int status = FALSE;
+
+	if (clm_path && clm_path[0] != '\0') {
+		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
+			DHD_ERROR(("clm path exceeds max len\n"));
+			return BCME_ERROR;
+		}
+		clm_blob_path = clm_path;
+		DHD_TRACE(("clm path from module param:%s\n", clm_path));
+	} else {
+		clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
+	}
+
+	/* If the CLM blob file is found on the filesystem, download the file.
+	 * After the CLM file download, or if the blob file is not present,
+	 * validate the country code before proceeding with the initialization.
+	 * If the country code is not valid, fail the initialization.
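+	 * In short: reading back a null country code after this point means no
+	 * CLM data is present, so initialization must fail.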
+ */ +#if !defined(LINUX) && !defined(linux) + len = MAX_CLM_BUF_SIZE; + dhd_get_download_buffer(dhd, clm_blob_path, CLM_BLOB, &memblock, &len); +#else + memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path); + if (memblock == NULL) { + printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (dhd->is_blob) { + err = BCME_ERROR; + } else { + status = dhd_check_current_clm_data(dhd); + if (status == TRUE) { + err = BCME_OK; + } else { + err = status; + } + } +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + goto exit; + } + + len = dhd_os_get_image_size(memblock); +#endif /* !LINUX && !linux */ + + if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) { + status = dhd_check_current_clm_data(dhd); + if (status == TRUE) { +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (dhd->op_mode != DHD_FLAG_MFG_MODE) { + if (dhd->is_blob) { + err = BCME_ERROR; + } + goto exit; + } +#else + DHD_ERROR(("%s: CLM already exist in F/W, " + "new CLM data will be added to the end of existing CLM data!\n", + __FUNCTION__)); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + } else if (status != FALSE) { + err = status; + goto exit; + } + + /* Found blob file. Download the file */ + DHD_TRACE(("clm file download from %s \n", clm_blob_path)); + err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload"); + if (err) { + DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err)); + /* Retrieve clmload_status and print */ + memset(iovbuf, 0, sizeof(iovbuf)); + len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf)); + if (len == 0) { + err = BCME_BUFTOOSHORT; + goto exit; + } + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); + if (err) { + DHD_ERROR(("%s: clmload_status get failed err=%d \n", + __FUNCTION__, err)); + } else { + DHD_ERROR(("%s: clmload_status: %d \n", + __FUNCTION__, *((int *)iovbuf))); + if (*((int *)iovbuf) == CHIPID_MISMATCH) { + DHD_ERROR(("Chip ID mismatch error \n")); + } + } + err = BCME_ERROR; + goto exit; + } else { + DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__)); + } + } else { + DHD_INFO(("Skipping the clm download. 
len:%d memblk:%p \n", len, memblock)); + } + + /* Verify country code */ + status = dhd_check_current_clm_data(dhd); + + if (status != TRUE) { + /* Country code not initialized or CLM download not proper */ + DHD_ERROR(("country code not initialized\n")); + err = status; + } +exit: + + if (memblock) { +#if defined(LINUX) || defined(linux) + dhd_os_close_image1(dhd, memblock); +#else + dhd_free_download_buffer(dhd, memblock, MAX_CLM_BUF_SIZE); +#endif /* LINUX || linux */ + } + + return err; +} +#endif /* defined(CACHE_FW_IMAGES) */ + +void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length) +{ +#ifdef CACHE_FW_IMAGES + return; +#endif + MFREE(dhd->osh, buffer, length); +} + +#ifdef REPORT_FATAL_TIMEOUTS +void +init_dhd_timeouts(dhd_pub_t *pub) +{ + pub->timeout_info = MALLOC(pub->osh, sizeof(timeout_info_t)); + if (pub->timeout_info == NULL) { + DHD_ERROR(("%s: Failed to alloc timeout_info\n", __FUNCTION__)); + } else { + DHD_INFO(("Initializing dhd_timeouts\n")); + pub->timeout_info->scan_timer_lock = osl_spin_lock_init(pub->osh); + pub->timeout_info->join_timer_lock = osl_spin_lock_init(pub->osh); + pub->timeout_info->bus_timer_lock = osl_spin_lock_init(pub->osh); + pub->timeout_info->cmd_timer_lock = osl_spin_lock_init(pub->osh); + pub->timeout_info->scan_timeout_val = SCAN_TIMEOUT_DEFAULT; + pub->timeout_info->join_timeout_val = JOIN_TIMEOUT_DEFAULT; + pub->timeout_info->cmd_timeout_val = CMD_TIMEOUT_DEFAULT; + pub->timeout_info->bus_timeout_val = BUS_TIMEOUT_DEFAULT; + pub->timeout_info->scan_timer_active = FALSE; + pub->timeout_info->join_timer_active = FALSE; + pub->timeout_info->cmd_timer_active = FALSE; + pub->timeout_info->bus_timer_active = FALSE; + pub->timeout_info->cmd_join_error = FALSE; + pub->timeout_info->cmd_request_id = 0; + OSL_ATOMIC_SET(pub->osh, &pub->set_ssid_rcvd, FALSE); + } +} + +void +deinit_dhd_timeouts(dhd_pub_t *pub) +{ + /* stop the join, scan bus, cmd timers + * as failing to do so may cause a kernel panic if + * an rmmod is done + */ + if (!pub->timeout_info) { + DHD_ERROR(("%s timeout_info pointer is NULL\n", __FUNCTION__)); + ASSERT(0); + return; + } + if (dhd_stop_scan_timer(pub, FALSE, 0)) { + DHD_ERROR(("%s dhd_stop_scan_timer failed\n", __FUNCTION__)); + ASSERT(0); + } + if (dhd_stop_bus_timer(pub)) { + DHD_ERROR(("%s dhd_stop_bus_timer failed\n", __FUNCTION__)); + ASSERT(0); + } + if (dhd_stop_cmd_timer(pub)) { + DHD_ERROR(("%s dhd_stop_cmd_timer failed\n", __FUNCTION__)); + ASSERT(0); + } + if (dhd_stop_join_timer(pub)) { + DHD_ERROR(("%s dhd_stop_join_timer failed\n", __FUNCTION__)); + ASSERT(0); + } + + osl_spin_lock_deinit(pub->osh, pub->timeout_info->scan_timer_lock); + osl_spin_lock_deinit(pub->osh, pub->timeout_info->join_timer_lock); + osl_spin_lock_deinit(pub->osh, pub->timeout_info->bus_timer_lock); + osl_spin_lock_deinit(pub->osh, pub->timeout_info->cmd_timer_lock); + MFREE(pub->osh, pub->timeout_info, sizeof(timeout_info_t)); +} + +static void +dhd_cmd_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ASSERT(0); + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags); + if (pub->timeout_info && pub->timeout_info->cmd_timer_active) { + DHD_ERROR(("\nERROR COMMAND TIMEOUT TO:%d\n", pub->timeout_info->cmd_timeout_val)); + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); +#ifdef PCIE_OOB + /* Assert device_wake so that UART_Rx is available */ + if (dhd_bus_set_device_wake(pub->bus, TRUE)) { 
+ DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__)); + ASSERT(0); + } +#endif /* PCIE_OOB */ + if (dhd_stop_cmd_timer(pub)) { + DHD_ERROR(("%s: dhd_stop_cmd_timer() failed\n", __FUNCTION__)); + ASSERT(0); + } + dhd_wakeup_ioctl_event(pub, IOCTL_RETURN_ON_ERROR); + if (!dhd_query_bus_erros(pub)) + dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_COMMAND_TO); + } else { + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); + } +} + +int +dhd_start_cmd_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + uint32 cmd_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit_null; + } + DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags); + cmd_to_ms = pub->timeout_info->cmd_timeout_val; + + if (pub->timeout_info->cmd_timeout_val == 0) { + /* Disable Command timer timeout */ + DHD_INFO(("DHD: Command Timeout Disabled\n")); + goto exit; + } + if (pub->timeout_info->cmd_timer_active) { + DHD_ERROR(("%s:Timer already active\n", __FUNCTION__)); + ret = BCME_ERROR; + ASSERT(0); + } else { + pub->timeout_info->cmd_timer = osl_timer_init(pub->osh, + "cmd_timer", dhd_cmd_timeout, pub); + osl_timer_update(pub->osh, pub->timeout_info->cmd_timer, + cmd_to_ms, 0); + pub->timeout_info->cmd_timer_active = TRUE; + } + if (ret == BCME_OK) { + DHD_INFO(("%s Cmd Timer started\n", __FUNCTION__)); + } +exit: + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); +exit_null: + return ret; +} + +int +dhd_stop_cmd_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + + if (!pub) { + DHD_ERROR(("DHD: pub NULL\n")); + ASSERT(0); + return BCME_ERROR; + } + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit; + } + DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags); + + if (pub->timeout_info->cmd_timer_active) { + osl_timer_del(pub->osh, pub->timeout_info->cmd_timer); + pub->timeout_info->cmd_timer_active = FALSE; + } + else { + DHD_INFO(("DHD: CMD timer is not active\n")); + } + if (ret == BCME_OK) { + DHD_INFO(("%s Cmd Timer Stopped\n", __FUNCTION__)); + } + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); +exit: + return ret; +} + +static int +__dhd_stop_join_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + if (!pub) { + DHD_ERROR(("DHD: pub NULL\n")); + ASSERT(0); + return BCME_ERROR; + } + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ASSERT(0); + return BCME_ERROR; + } + + if (pub->timeout_info->join_timer_active) { + osl_timer_del(pub->osh, pub->timeout_info->join_timer); + pub->timeout_info->join_timer_active = FALSE; + DHD_INFO(("%s join timer stopped\n", __FUNCTION__)); + } else { + DHD_INFO(("%s join timer is not active\n", __FUNCTION__)); + } + + return ret; +} + +static void +dhd_join_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (!pub->timeout_info) { + DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__)); + ASSERT(0); + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags); + if (pub->timeout_info->join_timer_active) { + if (__dhd_stop_join_timer(pub)) { + DHD_ERROR(("%s: __dhd_stop_join_timer() failed\n", __FUNCTION__)); + ASSERT(0); + } + if (pub->timeout_info->cmd_join_error) { + DHD_ERROR(("\n%s ERROR JOIN TIMEOUT TO:%d:0x%x\n", __FUNCTION__, + pub->timeout_info->join_timeout_val, + pub->timeout_info->cmd_join_error)); + if (!dhd_query_bus_erros(pub)) { + 
dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_JOIN_TO); + } + pub->timeout_info->cmd_join_error = 0; + } + } + DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); +} + +int +dhd_start_join_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + uint32 join_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + ASSERT(0); + goto exit; + } + + join_to_ms = pub->timeout_info->join_timeout_val; + DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags); + if (pub->timeout_info->join_timer_active) { + DHD_ERROR(("%s: stopping active timer\n", __FUNCTION__)); + __dhd_stop_join_timer(pub); + } + if (pub->timeout_info->join_timeout_val == 0) { + /* Disable Join timer timeout */ + DHD_INFO(("%s DHD: join timeout disabled\n", __FUNCTION__)); + } else { + pub->timeout_info->join_timer = osl_timer_init(pub->osh, + "join_timer", dhd_join_timeout, pub); + osl_timer_update(pub->osh, pub->timeout_info->join_timer, join_to_ms, 0); + pub->timeout_info->join_timer_active = TRUE; + pub->timeout_info->cmd_join_error = 0; + dhd_set_join_error(pub, WLC_SSID_MASK); + if (pub->secure_join) { + dhd_set_join_error(pub, WLC_WPA_MASK); + } + DHD_ERROR(("%s: join timer started 0x%x\n", __FUNCTION__, + pub->timeout_info->cmd_join_error)); + } + DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); +exit: + return ret; +} + +int +dhd_stop_join_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags; + + if (!pub) { + DHD_ERROR(("%s DHD: pub NULL\n", __FUNCTION__)); + ASSERT(0); + return BCME_ERROR; + } + + DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags); + ret = __dhd_stop_join_timer(pub); + DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); + return ret; +} + +static void +dhd_set_join_error(dhd_pub_t *pub, uint32 mask) +{ + DHD_INFO(("Setting join Error %d\n", mask)); + if (pub->timeout_info) { + pub->timeout_info->cmd_join_error |= mask; + } +} + +void +dhd_clear_join_error(dhd_pub_t *pub, uint32 mask) +{ + unsigned long flags; + + DHD_INFO(("%s clear join error %d\n", __FUNCTION__, mask)); + if (!(pub->timeout_info)) { + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags); + pub->timeout_info->cmd_join_error &= ~mask; + /* If both WLC_SSID_MASK, WLC_WPA_MASK are received cancel the timer */ + if (!(pub->timeout_info->cmd_join_error)) { + if (__dhd_stop_join_timer(pub)) { + DHD_ERROR(("%s: dhd_stop_join_timer failed\n", __FUNCTION__)); + ASSERT(0); + } + } + DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); +} + +static void +dhd_scan_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (!pub) { + DHD_ERROR(("DHD: pub NULL\n")); + ASSERT(0); + return; + } + + if (pub->timeout_info == NULL) { + DHD_ERROR(("timeout_info pointer is NULL\n")); + ASSERT(0); + return; + } + DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags); + if (pub->timeout_info->scan_timer_active) { + DHD_ERROR(("\nERROR SCAN TIMEOUT TO:%d\n", pub->timeout_info->scan_timeout_val)); + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); + dhd_stop_scan_timer(pub, FALSE, 0); + if (!dhd_query_bus_erros(pub)) + dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_SCAN_TO); + } else { + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); + } +} + +int +dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan) +{ + int ret = BCME_OK; + unsigned long flags = 0; + uint32 scan_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info 
NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit_null; + } + DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags); + scan_to_ms = pub->timeout_info->scan_timeout_val; + + if (is_escan) { + if (pub->timeout_info->escan_aborted && + pub->esync_id == pub->timeout_info->abort_syncid) { + pub->timeout_info->escan_aborted = FALSE; + DHD_INFO(("%s: escan already aborted, do not start timer \n", + __FUNCTION__)); + goto exit; + } + pub->timeout_info->escan_syncid = pub->esync_id; + } else { + pub->timeout_info->escan_syncid = 0; + } + + if (pub->timeout_info->scan_timer_active) { + /* cancel any earlier running timer */ + DHD_INFO(("%s:Timer already active, stopping it.\n", __FUNCTION__)); + osl_timer_del(pub->osh, pub->timeout_info->scan_timer); + pub->timeout_info->scan_timer_active = FALSE; + } + + if (pub->timeout_info->scan_timeout_val == 0) { + /* Disable Scan timer timeout */ + DHD_INFO(("DHD: Scan Timeout Disabled\n")); + } else { + pub->timeout_info->scan_timer = osl_timer_init(pub->osh, "scan_timer", + dhd_scan_timeout, pub); + pub->timeout_info->scan_timer_active = TRUE; + osl_timer_update(pub->osh, pub->timeout_info->scan_timer, scan_to_ms, 0); + DHD_INFO(("%s Scan Timer started\n", __FUNCTION__)); + } + +exit: + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); +exit_null: + return ret; +} + +int +dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id) +{ + int ret = BCME_OK; + unsigned long flags = 0; + + if (!pub) { + DHD_ERROR(("DHD: pub NULL\n")); + ASSERT(0); + return BCME_ERROR; + } + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit_null; + } + + DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags); + + if (pub->timeout_info->scan_timer_active) { + if (is_escan) { + if (sync_id == pub->timeout_info->escan_syncid) { + osl_timer_del(pub->osh, pub->timeout_info->scan_timer); + pub->timeout_info->scan_timer_active = FALSE; + DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__)); + } + } else { + osl_timer_del(pub->osh, pub->timeout_info->scan_timer); + pub->timeout_info->scan_timer_active = FALSE; + DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__)); + } + + } else { + DHD_INFO(("DHD: SCAN timer is not active\n")); + } + + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); + +exit_null: + return ret; +} + +static void +dhd_bus_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (pub->timeout_info == NULL) { + DHD_ERROR(("timeout_info pointer is NULL\n")); + ASSERT(0); + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags); + if (pub->timeout_info && pub->timeout_info->bus_timer_active) { + DHD_ERROR(("\nERROR BUS TIMEOUT TO:%d\n", pub->timeout_info->bus_timeout_val)); + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags); +#ifdef PCIE_OOB + /* Assert device_wake so that UART_Rx is available */ + if (dhd_bus_set_device_wake(pub->bus, TRUE)) { + DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__)); + ASSERT(0); + } +#endif /* PCIE_OOB */ + if (dhd_stop_bus_timer(pub)) { + DHD_ERROR(("%s: dhd_stop_bus_timer() failed\n", __FUNCTION__)); + ASSERT(0); + } + if (!dhd_query_bus_erros(pub)) { + dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_OQS_TO); + } +#ifdef BCMPCIE + dhd_msgbuf_iovar_timeout_dump(pub); +#endif /* BCMPCIE */ + } else { + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags); + } +} + +int +dhd_start_bus_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; 
+ uint32 bus_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit_null; + } + DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags); + bus_to_ms = pub->timeout_info->bus_timeout_val; + + if (pub->timeout_info->bus_timeout_val == 0) { + /* Disable Bus timer timeout */ + DHD_INFO(("DHD: Bus Timeout Disabled\n")); + goto exit; + } + if (pub->timeout_info->bus_timer_active) { + DHD_ERROR(("%s:Timer already active\n", __FUNCTION__)); + ret = BCME_ERROR; + ASSERT(0); + } else { + pub->timeout_info->bus_timer = osl_timer_init(pub->osh, + "bus_timer", dhd_bus_timeout, pub); + pub->timeout_info->bus_timer_active = TRUE; + osl_timer_update(pub->osh, pub->timeout_info->bus_timer, bus_to_ms, 0); + } + if (ret == BCME_OK) { + DHD_INFO(("%s: BUS Timer started\n", __FUNCTION__)); + } +exit: + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags); +exit_null: + return ret; +} + +int +dhd_stop_bus_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags; + + if (!pub) { + DHD_ERROR(("DHD: pub NULL\n")); + ASSERT(0); + return BCME_ERROR; + } + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit; + } + + DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags); + + if (pub->timeout_info->bus_timer_active) { + osl_timer_del(pub->osh, pub->timeout_info->bus_timer); + pub->timeout_info->bus_timer_active = FALSE; + } + else { + DHD_INFO(("DHD: BUS timer is not active\n")); + } + if (ret == BCME_OK) { + DHD_INFO(("%s: Bus Timer Stopped\n", __FUNCTION__)); + } + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags); +exit: + return ret; +} + +int +dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd) +{ + DHD_INFO(("%s: id:%d\n", __FUNCTION__, id)); + if (pub->timeout_info) { + pub->timeout_info->cmd_request_id = id; + pub->timeout_info->cmd = cmd; + return BCME_OK; + } else { + return BCME_ERROR; + } +} + +uint16 +dhd_get_request_id(dhd_pub_t *pub) +{ + if (pub->timeout_info) { + return (pub->timeout_info->cmd_request_id); + } else { + return 0; + } +} + +void +dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if (pub->timeout_info) { + *to_val = pub->timeout_info->scan_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting scan TO val:%d\n", to_val)); + pub->timeout_info->scan_timeout_val = to_val; + } +} + +void +dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if (pub->timeout_info) { + *to_val = pub->timeout_info->join_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting join TO val:%d\n", to_val)); + pub->timeout_info->join_timeout_val = to_val; + } +} + +void +dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if (pub->timeout_info) { + *to_val = pub->timeout_info->cmd_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting cmd TO val:%d\n", to_val)); + pub->timeout_info->cmd_timeout_val = to_val; + } +} + +void +dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if (pub->timeout_info) { + *to_val = pub->timeout_info->bus_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting bus TO val:%d\n", to_val)); + 
pub->timeout_info->bus_timeout_val = to_val; + } +} +#endif /* REPORT_FATAL_TIMEOUTS */ + +#ifdef SHOW_LOGTRACE +int +dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, + dhd_event_log_t *event_log) +{ + uint32 *lognums = NULL; + char *logstrs = NULL; + logstr_trailer_t *trailer = NULL; + int ram_index = 0; + char **fmts = NULL; + int num_fmts = 0; + bool match_fail = TRUE; + int32 i = 0; + uint8 *pfw_id = NULL; + uint32 fwid = 0; + void *file = NULL; + int file_len = 0; + char fwid_str[FWID_STR_LEN]; + uint32 hdr_logstrs_size = 0; + + /* Read last three words in the logstrs.bin file */ + trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size - + sizeof(logstr_trailer_t)); + + if (trailer->log_magic == LOGSTRS_MAGIC) { + /* + * logstrs.bin has a header. + */ + if (trailer->version == 1) { + logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts + + logstrs_size - sizeof(logstr_header_v1_t)); + DHD_INFO(("%s: logstr header version = %u\n", + __FUNCTION__, hdr_v1->version)); + num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32); + ram_index = (hdr_v1->ram_lognums_offset - + hdr_v1->rom_lognums_offset) / sizeof(uint32); + lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset]; + logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset]; + hdr_logstrs_size = hdr_v1->logstrs_size; + } else if (trailer->version == 2) { + logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size - + sizeof(logstr_header_t)); + DHD_INFO(("%s: logstr header version = %u; flags = %x\n", + __FUNCTION__, hdr->version, hdr->flags)); + + /* For ver. 2 of the header, need to match fwid of + * both logstrs.bin and fw bin + */ + + /* read the FWID from fw bin */ + file = dhd_os_open_image1(NULL, st_str_file_path); + if (!file) { + DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__)); + goto error; + } + file_len = dhd_os_get_image_size(file); + if (file_len <= 0) { + DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__)); + goto error; + } + /* fwid is at the end of fw bin in string format */ + if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) { + DHD_ERROR(("%s: can't seek file \n", __FUNCTION__)); + goto error; + } + + memset(fwid_str, 0, sizeof(fwid_str)); + if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) { + DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__)); + goto error; + } + pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1, + FWID_STR_1, strlen(FWID_STR_1)); + if (!pfw_id) { + pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1, + FWID_STR_2, strlen(FWID_STR_2)); + if (!pfw_id) { + DHD_ERROR(("%s: could not find id in FW bin!\n", + __FUNCTION__)); + goto error; + } + } + /* search for the '-' in the fw id str, after which the + * actual 4 byte fw id is present + */ + while (pfw_id && *pfw_id != '-') { + ++pfw_id; + } + ++pfw_id; + fwid = bcm_strtoul((char *)pfw_id, NULL, 16); + + /* check if fw id in logstrs.bin matches the fw one */ + if (hdr->fw_id != fwid) { + DHD_ERROR(("%s: logstr id does not match FW!" 
+ "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n", + __FUNCTION__, hdr->fw_id, fwid)); + goto error; + } + + match_fail = FALSE; + num_fmts = hdr->rom_logstrs_offset / sizeof(uint32); + ram_index = (hdr->ram_lognums_offset - + hdr->rom_lognums_offset) / sizeof(uint32); + lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset]; + logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset]; + hdr_logstrs_size = hdr->logstrs_size; + +error: + if (file) { + dhd_os_close_image1(NULL, file); + } + if (match_fail) { + return BCME_DECERR; + } + } else { + DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__, + trailer->version)); + return BCME_ERROR; + } + if (logstrs_size != hdr_logstrs_size) { + DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size)); + return BCME_ERROR; + } + } else { + /* + * Legacy logstrs.bin format without header. + */ + num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32); + + /* Legacy RAM-only logstrs.bin format: + * - RAM 'lognums' section + * - RAM 'logstrs' section. + * + * 'lognums' is an array of indexes for the strings in the + * 'logstrs' section. The first uint32 is an index to the + * start of 'logstrs'. Therefore, if this index is divided + * by 'sizeof(uint32)' it provides the number of logstr + * entries. + */ + ram_index = 0; + lognums = (uint32 *) raw_fmts; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + } + if (num_fmts) { + if (event_log->fmts != NULL) { + fmts = event_log->fmts; /* reuse existing malloced fmts */ + } else { + fmts = MALLOC(osh, num_fmts * sizeof(char *)); + } + } + if (fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__)); + return BCME_ERROR; + } + event_log->fmts_size = num_fmts * sizeof(char *); + + for (i = 0; i < num_fmts; i++) { + /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base + * (they are 0-indexed relative to 'rom_logstrs_offset'). + * + * RAM lognums are already indexed to point to the correct RAM logstrs (they + * are 0-indexed relative to the start of the logstrs.bin file). 
+		 */
+		if (i == ram_index) {
+			logstrs = raw_fmts;
+		}
+		fmts[i] = &logstrs[lognums[i]];
+	}
+	event_log->fmts = fmts;
+	event_log->raw_fmts_size = logstrs_size;
+	event_log->raw_fmts = raw_fmts;
+	event_log->num_fmts = num_fmts;
+	return BCME_OK;
+} /* dhd_parse_logstrs_file */
+
+int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
+	uint32 *rodata_end)
+{
+	char *raw_fmts = NULL, *raw_fmts_loc = NULL;
+	uint32 read_size = READ_NUM_BYTES;
+	int error = 0;
+	char * cptr = NULL;
+	char c;
+	uint8 count = 0;
+
+	*ramstart = 0;
+	*rodata_start = 0;
+	*rodata_end = 0;
+
+	/* Allocate 1 byte more than read_size to terminate it with NULL */
+	raw_fmts = MALLOCZ(osh, read_size + 1);
+	if (raw_fmts == NULL) {
+		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* read the ramstart, rodata_start and rodata_end values from the map file */
+	while (count != ALL_MAP_VAL)
+	{
+		error = dhd_os_read_file(file, raw_fmts, read_size);
+		if (error < 0) {
+			DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
+				error));
+			goto fail;
+		}
+
+		/* NUL-terminate raw_fmts, as strstr expects NUL-terminated strings */
+		raw_fmts[read_size] = '\0';
+
+		/* Get ramstart address */
+		raw_fmts_loc = raw_fmts;
+		if (!(count & RAMSTART_BIT) &&
+			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
+			strlen(ramstart_str)))) {
+			cptr = cptr - BYTES_AHEAD_NUM;
+			sscanf(cptr, "%x %c text_start", ramstart, &c);
+			count |= RAMSTART_BIT;
+		}
+
+		/* Get ram rodata start address */
+		raw_fmts_loc = raw_fmts;
+		if (!(count & RDSTART_BIT) &&
+			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
+			strlen(rodata_start_str)))) {
+			cptr = cptr - BYTES_AHEAD_NUM;
+			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
+			count |= RDSTART_BIT;
+		}
+
+		/* Get ram rodata end address */
+		raw_fmts_loc = raw_fmts;
+		if (!(count & RDEND_BIT) &&
+			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
+			strlen(rodata_end_str)))) {
+			cptr = cptr - BYTES_AHEAD_NUM;
+			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
+			count |= RDEND_BIT;
+		}
+
+		if (error < (int)read_size) {
+			/*
+			 * A short read means we have reached EOF, so stop here.
+			 * (Because the file position is rewound by
+			 * GO_BACK_FILE_POS_NUM_BYTES after every full read, a
+			 * string that is spread across two reads is never missed.)
+			 */
+			break;
+		}
+		memset(raw_fmts, 0, read_size);
+		/*
+		 * go back by a predefined number of bytes so that we won't miss
+		 * the string and address even if they are split across the next read.
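+		 * Each read therefore overlaps the previous one by that many
+		 * bytes, so any token shorter than the overlap is seen whole
+		 * in at least one read.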
+ */ + dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES); + } + +fail: + if (raw_fmts) { + MFREE(osh, raw_fmts, read_size + 1); + raw_fmts = NULL; + } + if (count == ALL_MAP_VAL) { + return BCME_OK; + } + else { + DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__, + count)); + return BCME_ERROR; + } + +} /* dhd_parse_map_file */ + +#ifdef PCIE_FULL_DONGLE +int +dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf, + dhd_event_log_t *event_data) +{ + uint32 infobuf_version; + info_buf_payload_hdr_t *payload_hdr_ptr; + uint16 payload_hdr_type; + uint16 payload_hdr_length; + + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) { + DHD_ERROR(("%s: infobuf too small for version field\n", + __FUNCTION__)); + goto exit; + } + infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf)); + PKTPULL(dhdp->osh, pktbuf, sizeof(uint32)); + if (infobuf_version != PCIE_INFOBUF_V1) { + DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n", + __FUNCTION__, infobuf_version)); + goto exit; + } + + /* Version 1 infobuf has a single type/length (and then value) field */ + if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) { + DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n", + __FUNCTION__)); + goto exit; + } + /* Process/parse the common info payload header (type/length) */ + payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf); + payload_hdr_type = ltoh16(payload_hdr_ptr->type); + payload_hdr_length = ltoh16(payload_hdr_ptr->length); + if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) { + DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n", + __FUNCTION__, payload_hdr_type)); + goto exit; + } + PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t)); + + /* Validate that the specified length isn't bigger than the + * provided data. 
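+	 * (payload_hdr_length comes from the packet itself, so it must be
+	 * bounded by the remaining packet length before it is used as a
+	 * parse limit.)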
+ */ + if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) { + DHD_ERROR(("%s: infobuf logtrace length is bigger" + " than actual buffer data\n", __FUNCTION__)); + goto exit; + } + dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf), + event_data, payload_hdr_length); + + return BCME_OK; + +exit: + return BCME_ERROR; +} /* dhd_event_logtrace_infobuf_pkt_process */ +#endif /* PCIE_FULL_DONGLE */ +#endif /* SHOW_LOGTRACE */ + +#ifdef BTLOG +int +dhd_bt_log_pkt_process(dhd_pub_t *dhdp, void *pktbuf) +{ + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + dhd_dbg_bt_log_handler(dhdp, + PKTDATA(dhdp->osh, pktbuf), PKTLEN(dhdp->osh, pktbuf)); + + return BCME_OK; +} +#endif /* BTLOG */ + +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + +/* To handle the TDLS event in the dhd_common.c + */ +int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event) +{ + int ret = BCME_OK; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() + ret = dhd_tdls_update_peer_info(dhd_pub, event); + GCC_DIAGNOSTIC_POP() + + return ret; +} + +int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub) +{ + tdls_peer_node_t *cur = NULL, *prev = NULL; + if (!dhd_pub) + return BCME_ERROR; + cur = dhd_pub->peer_tbl.node; + + if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count) + return BCME_ERROR; + + while (cur != NULL) { + prev = cur; + cur = cur->next; + MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t)); + } + dhd_pub->peer_tbl.tdls_peer_count = 0; + dhd_pub->peer_tbl.node = NULL; + return BCME_OK; +} +#endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ + +/* pretty hex print a contiguous buffer +* based on the debug level specified +*/ +void +dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level) +{ + char line[128], *p; + int len = sizeof(line); + int nchar; + uint i; + + if (msg && (msg[0] != '\0')) { + if (dbg_level == DHD_ERROR_VAL) + DHD_ERROR(("%s:\n", msg)); + else if (dbg_level == DHD_INFO_VAL) + DHD_INFO(("%s:\n", msg)); + else if (dbg_level == DHD_TRACE_VAL) + DHD_TRACE(("%s:\n", msg)); + } + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + nchar = snprintf(p, len, " %04x: ", i); /* line prefix */ + p += nchar; + len -= nchar; + } + if (len > 0) { + nchar = snprintf(p, len, "%02x ", buf[i]); + p += nchar; + len -= nchar; + } + + if (i % 16 == 15) { + /* flush line */ + if (dbg_level == DHD_ERROR_VAL) + DHD_ERROR(("%s:\n", line)); + else if (dbg_level == DHD_INFO_VAL) + DHD_INFO(("%s:\n", line)); + else if (dbg_level == DHD_TRACE_VAL) + DHD_TRACE(("%s:\n", line)); + p = line; + len = sizeof(line); + } + } + + /* flush last partial line */ + if (p != line) { + if (dbg_level == DHD_ERROR_VAL) + DHD_ERROR(("%s:\n", line)); + else if (dbg_level == DHD_INFO_VAL) + DHD_INFO(("%s:\n", line)); + else if (dbg_level == DHD_TRACE_VAL) + DHD_TRACE(("%s:\n", line)); + } +} + +int +dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data) +{ + struct ether_header ether_hdr; + tput_pkt_t tput_pkt; + void *pkt = NULL; + uint8 *pktdata = NULL; + uint32 pktsize = 0; + uint64 total_size = 0; + uint32 *crc = 0; + uint32 pktid = 0; + uint32 total_num_tx_pkts = 0; + int err = 0, err_exit = 0; + uint32 i = 0; + uint64 time_taken = 0; + int max_txbufs = 0; + uint32 n_batches = 0; + uint32 n_remain = 0; + uint8 tput_pkt_hdr_size = 0; + bool batch_cnt = FALSE; + bool tx_stop_pkt = FALSE; + +#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC) + uint32 cur_intr_poll_period = 0; + cur_intr_poll_period = dhd_os_get_intr_poll_period(); + /* before running tput_test, set 
interrupt poll period to a lesser value */ + dhd_os_set_intr_poll_period(dhd->bus, INTR_POLL_PERIOD_CRITICAL); +#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */ + + if (tput_data->version != TPUT_TEST_T_VER || + tput_data->length != TPUT_TEST_T_LEN) { + DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__)); + err_exit = BCME_BADARG; + goto exit_error; + } + + if (dhd->tput_data.tput_test_running) { + DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__)); + err_exit = BCME_BUSY; + goto exit_error; + } +#ifdef PCIE_FULL_DONGLE + /* + * 100 bytes to accommodate ether header and tput header. As of today + * both occupy 30 bytes. Rest is reserved. + */ + if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) || + (tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) { + DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n", + __FUNCTION__, TPUT_TEST_MAX_PAYLOAD, + (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))); + err_exit = BCME_BUFTOOLONG; + goto exit_error; + } +#endif + max_txbufs = dhd_get_max_txbufs(dhd); + max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH); + + if (!(tput_data->num_pkts > 0)) { + DHD_ERROR(("%s: invalid num_pkts: %d to tx\n", + __FUNCTION__, tput_data->num_pkts)); + err_exit = BCME_ERROR; + goto exit_error; + } + + memset(&dhd->tput_data, 0, sizeof(dhd->tput_data)); + memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data)); + dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0; + dhd->tput_data.pkts_cmpl = 0; + dhd->tput_start_ts = dhd->tput_stop_ts = 0; + + if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) { + pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) + + (tput_data->payload_size - 12); + } else { + pktsize = sizeof(tput_pkt_t) + + (tput_data->payload_size - 12); + } + + tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 - + (uint8 *)&tput_pkt.mac_sta); + + /* mark the tput test as started */ + dhd->tput_data.tput_test_running = TRUE; + + if (tput_data->direction == TPUT_DIR_TX) { + /* for ethernet header */ + memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN); + memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN); + ether_hdr.ether_type = hton16(ETHER_TYPE_IP); + + /* fill in the tput pkt */ + memset(&tput_pkt, 0, sizeof(tput_pkt)); + memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN); + memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN); + tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL); + tput_pkt.num_pkts = hton32(tput_data->num_pkts); + + if (tput_data->num_pkts > (uint32)max_txbufs) { + n_batches = tput_data->num_pkts / max_txbufs; + n_remain = tput_data->num_pkts % max_txbufs; + } else { + n_batches = 0; + n_remain = tput_data->num_pkts; + } + DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n", + __FUNCTION__, tput_data->num_pkts, n_batches, n_remain)); + + do { + /* reset before every batch */ + dhd->batch_tx_pkts_cmpl = 0; + if (n_batches) { + dhd->batch_tx_num_pkts = max_txbufs; + --n_batches; + } else if (n_remain) { + dhd->batch_tx_num_pkts = n_remain; + n_remain = 0; + } else { + DHD_ERROR(("Invalid. This should not hit\n")); + } + + dhd->tput_start_ts = OSL_SYSUPTIME_US(); + for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) { + pkt = PKTGET(dhd->osh, pktsize, TRUE); + if (!pkt) { + dhd->tput_data.tput_test_running = FALSE; + DHD_ERROR(("%s: PKTGET fails ! 
Not enough Tx buffers\n",
+						__FUNCTION__));
+					DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
+						__FUNCTION__, dhd->tput_data.pkts_good,
+						dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
+					err_exit = BCME_NOMEM;
+					goto exit_error;
+				}
+				pktdata = PKTDATA(dhd->osh, pkt);
+				PKTSETLEN(dhd->osh, pkt, pktsize);
+				memset(pktdata, 0, pktsize);
+				if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
+					memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
+					pktdata += sizeof(ether_hdr);
+				}
+				/* send the stop pkt as the last pkt */
+				if (tx_stop_pkt) {
+					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
+					tx_stop_pkt = FALSE;
+				} else
+					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
+				tput_pkt.pkt_id = hton32(pktid++);
+				tput_pkt.crc32 = 0;
+				memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
+				/* compute crc32 over the pkt-id, num-pkts and data fields */
+				crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
+				*crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
+					8 + (tput_data->payload_size - 12),
+					CRC32_INIT_VALUE));
+
+				err = dhd_sendpkt(dhd, 0, pkt);
+				if (err != BCME_OK) {
+					DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
+						__FUNCTION__, pktid, err));
+					dhd->tput_data.pkts_bad++;
+				}
+				total_num_tx_pkts++;
+				if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
+					tx_stop_pkt = TRUE;
+				}
+			}
+			DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
+			if (!dhd_os_tput_test_wait(dhd, NULL,
+				TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
+				dhd->tput_stop_ts = OSL_SYSUPTIME_US();
+				dhd->tput_data.tput_test_running = FALSE;
+				DHD_ERROR(("%s: TX completion timeout !"
+					" Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
+					__FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
+				err_exit = BCME_ERROR;
+				goto exit_error;
+			}
+			if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
+				(dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
+				if (!time_taken) {
+					time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
+				}
+			} else {
+				dhd->tput_data.tput_test_running = FALSE;
+				DHD_ERROR(("%s: bad timestamp while calculating tx batch time\n",
+					__FUNCTION__));
+				err_exit = BCME_ERROR;
+				goto exit_error;
+			}
+			if (n_batches || n_remain) {
+				batch_cnt = TRUE;
+			} else {
+				batch_cnt = FALSE;
+			}
+		} while (batch_cnt);
+	} else {
+		/* TPUT_DIR_RX */
+		DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
+		if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
+			DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
+			dhd->tput_stop_ts = OSL_SYSUPTIME_US();
+		}
+	}
+
+	/* calculate the throughput in bits per sec */
+	if (dhd->tput_start_ts && dhd->tput_stop_ts &&
+		(dhd->tput_stop_ts > dhd->tput_start_ts)) {
+		time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
+		time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
+		dhd->tput_data.time_ms = time_taken;
+		if (time_taken) {
+			total_size = pktsize * dhd->tput_data.pkts_cmpl * 8;
+			dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
+			/* convert from ms to seconds */
+			dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * 1000;
+		}
+	} else {
+		DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
+	}
+	DHD_INFO(("%s: DONE.
tput = %llu bps, time = %llu ms\n", __FUNCTION__, + dhd->tput_data.tput_bps, dhd->tput_data.time_ms)); + + memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data)); + + dhd->tput_data.tput_test_running = FALSE; + + err_exit = BCME_OK; + +exit_error: + DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n", + __FUNCTION__, dhd->tput_data.pkts_good, + dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl)); +#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC) + /* restore interrupt poll period to the previous existing value */ + dhd_os_set_intr_poll_period(dhd->bus, cur_intr_poll_period); +#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */ + + return err_exit; +} + +void +dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt) +{ + uint8 *pktdata = NULL; + tput_pkt_t *tput_pkt = NULL; + uint32 crc = 0; + uint8 tput_pkt_hdr_size = 0; + + pktdata = PKTDATA(dhd->osh, pkt); + if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR) + pktdata += sizeof(struct ether_header); + tput_pkt = (tput_pkt_t *)pktdata; + + /* record the timestamp of the first packet received */ + if (dhd->tput_data.pkts_cmpl == 0) { + dhd->tput_start_ts = OSL_SYSUPTIME_US(); + } + + if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP && + dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) { + dhd->tput_data.pkts_cmpl++; + } + /* drop rx packets received beyond the specified # */ + if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts) + return; + + DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__, + ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type))); + + /* discard if mac addr of AP/STA does not match the specified ones */ + if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap, + ETHER_ADDR_LEN) != 0) || + (memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta, + ETHER_ADDR_LEN) != 0)) { + dhd->tput_data.pkts_bad++; + DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n", + __FUNCTION__, ntoh32(tput_pkt->pkt_id))); + return; + } + + tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 - + (uint8 *)&tput_pkt->mac_sta); + pktdata += tput_pkt_hdr_size + 4; + crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12), + CRC32_INIT_VALUE); + if (crc != ntoh32(tput_pkt->crc32)) { + DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n", + __FUNCTION__, ntoh32(tput_pkt->pkt_id))); + dhd->tput_data.pkts_bad++; + return; + } + + if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP) + dhd->tput_data.pkts_good++; + + /* if we have received the stop packet or all the # of pkts, we're done */ + if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP || + dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) { + dhd->tput_stop_ts = OSL_SYSUPTIME_US(); + dhd_os_tput_test_wake(dhd); + } +} + +#ifdef DUMP_IOCTL_IOV_LIST +void +dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node) +{ + dll_t *item; + dhd_iov_li_t *iov_li; + dhd->dump_iovlist_len++; + + if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) { + item = dll_head_p(list_head); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + dll_delete(item); + MFREE(dhd->osh, iov_li, sizeof(*iov_li)); + dhd->dump_iovlist_len--; + } + dll_append(list_head, node); +} + +void +dhd_iov_li_print(dll_t *list_head) +{ + dhd_iov_li_t *iov_li; + dll_t *item, *next; + uint8 index = 0; + for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) { + next = dll_next_p(item); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, 
iov_li->cmd)); + } +} + +void +dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head) +{ + dll_t *item; + dhd_iov_li_t *iov_li; + while (!(dll_empty(list_head))) { + item = dll_head_p(list_head); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + dll_delete(item); + MFREE(dhd->osh, iov_li, sizeof(*iov_li)); + } +} +#endif /* DUMP_IOCTL_IOV_LIST */ + +#ifdef EWP_EDL +/* For now we are allocating memory for EDL ring using DMA_ALLOC_CONSISTENT +* The reason being that, in hikey, if we try to DMA_MAP preallocated memory +* it fails with an 'out of space in SWIOTLB' error +*/ +int +dhd_edl_mem_init(dhd_pub_t *dhd) +{ + int ret = 0; + + memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem)); + ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE); + if (ret != BCME_OK) { + DHD_ERROR(("%s: alloc of edl_ring_mem failed\n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} + +/* + * NOTE: dhd_edl_mem_deinit need not be called explicitly, because the dma_buf + * for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init. + */ +void +dhd_edl_mem_deinit(dhd_pub_t *dhd) +{ + if (dhd->edl_ring_mem.va != NULL) + dhd_dma_buf_free(dhd, &dhd->edl_ring_mem); +} + +int +dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data, + void *evt_decode_data) +{ + msg_hdr_edl_t *msg = NULL; + cmn_msg_hdr_t *cmn_msg_hdr = NULL; + uint8 *buf = NULL; + + if (!data || !dhdp || !evt_decode_data) { + DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__)); + return BCME_ERROR; + } + + /* format of data in each work item in the EDL ring: + * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t| + * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|| + */ + cmn_msg_hdr = (cmn_msg_hdr_t *)data; + msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t)); + buf = (uint8 *)msg; + /* validate the fields */ + if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) { + DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)" + " expected (0x%x)\n", __FUNCTION__, + msg->infobuf_ver, PCIE_INFOBUF_V1)); + return BCME_VERSION; + } + + /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */ + if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) { + DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n", + __FUNCTION__)); + return BCME_BUFTOOSHORT; + } + + if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) { + DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n", + __FUNCTION__, ltoh16(msg->pyld_hdr.type))); + return BCME_BADOPTION; + } + + if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) { + DHD_ERROR(("%s: infobuf logtrace length %u is bigger" + " than available buffer size %u\n", __FUNCTION__, + ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id)); + return BCME_BADLEN; + } + + /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */ + buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr); + dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data, + ltoh16(msg->pyld_hdr.length)); + + /* + * check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb + * copy the event data to the skb and send it up the stack + */ + if (dhdp->logtrace_pkt_sendup) { + DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__, + (uint32)(ltoh16(msg->pyld_hdr.length) + + sizeof(info_buf_payload_hdr_t) + 4))); + dhd_sendup_info_buf(dhdp, (uint8 *)msg); + } + + return BCME_OK; +} +#endif /* EWP_EDL */ + +#ifdef DHD_LOG_DUMP +#define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4 
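+/* + * dhd_log_dump_trigger() below rate-limits debug dumps: a new dump is started + * only if at least DEBUG_DUMP_TRIGGER_INTERVAL_SEC seconds have passed since + * the previous trigger (recorded in dhdp->debug_dump_time_sec), so that + * back-to-back triggers from different contexts collapse into a single dump. + */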
+void +dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd) +{ +#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) + log_dump_type_t *flush_type; +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */ + uint64 current_time_sec; + + if (!dhdp) { + DHD_ERROR(("dhdp is NULL !\n")); + return; + } + + if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) { + DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__)); + return; + } + + current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); + + DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n", + __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec, + DEBUG_DUMP_TRIGGER_INTERVAL_SEC)); + + if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) { + DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n", + __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC)); + return; + } + + clear_debug_dump_time(dhdp->debug_dump_time_str); +#ifdef DHD_PCIE_RUNTIMEPM + /* wake up RPM if SYSDUMP is triggered */ + dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + /* */ + + dhdp->debug_dump_subcmd = subcmd; + + dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); + +#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) + /* flush_type is freed at do_dhd_log_dump function */ + flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t)); + if (flush_type) { + *flush_type = DLD_BUF_TYPE_ALL; + dhd_schedule_log_dump(dhdp, flush_type); + } else { + DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__)); + return; + } +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */ + + /* Inside dhd_mem_dump, event notification will be sent to HAL and + * from other context DHD pushes memdump, debug_dump and pktlog dump + * to HAL and HAL will write into file + */ +#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP) + dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ + +#if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) + dhd_schedule_pktlog_dump(dhdp); +#endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */ +} +#endif /* DHD_LOG_DUMP */ + +#if (defined(LINUX) || defined(DHD_EFI)) && defined(SHOW_LOGTRACE) +int +dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath) +{ + void *file = NULL; + int size = 0; + char buf[FW_VER_STR_LEN]; + char *str = NULL; + int ret = BCME_OK; + + if (!fwpath) + return BCME_BADARG; + + file = dhd_os_open_image1(dhdp, fwpath); + if (!file) { + ret = BCME_ERROR; + goto exit; + } + size = dhd_os_get_image_size(file); + if (!size) { + ret = BCME_ERROR; + goto exit; + } + + /* seek to the last 'X' bytes in the file */ + if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) { + ret = BCME_ERROR; + goto exit; + } + + /* read the last 'X' bytes of the file to a buffer */ + memset(buf, 0, FW_VER_STR_LEN); + if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) { + ret = BCME_ERROR; + goto exit; + } + /* search for 'Version' in the buffer */ + str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR)); + if (!str) { + ret = BCME_ERROR; + goto exit; + } + /* go back in the buffer to the last ascii character */ + while (str != buf && + (*str >= ' ' && *str <= '~')) { + --str; + } + /* reverse the final decrement, so that str is pointing + * to the first ascii character in the buffer + */ + ++str; + + if (strlen(str) > (FW_VER_STR_LEN - 1)) { + ret = BCME_BADLEN; + goto exit; 
+ } + + DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str)); + /* copy to global variable, so that in case FW load fails, the + * core capture logs will contain FW version read from the file + */ + memset(fw_version, 0, FW_VER_STR_LEN); + strlcpy(fw_version, str, FW_VER_STR_LEN); + +exit: + if (file) + dhd_os_close_image1(dhdp, file); + + return ret; +} +#endif /* LINUX || DHD_EFI */ + +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) +void +dhd_clear_awdl_stats(dhd_pub_t *dhd) +{ + unsigned long flags; + /* + * Since event path(ex: WLC_E_AWDL_AW) and bus path(tx status process) update + * the AWDL data acquire lock before clearing the AWDL stats. + */ + DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, flags); + memset(dhd->awdl_stats, 0, sizeof(dhd->awdl_stats)); + DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, flags); +} +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ + +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT + +static void +copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc) +{ + int remain_len; + int i; + int *cnt; + char *dest; + int bytes_written; + uint32 ioc_dwlen = 0; + + if (!dhd || !dhd->hang_info) { + DHD_ERROR(("%s dhd=%p hang_info=%p\n", + __FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL))); + return; + } + + cnt = &dhd->hang_info_cnt; + dest = dhd->hang_info; + + memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN); + (*cnt) = 0; + + bytes_written = 0; + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written; + + get_debug_dump_time(dhd->debug_dump_time_hang_str); + copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str); + + bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ", + HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER, + dhd->debug_dump_time_hang_str, + ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed); + (*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT; + + clear_debug_dump_time(dhd->debug_dump_time_hang_str); + + /* Access ioc->buf only if the ioc->len is more than 4 bytes */ + ioc_dwlen = (uint32)(ioc->len / sizeof(uint32)); + if (ioc_dwlen > 0) { + const uint32 *ioc_buf = (const uint32 *)ioc->buf; + + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written; + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + bytes_written += scnprintf(&dest[bytes_written], remain_len, + "%08x", *(uint32 *)(ioc_buf++)); + GCC_DIAGNOSTIC_POP(); + (*cnt)++; + if ((*cnt) >= HANG_FIELD_CNT_MAX) { + return; + } + + for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX; + i++, (*cnt)++) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written; + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x", + HANG_RAW_DEL, *(uint32 *)(ioc_buf++)); + GCC_DIAGNOSTIC_POP(); + } + } + + DHD_INFO(("%s hang info len: %d data: %s\n", + __FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info)); +} + +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */ + +#if defined(DHD_H2D_LOG_TIME_SYNC) +/* + * Helper function: + * Used for Dongle console message time syncing with Host printk + */ +void dhd_h2d_log_time_sync(dhd_pub_t *dhd) +{ + uint64 ts; + + /* + * local_clock() returns time in nano seconds. + * Dongle understand only milli seconds time. 
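+ * do_div() below therefore converts the nanosecond value to milliseconds + * before it is handed to the dongle via the "rte_timesync" iovar.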
+ */ + ts = local_clock(); + /* Nano seconds to milli seconds */ + do_div(ts, 1000000); + if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__)); + /* Stopping HOST Dongle console time syncing */ + dhd->dhd_rte_time_sync_ms = 0; + } +} +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +#if defined(LINUX) || defined(linux) +/* configuations of ecounters to be enabled by default in FW */ +static ecounters_cfg_t ecounters_cfg_tbl[] = { + /* Global ecounters */ + {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE}, + // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS}, + // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS}, + + /* Slice specific ecounters */ + {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE}, + {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE}, + {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX}, + + /* Interface specific ecounters */ + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT}, + + /* secondary interface */ + /* XXX REMOVE for temporal, will be enabled after decision + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_IF_PERIODIC_STATE}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_GENERIC}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_INFRA_SPECIFIC}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_MGT_CNT}, + */ +}; + +/* XXX: Same event id shall be defined in consecutive order in the below table */ +static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = { + /* Interface specific event ecounters */ + {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS}, +}; + +/* Accepts an argument to -s, -g or -f and creates an XTLV */ +int +dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx, + uint16 stats_rep, uint8 **xtlv) +{ + uint8 *req_xtlv = NULL; + ecounters_stats_types_report_req_t *req; + bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf; + ecountersv2_xtlv_list_elt_t temp; + uint16 xtlv_len = 0, total_len = 0; + int rc = BCME_OK; + + /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */ + temp.id = stats_rep; + temp.len = 0; + + /* Hence len/data = 0/NULL */ + xtlv_len += temp.len + BCM_XTLV_HDR_SIZE; + + /* Total length of the container */ + total_len = BCM_XTLV_HDR_SIZE + + OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len; + + /* Now allocate a structure for the entire request */ + if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + /* container XTLV context */ + bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len, + BCM_XTLV_OPTION_ALIGN32); + + /* Fill other XTLVs in the container. 
Leave space for XTLV headers */ + req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE); + req->flags = type; + if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) { + req->slice_mask = 0x1 << if_slice_idx; + } else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) { + req->if_index = if_slice_idx; + } + + /* Fill remaining XTLVs */ + bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len, + BCM_XTLV_OPTION_ALIGN32); + if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) { + DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id)); + rc = BCME_ERROR; + goto fail; + } + + /* fill the top level container and get done with the XTLV container */ + rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL, + bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t, + stats_types_req)); + + if (rc) { + DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags)); + goto fail; + } + +fail: + if (rc && req_xtlv) { + MFREE(dhd->osh, req_xtlv, total_len); + req_xtlv = NULL; + } + + /* update the xtlv pointer */ + *xtlv = req_xtlv; + return rc; +} + +static int +dhd_ecounter_autoconfig(dhd_pub_t *dhd) +{ + int rc = BCME_OK; + uint32 buf; + rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + + if (rc != BCME_OK) { + + if (rc != BCME_UNSUPPORTED) { + DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc)); + rc = BCME_OK; + } else { + DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__)); + } + } + + return rc; +} + +int +dhd_ecounter_configure(dhd_pub_t *dhd, bool enable) +{ + int rc = BCME_OK; + if (enable) { + if (dhd_ecounter_autoconfig(dhd) != BCME_OK) { + if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__)); + } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__)); + } + } + } else { + if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__)); + } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__)); + } + } + return rc; +} + +int +dhd_start_ecounters(dhd_pub_t *dhd) +{ + uint8 i = 0; + uint8 *start_ptr; + int rc = BCME_OK; + bcm_xtlv_t *elt; + ecounters_config_request_v2_t *req = NULL; + ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL; + ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL; + uint16 total_processed_containers_len = 0; + + for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) { + ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i]; + + if ((list_elt = (ecountersv2_processed_xtlv_list_elt *) + MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) { + DHD_ERROR(("Ecounters v2: No memory to process\n")); + goto fail; + } + + rc = dhd_create_ecounters_params(dhd, ecounter_stat->type, + ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data); + + if (rc) { + DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n", + ecounter_stat->stats_rep, rc)); + + /* Free allocated memory and go to fail to release any memory allocated + * in previous iterations. Note that list_elt->data is populated in + * dhd_create_ecounters_params() and is freed there on failure. 
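+ * On success the caller owns the returned buffer and frees it after copying.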
+ */ + MFREE(dhd->osh, list_elt, sizeof(*list_elt)); + list_elt = NULL; + goto fail; + } + elt = (bcm_xtlv_t *) list_elt->data; + + /* Put the elements in the order they are processed */ + if (processed_containers_list == NULL) { + processed_containers_list = list_elt; + } else { + tail->next = list_elt; + } + tail = list_elt; + /* Size of the XTLV returned */ + total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE; + } + + /* Now create ecounters config request with totallength */ + req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) + + total_processed_containers_len); + + if (req == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + req->version = ECOUNTERS_VERSION_2; + req->logset = EVENT_LOG_SET_ECOUNTERS; + req->reporting_period = ECOUNTERS_DEFAULT_PERIOD; + req->num_reports = ECOUNTERS_NUM_REPORTS; + req->len = total_processed_containers_len + + OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs); + + /* Copy config */ + start_ptr = req->ecounters_xtlvs; + + /* Now go element by element in the list */ + while (processed_containers_list) { + list_elt = processed_containers_list; + + elt = (bcm_xtlv_t *)list_elt->data; + + memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + processed_containers_list = processed_containers_list->next; + + /* Free allocated memories */ + MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE); + MFREE(dhd->osh, list_elt, sizeof(*list_elt)); + } + + if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) { + DHD_ERROR(("failed to start ecounters\n")); + } + +fail: + if (req) { + MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len); + } + + /* Now go element by element in the list */ + while (processed_containers_list) { + list_elt = processed_containers_list; + elt = (bcm_xtlv_t *)list_elt->data; + processed_containers_list = processed_containers_list->next; + + /* Free allocated memories */ + MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE); + MFREE(dhd->osh, list_elt, sizeof(*list_elt)); + } + return rc; +} + +int +dhd_stop_ecounters(dhd_pub_t *dhd) +{ + int rc = BCME_OK; + ecounters_config_request_v2_t *req; + + /* Now create ecounters config request with totallength */ + req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req)); + + if (req == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + req->version = ECOUNTERS_VERSION_2; + req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs); + + if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) { + DHD_ERROR(("failed to stop ecounters\n")); + } + +fail: + if (req) { + MFREE(dhd->osh, req, sizeof(*req)); + } + return rc; +} + +/* configured event_id_array for event ecounters */ +typedef struct event_id_array { + uint8 event_id; + uint8 str_idx; +} event_id_array_t; + +/* get event id array only from event_ecounters_cfg_tbl[] */ +static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array) +{ + uint8 i; + uint8 idx = 0; + int32 prev_evt_id = -1; + + for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) { + if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) { + if (prev_evt_id >= 0) + idx++; + event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id; + event_array[idx].str_idx = i; + } + prev_evt_id = event_ecounters_cfg_tbl[i].event_id; + } + return idx; +} + +/* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 
interfaces */ +#define ECNTRS_MAX_XTLV_NUM (31 * 2) + +int +dhd_start_event_ecounters(dhd_pub_t *dhd) +{ + uint8 i, j = 0; + uint8 event_id_cnt = 0; + uint16 processed_containers_len = 0; + uint16 max_xtlv_len = 0; + int rc = BCME_OK; + uint8 *ptr; + uint8 *data; + event_id_array_t *id_array; + bcm_xtlv_t *elt = NULL; + event_ecounters_config_request_v2_t *req = NULL; + + /* XXX: the size of id_array is limited by the size of event_ecounters_cfg_tbl */ + id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) * + ARRAYSIZE(event_ecounters_cfg_tbl)); + + if (id_array == NULL) { + rc = BCME_NOMEM; + goto fail; + } + event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array); + + max_xtlv_len = ((BCM_XTLV_HDR_SIZE + + OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) * + ECNTRS_MAX_XTLV_NUM); + + /* Now create ecounters config request with max allowed length */ + req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, + sizeof(event_ecounters_config_request_v2_t) + max_xtlv_len); + + if (req == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + for (i = 0; i <= event_id_cnt; i++) { + /* req initialization by event id */ + req->version = ECOUNTERS_VERSION_2; + req->logset = EVENT_LOG_SET_ECOUNTERS; + req->event_id = id_array[i].event_id; + req->flags = EVENT_ECOUNTERS_FLAGS_ADD; + req->len = 0; + processed_containers_len = 0; + + /* Copy config */ + ptr = req->ecounters_xtlvs; + + for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) { + event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j]; + if (id_array[i].event_id != event_ecounter_stat->event_id) + break; + + rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type, + event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep, + &data); + + if (rc) { + DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n", + __FUNCTION__, event_ecounter_stat->stats_rep, rc)); + goto fail; + } + + elt = (bcm_xtlv_t *)data; + + memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE; + + /* Free the buffer allocated by dhd_create_ecounters_params */ + MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE); + + if (processed_containers_len > max_xtlv_len) { + DHD_ERROR(("%s XTLV length exceeds the allowed maximum!\n", + __FUNCTION__)); + rc = BCME_BADLEN; + goto fail; + } + } + + req->len = processed_containers_len + + OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs); + + DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n", + __FUNCTION__, req->version, req->logset, req->event_id, + req->flags, req->len)); + + rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE); + + if (rc < 0) { + DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n", + req->event_id, rc)); + goto fail; + } + } + +fail: + /* Free allocated memory */ + if (req) { + MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t) + max_xtlv_len); + } + if (id_array) { + MFREE(dhd->osh, id_array, sizeof(event_id_array_t) * + ARRAYSIZE(event_ecounters_cfg_tbl)); + } + + return rc; +} + +int +dhd_stop_event_ecounters(dhd_pub_t *dhd) +{ + int rc = BCME_OK; + event_ecounters_config_request_v2_t *req; + + /* Now create ecounters config request with total length */ + req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req)); + + if (req == NULL) { + rc = BCME_NOMEM; 
+ goto fail; + } + + req->version = ECOUNTERS_VERSION_2; + req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL; + req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs); + + if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) { + DHD_ERROR(("failed to stop event_ecounters\n")); + } + +fail: + if (req) { + MFREE(dhd->osh, req, sizeof(*req)); + } + return rc; +} +#ifdef DHD_LOG_DUMP +int +dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf, + log_dump_section_hdr_t *sec_hdr, + char *text_hdr, int buflen, uint32 sec_type) +{ + uint32 rlen = 0; + uint32 data_len = 0; + void *data = NULL; + unsigned long flags = 0; + int ret = 0; + dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr; + int pos = 0; + int fpos_sechdr = 0; + + if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) { + return BCME_BADARG; + } + /* do not allow further writes to the ring + * till we flush it + */ + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_SUSPEND; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + if (dhdp->concise_dbg_buf) { + /* re-use concise debug buffer temporarily + * to pull ring data, to write + * record by record to file + */ + data_len = CONCISE_DUMP_BUFLEN; + data = dhdp->concise_dbg_buf; + ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos); + /* write the section header now with zero length, + * once the correct length is found out, update + * it later + */ + fpos_sechdr = pos; + sec_hdr->type = sec_type; + sec_hdr->length = 0; + ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf, + sizeof(*sec_hdr), &pos); + do { + rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE); + if (rlen > 0) { + /* write the log */ + ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos); + } + DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen)); + } while ((rlen > 0)); + /* now update the section header length in the file */ + /* Complete ring size is dumped by HAL, hence updating length to ring size */ + sec_hdr->length = ring->ring_size; + ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf, + sizeof(*sec_hdr), &fpos_sechdr); + } else { + DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__)); + } + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + /* Resetting both read and write pointer, + * since all items are read. 
+ */ + ring->rp = ring->wp = 0; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return ret; +} + +int +dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, + unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, + char *text_hdr, uint32 sec_type) +{ + uint32 rlen = 0; + uint32 data_len = 0, total_len = 0; + void *data = NULL; + unsigned long fpos_sechdr = 0; + unsigned long flags = 0; + int ret = 0; + dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr; + + if (!dhdp || !ring || !file || !sec_hdr || + !file_posn || !text_hdr) + return BCME_BADARG; + + /* do not allow further writes to the ring + * till we flush it + */ + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_SUSPEND; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + if (dhdp->concise_dbg_buf) { + /* re-use concise debug buffer temporarily + * to pull ring data, to write + * record by record to file + */ + data_len = CONCISE_DUMP_BUFLEN; + data = dhdp->concise_dbg_buf; + dhd_os_write_file_posn(file, file_posn, text_hdr, + strlen(text_hdr)); + /* write the section header now with zero length, + * once the correct length is found out, update + * it later + */ + dhd_init_sec_hdr(sec_hdr); + fpos_sechdr = *file_posn; + sec_hdr->type = sec_type; + sec_hdr->length = 0; + dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr, + sizeof(*sec_hdr)); + do { + rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE); + if (rlen > 0) { + /* write the log */ + ret = dhd_os_write_file_posn(file, file_posn, data, rlen); + if (ret < 0) { + DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_ERROR; + } + } + total_len += rlen; + } while (rlen > 0); + /* now update the section header length in the file */ + sec_hdr->length = total_len; + dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr)); + } else { + DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__)); + } + + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + /* Resetting both read and write pointer, + * since all items are read. 
+ */ + ring->rp = ring->wp = 0; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_OK; +} + +/* logdump cookie */ +#define MAX_LOGUDMP_COOKIE_CNT 10u +#define LOGDUMP_COOKIE_STR_LEN 50u +int +dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size) +{ + uint32 ring_size; + + if (!dhdp || !buf) { + DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf)); + return BCME_ERROR; + } + + ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT; + if (buf_size < ring_size) { + DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n", + ring_size, buf_size)); + return BCME_ERROR; + } + + dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size, + LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT, + DHD_RING_TYPE_FIXED); + if (!dhdp->logdump_cookie) { + DHD_ERROR(("FAIL TO INIT COOKIE RING\n")); + return BCME_ERROR; + } + + return BCME_OK; +} + +void +dhd_logdump_cookie_deinit(dhd_pub_t *dhdp) +{ + if (!dhdp) { + return; + } + if (dhdp->logdump_cookie) { + dhd_ring_deinit(dhdp, dhdp->logdump_cookie); + } + + return; +} + +#ifdef DHD_TX_PROFILE +int +dhd_tx_profile_detach(dhd_pub_t *dhdp) +{ + int result = BCME_ERROR; + + if (dhdp != NULL && dhdp->protocol_filters != NULL) { + MFREE(dhdp->osh, dhdp->protocol_filters, DHD_MAX_PROFILES * + sizeof(*(dhdp->protocol_filters))); + dhdp->protocol_filters = NULL; + + result = BCME_OK; + } + + return result; +} + +int +dhd_tx_profile_attach(dhd_pub_t *dhdp) +{ + int result = BCME_ERROR; + + if (dhdp != NULL) { + dhdp->protocol_filters = (dhd_tx_profile_protocol_t*)MALLOCZ(dhdp->osh, + DHD_MAX_PROFILES * sizeof(*(dhdp->protocol_filters))); + + if (dhdp->protocol_filters != NULL) { + result = BCME_OK; + } + } + + if (result != BCME_OK) { + DHD_ERROR(("%s:\tMALLOC of tx profile protocol filters failed\n", + __FUNCTION__)); + } + + return result; +} +#endif /* defined(DHD_TX_PROFILE) */ + +void +dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type) +{ + char *ptr; + + if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) { + DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p" + " type = %p, cookie_cfg:%p\n", __FUNCTION__, + dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL)); + return; + } + ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie); + if (ptr == NULL) { + DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__)); + return; + } + scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie); + return; +} + +int +dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size) +{ + char *ptr; + + if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) { + DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p" + "cookie=%p cookie_cfg:%p\n", __FUNCTION__, + dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL)); + return BCME_ERROR; + } + ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie); + if (ptr == NULL) { + DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__)); + return BCME_ERROR; + } + memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr))); + dhd_ring_free_first(dhdp->logdump_cookie); + return BCME_OK; +} + +int +dhd_logdump_cookie_count(dhd_pub_t *dhdp) +{ + if (!dhdp || !dhdp->logdump_cookie) { + DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL)); + return 0; + } + return dhd_ring_get_cur_size(dhdp->logdump_cookie); +} + +static inline int +__dhd_log_dump_cookie_to_file( + dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos, + char *buf, uint32 buf_size) 
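+/* The caller (dhd_log_dump_cookie_to_file) validates dhdp, the cookie ring and + * the output handles before calling, so they are not re-checked here. + */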
+{ + + uint32 remain = buf_size; + int ret = BCME_ERROR; + char tmp_buf[LOGDUMP_COOKIE_STR_LEN]; + log_dump_section_hdr_t sec_hdr; + uint32 read_idx; + uint32 write_idx; + + read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie); + write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie); + while (dhd_logdump_cookie_count(dhdp) > 0) { + memset(tmp_buf, 0, sizeof(tmp_buf)); + ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN); + if (ret != BCME_OK) { + return ret; + } + remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf); + } + dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx); + dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx); + + ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos); + if (ret < 0) { + DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__)); + return ret; + } + sec_hdr.magic = LOG_DUMP_MAGIC; + sec_hdr.timestamp = local_clock(); + sec_hdr.type = LOG_DUMP_SECTION_COOKIE; + sec_hdr.length = buf_size - remain; + + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos); + if (ret < 0) { + DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__)); + return ret; + } + + ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos); + if (ret < 0) { + DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__)); + } + + return ret; +} + +uint32 +dhd_log_dump_cookie_len(dhd_pub_t *dhdp) +{ + int len = 0; + char tmp_buf[LOGDUMP_COOKIE_STR_LEN]; + log_dump_section_hdr_t sec_hdr; + char *buf = NULL; + int ret = BCME_ERROR; + uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN; + uint32 read_idx; + uint32 write_idx; + uint32 remain; + + remain = buf_size; + + if (!dhdp || !dhdp->logdump_cookie) { + DHD_ERROR(("%s At least one ptr is NULL " + "dhdp = %p cookie %p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL)); + goto exit; + } + + buf = (char *)MALLOCZ(dhdp->osh, buf_size); + if (!buf) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + goto exit; + } + + read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie); + write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie); + while (dhd_logdump_cookie_count(dhdp) > 0) { + memset(tmp_buf, 0, sizeof(tmp_buf)); + ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN); + if (ret != BCME_OK) { + goto exit; + } + remain -= (uint32)strlen(tmp_buf); + } + dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx); + dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx); + len += strlen(COOKIE_LOG_HDR); + len += sizeof(sec_hdr); + len += (buf_size - remain); +exit: + if (buf) + MFREE(dhdp->osh, buf, buf_size); + return len; +} + +int +dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf) +{ + int ret = BCME_ERROR; + char tmp_buf[LOGDUMP_COOKIE_STR_LEN]; + log_dump_section_hdr_t sec_hdr; + char *buf = NULL; + uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN; + int pos = 0; + uint32 read_idx; + uint32 write_idx; + uint32 remain; + + remain = buf_size; + + if (!dhdp || !dhdp->logdump_cookie) { + DHD_ERROR(("%s At least one ptr is NULL " + "dhdp = %p cookie %p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL)); + goto exit; + } + + buf = (char *)MALLOCZ(dhdp->osh, buf_size); + if (!buf) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + goto exit; + } + + read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie); + write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie); + while (dhd_logdump_cookie_count(dhdp) > 0) { + 
memset(tmp_buf, 0, sizeof(tmp_buf)); + ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN); + if (ret != BCME_OK) { + goto exit; + } + remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf); + } + dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx); + dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx); + ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos); + sec_hdr.magic = LOG_DUMP_MAGIC; + sec_hdr.timestamp = local_clock(); + sec_hdr.type = LOG_DUMP_SECTION_COOKIE; + sec_hdr.length = buf_size - remain; + ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos); + ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos); +exit: + if (buf) + MFREE(dhdp->osh, buf, buf_size); + return ret; +} + +int +dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos) +{ + char *buf; + int ret = BCME_ERROR; + uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN; + + if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) { + DHD_ERROR(("%s At least one ptr is NULL " + "dhdp = %p cookie %p fp = %p f_pos = %p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos)); + return ret; + } + + buf = (char *)MALLOCZ(dhdp->osh, buf_size); + if (!buf) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + return ret; + } + ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size); + MFREE(dhdp->osh, buf, buf_size); + + return ret; +} +#endif /* DHD_LOG_DUMP */ +#endif /* LINUX || linux */ + +#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB) +int +dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab) +{ + int ret = BCME_OK; + bcm_xtlv_t *pxtlv = NULL; + uint8 mybuf[DHD_IOVAR_BUF_SIZE]; + uint16 mybuf_len = sizeof(mybuf); + pxtlv = (bcm_xtlv_t *)mybuf; + + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab), + &he_enab, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret))); + return ret; + } + + ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n", + __FUNCTION__, he_enab, bcmerrorstr(ret))); + } else { + DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab)); + } + + return ret; +} +#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */ + +#ifdef CONFIG_ROAM_RSSI_LIMIT +int +dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g) +{ + wlc_roam_rssi_limit_t *plmt; + wlc_roam_rssi_lmt_info_v1_t *pinfo; + int ret = BCME_OK; + int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN; + + plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len); + if (!plmt) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Get roam rssi limit */ + ret = dhd_iovar(dhd, 0, "roam_rssi_limit", NULL, 0, (char *)plmt, plmt_len, FALSE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret)); + goto done; + } + + if (plmt->ver != WLC_ROAM_RSSI_LMT_VER_1) { + ret = BCME_VERSION; + goto done; + } + + pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data; + *lmt2g = (int)pinfo->rssi_limit_2g; + *lmt5g = (int)pinfo->rssi_limit_5g; + +done: + if (plmt) { + MFREE(dhd->osh, plmt, plmt_len); + } + return ret; +} + +int +dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g) +{ + 
wlc_roam_rssi_limit_t *plmt; + wlc_roam_rssi_lmt_info_v1_t *pinfo; + int ret = BCME_OK; + int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN; + + /* Sanity check RSSI limit Value */ + if ((lmt2g < ROAMRSSI_2G_MIN) || (lmt2g > ROAMRSSI_2G_MAX)) { + DHD_ERROR(("%s Not In Range 2G ROAM RSSI Limit\n", __FUNCTION__)); + return BCME_RANGE; + } + if ((lmt5g < ROAMRSSI_5G_MIN) || (lmt5g > ROAMRSSI_5G_MAX)) { + DHD_ERROR(("%s Not In Range 5G ROAM RSSI Limit\n", __FUNCTION__)); + return BCME_RANGE; + } + + plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len); + if (!plmt) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } + plmt->ver = WLC_ROAM_RSSI_LMT_VER_1; + plmt->len = sizeof(*pinfo); + pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data; + pinfo->rssi_limit_2g = (int16)lmt2g; + pinfo->rssi_limit_5g = (int16)lmt5g; + + /* Set roam rssi limit */ + ret = dhd_iovar(dhd, 0, "roam_rssi_limit", (char *)plmt, plmt_len, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Set roam_rssi_limit %d\n", __FUNCTION__, ret)); + goto done; + } +done: + if (plmt) { + MFREE(dhd->osh, plmt, plmt_len); + } + return ret; +} +#endif /* CONFIG_ROAM_RSSI_LIMIT */ + +#ifdef CONFIG_ROAM_MIN_DELTA +int +dhd_roam_min_delta_get(dhd_pub_t *dhd, uint32 *dt2g, uint32 *dt5g) +{ + wlc_roam_min_delta_t *pmin_delta; + wlc_roam_min_delta_info_v1_t *pmin_delta_info; + int ret = BCME_OK; + int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN; + + pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen); + if (!pmin_delta) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Get Minimum ROAM score delta */ + ret = dhd_iovar(dhd, 0, "roam_min_delta", NULL, 0, (char *)pmin_delta, plen, FALSE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Get roam_min_delta %d\n", __FUNCTION__, ret)); + goto done; + } + + if (pmin_delta->ver != WLC_ROAM_MIN_DELTA_VER_1) { + ret = BCME_VERSION; + goto done; + } + + pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data; + *dt2g = (uint32)pmin_delta_info->roam_min_delta_2g; + *dt5g = (uint32)pmin_delta_info->roam_min_delta_5g; + +done: + if (pmin_delta) { + MFREE(dhd->osh, pmin_delta, plen); + } + return ret; +} + +int +dhd_roam_min_delta_set(dhd_pub_t *dhd, uint32 dt2g, uint32 dt5g) +{ + wlc_roam_min_delta_t *pmin_delta; + wlc_roam_min_delta_info_v1_t *pmin_delta_info; + int ret = BCME_OK; + int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN; + + /* Sanity check Minimum ROAM score delta */ + if ((dt2g > ROAM_MIN_DELTA_MAX) || (dt5g > ROAM_MIN_DELTA_MAX)) { + DHD_ERROR(("%s Not In Range Minimum ROAM score delta, 2G: %u, 5G: %u\n", + __FUNCTION__, dt2g, dt5g)); + return BCME_RANGE; + } + + pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen); + if (!pmin_delta) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } + pmin_delta->ver = WLC_ROAM_MIN_DELTA_VER_1; + pmin_delta->len = sizeof(*pmin_delta_info); + pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data; + pmin_delta_info->roam_min_delta_2g = (uint32)dt2g; + pmin_delta_info->roam_min_delta_5g = (uint32)dt5g; + + /* Set Minimum ROAM score delta */ + ret = dhd_iovar(dhd, 0, "roam_min_delta", (char *)pmin_delta, plen, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Set roam_min_delta %d\n", __FUNCTION__, ret)); + goto done; + } +done: + if (pmin_delta) { + MFREE(dhd->osh, pmin_delta, plen); + } + return ret; +} +#endif /* CONFIG_ROAM_MIN_DELTA */ + +#ifdef HOST_SFH_LLC 
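+/* 0x80f3 (AppleTalk AARP) and 0x8137 (Novell IPX) are the ethertypes in the + * Selective Standard Translation table; per 802.1H they are encapsulated with + * the bridge-tunnel OUI 00-00-F8 rather than the RFC1042 OUI 00-00-00. + */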
+#define SSTLOOKUP(proto) (((proto) == 0x80f3) || ((proto) == 0x8137)) +/** Convert Ethernet to 802.3 per 802.1H (use bridge-tunnel if type in SST) + * Note:- This function will overwrite the ethernet header in the pkt + * with a 802.3 ethernet + LLC/SNAP header by utilising the headroom + * in the packet. The pkt data pointer should be pointing to the + * start of the packet (at the ethernet header) when the function is called. + * The pkt data pointer will be pointing to the + * start of the new 802.3 header if the function returns successfully + * + * + * Original Ethernet (header length = 14): + * ---------------------------------------------------------------------------------------- + * | | DA | SA | T | Data... | + * ---------------------------------------------------------------------------------------- + * 6 6 2 + * + * Conversion to 802.3 (header length = 22): + * (LLC includes ether_type in last 2 bytes): + * ---------------------------------------------------------------------------------------- + * | | DA | SA | L | LLC/SNAP | T | Data... | + * ---------------------------------------------------------------------------------------- + * 6 6 2 6 2 + */ +int +BCMFASTPATH(dhd_ether_to_8023_hdr)(osl_t *osh, struct ether_header *eh, void *p) +{ + struct ether_header *neh; + struct dot11_llc_snap_header *lsh; + uint16 plen, ether_type; + + if (PKTHEADROOM(osh, p) < DOT11_LLC_SNAP_HDR_LEN) { + DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__)); + ASSERT(0); + return BCME_BUFTOOSHORT; + } + + ether_type = ntoh16(eh->ether_type); + neh = (struct ether_header *)PKTPUSH(osh, p, DOT11_LLC_SNAP_HDR_LEN); + + /* 802.3 MAC header */ + eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost); + eacopy((char*)eh->ether_shost, (char*)neh->ether_shost); + plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN; + neh->ether_type = hton16(plen); + + /* 802.2 LLC header */ + lsh = (struct dot11_llc_snap_header *)&neh[1]; + lsh->dsap = 0xaa; + lsh->ssap = 0xaa; + lsh->ctl = 0x03; + + /* 802.2 SNAP header Use RFC1042 or bridge-tunnel if type in SST per 802.1H */ + lsh->oui[0] = 0x00; + lsh->oui[1] = 0x00; + if (SSTLOOKUP(ether_type)) + lsh->oui[2] = 0xf8; + else + lsh->oui[2] = 0x00; + lsh->type = hton16(ether_type); + + return BCME_OK; +} + +/** Convert 802.3+LLC to ethernet + * Note:- This function will overwrite the 802.3+LLC hdr in the pkt + * with an ethernet header. The pkt data pointer should be pointing to the + * start of the packet (at the 802.3 header) when the function is called. 
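+ * Only frames whose 802.3 type field carries a length (< ETHER_TYPE_MIN) are + * converted; a frame already holding a true ethertype is rejected with + * BCME_BADARG.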
+ * The pkt data pointer will be pointing to the + * start of the ethernet header if the function returns successfully + */ +int +BCMFASTPATH(dhd_8023_llc_to_ether_hdr)(osl_t *osh, struct ether_header *eh8023, void *p) +{ + struct dot11_llc_snap_header *lsh = NULL; + uint16 ether_type = 0; + uint8 *pdata = NULL; + + if (!p || !eh8023) + return BCME_BADARG; + + pdata = PKTDATA(osh, p); + ether_type = ntoh16(eh8023->ether_type); + /* ether type in 802.3 hdr for sfh llc host insertion case + * contains length, replace it with actual ether type at the + * end of the LLC hdr + */ + if (ether_type < ETHER_TYPE_MIN) { + /* 802.2 LLC header */ + lsh = (struct dot11_llc_snap_header *)(pdata + sizeof(*eh8023)); + eh8023->ether_type = lsh->type; + pdata = PKTPULL(osh, p, DOT11_LLC_SNAP_HDR_LEN); + memcpy_s(pdata, sizeof(*eh8023), eh8023, sizeof(*eh8023)); + } else { + DHD_ERROR_RLMT(("ethertype 0x%x is not a length !\n", ether_type)); + return BCME_BADARG; + } + + return BCME_OK; +} +#endif /* HOST_SFH_LLC */ + +#ifdef DHD_AWDL + +#define AWDL_MIN_EXTENSION_DEFAULT 0x3u +#define AWDL_PRESENCE_MODE_DEFAULT 0x4u +#define AWDL_FLAGS_DEFAULT 0x0000u +#define AWDL_PID 0x0800u +#define AWDL_USERDATA_SIZE 6u +/** Convert Ethernet to 802.3 + AWDL LLC SNAP header + * Note:- This function will overwrite the ethernet header in the pkt 'p' + * with a 802.3 ethernet + AWDL LLC/SNAP header by utilising the headroom + * in the packet. The pkt data pointer should be pointing to the + * start of the packet (at the ethernet header) when the function is called. + * The pkt data pointer will be pointing to the + * start of the new 802.3 header if the function returns successfully + */ +int +BCMFASTPATH(dhd_ether_to_awdl_llc_hdr)(struct dhd_pub *dhd, struct ether_header *eh, void *p) +{ + osl_t *osh = dhd->osh; + struct ether_header *neh; + struct dot11_llc_snap_header *lsh; + uint16 plen, ether_type; + uint8 *awdl_data = NULL; + uint16 *seq = NULL; + uint16 *flags = NULL; + uint16 *type = NULL; + + if (PKTHEADROOM(osh, p) < (2 * DOT11_LLC_SNAP_HDR_LEN)) { + DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__)); + ASSERT(0); + return BCME_BUFTOOSHORT; + } + + ether_type = ntoh16(eh->ether_type); + neh = (struct ether_header *)PKTPUSH(osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN); + + /* 802.3 MAC header */ + eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost); + eacopy((char*)eh->ether_shost, (char*)neh->ether_shost); + plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN; + neh->ether_type = hton16(plen); + + /* 802.2 LLC header */ + lsh = (struct dot11_llc_snap_header *)&neh[1]; + lsh->dsap = 0xaa; + lsh->ssap = 0xaa; + lsh->ctl = 0x03; + + /* 802.2 SNAP header */ + lsh->oui[0] = 0x00; + lsh->oui[1] = 0x17; + lsh->oui[2] = 0xf2; + lsh->type = hton16(AWDL_PID); + + /* AWDL upper layer data */ + awdl_data = (uint8 *)&lsh[1]; + + awdl_data[0] = dhd->awdl_minext; + awdl_data[1] = dhd->awdl_presmode; + + seq = (uint16 *)&awdl_data[2]; + *seq = dhd->awdl_seq++; + + flags = (uint16 *)&awdl_data[4]; + *flags = hton16(AWDL_FLAGS_DEFAULT); + + type = (uint16 *)&awdl_data[6]; + *type = hton16(ether_type); + + return BCME_OK; +} + +/** Convert 802.3 + AWDL LLC SNAP header to ethernet header + * Note:- This function will overwrite the existing + * 802.3 ethernet + AWDL LLC/SNAP header in the packet 'p' + * with a 14 byte ethernet header + * The pkt data pointer should be pointing to the + * start of the packet (at the 802.3 header) when the function is called. 
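+ * The encapsulation removed here is 2 * DOT11_LLC_SNAP_HDR_LEN bytes in total: + * the LLC/SNAP header plus the AWDL user data and the original ether type.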
+ * The pkt data pointer will be pointing to the + * start of the new ethernet header if the function returns successfully + */ +int +dhd_awdl_llc_to_eth_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p) +{ + uint16 *ethertype = NULL; + uint8 *ptr = NULL; + + if (!eh || !p || !dhd) + return BCME_BADARG; + + ptr = PKTDATA(dhd->osh, p); + + /* copy ether type instead of length from the + * end of the awdl llc header to the ethernet header + */ + ptr += sizeof(*eh) + DOT11_LLC_SNAP_HDR_LEN + AWDL_USERDATA_SIZE; + ethertype = (uint16 *)ptr; + eh->ether_type = *ethertype; + + /* overwrite awdl llc header with ethernet header */ + PKTPULL(dhd->osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN); + ptr = PKTDATA(dhd->osh, p); + memcpy_s(ptr, sizeof(*eh), eh, sizeof(*eh)); + return BCME_OK; +} +#endif /* DHD_AWDL */ + +int +dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf, + uint res_len, bool set) +{ + char *buf = NULL; + uint input_len; + wl_ioctl_t ioc; + int ret; + + if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN) + return BCME_BADARG; + + input_len = strlen(name) + 1 + param_len; + + /* WAR to fix GET iovar returning buf too short error + * If param len is 0 for get iovar, increment input_len by sizeof(int) + * to avoid the length check error in fw + */ + if (!set && !param_len) { + input_len += sizeof(int); + } + if (input_len > WLC_IOCTL_MAXLEN) + return BCME_BADARG; + + buf = NULL; + if (set) { + if (res_buf || res_len != 0) { + DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__)); + ret = BCME_BADARG; + goto exit; + } + buf = MALLOCZ(pub->osh, input_len); + if (!buf) { + DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__)); + ret = BCME_NOMEM; + goto exit; + } + ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len); + if (!ret) { + ret = BCME_NOMEM; + goto exit; + } + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = input_len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + } else { + if (!res_buf || !res_len) { + DHD_ERROR(("%s: GET failed. 
resp_buf NULL or length 0.\n", __FUNCTION__)); + ret = BCME_BADARG; + goto exit; + } + + if (res_len < input_len) { + DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__, + res_len, input_len)); + buf = MALLOCZ(pub->osh, input_len); + if (!buf) { + DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__)); + ret = BCME_NOMEM; + goto exit; + } + ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len); + if (!ret) { + ret = BCME_NOMEM; + goto exit; + } + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = input_len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + + if (ret == BCME_OK) { + memcpy(res_buf, buf, res_len); + } + } else { + memset(res_buf, 0, res_len); + ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len); + if (!ret) { + ret = BCME_NOMEM; + goto exit; + } + + ioc.cmd = WLC_GET_VAR; + ioc.buf = res_buf; + ioc.len = res_len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + } + } +exit: + if (buf) { + MFREE(pub->osh, buf, input_len); + } + return ret; +} diff --git a/bcmdhd.101.10.361.x/dhd_config.c b/bcmdhd.101.10.361.x/dhd_config.c new file mode 100755 index 0000000..77028e0 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_config.c @@ -0,0 +1,5175 @@ + +#include +#include + +#include +#include +#include +#include +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +#include +#include +#include +#include +#endif +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef WL_CFG80211 +#include +#endif + +#include +#include +#include +#ifdef BCMPCIE +#include +#endif + +#if defined(BCMSDIO) || defined(BCMPCIE) +#include +#include +#ifdef BCMSDIO +#include +#include +#include +#include +#endif /* defined(BCMSDIO) */ +#endif + +/* message levels */ +#define CONFIG_ERROR_LEVEL (1 << 0) +#define CONFIG_TRACE_LEVEL (1 << 1) +#define CONFIG_MSG_LEVEL (1 << 0) + +uint config_msg_level = CONFIG_ERROR_LEVEL | CONFIG_MSG_LEVEL; +uint dump_msg_level = 0; + +#define CONFIG_MSG(x, args...) \ + do { \ + if (config_msg_level & CONFIG_MSG_LEVEL) { \ + printf("%s : " x, __func__, ## args); \ + } \ + } while (0) +#define CONFIG_ERROR(x, args...) \ + do { \ + if (config_msg_level & CONFIG_ERROR_LEVEL) { \ + printf("CONFIG-ERROR) %s : " x, __func__, ## args); \ + } \ + } while (0) +#define CONFIG_TRACE(x, args...) 
\ + do { \ + if (config_msg_level & CONFIG_TRACE_LEVEL) { \ + printf("CONFIG-TRACE) %s : " x, __func__, ## args); \ + } \ + } while (0) + +#define MAXSZ_BUF 4096 +#define MAXSZ_CONFIG 8192 + +#if defined(BCMSDIO) && defined(DYNAMIC_MAX_HDR_READ) +extern uint firstread; +#endif + +#if defined(PROP_TXSTATUS) +#include +#endif /* PROP_TXSTATUS */ + +#define MAX_EVENT_BUF_NUM 16 +typedef struct eventmsg_buf { + u16 num; + struct { + u16 type; + bool set; + } event [MAX_EVENT_BUF_NUM]; +} eventmsg_buf_t; + +typedef struct chip_name_map_t { + uint chip; + uint chiprev; + uint ag_type; + char *chip_name; + char *module_name; +} chip_name_map_t; + +/* Map of WLC_E events to connection failure strings */ +#define DONT_CARE 9999 +const chip_name_map_t chip_name_map[] = { + /* ChipID Chiprev AG ChipName ModuleName */ +#ifdef BCMSDIO + {BCM43362_CHIP_ID, 0, DONT_CARE, "bcm40181a0", ""}, + {BCM43362_CHIP_ID, 1, DONT_CARE, "bcm40181a2", ""}, + {BCM4330_CHIP_ID, 4, FW_TYPE_G, "bcm40183b2", ""}, + {BCM4330_CHIP_ID, 4, FW_TYPE_AG, "bcm40183b2_ag", ""}, + {BCM43430_CHIP_ID, 0, DONT_CARE, "bcm43438a0", "ap6212"}, + {BCM43430_CHIP_ID, 1, DONT_CARE, "bcm43438a1", "ap6212a"}, + {BCM43430_CHIP_ID, 2, DONT_CARE, "bcm43436b0", "ap6236"}, + {BCM43012_CHIP_ID, 1, FW_TYPE_G, "bcm43013b0", ""}, + {BCM43012_CHIP_ID, 1, FW_TYPE_AG, "bcm43013c0_ag", ""}, + {BCM43012_CHIP_ID, 2, DONT_CARE, "bcm43013c1_ag", ""}, + {BCM4334_CHIP_ID, 3, DONT_CARE, "bcm4334b1_ag", ""}, + {BCM43340_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""}, + {BCM43341_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""}, + {BCM4324_CHIP_ID, 5, DONT_CARE, "bcm43241b4_ag", ""}, + {BCM4335_CHIP_ID, 2, DONT_CARE, "bcm4339a0_ag", ""}, + {BCM4339_CHIP_ID, 1, DONT_CARE, "bcm4339a0_ag", "ap6335"}, + {BCM4345_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", "ap6255"}, + {BCM43454_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", ""}, + {BCM4345_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", "ap6256"}, + {BCM43454_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", ""}, + {BCM4354_CHIP_ID, 1, DONT_CARE, "bcm4354a1_ag", ""}, + {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", "ap6356"}, + {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""}, + {BCM4371_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""}, + {BCM43569_CHIP_ID, 3, DONT_CARE, "bcm4358a3_ag", ""}, + {BCM4359_CHIP_ID, 5, DONT_CARE, "bcm4359b1_ag", ""}, + {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_ag", "ap6398s"}, + {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_ag", ""}, + {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_ag", ""}, + {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_ag", ""}, + {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_ag", "ap6275s"}, +#endif +#ifdef BCMPCIE + {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""}, + {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""}, + {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_pcie_ag", ""}, + {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_pcie_ag", ""}, + {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_pcie_ag", ""}, + {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_pcie_ag", ""}, + {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_pcie_ag", ""}, + {BCM4375_CHIP_ID, 5, DONT_CARE, "bcm4375b4_pcie_ag", "ap6275hh3"}, +#endif +#ifdef BCMDBUS + {BCM43143_CHIP_ID, 2, DONT_CARE, "bcm43143b0", ""}, + {BCM43242_CHIP_ID, 1, DONT_CARE, "bcm43242a1_ag", ""}, + {BCM43569_CHIP_ID, 2, DONT_CARE, "bcm4358u_ag", "ap62x8"}, +#endif +}; + +#ifdef UPDATE_MODULE_NAME +typedef void (compat_func_t)(dhd_pub_t *dhd); +typedef struct module_name_map_t { + uint devid; + uint chip; + uint chiprev; + uint svid; + uint ssid; + char *module_name; + char 
*chip_name; + compat_func_t *compat_func; +} module_name_map_t; + +#if defined(BCMSDIO) || defined(BCMPCIE) +static void dhd_conf_compat_vht(dhd_pub_t *dhd); +#endif + +const module_name_map_t module_name_map[] = { + /* Devce ID Chip ID Chiprev SVID SSID + * ModuleName ChipName Compat function + */ +#ifdef BCMSDIO + {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0, + "ap6398s2", "bcm4359c51a2_ag", dhd_conf_compat_vht}, + {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0, + "ap6398sr32", "bcm4359c51a2_ag", dhd_conf_compat_vht}, + {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0, + "ap6398sv", "bcm4359c51a2_ag", dhd_conf_compat_vht}, + {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0, + "ap6398sv3", "bcm4359c51a2_ag", dhd_conf_compat_vht}, +#endif +#ifdef BCMPCIE + {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x179F, 0x003C, + "ap6398p2", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht}, + {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003C, + "ap6398p2", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht}, + {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003D, + "ap6398pr32", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht}, + {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003E, + "ap6398pv", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht}, + {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003F, + "ap6398pv3", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht}, +#endif +}; +#endif + +#ifdef BCMPCIE +typedef struct chip_cisaddr_map_t { + uint chip; + uint chiprev; + uint start_addr; + uint end_addr; +} chip_cisaddr_map_t; +const chip_cisaddr_map_t chip_cisaddr_map[] = { + /* ChipID Chiprev Start End */ + {BCM4354_CHIP_ID, 2, 0x0, 0x0}, + {BCM4356_CHIP_ID, 2, 0x0, 0x0}, + {BCM4359_CHIP_ID, 9, 0x0, 0x0}, +// {BCM43752_CHIP_ID, 2, 0x18011120, 0x18011177}, +// {BCM4375_CHIP_ID, 5, 0x18011120, 0x18011177}, +}; +#endif + +#ifdef DHD_TPUT_PATCH +extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx); +#endif + +void +dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list) +{ + CONFIG_TRACE("called\n"); + + if (chip_nv_list->m_chip_nv_path_head) { + CONFIG_TRACE("Free %p\n", chip_nv_list->m_chip_nv_path_head); + kfree(chip_nv_list->m_chip_nv_path_head); + chip_nv_list->m_chip_nv_path_head = NULL; + } + chip_nv_list->count = 0; +} + +#if defined(BCMSDIO) || defined(BCMPCIE) +typedef struct cis_tuple_format { + uint8 id; + uint8 len; /* total length of tag and data */ + uint8 tag; + uint8 data[1]; +} cis_tuple_format_t; +#define SBSDIO_CIS_SIZE_LIMIT 0x200 +#define SBSDIO_TUPLE_SIZE_LIMIT 0xff +#define CIS_TUPLE_ID_BRCM 0x80 +#define CIS_TUPLE_TAG_MACADDR 0x19 +#define CIS_TUPLE_ID_AMPAK 0x8e +#define CIS_TUPLE_TAG_MODULE 0x41 +#define CIS_TUPLE_LENGTH 1 +#define CIS_TUPLE_HDR_LEN 2 +#endif + +#ifdef BCMSDIO +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +void +dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih) +{ + uint32 gpiocontrol, addr; + + if (CHIPID(sih->chip) == BCM43362_CHIP_ID) { + CONFIG_MSG("Enable HW OOB for 43362\n"); + addr = SI_ENUM_BASE(sih) + OFFSETOF(chipcregs_t, gpiocontrol); + gpiocontrol = bcmsdh_reg_read(sdh, addr, 4); + gpiocontrol |= 0x2; + bcmsdh_reg_write(sdh, addr, 4, gpiocontrol); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL); + } +} +#endif + +void +dhd_conf_get_otp(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih) +{ + int i, err = -1; + uint8 *cis, *ptr = 0; + uint8 mac_header[3] = {0x80, 0x07, 0x19}; + cis_tuple_format_t *tuple; + int 
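dhd_conf_get_otp() below walks the SDIO CIS as a stream of {id, len, tag, data[]} tuples and copies the MAC out of the Broadcom tuple (CIS_TUPLE_ID_BRCM 0x80 with CIS_TUPLE_TAG_MACADDR 0x19). A self-contained sketch of that walk over a synthetic CIS image, assuming the same tuple layout as cis_tuple_format_t above:

#include <stdio.h>
#include <string.h>

typedef struct {
	unsigned char id;
	unsigned char len;	/* total length of tag and data */
	unsigned char tag;
	unsigned char data[1];
} tuple_t;

int main(void)
{
	/* id=0x80 (BRCM), len=0x07 (tag + 6 data bytes), tag=0x19 (MACADDR) */
	unsigned char cis[] = {
		0x80, 0x07, 0x19, 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33,
		0xff, 0x00	/* terminator tuple */
	};
	unsigned char mac[6] = {0};
	int totlen = sizeof(cis);
	tuple_t *t = (tuple_t *)cis;

	while (totlen >= t->len + 2) {	/* 2 == CIS_TUPLE_HDR_LEN */
		if (t->id == 0xff || t->len == 0xff)
			break;
		if (t->id == 0x80 && t->tag == 0x19)
			memcpy(mac, t->data, 6);
		totlen -= t->len + 2;
		t = (tuple_t *)((unsigned char *)t + t->len + 2);
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}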
totlen, len;
+
+	if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) {
+		CONFIG_ERROR("cis malloc failed\n");
+		return;
+	}
+	bzero(cis, SBSDIO_CIS_SIZE_LIMIT);
+
+	if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) {
+		CONFIG_ERROR("cis read err %d\n", err);
+		MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+		return;
+	}
+	tuple = (cis_tuple_format_t *)cis;
+	totlen = SBSDIO_CIS_SIZE_LIMIT;
+	if (config_msg_level & CONFIG_TRACE_LEVEL) {
+		prhex("CIS", &tuple->id, totlen);
+	}
+	while (totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) {
+		len = tuple->len;
+		if ((config_msg_level & CONFIG_TRACE_LEVEL) && tuple->id) {
+			prhex("TPL", &tuple->id, tuple->len + CIS_TUPLE_HDR_LEN);
+		}
+		if (tuple->id == 0xff || tuple->len == 0xff)
+			break;
+		if ((tuple->id == CIS_TUPLE_ID_BRCM) &&
+				(tuple->tag == CIS_TUPLE_TAG_MACADDR) &&
+				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
+			memcpy(&dhd->conf->otp_mac, tuple->data, ETHER_ADDR_LEN);
+		}
+#ifdef GET_OTP_MODULE_NAME
+		else if (tuple->id == CIS_TUPLE_ID_AMPAK && (tuple->len) &&
+				tuple->tag == CIS_TUPLE_TAG_MODULE) {
+			int len = tuple->len - 1;
+			if (len <= sizeof(dhd->conf->module_name) - 1) {
+				strncpy(dhd->conf->module_name, tuple->data, len);
+				CONFIG_MSG("module_name=%s\n", dhd->conf->module_name);
+			} else {
+				CONFIG_ERROR("len is too long %d >= %d\n",
+					len, (int)sizeof(dhd->conf->module_name) - 1);
+			}
+		}
+#endif
+		tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
+		totlen -= (len + CIS_TUPLE_HDR_LEN);
+	}
+
+	if (!memcmp(&ether_null, &dhd->conf->otp_mac, ETHER_ADDR_LEN)) {
+		ptr = cis;
+		/* Special OTP */
+		if (bcmsdh_reg_read(sdh, SI_ENUM_BASE(sih), 4) == 0x16044330) {
+			for (i=0; i<SBSDIO_CIS_SIZE_LIMIT; i++) {
+				if (!memcmp(mac_header, ptr, 3)) {
+					memcpy(&dhd->conf->otp_mac, ptr+3, ETHER_ADDR_LEN);
+					break;
+				}
+				ptr++;
+			}
+		}
+	}
+
+	ASSERT(cis);
+	MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+}
+
+#ifdef SET_FWNV_BY_MAC
+void
+dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list)
+{
+	int i;
+
+	CONFIG_TRACE("called\n");
+	if (mac_list->m_mac_list_head) {
+		for (i=0; i<mac_list->count; i++) {
+			if (mac_list->m_mac_list_head[i].mac) {
+				CONFIG_TRACE("Free mac %p\n", mac_list->m_mac_list_head[i].mac);
+				kfree(mac_list->m_mac_list_head[i].mac);
+			}
+		}
+		CONFIG_TRACE("Free m_mac_list_head %p\n", mac_list->m_mac_list_head);
+		kfree(mac_list->m_mac_list_head);
+	}
+	mac_list->count = 0;
+}
+
+void
+dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, char *fw_path)
+{
+	int i, j;
+	uint8 *mac = (uint8 *)&dhd->conf->otp_mac;
+	int fw_num=0, mac_num=0;
+	uint32 oui, nic;
+	wl_mac_list_t *mac_list;
+	wl_mac_range_t *mac_range;
+	int fw_type, fw_type_new;
+	char *name_ptr;
+
+	mac_list = dhd->conf->fw_by_mac.m_mac_list_head;
+	fw_num = dhd->conf->fw_by_mac.count;
+	if (!mac_list || !fw_num)
+		return;
+
+	oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);
+	nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);
+
+	/* find out the last '/' */
+	i = strlen(fw_path);
+	while (i > 0) {
+		if (fw_path[i] == '/') {
+			i++;
+			break;
+		}
+		i--;
+	}
+	name_ptr = &fw_path[i];
+
+	if (strstr(name_ptr, "_apsta"))
+		fw_type = FW_TYPE_APSTA;
+	else if (strstr(name_ptr, "_p2p"))
+		fw_type = FW_TYPE_P2P;
+	else if (strstr(name_ptr, "_mesh"))
+		fw_type = FW_TYPE_MESH;
+	else if (strstr(name_ptr, "_ezmesh"))
+		fw_type = FW_TYPE_EZMESH;
+	else if (strstr(name_ptr, "_es"))
+		fw_type = FW_TYPE_ES;
+	else if (strstr(name_ptr, "_mfg"))
+		fw_type = FW_TYPE_MFG;
+	else
+		fw_type = FW_TYPE_STA;
+
+	for (i=0; i<fw_num; i++) {
+		/* the entry's own firmware type must match before its
+		 * MAC ranges are scanned
+		 */
+		if (strstr(mac_list[i].name, "_apsta"))
+			fw_type_new = FW_TYPE_APSTA;
+		else if (strstr(mac_list[i].name, "_p2p"))
+			fw_type_new = FW_TYPE_P2P;
+		else if (strstr(mac_list[i].name, "_mesh"))
+			fw_type_new = FW_TYPE_MESH;
+		else if (strstr(mac_list[i].name, "_ezmesh"))
+			fw_type_new = FW_TYPE_EZMESH;
+		else if (strstr(mac_list[i].name, "_es"))
+			fw_type_new = FW_TYPE_ES;
+		else if (strstr(mac_list[i].name, "_mfg"))
+			fw_type_new = FW_TYPE_MFG;
+		else
+			fw_type_new = FW_TYPE_STA;
+		if (fw_type != fw_type_new)
+			continue;
+		mac_num = mac_list[i].count;
+		mac_range = mac_list[i].mac;
+		for (j=0; j<mac_num; j++) {
+			if (oui == mac_range[j].oui &&
+					nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {
+				strcpy(name_ptr, mac_list[i].name);
+				CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic);
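dhd_conf_set_fw_name_by_mac() above splits the OTP MAC into a 24-bit OUI and a 24-bit NIC and then compares the NIC against per-entry [nic_start, nic_end] ranges. The packing and the range test in isolation (MAC and range values are made up):

#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = {0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
	unsigned oui = (mac[0] << 16) | (mac[1] << 8) | mac[2];
	unsigned nic = (mac[3] << 16) | (mac[4] << 8) | mac[5];
	/* range test as used when picking a per-batch firmware name */
	unsigned nic_start = 0x110000, nic_end = 0x11ffff;

	printf("oui=0x%06X nic=0x%06X in-range=%d\n",
		oui, nic, nic >= nic_start && nic <= nic_end);
	return 0;
}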
+ CONFIG_MSG("fw_path=%s\n", fw_path); + return; + } + } + } + } +} + +void +dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, char *nv_path) +{ + int i, j; + uint8 *mac = (uint8 *)&dhd->conf->otp_mac; + int nv_num=0, mac_num=0; + uint32 oui, nic; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + char *pnv_name; + + mac_list = dhd->conf->nv_by_mac.m_mac_list_head; + nv_num = dhd->conf->nv_by_mac.count; + if (!mac_list || !nv_num) + return; + + oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]); + nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]); + + /* find out the last '/' */ + i = strlen(nv_path); + while (i > 0) { + if (nv_path[i] == '/') break; + i--; + } + pnv_name = &nv_path[i+1]; + + for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { + strcpy(pnv_name, mac_list[i].name); + CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic); + CONFIG_MSG("nv_path=%s\n", nv_path); + return; + } + } + } + } +} +#endif +#endif + +#ifdef BCMPCIE +static int +dhd_conf_read_otp_from_bp(si_t *sih, uint32 *data_buf, + uint32 cis_start_addr, uint32 cis_max_cnt) +{ + int int_val = 0, i = 0, bp_idx = 0; + int boardtype_backplane_addr[] = { + 0x18010324, /* OTP Control 1 */ + 0x18012618, /* PMU min resource mask */ + }; + int boardtype_backplane_data[] = { + 0x00fa0000, + 0x0e4fffff /* Keep on ARMHTAVAIL */ + }; + uint32 org_boardtype_backplane_data[] = { + 0, + 0 + }; + + for (bp_idx=0; bp_idxconf->chip; + chiprev = dhd->conf->chiprev; + + for (i=0; ichip == chip && row->chiprev == chiprev) { + cis_start_addr = row->start_addr; + cis_end_addr = row->end_addr; + } + } + + if (!cis_start_addr || !cis_end_addr) { + CONFIG_TRACE("no matched chip\n"); + goto exit; + } + cis_max_cnt = (cis_end_addr - cis_start_addr + 1) / sizeof(uint32); + + raw_data = kmalloc(cis_max_cnt, GFP_KERNEL); + if (raw_data == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", cis_max_cnt); + goto exit; + } + + totlen = dhd_conf_read_otp_from_bp(sih, raw_data, cis_start_addr, cis_max_cnt); + if (totlen == BCME_ERROR || totlen == 0) { + CONFIG_ERROR("Can't read the OTP\n"); + ret = BCME_ERROR; + goto exit; + } + + tuple = (cis_tuple_format_t *)raw_data; + + if (config_msg_level & CONFIG_TRACE_LEVEL) { + CONFIG_TRACE("start: 0x%x, end: 0x%x, totlen: %d\n", + cis_start_addr, cis_end_addr, totlen); + prhex("CIS", &tuple->id, totlen); + } + + /* check the first tuple has tag 'start' */ + if (tuple->id != CIS_TUPLE_ID_BRCM) { + CONFIG_ERROR("Can not find the TAG\n"); + ret = BCME_ERROR; + goto exit; + } + + /* find tagged parameter */ + while (totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) { + len = tuple->len; + if ((config_msg_level & CONFIG_TRACE_LEVEL) && tuple->id) { + prhex("TPL", &tuple->id, tuple->len+CIS_TUPLE_HDR_LEN); + } + if ((tuple->id == CIS_TUPLE_ID_BRCM) && + (tuple->tag == CIS_TUPLE_TAG_MACADDR) && + (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) { + memcpy(&dhd->conf->otp_mac, tuple->data, ETHER_ADDR_LEN); + } + tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN)); + totlen -= (len + CIS_TUPLE_HDR_LEN); + } + +exit: + if(raw_data) + kfree(raw_data); + return ret; +} + +bool +dhd_conf_legacy_msi_chip(dhd_pub_t *dhd) +{ + uint chip; + + chip = dhd->conf->chip; + + if (chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID || + chip == BCM4371_CHIP_ID || + chip == BCM4359_CHIP_ID) { + return true; + } + + return false; +} +#endif + +void +dhd_conf_free_country_list(struct dhd_conf *conf) +{ + country_list_t *country = conf->country_head; + int count = 0; + + CONFIG_TRACE("called\n"); 
+ while (country) { + CONFIG_TRACE("Free cspec %s\n", country->cspec.country_abbrev); + conf->country_head = country->next; + kfree(country); + country = conf->country_head; + count++; + } + CONFIG_TRACE("%d country released\n", count); +} + +void +dhd_conf_free_mchan_list(struct dhd_conf *conf) +{ + mchan_params_t *mchan = conf->mchan; + int count = 0; + + CONFIG_TRACE("called\n"); + while (mchan) { + CONFIG_TRACE("Free cspec %p\n", mchan); + conf->mchan = mchan->next; + kfree(mchan); + mchan = conf->mchan; + count++; + } + CONFIG_TRACE("%d mchan released\n", count); +} + +const chip_name_map_t* +dhd_conf_match_chip(dhd_pub_t *dhd, uint ag_type) +{ + uint chip, chiprev; + int i; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + for (i=0; ichip == chip && row->chiprev == chiprev && + (row->ag_type == ag_type || + ag_type == DONT_CARE || row->ag_type == DONT_CARE)) { + return row; + } + } + + return NULL; +} + +#ifdef UPDATE_MODULE_NAME +const module_name_map_t* +dhd_conf_match_module(dhd_pub_t *dhd) +{ + uint devid, chip, chiprev; +#ifdef BCMPCIE + uint svid, ssid; +#endif +#if defined(BCMSDIO) || defined(BCMPCIE) + int i; +#endif + + devid = dhd->conf->devid; + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; +#ifdef BCMPCIE + svid = dhd->conf->svid; + ssid = dhd->conf->ssid; +#endif + +#ifdef BCMSDIO + for (i=0; idevid == devid && row->chip == chip && row->chiprev == chiprev && + !strcmp(row->module_name, dhd->conf->module_name)) { + return row; + } + } +#endif + +#ifdef BCMPCIE + for (i=0; idevid == devid && row->chip == chip && row->chiprev == chiprev && + row->svid == svid && row->ssid == ssid) { + return row; + } + } +#endif + + return NULL; +} +#endif + +int +dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) +{ +#ifdef UPDATE_MODULE_NAME + const module_name_map_t* row_module = NULL; +#endif + const chip_name_map_t* row_chip = NULL; + int fw_type, ag_type; + uint chip, chiprev; + char *name_ptr; + int i; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (fw_path[0] == '\0') { +#ifdef CONFIG_BCMDHD_FW_PATH + bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1); + if (fw_path[0] == '\0') +#endif + { + CONFIG_MSG("firmware path is null\n"); + return 0; + } + } +#ifndef FW_PATH_AUTO_SELECT + return DONT_CARE; +#endif + + /* find out the last '/' */ + i = strlen(fw_path); + while (i > 0) { + if (fw_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &fw_path[i]; +#ifdef BAND_AG + ag_type = FW_TYPE_AG; +#else + ag_type = strstr(name_ptr, "_ag") ? 
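dhd_conf_set_fw_name_by_chip() around here first isolates the basename after the last '/' with a manual backward scan, then classifies the firmware flavor by filename suffix with a strstr() cascade (continued just below). The same classification reduced to a standalone helper, with strrchr() replacing the manual scan; the enum values are placeholders for the driver's FW_TYPE_* constants:

#include <stdio.h>
#include <string.h>

enum { TYPE_STA, TYPE_APSTA, TYPE_P2P, TYPE_MESH, TYPE_MFG };

static int classify(const char *path)
{
	const char *base = strrchr(path, '/');	/* basename of the fw path */
	base = base ? base + 1 : path;
	if (strstr(base, "_apsta")) return TYPE_APSTA;
	if (strstr(base, "_p2p"))   return TYPE_P2P;
	if (strstr(base, "_mesh"))  return TYPE_MESH;
	if (strstr(base, "_mfg"))   return TYPE_MFG;
	return TYPE_STA;
}

int main(void)
{
	printf("%d\n", classify("/vendor/firmware/fw_bcm4356a2_ag_apsta.bin"));	/* 1 */
	return 0;
}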
FW_TYPE_AG : FW_TYPE_G; +#endif + if (strstr(name_ptr, "_apsta")) + fw_type = FW_TYPE_APSTA; + else if (strstr(name_ptr, "_p2p")) + fw_type = FW_TYPE_P2P; + else if (strstr(name_ptr, "_mesh")) + fw_type = FW_TYPE_MESH; + else if (strstr(name_ptr, "_ezmesh")) + fw_type = FW_TYPE_EZMESH; + else if (strstr(name_ptr, "_es")) + fw_type = FW_TYPE_ES; + else if (strstr(name_ptr, "_mfg")) + fw_type = FW_TYPE_MFG; + else if (strstr(name_ptr, "_minime")) + fw_type = FW_TYPE_MINIME; + else + fw_type = FW_TYPE_STA; +#ifdef WLEASYMESH + if (dhd->conf->fw_type == FW_TYPE_EZMESH) + fw_type = FW_TYPE_EZMESH; +#endif /* WLEASYMESH */ + + row_chip = dhd_conf_match_chip(dhd, ag_type); + if (row_chip && strlen(row_chip->chip_name)) { + strcpy(name_ptr, "fw_"); + strcat(name_ptr, row_chip->chip_name); +#ifdef BCMUSBDEV_COMPOSITE + strcat(name_ptr, "_cusb"); +#endif + if (fw_type == FW_TYPE_APSTA) + strcat(name_ptr, "_apsta.bin"); + else if (fw_type == FW_TYPE_P2P) + strcat(name_ptr, "_p2p.bin"); + else if (fw_type == FW_TYPE_MESH) + strcat(name_ptr, "_mesh.bin"); + else if (fw_type == FW_TYPE_EZMESH) + strcat(name_ptr, "_ezmesh.bin"); + else if (fw_type == FW_TYPE_ES) + strcat(name_ptr, "_es.bin"); + else if (fw_type == FW_TYPE_MFG) + strcat(name_ptr, "_mfg.bin"); + else if (fw_type == FW_TYPE_MINIME) + strcat(name_ptr, "_minime.bin"); + else + strcat(name_ptr, ".bin"); + } + +#ifdef UPDATE_MODULE_NAME + row_module = dhd_conf_match_module(dhd); + if (row_module && strlen(row_module->chip_name)) { + strcpy(name_ptr, "fw_"); + strcat(name_ptr, row_module->chip_name); +#ifdef BCMUSBDEV_COMPOSITE + strcat(name_ptr, "_cusb"); +#endif + if (fw_type == FW_TYPE_APSTA) + strcat(name_ptr, "_apsta.bin"); + else if (fw_type == FW_TYPE_P2P) + strcat(name_ptr, "_p2p.bin"); + else if (fw_type == FW_TYPE_MESH) + strcat(name_ptr, "_mesh.bin"); + else if (fw_type == FW_TYPE_EZMESH) + strcat(name_ptr, "_ezmesh.bin"); + else if (fw_type == FW_TYPE_ES) + strcat(name_ptr, "_es.bin"); + else if (fw_type == FW_TYPE_MFG) + strcat(name_ptr, "_mfg.bin"); + else if (fw_type == FW_TYPE_MINIME) + strcat(name_ptr, "_minime.bin"); + else + strcat(name_ptr, ".bin"); + } +#endif + + dhd->conf->fw_type = fw_type; + +#ifndef MINIME + if (fw_type == FW_TYPE_MINIME) + CONFIG_ERROR("***** Please enable MINIME in Makefile *****\n"); +#endif + + CONFIG_TRACE("firmware_path=%s\n", fw_path); + return ag_type; +} + +void +dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path, int ag_type) +{ +#ifdef UPDATE_MODULE_NAME + const module_name_map_t* row_module = NULL; +#endif + const chip_name_map_t* row_chip = NULL; + uint chip, chiprev; + char *name_ptr; + int i; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (clm_path[0] == '\0') { + CONFIG_MSG("clm path is null\n"); + return; + } + + /* find out the last '/' */ + i = strlen(clm_path); + while (i > 0) { + if (clm_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &clm_path[i]; + + row_chip = dhd_conf_match_chip(dhd, ag_type); + if (row_chip && strlen(row_chip->chip_name)) { + strcpy(name_ptr, "clm_"); + strcat(name_ptr, row_chip->chip_name); + strcat(name_ptr, ".blob"); + } + +#ifdef UPDATE_MODULE_NAME + row_module = dhd_conf_match_module(dhd); + if (row_module && strlen(row_module->chip_name)) { + strcpy(name_ptr, "clm_"); + strcat(name_ptr, row_module->chip_name); + strcat(name_ptr, ".blob"); + } +#endif + + CONFIG_TRACE("clm_path=%s\n", clm_path); +} + +void +dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path, int ag_type) +{ +#if defined(BCMPCIE) && 
defined(UPDATE_MODULE_NAME) + const module_name_map_t* row_module = NULL; +#endif + const chip_name_map_t* row_chip = NULL; + uint chip, chiprev; + char *name_ptr, nv_name[32]; + int i; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (nv_path[0] == '\0') { +#ifdef CONFIG_BCMDHD_NVRAM_PATH + bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1); + if (nv_path[0] == '\0') +#endif + { + CONFIG_MSG("nvram path is null\n"); + return; + } + } + + /* find out the last '/' */ + i = strlen(nv_path); + while (i > 0) { + if (nv_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &nv_path[i]; + + row_chip = dhd_conf_match_chip(dhd, ag_type); + if (row_chip && strlen(row_chip->module_name)) { + strcpy(name_ptr, "nvram_"); + strcat(name_ptr, row_chip->module_name); +#ifdef BCMUSBDEV_COMPOSITE + strcat(name_ptr, "_cusb"); +#endif + strcat(name_ptr, ".txt"); + } + strcpy(nv_name, name_ptr); + +#if defined(BCMSDIO) && defined(GET_OTP_MODULE_NAME) + if (strlen(dhd->conf->module_name)) { + strcpy(name_ptr, "nvram_"); + strcat(name_ptr, dhd->conf->module_name); + strcat(name_ptr, ".txt"); +#ifdef COMPAT_OLD_MODULE + if (dhd->conf->chip == BCM4359_CHIP_ID) { + struct file *fp; + // compatible for AP6398S and AP6398SA + fp = filp_open(nv_path, O_RDONLY, 0); + if (IS_ERR(fp)) { + strcpy(name_ptr, nv_name); + } else { + filp_close((struct file *)fp, NULL); + } + } +#endif + } +#endif + +#if defined(BCMPCIE) && defined(UPDATE_MODULE_NAME) + row_module = dhd_conf_match_module(dhd); + if (row_module && strlen(row_module->module_name)) { + strcpy(name_ptr, "nvram_"); + strcat(name_ptr, row_module->module_name); + strcat(name_ptr, ".txt"); + } +#endif + + for (i=0; iconf->nv_by_chip.count; i++) { + if (chip==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip && + chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) { + strcpy(name_ptr, dhd->conf->nv_by_chip.m_chip_nv_path_head[i].name); + break; + } + } + + CONFIG_TRACE("nvram_path=%s\n", nv_path); +} + +void +dhd_conf_copy_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path) +{ + int i; + + if (src_path[0] == '\0') { + CONFIG_MSG("src_path is null\n"); + return; + } else + strcpy(dst_path, src_path); + + /* find out the last '/' */ + i = strlen(dst_path); + while (i > 0) { + if (dst_path[i] == '/') { + i++; + break; + } + i--; + } + strcpy(&dst_path[i], dst_name); + + CONFIG_TRACE("dst_path=%s\n", dst_path); +} + +#ifdef CONFIG_PATH_AUTO_SELECT +void +dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path) +{ +#ifdef UPDATE_MODULE_NAME + const module_name_map_t* row_module = NULL; +#endif + const chip_name_map_t* row_chip = NULL; + uint chip, chiprev; + char *name_ptr; + int i; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (conf_path[0] == '\0') { + CONFIG_MSG("config path is null\n"); + return; + } + + /* find out the last '/' */ + i = strlen(conf_path); + while (i > 0) { + if (conf_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &conf_path[i]; + + row_chip = dhd_conf_match_chip(dhd, DONT_CARE); + if (row_chip && strlen(row_chip->chip_name)) { + strcpy(name_ptr, "config_"); + strcat(name_ptr, row_chip->chip_name); + strcat(name_ptr, ".txt"); + } + +#ifdef UPDATE_MODULE_NAME + row_module = dhd_conf_match_module(dhd); + if (row_module && strlen(row_module->chip_name)) { + strcpy(name_ptr, "config_"); + strcat(name_ptr, row_module->chip_name); + strcat(name_ptr, ".txt"); + } +#endif + + CONFIG_TRACE("config_path=%s\n", conf_path); +} 
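The name-by-chip helpers above all share one idiom: scan back to the character after the last '/', then strcpy/strcat the new basename in place, trusting that the surrounding buffer (MOD_PARAM_PATHLEN) is large enough. A bounded sketch of the same operation using snprintf; this is an alternative formulation, not the driver's code:

#include <stdio.h>
#include <string.h>

/* Replace the basename of path in place; sz is the total buffer size.
 * Returns 0 on success, -1 if the result would not fit. */
static int set_basename(char *path, size_t sz, const char *name)
{
	char *slash = strrchr(path, '/');
	size_t dir_len = slash ? (size_t)(slash - path) + 1 : 0;

	if (dir_len + strlen(name) + 1 > sz)
		return -1;
	snprintf(path + dir_len, sz - dir_len, "%s", name);
	return 0;
}

int main(void)
{
	char path[64] = "/vendor/etc/wifi/nvram.txt";
	set_basename(path, sizeof(path), "nvram_ap6356.txt");
	printf("%s\n", path);	/* /vendor/etc/wifi/nvram_ap6356.txt */
	return 0;
}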
+#endif
+
+#ifdef TPUT_MONITOR
+void
+dhd_conf_tput_monitor(dhd_pub_t *dhd)
+{
+	struct dhd_conf *conf = dhd->conf;
+
+	if (conf->tput_monitor_ms && conf->data_drop_mode >= FW_DROP) {
+		if (conf->tput_ts.tv_sec == 0 && conf->tput_ts.tv_nsec == 0) {
+			osl_do_gettimeofday(&conf->tput_ts);
+		} else {
+			struct osl_timespec cur_ts;
+			int32 tput_tx = 0, tput_rx = 0, tput_tx_kb = 0,
+				tput_rx_kb = 0, tput_net = 0, tput_net_kb = 0;
+			uint32 diff_ms;
+			unsigned long diff_bytes;
+			osl_do_gettimeofday(&cur_ts);
+			diff_ms = osl_do_gettimediff(&cur_ts, &conf->tput_ts)/1000;
+			if (diff_ms >= conf->tput_monitor_ms) {
+				diff_bytes = dhd->dstats.tx_bytes - conf->last_tx;
+				tput_tx = (int32)((diff_bytes/1024/1024)*8)*1000/diff_ms;
+				if (tput_tx == 0) {
+					tput_tx = (int32)(diff_bytes*8/1024/1024)*1000/diff_ms;
+					tput_tx_kb = (int32)(diff_bytes*8*1000/1024)/diff_ms;
+					tput_tx_kb = tput_tx_kb % 1000;
+				}
+				diff_bytes = dhd->dstats.rx_bytes - conf->last_rx;
+				tput_rx = (int32)((diff_bytes/1024/1024)*8)*1000/diff_ms;
+				if (tput_rx == 0) {
+					tput_rx = (int32)(diff_bytes*8/1024/1024)*1000/diff_ms;
+					tput_rx_kb = (int32)(diff_bytes*8*1000/1024)/diff_ms;
+					tput_rx_kb = tput_rx_kb % 1000;
+				}
+				diff_bytes = conf->net_len - conf->last_net_tx;
+				tput_net = (int32)((diff_bytes/1024/1024)*8)*1000/diff_ms;
+				if (tput_net == 0) {
+					tput_net = (int32)(diff_bytes*8/1024/1024)*1000/diff_ms;
+					tput_net_kb = (int32)(diff_bytes*8*1000/1024)/diff_ms;
+					tput_net_kb = tput_net_kb % 1000;
+				}
+				conf->last_tx = dhd->dstats.tx_bytes;
+				conf->last_rx = dhd->dstats.rx_bytes;
+				conf->last_net_tx = conf->net_len;
+				memcpy(&conf->tput_ts, &cur_ts, sizeof(struct osl_timespec));
+				CONFIG_TRACE("xmit=%3d.%d%d%d Mbps, tx=%3d.%d%d%d Mbps, rx=%3d.%d%d%d Mbps\n",
+					tput_net, (tput_net_kb/100)%10, (tput_net_kb/10)%10, (tput_net_kb)%10,
+					tput_tx, (tput_tx_kb/100)%10, (tput_tx_kb/10)%10, (tput_tx_kb)%10,
+					tput_rx, (tput_rx_kb/100)%10, (tput_rx_kb/10)%10, (tput_rx_kb)%10);
+			}
+		}
+	}
+}
+#endif
+
+#ifdef DHD_TPUT_PATCH
+void
+dhd_conf_set_tput_patch(dhd_pub_t *dhd)
+{
+	struct dhd_conf *conf = dhd->conf;
+
+	if (conf->tput_patch) {
+		conf->mtu = 1500;
+		conf->pktsetsum = TRUE;
+#ifdef BCMSDIO
+		conf->dhd_dpc_prio = 98;
+/* need to check if CPU can support multi-core first,
+ * so don't enable it by default.
+ */ +// conf->dpc_cpucore = 2; +// conf->rxf_cpucore = 3; +// conf->disable_proptx = 1; + conf->frameburst = 1; +#ifdef DYNAMIC_MAX_HDR_READ + conf->max_hdr_read = 256; + firstread = 256; +#endif /* DYNAMIC_MAX_HDR_READ */ + dhd_rxbound = 512; +#endif /* BCMSDIO */ +#ifdef BCMPCIE +#if defined(SET_XPS_CPUS) + conf->xps_cpus = TRUE; +#endif /* SET_XPS_CPUS */ +#if defined(SET_RPS_CPUS) + conf->rps_cpus = TRUE; +#endif /* SET_RPS_CPUS */ + conf->orphan_move = 3; + conf->flow_ring_queue_threshold = 2048; +#endif /* BCMPCIE */ +#ifdef DHDTCPACK_SUPPRESS + conf->tcpack_sup_ratio = 15; + conf->tcpack_sup_delay = 10; +#endif /* DHDTCPACK_SUPPRESS */ + } + else { + conf->mtu = 0; + conf->pktsetsum = FALSE; +#ifdef BCMSDIO + conf->dhd_dpc_prio = -1; + conf->disable_proptx = -1; + conf->frameburst = 1; +#ifdef DYNAMIC_MAX_HDR_READ + conf->max_hdr_read = 0; + firstread = 32; +#endif /* DYNAMIC_MAX_HDR_READ */ + dhd_rxbound = 128; +#endif /* BCMSDIO */ +#ifdef BCMPCIE +#if defined(SET_XPS_CPUS) + conf->xps_cpus = FALSE; +#endif /* SET_XPS_CPUS */ +#if defined(SET_RPS_CPUS) + conf->rps_cpus = FALSE; +#endif /* SET_RPS_CPUS */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + conf->orphan_move = 1; +#else + conf->orphan_move = 0; +#endif + conf->flow_ring_queue_threshold = 2048; +#endif /* BCMPCIE */ +#ifdef DHDTCPACK_SUPPRESS + conf->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; + conf->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; +#endif /* DHDTCPACK_SUPPRESS */ + } +} + +void +dhd_conf_dump_tput_patch(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + + CONFIG_TRACE("tput_patch=%d\n", conf->tput_patch); + CONFIG_TRACE("mtu=%d\n", conf->mtu); + CONFIG_TRACE("pktsetsum=%d\n", conf->pktsetsum); + CONFIG_TRACE("orphan_move=%d\n", conf->orphan_move); +#ifdef DHDTCPACK_SUPPRESS + CONFIG_TRACE("tcpack_sup_ratio=%d\n", conf->tcpack_sup_ratio); + CONFIG_TRACE("tcpack_sup_delay=%d\n", conf->tcpack_sup_delay); +#endif + +#ifdef BCMSDIO + CONFIG_TRACE("dhd_dpc_prio=%d\n", conf->dhd_dpc_prio); + CONFIG_TRACE("dhd_poll=%d\n", conf->dhd_poll); + CONFIG_TRACE("disable_proptx=%d\n", conf->disable_proptx); + CONFIG_TRACE("frameburst=%d\n", conf->frameburst); +#ifdef DYNAMIC_MAX_HDR_READ + CONFIG_TRACE("max_hdr_read=%d\n", conf->max_hdr_read); + CONFIG_TRACE("firstread=%d\n", firstread); +#endif + CONFIG_TRACE("dhd_rxbound=%d\n", dhd_rxbound); +#endif + +#ifdef BCMPCIE + CONFIG_TRACE("flow_ring_queue_threshold=%d\n", conf->flow_ring_queue_threshold); +#endif + +#if defined(SET_XPS_CPUS) + CONFIG_TRACE("xps_cpus=%d\n", conf->xps_cpus); +#endif +#if defined(SET_RPS_CPUS) + CONFIG_TRACE("rps_cpus=%d\n", conf->rps_cpus); +#endif + +} +#endif /* DHD_TPUT_PATCH */ + +void +dhd_conf_set_path_params(dhd_pub_t *dhd, char *fw_path, char *nv_path) +{ + int ag_type; + + /* External conf takes precedence if specified */ + dhd_conf_preinit(dhd); + + if (dhd->conf_path[0] == '\0') { + dhd_conf_copy_path(dhd, "config.txt", dhd->conf_path, nv_path); + } + if (dhd->clm_path[0] == '\0') { + dhd_conf_copy_path(dhd, "clm.blob", dhd->clm_path, fw_path); + } +#ifdef CONFIG_PATH_AUTO_SELECT + dhd_conf_set_conf_name_by_chip(dhd, dhd->conf_path); +#endif + + dhd_conf_read_config(dhd, dhd->conf_path); +#ifdef DHD_TPUT_PATCH + dhd_conf_dump_tput_patch(dhd); +#endif + + ag_type = dhd_conf_set_fw_name_by_chip(dhd, fw_path); + dhd_conf_set_nv_name_by_chip(dhd, nv_path, ag_type); + dhd_conf_set_clm_name_by_chip(dhd, dhd->clm_path, ag_type); +#ifdef SET_FWNV_BY_MAC + dhd_conf_set_fw_name_by_mac(dhd, fw_path); + 
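dhd_conf_tput_monitor() above prints Mbps with three decimal digits using only integer arithmetic: the whole Mbits come from diff_bytes*8/1024/1024 scaled by 1000/diff_ms, and the fractional digits are recovered from the kbit rate modulo 1000. The same computation in isolation, with made-up byte counts and interval:

#include <stdio.h>

int main(void)
{
	unsigned long diff_bytes = 37500000UL;	/* bytes moved in the window */
	unsigned diff_ms = 1000;		/* window length in ms */
	long mbps, kb;

	mbps = (long)(diff_bytes * 8 / 1024 / 1024) * 1000 / diff_ms;
	kb   = (long)(diff_bytes * 8 * 1000 / 1024) / diff_ms % 1000;
	/* same digit-by-digit formatting as the CONFIG_TRACE above */
	printf("%3ld.%ld%ld%ld Mbps\n", mbps, (kb/100)%10, (kb/10)%10, kb%10);
	return 0;
}

Note the fractional part is a kbit remainder, so it approximates the decimal digits (1 Mbit is 1024 kbit), which is exactly the trade-off the monitor makes to avoid floating point in the kernel.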
dhd_conf_set_nv_name_by_mac(dhd, nv_path); +#endif + + CONFIG_MSG("Final fw_path=%s\n", fw_path); + CONFIG_MSG("Final nv_path=%s\n", nv_path); + CONFIG_MSG("Final clm_path=%s\n", dhd->clm_path); + CONFIG_MSG("Final conf_path=%s\n", dhd->conf_path); +} + +int +dhd_conf_set_intiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name, int val, + int def, bool down) +{ + int ret = -1; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + + if (val >= def) { + if (down) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) + CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret); + } + if (cmd == WLC_SET_VAR) { + CONFIG_TRACE("set %s %d\n", name, val); + bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + CONFIG_ERROR("%s setting failed %d\n", name, ret); + } else { + CONFIG_TRACE("set %s %d %d\n", name, cmd, val); + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0) + CONFIG_ERROR("%s setting failed %d\n", name, ret); + } + } + + return ret; +} + +static int +dhd_conf_set_bufiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name, + char *buf, int len, bool down) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + s32 iovar_len; + int ret = -1; + + if (down) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, ifidx)) < 0) + CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret); + } + + if (cmd == WLC_SET_VAR) { + iovar_len = bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf)); + if (iovar_len > 0) + ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovar_len, TRUE, ifidx); + else + ret = BCME_BUFTOOSHORT; + if (ret < 0) + CONFIG_ERROR("%s setting failed %d, len=%d\n", name, ret, len); + } else { + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, ifidx)) < 0) + CONFIG_ERROR("%s setting failed %d\n", name, ret); + } + + return ret; +} + +static int +dhd_conf_iovar_buf(dhd_pub_t *dhd, int ifidx, int cmd, char *name, + char *buf, int len) +{ + char *iovbuf = NULL; + int ret = -1, iovbuf_len = WLC_IOCTL_MEDLEN; + s32 iovar_len; + + iovbuf = kmalloc(iovbuf_len, GFP_KERNEL); + if (iovbuf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", iovbuf_len); + goto exit; + } + + if (cmd == WLC_GET_VAR) { + if (bcm_mkiovar(name, buf, len, iovbuf, iovbuf_len)) { + ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovbuf_len, FALSE, ifidx); + if (!ret) { + memcpy(buf, iovbuf, len); + } else { + CONFIG_ERROR("get iovar %s failed %d\n", name, ret); + } + } else { + CONFIG_ERROR("mkiovar %s failed\n", name); + } + } else if (cmd == WLC_SET_VAR) { + iovar_len = bcm_mkiovar(name, buf, len, iovbuf, iovbuf_len); + if (iovar_len > 0) + ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovar_len, TRUE, ifidx); + else + ret = BCME_BUFTOOSHORT; + if (ret < 0) + CONFIG_ERROR("%s setting failed %d, len=%d\n", name, ret, len); + } + +exit: + if (iovbuf) + kfree(iovbuf); + return ret; +} + +static int +dhd_conf_get_iovar(dhd_pub_t *dhd, int ifidx, int cmd, char *name, + char *buf, int len) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + if (cmd == WLC_GET_VAR) { + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), FALSE, ifidx); + if (!ret) { + memcpy(buf, iovbuf, len); + } else { + CONFIG_ERROR("get iovar %s failed %d\n", name, ret); + } + } else { + CONFIG_ERROR("mkiovar %s failed\n", name); + } + } else { + ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0); + if (ret < 0) + CONFIG_ERROR("get 
iovar %s failed %d\n", name, ret); + } + + return ret; +} + +static int +dhd_conf_rsdb_mode(dhd_pub_t *dhd, char *cmd, char *buf) +{ + wl_config_t rsdb_mode_cfg = {1, 0}; + + if (buf) { + rsdb_mode_cfg.config = (int)simple_strtol(buf, NULL, 0); + CONFIG_MSG("rsdb_mode %d\n", rsdb_mode_cfg.config); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, cmd, (char *)&rsdb_mode_cfg, + sizeof(rsdb_mode_cfg), TRUE); + } + + return 0; +} + +int +dhd_conf_reg2args(dhd_pub_t *dhd, char *cmd, bool set, uint32 index, uint32 *val) +{ + char var[WLC_IOCTL_SMLEN]; + uint32 int_val, len; + void *ptr = NULL; + int ret = 0; + + len = sizeof(int_val); + int_val = htod32(index); + memset(var, 0, sizeof(var)); + memcpy(var, (char *)&int_val, sizeof(int_val)); + + if (set) { + int_val = htod32(*val); + memcpy(&var[len], (char *)&int_val, sizeof(int_val)); + len += sizeof(int_val); + dhd_conf_iovar_buf(dhd, 0, WLC_SET_VAR, cmd, var, sizeof(var)); + } else { + ret = dhd_conf_iovar_buf(dhd, 0, WLC_GET_VAR, cmd, var, sizeof(var)); + if (ret < 0) + return ret; + ptr = var; + *val = dtoh32(*(int *)ptr); + } + + return ret; +} + +static int +dhd_conf_btc_params(dhd_pub_t *dhd, char *cmd, char *buf) +{ + int ret = BCME_OK; + uint32 cur_val; + int index = 0, mask = 0, value = 0; + // btc_params=[index] [mask] [value] + // Ex: btc_params=82 0x0021 0x0001 + + if (buf) { + sscanf(buf, "%d %x %x", &index, &mask, &value); + } + + CONFIG_TRACE("%s%d mask=0x%04x value=0x%04x\n", cmd, index, mask, value); + + ret = dhd_conf_reg2args(dhd, cmd, FALSE, index, &cur_val); + CONFIG_TRACE("%s%d = 0x%04x\n", cmd, index, cur_val); + cur_val &= (~mask); + cur_val |= value; + + // need to WLC_UP before btc_params + dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE); + + CONFIG_TRACE("wl %s%d 0x%04x\n", cmd, index, cur_val); + ret = dhd_conf_reg2args(dhd, cmd, TRUE, index, &cur_val); + + ret = dhd_conf_reg2args(dhd, cmd, FALSE, index, &cur_val); + CONFIG_MSG("%s%d = 0x%04x\n", cmd, index, cur_val); + + return ret; +} + +typedef struct sub_cmd_t { + char *name; + uint16 id; /* id for the dongle f/w switch/case */ + uint16 type; /* base type of argument IOVT_XXXX */ +} sub_cmd_t; + +/* wl he sub cmd list */ +static const sub_cmd_t he_cmd_list[] = { + {"enab", WL_HE_CMD_ENAB, IOVT_UINT8}, + {"features", WL_HE_CMD_FEATURES, IOVT_UINT32}, + {"bsscolor", WL_HE_CMD_BSSCOLOR, IOVT_UINT8}, + {"partialbsscolor", WL_HE_CMD_PARTIAL_BSSCOLOR, IOVT_UINT8}, + {"cap", WL_HE_CMD_CAP, IOVT_UINT8}, + {"staid", WL_HE_CMD_STAID, IOVT_UINT16}, + {"rtsdurthresh", WL_HE_CMD_RTSDURTHRESH, IOVT_UINT16}, + {"peduration", WL_HE_CMD_PEDURATION, IOVT_UINT8}, + {"testbed_mode", WL_HE_CMD_TESTBED_MODE, IOVT_UINT32}, + {"omi_ulmu_throttle", WL_HE_CMD_OMI_ULMU_THROTTLE, IOVT_UINT16}, + {"omi_dlmu_rr_mpf_map", WL_HE_CMD_OMI_DLMU_RSD_RCM_MPF_MAP, IOVT_UINT32}, + {"ulmu_disable_policy", WL_HE_CMD_ULMU_DISABLE_POLICY, IOVT_UINT8}, + {"sr_prohibit", WL_HE_CMD_SR_PROHIBIT, IOVT_UINT8}, +}; + +static uint +wl_he_iovt2len(uint iovt) +{ + switch (iovt) { + case IOVT_BOOL: + case IOVT_INT8: + case IOVT_UINT8: + return sizeof(uint8); + case IOVT_INT16: + case IOVT_UINT16: + return sizeof(uint16); + case IOVT_INT32: + case IOVT_UINT32: + return sizeof(uint32); + default: + /* ASSERT(0); */ + return 0; + } +} + +static int +dhd_conf_he_cmd(dhd_pub_t * dhd, char *cmd, char *buf) +{ + int ret = BCME_OK, i; + bcm_xtlv_t *pxtlv = NULL; + uint8 mybuf[128]; + uint16 he_id = -1, he_len = 0, mybuf_len = sizeof(mybuf); + uint32 he_val; + const sub_cmd_t *tpl = he_cmd_list; + char sub_cmd[32], 
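dhd_conf_btc_params() above applies a "[index] [mask] [value]" line from the config file as a read-modify-write: fetch the current register word, clear the masked bits, OR in the new value, write it back, then read once more to confirm. The bit manipulation on its own, using the values from the btc_params example in the comment:

#include <stdio.h>

int main(void)
{
	unsigned cur = 0x00f3;			/* value read back from btc_params82 (made up) */
	unsigned mask = 0x0021, value = 0x0001;	/* from "btc_params=82 0x0021 0x0001" */

	cur &= ~mask;	/* clear the bits the config line owns */
	cur |= value;	/* set the requested ones */
	printf("0x%04x\n", cur);	/* 0x00d3 */
	return 0;
}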
he_val_str[10]; + + if (buf) { + sscanf(buf, "%s %s", sub_cmd, he_val_str); + } + + for (i=0; iname, sub_cmd)) { + he_id = tpl->id; + he_len = wl_he_iovt2len(tpl->type); + break; + } + } + if (he_id < 0) { + CONFIG_ERROR("No he id found for %s\n", sub_cmd); + return 0; + } + + pxtlv = (bcm_xtlv_t *)mybuf; + + if (strlen(he_val_str)) { + he_val = simple_strtol(he_val_str, NULL, 0); + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, he_id, + he_len, (uint8 *)&he_val, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + CONFIG_ERROR("failed to pack he enab, err: %s\n", bcmerrorstr(ret)); + return 0; + } + CONFIG_TRACE("he %s 0x%x\n", sub_cmd, he_val); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, cmd, (char *)&mybuf, + sizeof(mybuf), TRUE); + } + + return 0; +} + +#ifndef SUPPORT_RANDOM_MAC_SCAN +int +dhd_conf_scan_mac(dhd_pub_t * dhd, char *cmd, char *buf) +{ + uint8 buffer[WLC_IOCTL_SMLEN] = {0, }; + wl_scanmac_t *sm = NULL; + wl_scanmac_enable_t *sm_enable = NULL; + int enable = 0, len = 0, ret = -1; + char sub_cmd[32], iovbuf[WLC_IOCTL_SMLEN]; + s32 iovar_len; + + memset(sub_cmd, 0, sizeof(sub_cmd)); + if (buf) { + sscanf(buf, "%s %d", sub_cmd, &enable); + } + + if (!strcmp(sub_cmd, "enable")) { + sm = (wl_scanmac_t *)buffer; + sm_enable = (wl_scanmac_enable_t *)sm->data; + sm->len = sizeof(*sm_enable); + sm_enable->enable = enable; + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE; + CONFIG_TRACE("scanmac enable %d\n", sm_enable->enable); + + iovar_len = bcm_mkiovar("scanmac", buffer, len, iovbuf, sizeof(iovbuf)); + if (iovar_len > 0) + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0); + else + ret = BCME_BUFTOOSHORT; + if (ret == BCME_UNSUPPORTED) + CONFIG_TRACE("scanmac, UNSUPPORTED\n"); + else if (ret != BCME_OK) + CONFIG_ERROR("%s setting failed %d, len=%d\n", "scanmac", ret, len); + } + else { + CONFIG_ERROR("wrong cmd \"%s %d\"\n", sub_cmd, enable); + } + + return 0; +} +#endif + +typedef int (tpl_parse_t)(dhd_pub_t *dhd, char *name, char *buf); + +typedef struct iovar_tpl_t { + int cmd; + char *name; + tpl_parse_t *parse; +} iovar_tpl_t; + +const iovar_tpl_t iovar_tpl_list[] = { + {WLC_SET_VAR, "rsdb_mode", dhd_conf_rsdb_mode}, + {WLC_SET_VAR, "he", dhd_conf_he_cmd}, + {WLC_SET_VAR, "btc_params", dhd_conf_btc_params}, +#ifndef SUPPORT_RANDOM_MAC_SCAN + {WLC_SET_VAR, "scanmac", dhd_conf_scan_mac}, +#endif +}; + +static int iovar_tpl_parse(const iovar_tpl_t *tpl, int tpl_count, + dhd_pub_t *dhd, int cmd, char *name, char *buf) +{ + int i, ret = 0; + + /* look for a matching code in the table */ + for (i = 0; i < tpl_count; i++, tpl++) { + if (tpl->cmd == cmd && !strcmp(tpl->name, name)) + break; + } + if (i < tpl_count && tpl->parse) { + ret = tpl->parse(dhd, name, buf); + } else { + ret = -1; + } + + return ret; +} + +static bool +dhd_conf_set_wl_cmd(dhd_pub_t *dhd, char *data, bool down) +{ + int cmd, val, ret = 0, len; + char name[32], *pch, *pick_tmp, *pick_tmp2, *pdata = NULL; + + /* Process wl_preinit: + * wl_preinit=[cmd]=[val], [cmd]=[val] + * Ex: wl_preinit=86=0, mpc=0 + */ + + if (data == NULL) + return FALSE; + + len = strlen(data); + pdata = kmalloc(len+1, GFP_KERNEL); + if (pdata == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", len+1); + goto exit; + } + memset(pdata, 0, len+1); + strcpy(pdata, data); + + pick_tmp = pdata; + while (pick_tmp && (pick_tmp2 = bcmstrtok(&pick_tmp, ",", 0)) != NULL) { + char *pEnd; + pch = bcmstrtok(&pick_tmp2, "=", 0); + if (!pch) + break; + if (*pch == ' 
') { + pch++; + } + memset(name, 0 , sizeof (name)); + cmd = bcm_strtoul(pch, &pEnd, 0); + if (cmd == 0 || strlen(pEnd)) { + cmd = WLC_SET_VAR; + strcpy(name, pch); + } + pch = bcmstrtok(&pick_tmp2, ",", 0); + if (!pch) { + break; + } + ret = iovar_tpl_parse(iovar_tpl_list, ARRAY_SIZE(iovar_tpl_list), + dhd, cmd, name, pch); + if (ret) { + val = (int)simple_strtol(pch, NULL, 0); + dhd_conf_set_intiovar(dhd, 0, cmd, name, val, -1, down); + } + } + +exit: + if (pdata) + kfree(pdata); + return true; +} + +int +dhd_conf_get_band(dhd_pub_t *dhd) +{ + int band = -1; + + if (dhd && dhd->conf) + band = dhd->conf->band; + else + CONFIG_ERROR("dhd or conf is NULL\n"); + + return band; +} + +int +dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1; + + memset(cspec, 0, sizeof(wl_country_t)); + bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), + FALSE, 0)) < 0) + CONFIG_ERROR("country code getting failed %d\n", bcmerror); + + return bcmerror; +} + +int +dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1; + struct dhd_conf *conf = dhd->conf; + country_list_t *country = conf->country_head; + +#ifdef CCODE_LIST + bcmerror = dhd_ccode_map_country_list(dhd, cspec); +#endif + // **:XZ/11 => return XZ/11 if not found + // **:**/0 => return user specified ccode if not found, but set regrev 0 + while (country != NULL) { + if (!strncmp("**", country->cspec.country_abbrev, 2)) { + if (!strncmp("**", country->cspec.ccode, 2)) { + cspec->rev = 0; + bcmerror = 0; + break; + } + memcpy(cspec->ccode, country->cspec.ccode, WLC_CNTRY_BUF_SZ); + cspec->rev = country->cspec.rev; + bcmerror = 0; + break; + } else if (!strncmp(cspec->country_abbrev, + country->cspec.country_abbrev, 2)) { + memcpy(cspec->ccode, country->cspec.ccode, WLC_CNTRY_BUF_SZ); + cspec->rev = country->cspec.rev; + bcmerror = 0; + break; + } + country = country->next; + } + + if (!bcmerror) + CONFIG_MSG("%s/%d\n", cspec->ccode, cspec->rev); + + return bcmerror; +} + +int +dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1; + + memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t)); + + CONFIG_MSG("set country %s, revision %d\n", cspec->ccode, cspec->rev); + bcmerror = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "country", (char *)cspec, + sizeof(wl_country_t), FALSE); + dhd_conf_get_country(dhd, cspec); + CONFIG_MSG("Country code: %s (%s/%d)\n", + cspec->country_abbrev, cspec->ccode, cspec->rev); + + return bcmerror; +} + +int +dhd_conf_fix_country(dhd_pub_t *dhd) +{ + int bcmerror = -1; + int band; + wl_uint32_list_t *list; + u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + wl_country_t cspec; + + if (!(dhd && dhd->conf)) { + return bcmerror; + } + + memset(valid_chan_list, 0, sizeof(valid_chan_list)); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, + sizeof(valid_chan_list), FALSE, 0)) < 0) { + CONFIG_ERROR("get channels failed with %d\n", bcmerror); + } + + band = dhd_conf_get_band(dhd); + + if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G || band==-1) && + dtoh32(list->count)<11)) { + CONFIG_ERROR("bcmerror=%d, # of channels %d\n", + bcmerror, dtoh32(list->count)); + dhd_conf_map_country_list(dhd, &dhd->conf->cspec); + if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) { + strcpy(cspec.country_abbrev, 
"US"); + cspec.rev = 0; + strcpy(cspec.ccode, "US"); + dhd_conf_map_country_list(dhd, &cspec); + dhd_conf_set_country(dhd, &cspec); + } + } + + return bcmerror; +} + +bool +dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel) +{ + int i; + bool match = false; + + if (dhd && dhd->conf) { + if (dhd->conf->channels.count == 0) + return true; + for (i=0; iconf->channels.count; i++) { + if (channel == dhd->conf->channels.channel[i]) + match = true; + } + } else { + match = true; + CONFIG_ERROR("dhd or conf is NULL\n"); + } + + return match; +} + +int +dhd_conf_set_roam(dhd_pub_t *dhd) +{ + int bcmerror = -1; + struct dhd_conf *conf = dhd->conf; + uint wnm_bsstrans_resp = 0; + + if (dhd->conf->chip == BCM4359_CHIP_ID) { + dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "wnm_bsstrans_resp", + (char *)&wnm_bsstrans_resp, sizeof(wnm_bsstrans_resp)); + if (wnm_bsstrans_resp == WL_BSSTRANS_POLICY_PRODUCT) { + dhd->wbtext_policy = WL_BSSTRANS_POLICY_ROAM_ALWAYS; + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "wnm_bsstrans_resp", + WL_BSSTRANS_POLICY_ROAM_ALWAYS, 0, FALSE); + } + } + + dhd_roam_disable = conf->roam_off; + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE); + + if (!conf->roam_off || !conf->roam_off_suspend) { + CONFIG_MSG("set roam_trigger %d\n", conf->roam_trigger[0]); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER", + (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE); + + CONFIG_MSG("set roam_scan_period %d\n", conf->roam_scan_period[0]); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD", + (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE); + + CONFIG_MSG("set roam_delta %d\n", conf->roam_delta[0]); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA", + (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE); + + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "fullroamperiod", + dhd->conf->fullroamperiod, 1, FALSE); + } + + return bcmerror; +} + +void +dhd_conf_add_to_eventbuffer(struct eventmsg_buf *ev, u16 event, bool set) +{ + if (!ev || (event > WLC_E_LAST)) + return; + + if (ev->num < MAX_EVENT_BUF_NUM) { + ev->event[ev->num].type = event; + ev->event[ev->num].set = set; + ev->num++; + } else { + CONFIG_ERROR("evenbuffer doesn't support > %u events. 
Update" + " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM); + ASSERT(0); + } +} + +s32 +dhd_conf_apply_eventbuffer(dhd_pub_t *dhd, eventmsg_buf_t *ev) +{ + char eventmask[WL_EVENTING_MASK_LEN]; + int i, ret = 0; + + if (!ev || (!ev->num)) + return -EINVAL; + + /* Read event_msgs mask */ + ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs", eventmask, + sizeof(eventmask)); + if (unlikely(ret)) { + CONFIG_ERROR("Get event_msgs error (%d)\n", ret); + goto exit; + } + + /* apply the set bits */ + for (i = 0; i < ev->num; i++) { + if (ev->event[i].set) + setbit(eventmask, ev->event[i].type); + else + clrbit(eventmask, ev->event[i].type); + } + + /* Write updated Event mask */ + ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", eventmask, + sizeof(eventmask), FALSE); + if (unlikely(ret)) { + CONFIG_ERROR("Set event_msgs error (%d)\n", ret); + } + +exit: + return ret; +} + +static int +dhd_conf_enable_roam_offload(dhd_pub_t *dhd, int enable) +{ + int err; + eventmsg_buf_t ev_buf; + + if (dhd->conf->roam_off_suspend) + return 0; + + err = dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "roam_offload", enable, 0, FALSE); + if (err) + return err; + + bzero(&ev_buf, sizeof(eventmsg_buf_t)); + dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable); + dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable); + dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable); + dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable); + dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable); + dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable); + err = dhd_conf_apply_eventbuffer(dhd, &ev_buf); + + CONFIG_TRACE("roam_offload %d\n", enable); + + return err; +} + +void +dhd_conf_set_bw_cap(dhd_pub_t *dhd) +{ + struct { + u32 band; + u32 bw_cap; + } param = {0, 0}; + + if (dhd->conf->bw_cap[0] >= 0) { + memset(¶m, 0, sizeof(param)); + param.band = WLC_BAND_2G; + param.bw_cap = (uint)dhd->conf->bw_cap[0]; + CONFIG_MSG("set bw_cap 2g 0x%x\n", param.bw_cap); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)¶m, + sizeof(param), TRUE); + } + + if (dhd->conf->bw_cap[1] >= 0) { + memset(¶m, 0, sizeof(param)); + param.band = WLC_BAND_5G; + param.bw_cap = (uint)dhd->conf->bw_cap[1]; + CONFIG_MSG("set bw_cap 5g 0x%x\n", param.bw_cap); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)¶m, + sizeof(param), TRUE); + } +} + +void +dhd_conf_get_wme(dhd_pub_t *dhd, int ifidx, int mode, edcf_acparam_t *acp) +{ + int bcmerror = -1; + char iovbuf[WLC_IOCTL_SMLEN]; + edcf_acparam_t *acparam; + + bzero(iovbuf, sizeof(iovbuf)); + + /* + * Get current acparams, using buf as an input buffer. + * Return data is array of 4 ACs of wme params. 
+ */ + if (mode == 0) + bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf)); + else + bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), + FALSE, ifidx)) < 0) { + CONFIG_ERROR("wme_ac_sta getting failed %d\n", bcmerror); + return; + } + memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT); + + acparam = &acp[AC_BK]; + CONFIG_TRACE("BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP); + acparam = &acp[AC_BE]; + CONFIG_TRACE("BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP); + acparam = &acp[AC_VI]; + CONFIG_TRACE("VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP); + acparam = &acp[AC_VO]; + CONFIG_TRACE("VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP); + + return; +} + +void +dhd_conf_update_wme(dhd_pub_t *dhd, int ifidx, int mode, + edcf_acparam_t *acparam_cur, int aci) +{ + int aifsn, ecwmin, ecwmax, txop; + edcf_acparam_t *acp; + struct dhd_conf *conf = dhd->conf; + wme_param_t *wme; + + if (mode == 0) + wme = &conf->wme_sta; + else + wme = &conf->wme_ap; + + /* Default value */ + aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK; + ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK; + ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT; + txop = acparam_cur->TXOP; + + /* Modified value */ + if (wme->aifsn[aci] > 0) + aifsn = wme->aifsn[aci]; + if (wme->ecwmin[aci] > 0) + ecwmin = wme->ecwmin[aci]; + if (wme->ecwmax[aci] > 0) + ecwmax = wme->ecwmax[aci]; + if (wme->txop[aci] > 0) + txop = wme->txop[aci]; + + if (!(wme->aifsn[aci] || wme->ecwmin[aci] || + wme->ecwmax[aci] || wme->txop[aci])) + return; + + /* Update */ + acp = acparam_cur; + acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK); + acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK); + acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK)); + acp->TXOP = txop; + + CONFIG_MSG("wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + mode?"ap":"sta", acp->ACI, acp->ACI&EDCF_AIFSN_MASK, + acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acp->TXOP); + + /* + * Now use buf as an output buffer. + * Put WME acparams after "wme_ac\0" in buf. + * NOTE: only one of the four ACs can be set at a time. 
+ */ + if (mode == 0) + dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_sta", (char *)acp, + sizeof(edcf_acparam_t), FALSE); + else + dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_ap", (char *)acp, + sizeof(edcf_acparam_t), FALSE); + +} + +void +dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode) +{ + edcf_acparam_t acparam_cur[AC_COUNT]; + + if (dhd && dhd->conf) { + if (!dhd->conf->force_wme_ac) { + CONFIG_TRACE("force_wme_ac is not enabled %d\n", + dhd->conf->force_wme_ac); + return; + } + + CONFIG_TRACE("Before change:\n"); + dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur); + + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BK], AC_BK); + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BE], AC_BE); + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VI], AC_VI); + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VO], AC_VO); + + CONFIG_TRACE("After change:\n"); + dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur); + } else { + CONFIG_ERROR("dhd or conf is NULL\n"); + } + + return; +} + +void +dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int p2p_mode, int miracast_mode) +{ + struct dhd_conf *conf = dhd->conf; + mchan_params_t *mchan = conf->mchan; + bool set = true; + + while (mchan != NULL) { + set = true; + set &= (mchan->bw >= 0); + set &= ((mchan->p2p_mode == -1) | (mchan->p2p_mode == p2p_mode)); + set &= ((mchan->miracast_mode == -1) | (mchan->miracast_mode == miracast_mode)); + if (set) { + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "mchan_bw", mchan->bw, 0, FALSE); + } + mchan = mchan->next; + } + + return; +} + +#ifdef PKT_FILTER_SUPPORT +void +dhd_conf_add_pkt_filter(dhd_pub_t *dhd) +{ + int i, j; + char str[16]; +#define MACS "%02x%02x%02x%02x%02x%02x" + + /* 0) suspend_mode=1 + * Case 0: default is unicast pkt and event wake up + * Case 1: no connection in suspend + * 1) wl_suspend=3=0 + * 2) wl_resume=2=0 + * 3) insuspend=0x7 + * Case 2: keep connection in suspend, but no pkt and event wake up + * 1) dhd_master_mode=1 + * 2) pkt_filter_delete=100, 102, 103, 104, 105, 106, 107 + * 3) pkt_filter_add=141 0 0 0 0xFFFFFFFFFFFF 0x000000000000 + * 4) insuspend=0x7 + * 5) rekey_offload=1 + * Case 3: magic pkt and event wake up + * 1) dhd_master_mode=1 + * 2) pkt_filter_delete=100, 102, 103, 104, 105, 106, 107 + * 3) pkt_filter_add=141 0 0 0 0xFFFFFFFFFFFF 0x000000000000 + * 4) magic_pkt_filter_add=141 0 1 12 + * 5) rekey_offload=1 + */ + for(i=0; iconf->pkt_filter_add.count; i++) { + dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i]; + CONFIG_MSG("%s\n", dhd->pktfilter[i+dhd->pktfilter_count]); + } + dhd->pktfilter_count += i; + + if (dhd->conf->magic_pkt_filter_add) { + strcat(dhd->conf->magic_pkt_filter_add, " 0x"); + strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF"); + for (j=0; j<16; j++) + strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF"); + strcat(dhd->conf->magic_pkt_filter_add, " 0x"); + strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF"); + sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet)); + for (j=0; j<16; j++) + strncat(dhd->conf->magic_pkt_filter_add, str, 12); + dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->magic_pkt_filter_add; + dhd->pktfilter_count += 1; + } +} + +bool +dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id) +{ + int i; + + if (dhd && dhd->conf) { + for (i=0; iconf->pkt_filter_del.count; i++) { + if (id == dhd->conf->pkt_filter_del.id[i]) { + CONFIG_MSG("%d\n", dhd->conf->pkt_filter_del.id[i]); + return true; + } + } + return false; + } + return false; +} + +void 
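dhd_conf_add_pkt_filter() above turns magic_pkt_filter_add into a wake-on-LAN style pattern: the mask is seventeen groups of 0xFF (the 6-byte sync word plus sixteen 6-byte repetitions), and the pattern is six 0xFF bytes followed by the device MAC repeated sixteen times. A sketch that builds the same two hex strings; buffer sizes here are chosen just for the demo:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char mac[6] = {0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
	char mask[256] = "0x", pattern[256] = "0x", macs[13];
	int j;

	snprintf(macs, sizeof(macs), "%02x%02x%02x%02x%02x%02x",
		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	strcat(pattern, "FFFFFFFFFFFF");	/* sync word: six 0xFF bytes */
	strcat(mask, "FFFFFFFFFFFF");
	for (j = 0; j < 16; j++) {		/* MAC repeated sixteen times */
		strncat(pattern, macs, 12);
		strcat(mask, "FFFFFFFFFFFF");
	}
	printf("mask=%s\npattern=%s\n", mask, pattern);
	return 0;
}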
+dhd_conf_discard_pkt_filter(dhd_pub_t *dhd) +{ + dhd->pktfilter_count = 6; + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E"; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333"; + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; + /* Do not enable ARP to pkt filter if dhd_master_mode is false.*/ + dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL; + + /* IPv4 broadcast address XXX.XXX.XXX.255 */ + dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF"; + dhd->pktfilter_count++; + /* discard IPv4 multicast address 224.0.0.0/4 */ + dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0"; + dhd->pktfilter_count++; + /* discard IPv6 multicast address FF00::/8 */ + dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF"; + dhd->pktfilter_count++; + /* discard Netbios pkt */ + dhd->pktfilter[dhd->pktfilter_count] = "121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089"; + dhd->pktfilter_count++; + +} +#endif /* PKT_FILTER_SUPPORT */ + +int +dhd_conf_get_pm(dhd_pub_t *dhd) +{ + if (dhd && dhd->conf) { + return dhd->conf->pm; + } + return -1; +} + +int +dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len, + int *hostsleep_set, int *hostsleep_val, int *ret) +{ + if (dhd->hang_reason) { + *ret = BCME_EPERM; + goto exit; + } + if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) { + if (cmd == WLC_SET_VAR) { + char *psleep = NULL; + psleep = strstr(buf, "hostsleep"); + if (psleep) { + *hostsleep_set = 1; + memcpy(hostsleep_val, psleep+strlen("hostsleep")+1, sizeof(int)); + } + } + if (dhd->hostsleep && (!*hostsleep_set || *hostsleep_val)) { + CONFIG_TRACE("block all none hostsleep clr cmd\n"); + *ret = BCME_EPERM; + goto exit; + } else if (*hostsleep_set && *hostsleep_val) { + CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val); + dhd->hostsleep = *hostsleep_val; + if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, ON); + } + if (dhd->hostsleep == 2) { + *ret = 0; + goto exit; + } + } else if (dhd->hostsleep == 2 && !*hostsleep_val) { + CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val); + dhd->hostsleep = *hostsleep_val; + if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + *ret = 0; + goto exit; + } + } +#ifdef NO_POWER_SAVE + if (cmd == WLC_SET_PM) { + if (*(const u32*)buf != 0) { + CONFIG_TRACE("skip PM\n"); + *ret = BCME_OK; + goto exit; + } + } else if (cmd == WLC_SET_VAR) { + int cmd_len = strlen("mpc"); + if (!strncmp(buf, "mpc", cmd_len)) { + if (*((u32 *)((u8*)buf+cmd_len+1)) != 0) { + CONFIG_TRACE("skip mpc\n"); + *ret = BCME_OK; + goto exit; + } + } + } +#endif + + return 0; +exit: + return -1; +} + +void +dhd_conf_get_hostsleep(dhd_pub_t *dhd, + int hostsleep_set, int hostsleep_val, int ret) +{ + if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) { + if (hostsleep_set) { + if (hostsleep_val && ret) { + CONFIG_TRACE("reset hostsleep %d => 0\n", dhd->hostsleep); + dhd->hostsleep = 0; + if 
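dhd_conf_check_hostsleep() above intercepts WLC_SET_VAR buffers and, when the name is "hostsleep", reads the integer that immediately follows the NUL-terminated name. That works because iovar set buffers are laid out as "name\0" followed by the raw payload; the convention in isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[32];
	int set_val = 2, got;
	size_t nlen = strlen("hostsleep") + 1;

	/* build "hostsleep\0" + int payload, as bcm_mkiovar() would */
	memcpy(buf, "hostsleep", nlen);
	memcpy(buf + nlen, &set_val, sizeof(set_val));

	/* parse it back the way dhd_conf_check_hostsleep() does */
	if (strstr(buf, "hostsleep")) {
		memcpy(&got, buf + nlen, sizeof(got));
		printf("hostsleep=%d\n", got);	/* 2 */
	}
	return 0;
}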
(dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + } else if (!hostsleep_val && !ret) { + CONFIG_TRACE("set hostsleep %d => 0\n", dhd->hostsleep); + dhd->hostsleep = 0; + if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + } + } + } +} + +#ifdef WL_EXT_WOWL +#define WL_WOWL_TCPFIN (1 << 26) +typedef struct wl_wowl_pattern2 { + char cmd[4]; + wl_wowl_pattern_t wowl_pattern; +} wl_wowl_pattern2_t; +static int +dhd_conf_wowl_pattern(dhd_pub_t *dhd, int ifidx, bool add, char *data) +{ + uint buf_len = 0; + int id, type, polarity, offset; + char cmd[4]="\0", mask[128]="\0", pattern[128]="\0", mask_tmp[128]="\0", *pmask_tmp; + uint32 masksize, patternsize, pad_len = 0; + wl_wowl_pattern2_t *wowl_pattern2 = NULL; + char *mask_and_pattern; + int ret = 0, i, j, v; + + if (data) { + if (add) + strcpy(cmd, "add"); + else + strcpy(cmd, "clr"); + if (!strcmp(cmd, "clr")) { + CONFIG_TRACE("wowl_pattern clr\n"); + ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wowl_pattern", cmd, + sizeof(cmd), FALSE); + goto exit; + } + sscanf(data, "%d %d %d %d %s %s", &id, &type, &polarity, &offset, + mask_tmp, pattern); + masksize = strlen(mask_tmp) -2; + CONFIG_TRACE("0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize); + + // add pading + if (masksize % 16) + pad_len = (16 - masksize % 16); + for (i=0; icmd, cmd, sizeof(cmd)); + wowl_pattern2->wowl_pattern.id = id; + wowl_pattern2->wowl_pattern.type = 0; + wowl_pattern2->wowl_pattern.offset = offset; + mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t); + + wowl_pattern2->wowl_pattern.masksize = masksize; + ret = wl_pattern_atoh(mask, mask_and_pattern); + if (ret == -1) { + CONFIG_ERROR("rejecting mask=%s\n", mask); + goto exit; + } + + mask_and_pattern += wowl_pattern2->wowl_pattern.masksize; + wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) + + wowl_pattern2->wowl_pattern.masksize; + + wowl_pattern2->wowl_pattern.patternsize = patternsize; + ret = wl_pattern_atoh(pattern, mask_and_pattern); + if (ret == -1) { + CONFIG_ERROR("rejecting pattern=%s\n", pattern); + goto exit; + } + + CONFIG_TRACE("%s %d %s %s\n", cmd, offset, mask, pattern); + + ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wowl_pattern", + (char *)wowl_pattern2, buf_len, FALSE); + } + +exit: + if (wowl_pattern2) + kfree(wowl_pattern2); + return ret; +} + +static int +dhd_conf_wowl_wakeind(dhd_pub_t *dhd, int ifidx, bool clear) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + wl_wowl_wakeind_t *wake = NULL; + int ret = -1; + char clr[6]="clear", wakeind_str[32]="\0"; + + if (clear) { + CONFIG_TRACE("wowl_wakeind clear\n"); + ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wowl_wakeind", + clr, sizeof(clr), 0); + } else { + ret = dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "wowl_wakeind", + iovar_buf, sizeof(iovar_buf)); + if (!ret) { + wake = (wl_wowl_wakeind_t *) iovar_buf; + if (wake->ucode_wakeind & WL_WOWL_MAGIC) + strcpy(wakeind_str, "(MAGIC packet)"); + if (wake->ucode_wakeind & WL_WOWL_NET) + strcpy(wakeind_str, "(Netpattern)"); + if (wake->ucode_wakeind & WL_WOWL_DIS) + strcpy(wakeind_str, "(Disassoc/Deauth)"); + if (wake->ucode_wakeind & WL_WOWL_BCN) + strcpy(wakeind_str, "(Loss of beacon)"); + if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME) + strcpy(wakeind_str, "(TCPKA timeout)"); + if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA) + strcpy(wakeind_str, "(TCPKA data)"); + if (wake->ucode_wakeind & WL_WOWL_TCPFIN) + strcpy(wakeind_str, "(TCP 
FIN)"); + CONFIG_MSG("wakeind=0x%x %s\n", wake->ucode_wakeind, wakeind_str); + } + } + + return ret; +} +#endif + +int +dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period, + char *packet, bool bcast) +{ + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int ret = 0, len_bytes=0, buf_len=0; + char *buf = NULL, *iovar_buf = NULL; + uint8 *pdata; + + CONFIG_TRACE("id=%d, period=%d, packet=%s\n", id, period, packet); + if (period >= 0) { + buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (buf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN); + goto exit; + } + iovar_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (iovar_buf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN); + goto exit; + } + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf; + mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + mkeep_alive_pktp->keep_alive_id = id; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + mkeep_alive_pktp->period_msec = period; + if (packet && strlen(packet)) { + len_bytes = wl_pattern_atoh(packet, (char *)mkeep_alive_pktp->data); + buf_len += len_bytes; + if (bcast) { + memcpy(mkeep_alive_pktp->data, ðer_bcast, ETHER_ADDR_LEN); + } + ret = dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", + iovar_buf, WLC_IOCTL_SMLEN); + if (!ret) { + pdata = mkeep_alive_pktp->data; + memcpy(pdata+6, iovar_buf, ETHER_ADDR_LEN); + } + } + mkeep_alive_pktp->len_bytes = htod16(len_bytes); + ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "mkeep_alive", + buf, buf_len, FALSE); + } + +exit: + if (buf) + kfree(buf); + if (iovar_buf) + kfree(iovar_buf); + return ret; +} + +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable) +{ + int i, len = 0, total_len = WLC_IOCTL_SMLEN; + char *iovar_buf = NULL, *packet = NULL; + + if (!dhd->conf->garp || ifidx != 0 || !(dhd->op_mode & DHD_FLAG_STA_MODE)) + return; + + CONFIG_TRACE("enable=%d\n", enable); + + if (enable) { + iovar_buf = kmalloc(total_len, GFP_KERNEL); + if (iovar_buf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len); + goto exit; + } + packet = kmalloc(total_len, GFP_KERNEL); + if (packet == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len); + goto exit; + } + dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", iovar_buf, total_len); + + len += snprintf(packet+len, total_len, "0xffffffffffff"); + for (i=0; i>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff); + // Target Hardware Addr. + len += snprintf(packet+len, total_len, "ffffffffffff"); + // Target IP Addr. 
+ +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable) +{ + int i, len = 0, total_len = WLC_IOCTL_SMLEN; + char *iovar_buf = NULL, *packet = NULL; + + if (!dhd->conf->garp || ifidx != 0 || !(dhd->op_mode & DHD_FLAG_STA_MODE)) + return; + + CONFIG_TRACE("enable=%d\n", enable); + + if (enable) { + iovar_buf = kmalloc(total_len, GFP_KERNEL); + if (iovar_buf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len); + goto exit; + } + packet = kmalloc(total_len, GFP_KERNEL); + if (packet == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len); + goto exit; + } + dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", iovar_buf, total_len); + + len += snprintf(packet+len, total_len, "0xffffffffffff"); + for (i=0; i<ETHER_ADDR_LEN; i++) + len += snprintf(packet+len, total_len, "%02x", (uint8)iovar_buf[i]); + len += snprintf(packet+len, total_len, "08060001080006040002"); + // Sender Hardware Addr. + for (i=0; i<ETHER_ADDR_LEN; i++) + len += snprintf(packet+len, total_len, "%02x", (uint8)iovar_buf[i]); + // Sender IP Addr. + len += snprintf(packet+len, total_len, "%02x%02x%02x%02x", + ipa&0xff, (ipa>>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff); + // Target Hardware Addr. + len += snprintf(packet+len, total_len, "ffffffffffff"); + // Target IP Addr. + len += snprintf(packet+len, total_len, "%02x%02x%02x%02x", + ipa&0xff, (ipa>>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff); + len += snprintf(packet+len, total_len, "000000000000000000000000000000000000"); + } + + dhd_conf_mkeep_alive(dhd, ifidx, 0, dhd->conf->keep_alive_period, packet, TRUE); + +exit: + if (iovar_buf) + kfree(iovar_buf); + if (packet) + kfree(packet); + return; +} +#endif + +uint +dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask) +{ + uint insuspend = 0; + + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + insuspend = dhd->conf->insuspend & + (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND | + ROAM_OFFLOAD_IN_SUSPEND | WOWL_IN_SUSPEND); + } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + insuspend = dhd->conf->insuspend & + (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND | + AP_DOWN_IN_SUSPEND | AP_FILTER_IN_SUSPEND); + } + + return (insuspend & mask); +}
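+ +/* Note: insuspend is a bitmask taken from config.txt and filtered by op_mode here, so NO_EVENT_IN_SUSPEND/NO_TXDATA_IN_SUSPEND/NO_TXCTL_IN_SUSPEND/ROAM_OFFLOAD_IN_SUSPEND/WOWL_IN_SUSPEND apply to STA while AP_DOWN_IN_SUSPEND/AP_FILTER_IN_SUSPEND only apply to AP mode; a line such as "insuspend=0x7" (illustrative value) would combine the first three flags if they occupy bits 0-2. */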
+ +static void +dhd_conf_check_connection(dhd_pub_t *dhd, int ifidx, int suspend) +{ + struct dhd_conf *conf = dhd->conf; + struct ether_addr bssid; + wl_event_msg_t msg; + int pm; +#ifdef WL_CFG80211 + struct net_device *net; + unsigned long flags = 0; +#endif /* defined(WL_CFG80211) */ + + if (suspend) { + memset(&bssid, 0, ETHER_ADDR_LEN); + dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE, ifidx); + if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) + memcpy(&conf->bssid_insuspend, &bssid, ETHER_ADDR_LEN); + else + memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN); + } + else { + if (memcmp(&ether_null, &conf->bssid_insuspend, ETHER_ADDR_LEN)) { + memset(&bssid, 0, ETHER_ADDR_LEN); + dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE, ifidx); + if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) { + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", 0, 0, FALSE); + dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "send_nulldata", + (char *)&bssid, ETHER_ADDR_LEN, FALSE); + OSL_SLEEP(100); + if (conf->pm >= 0) + pm = conf->pm; + else + pm = PM_FAST; + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE); + } else { + CONFIG_TRACE("send WLC_E_DEAUTH_IND event\n"); + bzero(&msg, sizeof(wl_event_msg_t)); + msg.ifidx = ifidx; + memcpy(&msg.addr, &conf->bssid_insuspend, ETHER_ADDR_LEN); + msg.event_type = hton32(WLC_E_DEAUTH_IND); + msg.status = 0; + msg.reason = hton32(DOT11_RC_DEAUTH_LEAVING); +#ifdef WL_EVENT + wl_ext_event_send(dhd->event_params, &msg, NULL); +#endif +#ifdef WL_CFG80211 + spin_lock_irqsave(&dhd->up_lock, flags); + net = dhd_idx2net(dhd, ifidx); + if (net && dhd->up) { + wl_cfg80211_event(net, &msg, NULL); + } + spin_unlock_irqrestore(&dhd->up_lock, flags); +#endif /* defined(WL_CFG80211) */ + } + } + } +} + +#ifdef SUSPEND_EVENT +static void +dhd_conf_set_suspend_event(dhd_pub_t *dhd, int suspend) +{ + struct dhd_conf *conf = dhd->conf; + char suspend_eventmask[WL_EVENTING_MASK_LEN]; + + CONFIG_TRACE("Enter\n"); + if (suspend) { +#ifdef PROP_TXSTATUS +#if defined(BCMSDIO) || defined(BCMDBUS) + if (dhd->wlfc_enabled) { + dhd_wlfc_deinit(dhd); + conf->wlfc = TRUE; + } else { + conf->wlfc = FALSE; + } +#endif /* BCMSDIO || BCMDBUS */ +#endif /* PROP_TXSTATUS */ + dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs", + conf->resume_eventmask, sizeof(conf->resume_eventmask)); + memset(suspend_eventmask, 0, sizeof(suspend_eventmask)); + setbit(suspend_eventmask, WLC_E_ESCAN_RESULT); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", + suspend_eventmask, sizeof(suspend_eventmask), FALSE); + } + else { + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", + conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE); +#ifdef PROP_TXSTATUS +#if defined(BCMSDIO) || defined(BCMDBUS) + if (conf->wlfc) { + dhd_wlfc_init(dhd); + dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE); + } +#endif +#endif /* PROP_TXSTATUS */ + } + +} +#endif + +int +dhd_conf_suspend_resume_sta(dhd_pub_t *dhd, int ifidx, int suspend) +{ + struct dhd_conf *conf = dhd->conf; + uint insuspend = 0; + int pm; +#ifdef WL_EXT_WOWL + int i; +#endif + + insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND); + if (insuspend) + WL_MSG(dhd_ifname(dhd, ifidx), "suspend %d\n", suspend); + + if (suspend) { + dhd_conf_check_connection(dhd, ifidx, suspend); + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "roam_off", + conf->roam_off_suspend, 0, FALSE); + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "bcn_li_dtim", + conf->suspend_bcn_li_dtim, 0, FALSE); + if (conf->pm_in_suspend >= 0) + pm = conf->pm_in_suspend; + else if (conf->pm >= 0) + pm = conf->pm; + else + pm = PM_FAST; + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE); +#ifdef WL_EXT_WOWL + if ((insuspend & WOWL_IN_SUSPEND) && dhd_master_mode) { + dhd_conf_wowl_pattern(dhd, ifidx, FALSE, "clr"); + for (i=0; i<conf->pkt_filter_add.count; i++) { + dhd_conf_wowl_pattern(dhd, ifidx, TRUE, conf->pkt_filter_add.filter[i]); + } + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl", conf->wowl, 0, FALSE); + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl_activate", 1, 0, FALSE); + dhd_conf_wowl_wakeind(dhd, ifidx, TRUE); + } +#endif + } + else { + dhd_conf_get_iovar(dhd, 0, WLC_GET_PM, "WLC_GET_PM", (char *)&pm, sizeof(pm)); + CONFIG_TRACE("PM in suspend = %d\n", pm); + if (conf->pm >= 0) + pm = conf->pm; + else + pm = PM_FAST; + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE); +#ifdef WL_EXT_WOWL + if (insuspend & WOWL_IN_SUSPEND) { + dhd_conf_wowl_wakeind(dhd, ifidx, FALSE); + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl_activate", 0, 0, FALSE); + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl", 0, 0, FALSE); + dhd_conf_wowl_pattern(dhd, ifidx, FALSE, "clr"); + } +#endif + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "bcn_li_dtim", 0, 0, FALSE); + dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "roam_off", + conf->roam_off, 0, FALSE); + dhd_conf_check_connection(dhd, ifidx, suspend); + } + + return 0; +} + +#ifndef WL_EXT_IAPSTA +static int +dhd_conf_suspend_resume_ap(dhd_pub_t *dhd, int ifidx, int suspend) +{ + struct dhd_conf *conf = dhd->conf; + uint insuspend = 0; + + insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND); + if (insuspend) + WL_MSG(dhd_ifname(dhd, ifidx), "suspend %d\n", suspend); + + if (suspend) { + if (insuspend & AP_DOWN_IN_SUSPEND) { + dhd_conf_set_intiovar(dhd, ifidx, WLC_DOWN, "WLC_DOWN", 1, 0, FALSE); + } + } else { + if (insuspend & AP_DOWN_IN_SUSPEND) { + dhd_conf_set_intiovar(dhd, ifidx, WLC_UP, "WLC_UP", 0, 0, FALSE); + } + } + + return 0; +} +#endif /* !WL_EXT_IAPSTA */ + +static int +dhd_conf_suspend_resume_bus(dhd_pub_t *dhd, int suspend) +{ + uint insuspend = 0; + + insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND); + if (insuspend) + CONFIG_MSG("suspend %d\n", suspend); + + if (suspend) { + if (insuspend & (WOWL_IN_SUSPEND | NO_TXCTL_IN_SUSPEND)) { +#ifdef BCMSDIO + uint32 intstatus = 0; + int ret = 0; +#endif + int hostsleep = 2; +#ifdef WL_EXT_WOWL + hostsleep = 1; +#endif + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "hostsleep", hostsleep, 0, FALSE);
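+ /* With "hostsleep" set the dongle now treats the host as asleep; only after that is the SDIO bus itself put to sleep below, and on resume dhd_conf_suspend_resume_bus() runs first in dhd_conf_set_suspend_resume() so "hostsleep" is cleared before events, PM and roam settings are restored. */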
+#ifdef BCMSDIO + ret = dhd_bus_sleep(dhd, TRUE, &intstatus); + CONFIG_TRACE("ret = %d, intstatus = 0x%x\n", ret, intstatus); +#endif + } + } else { + if (insuspend & (WOWL_IN_SUSPEND | NO_TXCTL_IN_SUSPEND)) { + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "hostsleep", 0, 0, FALSE); + } + } + + return 0; +} + +int +dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend) +{ + struct dhd_conf *conf = dhd->conf; + uint insuspend = 0; + + insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND); + if (insuspend) + CONFIG_MSG("op_mode %d, suspend %d, suspended %d, insuspend 0x%x, suspend_mode=%d\n", + dhd->op_mode, suspend, conf->suspended, insuspend, conf->suspend_mode); + + if (conf->suspended == suspend || !dhd->up) { + return 0; + } + + if (suspend) { + if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) { + if (conf->suspend_mode == PM_NOTIFIER) +#ifdef WL_EXT_IAPSTA + wl_iapsta_wait_event_complete(dhd); +#else + wl_ext_wait_event_complete(dhd, 0); +#endif /* WL_EXT_IAPSTA */ + } + if (insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, ON); + } +#if defined(WL_CFG80211) || defined(WL_ESCAN) + if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) { + if (conf->suspend_mode == PM_NOTIFIER) + wl_ext_user_sync(dhd, 0, TRUE); + } +#endif + if (insuspend & ROAM_OFFLOAD_IN_SUSPEND) + dhd_conf_enable_roam_offload(dhd, 2); +#ifdef SUSPEND_EVENT + if (insuspend & NO_EVENT_IN_SUSPEND) { + dhd_conf_set_suspend_event(dhd, suspend); + } +#endif +#ifdef WL_EXT_IAPSTA + wl_iapsta_suspend_resume(dhd, suspend); +#else + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + dhd_conf_suspend_resume_sta(dhd, 0, suspend); + } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + dhd_conf_suspend_resume_ap(dhd, 0, suspend); + } +#endif /* WL_EXT_IAPSTA */ + dhd_conf_set_wl_cmd(dhd, conf->wl_suspend, FALSE); + dhd_conf_suspend_resume_bus(dhd, suspend); + conf->suspended = TRUE; + } + else { + dhd_conf_suspend_resume_bus(dhd, suspend); +#ifdef SUSPEND_EVENT + if (insuspend & NO_EVENT_IN_SUSPEND) { + dhd_conf_set_suspend_event(dhd, suspend); + } +#endif + if (insuspend & ROAM_OFFLOAD_IN_SUSPEND) + dhd_conf_enable_roam_offload(dhd, 0); + dhd_conf_set_wl_cmd(dhd, conf->wl_resume, FALSE); +#ifdef WL_EXT_IAPSTA + wl_iapsta_suspend_resume(dhd, suspend); +#else + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + dhd_conf_suspend_resume_sta(dhd, 0, suspend); + } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + dhd_conf_suspend_resume_ap(dhd, 0, suspend); + } +#endif /* WL_EXT_IAPSTA */ +#if defined(WL_CFG80211) || defined(WL_ESCAN) + if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) { + if (conf->suspend_mode == PM_NOTIFIER) + wl_ext_user_sync(dhd, 0, FALSE); + } +#endif + if (insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + conf->suspended = FALSE; + } + + return 0; +} + +#ifdef PROP_TXSTATUS +int +dhd_conf_get_disable_proptx(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + int disable_proptx = -1; + int fw_proptx = 0; + + /* check fw proptx priority: + * 1st: check fw support by wl cap + * 2nd: 4334/43340/43341/43241 support proptx but do not show it in wl cap, so enable it by default; + * to disable it, set disable_proptx=1 in config.txt + * 3rd: disable when proptxstatus is not supported in wl cap + */ + if (FW_SUPPORTED(dhd, proptxstatus)) { + fw_proptx = 1; + } else if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID || + conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + fw_proptx = 1; + } else { + fw_proptx = 0; + } + + /* returned disable_proptx value: + * -1: disable in STA and enable in P2P (follow original dhd settings when PROP_TXSTATUS_VSDB enabled) + * 0: depend on fw support + * 1: always disable proptx + */ + if (conf->disable_proptx == 0) { + // check fw support as well + if (fw_proptx) + disable_proptx = 0; + else + disable_proptx = 1; + } else if (conf->disable_proptx >= 1) { + disable_proptx = 1; + } else { + // check fw support as well + if (fw_proptx) + disable_proptx = -1; + else + disable_proptx = 1; + } + + CONFIG_MSG("fw_proptx=%d, disable_proptx=%d\n", fw_proptx, disable_proptx); + + return disable_proptx; +} +#endif
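+ +/* Note: config.txt as consumed by pick_config_vars() below holds one "name=value" pair per line, e.g. "dhd_msg_level=0x01", "PM=2" or "country_list=US:US/0, TW:TW/1" (example values); '#' starts a comment line, a trailing '\' continues the current variable on the next line, and tab characters are skipped while picking. */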
+ +uint +pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf, int picklen) +{ + bool findNewline, changenewline=FALSE, pick=FALSE; + int column; + uint n, pick_column=0; + + findNewline = FALSE; + column = 0; + + if (start_pos >= len) { + CONFIG_ERROR("wrong start pos\n"); + return 0; + } + + for (n = start_pos; n < len; n++) { + if (varbuf[n] == '\r') + continue; + if ((findNewline || changenewline) && varbuf[n] != '\n') + continue; + findNewline = FALSE; + if (varbuf[n] == '#') { + findNewline = TRUE; + continue; + } + if (varbuf[n] == '\\') { + changenewline = TRUE; + continue; + } + if (!changenewline && varbuf[n] == '\n') { + if (column == 0) + continue; + column = 0; + continue; + } + if (changenewline && varbuf[n] == '\n') { + changenewline = FALSE; + continue; + } + + if (column==0 && !pick) { // start to pick + pick = TRUE; + column++; + pick_column = 0; + } else { + if (pick && column==0) { // stop to pick + pick = FALSE; + break; + } else + column++; + } + if (pick) { + if (varbuf[n] == 0x9) + continue; + if (pick_column >= picklen) + break; + pickbuf[pick_column] = varbuf[n]; + pick_column++; + } + } + + return n; // return current position +} + +bool +dhd_conf_read_chiprev(dhd_pub_t *dhd, int *chip_match, + char *full_param, uint len_param) +{ + char *data = full_param+len_param, *pick_tmp, *pch; + uint chip = 0, rev = 0; + + /* Process chip, regrev: + * chip=[chipid], rev=[rev] + * Ex: chip=0x4359, rev=9 + */ + if (!strncmp("chip=", full_param, len_param)) { + chip = (int)simple_strtol(data, NULL, 0); + pick_tmp = data; + pch = bcmstrstr(pick_tmp, "rev="); + if (pch) { + rev = (int)simple_strtol(pch+strlen("rev="), NULL, 0); + } + if (chip == dhd->conf->chip && rev == dhd->conf->chiprev) + *chip_match = 1; + else + *chip_match = 0; + CONFIG_MSG("chip=0x%x, rev=%d, chip_match=%d\n", chip, rev, *chip_match); + } + + return TRUE; +} + +bool +dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + char *data = full_param+len_param; + + if (!strncmp("dhd_msg_level=", full_param, len_param)) { + dhd_msg_level = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("dhd_msg_level = 0x%X\n", dhd_msg_level); + } + else if (!strncmp("dump_msg_level=", full_param, len_param)) { + dump_msg_level = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("dump_msg_level = 0x%X\n", dump_msg_level); + } +#ifdef BCMSDIO + else if (!strncmp("sd_msglevel=", full_param, len_param)) { + sd_msglevel = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("sd_msglevel = 0x%X\n", sd_msglevel); + } +#endif +#ifdef BCMDBUS + else if (!strncmp("dbus_msglevel=", full_param, len_param)) { + dbus_msglevel = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("dbus_msglevel = 0x%X\n", dbus_msglevel); + } +#endif + else if (!strncmp("android_msg_level=", full_param, len_param)) { + android_msg_level 
= (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("android_msg_level = 0x%X\n", android_msg_level); + } + else if (!strncmp("config_msg_level=", full_param, len_param)) { + config_msg_level = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("config_msg_level = 0x%X\n", config_msg_level); + } +#ifdef WL_CFG80211 + else if (!strncmp("wl_dbg_level=", full_param, len_param)) { + wl_dbg_level = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("wl_dbg_level = 0x%X\n", wl_dbg_level); + } +#endif +#if defined(WL_WIRELESS_EXT) + else if (!strncmp("iw_msg_level=", full_param, len_param)) { + iw_msg_level = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("iw_msg_level = 0x%X\n", iw_msg_level); + } +#endif +#if defined(DHD_DEBUG) + else if (!strncmp("dhd_console_ms=", full_param, len_param)) { + dhd->dhd_console_ms = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("dhd_console_ms = %d\n", dhd->dhd_console_ms); + } +#endif + else + return false; + + return true; +} + +void +dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val) +{ + char *pick_tmp, *pch; + + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "aifsn "); + if (pch) { + wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0); + CONFIG_MSG("ac_val=%d, aifsn=%d\n", ac_val, wme->aifsn[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "ecwmin "); + if (pch) { + wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0); + CONFIG_MSG("ac_val=%d, ecwmin=%d\n", ac_val, wme->ecwmin[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "ecwmax "); + if (pch) { + wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0); + CONFIG_MSG("ac_val=%d, ecwmax=%d\n", ac_val, wme->ecwmax[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "txop "); + if (pch) { + wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0); + CONFIG_MSG("ac_val=%d, txop=0x%x\n", ac_val, wme->txop[ac_val]); + } + +} + +bool +dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + // wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e + // wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e + + if (!strncmp("force_wme_ac=", full_param, len_param)) { + conf->force_wme_ac = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("force_wme_ac = %d\n", conf->force_wme_ac); + } + else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE); + } + else if (!strncmp("wme_ac_sta_bk=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BK); + } + else if (!strncmp("wme_ac_sta_vi=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VI); + } + else if (!strncmp("wme_ac_sta_vo=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VO); + } + else if (!strncmp("wme_ac_ap_be=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BE); + } + else if (!strncmp("wme_ac_ap_bk=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BK); + } + else if (!strncmp("wme_ac_ap_vi=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VI); + } + else if (!strncmp("wme_ac_ap_vo=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VO); + } + else + return false; + + return true; +} + +#ifdef SET_FWNV_BY_MAC +bool +dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint 
len_param) +{ + int i, j; + char *pch, *pick_tmp; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + /* Process fw_by_mac: + * fw_by_mac=[fw_mac_num] \ + * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \ + * [oui1-1] [nic_start1-1] [nic_end1-1]... \ + * [oui1-n] [nic_start1-n] [nic_end1-n] \ + * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \ + * [oui2-1] [nic_start2-1] [nic_end2-1]... \ + * [oui2-n] [nic_start2-n] [nic_end2-n] \ + * Ex: fw_by_mac=2 \ + * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ + * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ + * 0x983B16 0x916157 0x916487 + */ + + if (!strncmp("fw_by_mac=", full_param, len_param)) { + dhd_conf_free_mac_list(&conf->fw_by_mac); + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, + GFP_KERNEL))) { + conf->fw_by_mac.count = 0; + CONFIG_ERROR("kmalloc failed\n"); + } + CONFIG_MSG("fw_count=%d\n", conf->fw_by_mac.count); + conf->fw_by_mac.m_mac_list_head = mac_list; + for (i=0; i<conf->fw_by_mac.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(mac_list[i].name, pch); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); + CONFIG_MSG("name=%s, mac_count=%d\n", + mac_list[i].name, mac_list[i].count); + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, + GFP_KERNEL))) { + mac_list[i].count = 0; + CONFIG_ERROR("kmalloc failed\n"); + break; + } + mac_list[i].mac = mac_range; + for (j=0; j<mac_list[i].count; j++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0); + } + } + } + else + return false; + + return true; +} + +bool +dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i, j; + char *pch, *pick_tmp; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + /* Process nv_by_mac: + * [nv_by_mac]: The same format as fw_by_mac + */ + if (!strncmp("nv_by_mac=", full_param, len_param)) { + dhd_conf_free_mac_list(&conf->nv_by_mac); + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, + GFP_KERNEL))) { + conf->nv_by_mac.count = 0; + CONFIG_ERROR("kmalloc failed\n"); + } + CONFIG_MSG("nv_count=%d\n", conf->nv_by_mac.count); + conf->nv_by_mac.m_mac_list_head = mac_list; + for (i=0; i<conf->nv_by_mac.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(mac_list[i].name, pch); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); + CONFIG_MSG("name=%s, mac_count=%d\n", + mac_list[i].name, mac_list[i].count); + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, + GFP_KERNEL))) { + mac_list[i].count = 0; + CONFIG_ERROR("kmalloc failed\n"); + break; + } + mac_list[i].mac = mac_range; + for (j=0; j<mac_list[i].count; j++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0); + } + } + } + else + return false; + + return true; +} +#endif + +bool +dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i; + char *pch, *pick_tmp; + wl_chip_nv_path_t *chip_nv_path; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + /* Process nv_by_chip: + * nv_by_chip=[nv_chip_num] \ + * [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \ + * Ex: nv_by_chip=2 \ + * 43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \ + */ + if (!strncmp("nv_by_chip=", full_param, len_param)) { + dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(chip_nv_path = kmalloc(sizeof(wl_chip_nv_path_t)*conf->nv_by_chip.count, + GFP_KERNEL))) { + conf->nv_by_chip.count = 0; + CONFIG_ERROR("kmalloc failed\n"); + } + CONFIG_MSG("nv_by_chip_count=%d\n", conf->nv_by_chip.count); + 
conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path; + for (i=0; i<conf->nv_by_chip.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(chip_nv_path[i].name, pch); + CONFIG_MSG("chip=0x%x, chiprev=%d, name=%s\n", + chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name); + } + } + else + return false; + + return true; +} + +bool +dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("roam_off=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->roam_off = 0; + else + conf->roam_off = 1; + CONFIG_MSG("roam_off = %d\n", conf->roam_off); + } + else if (!strncmp("roam_off_suspend=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->roam_off_suspend = 0; + else + conf->roam_off_suspend = 1; + CONFIG_MSG("roam_off_suspend = %d\n", conf->roam_off_suspend); + } + else if (!strncmp("roam_trigger=", full_param, len_param)) { + conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("roam_trigger = %d\n", conf->roam_trigger[0]); + } + else if (!strncmp("roam_scan_period=", full_param, len_param)) { + conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("roam_scan_period = %d\n", conf->roam_scan_period[0]); + } + else if (!strncmp("roam_delta=", full_param, len_param)) { + conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("roam_delta = %d\n", conf->roam_delta[0]); + } + else if (!strncmp("fullroamperiod=", full_param, len_param)) { + conf->fullroamperiod = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("fullroamperiod = %d\n", conf->fullroamperiod); + } else + return false; + + return true; +}
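+ +/* Illustrative config.txt lines for the roam knobs above (example values, not recommendations): "roam_off=0" keeps firmware roaming enabled, "roam_trigger=-65" roams below -65 dBm, "roam_delta=15" requires a candidate 15 dB stronger, and "roam_scan_period=10" rescans every 10 s. */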
+ +bool +dhd_conf_read_country(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + country_list_t *country_next = NULL, *country; + int i, count = 0; + char *pch, *pick_tmp, *pick_tmp2; + char *data = full_param+len_param; + uint len_data = strlen(data); + + /* Process country_list: + * country_list=[country1]:[ccode1]/[regrev1], + * [country2]:[ccode2]/[regrev2] \ + * Ex: country_list=US:US/0, TW:TW/1 + */ + if (!strncmp("ccode=", full_param, len_param)) { + len_data = min((uint)WLC_CNTRY_BUF_SZ, len_data); + memset(&conf->cspec, 0, sizeof(wl_country_t)); + memcpy(conf->cspec.country_abbrev, data, len_data); + memcpy(conf->cspec.ccode, data, len_data); + CONFIG_MSG("ccode = %s\n", conf->cspec.ccode); + } + else if (!strncmp("regrev=", full_param, len_param)) { + conf->cspec.rev = (int32)simple_strtol(data, NULL, 10); + CONFIG_MSG("regrev = %d\n", conf->cspec.rev); + } + else if (!strncmp("country_list=", full_param, len_param)) { + dhd_conf_free_country_list(conf); + pick_tmp = data; + for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) { + pch = bcmstrtok(&pick_tmp, ", ", 0); + if (!pch) + break; + country = kmalloc(sizeof(country_list_t), GFP_KERNEL); + if (!country) { + CONFIG_ERROR("kmalloc failed\n"); + break; + } + memset(country, 0, sizeof(country_list_t)); + pick_tmp2 = pch; + pch = bcmstrtok(&pick_tmp2, ":", 0); + if (!pch) { + kfree(country); + break; + } + memcpy(country->cspec.country_abbrev, pch, 2); + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + kfree(country); + break; + } + memcpy(country->cspec.ccode, pch, 2); + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + kfree(country); + break; + } + country->cspec.rev = (int32)simple_strtol(pch, NULL, 10); + count++; + if (!conf->country_head) { + conf->country_head = country; + country_next = country; + } else { + country_next->next = country; + country_next = country; + } + CONFIG_TRACE("abbrev=%s, ccode=%s, regrev=%d\n", + country->cspec.country_abbrev, country->cspec.ccode, country->cspec.rev); + } + CONFIG_MSG("%d country in list\n", count); + } + else + return false; + + return true; +} + +bool +dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i; + char *pch, *pick_tmp, *pick_tmp2; + struct dhd_conf *conf = dhd->conf; + mchan_params_t *mchan_next = NULL, *mchan; + char *data = full_param+len_param; + + /* Process mchan_bw: + * mchan_bw=[val]/[any/go/gc]/[any/source/sink] + * Ex: mchan_bw=80/go/source, 30/gc/sink + */ + if (!strncmp("mchan_bw=", full_param, len_param)) { + dhd_conf_free_mchan_list(conf); + pick_tmp = data; + for (i=0; i<MCHAN_MAX_NUM; i++) { + pch = bcmstrtok(&pick_tmp, ", ", 0); + if (!pch) + break; + mchan = kmalloc(sizeof(mchan_params_t), GFP_KERNEL); + if (!mchan) { + CONFIG_ERROR("kmalloc failed\n"); + break; + } + memset(mchan, 0, sizeof(mchan_params_t)); + pick_tmp2 = pch; + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + kfree(mchan); + break; + } + mchan->bw = (int)simple_strtol(pch, NULL, 0); + if (mchan->bw < 0 || mchan->bw > 100) { + CONFIG_ERROR("wrong bw %d\n", mchan->bw); + kfree(mchan); + break; + } + + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + kfree(mchan); + break; + } else { + if (bcmstrstr(pch, "any")) { + mchan->p2p_mode = -1; + } else if (bcmstrstr(pch, "go")) { + mchan->p2p_mode = WL_P2P_IF_GO; + } else if (bcmstrstr(pch, "gc")) { + mchan->p2p_mode = WL_P2P_IF_CLIENT; + } + } + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + kfree(mchan); + break; + } else { + if (bcmstrstr(pch, "any")) { + mchan->miracast_mode = -1; + } else if (bcmstrstr(pch, "source")) { + mchan->miracast_mode = MIRACAST_SOURCE; + } else if (bcmstrstr(pch, "sink")) { + mchan->miracast_mode = MIRACAST_SINK; + } + } + if (!conf->mchan) { + conf->mchan = mchan; + mchan_next = mchan; + } else { + mchan_next->next = mchan; + mchan_next = mchan; + } + CONFIG_TRACE("mchan_bw=%d/%d/%d\n", mchan->bw, mchan->p2p_mode, + mchan->miracast_mode); + } + } + else + return false; + + return true; +}
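+ +/* Note: the filter strings parsed below use the wl pkt_filter_add layout "id polarity type offset bitmask pattern" (cf. the entries installed by dhd_conf_discard_pkt_filter() earlier, e.g. "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF" to match broadcast frames), while pkt_filter_delete takes a plain list of filter ids. */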
+ +#ifdef PKT_FILTER_SUPPORT +bool +dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + char *pch, *pick_tmp; + int i; + + /* Process pkt filter: + * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000 + * 2) pkt_filter_delete=100, 102, 103, 104, 105 + * 3) magic_pkt_filter_add=141 0 1 12 + */ + if (!strncmp("dhd_master_mode=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_master_mode = FALSE; + else + dhd_master_mode = TRUE; + CONFIG_MSG("dhd_master_mode = %d\n", dhd_master_mode); + } + else if (!strncmp("pkt_filter_add=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i=0; + while (pch != NULL && i<DHD_CONF_FILTER_MAX) { + strcpy(&conf->pkt_filter_add.filter[i][0], pch); + CONFIG_MSG("pkt_filter_add[%d][] = %s\n", + i, &conf->pkt_filter_add.filter[i][0]); + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i++; + } + conf->pkt_filter_add.count = i; + } + else if (!strncmp("pkt_filter_delete=", full_param, len_param) || + !strncmp("pkt_filter_del=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i=0; + while (pch != NULL && i<DHD_CONF_FILTER_MAX) { + conf->pkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10); + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i++; + } + conf->pkt_filter_del.count = i; + CONFIG_MSG("pkt_filter_del id = "); + for (i=0; i<conf->pkt_filter_del.count; i++) + printk(KERN_CONT "%d ", conf->pkt_filter_del.id[i]); + printk(KERN_CONT "\n"); + } + else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) { + if (conf->magic_pkt_filter_add) { + kfree(conf->magic_pkt_filter_add); + conf->magic_pkt_filter_add = NULL; + } + if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) { + CONFIG_ERROR("kmalloc failed\n"); + } else { + memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN); + strcpy(conf->magic_pkt_filter_add, data); + CONFIG_MSG("magic_pkt_filter_add = %s\n", conf->magic_pkt_filter_add); + } + } + else + return false; + + return true; +} +#endif + +#ifdef ISAM_PREINIT +#if !defined(WL_EXT_IAPSTA) +#error "WL_EXT_IAPSTA should be defined to enable ISAM_PREINIT" +#endif /* !WL_EXT_IAPSTA */ +/* + * isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1] + * isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x] + hidden [y|n] maxassoc [x] + amode [open|shared|wpapsk|wpa2psk|wpawpa2psk] + emode [none|wep|tkip|aes|tkipaes] + key [xxxxx] + * isam_enable=ifname [wlan0|wlan1] +*/ +bool +dhd_conf_read_isam(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("isam_init=", full_param, len_param)) { + sprintf(conf->isam_init, "isam_init %s", data); + CONFIG_MSG("isam_init=%s\n", conf->isam_init); + } + else if (!strncmp("isam_config=", full_param, len_param)) { + sprintf(conf->isam_config, "isam_config %s", data); + CONFIG_MSG("isam_config=%s\n", conf->isam_config); + } + else if (!strncmp("isam_enable=", full_param, len_param)) { + sprintf(conf->isam_enable, "isam_enable %s", data); + CONFIG_MSG("isam_enable=%s\n", conf->isam_enable); + } + else + return false; + + return true; +} +#endif + +#ifdef IDHCP +bool +dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + struct ipv4_addr ipa_set; + + if (!strncmp("dhcpc_enable=", full_param, len_param)) { + conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhcpc_enable = %d\n", conf->dhcpc_enable); + } + else if (!strncmp("dhcpd_enable=", full_param, len_param)) { + conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhcpd_enable = %d\n", conf->dhcpd_enable); + } + else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_addr address setting failed\n"); + return false; + } + memcpy(&conf->dhcpd_ip_addr, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_addr = %s\n", data); + } + else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_mask address setting failed\n"); + return false; + } + memcpy(&conf->dhcpd_ip_mask, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_mask = %s\n", data); + } + else if (!strncmp("dhcpd_ip_start=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_start address setting failed\n"); + return false; + } + memcpy(&conf->dhcpd_ip_start, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_start = %s\n", data); + } + else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_end address setting failed\n"); + return false; + } + memcpy(&conf->dhcpd_ip_end, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_end = %s\n", data); + } + else + return false; + + return true; +} +#endif + +#ifdef BCMSDIO +bool +dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("dhd_doflow=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_doflow = FALSE; + else + dhd_doflow = TRUE; + CONFIG_MSG("dhd_doflow = %d\n", dhd_doflow); + } + else if (!strncmp("dhd_slpauto=", full_param, 
len_param) || + !strncmp("kso_enable=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_slpauto = FALSE; + else + dhd_slpauto = TRUE; + CONFIG_MSG("dhd_slpauto = %d\n", dhd_slpauto); + } + else if (!strncmp("use_rxchain=", full_param, len_param)) { + conf->use_rxchain = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("use_rxchain = %d\n", conf->use_rxchain); + } + else if (!strncmp("dhd_txminmax=", full_param, len_param)) { + conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhd_txminmax = %d\n", conf->dhd_txminmax); + } + else if (!strncmp("txinrx_thres=", full_param, len_param)) { + conf->txinrx_thres = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("txinrx_thres = %d\n", conf->txinrx_thres); + } +#ifdef DYNAMIC_MAX_HDR_READ + else if (!strncmp("max_hdr_read=", full_param, len_param)) { + conf->max_hdr_read = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("max_hdr_read = %d\n", conf->max_hdr_read); + } + else if (!strncmp("dhd_firstread=", full_param, len_param)) { + firstread = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhd_firstread = %d\n", firstread); + } +#endif +#if defined(HW_OOB) + else if (!strncmp("oob_enabled_later=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->oob_enabled_later = FALSE; + else + conf->oob_enabled_later = TRUE; + CONFIG_MSG("oob_enabled_later = %d\n", conf->oob_enabled_later); + } +#endif + else if (!strncmp("dpc_cpucore=", full_param, len_param)) { + conf->dpc_cpucore = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("dpc_cpucore = %d\n", conf->dpc_cpucore); + } + else if (!strncmp("rxf_cpucore=", full_param, len_param)) { + conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("rxf_cpucore = %d\n", conf->rxf_cpucore); + } + else if (!strncmp("dhd_dpc_prio=", full_param, len_param)) { + conf->dhd_dpc_prio = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhd_dpc_prio = %d\n", conf->dhd_dpc_prio); + } +#if defined(BCMSDIOH_TXGLOM) + else if (!strncmp("txglomsize=", full_param, len_param)) { + conf->txglomsize = (uint)simple_strtol(data, NULL, 10); + if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) + conf->txglomsize = SDPCM_MAXGLOM_SIZE; + CONFIG_MSG("txglomsize = %d\n", conf->txglomsize); + } + else if (!strncmp("txglom_ext=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->txglom_ext = FALSE; + else + conf->txglom_ext = TRUE; + CONFIG_MSG("txglom_ext = %d\n", conf->txglom_ext); + if (conf->txglom_ext) { + if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) + conf->txglom_bucket_size = 1680; + else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) + conf->txglom_bucket_size = 1684; + } + CONFIG_MSG("txglom_bucket_size = %d\n", conf->txglom_bucket_size); + } + else if (!strncmp("bus:rxglom=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->bus_rxglom = FALSE; + else + conf->bus_rxglom = TRUE; + CONFIG_MSG("bus:rxglom = %d\n", conf->bus_rxglom); + } + else if (!strncmp("deferred_tx_len=", full_param, len_param)) { + conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("deferred_tx_len = %d\n", conf->deferred_tx_len); + } + else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) { + conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("txctl_tmo_fix = %d\n", conf->txctl_tmo_fix); + } + else if (!strncmp("tx_max_offset=", full_param, len_param)) { + conf->tx_max_offset = (int)simple_strtol(data, NULL, 10); 
+ CONFIG_MSG("tx_max_offset = %d\n", conf->tx_max_offset); + } + else if (!strncmp("txglom_mode=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->txglom_mode = FALSE; + else + conf->txglom_mode = TRUE; + CONFIG_MSG("txglom_mode = %d\n", conf->txglom_mode); + } +#if defined(SDIO_ISR_THREAD) + else if (!strncmp("intr_extn=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->intr_extn = FALSE; + else + conf->intr_extn = TRUE; + CONFIG_MSG("intr_extn = %d\n", conf->intr_extn); + } +#endif +#ifdef BCMSDIO_RXLIM_POST + else if (!strncmp("rxlim_en=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->rxlim_en = FALSE; + else + conf->rxlim_en = TRUE; + CONFIG_MSG("rxlim_en = %d\n", conf->rxlim_en); + } +#endif +#ifdef BCMSDIO_TXSEQ_SYNC + else if (!strncmp("txseq_sync=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->txseq_sync = FALSE; + else + conf->txseq_sync = TRUE; + CONFIG_MSG("txseq_sync = %d\n", conf->txseq_sync); + } +#endif +#endif +#ifdef MINIME + else if (!strncmp("ramsize=", full_param, len_param)) { + conf->ramsize = (uint32)simple_strtol(data, NULL, 0); + CONFIG_MSG("ramsize = %d\n", conf->ramsize); + } +#endif +#ifdef BCMSDIO_INTSTATUS_WAR + else if (!strncmp("read_intr_mode=", full_param, len_param)) { + conf->read_intr_mode = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("read_intr_mode = %d\n", conf->read_intr_mode); + } +#endif + else if (!strncmp("kso_try_max=", full_param, len_param)) { + conf->kso_try_max = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("kso_try_max = %d\n", conf->kso_try_max); + } + else + return false; + + return true; +} +#endif + +#ifdef BCMPCIE +bool +dhd_conf_read_pcie_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("bus:deepsleep_disable=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->bus_deepsleep_disable = 0; + else + conf->bus_deepsleep_disable = 1; + CONFIG_MSG("bus:deepsleep_disable = %d\n", conf->bus_deepsleep_disable); + } + else if (!strncmp("flow_ring_queue_threshold=", full_param, len_param)) { + conf->flow_ring_queue_threshold = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("flow_ring_queue_threshold = %d\n", conf->flow_ring_queue_threshold); + } + else if (!strncmp("d2h_intr_control=", full_param, len_param)) { + conf->d2h_intr_control = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("d2h_intr_control = %d\n", conf->d2h_intr_control); + } + else + return false; + + return true; +} +#endif + +bool +dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("deepsleep=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->deepsleep = TRUE; + else + conf->deepsleep = FALSE; + CONFIG_MSG("deepsleep = %d\n", conf->deepsleep); + } + else if (!strncmp("PM=", full_param, len_param)) { + conf->pm = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("PM = %d\n", conf->pm); + } + else if (!strncmp("pm_in_suspend=", full_param, len_param)) { + conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("pm_in_suspend = %d\n", conf->pm_in_suspend); + } + else if (!strncmp("suspend_mode=", full_param, len_param)) { + conf->suspend_mode = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("suspend_mode = %d\n", conf->suspend_mode); + if (conf->suspend_mode == EARLY_SUSPEND) + conf->insuspend &= ~(NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND); + 
else if (conf->suspend_mode == PM_NOTIFIER || + conf->suspend_mode == SUSPEND_MODE_2) + conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND); + CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend); + } + else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) { + conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("suspend_bcn_li_dtim = %d\n", conf->suspend_bcn_li_dtim); + } + else if (!strncmp("xmit_in_suspend=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->insuspend &= ~NO_TXDATA_IN_SUSPEND; + else + conf->insuspend |= NO_TXDATA_IN_SUSPEND; + CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend); + } + else if (!strncmp("insuspend=", full_param, len_param)) { + conf->insuspend = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend); + } +#ifdef WL_EXT_WOWL + else if (!strncmp("wowl=", full_param, len_param)) { + conf->wowl = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("wowl = 0x%x\n", conf->wowl); + } +#endif + else if (!strncmp("rekey_offload=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->rekey_offload = TRUE; + else + conf->rekey_offload = FALSE; + CONFIG_MSG("rekey_offload = %d\n", conf->rekey_offload); + } + else + return false; + + return true; +} + +#ifdef GET_CUSTOM_MAC_FROM_CONFIG +int +bcm_str2hex(const char *p, char *ea, int size) +{ + int i = 0; + char *ep; + + for (;;) { + ea[i++] = (char) bcm_strtoul(p, &ep, 16); + p = ep; + if (!*p++ || i == size) + break; + } + + return (i == size); +} +#endif + +bool +dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + char *pch, *pick_tmp; + int i; +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + struct ether_addr ea_addr; + char macpad[56]; +#endif + + if (!strncmp("dhd_poll=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->dhd_poll = 0; + else + conf->dhd_poll = 1; + CONFIG_MSG("dhd_poll = %d\n", conf->dhd_poll); + } + else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) { + dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhd_watchdog_ms = %d\n", dhd_watchdog_ms); + } + else if (!strncmp("band=", full_param, len_param)) { + /* Process band: + * band=a for 5GHz only and band=b for 2.4GHz only + */ + if (!strcmp(data, "b")) + conf->band = WLC_BAND_2G; + else if (!strcmp(data, "a")) + conf->band = WLC_BAND_5G; + else + conf->band = WLC_BAND_AUTO; + CONFIG_MSG("band = %d\n", conf->band); + } + else if (!strncmp("bw_cap_2g=", full_param, len_param)) { + conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0); + CONFIG_MSG("bw_cap_2g = %d\n", conf->bw_cap[0]); + } + else if (!strncmp("bw_cap_5g=", full_param, len_param)) { + conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0); + CONFIG_MSG("bw_cap_5g = %d\n", conf->bw_cap[1]); + } + else if (!strncmp("bw_cap=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + if (pch != NULL) { + conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0); + CONFIG_MSG("bw_cap 2g = %d\n", conf->bw_cap[0]); + } + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + if (pch != NULL) { + conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0); + CONFIG_MSG("bw_cap 5g = %d\n", conf->bw_cap[1]); + } + } + else if (!strncmp("channels=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i=0; + while (pch != NULL && i<WL_NUMCHANNELS) { + conf->channels.channel[i] = (uint32)simple_strtol(pch, NULL, 10); + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + 
i++; + } + conf->channels.count = i; + CONFIG_MSG("channels = "); + for (i=0; i<conf->channels.count; i++) + printk(KERN_CONT "%d ", conf->channels.channel[i]); + printk(KERN_CONT "\n"); + } + else if (!strncmp("keep_alive_period=", full_param, len_param)) { + conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("keep_alive_period = %d\n", conf->keep_alive_period); + } +#ifdef ARP_OFFLOAD_SUPPORT + else if (!strncmp("garp=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->garp = FALSE; + else + conf->garp = TRUE; + CONFIG_MSG("garp = %d\n", conf->garp); + } +#endif + else if (!strncmp("srl=", full_param, len_param)) { + conf->srl = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("srl = %d\n", conf->srl); + } + else if (!strncmp("lrl=", full_param, len_param)) { + conf->lrl = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("lrl = %d\n", conf->lrl); + } + else if (!strncmp("bcn_timeout=", full_param, len_param)) { + conf->bcn_timeout = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("bcn_timeout = %d\n", conf->bcn_timeout); + } + else if (!strncmp("frameburst=", full_param, len_param)) { + conf->frameburst = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("frameburst = %d\n", conf->frameburst); + } + else if (!strncmp("disable_proptx=", full_param, len_param)) { + conf->disable_proptx = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("disable_proptx = %d\n", conf->disable_proptx); + } +#ifdef DHDTCPACK_SUPPRESS + else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) { + conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("tcpack_sup_mode = %d\n", conf->tcpack_sup_mode); + } + else if (!strncmp("tcpack_sup_ratio=", full_param, len_param)) { + conf->tcpack_sup_ratio = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("tcpack_sup_ratio = %d\n", conf->tcpack_sup_ratio); + } + else if (!strncmp("tcpack_sup_delay=", full_param, len_param)) { + conf->tcpack_sup_delay = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("tcpack_sup_delay = %d\n", conf->tcpack_sup_delay); + } +#endif + else if (!strncmp("pktprio8021x=", full_param, len_param)) { + conf->pktprio8021x = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("pktprio8021x = %d\n", conf->pktprio8021x); + } +#if defined(BCMSDIO) || defined(BCMPCIE) + else if (!strncmp("dhd_txbound=", full_param, len_param)) { + dhd_txbound = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhd_txbound = %d\n", dhd_txbound); + } + else if (!strncmp("dhd_rxbound=", full_param, len_param)) { + dhd_rxbound = (uint)simple_strtol(data, NULL, 10); + CONFIG_MSG("dhd_rxbound = %d\n", dhd_rxbound); + } +#endif + else if (!strncmp("orphan_move=", full_param, len_param)) { + conf->orphan_move = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("orphan_move = %d\n", conf->orphan_move); + } + else if (!strncmp("tsq=", full_param, len_param)) { + conf->tsq = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("tsq = %d\n", conf->tsq); + } + else if (!strncmp("ctrl_resched=", full_param, len_param)) { + conf->ctrl_resched = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("ctrl_resched = %d\n", conf->ctrl_resched); + } + else if (!strncmp("rxcnt_timeout=", full_param, len_param)) { + conf->rxcnt_timeout = (int)simple_strtol(data, NULL, 10); + CONFIG_MSG("rxcnt_timeout = %d\n", conf->rxcnt_timeout); + } + else if (!strncmp("in4way=", full_param, len_param)) { + conf->in4way = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("in4way = 0x%x\n", conf->in4way); + } + else if (!strncmp("war=", full_param, len_param)) { + 
conf->war = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("war = 0x%x\n", conf->war); + } + else if (!strncmp("wl_preinit=", full_param, len_param)) { + if (conf->wl_preinit) { + kfree(conf->wl_preinit); + conf->wl_preinit = NULL; + } + if (!(conf->wl_preinit = kmalloc(strlen(data)+1, GFP_KERNEL))) { + CONFIG_ERROR("kmalloc failed\n"); + } else { + memset(conf->wl_preinit, 0, strlen(data)+1); + strcpy(conf->wl_preinit, data); + CONFIG_MSG("wl_preinit = %s\n", conf->wl_preinit); + } + } + else if (!strncmp("wl_suspend=", full_param, len_param)) { + if (conf->wl_suspend) { + kfree(conf->wl_suspend); + conf->wl_suspend = NULL; + } + if (!(conf->wl_suspend = kmalloc(strlen(data)+1, GFP_KERNEL))) { + CONFIG_ERROR("kmalloc failed\n"); + } else { + memset(conf->wl_suspend, 0, strlen(data)+1); + strcpy(conf->wl_suspend, data); + CONFIG_MSG("wl_suspend = %s\n", conf->wl_suspend); + } + } + else if (!strncmp("wl_resume=", full_param, len_param)) { + if (conf->wl_resume) { + kfree(conf->wl_resume); + conf->wl_resume = NULL; + } + if (!(conf->wl_resume = kmalloc(strlen(data)+1, GFP_KERNEL))) { + CONFIG_ERROR("kmalloc failed\n"); + } else { + memset(conf->wl_resume, 0, strlen(data)+1); + strcpy(conf->wl_resume, data); + CONFIG_MSG("wl_resume = %s\n", conf->wl_resume); + } + } +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + else if (!strncmp("mac=", full_param, len_param)) { + if (!bcm_ether_atoe(data, &ea_addr)) { + CONFIG_ERROR("mac address read error\n"); + return false; + } + memcpy(&conf->hw_ether, &ea_addr, ETHER_ADDR_LEN); + CONFIG_MSG("mac = %s\n", data); + } + else if (!strncmp("macpad=", full_param, len_param)) { + if (!bcm_str2hex(data, macpad, sizeof(macpad))) { + CONFIG_ERROR("macpad address read error\n"); + return false; + } + memcpy(&conf->hw_ether[ETHER_ADDR_LEN], macpad, sizeof(macpad)); + if (config_msg_level & CONFIG_TRACE_LEVEL) { + CONFIG_MSG("macpad =\n"); + for (i=0; i<sizeof(macpad); i++) { + printk(KERN_CONT "0x%02x, ", conf->hw_ether[ETHER_ADDR_LEN+i]); + if ((i+1)%8 == 0) + printk(KERN_CONT "\n"); + } + } + } +#endif
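+ +/* Illustrative config.txt entries for the two settings above (example values): "mac=00:90:4c:c5:12:38" sets the custom MAC via bcm_ether_atoe(), and "macpad=4e0000004e..." supplies the 56 pad bytes stored after the MAC in hw_ether as a plain hex string consumed by bcm_str2hex(). */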
+#ifdef PROPTX_MAXCOUNT + else if (!strncmp("proptx_maxcnt_2g=", full_param, len_param)) { + conf->proptx_maxcnt_2g = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("proptx_maxcnt_2g = %d\n", conf->proptx_maxcnt_2g); + } + else if (!strncmp("proptx_maxcnt_5g=", full_param, len_param)) { + conf->proptx_maxcnt_5g = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("proptx_maxcnt_5g = %d\n", conf->proptx_maxcnt_5g); + } +#endif +#ifdef TPUT_MONITOR + else if (!strncmp("data_drop_mode=", full_param, len_param)) { + conf->data_drop_mode = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("data_drop_mode = %d\n", conf->data_drop_mode); + } + else if (!strncmp("tput_monitor_ms=", full_param, len_param)) { + conf->tput_monitor_ms = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("tput_monitor_ms = %d\n", conf->tput_monitor_ms); + } +#ifdef BCMSDIO + else if (!strncmp("doflow_tput_thresh=", full_param, len_param)) { + conf->doflow_tput_thresh = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("doflow_tput_thresh = %d\n", conf->doflow_tput_thresh); + if (conf->doflow_tput_thresh > 0) + conf->tput_monitor_ms = 1000; + } +#endif +#endif +#ifdef SCAN_SUPPRESS + else if (!strncmp("scan_intput=", full_param, len_param)) { + conf->scan_intput = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("scan_intput = 0x%x\n", conf->scan_intput); + } + else if (!strncmp("scan_tput_thresh=", full_param, len_param)) { + conf->scan_tput_thresh = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("scan_tput_thresh = %d\n", conf->scan_tput_thresh); + if (conf->scan_tput_thresh > 0) + conf->tput_monitor_ms = 1000; + } + else if (!strncmp("scan_busy_tmo=", full_param, len_param)) { + conf->scan_busy_tmo = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("scan_busy_tmo = %d\n", conf->scan_busy_tmo); + } + else if (!strncmp("scan_busy_thresh=", full_param, len_param)) { + conf->scan_busy_thresh = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("scan_busy_thresh = %d\n", conf->scan_busy_thresh); + } +#endif +#ifdef DHD_TPUT_PATCH + else if (!strncmp("tput_patch=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->tput_patch = TRUE; + else + conf->tput_patch = FALSE; + CONFIG_MSG("tput_patch = %d\n", conf->tput_patch); + dhd_conf_set_tput_patch(dhd); + } + else if (!strncmp("mtu=", full_param, len_param)) { + conf->mtu = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("mtu = %d\n", conf->mtu); + } + else if (!strncmp("pktsetsum=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->pktsetsum = TRUE; + else + conf->pktsetsum = FALSE; + CONFIG_MSG("pktsetsum = %d\n", conf->pktsetsum); + } +#endif +#ifdef SET_XPS_CPUS + else if (!strncmp("xps_cpus=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->xps_cpus = TRUE; + else + conf->xps_cpus = FALSE; + CONFIG_MSG("xps_cpus = %d\n", conf->xps_cpus); + } +#endif +#ifdef SET_RPS_CPUS + else if (!strncmp("rps_cpus=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->rps_cpus = TRUE; + else + conf->rps_cpus = FALSE; + CONFIG_MSG("rps_cpus = %d\n", conf->rps_cpus); + } +#endif +#ifdef CHECK_DOWNLOAD_FW + else if (!strncmp("fwchk=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->fwchk = TRUE; + else + conf->fwchk = FALSE; + CONFIG_MSG("fwchk = %d\n", conf->fwchk); + } +#endif + else if (!strncmp("vndr_ie_assocreq=", full_param, len_param)) { + if (conf->vndr_ie_assocreq) { + kfree(conf->vndr_ie_assocreq); + conf->vndr_ie_assocreq = NULL; + } + if (!(conf->vndr_ie_assocreq = kmalloc(strlen(data)+1, GFP_KERNEL))) { + CONFIG_ERROR("kmalloc failed\n"); + } else { + memset(conf->vndr_ie_assocreq, 0, strlen(data)+1); + strcpy(conf->vndr_ie_assocreq, data); + CONFIG_MSG("vndr_ie_assocreq = %s\n", conf->vndr_ie_assocreq); + } + } + else + return false; + + return true; +}
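+ +/* Note: dhd_conf_read_config() below is the top-level reader: it loads the whole file into memblock, extracts one "name=value" token at a time with pick_config_vars(), and then walks the dhd_conf_read_* dispatch chain; "chip="/"rev=" lines flip chip_match so that following lines are only applied to the matching chip revision. */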
+ +int +dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) +{ + int bcmerror = -1, chip_match = -1; + uint len = 0, start_pos=0, end_pos=0; + void *image = NULL; + char *memblock = NULL; + char *bufp, *pick = NULL, *pch; + bool conf_file_exists; + uint len_param; + + conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0')); + if (!conf_file_exists) { + CONFIG_MSG("config path %s\n", conf_path); + return (0); + } + + if (conf_file_exists) { + image = dhd_os_open_image1(dhd, conf_path); + if (image == NULL) { + CONFIG_MSG("Ignore config file %s\n", conf_path); + goto err; + } + } + + memblock = MALLOC(dhd->osh, MAXSZ_CONFIG); + if (memblock == NULL) { + CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_CONFIG); + goto err; + } + + pick = MALLOC(dhd->osh, MAXSZ_BUF); + if (!pick) { + CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_BUF); + goto err; + } + + /* Read variables */ + if (conf_file_exists) { + len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image); + } + if (len > 0 && len < MAXSZ_CONFIG) { + bufp = (char *)memblock; + bufp[len] = 0; + + while (start_pos < len) { + memset(pick, 0, MAXSZ_BUF); + end_pos = pick_config_vars(bufp, len, start_pos, pick, MAXSZ_BUF); + if (end_pos - start_pos >= MAXSZ_BUF) + CONFIG_ERROR("out of buf to read MAXSZ_BUF=%d\n", + MAXSZ_BUF); + start_pos = end_pos; + pch = strchr(pick, '='); + if (pch != NULL) { + len_param = pch-pick+1; + if (len_param == strlen(pick)) { + CONFIG_ERROR("not a valid parameter %s\n", pick); + continue; + } + } else { + CONFIG_ERROR("not a valid parameter %s\n", pick); + continue; + } + + dhd_conf_read_chiprev(dhd, &chip_match, pick, len_param); + if (!chip_match) + continue; + + if (dhd_conf_read_log_level(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_roam_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param)) + continue; +#ifdef SET_FWNV_BY_MAC + else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param)) + continue; +#endif + else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_country(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_mchan_params(dhd, pick, len_param)) + continue; +#ifdef PKT_FILTER_SUPPORT + else if (dhd_conf_read_pkt_filter(dhd, pick, len_param)) + continue; +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ISAM_PREINIT + else if (dhd_conf_read_isam(dhd, pick, len_param)) + continue; +#endif /* ISAM_PREINIT */ +#ifdef IDHCP + else if (dhd_conf_read_dhcp_params(dhd, pick, len_param)) + continue; +#endif /* IDHCP */ +#ifdef BCMSDIO + else if (dhd_conf_read_sdio_params(dhd, pick, len_param)) + continue; +#endif /* BCMSDIO */ +#ifdef BCMPCIE + else if (dhd_conf_read_pcie_params(dhd, pick, len_param)) + continue; +#endif /* BCMPCIE */ + else if (dhd_conf_read_pm_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_others(dhd, pick, len_param)) + continue; + else + continue; + } + + bcmerror = 0; + } else { + CONFIG_ERROR("error reading config file: %d\n", len); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (pick) + MFREE(dhd->osh, pick, MAXSZ_BUF); + + if (memblock) + MFREE(dhd->osh, memblock, MAXSZ_CONFIG); + + if (image) + dhd_os_close_image1(dhd, image); + + return bcmerror; +} + +#if defined(BCMSDIO) || defined(BCMPCIE) +void +dhd_conf_set_devid(dhd_pub_t *dhd) +{ + wifi_adapter_info_t *adapter = NULL; + uint32 bus_type = -1; + uint32 bus_num = -1; + uint32 slot_num = -1; + + dhd_bus_get_ids(dhd->bus, &bus_type, &bus_num, &slot_num); + adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); + if (adapter) { +#if defined(BCMSDIO) + dhd->conf->devid = adapter->sdio_func->device; +#endif +#if defined(BCMPCIE) + dhd->conf->devid = adapter->pci_dev->device; + dhd->conf->svid = adapter->pci_dev->subsystem_vendor; + dhd->conf->ssid = adapter->pci_dev->subsystem_device; +#endif + } else { + CONFIG_ERROR("can't find adapter\n"); + } + + return; +} +#endif + +int +dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev) +{ +#if defined(BCMSDIO) || defined(BCMPCIE) + dhd_conf_set_devid(dhd); +#endif + dhd->conf->chip = chip; + dhd->conf->chiprev = chiprev; + +#if defined(BCMSDIO) + CONFIG_MSG("devid=0x%x, chip=0x%x, chiprev=%d\n", + dhd->conf->devid, dhd->conf->chip, dhd->conf->chiprev); +#endif +#if defined(BCMPCIE) + CONFIG_MSG("devid=0x%x, chip=0x%x, chiprev=%d, svid=0x%04x, ssid=0x%04x\n", + dhd->conf->devid, dhd->conf->chip, dhd->conf->chiprev, + dhd->conf->svid, dhd->conf->ssid); +#endif +#if defined(BCMDBUS) + CONFIG_MSG("chip=0x%x, chiprev=%d\n", dhd->conf->chip, dhd->conf->chiprev); +#endif + + return 0; +} + +uint +dhd_conf_get_chip(void *context) +{ + dhd_pub_t *dhd = context; + + if (dhd && dhd->conf) + return dhd->conf->chip; + return 0; +} + +uint 
+dhd_conf_get_chiprev(void *context) +{ + dhd_pub_t *dhd = context; + + if (dhd && dhd->conf) + return dhd->conf->chiprev; + return 0; +} + +#ifdef BCMSDIO +void +dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) +{ + struct dhd_conf *conf = dhd->conf; + + if (enable) { +#if defined(BCMSDIOH_TXGLOM_EXT) + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_mode = SDPCM_TXGLOM_CPY; + } +#endif + // other parameters set in preinit or config.txt + if (conf->txglom_ext) + CONFIG_MSG("txglom_ext=%d, txglom_bucket_size=%d\n", + conf->txglom_ext, conf->txglom_bucket_size); + CONFIG_MSG("txglom_mode=%s\n", + conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy"); + CONFIG_MSG("txglomsize=%d, deferred_tx_len=%d\n", + conf->txglomsize, conf->deferred_tx_len); + CONFIG_MSG("txinrx_thres=%d, dhd_txminmax=%d\n", + conf->txinrx_thres, conf->dhd_txminmax); + CONFIG_MSG("tx_max_offset=%d, txctl_tmo_fix=%d\n", + conf->tx_max_offset, conf->txctl_tmo_fix); + } else { + // clear txglom parameters + conf->txglom_ext = FALSE; + conf->txglom_bucket_size = 0; + conf->txglomsize = 0; + conf->deferred_tx_len = 0; + } + +} +#endif + +#ifdef UPDATE_MODULE_NAME +#if defined(BCMSDIO) || defined(BCMPCIE) +static void +dhd_conf_compat_vht(dhd_pub_t *dhd) +{ + char vht_features[] = "vht_features=0"; + + CONFIG_TRACE("Enter\n"); + + dhd_conf_set_wl_cmd(dhd, vht_features, TRUE); +} +#endif + +int +dhd_conf_compat_func(dhd_pub_t *dhd) +{ + const module_name_map_t* row = NULL; + + row = dhd_conf_match_module(dhd); + if (row && row->compat_func) { + row->compat_func(dhd); + } + + return 0; +} +#endif + +void +dhd_conf_postinit_ioctls(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + char wl_preinit[] = "assoc_retry_max=10"; +#ifdef NO_POWER_SAVE + char wl_no_power_save[] = "mpc=0, 86=0"; + dhd_conf_set_wl_cmd(dhd, wl_no_power_save, FALSE); +#endif + + dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE); + dhd_conf_map_country_list(dhd, &conf->cspec); + dhd_conf_set_country(dhd, &conf->cspec); + dhd_conf_fix_country(dhd); + dhd_conf_get_country(dhd, &dhd->dhd_cspec); + + dhd_conf_set_intiovar(dhd, 0, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE); + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE); + dhd_conf_set_intiovar(dhd, 0, WLC_SET_PM, "WLC_SET_PM", conf->pm, 0, FALSE); + dhd_conf_set_intiovar(dhd, 0, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, FALSE); + dhd_conf_set_intiovar(dhd, 0, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE); + dhd_conf_set_bw_cap(dhd); + dhd_conf_set_roam(dhd); + +#if defined(BCMPCIE) + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "bus:deepsleep_disable", + conf->bus_deepsleep_disable, 0, FALSE); +#endif /* defined(BCMPCIE) */ + +#ifdef IDHCP + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable, + 0, FALSE); + if (conf->dhcpd_enable >= 0) { + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_addr", + (char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_mask", + (char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_start", + (char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), FALSE); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_end", + (char *)&conf->dhcpd_ip_end, 
sizeof(conf->dhcpd_ip_end), FALSE); + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "dhcpd_enable", + conf->dhcpd_enable, 0, FALSE); + } +#endif + dhd_conf_set_intiovar(dhd, 0, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", + conf->frameburst, 0, FALSE); + + dhd_conf_set_wl_cmd(dhd, wl_preinit, TRUE); +#if defined(BCMSDIO) + if (conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID || + conf->chip == BCM4375_CHIP_ID) { + char ampdu_mpdu[] = "ampdu_mpdu=32"; + dhd_conf_set_wl_cmd(dhd, ampdu_mpdu, TRUE); + } else { + char ampdu_mpdu[] = "ampdu_mpdu=16"; + dhd_conf_set_wl_cmd(dhd, ampdu_mpdu, TRUE); + } +#endif + +#ifdef DHD_TPUT_PATCH + if (dhd->conf->mtu) + dhd_change_mtu(dhd, dhd->conf->mtu, 0); +#endif + if (conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4371_CHIP_ID || conf->chip == BCM4359_CHIP_ID || + conf->chip == BCM43569_CHIP_ID || + conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID || + conf->chip == BCM4375_CHIP_ID) { + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "txbf", 1, 0, FALSE); + } + if (conf->chip == BCM4375_CHIP_ID) { + char he_cmd[] = "110=1, nmode=1, vhtmode=1, he=enab 1"; + dhd_conf_set_wl_cmd(dhd, he_cmd, TRUE); + } + if (conf->chip == BCM43752_CHIP_ID || conf->chip == BCM4359_CHIP_ID) { + char txack_alive[] = "txack_alive=0"; + dhd_conf_set_wl_cmd(dhd, txack_alive, TRUE); + } +#if defined(WLEASYMESH) + if (conf->fw_type == FW_TYPE_EZMESH) { + if (conf->chip == BCM4359_CHIP_ID) { + char ezmesh[] = "mbss=1, rsdb_mode=0"; + dhd_conf_set_wl_cmd(dhd, ezmesh, TRUE); + } else { + char ezmesh[] = "mbss=1"; + dhd_conf_set_wl_cmd(dhd, ezmesh, TRUE); + } + } +#endif /* WLEASYMESH */ +#if defined(BCMSDIO) + if (conf->devid == BCM43751_CHIP_ID) +#elif defined(BCMPCIE) + if (conf->devid == BCM43751_D11AX_ID) +#endif + { + if (FW_SUPPORTED(dhd, mbo)) { + char he_features[] = "he=enab 0,he=features 0"; + dhd_conf_set_wl_cmd(dhd, he_features, TRUE); + } + } +#ifdef UPDATE_MODULE_NAME + dhd_conf_compat_func(dhd); +#endif +#ifndef SUPPORT_RANDOM_MAC_SCAN + { + char scanmac[] = "scanmac=enable 0"; + dhd_conf_set_wl_cmd(dhd, scanmac, TRUE); + } +#endif + dhd_conf_set_wl_cmd(dhd, conf->wl_preinit, TRUE); + +#ifndef WL_CFG80211 + dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE); +#endif + +} + +int +dhd_conf_preinit(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + + CONFIG_TRACE("Enter\n"); + +#ifdef SET_FWNV_BY_MAC + dhd_conf_free_mac_list(&conf->fw_by_mac); + dhd_conf_free_mac_list(&conf->nv_by_mac); +#endif + dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); + dhd_conf_free_country_list(conf); + dhd_conf_free_mchan_list(conf); +#ifdef PKT_FILTER_SUPPORT + if (conf->magic_pkt_filter_add) { + kfree(conf->magic_pkt_filter_add); + conf->magic_pkt_filter_add = NULL; + } +#endif + if (conf->wl_preinit) { + kfree(conf->wl_preinit); + conf->wl_preinit = NULL; + } + if (conf->wl_suspend) { + kfree(conf->wl_suspend); + conf->wl_suspend = NULL; + } + if (conf->wl_resume) { + kfree(conf->wl_resume); + conf->wl_resume = NULL; + } + if (conf->vndr_ie_assocreq) { + kfree(conf->vndr_ie_assocreq); + conf->vndr_ie_assocreq = NULL; + } + conf->band = -1; + memset(&conf->bw_cap, -1, sizeof(conf->bw_cap)); + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { + strcpy(conf->cspec.country_abbrev, "ALL"); + strcpy(conf->cspec.ccode, "ALL"); + conf->cspec.rev = 0; + } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + 
conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID || + conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID || + conf->chip == BCM4375_CHIP_ID) { + strcpy(conf->cspec.country_abbrev, "CN"); + strcpy(conf->cspec.ccode, "CN"); + conf->cspec.rev = 38; + } else { + strcpy(conf->cspec.country_abbrev, "CN"); + strcpy(conf->cspec.ccode, "CN"); + conf->cspec.rev = 0; + } + memset(&conf->channels, 0, sizeof(wl_channel_list_t)); + conf->roam_off = 1; + conf->roam_off_suspend = 1; + conf->roam_trigger[0] = -65; + conf->roam_trigger[1] = WLC_BAND_ALL; + conf->roam_scan_period[0] = 10; + conf->roam_scan_period[1] = WLC_BAND_ALL; + conf->roam_delta[0] = 10; + conf->roam_delta[1] = WLC_BAND_ALL; + conf->fullroamperiod = 20; + conf->keep_alive_period = 30000; +#ifdef ARP_OFFLOAD_SUPPORT + conf->garp = FALSE; +#endif + conf->force_wme_ac = 0; + memset(&conf->wme_sta, 0, sizeof(wme_param_t)); + memset(&conf->wme_ap, 0, sizeof(wme_param_t)); +#ifdef PKT_FILTER_SUPPORT + memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t)); + memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t)); +#endif + conf->srl = -1; + conf->lrl = -1; + conf->bcn_timeout = 16; + conf->disable_proptx = -1; + conf->dhd_poll = -1; +#ifdef BCMSDIO + conf->use_rxchain = 0; + conf->bus_rxglom = TRUE; + conf->txglom_ext = FALSE; + conf->tx_max_offset = 0; + conf->txglomsize = SDPCM_DEFGLOM_SIZE; + conf->txctl_tmo_fix = 300; + conf->txglom_mode = SDPCM_TXGLOM_CPY; + conf->deferred_tx_len = 0; + conf->dhd_txminmax = 1; + conf->txinrx_thres = -1; +#ifdef MINIME + conf->ramsize = 0x80000; +#endif +#if defined(SDIO_ISR_THREAD) + conf->intr_extn = FALSE; +#endif +#ifdef BCMSDIO_RXLIM_POST + conf->rxlim_en = FALSE; +#endif +#ifdef BCMSDIO_TXSEQ_SYNC + conf->txseq_sync = FALSE; +#endif +#if defined(HW_OOB) + conf->oob_enabled_later = FALSE; +#endif +#ifdef BCMSDIO_INTSTATUS_WAR + conf->read_intr_mode = 0; +#endif + conf->kso_try_max = 0; +#ifdef KSO_DEBUG + memset(&conf->kso_try_array, 0, sizeof(conf->kso_try_array)); +#endif +#endif +#ifdef BCMPCIE + conf->bus_deepsleep_disable = 1; + conf->flow_ring_queue_threshold = FLOW_RING_QUEUE_THRESHOLD; + conf->d2h_intr_method = -1; + conf->d2h_intr_control = -1; +#endif + conf->dpc_cpucore = -1; + conf->rxf_cpucore = -1; + conf->dhd_dpc_prio = -1; + conf->frameburst = -1; + conf->deepsleep = FALSE; + conf->pm = -1; + conf->pm_in_suspend = -1; + conf->insuspend = 0; + conf->suspend_mode = PM_NOTIFIER; + conf->suspend_bcn_li_dtim = -1; + conf->rekey_offload = FALSE; +#ifdef WL_EXT_WOWL + dhd_master_mode = TRUE; + conf->wowl = WL_WOWL_NET|WL_WOWL_DIS|WL_WOWL_BCN; + conf->insuspend |= (WOWL_IN_SUSPEND | NO_TXDATA_IN_SUSPEND); +#endif + if (conf->suspend_mode == PM_NOTIFIER || conf->suspend_mode == SUSPEND_MODE_2) + conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND); + conf->suspended = FALSE; + memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN); +#ifdef SUSPEND_EVENT + memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask)); + conf->wlfc = FALSE; +#endif +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + memset(&conf->hw_ether, 0, sizeof(conf->hw_ether)); +#endif +#ifdef IDHCP + conf->dhcpc_enable = -1; + conf->dhcpd_enable = -1; +#endif + conf->orphan_move = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + conf->tsq = 10; +#else + conf->tsq = 0; +#endif +#ifdef DHDTCPACK_SUPPRESS +#ifdef BCMPCIE + conf->tcpack_sup_mode = TCPACK_SUP_HOLD; +#else + conf->tcpack_sup_mode = TCPACK_SUP_OFF; +#endif + conf->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; + 
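+ /* Editor's note, not part of the original patch: the suppression-mode
+ * default is bus-specific (TCPACK_SUP_HOLD on PCIe, TCPACK_SUP_OFF
+ * otherwise); for the SDIO chips listed further below it is switched
+ * to TCPACK_SUP_REPLACE. */
+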
conf->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; +#endif + conf->pktprio8021x = -1; + conf->ctrl_resched = 2; + conf->rxcnt_timeout = 3; + conf->in4way = STA_NO_SCAN_IN4WAY | STA_WAIT_DISCONNECTED | + AP_WAIT_STA_RECONNECT; + if (conf->chip == BCM43752_CHIP_ID) + conf->war = SET_CHAN_INCONN | FW_REINIT_INCSA | FW_REINIT_EMPTY_SCAN; + else + conf->war = 0; +#ifdef P2P_AP_CONCURRENT + conf->war |= P2P_AP_MAC_CONFLICT; +#endif +#ifdef PROPTX_MAXCOUNT + conf->proptx_maxcnt_2g = 46; + conf->proptx_maxcnt_5g = WL_TXSTATUS_FREERUNCTR_MASK; +#endif /* PROPTX_MAXCOUNT */ +#ifdef TPUT_MONITOR + conf->data_drop_mode = NO_DATA_DROP; + conf->tput_monitor_ms = 0; +#ifdef BCMSDIO + if (conf->chip == BCM43752_CHIP_ID || conf->chip == BCM4375_CHIP_ID) + conf->doflow_tput_thresh = 200; + else + conf->doflow_tput_thresh = 9999; + if (conf->doflow_tput_thresh > 0 && conf->doflow_tput_thresh < 9999) + conf->tput_monitor_ms = 1000; +#endif +#endif +#ifdef SCAN_SUPPRESS + conf->scan_intput = SCAN_CURCHAN_INTPUT; + conf->scan_busy_thresh = 10; + conf->scan_busy_tmo = 120; + if (conf->chip == BCM43752_CHIP_ID || conf->chip == BCM4375_CHIP_ID) + conf->scan_tput_thresh = 100; + else + conf->scan_tput_thresh = 9999; + if (conf->scan_tput_thresh > 0 && conf->scan_tput_thresh < 9999) + conf->tput_monitor_ms = 1000; +#endif +#ifdef DHD_TPUT_PATCH + conf->tput_patch = FALSE; + conf->mtu = 0; + conf->pktsetsum = FALSE; +#endif +#ifdef SET_XPS_CPUS + conf->xps_cpus = FALSE; +#endif +#ifdef SET_RPS_CPUS + conf->rps_cpus = FALSE; +#endif +#ifdef CHECK_DOWNLOAD_FW + conf->fwchk = FALSE; +#endif +#ifdef ISAM_PREINIT + memset(conf->isam_init, 0, sizeof(conf->isam_init)); + memset(conf->isam_config, 0, sizeof(conf->isam_config)); + memset(conf->isam_enable, 0, sizeof(conf->isam_enable)); +#endif +#if defined(SDIO_ISR_THREAD) + if (conf->chip == BCM43012_CHIP_ID || + conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID || + conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4371_CHIP_ID || conf->chip == BCM4359_CHIP_ID || + conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID || + conf->chip == BCM4375_CHIP_ID) { + conf->intr_extn = TRUE; + } +#endif + if ((conf->chip == BCM43430_CHIP_ID && conf->chiprev == 2) || + conf->chip == BCM43012_CHIP_ID || + conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID || + conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4371_CHIP_ID || + conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID || + conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID || + conf->chip == BCM4375_CHIP_ID) { +#ifdef DHDTCPACK_SUPPRESS +#ifdef BCMSDIO + conf->tcpack_sup_mode = TCPACK_SUP_REPLACE; +#endif +#endif +#if defined(BCMSDIO) || defined(BCMPCIE) + dhd_rxbound = 128; + dhd_txbound = 64; +#endif + conf->frameburst = 1; +#ifdef BCMSDIO + conf->dhd_txminmax = -1; + conf->txinrx_thres = 128; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + conf->orphan_move = 1; +#else + conf->orphan_move = 0; +#endif + } +#ifdef DHD_TPUT_PATCH + if (conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID || + conf->chip == BCM4375_CHIP_ID) { + conf->tput_patch = TRUE; + dhd_conf_set_tput_patch(dhd); + } +#endif +
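+ /* Editor's note (not part of the original patch): the BCMSDIOH_TXGLOM_EXT
+ * block below pins the tx-glom geometry per chip family: 43362/4330 use
+ * 1680-byte buckets with txglomsize 6, while 4334/43340/43341/4324 use
+ * 1684-byte buckets with txglomsize 16; txglomsize is then clamped to
+ * SDPCM_MAXGLOM_SIZE. */
+#ifdef BCMSDIO
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ if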
(conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_ext = TRUE; + } else { + conf->txglom_ext = FALSE; + } + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { + conf->txglom_bucket_size = 1680; // fixed value, don't change + conf->txglomsize = 6; + } + if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID || + conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_bucket_size = 1684; // fixed value, don't change + conf->txglomsize = 16; + } +#endif + if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) + conf->txglomsize = SDPCM_MAXGLOM_SIZE; +#endif + init_waitqueue_head(&conf->event_complete); + +#if defined(BCMSDIO) && defined(CUSTOMER_HW_AMLOGIC) + if (conf->chip != BCM43752_CHIP_ID) { + dhd_slpauto = FALSE; + } + conf->txglom_mode = SDPCM_TXGLOM_CPY; + conf->rekey_offload = TRUE; +#endif + + return 0; +} + +int +dhd_conf_reset(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + +#ifdef SET_FWNV_BY_MAC + dhd_conf_free_mac_list(&conf->fw_by_mac); + dhd_conf_free_mac_list(&conf->nv_by_mac); +#endif + dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); + dhd_conf_free_country_list(conf); + dhd_conf_free_mchan_list(conf); +#ifdef PKT_FILTER_SUPPORT + if (conf->magic_pkt_filter_add) { + kfree(conf->magic_pkt_filter_add); + conf->magic_pkt_filter_add = NULL; + } +#endif + if (conf->wl_preinit) { + kfree(conf->wl_preinit); + conf->wl_preinit = NULL; + } + if (conf->wl_suspend) { + kfree(conf->wl_suspend); + conf->wl_suspend = NULL; + } + if (conf->wl_resume) { + kfree(conf->wl_resume); + conf->wl_resume = NULL; + } + if (conf->vndr_ie_assocreq) { + kfree(conf->vndr_ie_assocreq); + conf->vndr_ie_assocreq = NULL; + } + memset(conf, 0, sizeof(dhd_conf_t)); + return 0; +} + +int +dhd_conf_attach(dhd_pub_t *dhd) +{ + dhd_conf_t *conf; + + CONFIG_TRACE("Enter\n"); + + if (dhd->conf != NULL) { + CONFIG_MSG("config is attached before!\n"); + return 0; + } + /* Allocate private bus interface state */ + if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) { + CONFIG_ERROR("MALLOC failed\n"); + goto fail; + } + memset(conf, 0, sizeof(dhd_conf_t)); + + dhd->conf = conf; + + return 0; + +fail: + if (conf != NULL) + MFREE(dhd->osh, conf, sizeof(dhd_conf_t)); + return BCME_NOMEM; +} + +void +dhd_conf_detach(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + + CONFIG_TRACE("Enter\n"); + if (dhd->conf) { +#ifdef SET_FWNV_BY_MAC + dhd_conf_free_mac_list(&conf->fw_by_mac); + dhd_conf_free_mac_list(&conf->nv_by_mac); +#endif + dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); + dhd_conf_free_country_list(conf); + dhd_conf_free_mchan_list(conf); +#ifdef PKT_FILTER_SUPPORT + if (conf->magic_pkt_filter_add) { + kfree(conf->magic_pkt_filter_add); + conf->magic_pkt_filter_add = NULL; + } +#endif + if (conf->wl_preinit) { + kfree(conf->wl_preinit); + conf->wl_preinit = NULL; + } + if (conf->wl_suspend) { + kfree(conf->wl_suspend); + conf->wl_suspend = NULL; + } + if (conf->wl_resume) { + kfree(conf->wl_resume); + conf->wl_resume = NULL; + } + if (conf->vndr_ie_assocreq) { + kfree(conf->vndr_ie_assocreq); + conf->vndr_ie_assocreq = NULL; + } + MFREE(dhd->osh, conf, sizeof(dhd_conf_t)); + } + dhd->conf = NULL; +} diff --git a/bcmdhd.101.10.361.x/dhd_config.h b/bcmdhd.101.10.361.x/dhd_config.h new file mode 100755 index 0000000..306c2b8 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_config.h 
@@ -0,0 +1,441 @@ + +#ifndef _dhd_config_ +#define _dhd_config_ + +#include +#include +#include +#include +#include +#include <802.11.h> + +#define FW_TYPE_STA 0 +#define FW_TYPE_APSTA 1 +#define FW_TYPE_P2P 2 +#define FW_TYPE_MESH 3 +#define FW_TYPE_EZMESH 4 +#define FW_TYPE_ES 5 +#define FW_TYPE_MFG 6 +#define FW_TYPE_MINIME 7 +#define FW_TYPE_G 0 +#define FW_TYPE_AG 1 + +#define FW_PATH_AUTO_SELECT 1 +#define CONFIG_PATH_AUTO_SELECT +extern char firmware_path[MOD_PARAM_PATHLEN]; +#if defined(BCMSDIO) || defined(BCMPCIE) +extern uint dhd_rxbound; +extern uint dhd_txbound; +#endif +#ifdef BCMSDIO +#define TXGLOM_RECV_OFFSET 8 +extern uint dhd_doflow; +extern uint dhd_slpauto; +#endif + +#ifdef SET_FWNV_BY_MAC +typedef struct wl_mac_range { + uint32 oui; + uint32 nic_start; + uint32 nic_end; +} wl_mac_range_t; + +typedef struct wl_mac_list { + int count; + wl_mac_range_t *mac; + char name[MOD_PARAM_PATHLEN]; +} wl_mac_list_t; + +typedef struct wl_mac_list_ctrl { + int count; + struct wl_mac_list *m_mac_list_head; +} wl_mac_list_ctrl_t; +#endif + +typedef struct wl_chip_nv_path { + uint chip; + uint chiprev; + char name[MOD_PARAM_PATHLEN]; +} wl_chip_nv_path_t; + +typedef struct wl_chip_nv_path_list_ctrl { + int count; + struct wl_chip_nv_path *m_chip_nv_path_head; +} wl_chip_nv_path_list_ctrl_t; + +typedef struct wl_channel_list { + uint32 count; + uint32 channel[WL_NUMCHANNELS]; +} wl_channel_list_t; + +typedef struct wmes_param { + int aifsn[AC_COUNT]; + int ecwmin[AC_COUNT]; + int ecwmax[AC_COUNT]; + int txop[AC_COUNT]; +} wme_param_t; + +#ifdef PKT_FILTER_SUPPORT +#define DHD_CONF_FILTER_MAX 8 +#define PKT_FILTER_LEN 300 +#define MAGIC_PKT_FILTER_LEN 450 +typedef struct conf_pkt_filter_add { + uint32 count; + char filter[DHD_CONF_FILTER_MAX][PKT_FILTER_LEN]; +} conf_pkt_filter_add_t; + +typedef struct conf_pkt_filter_del { + uint32 count; + uint32 id[DHD_CONF_FILTER_MAX]; +} conf_pkt_filter_del_t; +#endif + +#define CONFIG_COUNTRY_LIST_SIZE 500 +typedef struct country_list { + struct country_list *next; + wl_country_t cspec; +} country_list_t; + +/* mchan_params */ +#define MCHAN_MAX_NUM 4 +#define MIRACAST_SOURCE 1 +#define MIRACAST_SINK 2 +typedef struct mchan_params { + struct mchan_params *next; + int bw; + int p2p_mode; + int miracast_mode; +} mchan_params_t; + +#ifdef SCAN_SUPPRESS +enum scan_intput_flags { + NO_SCAN_INTPUT = (1 << (0)), + SCAN_CURCHAN_INTPUT = (1 << (1)), + SCAN_LIGHT_INTPUT = (1 << (2)), +}; +#endif + +enum war_flags { + SET_CHAN_INCONN = (1 << (0)), + FW_REINIT_INCSA = (1 << (1)), + FW_REINIT_EMPTY_SCAN = (1 << (2)), + P2P_AP_MAC_CONFLICT = (1 << (3)), + RESEND_EAPOL_PKT = (1 << (4)) +}; + +enum in4way_flags { + STA_NO_SCAN_IN4WAY = (1 << (0)), + STA_NO_BTC_IN4WAY = (1 << (1)), + STA_WAIT_DISCONNECTED = (1 << (2)), + AP_WAIT_STA_RECONNECT = (1 << (3)), + STA_FAKE_SCAN_IN_CONNECT = (1 << (4)), + STA_REASSOC_RETRY = (1 << (5)), +}; + +enum in_suspend_flags { + NO_EVENT_IN_SUSPEND = (1 << (0)), + NO_TXDATA_IN_SUSPEND = (1 << (1)), + NO_TXCTL_IN_SUSPEND = (1 << (2)), + AP_DOWN_IN_SUSPEND = (1 << (3)), + ROAM_OFFLOAD_IN_SUSPEND = (1 << (4)), + AP_FILTER_IN_SUSPEND = (1 << (5)), + WOWL_IN_SUSPEND = (1 << (6)), + ALL_IN_SUSPEND = 0xFFFFFFFF, +}; + +enum in_suspend_mode { + EARLY_SUSPEND = 0, + PM_NOTIFIER = 1, + SUSPEND_MODE_2 = 2 +}; + +#ifdef TPUT_MONITOR +enum data_drop_mode { + NO_DATA_DROP = -1, + FW_DROP = 0, + TXPKT_DROP = 1, + XMIT_DROP = 2 +}; +#endif + +enum conn_state { + CONN_STATE_IDLE = 0, + CONN_STATE_CONNECTING = 1, + CONN_STATE_AUTH_SAE_M1 = 2, + 
CONN_STATE_AUTH_SAE_M2 = 3, + CONN_STATE_AUTH_SAE_M3 = 4, + CONN_STATE_AUTH_SAE_M4 = 5, + CONN_STATE_REQID = 6, + CONN_STATE_RSPID = 7, + CONN_STATE_WSC_START = 8, + CONN_STATE_WPS_M1 = 9, + CONN_STATE_WPS_M2 = 10, + CONN_STATE_WPS_M3 = 11, + CONN_STATE_WPS_M4 = 12, + CONN_STATE_WPS_M5 = 13, + CONN_STATE_WPS_M6 = 14, + CONN_STATE_WPS_M7 = 15, + CONN_STATE_WPS_M8 = 16, + CONN_STATE_WSC_DONE = 17, + CONN_STATE_4WAY_M1 = 18, + CONN_STATE_4WAY_M2 = 19, + CONN_STATE_4WAY_M3 = 20, + CONN_STATE_4WAY_M4 = 21, + CONN_STATE_CONNECTED = 22, + CONN_STATE_GROUPKEY_M1 = 23, + CONN_STATE_GROUPKEY_M2 = 24, +}; + +typedef struct dhd_conf { + uint devid; + uint chip; + uint chiprev; +#if defined(BCMPCIE) + uint svid; + uint ssid; +#endif +#ifdef GET_OTP_MODULE_NAME + char module_name[16]; +#endif + struct ether_addr otp_mac; + int fw_type; +#ifdef SET_FWNV_BY_MAC + wl_mac_list_ctrl_t fw_by_mac; + wl_mac_list_ctrl_t nv_by_mac; +#endif + wl_chip_nv_path_list_ctrl_t nv_by_chip; + country_list_t *country_head; + int band; + int bw_cap[2]; + wl_country_t cspec; + wl_channel_list_t channels; + uint roam_off; + uint roam_off_suspend; + int roam_trigger[2]; + int roam_scan_period[2]; + int roam_delta[2]; + int fullroamperiod; + uint keep_alive_period; + bool rekey_offload; +#ifdef ARP_OFFLOAD_SUPPORT + bool garp; +#endif + int force_wme_ac; + wme_param_t wme_sta; + wme_param_t wme_ap; +#ifdef PKT_FILTER_SUPPORT + conf_pkt_filter_add_t pkt_filter_add; + conf_pkt_filter_del_t pkt_filter_del; + char *magic_pkt_filter_add; +#endif + int srl; + int lrl; + uint bcn_timeout; + int disable_proptx; + int dhd_poll; +#ifdef BCMSDIO + int use_rxchain; + bool bus_rxglom; + bool txglom_ext; /* Only for 43362/4330/43340/43341/43241 */ + /* terence 20161011: + 1) conf->tx_max_offset = 1 to fix credit issue in adaptivity testing + 2) conf->tx_max_offset = 1 causes UDP Tx to stop working when rxglom is supported, + but this does not happen in sw txglom + */ + int tx_max_offset; + uint txglomsize; + int txctl_tmo_fix; + bool txglom_mode; + uint deferred_tx_len; + /* txglom_bucket_size: + * 43362/4330: 1680 + * 43340/43341/43241: 1684 + */ + int txglom_bucket_size; + int txinrx_thres; + int dhd_txminmax; // -1=DATABUFCNT(bus) +#ifdef DYNAMIC_MAX_HDR_READ + int max_hdr_read; +#endif + bool oob_enabled_later; +#ifdef MINIME + uint32 ramsize; +#endif +#if defined(SDIO_ISR_THREAD) + bool intr_extn; +#endif +#ifdef BCMSDIO_RXLIM_POST + bool rxlim_en; +#endif +#ifdef BCMSDIO_TXSEQ_SYNC + bool txseq_sync; +#endif +#ifdef BCMSDIO_INTSTATUS_WAR + uint read_intr_mode; +#endif + int kso_try_max; +#ifdef KSO_DEBUG + uint kso_try_array[10]; +#endif +#endif +#ifdef BCMPCIE + int bus_deepsleep_disable; + int flow_ring_queue_threshold; + int d2h_intr_method; + int d2h_intr_control; +#endif + int dpc_cpucore; + int rxf_cpucore; + int dhd_dpc_prio; + int frameburst; + bool deepsleep; + int pm; + int pm_in_suspend; + int suspend_mode; + int suspend_bcn_li_dtim; +#ifdef DHDTCPACK_SUPPRESS + uint8 tcpack_sup_mode; + uint32 tcpack_sup_ratio; + uint32 tcpack_sup_delay; +#endif + int pktprio8021x; + uint insuspend; + bool suspended; + struct ether_addr bssid_insuspend; +#ifdef SUSPEND_EVENT + char resume_eventmask[WL_EVENTING_MASK_LEN]; + bool wlfc; +#endif +#ifdef IDHCP + int dhcpc_enable; + int dhcpd_enable; + struct ipv4_addr dhcpd_ip_addr; + struct ipv4_addr dhcpd_ip_mask; + struct ipv4_addr dhcpd_ip_start; + struct ipv4_addr dhcpd_ip_end; +#endif +#ifdef ISAM_PREINIT + char isam_init[50]; + char isam_config[300]; + char isam_enable[50]; +#endif + int ctrl_resched; +
uint rxcnt_timeout; + mchan_params_t *mchan; + char *wl_preinit; + char *wl_suspend; + char *wl_resume; + int tsq; + int orphan_move; + uint in4way; + uint war; +#ifdef WL_EXT_WOWL + uint wowl; +#endif +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + char hw_ether[62]; +#endif + wait_queue_head_t event_complete; +#ifdef PROPTX_MAXCOUNT + int proptx_maxcnt_2g; + int proptx_maxcnt_5g; +#endif /* PROPTX_MAXCOUNT */ +#ifdef TPUT_MONITOR + int data_drop_mode; + unsigned long net_len; + uint tput_monitor_ms; + struct osl_timespec tput_ts; + unsigned long last_tx; + unsigned long last_rx; + unsigned long last_net_tx; +#ifdef BCMSDIO + int32 doflow_tput_thresh; +#endif +#endif +#ifdef SCAN_SUPPRESS + uint scan_intput; + int scan_busy_thresh; + int scan_busy_tmo; + int32 scan_tput_thresh; +#endif +#ifdef DHD_TPUT_PATCH + bool tput_patch; + int mtu; + bool pktsetsum; +#endif +#ifdef SET_XPS_CPUS + bool xps_cpus; +#endif +#ifdef SET_RPS_CPUS + bool rps_cpus; +#endif +#ifdef CHECK_DOWNLOAD_FW + bool fwchk; +#endif + char *vndr_ie_assocreq; +} dhd_conf_t; + +#ifdef BCMSDIO +void dhd_conf_get_otp(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih); +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih); +#endif +void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable); +#endif +#ifdef BCMPCIE +int dhd_conf_get_otp(dhd_pub_t *dhd, si_t *sih); +bool dhd_conf_legacy_msi_chip(dhd_pub_t *dhd); +#endif +void dhd_conf_set_path_params(dhd_pub_t *dhd, char *fw_path, char *nv_path); +int dhd_conf_set_intiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name, + int val, int def, bool down); +int dhd_conf_get_band(dhd_pub_t *dhd); +int dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec); +int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec); +int dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec); +#ifdef CCODE_LIST +int dhd_ccode_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec); +#endif +int dhd_conf_fix_country(dhd_pub_t *dhd); +bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel); +void dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode); +void dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int go, int source); +void dhd_conf_add_pkt_filter(dhd_pub_t *dhd); +bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id); +void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd); +int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path); +int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev); +uint dhd_conf_get_chip(void *context); +uint dhd_conf_get_chiprev(void *context); +int dhd_conf_get_pm(dhd_pub_t *dhd); +int dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len, + int *hostsleep_set, int *hostsleep_val, int *ret); +void dhd_conf_get_hostsleep(dhd_pub_t *dhd, + int hostsleep_set, int hostsleep_val, int ret); +int dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period, + char *packet, bool bcast); +#ifdef ARP_OFFLOAD_SUPPORT +void dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable); +#endif +#ifdef PROP_TXSTATUS +int dhd_conf_get_disable_proptx(dhd_pub_t *dhd); +#endif +#ifdef TPUT_MONITOR +void dhd_conf_tput_monitor(dhd_pub_t *dhd); +#endif +uint dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask); +int dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend); +void dhd_conf_postinit_ioctls(dhd_pub_t *dhd); +int dhd_conf_preinit(dhd_pub_t *dhd); +int dhd_conf_reset(dhd_pub_t *dhd); +int dhd_conf_attach(dhd_pub_t *dhd); +void dhd_conf_detach(dhd_pub_t *dhd);
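+/* Editor's sketch (not part of the original patch): typical lifetime of this
+ * API as used by the driver: dhd_conf_attach() allocates dhd->conf,
+ * dhd_conf_preinit() loads the per-chip defaults, dhd_conf_read_config()
+ * overlays key=value pairs from config.txt, dhd_conf_postinit_ioctls()
+ * pushes the result to the firmware, and dhd_conf_detach() frees it all. */
+void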
*dhd_get_pub(struct net_device *dev); +int wl_pattern_atoh(char *src, char *dst); +int dhd_conf_suspend_resume_sta(dhd_pub_t *dhd, int ifidx, int suspend); +/* Add to adjust 802.1x priority */ +extern void pktset8021xprio(void *pkt, int prio); +#ifdef BCMSDIO +extern int dhd_bus_sleep(dhd_pub_t *dhdp, bool sleep, uint32 *intstatus); +#endif +#endif /* _dhd_config_ */ diff --git a/bcmdhd.101.10.361.x/dhd_csi.c b/bcmdhd.101.10.361.x/dhd_csi.c new file mode 100755 index 0000000..5cacb9c --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_csi.c @@ -0,0 +1,219 @@ +/* + * Broadcom Dongle Host Driver (DHD) + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_csi.c 606280 2015-12-15 05:28:25Z $ + */ +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) + +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) + +#define NULL_ADDR "\x00\x00\x00\x00\x00\x00" + +int +dhd_csi_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int ret = BCME_OK; + bool is_new = TRUE; + cfr_dump_data_t *p_event; + cfr_dump_list_t *ptr, *next, *new; + + NULL_CHECK(dhd, "dhd is NULL", ret); + + DHD_TRACE(("Enter %s\n", __FUNCTION__)); + + if (!event_data) { + DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__)); + return -EINVAL; + } + p_event = (cfr_dump_data_t *)event_data; + + /* check if this addr already exists */ + if (!list_empty(&dhd->csi_list)) { + list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) { + if (bcmp(&ptr->entry.header.peer_macaddr, &p_event->header.peer_macaddr, + ETHER_ADDR_LEN) == 0) { + int pos = 0, dump_len = 0, remain = 0; + is_new = FALSE; + DHD_INFO(("CSI data exists\n")); + if (p_event->header.status == 0) { + bcopy(&p_event->header, &ptr->entry.header, sizeof(cfr_dump_header_t)); + dump_len = p_event->header.cfr_dump_length; + if (dump_len < MAX_EVENT_SIZE) { + bcopy(&p_event->data, &ptr->entry.data, dump_len); + } else { + /* for big csi data */ + uint8 *p = (uint8 *)&ptr->entry.data; + remain = p_event->header.remain_length; + if (remain) { + pos = dump_len - remain - MAX_EVENT_SIZE; + p += pos; + bcopy(&p_event->data, p, MAX_EVENT_SIZE); + } + /* copy rest of csi data */ + else { + pos = dump_len - (dump_len %
MAX_EVENT_SIZE); + p += pos; + bcopy(&p_event->data, p, (dump_len % MAX_EVENT_SIZE)); + } + } + return BCME_OK; + } + } + } + } + if (is_new) { + if (dhd->csi_count < MAX_CSI_NUM) { + new = (cfr_dump_list_t *)MALLOCZ(dhd->osh, sizeof(cfr_dump_list_t)); + if (!new) { + DHD_ERROR(("Malloc cfr dump list error\n")); + return BCME_NOMEM; + } + bcopy(&p_event->header, &new->entry.header, sizeof(cfr_dump_header_t)); + DHD_INFO(("New entry data size %d\n", p_event->header.cfr_dump_length)); + /* for big csi data */ + if (p_event->header.remain_length) { + DHD_TRACE(("remain %d\n", p_event->header.remain_length)); + bcopy(&p_event->data, &new->entry.data, MAX_EVENT_SIZE); + } + else + bcopy(&p_event->data, &new->entry.data, p_event->header.cfr_dump_length); + INIT_LIST_HEAD(&(new->list)); + list_add_tail(&(new->list), &dhd->csi_list); + dhd->csi_count++; + } + else { + DHD_TRACE(("Over maximum CSI number %d, skip it\n", MAX_CSI_NUM)); + } + } + return ret; +} + +int +dhd_csi_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + INIT_LIST_HEAD(&dhd->csi_list); + dhd->csi_count = 0; + + return err; +} + +int +dhd_csi_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + cfr_dump_list_t *ptr, *next; + + NULL_CHECK(dhd, "dhd is NULL", err); + + if (!list_empty(&dhd->csi_list)) { + list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) { + list_del(&ptr->list); + MFREE(dhd->osh, ptr, sizeof(cfr_dump_list_t)); + } + } + return err; +} + +void +dhd_csi_clean_list(dhd_pub_t *dhd) +{ + cfr_dump_list_t *ptr, *next; + int num = 0; + + if (!dhd) { + DHD_ERROR(("NULL POINTER: %s\n", __FUNCTION__)); + return; + } + + if (!list_empty(&dhd->csi_list)) { + list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) { + if (0 == ptr->entry.header.remain_length) { + list_del(&ptr->list); + num++; + MFREE(dhd->osh, ptr, sizeof(cfr_dump_list_t)); + } + } + } + dhd->csi_count = 0; + DHD_TRACE(("Cleaned up %d records\n", num)); +} + +int +dhd_csi_dump_list(dhd_pub_t *dhd, char *buf) +{ + int ret = BCME_OK; + cfr_dump_list_t *ptr, *next; + uint8 *pbuf = (uint8 *)buf; + int num = 0; + int length = 0; + + NULL_CHECK(dhd, "dhd is NULL", ret); + + /* walk the stored CSI records */ + if (!list_empty(&dhd->csi_list)) { + list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) { + if (ptr->entry.header.remain_length) { + DHD_ERROR(("data not ready %d\n", ptr->entry.header.remain_length)); + continue; + } + bcopy(&ptr->entry.header, pbuf, sizeof(cfr_dump_header_t)); + length += sizeof(cfr_dump_header_t); + pbuf += sizeof(cfr_dump_header_t); + DHD_TRACE(("Copy data size %d\n", ptr->entry.header.cfr_dump_length)); + bcopy(&ptr->entry.data, pbuf, ptr->entry.header.cfr_dump_length); + length += ptr->entry.header.cfr_dump_length; + pbuf += ptr->entry.header.cfr_dump_length; + num++; + } + } + DHD_TRACE(("dumped %d records, %d bytes\n", num, length)); + + return length; +} diff --git a/bcmdhd.101.10.361.x/dhd_csi.h b/bcmdhd.101.10.361.x/dhd_csi.h new file mode 100755 index 0000000..e1c0aaa --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_csi.h @@ -0,0 +1,76 @@ +/* + * Broadcom Dongle Host Driver (DHD), CSI + * + * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_csi.h 558438 2015-05-22 06:05:11Z $ + */ +#ifndef __DHD_CSI_H__ +#define __DHD_CSI_H__ + +/* Maximum CSI file dump size */ +#define MAX_CSI_FILESZ (32 * 1024) +/* Maximum subcarrier number */ +#define MAXINUM_CFR_DATA (256 * 4) +#define CSI_DUMP_PATH "/sys/bcm-dhd/csi" +#define MAX_EVENT_SIZE 1400 +/* maximum CSI number stored in dhd */ +#define MAX_CSI_NUM 8 + +typedef struct cfr_dump_header { + /* 0 - successful; 1 - Failed */ + uint8 status; + /* Peer MAC address */ + uint8 peer_macaddr[6]; + /* Number of Space Time Streams */ + uint8 sts; + /* Number of RX chain */ + uint8 num_rx; + /* Number of subcarrier */ + uint16 num_carrier; + /* Length of the CSI dump */ + uint32 cfr_dump_length; + /* remaining unsent CSI data length */ + uint32 remain_length; + /* RSSI */ + int8 rssi; +} __attribute__((packed)) cfr_dump_header_t; + +typedef struct cfr_dump_data { + cfr_dump_header_t header; + uint32 data[MAXINUM_CFR_DATA]; +} cfr_dump_data_t; + +typedef struct { + struct list_head list; + cfr_dump_data_t entry; +} cfr_dump_list_t; + +int dhd_csi_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); + +int dhd_csi_init(dhd_pub_t *dhd); + +int dhd_csi_deinit(dhd_pub_t *dhd); + +void dhd_csi_clean_list(dhd_pub_t *dhd); + +int dhd_csi_dump_list(dhd_pub_t *dhd, char *buf); +#endif /* __DHD_CSI_H__ */ diff --git a/bcmdhd.101.10.361.x/dhd_custom_cis.c b/bcmdhd.101.10.361.x/dhd_custom_cis.c new file mode 100755 index 0000000..7105a9e --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_custom_cis.c @@ -0,0 +1,2010 @@ +/* + * Process CIS information from OTP for customer platform + * (Handle the MAC address and module information) + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module.
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef DHD_USE_CISINFO_FROM_OTP +#include /* need to still support chips no longer in trunk firmware */ +#include +#include +#include +#endif /* DHD_USE_CISINFO_FROM_OTP */ + +#ifdef DHD_USE_CISINFO_FROM_OTP +#define CIS_TUPLE_HDR_LEN 2 +#if defined(BCM4375_CHIP) +#define CIS_TUPLE_START_ADDRESS 0x18011120 +#define CIS_TUPLE_END_ADDRESS 0x18011177 +#elif defined(BCM4389_CHIP_DEF) +#define CIS_TUPLE_START_ADDRESS 0x18011058 +#define CIS_TUPLE_END_ADDRESS 0x180110AF +#else +#define CIS_TUPLE_START_ADDRESS 0x18011110 +#define CIS_TUPLE_END_ADDRESS 0x18011167 +#endif /* defined(BCM4375_CHIP) */ +#define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\ + + 1) / sizeof(uint32)) +#define CIS_TUPLE_TAG_START 0x80 +#define CIS_TUPLE_TAG_VENDOR 0x81 +#define CIS_TUPLE_TAG_BOARDTYPE 0x1b +#define CIS_TUPLE_TAG_LENGTH 1 + +typedef struct cis_tuple_format { + uint8 id; + uint8 len; /* total length of tag and data */ + uint8 tag; + uint8 data[1]; +} cis_tuple_format_t; + +static int +read_otp_from_bp(dhd_bus_t *bus, uint32 *data_buf) +{ + int int_val = 0, i = 0, bp_idx = 0; + int boardtype_backplane_addr[] = { + 0x18010324, /* OTP Control 1 */ + 0x18012618, /* PMU min resource mask */ + }; + int boardtype_backplane_data[] = { + 0x00fa0000, + 0x0e4fffff /* Keep on ARMHTAVAIL */ + }; + + uint32 cis_start_addr = CIS_TUPLE_START_ADDRESS; + uint32 org_boardtype_backplane_data[] = { + 0, + 0 + }; + + for (bp_idx = 0; bp_idx < ARRAYSIZE(boardtype_backplane_addr); bp_idx++) { + /* Read OTP Control 1 and PMU min_rsrc_mask before writing */ + if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int), + &org_boardtype_backplane_data[bp_idx], TRUE) != BCME_OK) { + DHD_ERROR(("invalid size/addr combination\n")); + return BCME_ERROR; + } + + /* Write new OTP and PMU configuration */ + if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int), + &boardtype_backplane_data[bp_idx], FALSE) != BCME_OK) { + DHD_ERROR(("invalid size/addr combination\n")); + return BCME_ERROR; + } + + if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int), + &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("invalid size/addr combination\n")); + return BCME_ERROR; + } + + DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n", + __FUNCTION__, boardtype_backplane_addr[bp_idx], int_val)); + } + + /* read tuple raw data */ + for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) { + if (si_backplane_access(bus->sih, cis_start_addr + i * sizeof(uint32), + sizeof(uint32), &data_buf[i], TRUE) != BCME_OK) { + break; + } + DHD_INFO(("%s: tuple index %d, raw data 0x%08x\n", __FUNCTION__, i, data_buf[i])); + } + + for (bp_idx = 0; bp_idx < ARRAYSIZE(boardtype_backplane_addr); bp_idx++) { + /* Write original OTP and PMU configuration */ + if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int), + &org_boardtype_backplane_data[bp_idx], FALSE) != BCME_OK) { + DHD_ERROR(("invalid size/addr combination\n")); + return BCME_ERROR; + } + + if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int), + &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("invalid size/addr combination\n")); + return 
BCME_ERROR; + } + + DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n", + __FUNCTION__, boardtype_backplane_addr[bp_idx], int_val)); + } + + return i * sizeof(uint32); +} + +static int +dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype, + unsigned char *vid, int *vid_length) +{ + int totlen, len; + uint32 raw_data[CIS_TUPLE_MAX_COUNT]; + cis_tuple_format_t *tuple; + + totlen = read_otp_from_bp(bus, raw_data); + if (totlen == BCME_ERROR || totlen == 0) { + DHD_ERROR(("%s : Can't read the OTP\n", __FUNCTION__)); + return BCME_ERROR; + } + + tuple = (cis_tuple_format_t *)raw_data; + + /* check the first tuple has tag 'start' */ + if (tuple->id != CIS_TUPLE_TAG_START) { + DHD_ERROR(("%s: Cannot find the start tag\n", __FUNCTION__)); + return BCME_ERROR; + } + + *vid_length = *boardtype = 0; + + /* find tagged parameter */ + while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) && + (*vid_length == 0 || *boardtype == 0)) { + len = tuple->len; + + if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) && + (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) { + /* found VID */ + memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); + *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH; + prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); + } + else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) && + (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) { + /* found boardtype */ + *boardtype = (int)tuple->data[0]; + prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); + } + + tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN)); + totlen -= (len + CIS_TUPLE_HDR_LEN); + } + + if (*vid_length <= 0 || *boardtype <= 0) { + DHD_ERROR(("failed to parse information (vid_length=%d, boardtype=%d)\n", + *vid_length, *boardtype)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#ifdef USE_CID_CHECK +#define CHIP_REV_A0 1 +#define CHIP_REV_A1 2 +#define CHIP_REV_B0 3 +#define CHIP_REV_B1 4 +#define CHIP_REV_B2 5 +#define CHIP_REV_C0 6 +#define BOARD_TYPE_EPA 0x080f +#define BOARD_TYPE_IPA 0x0827 +#define BOARD_TYPE_IPA_OLD 0x081a +#define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA" +#define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA" +#define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1" +#define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0" + +naming_info_t bcm4361_naming_table[] = { + { {""}, {""}, {""} }, + { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} }, + { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} }, + { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} }, + { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} }, + { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} }, + { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} }, + { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} }, + { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} }, + { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} }, + { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} }, + { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} }, + { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} }, + { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} }, + { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} }, + { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} }, + { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} }, + { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} }, + { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} }, + { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} }, + { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} }, + { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, + { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */ + { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} }, + { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} }, + {
{"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} }, + { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} }, + { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} }, + { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} }, + { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} }, + { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} }, + { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} } +}; + +naming_info_t bcm4375_naming_table[] = { + { {""}, {""}, {""} }, + { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} }, + { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} }, + { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} }, + { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} }, + { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} }, + { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} }, + { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} }, + { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} }, + { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} }, + { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} }, + { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} }, + { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} }, + { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} }, + { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} }, + { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} }, + { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} }, + { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} }, + { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} }, + { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} } +}; + +naming_info_t bcm4389_naming_table[] = { + { {""}, {""}, {""} }, + { {"e53_es23"}, {"_ES10_semco_b0"}, {"_b0"} }, + { {"e53_es24"}, {"_ES20_semco_b0"}, {"_b0"} }, + { {"e53_es25"}, {"_ES21_semco_b0"}, {"_b0"} }, + { {"e53_es31"}, {"_ES30_semco_c0"}, {"_c0"} }, + { {"e53_es32"}, {"_ES32_semco_c0"}, {"_c0"} }, + { {"e53_es40"}, {"_ES40_semco_c1"}, {"_c1"} }, + { {"1wk_es21"}, {"_1wk_es21_b0"}, {"_b0"} }, + { {"1wk_es30"}, {"_1wk_es30_b0"}, {"_b0"} }, + { {"1wk_es31"}, {"_1wk_es31_b0"}, {"_b0"} }, + { {"1wk_es32"}, {"_1wk_es32_b0"}, {"_b0"} }, + { {"1wk_es40"}, {"_1wk_es40_c0"}, {"_c0"} }, + { {"1wk_es41"}, {"_1wk_es41_c0"}, {"_c0"} }, + { {"1wk_es42"}, {"_1wk_es42_c0"}, {"_c0"} }, + { {"1wk_es43"}, {"_1wk_es43_c0"}, {"_c0"} }, + { {"1wk_es50"}, {"_1wk_es50_c1"}, {"_c1"} } +}; + +/* select the NVRAM/FW tag naming table */ +naming_info_t * +select_naming_table(dhd_pub_t *dhdp, int *table_size) +{ + naming_info_t * info = NULL; + + if (!dhdp || !dhdp->bus || !dhdp->bus->sih) + { + DHD_ERROR(("%s : Invalid pointer \n", __FUNCTION__)); + return info; + } + + switch (si_chipid(dhdp->bus->sih)) { + case BCM4361_CHIP_ID: + case BCM4347_CHIP_ID: + info = &bcm4361_naming_table[0]; + *table_size = ARRAYSIZE(bcm4361_naming_table); + DHD_INFO(("%s: info %p, ret %d\n", __FUNCTION__, info, *table_size)); + break; + case BCM4375_CHIP_ID: + info = &bcm4375_naming_table[0]; + *table_size = ARRAYSIZE(bcm4375_naming_table); + DHD_INFO(("%s: info %p, ret %d\n", __FUNCTION__, info, *table_size)); + break; + case BCM4389_CHIP_ID: + info = &bcm4389_naming_table[0]; + *table_size = ARRAYSIZE(bcm4389_naming_table); + DHD_INFO(("%s: info %p, ret %d\n", __FUNCTION__, info, *table_size)); + break; + default: + DHD_ERROR(("%s: No MODULE NAMING TABLE found\n", __FUNCTION__)); + break; + } + + return info; +} + +#define CID_FEM_MURATA "_mur_" +naming_info_t * +dhd_find_naming_info(dhd_pub_t *dhdp, char *module_type) +{ + int i = 0; + naming_info_t *info = NULL; + int table_size = 0; + + info = select_naming_table(dhdp, &table_size); + if (!info || !table_size) { + DHD_ERROR(("%s : Can't select the naming table\n", __FUNCTION__)); + return NULL; + } + + if (module_type && strlen(module_type) > 0) { + for (i = 1, info++; i < table_size; 
info++, i++) { + DHD_INFO(("%s : info %p, %d, info->cid_ext : %s\n", + __FUNCTION__, info, i, info->cid_ext)); + if (!strncmp(info->cid_ext, module_type, strlen(info->cid_ext))) { + break; + } + } + } + + return info; +} + +static naming_info_t * +dhd_find_naming_info_by_cid(dhd_pub_t *dhdp, char *cid_info) +{ + int i = 0; + char *ptr; + naming_info_t *info = NULL; + int table_size = 0; + + info = select_naming_table(dhdp, &table_size); + if (!info || !table_size) { + DHD_ERROR(("%s : Can't select the naming table\n", __FUNCTION__)); + return NULL; + } + + /* truncate extension */ + for (i = 1, ptr = cid_info; i < MODULE_NAME_INDEX_MAX && ptr; i++) { + ptr = bcmstrstr(ptr, "_"); + if (ptr) { + ptr++; + } + } + + for (i = 1, info++; i < table_size && ptr; info++, i++) { + DHD_INFO(("%s : info %p, %d, info->cid_ext : %s\n", + __FUNCTION__, info, i, info->cid_ext)); + if (!strncmp(info->cid_ext, ptr, strlen(info->cid_ext))) { + break; + } + } + + return info; +} + +naming_info_t * +dhd_find_naming_info_by_chip_rev(dhd_pub_t *dhdp, bool *is_murata_fem) +{ + int board_type = 0, chip_rev = 0, vid_length = 0; + unsigned char vid[MAX_VID_LEN]; + naming_info_t *info = NULL; + char *cid_info = NULL; + dhd_bus_t *bus = NULL; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return NULL; + } + + bus = dhdp->bus; + + if (!bus || !bus->sih) { + DHD_ERROR(("%s: bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus)); + return NULL; + } + + chip_rev = bus->sih->chiprev; + + if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length) + != BCME_OK) { + DHD_ERROR(("%s: failed to parse board information\n", __FUNCTION__)); + return NULL; + } + + DHD_INFO(("%s: chip version %d\n", __FUNCTION__, chip_rev)); + +#ifdef BCM4361_CHIP + /* only the A0 chipset is handled as an exception */ + if (chip_rev == CHIP_REV_A0) { + if (board_type == BOARD_TYPE_EPA) { + info = dhd_find_naming_info(dhdp, DEFAULT_CIDINFO_FOR_EPA); + } else if ((board_type == BOARD_TYPE_IPA) || + (board_type == BOARD_TYPE_IPA_OLD)) { + info = dhd_find_naming_info(dhdp, DEFAULT_CIDINFO_FOR_IPA); + } + } else +#endif /* BCM4361_CHIP */ + { + cid_info = dhd_get_cid_info(vid, vid_length); + if (cid_info) { + info = dhd_find_naming_info_by_cid(dhdp, cid_info); + if (strstr(cid_info, CID_FEM_MURATA)) { + *is_murata_fem = TRUE; + } + } + } + + return info; +} +#endif /* USE_CID_CHECK */ +#ifdef USE_DIRECT_VID_TAG +static int +concate_nvram_by_vid(dhd_pub_t *dhdp, char *nv_path, char *chipstr) +{ + unsigned char vid[MAX_VID_LEN]; + unsigned char vid2str[MAX_VID_LEN]; + + memset(vid, 0, sizeof(vid)); + memset(vid2str, 0, sizeof(vid2str)); + + if (dhd_check_stored_module_info(vid) == BCME_OK) { + /* concatenate chip string tag */ + strncat(nv_path, chipstr, strlen(nv_path)); + /* concatenate nvram tag */ + snprintf(vid2str, sizeof(vid2str), "_%x%x", vid[VENDOR_OFF], vid[MD_REV_OFF]); + strncat(nv_path, vid2str, strlen(nv_path)); + DHD_ERROR(("%s: nvram_path : %s\n", __FUNCTION__, nv_path)); + } else { + int board_type = 0, vid_length = 0; + dhd_bus_t *bus = NULL; + if (!dhdp) { + + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + bus = dhdp->bus; + if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length) + != BCME_OK) { + DHD_ERROR(("%s: failed to parse board information\n", __FUNCTION__)); + return BCME_ERROR; + } else { + /* concatenate chip string tag */ + strncat(nv_path, chipstr, strlen(nv_path)); + /* vid from CIS - vid[1] = vendor, vid[0] - module rev.
*/ + snprintf(vid2str, sizeof(vid2str), "_%x%x", + vid[VENDOR_OFF], vid[MD_REV_OFF]); + /* concatenate nvram tag */ + strncat(nv_path, vid2str, strlen(nv_path)); + DHD_ERROR(("%s: nvram_path : %s\n", __FUNCTION__, nv_path)); + } + } + return BCME_OK; +} +#endif /* USE_DIRECT_VID_TAG */ +#endif /* DHD_USE_CISINFO_FROM_OTP */ + +#ifdef DHD_USE_CISINFO + +/* File locations to keep each piece of information */ +#ifdef OEM_ANDROID +#define MACINFO PLATFORM_PATH".mac.info" +#define CIDINFO PLATFORM_PATH".cid.info" +#ifdef PLATFORM_SLP +#define MACINFO_EFS "/csa/.mac.info" +#else +#define MACINFO_EFS "/efs/wifi/.mac.info" +#define CIDINFO_DATA "/data/.cid.info" +#endif /* PLATFORM_SLP */ +#else +#define MACINFO "/opt/.mac.info" +#define MACINFO_EFS "/opt/.efs.mac.info" +#define CIDINFO "/opt/.cid.info" +#endif /* OEM_ANDROID */ + +/* Definitions for MAC address */ +#define MAC_BUF_SIZE 20 +#define MAC_CUSTOM_FORMAT "%02X:%02X:%02X:%02X:%02X:%02X" + +/* Definitions for CIS information */ +#if defined(BCM4359_CHIP) || defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || \ + defined(BCM4389_CHIP_DEF) +#define CIS_BUF_SIZE 1280 +#else +#define CIS_BUF_SIZE 512 +#endif /* BCM4359_CHIP */ + +#define DUMP_CIS_SIZE 48 + +#define CIS_TUPLE_TAG_START 0x80 +#define CIS_TUPLE_TAG_VENDOR 0x81 +#define CIS_TUPLE_TAG_MACADDR 0x19 +#define CIS_TUPLE_TAG_BOARDTYPE 0x1b +#define CIS_TUPLE_LEN_MACADDR 7 +#define CIS_DUMP_END 0xff +#define CIS_TUPLE_NULL 0x00 + +#ifdef CONFIG_BCMDHD_PCIE +#if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) +#define OTP_OFFSET 208 +#elif defined(BCM4389_CHIP_DEF) +#define OTP_OFFSET 0 +#else +#define OTP_OFFSET 128 +#endif /* BCM4361 | BCM4375 = 208, BCM4389 = 0, Others = 128 */ +#else /* CONFIG_BCMDHD_PCIE */ +#define OTP_OFFSET 12 /* SDIO */ +#endif /* CONFIG_BCMDHD_PCIE */ + +unsigned char *g_cis_buf = NULL; + +/* Definitions for common interface */ +typedef struct tuple_entry { + struct list_head list; /* head of the list */ + uint32 cis_idx; /* index of each tuple */ +} tuple_entry_t; + +extern int _dhd_set_mac_address(struct dhd_info *dhd, int ifidx, struct ether_addr *addr); +#if defined(GET_MAC_FROM_OTP) || defined(USE_CID_CHECK) +static tuple_entry_t *dhd_alloc_tuple_entry(dhd_pub_t *dhdp, const int idx); +static void dhd_free_tuple_entry(dhd_pub_t *dhdp, struct list_head *head); +static int dhd_find_tuple_list_from_otp(dhd_pub_t *dhdp, int req_tup, + unsigned char* req_tup_len, struct list_head *head); +#endif /* GET_MAC_FROM_OTP || USE_CID_CHECK */ + +/* otp region read/write information */ +typedef struct otp_rgn_rw_info { + uint8 rgnid; + uint8 preview; + uint8 integrity_chk; + uint16 rgnsize; + uint16 datasize; + uint8 *data; +} otp_rgn_rw_info_t; + +/* otp region status information */ +typedef struct otp_rgn_stat_info { + uint8 rgnid; + uint16 rgnstart; + uint16 rgnsize; +} otp_rgn_stat_info_t; + +typedef int (pack_handler_t)(void *ctx, uint8 *buf, uint16 *buflen); + +/* Common Interface Functions */ +int +dhd_alloc_cis(dhd_pub_t *dhdp) +{ + if (g_cis_buf == NULL) { + g_cis_buf = MALLOCZ(dhdp->osh, CIS_BUF_SIZE); + if (g_cis_buf == NULL) { + DHD_ERROR(("%s: Failed to alloc buffer for CIS\n", __FUNCTION__)); + return BCME_NOMEM; + } else { + DHD_ERROR(("%s: Local CIS buffer is allocated\n", __FUNCTION__)); + memset(g_cis_buf, 0, CIS_BUF_SIZE); + } + } + return BCME_OK; +} + +void +dhd_clear_cis(dhd_pub_t *dhdp) +{ + if (g_cis_buf) { + MFREE(dhdp->osh, g_cis_buf, CIS_BUF_SIZE); + g_cis_buf = NULL; + DHD_ERROR(("%s: Local CIS buffer is freed\n", __FUNCTION__)); + } +} +
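+/* Editor's note (not part of the original patch): dhd_read_cis() below has
+ * two variants. With DHD_READ_CIS_FROM_BP the CIS is read straight from OTP
+ * over the backplane via read_otp_from_bp(); otherwise it is fetched from
+ * the firmware with the "cisdump" iovar. Both fill the same g_cis_buf. */
+#ifdef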
DHD_READ_CIS_FROM_BP +int +dhd_read_cis(dhd_pub_t *dhdp) +{ + int ret = 0, totlen = 0; + uint32 raw_data[CIS_TUPLE_MAX_COUNT]; + + int cis_offset = OTP_OFFSET + sizeof(cis_rw_t); +#if defined(BCM4389_CHIP_DEF) + /* override OTP_OFFSET for 4389 */ + cis_offset = OTP_OFFSET; +#endif /* BCM4389_CHIP_DEF */ + + totlen = read_otp_from_bp(dhdp->bus, raw_data); + if (totlen == BCME_ERROR || totlen == 0) { + DHD_ERROR(("%s : Can't read the OTP\n", __FUNCTION__)); + return BCME_ERROR; + } + + (void)memcpy_s(g_cis_buf + cis_offset, CIS_BUF_SIZE, raw_data, totlen); + return ret; +} +#else +int +dhd_read_cis(dhd_pub_t *dhdp) +{ + int ret = 0; + cis_rw_t *cish; + int buf_size = CIS_BUF_SIZE; + int length = strlen("cisdump"); + + if (length >= buf_size) { + DHD_ERROR(("%s: check CIS_BUF_SIZE\n", __FUNCTION__)); + return BCME_BADLEN; + } + + /* Try reading out from CIS */ + cish = (cis_rw_t *)(g_cis_buf + 8); + cish->source = 0; + cish->byteoff = 0; + cish->nbytes = buf_size; + strlcpy(g_cis_buf, "cisdump", buf_size); + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, g_cis_buf, buf_size, 0, 0); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s: get cisdump, UNSUPPORTED\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: get cisdump err(%d)\n", + __FUNCTION__, ret)); + } + /* free local buf */ + dhd_clear_cis(dhdp); + } + + return ret; +} +#endif /* DHD_READ_CIS_FROM_BP */ + +static int +dhd_otp_process_iov_resp_buf(void *ctx, void *iov_resp, uint16 cmd_id, + bcm_xtlv_unpack_cbfn_t cbfn) +{ + bcm_iov_buf_t *p_resp = NULL; + int ret = BCME_OK; + uint16 version; + + /* check for version */ + version = dtoh16(*(uint16 *)iov_resp); + if (version != WL_OTP_IOV_VERSION) { + return BCME_VERSION; + } + + p_resp = (bcm_iov_buf_t *)iov_resp; + if ((p_resp->id == cmd_id) && (cbfn != NULL)) { + ret = bcm_unpack_xtlv_buf(ctx, (uint8 *)p_resp->data, p_resp->len, + BCM_XTLV_OPTION_ALIGN32, cbfn); + } + + return ret; +} + +static int +dhd_otp_get_iov_resp(dhd_pub_t *dhdp, const uint16 cmd_id, void *ctx, + pack_handler_t packfn, bcm_xtlv_unpack_cbfn_t cbfn) +{ + bcm_iov_buf_t *iov_buf = NULL; + uint8 *iov_resp = NULL; + int ret = BCME_OK; + uint16 iovlen = 0, buflen = 0, buflen_start = 0; + + /* allocate input buffer */ + iov_buf = MALLOCZ(dhdp->osh, WLC_IOCTL_SMLEN); + if (iov_buf == NULL) { + DHD_ERROR(("%s: Failed to alloc buffer for iovar input\n", __FUNCTION__)); + ret = BCME_NOMEM; + goto fail; + } + + iov_resp = MALLOCZ(dhdp->osh, WLC_IOCTL_MAXLEN); + if (iov_resp == NULL) { + DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__)); + ret = BCME_NOMEM; + goto fail; + } + + /* parse and pack config parameters */ + buflen = buflen_start = (WLC_IOCTL_SMLEN - sizeof(*iov_buf)); + ret = packfn(ctx, (uint8 *)&iov_buf->data[0], &buflen); + if (ret != BCME_OK) { + goto fail; + } + + /* fill header portion */ + iov_buf->version = WL_OTP_IOV_VERSION; + iov_buf->len = (buflen_start - buflen); + iov_buf->id = cmd_id; + + /* issue get iovar and process response */ + iovlen = sizeof(*iov_buf) + iov_buf->len; + ret = dhd_iovar(dhdp, 0, "otp", (char *)iov_buf, iovlen, + iov_resp, WLC_IOCTL_MAXLEN, FALSE); + if (ret == BCME_OK) { + ret = dhd_otp_process_iov_resp_buf(ctx, iov_resp, cmd_id, cbfn); + } else { + DHD_ERROR(("%s: Failed to get otp iovar\n", __FUNCTION__)); + } + +fail: + if (iov_buf) { + MFREE(dhdp->osh, iov_buf, WLC_IOCTL_SMLEN); + } + if (iov_resp) { + MFREE(dhdp->osh, iov_resp, WLC_IOCTL_MAXLEN); + } + if (ret < 0) { + /* free local buf */ + dhd_clear_cis(dhdp); +
} + return ret; +} + +static int +dhd_otp_cbfn_rgnstatus(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + otp_rgn_stat_info_t *stat_info = (otp_rgn_stat_info_t *)ctx; + + BCM_REFERENCE(len); + + if (data == NULL) { + DHD_ERROR(("%s: bad argument !!!\n", __FUNCTION__)); + return BCME_BADARG; + } + + switch (type) { + case WL_OTP_XTLV_RGN: + stat_info->rgnid = *data; + break; + case WL_OTP_XTLV_ADDR: + stat_info->rgnstart = dtoh16((uint16)*data); + break; + case WL_OTP_XTLV_SIZE: + stat_info->rgnsize = dtoh16((uint16)*data); + break; + default: + DHD_ERROR(("%s: unknown tlv %u\n", __FUNCTION__, type)); + break; + } + + return BCME_OK; +} + +static int +dhd_otp_packfn_rgnstatus(void *ctx, uint8 *buf, uint16 *buflen) +{ + uint8 *pxtlv = buf; + int ret = BCME_OK; + uint16 len = *buflen; + uint8 rgnid = OTP_RGN_SW; + + BCM_REFERENCE(ctx); + + /* pack option <-r region> */ + ret = bcm_pack_xtlv_entry(&pxtlv, &len, WL_OTP_XTLV_RGN, sizeof(rgnid), + &rgnid, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed pack xtlv entry of region: %d\n", __FUNCTION__, ret)); + return ret; + } + + *buflen = len; + return ret; +} + +static int +dhd_otp_packfn_rgndump(void *ctx, uint8 *buf, uint16 *buflen) +{ + uint8 *pxtlv = buf; + int ret = BCME_OK; + uint16 len = *buflen, size = WLC_IOCTL_MAXLEN; + uint8 rgnid = OTP_RGN_SW; + + /* pack option <-r region> */ + ret = bcm_pack_xtlv_entry(&pxtlv, &len, WL_OTP_XTLV_RGN, + sizeof(rgnid), &rgnid, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed pack xtlv entry of region: %d\n", __FUNCTION__, ret)); + goto fail; + } + + /* pack option [-s size] */ + ret = bcm_pack_xtlv_entry(&pxtlv, &len, WL_OTP_XTLV_SIZE, + sizeof(size), (uint8 *)&size, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed pack xtlv entry of size: %d\n", __FUNCTION__, ret)); + goto fail; + } + *buflen = len; +fail: + return ret; +} + +static int +dhd_otp_cbfn_rgndump(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + otp_rgn_rw_info_t *rw_info = (otp_rgn_rw_info_t *)ctx; + + BCM_REFERENCE(len); + + if (data == NULL) { + DHD_ERROR(("%s: bad argument !!!\n", __FUNCTION__)); + return BCME_BADARG; + } + + switch (type) { + case WL_OTP_XTLV_RGN: + rw_info->rgnid = *data; + break; + case WL_OTP_XTLV_DATA: + /* + * intentionally ignoring the return value of memcpy_s as it is just + * a variable copy and because of this size is within the bounds + */ + (void)memcpy_s(&rw_info->data, sizeof(rw_info->data), + &data, sizeof(rw_info->data)); + rw_info->datasize = len; + break; + default: + DHD_ERROR(("%s: unknown tlv %u\n", __FUNCTION__, type)); + break; + } + return BCME_OK; +} + +int +dhd_read_otp_sw_rgn(dhd_pub_t *dhdp) +{ + int ret = BCME_OK; + otp_rgn_rw_info_t rw_info; + otp_rgn_stat_info_t stat_info; + + memset(&rw_info, 0, sizeof(rw_info)); + memset(&stat_info, 0, sizeof(stat_info)); + + ret = dhd_otp_get_iov_resp(dhdp, WL_OTP_CMD_RGNSTATUS, &stat_info, + dhd_otp_packfn_rgnstatus, dhd_otp_cbfn_rgnstatus); + if (ret != BCME_OK) { + DHD_ERROR(("%s: otp region status failed, ret=%d\n", __FUNCTION__, ret)); + goto fail; + } + + rw_info.rgnsize = stat_info.rgnsize; + ret = dhd_otp_get_iov_resp(dhdp, WL_OTP_CMD_RGNDUMP, &rw_info, + dhd_otp_packfn_rgndump, dhd_otp_cbfn_rgndump); + if (ret != BCME_OK) { + DHD_ERROR(("%s: otp region dump failed, ret=%d\n", __FUNCTION__, ret)); + goto fail; + } + + ret = memcpy_s(g_cis_buf, CIS_BUF_SIZE, rw_info.data, rw_info.datasize); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed to copy 
otp dump, ret=%d\n", __FUNCTION__, ret)); + } +fail: + return ret; + +} + +#if defined(GET_MAC_FROM_OTP) || defined(USE_CID_CHECK) +static tuple_entry_t* +dhd_alloc_tuple_entry(dhd_pub_t *dhdp, const int idx) +{ + tuple_entry_t *entry; + + entry = MALLOCZ(dhdp->osh, sizeof(tuple_entry_t)); + if (!entry) { + DHD_ERROR(("%s: failed to alloc entry\n", __FUNCTION__)); + return NULL; + } + + entry->cis_idx = idx; + + return entry; +} + +static void +dhd_free_tuple_entry(dhd_pub_t *dhdp, struct list_head *head) +{ + tuple_entry_t *entry; + + while (!list_empty(head)) { + entry = list_entry(head->next, tuple_entry_t, list); + list_del(&entry->list); + + MFREE(dhdp->osh, entry, sizeof(tuple_entry_t)); + } +} + +static int +dhd_find_tuple_list_from_otp(dhd_pub_t *dhdp, int req_tup, + unsigned char* req_tup_len, struct list_head *head) +{ + int idx = OTP_OFFSET + sizeof(cis_rw_t); + int tup, tup_len = 0; + int buf_len = CIS_BUF_SIZE; + int found = 0; + +#if defined(BCM4389_CHIP_DEF) + /* override OTP_OFFEST for 4389 */ + idx = OTP_OFFSET; +#endif /* BCM4389_CHIP_DEF */ + + if (!g_cis_buf) { + DHD_ERROR(("%s: Couldn't find cis info from" + " local buffer\n", __FUNCTION__)); + return BCME_ERROR; + } + + do { + tup = g_cis_buf[idx++]; + if (tup == CIS_TUPLE_NULL || tup == CIS_DUMP_END) { + tup_len = 0; + } else { + tup_len = g_cis_buf[idx++]; + if ((idx + tup_len) > buf_len) { + return BCME_ERROR; + } + + if (tup == CIS_TUPLE_TAG_START && + tup_len != CIS_TUPLE_NULL && + g_cis_buf[idx] == req_tup) { + idx++; + if (head) { + tuple_entry_t *entry; + entry = dhd_alloc_tuple_entry(dhdp, idx); + if (entry) { + list_add_tail(&entry->list, head); + found++; + } + } + if (found == 1 && req_tup_len) { + *req_tup_len = tup_len; + } + tup_len--; + } + } + idx += tup_len; + } while (tup != CIS_DUMP_END && (idx < buf_len)); + + return (found > 0) ? 
found : BCME_ERROR; +} +#endif /* GET_MAC_FROM_OTP || USE_CID_CHECK */ + +#ifdef DUMP_CIS +static void +dhd_dump_cis_buf(dhd_pub_t *dhdp, int size) +{ + int i; + int cis_offset = 0; + + cis_offset = OTP_OFFSET + sizeof(cis_rw_t); +#if defined(BCM4389_CHIP_DEF) + /* override OTP_OFFEST for 4389 */ + cis_offset = OTP_OFFSET; +#endif /* BCM4389_CHIP_DEF */ + + if (size <= 0) { + return; + } + + if (size > CIS_BUF_SIZE) { + size = CIS_BUF_SIZE; + } + + DHD_ERROR(("========== START CIS DUMP ==========\n")); + for (i = 0; i < size; i++) { + DHD_ERROR(("%02X ", g_cis_buf[i + cis_offset])); + if ((i % 16) == 15) { + DHD_ERROR(("\n")); + } + } + if ((i % 16) != 15) { + DHD_ERROR(("\n")); + } + DHD_ERROR(("========== END CIS DUMP ==========\n")); +} +#endif /* DUMP_CIS */ + +/* MAC address mangement functions */ +#ifdef READ_MACADDR +static void +dhd_create_random_mac(char *buf, unsigned int buf_len) +{ + char random_mac[3]; + + memset(random_mac, 0, sizeof(random_mac)); + get_random_bytes(random_mac, 3); + + snprintf(buf, buf_len, MAC_CUSTOM_FORMAT, 0x00, 0x12, 0x34, + (uint32)random_mac[0], (uint32)random_mac[1], (uint32)random_mac[2]); + + DHD_ERROR(("%s: The Random Generated MAC ID: %s\n", + __FUNCTION__, random_mac)); +} + +#ifndef DHD_MAC_ADDR_EXPORT +int +dhd_set_macaddr_from_file(dhd_pub_t *dhdp) +{ + char mac_buf[MAC_BUF_SIZE]; + char *filepath_efs = MACINFO_EFS; +#ifdef PLATFORM_SLP + char *filepath_mac = MACINFO; +#endif /* PLATFORM_SLP */ + int ret; + struct dhd_info *dhd; + struct ether_addr *mac; + char *invalid_mac = "00:00:00:00:00:00"; + + if (dhdp) { + dhd = dhdp->info; + mac = &dhdp->mac; + } else { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + memset(mac_buf, 0, sizeof(mac_buf)); + + /* Read MAC address from the specified file */ + ret = dhd_read_file(filepath_efs, mac_buf, sizeof(mac_buf) - 1); + + /* Check if the file does not exist or contains invalid data */ + if (ret || (!ret && strstr(mac_buf, invalid_mac))) { + /* Generate a new random MAC address */ + dhd_create_random_mac(mac_buf, sizeof(mac_buf)); + + /* Write random MAC address to the file */ + if (dhd_write_file(filepath_efs, mac_buf, strlen(mac_buf)) < 0) { + DHD_ERROR(("%s: MAC address [%s] Failed to write into File:" + " %s\n", __FUNCTION__, mac_buf, filepath_efs)); + return BCME_ERROR; + } else { + DHD_ERROR(("%s: MAC address [%s] written into File: %s\n", + __FUNCTION__, mac_buf, filepath_efs)); + } + } +#ifdef PLATFORM_SLP + /* Write random MAC address for framework */ + if (dhd_write_file(filepath_mac, mac_buf, strlen(mac_buf)) < 0) { + DHD_ERROR(("%s: MAC address [%c%c:xx:xx:xx:x%c:%c%c] Failed to write into File:" + " %s\n", __FUNCTION__, mac_buf[0], mac_buf[1], + mac_buf[13], mac_buf[15], mac_buf[16], filepath_mac)); + } else { + DHD_ERROR(("%s: MAC address [%c%c:xx:xx:xx:x%c:%c%c] written into File: %s\n", + __FUNCTION__, mac_buf[0], mac_buf[1], mac_buf[13], + mac_buf[15], mac_buf[16], filepath_mac)); + } +#endif /* PLATFORM_SLP */ + + mac_buf[sizeof(mac_buf) - 1] = '\0'; + + /* Write the MAC address to the Dongle */ + sscanf(mac_buf, MAC_CUSTOM_FORMAT, + (uint32 *)&(mac->octet[0]), (uint32 *)&(mac->octet[1]), + (uint32 *)&(mac->octet[2]), (uint32 *)&(mac->octet[3]), + (uint32 *)&(mac->octet[4]), (uint32 *)&(mac->octet[5])); + + if (_dhd_set_mac_address(dhd, 0, mac) == 0) { + DHD_INFO(("%s: MAC Address is overwritten\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__)); + } + + return 0; +} +#else +int 
+dhd_set_macaddr_from_file(dhd_pub_t *dhdp) +{ + char mac_buf[MAC_BUF_SIZE]; + + struct dhd_info *dhd; + struct ether_addr *mac; + + if (dhdp) { + dhd = dhdp->info; + mac = &dhdp->mac; + } else { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + memset(mac_buf, 0, sizeof(mac_buf)); + if (ETHER_ISNULLADDR(&sysfs_mac_addr)) { + /* Generate a new random MAC address */ + dhd_create_random_mac(mac_buf, sizeof(mac_buf)); + if (!bcm_ether_atoe(mac_buf, &sysfs_mac_addr)) { + DHD_ERROR(("%s : mac parsing err\n", __FUNCTION__)); + return BCME_ERROR; + } + } + + /* Write the MAC address to the Dongle */ + memcpy(mac, &sysfs_mac_addr, sizeof(sysfs_mac_addr)); + + if (_dhd_set_mac_address(dhd, 0, mac) == 0) { + DHD_INFO(("%s: MAC Address is overwritten\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__)); + } + + return 0; +} +#endif /* !DHD_MAC_ADDR_EXPORT */ +#endif /* READ_MACADDR */ + +#ifdef GET_MAC_FROM_OTP +static int +dhd_set_default_macaddr(dhd_pub_t *dhdp) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + struct ether_addr *mac; + int ret; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_BADARG; + } + + mac = &dhdp->mac; + + /* Read the default MAC address */ + ret = dhd_iovar(dhdp, 0, "cur_etheraddr", NULL, 0, iovbuf, sizeof(iovbuf), + FALSE); + if (ret < 0) { + DHD_ERROR(("%s: Can't get the default MAC address\n", __FUNCTION__)); + return BCME_NOTUP; + } + + /* Update the default MAC address */ + memcpy(mac, iovbuf, ETHER_ADDR_LEN); +#ifdef DHD_MAC_ADDR_EXPORT + memcpy(&sysfs_mac_addr, mac, sizeof(sysfs_mac_addr)); +#endif /* DHD_MAC_ADDR_EXPORT */ + + return 0; +} + +static int +dhd_verify_macaddr(dhd_pub_t *dhdp, struct list_head *head) +{ + tuple_entry_t *cur, *next; + int idx = -1; /* Invalid index */ + + list_for_each_entry(cur, head, list) { + list_for_each_entry(next, &cur->list, list) { + if ((unsigned long)next == (unsigned long)head) { + DHD_INFO(("%s: next ptr %p is same as head ptr %p\n", + __FUNCTION__, next, head)); + break; + } + if (!memcmp(&g_cis_buf[cur->cis_idx], + &g_cis_buf[next->cis_idx], ETHER_ADDR_LEN)) { + idx = cur->cis_idx; + break; + } + } + } + + return idx; +} + +int +dhd_check_module_mac(dhd_pub_t *dhdp) +{ +#ifndef DHD_MAC_ADDR_EXPORT + char *filepath_efs = MACINFO_EFS; +#endif /* !DHD_MAC_ADDR_EXPORT */ + unsigned char otp_mac_buf[MAC_BUF_SIZE]; + struct ether_addr *mac; + struct dhd_info *dhd; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_BADARG; + } + + dhd = dhdp->info; + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return BCME_BADARG; + } + +#if defined(DHD_READ_CIS_FROM_BP) && defined(READ_MACADDR) + /* + * For KOR Module, CID update is required only + * so, clearing and making g_cis_buf = NULL before processing it when read_cis from STA FW + * It will get MAC from sysfs && won't update sysfs mac + */ + if (dhd_bus_get_fw_mode(dhdp) == DHD_FLAG_STA_MODE) { + dhd_clear_cis(dhdp); + } +#endif /* DHD_READ_CIS_FROM_BP && READ_MACADDR */ + + mac = &dhdp->mac; + memset(otp_mac_buf, 0, sizeof(otp_mac_buf)); + + if (!g_cis_buf) { +#ifndef DHD_MAC_ADDR_EXPORT + char eabuf[ETHER_ADDR_STR_LEN]; + DHD_INFO(("%s: Couldn't read CIS information\n", __FUNCTION__)); + + /* Read the MAC address from the specified file */ + if (dhd_read_file(filepath_efs, otp_mac_buf, sizeof(otp_mac_buf) - 1) < 0) { + DHD_ERROR(("%s: Couldn't read the file, " + "use the default MAC Address\n", __FUNCTION__)); + if 
(dhd_set_default_macaddr(dhdp) < 0) { + return BCME_BADARG; + } + } else { + bzero((char *)eabuf, sizeof(eabuf)); + strlcpy(eabuf, otp_mac_buf, sizeof(eabuf)); + if (!bcm_ether_atoe(eabuf, mac)) { + DHD_ERROR(("%s : mac parsing err\n", __FUNCTION__)); + if (dhd_set_default_macaddr(dhdp) < 0) { + return BCME_BADARG; + } + } + } +#else + DHD_INFO(("%s: Couldn't read CIS information\n", __FUNCTION__)); + + /* Read the MAC address from the specified file */ + if (ETHER_ISNULLADDR(&sysfs_mac_addr)) { + DHD_ERROR(("%s: Couldn't read the file, " + "use the default MAC Address\n", __FUNCTION__)); + if (dhd_set_default_macaddr(dhdp) < 0) { + return BCME_BADARG; + } + } else { + /* sysfs mac addr is confirmed with valid format in set_mac_addr */ + memcpy(mac, &sysfs_mac_addr, sizeof(sysfs_mac_addr)); + } +#endif /* !DHD_MAC_ADDR_EXPORT */ + } else { + struct list_head mac_list; + unsigned char tuple_len = 0; + int found = 0; + int idx = -1; /* Invalid index */ + +#ifdef DUMP_CIS + dhd_dump_cis_buf(dhdp, DUMP_CIS_SIZE); +#endif /* DUMP_CIS */ + + /* Find a new tuple tag */ + INIT_LIST_HEAD(&mac_list); + found = dhd_find_tuple_list_from_otp(dhdp, CIS_TUPLE_TAG_MACADDR, + &tuple_len, &mac_list); + if ((found > 0) && tuple_len == CIS_TUPLE_LEN_MACADDR) { + if (found == 1) { + tuple_entry_t *cur = list_entry((&mac_list)->next, + tuple_entry_t, list); + idx = cur->cis_idx; + } else { + /* Find the start index of MAC address */ + idx = dhd_verify_macaddr(dhdp, &mac_list); + } + } + + /* Find the MAC address */ + if (idx > 0) { +#ifdef DHD_EXPORT_CNTL_FILE + /* + * WAR for incorrect otp mac address (including multicast bit) + * for SEMCo e53_es31 module + */ + if (strcmp(cidinfostr, "semco_sem_e53_es31") == 0) { + g_cis_buf[idx] &= 0xFE; + } +#endif /* DHD_EXPORT_CNTL_FILE */ + /* update MAC address */ + snprintf(otp_mac_buf, sizeof(otp_mac_buf), MAC_CUSTOM_FORMAT, + (uint32)g_cis_buf[idx], (uint32)g_cis_buf[idx + 1], + (uint32)g_cis_buf[idx + 2], (uint32)g_cis_buf[idx + 3], + (uint32)g_cis_buf[idx + 4], (uint32)g_cis_buf[idx + 5]); + DHD_ERROR(("%s: MAC address is taken from OTP: " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(&g_cis_buf[idx]))); + } else { + /* Not found MAC address info from the OTP, use the default value */ + if (dhd_set_default_macaddr(dhdp) < 0) { + dhd_free_tuple_entry(dhdp, &mac_list); + return BCME_BADARG; + } + snprintf(otp_mac_buf, sizeof(otp_mac_buf), MAC_CUSTOM_FORMAT, + (uint32)mac->octet[0], (uint32)mac->octet[1], + (uint32)mac->octet[2], (uint32)mac->octet[3], + (uint32)mac->octet[4], (uint32)mac->octet[5]); + DHD_ERROR(("%s: Cannot find MAC address info from OTP," + " Check module mac by initial value: " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(mac->octet))); + } + + dhd_free_tuple_entry(dhdp, &mac_list); +#ifndef DHD_MAC_ADDR_EXPORT + dhd_write_file(filepath_efs, otp_mac_buf, strlen(otp_mac_buf)); +#else + /* Export otp_mac_buf to the sys/mac_addr */ + if (!bcm_ether_atoe(otp_mac_buf, &sysfs_mac_addr)) { + DHD_ERROR(("%s : mac parsing err\n", __FUNCTION__)); + if (dhd_set_default_macaddr(dhdp) < 0) { + return BCME_BADARG; + } + } else { + DHD_INFO(("%s : set mac address properly\n", __FUNCTION__)); + /* set otp mac to sysfs */ + memcpy(mac, &sysfs_mac_addr, sizeof(sysfs_mac_addr)); + } +#endif /* !DHD_MAC_ADDR_EXPORT */ + } + + if (_dhd_set_mac_address(dhd, 0, mac) == 0) { + DHD_INFO(("%s: MAC Address is set\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: Failed to set MAC address\n", __FUNCTION__)); + } + + return 0; +} +#endif /* GET_MAC_FROM_OTP */ + +/* + * XXX:SWWLAN-210178 
SysFS MAC ADDR export + * framework controls mac addr with sysfs mac_addr kernel object without file system + * For this reason, DHD doesn't need to write mac address to file system directly + */ +#ifndef DHD_MAC_ADDR_EXPORT +#ifdef WRITE_MACADDR +int +dhd_write_macaddr(struct ether_addr *mac) +{ + char *filepath_data = MACINFO; + char *filepath_efs = MACINFO_EFS; + char mac_buf[MAC_BUF_SIZE]; + int ret = 0; + int retry_cnt = 0; + + memset(mac_buf, 0, sizeof(mac_buf)); + snprintf(mac_buf, sizeof(mac_buf), MAC_CUSTOM_FORMAT, + (uint32)mac->octet[0], (uint32)mac->octet[1], + (uint32)mac->octet[2], (uint32)mac->octet[3], + (uint32)mac->octet[4], (uint32)mac->octet[5]); + + if (filepath_data) { + for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) { + /* Write MAC information into /data/.mac.info */ + ret = dhd_write_file_and_check(filepath_data, mac_buf, strlen(mac_buf)); + if (!ret) { + break; + } + } + + if (ret < 0) { + DHD_ERROR(("%s: MAC address [%s] Failed to write into" + " File: %s\n", __FUNCTION__, mac_buf, filepath_data)); + return BCME_ERROR; + } + } else { + DHD_ERROR(("%s: filepath_data doesn't exist\n", __FUNCTION__)); + } + + if (filepath_efs) { + for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) { + /* Write MAC information into /efs/wifi/.mac.info */ + ret = dhd_write_file_and_check(filepath_efs, mac_buf, strlen(mac_buf)); + if (!ret) { + break; + } + } + + if (ret < 0) { + DHD_ERROR(("%s: MAC address [%s] Failed to write into" + " File: %s\n", __FUNCTION__, mac_buf, filepath_efs)); + return BCME_ERROR; + } + } else { + DHD_ERROR(("%s: filepath_efs doesn't exist\n", __FUNCTION__)); + } + + return ret; +} +#endif /* WRITE_MACADDR */ +#endif /* !DHD_MAC_ADDR_EXPORT */ + +#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG) +static int +dhd_find_tuple_idx_from_otp(dhd_pub_t *dhdp, int req_tup, unsigned char *req_tup_len) +{ + struct list_head head; + int start_idx; + int entry_num; + + if (!g_cis_buf) { + DHD_ERROR(("%s: Couldn't find cis info from" + " local buffer\n", __FUNCTION__)); + return BCME_ERROR; + } + + INIT_LIST_HEAD(&head); + entry_num = dhd_find_tuple_list_from_otp(dhdp, req_tup, req_tup_len, &head); + /* find the first cis index from the tuple list */ + if (entry_num > 0) { + tuple_entry_t *cur = list_entry((&head)->next, tuple_entry_t, list); + start_idx = cur->cis_idx; + } else { + start_idx = -1; /* Invalid index */ + } + + dhd_free_tuple_entry(dhdp, &head); + + return start_idx; +} +#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */ + +#ifdef USE_CID_CHECK +/* Definitions for module information */ +#define MAX_VID_LEN 8 + +#ifdef SUPPORT_MULTIPLE_BOARDTYPE +#define MAX_BNAME_LEN 6 + +typedef struct { + uint8 b_len; + unsigned char btype[MAX_VID_LEN]; + char bname[MAX_BNAME_LEN]; +} board_info_t; + +#if defined(BCM4361_CHIP) +board_info_t semco_PA_info[] = { + { 3, { 0x0f, 0x08, }, { "_ePA" } }, /* semco All ePA */ + { 3, { 0x27, 0x08, }, { "_iPA" } }, /* semco 2g iPA, 5g ePA */ + { 3, { 0x1a, 0x08, }, { "_iPA" } }, /* semco 2g iPA, 5g ePA old */ + { 0, { 0x00, }, { "" } } /* Default: Not specified yet */ +}; +#else +board_info_t semco_board_info[] = { + { 3, { 0x51, 0x07, }, { "_b90b" } }, /* semco three antenna */ + { 3, { 0x61, 0x07, }, { "_b90b" } }, /* semco two antenna */ + { 0, { 0x00, }, { "" } } /* Default: Not specified yet */ +}; +board_info_t murata_board_info[] = { + { 3, { 0xa5, 0x07, }, { "_b90" } }, /* murata three antenna */ + { 3, { 0xb0, 0x07, }, { "_b90b" } }, /* murata two antenna */ + { 3, { 0xb1, 0x07, }, { "_es5" } }, /* murata two antenna 
*/ + { 0, { 0x00, }, { "" } } /* Default: Not specified yet */ +}; +#endif /* BCM4361_CHIP */ +#endif /* SUPPORT_MULTIPLE_BOARDTYPE */ + +typedef struct { + uint8 vid_length; + unsigned char vid[MAX_VID_LEN]; + char cid_info[MAX_VNAME_LEN]; +} vid_info_t; + +#if defined(BCM4335_CHIP) +vid_info_t vid_info[] = { + { 3, { 0x33, 0x66, }, { "semcosh" } }, /* B0 Sharp 5G-FEM */ + { 3, { 0x33, 0x33, }, { "semco" } }, /* B0 Skyworks 5G-FEM and A0 chip */ + { 3, { 0x33, 0x88, }, { "semco3rd" } }, /* B0 Syri 5G-FEM */ + { 3, { 0x00, 0x11, }, { "muratafem1" } }, /* B0 ANADIGICS 5G-FEM */ + { 3, { 0x00, 0x22, }, { "muratafem2" } }, /* B0 TriQuint 5G-FEM */ + { 3, { 0x00, 0x33, }, { "muratafem3" } }, /* 3rd FEM: Reserved */ + { 0, { 0x00, }, { "murata" } } /* Default: for Murata A0 module */ +}; +#elif defined(BCM4339_CHIP) || defined(BCM4354_CHIP) || \ + defined(BCM4356_CHIP) +vid_info_t vid_info[] = { /* 4339:2G FEM+5G FEM ,4354: 2G FEM+5G FEM */ + { 3, { 0x33, 0x33, }, { "semco" } }, /* 4339:Skyworks+sharp,4354:Panasonic+Panasonic */ + { 3, { 0x33, 0x66, }, { "semco" } }, /* 4339: , 4354:Panasonic+SEMCO */ + { 3, { 0x33, 0x88, }, { "semco3rd" } }, /* 4339: , 4354:SEMCO+SEMCO */ + { 3, { 0x90, 0x01, }, { "wisol" } }, /* 4339: , 4354:Microsemi+Panasonic */ + { 3, { 0x90, 0x02, }, { "wisolfem1" } }, /* 4339: , 4354:Panasonic+Panasonic */ + { 3, { 0x90, 0x03, }, { "wisolfem2" } }, /* 4354:Murata+Panasonic */ + { 3, { 0x00, 0x11, }, { "muratafem1" } }, /* 4339: , 4354:Murata+Anadigics */ + { 3, { 0x00, 0x22, }, { "muratafem2"} }, /* 4339: , 4354:Murata+Triquint */ + { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */ +}; +#elif defined(BCM4358_CHIP) +vid_info_t vid_info[] = { + { 3, { 0x33, 0x33, }, { "semco_b85" } }, + { 3, { 0x33, 0x66, }, { "semco_b85" } }, + { 3, { 0x33, 0x88, }, { "semco3rd_b85" } }, + { 3, { 0x90, 0x01, }, { "wisol_b85" } }, + { 3, { 0x90, 0x02, }, { "wisolfem1_b85" } }, + { 3, { 0x90, 0x03, }, { "wisolfem2_b85" } }, + { 3, { 0x31, 0x90, }, { "wisol_b85b" } }, + { 3, { 0x00, 0x11, }, { "murata_b85" } }, + { 3, { 0x00, 0x22, }, { "murata_b85"} }, + { 6, { 0x00, 0xFF, 0xFF, 0x00, 0x00, }, { "murata_b85"} }, + { 3, { 0x10, 0x33, }, { "semco_b85a" } }, + { 3, { 0x30, 0x33, }, { "semco_b85b" } }, + { 3, { 0x31, 0x33, }, { "semco_b85b" } }, + { 3, { 0x10, 0x22, }, { "murata_b85a" } }, + { 3, { 0x20, 0x22, }, { "murata_b85a" } }, + { 3, { 0x21, 0x22, }, { "murata_b85a" } }, + { 3, { 0x23, 0x22, }, { "murata_b85a" } }, + { 3, { 0x31, 0x22, }, { "murata_b85b" } }, + { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */ +}; +#elif defined(BCM4359_CHIP) +vid_info_t vid_info[] = { +#if defined(SUPPORT_BCM4359_MIXED_MODULES) + { 3, { 0x34, 0x33, }, { "semco_b90b" } }, + { 3, { 0x40, 0x33, }, { "semco_b90b" } }, + { 3, { 0x41, 0x33, }, { "semco_b90b" } }, + { 3, { 0x11, 0x33, }, { "semco_b90b" } }, + { 3, { 0x33, 0x66, }, { "semco_b90b" } }, + { 3, { 0x23, 0x22, }, { "murata_b90b" } }, + { 3, { 0x40, 0x22, }, { "murata_b90b" } }, + { 3, { 0x10, 0x90, }, { "wisol_b90b" } }, + { 3, { 0x33, 0x33, }, { "semco_b90s_b1" } }, + { 3, { 0x66, 0x33, }, { "semco_b90s_c0" } }, + { 3, { 0x60, 0x22, }, { "murata_b90s_b1" } }, + { 3, { 0x61, 0x22, }, { "murata_b90s_b1" } }, + { 3, { 0x62, 0x22, }, { "murata_b90s_b1" } }, + { 3, { 0x63, 0x22, }, { "murata_b90s_b1" } }, + { 3, { 0x70, 0x22, }, { "murata_b90s_c0" } }, + { 3, { 0x71, 0x22, }, { "murata_b90s_c0" } }, + { 3, { 0x72, 0x22, }, { "murata_b90s_c0" } }, + { 3, { 0x73, 0x22, }, { "murata_b90s_c0" } }, + { 0, { 0x00, }, { "samsung" 
} } /* Default: Not specified yet */ +#else /* SUPPORT_BCM4359_MIXED_MODULES */ + { 3, { 0x34, 0x33, }, { "semco" } }, + { 3, { 0x40, 0x33, }, { "semco" } }, + { 3, { 0x41, 0x33, }, { "semco" } }, + { 3, { 0x11, 0x33, }, { "semco" } }, + { 3, { 0x33, 0x66, }, { "semco" } }, + { 3, { 0x23, 0x22, }, { "murata" } }, + { 3, { 0x40, 0x22, }, { "murata" } }, + { 3, { 0x51, 0x22, }, { "murata" } }, + { 3, { 0x52, 0x22, }, { "murata" } }, + { 3, { 0x10, 0x90, }, { "wisol" } }, + { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */ +#endif /* SUPPORT_BCM4359_MIXED_MODULES */ +}; +#elif defined(BCM4361_CHIP) +vid_info_t vid_info[] = { +#if defined(SUPPORT_MIXED_MODULES) + { 3, { 0x66, 0x33, }, { "semco_sky_r00a_e000_a0" } }, + { 3, { 0x30, 0x33, }, { "semco_sky_r01a_e30a_a1" } }, + { 3, { 0x31, 0x33, }, { "semco_sky_r02a_e30a_a1" } }, + { 3, { 0x32, 0x33, }, { "semco_sky_r02a_e30a_a1" } }, + { 3, { 0x51, 0x33, }, { "semco_sky_r01d_e31_b0" } }, + { 3, { 0x61, 0x33, }, { "semco_sem_r01f_e31_b0" } }, + { 3, { 0x62, 0x33, }, { "semco_sem_r02g_e31_b0" } }, + { 3, { 0x71, 0x33, }, { "semco_sky_r01h_e32_b0" } }, + { 3, { 0x81, 0x33, }, { "semco_sem_r01i_e32_b0" } }, + { 3, { 0x82, 0x33, }, { "semco_sem_r02j_e32_b0" } }, + { 3, { 0x91, 0x33, }, { "semco_sem_r02a_e32a_b2" } }, + { 3, { 0xa1, 0x33, }, { "semco_sem_r02b_e32a_b2" } }, + { 3, { 0x12, 0x22, }, { "murata_nxp_r012_1kl_a1" } }, + { 3, { 0x13, 0x22, }, { "murata_mur_r013_1kl_b0" } }, + { 3, { 0x14, 0x22, }, { "murata_mur_r014_1kl_b0" } }, + { 3, { 0x15, 0x22, }, { "murata_mur_r015_1kl_b0" } }, + { 3, { 0x20, 0x22, }, { "murata_mur_r020_1kl_b0" } }, + { 3, { 0x21, 0x22, }, { "murata_mur_r021_1kl_b0" } }, + { 3, { 0x22, 0x22, }, { "murata_mur_r022_1kl_b0" } }, + { 3, { 0x23, 0x22, }, { "murata_mur_r023_1kl_b0" } }, + { 3, { 0x24, 0x22, }, { "murata_mur_r024_1kl_b0" } }, + { 3, { 0x30, 0x22, }, { "murata_mur_r030_1kl_b0" } }, + { 3, { 0x31, 0x22, }, { "murata_mur_r031_1kl_b0" } }, + { 3, { 0x32, 0x22, }, { "murata_mur_r032_1kl_b0" } }, + { 3, { 0x33, 0x22, }, { "murata_mur_r033_1kl_b0" } }, + { 3, { 0x34, 0x22, }, { "murata_mur_r034_1kl_b0" } }, + { 3, { 0x50, 0x22, }, { "murata_mur_r020_1qw_b2" } }, + { 3, { 0x51, 0x22, }, { "murata_mur_r021_1qw_b2" } }, + { 3, { 0x52, 0x22, }, { "murata_mur_r022_1qw_b2" } }, + { 3, { 0x61, 0x22, }, { "murata_mur_r031_1qw_b2" } }, + { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */ +#endif /* SUPPORT_MIXED_MODULES */ +}; +#elif defined(BCM4375_CHIP) +vid_info_t vid_info[] = { +#if defined(SUPPORT_MIXED_MODULES) + { 3, { 0x11, 0x33, }, { "semco_sky_e41_es11" } }, + { 3, { 0x33, 0x33, }, { "semco_sem_e43_es33" } }, + { 3, { 0x34, 0x33, }, { "semco_sem_e43_es34" } }, + { 3, { 0x35, 0x33, }, { "semco_sem_e43_es35" } }, + { 3, { 0x36, 0x33, }, { "semco_sem_e43_es36" } }, + { 3, { 0x41, 0x33, }, { "semco_sem_e43_cs41" } }, + { 3, { 0x51, 0x33, }, { "semco_sem_e43_cs51" } }, + { 3, { 0x53, 0x33, }, { "semco_sem_e43_cs53" } }, + { 3, { 0x61, 0x33, }, { "semco_sky_e43_cs61" } }, + { 3, { 0x10, 0x22, }, { "murata_mur_1rh_es10" } }, + { 3, { 0x11, 0x22, }, { "murata_mur_1rh_es11" } }, + { 3, { 0x12, 0x22, }, { "murata_mur_1rh_es12" } }, + { 3, { 0x13, 0x22, }, { "murata_mur_1rh_es13" } }, + { 3, { 0x20, 0x22, }, { "murata_mur_1rh_es20" } }, + { 3, { 0x32, 0x22, }, { "murata_mur_1rh_es32" } }, + { 3, { 0x41, 0x22, }, { "murata_mur_1rh_es41" } }, + { 3, { 0x42, 0x22, }, { "murata_mur_1rh_es42" } }, + { 3, { 0x43, 0x22, }, { "murata_mur_1rh_es43" } }, + { 3, { 0x44, 0x22, }, { "murata_mur_1rh_es44" } } 
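+	/*
+	 * Note on matching (see dhd_check_module_cid()): an entry is selected
+	 * when its vid_length equals the OTP tuple length and only the first
+	 * vid_length - 1 bytes of vid[] match the tuple, so the last byte of
+	 * each vid[] is effectively padding that is never compared.
+	 */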
+#endif /* SUPPORT_MIXED_MODULES */
+};
+#elif defined(BCM4389_CHIP_DEF)
+vid_info_t vid_info[] = {
+#if defined(SUPPORT_MIXED_MODULES)
+	{ 3, { 0x21, 0x33, }, { "semco_sem_e53_es23" } },
+	{ 3, { 0x23, 0x33, }, { "semco_sem_e53_es23" } },
+	{ 3, { 0x24, 0x33, }, { "semco_sem_e53_es24" } },
+	{ 3, { 0x25, 0x33, }, { "semco_sem_e53_es25" } },
+	{ 3, { 0x31, 0x33, }, { "semco_sem_e53_es31" } },
+	{ 3, { 0x32, 0x33, }, { "semco_sem_e53_es32" } },
+	{ 3, { 0x40, 0x33, }, { "semco_sem_e53_es40" } },
+	{ 3, { 0x21, 0x22, }, { "murata_mur_1wk_es21" } },
+	{ 3, { 0x30, 0x22, }, { "murata_mur_1wk_es30" } },
+	{ 3, { 0x31, 0x22, }, { "murata_mur_1wk_es31" } },
+	{ 3, { 0x32, 0x22, }, { "murata_mur_1wk_es32" } },
+	{ 3, { 0x40, 0x22, }, { "murata_mur_1wk_es40" } },
+	{ 3, { 0x41, 0x22, }, { "murata_mur_1wk_es41" } },
+	{ 3, { 0x42, 0x22, }, { "murata_mur_1wk_es42" } },
+	{ 3, { 0x43, 0x22, }, { "murata_mur_1wk_es43" } },
+	{ 3, { 0x50, 0x22, }, { "murata_mur_1wk_es50" } }
+#endif /* SUPPORT_MIXED_MODULES */
+};
+#else
+vid_info_t vid_info[] = {
+	{ 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */
+};
+#endif /* BCM_CHIP_ID */
+
+/* CID management functions */
+
+char *
+dhd_get_cid_info(unsigned char *vid, int vid_length)
+{
+	int i;
+
+	for (i = 0; i < ARRAYSIZE(vid_info); i++) {
+		if (vid_info[i].vid_length - 1 == vid_length &&
+			!memcmp(vid_info[i].vid, vid, vid_length)) {
+			return vid_info[i].cid_info;
+		}
+	}
+
+	DHD_ERROR(("%s : Can't find the cid info\n", __FUNCTION__));
+	return NULL;
+}
+
+int
+dhd_check_module_cid(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+#ifndef DHD_EXPORT_CNTL_FILE
+	const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+	int idx, max;
+	vid_info_t *cur_info;
+	unsigned char *tuple_start = NULL;
+	unsigned char tuple_length = 0;
+	unsigned char cid_info[MAX_VNAME_LEN];
+	int found = FALSE;
+#ifdef SUPPORT_MULTIPLE_BOARDTYPE
+	board_info_t *cur_b_info = NULL;
+	board_info_t *vendor_b_info = NULL;
+	unsigned char *btype_start;
+	unsigned char boardtype_len = 0;
+#endif /* SUPPORT_MULTIPLE_BOARDTYPE */
+
+	/* Try reading out from CIS */
+	if (!g_cis_buf) {
+		DHD_INFO(("%s: Couldn't read CIS info\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_INFO(("%s: Reading CIS from local buffer\n", __FUNCTION__));
+#ifdef DUMP_CIS
+	dhd_dump_cis_buf(dhdp, DUMP_CIS_SIZE);
+#endif /* DUMP_CIS */
+
+	idx = dhd_find_tuple_idx_from_otp(dhdp, CIS_TUPLE_TAG_VENDOR, &tuple_length);
+	if (idx > 0) {
+		found = TRUE;
+		tuple_start = &g_cis_buf[idx];
+	}
+
+	if (found) {
+		max = sizeof(vid_info) / sizeof(vid_info_t);
+		for (idx = 0; idx < max; idx++) {
+			cur_info = &vid_info[idx];
+#ifdef BCM4358_CHIP
+			if (cur_info->vid_length == 6 && tuple_length == 6) {
+				if (cur_info->vid[0] == tuple_start[0] &&
+					cur_info->vid[3] == tuple_start[3] &&
+					cur_info->vid[4] == tuple_start[4]) {
+					goto check_board_type;
+				}
+			}
+#endif /* BCM4358_CHIP */
+			if ((cur_info->vid_length == tuple_length) &&
+				(cur_info->vid_length != 0) &&
+				(memcmp(cur_info->vid, tuple_start,
+					cur_info->vid_length - 1) == 0)) {
+				goto check_board_type;
+			}
+		}
+	}
+
+	/* fall back to the default nvram entry, if one exists */
+	DHD_ERROR(("%s: cannot find CIS TUPLE set as default\n", __FUNCTION__));
+	max = sizeof(vid_info) / sizeof(vid_info_t);
+	for (idx = 0; idx < max; idx++) {
+		cur_info = &vid_info[idx];
+		if (cur_info->vid_length == 0) {
+			goto write_cid;
+		}
+	}
+	DHD_ERROR(("%s: cannot find default CID\n", __FUNCTION__));
+	return BCME_ERROR;
+
+check_board_type:
+#ifdef SUPPORT_MULTIPLE_BOARDTYPE
+	idx = 
dhd_find_tuple_idx_from_otp(dhdp, CIS_TUPLE_TAG_BOARDTYPE, &tuple_length); + if (idx > 0) { + btype_start = &g_cis_buf[idx]; + boardtype_len = tuple_length; + DHD_INFO(("%s: board type found.\n", __FUNCTION__)); + } else { + boardtype_len = 0; + } +#if defined(BCM4361_CHIP) + vendor_b_info = semco_PA_info; + max = sizeof(semco_PA_info) / sizeof(board_info_t); +#else + if (strcmp(cur_info->cid_info, "semco") == 0) { + vendor_b_info = semco_board_info; + max = sizeof(semco_board_info) / sizeof(board_info_t); + } else if (strcmp(cur_info->cid_info, "murata") == 0) { + vendor_b_info = murata_board_info; + max = sizeof(murata_board_info) / sizeof(board_info_t); + } else { + max = 0; + } +#endif /* BCM4361_CHIP */ + if (boardtype_len) { + for (idx = 0; idx < max; idx++) { + cur_b_info = vendor_b_info; + if ((cur_b_info->b_len == boardtype_len) && + (cur_b_info->b_len != 0) && + (memcmp(cur_b_info->btype, btype_start, + cur_b_info->b_len - 1) == 0)) { + DHD_INFO(("%s : board type name : %s\n", + __FUNCTION__, cur_b_info->bname)); + break; + } + cur_b_info = NULL; + vendor_b_info++; + } + } +#endif /* SUPPORT_MULTIPLE_BOARDTYPE */ + +write_cid: +#ifdef SUPPORT_MULTIPLE_BOARDTYPE + if (cur_b_info && cur_b_info->b_len > 0) { + strcpy(cid_info, cur_info->cid_info); + strcpy(cid_info + strlen(cur_info->cid_info), cur_b_info->bname); + } else +#endif /* SUPPORT_MULTIPLE_BOARDTYPE */ + strcpy(cid_info, cur_info->cid_info); + + DHD_INFO(("%s: CIS MATCH FOUND : %s\n", __FUNCTION__, cid_info)); +#ifndef DHD_EXPORT_CNTL_FILE + dhd_write_file(cidfilepath, cid_info, strlen(cid_info) + 1); +#else + strlcpy(cidinfostr, cid_info, MAX_VNAME_LEN); +#endif /* DHD_EXPORT_CNTL_FILE */ + + return ret; +} + +#ifdef SUPPORT_MULTIPLE_MODULE_CIS +#ifndef DHD_EXPORT_CNTL_FILE +static bool +dhd_check_module(char *module_name) +{ + char vname[MAX_VNAME_LEN]; + const char *cidfilepath = CIDINFO; + int ret; + + memset(vname, 0, sizeof(vname)); + ret = dhd_read_file(cidfilepath, vname, sizeof(vname) - 1); + if (ret < 0) { + return FALSE; + } + DHD_INFO(("%s: This module is %s \n", __FUNCTION__, vname)); + return strstr(vname, module_name) ? TRUE : FALSE; +} +#else +bool +dhd_check_module(char *module_name) +{ + return strstr(cidinfostr, module_name) ? TRUE : FALSE; +} +#endif /* !DHD_EXPORT_CNTL_FILE */ + +int +dhd_check_module_b85a(void) +{ + int ret; + char *vname_b85a = "_b85a"; + + if (dhd_check_module(vname_b85a)) { + DHD_INFO(("%s: It's a b85a module\n", __FUNCTION__)); + ret = 1; + } else { + DHD_INFO(("%s: It is not a b85a module\n", __FUNCTION__)); + ret = -1; + } + + return ret; +} + +int +dhd_check_module_b90(void) +{ + int ret = 0; + char *vname_b90b = "_b90b"; + char *vname_b90s = "_b90s"; + + if (dhd_check_module(vname_b90b)) { + DHD_INFO(("%s: It's a b90b module \n", __FUNCTION__)); + ret = BCM4359_MODULE_TYPE_B90B; + } else if (dhd_check_module(vname_b90s)) { + DHD_INFO(("%s: It's a b90s module\n", __FUNCTION__)); + ret = BCM4359_MODULE_TYPE_B90S; + } else { + DHD_ERROR(("%s: It's neither b90b nor b90s\n", __FUNCTION__)); + ret = BCME_ERROR; + } + + return ret; +} +#endif /* SUPPORT_MULTIPLE_MODULE_CIS */ + +#define CID_FEM_MURATA "_mur_" +/* extract module type from cid information */ +/* XXX: extract string by delimiter '_' at specific counting position. + * it would be used for module type information. + * for example, cid information is 'semco_sky_r02a_e30a_a1', + * then output (module type) is 'r02a_e30a_a1' when index is 3. 
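+ * the loop below implements this by skipping index - 1 occurrences of '_':
+ * with index == 3 it advances past "semco_" and "sky_", leaving ptr at
+ * "r02a_e30a_a1", which is then copied into module_type.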
+ */
+int
+dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem)
+{
+	int ret = 0, i;
+	char vname[MAX_VNAME_LEN];
+	char *ptr = NULL;
+#ifndef DHD_EXPORT_CNTL_FILE
+	const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+	memset(vname, 0, sizeof(vname));
+
+#ifndef DHD_EXPORT_CNTL_FILE
+	ret = dhd_read_file(cidfilepath, vname, sizeof(vname) - 1);
+	if (ret < 0) {
+		DHD_ERROR(("%s: failed to get module information from .cid.info\n",
+			__FUNCTION__));
+		return ret;
+	}
+#else
+	strlcpy(vname, cidinfostr, MAX_VNAME_LEN);
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+	for (i = 1, ptr = vname; i < index && ptr; i++) {
+		ptr = bcmstrstr(ptr, "_");
+		if (ptr) {
+			ptr++;
+		}
+	}
+
+	if (bcmstrnstr(vname, MAX_VNAME_LEN, CID_FEM_MURATA, 5)) {
+		*is_murata_fem = TRUE;
+	}
+
+	if (ptr) {
+		memcpy(module_type, ptr, strlen(ptr));
+	} else {
+		DHD_ERROR(("%s: failed to get module information\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_INFO(("%s: module type = %s \n", __FUNCTION__, module_type));
+
+	return ret;
+}
+#endif /* USE_CID_CHECK */
+
+#ifdef USE_DIRECT_VID_TAG
+int
+dhd_check_module_cid(dhd_pub_t *dhdp)
+{
+	int ret = BCME_ERROR;
+	int idx;
+	unsigned char tuple_length = 0;
+	unsigned char *vid = NULL;
+	unsigned char cid_info[MAX_VNAME_LEN];
+#ifndef DHD_EXPORT_CNTL_FILE
+	const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+	/* Try reading out from CIS */
+	if (!g_cis_buf) {
+		DHD_INFO(("%s: Couldn't read CIS info\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_INFO(("%s: Reading CIS from local buffer\n", __FUNCTION__));
+#ifdef DUMP_CIS
+	dhd_dump_cis_buf(dhdp, DUMP_CIS_SIZE);
+#endif /* DUMP_CIS */
+	idx = dhd_find_tuple_idx_from_otp(dhdp, CIS_TUPLE_TAG_VENDOR, &tuple_length);
+	if (idx > 0) {
+		vid = &g_cis_buf[idx];
+		DHD_INFO(("%s: VID FOUND : 0x%x%x\n", __FUNCTION__,
+			vid[VENDOR_OFF], vid[MD_REV_OFF]));
+	} else {
+		DHD_ERROR(("%s: use nvram default\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* clear the whole buffer, not just sizeof(MAX_VNAME_LEN) bytes */
+	memset(cid_info, 0, sizeof(cid_info));
+	cid_info[MD_REV_OFF] = vid[MD_REV_OFF];
+	cid_info[VENDOR_OFF] = vid[VENDOR_OFF];
+#ifndef DHD_EXPORT_CNTL_FILE
+	dhd_write_file(cidfilepath, cid_info, strlen(cid_info) + 1);
+#else
+	strlcpy(cidinfostr, cid_info, MAX_VNAME_LEN);
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+	DHD_INFO(("%s: cidinfostr %x%x\n", __FUNCTION__,
+		cidinfostr[VENDOR_OFF], cidinfostr[MD_REV_OFF]));
+	return ret;
+}
+
+int
+dhd_check_stored_module_info(char *vid)
+{
+	int ret = BCME_OK;
+#ifndef DHD_EXPORT_CNTL_FILE
+	const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+	memset(vid, 0, MAX_VID_LEN);
+
+#ifndef DHD_EXPORT_CNTL_FILE
+	ret = dhd_read_file(cidfilepath, vid, MAX_VID_LEN - 1);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: failed to get module information from .cid.info\n",
+			__FUNCTION__));
+		return ret;
+	}
+#else
+	strlcpy(vid, cidinfostr, MAX_VID_LEN);
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+	if (vid[0] == (char)0) {
+		DHD_ERROR(("%s : Failed to get module information\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_INFO(("%s: stored VID= 0x%x%x\n", __FUNCTION__, vid[VENDOR_OFF], vid[MD_REV_OFF]));
+	return ret;
+}
+#endif /* USE_DIRECT_VID_TAG */
+#endif /* DHD_USE_CISINFO */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_exynos.c b/bcmdhd.101.10.361.x/dhd_custom_exynos.c
new file mode 100755
index 0000000..01e1f54
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_exynos.c
@@ -0,0 +1,333 @@
+/*
+ * Platform Dependent file for Samsung Exynos
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+	defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830) || \
+	defined(CONFIG_SOC_EXYNOS2100) || defined(CONFIG_SOC_EXYNOS1000)
+#include
+#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 ||
+	* CONFIG_SOC_EXYNOS9820 || CONFIG_SOC_EXYNOS9830 ||
+	* CONFIG_SOC_EXYNOS2100 || CONFIG_SOC_EXYNOS1000
+	*/
+
+#if defined(CONFIG_64BIT)
+#include
+#endif /* CONFIG_64BIT */
+
+#ifdef BCMDHD_MODULAR
+#if IS_ENABLED(CONFIG_SEC_SYSFS)
+#include
+#endif /* CONFIG_SEC_SYSFS */
+#if IS_ENABLED(CONFIG_DRV_SAMSUNG)
+#include
+#endif /* CONFIG_DRV_SAMSUNG */
+#else
+#if defined(CONFIG_SEC_SYSFS)
+#include
+#elif defined(CONFIG_DRV_SAMSUNG)
+#include
+#endif /* CONFIG_SEC_SYSFS || CONFIG_DRV_SAMSUNG */
+#endif /* BCMDHD_MODULAR */
+#include
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+#define PINCTL_DELAY 150
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+extern void dhd_exit_wlan_mem(void);
+extern int dhd_init_wlan_mem(void);
+extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+#define WIFI_TURNON_DELAY 200
+static int wlan_pwr_on = -1;
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+static int wlan_host_wake_irq = 0;
+static unsigned int wlan_host_wake_up = -1;
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+extern struct device *mmc_dev_for_wlan;
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+#ifdef CONFIG_BCMDHD_PCIE
+extern int pcie_ch_num;
+extern void exynos_pcie_pm_resume(int);
+extern void exynos_pcie_pm_suspend(int);
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#if defined(CONFIG_SOC_EXYNOS7870) || defined(CONFIG_SOC_EXYNOS9110)
+extern struct mmc_host *wlan_mmc;
+extern void mmc_ctrl_power(struct mmc_host *host, bool onoff);
+#endif /* CONFIG_SOC_EXYNOS7870 || CONFIG_SOC_EXYNOS9110 */
+
+static int
+dhd_wlan_power(int onoff)
+{
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+	struct pinctrl *pinctrl = NULL;
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+	printk(KERN_INFO "%s Enter: power %s\n", __FUNCTION__, onoff ? "on" : "off");
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+	if (onoff) {
+		pinctrl = devm_pinctrl_get_select(mmc_dev_for_wlan, "sdio_wifi_on");
+		if (IS_ERR(pinctrl))
+			printk(KERN_INFO "%s WLAN SDIO GPIO control error\n", __FUNCTION__);
+		msleep(PINCTL_DELAY);
+	}
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+	if (gpio_direction_output(wlan_pwr_on, onoff)) {
+		printk(KERN_ERR "%s failed to control WLAN_REG_ON to %s\n",
+			__FUNCTION__, onoff ? "HIGH" : "LOW");
+		return -EIO;
+	}
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+	if (!onoff) {
+		pinctrl = devm_pinctrl_get_select(mmc_dev_for_wlan, "sdio_wifi_off");
+		if (IS_ERR(pinctrl))
+			printk(KERN_INFO "%s WLAN SDIO GPIO control error\n", __FUNCTION__);
+	}
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+#if defined(CONFIG_SOC_EXYNOS7870) || defined(CONFIG_SOC_EXYNOS9110)
+	if (wlan_mmc)
+		mmc_ctrl_power(wlan_mmc, onoff);
+#endif /* CONFIG_SOC_EXYNOS7870 || CONFIG_SOC_EXYNOS9110 */
+	return 0;
+}
+
+static int
+dhd_wlan_reset(int onoff)
+{
+	return 0;
+}
+
+#ifndef CONFIG_BCMDHD_PCIE
+extern void (*notify_func_callback)(void *dev_id, int state);
+extern void *mmc_host_dev;
+#endif /* !CONFIG_BCMDHD_PCIE */
+
+static int
+dhd_wlan_set_carddetect(int val)
+{
+#ifndef CONFIG_BCMDHD_PCIE
+	pr_err("%s: notify_func=%p, mmc_host_dev=%p, val=%d\n",
+		__FUNCTION__, notify_func_callback, mmc_host_dev, val);
+
+	if (notify_func_callback) {
+		notify_func_callback(mmc_host_dev, val);
+	} else {
+		pr_warning("%s: Nobody to notify\n", __FUNCTION__);
+	}
+#else
+	if (val) {
+		exynos_pcie_pm_resume(pcie_ch_num);
+	} else {
+		exynos_pcie_pm_suspend(pcie_ch_num);
+	}
+#endif /* CONFIG_BCMDHD_PCIE */
+
+	return 0;
+}
+
+int __init
+dhd_wlan_init_gpio(void)
+{
+	const char *wlan_node = "samsung,brcm-wlan";
+	struct device_node *root_node = NULL;
+	struct device *wlan_dev;
+
+	wlan_dev = sec_device_create(NULL, "wlan");
+
+	root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+	if (!root_node) {
+		WARN(1, "failed to get device node of bcm4354\n");
+		return -ENODEV;
+	}
+
+	/* ========== WLAN_PWR_EN ============ */
+	wlan_pwr_on = of_get_gpio(root_node, 0);
+	if (!gpio_is_valid(wlan_pwr_on)) {
+		WARN(1, "Invalid gpio pin : %d\n", wlan_pwr_on);
+		return -ENODEV;
+	}
+
+	if (gpio_request(wlan_pwr_on, "WLAN_REG_ON")) {
+		WARN(1, "failed to request gpio(WLAN_REG_ON)\n");
+		return -ENODEV;
+	}
+#ifdef CONFIG_BCMDHD_PCIE
+	gpio_direction_output(wlan_pwr_on, 1);
+	msleep(WIFI_TURNON_DELAY);
+#else
+	gpio_direction_output(wlan_pwr_on, 0);
+#endif /* CONFIG_BCMDHD_PCIE */
+	gpio_export(wlan_pwr_on, 1);
+	if (wlan_dev)
+		gpio_export_link(wlan_dev, "WLAN_REG_ON", wlan_pwr_on);
+
+#ifdef CONFIG_BCMDHD_PCIE
+	exynos_pcie_pm_resume(pcie_ch_num);
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+	/* ========== WLAN_HOST_WAKE ============ */
+	wlan_host_wake_up = of_get_gpio(root_node, 1);
+	if (!gpio_is_valid(wlan_host_wake_up)) {
+		WARN(1, "Invalid gpio pin : %d\n", wlan_host_wake_up);
+		return -ENODEV;
+	}
+
+	if (gpio_request(wlan_host_wake_up, "WLAN_HOST_WAKE")) {
+		WARN(1, "failed to request gpio(WLAN_HOST_WAKE)\n");
+		return -ENODEV;
+	}
+	gpio_direction_input(wlan_host_wake_up);
+	gpio_export(wlan_host_wake_up, 1);
+	if (wlan_dev)
+		gpio_export_link(wlan_dev, "WLAN_HOST_WAKE", wlan_host_wake_up);
+
+	wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+	return 0;
+}
+
+#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE)
+int
+dhd_get_wlan_oob_gpio(void) +{ + return gpio_is_valid(wlan_host_wake_up) ? + gpio_get_value(wlan_host_wake_up) : -1; +} +EXPORT_SYMBOL(dhd_get_wlan_oob_gpio); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */ + +struct resource dhd_wlan_resources = { + .name = "bcmdhd_wlan_irq", + .start = 0, + .end = 0, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE | +#ifdef CONFIG_BCMDHD_PCIE + IORESOURCE_IRQ_HIGHEDGE, +#else + IORESOURCE_IRQ_HIGHLEVEL, +#endif /* CONFIG_BCMDHD_PCIE */ +}; +EXPORT_SYMBOL(dhd_wlan_resources); + +struct wifi_platform_data dhd_wlan_control = { + .set_power = dhd_wlan_power, + .set_reset = dhd_wlan_reset, + .set_carddetect = dhd_wlan_set_carddetect, +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + .mem_prealloc = dhd_wlan_mem_prealloc, +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ +}; +EXPORT_SYMBOL(dhd_wlan_control); + +int __init +dhd_wlan_init(void) +{ + int ret; + + printk(KERN_INFO "%s: START.......\n", __FUNCTION__); + ret = dhd_wlan_init_gpio(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n", + __FUNCTION__, ret); + goto fail; + } + +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE + dhd_wlan_resources.start = wlan_host_wake_irq; + dhd_wlan_resources.end = wlan_host_wake_irq; +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + ret = dhd_init_wlan_mem(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to alloc reserved memory," + " ret=%d\n", __FUNCTION__, ret); + } +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +fail: + return ret; +} + +int +dhd_wlan_deinit(void) +{ +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE + gpio_free(wlan_host_wake_up); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + gpio_free(wlan_pwr_on); + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + dhd_exit_wlan_mem(); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + return 0; +} + +#ifndef BCMDHD_MODULAR +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ + defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \ + defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830) +#if defined(CONFIG_DEFERRED_INITCALLS) +deferred_module_init(dhd_wlan_init); +#else +late_initcall(dhd_wlan_init); +#endif /* CONFIG_DEFERRED_INITCALLS */ +#else +device_initcall(dhd_wlan_init); +#endif /* CONFIG Exynos PCIE Platforms */ +#endif /* !BCMDHD_MODULAR */ diff --git a/bcmdhd.101.10.361.x/dhd_custom_gpio.c b/bcmdhd.101.10.361.x/dhd_custom_gpio.c new file mode 100755 index 0000000..baf4740 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_custom_gpio.c @@ -0,0 +1,437 @@ +/* + * Customer code to add GPIO control during WLAN start/stop + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef BCMDONGLEHOST +#include +#include +#else +#define WL_ERROR(x) printf x +#define WL_TRACE(x) +#endif + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + +#if defined(BCMLXSDMMC) +extern int sdioh_mmc_irq(int irq); +#endif /* (BCMLXSDMMC) */ + +/* Customer specific Host GPIO defintion */ +static int dhd_oob_gpio_num = -1; + +module_param(dhd_oob_gpio_num, int, 0644); +MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number"); + +/* This function will return: + * 1) return : Host gpio interrupt number per customer platform + * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge + * + * NOTE : + * Customer should check his platform definitions + * and his Host Interrupt spec + * to figure out the proper setting for his platform. + * Broadcom provides just reference settings as example. + * + */ +int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr) +{ + int host_oob_irq = 0; + +#if defined(CUSTOMER_HW2) || defined(CUSTOMER_HW4) || defined(BOARD_HIKEY) + host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr); + +#else +#if defined(CUSTOM_OOB_GPIO_NUM) + if (dhd_oob_gpio_num < 0) { + dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM; + } +#endif /* CUSTOMER_OOB_GPIO_NUM */ + + if (dhd_oob_gpio_num < 0) { + WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n", + __FUNCTION__)); + return (dhd_oob_gpio_num); + } + + WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", + __FUNCTION__, dhd_oob_gpio_num)); + +#endif /* CUSTOMER_HW2 || CUSTOMER_HW4 || BOARD_HIKEY */ + + return (host_oob_irq); +} +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + +/* Customer function to control hw specific wlan gpios */ +int +dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff) +{ + int err = 0; + + return err; +} + +#if 0 +/* Function to get custom MAC address */ +int +dhd_custom_get_mac_address(void *adapter, unsigned char *buf) +{ + int ret = 0; + + WL_TRACE(("%s Enter\n", __FUNCTION__)); + if (!buf) + return -EINVAL; + + /* Customer access to MAC address stored outside of DHD driver */ +#if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW10) || defined(BOARD_HIKEY)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + ret = wifi_platform_get_mac_addr(adapter, buf); +#endif + +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ + + return ret; +} +#endif /* GET_CUSTOM_MAC_ENABLE */ + +#ifndef CUSTOMER_HW4 +/* Customized Locale table : OPTIONAL feature */ +const struct cntry_locales_custom translate_custom_table[] = { +/* Table should be filled out based on custom platform regulatory requirement */ +#ifdef EXAMPLE_TABLE + {"", "XY", 4}, /* Universal if Country code is unknown or empty */ + {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */ + {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */ + {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */ + {"AT", "EU", 5}, + {"BE", "EU", 5}, + {"BG", "EU", 5}, + {"CY", "EU", 5}, + {"CZ", "EU", 5}, + {"DK", "EU", 5}, + {"EE", "EU", 5}, + {"FI", "EU", 5}, + {"FR", "EU", 5}, + {"DE", "EU", 5}, + {"GR", "EU", 5}, + {"HU", "EU", 5}, + {"IE", "EU", 5}, + {"IT", "EU", 5}, + {"LV", "EU", 5}, + {"LI", "EU", 5}, + {"LT", "EU", 5}, + {"LU", "EU", 5}, + {"MT", "EU", 5}, + {"NL", "EU", 5}, + {"PL", 
"EU", 5}, + {"PT", "EU", 5}, + {"RO", "EU", 5}, + {"SK", "EU", 5}, + {"SI", "EU", 5}, + {"ES", "EU", 5}, + {"SE", "EU", 5}, + {"GB", "EU", 5}, + {"KR", "XY", 3}, + {"AU", "XY", 3}, + {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */ + {"TW", "XY", 3}, + {"AR", "XY", 3}, + {"MX", "XY", 3}, + {"IL", "IL", 0}, + {"CH", "CH", 0}, + {"TR", "TR", 0}, + {"NO", "NO", 0}, +#endif /* EXMAPLE_TABLE */ +#if (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && !defined(CUSTOMER_HW5) +#if defined(BCM4335_CHIP) + {"", "XZ", 11}, /* Universal if Country code is unknown or empty */ +#endif + {"AE", "AE", 1}, + {"AR", "AR", 1}, + {"AT", "AT", 1}, + {"AU", "AU", 2}, + {"BE", "BE", 1}, + {"BG", "BG", 1}, + {"BN", "BN", 1}, + {"CA", "CA", 2}, + {"CH", "CH", 1}, + {"CY", "CY", 1}, + {"CZ", "CZ", 1}, + {"DE", "DE", 3}, + {"DK", "DK", 1}, + {"EE", "EE", 1}, + {"ES", "ES", 1}, + {"FI", "FI", 1}, + {"FR", "FR", 1}, + {"GB", "GB", 1}, + {"GR", "GR", 1}, + {"HR", "HR", 1}, + {"HU", "HU", 1}, + {"IE", "IE", 1}, + {"IS", "IS", 1}, + {"IT", "IT", 1}, + {"ID", "ID", 1}, + {"JP", "JP", 8}, + {"KR", "KR", 24}, + {"KW", "KW", 1}, + {"LI", "LI", 1}, + {"LT", "LT", 1}, + {"LU", "LU", 1}, + {"LV", "LV", 1}, + {"MA", "MA", 1}, + {"MT", "MT", 1}, + {"MX", "MX", 1}, + {"NL", "NL", 1}, + {"NO", "NO", 1}, + {"PL", "PL", 1}, + {"PT", "PT", 1}, + {"PY", "PY", 1}, + {"RO", "RO", 1}, + {"SE", "SE", 1}, + {"SI", "SI", 1}, + {"SK", "SK", 1}, + {"TR", "TR", 7}, + {"TW", "TW", 1}, + {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */ + {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */ + {"SY", "XZ", 11}, /* Universal if Country code is SYRIAN ARAB REPUBLIC */ + {"GL", "XZ", 11}, /* Universal if Country code is GREENLAND */ + {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */ + {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */ + {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */ +#elif defined(CUSTOMER_HW5) + {"", "XZ", 11}, + {"AE", "AE", 212}, + {"AG", "AG", 2}, + {"AI", "AI", 2}, + {"AL", "AL", 2}, + {"AN", "AN", 3}, + {"AR", "AR", 212}, + {"AS", "AS", 15}, + {"AT", "AT", 4}, + {"AU", "AU", 212}, + {"AW", "AW", 2}, + {"AZ", "AZ", 2}, + {"BA", "BA", 2}, + {"BD", "BD", 2}, + {"BE", "BE", 4}, + {"BG", "BG", 4}, + {"BH", "BH", 4}, + {"BM", "BM", 15}, + {"BN", "BN", 4}, + {"BR", "BR", 212}, + {"BS", "BS", 2}, + {"BY", "BY", 3}, + {"BW", "BW", 1}, + {"CA", "CA", 212}, + {"CH", "CH", 212}, + {"CL", "CL", 212}, + {"CN", "CN", 212}, + {"CO", "CO", 212}, + {"CR", "CR", 21}, + {"CY", "CY", 212}, + {"CZ", "CZ", 212}, + {"DE", "DE", 212}, + {"DK", "DK", 4}, + {"DZ", "DZ", 1}, + {"EC", "EC", 23}, + {"EE", "EE", 4}, + {"EG", "EG", 212}, + {"ES", "ES", 212}, + {"ET", "ET", 2}, + {"FI", "FI", 4}, + {"FR", "FR", 212}, + {"GB", "GB", 212}, + {"GD", "GD", 2}, + {"GF", "GF", 2}, + {"GP", "GP", 2}, + {"GR", "GR", 212}, + {"GT", "GT", 0}, + {"GU", "GU", 17}, + {"HK", "HK", 212}, + {"HR", "HR", 4}, + {"HU", "HU", 4}, + {"IN", "IN", 212}, + {"ID", "ID", 212}, + {"IE", "IE", 5}, + {"IL", "IL", 7}, + {"IN", "IN", 212}, + {"IS", "IS", 4}, + {"IT", "IT", 212}, + {"JO", "JO", 3}, + {"JP", "JP", 212}, + {"KH", "KH", 4}, + {"KI", "KI", 1}, + {"KR", "KR", 212}, + {"KW", "KW", 5}, + {"KY", "KY", 4}, + {"KZ", "KZ", 212}, + {"LA", "LA", 4}, + {"LB", "LB", 6}, + {"LI", "LI", 4}, + {"LK", "LK", 3}, + {"LS", "LS", 2}, + {"LT", "LT", 4}, + {"LR", "LR", 2}, + {"LU", "LU", 3}, + {"LV", "LV", 4}, + {"MA", "MA", 2}, + {"MC", "MC", 1}, + {"MD", "MD", 2}, + 
{"ME", "ME", 2}, + {"MK", "MK", 2}, + {"MN", "MN", 0}, + {"MO", "MO", 2}, + {"MR", "MR", 2}, + {"MT", "MT", 4}, + {"MQ", "MQ", 2}, + {"MU", "MU", 2}, + {"MV", "MV", 3}, + {"MX", "MX", 212}, + {"MY", "MY", 212}, + {"NI", "NI", 0}, + {"NL", "NL", 212}, + {"NO", "NO", 4}, + {"NP", "NP", 3}, + {"NZ", "NZ", 9}, + {"OM", "OM", 4}, + {"PA", "PA", 17}, + {"PE", "PE", 212}, + {"PG", "PG", 2}, + {"PH", "PH", 212}, + {"PL", "PL", 212}, + {"PR", "PR", 25}, + {"PT", "PT", 212}, + {"PY", "PY", 4}, + {"RE", "RE", 2}, + {"RO", "RO", 212}, + {"RS", "RS", 2}, + {"RU", "RU", 212}, + {"SA", "SA", 212}, + {"SE", "SE", 212}, + {"SG", "SG", 212}, + {"SI", "SI", 4}, + {"SK", "SK", 212}, + {"SN", "SN", 2}, + {"SV", "SV", 25}, + {"TH", "TH", 212}, + {"TR", "TR", 212}, + {"TT", "TT", 5}, + {"TW", "TW", 212}, + {"UA", "UA", 212}, + {"UG", "UG", 2}, + {"US", "US", 212}, + {"UY", "UY", 5}, + {"VA", "VA", 2}, + {"VE", "VE", 3}, + {"VG", "VG", 2}, + {"VI", "VI", 18}, + {"VN", "VN", 4}, + {"YT", "YT", 2}, + {"ZA", "ZA", 212}, + {"ZM", "ZM", 2}, + {"XT", "XT", 212}, + {"XZ", "XZ", 11}, + {"XV", "XV", 17}, + {"Q1", "Q1", 77}, +#endif /* (CUSTOMER_HW2 || BOARD_HIKEY) && CUSTOMER_HW5 */ +}; + +/* Customized Locale convertor +* input : ISO 3166-1 country abbreviation +* output: customized cspec +*/ +void +#ifdef CUSTOM_COUNTRY_CODE +get_customized_country_code(void *adapter, char *country_iso_code, + wl_country_t *cspec, u32 flags) +#else +get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) +#endif /* CUSTOM_COUNTRY_CODE */ +{ +#if defined(OEM_ANDROID) +#if (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + + struct cntry_locales_custom *cloc_ptr; + + if (!cspec) + return; +#ifdef CUSTOM_COUNTRY_CODE + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, flags); +#else + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code); +#endif /* CUSTOM_COUNTRY_CODE */ + + if (cloc_ptr) { + strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = cloc_ptr->custom_locale_rev; + } + return; +#else + int size, i; + + size = ARRAYSIZE(translate_custom_table); + + if (cspec == 0) + return; + + if (size == 0) + return; + + for (i = 0; i < size; i++) { + if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) { + memcpy(cspec->ccode, + translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[i].custom_locale_rev; + return; + } + } +#ifdef EXAMPLE_TABLE + /* if no country code matched return first universal code from translate_custom_table */ + memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[0].custom_locale_rev; +#endif /* EXMAPLE_TABLE */ + return; +#endif /* (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && + * (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) + */ +#endif /* OEM_ANDROID */ +} +#endif /* !CUSTOMER_HW4 */ diff --git a/bcmdhd.101.10.361.x/dhd_custom_hikey.c b/bcmdhd.101.10.361.x/dhd_custom_hikey.c new file mode 100755 index 0000000..cb5b715 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_custom_hikey.c @@ -0,0 +1,290 @@ +/* + * Platform Dependent file for Hikey + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_WIFI_CONTROL_FUNC +#include +#else +#include +#endif /* CONFIG_WIFI_CONTROL_FUNC */ +#include +#include + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM +extern int dhd_init_wlan_mem(void); +extern void *dhd_wlan_mem_prealloc(int section, unsigned long size); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +#define WLAN_REG_ON_GPIO 491 +#define WLAN_HOST_WAKE_GPIO 493 + +static int wlan_reg_on = -1; +#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan" +#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on" + +static int wlan_host_wake_up = -1; +static int wlan_host_wake_irq = 0; +#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake" + +int +dhd_wifi_init_gpio(void) +{ + int gpio_reg_on_val; + /* ========== WLAN_PWR_EN ============ */ + char *wlan_node = DHD_DT_COMPAT_ENTRY; + struct device_node *root_node = NULL; + + root_node = of_find_compatible_node(NULL, NULL, wlan_node); + if (root_node) { + wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0); + wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0); + } else { + DHD_ERROR(("failed to get device node of BRCM WLAN, use default GPIOs\n")); + wlan_reg_on = WLAN_REG_ON_GPIO; + wlan_host_wake_up = WLAN_HOST_WAKE_GPIO; + } + + /* ========== WLAN_PWR_EN ============ */ + DHD_INFO(("%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on)); + + /* + * For reg_on, gpio_request will fail if the gpio is configured to output-high + * in the dts using gpio-hog, so do not return error for failure. 
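 + * (Illustrative note, assuming the usual gpiolib behavior: a gpio-hog child + * node in the dts, i.e. one carrying the "gpio-hog" and "output-high" + * properties for this line, makes the kernel claim the GPIO at boot, so the + * gpio_request_one() below typically fails with -EBUSY even though the pin + * is already driven high.)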
 + */ + if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_HIGH, "WL_REG_ON")) { + DHD_ERROR(("%s: Failed to request gpio %d for WL_REG_ON, " + "might have been configured in the dts\n", + __FUNCTION__, wlan_reg_on)); + } else { + DHD_ERROR(("%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n", + __FUNCTION__, wlan_reg_on)); + } + + gpio_reg_on_val = gpio_get_value(wlan_reg_on); + DHD_INFO(("%s: Initial WL_REG_ON: [%d]\n", + __FUNCTION__, gpio_reg_on_val)); + + if (gpio_reg_on_val == 0) { + DHD_INFO(("%s: WL_REG_ON is LOW, drive it HIGH\n", __FUNCTION__)); + if (gpio_direction_output(wlan_reg_on, 1)) { + DHD_ERROR(("%s: Failed to pull up WL_REG_ON\n", __FUNCTION__)); + return -EIO; + } + } + + /* Wait for WIFI_TURNON_DELAY due to power stability */ + msleep(WIFI_TURNON_DELAY); + + /* ========== WLAN_HOST_WAKE ============ */ + DHD_INFO(("%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up)); + + if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) { + DHD_ERROR(("%s: Failed to request gpio %d for WLAN_HOST_WAKE\n", + __FUNCTION__, wlan_host_wake_up)); + return -ENODEV; + } else { + DHD_ERROR(("%s: gpio_request WLAN_HOST_WAKE done" + " - WLAN_HOST_WAKE: GPIO %d\n", + __FUNCTION__, wlan_host_wake_up)); + } + + if (gpio_direction_input(wlan_host_wake_up)) { + DHD_ERROR(("%s: Failed to set WL_HOST_WAKE gpio direction\n", __FUNCTION__)); + return -EIO; + } + + wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up); + + return 0; +} + +extern void kirin_pcie_power_on_atu_fixup(void) __attribute__ ((weak)); +extern int kirin_pcie_lp_ctrl(u32 enable) __attribute__ ((weak)); + +int +dhd_wlan_power(int onoff) +{ + DHD_INFO(("------------------------------------------------\n")); + DHD_INFO(("------------------------------------------------\n")); + DHD_INFO(("%s Enter: power %s\n", __func__, onoff ? "on" : "off")); + + if (onoff) { + if (gpio_direction_output(wlan_reg_on, 1)) { + DHD_ERROR(("%s: Failed to pull up WL_REG_ON\n", __FUNCTION__)); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + DHD_INFO(("WL_REG_ON on-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on))); + } else { + DHD_ERROR(("[%s] gpio value is 0. We need reinit.\n", __func__)); + if (gpio_direction_output(wlan_reg_on, 1)) { + DHD_ERROR(("%s: Failed to " + "pull up WL_REG_ON\n", __func__)); + } + } + + /* Wait for WIFI_TURNON_DELAY due to power stability */ + msleep(WIFI_TURNON_DELAY); + + /* + * Call the Kirin RC ATU fixup, else si_attach will fail due to + * improper BAR0/1 address translations + */ + if (kirin_pcie_power_on_atu_fixup) { + kirin_pcie_power_on_atu_fixup(); + } else { + DHD_ERROR(("[%s] kirin_pcie_power_on_atu_fixup is NULL. " + "REG_ON may not work\n", __func__)); + } + /* Enable ASPM after powering ON */ + if (kirin_pcie_lp_ctrl) { + kirin_pcie_lp_ctrl(onoff); + } else { + DHD_ERROR(("[%s] kirin_pcie_lp_ctrl is NULL. " + "ASPM may not work\n", __func__)); + } + } else { + /* Disable ASPM before powering off */ + if (kirin_pcie_lp_ctrl) { + kirin_pcie_lp_ctrl(onoff); + } else { + DHD_ERROR(("[%s] kirin_pcie_lp_ctrl is NULL. 
" + "ASPM may not work\n", __func__)); + } + if (gpio_direction_output(wlan_reg_on, 0)) { + DHD_ERROR(("%s: WL_REG_ON is failed to pull up\n", __FUNCTION__)); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + DHD_INFO(("WL_REG_ON on-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on))); + } + } + return 0; +} +EXPORT_SYMBOL(dhd_wlan_power); + +static int +dhd_wlan_reset(int onoff) +{ + return 0; +} + +static int +dhd_wlan_set_carddetect(int val) +{ + return 0; +} + +#ifdef BCMSDIO +static int dhd_wlan_get_wake_irq(void) +{ + return gpio_to_irq(wlan_host_wake_up); +} +#endif /* BCMSDIO */ + +#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE) +int +dhd_get_wlan_oob_gpio(void) +{ + return gpio_is_valid(wlan_host_wake_up) ? + gpio_get_value(wlan_host_wake_up) : -1; +} +EXPORT_SYMBOL(dhd_get_wlan_oob_gpio); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */ + +struct resource dhd_wlan_resources = { + .name = "bcmdhd_wlan_irq", + .start = 0, /* Dummy */ + .end = 0, /* Dummy */ + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE | + IORESOURCE_IRQ_HIGHEDGE, +}; +EXPORT_SYMBOL(dhd_wlan_resources); + +struct wifi_platform_data dhd_wlan_control = { + .set_power = dhd_wlan_power, + .set_reset = dhd_wlan_reset, + .set_carddetect = dhd_wlan_set_carddetect, +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + .mem_prealloc = dhd_wlan_mem_prealloc, +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ +#ifdef BCMSDIO + .get_wake_irq = dhd_wlan_get_wake_irq, +#endif +}; +EXPORT_SYMBOL(dhd_wlan_control); + +int +dhd_wlan_init(void) +{ + int ret; + + DHD_INFO(("%s: START.......\n", __FUNCTION__)); + ret = dhd_wifi_init_gpio(); + if (ret < 0) { + DHD_ERROR(("%s: failed to initiate GPIO, ret=%d\n", + __FUNCTION__, ret)); + goto fail; + } + + dhd_wlan_resources.start = wlan_host_wake_irq; + dhd_wlan_resources.end = wlan_host_wake_irq; + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + ret = dhd_init_wlan_mem(); + if (ret < 0) { + DHD_ERROR(("%s: failed to alloc reserved memory," + " ret=%d\n", __FUNCTION__, ret)); + } +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +fail: + DHD_INFO(("%s: FINISH.......\n", __FUNCTION__)); + return ret; +} + +int +dhd_wlan_deinit(void) +{ + gpio_free(wlan_host_wake_up); + gpio_free(wlan_reg_on); + return 0; +} +#ifndef BCMDHD_MODULAR +/* Required only for Built-in DHD */ +device_initcall(dhd_wlan_init); +#endif /* BOARD_HIKEY_MODULAR */ diff --git a/bcmdhd.101.10.361.x/dhd_custom_memprealloc.c b/bcmdhd.101.10.361.x/dhd_custom_memprealloc.c new file mode 100755 index 0000000..f43d7e3 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_custom_memprealloc.c @@ -0,0 +1,500 @@ +/* + * Platform Dependent file for usage of Preallocted Memory + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + +#define WLAN_STATIC_SCAN_BUF0 5 +#define WLAN_STATIC_SCAN_BUF1 6 +#define WLAN_STATIC_DHD_INFO_BUF 7 +#define WLAN_STATIC_DHD_WLFC_BUF 8 +#define WLAN_STATIC_DHD_IF_FLOW_LKUP 9 +#define WLAN_STATIC_DHD_MEMDUMP_RAM 11 +#define WLAN_STATIC_DHD_WLFC_HANGER 12 +#define WLAN_STATIC_DHD_PKTID_MAP 13 +#define WLAN_STATIC_DHD_PKTID_IOCTL_MAP 14 +#define WLAN_STATIC_DHD_LOG_DUMP_BUF 15 +#define WLAN_STATIC_DHD_LOG_DUMP_BUF_EX 16 +#define WLAN_STATIC_DHD_PKTLOG_DUMP_BUF 17 + +#define WLAN_SCAN_BUF_SIZE (64 * 1024) + +#define WLAN_DHD_INFO_BUF_SIZE (64 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) +/* Have 3MB ramsize to accommodate future chips */ +#define WLAN_DHD_MEMDUMP_SIZE (3 * 1024 * 1024) + +#define PREALLOC_WLAN_SEC_NUM 4 +#define PREALLOC_WLAN_BUF_NUM 160 +#define PREALLOC_WLAN_SECTION_HEADER 24 + +#ifdef CONFIG_BCMDHD_PCIE +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_1 0 +#define WLAN_SECTION_SIZE_2 0 +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) + +#define DHD_SKB_1PAGE_BUF_NUM 0 +#define DHD_SKB_2PAGE_BUF_NUM 192 +#define DHD_SKB_4PAGE_BUF_NUM 0 + +#else +#define DHD_SKB_HDRSIZE 336 +#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) +#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) +#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_BUF_NUM * 512) +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) + +#define DHD_SKB_1PAGE_BUF_NUM 8 +#define DHD_SKB_2PAGE_BUF_NUM 8 +#define DHD_SKB_4PAGE_BUF_NUM 1 +#endif /* CONFIG_BCMDHD_PCIE */ + +#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ + (DHD_SKB_2PAGE_BUF_NUM)) +#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + \ + (DHD_SKB_4PAGE_BUF_NUM)) + +#define WLAN_MAX_PKTID_ITEMS (8192) +#define WLAN_DHD_PKTID_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_ITEMS + 1)) +#define WLAN_DHD_PKTID_MAP_ITEM_SIZE (32) +#define WLAN_DHD_PKTID_MAP_SIZE ((WLAN_DHD_PKTID_MAP_HDR_SIZE) + \ + ((WLAN_MAX_PKTID_ITEMS+1) * WLAN_DHD_PKTID_MAP_ITEM_SIZE)) + +#define WLAN_MAX_PKTID_IOCTL_ITEMS (32) +#define WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_IOCTL_ITEMS + 1)) +#define WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE (32) +#define WLAN_DHD_PKTID_IOCTL_MAP_SIZE ((WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE) + \ + ((WLAN_MAX_PKTID_IOCTL_ITEMS+1) * WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE)) + +#define DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * 4) +#define DHD_LOG_DUMP_BUF_EX_SIZE (1024 * 1024 * 2) + +#define DHD_PKTLOG_DUMP_BUF_SIZE (64 * 1024) + +#define WLAN_DHD_WLFC_HANGER_MAXITEMS 3072 +#define WLAN_DHD_WLFC_HANGER_ITEM_SIZE 32 +#define WLAN_DHD_WLFC_HANGER_SIZE ((WLAN_DHD_WLFC_HANGER_ITEM_SIZE) + \ + ((WLAN_DHD_WLFC_HANGER_MAXITEMS) * (WLAN_DHD_WLFC_HANGER_ITEM_SIZE))) + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +struct wlan_mem_prealloc { + void *mem_ptr; + unsigned long size; +}; + +static struct wlan_mem_prealloc 
wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = { + {NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)} +}; + +static void *wlan_static_scan_buf0 = NULL; +static void *wlan_static_scan_buf1 = NULL; +static void *wlan_static_dhd_info_buf = NULL; +static void *wlan_static_dhd_wlfc_buf = NULL; +static void *wlan_static_if_flow_lkup = NULL; +static void *wlan_static_dhd_memdump_ram = NULL; +static void *wlan_static_dhd_wlfc_hanger = NULL; +static void *wlan_static_dhd_pktid_map = NULL; +static void *wlan_static_dhd_pktid_ioctl_map = NULL; +static void *wlan_static_dhd_log_dump_buf = NULL; +static void *wlan_static_dhd_log_dump_buf_ex = NULL; +static void *wlan_static_dhd_pktlog_dump_buf = NULL; + +void dhd_exit_wlan_mem(void); + +void +*dhd_wlan_mem_prealloc(int section, unsigned long size) +{ + if (section == PREALLOC_WLAN_SEC_NUM) { + return wlan_static_skb; + } + + if (section == WLAN_STATIC_SCAN_BUF0) { + return wlan_static_scan_buf0; + } + + if (section == WLAN_STATIC_SCAN_BUF1) { + return wlan_static_scan_buf1; + } + + if (section == WLAN_STATIC_DHD_INFO_BUF) { + if (size > WLAN_DHD_INFO_BUF_SIZE) { + pr_err("request DHD_INFO size(%lu) is bigger than" + " static size(%d).\n", size, + WLAN_DHD_INFO_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_info_buf; + } + + if (section == WLAN_STATIC_DHD_WLFC_BUF) { + if (size > WLAN_DHD_WLFC_BUF_SIZE) { + pr_err("request DHD_WLFC size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_buf; + } + + if (section == WLAN_STATIC_DHD_WLFC_HANGER) { + if (size > WLAN_DHD_WLFC_HANGER_SIZE) { + pr_err("request DHD_WLFC_HANGER size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_WLFC_HANGER_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_hanger; + } + + if (section == WLAN_STATIC_DHD_IF_FLOW_LKUP) { + if (size > WLAN_DHD_IF_FLOW_LKUP_SIZE) { + pr_err("request DHD_IF_FLOW_LKUP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_IF_FLOW_LKUP_SIZE); + return NULL; + } + return wlan_static_if_flow_lkup; + } + + if (section == WLAN_STATIC_DHD_MEMDUMP_RAM) { + if (size > WLAN_DHD_MEMDUMP_SIZE) { + pr_err("request DHD_MEMDUMP_RAM size(%lu) is bigger" + " than static size(%d).\n", + size, WLAN_DHD_MEMDUMP_SIZE); + return NULL; + } + return wlan_static_dhd_memdump_ram; + } + + if (section == WLAN_STATIC_DHD_PKTID_MAP) { + if (size > WLAN_DHD_PKTID_MAP_SIZE) { + pr_err("request DHD_PKTID_MAP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_PKTID_MAP_SIZE); + return NULL; + } + return wlan_static_dhd_pktid_map; + } + + if (section == WLAN_STATIC_DHD_PKTID_IOCTL_MAP) { + if (size > WLAN_DHD_PKTID_IOCTL_MAP_SIZE) { + pr_err("request DHD_PKTID_IOCTL_MAP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_PKTID_IOCTL_MAP_SIZE); + return NULL; + } + return wlan_static_dhd_pktid_ioctl_map; + } + + if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF) { + if (size > DHD_LOG_DUMP_BUF_SIZE) { + pr_err("request DHD_LOG_DUMP_BUF size(%lu) is bigger than" + " static size(%d).\n", + size, DHD_LOG_DUMP_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf; + } + + if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF_EX) { + if (size > DHD_LOG_DUMP_BUF_EX_SIZE) { + pr_err("request DHD_LOG_DUMP_BUF_EX size(%lu) is bigger than" + " static size(%d).\n", + 
size, DHD_LOG_DUMP_BUF_EX_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf_ex; + } + + if (section == WLAN_STATIC_DHD_PKTLOG_DUMP_BUF) { + if (size > DHD_PKTLOG_DUMP_BUF_SIZE) { + pr_err("request DHD_PKTLOG_DUMP_BUF size(%lu) is bigger than" + " static size(%d).\n", + size, DHD_PKTLOG_DUMP_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_pktlog_dump_buf; + } + + if ((section < 0) || (section >= PREALLOC_WLAN_SEC_NUM)) { + return NULL; + } + + if (wlan_mem_array[section].size < size) { + return NULL; + } + + return wlan_mem_array[section].mem_ptr; +} +EXPORT_SYMBOL(dhd_wlan_mem_prealloc); + +int +dhd_init_wlan_mem(void) +{ + int i; + int j; + +#if !defined(CONFIG_BCMDHD_PCIE) + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE, GFP_KERNEL); + if (!wlan_static_skb[i]) { + pr_err("Failed to alloc 1PAGE SKB BUF\n"); + goto err_skb_alloc; + } + } +#endif /* !CONFIG_BCMDHD_PCIE */ + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE, GFP_KERNEL); + if (!wlan_static_skb[i]) { + pr_err("Failed to alloc 2PAGE SKB BUF\n"); + goto err_skb_alloc; + } + } + +#if !defined(CONFIG_BCMDHD_PCIE) + wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE, GFP_KERNEL); + if (!wlan_static_skb[i]) { + pr_err("Failed to alloc 4PAGE SKB BUF\n"); + goto err_skb_alloc; + } +#endif /* !CONFIG_BCMDHD_PCIE */ + + for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) { + if (wlan_mem_array[i].size > 0) { + wlan_mem_array[i].mem_ptr = + kmalloc(wlan_mem_array[i].size, GFP_KERNEL); + + if (!wlan_mem_array[i].mem_ptr) { + pr_err("Failed to mem_alloc for WLAN\n"); + goto err_mem_alloc; + } + } + } + + wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf0) { + pr_err("Failed to alloc wlan_static_scan_buf0\n"); + goto err_mem_alloc; + } + + wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf1) { + pr_err("Failed to alloc wlan_static_scan_buf1\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_log_dump_buf = kmalloc(DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf) { + pr_err("Failed to alloc wlan_static_dhd_log_dump_buf\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf_ex) { + pr_err("Failed to alloc wlan_static_dhd_log_dump_buf_ex\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_info_buf) { + pr_err("Failed to alloc wlan_static_dhd_info_buf\n"); + goto err_mem_alloc; + } + +#ifdef CONFIG_BCMDHD_PCIE + wlan_static_if_flow_lkup = kmalloc(WLAN_DHD_IF_FLOW_LKUP_SIZE, + GFP_KERNEL); + if (!wlan_static_if_flow_lkup) { + pr_err("Failed to alloc wlan_static_if_flow_lkup\n"); + goto err_mem_alloc; + } + +#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP + wlan_static_dhd_pktid_map = kmalloc(WLAN_DHD_PKTID_MAP_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_pktid_map) { + pr_err("Failed to alloc wlan_static_dhd_pktid_map\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_pktid_ioctl_map = kmalloc(WLAN_DHD_PKTID_IOCTL_MAP_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_pktid_ioctl_map) { + pr_err("Failed to alloc wlan_static_dhd_pktid_ioctl_map\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */ +#else + wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, + GFP_KERNEL); + if 
(!wlan_static_dhd_wlfc_buf) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_buf\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_wlfc_hanger = kmalloc(WLAN_DHD_WLFC_HANGER_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_wlfc_hanger) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_hanger\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PCIE */ + +#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP + wlan_static_dhd_memdump_ram = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_memdump_ram) { + pr_err("Failed to alloc wlan_static_dhd_memdump_ram\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */ + + wlan_static_dhd_pktlog_dump_buf = kmalloc(DHD_PKTLOG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_pktlog_dump_buf) { + pr_err("Failed to alloc wlan_static_dhd_pktlog_dump_buf\n"); + goto err_mem_alloc; + } + + pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__); + return 0; + +err_mem_alloc: + dhd_exit_wlan_mem(); + return -ENOMEM; + +err_skb_alloc: + /* + * An skb allocation failed partway, so free only the buffers allocated so + * far; dhd_exit_wlan_mem() would free the full set without knowing how + * many skbs were actually allocated. + */ + pr_err("Failed to skb_alloc for WLAN\n"); + for (j = 0; j < i; j++) { + dev_kfree_skb(wlan_static_skb[j]); + } + return -ENOMEM; +} + +EXPORT_SYMBOL(dhd_init_wlan_mem); + +void +dhd_exit_wlan_mem(void) +{ + int i = 0; + +#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP + if (wlan_static_dhd_memdump_ram) { + kfree(wlan_static_dhd_memdump_ram); + } + +#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */ + +#ifdef CONFIG_BCMDHD_PCIE + if (wlan_static_if_flow_lkup) { + kfree(wlan_static_if_flow_lkup); + } + +#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP + if (wlan_static_dhd_pktid_map) { + kfree(wlan_static_dhd_pktid_map); + } + + if (wlan_static_dhd_pktid_ioctl_map) { + kfree(wlan_static_dhd_pktid_ioctl_map); + } +#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */ +#else + if (wlan_static_dhd_wlfc_buf) { + kfree(wlan_static_dhd_wlfc_buf); + } + + if (wlan_static_dhd_wlfc_hanger) { + kfree(wlan_static_dhd_wlfc_hanger); + } +#endif /* CONFIG_BCMDHD_PCIE */ + if (wlan_static_dhd_info_buf) { + kfree(wlan_static_dhd_info_buf); + } + + if (wlan_static_dhd_log_dump_buf) { + kfree(wlan_static_dhd_log_dump_buf); + } + + if (wlan_static_dhd_log_dump_buf_ex) { + kfree(wlan_static_dhd_log_dump_buf_ex); + } + + if (wlan_static_scan_buf1) { + kfree(wlan_static_scan_buf1); + } + + if (wlan_static_scan_buf0) { + kfree(wlan_static_scan_buf0); + } + + if (wlan_static_dhd_pktlog_dump_buf) { + kfree(wlan_static_dhd_pktlog_dump_buf); + } + + for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) { + if (wlan_mem_array[i].mem_ptr) { + kfree(wlan_mem_array[i].mem_ptr); + } + } + + for (i = 0; i < WLAN_SKB_BUF_NUM; i++) { + dev_kfree_skb(wlan_static_skb[i]); + } + + return; +} +EXPORT_SYMBOL(dhd_exit_wlan_mem); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ diff --git a/bcmdhd.101.10.361.x/dhd_custom_msm.c b/bcmdhd.101.10.361.x/dhd_custom_msm.c new file mode 100755 index 0000000..eb16b69 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_custom_msm.c @@ -0,0 +1,283 @@ +/* + * Platform Dependent file for Qualcomm MSM/APQ + * + * Copyright (C) 2020, Broadcom. 
 + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_BCMDHD_PCIE +#include +#endif /* CONFIG_BCMDHD_PCIE */ +#include +#include +#include +#include + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM +extern void dhd_exit_wlan_mem(void); +extern int dhd_init_wlan_mem(void); +extern void *dhd_wlan_mem_prealloc(int section, unsigned long size); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +#define WIFI_TURNON_DELAY 200 +static int wlan_reg_on = -1; +#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan" +#ifdef CUSTOMER_HW2 +#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on" +#else +#define WIFI_WL_REG_ON_PROPNAME "wlan-en-gpio" +#endif /* CUSTOMER_HW2 */ + +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \ + defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \ + defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA) +#define MSM_PCIE_CH_NUM 0 +#else +#define MSM_PCIE_CH_NUM 1 +#endif /* MSM PCIE Platforms */ + +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE +static int wlan_host_wake_up = -1; +static int wlan_host_wake_irq = 0; +#ifdef CUSTOMER_HW2 +#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake" +#else +#define WIFI_WLAN_HOST_WAKE_PROPNAME "wlan-host-wake-gpio" +#endif /* CUSTOMER_HW2 */ +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + +int __init +dhd_wifi_init_gpio(void) +{ + char *wlan_node = DHD_DT_COMPAT_ENTRY; + struct device_node *root_node = NULL; + + root_node = of_find_compatible_node(NULL, NULL, wlan_node); + if (!root_node) { + WARN(1, "failed to get device node of BRCM WLAN\n"); + return -ENODEV; + } + + /* ========== WLAN_PWR_EN ============ */ + wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0); + printk(KERN_INFO "%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on); + + if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_LOW, "WL_REG_ON")) { + printk(KERN_ERR "%s: Failed to request gpio %d for WL_REG_ON\n", + __FUNCTION__, wlan_reg_on); + } else { + printk(KERN_ERR "%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n", + __FUNCTION__, wlan_reg_on); + } + + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: WL_REG_ON failed to pull up\n", __FUNCTION__); + } else { + printk(KERN_ERR "%s: WL_REG_ON is pulled up\n", __FUNCTION__); + } + + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO "%s: Initial WL_REG_ON: [%d]\n", + __FUNCTION__, gpio_get_value(wlan_reg_on)); + } + + /* Wait for WIFI_TURNON_DELAY due to power stability */ + msleep(WIFI_TURNON_DELAY); + +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE + /* ========== WLAN_HOST_WAKE ============ */ + wlan_host_wake_up = of_get_named_gpio(root_node, 
WIFI_WLAN_HOST_WAKE_PROPNAME, 0); + printk(KERN_INFO "%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up); + +#ifndef CUSTOMER_HW2 + if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) { + printk(KERN_ERR "%s: Failed to request gpio %d for WLAN_HOST_WAKE\n", + __FUNCTION__, wlan_host_wake_up); + return -ENODEV; + } else { + printk(KERN_ERR "%s: gpio_request WLAN_HOST_WAKE done" + " - WLAN_HOST_WAKE: GPIO %d\n", + __FUNCTION__, wlan_host_wake_up); + } +#endif /* !CUSTOMER_HW2 */ + + gpio_direction_input(wlan_host_wake_up); + wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + +#ifdef CONFIG_BCMDHD_PCIE + printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__); + msm_pcie_enumerate(MSM_PCIE_CH_NUM); +#endif /* CONFIG_BCMDHD_PCIE */ + + return 0; +} + +int +dhd_wlan_power(int onoff) +{ + printk(KERN_INFO"%s Enter: power %s\n", __func__, onoff ? "on" : "off"); + + if (onoff) { + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: Failed to pull up WL_REG_ON\n", __FUNCTION__); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on)); + } else { + printk("[%s] gpio value is 0. We need reinit.\n", __func__); + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: Failed to " + "pull up WL_REG_ON\n", __func__); + } + } + } else { + if (gpio_direction_output(wlan_reg_on, 0)) { + printk(KERN_ERR "%s: Failed to pull down WL_REG_ON\n", __FUNCTION__); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO"WL_REG_ON off-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on)); + } + } + return 0; +} +EXPORT_SYMBOL(dhd_wlan_power); + +static int +dhd_wlan_reset(int onoff) +{ + return 0; +} + +static int +dhd_wlan_set_carddetect(int val) +{ +#ifdef CONFIG_BCMDHD_PCIE + printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__); + msm_pcie_enumerate(MSM_PCIE_CH_NUM); +#endif /* CONFIG_BCMDHD_PCIE */ + return 0; +} + +#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE) +int +dhd_get_wlan_oob_gpio(void) +{ + return gpio_is_valid(wlan_host_wake_up) ? 
 + gpio_get_value(wlan_host_wake_up) : -1; +} +EXPORT_SYMBOL(dhd_get_wlan_oob_gpio); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */ + +struct resource dhd_wlan_resources = { + .name = "bcmdhd_wlan_irq", + .start = 0, /* Dummy */ + .end = 0, /* Dummy */ + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE | +#ifdef CONFIG_BCMDHD_PCIE + IORESOURCE_IRQ_HIGHEDGE, +#else + IORESOURCE_IRQ_HIGHLEVEL, +#endif /* CONFIG_BCMDHD_PCIE */ +}; +EXPORT_SYMBOL(dhd_wlan_resources); + +struct wifi_platform_data dhd_wlan_control = { + .set_power = dhd_wlan_power, + .set_reset = dhd_wlan_reset, + .set_carddetect = dhd_wlan_set_carddetect, +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + .mem_prealloc = dhd_wlan_mem_prealloc, +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ +}; +EXPORT_SYMBOL(dhd_wlan_control); + +int __init +dhd_wlan_init(void) +{ + int ret; + + printk(KERN_INFO"%s: START.......\n", __FUNCTION__); + ret = dhd_wifi_init_gpio(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n", + __FUNCTION__, ret); + goto fail; + } + +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE + dhd_wlan_resources.start = wlan_host_wake_irq; + dhd_wlan_resources.end = wlan_host_wake_irq; +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + ret = dhd_init_wlan_mem(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to alloc reserved memory," + " ret=%d\n", __FUNCTION__, ret); + } +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +fail: + printk(KERN_INFO"%s: FINISH.......\n", __FUNCTION__); + return ret; +} + +int +dhd_wlan_deinit(void) +{ +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE + gpio_free(wlan_host_wake_up); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + gpio_free(wlan_reg_on); + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + dhd_exit_wlan_mem(); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + return 0; +} + +#ifndef BCMDHD_MODULAR +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \ + defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \ + defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA) +#if defined(CONFIG_DEFERRED_INITCALLS) +deferred_module_init(dhd_wlan_init); +#else +late_initcall(dhd_wlan_init); +#endif /* CONFIG_DEFERRED_INITCALLS */ +#else +device_initcall(dhd_wlan_init); +#endif /* MSM PCIE Platforms */ +#endif /* !BCMDHD_MODULAR */ diff --git a/bcmdhd.101.10.361.x/dhd_custom_sec.c b/bcmdhd.101.10.361.x/dhd_custom_sec.c new file mode 100755 index 0000000..fd5607b --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_custom_sec.c @@ -0,0 +1,1040 @@ +/* + * Customer HW 4 dependent file + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id: dhd_custom_sec.c 334946 2012-05-24 20:38:00Z chanyun $ + */ +#if defined(CUSTOMER_HW4) || defined(CUSTOMER_HW40) +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* need to still support chips no longer in trunk firmware */ + +#include +#include + +const struct cntry_locales_custom translate_custom_table[] = { + /* default ccode/regrev */ + {"", "XZ", 11}, /* Universal if Country code is unknown or empty */ + {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */ + {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */ + {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */ + {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */ + {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */ + {"GL", "GP", 2}, + {"AL", "AL", 2}, +#ifdef DHD_SUPPORT_GB_999 + {"DZ", "GB", 999}, +#else + {"DZ", "GB", 6}, +#endif /* DHD_SUPPORT_GB_999 */ + {"AS", "AS", 12}, + {"AI", "AI", 1}, + {"AF", "AD", 0}, + {"AG", "AG", 2}, + {"AR", "AU", 6}, + {"AW", "AW", 2}, + {"AU", "AU", 6}, + {"AT", "AT", 4}, + {"AZ", "AZ", 2}, + {"BS", "BS", 2}, + {"BH", "BH", 4}, + {"BD", "BD", 1}, + {"BY", "BY", 3}, + {"BE", "BE", 4}, + {"BM", "BM", 12}, + {"BA", "BA", 2}, + {"BR", "BR", 2}, + {"VG", "VG", 2}, + {"BN", "BN", 4}, + {"BG", "BG", 4}, + {"KH", "KH", 2}, + {"KY", "KY", 3}, + {"CN", "CN", 38}, + {"CO", "CO", 17}, + {"CR", "CR", 17}, + {"HR", "HR", 4}, + {"CY", "CY", 4}, + {"CZ", "CZ", 4}, + {"DK", "DK", 4}, + {"EE", "EE", 4}, + {"ET", "ET", 2}, + {"FI", "FI", 4}, + {"FR", "FR", 5}, + {"GF", "GF", 2}, + {"DE", "DE", 7}, + {"GR", "GR", 4}, + {"GD", "GD", 2}, + {"GP", "GP", 2}, + {"GU", "GU", 30}, + {"HK", "HK", 2}, + {"HU", "HU", 4}, + {"IS", "IS", 4}, + {"IN", "IN", 3}, + {"ID", "ID", 1}, + {"IE", "IE", 5}, + {"IL", "IL", 14}, + {"IT", "IT", 4}, + {"JP", "JP", 45}, + {"JO", "JO", 3}, + {"KE", "SA", 0}, + {"KW", "KW", 5}, + {"LA", "LA", 2}, + {"LV", "LV", 4}, + {"LB", "LB", 5}, + {"LS", "LS", 2}, + {"LI", "LI", 4}, + {"LT", "LT", 4}, + {"LU", "LU", 3}, + {"MO", "SG", 0}, + {"MK", "MK", 2}, + {"MW", "MW", 1}, + {"MY", "MY", 3}, + {"MV", "MV", 3}, + {"MT", "MT", 4}, + {"MQ", "MQ", 2}, + {"MR", "MR", 2}, + {"MU", "MU", 2}, + {"YT", "YT", 2}, + {"MX", "MX", 44}, + {"MD", "MD", 2}, + {"MC", "MC", 1}, + {"ME", "ME", 2}, + {"MA", "MA", 2}, + {"NL", "NL", 4}, + {"AN", "GD", 2}, + {"NZ", "NZ", 4}, + {"NO", "NO", 4}, + {"OM", "OM", 4}, + {"PA", "PA", 17}, + {"PG", "AU", 6}, + {"PY", "PY", 2}, + {"PE", "PE", 20}, + {"PH", "PH", 5}, + {"PL", "PL", 4}, + {"PT", "PT", 4}, + {"PR", "PR", 38}, + {"RE", "RE", 2}, + {"RO", "RO", 4}, + {"SN", "MA", 2}, + {"RS", "RS", 2}, + {"SK", "SK", 4}, + {"SI", "SI", 4}, + {"ES", "ES", 4}, + {"LK", "LK", 1}, + {"SE", "SE", 4}, + {"CH", "CH", 4}, + {"TW", "TW", 1}, + {"TH", "TH", 5}, + {"TT", "TT", 3}, + {"TR", "TR", 7}, + {"AE", "AE", 6}, +#ifdef DHD_SUPPORT_GB_999 + {"GB", "GB", 999}, +#else + {"GB", "GB", 6}, +#endif /* DHD_SUPPORT_GB_999 */ + {"UY", "VE", 3}, + {"VI", "PR", 38}, + {"VA", "VA", 2}, + {"VE", "VE", 3}, + {"VN", "VN", 4}, + {"ZM", "LA", 2}, + {"EC", "EC", 21}, + {"SV", "SV", 25}, +#if defined(BCM4358_CHIP) || defined(BCM4359_CHIP) + {"KR", "KR", 70}, +#else + {"KR", "KR", 48}, +#endif + {"RU", "RU", 13}, + {"UA", "UA", 8}, + {"GT", "GT", 1}, + {"MN", "MN", 1}, + {"NI", "NI", 2}, + {"UZ", "MA", 2}, + {"ZA", "ZA", 6}, + {"EG", "EG", 13}, + {"TN", "TN", 1}, + {"AO", "AD", 0}, + {"BT", "BJ", 0}, + {"BW", 
"BJ", 0}, + {"LY", "LI", 4}, + {"BO", "NG", 0}, + {"UM", "PR", 38}, + /* Support FCC 15.407 (Part 15E) Changes, effective June 2 2014 */ + /* US/988, Q2/993 country codes with higher power on UNII-1 5G band */ + {"US", "US", 988}, + {"CU", "US", 988}, + {"CA", "Q2", 993}, +}; + +/* Customized Locale convertor +* input : ISO 3166-1 country abbreviation +* output: customized cspec +*/ +void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) +{ + int size, i; + + size = ARRAYSIZE(translate_custom_table); + + if (cspec == 0) + return; + + if (size == 0) + return; + + for (i = 0; i < size; i++) { + if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) { + memcpy(cspec->ccode, + translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[i].custom_locale_rev; + return; + } + } + return; +} + +#define PSMINFO PLATFORM_PATH".psm.info" +#define ANTINFO PLATFORM_PATH".ant.info" +#define WIFIVERINFO PLATFORM_PATH".wifiver.info" +#define LOGTRACEINFO PLATFORM_PATH".logtrace.info" +#define SOFTAPINFO PLATFORM_PATH".softap.info" + +#ifdef DHD_PM_CONTROL_FROM_FILE +/* XXX This function used for setup PM related value control by read from file. + * Normally, PM related value Turn Offed for MFG process + */ +extern bool g_pm_control; +#ifdef DHD_EXPORT_CNTL_FILE +extern uint32 pmmode_val; +#endif /* !DHD_EXPORT_CNTL_FILE */ +void sec_control_pm(dhd_pub_t *dhd, uint *power_mode) +{ +#ifndef DHD_EXPORT_CNTL_FILE + struct file *fp = NULL; + char *filepath = PSMINFO; +#endif /* DHD_EXPORT_CNTL_FILE */ + char power_val = 0; + int ret = 0; +#ifdef DHD_ENABLE_LPC + uint32 lpc = 0; +#endif /* DHD_ENABLE_LPC */ + +#ifndef DHD_EXPORT_CNTL_FILE + g_pm_control = FALSE; + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp) || (fp == NULL)) { + /* Enable PowerSave Mode */ + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode, + sizeof(uint), TRUE, 0); + DHD_ERROR(("[WIFI_SEC] %s: %s doesn't exist" + " so set PM to %d\n", + __FUNCTION__, filepath, *power_mode)); + return; + } else { + kernel_read_compat(fp, fp->f_pos, &power_val, 1); + DHD_ERROR(("[WIFI_SEC] %s: POWER_VAL = %c \r\n", __FUNCTION__, power_val)); + filp_close(fp, NULL); + } +#else + g_pm_control = FALSE; + /* Not set from the framework side */ + if (pmmode_val == 0xFFu) { + /* Enable PowerSave Mode */ + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode, + sizeof(uint), TRUE, 0); + DHD_ERROR(("[WIFI_SEC] %s: doesn't set from sysfs" + " so set PM to %d\n", + __FUNCTION__, *power_mode)); + return; + + } else { + power_val = (char)pmmode_val; + } +#endif /* !DHD_EXPORT_CNTL_FILE */ + +#ifdef DHD_EXPORT_CNTL_FILE + if (power_val == 0) { +#else + /* XXX: power_val is compared with character type read from .psm.info file */ + if (power_val == '0') { +#endif /* DHD_EXPORT_CNTL_FILE */ +#ifdef ROAM_ENABLE + uint roamvar = 1; +#endif + uint32 wl_updown = 1; + + *power_mode = PM_OFF; + /* Disable PowerSave Mode */ + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode, + sizeof(uint), TRUE, 0); +#ifndef CUSTOM_SET_ANTNPM + /* Turn off MPC in AP mode */ + ret = dhd_iovar(dhd, 0, "mpc", (char *)power_mode, sizeof(*power_mode), + NULL, 0, TRUE); +#endif /* !CUSTOM_SET_ANTNPM */ + g_pm_control = TRUE; +#ifdef ROAM_ENABLE + /* Roaming off of dongle */ + ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, + 0, TRUE); +#endif +#ifdef DHD_ENABLE_LPC + /* Set lpc 0 */ + ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE); + if (ret 
< 0) { + DHD_ERROR(("[WIFI_SEC] %s: Set lpc failed %d\n", + __FUNCTION__, ret)); + } +#endif /* DHD_ENABLE_LPC */ +#ifdef DHD_PCIE_RUNTIMEPM + DHD_ERROR(("[WIFI_SEC] %s : Turn Runtime PM off \n", __FUNCTION__)); + /* Turn Runtime PM off */ + dhdpcie_block_runtime_pm(dhd); +#endif /* DHD_PCIE_RUNTIMEPM */ + /* Disable ocl */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&wl_updown, + sizeof(wl_updown), TRUE, 0)) < 0) { + DHD_ERROR(("[WIFI_SEC] %s: WLC_UP failed %d\n", __FUNCTION__, ret)); + } +#ifndef CUSTOM_SET_OCLOFF + { + uint32 ocl_enable = 0; + ret = dhd_iovar(dhd, 0, "ocl_enable", (char *)&ocl_enable, + sizeof(ocl_enable), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("[WIFI_SEC] %s: Set ocl_enable %d failed %d\n", + __FUNCTION__, ocl_enable, ret)); + } else { + DHD_ERROR(("[WIFI_SEC] %s: Set ocl_enable %d OK %d\n", + __FUNCTION__, ocl_enable, ret)); + } + } +#else + dhd->ocl_off = TRUE; +#endif /* CUSTOM_SET_OCLOFF */ +#ifdef WLADPS + if ((ret = dhd_enable_adps(dhd, ADPS_DISABLE)) < 0) { + DHD_ERROR(("[WIFI_SEC] %s: dhd_enable_adps failed %d\n", + __FUNCTION__, ret)); + } +#endif /* WLADPS */ + + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_updown, + sizeof(wl_updown), TRUE, 0)) < 0) { + DHD_ERROR(("[WIFI_SEC] %s: WLC_DOWN failed %d\n", + __FUNCTION__, ret)); + } + } else { + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode, + sizeof(uint), TRUE, 0); + } +} +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#ifdef MIMO_ANT_SETTING +int get_ant_val_from_file(uint32 *read_val) +{ + int ret = -1; + struct file *fp = NULL; + char *filepath = ANTINFO; + char *p_ant_val = NULL; + uint32 ant_val = 0; + + /* Read antenna settings from the file */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + ret = -ENOENT; + return ret; + } else { + ret = kernel_read_compat(fp, 0, (char *)&ant_val, sizeof(uint32)); + if (ret < 0) { + DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + return ret; + } + + p_ant_val = (char *)&ant_val; + p_ant_val[sizeof(uint32) - 1] = '\0'; + ant_val = bcm_atoi(p_ant_val); + + DHD_ERROR(("[WIFI_SEC]%s: ANT val = %d\n", __FUNCTION__, ant_val)); + filp_close(fp, NULL); + + /* Check value from the file */ + if (ant_val < 1 || ant_val > 3) { + DHD_ERROR(("[WIFI_SEC] %s: Invalid value %d read from the file %s\n", + __FUNCTION__, ant_val, filepath)); + return -1; + } + } + *read_val = ant_val; + return ret; +} + +int dhd_sel_ant_from_file(dhd_pub_t *dhd) +{ + int ret = -1; + uint32 ant_val = 0; + uint32 btc_mode = 0; + uint chip_id = dhd_bus_chip_id(dhd); +#ifndef CUSTOM_SET_ANTNPM + wl_config_t rsdb_mode; + + memset(&rsdb_mode, 0, sizeof(rsdb_mode)); +#endif /* !CUSTOM_SET_ANTNPM */ + + /* Check if this chip can support MIMO */ + if (chip_id != BCM4350_CHIP_ID && + chip_id != BCM4354_CHIP_ID && + chip_id != BCM43569_CHIP_ID && + chip_id != BCM4358_CHIP_ID && + chip_id != BCM4359_CHIP_ID && + chip_id != BCM4355_CHIP_ID && + chip_id != BCM4347_CHIP_ID && + chip_id != BCM4361_CHIP_ID && + chip_id != BCM4375_CHIP_ID && + chip_id != BCM4389_CHIP_ID) { + DHD_ERROR(("[WIFI_SEC] %s: This chipset does not support MIMO\n", + __FUNCTION__)); + return ret; + } + +#ifndef DHD_EXPORT_CNTL_FILE + ret = get_ant_val_from_file(&ant_val); +#else + ant_val = (uint32)antsel; +#endif /* !DHD_EXPORT_CNTL_FILE */ + if (ant_val == 0) { +#ifdef CUSTOM_SET_ANTNPM + dhd->mimo_ant_set = 0; +#endif /* CUSTOM_SET_ANTNPM */ + return ret; + } + 
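 + /* + * Note (descriptive, based on the range check in get_ant_val_from_file(): + * values 1..3): ant_val is used as a per-core chain mask, 1 = core 0 only, + * 2 = core 1 only, 3 = both cores (2x2 MIMO). It is passed as-is to the + * txchain/rxchain iovars below, or stored in mimo_ant_set when + * CUSTOM_SET_ANTNPM is defined; e.g. a .ant.info file containing "3" + * selects both chains. + */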
DHD_ERROR(("[WIFI_SEC]%s: ANT val = %d\n", __FUNCTION__, ant_val)); + + /* bt coex mode off */ + if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) { + ret = dhd_iovar(dhd, 0, "btc_mode", (char *)&btc_mode, sizeof(btc_mode), NULL, 0, + TRUE); + if (ret) { + DHD_ERROR(("[WIFI_SEC] %s: Fail to execute dhd_wl_ioctl_cmd(): " + "btc_mode, ret=%d\n", + __FUNCTION__, ret)); + return ret; + } + } + +#ifndef CUSTOM_SET_ANTNPM + /* rsdb mode off */ + DHD_ERROR(("[WIFI_SEC] %s: %s the RSDB mode!\n", + __FUNCTION__, rsdb_mode.config ? "Enable" : "Disable")); + ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode), NULL, 0, TRUE); + if (ret) { + DHD_ERROR(("[WIFI_SEC] %s: Fail to execute dhd_wl_ioctl_cmd(): " + "rsdb_mode, ret=%d\n", __FUNCTION__, ret)); + return ret; + } + + /* Select Antenna */ + ret = dhd_iovar(dhd, 0, "txchain", (char *)&ant_val, sizeof(ant_val), NULL, 0, TRUE); + if (ret) { + DHD_ERROR(("[WIFI_SEC] %s: Fail to execute dhd_wl_ioctl_cmd(): txchain, ret=%d\n", + __FUNCTION__, ret)); + return ret; + } + + ret = dhd_iovar(dhd, 0, "rxchain", (char *)&ant_val, sizeof(ant_val), NULL, 0, TRUE); + if (ret) { + DHD_ERROR(("[WIFI_SEC] %s: Fail to execute dhd_wl_ioctl_cmd(): rxchain, ret=%d\n", + __FUNCTION__, ret)); + return ret; + } +#else + dhd->mimo_ant_set = ant_val; + DHD_ERROR(("[WIFI_SEC] %s: mimo_ant_set = %d\n", __FUNCTION__, dhd->mimo_ant_set)); +#endif /* CUSTOM_SET_ANTNPM */ + + return 0; +} +#endif /* MIMO_ANTENNA_SETTING */ + +#ifdef LOGTRACE_FROM_FILE +/* + * LOGTRACEINFO = .logtrace.info + * - logtrace = 1 => Enable LOGTRACE Event + * - logtrace = 0 => Disable LOGTRACE Event + * - file not exist => Disable LOGTRACE Event + */ +int dhd_logtrace_from_file(dhd_pub_t *dhd) +{ +#ifndef DHD_EXPORT_CNTL_FILE + struct file *fp = NULL; + int ret = -1; + uint32 logtrace = 0; + char *filepath = LOGTRACEINFO; + char *p_logtrace = NULL; + + /* Read LOGTRACE Event on/off request from the file */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + return 0; + } else { + ret = kernel_read_compat(fp, 0, (char *)&logtrace, sizeof(uint32)); + if (ret < 0) { + DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + return 0; + } + + p_logtrace = (char *)&logtrace; + p_logtrace[sizeof(uint32) - 1] = '\0'; + logtrace = bcm_atoi(p_logtrace); + + DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from file = %d\n", + __FUNCTION__, logtrace)); + filp_close(fp, NULL); + + /* Check value from the file */ + if (logtrace > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Invalid value %d read from the file %s\n", + __FUNCTION__, logtrace, filepath)); + return 0; + } + } + + return (int)logtrace; +#else + DHD_ERROR(("[WIFI_SEC] %s : LOGTRACE On/Off from sysfs = %d\n", + __FUNCTION__, (int)logtrace_val)); + return (int)logtrace_val; +#endif /* !DHD_EXPORT_CNTL_FILE */ +} +#endif /* LOGTRACE_FROM_FILE */ + +#ifdef USE_WFA_CERT_CONF +#ifndef DHD_EXPORT_CNTL_FILE +int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val) +{ + struct file *fp = NULL; + char *filepath = NULL; + int val = 0; + char *p_val = NULL; + + if (!dhd || (mode < SET_PARAM_BUS_TXGLOM_MODE) || + (mode >= PARAM_LAST_VALUE)) { + DHD_ERROR(("[WIFI_SEC] %s: invalid argument\n", __FUNCTION__)); + return BCME_ERROR; + } + + switch (mode) { +#ifdef BCMSDIO + case SET_PARAM_BUS_TXGLOM_MODE: + filepath = PLATFORM_PATH".bustxglom.info"; + break; +#endif /* BCMSDIO */ +#if defined(ROAM_ENABLE) || 
defined(DISABLE_BUILTIN_ROAM) + case SET_PARAM_ROAMOFF: + filepath = PLATFORM_PATH".roamoff.info"; + break; +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ +#ifdef USE_WL_FRAMEBURST + case SET_PARAM_FRAMEBURST: + filepath = PLATFORM_PATH".frameburst.info"; + break; +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + case SET_PARAM_TXBF: + filepath = PLATFORM_PATH".txbf.info"; + break; +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + case SET_PARAM_PROPTX: + filepath = PLATFORM_PATH".proptx.info"; + break; +#endif /* PROP_TXSTATUS */ + default: + DHD_ERROR(("[WIFI_SEC] %s: Fail to find file name for index=%d\n", + __FUNCTION__, mode)); + return BCME_ERROR; + } + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp) || (fp == NULL)) { + DHD_ERROR(("[WIFI_SEC] %s: File open failed, file path=%s\n", + __FUNCTION__, filepath)); + return BCME_ERROR; + } else { + if (kernel_read_compat(fp, fp->f_pos, (char *)&val, sizeof(uint32)) < 0) { + filp_close(fp, NULL); + /* File operation failed, so return an error code */ + DHD_ERROR(("[WIFI_SEC] %s: read failed, file path=%s\n", + __FUNCTION__, filepath)); + return BCME_ERROR; + } + filp_close(fp, NULL); + } + + p_val = (char *)&val; + p_val[sizeof(uint32) - 1] = '\0'; + val = bcm_atoi(p_val); + + switch (mode) { +#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) + case SET_PARAM_ROAMOFF: +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ +#ifdef USE_WL_FRAMEBURST + case SET_PARAM_FRAMEBURST: +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + case SET_PARAM_TXBF: +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + case SET_PARAM_PROPTX: +#endif /* PROP_TXSTATUS */ + if (val < 0 || val > 1) { + DHD_ERROR(("[WIFI_SEC] %s: value[%d] is out of range\n", + __FUNCTION__, val)); + return BCME_ERROR; + } + break; + default: + return BCME_ERROR; + } + *read_val = (uint)val; + return BCME_OK; +} +#else +int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val) +{ + uint val = 0; + + if (!dhd || (mode < SET_PARAM_BUS_TXGLOM_MODE) || + (mode >= PARAM_LAST_VALUE)) { + DHD_ERROR(("[WIFI_SEC] %s: invalid argument\n", __FUNCTION__)); + return BCME_ERROR; + } + + switch (mode) { +#ifdef BCMSDIO + case SET_PARAM_BUS_TXGLOM_MODE: + if (bus_txglom == VALUENOTSET) + return BCME_ERROR; + else + val = (uint)bus_txglom; + break; +#endif /* BCMSDIO */ +#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) + case SET_PARAM_ROAMOFF: + if (roam_off == VALUENOTSET) + return BCME_ERROR; + else + val = (uint)roam_off; + break; +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ +#ifdef USE_WL_FRAMEBURST + case SET_PARAM_FRAMEBURST: + if (frameburst == VALUENOTSET) + return BCME_ERROR; + else + val = (uint)frameburst; + break; +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + case SET_PARAM_TXBF: + if (txbf == VALUENOTSET) + return BCME_ERROR; + else + val = (uint)txbf; + break; +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + case SET_PARAM_PROPTX: + if (proptx == VALUENOTSET) + return BCME_ERROR; + else + val = (uint)proptx; + break; +#endif /* PROP_TXSTATUS */ + default: + return BCME_ERROR; + } + *read_val = val; + return BCME_OK; +} +#endif /* !DHD_EXPORT_CNTL_FILE */ +#endif /* USE_WFA_CERT_CONF */ + +#ifdef WRITE_WLANINFO +#define FIRM_PREFIX "Firm_ver:" +#define DHD_PREFIX "DHD_ver:" +#define NV_PREFIX "Nv_info:" +#define CLM_PREFIX "CLM_ver:" +#define max_len(a, b) ((sizeof(a)/(2)) - (strlen(b)) - (3)) +#define tstr_len(a, b) ((strlen(a)) + (strlen(b)) + (3)) + +char version_info[MAX_VERSION_LEN]; +char 
version_old_info[MAX_VERSION_LEN]; + +int write_filesystem(struct file *file, unsigned long long offset, + unsigned char* data, unsigned int size) +{ + mm_segment_t oldfs; + int ret; + + oldfs = get_fs(); + set_fs(KERNEL_DS); + + ret = vfs_write(file, data, size, &offset); + + set_fs(oldfs); + return ret; +} + +uint32 sec_save_wlinfo(char *firm_ver, char *dhd_ver, char *nvram_p, char *clm_ver) +{ +#ifndef DHD_EXPORT_CNTL_FILE + struct file *fp = NULL; + char *filepath = WIFIVERINFO; +#endif /* !DHD_EXPORT_CNTL_FILE */ + struct file *nvfp = NULL; + int min_len, str_len = 0; + int ret = 0; + char* nvram_buf; + char temp_buf[256]; + + DHD_TRACE(("[WIFI_SEC] %s: Entered.\n", __FUNCTION__)); + + DHD_INFO(("[WIFI_SEC] firmware version : %s\n", firm_ver)); + DHD_INFO(("[WIFI_SEC] dhd driver version : %s\n", dhd_ver)); + DHD_INFO(("[WIFI_SEC] nvram path : %s\n", nvram_p)); + DHD_INFO(("[WIFI_SEC] clm version : %s\n", clm_ver)); + + memset(version_info, 0, sizeof(version_info)); + + if (strlen(dhd_ver)) { + min_len = min(strlen(dhd_ver), max_len(temp_buf, DHD_PREFIX)); + min_len += strlen(DHD_PREFIX) + 3; + DHD_INFO(("[WIFI_SEC] DHD ver length : %d\n", min_len)); + snprintf(version_info+str_len, min_len, DHD_PREFIX " %s\n", dhd_ver); + str_len = strlen(version_info); + + DHD_INFO(("[WIFI_SEC] Driver version_info len : %d\n", str_len)); + DHD_INFO(("[WIFI_SEC] Driver version_info : %s\n", version_info)); + } else { + DHD_ERROR(("[WIFI_SEC] Driver version is missing.\n")); + } + + if (strlen(firm_ver)) { + min_len = min(strlen(firm_ver), max_len(temp_buf, FIRM_PREFIX)); + min_len += strlen(FIRM_PREFIX) + 3; + DHD_INFO(("[WIFI_SEC] firmware ver length : %d\n", min_len)); + snprintf(version_info+str_len, min_len, FIRM_PREFIX " %s\n", firm_ver); + str_len = strlen(version_info); + + DHD_INFO(("[WIFI_SEC] Firmware version_info len : %d\n", str_len)); + DHD_INFO(("[WIFI_SEC] Firmware version_info : %s\n", version_info)); + } else { + DHD_ERROR(("[WIFI_SEC] Firmware version is missing.\n")); + } + + if (nvram_p) { + memset(temp_buf, 0, sizeof(temp_buf)); + nvfp = filp_open(nvram_p, O_RDONLY, 0); + if (IS_ERR(nvfp) || (nvfp == NULL)) { + DHD_ERROR(("[WIFI_SEC] %s: Nvram file open failed.\n", __FUNCTION__)); + return -1; + } else { + ret = kernel_read_compat(nvfp, nvfp->f_pos, temp_buf, sizeof(temp_buf)); + filp_close(nvfp, NULL); + } + + if (strlen(temp_buf)) { + nvram_buf = temp_buf; + bcmstrtok(&nvram_buf, "\n", 0); + DHD_INFO(("[WIFI_SEC] nvram tokenizing : %s(%zu) \n", + temp_buf, strlen(temp_buf))); + snprintf(version_info+str_len, tstr_len(temp_buf, NV_PREFIX), + NV_PREFIX " %s\n", temp_buf); + str_len = strlen(version_info); + DHD_INFO(("[WIFI_SEC] NVRAM version_info : %s\n", version_info)); + DHD_INFO(("[WIFI_SEC] NVRAM version_info len : %d, nvram len : %zu\n", + str_len, strlen(temp_buf))); + } else { + DHD_ERROR(("[WIFI_SEC] NVRAM info is missing.\n")); + } + } else { + DHD_ERROR(("[WIFI_SEC] nvram path does not exist\n")); + } + + if (strlen(clm_ver)) { + min_len = min(strlen(clm_ver), max_len(temp_buf, CLM_PREFIX)); + min_len += strlen(CLM_PREFIX) + 3; + DHD_INFO(("[WIFI_SEC] clm ver length : %d\n", min_len)); + snprintf(version_info+str_len, min_len, CLM_PREFIX " %s\n", clm_ver); + str_len = strlen(version_info); + + DHD_INFO(("[WIFI_SEC] CLM version_info len : %d\n", str_len)); + DHD_INFO(("[WIFI_SEC] CLM version_info : %s\n", version_info)); + } else { + DHD_ERROR(("[WIFI_SEC] CLM version is missing.\n")); + } + + DHD_INFO(("[WIFI_SEC] version_info : %s, strlen : %zu\n", + version_info, 
strlen(version_info))); + +#ifndef DHD_EXPORT_CNTL_FILE + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp) || (fp == NULL)) { + DHD_ERROR(("[WIFI_SEC] %s: .wifiver.info File open failed.\n", __FUNCTION__)); + } else { + memset(version_old_info, 0, sizeof(version_old_info)); + ret = kernel_read_compat(fp, fp->f_pos, version_old_info, sizeof(version_info)); + filp_close(fp, NULL); + DHD_INFO(("[WIFI_SEC] kernel_read ret : %d.\n", ret)); + if (strcmp(version_info, version_old_info) == 0) { + DHD_ERROR(("[WIFI_SEC] .wifiver.info already saved.\n")); + return 0; + } + } + + fp = filp_open(filepath, O_RDWR | O_CREAT, 0664); + if (IS_ERR(fp) || (fp == NULL)) { + DHD_ERROR(("[WIFI_SEC] %s: .wifiver.info File open failed.\n", + __FUNCTION__)); + } else { + ret = write_filesystem(fp, fp->f_pos, version_info, sizeof(version_info)); + DHD_INFO(("[WIFI_SEC] sec_save_wlinfo done. ret : %d\n", ret)); + DHD_ERROR(("[WIFI_SEC] save .wifiver.info file.\n")); + filp_close(fp, NULL); + } +#endif /* !DHD_EXPORT_CNTL_FILE */ + return ret; +} +#endif /* WRITE_WLANINFO */ + +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW +unsigned int system_hw_rev; +static int +__init get_hw_rev(char *arg) +{ + get_option(&arg, &system_hw_rev); + printk("dhd : hw_rev : %d\n", system_hw_rev); + return 0; +} + +early_param("androidboot.hw_rev", get_hw_rev); +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */ + +#ifdef GEN_SOFTAP_INFO_FILE +#define SOFTAP_INFO_FILE_FIRST_LINE "#.softap.info" +/* + * # Is RSDB Wifi sharing supported? + * DualBandConcurrency + * # Can Wi-Fi and hotspot be turned on at the same time? + * DualInterface + * # Is the 5GHz band supported? + * 5G + * # How many clients can be connected? + * maxClient + * # Does hotspot support PowerSave mode? + * PowerSave + * # Is the android_net_wifi_set_Country_Code_Hal feature supported? + * HalFn_setCountryCodeHal + * # Is android_net_wifi_getValidChannels supported? 
 + * HalFn_getValidChannels + */ +const char *softap_info_items[] = { + "DualBandConcurrency", +#ifdef DHD_SOFTAP_DUAL_IF_INFO + "DualInterface", +#endif /* DHD_SOFTAP_DUAL_IF_INFO */ + "5G", "maxClient", "PowerSave", + "HalFn_setCountryCodeHal", "HalFn_getValidChannels", NULL +}; +#if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF) +const char *softap_info_values[] = { + "yes", +#ifdef DHD_SOFTAP_DUAL_IF_INFO + "yes", +#endif /* DHD_SOFTAP_DUAL_IF_INFO */ + "yes", "10", "yes", "yes", "yes", NULL +}; +#elif defined(BCM43455_CHIP) +const char *softap_info_values[] = { + "no", +#ifdef DHD_SOFTAP_DUAL_IF_INFO + "no", +#endif /* DHD_SOFTAP_DUAL_IF_INFO */ + "yes", "10", "no", "yes", "yes", NULL +}; +#elif defined(BCM43430_CHIP) +const char *softap_info_values[] = { + "no", +#ifdef DHD_SOFTAP_DUAL_IF_INFO + "no", +#endif /* DHD_SOFTAP_DUAL_IF_INFO */ + "no", "10", "no", "yes", "yes", NULL +}; +#else +const char *softap_info_values[] = { + "UNDEF", +#ifdef DHD_SOFTAP_DUAL_IF_INFO + "UNDEF", +#endif /* DHD_SOFTAP_DUAL_IF_INFO */ + "UNDEF", "UNDEF", "UNDEF", "UNDEF", "UNDEF", NULL +}; +#endif /* defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF) */ +#endif /* GEN_SOFTAP_INFO_FILE */ + +#ifdef GEN_SOFTAP_INFO_FILE +uint32 sec_save_softap_info(void) +{ +#ifndef DHD_EXPORT_CNTL_FILE + struct file *fp = NULL; + char *filepath = SOFTAPINFO; +#endif /* !DHD_EXPORT_CNTL_FILE */ + char temp_buf[SOFTAP_INFO_BUF_SZ]; + int ret = -1, idx = 0, rem = 0, written = 0; + char *pos = NULL; + + DHD_TRACE(("[WIFI_SEC] %s: Entered.\n", __FUNCTION__)); + memset(temp_buf, 0, sizeof(temp_buf)); + + pos = temp_buf; + rem = sizeof(temp_buf); + written = snprintf(pos, sizeof(temp_buf), "%s\n", + SOFTAP_INFO_FILE_FIRST_LINE); + do { + int len = strlen(softap_info_items[idx]) + + strlen(softap_info_values[idx]) + 2; + pos += written; + rem -= written; + if (len > rem) { + break; + } + written = snprintf(pos, rem, "%s=%s\n", + softap_info_items[idx], softap_info_values[idx]); + } while (softap_info_items[++idx] != NULL); + +#ifndef DHD_EXPORT_CNTL_FILE + fp = filp_open(filepath, O_RDWR | O_CREAT, 0664); + if (IS_ERR(fp) || (fp == NULL)) { + DHD_ERROR(("[WIFI_SEC] %s: %s File open failed.\n", + __FUNCTION__, SOFTAPINFO)); + } else { + ret = write_filesystem(fp, fp->f_pos, temp_buf, strlen(temp_buf)); + DHD_INFO(("[WIFI_SEC] %s done. ret : %d\n", __FUNCTION__, ret)); + DHD_ERROR(("[WIFI_SEC] save %s file.\n", SOFTAPINFO)); + filp_close(fp, NULL); + } +#else + strlcpy(softapinfostr, temp_buf, SOFTAP_INFO_BUF_SZ); + + ret = BCME_OK; +#endif /* !DHD_EXPORT_CNTL_FILE */ + return ret; +} +#endif /* GEN_SOFTAP_INFO_FILE */ +#endif /* CUSTOMER_HW4 || CUSTOMER_HW40 */ + +/* XXX WAR: disable pm_bcnrx, scan_ps for the BCM4354 WISOL module. + * WISOL modules have an ANT_1 Rx sensitivity issue. 
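 + * The vendor is identified from the leading bytes of PLATFORM_PATH".cid.info": + * if the string read contains "wisol", the code below forces pm_bcnrx=0 and + * scan_ps=0 through iovars.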
+/* XXX WAR: disable pm_bcnrx and scan_ps for the BCM4354 WISOL module.
+ * The WISOL module has an ANT_1 Rx sensitivity issue.
+ */
+#if defined(FORCE_DISABLE_SINGLECORE_SCAN)
+void
+dhd_force_disable_singlcore_scan(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	struct file *fp = NULL;
+	char *filepath = PLATFORM_PATH".cid.info";
+	char vendor[10] = {0, };
+	uint32 pm_bcnrx = 0;
+	uint32 scan_ps = 0;
+
+	if (BCM4354_CHIP_ID != dhd_bus_chip_id(dhd))
+		return;
+
+	fp = filp_open(filepath, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("%s file open error\n", filepath));
+	} else {
+		ret = kernel_read_compat(fp, 0, (char *)vendor, 5);
+
+		if (ret > 0 && NULL != strstr(vendor, "wisol")) {
+			DHD_ERROR(("wisol module : set pm_bcnrx=0, set scan_ps=0\n"));
+
+			ret = dhd_iovar(dhd, 0, "pm_bcnrx", (char *)&pm_bcnrx, sizeof(pm_bcnrx),
+					NULL, 0, TRUE);
+			if (ret < 0)
+				DHD_ERROR(("Set pm_bcnrx error (%d)\n", ret));
+
+			ret = dhd_iovar(dhd, 0, "scan_ps", (char *)&scan_ps, sizeof(scan_ps), NULL,
+					0, TRUE);
+			if (ret < 0)
+				DHD_ERROR(("Set scan_ps error (%d)\n", ret));
+		}
+		filp_close(fp, NULL);
+	}
+}
+#endif /* FORCE_DISABLE_SINGLECORE_SCAN */
+
+#ifdef BCM4335_XTAL_WAR
+bool
+check_bcm4335_rev(void)
+{
+	int ret = -1;
+	struct file *fp = NULL;
+	char *filepath = "/data/.rev";
+	char chip_rev[10] = {0, };
+	bool is_revb0 = TRUE;
+
+	DHD_ERROR(("check BCM4335, check_bcm4335_rev\n"));
+	fp = filp_open(filepath, O_RDONLY, 0);
+
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("/data/.rev file open error\n"));
+		is_revb0 = TRUE;
+	} else {
+		DHD_ERROR(("/data/.rev file Found\n"));
+		ret = kernel_read_compat(fp, 0, (char *)chip_rev, 9);
+		if (ret != -1 && NULL != strstr(chip_rev, "BCM4335B0")) {
+			DHD_ERROR(("Found BCM4335B0\n"));
+			is_revb0 = TRUE;
+		} else {
+			is_revb0 = FALSE;
+		}
+		filp_close(fp, NULL);
+	}
+	return is_revb0;
+}
+#endif /* BCM4335_XTAL_WAR */
diff --git a/bcmdhd.101.10.361.x/dhd_dbg.h b/bcmdhd.101.10.361.x/dhd_dbg.h
new file mode 100755
index 0000000..c955e38
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_dbg.h
@@ -0,0 +1,637 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#if defined(NDIS)
+#include "wl_nddbg.h"
+#endif /* defined(NDIS) */
+
+#ifdef DHD_LOG_DUMP
+extern char *dhd_log_dump_get_timestamp(void);
+#ifdef DHD_EFI
+/* FW verbose/console output to FW ring buffer */
+extern void dhd_log_dump_print(const char *fmt, ...);
+/* DHD verbose/console output to DHD ring buffer */
+extern void dhd_log_dump_print_drv(const char *fmt, ...);
+#define DHD_LOG_DUMP_WRITE(fmt, ...) dhd_log_dump_print_drv(fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_FW(fmt, ...) \
dhd_log_dump_print(fmt, ##__VA_ARGS__) +#else +#ifndef _DHD_LOG_DUMP_DEFINITIONS_ +#define _DHD_LOG_DUMP_DEFINITIONS_ +#define GENERAL_LOG_HDR "\n-------------------- General log ---------------------------\n" +#define PRESERVE_LOG_HDR "\n-------------------- Preserve log ---------------------------\n" +#define SPECIAL_LOG_HDR "\n-------------------- Special log ---------------------------\n" +#define DHD_DUMP_LOG_HDR "\n-------------------- 'dhd dump' log -----------------------\n" +#define EXT_TRAP_LOG_HDR "\n-------------------- Extended trap data -------------------\n" +#define HEALTH_CHK_LOG_HDR "\n-------------------- Health check data --------------------\n" +#ifdef DHD_DUMP_PCIE_RINGS +#define FLOWRING_DUMP_HDR "\n-------------------- Flowring dump --------------------\n" +#endif /* DHD_DUMP_PCIE_RINGS */ +#define DHD_LOG_DUMP_DLD(fmt, ...) \ + dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__) +#define DHD_LOG_DUMP_DLD_EX(fmt, ...) \ + dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, NULL, 0, fmt, ##__VA_ARGS__) +#define DHD_LOG_DUMP_DLD_PRSRV(fmt, ...) \ + dhd_log_dump_write(DLD_BUF_TYPE_PRESERVE, NULL, 0, fmt, ##__VA_ARGS__) +#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */ + +#ifndef DHD_LOG_DUMP_RING_DEFINITIONS +#define DHD_LOG_DUMP_RING_DEFINITIONS +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING +/* Enabled DHD_DEBUGABILITY_LOG_DUMP_RING */ +extern void dhd_dbg_ring_write(int type, char *binary_data, + int binary_len, const char *fmt, ...); +extern char* dhd_dbg_get_system_timestamp(void); +#define DHD_DBG_RING(fmt, ...) \ + dhd_dbg_ring_write(DRIVER_LOG_RING_ID, NULL, 0, fmt, ##__VA_ARGS__) +#define DHD_DBG_RING_EX(fmt, ...) \ + dhd_dbg_ring_write(FW_VERBOSE_RING_ID, NULL, 0, fmt, ##__VA_ARGS__) +#define DHD_DBG_RING_ROAM(fmt, ...) \ + dhd_dbg_ring_write(ROAM_STATS_RING_ID, NULL, 0, fmt, ##__VA_ARGS__) + +#define DHD_LOG_DUMP_WRITE DHD_DBG_RING +#define DHD_LOG_DUMP_WRITE_EX DHD_DBG_RING_EX +#define DHD_LOG_DUMP_WRITE_PRSRV DHD_DBG_RING +#define DHD_LOG_DUMP_WRITE_ROAM DHD_DBG_RING_ROAM + +#define DHD_PREFIX_TS "[%s][%s]: ", dhd_dbg_get_system_timestamp(), dhd_log_dump_get_timestamp() +#define DHD_PREFIX_TS_FN DHD_PREFIX_TS +#define DHD_LOG_DUMP_WRITE_TS DHD_DBG_RING(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_TS_FN DHD_DBG_RING(DHD_PREFIX_TS_FN) +#define DHD_LOG_DUMP_WRITE_EX_TS DHD_DBG_RING_EX(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_EX_TS_FN DHD_DBG_RING_EX(DHD_PREFIX_TS_FN) +#define DHD_LOG_DUMP_WRITE_PRSRV_TS DHD_DBG_RING(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_PRSRV_TS_FN DHD_DBG_RING(DHD_PREFIX_TS_FN) +#define DHD_LOG_DUMP_WRITE_ROAM_TS DHD_DBG_RING_ROAM(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_ROAM_TS_FN DHD_DBG_RING_ROAM(DHD_PREFIX_TS_FN) +#else +/* Not enabled DHD_DEBUGABILITY_LOG_DUMP_RING */ +#define DHD_LOG_DUMP_WRITE DHD_LOG_DUMP_DLD +#define DHD_LOG_DUMP_WRITE_EX DHD_LOG_DUMP_DLD_EX +#define DHD_LOG_DUMP_WRITE_PRSRV DHD_LOG_DUMP_DLD_PRSRV +#define DHD_LOG_DUMP_WRITE_ROAM DHD_LOG_DUMP_DLD + +#define DHD_PREFIX_TS "[%s]: ", dhd_log_dump_get_timestamp() +#define DHD_PREFIX_TS_FN "[%s] %s: ", dhd_log_dump_get_timestamp(), __func__ +#define DHD_LOG_DUMP_WRITE_TS DHD_LOG_DUMP_DLD(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_TS_FN DHD_LOG_DUMP_DLD(DHD_PREFIX_TS_FN) +#define DHD_LOG_DUMP_WRITE_EX_TS DHD_LOG_DUMP_DLD_EX(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_EX_TS_FN DHD_LOG_DUMP_DLD_EX(DHD_PREFIX_TS_FN) +#define DHD_LOG_DUMP_WRITE_PRSRV_TS DHD_LOG_DUMP_DLD_PRSRV(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_PRSRV_TS_FN DHD_LOG_DUMP_DLD_PRSRV(DHD_PREFIX_TS_FN) +#define 
DHD_LOG_DUMP_WRITE_ROAM_TS DHD_LOG_DUMP_DLD(DHD_PREFIX_TS) +#define DHD_LOG_DUMP_WRITE_ROAM_TS_FN DHD_LOG_DUMP_DLD(DHD_PREFIX_TS_FN) +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ +#endif /* DHD_LOG_DUMP_RING_DEFINITIONS */ + +#endif /* DHD_EFI */ +#define CONCISE_DUMP_BUFLEN 32 * 1024 +#define ECNTRS_LOG_HDR "\n-------------------- Ecounters log --------------------------\n" +#ifdef DHD_STATUS_LOGGING +#define STATUS_LOG_HDR "\n-------------------- Status log -----------------------\n" +#endif /* DHD_STATUS_LOGGING */ +#define RTT_LOG_HDR "\n-------------------- RTT log --------------------------\n" +#define BCM_TRACE_LOG_HDR "\n-------------------- BCM Trace log --------------------------\n" +#define COOKIE_LOG_HDR "\n-------------------- Cookie List ----------------------------\n" +#endif /* DHD_LOG_DUMP */ + +#if defined(CUSTOMER_DBG_SYSTEM_TIME) && defined(DHD_DEBUGABILITY_LOG_DUMP_RING) +#define DBG_PRINT_PREFIX "[%s][dhd][wlan]", dhd_dbg_get_system_timestamp() +#else +#define DBG_PRINT_PREFIX +#endif +#define DBG_PRINT_SYSTEM_TIME pr_cont(DBG_PRINT_PREFIX) + +#if defined(BCMDBG) || defined(DHD_DEBUG) + +#if defined(NDIS) +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \ + {printf args; DHD_NDDBG_OUTPUT args;}} while (0) +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) \ + {printf args; DHD_NDDBG_OUTPUT args;}} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) \ + {printf args; DHD_NDDBG_OUTPUT args;}} while (0) +#define DHD_ERROR_ROAM(args) DHD_ERROR(args) +#else +/* NON-NDIS cases */ +#ifdef DHD_LOG_DUMP +#ifdef DHD_EFI +/* defined(DHD_EFI) && defined(DHD_LOG_DUMP) */ +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) + +#define DHD_INFO(args) \ +do { \ + if (dhd_msg_level & DHD_INFO_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#else /* DHD_EFI */ +/* !defined(DHD_EFI) and defined(DHD_LOG_DUMP) */ +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE_TS; \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) + +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#endif /* DHD_EFI */ +#else /* DHD_LOG_DUMP */ +/* !defined(DHD_LOG_DUMP cases) */ +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#define DHD_ERROR_ROAM(args) DHD_ERROR(args) +#endif /* DHD_LOG_DUMP */ + +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) +#endif /* defined(NDIS) */ + +#ifdef DHD_LOG_DUMP +/* LOG_DUMP defines common to EFI and NON-EFI */ +#ifdef DHD_EFI +/* EFI builds with LOG DUMP enabled */ +#define DHD_ERROR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_IOVAR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + if (dhd_msg_level & DHD_IOVAR_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_LOG_MEM(args) \ +do { \ + if (dhd_msg_level & 
DHD_ERROR_VAL) { \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) + +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE_FW args; \ + } \ +} while (0) +#define DHD_ECNTR_LOG(args) DHD_EVENT(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#define DHD_ERROR_ROAM(args) DHD_ERROR(args) +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE_FW args; \ + } \ +} while (0) +#define DHD_PRSRV_MEM(args) DHD_EVENT(args) +#else +/* NON-EFI builds with LOG DUMP enabled */ +#define DHD_ERROR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_IOVAR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + if (dhd_msg_level & DHD_IOVAR_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_LOG_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) + +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE_PRSRV_TS; \ + DHD_LOG_DUMP_WRITE_PRSRV args; \ + } \ +} while (0) +#define DHD_PRSRV_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + if (dhd_msg_level & DHD_PRSRV_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE_TS; \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +/* Re-using 'DHD_MSGTRACE_VAL' for controlling printing of ecounter binary event +* logs to console and debug dump -- need to cleanup in the future to use separate +* 'DHD_ECNTR_VAL' bitmap flag. 'DHD_MSGTRACE_VAL' will be defined only +* for non-android builds. 
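+*
+* Editor's note (not part of the original patch): the practical effect of
+* the reuse is that the DHD_ECNTR_LOG() body below prints only when both
+* bits are set in the message level, i.e.
+*   (dhd_msg_level & DHD_EVENT_VAL) && (dhd_msg_level & DHD_MSGTRACE_VAL)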
+*/ +#define DHD_ECNTR_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE_TS; \ + DHD_LOG_DUMP_WRITE args; \ + } \ + } \ +} while (0) +#define DHD_ERROR_EX(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE_EX_TS; \ + DHD_LOG_DUMP_WRITE_EX args; \ + } \ +} while (0) +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE_TS; \ + DHD_LOG_DUMP_WRITE args; \ +} while (0) + +#define DHD_ERROR_ROAM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE_ROAM_TS; \ + DHD_LOG_DUMP_WRITE_ROAM args; \ + } \ +} while (0) +#endif /* DHD_EFI */ +#else /* DHD_LOG_DUMP */ +/* !DHD_LOG_DUMP */ +#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0) +#define DHD_ERROR_MEM(args) DHD_ERROR(args) +#define DHD_IOVAR_MEM(args) DHD_ERROR(args) +#define DHD_LOG_MEM(args) DHD_ERROR(args) +#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#define DHD_ECNTR_LOG(args) DHD_EVENT(args) +#define DHD_PRSRV_MEM(args) DHD_EVENT(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#define DHD_ERROR_ROAM(args) DHD_ERROR(args) +#endif /* DHD_LOG_DUMP */ + +#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) +#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0) +#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0) +#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0) +#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0) +#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) +#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) +#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) +#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) +#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0) +#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0) +#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0) +#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0) +#define DHD_RPM(args) do {if (dhd_msg_level & DHD_RPM_VAL) printf args;} while (0) +#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0) + +#if defined(DHD_LOG_DUMP) +#if defined(DHD_EFI) +#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args) +#elif defined(DHD_LOG_PRINT_RATE_LIMIT) +#define DHD_FW_VERBOSE(args) \ +do { \ + if (dbgring_msg_level & DHD_FWLOG_VAL) { \ + DHD_LOG_DUMP_WRITE_EX args; \ + } \ +} while (0) +#define DHD_FWLOG(args) \ + do { \ + if (dhd_msg_level & DHD_FWLOG_VAL) { \ + if (control_logtrace && !log_print_threshold) \ + printf args; \ + DHD_LOG_DUMP_WRITE args; \ + } \ + } while (0) +#else +#define DHD_FW_VERBOSE(args) \ +do { \ + if (dbgring_msg_level & DHD_FWLOG_VAL) { \ + DHD_LOG_DUMP_WRITE_EX args; \ + } \ +} while (0) + +#define DHD_FWLOG(args) \ + do { \ + if (dhd_msg_level & DHD_FWLOG_VAL) { \ + if (control_logtrace) \ + printf args; \ + DHD_LOG_DUMP_WRITE args; \ + } \ + } while (0) +#endif /* DHD_EFI */ +#else /* DHD_LOG_DUMP */ +#if defined(NDIS) && (NDISVER >= 0x0630) +#define 
DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) \ + {printf args; DHD_NDDBG_OUTPUT args;}} while (0) +#else +#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0) +#endif /* defined(NDIS) && (NDISVER >= 0x0630) */ +#endif /* DHD_LOG_DUMP */ + +#define DHD_DBGIF(args) do {if (dhd_msg_level & DHD_DBGIF_VAL) printf args;} while (0) + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM(args) do {if (dhd_msg_level & DHD_RPM_VAL) printf args;} while (0) +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#define DHD_ERROR_NO_HW4 DHD_INFO +#else +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#define DHD_ERROR_NO_HW4 DHD_ERROR +#endif /* CUSTOMER_HW4_DEBUG */ + +#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL) +#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL) +#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL) +#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL) +#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL) +#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL) +#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL) +#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL) +#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL) +#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL) +#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL) +#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL) +#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL) +#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL) +#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL) +#define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL) +#define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL) +#define DHD_RTT_ON() (dhd_msg_level & DHD_RTT_VAL) +#define DHD_MSGTRACE_ON() (dhd_msg_level & DHD_MSGTRACE_VAL) +#define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL) +#define DHD_DBGIF_ON() (dhd_msg_level & DHD_DBGIF_VAL) +#define DHD_PKT_MON_ON() (dhd_msg_level & DHD_PKT_MON_VAL) +#define DHD_PKT_MON_DUMP_ON() (dhd_msg_level & DHD_PKT_MON_DUMP_VAL) +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM_ON() (dhd_msg_level & DHD_RPM_VAL) +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#else /* defined(BCMDBG) || defined(DHD_DEBUG) */ + +#if defined(NDIS) +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \ + {printf args; DHD_NDDBG_OUTPUT args;}} while (0) +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) \ + {DHD_NDDBG_OUTPUT args;}} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) \ + {DHD_NDDBG_OUTPUT args;}} while (0) +#elif defined(DHD_EFI) && defined(DHD_LOG_DUMP) +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_INFO(args) \ +do { \ + if (dhd_msg_level & DHD_INFO_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_TRACE(args) +#define DHD_ERROR_ROAM(args) DHD_ERROR(args) +#else /* DHD_EFI && DHD_LOG_DUMP */ + +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \ + printf args;} while (0) +#define DHD_TRACE(args) +#define DHD_INFO(args) +#define DHD_ERROR_ROAM(args) DHD_ERROR(args) +#endif /* defined(NDIS) */ + +#define DHD_DATA(args) +#define DHD_CTL(args) +#define DHD_TIMER(args) +#define DHD_HDRS(args) +#define DHD_BYTES(args) +#define 
DHD_INTR(args) +#define DHD_GLOM(args) + +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE_FW args; \ + } \ +} while (0) +#define DHD_ECNTR_LOG(args) DHD_EVENT(args) +#else +#define DHD_EVENT(args) +#define DHD_ECNTR_LOG(args) DHD_EVENT(args) +#endif /* DHD_EFI && DHD_LOG_DUMP */ + +#define DHD_PRSRV_MEM(args) DHD_EVENT(args) + +#define DHD_BTA(args) +#define DHD_ISCAN(args) +#define DHD_ARPOE(args) +#define DHD_REORDER(args) +#define DHD_PNO(args) +#define DHD_RTT(args) +#define DHD_PKT_MON(args) + +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE_FW args; \ + } \ +} while (0) +#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args) +#else +#define DHD_MSGTRACE_LOG(args) +#define DHD_FWLOG(args) +#endif /* DHD_EFI && DHD_LOG_DUMP */ + +#define DHD_DBGIF(args) + +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) +#define DHD_ERROR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_IOVAR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_LOG_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#else +#define DHD_ERROR_MEM(args) DHD_ERROR(args) +#define DHD_IOVAR_MEM(args) DHD_ERROR(args) +#define DHD_LOG_MEM(args) DHD_ERROR(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#endif /* DHD_EFI */ +#define DHD_ERROR_ROAM(args) DHD_ERROR(args) +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#define DHD_ERROR_NO_HW4 DHD_INFO +#else +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#define DHD_ERROR_NO_HW4 DHD_ERROR +#endif /* CUSTOMER_HW4_DEBUG */ + +#define DHD_ERROR_ON() 0 +#define DHD_TRACE_ON() 0 +#define DHD_INFO_ON() 0 +#define DHD_DATA_ON() 0 +#define DHD_CTL_ON() 0 +#define DHD_TIMER_ON() 0 +#define DHD_HDRS_ON() 0 +#define DHD_BYTES_ON() 0 +#define DHD_INTR_ON() 0 +#define DHD_GLOM_ON() 0 +#define DHD_EVENT_ON() 0 +#define DHD_BTA_ON() 0 +#define DHD_ISCAN_ON() 0 +#define DHD_ARPOE_ON() 0 +#define DHD_REORDER_ON() 0 +#define DHD_NOCHECKDIED_ON() 0 +#define DHD_PNO_ON() 0 +#define DHD_RTT_ON() 0 +#define DHD_PKT_MON_ON() 0 +#define DHD_PKT_MON_DUMP_ON() 0 +#define DHD_MSGTRACE_ON() 0 +#define DHD_FWLOG_ON() 0 +#define DHD_DBGIF_ON() 0 +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM_ON() 0 +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#endif /* defined(BCMDBG) || defined(DHD_DEBUG) */ + +#define PRINT_RATE_LIMIT_PERIOD 5000000u /* 5s in units of us */ +#define DHD_ERROR_RLMT(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + static uint64 __err_ts = 0; \ + static uint32 __err_cnt = 0; \ + uint64 __cur_ts = 0; \ + __cur_ts = OSL_SYSUPTIME_US(); \ + if (__err_ts == 0 || (__cur_ts > __err_ts && \ + (__cur_ts - __err_ts > PRINT_RATE_LIMIT_PERIOD))) { \ + __err_ts = __cur_ts; \ + DHD_ERROR(args); \ + DHD_ERROR(("[Repeats %u times]\n", __err_cnt)); \ + __err_cnt = 0; \ + } else { \ + 
++__err_cnt; \
+		} \
+	} \
+} while (0)
+
+/* even in non-BCMDBG builds, logging of dongle iovars should be available */
+#define DHD_DNGL_IOVAR_SET(args) \
+	do {if (dhd_msg_level & DHD_DNGL_IOVAR_SET_VAL) printf args;} while (0)
+
+#ifdef BCMPERFSTATS
+#define DHD_LOG(args) do {if (dhd_msg_level & DHD_LOG_VAL) bcmlog args;} while (0)
+#else
+#define DHD_LOG(args)
+#endif
+
+#if defined(BCMINTERNAL) && defined(LINUX) && defined(BCMSDIO) && (defined(BCMDBG) || \
+	defined(DHD_DEBUG))
+extern void dhd_blog(char *cp, int size);
+#define DHD_BLOG(cp, size) do { dhd_blog(cp, size);} while (0)
+#else
+#define DHD_BLOG(cp, size)
+#endif
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+extern int dbgring_msg_level;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+extern int log_print_threshold;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
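+
+/*
+ * Editor's sketch (illustrative, not part of the original patch):
+ * DHD_ERROR_RLMT() above is intended for error paths that can fire on
+ * every packet. It prints at most once per PRINT_RATE_LIMIT_PERIOD (5 s)
+ * and then reports how many prints were suppressed in between. A
+ * hypothetical rx path might use it as:
+ *
+ *   if (pktlen > maxlen) {
+ *       DHD_ERROR_RLMT(("%s: oversized rx pkt %u > %u\n",
+ *           __FUNCTION__, pktlen, maxlen));
+ *   }
+ */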
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/bcmdhd.101.10.361.x/dhd_dbg_ring.c b/bcmdhd.101.10.361.x/dhd_dbg_ring.c
new file mode 100755
index 0000000..d78d21e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_dbg_ring.c
@@ -0,0 +1,473 @@
+/*
+ * DHD debug ring API and structures - implementation
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_dbg_ring.h>
+
+dhd_dbg_ring_t *
+dhd_dbg_ring_alloc_init(dhd_pub_t *dhd, uint16 ring_id,
+	char *ring_name, uint32 ring_sz, void *allocd_buf,
+	bool pull_inactive)
+{
+	dhd_dbg_ring_t *ring = NULL;
+	int ret = 0;
+	unsigned long flags = 0;
+
+	ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
+	if (!ring)
+		goto fail;
+
+	ret = dhd_dbg_ring_init(dhd, ring, ring_id,
+			(uint8 *)ring_name, ring_sz,
+			allocd_buf, pull_inactive);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: unable to init ring %s!\n",
+			__FUNCTION__, ring_name));
+		goto fail;
+	}
+	DHD_DBG_RING_LOCK(ring->lock, flags);
+	ring->state = RING_ACTIVE;
+	ring->threshold = 0;
+	DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+	return ring;
+
+fail:
+	if (ring) {
+		dhd_dbg_ring_deinit(dhd, ring);
+		ring->ring_buf = NULL;
+		ring->ring_size = 0;
+		MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+	}
+	return NULL;
+}
+
+void
+dhd_dbg_ring_dealloc_deinit(void **ring_ptr, dhd_pub_t *dhd)
+{
+	dhd_dbg_ring_t *ring = NULL;
+	dhd_dbg_ring_t **dbgring = (dhd_dbg_ring_t **)ring_ptr;
+
+	if (!dbgring)
+		return;
+
+	ring = *dbgring;
+
+	if (ring) {
+		dhd_dbg_ring_deinit(dhd, ring);
+		ring->ring_buf = NULL;
+		ring->ring_size = 0;
+		MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+		*dbgring = NULL;
+	}
+}
+
+int
+dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
+	uint32 ring_sz, void *allocd_buf, bool pull_inactive)
+{
+	void *buf;
+	unsigned long flags = 0;
+
+	if (allocd_buf == NULL) {
+		return BCME_NOMEM;
+	} else {
+		buf = allocd_buf;
+	}
+
+	ring->lock = DHD_DBG_RING_LOCK_INIT(dhdp->osh);
+
+	DHD_DBG_RING_LOCK(ring->lock, flags);
+	ring->id = id;
+	strlcpy((char *)ring->name, (char *)name, sizeof(ring->name));
+	ring->ring_size = ring_sz;
+	ring->wp = ring->rp = 0;
+	ring->ring_buf = buf;
+	ring->threshold = DBGRING_FLUSH_THRESHOLD(ring);
+	ring->state = RING_SUSPEND;
+	ring->rem_len = 0;
+	ring->sched_pull = TRUE;
+	ring->pull_inactive = pull_inactive;
+	DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+	return BCME_OK;
+}
+
+void
+dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring)
+{
+	unsigned long flags = 0;
+
+	DHD_DBG_RING_LOCK(ring->lock, flags);
+	ring->id = 0;
+	ring->name[0] = 0;
+	ring->wp = ring->rp = 0;
+	memset(&ring->stat, 0, sizeof(ring->stat));
+	ring->threshold = 0;
+	ring->state = RING_STOP;
+	DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+	DHD_DBG_RING_LOCK_DEINIT(dhdp->osh, ring->lock);
+}
+
+void
+dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len,
+	os_pullreq_t pull_fn, void *os_pvt, const int id)
+{
+	unsigned long flags = 0;
+
+	DHD_DBG_RING_LOCK(ring->lock, flags);
+	/* if the current pending size is bigger than the threshold and
+	 * the threshold is set
+	 */
+	if (ring->threshold > 0 &&
+		(pending_len >= ring->threshold) && ring->sched_pull) {
+		/*
+		 * Update the state and release the lock before calling
+		 * the pull_fn. Do not transfer control to other layers
+		 * with locks held. If the callback calls back into the
+		 * same layer from this context, it can lead to deadlock.
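+		 *
+		 * Editor's illustration (not part of the original patch) of
+		 * the cycle being avoided, assuming a hypothetical
+		 * os_pullreq_t that re-enters this layer synchronously:
+		 *
+		 *   dhd_dbg_ring_sched_pull()   [ring->lock held]
+		 *     -> pull_fn(os_pvt, id)    [if invoked with lock held]
+		 *       -> dhd_dbg_ring_pull()  [re-takes ring->lock: deadlock]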
+ */ + ring->sched_pull = FALSE; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + pull_fn(os_pvt, id); + } else { + DHD_DBG_RING_UNLOCK(ring->lock, flags); + } +} + +uint32 +dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring) +{ + uint32 pending_len = 0; + unsigned long flags = 0; + + DHD_DBG_RING_LOCK(ring->lock, flags); + if (ring->stat.written_bytes > ring->stat.read_bytes) { + pending_len = ring->stat.written_bytes - ring->stat.read_bytes; + } else if (ring->stat.written_bytes < ring->stat.read_bytes) { + pending_len = PENDING_LEN_MAX - ring->stat.read_bytes + ring->stat.written_bytes; + } else { + pending_len = 0; + } + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return pending_len; +} + +int +dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data) +{ + unsigned long flags; + uint32 w_len; + uint32 avail_size; + dhd_dbg_ring_entry_t *w_entry, *r_entry; + + if (!ring || !hdr || !data) { + return BCME_BADARG; + } + + DHD_DBG_RING_LOCK(ring->lock, flags); + + if (ring->state != RING_ACTIVE) { + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_OK; + } + + w_len = ENTRY_LENGTH(hdr); + + DHD_DBGIF(("%s: RING%d[%s] hdr->len=%u, w_len=%u, wp=%d, rp=%d, ring_start=0x%p;" + " ring_size=%u\n", + __FUNCTION__, ring->id, ring->name, hdr->len, w_len, ring->wp, ring->rp, + ring->ring_buf, ring->ring_size)); + + if (w_len > ring->ring_size) { + DHD_DBG_RING_UNLOCK(ring->lock, flags); + DHD_ERROR(("%s: RING%d[%s] w_len=%u, ring_size=%u," + " write size exceeds ring size !\n", + __FUNCTION__, ring->id, ring->name, w_len, ring->ring_size)); + return BCME_ERROR; + } + /* Claim the space */ + do { + avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size); + if (avail_size <= w_len) { + /* Prepare the space */ + if (ring->rp <= ring->wp) { + ring->tail_padded = TRUE; + ring->rem_len = ring->ring_size - ring->wp; + DHD_DBGIF(("%s: RING%d[%s] Insuffient tail space," + " rp=%d, wp=%d, rem_len=%d, ring_size=%d," + " avail_size=%d, w_len=%d\n", __FUNCTION__, + ring->id, ring->name, ring->rp, ring->wp, + ring->rem_len, ring->ring_size, avail_size, + w_len)); + + /* 0 pad insufficient tail space */ + memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len); + /* If read pointer is still at the beginning, make some room */ + if (ring->rp == 0) { + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + + ring->rp); + ring->rp += ENTRY_LENGTH(r_entry); + ring->stat.read_bytes += ENTRY_LENGTH(r_entry); + DHD_DBGIF(("%s: rp at 0, move by one entry length" + " (%u bytes)\n", + __FUNCTION__, (uint32)ENTRY_LENGTH(r_entry))); + } + if (ring->rp == ring->wp) { + ring->rp = 0; + } + ring->wp = 0; + DHD_DBGIF(("%s: new rp=%u, wp=%u\n", + __FUNCTION__, ring->rp, ring->wp)); + } else { + /* Not enough space for new entry, free some up */ + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + + ring->rp); + /* check bounds before incrementing read ptr */ + if (ring->rp + ENTRY_LENGTH(r_entry) >= ring->ring_size) { + DHD_ERROR(("%s: RING%d[%s] rp points out of boundary," + "ring->wp=%u, ring->rp=%u, ring->ring_size=%d\n", + __FUNCTION__, ring->id, ring->name, ring->wp, + ring->rp, ring->ring_size)); + ASSERT(0); + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_BUFTOOSHORT; + } + ring->rp += ENTRY_LENGTH(r_entry); + /* skip padding if there is one */ + if (ring->tail_padded && + ((ring->rp + ring->rem_len) == ring->ring_size)) { + DHD_DBGIF(("%s: RING%d[%s] Found padding," + " avail_size=%d, w_len=%d, set rp = 0\n", + __FUNCTION__, + ring->id, ring->name, 
				avail_size, w_len));
+					ring->rp = 0;
+					ring->tail_padded = FALSE;
+					ring->rem_len = 0;
+				}
+				ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+				DHD_DBGIF(("%s: RING%d[%s] read_bytes=%d, wp=%d, rp=%d\n",
+					__FUNCTION__, ring->id, ring->name, ring->stat.read_bytes,
+					ring->wp, ring->rp));
+			}
+		} else {
+			break;
+		}
+	} while (TRUE);
+
+	/* check before writing to the ring */
+	if (ring->wp + w_len >= ring->ring_size) {
+		DHD_ERROR(("%s: RING%d[%s] wp pointed out of ring boundary, "
+			"wp=%d, ring_size=%d, w_len=%u\n", __FUNCTION__, ring->id,
+			ring->name, ring->wp, ring->ring_size, w_len));
+		ASSERT(0);
+		DHD_DBG_RING_UNLOCK(ring->lock, flags);
+		return BCME_BUFTOOLONG;
+	}
+
+	w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp);
+	/* header */
+	memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE);
+	w_entry->len = hdr->len;
+	/* payload */
+	memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, data, w_entry->len);
+	/* update write pointer */
+	ring->wp += w_len;
+
+	/* update statistics */
+	ring->stat.written_records++;
+	ring->stat.written_bytes += w_len;
+	DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d,"
+		" ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name,
+		ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes,
+		ring->threshold, ring->wp, ring->rp));
+
+	DHD_DBG_RING_UNLOCK(ring->lock, flags);
+	return BCME_OK;
+}
+
+/*
+ * Callers of this function should not hold ring->lock when
+ * calling it.
+ */
+int
+dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_header)
+{
+	dhd_dbg_ring_entry_t *r_entry = NULL;
+	uint32 rlen = 0;
+	char *buf = NULL;
+
+	if (!ring || !data || buf_len <= 0) {
+		return 0;
+	}
+
+	/* pull from ring is allowed for an inactive (suspended) ring
+	 * in case of ecounters only; this is because, for ecounters,
+	 * when a trap occurs the ring is suspended and data is then
+	 * pulled to dump it to a file.
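	 *
	 * Editor's sketch (illustrative, not part of the original patch)
	 * of a typical drain loop, assuming a caller-owned buffer large
	 * enough for the largest record and a hypothetical consume() sink:
	 *
	 *   char buf[4096];
	 *   int len;
	 *   while ((len = dhd_dbg_ring_pull_single(ring, buf, sizeof(buf), TRUE)) > 0)
	 *       consume(buf, len);
	 *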
For other rings if ring is + * not in active state return without processing (as before) + */ + if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) { + goto exit; + } + + if (ring->rp == ring->wp) { + goto exit; + } + + DHD_DBGIF(("%s: RING%d[%s] buf_len=%u, wp=%d, rp=%d, ring_start=0x%p; ring_size=%u\n", + __FUNCTION__, ring->id, ring->name, buf_len, ring->wp, ring->rp, + ring->ring_buf, ring->ring_size)); + + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp); + + /* Boundary Check */ + rlen = ENTRY_LENGTH(r_entry); + if ((ring->rp + rlen) > ring->ring_size) { + DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d," + " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen, + ring->ring_size, ring->id, ring->name, ring->rp)); + rlen = 0; + goto exit; + } + + if (strip_header) { + rlen = r_entry->len; + buf = (char *)r_entry + DBG_RING_ENTRY_SIZE; + } else { + rlen = ENTRY_LENGTH(r_entry); + buf = (char *)r_entry; + } + if (rlen > buf_len) { + DHD_ERROR(("%s: buf len %d is too small for entry len %d\n", + __FUNCTION__, buf_len, rlen)); + DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->ring_size, + ring->wp, ring->rp)); + ASSERT(0); + rlen = 0; + goto exit; + } + + memcpy(data, buf, rlen); + /* update ring context */ + ring->rp += ENTRY_LENGTH(r_entry); + /* don't pass wp but skip padding if there is one */ + if (ring->rp != ring->wp && + ring->tail_padded && ((ring->rp + ring->rem_len) >= ring->ring_size)) { + DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp)); + ring->rp = 0; + ring->tail_padded = FALSE; + ring->rem_len = 0; + } + if (ring->rp >= ring->ring_size) { + DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary," + " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id, + ring->name, ring->rp, ring->ring_size)); + ASSERT(0); + rlen = 0; + goto exit; + } + ring->stat.read_bytes += ENTRY_LENGTH(r_entry); + DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__, + ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp)); + +exit: + + return rlen; +} + +int +dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_hdr) +{ + int32 r_len, total_r_len = 0; + + if (!ring || !data) + return 0; + + if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) { + return 0; + } + + while (buf_len > 0) { + r_len = dhd_dbg_ring_pull_single(ring, data, buf_len, strip_hdr); + if (r_len == 0) + break; + data = (uint8 *)data + r_len; + buf_len -= r_len; + total_r_len += r_len; + } + + return total_r_len; +} + +int +dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold) +{ + unsigned long flags = 0; + + if (!ring) + return BCME_BADADDR; + + if (ring->state == RING_STOP) + return BCME_UNSUPPORTED; + + DHD_DBG_RING_LOCK(ring->lock, flags); + + if (log_level == 0) + ring->state = RING_SUSPEND; + else + ring->state = RING_ACTIVE; + + ring->log_level = log_level; + ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring)); + + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return BCME_OK; +} + +void +dhd_dbg_ring_start(dhd_dbg_ring_t *ring) +{ + if (!ring) + return; + + /* Initialize the information for the ring */ + ring->state = RING_SUSPEND; + ring->log_level = 0; + ring->rp = ring->wp = 0; + ring->threshold = 0; + memset(&ring->stat, 0, sizeof(struct ring_statistics)); + memset(ring->ring_buf, 0, ring->ring_size); +} diff --git a/bcmdhd.101.10.361.x/dhd_dbg_ring.h 
b/bcmdhd.101.10.361.x/dhd_dbg_ring.h new file mode 100755 index 0000000..bab8646 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_dbg_ring.h @@ -0,0 +1,146 @@ +/* + * DHD debug ring header file - interface + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#ifndef __DHD_DBG_RING_H__ +#define __DHD_DBG_RING_H__ + +#include + +#if defined(LINUX) +#define PACKED_STRUCT __attribute__ ((packed)) +#else +#define PACKED_STRUCT +#endif + +#define DBGRING_NAME_MAX 32 + +enum dbg_ring_state { + RING_STOP = 0, /* ring is not initialized */ + RING_ACTIVE, /* ring is live and logging */ + RING_SUSPEND /* ring is initialized but not logging */ +}; + +/* each entry in dbg ring has below header, to handle + * variable length records in ring + */ +typedef struct dhd_dbg_ring_entry { + uint16 len; /* payload length excluding the header */ + uint8 flags; + uint8 type; /* Per ring specific */ + uint64 timestamp; /* present if has_timestamp bit is set. 
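	 *
	 * Editor's note (not part of the original patch): a record in the
	 * ring is laid out as [dhd_dbg_ring_entry_t][len bytes of payload],
	 * which is what ENTRY_LENGTH() below computes as
	 * hdr->len + DBG_RING_ENTRY_SIZE.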
 */
+} PACKED_STRUCT dhd_dbg_ring_entry_t;
+
+struct ring_statistics {
+	/* number of bytes that were written to the buffer by the driver */
+	uint32 written_bytes;
+	/* number of bytes that were read from the buffer by user land */
+	uint32 read_bytes;
+	/* number of records that were written to the buffer by the driver */
+	uint32 written_records;
+};
+
+typedef struct dhd_dbg_ring_status {
+	uint8 name[DBGRING_NAME_MAX];
+	uint32 flags;
+	int ring_id; /* unique integer representing the ring */
+	/* total memory size allocated for the buffer */
+	uint32 ring_buffer_byte_size;
+	uint32 verbose_level;
+	/* number of bytes that were written to the buffer by the driver */
+	uint32 written_bytes;
+	/* number of bytes that were read from the buffer by user land */
+	uint32 read_bytes;
+	/* number of records that were written to the buffer by the driver */
+	uint32 written_records;
+} dhd_dbg_ring_status_t;
+
+typedef struct dhd_dbg_ring {
+	int id; /* ring id */
+	uint8 name[DBGRING_NAME_MAX]; /* name string */
+	uint32 ring_size; /* size of the ring buffer in bytes */
+	uint32 wp; /* write pointer */
+	uint32 rp; /* read pointer */
+	uint32 rp_tmp; /* tmp read pointer */
+	uint32 log_level; /* log_level */
+	uint32 threshold; /* threshold bytes */
+	void * ring_buf; /* pointer to the actual ring buffer */
+	void * lock; /* lock for ring access */
+	struct ring_statistics stat; /* statistics */
+	enum dbg_ring_state state; /* ring state enum */
+	bool tail_padded; /* writer does not have enough space */
+	uint32 rem_len; /* number of bytes from wp_pad to end */
+	bool sched_pull; /* schedule reader immediately */
+	bool pull_inactive; /* pull contents from ring even if it is inactive */
+} dhd_dbg_ring_t;
+
+#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3)
+#define RING_STAT_TO_STATUS(ring, status) \
+	do { \
+		/* status.name/ring->name are the same length so no need to check return value */ \
+		(void)memcpy_s(status.name, sizeof(status.name), ring->name, sizeof(ring->name)); \
+		status.ring_id = ring->id; \
+		status.ring_buffer_byte_size = ring->ring_size; \
+		status.written_bytes = ring->stat.written_bytes; \
+		status.written_records = ring->stat.written_records; \
+		status.read_bytes = ring->stat.read_bytes; \
+		status.verbose_level = ring->log_level; \
+	} while (0)
+
+#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t))
+#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE)
+#define PAYLOAD_MAX_LEN 65535
+#define PAYLOAD_ECNTR_MAX_LEN 1648u
+#define PAYLOAD_RTT_MAX_LEN 1648u
+#define PAYLOAD_BCM_TRACE_MAX_LEN 1648u
+#define PENDING_LEN_MAX 0xFFFFFFFF
+#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
+
+#define TXACTIVESZ(r, w, d) (((r) <= (w)) ? ((w) - (r)) : ((d) - (r) + (w)))
+#define DBG_RING_READ_AVAIL_SPACE(w, r, d) (((w) >= (r)) ? ((w) - (r)) : ((d) - (r)))
+#define DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d) (((w) >= (r)) ? \
((d) - (w)) : ((r) - (w))) +#define DBG_RING_WRITE_SPACE_AVAIL(r, w, d) (d - (TXACTIVESZ(r, w, d))) +#define DBG_RING_CHECK_WRITE_SPACE(r, w, d) \ + MIN(DBG_RING_WRITE_SPACE_AVAIL(r, w, d), DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d)) + +typedef void (*os_pullreq_t)(void *os_priv, const int ring_id); + +dhd_dbg_ring_t *dhd_dbg_ring_alloc_init(dhd_pub_t *dhd, uint16 ring_id, + char *ring_name, uint32 ring_sz, void *allocd_buf, + bool pull_inactive); +void dhd_dbg_ring_dealloc_deinit(void **dbgring, dhd_pub_t *dhd); +int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name, + uint32 ring_sz, void *allocd_buf, bool pull_inactive); +void dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring); +int dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data); +int dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, + bool strip_hdr); +int dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, + bool strip_header); +uint32 dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring); +void dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len, + os_pullreq_t pull_fn, void *os_pvt, const int id); +int dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold); +void dhd_dbg_ring_start(dhd_dbg_ring_t *ring); +#endif /* __DHD_DBG_RING_H__ */ diff --git a/bcmdhd.101.10.361.x/dhd_debug.c b/bcmdhd.101.10.361.x/dhd_debug.c new file mode 100755 index 0000000..f8f6c92 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_debug.c @@ -0,0 +1,2853 @@ +/* + * DHD debugability support + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(DHD_EVENT_LOG_FILTER) +#include +#endif /* DHD_EVENT_LOG_FILTER */ + +#if defined(DHD_EFI) || defined(NDIS) +#if !defined(offsetof) +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif /* !defined(offsetof) */ + +#define container_of(ptr, type, member) \ + (type *)((char *)(ptr) - offsetof(type, member)) +#endif /* defined(DHD_EFI ) || defined(NDIS) */ + +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING +uint8 control_logtrace = LOGTRACE_RAW_FMT; +#else +uint8 control_logtrace = CUSTOM_CONTROL_LOGTRACE; +#endif + +struct map_table { + uint16 fw_id; + uint16 host_id; + char *desc; +}; + +struct map_table event_map[] = { + {WLC_E_AUTH, WIFI_EVENT_AUTH_COMPLETE, "AUTH_COMPLETE"}, + {WLC_E_ASSOC, WIFI_EVENT_ASSOC_COMPLETE, "ASSOC_COMPLETE"}, + {TRACE_FW_AUTH_STARTED, WIFI_EVENT_FW_AUTH_STARTED, "AUTH STARTED"}, + {TRACE_FW_ASSOC_STARTED, WIFI_EVENT_FW_ASSOC_STARTED, "ASSOC STARTED"}, + {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_FW_RE_ASSOC_STARTED, "REASSOC STARTED"}, + {TRACE_G_SCAN_STARTED, WIFI_EVENT_G_SCAN_STARTED, "GSCAN STARTED"}, + {WLC_E_PFN_SCAN_COMPLETE, WIFI_EVENT_G_SCAN_COMPLETE, "GSCAN COMPLETE"}, + {WLC_E_DISASSOC, WIFI_EVENT_DISASSOCIATION_REQUESTED, "DIASSOC REQUESTED"}, + {WLC_E_REASSOC, WIFI_EVENT_RE_ASSOCIATION_REQUESTED, "REASSOC REQUESTED"}, + {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_REQUESTED, "ROAM REQUESTED"}, + {WLC_E_BEACON_FRAME_RX, WIFI_EVENT_BEACON_RECEIVED, "BEACON Received"}, + {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_SCAN_STARTED, "ROAM SCAN STARTED"}, + {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"}, + {TRACE_ROAM_AUTH_STARTED, WIFI_EVENT_ROAM_AUTH_STARTED, "ROAM AUTH STARTED"}, + {WLC_E_AUTH, WIFI_EVENT_ROAM_AUTH_COMPLETE, "ROAM AUTH COMPLETED"}, + {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_ROAM_ASSOC_STARTED, "ROAM ASSOC STARTED"}, + {WLC_E_ASSOC, WIFI_EVENT_ROAM_ASSOC_COMPLETE, "ROAM ASSOC COMPLETED"}, + {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"}, + {TRACE_BT_COEX_BT_SCO_START, WIFI_EVENT_BT_COEX_BT_SCO_START, "BT SCO START"}, + {TRACE_BT_COEX_BT_SCO_STOP, WIFI_EVENT_BT_COEX_BT_SCO_STOP, "BT SCO STOP"}, + {TRACE_BT_COEX_BT_SCAN_START, WIFI_EVENT_BT_COEX_BT_SCAN_START, "BT COEX SCAN START"}, + {TRACE_BT_COEX_BT_SCAN_STOP, WIFI_EVENT_BT_COEX_BT_SCAN_STOP, "BT COEX SCAN STOP"}, + {TRACE_BT_COEX_BT_HID_START, WIFI_EVENT_BT_COEX_BT_HID_START, "BT HID START"}, + {TRACE_BT_COEX_BT_HID_STOP, WIFI_EVENT_BT_COEX_BT_HID_STOP, "BT HID STOP"}, + {WLC_E_EAPOL_MSG, WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, "FW EAPOL PKT RECEIVED"}, + {TRACE_FW_EAPOL_FRAME_TRANSMIT_START, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, + "FW EAPOL PKT TRANSMITED"}, + {TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP, + "FW EAPOL PKT TX STOPPED"}, + {TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE, WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE, + "BLOCK ACK NEGO COMPLETED"}, +}; + +struct map_table event_tag_map[] = { + {TRACE_TAG_VENDOR_SPECIFIC, WIFI_TAG_VENDOR_SPECIFIC, "VENDOR SPECIFIC DATA"}, + {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"}, + {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"}, + {TRACE_TAG_SSID, WIFI_TAG_SSID, "SSID"}, + {TRACE_TAG_STATUS, WIFI_TAG_STATUS, "STATUS"}, + {TRACE_TAG_CHANNEL_SPEC, WIFI_TAG_CHANNEL_SPEC, "CHANSPEC"}, + {TRACE_TAG_WAKE_LOCK_EVENT, WIFI_TAG_WAKE_LOCK_EVENT, "WAKELOCK EVENT"}, + 
{TRACE_TAG_ADDR1, WIFI_TAG_ADDR1, "ADDR_1"}, + {TRACE_TAG_ADDR2, WIFI_TAG_ADDR2, "ADDR_2"}, + {TRACE_TAG_ADDR3, WIFI_TAG_ADDR3, "ADDR_3"}, + {TRACE_TAG_ADDR4, WIFI_TAG_ADDR4, "ADDR_4"}, + {TRACE_TAG_TSF, WIFI_TAG_TSF, "TSF"}, + {TRACE_TAG_IE, WIFI_TAG_IE, "802.11 IE"}, + {TRACE_TAG_INTERFACE, WIFI_TAG_INTERFACE, "INTERFACE"}, + {TRACE_TAG_REASON_CODE, WIFI_TAG_REASON_CODE, "REASON CODE"}, + {TRACE_TAG_RATE_MBPS, WIFI_TAG_RATE_MBPS, "RATE"}, +}; + +/* define log level per ring type */ +struct log_level_table fw_verbose_level_map[] = { + {1, EVENT_LOG_TAG_PCI_ERROR, "PCI_ERROR"}, +#ifndef DISABLE_PCI_LOGGING + {1, EVENT_LOG_TAG_PCI_WARN, "PCI_WARN"}, + {2, EVENT_LOG_TAG_PCI_INFO, "PCI_INFO"}, + {3, EVENT_LOG_TAG_PCI_DBG, "PCI_DEBUG"}, +#endif +#ifndef DISABLE_BEACON_LOGGING + {3, EVENT_LOG_TAG_BEACON_LOG, "BEACON_LOG"}, +#endif + {2, EVENT_LOG_TAG_WL_ASSOC_LOG, "ASSOC_LOG"}, + {2, EVENT_LOG_TAG_WL_ROAM_LOG, "ROAM_LOG"}, + {1, EVENT_LOG_TAG_TRACE_WL_INFO, "WL INFO"}, + {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, "BTCOEX INFO"}, +#ifdef DHD_RANDMAC_LOGGING + {1, EVENT_LOG_TAG_RANDMAC_ERR, "RANDMAC_ERR"}, +#endif /* DHD_RANDMAC_LOGGING */ +#ifdef CUSTOMER_HW4_DEBUG + {3, EVENT_LOG_TAG_SCAN_WARN, "SCAN_WARN"}, +#else + {1, EVENT_LOG_TAG_SCAN_WARN, "SCAN_WARN"}, +#endif /* CUSTOMER_HW4_DEBUG */ + {1, EVENT_LOG_TAG_SCAN_ERROR, "SCAN_ERROR"}, + {2, EVENT_LOG_TAG_SCAN_TRACE_LOW, "SCAN_TRACE_LOW"}, + {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, "SCAN_TRACE_HIGH"}, +#ifdef DHD_WL_ERROR_LOGGING + {3, EVENT_LOG_TAG_WL_ERROR, "WL_ERROR"}, +#endif +#ifdef DHD_IE_ERROR_LOGGING + {3, EVENT_LOG_TAG_IE_ERROR, "IE_ERROR"}, +#endif +#ifdef DHD_ASSOC_ERROR_LOGGING + {3, EVENT_LOG_TAG_ASSOC_ERROR, "ASSOC_ERROR"}, +#endif +#ifdef DHD_PMU_ERROR_LOGGING + {3, EVENT_LOG_TAG_PMU_ERROR, "PMU_ERROR"}, +#endif +#ifdef DHD_8021X_ERROR_LOGGING + {3, EVENT_LOG_TAG_4WAYHANDSHAKE, "8021X_ERROR"}, +#endif +#ifdef DHD_AMPDU_ERROR_LOGGING + {3, EVENT_LOG_TAG_AMSDU_ERROR, "AMPDU_ERROR"}, +#endif +#ifdef DHD_SAE_ERROR_LOGGING + {3, EVENT_LOG_TAG_SAE_ERROR, "SAE_ERROR"}, +#endif +}; + +/* reference tab table */ +uint ref_tag_tbl[EVENT_LOG_TAG_MAX + 1] = {0}; + +typedef struct dhddbg_loglist_item { + dll_t list; + prcd_event_log_hdr_t prcd_log_hdr; +} loglist_item_t; + +typedef struct dhbdbg_pending_item { + dll_t list; + dhd_dbg_ring_status_t ring_status; + dhd_dbg_ring_entry_t *ring_entry; +} pending_item_t; + +/* trace log entry header user space processing */ +struct tracelog_header { + int magic_num; + int buf_size; + int seq_num; +}; +#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06 + +int +dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data) +{ + dhd_dbg_ring_t *ring; + int ret = 0; + uint32 pending_len = 0; + + if (!dhdp || !dhdp->dbg) { + return BCME_BADADDR; + } + + if (!VALID_RING(ring_id)) { + DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id)); + return BCME_RANGE; + } + + ring = &dhdp->dbg->dbg_rings[ring_id]; + + ret = dhd_dbg_ring_push(ring, hdr, data); + if (ret != BCME_OK) + return ret; + + pending_len = dhd_dbg_ring_get_pending_len(ring); + dhd_dbg_ring_sched_pull(ring, pending_len, dhdp->dbg->pullreq, + dhdp->dbg->private, ring->id); + + return ret; +} + +dhd_dbg_ring_t * +dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id) +{ + if (!dhdp || !dhdp->dbg) { + return NULL; + } + + if (!VALID_RING(ring_id)) { + DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id)); + return NULL; + } + + return &dhdp->dbg->dbg_rings[ring_id]; +} + +int 
+dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
+	bool strip_header)
+{
+	dhd_dbg_ring_t *ring;
+
+	if (!dhdp || !dhdp->dbg) {
+		return 0;
+	}
+
+	if (!VALID_RING(ring_id)) {
+		DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+		return BCME_RANGE;
+	}
+
+	ring = &dhdp->dbg->dbg_rings[ring_id];
+
+	return dhd_dbg_ring_pull_single(ring, data, buf_len, strip_header);
+}
+
+int
+dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len)
+{
+	dhd_dbg_ring_t *ring;
+
+	if (!dhdp || !dhdp->dbg)
+		return 0;
+	if (!VALID_RING(ring_id)) {
+		DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+		return BCME_RANGE;
+	}
+	ring = &dhdp->dbg->dbg_rings[ring_id];
+	return dhd_dbg_ring_pull(ring, data, buf_len, FALSE);
+}
+
+static int
+dhd_dbg_msgtrace_seqchk(uint32 *prev, uint32 cur)
+{
+	/* normal case including wrap around */
+	if ((cur == 0 && *prev == 0xFFFFFFFF) || ((cur - *prev) == 1)) {
+		goto done;
+	} else if (cur == *prev) {
+		DHD_EVENT(("%s duplicate trace\n", __FUNCTION__));
+		return -1;
+	} else if (cur > *prev) {
+		DHD_EVENT(("%s lost %d packets\n", __FUNCTION__, cur - *prev));
+	} else {
+		DHD_EVENT(("%s seq out of order, dhd %d, dongle %d\n",
+			__FUNCTION__, *prev, cur));
+	}
+done:
+	*prev = cur;
+	return 0;
+}
+
+static void
+dhd_dbg_msgtrace_msg_parser(void *event_data)
+{
+	msgtrace_hdr_t *hdr;
+	char *data, *s;
+	static uint32 seqnum_prev = 0;
+
+	if (!event_data) {
+		DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	hdr = (msgtrace_hdr_t *)event_data;
+	data = (char *)event_data + MSGTRACE_HDRLEN;
+
+	/* There are 2 bytes available at the end of data */
+	data[ntoh16(hdr->len)] = '\0';
+
+	if (ntoh32(hdr->discarded_bytes) || ntoh32(hdr->discarded_printf)) {
+		DHD_DBGIF(("WLC_E_TRACE: [Discarded traces in dongle -->"
+			"discarded_bytes %d discarded_printf %d]\n",
+			ntoh32(hdr->discarded_bytes),
+			ntoh32(hdr->discarded_printf)));
+	}
+
+	if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum)))
+		return;
+
+	/* Display the trace buffer. Advance from
+	 * \n to \n to avoid printing one big blob
+	 * (an issue with Linux printk)
+	 */
+	while (*data != '\0' && (s = strstr(data, "\n")) != NULL) {
+		*s = '\0';
+		DHD_FWLOG(("[FWLOG] %s\n", data));
+		data = s + 1;
+	}
+	if (*data)
+		DHD_FWLOG(("[FWLOG] %s", data));
+}
+#ifdef SHOW_LOGTRACE
+#define DATA_UNIT_FOR_LOG_CNT 4
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+int
+replace_percent_p_to_x(char *fmt)
+{
+	int p_to_x_done = FALSE;
+
+	while (*fmt != '\0')
+	{
+		/* Skip characters until we see a '%' */
+		if (*fmt++ != '%')
+		{
+			continue;
+		}
+
+		/*
+		 * Skip any flags, field width and precision:
+		 * Flags (followed by '%'):
+		 *   #, 0, -, ' ', +
+		 */
+		if (*fmt == '#')
+			fmt++;
+
+		if (*fmt == '0' || *fmt == '-' || *fmt == '+')
+			fmt++;
+
+		/*
+		 * Field width:
+		 * An optional decimal digit string (with non-zero first digit)
+		 * specifying a minimum field width
+		 */
+		while (*fmt && bcm_isdigit(*fmt))
+			fmt++;
+
+		/*
+		 * Precision:
+		 * An optional precision, in the form of a period ('.') followed by an
+		 * optional decimal digit string.
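+		 *
+		 * Editor's illustration (not part of the original patch):
+		 * flags, width and precision are skipped, not rewritten, so
+		 * a dongle format string such as "ptr=%08p" becomes
+		 * "ptr=%08x" below and prints as a plain hex value on the
+		 * host.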
+ */ + if (*fmt == '.') + { + fmt++; + while (*fmt && bcm_isdigit(*fmt)) fmt++; + } + + /* If %p is seen, change it to %x */ + if (*fmt == 'p') + { + *fmt = 'x'; + p_to_x_done = TRUE; + } + if (*fmt) + fmt++; + } + + return p_to_x_done; +} + +/* To identify format of types %Ns where N >= 0 is a number */ +bool +check_valid_string_format(char *curr_ptr) +{ + char *next_ptr; + if ((next_ptr = bcmstrstr(curr_ptr, "s")) != NULL) { + /* Default %s format */ + if (curr_ptr == next_ptr) { + return TRUE; + } + + /* Verify each charater between '%' and 's' is a valid number */ + while (curr_ptr < next_ptr) { + if (bcm_isdigit(*curr_ptr) == FALSE) { + return FALSE; + } + curr_ptr++; + } + + return TRUE; + } else { + return FALSE; + } +} + +/* To identify format of non string format types */ +bool +check_valid_non_string_format(char *curr_ptr) +{ + char *next_ptr; + char *next_fmt_stptr; + char valid_fmt_types[17] = {'d', 'i', 'x', 'X', 'c', 'p', 'u', + 'f', 'F', 'e', 'E', 'g', 'G', 'o', + 'a', 'A', 'n'}; + int i; + bool valid = FALSE; + + /* Check for next % in the fmt str */ + next_fmt_stptr = bcmstrstr(curr_ptr, "%"); + + for (next_ptr = curr_ptr; *next_ptr != '\0'; next_ptr++) { + for (i = 0; i < (int)((sizeof(valid_fmt_types))/sizeof(valid_fmt_types[0])); i++) { + if (*next_ptr == valid_fmt_types[i]) { + /* Check whether format type found corresponds to current % + * and not the next one, if exists. + */ + if ((next_fmt_stptr == NULL) || + (next_fmt_stptr && (next_ptr < next_fmt_stptr))) { + /* Not validating for length/width fields in + * format specifier. + */ + valid = TRUE; + } + goto done; + } + } + } + +done: + return valid; +} + +#define MAX_NO_OF_ARG 16 +#define FMTSTR_SIZE 200 +#define ROMSTR_SIZE 268 +#define SIZE_LOC_STR 50 +#define LOG_PRINT_CNT_MAX 16u +#define EL_MSEC_PER_SEC 1000 +#ifdef DHD_LOG_PRINT_RATE_LIMIT +#define MAX_LOG_PRINT_COUNT 100u +#define LOG_PRINT_THRESH (1u * USEC_PER_SEC) +#endif +#define EL_PARSE_VER "V02" +static uint64 verboselog_ts_saved = 0; + +bool +dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *prcd_log_hdr) +{ + event_log_extended_hdr_t *ext_log_hdr; + uint16 event_log_fmt_num; + uint8 event_log_hdr_type; + + /* Identify the type of event tag, payload type etc.. 
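+	 *
+	 * Editor's summary (not part of the original patch) of the switch
+	 * below:
+	 *   DHD_OW_NB_EVENT_LOG_HDR - one-word header, non-binary payload
+	 *   DHD_TW_NB_EVENT_LOG_HDR - two-word (extended) header, non-binary
+	 *   DHD_BI_EVENT_LOG_HDR    - binary payload; fmt_num selects the
+	 *                             one-word or two-word variant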
*/
+ event_log_hdr_type = log_hdr->fmt_num & DHD_EVENT_LOG_HDR_MASK;
+ event_log_fmt_num = (log_hdr->fmt_num >> DHD_EVENT_LOG_FMT_NUM_OFFSET) &
+ DHD_EVENT_LOG_FMT_NUM_MASK;
+
+ switch (event_log_hdr_type) {
+ case DHD_OW_NB_EVENT_LOG_HDR:
+ prcd_log_hdr->ext_event_log_hdr = FALSE;
+ prcd_log_hdr->binary_payload = FALSE;
+ break;
+ case DHD_TW_NB_EVENT_LOG_HDR:
+ prcd_log_hdr->ext_event_log_hdr = TRUE;
+ prcd_log_hdr->binary_payload = FALSE;
+ break;
+ case DHD_BI_EVENT_LOG_HDR:
+ if (event_log_fmt_num == DHD_OW_BI_EVENT_FMT_NUM) {
+ prcd_log_hdr->ext_event_log_hdr = FALSE;
+ prcd_log_hdr->binary_payload = TRUE;
+ } else if (event_log_fmt_num == DHD_TW_BI_EVENT_FMT_NUM) {
+ prcd_log_hdr->ext_event_log_hdr = TRUE;
+ prcd_log_hdr->binary_payload = TRUE;
+ } else {
+ DHD_ERROR(("%s: invalid format number 0x%X\n",
+ __FUNCTION__, event_log_fmt_num));
+ return FALSE;
+ }
+ break;
+ case DHD_INVALID_EVENT_LOG_HDR:
+ default:
+ DHD_ERROR(("%s: invalid event log header type 0x%X\n",
+ __FUNCTION__, event_log_hdr_type));
+ return FALSE;
+ }
+
+ /* Parse extended and legacy event log headers and populate prcd_event_log_hdr_t */
+ if (prcd_log_hdr->ext_event_log_hdr) {
+ ext_log_hdr = (event_log_extended_hdr_t *)
+ ((uint8 *)log_hdr - sizeof(event_log_hdr_t));
+ prcd_log_hdr->tag = ((ext_log_hdr->extended_tag &
+ DHD_TW_VALID_TAG_BITS_MASK) << DHD_TW_EVENT_LOG_TAG_OFFSET) | log_hdr->tag;
+ } else {
+ prcd_log_hdr->tag = log_hdr->tag;
+ }
+ prcd_log_hdr->count = log_hdr->count;
+ prcd_log_hdr->fmt_num_raw = log_hdr->fmt_num;
+ prcd_log_hdr->fmt_num = event_log_fmt_num;
+
+ /* update arm cycle */
+ /*
+ * For legacy event tag:-
+ * |payload........|Timestamp| Tag
+ *
+ * For extended event tag:-
+ * |payload........|Timestamp|extended Tag| Tag.
+ *
+ */
+ prcd_log_hdr->armcycle = prcd_log_hdr->ext_event_log_hdr ?
+ *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET) : + *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_OFFSET); + + /* update event log data pointer address */ + prcd_log_hdr->log_ptr = + (uint32 *)log_hdr - log_hdr->count - prcd_log_hdr->ext_event_log_hdr; + + /* handle error cases above this */ + return TRUE; +} + +static void +dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, + void *raw_event_ptr, uint32 logset, uint16 block, uint32* data) +{ + event_log_hdr_t *ts_hdr; + uint32 *log_ptr = plog_hdr->log_ptr; + char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 }; + uint32 rom_str_len = 0; + uint32 *ts_data; + + if (!raw_event_ptr) { + return; + } + + if (log_ptr < data) { + DHD_ERROR(("Invalid log pointer, logptr : %p data : %p \n", log_ptr, data)); + return; + } + + if (log_ptr > data) { + /* Get time stamp if it's updated */ + ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t)); + if (ts_hdr->tag == EVENT_LOG_TAG_TS) { + ts_data = (uint32 *)ts_hdr - ts_hdr->count; + if (ts_data >= data) { + verboselog_ts_saved = (uint64)ts_data[0]; + DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n", + ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1])); + } + } else if (ts_hdr->tag == EVENT_LOG_TAG_ENHANCED_TS) { + ets_msg_v1_t *ets; + ets = (ets_msg_v1_t *)ts_hdr - ts_hdr->count; + if ((uint32*)ets >= data && + ts_hdr->count >= (sizeof(ets_msg_v1_t) / sizeof(uint32)) && + ets->version == ENHANCED_TS_MSG_VERSION_1) { + DHD_MSGTRACE_LOG(("EVENT_LOG_ENHANCED_TS_V1: " + "SYS:%08x CPU:%08x CPUFREQ:%u\n", + ets->timestamp, ets->cyclecount, ets->cpu_freq)); + } + } + } + + if (plog_hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) { + rom_str_len = (plog_hdr->count - 1) * sizeof(uint32); + if (rom_str_len >= (ROMSTR_SIZE -1)) + rom_str_len = ROMSTR_SIZE - 1; + + /* copy all ascii data for ROM printf to local string */ + memcpy(fmtstr_loc_buf, log_ptr, rom_str_len); + /* add end of line at last */ + fmtstr_loc_buf[rom_str_len] = '\0'; + + DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s", + log_ptr[plog_hdr->count - 1], fmtstr_loc_buf)); + + /* Add newline if missing */ + if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') + DHD_MSGTRACE_LOG(("\n")); + + return; + } + + if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE || + plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) { + wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, plog_hdr, log_ptr); + return; + } + + /* print the message out in a logprint */ + dhd_dbg_verboselog_printf(dhdp, plog_hdr, raw_event_ptr, log_ptr, logset, block); +} + +void +dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, + void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block) +{ + dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr; + uint16 count; + int log_level, id; + char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 }; + char (*str_buf)[SIZE_LOC_STR] = NULL; + char *str_tmpptr = NULL; + uint32 addr = 0; + typedef union { + uint32 val; + char * addr; + } u_arg; + u_arg arg[MAX_NO_OF_ARG] = {{0}}; + char *c_ptr = NULL; + struct bcmstrbuf b; +#ifdef DHD_LOG_PRINT_RATE_LIMIT + static int log_print_count = 0; + static uint64 ts0 = 0; + uint64 ts1 = 0; +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ + + BCM_REFERENCE(arg); + +#ifdef DHD_LOG_PRINT_RATE_LIMIT + if (!ts0) + ts0 = OSL_SYSUPTIME_US(); + + ts1 = OSL_SYSUPTIME_US(); + + if (((ts1 - ts0) <= LOG_PRINT_THRESH) && (log_print_count >= MAX_LOG_PRINT_COUNT)) { + log_print_threshold = 1; + ts0 = 0; + log_print_count = 0; + DHD_ERROR(("%s: Log print water mark is reached," + 
" console logs are dumped only to debug_dump file\n", __FUNCTION__)); + } else if ((ts1 - ts0) > LOG_PRINT_THRESH) { + log_print_threshold = 0; + ts0 = 0; + log_print_count = 0; + } + +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ + /* print the message out in a logprint */ + if ((control_logtrace == LOGTRACE_RAW_FMT) || !(raw_event->fmts)) { + if (dhdp->dbg) { + log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level; + for (id = 0; id < ARRAYSIZE(fw_verbose_level_map); id++) { + if ((fw_verbose_level_map[id].tag == plog_hdr->tag) && + (fw_verbose_level_map[id].log_level > log_level)) + return; + } + } + if (plog_hdr->binary_payload) { + DHD_ECNTR_LOG(("%d.%d EL:tag=%d len=%d fmt=0x%x", + (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC), + (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), + plog_hdr->tag, + plog_hdr->count, + plog_hdr->fmt_num_raw)); + + for (count = 0; count < (plog_hdr->count - 1); count++) { + /* XXX: skip first line feed in case count 0 */ + if (count && (count % LOG_PRINT_CNT_MAX == 0)) { + DHD_ECNTR_LOG(("\n\t%08x", log_ptr[count])); + } else { + DHD_ECNTR_LOG((" %08x", log_ptr[count])); + } + } + DHD_ECNTR_LOG(("\n")); + } + else { + bcm_binit(&b, fmtstr_loc_buf, FMTSTR_SIZE); + /* XXX: The 'hdr->count - 1' is dongle time */ +#ifndef OEM_ANDROID + bcm_bprintf(&b, "%06d.%03d EL: %d 0x%x", + (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC), + (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), + plog_hdr->tag, + plog_hdr->fmt_num_raw); +#else + bcm_bprintf(&b, "%06d.%03d EL:%s:%u:%u %d %d 0x%x", + (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC), + (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), + EL_PARSE_VER, logset, block, + plog_hdr->tag, + plog_hdr->count, + plog_hdr->fmt_num_raw); +#endif /* !OEM_ANDROID */ + for (count = 0; count < (plog_hdr->count - 1); count++) { + bcm_bprintf(&b, " %x", log_ptr[count]); + } + + /* ensure preserve fw logs go to debug_dump only in case of customer4 */ + if (logset < dhdp->event_log_max_sets && + ((0x01u << logset) & dhdp->logset_prsrv_mask)) { + DHD_PRSRV_MEM(("%s\n", b.origbuf)); + } else { +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + DHD_FW_VERBOSE(("%s\n", b.origbuf)); +#else + DHD_FWLOG(("%s\n", b.origbuf)); +#endif +#ifdef DHD_LOG_PRINT_RATE_LIMIT + log_print_count++; +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ + } + } + return; + } + + str_buf = MALLOCZ(dhdp->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)); + if (!str_buf) { + DHD_ERROR(("%s: malloc failed str_buf\n", __FUNCTION__)); + return; + } + + if ((plog_hdr->fmt_num) < raw_event->num_fmts) { + if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s", + raw_event->fmts[plog_hdr->fmt_num]); + plog_hdr->count++; + } else { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E:%u:%u %06d.%03d %s", + logset, block, + (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC), + (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), + raw_event->fmts[plog_hdr->fmt_num]); + } + c_ptr = fmtstr_loc_buf; + } else { + /* for ecounters, don't print the error as it will flood */ + if ((plog_hdr->fmt_num != DHD_OW_BI_EVENT_FMT_NUM) && + (plog_hdr->fmt_num != DHD_TW_BI_EVENT_FMT_NUM)) { + DHD_ERROR(("%s: fmt number: 0x%x out of range\n", + __FUNCTION__, plog_hdr->fmt_num)); + } else { + DHD_INFO(("%s: fmt number: 0x%x out of range\n", + __FUNCTION__, plog_hdr->fmt_num)); + } + + goto exit; + } + + if (plog_hdr->count > MAX_NO_OF_ARG) { + DHD_ERROR(("%s: plog_hdr->count(%d) out of range\n", + __FUNCTION__, 
plog_hdr->count));
+ goto exit;
+ }
+
+ /* print the format string which will be needed for debugging incorrect formats */
+ DHD_INFO(("%s: fmtstr_loc_buf = %s\n", __FUNCTION__, fmtstr_loc_buf));
+
+ /* Replace all %p to %x to handle 32 bit %p */
+ replace_percent_p_to_x(fmtstr_loc_buf);
+
+ for (count = 0; count < (plog_hdr->count - 1); count++) {
+ if (c_ptr != NULL)
+ if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL)
+ c_ptr++;
+
+ if (c_ptr != NULL) {
+ if (check_valid_string_format(c_ptr)) {
+ if ((raw_event->raw_sstr) &&
+ ((log_ptr[count] > raw_event->rodata_start) &&
+ (log_ptr[count] < raw_event->rodata_end))) {
+ /* ram static string */
+ addr = log_ptr[count] - raw_event->rodata_start;
+ str_tmpptr = raw_event->raw_sstr + addr;
+ memcpy(str_buf[count], str_tmpptr,
+ SIZE_LOC_STR);
+ str_buf[count][SIZE_LOC_STR-1] = '\0';
+ arg[count].addr = str_buf[count];
+ } else if ((raw_event->rom_raw_sstr) &&
+ ((log_ptr[count] >
+ raw_event->rom_rodata_start) &&
+ (log_ptr[count] <
+ raw_event->rom_rodata_end))) {
+ /* rom static string */
+ addr = log_ptr[count] - raw_event->rom_rodata_start;
+ str_tmpptr = raw_event->rom_raw_sstr + addr;
+ memcpy(str_buf[count], str_tmpptr,
+ SIZE_LOC_STR);
+ str_buf[count][SIZE_LOC_STR-1] = '\0';
+ arg[count].addr = str_buf[count];
+ } else {
+ /*
+ * Dynamic string OR
+ * No data for static string.
+ * So store all string's address as string.
+ */
+ snprintf(str_buf[count], SIZE_LOC_STR,
+ "(s)0x%x", log_ptr[count]);
+ arg[count].addr = str_buf[count];
+ }
+ } else if (check_valid_non_string_format(c_ptr)) {
+ /* Other than string format */
+ arg[count].val = log_ptr[count];
+ } else {
+ /* Either nothing was copied after the %, or an improper
+ * format specifier follows the current %, because the
+ * buffer was too small for a complete copy of the
+ * original fmt string. This causes the error below:
+ * Error: "Please remove unsupported %\x00 in format string"
+ * (lib/vsprintf.c:1900 format_decode+0x3bc/0x470).
+ * Refer to JIRA: SWWLAN-200629 for detailed info.
+ *
+ * Terminate the string at the current %.
+ */
+ *(c_ptr - 1) = '\0';
+ break;
+ }
+ }
+ }
+
+ /* ensure preserve fw logs go to debug_dump only in case of customer4 */
+ if (logset < dhdp->event_log_max_sets &&
+ ((0x01u << logset) & dhdp->logset_prsrv_mask)) {
+ if (dhd_msg_level & DHD_EVENT_VAL) {
+ if (dhd_msg_level & DHD_PRSRV_MEM_VAL)
+ printk(fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
+ arg[11], arg[12], arg[13], arg[14], arg[15]);
+ }
+ } else {
+ if (dhd_msg_level & DHD_FWLOG_VAL) {
+ printk(fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
+ arg[11], arg[12], arg[13], arg[14], arg[15]);
+ }
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ log_print_count++;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+ }
+
+exit:
+ MFREE(dhdp->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR));
+}
+
+#if defined(EWP_BCM_TRACE) || defined(EWP_RTT_LOGGING) || \
+ defined(EWP_ECNTRS_LOGGING)
+static int
+dhd_dbg_send_evtlog_to_ring(prcd_event_log_hdr_t *plog_hdr,
+ dhd_dbg_ring_entry_t *msg_hdr, dhd_dbg_ring_t *ring,
+ uint16 max_payload_len, uint8 *logbuf)
+{
+ event_log_hdr_t *log_hdr;
+ struct tracelog_header *logentry_header;
+ uint16 len_chk = 0;
+
+ BCM_REFERENCE(log_hdr);
+ BCM_REFERENCE(logentry_header);
+ /*
+ * check msg hdr len before pushing.
+ * FW msg_hdr.len includes length of event log hdr,
+ * logentry header and payload.
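+ *
+ * Illustrative arithmetic (the sizes are assumptions for the
+ * example, not taken from the FW spec): with, say, a 4-byte
+ * event_log_hdr_t, a 12-byte tracelog_header and a
+ * max_payload_len of 64, len_chk below works out to 80 bytes,
+ * plus sizeof(*log_hdr) more when the record carries an
+ * extended header. Any msg_hdr->len beyond that is rejected
+ * before the ring push instead of risking an over-read of
+ * logbuf.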
+ */ + len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) + + max_payload_len); + /* account extended event log header(extended_event_log_hdr) */ + if (plog_hdr->ext_event_log_hdr) { + len_chk += sizeof(*log_hdr); + } + if (msg_hdr->len > len_chk) { + DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: " + "msg_hdr->len=%u, max allowed for %s=%u\n", + __FUNCTION__, msg_hdr->len, ring->name, len_chk)); + return BCME_ERROR; + } + dhd_dbg_ring_push(ring, msg_hdr, logbuf); + return BCME_OK; +} +#endif /* EWP_BCM_TRACE || EWP_RTT_LOGGING || EWP_ECNTRS_LOGGING */ + +void +dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present, + uint32 msgtrace_seqnum) +{ + msgtrace_hdr_t *hdr; + char *data, *tmpdata; + const uint32 log_hdr_len = sizeof(event_log_hdr_t); + uint32 log_pyld_len; + static uint32 seqnum_prev = 0; + event_log_hdr_t *log_hdr; + bool msg_processed = FALSE; + prcd_event_log_hdr_t prcd_log_hdr; + prcd_event_log_hdr_t *plog_hdr; + dll_t list_head, *cur; + loglist_item_t *log_item; + dhd_dbg_ring_entry_t msg_hdr; + char *logbuf; + struct tracelog_header *logentry_header; + uint ring_data_len = 0; + bool ecntr_pushed = FALSE; + bool rtt_pushed = FALSE; + bool bcm_trace_pushed = FALSE; + bool dll_inited = FALSE; + uint32 logset = 0; + uint16 block = 0; + bool event_log_max_sets_queried; + uint32 event_log_max_sets; + uint min_expected_len = 0; + uint16 len_chk = 0; + + BCM_REFERENCE(ecntr_pushed); + BCM_REFERENCE(rtt_pushed); + BCM_REFERENCE(bcm_trace_pushed); + BCM_REFERENCE(len_chk); + + /* store event_logset_queried and event_log_max_sets in local variables + * to avoid race conditions as they were set from different contexts(preinit) + */ + event_log_max_sets_queried = dhdp->event_log_max_sets_queried; + /* Make sure queried is read first with wmb and then max_sets, + * as it is done in reverse order during preinit ioctls. 
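+ *
+ * Sketch of the intended barrier pairing (the preinit writer
+ * side is inferred from this comment and is not shown in this
+ * file):
+ *
+ * writer (preinit) reader (here)
+ * event_log_max_sets = n; queried = event_log_max_sets_queried;
+ * OSL_SMP_WMB(); OSL_SMP_WMB();
+ * event_log_max_sets_queried = TRUE; max_sets = event_log_max_sets;
+ *
+ * If the reader observes queried == TRUE, it is therefore
+ * guaranteed to also observe the max_sets value stored before it.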
+ */ + OSL_SMP_WMB(); + event_log_max_sets = dhdp->event_log_max_sets; + + if (msgtrace_hdr_present) + min_expected_len = (MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_LEN); + else + min_expected_len = EVENT_LOG_BLOCK_LEN; + + /* log trace event consists of: + * msgtrace header + * event log block header + * event log payload + */ + if (!event_data || (datalen <= min_expected_len)) { + DHD_ERROR(("%s: Not processing due to invalid event_data : %p or length : %d\n", + __FUNCTION__, event_data, datalen)); + if (event_data && msgtrace_hdr_present) { + prhex("event_data dump", event_data, datalen); + tmpdata = (char *)event_data + MSGTRACE_HDRLEN; + if (tmpdata) { + DHD_ERROR(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n", + ltoh16(*((uint16 *)(tmpdata+2))), + ltoh32(*((uint32 *)(tmpdata + 4))), + ltoh16(*((uint16 *)(tmpdata))))); + } + } else if (!event_data) { + DHD_ERROR(("%s: event_data is NULL, cannot dump prhex\n", __FUNCTION__)); + } + + return; + } + + if (msgtrace_hdr_present) { + hdr = (msgtrace_hdr_t *)event_data; + data = (char *)event_data + MSGTRACE_HDRLEN; + datalen -= MSGTRACE_HDRLEN; + msgtrace_seqnum = ntoh32(hdr->seqnum); + } else { + data = (char *)event_data; + } + + if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, msgtrace_seqnum)) + return; + + /* Save the whole message to event log ring */ + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + logbuf = VMALLOC(dhdp->osh, sizeof(*logentry_header) + datalen); + if (logbuf == NULL) + return; + logentry_header = (struct tracelog_header *)logbuf; + logentry_header->magic_num = TRACE_LOG_MAGIC_NUMBER; + logentry_header->buf_size = datalen; + logentry_header->seq_num = msgtrace_seqnum; + msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE; + + ring_data_len = datalen + sizeof(*logentry_header); + + if ((sizeof(*logentry_header) + datalen) > PAYLOAD_MAX_LEN) { + DHD_ERROR(("%s:Payload len=%u exceeds max len\n", __FUNCTION__, + ((uint)sizeof(*logentry_header) + datalen))); + goto exit; + } + + msg_hdr.len = sizeof(*logentry_header) + datalen; + memcpy(logbuf + sizeof(*logentry_header), data, datalen); + DHD_DBGIF(("%s: datalen %d %d\n", __FUNCTION__, msg_hdr.len, datalen)); +#ifndef DHD_DEBUGABILITY_LOG_DUMP_RING + dhd_dbg_push_to_ring(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf); +#endif + /* Print sequence number, originating set and length of received + * event log buffer. 
Refer to event log buffer structure in
+ * event_log.h
+ */
+ DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n",
+ ltoh16(*((uint16 *)(data+2))), ltoh32(*((uint32 *)(data + 4))),
+ ltoh16(*((uint16 *)(data)))));
+
+ logset = ltoh32(*((uint32 *)(data + 4)));
+
+ if (logset >= event_log_max_sets) {
+ DHD_ERROR(("%s logset: %d max: %d out of range queried: %d\n",
+ __FUNCTION__, logset, event_log_max_sets, event_log_max_sets_queried));
+#ifdef DHD_FW_COREDUMP
+ if (event_log_max_sets_queried) {
+ DHD_ERROR(("%s: collect socram for DUMP_TYPE_LOGSET_BEYOND_RANGE\n",
+ __FUNCTION__));
+ dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+
+ block = ltoh16(*((uint16 *)(data + 2)));
+
+ data += EVENT_LOG_BLOCK_HDRLEN;
+ datalen -= EVENT_LOG_BLOCK_HDRLEN;
+
+ /* Start parsing from the tail of the packet.
+ * Sample format of a message:
+ * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639
+ * 001d3c54 00000064 00000064 035d6d89 0c580439
+ * 0x0c580439 -- 39 is tag, 04 is count, 580c is format number
+ * All these uint32 values arrive in reverse order, grouped as EL
+ * data, so decoding can only parse from last to first.
+ * |<- datalen ->|
+ * |----(payload and maybe more logs)----|event_log_hdr_t|
+ * data log_hdr
+ */
+ dll_init(&list_head);
+ dll_inited = TRUE;
+
+ while (datalen > log_hdr_len) {
+ log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len);
+ memset(&prcd_log_hdr, 0, sizeof(prcd_log_hdr));
+ if (!dhd_dbg_process_event_log_hdr(log_hdr, &prcd_log_hdr)) {
+ DHD_ERROR(("%s: Error while parsing event log header\n",
+ __FUNCTION__));
+ }
+
+ /* skip zero padding at end of frame */
+ if (prcd_log_hdr.tag == EVENT_LOG_TAG_NULL) {
+ datalen -= log_hdr_len;
+ continue;
+ }
+ /* Check argument count (for non-ecounter events only),
+ * any event log should contain at least
+ * one argument (4 bytes) for arm cycle count and up to 16
+ * arguments except EVENT_LOG_TAG_STATS which could use the
+ * whole payload of 256 words
+ */
+ if (prcd_log_hdr.count == 0) {
+ break;
+ }
+ /* Both tag_stats and proxd are binary payloads so skip
+ * argument count check for these.
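+ *
+ * Worked decode of the sample trailer word above (illustrative):
+ * reading 0x0c580439 from the tail gives tag = 0x39,
+ * count = 0x04 (four uint32 payload words, the last being the
+ * arm cycle timestamp) and format number = 0x580c, so the parser
+ * steps back count words from the header to reach the payload
+ * that precedes it.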
+ */ + if ((prcd_log_hdr.tag != EVENT_LOG_TAG_STATS) && + (prcd_log_hdr.tag != EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) && + (prcd_log_hdr.tag != EVENT_LOG_TAG_ROAM_ENHANCED_LOG) && + (prcd_log_hdr.tag != EVENT_LOG_TAG_BCM_TRACE) && + (prcd_log_hdr.count > MAX_NO_OF_ARG)) { + break; + } + + log_pyld_len = (prcd_log_hdr.count + prcd_log_hdr.ext_event_log_hdr) * + DATA_UNIT_FOR_LOG_CNT; + /* log data should not cross the event data boundary */ + if ((uint32)((char *)log_hdr - data) < log_pyld_len) { + break; + } + /* skip 4 bytes time stamp packet */ + if (prcd_log_hdr.tag == EVENT_LOG_TAG_TS || + prcd_log_hdr.tag == EVENT_LOG_TAG_ENHANCED_TS) { + datalen -= (log_pyld_len + log_hdr_len); + continue; + } + if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) { + DHD_ERROR(("%s allocating log list item failed\n", + __FUNCTION__)); + break; + } + + log_item->prcd_log_hdr.tag = prcd_log_hdr.tag; + log_item->prcd_log_hdr.count = prcd_log_hdr.count; + log_item->prcd_log_hdr.fmt_num = prcd_log_hdr.fmt_num; + log_item->prcd_log_hdr.fmt_num_raw = prcd_log_hdr.fmt_num_raw; + log_item->prcd_log_hdr.armcycle = prcd_log_hdr.armcycle; + log_item->prcd_log_hdr.log_ptr = prcd_log_hdr.log_ptr; + log_item->prcd_log_hdr.payload_len = prcd_log_hdr.payload_len; + log_item->prcd_log_hdr.ext_event_log_hdr = prcd_log_hdr.ext_event_log_hdr; + log_item->prcd_log_hdr.binary_payload = prcd_log_hdr.binary_payload; + + dll_insert(&log_item->list, &list_head); + datalen -= (log_pyld_len + log_hdr_len); + } + + while (!dll_empty(&list_head)) { + msg_processed = FALSE; + cur = dll_head_p(&list_head); + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list); + GCC_DIAGNOSTIC_POP(); + + plog_hdr = &log_item->prcd_log_hdr; +#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP) + /* Ecounter tag can be time_data or log_stats+binary paloaod */ + if ((plog_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) || + ((plog_hdr->tag == EVENT_LOG_TAG_STATS) && + (plog_hdr->binary_payload))) { + if (!ecntr_pushed && dhd_log_dump_ecntr_enabled()) { + if (dhd_dbg_send_evtlog_to_ring(plog_hdr, &msg_hdr, + dhdp->ecntr_dbg_ring, + PAYLOAD_ECNTR_MAX_LEN, logbuf) != BCME_OK) { + goto exit; + } + ecntr_pushed = TRUE; + } + } +#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */ + + if (plog_hdr->tag == EVENT_LOG_TAG_ROAM_ENHANCED_LOG) { + print_roam_enhanced_log(plog_hdr); + msg_processed = TRUE; + } +#if defined(EWP_RTT_LOGGING) && defined(DHD_LOG_DUMP) + if ((plog_hdr->tag == EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) && + plog_hdr->binary_payload) { + if (!rtt_pushed && dhd_log_dump_rtt_enabled()) { + if (dhd_dbg_send_evtlog_to_ring(plog_hdr, &msg_hdr, + dhdp->rtt_dbg_ring, + PAYLOAD_RTT_MAX_LEN, logbuf) != BCME_OK) { + goto exit; + } + rtt_pushed = TRUE; + } + } +#endif /* EWP_RTT_LOGGING && DHD_LOG_DUMP */ + +#ifdef EWP_BCM_TRACE + if ((logset == EVENT_LOG_SET_BCM_TRACE) && !bcm_trace_pushed && + plog_hdr->binary_payload) { + if (dhd_dbg_send_evtlog_to_ring(plog_hdr, &msg_hdr, + dhdp->bcm_trace_dbg_ring, + PAYLOAD_BCM_TRACE_MAX_LEN, logbuf) != BCME_OK) { + goto exit; + } + bcm_trace_pushed = TRUE; + } +#endif /* EWP_BCM_TRACE */ + +#if defined (DHD_EVENT_LOG_FILTER) + if (plog_hdr->tag == EVENT_LOG_TAG_STATS) { + dhd_event_log_filter_event_handler(dhdp, plog_hdr, plog_hdr->log_ptr); + } +#endif /* DHD_EVENT_LOG_FILTER */ + if (!msg_processed) { + dhd_dbg_verboselog_handler(dhdp, plog_hdr, raw_event_ptr, + logset, block, (uint32 *)data); + } + dll_delete(cur); + MFREE(dhdp->osh, log_item, 
sizeof(*log_item));
+
+ }
+ BCM_REFERENCE(log_hdr);
+exit:
+ while (dll_inited && (!dll_empty(&list_head))) {
+ cur = dll_head_p(&list_head);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
+ GCC_DIAGNOSTIC_POP();
+
+ dll_delete(cur);
+ MFREE(dhdp->osh, log_item, sizeof(*log_item));
+ }
+
+ VMFREE(dhdp->osh, logbuf, ring_data_len);
+}
+#else /* !SHOW_LOGTRACE */
+static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp,
+ prcd_event_log_hdr_t *plog_hdr, void *raw_event_ptr, uint32 logset, uint16 block,
+ uint32 *data) {};
+INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp,
+ void *event_data, void *raw_event_ptr, uint datalen,
+ bool msgtrace_hdr_present, uint32 msgtrace_seqnum) {};
+#endif /* SHOW_LOGTRACE */
+void
+dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
+ void *raw_event_ptr, uint datalen)
+{
+ msgtrace_hdr_t *hdr;
+
+ hdr = (msgtrace_hdr_t *)event_data;
+
+ if (hdr->version != MSGTRACE_VERSION) {
+ DHD_DBGIF(("%s unsupported MSGTRACE version, dhd %d, dongle %d\n",
+ __FUNCTION__, MSGTRACE_VERSION, hdr->version));
+ return;
+ }
+
+ if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG)
+ dhd_dbg_msgtrace_msg_parser(event_data);
+ else if (hdr->trace_type == MSGTRACE_HDR_TYPE_LOG)
+ dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen, TRUE, 0);
+}
+
+#ifdef BTLOG
+void
+dhd_dbg_bt_log_handler(dhd_pub_t *dhdp, void *data, uint datalen)
+{
+ dhd_dbg_ring_entry_t msg_hdr;
+ int ret;
+
+ /* push to ring */
+ memset(&msg_hdr, 0, sizeof(msg_hdr));
+ msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE;
+ msg_hdr.len = datalen;
+ ret = dhd_dbg_push_to_ring(dhdp, BT_LOG_RING_ID, &msg_hdr, data);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s ring push failed %d\n", __FUNCTION__, ret));
+ }
+}
+#endif /* BTLOG */
+
+/*
+ * dhd_dbg_set_event_log_tag : modify the state of an event log tag
+ */
+void
+dhd_dbg_set_event_log_tag(dhd_pub_t *dhdp, uint16 tag, uint8 set)
+{
+ wl_el_tag_params_t pars;
+ char *cmd = "event_log_tag_control";
+ char iovbuf[WLC_IOCTL_SMLEN] = { 0 };
+ int ret;
+
+ memset(&pars, 0, sizeof(pars));
+ pars.tag = tag;
+ pars.set = set;
+ pars.flags = EVENT_LOG_TAG_FLAG_LOG;
+
+ if (!bcm_mkiovar(cmd, (char *)&pars, sizeof(pars), iovbuf, sizeof(iovbuf))) {
+ DHD_ERROR(("%s mkiovar failed\n", __FUNCTION__));
+ return;
+ }
+
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (ret) {
+ DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret));
+ }
+}
+
+int
+dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags, uint32 threshold)
+{
+ dhd_dbg_ring_t *ring;
+ uint8 set = 1;
+ int i, array_len = 0;
+ struct log_level_table *log_level_tbl = NULL;
+ if (!dhdp || !dhdp->dbg)
+ return BCME_BADADDR;
+
+ if (!VALID_RING(ring_id)) {
+ DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+ return BCME_RANGE;
+ }
+
+ ring = &dhdp->dbg->dbg_rings[ring_id];
+ dhd_dbg_ring_config(ring, log_level, threshold);
+
+ if (log_level > 0)
+ set = TRUE;
+
+ if (ring->id == FW_VERBOSE_RING_ID) {
+ log_level_tbl = fw_verbose_level_map;
+ array_len = ARRAYSIZE(fw_verbose_level_map);
+ }
+
+ for (i = 0; i < array_len; i++) {
+ if (log_level == 0 || (log_level_tbl[i].log_level > log_level)) {
+ /* clear the reference per ring */
+ ref_tag_tbl[log_level_tbl[i].tag] &= ~(1 << ring_id);
+ } else {
+ /* set the reference per ring */
+ ref_tag_tbl[log_level_tbl[i].tag] |= (1 << ring_id);
+ }
+ set = (ref_tag_tbl[log_level_tbl[i].tag])?
1 : 0;
+ DHD_DBGIF(("%s TAG(%s) is %s for the ring(%s)\n", __FUNCTION__,
+ log_level_tbl[i].desc, (set)? "SET" : "CLEAR", ring->name));
+ dhd_dbg_set_event_log_tag(dhdp, log_level_tbl[i].tag, set);
+ }
+ return BCME_OK;
+}
+
+int
+__dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *get_ring_status)
+{
+ dhd_dbg_ring_status_t ring_status;
+ int ret = BCME_OK;
+
+ if (ring == NULL) {
+ return BCME_BADADDR;
+ }
+
+ bzero(&ring_status, sizeof(dhd_dbg_ring_status_t));
+ RING_STAT_TO_STATUS(ring, ring_status);
+ *get_ring_status = ring_status;
+
+ return ret;
+}
+
+/*
+* dhd_dbg_get_ring_status : get the ring status from the corresponding ring buffer
+* Return: An error code or 0 on success.
+*/
+
+int
+dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status)
+{
+ int ret = BCME_OK;
+ int id = 0;
+ dhd_dbg_t *dbg;
+ dhd_dbg_ring_t *dbg_ring;
+ if (!dhdp || !dhdp->dbg)
+ return BCME_BADADDR;
+ dbg = dhdp->dbg;
+
+ for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
+ dbg_ring = &dbg->dbg_rings[id];
+ if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
+ __dhd_dbg_get_ring_status(dbg_ring, dbg_ring_status);
+ break;
+ }
+ }
+ if (!VALID_RING(id)) {
+ DHD_ERROR(("%s : cannot find the ring_id : %d\n", __FUNCTION__, ring_id));
+ ret = BCME_NOTFOUND;
+ }
+ return ret;
+}
+
+#ifdef SHOW_LOGTRACE
+void
+dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info)
+{
+ dhd_dbg_ring_status_t ring_status;
+ uint32 rlen = 0;
+
+ rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE);
+
+ trace_buf_info->size = rlen;
+ trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
+ if (rlen == 0) {
+ trace_buf_info->availability = BUF_NOT_AVAILABLE;
+ return;
+ }
+
+ __dhd_dbg_get_ring_status(ring, &ring_status);
+
+ if (ring_status.written_bytes != ring_status.read_bytes) {
+ trace_buf_info->availability = NEXT_BUF_AVAIL;
+ }
+}
+#endif /* SHOW_LOGTRACE */
+
+/*
+* dhd_dbg_find_ring_id : return the ring_id matching ring_name
+* Return: A valid ring id on success, or an invalid ring id on failure.
+*/
+
+int
+dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name)
+{
+ int id;
+ dhd_dbg_t *dbg;
+ dhd_dbg_ring_t *ring;
+
+ if (!dhdp || !dhdp->dbg)
+ return BCME_BADADDR;
+
+ dbg = dhdp->dbg;
+ for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
+ ring = &dbg->dbg_rings[id];
+ if (!strncmp((char *)ring->name, ring_name, sizeof(ring->name) - 1))
+ break;
+ }
+ return id;
+}
+
+/*
+* dhd_dbg_get_priv : get the private data of the dhd debuggability module
+* Return : NULL on failure, or a valid data address
+*/
+void *
+dhd_dbg_get_priv(dhd_pub_t *dhdp)
+{
+ if (!dhdp || !dhdp->dbg)
+ return NULL;
+ return dhdp->dbg->private;
+}
+
+/*
+* dhd_dbg_start : start or stop all of the ring buffers
+* Return: An error code or 0 on success.
+*/
+int
+dhd_dbg_start(dhd_pub_t *dhdp, bool start)
+{
+ int ret = BCME_OK;
+ int ring_id;
+ dhd_dbg_t *dbg;
+ dhd_dbg_ring_t *dbg_ring;
+ if (!dhdp)
+ return BCME_BADARG;
+ dbg = dhdp->dbg;
+
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+ dbg_ring = &dbg->dbg_rings[ring_id];
+ if (!start) {
+ if (VALID_RING(dbg_ring->id)) {
+ dhd_dbg_ring_start(dbg_ring);
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * dhd_dbg_send_urgent_evt: send the health check event to the upper layer
+ *
+ * Return: An error code or 0 on success.
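+ *
+ * Usage sketch (the callback name is illustrative, not part of
+ * this driver): a layer that registered a notifier at attach
+ * time, e.g.
+ * dhdp->dbg->urgent_notifier = my_urgent_cb;
+ * gets called back synchronously from this function:
+ * dhd_dbg_send_urgent_evt(dhdp, hc_data, hc_len);
+ * -> my_urgent_cb(dhdp, hc_data, hc_len);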
+ */ + +int +dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len) +{ + dhd_dbg_t *dbg; + int ret = BCME_OK; + if (!dhdp || !dhdp->dbg) + return BCME_BADADDR; + + dbg = dhdp->dbg; + if (dbg->urgent_notifier) { + dbg->urgent_notifier(dhdp, data, len); + } + return ret; +} + +#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING) +uint32 +__dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid) +{ + uint32 __pkt; + uint32 __pktid; + + __pkt = ((int)pkt) >= 0 ? (2 * pkt) : (-2 * pkt - 1); + __pktid = ((int)pktid) >= 0 ? (2 * pktid) : (-2 * pktid - 1); + + return (__pkt >= __pktid ? (__pkt * __pkt + __pkt + __pktid) : + (__pkt + __pktid * __pktid)); +} + +#define __TIMESPEC_TO_US(ts) \ + (((uint32)(ts).tv_sec * USEC_PER_SEC) + ((ts).tv_nsec / NSEC_PER_USEC)) + +uint32 +__dhd_dbg_driver_ts_usec(void) +{ + struct osl_timespec ts; + + osl_get_monotonic_boottime(&ts); + return ((uint32)(__TIMESPEC_TO_US(ts))); +} + +wifi_tx_packet_fate +__dhd_dbg_map_tx_status_to_pkt_fate(uint16 status) +{ + wifi_tx_packet_fate pkt_fate; + + switch (status) { + case WLFC_CTL_PKTFLAG_DISCARD: + pkt_fate = TX_PKT_FATE_ACKED; + break; + case WLFC_CTL_PKTFLAG_D11SUPPRESS: + /* intensional fall through */ + case WLFC_CTL_PKTFLAG_WLSUPPRESS: + pkt_fate = TX_PKT_FATE_FW_QUEUED; + break; + case WLFC_CTL_PKTFLAG_TOSSED_BYWLC: + pkt_fate = TX_PKT_FATE_FW_DROP_INVALID; + break; + case WLFC_CTL_PKTFLAG_DISCARD_NOACK: + pkt_fate = TX_PKT_FATE_SENT; + break; + case WLFC_CTL_PKTFLAG_EXPIRED: + pkt_fate = TX_PKT_FATE_FW_DROP_EXPTIME; + break; +#ifndef OEM_ANDROID + case WLFC_CTL_PKTFLAG_MKTFREE: + pkt_fate = TX_PKT_FATE_FW_PKT_FREE; + break; +#endif /* !OEM_ANDROID */ + default: + pkt_fate = TX_PKT_FATE_FW_DROP_OTHER; + break; + } + + return pkt_fate; +} +#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */ + +#ifdef DBG_PKT_MON +static int +__dhd_dbg_free_tx_pkts(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkts, + uint16 pkt_count) +{ + uint16 count; + + count = 0; + while ((count < pkt_count) && tx_pkts) { + if (tx_pkts->info.pkt) { + PKTFREE(dhdp->osh, tx_pkts->info.pkt, TRUE); + } + tx_pkts++; + count++; + } + + return BCME_OK; +} + +static int +__dhd_dbg_free_rx_pkts(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkts, + uint16 pkt_count) +{ + uint16 count; + + count = 0; + while ((count < pkt_count) && rx_pkts) { + if (rx_pkts->info.pkt) { + PKTFREE(dhdp->osh, rx_pkts->info.pkt, TRUE); + } + rx_pkts++; + count++; + } + + return BCME_OK; +} + +void +__dhd_dbg_dump_pkt_info(dhd_pub_t *dhdp, dhd_dbg_pkt_info_t *info) +{ + if (DHD_PKT_MON_DUMP_ON()) { + DHD_PKT_MON(("payload type = %d\n", info->payload_type)); + DHD_PKT_MON(("driver ts = %u\n", info->driver_ts)); + DHD_PKT_MON(("firmware ts = %u\n", info->firmware_ts)); + DHD_PKT_MON(("packet hash = %u\n", info->pkt_hash)); + DHD_PKT_MON(("packet length = %zu\n", info->pkt_len)); + DHD_PKT_MON(("packet address = %p\n", info->pkt)); + DHD_PKT_MON(("packet data = \n")); + if (DHD_PKT_MON_ON()) { + prhex(NULL, PKTDATA(dhdp->osh, info->pkt), info->pkt_len); + } + } +} + +void +__dhd_dbg_dump_tx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkt, + uint16 count) +{ + if (DHD_PKT_MON_DUMP_ON()) { + DHD_PKT_MON(("\nTX (count: %d)\n", ++count)); + DHD_PKT_MON(("packet fate = %d\n", tx_pkt->fate)); + __dhd_dbg_dump_pkt_info(dhdp, &tx_pkt->info); + } +} + +void +__dhd_dbg_dump_rx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkt, + uint16 count) +{ + if (DHD_PKT_MON_DUMP_ON()) { + DHD_PKT_MON(("\nRX (count: %d)\n", ++count)); + DHD_PKT_MON(("packet fate = %d\n", rx_pkt->fate)); + 
__dhd_dbg_dump_pkt_info(dhdp, &rx_pkt->info);
+ }
+}
+
+int
+dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
+ dbg_mon_tx_pkts_t tx_pkt_mon,
+ dbg_mon_tx_status_t tx_status_mon,
+ dbg_mon_rx_pkts_t rx_pkt_mon)
+{
+ dhd_dbg_tx_report_t *tx_report = NULL;
+ dhd_dbg_rx_report_t *rx_report = NULL;
+ dhd_dbg_tx_info_t *tx_pkts = NULL;
+ dhd_dbg_rx_info_t *rx_pkts = NULL;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ uint32 alloc_len;
+ int ret = BCME_OK;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ if (PKT_MON_ATTACHED(tx_pkt_state) || PKT_MON_ATTACHED(tx_status_state) ||
+ PKT_MON_ATTACHED(rx_pkt_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is already attached, "
+ "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+ __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ /* return success as the intention was to initialize packet monitor */
+ return BCME_OK;
+ }
+
+ /* allocate and initialize tx packet monitoring */
+ alloc_len = sizeof(*tx_report);
+ tx_report = (dhd_dbg_tx_report_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!tx_report)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_tx_report_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
+ tx_pkts = (dhd_dbg_tx_info_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!tx_pkts)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_tx_info_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+ dhdp->dbg->pkt_mon.tx_report = tx_report;
+ dhdp->dbg->pkt_mon.tx_report->tx_pkts = tx_pkts;
+ dhdp->dbg->pkt_mon.tx_pkt_mon = tx_pkt_mon;
+ dhdp->dbg->pkt_mon.tx_status_mon = tx_status_mon;
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_ATTACHED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_ATTACHED;
+
+ /* allocate and initialize rx packet monitoring */
+ alloc_len = sizeof(*rx_report);
+ rx_report = (dhd_dbg_rx_report_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!rx_report)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_rx_report_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
+ rx_pkts = (dhd_dbg_rx_info_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!rx_pkts)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_rx_info_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+ dhdp->dbg->pkt_mon.rx_report = rx_report;
+ dhdp->dbg->pkt_mon.rx_report->rx_pkts = rx_pkts;
+ dhdp->dbg->pkt_mon.rx_pkt_mon = rx_pkt_mon;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_ATTACHED;
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ DHD_PKT_MON(("%s(): packet monitor attach succeeded\n", __FUNCTION__));
+ return ret;
+
+fail:
+ /* tx packet monitoring */
+ if (tx_pkts) {
+ alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
+ MFREE(dhdp->osh, tx_pkts, alloc_len);
+ }
+ if (tx_report) {
+ alloc_len = sizeof(*tx_report);
+ MFREE(dhdp->osh, tx_report, alloc_len);
+ }
+ /* the report structs were freed above (or never allocated), so
+ * only the struct pointers are cleared here; writing through
+ * them would be a NULL/stale dereference.
+ */
+ dhdp->dbg->pkt_mon.tx_report = NULL;
+ dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
+ dhdp->dbg->pkt_mon.tx_status_mon = NULL;
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
+
+ /* rx packet monitoring */
+ if (rx_pkts) {
+ alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
+ MFREE(dhdp->osh, rx_pkts, alloc_len);
+ }
+ if (rx_report) {
+ alloc_len = sizeof(*rx_report);
+ MFREE(dhdp->osh, rx_report, alloc_len);
+ }
+ dhdp->dbg->pkt_mon.rx_report = NULL;
+ dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ DHD_ERROR(("%s(): packet monitor attach failed\n", __FUNCTION__));
+ return ret;
+}
+
+int
+dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp)
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_rx_report_t *rx_report;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+ PKT_MON_DETACHED(rx_pkt_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
+ "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+ __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTING;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTING;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTING;
+
+ tx_report = dhdp->dbg->pkt_mon.tx_report;
+ rx_report = dhdp->dbg->pkt_mon.rx_report;
+ if (!tx_report || !rx_report) {
+ DHD_PKT_MON(("%s(): tx_report=%p, rx_report=%p\n",
+ __FUNCTION__, tx_report, rx_report));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ /* Safe to free packets as state pkt_state is STARTING */
+ __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, tx_report->pkt_pos);
+
+ __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, rx_report->pkt_pos);
+
+ /* reset array position */
+ tx_report->pkt_pos = 0;
+ tx_report->status_pos = 0;
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTED;
+
+ rx_report->pkt_pos = 0;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTED;
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+
+ DHD_PKT_MON(("%s(): packet monitor started\n", __FUNCTION__));
+ return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid)
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_tx_info_t *tx_pkts;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ uint32 pkt_hash, driver_ts;
+ uint16 pkt_pos;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ?
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + if (PKT_MON_STARTED(tx_pkt_state)) { + tx_report = dhdp->dbg->pkt_mon.tx_report; + pkt_pos = tx_report->pkt_pos; + + if (!PKT_MON_PKT_FULL(pkt_pos)) { + tx_pkts = tx_report->tx_pkts; + pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid); + driver_ts = __dhd_dbg_driver_ts_usec(); + + tx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt); + tx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt); + tx_pkts[pkt_pos].info.pkt_hash = pkt_hash; + tx_pkts[pkt_pos].info.driver_ts = driver_ts; + tx_pkts[pkt_pos].info.firmware_ts = 0U; + tx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II; + tx_pkts[pkt_pos].fate = TX_PKT_FATE_DRV_QUEUED; + + tx_report->pkt_pos++; + } else { + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED; + DHD_PKT_MON(("%s(): tx pkt logging stopped, reached " + "max limit\n", __FUNCTION__)); + } + } + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return BCME_OK; +} + +int +dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, + uint16 status) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_tx_info_t *tx_pkt; + dhd_dbg_pkt_mon_state_t tx_status_state; + wifi_tx_packet_fate pkt_fate; + uint32 pkt_hash, temp_hash; + uint16 pkt_pos, status_pos; + int16 count; + bool found = FALSE; + unsigned long flags; + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + if (PKT_MON_STARTED(tx_status_state)) { + tx_report = dhdp->dbg->pkt_mon.tx_report; + pkt_pos = tx_report->pkt_pos; + status_pos = tx_report->status_pos; + + if (!PKT_MON_STATUS_FULL(pkt_pos, status_pos)) { + pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid); + pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status); + + /* best bet (in-order tx completion) */ + count = status_pos; + tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + status_pos); + while ((count < pkt_pos) && tx_pkt) { + temp_hash = tx_pkt->info.pkt_hash; + if (temp_hash == pkt_hash) { + tx_pkt->fate = pkt_fate; + tx_report->status_pos++; + found = TRUE; + break; + } + tx_pkt++; + count++; + } + + /* search until beginning (handles out-of-order completion) */ + if (!found) { + count = status_pos - 1; + tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + count); + while ((count >= 0) && tx_pkt) { + temp_hash = tx_pkt->info.pkt_hash; + if (temp_hash == pkt_hash) { + tx_pkt->fate = pkt_fate; + tx_report->status_pos++; + found = TRUE; + break; + } + tx_pkt--; + count--; + } + + if (!found) { + /* still couldn't match tx_status */ + DHD_INFO(("%s(): couldn't match tx_status, pkt_pos=%u, " + "status_pos=%u, pkt_fate=%u\n", __FUNCTION__, + pkt_pos, status_pos, pkt_fate)); + } + } + } else { + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED; + DHD_PKT_MON(("%s(): tx_status logging stopped, reached " + "max limit\n", __FUNCTION__)); + } + } + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return BCME_OK; +} + +int +dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt) +{ + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_rx_info_t *rx_pkts; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + uint32 driver_ts; + uint16 pkt_pos; + unsigned long flags; + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + if (PKT_MON_STARTED(rx_pkt_state)) { + rx_report = dhdp->dbg->pkt_mon.rx_report; + pkt_pos = rx_report->pkt_pos; + + if (!PKT_MON_PKT_FULL(pkt_pos)) { + rx_pkts = rx_report->rx_pkts; + driver_ts = __dhd_dbg_driver_ts_usec(); + + rx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt); + rx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt); + rx_pkts[pkt_pos].info.pkt_hash = 0U; + rx_pkts[pkt_pos].info.driver_ts = driver_ts; + rx_pkts[pkt_pos].info.firmware_ts = 0U; + rx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II; + rx_pkts[pkt_pos].fate = RX_PKT_FATE_SUCCESS; + + rx_report->pkt_pos++; + } else { + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED; + DHD_PKT_MON(("%s(): rx pkt logging stopped, reached " + "max limit\n", __FUNCTION__)); + } + } + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return BCME_OK; +} + +int +dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp) +{ + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + unsigned long flags; + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) || + PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is not yet enabled, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED; + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + + DHD_PKT_MON(("%s(): packet monitor stopped\n", __FUNCTION__)); + return BCME_OK; +} + +#define __COPY_TO_USER(to, from, n) \ + do { \ + int __ret; \ + __ret = copy_to_user((void __user *)(to), (void *)(from), \ + (unsigned long)(n)); \ + if (unlikely(__ret)) { \ + DHD_ERROR(("%s():%d: copy_to_user failed, ret=%d\n", \ + __FUNCTION__, __LINE__, __ret)); \ + return __ret; \ + } \ + } while (0); + +int +dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_tx_info_t *tx_pkt; + wifi_tx_report_t *ptr; + compat_wifi_tx_report_t *cptr; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + uint16 pkt_count, count; + unsigned long flags; + + BCM_REFERENCE(ptr); + BCM_REFERENCE(cptr); + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state)) { + DHD_PKT_MON(("%s(): packet monitor is not yet enabled, " + "tx_pkt_state=%d, tx_status_state=%d\n", __FUNCTION__, + tx_pkt_state, tx_status_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + count = 0; + tx_report = dhdp->dbg->pkt_mon.tx_report; + tx_pkt = tx_report->tx_pkts; + pkt_count = MIN(req_count, tx_report->status_pos); + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { + cptr = (compat_wifi_tx_report_t *)user_buf; + while ((count < pkt_count) && tx_pkt && cptr) { + compat_wifi_tx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr); + compat_dhd_dbg_pkt_info_t compat_tx_pkt; + __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count); + __COPY_TO_USER(&comp_ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate)); + + compat_tx_pkt.payload_type = tx_pkt->info.payload_type; + compat_tx_pkt.pkt_len = tx_pkt->info.pkt_len; + compat_tx_pkt.driver_ts = tx_pkt->info.driver_ts; + compat_tx_pkt.firmware_ts = tx_pkt->info.firmware_ts; + compat_tx_pkt.pkt_hash = tx_pkt->info.pkt_hash; + __COPY_TO_USER(&comp_ptr->frame_inf.payload_type, + &compat_tx_pkt.payload_type, + OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len); + + cptr++; + tx_pkt++; + count++; + } + } else +#endif /* CONFIG_COMPAT */ + { + ptr = (wifi_tx_report_t *)user_buf; + while ((count < pkt_count) && tx_pkt && ptr) { + __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count); + __COPY_TO_USER(&ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate)); + __COPY_TO_USER(&ptr->frame_inf.payload_type, + &tx_pkt->info.payload_type, + OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len); + + ptr++; + tx_pkt++; + count++; + } + } + *resp_count = pkt_count; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + if (!pkt_count) { + DHD_ERROR(("%s(): no tx_status in tx completion messages, " + "make sure that 'd11status' is enabled in firmware, " + "status_pos=%u\n", __FUNCTION__, pkt_count)); + } + + return BCME_OK; +} + +int +dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_rx_info_t *rx_pkt; + wifi_rx_report_t *ptr; + compat_wifi_rx_report_t *cptr; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + uint16 pkt_count, count; + unsigned long flags; + + BCM_REFERENCE(ptr); + BCM_REFERENCE(cptr); + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + if (PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet fetch is not allowed , " + "rx_pkt_state=%d\n", __FUNCTION__, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + count = 0; + rx_report = dhdp->dbg->pkt_mon.rx_report; + rx_pkt = rx_report->rx_pkts; + pkt_count = MIN(req_count, rx_report->pkt_pos); + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { + cptr = (compat_wifi_rx_report_t *)user_buf; + while ((count < pkt_count) && rx_pkt && cptr) { + compat_wifi_rx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr); + compat_dhd_dbg_pkt_info_t compat_rx_pkt; + __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count); + __COPY_TO_USER(&comp_ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate)); + + compat_rx_pkt.payload_type = rx_pkt->info.payload_type; + compat_rx_pkt.pkt_len = rx_pkt->info.pkt_len; + compat_rx_pkt.driver_ts = rx_pkt->info.driver_ts; + compat_rx_pkt.firmware_ts = rx_pkt->info.firmware_ts; + compat_rx_pkt.pkt_hash = rx_pkt->info.pkt_hash; + __COPY_TO_USER(&comp_ptr->frame_inf.payload_type, + &compat_rx_pkt.payload_type, + OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len); + + cptr++; + rx_pkt++; + count++; + } + } else +#endif /* CONFIG_COMPAT */ + { + ptr = (wifi_rx_report_t *)user_buf; + while ((count < pkt_count) && rx_pkt && ptr) { + __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count); + + __COPY_TO_USER(&ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate)); + __COPY_TO_USER(&ptr->frame_inf.payload_type, + &rx_pkt->info.payload_type, + OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len); + + ptr++; + rx_pkt++; + count++; + } + } + + *resp_count = pkt_count; + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + + return BCME_OK; +} + +int +dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + unsigned long flags; + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+ PKT_MON_DETACHED(rx_pkt_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is already detached, "
+ "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+ __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ tx_report = dhdp->dbg->pkt_mon.tx_report;
+ rx_report = dhdp->dbg->pkt_mon.rx_report;
+
+ /* free and de-initialize tx packet monitoring */
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
+ if (tx_report) {
+ if (tx_report->tx_pkts) {
+ __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts,
+ tx_report->pkt_pos);
+ MFREE(dhdp->osh, tx_report->tx_pkts,
+ (sizeof(*tx_report->tx_pkts) * MAX_FATE_LOG_LEN));
+ dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL;
+ }
+ MFREE(dhdp->osh, tx_report, sizeof(*tx_report));
+ dhdp->dbg->pkt_mon.tx_report = NULL;
+ }
+ dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
+ dhdp->dbg->pkt_mon.tx_status_mon = NULL;
+
+ /* free and de-initialize rx packet monitoring */
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
+ if (rx_report) {
+ if (rx_report->rx_pkts) {
+ __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts,
+ rx_report->pkt_pos);
+ MFREE(dhdp->osh, rx_report->rx_pkts,
+ (sizeof(*rx_report->rx_pkts) * MAX_FATE_LOG_LEN));
+ dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL;
+ }
+ MFREE(dhdp->osh, rx_report, sizeof(*rx_report));
+ dhdp->dbg->pkt_mon.rx_report = NULL;
+ }
+ dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__));
+ return BCME_OK;
+}
+#endif /* DBG_PKT_MON */
+
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+/*
+ * XXX: WAR: Because the DMA marker field is overloaded, the tx_status
+ * in the TX completion message cannot be used. As a WAR, the d11
+ * tx_status is sent through the otherwise unused status field of the
+ * PCIe completion header.
+ */
+bool
+dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
+ uint16 status)
+{
+ bool pkt_fate = TRUE;
+ if (dhdp->d11_tx_status) {
+ pkt_fate = (status == WLFC_CTL_PKTFLAG_DISCARD) ?
TRUE : FALSE; + DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status); + } + return pkt_fate; +} +#else /* DBG_PKT_MON || DHD_PKT_LOGGING */ +bool +dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status) +{ + return TRUE; +} +#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */ + +#define EL_LOG_STR_LEN 512 + +#define PRINT_CHN_PER_LINE 8 +#define PRINT_CHAN_LINE(cnt) \ +{\ + cnt ++; \ + if (cnt >= PRINT_CHN_PER_LINE) { \ + DHD_ERROR(("%s\n", b.origbuf)); \ + bcm_binit(&b, pr_buf, EL_LOG_STR_LEN); \ + bcm_bprintf(&b, "%s: ", prefix); \ + cnt = 0; \ + } \ +} + +void print_roam_chan_list(char *prefix, uint chan_num, uint16 band_2g, + uint16 uni2a, uint8 uni3, uint8 *uni2c) +{ + struct bcmstrbuf b; + char pr_buf[EL_LOG_STR_LEN] = { 0 }; + int cnt = 0; + int idx, idx2; + + bcm_binit(&b, pr_buf, EL_LOG_STR_LEN); + bcm_bprintf(&b, "%s: count(%d)", prefix, chan_num); + /* 2G channnels */ + for (idx = 0; idx < NBITS(uint16); idx++) { + if (BCM_BIT(idx) & band_2g) { + bcm_bprintf(&b, " %d", idx); + PRINT_CHAN_LINE(cnt); + + } + } + + /* 5G UNII BAND 1, UNII BAND 2A */ + for (idx = 0; idx < NBITS(uint16); idx++) { + if (BCM_BIT(idx) & uni2a) { + bcm_bprintf(&b, " %u", ROAM_CHN_UNI_2A + idx * ROAM_CHN_SPACE); + PRINT_CHAN_LINE(cnt); + } + } + + /* 5G UNII BAND 2C */ + for (idx2 = 0; idx2 < 3; idx2++) { + for (idx = 0; idx < NBITS(uint8); idx++) { + if (BCM_BIT(idx) & uni2c[idx2]) { + bcm_bprintf(&b, " %u", ROAM_CHN_UNI_2C + + idx2 * ROAM_CHN_SPACE * NBITS(uint8) + + idx * ROAM_CHN_SPACE); + PRINT_CHAN_LINE(cnt); + } + } + } + + /* 5G UNII BAND 3 */ + for (idx = 0; idx < NBITS(uint8); idx++) { + if (BCM_BIT(idx) & uni3) { + bcm_bprintf(&b, " %u", ROAM_CHN_UNI_3 + idx * ROAM_CHN_SPACE); + PRINT_CHAN_LINE(cnt); + } + } + + if (cnt != 0) { + DHD_ERROR(("%s\n", b.origbuf)); + } +} + +void pr_roam_scan_start_v1(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_scan_cmpl_v1(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_cmpl_v1(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_nbr_req_v1(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_nbr_rep_v1(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_bcn_req_v1(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_bcn_rep_v1(prcd_event_log_hdr_t *plog_hdr); + +void pr_roam_scan_start_v2(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_scan_cmpl_v2(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_nbr_rep_v2(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_bcn_rep_v2(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_btm_rep_v2(prcd_event_log_hdr_t *plog_hdr); + +void pr_roam_bcn_req_v3(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_bcn_rep_v3(prcd_event_log_hdr_t *plog_hdr); +void pr_roam_btm_rep_v3(prcd_event_log_hdr_t *plog_hdr); + +static const pr_roam_tbl_t roam_log_print_tbl[] = +{ + {ROAM_LOG_VER_1, ROAM_LOG_SCANSTART, pr_roam_scan_start_v1}, + {ROAM_LOG_VER_1, ROAM_LOG_SCAN_CMPLT, pr_roam_scan_cmpl_v1}, + {ROAM_LOG_VER_1, ROAM_LOG_ROAM_CMPLT, pr_roam_cmpl_v1}, + {ROAM_LOG_VER_1, ROAM_LOG_NBR_REQ, pr_roam_nbr_req_v1}, + {ROAM_LOG_VER_1, ROAM_LOG_NBR_REP, pr_roam_nbr_rep_v1}, + {ROAM_LOG_VER_1, ROAM_LOG_BCN_REQ, pr_roam_bcn_req_v1}, + {ROAM_LOG_VER_1, ROAM_LOG_BCN_REP, pr_roam_bcn_rep_v1}, + + {ROAM_LOG_VER_2, ROAM_LOG_SCANSTART, pr_roam_scan_start_v2}, + {ROAM_LOG_VER_2, ROAM_LOG_SCAN_CMPLT, pr_roam_scan_cmpl_v2}, + {ROAM_LOG_VER_2, ROAM_LOG_ROAM_CMPLT, pr_roam_cmpl_v1}, + {ROAM_LOG_VER_2, ROAM_LOG_NBR_REQ, pr_roam_nbr_req_v1}, + {ROAM_LOG_VER_2, ROAM_LOG_NBR_REP, pr_roam_nbr_rep_v2}, + {ROAM_LOG_VER_2, ROAM_LOG_BCN_REQ, pr_roam_bcn_req_v1}, + {ROAM_LOG_VER_2, 
ROAM_LOG_BCN_REP, pr_roam_bcn_rep_v2}, + {ROAM_LOG_VER_2, ROAM_LOG_BTM_REP, pr_roam_btm_rep_v2}, + + {ROAM_LOG_VER_3, ROAM_LOG_SCANSTART, pr_roam_scan_start_v2}, + {ROAM_LOG_VER_3, ROAM_LOG_SCAN_CMPLT, pr_roam_scan_cmpl_v2}, + {ROAM_LOG_VER_3, ROAM_LOG_ROAM_CMPLT, pr_roam_cmpl_v1}, + {ROAM_LOG_VER_3, ROAM_LOG_NBR_REQ, pr_roam_nbr_req_v1}, + {ROAM_LOG_VER_3, ROAM_LOG_NBR_REP, pr_roam_nbr_rep_v2}, + {ROAM_LOG_VER_3, ROAM_LOG_BCN_REQ, pr_roam_bcn_req_v3}, + {ROAM_LOG_VER_3, ROAM_LOG_BCN_REP, pr_roam_bcn_rep_v3}, + {ROAM_LOG_VER_3, ROAM_LOG_BTM_REP, pr_roam_btm_rep_v3}, + + {0, PRSV_PERIODIC_ID_MAX, NULL} + +}; + +void pr_roam_scan_start_v1(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_trig_v1_t *log = (roam_log_trig_v1_t *)plog_hdr->log_ptr; + + DHD_ERROR_ROAM(("ROAM_LOG_SCANSTART time: %d," + " version:%d reason: %d rssi:%d cu:%d result:%d\n", + plog_hdr->armcycle, log->hdr.version, log->reason, + log->rssi, log->current_cu, log->result)); + if (log->reason == WLC_E_REASON_DEAUTH || + log->reason == WLC_E_REASON_DISASSOC) { + DHD_ERROR_ROAM((" ROAM_LOG_PRT_ROAM: RCVD reason:%d\n", + log->prt_roam.rcvd_reason)); + } else if (log->reason == WLC_E_REASON_BSSTRANS_REQ) { + DHD_ERROR_ROAM((" ROAM_LOG_BSS_REQ: mode:%d candidate:%d token:%d " + "duration disassoc:%d valid:%d term:%d\n", + log->bss_trans.req_mode, log->bss_trans.nbrlist_size, + log->bss_trans.token, log->bss_trans.disassoc_dur, + log->bss_trans.validity_dur, log->bss_trans.bss_term_dur)); + } +} + +void pr_roam_scan_cmpl_v1(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_scan_cmplt_v1_t *log = (roam_log_scan_cmplt_v1_t *)plog_hdr->log_ptr; + char chanspec_buf[CHANSPEC_STR_LEN]; + int i; + + DHD_ERROR_ROAM(("ROAM_LOG_SCAN_CMPL: time:%d version:%d" + "is_full:%d scan_count:%d score_delta:%d", + plog_hdr->armcycle, log->hdr.version, log->full_scan, + log->scan_count, log->score_delta)); + DHD_ERROR_ROAM((" ROAM_LOG_CUR_AP: " MACDBG "rssi:%d score:%d channel:%s\n", + MAC2STRDBG((uint8 *)&log->cur_info.addr), + log->cur_info.rssi, + log->cur_info.score, + wf_chspec_ntoa_ex(log->cur_info.chanspec, chanspec_buf))); + for (i = 0; i < log->scan_list_size; i++) { + DHD_ERROR_ROAM((" ROAM_LOG_CANDIDATE %d: " MACDBG + "rssi:%d score:%d channel:%s TPUT:%dkbps\n", + i, MAC2STRDBG((uint8 *)&log->scan_list[i].addr), + log->scan_list[i].rssi, log->scan_list[i].score, + wf_chspec_ntoa_ex(log->scan_list[i].chanspec, + chanspec_buf), + log->scan_list[i].estm_tput != ROAM_LOG_INVALID_TPUT? 
+ log->scan_list[i].estm_tput : 0)); + } +} + +void pr_roam_cmpl_v1(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_cmplt_v1_t *log = (roam_log_cmplt_v1_t *)plog_hdr->log_ptr; + char chanspec_buf[CHANSPEC_STR_LEN]; + + DHD_ERROR_ROAM(("ROAM_LOG_ROAM_CMPL: time: %d, version:%d" + "status: %d reason: %d channel:%s retry:%d " MACDBG "\n", + plog_hdr->armcycle, log->hdr.version, log->status, log->reason, + wf_chspec_ntoa_ex(log->chanspec, chanspec_buf), + log->retry, MAC2STRDBG((uint8 *)&log->addr))); +} + +void pr_roam_nbr_req_v1(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_nbrreq_v1_t *log = (roam_log_nbrreq_v1_t *)plog_hdr->log_ptr; + + DHD_ERROR_ROAM(("ROAM_LOG_NBR_REQ: time: %d, version:%d token:%d\n", + plog_hdr->armcycle, log->hdr.version, log->token)); +} + +void pr_roam_nbr_rep_v1(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_nbrrep_v1_t *log = (roam_log_nbrrep_v1_t *)plog_hdr->log_ptr; + + DHD_ERROR_ROAM(("ROAM_LOG_NBR_REP: time:%d, version:%d chan_num:%d\n", + plog_hdr->armcycle, log->hdr.version, log->channel_num)); +} + +void pr_roam_bcn_req_v1(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_bcnrpt_req_v1_t *log = (roam_log_bcnrpt_req_v1_t *)plog_hdr->log_ptr; + + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: time:%d, version:%d ret:%d" + "class:%d num_chan:%d ", + plog_hdr->armcycle, log->hdr.version, + log->result, log->reg, log->channel)); + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: mode:%d is_wild:%d duration:%d" + "ssid_len:%d\n", log->mode, log->bssid_wild, + log->duration, log->ssid_len)); +} + +void pr_roam_bcn_rep_v1(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_bcnrpt_rep_v1_t *log = (roam_log_bcnrpt_rep_v1_t *)plog_hdr->log_ptr; + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: time:%d, version:%d count:%d\n", + plog_hdr->armcycle, log->hdr.version, + log->count)); +} + +void pr_roam_scan_start_v2(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_trig_v2_t *log = (roam_log_trig_v2_t *)plog_hdr->log_ptr; + DHD_ERROR_ROAM(("ROAM_LOG_SCANSTART time: %d," + " version:%d reason: %d rssi:%d cu:%d result:%d full_scan:%d\n", + plog_hdr->armcycle, log->hdr.version, log->reason, + log->rssi, log->current_cu, log->result, + log->result ? (-1) : log->full_scan)); + if (log->reason == WLC_E_REASON_DEAUTH || + log->reason == WLC_E_REASON_DISASSOC) { + DHD_ERROR_ROAM((" ROAM_LOG_PRT_ROAM: RCVD reason:%d\n", + log->prt_roam.rcvd_reason)); + } else if (log->reason == WLC_E_REASON_BSSTRANS_REQ) { + DHD_ERROR_ROAM((" ROAM_LOG_BSS_REQ: mode:%d candidate:%d token:%d " + "duration disassoc:%d valid:%d term:%d\n", + log->bss_trans.req_mode, log->bss_trans.nbrlist_size, + log->bss_trans.token, log->bss_trans.disassoc_dur, + log->bss_trans.validity_dur, log->bss_trans.bss_term_dur)); + } else if (log->reason == WLC_E_REASON_LOW_RSSI) { + DHD_ERROR_ROAM((" ROAM_LOG_LOW_RSSI: threshold:%d\n", + log->low_rssi.rssi_threshold)); + } +}
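All of these printers funnel through DHD_ERROR_ROAM((...)), the double-parenthesis idiom that lets a pre-C99 macro take a variable argument list. A sketch of how such a macro is typically wired; LOG_ROAM and log_enabled are illustrative names, not the driver's definitions:

#include <stdio.h>

static int log_enabled = 1;

/* The inner parentheses make `args` a complete argument list, so
 * LOG_ROAM(("fmt %d\n", v)) expands to printf("fmt %d\n", v). */
#define LOG_ROAM(args) \
    do { \
        if (log_enabled) \
            printf args; \
    } while (0)

int main(void)
{
    int rssi = -62;
    LOG_ROAM(("ROAM_LOG: rssi:%d\n", rssi));
    return 0;
}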
+void pr_roam_scan_cmpl_v2(prcd_event_log_hdr_t *plog_hdr) +{ + int i; + roam_log_scan_cmplt_v2_t *log = (roam_log_scan_cmplt_v2_t *)plog_hdr->log_ptr; + char chanspec_buf[CHANSPEC_STR_LEN]; + + DHD_ERROR_ROAM(("ROAM_LOG_SCAN_CMPL: time:%d version:%d" + "scan_count:%d score_delta:%d", + plog_hdr->armcycle, log->hdr.version, + log->scan_count, log->score_delta)); + DHD_ERROR_ROAM((" ROAM_LOG_CUR_AP: " MACDBG "rssi:%d score:%d channel:%s\n", + MAC2STRDBG((uint8 *)&log->cur_info.addr), + log->cur_info.rssi, + log->cur_info.score, + wf_chspec_ntoa_ex(log->cur_info.chanspec, chanspec_buf))); + for (i = 0; i < log->scan_list_size; i++) { + DHD_ERROR_ROAM((" ROAM_LOG_CANDIDATE %d: " MACDBG + "rssi:%d score:%d cu:%d channel:%s TPUT:%dkbps\n", + i, MAC2STRDBG((uint8 *)&log->scan_list[i].addr), + log->scan_list[i].rssi, log->scan_list[i].score, + log->scan_list[i].cu * 100 / WL_MAX_CHANNEL_USAGE, + wf_chspec_ntoa_ex(log->scan_list[i].chanspec, + chanspec_buf), + log->scan_list[i].estm_tput != ROAM_LOG_INVALID_TPUT ? + log->scan_list[i].estm_tput : 0)); + } + if (log->chan_num != 0) { + print_roam_chan_list("ROAM_LOG_SCAN_CHANLIST", log->chan_num, + log->band2g_chan_list, log->uni2a_chan_list, + log->uni3_chan_list, log->uni2c_chan_list); + } +} + +void pr_roam_nbr_rep_v2(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_nbrrep_v2_t *log = (roam_log_nbrrep_v2_t *)plog_hdr->log_ptr; + DHD_ERROR_ROAM(("ROAM_LOG_NBR_REP: time:%d, version:%d chan_num:%d\n", + plog_hdr->armcycle, log->hdr.version, log->channel_num)); + if (log->channel_num != 0) { + print_roam_chan_list("ROAM_LOG_NBR_REP_CHANLIST", log->channel_num, + log->band2g_chan_list, log->uni2a_chan_list, + log->uni3_chan_list, log->uni2c_chan_list); + } +} + +void pr_roam_bcn_rep_v2(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_bcnrpt_rep_v2_t *log = (roam_log_bcnrpt_rep_v2_t *)plog_hdr->log_ptr; + + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: time:%d, version:%d count:%d mode:%d\n", + plog_hdr->armcycle, log->hdr.version, + log->count, log->reason)); +} + +void pr_roam_btm_rep_v2(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_btm_rep_v2_t *log = (roam_log_btm_rep_v2_t *)plog_hdr->log_ptr; + DHD_ERROR_ROAM(("ROAM_LOG_BTM_REP: time:%d version:%d req_mode:%d " + "status:%d ret:%d\n", + plog_hdr->armcycle, log->hdr.version, + log->req_mode, log->status, log->result)); +} + +void pr_roam_bcn_req_v3(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_bcnrpt_req_v3_t *log = (roam_log_bcnrpt_req_v3_t *)plog_hdr->log_ptr; + + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: time:%d, version:%d ret:%d" + "class:%d %s ", + plog_hdr->armcycle, log->hdr.version, + log->result, log->reg, log->channel ? "" : "all_chan")); + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: mode:%d is_wild:%d duration:%d" + "ssid_len:%d\n", log->mode, log->bssid_wild, + log->duration, log->ssid_len)); + if (log->channel_num != 0) { + print_roam_chan_list("ROAM_LOG_BCNREQ_SCAN_CHANLIST", log->channel_num, + log->band2g_chan_list, log->uni2a_chan_list, + log->uni3_chan_list, log->uni2c_chan_list); + } +} + +static const char* +pr_roam_bcn_rep_reason(uint16 reason_detail) +{ + static const char* reason_tbl[] = { + "BCNRPT_RSN_SUCCESS", + "BCNRPT_RSN_BADARG", + "BCNRPT_RSN_SCAN_ING", + "BCNRPT_RSN_SCAN_FAIL", + "UNKNOWN" + }; + + if (reason_detail >= ARRAYSIZE(reason_tbl)) { + DHD_ERROR_ROAM(("UNKNOWN Reason:%u\n", reason_detail)); + ASSERT(0); + reason_detail = ARRAYSIZE(reason_tbl) - 1; + } + return reason_tbl[reason_detail]; +}
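pr_roam_bcn_rep_reason() above clamps out-of-range firmware values to the trailing "UNKNOWN" slot rather than indexing past the table. The same shape reduced to a standalone sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *reason_str(unsigned reason)
{
    static const char *tbl[] = {
        "SUCCESS", "BADARG", "SCAN_IN_PROGRESS", "SCAN_FAIL", "UNKNOWN"
    };

    /* Clamp instead of trusting a firmware-supplied index. */
    if (reason >= ARRAY_SIZE(tbl))
        reason = ARRAY_SIZE(tbl) - 1;
    return tbl[reason];
}

int main(void)
{
    printf("%s %s\n", reason_str(1), reason_str(42)); /* BADARG UNKNOWN */
    return 0;
}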
+void pr_roam_bcn_rep_v3(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_bcnrpt_rep_v3_t *log = (roam_log_bcnrpt_rep_v3_t *)plog_hdr->log_ptr; + + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: time:%d, version:%d count:%d mode:%d\n", + plog_hdr->armcycle, log->hdr.version, + log->count, log->reason)); + DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: mode reason(%d):%s scan_status:%u duration:%u\n", + log->reason_detail, pr_roam_bcn_rep_reason(log->reason_detail), + (log->reason_detail == BCNRPT_RSN_SCAN_FAIL) ? log->scan_status : 0, + log->duration)); +} + +void pr_roam_btm_rep_v3(prcd_event_log_hdr_t *plog_hdr) +{ + roam_log_btm_rep_v3_t *log = (roam_log_btm_rep_v3_t *)plog_hdr->log_ptr; + DHD_ERROR_ROAM(("ROAM_LOG_BTM_REP: time:%d version:%d req_mode:%d " + "status:%d ret:%d target:" MACDBG "\n", + plog_hdr->armcycle, log->hdr.version, + log->req_mode, log->status, log->result, + MAC2STRDBG((uint8 *)&log->target_addr))); +} + +void +print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr) +{ + prsv_periodic_log_hdr_t *hdr = (prsv_periodic_log_hdr_t *)plog_hdr->log_ptr; + uint32 *ptr = (uint32 *)plog_hdr->log_ptr; + int i; + int loop_cnt = hdr->length / sizeof(uint32); + struct bcmstrbuf b; + char pr_buf[EL_LOG_STR_LEN] = { 0 }; + const pr_roam_tbl_t *cur_elem = &roam_log_print_tbl[0]; + + while (cur_elem && cur_elem->pr_func) { + if (hdr->version == cur_elem->version && + hdr->id == cur_elem->id) { + cur_elem->pr_func(plog_hdr); + return; + } + cur_elem++; + } + + bcm_binit(&b, pr_buf, EL_LOG_STR_LEN); + bcm_bprintf(&b, "ROAM_LOG_UNKNOWN ID:%d ver:%d armcycle:%d", + hdr->id, hdr->version, plog_hdr->armcycle); + for (i = 0; i < loop_cnt && b.size > 0; i++) { + bcm_bprintf(&b, " %x", *ptr); + ptr++; + } + DHD_ERROR_ROAM(("%s\n", b.origbuf)); +} + +/* + * dhd_dbg_attach: initialization of the DHD debugability module + * + * Return: An error code or 0 on success. + */ +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING +struct dhd_dbg_ring_buf g_ring_buf; +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ +int +dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, + dbg_urgent_noti_t os_urgent_notifier, void *os_priv) +{ + dhd_dbg_t *dbg = NULL; + dhd_dbg_ring_t *ring = NULL; + int ret = BCME_ERROR, ring_id = 0; + void *buf = NULL; +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + struct dhd_dbg_ring_buf *ring_buf; +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ + + dbg = MALLOCZ(dhdp->osh, sizeof(dhd_dbg_t)); + if (!dbg) + return BCME_NOMEM; + +#ifdef CONFIG_DHD_USE_STATIC_BUF + buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE); +#else + buf = MALLOCZ(dhdp->osh, FW_VERBOSE_RING_SIZE); +#endif + if (!buf) + goto error; + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID, + (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf, FALSE); + if (ret) + goto error; + +#ifdef CONFIG_DHD_USE_STATIC_BUF + buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE); +#else + buf = MALLOCZ(dhdp->osh, DHD_EVENT_RING_SIZE); +#endif + if (!buf) + goto error; + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID, + (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf, FALSE); + if (ret) + goto error; + +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + buf = MALLOCZ(dhdp->osh, DRIVER_LOG_RING_SIZE); + if (!buf) + goto error; + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DRIVER_LOG_RING_ID], DRIVER_LOG_RING_ID, + (uint8 *)DRIVER_LOG_RING_NAME, DRIVER_LOG_RING_SIZE, buf, FALSE); + if (ret) + goto error; + + buf = MALLOCZ(dhdp->osh, ROAM_STATS_RING_SIZE); + if (!buf) + goto error; + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[ROAM_STATS_RING_ID], ROAM_STATS_RING_ID, + (uint8 *)ROAM_STATS_RING_NAME, ROAM_STATS_RING_SIZE, buf, FALSE); + if (ret) + goto error; +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ +#ifdef BTLOG + buf = MALLOCZ(dhdp->osh, BT_LOG_RING_SIZE); + if (!buf) + goto error; + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[BT_LOG_RING_ID], BT_LOG_RING_ID, + BT_LOG_RING_NAME, BT_LOG_RING_SIZE, buf, FALSE); + if (ret) + goto
error; +#endif /* BTLOG */ + + dbg->private = os_priv; + dbg->pullreq = os_pullreq; + dbg->urgent_notifier = os_urgent_notifier; + dhdp->dbg = dbg; +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + ring_buf = &g_ring_buf; + ring_buf->dhd_pub = dhdp; +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ + return BCME_OK; + +error: + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + if (VALID_RING(dbg->dbg_rings[ring_id].id)) { + ring = &dbg->dbg_rings[ring_id]; + dhd_dbg_ring_deinit(dhdp, ring); + if (ring->ring_buf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhdp->osh, ring->ring_buf, ring->ring_size); +#endif + ring->ring_buf = NULL; + } + ring->ring_size = 0; + } + } + MFREE(dhdp->osh, dbg, sizeof(dhd_dbg_t)); +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + ring_buf = &g_ring_buf; + ring_buf->dhd_pub = NULL; +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ + + return ret; +} + +/* + * dhd_dbg_detach: clean up the DHD debugability module + */ +void +dhd_dbg_detach(dhd_pub_t *dhdp) +{ + int ring_id; + dhd_dbg_t *dbg; + dhd_dbg_ring_t *ring = NULL; +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + struct dhd_dbg_ring_buf *ring_buf; +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ + + if (!dhdp->dbg) + return; + + dbg = dhdp->dbg; + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + if (VALID_RING(dbg->dbg_rings[ring_id].id)) { + ring = &dbg->dbg_rings[ring_id]; + dhd_dbg_ring_deinit(dhdp, ring); + if (ring->ring_buf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhdp->osh, ring->ring_buf, ring->ring_size); +#endif + ring->ring_buf = NULL; + } + ring->ring_size = 0; + } + } + MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t)); +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + ring_buf = &g_ring_buf; + ring_buf->dhd_pub = NULL; +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ +} + +uint32 +dhd_dbg_get_fwverbose(dhd_pub_t *dhdp) +{ + if (dhdp && dhdp->dbg) { + return dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level; + } + return 0; +} + +void +dhd_dbg_set_fwverbose(dhd_pub_t *dhdp, uint32 new_val) +{ + if (dhdp && dhdp->dbg) { + dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level = new_val; + } +}
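dhd_dbg_attach() and dhd_dbg_detach() share the single-exit unwind style: any allocation failure jumps to one error label that walks all rings and releases whatever had been set up so far. A reduced standalone sketch of the pattern, with a hypothetical two-buffer structure:

#include <stdlib.h>

struct dbg {
    void *ring_a;
    void *ring_b;
};

/* Returns NULL on failure; partial allocations are unwound at one label. */
struct dbg *dbg_attach(size_t a_sz, size_t b_sz)
{
    struct dbg *d = calloc(1, sizeof(*d));
    if (!d)
        return NULL;

    d->ring_a = malloc(a_sz);
    if (!d->ring_a)
        goto error;
    d->ring_b = malloc(b_sz);
    if (!d->ring_b)
        goto error;
    return d;

error:
    free(d->ring_b);   /* free(NULL) is a no-op, so the order stays simple */
    free(d->ring_a);
    free(d);
    return NULL;
}

 diff --git a/bcmdhd.101.10.361.x/dhd_debug.h b/bcmdhd.101.10.361.x/dhd_debug.h new file mode 100755 index 0000000..08ba4a5 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_debug.h @@ -0,0 +1,891 @@ +/* + * DHD debugability header file + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software.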
+ * + * + * <> + * + * $Id$ + */ + +#ifndef _dhd_debug_h_ +#define _dhd_debug_h_ +#include +#include +#include + +enum { + /* Feature set */ + DBG_MEMORY_DUMP_SUPPORTED = (1 << (0)), /* Memory dump of FW */ + DBG_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), /* PKT Status */ + DBG_CONNECT_EVENT_SUPPORTED = (1 << (2)), /* Connectivity Event */ + DBG_POWER_EVENT_SUPOORTED = (1 << (3)), /* POWER of Driver */ + DBG_WAKE_LOCK_SUPPORTED = (1 << (4)), /* WAKE LOCK of Driver */ + DBG_VERBOSE_LOG_SUPPORTED = (1 << (5)), /* verbose log of FW */ + DBG_HEALTH_CHECK_SUPPORTED = (1 << (6)), /* monitor the health of FW */ + DBG_DRIVER_DUMP_SUPPORTED = (1 << (7)), /* dumps driver state */ + DBG_PACKET_FATE_SUPPORTED = (1 << (8)), /* tracks connection packets' fate */ + DBG_NAN_EVENT_SUPPORTED = (1 << (9)), /* NAN Events */ +}; + +enum { + /* set for binary entries */ + DBG_RING_ENTRY_FLAGS_HAS_BINARY = (1 << (0)), + /* set if 64 bits timestamp is present */ + DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP = (1 << (1)) +}; + +/* firmware verbose ring, ring id 1 */ +#define FW_VERBOSE_RING_NAME "fw_verbose" +#define FW_VERBOSE_RING_SIZE (256 * 1024) +/* firmware event ring, ring id 2 */ +#define FW_EVENT_RING_NAME "fw_event" +#define FW_EVENT_RING_SIZE (64 * 1024) +/* DHD connection event ring, ring id 3 */ +#define DHD_EVENT_RING_NAME "dhd_event" +#define DHD_EVENT_RING_SIZE (64 * 1024) +/* NAN event ring, ring id 4 */ +#define NAN_EVENT_RING_NAME "nan_event" +#define NAN_EVENT_RING_SIZE (64 * 1024) + +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING +/* DHD driver log ring */ +#define DRIVER_LOG_RING_NAME "driver_log" +#define DRIVER_LOG_RING_SIZE (256 * 1024) +/* ROAM stats log ring */ +#define ROAM_STATS_RING_NAME "roam_stats" +#define ROAM_STATS_RING_SIZE (64 * 1024) +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ + +#ifdef BTLOG +/* BT log ring, ring id 5 */ +#define BT_LOG_RING_NAME "bt_log" +#define BT_LOG_RING_SIZE (64 * 1024) +#endif /* BTLOG */ + +#define TLV_LOG_SIZE(tlv) ((tlv) ? (sizeof(tlv_log) + (tlv)->len) : 0) + +#define TLV_LOG_NEXT(tlv) \ + ((tlv) ? 
((tlv_log *)((uint8 *)tlv + TLV_LOG_SIZE(tlv))) : 0) + +#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t)) + +#define VALID_RING(id) \ + ((id > DEBUG_RING_ID_INVALID) && (id < DEBUG_RING_ID_MAX)) + +#ifdef DEBUGABILITY +#define DBG_RING_ACTIVE(dhdp, ring_id) \ + ((dhdp)->dbg->dbg_rings[(ring_id)].state == RING_ACTIVE) +#else +#define DBG_RING_ACTIVE(dhdp, ring_id) 0 +#endif /* DEBUGABILITY */ + +enum { + /* driver received association command from kernel */ + WIFI_EVENT_ASSOCIATION_REQUESTED = 0, + WIFI_EVENT_AUTH_COMPLETE, + WIFI_EVENT_ASSOC_COMPLETE, + /* received firmware event indicating auth frames are sent */ + WIFI_EVENT_FW_AUTH_STARTED, + /* received firmware event indicating assoc frames are sent */ + WIFI_EVENT_FW_ASSOC_STARTED, + /* received firmware event indicating reassoc frames are sent */ + WIFI_EVENT_FW_RE_ASSOC_STARTED, + WIFI_EVENT_DRIVER_SCAN_REQUESTED, + WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, + WIFI_EVENT_DRIVER_SCAN_COMPLETE, + WIFI_EVENT_G_SCAN_STARTED, + WIFI_EVENT_G_SCAN_COMPLETE, + WIFI_EVENT_DISASSOCIATION_REQUESTED, + WIFI_EVENT_RE_ASSOCIATION_REQUESTED, + WIFI_EVENT_ROAM_REQUESTED, + /* received beacon from AP (event enabled only in verbose mode) */ + WIFI_EVENT_BEACON_RECEIVED, + /* firmware has triggered a roam scan (not g-scan) */ + WIFI_EVENT_ROAM_SCAN_STARTED, + /* firmware has completed a roam scan (not g-scan) */ + WIFI_EVENT_ROAM_SCAN_COMPLETE, + /* firmware has started searching for roam candidates (with reason =xx) */ + WIFI_EVENT_ROAM_SEARCH_STARTED, + /* firmware has stopped searching for roam candidates (with reason =xx) */ + WIFI_EVENT_ROAM_SEARCH_STOPPED, + WIFI_EVENT_UNUSED_0, + /* received channel switch announcement from AP */ + WIFI_EVENT_CHANNEL_SWITCH_ANOUNCEMENT, + /* fw starts transmitting eapol frame, with EAPOL index 1-4 */ + WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, + /* fw gives up eapol frame, with rate, success/failure and number of retries */ + WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP, + /* kernel queues EAPOL for transmission in driver with EAPOL index 1-4 */ + WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, + /* with rate, regardless of the fact that EAPOL frame is accepted or + * rejected by firmware + */ + WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, + WIFI_EVENT_UNUSED_1, + /* with rate, and eapol index, driver has received */ + /* EAPOL frame and will queue it up to wpa_supplicant */ + WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, + /* with success/failure, parameters */ + WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE, + WIFI_EVENT_BT_COEX_BT_SCO_START, + WIFI_EVENT_BT_COEX_BT_SCO_STOP, + /* for paging/scan etc..., when BT starts transmitting twice per BT slot */ + WIFI_EVENT_BT_COEX_BT_SCAN_START, + WIFI_EVENT_BT_COEX_BT_SCAN_STOP, + WIFI_EVENT_BT_COEX_BT_HID_START, + WIFI_EVENT_BT_COEX_BT_HID_STOP, + /* firmware sends auth frame in roaming to next candidate */ + WIFI_EVENT_ROAM_AUTH_STARTED, + /* firmware receives auth confirm from AP */ + WIFI_EVENT_ROAM_AUTH_COMPLETE, + /* firmware sends assoc/reassoc frame in roaming to next candidate */ + WIFI_EVENT_ROAM_ASSOC_STARTED, + /* firmware receives assoc/reassoc confirm from AP */ + WIFI_EVENT_ROAM_ASSOC_COMPLETE, + /* firmware sends stop G_SCAN */ + WIFI_EVENT_G_SCAN_STOP, + /* firmware indicates G_SCAN scan cycle started */ + WIFI_EVENT_G_SCAN_CYCLE_STARTED, + /* firmware indicates G_SCAN scan cycle completed */ + WIFI_EVENT_G_SCAN_CYCLE_COMPLETED, + /* firmware indicates G_SCAN scan start for a particular bucket */ + WIFI_EVENT_G_SCAN_BUCKET_STARTED, + /* firmware indicates G_SCAN scan completed for particular bucket */ + WIFI_EVENT_G_SCAN_BUCKET_COMPLETED, + /* Event received from firmware about G_SCAN scan results being available */ + WIFI_EVENT_G_SCAN_RESULTS_AVAILABLE, + /* Event received from firmware with G_SCAN capabilities */ + WIFI_EVENT_G_SCAN_CAPABILITIES, + /* Event received from firmware when eligible candidate is found */ + WIFI_EVENT_ROAM_CANDIDATE_FOUND, + /* Event received from firmware when roam scan configuration gets enabled or disabled */ + WIFI_EVENT_ROAM_SCAN_CONFIG, + /* firmware/driver timed out authentication */ + WIFI_EVENT_AUTH_TIMEOUT, + /* firmware/driver timed out association */ + WIFI_EVENT_ASSOC_TIMEOUT, + /* firmware/driver encountered allocation failure */ + WIFI_EVENT_MEM_ALLOC_FAILURE, + /* driver added a PNO network in firmware */ + WIFI_EVENT_DRIVER_PNO_ADD, + /* driver removed a PNO network in firmware */ + WIFI_EVENT_DRIVER_PNO_REMOVE, + /* driver received PNO networks found indication from firmware */ + WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, + /* driver triggered a scan for PNO networks */ + WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, + /* driver received scan results of PNO networks */ + WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, + /* driver updated scan results from PNO candidates to cfg */ + WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE +}; + +enum { + WIFI_TAG_VENDOR_SPECIFIC = 0, /* takes a byte stream as parameter */ + WIFI_TAG_BSSID, /* takes a 6-byte MAC address as parameter */ + WIFI_TAG_ADDR, /* takes a 6-byte MAC address as parameter */ + WIFI_TAG_SSID, /* takes a 32-byte SSID as parameter */ + WIFI_TAG_STATUS, /* takes an integer as parameter */ + WIFI_TAG_CHANNEL_SPEC, /* takes one or more wifi_channel_spec as parameter */ + WIFI_TAG_WAKE_LOCK_EVENT, /* takes a wake_lock_event struct as parameter */ + WIFI_TAG_ADDR1, /* takes a 6-byte MAC address as parameter */ + WIFI_TAG_ADDR2, /* takes a 6-byte MAC address as parameter */ + WIFI_TAG_ADDR3, /* takes a 6-byte MAC address as parameter */ + WIFI_TAG_ADDR4, /* takes a 6-byte MAC address as parameter */ + WIFI_TAG_TSF, /* takes a 64-bit TSF value as parameter */ + WIFI_TAG_IE, + /* takes one or more specific 802.11 IEs as parameter; IEs are in turn + * indicated in TLV format as per 802.11 spec + */ + WIFI_TAG_INTERFACE, /* takes interface name as parameter */ + WIFI_TAG_REASON_CODE, /* takes a reason code as per 802.11 as parameter */ + WIFI_TAG_RATE_MBPS, /* takes a wifi rate in 0.5 mbps */ + WIFI_TAG_REQUEST_ID, /* takes an integer as parameter */ + WIFI_TAG_BUCKET_ID, /* takes an integer as parameter */ + WIFI_TAG_GSCAN_PARAMS, /* takes a wifi_scan_cmd_params struct as parameter */ + WIFI_TAG_GSCAN_CAPABILITIES, /* takes a wifi_gscan_capabilities struct as parameter */ + WIFI_TAG_SCAN_ID, /* takes an integer as parameter */ + WIFI_TAG_RSSI, /* takes s16 as parameter */ + WIFI_TAG_CHANNEL, /* takes u16 as parameter */ + WIFI_TAG_LINK_ID, /* takes an integer as parameter */ + WIFI_TAG_LINK_ROLE, /* takes an integer as parameter */ + WIFI_TAG_LINK_STATE, /* takes an integer as parameter */ + WIFI_TAG_LINK_TYPE, /* takes an integer as parameter */ + WIFI_TAG_TSCO, /* takes an integer as parameter */ + WIFI_TAG_RSCO, /* takes an integer as parameter */ + WIFI_TAG_EAPOL_MESSAGE_TYPE /* takes an integer as parameter */ +};
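Entries carrying these tags use the tlv_log layout declared just below (u16 tag, u16 len, then len value bytes) and are walked with TLV_LOG_SIZE()/TLV_LOG_NEXT(). A standalone sketch of that traversal over a raw buffer, simplified to little-endian byte order and a running length check:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void walk_tlvs(const uint8_t *buf, size_t buflen)
{
    size_t off = 0;
    while (off + 4 <= buflen) {          /* 4-byte header: u16 tag + u16 len */
        uint16_t tag, len;
        memcpy(&tag, buf + off, 2);      /* memcpy avoids unaligned access */
        memcpy(&len, buf + off + 2, 2);
        if (off + 4 + len > buflen)      /* truncated record: stop */
            break;
        printf("tag=%u len=%u\n", tag, len);
        off += 4 + (size_t)len;          /* same step as TLV_LOG_NEXT() */
    }
}

int main(void)
{
    /* One record: tag=5, len=2, value={0xAA,0xBB} (little-endian host assumed). */
    uint8_t buf[] = { 5, 0, 2, 0, 0xAA, 0xBB };
    walk_tlvs(buf, sizeof(buf));
    return 0;
}

 + +/* NAN events */ +typedef enum { + NAN_EVENT_INVALID = 0, + NAN_EVENT_CLUSTER_STARTED = 1, + NAN_EVENT_CLUSTER_JOINED = 2, + NAN_EVENT_CLUSTER_MERGED = 3, + NAN_EVENT_ROLE_CHANGED = 4, + NAN_EVENT_SCAN_COMPLETE = 5, + NAN_EVENT_STATUS_CHNG = 6, + /* ADD new events before this line */ + NAN_EVENT_MAX +}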
nan_event_id_t; + +typedef struct { + uint16 tag; + uint16 len; /* length of value */ + uint8 value[0]; +} tlv_log; + +typedef struct per_packet_status_entry { + uint8 flags; + uint8 tid; /* transmit or receive TID */ + uint16 MCS; /* modulation and bandwidth */ + /* + * TX: RSSI of ACK for that packet + * RX: RSSI of packet + */ + uint8 rssi; + uint8 num_retries; /* number of attempted retries */ + uint16 last_transmit_rate; /* last transmit rate in .5 mbps */ + /* transmit/receive sequence for that MPDU packet */ + uint16 link_layer_transmit_sequence; + /* + * TX: firmware timestamp (us) when packet is queued within firmware buffer + * for SDIO/HSIC or into PCIe buffer + * RX : firmware receive timestamp + */ + uint64 firmware_entry_timestamp; + /* + * firmware timestamp (us) when packet starts contending for the + * medium for the first time, at head of its AC queue, + * or as part of an MPDU or A-MPDU. This timestamp is not updated + * for each retry, only the first transmit attempt. + */ + uint64 start_contention_timestamp; + /* + * firmware timestamp (us) when packet is successfully transmitted + * or aborted because it has exhausted its maximum number of retries + */ + uint64 transmit_success_timestamp; + /* + * packet data. The length of packet data is determined by the entry_size field of + * the wifi_ring_buffer_entry structure. It is expected that first bytes of the + * packet, or packet headers only (up to TCP or RTP/UDP headers) will be copied into the ring + */ + uint8 *data; +} per_packet_status_entry_t; + +#if defined(LINUX) +#define PACKED_STRUCT __attribute__ ((packed)) +#else +#define PACKED_STRUCT +#endif + +#if defined(LINUX) +typedef struct log_conn_event { + uint16 event; + tlv_log tlvs[0]; + /* + * separate parameter structure per event to be provided and optional data + * the event_data is expected to include an official android part, with some + * parameter as transmit rate, num retries, num scan result found etc... + * as well, event_data can include a vendor proprietary part which is + * understood by the developer only. + */ +} PACKED_STRUCT log_conn_event_t; +#endif /* defined(LINUX) */ + +/* + * Ring buffer name for power events ring. Note that power events are extremely frequent + * and thus should be stored in their own ring/file so as not to clobber connectivity events + */ + +typedef struct wake_lock_event { + uint32 status; /* 0 taken, 1 released */ + uint32 reason; /* reason why this wake lock is taken */ + char *name; /* null terminated */ +} wake_lock_event_t; + +typedef struct wifi_power_event { + uint16 event; + tlv_log *tlvs; +} wifi_power_event_t; + +#define NAN_EVENT_VERSION 1 +typedef struct log_nan_event { + uint8 version; + uint8 pad; + uint16 event; + tlv_log *tlvs; +} log_nan_event_t; + +/* entry type */ +enum { + DBG_RING_ENTRY_EVENT_TYPE = 1, + DBG_RING_ENTRY_PKT_TYPE, + DBG_RING_ENTRY_WAKE_LOCK_EVENT_TYPE, + DBG_RING_ENTRY_POWER_EVENT_TYPE, + DBG_RING_ENTRY_DATA_TYPE, + DBG_RING_ENTRY_NAN_EVENT_TYPE +}; + +struct log_level_table { + int log_level; + uint16 tag; + char *desc; +};
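tlv_log and log_conn_event above rely on the zero-length-array idiom (value[0], tlvs[0]) so a fixed header and its variable payload come out of one allocation. The same idea with the standard C99 flexible array member, of which the [0] form is the older GNU spelling:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct event_rec {
    uint16_t event;
    uint16_t len;
    char payload[];    /* C99 flexible array member */
};

int main(void)
{
    const char *msg = "assoc";
    struct event_rec *r = malloc(sizeof(*r) + strlen(msg) + 1);
    if (!r)
        return 1;
    r->event = 2;
    r->len = (uint16_t)(strlen(msg) + 1);
    memcpy(r->payload, msg, r->len);   /* header and payload share one block */
    printf("event=%u payload=%s\n", r->event, r->payload);
    free(r);
    return 0;
}

 + +#ifdef OEM_ANDROID +/* + * Assuming that the Ring lock is a mutex, bail out if the + * callers are from atomic context. In the long term, one has to + * schedule a job to execute in sleepable context so that + * contents are pushed to the ring.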
+ */ +#define DBG_EVENT_LOG(dhdp, connect_state) \ +{ \ + do { \ + uint16 state = connect_state; \ + if (CAN_SLEEP() && DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \ + dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID, \ + &state, sizeof(state)); \ + } while (0); \ +} +#else +#define DBG_EVENT_LOG(dhd, connect_state) +#endif /* !OEM_ANDROID */ + +/* + * Packet logging - HAL specific data + * XXX: These should be moved to wl_cfgvendor.h + */ + +#define MD5_PREFIX_LEN 4 +#define MAX_FATE_LOG_LEN 32 +#define MAX_FRAME_LEN_ETHERNET 1518 +#define MAX_FRAME_LEN_80211_MGMT 2352 /* 802.11-2012 Fig. 8-34 */ + +typedef enum { + /* Sent over air and ACKed. */ + TX_PKT_FATE_ACKED, + + /* Sent over air but not ACKed. (Normal for broadcast/multicast.) */ + TX_PKT_FATE_SENT, + + /* Queued within firmware, but not yet sent over air. */ + TX_PKT_FATE_FW_QUEUED, + + /* + * Dropped by firmware as invalid. E.g. bad source address, + * bad checksum, or invalid for current state. + */ + TX_PKT_FATE_FW_DROP_INVALID, + + /* Dropped by firmware due to lifetime expiration. */ + TX_PKT_FATE_FW_DROP_EXPTIME, + + /* + * Dropped by firmware for any other reason. Includes + * frames that were sent by driver to firmware, but + * unaccounted for by firmware. + */ + TX_PKT_FATE_FW_DROP_OTHER, + + /* Queued within driver, not yet sent to firmware. */ + TX_PKT_FATE_DRV_QUEUED, + + /* + * Dropped by driver as invalid. E.g. bad source address, + * or invalid for current state. + */ + TX_PKT_FATE_DRV_DROP_INVALID, + + /* Dropped by driver due to lack of buffer space. */ + TX_PKT_FATE_DRV_DROP_NOBUFS, + + /* Dropped by driver for any other reason. */ + TX_PKT_FATE_DRV_DROP_OTHER, + + /* Packet free by firmware. */ + TX_PKT_FATE_FW_PKT_FREE, + + } wifi_tx_packet_fate; + +typedef enum { + /* Valid and delivered to network stack (e.g., netif_rx()). */ + RX_PKT_FATE_SUCCESS, + + /* Queued within firmware, but not yet sent to driver. */ + RX_PKT_FATE_FW_QUEUED, + + /* Dropped by firmware due to host-programmable filters. */ + RX_PKT_FATE_FW_DROP_FILTER, + + /* + * Dropped by firmware as invalid. E.g. bad checksum, + * decrypt failed, or invalid for current state. + */ + RX_PKT_FATE_FW_DROP_INVALID, + + /* Dropped by firmware due to lack of buffer space. */ + RX_PKT_FATE_FW_DROP_NOBUFS, + + /* Dropped by firmware for any other reason. */ + RX_PKT_FATE_FW_DROP_OTHER, + + /* Queued within driver, not yet delivered to network stack. */ + RX_PKT_FATE_DRV_QUEUED, + + /* Dropped by driver due to filter rules. */ + RX_PKT_FATE_DRV_DROP_FILTER, + + /* Dropped by driver as invalid. E.g. not permitted in current state. */ + RX_PKT_FATE_DRV_DROP_INVALID, + + /* Dropped by driver due to lack of buffer space. */ + RX_PKT_FATE_DRV_DROP_NOBUFS, + + /* Dropped by driver for any other reason. */ + RX_PKT_FATE_DRV_DROP_OTHER, + + /* Indicate RX Host Wake up packet. */ + RX_PKT_FATE_WAKE_PKT, + + } wifi_rx_packet_fate; + +typedef enum { + FRAME_TYPE_UNKNOWN, + FRAME_TYPE_ETHERNET_II, + FRAME_TYPE_80211_MGMT, + } frame_type; + +typedef struct wifi_frame_info { + /* + * The type of MAC-layer frame that this frame_info holds. + * - For data frames, use FRAME_TYPE_ETHERNET_II. + * - For management frames, use FRAME_TYPE_80211_MGMT. + * - If the type of the frame is unknown, use FRAME_TYPE_UNKNOWN. + */ + frame_type payload_type; + + /* + * The number of bytes included in |frame_content|. If the frame + * contents are missing (e.g. RX frame dropped in firmware), + * |frame_len| should be set to 0. 
+ */ + size_t frame_len; + + /* + * Host clock when this frame was received by the driver (either + * outbound from the host network stack, or inbound from the + * firmware). + * - The timestamp should be taken from a clock which includes time + * the host spent suspended (e.g. ktime_get_boottime()). + * - If no host timestamp is available (e.g. RX frame was dropped in + * firmware), this field should be set to 0. + */ + uint32 driver_timestamp_usec; + + /* + * Firmware clock when this frame was received by the firmware + * (either outbound from the host, or inbound from a remote + * station). + * - The timestamp should be taken from a clock which includes time + * firmware spent suspended (if applicable). + * - If no firmware timestamp is available (e.g. TX frame was + * dropped by driver), this field should be set to 0. + * - Consumers of |frame_info| should _not_ assume any + * synchronization between driver and firmware clocks. + */ + uint32 firmware_timestamp_usec; + + /* + * Actual frame content. + * - Should be provided for TX frames originated by the host. + * - Should be provided for RX frames received by the driver. + * - Optionally provided for TX frames originated by firmware. (At + * discretion of HAL implementation.) + * - Optionally provided for RX frames dropped in firmware. (At + * discretion of HAL implementation.) + * - If frame content is not provided, |frame_len| should be set + * to 0. + */ + union { + char ethernet_ii[MAX_FRAME_LEN_ETHERNET]; + char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT]; + } frame_content; +} wifi_frame_info_t; + +typedef struct wifi_tx_report { + /* + * Prefix of MD5 hash of |frame_inf.frame_content|. If frame + * content is not provided, prefix of MD5 hash over the same data + * that would be in frame_content, if frame content were provided. + */ + char md5_prefix[MD5_PREFIX_LEN]; + wifi_tx_packet_fate fate; + wifi_frame_info_t frame_inf; +} wifi_tx_report_t; + +typedef struct wifi_rx_report { + /* + * Prefix of MD5 hash of |frame_inf.frame_content|. If frame + * content is not provided, prefix of MD5 hash over the same data + * that would be in frame_content, if frame content were provided. 
+ */ + char md5_prefix[MD5_PREFIX_LEN]; + wifi_rx_packet_fate fate; + wifi_frame_info_t frame_inf; +} wifi_rx_report_t; + +typedef struct compat_wifi_frame_info { + frame_type payload_type; + + uint32 frame_len; + + uint32 driver_timestamp_usec; + + uint32 firmware_timestamp_usec; + + union { + char ethernet_ii[MAX_FRAME_LEN_ETHERNET]; + char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT]; + } frame_content; +} compat_wifi_frame_info_t; + +typedef struct compat_wifi_tx_report { + char md5_prefix[MD5_PREFIX_LEN]; + wifi_tx_packet_fate fate; + compat_wifi_frame_info_t frame_inf; +} compat_wifi_tx_report_t; + +typedef struct compat_wifi_rx_report { + char md5_prefix[MD5_PREFIX_LEN]; + wifi_rx_packet_fate fate; + compat_wifi_frame_info_t frame_inf; +} compat_wifi_rx_report_t; + +/* + * Packet logging - internal data + */ + +typedef enum dhd_dbg_pkt_mon_state { + PKT_MON_INVALID = 0, + PKT_MON_ATTACHED, + PKT_MON_STARTING, + PKT_MON_STARTED, + PKT_MON_STOPPING, + PKT_MON_STOPPED, + PKT_MON_DETACHED, + } dhd_dbg_pkt_mon_state_t; + +typedef struct dhd_dbg_pkt_info { + frame_type payload_type; + size_t pkt_len; + uint32 driver_ts; + uint32 firmware_ts; + uint32 pkt_hash; + void *pkt; +} dhd_dbg_pkt_info_t; + +typedef struct compat_dhd_dbg_pkt_info { + frame_type payload_type; + uint32 pkt_len; + uint32 driver_ts; + uint32 firmware_ts; + uint32 pkt_hash; + void *pkt; +} compat_dhd_dbg_pkt_info_t; + +typedef struct dhd_dbg_tx_info +{ + wifi_tx_packet_fate fate; + dhd_dbg_pkt_info_t info; +} dhd_dbg_tx_info_t; + +typedef struct dhd_dbg_rx_info +{ + wifi_rx_packet_fate fate; + dhd_dbg_pkt_info_t info; +} dhd_dbg_rx_info_t; + +typedef struct dhd_dbg_tx_report +{ + dhd_dbg_tx_info_t *tx_pkts; + uint16 pkt_pos; + uint16 status_pos; +} dhd_dbg_tx_report_t; + +typedef struct dhd_dbg_rx_report +{ + dhd_dbg_rx_info_t *rx_pkts; + uint16 pkt_pos; +} dhd_dbg_rx_report_t; + +typedef void (*dbg_pullreq_t)(void *os_priv, const int ring_id); +typedef void (*dbg_urgent_noti_t) (dhd_pub_t *dhdp, const void *data, const uint32 len); +typedef int (*dbg_mon_tx_pkts_t) (dhd_pub_t *dhdp, void *pkt, uint32 pktid); +typedef int (*dbg_mon_tx_status_t) (dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +typedef int (*dbg_mon_rx_pkts_t) (dhd_pub_t *dhdp, void *pkt); + +typedef struct dhd_dbg_pkt_mon +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + + /* call backs */ + dbg_mon_tx_pkts_t tx_pkt_mon; + dbg_mon_tx_status_t tx_status_mon; + dbg_mon_rx_pkts_t rx_pkt_mon; +} dhd_dbg_pkt_mon_t; + +typedef struct dhd_dbg { + dhd_dbg_ring_t dbg_rings[DEBUG_RING_ID_MAX]; + void *private; /* os private_data */ + dhd_dbg_pkt_mon_t pkt_mon; + void *pkt_mon_lock; /* spin lock for packet monitoring */ + dbg_pullreq_t pullreq; + dbg_urgent_noti_t urgent_notifier; +} dhd_dbg_t; + +#define PKT_MON_ATTACHED(state) \ + (((state) > PKT_MON_INVALID) && ((state) < PKT_MON_DETACHED)) +#define PKT_MON_DETACHED(state) \ + (((state) == PKT_MON_INVALID) || ((state) == PKT_MON_DETACHED)) +#define PKT_MON_STARTED(state) ((state) == PKT_MON_STARTED) +#define PKT_MON_STOPPED(state) ((state) == PKT_MON_STOPPED) +#define PKT_MON_NOT_OPERATIONAL(state) \ + (((state) != PKT_MON_STARTED) && ((state) != PKT_MON_STOPPED)) +#define PKT_MON_SAFE_TO_FREE(state) \ + (((state) == PKT_MON_STARTING) || ((state) == PKT_MON_STOPPED)) +#define PKT_MON_PKT_FULL(pkt_count) ((pkt_count) >= MAX_FATE_LOG_LEN) +#define 
PKT_MON_STATUS_FULL(pkt_count, status_count) \ + (((status_count) >= (pkt_count)) || ((status_count) >= MAX_FATE_LOG_LEN)) + +#ifdef DBG_PKT_MON +#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_pkt_mon && (pkt)) { \ + (dhdp)->dbg->pkt_mon.tx_pkt_mon((dhdp), (pkt), (pktid)); \ + } \ + } while (0); +#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_status_mon && (pkt)) { \ + (dhdp)->dbg->pkt_mon.tx_status_mon((dhdp), (pkt), (pktid), (status)); \ + } \ + } while (0); +#define DHD_DBG_PKT_MON_RX(dhdp, pkt) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.rx_pkt_mon && (pkt)) { \ + if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \ + (dhdp)->dbg->pkt_mon.rx_pkt_mon((dhdp), (pkt)); \ + } \ + } \ + } while (0); + +#define DHD_DBG_PKT_MON_START(dhdp) \ + dhd_os_dbg_start_pkt_monitor((dhdp)); +#define DHD_DBG_PKT_MON_STOP(dhdp) \ + dhd_os_dbg_stop_pkt_monitor((dhdp)); +#else +#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) +#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) +#define DHD_DBG_PKT_MON_RX(dhdp, pkt) +#define DHD_DBG_PKT_MON_START(dhdp) +#define DHD_DBG_PKT_MON_STOP(dhdp) +#endif /* DBG_PKT_MON */ + +#ifdef DUMP_IOCTL_IOV_LIST +typedef struct dhd_iov_li { + dll_t list; + uint32 cmd; /* command number */ + char buff[100]; /* command name */ +} dhd_iov_li_t; +#endif /* DUMP_IOCTL_IOV_LIST */ + +#define IOV_LIST_MAX_LEN 5 + +#ifdef DHD_DEBUG +typedef struct { + dll_t list; + uint32 id; /* wasted chunk id */ + uint32 handle; /* wasted chunk handle */ + uint32 size; /* wasted chunk size */ +} dhd_dbg_mwli_t; +#endif /* DHD_DEBUG */ + +#define DHD_OW_BI_RAW_EVENT_LOG_FMT 0xFFFF + +/* LSB 2 bits of format number to identify the type of event log */ +#define DHD_EVENT_LOG_HDR_MASK 0x3 + +#define DHD_EVENT_LOG_FMT_NUM_OFFSET 2 +#define DHD_EVENT_LOG_FMT_NUM_MASK 0x3FFF +/** + * OW:- one word + * TW:- two word + * NB:- non binary + * BI:- binary + */ +#define DHD_OW_NB_EVENT_LOG_HDR 0 +#define DHD_TW_NB_EVENT_LOG_HDR 1 +#define DHD_BI_EVENT_LOG_HDR 3 +#define DHD_INVALID_EVENT_LOG_HDR 2 + +#define DHD_TW_VALID_TAG_BITS_MASK 0xF +#define DHD_OW_BI_EVENT_FMT_NUM 0x3FFF +#define DHD_TW_BI_EVENT_FMT_NUM 0x3FFE + +#define DHD_TW_EVENT_LOG_TAG_OFFSET 8 + +#define EVENT_TAG_TIMESTAMP_OFFSET 1 +#define EVENT_TAG_TIMESTAMP_EXT_OFFSET 2 + +typedef struct prcd_event_log_hdr { + uint32 tag; /* Event_log entry tag */ + uint32 count; /* Count of 4-byte entries */ + uint32 fmt_num_raw; /* Format number */ + uint32 fmt_num; /* Format number >> 2 */ + uint32 armcycle; /* global ARM CYCLE for TAG */ + uint32 *log_ptr; /* start of payload */ + uint32 payload_len; + /* Extended event log header info + * 0 - legacy, 1 - extended event log header present + */ + bool ext_event_log_hdr; + bool binary_payload; /* 0 - non binary payload, 1 - binary payload */ +} prcd_event_log_hdr_t; /* Processed event log header */ + +/* dhd_dbg functions */ +extern void dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen); +void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present, + uint32 msgtrace_seqnum); + +#ifdef BTLOG +extern void dhd_dbg_bt_log_handler(dhd_pub_t *dhdp, void *data, uint datalen); +#endif /* BTLOG */ +extern int dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, + dbg_urgent_noti_t os_urgent_notifier, void *os_priv); +extern void 
dhd_dbg_detach(dhd_pub_t *dhdp); +extern int dhd_dbg_start(dhd_pub_t *dhdp, bool start); +extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, + int log_level, int flags, uint32 threshold); +extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name); +extern dhd_dbg_ring_t *dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id); +extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp); +extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len); +extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, + void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block); +int dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len); +int dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len, + bool strip_header); +int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, + void *data); +int __dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *ring_status); +int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, + dhd_dbg_ring_status_t *dbg_ring_status); +#ifdef SHOW_LOGTRACE +void dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info); +#endif /* SHOW_LOGTRACE */ + +#ifdef DBG_PKT_MON +extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp, + dbg_mon_tx_pkts_t tx_pkt_mon, + dbg_mon_tx_status_t tx_status_mon, + dbg_mon_rx_pkts_t rx_pkt_mon); +extern int dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid); +extern int dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +extern int dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt); +extern int dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count); +extern int dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count); +extern int dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp); +#endif /* DBG_PKT_MON */ + +extern bool dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); + +/* os wrapper function */ +extern int dhd_os_dbg_attach(dhd_pub_t *dhdp); +extern void dhd_os_dbg_detach(dhd_pub_t *dhdp); +extern int dhd_os_dbg_register_callback(int ring_id, + void (*dbg_ring_sub_cb)(void *ctx, const int ring_id, const void *data, + const uint32 len, const dhd_dbg_ring_status_t dbg_ring_status)); +extern int dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp, + void (*urgent_noti)(void *ctx, const void *data, const uint32 len, const uint32 fw_len)); + +extern int dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level, + int flags, int time_intval, int threshold); +extern int dhd_os_reset_logging(dhd_pub_t *dhdp); +extern int dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress); + +extern int dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, + dhd_dbg_ring_status_t *dbg_ring_status); +extern int dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name); +extern int dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len); +extern int dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features); + +#ifdef DBG_PKT_MON +extern int dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, + uint32 
pktid); +extern int dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +extern int dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt); +extern int dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, + void __user *user_buf, uint16 req_count, uint16 *resp_count); +extern int dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, + void __user *user_buf, uint16 req_count, uint16 *resp_count); +extern int dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp); +#endif /* DBG_PKT_MON */ + +#ifdef DUMP_IOCTL_IOV_LIST +extern void dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node); +extern void dhd_iov_li_print(dll_t *list_head); +extern void dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head); +#endif /* DUMP_IOCTL_IOV_LIST */ + +#ifdef DHD_DEBUG +extern void dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head); +#endif /* DHD_DEBUG */ + +void print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr); + +typedef void (*print_roam_enhance_log_func)(prcd_event_log_hdr_t *plog_hdr); +typedef struct _pr_roam_tbl { + uint8 version; + uint8 id; + print_roam_enhance_log_func pr_func; +} pr_roam_tbl_t; + +extern uint32 dhd_dbg_get_fwverbose(dhd_pub_t *dhdp); +extern void dhd_dbg_set_fwverbose(dhd_pub_t *dhdp, uint32 new_val); +#endif /* _dhd_debug_h_ */ diff --git a/bcmdhd.101.10.361.x/dhd_debug_linux.c b/bcmdhd.101.10.361.x/dhd_debug_linux.c new file mode 100755 index 0000000..81aa730 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_debug_linux.c @@ -0,0 +1,528 @@ +/* + * DHD debugability Linux os layer + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +typedef void (*dbg_ring_send_sub_t)(void *ctx, const int ring_id, const void *data, + const uint32 len, const dhd_dbg_ring_status_t ring_status); +typedef void (*dbg_urgent_noti_sub_t)(void *ctx, const void *data, + const uint32 len, const uint32 fw_len); + +static dbg_ring_send_sub_t ring_send_sub_cb[DEBUG_RING_ID_MAX]; +static dbg_urgent_noti_sub_t urgent_noti_sub_cb; +typedef struct dhd_dbg_os_ring_info { + dhd_pub_t *dhdp; + int ring_id; + int log_level; + unsigned long interval; + struct delayed_work work; + uint64 tsoffset; +} linux_dbgring_info_t; + +struct log_level_table dhd_event_map[] = { + {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, "DRIVER EAPOL TX REQ"}, + {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, "DRIVER EAPOL RX"}, + {2, WIFI_EVENT_DRIVER_SCAN_REQUESTED, "SCAN_REQUESTED"}, + {2, WIFI_EVENT_DRIVER_SCAN_COMPLETE, "SCAN COMPLETE"}, + {3, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, "SCAN RESULT FOUND"}, + {2, WIFI_EVENT_DRIVER_PNO_ADD, "PNO ADD"}, + {2, WIFI_EVENT_DRIVER_PNO_REMOVE, "PNO REMOVE"}, + {2, WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, "PNO NETWORK FOUND"}, + {2, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, "PNO SCAN_REQUESTED"}, + {1, WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, "PNO SCAN RESULT FOUND"}, + {1, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE, "PNO SCAN COMPLETE"} +}; + +static void +debug_data_send(dhd_pub_t *dhdp, int ring_id, const void *data, const uint32 len, + const dhd_dbg_ring_status_t ring_status) +{ + struct net_device *ndev; + dbg_ring_send_sub_t ring_sub_send; + ndev = dhd_linux_get_primary_netdev(dhdp); + if (!ndev) + return; + if (!VALID_RING(ring_id)) + return; + if (ring_send_sub_cb[ring_id]) { + ring_sub_send = ring_send_sub_cb[ring_id]; + ring_sub_send(ndev, ring_id, data, len, ring_status); + } +} + +static void +dhd_os_dbg_urgent_notifier(dhd_pub_t *dhdp, const void *data, const uint32 len) +{ + struct net_device *ndev; + ndev = dhd_linux_get_primary_netdev(dhdp); + if (!ndev) + return; + if (urgent_noti_sub_cb) { + urgent_noti_sub_cb(ndev, data, len, dhdp->soc_ram_length); + } +}
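dbg_ring_poll_worker() below sizes each read with the usual circular-buffer arithmetic: bytes used are wp - rp, wrapped by ring_size when the writer is behind the reader. That computation in isolation:

#include <stdio.h>
#include <stdint.h>

/* Bytes available to read in a ring of `size` bytes, given write/read offsets. */
static uint32_t ring_used(uint32_t wp, uint32_t rp, uint32_t size)
{
    if (wp >= rp)
        return wp - rp;          /* no wrap; 0 when empty */
    return size - rp + wp;       /* writer has wrapped past the end */
}

int main(void)
{
    printf("%u\n", ring_used(300, 100, 1024));  /* 200 */
    printf("%u\n", ring_used(50, 900, 1024));   /* 174 */
    return 0;
}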
 +static void +dbg_ring_poll_worker(struct work_struct *work) +{ + struct delayed_work *d_work = to_delayed_work(work); + bool sched = TRUE; + dhd_dbg_ring_t *ring; + linux_dbgring_info_t *ring_info; + dhd_pub_t *dhdp; + int ringid; + dhd_dbg_ring_status_t ring_status; + void *buf; + dhd_dbg_ring_entry_t *hdr; + uint32 buflen, rlen; + unsigned long flags; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + ring_info = container_of(d_work, linux_dbgring_info_t, work); + GCC_DIAGNOSTIC_POP(); + + dhdp = ring_info->dhdp; + ringid = ring_info->ring_id; + + ring = &dhdp->dbg->dbg_rings[ringid]; + DHD_DBG_RING_LOCK(ring->lock, flags); + dhd_dbg_get_ring_status(dhdp, ringid, &ring_status); + + if (ring->wp > ring->rp) { + buflen = ring->wp - ring->rp; + } else if (ring->wp < ring->rp) { + buflen = ring->ring_size - ring->rp + ring->wp; + } else { + goto exit; + } + + if (buflen > ring->ring_size) { + goto exit; + } + + buf = MALLOCZ(dhdp->osh, buflen); + if (!buf) { + DHD_ERROR(("%s failed to allocate read buf\n", __FUNCTION__)); + sched = FALSE; + goto exit; + } + + DHD_DBG_RING_UNLOCK(ring->lock, flags); + rlen = dhd_dbg_pull_from_ring(dhdp, ringid, buf, buflen); + DHD_DBG_RING_LOCK(ring->lock, flags); + + if (!ring->sched_pull) { + ring->sched_pull = TRUE; + } + + hdr = (dhd_dbg_ring_entry_t *)buf; + while (rlen > 0) { + ring_status.read_bytes += ENTRY_LENGTH(hdr); + /* offset fw ts to host ts */ + hdr->timestamp += ring_info->tsoffset; + debug_data_send(dhdp, ringid, hdr, ENTRY_LENGTH(hdr), + ring_status); + rlen -= ENTRY_LENGTH(hdr); + hdr = (dhd_dbg_ring_entry_t *)((char *)hdr + ENTRY_LENGTH(hdr)); + } + MFREE(dhdp->osh, buf, buflen); + +exit: + if (sched) { + /* retrigger the work at the same interval */ + if ((ring_status.written_bytes == ring_status.read_bytes) && + (ring_info->interval)) { + schedule_delayed_work(d_work, ring_info->interval); + } + } + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return; +} + +int +dhd_os_dbg_register_callback(int ring_id, dbg_ring_send_sub_t callback) +{ + if (!VALID_RING(ring_id)) + return BCME_RANGE; + + ring_send_sub_cb[ring_id] = callback; + return BCME_OK; +} + +int +dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp, dbg_urgent_noti_sub_t urgent_noti_sub) +{ + if (!dhdp || !urgent_noti_sub) + return BCME_BADARG; + urgent_noti_sub_cb = urgent_noti_sub; + + return BCME_OK; +} + +int +dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level, + int flags, int time_intval, int threshold) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + + ring_id = dhd_dbg_find_ring_id(dhdp, ring_name); + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + + DHD_INFO(("%s , log_level : %d, time_intval : %d, threshold %d Bytes\n", + __FUNCTION__, log_level, time_intval, threshold)); + + /* change the configuration */ + ret = dhd_dbg_set_configuration(dhdp, ring_id, log_level, flags, threshold); + if (ret) { + DHD_ERROR(("dhd_dbg_set_configuration failed : %d\n", ret)); + return ret; + } + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + ring_info = &os_priv[ring_id]; + ring_info->log_level = log_level; + if (time_intval == 0 || log_level == 0) { + ring_info->interval = 0; + cancel_delayed_work_sync(&ring_info->work); + } else { + ring_info->interval = msecs_to_jiffies(time_intval * MSEC_PER_SEC); + cancel_delayed_work_sync(&ring_info->work); + schedule_delayed_work(&ring_info->work, ring_info->interval); + } + + return ret; +} + +int +dhd_os_reset_logging(dhd_pub_t *dhdp) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + + /* Stop all rings */ + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + DHD_INFO(("%s: Stop ring buffer %d\n", __FUNCTION__, ring_id)); + + ring_info = &os_priv[ring_id]; + /* cancel any pending work */ + cancel_delayed_work_sync(&ring_info->work); + /* a log level of zero stops logging on that ring */ + ring_info->log_level = 0; + ring_info->interval = 0; + /* change the configuration */ + ret = dhd_dbg_set_configuration(dhdp, ring_id, 0, 0, 0); + if (ret) { + DHD_ERROR(("dhd_dbg_set_configuration failed : %d\n", ret)); + return ret; + } + } + return ret; +}
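dhd_os_start_logging() follows the cancel-then-reschedule discipline: it always runs cancel_delayed_work_sync() before (re)scheduling, so at most one poll worker is ever queued, and an interval or log level of zero simply leaves the work cancelled. A rough pthread equivalent of that life cycle (simplified: interval_ms is read without atomics here, unlike the kernel's workqueue API):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct poller {
    pthread_t thr;
    int running;
    unsigned interval_ms;
};

static void *poll_loop(void *arg)
{
    struct poller *p = arg;
    while (p->interval_ms) {           /* interval 0 => stop */
        printf("poll ring\n");
        usleep(p->interval_ms * 1000);
    }
    return NULL;
}

/* Cancel any running poller, then restart it at the new interval (0 = off). */
static void poller_restart(struct poller *p, unsigned ms)
{
    if (p->running) {
        p->interval_ms = 0;            /* ask the loop to exit... */
        pthread_join(p->thr, NULL);    /* ...and wait for it, like *_sync() */
        p->running = 0;
    }
    if (ms) {
        p->interval_ms = ms;
        if (pthread_create(&p->thr, NULL, poll_loop, p) == 0)
            p->running = 1;
    }
}

int main(void)
{
    struct poller p = {0};
    poller_restart(&p, 100);   /* start polling every 100 ms */
    usleep(350 * 1000);
    poller_restart(&p, 0);     /* stop */
    return 0;
}

 + +#define SUPPRESS_LOG_LEVEL 1 +int +dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress) +{ + int ret = BCME_OK; + int max_log_level; + int enable = (suppress) ?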
0 : 1; + linux_dbgring_info_t *os_priv; + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + + max_log_level = os_priv[FW_VERBOSE_RING_ID].log_level; + + if (max_log_level == SUPPRESS_LOG_LEVEL) { + /* suppress logging in FW so the host is not woken up while the device is in suspend mode */ + ret = dhd_iovar(dhdp, 0, "logtrace", (char *)&enable, sizeof(enable), NULL, 0, + TRUE); + if (ret < 0 && (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("logtrace iovar failed : %d\n", ret)); + } + } + + return ret; +} + +int +dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status) +{ + return dhd_dbg_get_ring_status(dhdp, ring_id, dbg_ring_status); +} + +int +dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + ring_id = dhd_dbg_find_ring_id(dhdp, ring_name); + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + os_priv = dhd_dbg_get_priv(dhdp); + if (os_priv) { + ring_info = &os_priv[ring_id]; + if (ring_info->interval) { + cancel_delayed_work_sync(&ring_info->work); + } + schedule_delayed_work(&ring_info->work, 0); + } else { + DHD_ERROR(("%s : os_priv is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + } + return ret; +} + +int +dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len) +{ + int ret = BCME_OK, i; + dhd_dbg_ring_entry_t msg_hdr; + log_conn_event_t *event_data = (log_conn_event_t *)data; + linux_dbgring_info_t *os_priv, *ring_info = NULL; + + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + os_priv = dhd_dbg_get_priv(dhdp); + + if (os_priv) { + ring_info = &os_priv[ring_id]; + } else + return BCME_NORESOURCE; + + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + + if (ring_id == DHD_EVENT_RING_ID) { + msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY; + msg_hdr.timestamp = osl_localtime_ns(); + /* convert to ms */ + msg_hdr.timestamp = DIV_U64_BY_U32(msg_hdr.timestamp, NSEC_PER_MSEC); + msg_hdr.len = data_len; + /* drop the event if its log level is higher than the ring's current log level */ + for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) { + if ((dhd_event_map[i].tag == event_data->event) && + dhd_event_map[i].log_level > ring_info->log_level) { + return ret; + } + } + } +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + else if (ring_id == FW_VERBOSE_RING_ID || ring_id == DRIVER_LOG_RING_ID || + ring_id == ROAM_STATS_RING_ID) { + msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP; + msg_hdr.timestamp = osl_localtime_ns(); + msg_hdr.timestamp = DIV_U64_BY_U32(msg_hdr.timestamp, NSEC_PER_MSEC); + msg_hdr.len = strlen(data); + } +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ + ret = dhd_dbg_push_to_ring(dhdp, ring_id, &msg_hdr, event_data); + if (ret) { + DHD_ERROR(("%s : failed to push data into the ring (%d) with ret(%d)\n", + __FUNCTION__, ring_id, ret)); + } + + return ret; +} + +#ifdef DBG_PKT_MON +int +dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_attach_pkt_monitor(dhdp, dhd_os_dbg_monitor_tx_pkts, + dhd_os_dbg_monitor_tx_status, dhd_os_dbg_monitor_rx_pkts); +} + +int +dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_start_pkt_monitor(dhdp); +} + +int +dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid) +{ + return dhd_dbg_monitor_tx_pkts(dhdp, pkt, pktid); +} + +int +dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
+ uint16 status) +{ + return dhd_dbg_monitor_tx_status(dhdp, pkt, pktid, status); +} + +int +dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt) +{ + return dhd_dbg_monitor_rx_pkts(dhdp, pkt); +} + +int +dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_stop_pkt_monitor(dhdp); +} + +int +dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + return dhd_dbg_monitor_get_tx_pkts(dhdp, user_buf, req_count, resp_count); +} + +int +dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + return dhd_dbg_monitor_get_rx_pkts(dhdp, user_buf, req_count, resp_count); +} + +int +dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_detach_pkt_monitor(dhdp); +} +#endif /* DBG_PKT_MON */ + +int +dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features) +{ + int ret = BCME_OK; +#ifdef DEBUGABILITY +#ifndef DEBUGABILITY_DISABLE_MEMDUMP + struct dhd_conf *conf = dhdp->conf; +#endif /* !DEBUGABILITY_DISABLE_MEMDUMP */ +#endif + + /* XXX : we need to find a way to get the features for dbg */ + *features = 0; +#ifdef DEBUGABILITY +#ifndef DEBUGABILITY_DISABLE_MEMDUMP + // fix for RequestFirmwareDebugDump issue of VTS + if ((conf->chip != BCM4359_CHIP_ID) && (conf->chip != BCM43751_CHIP_ID) && + (conf->chip != BCM43752_CHIP_ID) && (conf->chip != BCM4375_CHIP_ID)) + *features |= DBG_MEMORY_DUMP_SUPPORTED; +#endif /* !DEBUGABILITY_DISABLE_MEMDUMP */ + if (FW_SUPPORTED(dhdp, logtrace)) { + *features |= DBG_CONNECT_EVENT_SUPPORTED; + *features |= DBG_VERBOSE_LOG_SUPPORTED; + } + if (FW_SUPPORTED(dhdp, hchk)) { + *features |= DBG_HEALTH_CHECK_SUPPORTED; + } +#ifdef DBG_PKT_MON + if (FW_SUPPORTED(dhdp, d11status)) { + *features |= DBG_PACKET_FATE_SUPPORTED; + } +#endif /* DBG_PKT_MON */ +#endif /* DEBUGABILITY */ + return ret; +} + +static void +dhd_os_dbg_pullreq(void *os_priv, int ring_id) +{ + linux_dbgring_info_t *ring_info; + + ring_info = &((linux_dbgring_info_t *)os_priv)[ring_id]; + cancel_delayed_work(&ring_info->work); + schedule_delayed_work(&ring_info->work, 0); +} + +int +dhd_os_dbg_attach(dhd_pub_t *dhdp) +{ + int ret = BCME_OK; + linux_dbgring_info_t *os_priv, *ring_info; + int ring_id; + + /* os_dbg data */ + os_priv = MALLOCZ(dhdp->osh, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + if (!os_priv) + return BCME_NOMEM; + + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; + ring_id++) { + ring_info = &os_priv[ring_id]; + INIT_DELAYED_WORK(&ring_info->work, dbg_ring_poll_worker); + ring_info->dhdp = dhdp; + ring_info->ring_id = ring_id; + } + + ret = dhd_dbg_attach(dhdp, dhd_os_dbg_pullreq, dhd_os_dbg_urgent_notifier, os_priv); + if (ret) + MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + + return ret; +} + +void +dhd_os_dbg_detach(dhd_pub_t *dhdp) +{ + linux_dbgring_info_t *os_priv, *ring_info; + int ring_id; + /* free os_dbg data */ + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return; + /* abort any pending jobs */ + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + ring_info = &os_priv[ring_id]; + if (ring_info->interval) { + ring_info->interval = 0; + cancel_delayed_work_sync(&ring_info->work); + } + } + MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + + return dhd_dbg_detach(dhdp); +} diff --git a/bcmdhd.101.10.361.x/dhd_event_log_filter.c b/bcmdhd.101.10.361.x/dhd_event_log_filter.c new file mode 100755 index 0000000..7964c7b --- /dev/null +++
b/bcmdhd.101.10.361.x/dhd_event_log_filter.c @@ -0,0 +1,3236 @@ +/* + * Wifi dongle status Filter and Report + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +/* + * Filter MODULE and Report MODULE + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DHD_STATUS_LOGGING +#include +#endif /* DHD_STATUS_LOGGING */ + +#ifdef IL_BIGENDIAN +#include +#define htod32(i) (bcmswap32(i)) +#define htod16(i) (bcmswap16(i)) +#define dtoh32(i) (bcmswap32(i)) +#define dtoh16(i) (bcmswap16(i)) +#define htodchanspec(i) htod16(i) +#define dtohchanspec(i) dtoh16(i) +#else +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) +#endif /* IL_BIGENDIAN */ + +#define DHD_FILTER_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__)) +#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE +#define DHD_FILTER_TRACE_INTERNAL(fmt, ...) DHD_TRACE(("EWPF-" fmt, ##__VA_ARGS__)) +#else +#define DHD_FILTER_TRACE_INTERNAL(fmt, ...)
DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__)) +#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */ + +#define DHD_FILTER_ERR(x) DHD_FILTER_ERR_INTERNAL x +#define DHD_FILTER_TRACE(x) DHD_FILTER_TRACE_INTERNAL x + +/* ========= EWP Filter functions ============= */ +//#define EWPF_DEBUG +#define EWPF_DEBUG_BUF_LEN 512 +#define EWPF_VAL_CNT_PLINE 16 + +#define EWPF_REPORT_MAX_DATA 32 /* MAX record per slice */ + +#define EWPF_INVALID (-1) +#define EWPF_XTLV_INVALID 0 + +#define EWPF_MAX_IDX_TYPE 4 +#define EWPF_IDX_TYPE_SLICE 1 +#define EWPF_IDX_TYPE_IFACE 2 +#define EWPF_IDX_TYPE_EVENT 3 +#define EWPF_IDX_TYPE_KEY_INFO 4 + +#define EWPF_MAX_SLICE 2 /* MAX slice in dongle */ +#define EWPF_SLICE_MAIN 0 /* SLICE ID for 5GHZ */ +#define EWPF_SLICE_AUX 1 /* SLICE ID for 2GHZ */ + +#define EWPF_MAX_IFACE 2 /* MAX IFACE supported, 0: STA */ +#define EWPF_MAX_EVENT 1 /* MAX EVENT counter supported */ +#define EWPF_MAX_KEY_INFO 1 /* MAX KEY INFO counter supported */ + +#define EWPF_ARM_TO_MSEC 1 +#define EWPF_NO_UNIT_CONV 1 +#define EWPF_MSEC_TO_SEC 1000 +#define EWPF_USEC_TO_MSEC 1000 +#define EWPF_NSEC_TO_MSEC 1000000 +#define EWPF_USEC_TO_SEC 1000000 +#define EWPF_EPOCH 1000 +#define EWPF_NONSEC_TO_SEC 1000000000 +#define EWPF_REPORT_YEAR_MUL 10000 +#define EWPF_REPORT_MON_MUL 100 +#define EWPF_REPORT_HOUR_MUL 10000 +#define EWPF_REPORT_MIN_MUL 100 +#define EWPF_REPORT_MINUTES 60 +#define EWPF_REPORT_YEAR_BASE 1900 + +#define EWPF_NO_ABS FALSE +#define EWPF_NEED_ABS TRUE + +#define EWPF_MAX_INFO_TYPE 5 +#define EWPF_INFO_VER 0 +#define EWPF_INFO_TYPE 1 +#define EWPF_INFO_ECNT 2 +#define EWPF_INFO_IOVAR 3 +#define EWPF_INFO_CPLOG 4 +#define EWPF_INFO_DHDSTAT 5 + +#define EWPF_UPDATE_ARM_CYCLE_OFFSET 1 + +/* EWPF element of slice type */ +typedef struct { + uint32 armcycle; /* dongle arm cycle for this record */ + union { + wl_periodic_compact_cntrs_v1_t compact_cntr_v1; + wl_periodic_compact_cntrs_v2_t compact_cntr_v2; + wl_periodic_compact_cntrs_v3_t compact_cntr_v3; + }; + evt_hist_compact_toss_stats_v1_t hist_tx_toss_stat; + evt_hist_compact_toss_stats_v1_t hist_rx_toss_stat; + wlc_btc_stats_v4_t btc_stat; + wl_compact_he_cnt_wlc_v2_t compact_he_cnt; +} EWPF_slc_elem_t; + +/* EWPF element for interface type */ +typedef struct { + uint32 armcycle; /* dongle arm cycle for this record */ + wl_if_stats_t if_stat; + wl_lqm_t lqm; + wl_if_infra_stats_t infra; + wl_if_mgt_stats_t mgmt_stat; + wl_if_state_compact_t if_comp_stat; + wl_adps_dump_summary_v2_t adps_dump_summary; + wl_adps_energy_gain_v1_t adps_energy_gain; + wl_roam_stats_v1_t roam_stat; +} EWPF_ifc_elem_t; + +typedef struct { + uint32 first_armcycle; /* first dongle arm cycle for this record */ + uint32 updated_armcycle; /* last updated dongle arm cycle for this record */ + wl_event_based_statistics_v4_t event_stat; +} EWPF_event_elem_t; + +typedef struct { + uint32 first_armcycle; /* first dongle arm cycle for this record */ + uint32 updated_armcycle; /* last updated dongle arm cycle for this record */ + key_update_info_v1_t key_update_info; +} EWPF_key_info_elem_t; + +typedef struct { + uint32 first_armcycle; /* first dongle arm cycle for this record */ + uint32 updated_armcycle; /* last updated dongle arm cycle for this record */ + wl_roam_stats_v1_t roam_stat; +} EWPF_roam_stats_event_elem_t; + +typedef struct { + int enabled; /* enabled/disabled */ + dhd_pub_t *dhdp; + uint32 tmp_armcycle; /* global ARM CYCLE for TAG */ + int idx_type; /* 0 : SLICE, 1: IFACE */ + int xtlv_idx; /* Slice/Interface index : global for TAG */ + void
*s_ring[EWPF_MAX_SLICE]; + void *i_ring[EWPF_MAX_IFACE]; + void *e_ring[EWPF_MAX_EVENT]; + void *k_ring[EWPF_MAX_KEY_INFO]; + + /* used by Report module */ + uint8 last_bssid[ETHER_ADDR_LEN]; /* BSSID of last connect/request */ + int last_channel; + uint32 last_armcycle; /* ARM CYCLE prior to last connection */ +} EWP_filter_t; + +/* status gathering functions : XTLV callback functions */ +typedef int (*EWPF_filter_cb)(void *ctx, const uint8 *data, uint16 type, uint16 len); +static int evt_xtlv_print_cb(void *ctx, const uint8 *data, uint16 type, uint16 len); +static int evt_xtlv_copy_cb(void *ctx, const uint8 *data, uint16 type, uint16 len); +static int evt_xtlv_idx_cb(void *ctx, const uint8 *data, uint16 type, uint16 len); +static int evt_xtlv_type_cb(void *ctx, const uint8 *data, uint16 type, uint16 len); +static int filter_main_cb(void *ctx, const uint8 *data, uint16 type, uint16 len); +static int evt_xtlv_roam_cb(void *ctx, const uint8 *data, uint16 type, uint16 len); + +/* ========= Event Handler functions and their callbacks: ============= */ +typedef struct _EWPF_tbl { + uint16 xtlv_id; /* XTLV ID to handle */ + EWPF_filter_cb cb_func; /* specific callback function, usually for a structure */ + int idx_type; /* structure specific info: type it belongs to */ + int max_idx; /* structure specific info: ALLOWED MAX IDX */ + uint32 offset; /* offset of the structure in the EWPF_*_elem_t, valid if cb is not null */ + uint32 member_length; /* MAX length reserved for this structure */ + struct _EWPF_tbl *tbl; /* sub table if XTLV maps to XTLV */ +} EWPF_tbl_t; + +/* Context structure for XTLV callback */ +typedef struct { + dhd_pub_t *dhdp; + EWPF_tbl_t *tbl; +} EWPF_ctx_t; + +#define SLICE_INFO(a) EWPF_IDX_TYPE_SLICE, EWPF_MAX_SLICE, OFFSETOF(EWPF_slc_elem_t, a), \ + sizeof(((EWPF_slc_elem_t *)NULL)->a) +#define IFACE_INFO(a) EWPF_IDX_TYPE_IFACE, EWPF_MAX_IFACE, OFFSETOF(EWPF_ifc_elem_t, a), \ + sizeof(((EWPF_ifc_elem_t *)NULL)->a) +#define EVENT_INFO(a) EWPF_IDX_TYPE_EVENT, EWPF_MAX_EVENT, OFFSETOF(EWPF_event_elem_t, a), \ + sizeof(((EWPF_event_elem_t *)NULL)->a) +#define KEY_INFO(a) EWPF_IDX_TYPE_KEY_INFO, EWPF_MAX_KEY_INFO, OFFSETOF(EWPF_key_info_elem_t, a), \ + sizeof(((EWPF_key_info_elem_t *)NULL)->a) + +#define SLICE_U_SIZE(a) sizeof(((EWPF_slc_elem_t *)NULL)->a) +#define SLICE_INFO_UNION(a) EWPF_IDX_TYPE_SLICE, EWPF_MAX_SLICE, OFFSETOF(EWPF_slc_elem_t, a) +#define NONE_INFO(a) 0, 0, a, 0 +/* XTLV TBL for WL_SLICESTATS_XTLV_PERIODIC_STATE */ +static EWPF_tbl_t EWPF_periodic[] = +{ + { + WL_STATE_COMPACT_COUNTERS, + evt_xtlv_copy_cb, + SLICE_INFO(compact_cntr_v3), + NULL + }, + { + WL_STATE_COMPACT_HE_COUNTERS, + evt_xtlv_copy_cb, + SLICE_INFO(compact_he_cnt), + NULL + }, + {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL} +}; + +static EWPF_tbl_t EWPF_if_periodic[] = +{ + { + WL_STATE_IF_COMPACT_STATE, + evt_xtlv_copy_cb, + IFACE_INFO(if_comp_stat), + NULL + }, + { + WL_STATE_IF_ADPS_STATE, + evt_xtlv_copy_cb, + IFACE_INFO(adps_dump_summary), + NULL + }, + { + WL_STATE_IF_ADPS_ENERGY_GAIN, + evt_xtlv_copy_cb, + IFACE_INFO(adps_energy_gain), + NULL + }, + {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL} +}; + +static EWPF_tbl_t EWPF_roam[] = +{ + { + WL_IFSTATS_XTLV_ROAM_STATS_EVENT, + evt_xtlv_print_cb, + NONE_INFO(0), + NULL + }, + { + WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC, + evt_xtlv_copy_cb, + IFACE_INFO(roam_stat), + NULL + }, + {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL} +}; + +/* XTLV TBL for EVENT_LOG_TAG_STATS */ +static EWPF_tbl_t EWPF_main[] = +{ + /* MAIN XTLV */ + { + WL_IFSTATS_XTLV_WL_SLICE, +
evt_xtlv_type_cb, + NONE_INFO(0), + EWPF_main + }, + { + WL_IFSTATS_XTLV_IF, + evt_xtlv_type_cb, + NONE_INFO(0), + EWPF_main + }, + /* ID XTLVs */ + { + WL_IFSTATS_XTLV_SLICE_INDEX, + evt_xtlv_idx_cb, + NONE_INFO(0), + NULL + }, + { + WL_IFSTATS_XTLV_IF_INDEX, + evt_xtlv_idx_cb, + NONE_INFO(0), + NULL + }, + /* NORMAL XTLVS */ + { + WL_SLICESTATS_XTLV_PERIODIC_STATE, + NULL, + NONE_INFO(0), + EWPF_periodic + }, + { + WL_IFSTATS_XTLV_IF_LQM, + evt_xtlv_copy_cb, + IFACE_INFO(lqm), + NULL + }, + { + WL_IFSTATS_XTLV_GENERIC, + evt_xtlv_copy_cb, + IFACE_INFO(if_stat), + NULL + }, + { + WL_IFSTATS_XTLV_MGT_CNT, + evt_xtlv_copy_cb, + IFACE_INFO(mgmt_stat), + NULL + }, + { + WL_IFSTATS_XTLV_IF_PERIODIC_STATE, + NULL, + NONE_INFO(0), + EWPF_if_periodic + }, + { + WL_IFSTATS_XTLV_INFRA_SPECIFIC, + evt_xtlv_copy_cb, + IFACE_INFO(infra), + NULL + }, + { + WL_SLICESTATS_XTLV_HIST_TX_STATS, + evt_xtlv_copy_cb, + SLICE_INFO(hist_tx_toss_stat), + NULL + }, + { + WL_SLICESTATS_XTLV_HIST_RX_STATS, + evt_xtlv_copy_cb, + SLICE_INFO(hist_rx_toss_stat), + NULL + }, + { + WL_IFSTATS_XTLV_WL_SLICE_BTCOEX, + evt_xtlv_copy_cb, + SLICE_INFO(btc_stat), + NULL + }, + { + WL_IFSTATS_XTLV_IF_EVENT_STATS, + evt_xtlv_copy_cb, + EVENT_INFO(event_stat), + NULL + }, + { + WL_IFSTATS_XTLV_IF_EVENT_STATS, + evt_xtlv_print_cb, + NONE_INFO(0), + NULL + }, + { + WL_IFSTATS_XTLV_KEY_PLUMB_INFO, + evt_xtlv_copy_cb, + KEY_INFO(key_update_info), + NULL + }, + { + WL_IFSTATS_XTLV_ROAM_STATS_EVENT, + evt_xtlv_roam_cb, + NONE_INFO(0), + EWPF_roam + }, + { + WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC, + evt_xtlv_roam_cb, + IFACE_INFO(roam_stat), + EWPF_roam + }, + + {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL} +}; + +#if defined(DHD_EWPR_VER2) && defined(DHD_STATUS_LOGGING) + +#define EWP_DHD_STAT_SIZE 2 + +uint8 +dhd_statlog_filter[] = +{ + ST(WLAN_POWER_ON), /* Wi-Fi Power on */ + ST(WLAN_POWER_OFF), /* Wi-Fi Power off */ + ST(ASSOC_START), /* connect to the AP triggered by upper layer */ + ST(AUTH_DONE), /* complete to authenticate with the AP */ + ST(ASSOC_REQ), /* send or receive Assoc Req */ + ST(ASSOC_RESP), /* send or receive Assoc Resp */ + ST(ASSOC_DONE), /* complete to associate with the AP */ + ST(DISASSOC_START), /* disconnect from the associated AP by upper layer */ + ST(DISASSOC_INT_START), /* initiate the disassoc by DHD */ + ST(DISASSOC_DONE), /* complete to disconnect from the associated AP */ + ST(DISASSOC), /* send or receive Disassoc */ + ST(DEAUTH), /* send or receive Deauth */ + ST(LINKDOWN), /* receive the link down event */ + ST(REASSOC_START), /* reassoc to the candidate AP */ + ST(REASSOC_INFORM), /* inform reassoc completion to upper layer */ + ST(REASSOC_DONE), /* complete to reassoc */ + ST(EAPOL_M1), /* send or receive the EAPOL M1 */ + ST(EAPOL_M2), /* send or receive the EAPOL M2 */ + ST(EAPOL_M3), /* send or receive the EAPOL M3 */ + ST(EAPOL_M4), /* send or receive the EAPOL M4 */ + ST(EAPOL_GROUPKEY_M1), /* send or receive the EAPOL Group key handshake M1 */ + ST(EAPOL_GROUPKEY_M2), /* send or receive the EAPOL Group key handshake M2 */ + ST(EAP_REQ_IDENTITY), /* send or receive the EAP REQ IDENTITY */ + ST(EAP_RESP_IDENTITY), /* send or receive the EAP RESP IDENTITY */ + ST(EAP_REQ_TLS), /* send or receive the EAP REQ TLS */ + ST(EAP_RESP_TLS), /* send or receive the EAP RESP TLS */ + ST(EAP_REQ_LEAP), /* send or receive the EAP REQ LEAP */ + ST(EAP_RESP_LEAP), /* send or receive the EAP RESP LEAP */ + ST(EAP_REQ_TTLS), /* send or receive the EAP REQ TTLS */ + ST(EAP_RESP_TTLS), /* send or receive the
EAP RESP TTLS */ + ST(EAP_REQ_AKA), /* send or receive the EAP REQ AKA */ + ST(EAP_RESP_AKA), /* send or receive the EAP RESP AKA */ + ST(EAP_REQ_PEAP), /* send or receive the EAP REQ PEAP */ + ST(EAP_RESP_PEAP), /* send or receive the EAP RESP PEAP */ + ST(EAP_REQ_FAST), /* send or receive the EAP REQ FAST */ + ST(EAP_RESP_FAST), /* send or receive the EAP RESP FAST */ + ST(EAP_REQ_PSK), /* send or receive the EAP REQ PSK */ + ST(EAP_RESP_PSK), /* send or receive the EAP RESP PSK */ + ST(EAP_REQ_AKAP), /* send or receive the EAP REQ AKAP */ + ST(EAP_RESP_AKAP), /* send or receive the EAP RESP AKAP */ + ST(EAP_SUCCESS), /* send or receive the EAP SUCCESS */ + ST(EAP_FAILURE), /* send or receive the EAP FAILURE */ + ST(EAPOL_START), /* send or receive the EAPOL-START */ + ST(WSC_START), /* send or receive the WSC START */ + ST(WSC_DONE), /* send or receive the WSC DONE */ + ST(WPS_M1), /* send or receive the WPS M1 */ + ST(WPS_M2), /* send or receive the WPS M2 */ + ST(WPS_M3), /* send or receive the WPS M3 */ + ST(WPS_M4), /* send or receive the WPS M4 */ + ST(WPS_M5), /* send or receive the WPS M5 */ + ST(WPS_M6), /* send or receive the WPS M6 */ + ST(WPS_M7), /* send or receive the WPS M7 */ + ST(WPS_M8), /* send or receive the WPS M8 */ + ST(8021X_OTHER), /* send or receive the other 8021X frames */ + ST(INSTALL_KEY), /* install the key */ + ST(DELETE_KEY), /* remove the key */ + ST(INSTALL_PMKSA), /* install PMKID information */ + ST(INSTALL_OKC_PMK), /* install PMKID information for OKC */ + ST(DHCP_DISCOVER), /* send or recv DHCP Discover */ + ST(DHCP_OFFER), /* send or recv DHCP Offer */ + ST(DHCP_REQUEST), /* send or recv DHCP Request */ + ST(DHCP_DECLINE), /* send or recv DHCP Decline */ + ST(DHCP_ACK), /* send or recv DHCP ACK */ + ST(DHCP_NAK), /* send or recv DHCP NACK */ + ST(DHCP_RELEASE), /* send or recv DHCP Release */ + ST(DHCP_INFORM), /* send or recv DHCP Inform */ + ST(REASSOC_SUCCESS), /* reassociation success */ + ST(REASSOC_FAILURE), /* reassociation failure */ + ST(AUTH_TIMEOUT), /* authentication timeout */ + ST(AUTH_FAIL), /* authentication failure */ + ST(AUTH_NO_ACK), /* authentication failure due to no ACK */ + ST(AUTH_OTHERS), /* authentication failure with other status */ + ST(ASSOC_TIMEOUT), /* association timeout */ + ST(ASSOC_FAIL), /* association failure */ + ST(ASSOC_NO_ACK), /* association failure due to no ACK */ + ST(ASSOC_ABORT), /* association abort */ + ST(ASSOC_UNSOLICITED), /* association unsolicited */ + ST(ASSOC_NO_NETWORKS), /* association failure due to no networks */ + ST(ASSOC_OTHERS), /* association failure with other status */ + ST(REASSOC_DONE_OTHERS) /* complete to reassoc with other reason */ +}; +#endif /* DHD_EWPR_VER2 && DHD_STATUS_LOGGING */ + +/* ========= Module functions : exposed to others ============= */ +int +dhd_event_log_filter_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size) +{ + + EWP_filter_t *filter; + int idx; + uint32 req_size; + uint32 s_ring_size; /* slice ring */ + uint32 i_ring_size; /* interface ring */ + uint32 e_ring_size; /* event counter ring */ + uint32 k_ring_size; /* key info ring */ + uint8 *buf_ptr = buf; + EWPF_ctx_t ctx; + wl_event_based_statistics_v4_t dummy_event_stat; + key_update_info_v1_t dummy_key_update_info; +#if defined(DHD_EWPR_VER2) && defined(DHD_STATUS_LOGGING) + stat_bdmask_req_t req; +#endif /* DHD_EWPR_VER2 && DHD_STATUS_LOGGING */ + + DHD_FILTER_ERR(("STARTED\n")); + + if (!dhdp || !buf) { + DHD_FILTER_ERR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf)); + return BCME_ERROR; + } + +
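/* + * Layout sketch of the caller-provided buffer, as sized just below: one + * EWP_filter_t control block, followed by EWPF_MAX_SLICE slice rings, + * EWPF_MAX_IFACE interface rings, EWPF_MAX_EVENT event rings and + * EWPF_MAX_KEY_INFO key info rings, where each ring occupies + * dhd_ring_get_hdr_size() + sizeof(elem) * EWPF_REPORT_MAX_DATA bytes; + * req_size accumulates exactly this total. + */ + 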
i_ring_size = s_ring_size = e_ring_size = k_ring_size = dhd_ring_get_hdr_size(); + s_ring_size += ((uint32)sizeof(EWPF_slc_elem_t)) * EWPF_REPORT_MAX_DATA; + i_ring_size += ((uint32)sizeof(EWPF_ifc_elem_t)) * EWPF_REPORT_MAX_DATA; + e_ring_size += ((uint32)sizeof(EWPF_event_elem_t)) * EWPF_REPORT_MAX_DATA; + k_ring_size += ((uint32)sizeof(EWPF_key_info_elem_t)) * EWPF_REPORT_MAX_DATA; + + req_size = s_ring_size * EWPF_MAX_SLICE + i_ring_size * EWPF_MAX_IFACE + + e_ring_size * EWPF_MAX_EVENT + k_ring_size * EWPF_MAX_KEY_INFO; + req_size += (uint32)sizeof(EWP_filter_t); + + if (buf_size < req_size) { + DHD_FILTER_ERR(("BUF SIZE IS TOO SHORT: req:%d buf_size:%d\n", + req_size, buf_size)); + return BCME_ERROR; + } + + BCM_REFERENCE(dhdp); + filter = (EWP_filter_t *)buf; + buf_ptr += sizeof(EWP_filter_t); + + /* initialize control block */ + memset(filter, 0, sizeof(EWP_filter_t)); + + filter->idx_type = EWPF_INVALID; + filter->xtlv_idx = EWPF_INVALID; + filter->tmp_armcycle = 0; + + for (idx = 0; idx < EWPF_MAX_SLICE; idx++) { + filter->s_ring[idx] = dhd_ring_init(dhdp, buf_ptr, s_ring_size, + sizeof(EWPF_slc_elem_t), EWPF_REPORT_MAX_DATA, + DHD_RING_TYPE_FIXED); + if (!filter->s_ring[idx]) { + DHD_FILTER_ERR(("FAIL TO INIT SLICE RING: %d\n", idx)); + return BCME_ERROR; + } + buf_ptr += s_ring_size; + } + + for (idx = 0; idx < EWPF_MAX_IFACE; idx++) { + filter->i_ring[idx] = dhd_ring_init(dhdp, buf_ptr, i_ring_size, + sizeof(EWPF_ifc_elem_t), EWPF_REPORT_MAX_DATA, + DHD_RING_TYPE_FIXED); + if (!filter->i_ring[idx]) { + DHD_FILTER_ERR(("FAIL TO INIT INTERFACE RING: %d\n", idx)); + return BCME_ERROR; + } + buf_ptr += i_ring_size; + } + + for (idx = 0; idx < EWPF_MAX_EVENT; idx++) { + filter->e_ring[idx] = dhd_ring_init(dhdp, buf_ptr, e_ring_size, + sizeof(EWPF_event_elem_t), EWPF_REPORT_MAX_DATA, + DHD_RING_TYPE_FIXED); + if (!filter->e_ring[idx]) { + DHD_FILTER_ERR(("FAIL TO INIT EVENT RING: %d\n", idx)); + return BCME_ERROR; + } + buf_ptr += e_ring_size; + } + + for (idx = 0; idx < EWPF_MAX_KEY_INFO; idx++) { + filter->k_ring[idx] = dhd_ring_init(dhdp, buf_ptr, k_ring_size, + sizeof(EWPF_key_info_elem_t), EWPF_REPORT_MAX_DATA, + DHD_RING_TYPE_FIXED); + if (!filter->k_ring[idx]) { + DHD_FILTER_ERR(("FAIL TO INIT KEY INFO RING: %d\n", idx)); + return BCME_ERROR; + } + buf_ptr += k_ring_size; + } + + dhdp->event_log_filter = filter; + filter->dhdp = dhdp; + filter->enabled = TRUE; + + /* + * put dummy elements into the event based rings to prevent errors + * in case no event has happened when data collection is triggered + */ + ctx.dhdp = dhdp; + ctx.tbl = EWPF_main; + memset(&dummy_event_stat, 0x00, sizeof(dummy_event_stat)); + evt_xtlv_copy_cb(&ctx, (uint8 *)&dummy_event_stat, WL_IFSTATS_XTLV_IF_EVENT_STATS, + sizeof(wl_event_based_statistics_v4_t)); + + memset(&dummy_key_update_info, 0x00, sizeof(dummy_key_update_info)); + evt_xtlv_copy_cb(&ctx, (uint8 *)&dummy_key_update_info, WL_IFSTATS_XTLV_KEY_PLUMB_INFO, + sizeof(key_update_info_v1_t)); + +#if defined(DHD_EWPR_VER2) && defined(DHD_STATUS_LOGGING) + /* create status filter for bigdata logging */ + req.req_buf = dhd_statlog_filter; + req.req_buf_len = sizeof(dhd_statlog_filter); + dhd_statlog_generate_bdmask(dhdp, &req); +#endif /* DHD_EWPR_VER2 && DHD_STATUS_LOGGING */ + + return BCME_OK; +} + +void +dhd_event_log_filter_deinit(dhd_pub_t *dhdp) +{ + EWP_filter_t *filter; + int idx; + + if (!dhdp) { + return; + } + + if (dhdp->event_log_filter) { + filter = (EWP_filter_t *)dhdp->event_log_filter; + for (idx = 0; idx < EWPF_MAX_SLICE; idx
++) { + dhd_ring_deinit(dhdp, filter->s_ring[idx]); + } + for (idx = 0; idx < EWPF_MAX_IFACE; idx ++) { + dhd_ring_deinit(dhdp, filter->i_ring[idx]); + } + for (idx = 0; idx < EWPF_MAX_EVENT; idx ++) { + dhd_ring_deinit(dhdp, filter->e_ring[idx]); + } + for (idx = 0; idx < EWPF_MAX_KEY_INFO; idx ++) { + dhd_ring_deinit(dhdp, filter->k_ring[idx]); + } + dhdp->event_log_filter = NULL; + } +} + +void +dhd_event_log_filter_notify_connect_request(dhd_pub_t *dhdp, uint8 *bssid, int channel) +{ + EWP_filter_t *filter; + void *last_elem; + + if (!dhdp || !dhdp->event_log_filter) { + return; + } + + filter = (EWP_filter_t *)dhdp->event_log_filter; + if (filter->enabled != TRUE) { + DHD_FILTER_ERR(("EWP Filter is not enabled\n")); + return; + } + + memcpy(filter->last_bssid, bssid, ETHER_ADDR_LEN); + filter->last_channel = channel; + + /* Refer STA interface */ + last_elem = dhd_ring_get_last(filter->i_ring[0]); + if (last_elem == NULL) { + filter->last_armcycle = 0; + } else { + /* EXCLUDE before connect start */ + filter->last_armcycle = *(uint32 *)last_elem + EWPF_EPOCH + 1; + } +} + +void +dhd_event_log_filter_notify_connect_done(dhd_pub_t *dhdp, uint8 *bssid, int roam) +{ + EWP_filter_t *filter; + void *last_elem; + int channel; + char buf[EWPF_DEBUG_BUF_LEN]; + int ret; + uint32 armcycle; + struct channel_info *ci; + + if (!dhdp || !dhdp->event_log_filter) { + return; + } + + filter = (EWP_filter_t *)dhdp->event_log_filter; + if (filter->enabled != TRUE) { + DHD_FILTER_ERR(("EWP Filter is not enabled\n")); + return; + } + + /* GET CHANNEL */ + *(uint32 *)buf = htod32(EWPF_DEBUG_BUF_LEN); + ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_CHANNEL, buf, EWPF_DEBUG_BUF_LEN, FALSE, 0); + if (ret != BCME_OK) { + DHD_FILTER_ERR(("FAIL TO GET CHANNEL: %d\n", ret)); + return; + } + + ci = (struct channel_info *)(buf + sizeof(uint32)); + channel = dtoh32(ci->hw_channel); + DHD_FILTER_TRACE(("CHANNEL:prev %d new:%d\n", filter->last_channel, channel)); + + memcpy(filter->last_bssid, bssid, ETHER_ADDR_LEN); + filter->last_channel = channel; + if (roam == FALSE) { + return; + } + + /* update connect time for roam */ + /* Refer STA interface */ + last_elem = dhd_ring_get_last(filter->i_ring[0]); + if (last_elem == NULL) { + armcycle = 0; + } else { + /* EXCLUDE before roam done */ + armcycle = *(uint32 *)last_elem + EWPF_EPOCH + 1; + } + + filter->last_armcycle = armcycle; +} + +static int +evt_xtlv_print_cb(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx; + EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter; + uint32 armcycle = 0; + uint8 bssid[ETHER_ADDR_LEN]; + uint32 initial_assoc_time = 0; + uint32 prev_roam_time = 0; + uint32 last_roam_event_type = 0; + uint32 last_roam_event_status = 0; + uint32 last_roam_event_reason = 0; + wl_wips_event_info_t wips_event; + bzero(&wips_event, sizeof(wips_event)); + + DHD_FILTER_TRACE(("%s type:%d %x len:%d %x\n", __FUNCTION__, type, type, len, len)); + + /* get current armcycle */ + if (filter) { + armcycle = filter->tmp_armcycle; + } + if (type == WL_IFSTATS_XTLV_IF_EVENT_STATS) { + wl_event_based_statistics_v1_t *elem; + + elem = (wl_event_based_statistics_v1_t *)(uintptr_t)data; + if (elem->txdeauthivalclass > 0) { + memcpy(bssid, &elem->BSSID, ETHER_ADDR_LEN); + DHD_ERROR(("DHD STA sent DEAUTH frame with invalid class : %d times" + ", BSSID("MACDBG")\n", elem->txdeauthivalclass, MAC2STRDBG(bssid))); + } + if (elem->version == WL_EVENT_STATISTICS_VER_2) { + wl_event_based_statistics_v2_t *elem_v2; + +
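/* + * The versioned statistics structures evidently share the v1 header: + * 'data' was read through the v1 view above to check 'version', and is + * re-cast below to the matching v2/v3/v4 layout to reach the WIPS and + * roam fields that the v1 view does not expose. + */ + 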
elem_v2 = (wl_event_based_statistics_v2_t *)(uintptr_t)data; + memcpy(&wips_event.bssid, &elem_v2->last_deauth, ETHER_ADDR_LEN); + wips_event.misdeauth = elem_v2->misdeauth; + wips_event.current_RSSI = elem_v2->cur_rssi; + wips_event.deauth_RSSI = elem_v2->deauth_rssi; + wips_event.timestamp = elem_v2->timestamp; + } else if (elem->version == WL_EVENT_STATISTICS_VER_3) { + wl_event_based_statistics_v3_t *elem_v3; + + elem_v3 = (wl_event_based_statistics_v3_t *)(uintptr_t)data; + memcpy(&wips_event.bssid, &elem_v3->last_deauth, ETHER_ADDR_LEN); + wips_event.misdeauth = elem_v3->misdeauth; + wips_event.current_RSSI = elem_v3->cur_rssi; + wips_event.deauth_RSSI = elem_v3->deauth_rssi; + wips_event.timestamp = elem_v3->timestamp; + /* roam statistics */ + initial_assoc_time = elem_v3->initial_assoc_time; + prev_roam_time = elem_v3->prev_roam_time; + last_roam_event_type = elem_v3->last_roam_event_type; + last_roam_event_status = elem_v3->last_roam_event_status; + last_roam_event_reason = elem_v3->last_roam_event_reason; + } else if (elem->version == WL_EVENT_STATISTICS_VER_4) { + wl_event_based_statistics_v4_t *elem_v4; + + elem_v4 = (wl_event_based_statistics_v4_t *)(uintptr_t)data; + memcpy(&wips_event.bssid, &elem_v4->last_deauth, ETHER_ADDR_LEN); + wips_event.misdeauth = elem_v4->misdeauth; + wips_event.current_RSSI = elem_v4->cur_rssi; + wips_event.deauth_RSSI = elem_v4->deauth_rssi; + wips_event.timestamp = elem_v4->timestamp; + } + if (wips_event.misdeauth > 1) { + DHD_ERROR(("WIPS attack!! cnt=%d, curRSSI=%d, deauthRSSI=%d " + ", time=%d, MAC="MACDBG"\n", + wips_event.misdeauth, wips_event.current_RSSI, + wips_event.deauth_RSSI, wips_event.timestamp, + MAC2STRDBG(&wips_event.bssid))); +#if defined(WL_CFG80211) && defined(WL_WIPSEVT) + wl_cfg80211_wips_event_ext(&wips_event); +#endif /* WL_CFG80211 && WL_WIPSEVT */ + } + } else if (type == WL_IFSTATS_XTLV_ROAM_STATS_EVENT) { + wl_roam_stats_v1_t *roam_elem; + roam_elem = (wl_roam_stats_v1_t *)(uintptr_t)data; + if (roam_elem->version == WL_ROAM_STATS_VER_1) { + wl_roam_stats_v1_t *roam_elem_v1; + + roam_elem_v1 = (wl_roam_stats_v1_t *)(uintptr_t)data; + /* roam statistics */ + initial_assoc_time = roam_elem_v1->initial_assoc_time; + prev_roam_time = roam_elem_v1->prev_roam_time; + last_roam_event_type = roam_elem_v1->last_roam_event_type; + last_roam_event_status = roam_elem_v1->last_roam_event_status; + last_roam_event_reason = roam_elem_v1->last_roam_event_reason; + } + } else { + DHD_FILTER_ERR(("%s TYPE(%d) IS NOT SUPPORTED FOR PRINTING\n", + __FUNCTION__, type)); + return BCME_ERROR; + } + if (initial_assoc_time > 0 && prev_roam_time > 0) { + DHD_ERROR(("Last roam event before disconnection : " + "current armcycle %d, initial assoc time %d, " + "last event time %d, type %d, status %d, reason %d\n", + armcycle, initial_assoc_time, prev_roam_time, + last_roam_event_type, last_roam_event_status, + last_roam_event_reason)); + } + + return BCME_OK; +} + +#ifdef BCM_SDC +static int +evt_get_last_toss_hist(uint8 *ptr, const uint8 *data, uint16 len) +{ + bcm_xtlv_t *bcm_xtlv_desc = (bcm_xtlv_t *)data; + wl_hist_compact_toss_stats_v2_t *ewp_stats; + evt_hist_compact_toss_stats_v1_t bidata_stats; + int16 max_rcidx = EWPF_INVALID, secnd_rcidx = EWPF_INVALID; + uint16 cur_rnidx = 0, prev_rnidx = 0; + uint16 max_rccnt = 0, cur_rccnt = 0; + uint16 idx; + + if (!ptr || !data) { + return BCME_ERROR; + } + + if (bcm_xtlv_desc->len != sizeof(wl_hist_compact_toss_stats_v2_t)) { + DHD_FILTER_ERR(("%s : size mismatch %d\n", __FUNCTION__, +
bcm_xtlv_desc->len)); + return BCME_ERROR; + } + + ewp_stats = (wl_hist_compact_toss_stats_v2_t *)(&bcm_xtlv_desc->data[0]); + if (ewp_stats->htr_type == WL_STATE_HIST_TX_TOSS_REASONS) { + if (ewp_stats->version != WL_HIST_COMPACT_TOSS_STATS_TX_VER_2) { + DHD_FILTER_ERR(("%s : unsupported version %d (type: %d)\n", + __FUNCTION__, ewp_stats->version, ewp_stats->htr_type)); + return BCME_ERROR; + } + } else if (ewp_stats->htr_type == WL_STATE_HIST_RX_TOSS_REASONS) { + if (ewp_stats->version != WL_HIST_COMPACT_TOSS_STATS_RX_VER_2) { + DHD_FILTER_ERR(("%s : unsupported version %d (type: %d)\n", + __FUNCTION__, ewp_stats->version, ewp_stats->htr_type)); + return BCME_ERROR; + } + } else { + DHD_FILTER_ERR(("%s : unsupported type %d\n", __FUNCTION__, + ewp_stats->htr_type)); + return BCME_ERROR; + } + /* + * htr_rnidx points to the next empty slot to be used + * Need to get the previous index, which is valid + */ + if (ewp_stats->htr_rnidx > 0) { + cur_rnidx = ewp_stats->htr_rnidx - 1; + } else { + cur_rnidx = WLC_HIST_TOSS_LEN - 1; + } + if (cur_rnidx > 0) { + prev_rnidx = cur_rnidx - 1; + } else { + prev_rnidx = WLC_HIST_TOSS_LEN - 1; + } + /* + * Need to get the largest count of toss reasons + */ + for (idx = 0; idx < WLC_HIST_TOSS_LEN; idx ++) { + cur_rccnt = (uint16)((ewp_stats->htr_rc[idx] & + HIST_TOSS_RC_COUNT_MASK)>>HIST_TOSS_RC_COUNT_POS); + DHD_FILTER_TRACE(("%s: idx %d htr_rc %04x cur_rccnt %d\n", + __FUNCTION__, idx, ewp_stats->htr_rc[idx], cur_rccnt)); + if (ewp_stats->htr_rc_ts[idx] && max_rccnt < cur_rccnt) { + max_rccnt = cur_rccnt; + secnd_rcidx = max_rcidx; + max_rcidx = idx; + DHD_FILTER_TRACE(("%s: max_rcidx updated -" + "max_rcidx %d secnd_rcidx %d\n", + __FUNCTION__, max_rcidx, secnd_rcidx)); + } + } + + memset(&bidata_stats, 0x00, sizeof(bidata_stats)); + bidata_stats.version = ewp_stats->version; + bidata_stats.htr_type = ewp_stats->htr_type; + bidata_stats.htr_num = ewp_stats->htr_num; + bidata_stats.htr_rn_last = ewp_stats->htr_running[cur_rnidx]; + bidata_stats.htr_rn_ts_last = ewp_stats->htr_rn_ts[cur_rnidx]; + bidata_stats.htr_rn_prev = ewp_stats->htr_running[prev_rnidx]; + bidata_stats.htr_rn_ts_prev = ewp_stats->htr_rn_ts[prev_rnidx]; + if (max_rcidx != EWPF_INVALID) { + bidata_stats.htr_rc_max = ewp_stats->htr_rc[max_rcidx]; + bidata_stats.htr_rc_ts_max = ewp_stats->htr_rc_ts[max_rcidx]; + } + if (secnd_rcidx != EWPF_INVALID) { + bidata_stats.htr_rc_secnd = ewp_stats->htr_rc[secnd_rcidx]; + bidata_stats.htr_rc_ts_secnd = ewp_stats->htr_rc_ts[secnd_rcidx]; + } + DHD_FILTER_TRACE(("%s: ver %d type %d num %d " + "htr_rn_last %d htr_rn_ts_last %d htr_rn_prev %d htr_rn_ts_prev %d " + "htr_rc_max %d htr_rc_ts_max %d htr_rc_secnd %d htr_rc_ts_secnd %d\n", + __FUNCTION__, bidata_stats.version, + bidata_stats.htr_type, bidata_stats.htr_num, + bidata_stats.htr_rn_last, bidata_stats.htr_rn_ts_last, + bidata_stats.htr_rn_prev, bidata_stats.htr_rn_ts_prev, + bidata_stats.htr_rc_max, bidata_stats.htr_rc_ts_max, + bidata_stats.htr_rc_secnd, bidata_stats.htr_rc_ts_secnd)); + + memcpy(ptr, &bidata_stats, sizeof(bidata_stats)); + + return BCME_OK; +} +#endif /* BCM_SDC */ + +static int +evt_xtlv_copy_cb(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx; + EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter; + uint32 *armcycle; + EWPF_tbl_t *tbl; + void *ring; + void *target; + uint8 *ptr; + int tbl_idx; + uint32 elem_size; + + DHD_FILTER_TRACE(("%s type:%d %x len:%d %x\n", __FUNCTION__, type, type, len, len)); + + for
(tbl_idx = 0; ; tbl_idx++) { + if (cur_ctx->tbl[tbl_idx].xtlv_id == EWPF_XTLV_INVALID) { + DHD_FILTER_ERR(("%s NOT SUPPORTED TYPE(%d)\n", __FUNCTION__, type)); + return BCME_OK; + } + if (cur_ctx->tbl[tbl_idx].xtlv_id == type) { + tbl = &cur_ctx->tbl[tbl_idx]; + break; + } + } + + /* Set index type and xtlv_idx for event stats and key plumb info */ + if (type == WL_IFSTATS_XTLV_IF_EVENT_STATS) { + filter->idx_type = EWPF_IDX_TYPE_EVENT; + filter->xtlv_idx = 0; + DHD_FILTER_TRACE(("EVENT XTLV\n")); + } else if (type == WL_IFSTATS_XTLV_KEY_PLUMB_INFO) { + filter->idx_type = EWPF_IDX_TYPE_KEY_INFO; + filter->xtlv_idx = 0; + DHD_FILTER_TRACE(("KEY INFO XTLV\n")); + } + + /* Validate index info */ + if (filter->idx_type == EWPF_INVALID || + filter->xtlv_idx == EWPF_INVALID || + filter->idx_type != tbl->idx_type || + filter->xtlv_idx >= tbl->max_idx) { + DHD_FILTER_ERR(("XTLV VALIDATION FAILED: type:%x xtlv:%x idx:%d\n", + filter->idx_type, tbl->xtlv_id, filter->xtlv_idx)); + return BCME_OK; + } + + /* SET RING INFO */ + if (filter->idx_type == EWPF_IDX_TYPE_SLICE) { + ring = filter->s_ring[filter->xtlv_idx]; + elem_size = sizeof(EWPF_slc_elem_t); + } else if (filter->idx_type == EWPF_IDX_TYPE_IFACE) { + ring = filter->i_ring[filter->xtlv_idx]; + elem_size = sizeof(EWPF_ifc_elem_t); + } else if (filter->idx_type == EWPF_IDX_TYPE_EVENT) { + DHD_FILTER_TRACE(("%s: EWPF_IDX_TYPE_EVENT FOUND\n", + __FUNCTION__)); + ring = filter->e_ring[filter->xtlv_idx]; + elem_size = sizeof(EWPF_event_elem_t); + } else if (filter->idx_type == EWPF_IDX_TYPE_KEY_INFO) { + DHD_FILTER_TRACE(("%s: EWPF_IDX_TYPE_KEY_INFO FOUND\n", + __FUNCTION__)); + ring = filter->k_ring[filter->xtlv_idx]; + elem_size = sizeof(EWPF_key_info_elem_t); + } else { + DHD_FILTER_TRACE(("%s unsupported idx_type:%d\n", + __FUNCTION__, filter->idx_type)); + return BCME_OK; + } + + /* Check whether the armcycle epoch has changed */ + target = dhd_ring_get_last(ring); + if (target != NULL) { + armcycle = (uint32 *)target; + if (*armcycle + EWPF_EPOCH <= filter->tmp_armcycle) { + /* EPOCH has changed (more than 1 sec) */ + target = NULL; + } else if (*armcycle - EWPF_EPOCH >= filter->tmp_armcycle) { + /* dongle is rebooted */ + target = NULL; + } + } + + if (target == NULL) { + /* Get new idx */ + target = dhd_ring_get_empty(ring); + if (target == NULL) { + /* no available slot because the oldest slot is locked */ + DHD_FILTER_ERR(("SKIP logging xtlv(%x) due to locking\n", type)); + return BCME_OK; + } + + /* clean up target */ + armcycle = (uint32 *)target; + memset(target, 0, elem_size); + memcpy(armcycle, &filter->tmp_armcycle, sizeof(*armcycle)); + } + +#ifdef EWPF_DEBUG + DHD_FILTER_ERR(("idx:%d write_:%p %u %u\n", + filter->xtlv_idx, target, *armcycle, filter->tmp_armcycle)); +#endif + + /* Additionally put updated armcycle for event based EWP */ + if (filter->idx_type == EWPF_IDX_TYPE_EVENT || + filter->idx_type == EWPF_IDX_TYPE_KEY_INFO) { + DHD_FILTER_TRACE(("%s: updated armcycle for event based EWP\n", + __FUNCTION__)); + memcpy((uint32 *)(armcycle + EWPF_UPDATE_ARM_CYCLE_OFFSET), + &filter->tmp_armcycle, sizeof(*armcycle)); + } + + ptr = (uint8 *)target; + +#ifdef DHD_EWPR_VER2 + if (tbl->xtlv_id == WL_SLICESTATS_XTLV_HIST_TX_STATS || + tbl->xtlv_id == WL_SLICESTATS_XTLV_HIST_RX_STATS) { +#ifdef BCM_SDC + int err; + + DHD_FILTER_TRACE(("TOSS_REASONS received (%d)\n", tbl->xtlv_id)); + + err = evt_get_last_toss_hist(ptr + cur_ctx->tbl[tbl_idx].offset, data, len); + if (err) { + DHD_FILTER_ERR(("%s: get toss hist failed\n", + __FUNCTION__)); + return
BCME_ERROR; + } +#else + DHD_FILTER_ERR(("%s: Unable to copy hist TX/RX stats, BCM_SDC must be included\n", + __FUNCTION__)); +#endif /* BCM_SDC */ + } else +#endif /* DHD_EWPR_VER2 */ + { + /* XXX multi-version structures shall use the same layout as the old version */ + if (len > cur_ctx->tbl[tbl_idx].member_length) { + DHD_FILTER_TRACE(("data length is too big to save: (alloc = %d), " + "(data = %d)\n", cur_ctx->tbl[tbl_idx].member_length, len)); + len = cur_ctx->tbl[tbl_idx].member_length; + } + + memcpy(ptr + cur_ctx->tbl[tbl_idx].offset, data, len); + } + return BCME_OK; +} + +static int +evt_xtlv_idx_cb(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx; + EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter; + + filter->xtlv_idx = data[0]; + + if (filter->idx_type == EWPF_IDX_TYPE_SLICE) { + if (type != WL_IFSTATS_XTLV_SLICE_INDEX || + filter->xtlv_idx >= EWPF_MAX_SLICE) { + goto idx_fail; + } + } else if (filter->idx_type == EWPF_IDX_TYPE_IFACE) { + if (type != WL_IFSTATS_XTLV_IF_INDEX || + filter->xtlv_idx >= EWPF_MAX_IFACE) { + DHD_FILTER_ERR(("CHANGE IFACE TO 0 BY FORCE\n")); + return BCME_OK; + } + } else { + goto idx_fail; + } + return BCME_OK; + +idx_fail: + DHD_FILTER_ERR(("UNEXPECTED IDX XTLV: filter_type:%d input_type:%x idx:%d\n", + filter->idx_type, type, filter->xtlv_idx)); + filter->idx_type = EWPF_INVALID; + filter->xtlv_idx = EWPF_INVALID; + return BCME_OK; +} + +static int +evt_xtlv_type_cb(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx; + EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter; + + if (type == WL_IFSTATS_XTLV_WL_SLICE) { + filter->idx_type = EWPF_IDX_TYPE_SLICE; + DHD_FILTER_TRACE(("SLICE XTLV\n")); + } else if (type == WL_IFSTATS_XTLV_IF) { + filter->idx_type = EWPF_IDX_TYPE_IFACE; + DHD_FILTER_TRACE(("IFACE XTLV\n")); + } + + bcm_unpack_xtlv_buf(ctx, data, len, + BCM_XTLV_OPTION_ALIGN32, filter_main_cb); + return BCME_OK; +} + +static int +evt_xtlv_roam_cb(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + + EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx; + EWPF_tbl_t *new_tbl = EWPF_roam; + EWPF_ctx_t sub_ctx; + int idx; + + for (idx = 0; ; idx++) { + if (new_tbl[idx].xtlv_id == EWPF_XTLV_INVALID) { + DHD_FILTER_TRACE(("%s NOT SUPPORTED TYPE(%d)\n", __FUNCTION__, type)); + return BCME_OK; + } + if (new_tbl[idx].xtlv_id == type) { + break; + } + } + + /* MULTI version may not be applied */ + if (len > sizeof(cur_ctx->dhdp->roam_evt)) { + DHD_FILTER_ERR(("data length is too big : max=%d, cur=%d\n", + (int)sizeof(cur_ctx->dhdp->roam_evt), len)); + len = sizeof(cur_ctx->dhdp->roam_evt); + } + + /* save latest roam event to report via get_bss_info */ + (void)memcpy_s((char *)&cur_ctx->dhdp->roam_evt, sizeof(cur_ctx->dhdp->roam_evt), + data, len); + + sub_ctx.dhdp = cur_ctx->dhdp; + sub_ctx.tbl = new_tbl; + new_tbl[idx].cb_func(&sub_ctx, data, type, len); + return BCME_OK; +} + +static int +filter_main_cb(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx; + EWPF_ctx_t sub_ctx; + int idx; + int err = BCME_OK; + + DHD_FILTER_TRACE(("%s type:%x len:%d\n", __FUNCTION__, type, len)); + + sub_ctx.dhdp = cur_ctx->dhdp; + for (idx = 0; ; idx++) { + if (cur_ctx->tbl[idx].xtlv_id == EWPF_XTLV_INVALID) { + DHD_FILTER_TRACE(("%s NOT SUPPORTED TYPE(%d)\n", __FUNCTION__, type)); + return BCME_OK; + } + if (cur_ctx->tbl[idx].xtlv_id == type) { + /* parse sub xtlv */ + if (cur_ctx->tbl[idx].cb_func
== NULL) { + sub_ctx.tbl = cur_ctx->tbl[idx].tbl; + err = bcm_unpack_xtlv_buf(&sub_ctx, data, len, + BCM_XTLV_OPTION_ALIGN32, filter_main_cb); + return err; + } + + /* handle for structure/variable */ + err = cur_ctx->tbl[idx].cb_func(ctx, data, type, len); + if (err != BCME_OK) { + return err; + } + } + } + + return err; +} + +void +dhd_event_log_filter_event_handler(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, uint32 *data) +{ + int err; + EWP_filter_t *filter; + EWPF_ctx_t ctx; + + if (!dhdp->event_log_filter) { + DHD_FILTER_ERR(("NO FILTER MODULE\n")); + return; + } + + if (!plog_hdr || !data) { + /* XXX Validation check is done by caller */ + DHD_FILTER_ERR(("INVALID PARAMETER\n")); + return; + } + + filter = (EWP_filter_t *)dhdp->event_log_filter; + if (filter->enabled != TRUE) { + DHD_FILTER_ERR(("FILTER IS NOT STARTED\n")); + return; + } + + /* get ARMCYCLE */ + filter->tmp_armcycle = plog_hdr->armcycle; + filter->idx_type = EWPF_INVALID; + filter->xtlv_idx = EWPF_INVALID; + +#ifdef EWPF_DEBUG + { + char buf[EWPF_DEBUG_BUF_LEN]; + int idx; + + memset(buf, 0, sizeof(buf)); + DHD_FILTER_ERR(("tag %d(%x) count %d(%x)\n", + plog_hdr->tag, plog_hdr->tag, plog_hdr->count, plog_hdr->count)); + for (idx = 0; idx < plog_hdr->count; idx++) { + sprintf(&buf[strlen(buf)], "%08x ", data[idx]); + if ((idx + 1) % EWPF_VAL_CNT_PLINE == 0) { + DHD_FILTER_ERR(("%s\n", buf)); + memset(buf, 0, sizeof(buf)); + } + } + if (strlen(buf) > 0) { + DHD_FILTER_ERR(("%s\n", buf)); + } + } +#endif /* EWPF_DEBUG */ + + ctx.dhdp = dhdp; + ctx.tbl = EWPF_main; + if ((err = bcm_unpack_xtlv_buf( + &ctx, + (const uint8 *)data, + (plog_hdr->count - 1) * sizeof(uint32), + BCM_XTLV_OPTION_ALIGN32, + filter_main_cb))) { + DHD_FILTER_ERR(("FAIL TO UNPACK XTLV: err(%d)\n", err)); + } +} +/* ========= Private Command(Serialize) ============= */ +/* XXX REPORT MODULE will be done after discussion with the customer */ +/* XXX Current implementation is temporary, to verify that the FILTER MODULE works */ +//#define EWPR_DEBUG +#ifdef EWPR_DEBUG +#undef DHD_FILTER_TRACE +#define DHD_FILTER_TRACE DHD_FILTER_ERR +#endif /* EWPR_DEBUG */ +#define EWPR_DEBUG_BUF_LEN 512 + +#define EWP_REPORT_ELEM_PRINT_BUF 256 +#define EWP_REPORT_NAME_MAX 64 + +#ifdef DHD_EWPR_VER2 +#define EWP_REPORT_VERSION 0x20190514 +#define EWP_REPORT_SET_DEFAULT 0x01 +#define EWPR_CSDCLIENT_DIFF 10 +#define EWPR_INTERVAL 3 +#define EWPR_DELTA_CNT 1 /* 3 seconds before */ +#define EWPR_ARRAY_CNT 10 /* INTERVAL * ARRAY total 30 seconds to lock */ +#define EWPR_DELTA_LAST_POS 6 + +#define INDEX_STR_SIZE 6 +#define DHD_STAT_STR_SIZE 2 +#define REPORT_VERSION_STR_SIZE 8 +#define DELIMITER_LEN 1 +#else +#define EWP_REPORT_VERSION 0x20170905 +#define EWPR_CSDCLIENT_DIFF 4 +#define EWPR_INTERVAL 3 +#define EWPR_ARRAY_CNT 10 /* INTERVAL * ARRAY total 30 seconds to lock */ +#endif /* DHD_EWPR_VER2 */ + +#define EWPR_DELTA3_POS 3 +#define EWPR_DELTA2_POS 2 +#define EWPR_DELTA1_POS 1 +#define EWPR_NOW_POS 0 + +#define EWPR_DELTA1_CNT 2 /* 6 seconds before */ +#define EWPR_DELTA2_CNT 5 /* 15 seconds before */ +#define EWPR_DELTA3_CNT 9 /* 27 seconds before */ + +#define EWPR_CNT_PER_LINE 5 + +/* EWP Reporter display format */ +#define EWP_DEC 1 +#define EWP_HEX 2 +#define EWP_BIN 3 + +/* EWP Filter Data type */ +/* BASIC : signed + length */ +#define EWP_UINT8 2 +#define EWP_UINT16 4 +#define EWP_UINT32 8 +#define EWP_UINT64 16 +#define EWP_INT8 102 +#define EWP_INT16 104 +#define EWP_INT32 108 +#define EWP_BIT 201 + +/* NON BASIC : need special handling */ +#define EWP_NON_BASIC 200
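+/* + * The basic EWP_* type codes above encode "signed + length" (as inferred + * from the values): 2 * sizeof(type) in bytes, plus 100 when the type is + * signed, e.g. EWP_UINT32 = 8 and EWP_INT32 = 108. EWP_NON_BASIC (200) and + * the codes that follow mark types that need special handling. + */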
+#define EWP_DATE 201 +#define EWP_TIME 202 +#define EWP_BSSID 203 +#define EWP_OUI 204 + +/* Delimiter between values */ +#define KEY_DEL ' ' +#define RAW_DEL '_' + +/* IOVAR BUF SIZE */ +#define EWPR_IOV_BUF_LEN 64 + +typedef struct { + void *ring; /* INPUT ring to lock */ + void **elem_list; /* OUTPUT elem ptr list for each delta */ + uint32 max_armcycle; /* IN/OUT arm cycle should be less than this */ + uint32 min_armcycle; /* IN/OUT arm cycle should be bigger than this */ + uint32 max_period; /* IN allowed time diff between first and last */ + uint32 delta_cnt; /* IN finding delta count */ + uint32 *delta_list; /* IN delta values to find */ +} ewpr_lock_param_t; + +#define MAX_MULTI_VER 3 +typedef struct { + uint32 version; /* VERSION for multiple version struct */ + uint32 offset; /* offset of the member at the version */ +} ewpr_MVT_offset_elem_t; /* elem for multi version type */ + +typedef struct { + uint32 version_offset; /* offset of version */ + ewpr_MVT_offset_elem_t opv[MAX_MULTI_VER]; /* offset per version */ +} ewpr_MVT_offset_t; /* multi_version type */ + +typedef struct { + char name[EWP_REPORT_NAME_MAX]; + int ring_type; /* Ring Type : EWPF_IDX_TYPE_SLICE, EWPF_IDX_TYPE_IFACE */ + int is_multi_version; /* is multi version */ + union { + uint32 offset; /* Offset from start of element structure */ + ewpr_MVT_offset_t v_info; + }; + int data_type; /* Data type : one of EWP Filter Data Type */ + int display_format; /* Display format : one of EWP Reporter display */ + int display_type; /* MAX display BYTE : valid for HEX and BIN FORM */ +#ifdef DHD_EWPR_VER2 + int info_type; /* info type : EWPF_INFO_ECNT, EWPF_INFO_IOVAR, ... */ + int display_bit_length; /* packing bit : valid for BIN FORM */ + int display_array_size; /* array size */ + int display_method; /* serial or diff */ + int unit_convert; /* unit conversion + * 0 or 1 : no conversion, put data as is + * greater than 1, divide value by unit_convert + */ + bool need_abs; /* need absolute function for negative value */ +#endif /* DHD_EWPR_VER2 */ +} ewpr_serial_info_t; + +/* offset defines */ +#define EWPR_CNT_VERSION_OFFSET \ + OFFSETOF(EWPF_slc_elem_t, compact_cntr_v3) + +#define EWPR_CNT_V1_OFFSET(a) \ + WL_PERIODIC_COMPACT_CNTRS_VER_1, \ + (OFFSETOF(EWPF_slc_elem_t, compact_cntr_v1) + OFFSETOF(wl_periodic_compact_cntrs_v1_t, a)) +#define EWPR_CNT_V2_OFFSET(a) \ + WL_PERIODIC_COMPACT_CNTRS_VER_2, \ + (OFFSETOF(EWPF_slc_elem_t, compact_cntr_v2) + OFFSETOF(wl_periodic_compact_cntrs_v2_t, a)) +#define EWPR_CNT_V3_OFFSET(a) \ + WL_PERIODIC_COMPACT_CNTRS_VER_3, \ + (OFFSETOF(EWPF_slc_elem_t, compact_cntr_v3) + OFFSETOF(wl_periodic_compact_cntrs_v3_t, a)) +#define EWPR_STAT_OFFSET(a) \ + (OFFSETOF(EWPF_ifc_elem_t, if_stat) + OFFSETOF(wl_if_stats_t, a)) +#define EWPR_INFRA_OFFSET(a) \ + (OFFSETOF(EWPF_ifc_elem_t, infra) + OFFSETOF(wl_if_infra_stats_t, a)) +#define EWPR_MGMT_OFFSET(a) \ + (OFFSETOF(EWPF_ifc_elem_t, mgmt_stat) + OFFSETOF(wl_if_mgt_stats_t, a)) +#define EWPR_LQM_OFFSET(a) \ + (OFFSETOF(EWPF_ifc_elem_t, lqm) + OFFSETOF(wl_lqm_t, a)) +#define EWPR_SIGNAL_OFFSET(a) \ + (EWPR_LQM_OFFSET(current_bss) + OFFSETOF(wl_rx_signal_metric_t, a)) +#define EWPR_IF_COMP_OFFSET(a) \ + (OFFSETOF(EWPF_ifc_elem_t, if_comp_stat) + OFFSETOF(wl_if_state_compact_t, a)) +#define EWPR_EVENT_COUNTER_OFFSET(a) \ + (OFFSETOF(EWPF_event_elem_t, event_stat) + OFFSETOF(wl_event_based_statistics_v4_t, a)) +#define EWPR_KEY_INFO_OFFSET(a) \ + (OFFSETOF(EWPF_key_info_elem_t, key_update_info) + OFFSETOF(key_update_info_v1_t, a)) +#define 
EWPR_TX_TOSS_HIST_OFFSET(a) \ + (OFFSETOF(EWPF_slc_elem_t, hist_tx_toss_stat) + \ + OFFSETOF(evt_hist_compact_toss_stats_v1_t, a)) +#define EWPR_RX_TOSS_HIST_OFFSET(a) \ + (OFFSETOF(EWPF_slc_elem_t, hist_rx_toss_stat) + \ + OFFSETOF(evt_hist_compact_toss_stats_v1_t, a)) +#define EWPR_BTC_STAT_OFFSET(a) \ + (OFFSETOF(EWPF_slc_elem_t, btc_stat) + \ + OFFSETOF(wlc_btc_stats_v4_t, a)) +#define EWPR_COMPACT_HE_CNT_OFFSET(a) \ + (OFFSETOF(EWPF_slc_elem_t, compact_he_cnt) + \ + OFFSETOF(wl_compact_he_cnt_wlc_v2_t, a)) +#define EWPR_ROAM_STATS_PERIODIC_OFFSET(a) \ + (OFFSETOF(EWPF_ifc_elem_t, roam_stat) + OFFSETOF(wl_roam_stats_v1_t, a)) + +/* serial info type define */ +#define EWPR_SERIAL_CNT(a) {\ + #a, EWPF_IDX_TYPE_SLICE, TRUE, \ + .v_info = { EWPR_CNT_VERSION_OFFSET, \ + {{EWPR_CNT_V1_OFFSET(a)}, \ + {EWPR_CNT_V2_OFFSET(a)}, \ + {EWPR_CNT_V3_OFFSET(a)}}}, \ + EWP_UINT32, EWP_HEX, EWP_UINT32} +#define EWPR_SERIAL_CNT_16(a) {\ + #a, EWPF_IDX_TYPE_SLICE, TRUE, \ + .v_info = { EWPR_CNT_VERSION_OFFSET, \ + {{EWPR_CNT_V1_OFFSET(a)}, \ + {EWPR_CNT_V2_OFFSET(a)}, \ + {EWPR_CNT_V3_OFFSET(a)}}}, \ + EWP_UINT32, EWP_HEX, EWP_UINT16} +#define EWPR_SERIAL_STAT(a) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_STAT_OFFSET(a), \ + EWP_UINT64, EWP_HEX, EWP_UINT32} +#define EWPR_SERIAL_INFRA(a) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_INFRA_OFFSET(a), \ + EWP_UINT32, EWP_HEX, EWP_UINT16} +#define EWPR_SERIAL_MGMT(a) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_MGMT_OFFSET(a), \ + EWP_UINT32, EWP_HEX, EWP_UINT16} +#define EWPR_SERIAL_LQM(a) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_LQM_OFFSET(a), \ + EWP_INT32, EWP_DEC, EWP_INT8} +#define EWPR_SERIAL_SIGNAL(a) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_SIGNAL_OFFSET(a), \ + EWP_INT32, EWP_DEC, EWP_INT8} +#define EWPR_SERIAL_IFCOMP_8(a) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_IF_COMP_OFFSET(a), \ + EWP_INT8, EWP_DEC, EWP_INT8} +#define EWPR_SERIAL_IFCOMP_16(a) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_IF_COMP_OFFSET(a), \ + EWP_UINT16, EWP_DEC, EWP_UINT16} +#define EWPR_SERIAL_ARM(a) {\ + "armcycle:" #a, EWPF_IDX_TYPE_##a, FALSE, {0, }, \ + EWP_UINT32, EWP_DEC, EWP_UINT32} +#define EWPR_SERIAL_NONE {"", EWPF_INVALID, FALSE, {0, }, 0, 0, 0} + +#ifdef DHD_EWPR_VER2 + +#define RAW_BUFFER_SIZE 720u +#define BASE64_BUFFER_SIZE 960u /* 33 percent larger than original binary data */ +#define EWPR_HEADER_SIZE 39u +#define EWPR_MAX_STR_SIZE EWPR_HEADER_SIZE + EWPR_HEADER_SIZE + +#define EWPR_DISPLAY_METHOD_SINGLE 0 +#define EWPR_DISPLAY_METHOD_DIFF 1 + +#define MAX_BIT_SIZE 8 +#define MAX_BIT_SHIFT 7 + +#define INDEX_UNSPECIFIED 0u + +enum ewpr_context_type { + EWP_CONTEXT_TYPE_UNWANTED_NETWORK = 0, + EWP_CONTEXT_TYPE_ASSOC_FAIL = 1, + EWP_CONTEXT_TYPE_ABNORMAL_DISCONNECT = 2, + EWP_CONTEXT_TYPE_MAX = 3 +}; + +enum ewpr_unwanted_net_sub_type { + EWP_UNWANT_NET_SUB_TYPE_UNSPECIFIED = 0, + EWP_UNWANT_NET_SUB_TYPE_ARP_FAIL = 1, + EWP_UNWANT_NET_SUB_TYPE_TXBAD = 2, + EWP_UNWANT_NET_SUB_TYPE_MAX = 3 +}; + +enum ewpr_assoc_fail_sub_type { + EWP_ASSOC_FAIL_SUB_TYPE_UNSPECIFIED = 0, + EWP_ASSOC_FAIL_SUB_TYPE_DHCP_FAIL = 1, + EWP_ASSOC_FAIL_SUB_TYPE_EAP_FAIL = 2, + EWP_ASSOC_FAIL_SUB_TYPE_EAP_TIMEOUT = 3, + EWP_ASSOC_FAIL_SUB_TYPE_4WAY_FAIL = 4, + EWP_ASSOC_FAIL_SUB_TYPE_MAX = 5 +}; + +enum ewpr_abnormal_disconnect_sub_type { + EWP_ABNRML_DISCONNCET_SUB_TYPE_UNSPECIFIED = 0, + EWP_ABNRML_DISCONNCET_SUB_TYPE_DISCONNECT_BY_HOST = 1, + EWP_ABNRML_DISCONNCET_SUB_TYPE_MAX = 2 +}; + +typedef struct { + uint32 index1; +
uint32 index2; + uint32 index3; + ewpr_serial_info_t *table; +} ewpr_serial_context_info_t; + +#define EWPR_SINGLE_DEFAULT EWPR_DISPLAY_METHOD_SINGLE, EWPF_NO_UNIT_CONV +#define EWPR_DIFF_DEFAULT EWPR_DISPLAY_METHOD_DIFF, EWPF_NO_UNIT_CONV + +#define EWPR_SINGLE_NSEC_TO_MSEC EWPR_DISPLAY_METHOD_SINGLE, EWPF_NSEC_TO_MSEC +#define EWPR_SINGLE_USEC_TO_MSEC EWPR_DISPLAY_METHOD_SINGLE, EWPF_USEC_TO_MSEC +#define EWPR_SINGLE_USEC_TO_SEC EWPR_DISPLAY_METHOD_SINGLE, EWPF_USEC_TO_SEC + +/* serial info type define */ +#define EWPR_SERIAL_CNT_V3_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_SLICE, TRUE, \ + .v_info = { EWPR_CNT_VERSION_OFFSET, \ + {{EWPR_CNT_V3_OFFSET(a)}}}, \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_STAT_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_STAT_OFFSET(a), \ + EWP_UINT64, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_INFRA_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_INFRA_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_MGMT_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_MGMT_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_LQM_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_LQM_OFFSET(a), \ + EWP_INT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_SIGNAL_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_SIGNAL_OFFSET(a), \ + EWP_INT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_IFCOMP_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_IF_COMP_OFFSET(a), \ + EWP_INT8, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_EVENT_COUNTER_16_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_EVENT, FALSE, .offset = EWPR_EVENT_COUNTER_OFFSET(a), \ + EWP_UINT16, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_EVENT_COUNTER_32_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_EVENT, FALSE, .offset = EWPR_EVENT_COUNTER_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_KEY_INFO_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_KEY_INFO, FALSE, .offset = EWPR_KEY_INFO_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_ROAM_STATS_PERIODIC_OFFSET(a), \ + EWP_UINT16, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_ROAM_STATS_PERIODIC_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_ARM_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_##a, FALSE, {0, }, \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_IOVAR_BIT(a, b) {\ + #a, 0, 0, .offset = 0, \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_IOVAR, b, 1, EWPR_SINGLE_DEFAULT} +#define EWPR_SERIAL_VERSION_BIT(a, b) {\ + #a, 0, 0, .offset = 0, \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_VER, b, 1, EWPR_SINGLE_DEFAULT} +#define EWPR_SERIAL_TYPE_BIT(a, b) {\ + #a, 0, 0, .offset = 0, \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_TYPE, b, 1, EWPR_SINGLE_DEFAULT} +#define EWPR_SERIAL_CPLOG_BIT(a, b, c) {\ + #a, 0, 0, .offset = 0, \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_CPLOG, b, c, EWPR_SINGLE_DEFAULT} +#define EWPR_SERIAL_DHDSTAT_BIT(a, b, c, d) {\ + #a, 0, 0, .offset = 0, \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_DHDSTAT, b, c,
d} +#define EWPR_SERIAL_TX_TOSS_HIST_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_TX_TOSS_HIST_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_RX_TOSS_HIST_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_RX_TOSS_HIST_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_BTC_STAT_16_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_BTC_STAT_OFFSET(a), \ + EWP_UINT16, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_BTC_STAT_32_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_BTC_STAT_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} +#define EWPR_SERIAL_COMPACT_HE_CNT_BIT(a, b, c, d) {\ + #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_COMPACT_HE_CNT_OFFSET(a), \ + EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d} + +#define EWPR_SERIAL_NONE_BIT {"", EWPF_INVALID, FALSE, {0, }, 0, 0, 0, 0, 0, 0, 0} + +#ifdef EWPR_DEBUG +static void ewpr_print_byte_as_bits(char val); +#endif /* EWPR_DEBUG */ + +static int32 +ewpr_diff_bit_pack(ewpr_serial_info_t *info, char *buf, int buf_len, + void *_f_op, void *_s_op, int32 bit_offset); +static int32 +ewpr_single_bit_pack(ewpr_serial_info_t *info, char *buf, int buf_len, + void *_ptr, int32 bit_offset); +static int32 +ewpr_bit_pack_basic(char *buf, int buf_len, uint32 data, int32 format, + int32 display_type, int32 display_bit_length, int32 bit_offset); + +static char* +ewpr_base64_encode(dhd_pub_t *dhdp, char* input, int32 length); +#endif /* DHD_EWPR_VER2 */ + +ewpr_serial_info_t +ewpr_serial_CSDCLIENT_key_tbl[] = { + EWPR_SERIAL_STAT(txframe), + EWPR_SERIAL_STAT(txerror), + EWPR_SERIAL_STAT(rxframe), + EWPR_SERIAL_STAT(rxerror), + EWPR_SERIAL_STAT(txretrans), + EWPR_SERIAL_INFRA(rxbeaconmbss), + EWPR_SERIAL_CNT(txallfrm), + EWPR_SERIAL_CNT(rxrsptmout), + EWPR_SERIAL_CNT(rxbadplcp), + EWPR_SERIAL_CNT(rxcrsglitch), + EWPR_SERIAL_CNT(rxbadfcs), + EWPR_SERIAL_CNT_16(rxbeaconmbss), + EWPR_SERIAL_CNT_16(rxbeaconobss), + EWPR_SERIAL_NONE +}; + +ewpr_serial_info_t +ewpr_serial_CSDCLIENT_diff_tbl[] = { + EWPR_SERIAL_STAT(txframe), + EWPR_SERIAL_STAT(txerror), + EWPR_SERIAL_STAT(rxframe), + EWPR_SERIAL_STAT(rxerror), + EWPR_SERIAL_STAT(txretrans), + EWPR_SERIAL_INFRA(rxbeaconmbss), + EWPR_SERIAL_MGMT(txassocreq), + EWPR_SERIAL_MGMT(txreassocreq), + EWPR_SERIAL_MGMT(txdisassoc), + EWPR_SERIAL_MGMT(rxdisassoc), + EWPR_SERIAL_MGMT(rxassocrsp), + EWPR_SERIAL_MGMT(rxreassocrsp), + EWPR_SERIAL_MGMT(txauth), + EWPR_SERIAL_MGMT(rxauth), + EWPR_SERIAL_MGMT(txdeauth), + EWPR_SERIAL_MGMT(rxdeauth), + EWPR_SERIAL_MGMT(txaction), + EWPR_SERIAL_MGMT(rxaction), + EWPR_SERIAL_CNT(txallfrm), + EWPR_SERIAL_CNT(rxrsptmout), + EWPR_SERIAL_CNT(rxbadplcp), + EWPR_SERIAL_CNT(rxcrsglitch), + EWPR_SERIAL_CNT(rxbadfcs), + EWPR_SERIAL_CNT_16(rxbeaconmbss), + EWPR_SERIAL_CNT_16(rxbeaconobss), + EWPR_SERIAL_NONE +}; + +ewpr_serial_info_t +ewpr_serial_CSDCLIENT_array_tbl[] = { + EWPR_SERIAL_IFCOMP_8(rssi_sum), + EWPR_SERIAL_IFCOMP_8(snr), + EWPR_SERIAL_IFCOMP_8(noise_level), + EWPR_SERIAL_NONE +}; + +#ifdef EWPR_DEBUG +ewpr_serial_info_t +ewpr_serial_dbg_tbl[] = { + EWPR_SERIAL_ARM(IFACE), + EWPR_SERIAL_ARM(SLICE), + EWPR_SERIAL_NONE +}; +#endif /* EWPR_DEBUG */ + +#ifdef DHD_EWPR_VER2 + +ewpr_serial_info_t +ewpr_serial_bit_unwanted_network_default_tbl[] = { + EWPR_SERIAL_VERSION_BIT(version, 32), + EWPR_SERIAL_TYPE_BIT(type, 5), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last_ts, 32, 1, 
EWPR_SINGLE_USEC_TO_SEC), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IOVAR_BIT(auth, 8), + EWPR_SERIAL_IOVAR_BIT(wsec, 8), + EWPR_SERIAL_IOVAR_BIT(mfp, 1), + EWPR_SERIAL_IOVAR_BIT(bip, 8), + EWPR_SERIAL_ARM_BIT(IFACE, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_STAT_BIT(txframe, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(txerror, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(rxframe, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(rxerror, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(txretrans, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txassocreq, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txreassocreq, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txdisassoc, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxdisassoc, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxassocrsp, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxreassocrsp, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txdeauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxdeauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txaction, 7, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxaction, 7, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(txallfrm, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxrsptmout, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbadplcp, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxcrsglitch, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbadfcs, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbeaconmbss, 5, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbeaconobss, 12, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(lqcm_report, 19, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(tx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxretry, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxdup, 15, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(chswitch_cnt, 8, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(pm_dur, 12, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxholes, 15, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_map, 16, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_a2dp_hiwat_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_datadelay_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_crtpri_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_pri_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf5cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf6cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf7cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_32_BIT(bt_gnt_dur, 12, 3, EWPR_SINGLE_USEC_TO_MSEC), + EWPR_SERIAL_IFCOMP_BIT(rssi_sum, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IFCOMP_BIT(snr, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IFCOMP_BIT(noise_level, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(initial_assoc_time, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(prev_roam_time, 32, 1, EWPR_SINGLE_DEFAULT), + 
EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_type, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_status, 6, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_reason, 6, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_success_cnt, 10, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_fail_cnt, 10, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_attempt_cnt, 11, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(partial_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(full_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxtrig_myaid, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_colormiss_cnt, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxmsta_back, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_txtbppdu, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_null_tbppdu, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(timestamp, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(algo, 6, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(key_flags, 16, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_CPLOG_BIT(packtlog, 22, 70), + EWPR_SERIAL_NONE +}; + +ewpr_serial_info_t +ewpr_serial_bit_assoc_fail_default_tbl[] = { + EWPR_SERIAL_VERSION_BIT(version, 32), + EWPR_SERIAL_TYPE_BIT(type, 5), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IOVAR_BIT(auth, 8), + EWPR_SERIAL_IOVAR_BIT(wsec, 8), + EWPR_SERIAL_IOVAR_BIT(mfp, 1), + EWPR_SERIAL_IOVAR_BIT(bip, 8), + EWPR_SERIAL_ARM_BIT(IFACE, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_STAT_BIT(txframe, 17, 
6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(txerror, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(rxframe, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(rxerror, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(txretrans, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txassocreq, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txreassocreq, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txdisassoc, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxdisassoc, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxassocrsp, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxreassocrsp, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txdeauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxdeauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txaction, 7, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxaction, 7, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(txallfrm, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxrsptmout, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbadplcp, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxcrsglitch, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbadfcs, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbeaconmbss, 5, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbeaconobss, 12, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(lqcm_report, 19, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(tx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxretry, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxdup, 15, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(chswitch_cnt, 8, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(pm_dur, 12, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxholes, 15, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_map, 16, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_a2dp_hiwat_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_datadelay_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_crtpri_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_pri_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf5cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf6cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf7cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_32_BIT(bt_gnt_dur, 12, 3, EWPR_SINGLE_USEC_TO_MSEC), + EWPR_SERIAL_IFCOMP_BIT(rssi_sum, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IFCOMP_BIT(snr, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IFCOMP_BIT(noise_level, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxtrig_myaid, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_colormiss_cnt, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxmsta_back, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_txtbppdu, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_null_tbppdu, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(timestamp, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(algo, 6, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(key_flags, 16, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_last, 32, 
1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_CPLOG_BIT(packtlog, 22, 70), + EWPR_SERIAL_NONE +}; + +ewpr_serial_info_t +ewpr_serial_bit_abnormal_disconnect_default_tbl[] = { + EWPR_SERIAL_VERSION_BIT(version, 32), + EWPR_SERIAL_TYPE_BIT(type, 5), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC), + EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IOVAR_BIT(auth, 8), + EWPR_SERIAL_IOVAR_BIT(wsec, 8), + EWPR_SERIAL_IOVAR_BIT(mfp, 1), + EWPR_SERIAL_IOVAR_BIT(bip, 8), + EWPR_SERIAL_ARM_BIT(IFACE, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_STAT_BIT(txframe, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(txerror, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(rxframe, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(rxerror, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_STAT_BIT(txretrans, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txassocreq, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txreassocreq, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txdisassoc, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxdisassoc, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxassocrsp, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxreassocrsp, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txdeauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxdeauth, 4, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(txaction, 7, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_MGMT_BIT(rxaction, 7, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(txallfrm, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxrsptmout, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbadplcp, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxcrsglitch, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbadfcs, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbeaconmbss, 5, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxbeaconobss, 12, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(lqcm_report, 19, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(tx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxretry, 17, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxdup, 15, 6, EWPR_DIFF_DEFAULT), + 
EWPR_SERIAL_CNT_V3_BIT(chswitch_cnt, 8, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(pm_dur, 12, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_CNT_V3_BIT(rxholes, 15, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_map, 16, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_a2dp_hiwat_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_datadelay_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_crtpri_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(bt_pri_cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf5cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf6cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf7cnt, 12, 3, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_BTC_STAT_32_BIT(bt_gnt_dur, 12, 3, EWPR_SINGLE_USEC_TO_MSEC), + EWPR_SERIAL_IFCOMP_BIT(rssi_sum, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IFCOMP_BIT(snr, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_IFCOMP_BIT(noise_level, 7, 6, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(initial_assoc_time, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(prev_roam_time, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_type, 8, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_status, 6, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_reason, 6, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_success_cnt, 10, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_fail_cnt, 10, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_attempt_cnt, 11, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(partial_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(full_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxtrig_myaid, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_colormiss_cnt, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxmsta_back, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_txtbppdu, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_null_tbppdu, 10, 6, EWPR_DIFF_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(timestamp, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(algo, 6, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_KEY_INFO_BIT(key_flags, 16, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + 
EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT), + EWPR_SERIAL_CPLOG_BIT(packtlog, 22, 70), + EWPR_SERIAL_NONE +}; + +ewpr_serial_context_info_t ewpr_serial_context_info[] = { + {EWP_CONTEXT_TYPE_UNWANTED_NETWORK, EWP_UNWANT_NET_SUB_TYPE_UNSPECIFIED, + INDEX_UNSPECIFIED, &ewpr_serial_bit_unwanted_network_default_tbl[0]}, + {EWP_CONTEXT_TYPE_UNWANTED_NETWORK, EWP_UNWANT_NET_SUB_TYPE_ARP_FAIL, + INDEX_UNSPECIFIED, &ewpr_serial_bit_unwanted_network_default_tbl[0]}, + {EWP_CONTEXT_TYPE_UNWANTED_NETWORK, EWP_UNWANT_NET_SUB_TYPE_TXBAD, + INDEX_UNSPECIFIED, &ewpr_serial_bit_unwanted_network_default_tbl[0]}, + {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_UNSPECIFIED, + INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]}, + {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_DHCP_FAIL, + INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]}, + {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_EAP_FAIL, + INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]}, + {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_EAP_TIMEOUT, + INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]}, + {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_4WAY_FAIL, + INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]}, + {EWP_CONTEXT_TYPE_ABNORMAL_DISCONNECT, EWP_ABNRML_DISCONNCET_SUB_TYPE_UNSPECIFIED, + INDEX_UNSPECIFIED, &ewpr_serial_bit_abnormal_disconnect_default_tbl[0]}, + {EWP_CONTEXT_TYPE_ABNORMAL_DISCONNECT, EWP_ABNRML_DISCONNCET_SUB_TYPE_DISCONNECT_BY_HOST, + INDEX_UNSPECIFIED, &ewpr_serial_bit_abnormal_disconnect_default_tbl[0]}, + {EWP_CONTEXT_TYPE_MAX, INDEX_UNSPECIFIED, INDEX_UNSPECIFIED, NULL} +}; +#endif /* DHD_EWPR_VER2 */ + +int ewpr_set_period_lock(ewpr_lock_param_t *param); +int ewpr_diff_serial(ewpr_serial_info_t *info, char *buf, + int buf_len, void *_f_op, void *_s_op, char del); +int ewpr_single_serial(ewpr_serial_info_t *info, char *buf, int buf_len, void *ptr, char del); + +int +ewpr_serial_basic(char *buf, int buf_len, uint32 data, int format, int display_type, char del) +{ + if (format == EWP_HEX) { + switch (display_type) { + case EWP_INT8: + case EWP_UINT8: + return scnprintf(buf, buf_len, "%c%02x", del, data & 0xff); + case EWP_INT16: + case EWP_UINT16: + return scnprintf(buf, buf_len, "%c%04x", del, data & 0xffff); + case EWP_INT32: + case EWP_UINT32: + return scnprintf(buf, buf_len, "%c%08x", del, data & 0xffffffff); + default: + DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type)); + return 0; + } + } + + if (format == EWP_DEC) { + int32 sdata = (int32) data; + switch (display_type) { + case EWP_INT8: + case EWP_UINT8: + return scnprintf(buf, buf_len, "%c%04d", del, sdata); + case EWP_INT16: + case EWP_UINT16: + return scnprintf(buf, buf_len, "%c%06d", del, sdata); + case EWP_INT32: + case EWP_UINT32: + return scnprintf(buf, buf_len, "%c%011d", del, sdata); + default: + DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type)); + return 0; + } + } + + if (format == EWP_BIN) { 
+ int32 sdata = (int32) data; + switch (display_type) { + case EWP_BIT: + return scnprintf(buf, buf_len, "%c%011d", del, sdata); + default: + DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type)); + return 0; + } + } + + DHD_FILTER_ERR(("INVALID FORMAT for Serial:%d", format)); + return 0; +} + +static int +ewpr_get_multi_offset(uint16 looking_version, ewpr_serial_info_t *info) +{ + int idx; + ewpr_MVT_offset_elem_t *opv; + + DHD_FILTER_TRACE(("FINDING MULTI OFFSET: type = %s version = %d\n", + info->name, looking_version)); + for (idx = 0; idx < MAX_MULTI_VER; idx ++) { + opv = &(info->v_info.opv[idx]); + + /* END OF MULTI VERSION */ + if (opv->version == 0) { + break; + } + if (looking_version == opv->version) { + return opv->offset; + } + } + /* return first version if no version is found */ + return info->v_info.opv[0].offset; +} +int +ewpr_single_serial(ewpr_serial_info_t *info, char *buf, int buf_len, void *_ptr, char del) +{ + uint32 sval = 0; + char *ptr = (char *)_ptr; + uint32 offset = EWPF_INVALID; + uint16 version; + + if (info->is_multi_version == TRUE) { + version = *(uint16 *)((char *)_ptr + info->v_info.version_offset); + offset = ewpr_get_multi_offset(version, info); + } else { + offset = info->offset; + } + + if (offset == EWPF_INVALID) { + DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name)); + return 0; + } + + ptr += offset; + + switch (info->data_type) { + case EWP_INT8: + sval = *(int8 *)ptr; + break; + case EWP_UINT8: + sval = *(uint8 *)ptr; + break; + case EWP_INT16: + sval = *(int16 *)ptr; + break; + case EWP_UINT16: + sval = *(uint16 *)ptr; + break; + case EWP_INT32: + sval = *(int32 *)ptr; + break; + case EWP_UINT32: + sval = *(uint32 *)ptr; + break; + /* XXX UINT64 is used only for debug */ +#ifdef EWPR_DEBUG + case EWP_UINT64: + sval = (uint32)(*(uint64 *)ptr); + break; +#endif /* EWPR_DEBUG */ + case EWP_BIT: + default: + DHD_FILTER_ERR(("INVALID TYPE for Single Serial:%d", info->data_type)); + return 0; + } + + return ewpr_serial_basic(buf, buf_len, sval, info->display_format, info->display_type, del); +} + +int +ewpr_diff_serial(ewpr_serial_info_t *info, + char *buf, int buf_len, void *_f_op, void *_s_op, char del) +{ + char *f_op = (char *)_f_op; + char *s_op = (char *)_s_op; + uint32 diff; + uint32 offset = EWPF_INVALID; + uint16 version; + + if (info->is_multi_version == TRUE) { + version = *(uint16 *)(f_op + info->v_info.version_offset); + offset = ewpr_get_multi_offset(version, info); + } else { + offset = info->offset; + } + + if (offset == EWPF_INVALID) { + DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name)); + return 0; + } + + f_op = f_op + offset; + s_op = s_op + offset; + + switch (info->data_type) { + case EWP_INT8: + case EWP_UINT8: + diff = *(uint8 *)f_op - *(uint8 *)s_op; + break; + case EWP_INT16: + case EWP_UINT16: + diff = *(uint16 *)f_op - *(uint16 *)s_op; + break; + case EWP_INT32: + case EWP_UINT32: + diff = *(uint32 *)f_op - *(uint32 *)s_op; + break; + case EWP_UINT64: + diff = (uint32)(*(uint64 *)f_op - *(uint64 *)s_op); + break; + case EWP_BIT: + default: + DHD_FILTER_ERR(("INVALID TYPE to DIFF:%d", info->data_type)); + return 0; + } + + return ewpr_serial_basic(buf, buf_len, diff, info->display_format, info->display_type, del); +} + +#ifdef EWPR_DEBUG +void +ewpr_debug_dump(ewpr_serial_info_t *tbl, void **ring) +{ + void *elem; + int idx, idx2; + ewpr_serial_info_t *info; + char buf[EWPR_DEBUG_BUF_LEN]; + uint32 bytes_written; + int lock_cnt; + + for (idx = 0; strlen(tbl[idx].name) != 0; idx++) { + info = 
&tbl[idx]; +#ifdef DHD_EWPR_VER2 + if (info->info_type != EWPF_INFO_ECNT) { + DHD_FILTER_ERR(("%s: unable to dump value\n", info->name)); + break; + } +#endif /* DHD_EWPR_VER2 */ + memset(buf, 0, sizeof(buf)); + lock_cnt = dhd_ring_lock_get_count(ring[info->ring_type - 1]); + elem = dhd_ring_lock_get_first(ring[info->ring_type - 1]); + bytes_written = scnprintf(buf, EWPR_DEBUG_BUF_LEN, "%s:", info->name); + for (idx2 = 0; elem && (idx2 < lock_cnt); idx2++) { + bytes_written += ewpr_single_serial(info, &buf[bytes_written], + EWPR_DEBUG_BUF_LEN - bytes_written, elem, KEY_DEL); + elem = dhd_ring_get_next(ring[info->ring_type - 1], elem); + } + DHD_FILTER_ERR(("%s\n", buf)); + } +} +#endif /* EWPR_DEBUG */ + +uint32 +dhd_event_log_filter_serialize(dhd_pub_t *dhdp, char *in_buf, uint32 tot_len, int type) +{ + EWP_filter_t *filter = (EWP_filter_t *)dhdp->event_log_filter; + void *ring[EWPF_MAX_IDX_TYPE]; + char *ret_buf = in_buf; + int slice_id; + int iface_id; + int idx, idx2; + uint32 bytes_written = 0; + void *elem[EWPF_MAX_IDX_TYPE][EWPR_CSDCLIENT_DIFF]; + void **elem_list; + int lock_cnt, lock_cnt2; + char *last_print; + void *arr_elem; + uint32 delta_list[EWPR_CSDCLIENT_DIFF]; + ewpr_lock_param_t lock_param; + int print_name = FALSE; + char cookie_str[DEBUG_DUMP_TIME_BUF_LEN]; + char iov_buf[EWPR_IOV_BUF_LEN]; + + if (type != 0) { + DHD_FILTER_ERR(("NOT SUPPORTED TYPE: %d\n", type)); + return 0; + } + + iface_id = 0; /* STA INTERFACE ONLY */ + if (filter->last_channel <= CH_MAX_2G_CHANNEL) { + slice_id = EWPF_SLICE_AUX; + } else { + slice_id = EWPF_SLICE_MAIN; + } + ring[EWPF_IDX_TYPE_SLICE - 1] = filter->s_ring[slice_id]; + ring[EWPF_IDX_TYPE_IFACE - 1] = filter->i_ring[iface_id]; + + /* Configure common LOCK parameter */ + lock_param.max_armcycle = (uint32)EWPF_INVALID; + lock_param.min_armcycle = filter->last_armcycle; + lock_param.max_period = (EWPR_ARRAY_CNT - 1)* EWPR_INTERVAL; + lock_param.max_period *= EWPF_MSEC_TO_SEC * EWPF_ARM_TO_MSEC; + lock_param.delta_cnt = ARRAYSIZE(delta_list); + lock_param.delta_list = delta_list; + + delta_list[EWPR_DELTA3_POS] = EWPR_DELTA3_CNT; + delta_list[EWPR_DELTA2_POS] = EWPR_DELTA2_CNT; + delta_list[EWPR_DELTA1_POS] = EWPR_DELTA1_CNT; + delta_list[EWPR_NOW_POS] = 0; + lock_param.ring = ring[EWPF_IDX_TYPE_IFACE -1]; + lock_param.elem_list = elem[EWPF_IDX_TYPE_IFACE -1]; + lock_cnt = ewpr_set_period_lock(&lock_param); + if (lock_cnt <= 0) { + DHD_FILTER_ERR(("FAIL TO GET IFACE LOCK: %d\n", iface_id)); + bytes_written = 0; + goto finished; + } + + delta_list[EWPR_DELTA3_POS] = EWPR_DELTA3_CNT; + delta_list[EWPR_DELTA2_POS] = EWPR_DELTA2_CNT; + delta_list[EWPR_DELTA1_POS] = EWPR_DELTA1_CNT; + delta_list[EWPR_NOW_POS] = 0; + lock_param.ring = ring[EWPF_IDX_TYPE_SLICE -1]; + lock_param.elem_list = elem[EWPF_IDX_TYPE_SLICE -1]; + lock_cnt2 = ewpr_set_period_lock(&lock_param); + if (lock_cnt2 <= 0) { + DHD_FILTER_ERR(("FAIL TO GET SLICE LOCK: %d\n", slice_id)); + goto finished; + } + + if (lock_cnt != lock_cnt2) { + DHD_FILTER_ERR(("Lock Count is Diff: iface:%d slice:%d\n", lock_cnt, lock_cnt2)); + lock_cnt = MIN(lock_cnt, lock_cnt2); + } + +#ifdef EWPR_DEBUG + print_name = TRUE; + ewpr_debug_dump(ewpr_serial_dbg_tbl, ring); + ewpr_debug_dump(ewpr_serial_CSDCLIENT_diff_tbl, ring); + ewpr_debug_dump(ewpr_serial_CSDCLIENT_array_tbl, ring); +#endif /* EWPR_DEBUG */ + + memset(ret_buf, 0, tot_len); + memset(cookie_str, 0, DEBUG_DUMP_TIME_BUF_LEN); + bytes_written = 0; + last_print = ret_buf; + + /* XXX Counters BIG DATA not matched to file yet */ + 
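Editorial sketch, not part of the patch: the serializer below emits one flat string (an 8-hex-digit report version, a KEY_DEL-separated debug-dump cookie, then fixed-width counter fields), always appending at &ret_buf[bytes_written] with tot_len - bytes_written capacity remaining. A minimal stand-alone illustration of that bounded-append pattern follows; it uses user-space snprintf() and invented names (append_u32_hex, report, counters), so it is a sketch of the pattern, not the driver's code.

#include <stdio.h>

#define KEY_DEL ' '

/* mirrors the shape of ewpr_serial_basic(): delimiter + fixed-width hex */
static int append_u32_hex(char *buf, int buf_len, char del, unsigned int v)
{
	return snprintf(buf, (size_t)buf_len, "%c%08x", del, v);
}

int main(void)
{
	char report[128];
	int written = 0;
	unsigned int version = 0x20170905;            /* stand-in for EWP_REPORT_VERSION */
	unsigned int counters[3] = { 100, 2345, 67 }; /* stand-ins for locked ring counters */
	int i;

	/* the first field carries no leading delimiter, as in the driver */
	written += snprintf(report, sizeof(report), "%08x", version);
	for (i = 0; i < 3; i++) {
		written += append_u32_hex(&report[written],
			(int)sizeof(report) - written, KEY_DEL, counters[i]);
	}
	printf("%s (len=%d)\n", report, written);
	return 0;
}

Note the sketch assumes the buffer never truncates; the kernel's scnprintf() used by the driver returns the number of characters actually written, so bytes_written cannot run past tot_len even on truncation, whereas snprintf() would report the would-be length.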
get_debug_dump_time(cookie_str); +#ifdef DHD_LOG_DUMP + dhd_logdump_cookie_save(dhdp, cookie_str, "ECNT"); +#endif + + /* KEY DATA */ + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%08x", EWP_REPORT_VERSION); + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%c%s", KEY_DEL, cookie_str); + DHD_FILTER_ERR(("%d: %s\n", bytes_written, last_print)); + last_print = &ret_buf[bytes_written]; + + for (idx = 0; strlen(ewpr_serial_CSDCLIENT_key_tbl[idx].name) != 0; idx++) { + ewpr_serial_info_t *info = &ewpr_serial_CSDCLIENT_key_tbl[idx]; + elem_list = elem[info->ring_type - 1]; + if (print_name) { + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, " %s:", info->name); + } + bytes_written += ewpr_diff_serial(info, &ret_buf[bytes_written], + tot_len - bytes_written, + elem_list[EWPR_NOW_POS], + elem_list[EWPR_DELTA1_POS], + KEY_DEL); + if ((idx + 1) % EWPR_CNT_PER_LINE == 0) { + DHD_FILTER_ERR(("%d:%s\n", bytes_written, last_print)); + last_print = &ret_buf[bytes_written]; + } + } + + /* RAW DATA */ + /* XXX FIRST data shall use space:KEY delimiter */ + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%c%08x", KEY_DEL, EWP_REPORT_VERSION); + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%c%s", RAW_DEL, cookie_str); + + for (idx = 0; strlen(ewpr_serial_CSDCLIENT_diff_tbl[idx].name) != 0; idx++) { + ewpr_serial_info_t *info = &ewpr_serial_CSDCLIENT_diff_tbl[idx]; + elem_list = elem[info->ring_type - 1]; + if (print_name) { + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, " %s:", info->name); + } + bytes_written += ewpr_diff_serial(info, &ret_buf[bytes_written], + tot_len - bytes_written, + elem_list[EWPR_NOW_POS], + elem_list[EWPR_DELTA1_POS], + RAW_DEL); + bytes_written += ewpr_diff_serial(info, &ret_buf[bytes_written], + tot_len - bytes_written, + elem_list[EWPR_DELTA1_POS], + elem_list[EWPR_DELTA2_POS], + RAW_DEL); + if ((idx + 1) % EWPR_CNT_PER_LINE == 0) { + DHD_FILTER_ERR(("%d:%s\n", bytes_written, last_print)); + last_print = &ret_buf[bytes_written]; + } + } + + /* FILL BSS SPECIFIC DATA LATER */ + if (dhd_iovar(dhdp, 0, "auth", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) { + DHD_FILTER_ERR(("fail to get auth\n")); + *(uint32 *)iov_buf = EWPF_INVALID; + + } + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%c%08x", RAW_DEL, *(uint32 *)iov_buf); + + if (dhd_iovar(dhdp, 0, "wsec", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) { + DHD_FILTER_ERR(("fail to get wsec\n")); + *(uint32 *)iov_buf = EWPF_INVALID; + + } + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%c%08x", RAW_DEL, *(uint32 *)iov_buf); + + if (dhd_iovar(dhdp, 0, "mfp", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) { + DHD_FILTER_ERR(("fail to get mfp\n")); + *(uint8 *)iov_buf = EWPF_INVALID; + + } + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%c%02x", RAW_DEL, *(uint8 *)iov_buf); + + if (dhd_iovar(dhdp, 0, "bip", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) { + DHD_FILTER_ERR(("fail to get bip\n")); + *(uint8 *)iov_buf = EWPF_INVALID; + } + bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, "%c%02x", RAW_DEL, *(uint8 *)iov_buf); + + for (idx = 0; strlen(ewpr_serial_CSDCLIENT_array_tbl[idx].name) != 0; idx++) { + ewpr_serial_info_t *info = &ewpr_serial_CSDCLIENT_array_tbl[idx]; + if (print_name) { + 
bytes_written += scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, " %s:", info->name); + } + for (idx2 = 0; idx2 < EWPR_ARRAY_CNT - lock_cnt; idx2++) { + bytes_written += ewpr_serial_basic(&ret_buf[bytes_written], + tot_len - bytes_written, 0, + info->display_format, info->display_type, RAW_DEL); + } + arr_elem = elem[info->ring_type - 1][EWPR_DELTA3_POS]; + for (; idx2 < EWPR_ARRAY_CNT; idx2++) { + if (arr_elem == NULL) { + DHD_FILTER_ERR(("ARR IS NULL : %d %p \n", + idx2, elem[info->ring_type - 1][EWPR_DELTA3_POS])); + break; + } + bytes_written += ewpr_single_serial(info, &ret_buf[bytes_written], + tot_len - bytes_written, arr_elem, RAW_DEL); + arr_elem = dhd_ring_get_next(ring[info->ring_type - 1], arr_elem); + } + DHD_FILTER_ERR(("%d:%s\n", bytes_written, last_print)); + last_print = &ret_buf[bytes_written]; + } + +finished: + DHD_FILTER_ERR(("RET LEN:%d\n", (int)strlen(ret_buf))); + dhd_ring_lock_free(ring[EWPF_IDX_TYPE_SLICE - 1]); + dhd_ring_lock_free(ring[EWPF_IDX_TYPE_IFACE - 1]); + return bytes_written; +} + +int +ewpr_set_period_lock(ewpr_lock_param_t *param) +{ + void *last; + void *first; + void *cur; + int lock_cnt; + int idx2; + int delta_idx; + uint32 last_armcycle; + uint32 first_armcycle; + uint32 cur_armcycle = 0; + void *ring = param->ring; + + /* GET LATEST PTR */ + last = dhd_ring_get_last(ring); + while (TRUE) { + if (last == NULL) { + DHD_FILTER_ERR(("NO LAST\n")); + return -1; + } + last_armcycle = *(uint32 *)last; + if (last_armcycle <= param->max_armcycle || + last_armcycle + EWPF_EPOCH >= param->max_armcycle) { + break; + } + last = dhd_ring_get_prev(ring, last); + } + + if (last_armcycle != param->max_armcycle) { + DHD_FILTER_TRACE(("MAX ARMCYCLE IS CHANGED new:%u prev:%u\n", + last_armcycle, param->max_armcycle)); + param->max_armcycle = last_armcycle; + } + + if (last_armcycle < param->min_armcycle) { + param->min_armcycle = 0; + } + + /* GET FIRST PTR */ + first_armcycle = last_armcycle; + first = last; + while (TRUE) { + cur = dhd_ring_get_prev(ring, first); + if (cur == NULL) { + break; + } + cur_armcycle = *(uint32 *)cur; + if (cur_armcycle >= first_armcycle) { + DHD_FILTER_TRACE(("case 1: %u %u\n", first_armcycle, cur_armcycle)); + /* dongle is rebooted */ + break; + } + if (cur_armcycle + EWPF_EPOCH < param->min_armcycle) { + DHD_FILTER_TRACE(("case 2: %u %u\n", param->min_armcycle, cur_armcycle)); + /* reached the limit */ + break; + } + if (cur_armcycle + param->max_period + EWPF_EPOCH < last_armcycle) { + DHD_FILTER_TRACE(("case 3: %u %u\n", param->max_period, cur_armcycle)); + /* exceed max period */ + break; + } + first = cur; + first_armcycle = cur_armcycle; + } + + if (first_armcycle != param->min_armcycle) { + DHD_FILTER_TRACE(("MIN ARMCYCLE IS CHANGED new:%u prev:%u %u\n", + first_armcycle, param->min_armcycle, cur_armcycle)); + param->min_armcycle = first_armcycle; + } + + DHD_FILTER_TRACE(("ARM CYCLE of first(%u), last(%u)\n", first_armcycle, last_armcycle)); + + dhd_ring_lock(ring, first, last); + + lock_cnt = dhd_ring_lock_get_count(ring); + if (lock_cnt <= 0) { + DHD_FILTER_ERR((" NO VALID RECORD : %d\n", lock_cnt)); + return -1; + } + DHD_FILTER_TRACE(("Lock Count:%d\n", lock_cnt)); + + /* Validate delta position */ + for (idx2 = 0; idx2 < param->delta_cnt - 1; idx2++) { + if (param->delta_list[idx2] >= param->delta_list[idx2 + 1]) { + DHD_FILTER_ERR(("INVALID DELTA at %d\n", idx2 + 1)); + param->delta_list[idx2 + 1] = param->delta_list[idx2]; + } + } + + delta_idx = 0; + for (idx2 = 0; idx2 < lock_cnt && delta_idx <
param->delta_cnt; idx2++) { + if (idx2 == 0) { + cur = dhd_ring_lock_get_last(ring); + } else { + cur = dhd_ring_get_prev(ring, cur); + } + + if (idx2 >= param->delta_list[delta_idx]) { + param->elem_list[delta_idx] = cur; + delta_idx ++; + } + } + + /* COPY last elem to rest of the list */ + delta_idx--; + for (idx2 = delta_idx + 1; idx2 < param->delta_cnt; idx2++) { + param->elem_list[idx2] = cur; + } + return lock_cnt; +} + +#ifdef DHD_EWPR_VER2 +static int +ewpr_single_bit_pack(ewpr_serial_info_t * info, char * buf, int buf_len, + void * _ptr, int bit_offset) +{ + int32 sval = 0; + char *ptr = (char *)_ptr; + uint32 offset = EWPF_INVALID; + uint16 version; + bool is_signed = FALSE; + + if (info->is_multi_version == TRUE) { + version = *(uint16 *)((char *)_ptr + info->v_info.version_offset); + offset = ewpr_get_multi_offset(version, info); + } else { + offset = info->offset; + } + + if (offset == EWPF_INVALID) { + DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name)); + return 0; + } + + ptr += offset; + + switch (info->data_type) { + case EWP_INT8: + sval = *(int8 *)ptr; + is_signed = TRUE; + break; + case EWP_UINT8: + sval = *(uint8 *)ptr; + break; + case EWP_INT16: + sval = *(int16 *)ptr; + is_signed = TRUE; + break; + case EWP_UINT16: + sval = *(uint16 *)ptr; + break; + case EWP_INT32: + sval = *(int32 *)ptr; + is_signed = TRUE; + break; + case EWP_UINT32: + sval = *(uint32 *)ptr; + break; +#ifdef EWPR_DEBUG + case EWP_UINT64: + sval = (int32)(*(uint64 *)ptr); + break; +#endif /* EWPR_DEBUG */ + default: + DHD_FILTER_ERR(("INVALID TYPE for Single Serial:%d", info->data_type)); + return 0; + } + + /* convert negative value to positive before bit packing */ + if (is_signed) { + if (sval < 0) { + DHD_FILTER_TRACE(("convert to positive value %d\n", sval)); + sval = ABS(sval); + } + } + + if (info->unit_convert > 1) { + DHD_FILTER_TRACE(("convert unit %d / %d\n", sval, info->unit_convert)); + sval = sval / info->unit_convert; + } + + if (is_signed) { + DHD_FILTER_TRACE(("%s : signed value : %d, bit length: %d", + info->name, sval, info->display_bit_length)); + } else { + DHD_FILTER_TRACE(("%s : unsigned value : %u, bit length: %d", + info->name, sval, info->display_bit_length)); + } + + return ewpr_bit_pack_basic(buf, buf_len, sval, info->display_format, + info->display_type, info->display_bit_length, bit_offset); +} + +static int +ewpr_diff_bit_pack(ewpr_serial_info_t *info, char *buf, int buf_len, + void *_f_op, void *_s_op, int bit_offset) +{ + char *f_op = (char *)_f_op; + char *s_op = (char *)_s_op; + int32 diff; + uint32 offset = EWPF_INVALID; + uint16 version; + + if (info->is_multi_version == TRUE) { + version = *(uint16 *)(f_op + info->v_info.version_offset); + offset = ewpr_get_multi_offset(version, info); + } else { + offset = info->offset; + } + + if (offset == EWPF_INVALID) { + DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name)); + return 0; + } + + f_op = f_op + offset; + s_op = s_op + offset; + + switch (info->data_type) { + case EWP_INT8: + case EWP_UINT8: + diff = *(uint8 *)f_op - *(uint8 *)s_op; + break; + case EWP_INT16: + case EWP_UINT16: + diff = *(uint16 *)f_op - *(uint16 *)s_op; + break; + case EWP_INT32: + case EWP_UINT32: + diff = *(uint32 *)f_op - *(uint32 *)s_op; + break; + case EWP_UINT64: + diff = (uint32)(*(uint64 *)f_op - *(uint64 *)s_op); + break; + default: + DHD_FILTER_ERR(("INVALID TYPE to DIFF:%d", info->data_type)); + return 0; + } + + if (diff < 0) { + DHD_FILTER_TRACE(("convert to positive value %d\n", diff)); + diff = ABS(diff); + } + 
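Editorial sketch, not part of the patch: the descriptor tables earlier in this file give each counter a byte offset, a storage width, and a display_bit_length; ewpr_diff_bit_pack() subtracts two ring snapshots in the counter's own unsigned width (so a wrapped 8/16-bit counter still diffs correctly), folds any negative result, divides by unit_convert, and hands the value to dhd_bit_pack() to be appended MSB-first at the current bit offset. The stand-alone sketch below shows the wrap-safe diff plus a simplified MSB-first packer; bit_pack() is an invented stand-in with no buffer-length check, not the driver's dhd_bit_pack() from dhd_bitpack.c.

#include <stdio.h>
#include <stdint.h>

/* Append the low `nbits` of `val` at bit position `off` in `buf`
 * (most significant bit of the field first); return the new offset.
 */
static int bit_pack(uint8_t *buf, int off, uint32_t val, int nbits)
{
	int i;

	for (i = nbits - 1; i >= 0; i--, off++) {
		if ((val >> i) & 1u)
			buf[off >> 3] |= (uint8_t)(0x80u >> (off & 7));
	}
	return off;
}

int main(void)
{
	uint8_t raw[8] = { 0 };
	uint16_t prev = 0xFFF0, now = 0x0010;   /* counter wrapped between snapshots */
	uint32_t diff = (uint16_t)(now - prev); /* 0x20: width-safe modular delta */
	int bits = 0;

	bits = bit_pack(raw, bits, diff, 17);   /* e.g. a 17-bit txframe-style field */
	bits = bit_pack(raw, bits, 0x5, 4);     /* the next field starts mid-byte */
	printf("packed %d bits: %02x %02x %02x\n", bits, raw[0], raw[1], raw[2]);
	return 0;
}

Packing every field to its table-declared bit budget rather than to byte boundaries keeps the final base64-encoded report compact.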
+ if (info->unit_convert > 1) { + DHD_FILTER_TRACE(("convert unit %d / %d\n", diff, info->unit_convert)); + diff = diff / info->unit_convert; + } + + DHD_FILTER_TRACE(("%s : value : %d, bit length: %d", + info->name, diff, info->display_bit_length)); + return ewpr_bit_pack_basic(buf, buf_len, diff, info->display_format, + info->display_type, info->display_bit_length, bit_offset); +} + +static int +ewpr_bit_pack_basic(char *buf, int buf_len, uint32 data, int format, int display_type, + int display_bit_length, int bit_offset) +{ + if (format == EWP_BIN) { + uint32 sdata = (uint32) data; + switch (display_type) { + case EWP_BIT: + /* call bit packing */ + return dhd_bit_pack(buf, buf_len, bit_offset, + sdata, display_bit_length); + default: + DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type)); + return 0; + } + } + + DHD_FILTER_ERR(("INVALID FORMAT for Serial:%d", format)); + return 0; +} + +static ewpr_serial_info_t* +ewpr_find_context_info(int index1, int index2, int index3) +{ + int idx = 0; + ewpr_serial_info_t *context_info = NULL; + + for (idx = 0; ewpr_serial_context_info[idx].table != NULL; idx++) { + if (index1 == ewpr_serial_context_info[idx].index1 && + index2 == ewpr_serial_context_info[idx].index2 && + index3 == ewpr_serial_context_info[idx].index3) { + context_info = ewpr_serial_context_info[idx].table; + break; + } + } + + if (context_info == NULL) { + DHD_FILTER_ERR(("unable to find context info for index number: %02x:%02x:%02x\n", + index1, index2, index3)); + return NULL; + } + + return context_info; +} + +static int +ewpr_find_context_type(ewpr_serial_info_t* context_info) +{ + int idx = 0; + int context_type = BCME_ERROR; + + /* index2, index3 are reserved */ + + for (idx = 0; ewpr_serial_context_info[idx].table != NULL; idx++) { + if (context_info == ewpr_serial_context_info[idx].table) { + context_type = idx; + break; + } + } + + return context_type; +} + +static uint32 +ewpr_scnprintf(char *buf, uint32 buf_len, uint32 input_len, char *data_type, char *fmt, ...) 
+{ + va_list args; + + if (buf_len < input_len) { + DHD_FILTER_ERR(("%s: input length(%d) is larger than " + "remain buffer length(%d)\n", data_type, + input_len, buf_len)); + } + va_start(args, fmt); + buf_len = vscnprintf(buf, buf_len, fmt, args); + va_end(args); + + return buf_len; +} + +uint32 +dhd_event_log_filter_serialize_bit(dhd_pub_t *dhdp, char *in_buf, uint32 tot_len, + int index1, int index2, int index3) +{ + EWP_filter_t *filter = (EWP_filter_t *)dhdp->event_log_filter; + void *ring[EWPF_MAX_IDX_TYPE]; + char *ret_buf = in_buf; + int slice_id; + int iface_id; + int event_id; + int key_info_id; + int idx; + int idx2; + uint32 bytes_written = 0; + int bits_written = 0; + void *elem[EWPF_MAX_IDX_TYPE][EWPR_CSDCLIENT_DIFF]; + void **elem_list; + int lock_cnt, lock_cnt2; + uint32 delta_list[EWPR_CSDCLIENT_DIFF]; + ewpr_lock_param_t lock_param; + char cookie_str[DEBUG_DUMP_TIME_BUF_LEN]; + char iov_buf[EWPR_IOV_BUF_LEN]; + char *raw_buf = NULL; + char *raw_encode_buf = NULL; + int raw_buf_size; + int ret = 0; + ewpr_serial_info_t *context_info = NULL; + int context_type; +#ifdef DHD_STATUS_LOGGING + uint32 conv_cnt = 0; +#endif /* DHD_STATUS_LOGGING */ + +#ifdef DHD_STATUS_LOGGING + stat_elem_t dhd_stat[EWP_DHD_STAT_SIZE]; + stat_query_t query; + + memset(&dhd_stat[0], 0x00, sizeof(stat_elem_t) * EWP_DHD_STAT_SIZE); +#endif /* DHD_STATUS_LOGGING */ + + context_info = ewpr_find_context_info(index1, index2, index3); + if (!context_info) { + return bytes_written; + } + + if (tot_len < EWPR_MAX_STR_SIZE) { + DHD_FILTER_ERR(("%s: insufficient buffer size %d\n", + __FUNCTION__, tot_len)); + return bytes_written; + } + + iface_id = 0; /* STA INTERFACE ONLY */ + event_id = 0; /* COMMON ID */ + key_info_id = 0; /* COMMON ID */ + if (filter->last_channel <= CH_MAX_2G_CHANNEL) { + slice_id = EWPF_SLICE_AUX; + } else { + slice_id = EWPF_SLICE_MAIN; + } + ring[EWPF_IDX_TYPE_SLICE - 1] = filter->s_ring[slice_id]; + ring[EWPF_IDX_TYPE_IFACE - 1] = filter->i_ring[iface_id]; + ring[EWPF_IDX_TYPE_EVENT - 1] = filter->e_ring[event_id]; + ring[EWPF_IDX_TYPE_KEY_INFO - 1] = filter->k_ring[key_info_id]; + + /* Configure common LOCK parameter */ + lock_param.max_armcycle = (uint32)EWPF_INVALID; + lock_param.min_armcycle = filter->last_armcycle; + lock_param.max_period = (EWPR_ARRAY_CNT - 1)* EWPR_INTERVAL; + lock_param.max_period *= EWPF_MSEC_TO_SEC * EWPF_ARM_TO_MSEC; + lock_param.delta_cnt = ARRAYSIZE(delta_list); + lock_param.delta_list = delta_list; + + for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) { + delta_list[idx] = idx * EWPR_DELTA_CNT; + } + + lock_param.ring = ring[EWPF_IDX_TYPE_IFACE -1]; + lock_param.elem_list = elem[EWPF_IDX_TYPE_IFACE -1]; + lock_cnt = ewpr_set_period_lock(&lock_param); + if (lock_cnt <= 0) { + DHD_FILTER_ERR(("FAIL TO GET IFACE LOCK: %d\n", iface_id)); + bytes_written = 0; + goto finished; + } + + for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) { + delta_list[idx] = idx * EWPR_DELTA_CNT; + } + + lock_param.ring = ring[EWPF_IDX_TYPE_SLICE -1]; + lock_param.elem_list = elem[EWPF_IDX_TYPE_SLICE -1]; + lock_cnt2 = ewpr_set_period_lock(&lock_param); + if (lock_cnt2 <= 0) { + DHD_FILTER_ERR(("FAIL TO GET SLICE LOCK: %d\n", slice_id)); + goto finished; + } + + if (lock_cnt != lock_cnt2) { + DHD_FILTER_ERR(("Lock Count is Diff: iface:%d slice:%d\n", lock_cnt, lock_cnt2)); + lock_cnt = MIN(lock_cnt, lock_cnt2); + } + + for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) { + delta_list[idx] = idx * EWPR_DELTA_CNT; + } + + lock_param.ring = ring[EWPF_IDX_TYPE_EVENT -1]; + 
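Editorial sketch, not part of the patch: each ring type is locked over the same time window, and delta_list is re-armed to {0, EWPR_DELTA_CNT, 2*EWPR_DELTA_CNT, ...} so that ewpr_set_period_lock() leaves elem[type][k] pointing at the snapshot k deltas behind the newest locked record; adjacent picks are later diffed to produce one value per look-back interval. A stand-alone, array-backed illustration of that selection follows (HISTORY, NPICKS, and DELTA_CNT are invented stand-ins for the ring depth and EWPR_* constants).

#include <stdio.h>

#define HISTORY   16
#define NPICKS    4
#define DELTA_CNT 3   /* records between adjacent picks */

int main(void)
{
	int history[HISTORY];      /* index 0 oldest .. HISTORY-1 newest */
	const int *pick[NPICKS];   /* like elem_list[EWPR_NOW_POS..] */
	int i;

	for (i = 0; i < HISTORY; i++)
		history[i] = 100 + i;  /* a monotonically growing counter */

	/* pick[k] points DELTA_CNT*k records behind the newest snapshot */
	for (i = 0; i < NPICKS; i++)
		pick[i] = &history[(HISTORY - 1) - i * DELTA_CNT];

	/* diff adjacent picks: one delta per look-back interval */
	for (i = 0; i + 1 < NPICKS; i++)
		printf("interval %d: diff=%d\n", i, *pick[i] - *pick[i + 1]);
	return 0;
}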
lock_param.elem_list = elem[EWPF_IDX_TYPE_EVENT -1]; + lock_cnt = ewpr_set_period_lock(&lock_param); + if (lock_cnt <= 0) { + DHD_FILTER_ERR(("FAIL TO GET EVENT ECNT LOCK: %d\n", iface_id)); + bytes_written = 0; + goto finished; + } + + for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) { + delta_list[idx] = idx * EWPR_DELTA_CNT; + } + + lock_param.ring = ring[EWPF_IDX_TYPE_KEY_INFO -1]; + lock_param.elem_list = elem[EWPF_IDX_TYPE_KEY_INFO -1]; + lock_cnt = ewpr_set_period_lock(&lock_param); + if (lock_cnt <= 0) { + DHD_FILTER_ERR(("FAIL TO GET KEY INFO LOCK: %d\n", iface_id)); + bytes_written = 0; + goto finished; + } + +#ifdef EWPR_DEBUG + ewpr_debug_dump(context_info, ring); +#endif /* EWPR_DEBUG */ + + memset(ret_buf, 0, tot_len); + memset(cookie_str, 0, DEBUG_DUMP_TIME_BUF_LEN); + bytes_written = 0; + bits_written = 0; + + /* XXX Counters BIG DATA not matched to file yet */ + get_debug_dump_time(cookie_str); +#ifdef DHD_LOG_DUMP + dhd_logdump_cookie_save(dhdp, cookie_str, "ECNT"); +#endif /* DHD_LOG_DUMP */ + + /* KEY DATA */ + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, REPORT_VERSION_STR_SIZE, + "report version", "%08x", EWP_REPORT_VERSION); + + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, DELIMITER_LEN + strlen(cookie_str), + "cookie string", "%c%s", KEY_DEL, cookie_str); + + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, DELIMITER_LEN + INDEX_STR_SIZE, + "host trigger index", "%c%02x%02x%02x", KEY_DEL, index1, index2, index3); + +#ifdef DHD_STATUS_LOGGING + DHD_FILTER_TRACE(("print dhd_stat size:%d * %d, size of filter list: %d\n", + (uint32)sizeof(dhd_stat[0]), EWP_DHD_STAT_SIZE, + (uint32)sizeof(dhd_statlog_filter))); + query.req_buf = NULL; + query.req_buf_len = 0; + query.resp_buf = (char *)dhd_stat; + query.resp_buf_len = DHD_STATLOG_RING_SIZE(EWP_DHD_STAT_SIZE); + query.req_num = EWP_DHD_STAT_SIZE; + ret = dhd_statlog_get_latest_info(dhdp, (void *)&query); + if (ret < 0) { + DHD_FILTER_ERR(("fail to get dhd statlog - %d\n", ret)); + } +#ifdef EWPR_DEBUG + for (idx = 0; idx < EWP_DHD_STAT_SIZE; idx++) { + DHD_FILTER_TRACE(("DHD status index: %d, timestamp: %llu, stat: %d\n", + idx, dhd_stat[idx].ts, dhd_stat[idx].stat)); + } +#endif + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE, + "current dhd status", "%c%02x", KEY_DEL, dhd_stat[0].stat); + + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE, + "previous dhd status", "%c%02x", KEY_DEL, dhd_stat[1].stat); +#else + /* reserved for dhd status information */ + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE, + "current dhd status", "%c%02x", KEY_DEL, 0x00); + + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE, + "previous dhd status", "%c%02x", KEY_DEL, 0x00); +#endif /* DHD_STATUS_LOGGING */ + + /* RAW DATA */ + raw_buf = MALLOCZ(dhdp->osh, RAW_BUFFER_SIZE); + + for (idx = 0; strlen(context_info[idx].name) != 0; idx++) { + ewpr_serial_info_t *info = &context_info[idx]; + elem_list = elem[info->ring_type - 1]; + DHD_FILTER_TRACE(("%s : array_size: %d\n", info->name, info->display_array_size)); + switch (info->info_type) { + case EWPF_INFO_VER: + DHD_FILTER_TRACE(("write %s - value: %d\n", info->name, + EWP_REPORT_VERSION)); + bits_written = 
dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, bits_written, + EWP_REPORT_VERSION, info->display_bit_length); + break; + case EWPF_INFO_TYPE: + context_type = ewpr_find_context_type(context_info); + if (context_type < 0) { + DHD_FILTER_ERR(("fail to get context_type - %d\n", + context_type)); + break; + } + DHD_FILTER_TRACE(("write %s - value: %d\n", info->name, + (uint32)context_type)); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, bits_written, + (uint32)context_type, info->display_bit_length); + break; + case EWPF_INFO_DHDSTAT: + if (strcmp("dhdstat_last_ts", info->name) == 0) { +#ifdef DHD_STATUS_LOGGING + if (info->unit_convert > 1) { + conv_cnt = dhd_stat[0].ts_tz / info->unit_convert; + } else { + conv_cnt = dhd_stat[0].ts_tz; + } + DHD_FILTER_TRACE(("DHD status last timestamp:" + " %llu, %u", dhd_stat[0].ts_tz, + conv_cnt)); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, conv_cnt, + info->display_bit_length); +#else + DHD_FILTER_TRACE(("No DHD status log timestamp\n")); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, 0x00, info->display_bit_length); +#endif /* DHD_STATUS_LOGGING */ + } else if (strcmp("dhdstat_last", info->name) == 0) { +#ifdef DHD_STATUS_LOGGING + DHD_FILTER_TRACE(("DHD status last stat: %d(0x%02x)", + dhd_stat[0].stat, dhd_stat[0].stat)); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, (uint32)dhd_stat[0].stat, + info->display_bit_length); +#else + DHD_FILTER_TRACE(("No DHD status log value\n")); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, 0x00, info->display_bit_length); +#endif /* DHD_STATUS_LOGGING */ + } else if (strcmp("dhdstat_prev_ts", info->name) == 0) { +#ifdef DHD_STATUS_LOGGING + if (info->unit_convert > 1) { + conv_cnt = dhd_stat[1].ts_tz / info->unit_convert; + } else { + conv_cnt = dhd_stat[1].ts_tz; + } + DHD_FILTER_TRACE(("DHD status prev timestamp:" + " %llu, %u", dhd_stat[1].ts_tz, + conv_cnt)); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, conv_cnt, + info->display_bit_length); +#else + DHD_FILTER_TRACE(("No DHD status log timestamp\n")); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, 0x00, info->display_bit_length); +#endif /* DHD_STATUS_LOGGING */ + } else if (strcmp("dhdstat_prev", info->name) == 0) { +#ifdef DHD_STATUS_LOGGING + DHD_FILTER_TRACE(("DHD status prev stat: %d(0x%02x)", + dhd_stat[1].stat, dhd_stat[1].stat)); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, (uint32)dhd_stat[1].stat, + info->display_bit_length); +#else + DHD_FILTER_TRACE(("No DHD status log value\n")); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, 0x00, info->display_bit_length); +#endif /* DHD_STATUS_LOGGING */ + } else { + DHD_FILTER_ERR(("unknown dhdstat name - %s\n", + info->name)); + } + break; + case EWPF_INFO_ECNT: + for (idx2 = 0; idx2 < info->display_array_size; idx2++) { + if (info->display_method == EWPR_DISPLAY_METHOD_SINGLE) { + bits_written = ewpr_single_bit_pack(info, raw_buf, + RAW_BUFFER_SIZE, elem_list[idx2], + bits_written); + } else { + bits_written = ewpr_diff_bit_pack(info, raw_buf, + RAW_BUFFER_SIZE, elem_list[idx2], + elem_list[idx2+1], bits_written); + } + } + break; + case EWPF_INFO_IOVAR: + if (dhd_iovar(dhdp, 0, info->name, NULL, 0, iov_buf, + ARRAYSIZE(iov_buf), FALSE) < 0) { + DHD_FILTER_ERR(("fail to get auth\n")); + *(uint32 *)iov_buf = EWPF_INVALID; + } + DHD_FILTER_TRACE(("write %s - value: %d\n", info->name, + *(uint8 
*)iov_buf)); + bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, + bits_written, *(uint8 *)iov_buf, + info->display_bit_length); + break; + case EWPF_INFO_CPLOG: + DHD_FILTER_TRACE(("write compact packet log\n")); + ret = 0; +#if defined(DHD_PKT_LOGGING) && defined(DHD_COMPACT_PKT_LOG) + ret = dhd_cpkt_log_proc(dhdp, raw_buf, RAW_BUFFER_SIZE, + bits_written, info->display_array_size); +#endif /* DHD_PKT_LOGGING && DHD_COMPACT_PKT_LOG */ + if (ret < 0) { + DHD_FILTER_ERR(("fail to get compact packet log - %d\n", + ret)); + break; + } + /* update bit offset */ + DHD_FILTER_TRACE(("%d bits written\n", ret)); + if (ret > 0) { + bits_written = ret; + } + break; + default: + DHD_FILTER_ERR(("unsupported info type\n")); + break; + } + DHD_FILTER_TRACE(("%d bits written\n", bits_written)); + } + + /* encode data */ + raw_buf_size = BYTE_SIZE(bits_written); + raw_encode_buf = ewpr_base64_encode(dhdp, raw_buf, raw_buf_size); + +#ifdef EWPR_DEBUG + DHD_FILTER_ERR(("raw_buf:\n")); + for (idx = 0; idx < raw_buf_size; idx++) { + ewpr_print_byte_as_bits(raw_buf[idx]); + } +#endif /* EWPR_DEBUG */ + DHD_FILTER_TRACE(("base64 encoding result:\n")); + DHD_FILTER_TRACE(("%s", raw_encode_buf)); + + bytes_written += ewpr_scnprintf(&ret_buf[bytes_written], + tot_len - bytes_written, DELIMITER_LEN + strlen(raw_encode_buf), + "base64 encoded raw data", "%c%s", KEY_DEL, raw_encode_buf); + +finished: + DHD_FILTER_ERR(("RET LEN:%d\n", (int)strlen(ret_buf))); + DHD_FILTER_TRACE(("ret_buf: %s", ret_buf)); + + dhd_ring_lock_free(ring[EWPF_IDX_TYPE_SLICE - 1]); + dhd_ring_lock_free(ring[EWPF_IDX_TYPE_IFACE - 1]); + dhd_ring_lock_free(ring[EWPF_IDX_TYPE_EVENT - 1]); + dhd_ring_lock_free(ring[EWPF_IDX_TYPE_KEY_INFO - 1]); + + MFREE(dhdp->osh, raw_buf, RAW_BUFFER_SIZE); + MFREE(dhdp->osh, raw_encode_buf, BASE64_BUFFER_SIZE); + return bytes_written; +} + +#ifdef EWPR_DEBUG +static void +ewpr_print_byte_as_bits(char val) +{ + int32 idx; + char buf[EWPR_DEBUG_BUF_LEN]; + for (idx = 0; idx < MAX_BIT_SIZE; idx++) { + scnprintf(&buf[idx], EWPR_DEBUG_BUF_LEN-idx, "%c", + (val & (1 << (MAX_BIT_SHIFT-idx))) ?
'1' : '0'); + } + buf[MAX_BIT_SIZE] = 0x0; + DHD_FILTER_ERR(("%s\n", buf)); +} +#endif /* EWPR_DEBUG */ + +static char* +ewpr_base64_encode(dhd_pub_t *dhdp, char* input, int32 length) +{ + /* set up a destination buffer large enough to hold the encoded data */ + char *output = MALLOCZ(dhdp->osh, BASE64_BUFFER_SIZE); + int32 cnt = 0; + + if (length > RAW_BUFFER_SIZE) { + DHD_FILTER_ERR(("%s: input data size is too big, size is limited to %d\n", + __FUNCTION__, RAW_BUFFER_SIZE)); + length = RAW_BUFFER_SIZE; + } + + cnt = dhd_base64_encode(input, length, output, BASE64_BUFFER_SIZE); + if (cnt == 0) { + DHD_FILTER_ERR(("%s: base64 encoding error\n", __FUNCTION__)); + } + return output; +} +#endif /* DHD_EWPR_VER2 */ + +#ifdef WLADPS_ENERGY_GAIN +#define ADPS_GAIN_ENERGY_CONV_UNIT 100000 /* energy unit(10^-2) * dur unit(10^-3) */ +static int +dhd_calculate_adps_energy_gain(wl_adps_energy_gain_v1_t *data) +{ + int i; + int energy_gain = 0; + + /* energy unit: (uAh * 10^-2)/sec */ + int pm0_idle_energy[MAX_BANDS] = + {ADPS_GAIN_2G_PM0_IDLE, ADPS_GAIN_5G_PM0_IDLE}; + int txpspoll_energy[MAX_BANDS] = + {ADPS_GAIN_2G_TX_PSPOLL, ADPS_GAIN_5G_TX_PSPOLL}; + + if (data->version == 0 || data->length != sizeof(*data)) { + DHD_FILTER_ERR(("%s - invalid adps_energy_gain data\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* dur unit: mSec */ + for (i = 0; i < MAX_BANDS; i++) { + energy_gain += (data->gain_data[i].pm_dur_gain * pm0_idle_energy[i]); + energy_gain -= (data->gain_data[i].step0_dur * txpspoll_energy[i]); + } + energy_gain /= ADPS_GAIN_ENERGY_CONV_UNIT; + + if (energy_gain < 0) { + energy_gain = 0; + } + + return energy_gain; +} + +int dhd_event_log_filter_adps_energy_gain(dhd_pub_t *dhdp) +{ + int ret; + + void *last_elem; + EWP_filter_t *filter; + EWPF_ifc_elem_t *ifc_elem; + + if (!dhdp || !dhdp->event_log_filter) { + DHD_FILTER_ERR(("%s - dhdp or event_log_filter is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + filter = (EWP_filter_t *)dhdp->event_log_filter; + + if (filter->enabled != TRUE) { + DHD_FILTER_ERR(("%s - EWP Filter is not enabled\n", __FUNCTION__)); + return BCME_UNSUPPORTED; + } + + /* Refer to STA interface */ + last_elem = dhd_ring_get_last(filter->i_ring[0]); + if (last_elem == NULL) { + DHD_FILTER_ERR(("%s - last_elem is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + ifc_elem = (EWPF_ifc_elem_t *)last_elem; + ret = dhd_calculate_adps_energy_gain(&ifc_elem->adps_energy_gain); + + return ret; +} +#endif /* WLADPS_ENERGY_GAIN */ diff --git a/bcmdhd.101.10.361.x/dhd_event_log_filter.h b/bcmdhd.101.10.361.x/dhd_event_log_filter.h new file mode 100755 index 0000000..a39da3b --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_event_log_filter.h @@ -0,0 +1,56 @@ +/* + * Wifi dongle status Filter and Report + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef dhd_event_log_filter_h
+#define dhd_event_log_filter_h
+#include <typedefs.h>
+#include <dhd.h>
+#include <dhd_debug.h>
+
+typedef struct {
+	uint16 version;
+	uint8 htr_type;		/* from wl_slice_hist_XX_stats_xtlv_id */
+	uint8 htr_num;		/* number of elements in htr_running or htr_rc */
+	uint32 htr_rn_last;	/* last reasons along with seq, etc */
+	uint32 htr_rn_ts_last;	/* last time stamps corr to htr_rn_last */
+	uint32 htr_rn_prev;	/* previous reasons along with seq, etc */
+	uint32 htr_rn_ts_prev;	/* last time stamps corr to htr_rn_prev */
+	uint32 htr_rc_max;	/* largest toss reasons and counts */
+	uint32 htr_rc_ts_max;	/* latest time stamp corr to htr_rc_max */
+	uint32 htr_rc_secnd;	/* second largest toss reasons and counts */
+	uint32 htr_rc_ts_secnd;	/* latest time stamps corr to htr_rc_secnd */
+} evt_hist_compact_toss_stats_v1_t;
+
+int dhd_event_log_filter_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size);
+void dhd_event_log_filter_deinit(dhd_pub_t *dhdp);
+void dhd_event_log_filter_event_handler(
+	dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, uint32 *data);
+
+void dhd_event_log_filter_notify_connect_request(dhd_pub_t *dhdp, uint8 *bssid, int channel);
+void dhd_event_log_filter_notify_connect_done(dhd_pub_t *dhdp, uint8 *bssid, int roam);
+#ifdef WLADPS_ENERGY_GAIN
+int dhd_event_log_filter_adps_energy_gain(dhd_pub_t *dhdp);
+#endif /* WLADPS_ENERGY_GAIN */
+#endif /* !dhd_event_log_filter_h */
diff --git a/bcmdhd.101.10.361.x/dhd_flowring.c b/bcmdhd.101.10.361.x/dhd_flowring.c
new file mode 100755
index 0000000..0841176
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_flowring.c
@@ -0,0 +1,1466 @@
+/*
+ * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
+ *
+ * Flow rings are transmit traffic (=propagating towards antenna) related entities
+ *
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+/** XXX Twiki: [PCIeFullDongleArchitecture] */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <ethernet.h>
+#include <bcmip.h>
+#include <bcmeth.h>
+
+#include <dngl_stats.h>
+
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <802.1d.h>
+#include <pcie_core.h>
+#include <bcmmsgbuf.h>
+#include <dhd_pcie.h>
+#include <dhd_flowring.h>
+
+static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);
+
+static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da);
+
+static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da);
+
+static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da, uint16 *flowid);
+int dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);
+
+#define FLOW_QUEUE_PKT_NEXT(p)		PKTLINK(p)
+#define FLOW_QUEUE_PKT_SETNEXT(p, x)	PKTSETLINK((p), (x))
+
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_FLOWRING_INFO DHD_TRACE
+#else
+#define DHD_FLOWRING_INFO DHD_INFO
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
+const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
+static INLINE int
+dhd_flow_queue_throttle(flow_queue_t *queue)
+{
+#if defined(BCM_ROUTER_DHD)
+	/* Two tests
+	 * 1) Test whether the overall level 2 (grandparent) cummulative threshold is crossed.
+	 * 2) Or test whether the queue's budget and the overall cummulative threshold are crossed.
+	 */
+	void *gp_clen_ptr = DHD_FLOW_QUEUE_L2CLEN_PTR(queue);
+	void *parent_clen_ptr = DHD_FLOW_QUEUE_CLEN_PTR(queue);
+	int gp_cumm_threshold = DHD_FLOW_QUEUE_L2THRESHOLD(queue);
+	int cumm_threshold = DHD_FLOW_QUEUE_THRESHOLD(queue);
+
+	int ret = ((DHD_CUMM_CTR_READ(gp_clen_ptr) > gp_cumm_threshold) ||
+		((DHD_FLOW_QUEUE_OVFL(queue, DHD_FLOW_QUEUE_MAX(queue))) &&
+		(DHD_CUMM_CTR_READ(parent_clen_ptr) > cumm_threshold)));
+	return ret;
+#else
+	return DHD_FLOW_QUEUE_FULL(queue);
+#endif /* ! BCM_ROUTER_DHD */
+}
+
+int
+BCMFASTPATH(dhd_flow_queue_overflow)(flow_queue_t *queue, void *pkt)
+{
+	return BCME_NORESOURCE;
+}
+
+/** Returns flow ring given a flowid */
+flow_ring_node_t *
+dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	ASSERT(dhdp != (dhd_pub_t*)NULL);
+	ASSERT(flowid <= dhdp->max_tx_flowid);
+	if (flowid > dhdp->max_tx_flowid) {
+		return NULL;
+	}
+
+	flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
+
+	ASSERT(flow_ring_node->flowid == flowid);
+	return flow_ring_node;
+}
+
+/** Returns 'backup' queue given a flowid */
+flow_queue_t *
+dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
+{
+	flow_ring_node_t *flow_ring_node = NULL;
+
+	flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+	if (flow_ring_node)
+		return &flow_ring_node->queue;
+	else
+		return NULL;
+}
+
+/* Flow ring's queue management functions */
+
+/** Reinitialize a flow ring's queue. */
+void
+dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+	ASSERT((queue != NULL) && (max > 0));
+
+	queue->head = queue->tail = NULL;
+	queue->len = 0;
+
+	/* Set the queue's threshold and the queue's parent cummulative length counter */
+	ASSERT(max > 1);
+	DHD_FLOW_QUEUE_SET_MAX(queue, max);
+	DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
+	DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
+	DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);
+
+	queue->failures = 0U;
+	queue->cb = &dhd_flow_queue_overflow;
+}
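dhd_flow_queue_throttle() above gates enqueues on a per-queue budget plus two shared cumulative counters (parent and grandparent, i.e. level 2). A hedged standalone model of that decision, with plain ints standing in for the driver's cumm_ctr objects and illustrative thresholds:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the DHD_FLOW_QUEUE_* accessors; values are illustrative. */
struct queue_model {
	int len, max;          /* queue length and budget */
	int *parent_clen;      /* parent cumulative length */
	int parent_threshold;
	int *gp_clen;          /* grandparent (level 2) cumulative length */
	int gp_threshold;
};

static bool throttle(const struct queue_model *q)
{
	/* 1) grandparent cumulative threshold crossed, or
	 * 2) queue over its own budget AND parent threshold crossed */
	return (*q->gp_clen > q->gp_threshold) ||
	       ((q->len > q->max) && (*q->parent_clen > q->parent_threshold));
}

int main(void)
{
	int parent = 300, gp = 900;
	struct queue_model q = { 40, 64, &parent, 256, &gp, 1024 };
	printf("throttle: %d\n", throttle(&q)); /* parent over threshold, queue under budget -> 0 */
	q.len = 80;
	printf("throttle: %d\n", throttle(&q)); /* queue over budget AND parent over threshold -> 1 */
	return 0;
}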
+/** Initialize a flow ring's queue, called on driver initialization. */
+void
+dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+	ASSERT((queue != NULL) && (max > 0));
+
+	dll_init(&queue->list);
+	dhd_flow_queue_reinit(dhdp, queue, max);
+}
+
+/** Register an enqueue overflow callback handler */
+void
+dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
+{
+	ASSERT(queue != NULL);
+	queue->cb = cb;
+}
+
+/**
+ * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
+ * to the flow ring itself.
+ */
+int
+BCMFASTPATH(dhd_flow_queue_enqueue)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+	int ret = BCME_OK;
+
+	ASSERT(queue != NULL);
+
+	if (dhd_flow_queue_throttle(queue)) {
+		queue->failures++;
+		ret = (*queue->cb)(queue, pkt);
+		goto done;
+	}
+
+	if (queue->head) {
+		FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
+	} else {
+		queue->head = pkt;
+	}
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
+
+	queue->tail = pkt; /* at tail */
+
+	queue->len++;
+	/* increment parent's cummulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* increment grandparent's cummulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+
+done:
+	return ret;
+}
+
+/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
+void *
+BCMFASTPATH(dhd_flow_queue_dequeue)(dhd_pub_t *dhdp, flow_queue_t *queue)
+{
+	void *pkt;
+
+	ASSERT(queue != NULL);
+
+	pkt = queue->head; /* from head */
+
+	if (pkt == NULL) {
+		ASSERT((queue->len == 0) && (queue->tail == NULL));
+		goto done;
+	}
+
+	queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
+	if (queue->head == NULL)
+		queue->tail = NULL;
+
+	queue->len--;
+	/* decrement parent's cummulative length */
+	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* decrement grandparent's cummulative length */
+	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */
+
+done:
+	return pkt;
+}
+
+/** Reinsert a dequeued 802.3 packet back at the head */
+void
+BCMFASTPATH(dhd_flow_queue_reinsert)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+	if (queue->head == NULL) {
+		queue->tail = pkt;
+	}
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
+	queue->head = pkt;
+	queue->len++;
+	/* increment parent's cummulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* increment grandparent's cummulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+}
+
+/** Fetch the backup queue for a flowring, and assign flow control thresholds */
+void
+dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+	int queue_budget, int cumm_threshold, void *cumm_ctr,
+	int l2cumm_threshold, void *l2cumm_ctr)
+{
+	flow_queue_t *queue = NULL;
+
+	ASSERT(dhdp != (dhd_pub_t*)NULL);
+	ASSERT(queue_budget > 1);
+	ASSERT(cumm_threshold > 1);
+	ASSERT(cumm_ctr != (void*)NULL);
+	ASSERT(l2cumm_threshold > 1);
+	ASSERT(l2cumm_ctr != (void*)NULL);
+
+	queue = dhd_flow_queue(dhdp, flowid);
+	if (queue) {
+		DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
+
+		/* Set the queue's parent threshold and cummulative counter */
+		DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
+		DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
+
+		/* Set the queue's grandparent threshold and cummulative counter */
+		DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
+		DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
+	}
+}
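The enqueue/dequeue pair above is a plain singly linked FIFO whose length is mirrored into shared counters via DHD_CUMM_CTR_INCR/DECR. A self-contained sketch of the same bookkeeping; the struct names and the plain int counter are illustrative stand-ins:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct pkt { struct pkt *next; int id; };
struct fifo { struct pkt *head, *tail; int len; int *cumm; };

static void enq(struct fifo *q, struct pkt *p)
{
	p->next = NULL;
	if (q->head)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;             /* at tail */
	q->len++;
	(*q->cumm)++;            /* parent counter tracks all queues */
}

static struct pkt *deq(struct fifo *q)
{
	struct pkt *p = q->head; /* from head (FIFO) */
	if (!p)
		return NULL;
	q->head = p->next;
	if (!q->head)
		q->tail = NULL;
	p->next = NULL;          /* detach packet from queue */
	q->len--;
	(*q->cumm)--;
	return p;
}

int main(void)
{
	int cumm = 0;
	struct fifo q = { NULL, NULL, 0, &cumm };
	struct pkt a = { NULL, 1 }, b = { NULL, 2 };
	enq(&q, &a);
	enq(&q, &b);
	assert(deq(&q)->id == 1);                /* FIFO order */
	printf("len=%d cumm=%d\n", q.len, cumm); /* len=1 cumm=1 */
	return 0;
}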
+/*
+ * This function returns the total number of flowrings that can be created for an INFRA STA.
+ * For prio2ac mapping, it will return 4, prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }
+ * For prio2tid mapping, it will return 8, prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }
+ */
+uint8
+dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
+{
+	uint8 prio_count = 0;
+	int i;
+	/* Pick all elements one by one */
+	for (i = 0; i < NUMPRIO; i++)
+	{
+		/* Check if the picked element is already counted */
+		int j;
+		for (j = 0; j < i; j++) {
+			if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
+				break;
+			}
+		}
+		/* If not counted earlier, then count it */
+		if (i == j) {
+			prio_count++;
+		}
+	}
+
+	return prio_count;
+}
+
+uint8
+dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
+{
+	uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
+	uint8 total_tx_flow_rings = (uint8)dhd_get_max_flow_rings(dhdp);
+	uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
+	return max_multi_client_flow_rings;
+}
+
+int
+dhd_flowid_map_init(dhd_pub_t *dhdp, uint16 max_tx_flow_rings)
+{
+#if defined(DHD_HTPUT_TUNABLES)
+	uint16 max_normal_tx_flow_rings = max_tx_flow_rings - HTPUT_TOTAL_FLOW_RINGS;
+#else
+	uint16 max_normal_tx_flow_rings = max_tx_flow_rings;
+#endif /* DHD_HTPUT_TUNABLES */
+
+	/* Construct a normal flowid allocator from FLOWID_RESERVED to
+	 * (max_normal_tx_flow_rings - 1)
+	 */
+	dhdp->flowid_allocator = id16_map_init(dhdp->osh, max_normal_tx_flow_rings,
+		FLOWID_RESERVED);
+	if (dhdp->flowid_allocator == NULL) {
+		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+
+#if defined(DHD_HTPUT_TUNABLES)
+	if (HTPUT_TOTAL_FLOW_RINGS > 0) {
+		dhdp->htput_flow_ring_start = max_normal_tx_flow_rings + FLOWID_RESERVED;
+		/* Construct a htput flowid allocator from htput_flow_ring_start to
+		 * (htput_flow_ring_start + HTPUT_TOTAL_FLOW_RINGS - 1)
+		 */
+		dhdp->htput_flowid_allocator = id16_map_init(dhdp->osh, HTPUT_TOTAL_FLOW_RINGS,
+			dhdp->htput_flow_ring_start);
+		if (dhdp->htput_flowid_allocator == NULL) {
+			DHD_ERROR(("%s: htput flowid allocator init failure\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		dhdp->htput_client_flow_rings = 0u;
+	}
+#endif /* DHD_HTPUT_TUNABLES */
+
+	return BCME_OK;
+}
+
+void
+dhd_flowid_map_deinit(dhd_pub_t *dhdp)
+{
+	if (dhdp->flowid_allocator) {
+		dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
+	}
+	ASSERT(dhdp->flowid_allocator == NULL);
+
+#if defined(DHD_HTPUT_TUNABLES)
+	if (dhdp->htput_flowid_allocator) {
+		dhdp->htput_flowid_allocator = id16_map_fini(dhdp->osh,
+			dhdp->htput_flowid_allocator);
+		ASSERT(dhdp->htput_flowid_allocator == NULL);
+	}
+	dhdp->htput_client_flow_rings = 0u;
+#endif /* DHD_HTPUT_TUNABLES */
+	return;
+}
+
+/** Initializes data structures of multiple flow rings
+ * num_h2d_rings - max_h2d_rings including static and dynamic rings
+ */
+int
+dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_h2d_rings)
+{
+	uint32 idx;
+	uint32 flow_ring_table_sz = 0;
+	uint32 if_flow_lkup_sz = 0;
+	flow_ring_table_t *flow_ring_table = NULL;
+	if_flow_lkup_t *if_flow_lkup = NULL;
+	void *lock = NULL;
+	void *list_lock = NULL;
+	unsigned long flags;
+	uint16 max_tx_flow_rings;
+
+	DHD_INFO(("%s\n", __FUNCTION__));
+
+	/*
+	 * Only the 16-bit flowid map is allocated for the actual number of Tx flowrings,
+	 * excluding common rings.
+	 * All other flowring data structures are allocated for all num_h2d_rings.
+ */ + max_tx_flow_rings = dhd_get_max_flow_rings(dhdp); + if (dhd_flowid_map_init(dhdp, max_tx_flow_rings) != BCME_OK) { + DHD_ERROR(("%s: dhd_flowid_map_init failure\n", __FUNCTION__)); + goto fail; + } + + /* Any Tx flow id should not be > max_tx_flowid */ + dhdp->max_tx_flowid = max_tx_flow_rings + FLOWID_RESERVED - 1; + + /* Allocate a flow ring table, comprising of requested number of rings */ + flow_ring_table_sz = (num_h2d_rings * sizeof(flow_ring_node_t)); + flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz); + if (flow_ring_table == NULL) { + DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__)); + goto fail; + } + + /* Initialize flow ring table state */ + DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr); + DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr); + bzero((uchar *)flow_ring_table, flow_ring_table_sz); + for (idx = 0; idx < num_h2d_rings; idx++) { + flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED; + flow_ring_table[idx].flowid = (uint16)idx; + flow_ring_table[idx].lock = osl_spin_lock_init(dhdp->osh); +#ifdef IDLE_TX_FLOW_MGMT + flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME(); +#endif /* IDLE_TX_FLOW_MGMT */ + if (flow_ring_table[idx].lock == NULL) { + DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__)); + goto fail; + } + + dll_init(&flow_ring_table[idx].list); + + /* Initialize the per flow ring backup queue */ + dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue, + dhdp->conf->flow_ring_queue_threshold); + } + + /* Allocate per interface hash table (for fast lookup from interface to flow ring) */ + if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; + if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp, + DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz); + if (if_flow_lkup == NULL) { + DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__)); + goto fail; + } + + /* Initialize per interface hash table */ + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + int hash_ix; + if_flow_lkup[idx].status = 0; + if_flow_lkup[idx].role = 0; + for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++) + if_flow_lkup[idx].fl_hash[hash_ix] = NULL; + } + + lock = osl_spin_lock_init(dhdp->osh); + if (lock == NULL) + goto fail; + + list_lock = osl_spin_lock_init(dhdp->osh); + if (list_lock == NULL) + goto lock_fail; + + dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP; + bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + + dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp); + dhdp->multi_client_flow_rings = 0U; + +#ifdef DHD_LOSSLESS_ROAMING + dhdp->dequeue_prec_map = ALLPRIO; +#endif + /* Now populate into dhd pub */ + DHD_FLOWID_LOCK(lock, flags); + dhdp->num_h2d_rings = num_h2d_rings; + dhdp->flow_ring_table = (void *)flow_ring_table; + dhdp->if_flow_lkup = (void *)if_flow_lkup; + dhdp->flowid_lock = lock; + dhdp->flow_rings_inited = TRUE; + dhdp->flowring_list_lock = list_lock; + DHD_FLOWID_UNLOCK(lock, flags); + + DHD_INFO(("%s done\n", __FUNCTION__)); + return BCME_OK; + +lock_fail: + /* deinit the spinlock */ + osl_spin_lock_deinit(dhdp->osh, lock); + +fail: + /* Destruct the per interface flow lkup table */ + if (if_flow_lkup != NULL) { + DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz); + } + if (flow_ring_table != NULL) { + for (idx = 0; idx < num_h2d_rings; idx++) { + if (flow_ring_table[idx].lock != NULL) + osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + } + MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); + } + dhd_flowid_map_deinit(dhdp); + + return BCME_NOMEM; 
+} + +/** Deinit Flow Ring specific data structures */ +void dhd_flow_rings_deinit(dhd_pub_t *dhdp) +{ + uint16 idx; + uint32 flow_ring_table_sz; + uint32 if_flow_lkup_sz; + flow_ring_table_t *flow_ring_table; + unsigned long flags; + void *lock; + + DHD_INFO(("dhd_flow_rings_deinit\n")); + + if (!(dhdp->flow_rings_inited)) { + DHD_ERROR(("dhd_flow_rings not initialized!\n")); + return; + } + + if (dhdp->flow_ring_table != NULL) { + + ASSERT(dhdp->num_h2d_rings > 0); + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + dhdp->flow_ring_table = NULL; + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + for (idx = 0; idx < dhdp->num_h2d_rings; idx++) { + if (flow_ring_table[idx].active) { + dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue)); + + /* Deinit flow ring queue locks before destroying flow ring table */ + if (flow_ring_table[idx].lock != NULL) { + osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + } + flow_ring_table[idx].lock = NULL; + + } + + /* Destruct the flow ring table */ + flow_ring_table_sz = dhdp->num_h2d_rings * sizeof(flow_ring_table_t); + MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); + } + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + + /* Destruct the per interface flow lkup table */ + if (dhdp->if_flow_lkup != NULL) { + if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; + bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz); + DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz); + dhdp->if_flow_lkup = NULL; + } + + /* Destruct the flowid allocator */ + dhd_flowid_map_deinit(dhdp); + + dhdp->num_h2d_rings = 0U; + bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + + dhdp->max_multi_client_flow_rings = 0U; + dhdp->multi_client_flow_rings = 0U; + + lock = dhdp->flowid_lock; + dhdp->flowid_lock = NULL; + + if (lock) { + DHD_FLOWID_UNLOCK(lock, flags); + osl_spin_lock_deinit(dhdp->osh, lock); + } + + osl_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock); + dhdp->flowring_list_lock = NULL; + + ASSERT(dhdp->if_flow_lkup == NULL); + ASSERT(dhdp->flow_ring_table == NULL); + dhdp->flow_rings_inited = FALSE; +} + +/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */ +uint8 +dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex) +{ + if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + ASSERT(if_flow_lkup); + return if_flow_lkup[ifindex].role; +} + +#ifdef WLTDLS +bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da) +{ + unsigned long flags; + tdls_peer_node_t *cur = NULL; + + DHD_TDLS_LOCK(&dhdp->tdls_lock, flags); + /* Check only if tdls peer is added */ + if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da))) { + cur = dhdp->peer_tbl.node; + + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); + return TRUE; + } + cur = cur->next; + } + } + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); + return FALSE; +} +#endif /* WLTDLS */ + +/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */ +static INLINE uint16 +dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) +{ + int hash; + bool ismcast = FALSE; + flow_hash_info_t *cur; + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return FLOWID_INVALID; + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t 
+		*)dhdp->if_flow_lkup;
+
+	ASSERT(if_flow_lkup);
+
+	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+#ifdef WLTDLS
+		if (is_tdls_destination(dhdp, da)) {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+			cur = if_flow_lkup[ifindex].fl_hash[hash];
+			while (cur != NULL) {
+				if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
+					DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+					return cur->flowid;
+				}
+				cur = cur->next;
+			}
+			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+			return FLOWID_INVALID;
+		}
+#endif /* WLTDLS */
+		/* For STA non TDLS dest and WDS dest the flow ring id is mapped based on prio only */
+		cur = if_flow_lkup[ifindex].fl_hash[prio];
+		if (cur) {
+			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+			return cur->flowid;
+		}
+	} else {
+
+		if (ETHER_ISMULTI(da)) {
+			ismcast = TRUE;
+			hash = 0;
+		} else {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+		}
+
+		cur = if_flow_lkup[ifindex].fl_hash[hash];
+
+		while (cur) {
+			if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
+				(!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
+				(cur->flow_info.tid == prio))) {
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				return cur->flowid;
+			}
+			cur = cur->next;
+		}
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+#ifdef DHD_EFI
+	DHD_TRACE(("%s: cannot find flowid\n", __FUNCTION__));
+#else
+	DHD_FLOWRING_INFO(("%s: cannot find flowid\n", __FUNCTION__));
+#endif
+	return FLOWID_INVALID;
+} /* dhd_flowid_find */
+
+static uint16
+dhd_flowid_map_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *da)
+{
+	uint16 flowid = FLOWID_INVALID;
+	ASSERT(dhdp->flowid_allocator != NULL);
+
+#if defined(DHD_HTPUT_TUNABLES)
+	if (dhdp->htput_flowid_allocator) {
+		if (prio == HTPUT_FLOW_RING_PRIO) {
+			if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+				/* For the STA case, only one flowring per PRIO is created,
+				 * so there is no need for a HTPUT counter variable for STA.
+				 * If a HTPUT flowring is already allocated for the given
+				 * HTPUT_PRIO, this function will not even get called, as
+				 * dhd_flowid_find will take care of assigning the same
+				 * flowid for those HTPUT_PRIO packets.
+				 */
+				flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
+			} else if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex) && !ETHER_ISMULTI(da)) {
+				/* Use HTPUT flowrings for only HTPUT_NUM_CLIENT_FLOW_RINGS */
+				if (dhdp->htput_client_flow_rings < HTPUT_NUM_CLIENT_FLOW_RINGS) {
+					flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
+					/* increment htput client counter */
+					if (flowid != FLOWID_INVALID) {
+						dhdp->htput_client_flow_rings++;
+					}
+				}
+			}
+		}
+	}
+#endif /* DHD_HTPUT_TUNABLES */
+
+	BCM_REFERENCE(flowid);
+
+	/*
+	 * For the HTPUT case, if the high throughput flowrings are already allocated
+	 * for the given role, control comes here.
+	 */
+	if (flowid == FLOWID_INVALID) {
+		flowid = id16_map_alloc(dhdp->flowid_allocator);
+	}
+
+	return flowid;
+}
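With DHD_HTPUT_TUNABLES, the 16-bit flowid space is split into a normal range starting at FLOWID_RESERVED and a high-throughput range appended right after it; DHD_IS_FLOWID_HTPUT then classifies an id purely by range. A standalone model of that split, with example sizes (the real counts come from dhd_get_max_flow_rings and the HTPUT_* tunables):

#include <stdio.h>

#define FLOWID_RESERVED        2    /* ids 0..1 model the common rings */
#define MAX_NORMAL_FLOW_RINGS  10   /* example sizes, not from the patch */
#define HTPUT_TOTAL_FLOW_RINGS 4

/* normal ids: [2 .. 11], htput ids: [12 .. 15] */
static const int htput_start = FLOWID_RESERVED + MAX_NORMAL_FLOW_RINGS;

static int is_htput(int flowid)
{
	return flowid >= htput_start &&
	       flowid < htput_start + HTPUT_TOTAL_FLOW_RINGS;
}

int main(void)
{
	printf("flowid 5:  htput=%d\n", is_htput(5));   /* 0: normal range */
	printf("flowid 13: htput=%d\n", is_htput(13));  /* 1: htput range */
	return 0;
}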
+/** Create a unique Flow ID, called when a flow ring is created. */
+static INLINE uint16
+dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+	flow_hash_info_t *fl_hash_node, *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	int hash;
+	uint16 flowid;
+	unsigned long flags;
+
+	fl_hash_node = (flow_hash_info_t *)MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
+	if (fl_hash_node == NULL) {
+		DHD_ERROR(("%s: flow_hash_info_t memory allocation failed\n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	flowid = dhd_flowid_map_alloc(dhdp, ifindex, prio, da);
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	if (flowid == FLOWID_INVALID) {
+		MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
+		DHD_ERROR_RLMT(("%s: cannot get a free flowid\n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+
+	fl_hash_node->flowid = flowid;
+	fl_hash_node->flow_info.tid = prio;
+	fl_hash_node->flow_info.ifindex = ifindex;
+	fl_hash_node->next = NULL;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+		/* For STA/GC non TDLS dest and WDS dest we allocate the entry based on prio only */
+#ifdef WLTDLS
+		if (is_tdls_destination(dhdp, da)) {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+			cur = if_flow_lkup[ifindex].fl_hash[hash];
+			if (cur) {
+				while (cur->next) {
+					cur = cur->next;
+				}
+				cur->next = fl_hash_node;
+			} else {
+				if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+			}
+		} else
+#endif /* WLTDLS */
+			if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
+	} else {
+
+		/* For bcast/mcast assign the first slot in the interface */
+		hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
+		cur = if_flow_lkup[ifindex].fl_hash[hash];
+		if (cur) {
+			while (cur->next) {
+				cur = cur->next;
+			}
+			cur->next = fl_hash_node;
+		} else
+			if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	DHD_FLOWRING_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+
+	if (fl_hash_node->flowid > dhdp->max_tx_flowid) {
+		DHD_ERROR(("%s: flowid=%d max_tx_flowid=%d ifindex=%d prio=%d role=%d\n",
+			__FUNCTION__, fl_hash_node->flowid, dhdp->max_tx_flowid,
+			ifindex, prio, if_flow_lkup[ifindex].role));
+		dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+		dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+		return FLOWID_INVALID;
+	}
+
+	return fl_hash_node->flowid;
+} /* dhd_flowid_alloc */
+
+/** Get the flow ring ID; if not present, try to create one */
+static INLINE int
+dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+	uint16 id;
+	flow_ring_node_t *flow_ring_node;
+	flow_ring_table_t *flow_ring_table;
+	unsigned long flags;
+	int ret;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	if (!dhdp->flow_ring_table) {
+		return BCME_ERROR;
+	}
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return BCME_BADARG;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
+
+	if (id == FLOWID_INVALID) {
+		bool if_role_multi_client;
+		if_flow_lkup_t *if_flow_lkup;
+		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+		if (!if_flow_lkup[ifindex].status)
+			return BCME_ERROR;
+
+		/* check role for multi client case */
+		if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+		/* Abort Flowring creation if multi client flowrings
crossed the threshold */ +#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS + if (if_role_multi_client && + (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) { + DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n", + __FUNCTION__, dhdp->multi_client_flow_rings, + dhdp->max_multi_client_flow_rings)); + return BCME_ERROR; + } +#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */ + + /* Do not create Flowring if peer is not associated */ +#if (defined(linux) || defined(LINUX)) && defined(PCIE_FULL_DONGLE) + if (if_role_multi_client && !ETHER_ISMULTI(da) && + !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) { + DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__)); + return BCME_ERROR; + } +#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */ + + id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da); + if (id == FLOWID_INVALID) { + DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n", + __FUNCTION__, ifindex, if_flow_lkup[ifindex].status)); + return BCME_ERROR; + } + + ASSERT(id <= dhdp->max_tx_flowid); + + /* Only after flowid alloc, increment multi_client_flow_rings */ + if (if_role_multi_client) { + dhdp->multi_client_flow_rings++; + } + + /* register this flowid in dhd_pub */ + dhd_add_flowid(dhdp, ifindex, prio, da, id); + + flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* Init Flow info */ + memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa)); + memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da)); + flow_ring_node->flow_info.tid = prio; + flow_ring_node->flow_info.ifindex = ifindex; + flow_ring_node->active = TRUE; + flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING; + +#ifdef DEVICE_TX_STUCK_DETECT + flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME(); + flow_ring_node->stuck_count = 0; +#endif /* DEVICE_TX_STUCK_DETECT */ +#ifdef TX_STATUS_LATENCY_STATS + flow_ring_node->flow_info.num_tx_status = 0; + flow_ring_node->flow_info.cum_tx_status_latency = 0; + flow_ring_node->flow_info.num_tx_pkts = 0; +#endif /* TX_STATUS_LATENCY_STATS */ +#ifdef BCMDBG + bzero(&flow_ring_node->flow_info.tx_status[0], + sizeof(uint32) * DHD_MAX_TX_STATUS_MSGS); +#endif + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Create and inform device about the new flow */ + if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node) + != BCME_OK) { + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_CLOSED; + flow_ring_node->active = FALSE; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id)); + return BCME_ERROR; + } + + *flowid = id; + return BCME_OK; + } else { + /* if the Flow id was found in the hash */ + + if (id > dhdp->max_tx_flowid) { + DHD_ERROR(("%s: Invalid flow id : %u, max_tx_flowid : %u\n", + __FUNCTION__, id, dhdp->max_tx_flowid)); + *flowid = FLOWID_INVALID; + ASSERT(0); + return BCME_ERROR; + } + + flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* + * If the flow_ring_node is in Open State or Status pending state then + * we can return the Flow id to the caller.If the flow_ring_node is in + * FLOW_RING_STATUS_PENDING this means the creation is in progress and + * hence the packets should be queued. + * + * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or + * FLOW_RING_STATUS_CLOSED, then we should return Error. 
+		 * Note that if the flowring is being deleted we would mark it as
+		 * FLOW_RING_STATUS_DELETE_PENDING. Now before the Dongle could respond and
+		 * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets.
+		 * We should drop the packets in that case.
+		 * The decision to return OK should NOT be based on the 'active' variable, because
+		 * active is made TRUE when a flow_ring_node gets allocated and is made
+		 * FALSE when the flow ring gets removed, and so does not reflect the true state
+		 * of the Flow ring.
+		 * In case IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
+		 * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
+		 * is to be returned and from dhd_bus_txdata, the flowring would be resumed again.
+		 * The status FLOW_RING_STATUS_RESUME_PENDING is equivalent to
+		 * FLOW_RING_STATUS_CREATE_PENDING.
+		 */
+		if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
+			flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
+			*flowid = FLOWID_INVALID;
+			ret = BCME_ERROR;
+		} else {
+			*flowid = id;
+			ret = BCME_OK;
+		}
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		return ret;
+	} /* Flow Id found in the hash */
+} /* dhd_flowid_lookup */
+
+int
+dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+	int hashidx = 0;
+	bool found = FALSE;
+	flow_hash_info_t *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	if (!dhdp->flow_ring_table) {
+		DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+	for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
+		cur = if_flow_lkup[ifindex].fl_hash[hashidx];
+		if (cur) {
+			if (cur->flowid == flowid) {
+				found = TRUE;
+			}
+
+			while (!found && cur) {
+				if (cur->flowid == flowid) {
+					found = TRUE;
+					break;
+				}
+				cur = cur->next;
+			}
+
+			if (found) {
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				return BCME_OK;
+			}
+		}
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return BCME_ERROR;
+}
+
+int
+dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+	return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
+}
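The long comment in dhd_flowid_lookup() boils down to a per-status decision: hand back (or queue for) the ring while it is usable or being created, drop while it is being deleted or closed. A condensed, hedged model of that state machine, with the IDLE_TX_FLOW_MGMT states folded in (enum values here are stand-ins for the FLOW_RING_STATUS_* defines):

#include <stdio.h>

enum ring_status {
	RING_OPEN, RING_CREATE_PENDING, RING_CLOSED,
	RING_DELETE_PENDING, RING_SUSPENDED, RING_RESUME_PENDING
};

/* Return 1 if a tx packet may be handed to (or queued for) this ring. */
static int ring_usable(enum ring_status s)
{
	switch (s) {
	case RING_OPEN:            /* ring exists in the dongle */
	case RING_CREATE_PENDING:  /* creation in flight: queue the packet */
	case RING_SUSPENDED:       /* resumed later from the txdata path */
	case RING_RESUME_PENDING:  /* treated like create-pending */
		return 1;
	case RING_DELETE_PENDING:  /* deletion in flight: drop */
	case RING_CLOSED:
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d %d\n", ring_usable(RING_CREATE_PENDING),
	       ring_usable(RING_DELETE_PENDING)); /* prints: 1 0 */
	return 0;
}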
+/**
+ * Assign an existing or newly created flowid to an 802.3 packet. This flowid is later on used to
+ * select the flowring to send the packet to the dongle.
+ */
+int
+BCMFASTPATH(dhd_flowid_update)(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+	struct ether_header *eh = (struct ether_header *)pktdata;
+	uint16 flowid = 0;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+
+	if (ifindex >= DHD_MAX_IFS) {
+		return BCME_BADARG;
+	}
+
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: Flow rings are not initialized yet\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
+		&flowid) != BCME_OK) {
+		return BCME_ERROR;
+	}
+
+	DHD_FLOWRING_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
+
+	/* Tag the packet with flowid */
+	DHD_PKT_SET_FLOWID(pktbuf, flowid);
+	return BCME_OK;
+}
+
+static void
+dhd_flowid_map_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+#if defined(DHD_HTPUT_TUNABLES)
+	if (dhdp->htput_flowid_allocator) {
+		if (DHD_IS_FLOWID_HTPUT(dhdp, flowid)) {
+			id16_map_free(dhdp->htput_flowid_allocator, flowid);
+			/* decrement htput client counter */
+			if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex)) {
+				dhdp->htput_client_flow_rings--;
+			}
+			return;
+		}
+	}
+#endif /* DHD_HTPUT_TUNABLES */
+
+	id16_map_free(dhdp->flowid_allocator, flowid);
+
+	return;
+}
+
+void
+dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+	int hashix;
+	bool found = FALSE;
+	flow_hash_info_t *cur, *prev;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+	bool if_role_multi_client;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
+
+		cur = if_flow_lkup[ifindex].fl_hash[hashix];
+
+		if (cur) {
+			if (cur->flowid == flowid) {
+				found = TRUE;
+			}
+
+			prev = NULL;
+			while (!found && cur) {
+				if (cur->flowid == flowid) {
+					found = TRUE;
+					break;
+				}
+				prev = cur;
+				cur = cur->next;
+			}
+			if (found) {
+				if (!prev) {
+					if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
+				} else {
+					prev->next = cur->next;
+				}
+
+				/* Decrement multi_client_flow_rings */
+				if (if_role_multi_client) {
+					dhdp->multi_client_flow_rings--;
+				}
+
+				/* deregister flowid from dhd_pub. */
+				dhd_del_flowid(dhdp, ifindex, flowid);
+
+				dhd_flowid_map_free(dhdp, ifindex, flowid);
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
+
+				return;
+			}
+		}
+	}
+
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+	DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
+		__FUNCTION__, flowid));
+} /* dhd_flowid_free */
+
+/**
+ * Delete all Flow rings associated with the given interface. Called when e.g. the dongle
+ * indicates that a wireless link has gone down.
+ */ +void +dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex) +{ + uint32 id; + flow_ring_table_t *flow_ring_table; + + DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex)); + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + if (!dhdp->flow_ring_table) + return; + + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + for (id = 0; id < dhdp->num_h2d_rings; id++) { + if (flow_ring_table[id].active && + (flow_ring_table[id].flow_info.ifindex == ifindex) && + (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) { + dhd_bus_flow_ring_delete_request(dhdp->bus, + (void *) &flow_ring_table[id]); + } + } +} + +void +dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex) +{ + uint32 id; + flow_ring_table_t *flow_ring_table; + + DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex)); + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + if (!dhdp->flow_ring_table) + return; + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + + for (id = 0; id < dhdp->num_h2d_rings; id++) { + if (flow_ring_table[id].active && + (flow_ring_table[id].flow_info.ifindex == ifindex) && + (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) { + dhd_bus_flow_ring_flush_request(dhdp->bus, + (void *) &flow_ring_table[id]); + } + } +} + +/** Delete flow ring(s) for given peer address. */ +void +dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) +{ + uint32 id; + flow_ring_table_t *flow_ring_table; + + DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex)); + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + if (!dhdp->flow_ring_table) + return; + + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + for (id = 0; id < dhdp->num_h2d_rings; id++) { + /* + * Send flowring delete request even if flowring status is + * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND + * event comes ahead of flowring create response. + * Otherwise the flowring will not be deleted later as there will not be any + * DISASSOC_IND event. With this change, when create response event comes to DHD, + * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response + * event will come, upon which DHD will delete the flowring. 
+		 */
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
+			((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
+			(flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
+			DHD_ERROR(("%s: deleting flowid %d\n",
+				__FUNCTION__, flow_ring_table[id].flowid));
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+				(void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+/** Handles interface ADD, CHANGE, DEL indications from the dongle */
+void
+dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 op, uint8 role)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	DHD_INFO(("%s: ifindex %u op %u role is %u\n",
+		__FUNCTION__, ifindex, op, role));
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: Flow rings are not initialized yet\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
+
+		if_flow_lkup[ifindex].role = role;
+
+		if (role == WLC_E_IF_ROLE_WDS) {
+			/**
+			 * The WDS role does not send a WLC_E_LINK event after the interface is up.
+			 * So to create flowrings for WDS, make the status TRUE in WLC_E_IF itself.
+			 * The same is done while making the status FALSE.
+			 * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that all
+			 * interfaces are handled uniformly.
+			 */
+			if_flow_lkup[ifindex].status = TRUE;
+			DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d\n",
+				__FUNCTION__, ifindex, role));
+		}
+	} else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
+		if_flow_lkup[ifindex].status = FALSE;
+		DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d\n",
+			__FUNCTION__, ifindex, role));
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+}
+
+/** Handles a STA 'link' indication from the dongle */
+int
+dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return BCME_BADARG;
+
+	DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (status) {
+		if_flow_lkup[ifindex].status = TRUE;
+	} else {
+		if_flow_lkup[ifindex].status = FALSE;
+	}
+
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return BCME_OK;
+}
+
+/** Update the flow priority mapping, called on IOVAR */
+int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
+{
+	uint16 flowid;
+	flow_ring_node_t *flow_ring_node;
+
+	if (map > DHD_FLOW_PRIO_LLR_MAP)
+		return BCME_BADOPTION;
+
+	/* Check if we need to change the prio map */
+	if (map == dhdp->flow_prio_map_type)
+		return BCME_OK;
+
+	/* If any ring is active we cannot change the priority mapping for flow rings */
+	for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
+		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+		if (flow_ring_node->active)
+			return BCME_EPERM;
+	}
+
+	/* Inform firmware about the new mapping type */
+	if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
+		return BCME_ERROR;
+
+	/* update internal structures */
+	dhdp->flow_prio_map_type = map;
+	if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
+		bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+	else
+		bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+
+	return BCME_OK;
+}
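The two maps just copied into dhd_pub differ only in granularity: prio2ac collapses the eight 802.1D priorities onto four WMM access categories (hence four flowrings per INFRA STA), while prio2tid keeps all eight TIDs. A small demo of the collapse; the BE/BK/VI/VO labels are the conventional WMM names for AC indices 0..3 and are an assumption here, not taken from this patch:

#include <stdio.h>

/* Same tables as the driver's prio2ac / prio2tid */
static const unsigned char prio2ac[8]  = { 0, 1, 1, 0, 2, 2, 3, 3 };
static const unsigned char prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
static const char *ac_name[4] = { "BE", "BK", "VI", "VO" };

int main(void)
{
	int prio;
	for (prio = 0; prio < 8; prio++)
		printf("prio %d -> AC %s, TID %d\n",
		       prio, ac_name[prio2ac[prio]], prio2tid[prio]);
	return 0; /* 8 priorities, but only 4 distinct ACs */
}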
+/** Inform firmware of the updated flow priority mapping, called on IOVAR */
+int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
+{
+	uint8 iovbuf[WLC_IOCTL_SMLEN];
+	int len;
+	uint32 val;
+	if (!set) {
+		bzero(&iovbuf, sizeof(iovbuf));
+		len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		if (len == 0) {
+			return BCME_BUFTOOSHORT;
+		}
+		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+			DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		*map = iovbuf[0];
+		return BCME_OK;
+	}
+	val = (uint32)map[0];
+	len = bcm_mkiovar("bus:fl_prio_map", (char *)&val, sizeof(val),
+		(char*)iovbuf, sizeof(iovbuf));
+	if (len == 0) {
+		return BCME_BUFTOOSHORT;
+	}
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set fl_prio_map\n",
+			__FUNCTION__));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
+
+uint32
+dhd_active_tx_flowring_bkpq_len(dhd_pub_t *dhd)
+{
+	unsigned long list_lock_flags;
+	dll_t *item, *prev;
+	flow_ring_node_t *flow_ring_node;
+	dhd_bus_t *bus = dhd->bus;
+	uint32 active_tx_flowring_qlen = 0;
+
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+
+	for (item = dll_tail_p(&bus->flowring_active_list);
+		!dll_end(&bus->flowring_active_list, item); item = prev) {
+
+		prev = dll_prev_p(item);
+
+		flow_ring_node = dhd_constlist_to_flowring(item);
+		if (flow_ring_node->active) {
+			DHD_INFO(("%s :%d\n", __FUNCTION__, flow_ring_node->queue.len));
+			active_tx_flowring_qlen += flow_ring_node->queue.len;
+		}
+	}
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+	return active_tx_flowring_qlen;
+}
+
+#ifdef DHD_AWDL
+/**
+ * Handle/Intercept the awdl peer op IOVAR fired by the user.
+ * buf = NULL means delete all peers on the awdl interface.
+ */
+void
+dhd_awdl_peer_op(dhd_pub_t *dhdp, uint8 ifindex, void *buf, uint32 buflen)
+{
+	awdl_peer_op_t *peer = (awdl_peer_op_t *)buf;
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+	if (!buf) {
+		/* Delete all peers on the awdl interface */
+		if_flow_lkup_t *if_flow_lkup;
+		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+		if (if_flow_lkup[ifindex].role != WLC_E_IF_ROLE_AWDL) {
+			DHD_ERROR(("%s: Interface %d is not an AWDL interface\n",
+				__FUNCTION__, ifindex));
+			return;
+		}
+		dhd_flow_rings_delete(dhdp, ifindex);
+		return;
+	}
+	/* Parse awdl_peer_op info now */
+	if (buflen < sizeof(awdl_peer_op_t)) {
+		DHD_ERROR(("%s: cannot handle awdl_peer_op add/del\n", __FUNCTION__));
+		return;
+	}
+	/**
+	 * Only flowring deletion is handled here;
+	 * flowring addition is taken care of in dhd_flowid_lookup.
+	 */
+	if (peer->opcode == AWDL_PEER_OP_DEL) {
+		dhd_del_sta(dhdp, ifindex, &peer->addr.octet[0]);
+		dhd_flow_rings_delete_for_peer(dhdp, ifindex, (char *)&peer->addr.octet[0]);
+	} else if (peer->opcode == AWDL_PEER_OP_ADD) {
+		dhd_findadd_sta(dhdp, ifindex, &peer->addr.octet[0]);
+	}
+	return;
+}
+#endif /* DHD_AWDL */
diff --git a/bcmdhd.101.10.361.x/dhd_flowring.h b/bcmdhd.101.10.361.x/dhd_flowring.h
new file mode 100755
index 0000000..873ca68
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_flowring.h
@@ -0,0 +1,350 @@
+/*
+ * @file Header file describing the flow rings DHD interfaces.
+ *
+ * Flow rings are transmit traffic (=propagating towards antenna) related entities.
+ *
+ * Provides type definitions and function prototypes used to create, delete and manage flow
+ * rings at a high level.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+/** XXX Twiki: [PCIeFullDongleArchitecture] */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_flowrings_h_
+#define _dhd_flowrings_h_
+
+/* Max pkts held in a flow ring's backup queue */
+#define FLOW_RING_QUEUE_THRESHOLD	(2048)
+
+/* Number of H2D common rings */
+#define FLOW_RING_COMMON		BCMPCIE_H2D_COMMON_MSGRINGS
+
+#define FLOWID_INVALID			(ID16_INVALID)
+#define FLOWID_RESERVED			(FLOW_RING_COMMON)
+
+#define FLOW_RING_STATUS_OPEN		0
+#define FLOW_RING_STATUS_CREATE_PENDING	1
+#define FLOW_RING_STATUS_CLOSED		2
+#define FLOW_RING_STATUS_DELETE_PENDING	3
+#define FLOW_RING_STATUS_FLUSH_PENDING	4
+
+#ifdef IDLE_TX_FLOW_MGMT
+#define FLOW_RING_STATUS_SUSPENDED	5
+#define FLOW_RING_STATUS_RESUME_PENDING	6
+#endif /* IDLE_TX_FLOW_MGMT */
+#define FLOW_RING_STATUS_STA_FREEING	7
+
+#if defined(DHD_HTPUT_TUNABLES)
+#define HTPUT_FLOW_RING_PRIO		PRIO_8021D_BE
+#define HTPUT_NUM_STA_FLOW_RINGS	1u
+#define HTPUT_NUM_CLIENT_FLOW_RINGS	3u
+#define HTPUT_TOTAL_FLOW_RINGS		(HTPUT_NUM_STA_FLOW_RINGS + HTPUT_NUM_CLIENT_FLOW_RINGS)
+#define DHD_IS_FLOWID_HTPUT(pub, flowid) \
+	((flowid >= (pub)->htput_flow_ring_start) && \
+	(flowid < ((pub)->htput_flow_ring_start + HTPUT_TOTAL_FLOW_RINGS)))
+#endif /* DHD_HTPUT_TUNABLES */
+
+#ifdef DHD_EFI
+/*
+ * Each lbuf is of size 2048 bytes, but the last 112 bytes are occupied by the lbuf header.
+ * Since the lbuf is a crucial data structure, we want to avoid operations very close to it,
+ * so we provide a pad of 136 bytes; the lbuf header and pad together are 248 bytes.
+ *
+ * So the maximum usable lbuf size is 1800 bytes.
+ *
+ * These 1800 bytes are utilized for the purposes below.
+ *
+ * 1. FW operating in mode2 requires 98 bytes for extra headers
+ * like SNAP, PLCP etc., whereas FW operating in mode4 requires 70 bytes.
+ * So in EFI DHD we consider 98 bytes, which fits chips operating in both mode2 and mode4.
+ *
+ * 2. For TPUT tests in EFI the user can request a maximum payload of 1500 bytes.
+ * To add the ethernet header, TPUT header etc. we reserve 100 bytes, so 1600 bytes are
+ * utilized for headers and payload.
+ *
+ * So 1698 (98 + 1600) bytes are consumed by 1 and 2,
+ * which still leaves 112 bytes that can be utilized
+ * if FW needs buffer space for more headers in future.
+ *
+ * --Update-- 13Jul2018 (above comments preserved for history)
+ * 3. In case of 11ax chips more headroom is required; FW requires a min. of 1920 bytes for Rx
+ * buffers, or it will trap. Therefore the size is bumped up to 1920 bytes, which
+ * leaves only 16 bytes of pad between the data and the lbuf header! Further size increases
+ * may not be possible!
+ */
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ	1920
+#else
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ	2048
+#endif /* DHD_EFI */
+
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX 4096
+
+#define DHD_FLOWRING_TX_BIG_PKT_SIZE	(3700u)
+
+#define DHD_FLOW_PRIO_AC_MAP		0
+#define DHD_FLOW_PRIO_TID_MAP		1
+/* Flow ring priority map for lossless roaming */
+#define DHD_FLOW_PRIO_LLR_MAP		2
+
+/* Hashing a MAC address for lkup into a per interface flow hash table */
+#define DHD_FLOWRING_HASH_SIZE	256
+#define DHD_FLOWRING_HASHINDEX(ea, prio) \
+	((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \
+	% DHD_FLOWRING_HASH_SIZE)
+
+#define DHD_IF_ROLE(pub, idx)		(((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
+#define DHD_IF_ROLE_AP(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
+#define DHD_IF_ROLE_STA(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
+#define DHD_IF_ROLE_P2PGC(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_CLIENT)
+#define DHD_IF_ROLE_P2PGO(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
+#define DHD_IF_ROLE_WDS(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS)
+#define DHD_IF_ROLE_IBSS(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_IBSS)
+#define DHD_IF_ROLE_NAN(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_NAN)
+
+#define DHD_IF_ROLE_GENERIC_STA(pub, idx) \
+	(DHD_IF_ROLE_STA(pub, idx) || DHD_IF_ROLE_P2PGC(pub, idx) || DHD_IF_ROLE_WDS(pub, idx))
+
+#ifdef DHD_AWDL
+#define DHD_IF_ROLE_AWDL(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AWDL)
+#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \
+	(DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) || DHD_IF_ROLE_AWDL(pub, idx) ||\
+	DHD_IF_ROLE_NAN(pub, idx))
+#else
+#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \
+	(DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) ||\
+	DHD_IF_ROLE_NAN(pub, idx))
+#endif /* DHD_AWDL */
+
+#define DHD_FLOW_RING(dhdp, flowid) \
+	(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
+
+struct flow_queue;
+
+/* Flow Ring Queue Enqueue overflow callback */
+typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt);
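DHD_FLOWRING_HASHINDEX above mixes the three low MAC bytes with the priority and reduces modulo the 256-entry table (since each XOR operand is already a byte, the modulo is only for form). A quick standalone check; the MAC value is an example:

#include <stdio.h>

#define HASH_SIZE 256
/* Same formula as DHD_FLOWRING_HASHINDEX: bytes 3..5 of the DA XOR prio */
static unsigned hashindex(const unsigned char *ea, unsigned char prio)
{
	return (ea[3] ^ ea[4] ^ ea[5] ^ prio) % HASH_SIZE;
}

int main(void)
{
	unsigned char da[6] = { 0x00, 0x90, 0x4c, 0x12, 0x34, 0x56 };
	/* 0x12 ^ 0x34 ^ 0x56 ^ 2 = 0x72 */
	printf("bucket = 0x%02x\n", hashindex(da, 2));
	return 0;
}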
+/**
+ * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred
+ * between queue and ring. A packet from the host stack is first added to the queue, and in a
+ * later stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets
+ * in the flow ring are device owned.
+ */
+typedef struct flow_queue {
+	dll_t list;		/* manage a flowring queue in a double linked list */
+	void *head;		/* first packet in the queue */
+	void *tail;		/* last packet in the queue */
+	uint16 len;		/* number of packets in the queue */
+	uint16 max;		/* maximum queue length (budget), also used with the cumm counters */
+	uint32 threshold;	/* parent's cummulative length threshold */
+	void *clen_ptr;		/* parent's cummulative length counter */
+	uint32 failures;	/* enqueue failures due to queue overflow */
+	flow_queue_cb_t cb;	/* callback invoked on threshold crossing */
+	uint32 l2threshold;	/* grandparent's (level 2) cummulative length threshold */
+	void *l2clen_ptr;	/* grandparent's (level 2) cummulative length counter */
+} flow_queue_t;
+
+#define DHD_FLOW_QUEUE_LEN(queue)	((int)(queue)->len)
+#define DHD_FLOW_QUEUE_MAX(queue)	((int)(queue)->max)
+#define DHD_FLOW_QUEUE_THRESHOLD(queue)	((int)(queue)->threshold)
+#define DHD_FLOW_QUEUE_L2THRESHOLD(queue) ((int)(queue)->l2threshold)
+#define DHD_FLOW_QUEUE_EMPTY(queue)	((queue)->len == 0)
+#define DHD_FLOW_QUEUE_FAILURES(queue)	((queue)->failures)
+
+#define DHD_FLOW_QUEUE_AVAIL(queue)	((int)((queue)->max - (queue)->len))
+#define DHD_FLOW_QUEUE_FULL(queue)	((queue)->len >= (queue)->max)
+
+#define DHD_FLOW_QUEUE_OVFL(queue, budget) \
+	(((queue)->len) > budget)
+
+#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \
+	((queue)->max) = ((budget) - 1)
+
+/* Queue's cummulative threshold. */
+#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \
+	((queue)->threshold) = ((cumm_threshold) - 1)
+
+/* Queue's cummulative length object accessor. */
+#define DHD_FLOW_QUEUE_CLEN_PTR(queue)	((queue)->clen_ptr)
+
+/* Set a queue's cumm_len pointer to a parent's cumm_ctr_t cummulative length */
+#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \
+	((queue)->clen_ptr) = (void *)(parent_clen_ptr)
+
+/* Queue's level 2 cummulative threshold. */
+#define DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold) \
+	((queue)->l2threshold) = ((l2cumm_threshold) - 1)
+
+/* Queue's level 2 cummulative length object accessor.
*/ +#define DHD_FLOW_QUEUE_L2CLEN_PTR(queue) ((queue)->l2clen_ptr) + +/* Set a queue's level 2 cumm_len point to a grandparent's cumm_ctr_t cummulative length */ +#define DHD_FLOW_QUEUE_SET_L2CLEN(queue, grandparent_clen_ptr) \ + ((queue)->l2clen_ptr) = (void *)(grandparent_clen_ptr) + +#if defined(BCMDBG) +#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) \ + dhd_bus_flow_ring_cnt_update(bus, flowid, txstatus) +#else +#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) +#endif /* BCMDBG */ + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_pkttag_fr { + uint16 flowid; + uint16 ifid; +} dhd_pkttag_fr_t; + +#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx)) +#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa)) +#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen)) +#define DHD_PKTTAG_IFID(tag) ((tag)->ifid) +#define DHD_PKTTAG_PA(tag) ((tag)->physaddr) +#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len) + +/** each flow ring is dedicated to a tid/sa/da combination */ +typedef struct flow_info { + uint8 tid; + uint8 ifindex; + uchar sa[ETHER_ADDR_LEN]; + uchar da[ETHER_ADDR_LEN]; +#if defined(BCMDBG) + uint32 tx_status[DHD_MAX_TX_STATUS_MSGS]; +#endif +#ifdef TX_STATUS_LATENCY_STATS + /* total number of tx_status received on this flowid */ + uint64 num_tx_status; + /* cumulative tx_status latency for this flowid */ + uint64 cum_tx_status_latency; + /* num tx packets sent on this flowring */ + uint64 num_tx_pkts; +#endif /* TX_STATUS_LATENCY_STATS */ +} flow_info_t; + +/** a flow ring is used for outbound (towards antenna) 802.3 packets */ +typedef struct flow_ring_node { + dll_t list; /* manage a constructed flowring in a dll, must be at first place */ + flow_queue_t queue; /* queues packets before they enter the flow ring, flow control */ + bool active; + uint8 status; + /* + * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For + * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation. 
+	 */
+	uint16 flowid;
+	flow_info_t flow_info;
+	void *prot_info;
+	void *lock;	/* lock for flowring access protection */
+
+#ifdef IDLE_TX_FLOW_MGMT
+	uint64 last_active_ts;	/* contains the last active timestamp */
+#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef DEVICE_TX_STUCK_DETECT
+	/* Time stamp (msec) when the last Tx packet completion was received on this flow ring */
+	uint32 tx_cmpl;
+	/*
+	 * Holds the tx_cmpl which was read during the previous
+	 * iteration of the stuck detection algo
+	 */
+	uint32 tx_cmpl_prev;
+	/* counter to decide if this particular flow is stuck or not */
+	uint32 stuck_count;
+#endif /* DEVICE_TX_STUCK_DETECT */
+#ifdef DHD_HP2P
+	bool hp2p_ring;
+#endif /* DHD_HP2P */
+} flow_ring_node_t;
+
+typedef flow_ring_node_t flow_ring_table_t;
+
+typedef struct flow_hash_info {
+	uint16 flowid;
+	flow_info_t flow_info;
+	struct flow_hash_info *next;
+} flow_hash_info_t;
+
+typedef struct if_flow_lkup {
+	bool status;
+	uint8 role;	/* Interface role: STA/AP */
+	flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */
+} if_flow_lkup_t;
+
+static INLINE flow_ring_node_t *
+dhd_constlist_to_flowring(dll_t *item)
+{
+	return ((flow_ring_node_t *)item);
+}
+
+/* Exported API */
+
+/* Flow ring's queue management functions */
+extern flow_ring_node_t *dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid);
+extern flow_queue_t *dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid);
+
+extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb);
+extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+extern void *dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue);
+extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+
+extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+	int queue_budget, int cumm_threshold, void *cumm_ctr,
+	int l2cumm_threshold, void *l2cumm_ctr);
+extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_h2d_rings);
+
+extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp);
+
+extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
+	void *pktbuf);
+extern int dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da, uint16 *flowid);
+extern int dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
+
+extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
+
+extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex);
+extern void dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex);
+
+extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex,
+	char *addr);
+
+/* Handle Interface ADD, DEL operations */
+extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 op, uint8 role);
+
+/* Handle a STA interface link status update */
+extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 status);
+extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set);
+extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map);
+extern uint32 dhd_active_tx_flowring_bkpq_len(dhd_pub_t *dhdp);
+#ifdef DHD_AWDL
+/* DHD handler for the awdl peer op IOVAR */
+extern void dhd_awdl_peer_op(dhd_pub_t *dhdp, uint8 ifindex,
+	void *buf, uint32 buflen);
+#endif /* DHD_AWDL */
+extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex);
+#endif /* _dhd_flowrings_h_ */
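Note the stored values in the SET_MAX/SET_THRESHOLD macros above are budget-1 and threshold-1, so DHD_FLOW_QUEUE_FULL (len >= max) fires one packet before the nominal budget. A sketch demonstrating the resulting admission count; the budget value is an example:

#include <stdio.h>

/* Mirror of DHD_FLOW_QUEUE_SET_MAX / DHD_FLOW_QUEUE_FULL: max stores budget - 1 */
struct q { int len, max; };
#define SET_MAX(q, budget) ((q)->max = (budget) - 1)
#define FULL(q)            ((q)->len >= (q)->max)

int main(void)
{
	struct q queue = { 0, 0 };
	int admitted = 0;
	SET_MAX(&queue, 8);          /* nominal budget of 8 */
	while (!FULL(&queue)) {
		queue.len++;
		admitted++;
	}
	printf("admitted %d of budget 8\n", admitted); /* 7: one held back */
	return 0;
}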
+#endif /* _dhd_flowrings_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_fwtrace.c b/bcmdhd.101.10.361.x/dhd_fwtrace.c
new file mode 100755
index 0000000..4737b43
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_fwtrace.c
@@ -0,0 +1,563 @@
+/*
+ * Firmware trace handling on the DHD side. A kernel thread reads the trace data and
+ * writes it to a file; this module also implements various utility functions.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifdef BCMINTERNAL
+
+#ifdef DHD_FWTRACE
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+static int fwtrace_write_to_file(uint8 *buf, uint16 buf_len, dhd_pub_t *dhdp);
+static int fwtrace_close_file(dhd_pub_t *dhdp);
+static int fwtrace_open_file(uint32 fw_trace_enabled, dhd_pub_t *dhdp);
+static fwtrace_buf_t *fwtrace_get_trace_data_ptr(dhd_pub_t *dhdp);
+static void fwtrace_free_trace_buf(dhd_pub_t *dhdp);
+
+typedef struct fwtrace_info {
+    struct file    *fw_trace_fp;
+    int        file_index;
+    int        part_index;
+    int        trace_buf_index;
+    int        trace_buf_count;
+    uint16        overflow_counter;
+
+    char        trace_file[TRACE_FILE_NAME_LEN];
+
+    fwtrace_buf_t    *trace_data_ptr;
+
+    uint16        prev_seq;
+
+    uint32        fwtrace_enable;    /* Enable firmware tracing and the
+                     * trace file management.
+                     */
+    struct mutex    fwtrace_lock;    /* Synchronization between the
+                     * ioctl and the kernel thread.
+                     */
+    dhd_dma_buf_t    fwtrace_buf;    /* firmware trace buffer */
+} fwtrace_info_t;
+
+int
+dhd_fwtrace_attach(dhd_pub_t *dhdp)
+{
+    fwtrace_info_t *fwtrace_info;
+
+    /* Allocate the fwtrace_info structure */
+    if (!(fwtrace_info = (fwtrace_info_t *)VMALLOCZ(dhdp->osh, sizeof(*fwtrace_info)))) {
+        DHD_ERROR(("%s: VMALLOCZ failed\n", __FUNCTION__));
+        return (BCME_NOMEM);
+    }
+
+    bzero(fwtrace_info, sizeof(*fwtrace_info));
+    dhdp->fwtrace_info = fwtrace_info;
+
+    mutex_init(&dhdp->fwtrace_info->fwtrace_lock);
+
+    DHD_INFO(("allocated DHD fwtrace\n"));
+
+    return BCME_OK;
+}
+
+int
+dhd_fwtrace_detach(dhd_pub_t *dhdp)
+{
+    fwtrace_info_t *fwtrace_info;
+
+    DHD_TRACE(("%s: %d\n", __FUNCTION__, __LINE__));
+
+    if (!dhdp) {
+        return BCME_OK;
+    }
+
+    if (!dhdp->fwtrace_info) {
+        return BCME_OK;
+    }
+
+    fwtrace_info = dhdp->fwtrace_info;
+
+    dhd_dma_buf_free(dhdp, &dhdp->fwtrace_info->fwtrace_buf);
+
+    /* close the file if valid */
+    if (!(IS_ERR_OR_NULL(dhdp->fwtrace_info->fw_trace_fp))) {
+        (void) filp_close(dhdp->fwtrace_info->fw_trace_fp, 0);
+    }
+
+    mutex_destroy(&dhdp->fwtrace_info->fwtrace_lock);
+
+    VMFREE(dhdp->osh, fwtrace_info, sizeof(*fwtrace_info));
+
+    dhdp->fwtrace_info = NULL;
+
+    DHD_INFO(("Deallocated DHD fwtrace_info\n"));
+
+    return (BCME_OK);
+}
+
+uint16
+get_fw_trace_overflow_counter(dhd_pub_t *dhdp)
+{
+    return (dhdp->fwtrace_info->overflow_counter);
+}
+
+void
+process_fw_trace_data(dhd_pub_t *dhdp)
+{
+    fwtrace_info_t *fwtrace_info = dhdp->fwtrace_info;
+    uint16 length;
+    uint16 incoming_seq;
+    uint32 trace_buf_index = fwtrace_info->trace_buf_index;
+    fwtrace_buf_t *trace_buf;
+    fwtrace_buf_t *curr_buf;
+
+    mutex_lock(&fwtrace_info->fwtrace_lock);
+
+    if (fwtrace_info->fw_trace_fp == NULL) {
+        goto done;
+    }
+
+    if ((trace_buf = fwtrace_get_trace_data_ptr(dhdp)) == NULL) {
+        goto done;
+    }
+
+    do {
+        curr_buf =
trace_buf + trace_buf_index; + + length = curr_buf->info.length; + /* If the incoming length is 0, means nothing is updated by the firmware */ + if (length == 0) { + break; + } + + incoming_seq = curr_buf->info.seq_num; + + if (((uint16)(fwtrace_info->prev_seq + 1) != incoming_seq) && + length != sizeof(*curr_buf)) { + DHD_ERROR(("*** invalid trace len idx = %u, length = %u, " + "cur seq = %u, in-seq = %u \n", + trace_buf_index, length, + fwtrace_info->prev_seq, incoming_seq)); + break; + } + + DHD_TRACE(("*** TRACE BUS: IDX:%d, in-seq:%d(prev-%d), ptr:%p(%llu), len:%d\n", + trace_buf_index, incoming_seq, fwtrace_info->prev_seq, + curr_buf, (uint64)curr_buf, length)); + + /* Write trace data to a file */ + if (fwtrace_write_to_file((uint8 *) curr_buf, length, dhdp) != BCME_OK) { + DHD_ERROR(("*** fwtrace_write_to_file has failed \n")); + break; + } + + /* Reset length after consuming the fwtrace data */ + curr_buf->info.length = 0; + + if ((fwtrace_info->prev_seq + 1) != incoming_seq) { + DHD_ERROR(("*** seq mismatch, index = %u, length = %u, " + "cur seq = %u, in-seq = %u \n", + trace_buf_index, length, + fwtrace_info->prev_seq, incoming_seq)); + } + fwtrace_info->prev_seq = incoming_seq; + + trace_buf_index++; + trace_buf_index &= (fwtrace_info->trace_buf_count - 1u); + fwtrace_info->trace_buf_index = trace_buf_index; + } while (true); + +done: + mutex_unlock(&fwtrace_info->fwtrace_lock); + return; +} + +/* + * Write the incoming trace data to a file. The maximum file size is 1MB. After that + * the trace data is saved into a new file. + */ +static int +fwtrace_write_to_file(uint8 *buf, uint16 buf_len, dhd_pub_t *dhdp) +{ + fwtrace_info_t *fwtrace_info = dhdp->fwtrace_info; + int ret_val = BCME_OK; + int ret_val_1 = 0; + mm_segment_t old_fs; + loff_t pos = 0; + struct kstat stat; + int error; + + /* Change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + if (buf == NULL) { + ret_val = BCME_ERROR; + goto done; + } + + if (IS_ERR_OR_NULL(fwtrace_info->fw_trace_fp)) { + ret_val = BCME_ERROR; + goto done; + } + + // + // Get the file size + // if the size + buf_len > TRACE_FILE_SIZE, then write to a different file. + // + error = vfs_stat(fwtrace_info->trace_file, &stat); + if (error) { + DHD_ERROR(("vfs_stat has failed with error code = %d\n", error)); + goto done; + } + + if ((int) stat.size + buf_len > TRACE_FILE_SIZE) { + fwtrace_close_file(dhdp); + (fwtrace_info->part_index)++; + fwtrace_open_file(TRUE, dhdp); + } + + pos = fwtrace_info->fw_trace_fp->f_pos; + /* Write buf to file */ + ret_val_1 = vfs_write(fwtrace_info->fw_trace_fp, + (char *) buf, (uint32) buf_len, &pos); + if (ret_val_1 < 0) { + DHD_ERROR(("write file error, err = %d\n", ret_val_1)); + ret_val = BCME_ERROR; + goto done; + } + fwtrace_info->fw_trace_fp->f_pos = pos; + + /* Sync file from filesystem to physical media */ + ret_val_1 = vfs_fsync(fwtrace_info->fw_trace_fp, 0); + if (ret_val_1 < 0) { + DHD_ERROR(("sync file error, error = %d\n", ret_val_1)); + ret_val = BCME_ERROR; + goto done; + } + +done: + /* restore previous address limit */ + set_fs(old_fs); + return (ret_val); +} + +/* + * Start the trace, gets called from the ioctl handler. 
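+ *
+ * A minimal caller sketch (hypothetical; the actual ioctl plumbing lives
+ * elsewhere in the DHD and is not shown in this file):
+ *
+ *    if (fw_trace_start(dhdp, TRUE) != BCME_OK) {
+ *        DHD_ERROR(("failed to start firmware tracing\n"));
+ *    }
+ *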
+ */
+int
+fw_trace_start(dhd_pub_t *dhdp, uint32 fw_trace_enabled)
+{
+    int ret_val = BCME_OK;
+
+    (dhdp->fwtrace_info->file_index)++;
+    dhdp->fwtrace_info->part_index = 1;
+
+    dhdp->fwtrace_info->trace_buf_index = 0;
+
+    mutex_lock(&dhdp->fwtrace_info->fwtrace_lock);
+    ret_val = fwtrace_open_file(fw_trace_enabled, dhdp);
+    if (ret_val == BCME_OK) {
+        dhdp->fwtrace_info->fwtrace_enable = fw_trace_enabled;
+    }
+    mutex_unlock(&dhdp->fwtrace_info->fwtrace_lock);
+
+    return (ret_val);
+}
+
+/*
+ * Stop the trace collection and close the file descriptor.
+ */
+int
+fw_trace_stop(dhd_pub_t *dhdp)
+{
+    int ret_val = BCME_OK;
+
+    /* Check to see if there is any trace data */
+    process_fw_trace_data(dhdp);
+
+    mutex_lock(&dhdp->fwtrace_info->fwtrace_lock);    /* acquire lock */
+    /* flush the trace buffer */
+    ret_val = fwtrace_close_file(dhdp);
+
+    /* free the trace buffer */
+    fwtrace_free_trace_buf(dhdp);
+    mutex_unlock(&dhdp->fwtrace_info->fwtrace_lock);    /* release the lock */
+
+    return (ret_val);
+}
+
+/*
+ * The trace file name format is fw_trace_<w>_part_<x>_..., where w is the
+ * file index and x is the part index, followed by the backplane base
+ * address (hex) and the debug-dump timestamp, e.g.:
+ *
+ * fw_trace_1_part_1_1539298163209110
+ * fw_trace_1_part_2_1539298194739003 etc.
+ *
+ */
+static int
+fwtrace_open_file(uint32 fw_trace_enabled, dhd_pub_t *dhdp)
+{
+    fwtrace_info_t *fwtrace_info = dhdp->fwtrace_info;
+    int ret_val = BCME_OK;
+    uint32 file_mode;
+    char ts_str[DEBUG_DUMP_TIME_BUF_LEN];
+
+    if (fw_trace_enabled) {
+        if (!(IS_ERR_OR_NULL(fwtrace_info->fw_trace_fp))) {
+            (void) filp_close(fwtrace_info->fw_trace_fp, 0);
+        }
+
+        DHD_INFO((" *** Creating the trace file \n"));
+
+        file_mode = O_CREAT | O_WRONLY | O_SYNC;
+        clear_debug_dump_time(ts_str);
+        get_debug_dump_time(ts_str);
+
+        snprintf(fwtrace_info->trace_file,
+            sizeof(fwtrace_info->trace_file),
+            "%sfw_trace_%d_part_%d_%x_%s",
+            DHD_COMMON_DUMP_PATH, fwtrace_info->file_index,
+            fwtrace_info->part_index,
+            dhd_bus_get_bp_base(dhdp),
+            ts_str);
+
+        fwtrace_info->fw_trace_fp =
+            filp_open(fwtrace_info->trace_file, file_mode, 0664);
+
+        if (IS_ERR(fwtrace_info->fw_trace_fp)) {
+            DHD_ERROR(("Unable to create the fw trace file: %s\n",
+                fwtrace_info->trace_file));
+            ret_val = BCME_ERROR;
+            goto done;
+        }
+    }
+
+done:
+    return (ret_val);
+}
+
+static int
+fwtrace_close_file(dhd_pub_t *dhdp)
+{
+    int ret_val = BCME_OK;
+
+    if (!(IS_ERR_OR_NULL(dhdp->fwtrace_info->fw_trace_fp))) {
+        (void) filp_close(dhdp->fwtrace_info->fw_trace_fp, 0);
+    }
+
+    dhdp->fwtrace_info->fw_trace_fp = NULL;
+
+    return (ret_val);
+}
+
+#define FWTRACE_HADDR_PARAMS_SIZE    256u
+#define FW_TRACE_FLUSH            0x8u    /* bit 3 */
+
+static int send_fw_trace_val(dhd_pub_t *dhdp, int val);
+
+/*
+ * Initialize FWTRACE.
+ * Allocate the trace buffer and open the trace file.
+ */
+int
+fwtrace_init(dhd_pub_t *dhdp)
+{
+    int ret_val = BCME_OK;
+    fwtrace_hostaddr_info_t host_buf_info;
+
+    if (dhdp->fwtrace_info->fwtrace_buf.va != NULL) {
+        /* Already initialized */
+        goto done;
+    }
+
+    ret_val = fwtrace_get_haddr(dhdp, &host_buf_info);
+
+    if (ret_val != BCME_OK) {
+        goto done;
+    }
+
+    DHD_INFO(("fwtrace_get_haddr: addr = %llx, len = %u\n",
+        host_buf_info.haddr.u64, host_buf_info.num_bufs));
+
+    /* Initialize and set up the file */
+    ret_val = fw_trace_start(dhdp, TRUE);
+
+done:
+    return ret_val;
+}
+
+/*
+ * Process the fwtrace set command to enable/disable firmware tracing.
+ * An enable always precedes a disable.
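+ *
+ * Illustrative 'val' encodings, derived from the handling below (only the
+ * low 16 bits are latched into fwtrace_enable):
+ *
+ *    handle_set_fwtrace(dhdp, 0);              // disable tracing, free buffers
+ *    handle_set_fwtrace(dhdp, 1);              // enable tracing
+ *    handle_set_fwtrace(dhdp, FW_TRACE_FLUSH); // 0x8: flush the trace buffer only
+ *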
+ */
+int
+handle_set_fwtrace(dhd_pub_t *dhdp, uint32 val)
+{
+    int ret, ret_val = BCME_OK;
+
+    /* On set, consider only the lower two bytes for now */
+    dhdp->fwtrace_info->fwtrace_enable = (val & 0xFFFF);
+
+    if (val & FW_TRACE_FLUSH) {    /* only flush the trace buffer */
+        if ((ret_val = send_fw_trace_val(dhdp, val)) != BCME_OK) {
+            goto done;
+        }
+    } else if (val == 0) {    /* disable the tracing */
+        /* Disable the trace in the firmware */
+        if ((ret_val = send_fw_trace_val(dhdp, val)) != BCME_OK) {
+            goto done;
+        }
+
+        /* cleanup in the driver */
+        fw_trace_stop(dhdp);
+    } else {    /* enable the tracing */
+        fwtrace_hostaddr_info_t haddr_info;
+
+        ret_val = fwtrace_init(dhdp);
+        if (ret_val != BCME_OK) {
+            goto done;
+        }
+
+        if ((ret_val = fwtrace_get_haddr(dhdp, &haddr_info)) != BCME_OK) {
+            DHD_ERROR(("%s: fwtrace_get_haddr has failed, "
+                "ret=%d\n", __FUNCTION__, ret_val));
+            goto done;
+        }
+
+        ret = dhd_iovar(dhdp, 0, "dngl:fwtrace_haddr",
+            (char *) &haddr_info, sizeof(haddr_info),
+            NULL, 0, TRUE);
+        if (ret < 0) {
+            DHD_ERROR(("%s: set dhd_iovar has failed for "
+                "fwtrace_haddr, "
+                "ret=%d\n", __FUNCTION__, ret));
+            ret_val = BCME_NOMEM;
+            goto done;
+        }
+
+        /* Finally, enable the trace in the firmware */
+        if ((ret_val = send_fw_trace_val(dhdp, val)) != BCME_OK) {
+            goto done;
+        }
+    }
+done:
+    return (ret_val);
+}
+
+/*
+ * Send the dngl:fwtrace IOVAR to the firmware.
+ */
+static int
+send_fw_trace_val(dhd_pub_t *dhdp, int val)
+{
+    int ret_val = BCME_OK;
+
+    if ((ret_val = dhd_iovar(dhdp, 0, "dngl:fwtrace", (char *)&val, sizeof(val),
+            NULL, 0, TRUE)) < 0) {
+        DHD_ERROR(("%s: set dhd_iovar has failed for fwtrace, "
+            "ret=%d\n", __FUNCTION__, ret_val));
+    }
+
+    return (ret_val);
+}
+
+/*
+ * Returns the virtual address of the firmware trace buffer.
+ * DHD monitors this buffer for updates from the firmware.
+ */
+static fwtrace_buf_t *
+fwtrace_get_trace_data_ptr(dhd_pub_t *dhdp)
+{
+    return ((fwtrace_buf_t *) dhdp->fwtrace_info->fwtrace_buf.va);
+}
+
+int
+fwtrace_get_haddr(dhd_pub_t *dhdp, fwtrace_hostaddr_info_t *haddr_info)
+{
+    int ret_val = BCME_NOMEM;
+    int num_host_buffers = FWTRACE_NUM_HOST_BUFFERS;
+
+    if (haddr_info == NULL) {
+        ret_val = BCME_BADARG;
+        goto done;
+    }
+
+    if (dhdp->fwtrace_info->fwtrace_buf.va != NULL) {
+        /* Use the existing buffer and send it to the firmware */
+        haddr_info->haddr.u64 = HTOL64(*(uint64 *)
+            &dhdp->fwtrace_info->fwtrace_buf.pa);
+        haddr_info->num_bufs = dhdp->fwtrace_info->trace_buf_count;
+        haddr_info->buf_len = sizeof(fwtrace_buf_t);
+        ret_val = BCME_OK;
+        goto done;
+    }
+
+    do {
+        /* Initialize the firmware trace buffer */
+        if (dhd_dma_buf_alloc(dhdp, &dhdp->fwtrace_info->fwtrace_buf,
+                sizeof(fwtrace_buf_t) * num_host_buffers) == BCME_OK) {
+            dhdp->fwtrace_info->trace_buf_count = num_host_buffers;
+            ret_val = BCME_OK;
+            break;
+        }
+
+        DHD_ERROR(("%s: Allocating %d buffers of %lu bytes failed\n",
+            __FUNCTION__, num_host_buffers,
+            sizeof(fwtrace_buf_t) * num_host_buffers));
+
+        /* Retry with half as many buffers */
+        num_host_buffers >>= 1;
+    } while (num_host_buffers > 0);
+
+    haddr_info->haddr.u64 = HTOL64(*(uint64 *)&dhdp->fwtrace_info->fwtrace_buf.pa);
+    haddr_info->num_bufs = num_host_buffers;
+    haddr_info->buf_len = sizeof(fwtrace_buf_t);
+
+    DHD_INFO(("Firmware trace buffer, host address = %llx, count = %u\n",
+        haddr_info->haddr.u64,
+        haddr_info->num_bufs));
+done:
+    return (ret_val);
+}
+
+/*
+ * Frees the host buffer.
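+ * This is the counterpart to the allocation in fwtrace_get_haddr() above,
+ * which backs off geometrically when the DMA allocation fails. A sketch of
+ * that retry pattern (alloc_bufs() is a hypothetical stand-in for
+ * dhd_dma_buf_alloc()):
+ *
+ *    int n = FWTRACE_NUM_HOST_BUFFERS;
+ *    while (n > 0 && alloc_bufs(n) != BCME_OK)
+ *        n >>= 1;    // halve the request and retry
+ *
+ * Note that the index wrap in process_fw_trace_data() masks with
+ * (trace_buf_count - 1), which assumes the final count is a power of two.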
+ */ +static void +fwtrace_free_trace_buf(dhd_pub_t *dhdp) +{ + dhd_dma_buf_free(dhdp, &dhdp->fwtrace_info->fwtrace_buf); + return; +} + +#endif /* DHD_FWTRACE */ + +#endif /* BCMINTERNAL */ diff --git a/bcmdhd.101.10.361.x/dhd_fwtrace.h b/bcmdhd.101.10.361.x/dhd_fwtrace.h new file mode 100755 index 0000000..4e977bf --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_fwtrace.h @@ -0,0 +1,55 @@ +/* + * Data structures required for the firmware tracing support on Linux. + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. + * + * + * <> + * + * $Id$ + */ + +#ifndef _DHD_FWTRACE_H +#define _DHD_FWTRACE_H + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE /* firmware tracing */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define TRACE_FILE_NAME_LEN 128u /* bytes */ +#define TRACE_FILE_SIZE (1024u * 1024u) /* Trace file size is 1 MB */ + +/* Prototypes */ +void dhd_event_logtrace_enqueue_fwtrace(dhd_pub_t *dhdp); +int dhd_fwtrace_attach(dhd_pub_t *dhdinfo); +int dhd_fwtrace_detach(dhd_pub_t *dhdinfo); + +void process_fw_trace_data(dhd_pub_t *dhdp); +uint32 dhd_bus_get_bp_base(dhd_pub_t *dhdp); +int fwtrace_init(dhd_pub_t *dhdp); +int fw_trace_start(dhd_pub_t *dhdp, uint32 fw_trace_enabled); +int fw_trace_stop(dhd_pub_t *dhdp); +int handle_set_fwtrace(dhd_pub_t *dhdp, uint32 val); +uint16 get_fw_trace_overflow_counter(dhd_pub_t *dhdp); +int fwtrace_get_haddr(dhd_pub_t *dhdp, fwtrace_hostaddr_info_t *haddr_info); + +#endif /* DHD_FWTRACE */ + +#endif /* BCMINTERNAL */ + +#endif /* _DHD_FWTRACE_H */ diff --git a/bcmdhd.101.10.361.x/dhd_gpio.c b/bcmdhd.101.10.361.x/dhd_gpio.c new file mode 100755 index 0000000..60c04be --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_gpio.c @@ -0,0 +1,497 @@ + +#include +#include +#include + +#if defined(BUS_POWER_RESTORE) && defined(BCMSDIO) +#include +#include +#include +#include +#endif /* defined(BUS_POWER_RESTORE) && defined(BCMSDIO) */ + +#ifdef CONFIG_DHD_USE_STATIC_BUF +#ifdef DHD_STATIC_IN_DRIVER +extern int dhd_static_buf_init(void); +extern void dhd_static_buf_exit(void); +#endif /* DHD_STATIC_IN_DRIVER */ +#ifdef BCMDHD_MDRIVER +extern void *bcmdhd_mem_prealloc(uint bus_type, int index, + int section, unsigned long size); +#else +extern void *bcmdhd_mem_prealloc(int section, unsigned long size); +#endif +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +#ifdef BCMDHD_DTS +/* This is sample code in dts file. 
+bcmdhd {
+    compatible = "android,bcmdhd_wlan";
+    gpio_wl_reg_on = <&gpio GPIOH_4 GPIO_ACTIVE_HIGH>;
+    gpio_wl_host_wake = <&gpio GPIOZ_15 GPIO_ACTIVE_HIGH>;
+};
+*/
+#define DHD_DT_COMPAT_ENTRY        "android,bcmdhd_wlan"
+#define GPIO_WL_REG_ON_PROPNAME        "gpio_wl_reg_on"
+#define GPIO_WL_HOST_WAKE_PROPNAME    "gpio_wl_host_wake"
+#endif
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+#include
+extern int wifi_irq_trigger_level(void);
+extern u8 *wifi_get_mac(void);
+extern u8 *wifi_get_ap_mac(void);
+#endif
+extern void sdio_reinit(void);
+extern void set_usb_bt_power(int is_power);
+extern void set_usb_wifi_power(int is_power);
+extern void extern_wifi_set_enable(int is_on);
+extern void pci_remove_reinit(unsigned int vid, unsigned int pid, int delBus);
+//extern void amlogic_pcie_power_on_atu_fixup(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+extern int wifi_irq_num(void);
+#endif
+int dhd_pwr_ctrl = 1;
+module_param(dhd_pwr_ctrl, int, 0);
+#endif
+
+static int
+dhd_wlan_set_power(int on, wifi_adapter_info_t *adapter)
+{
+    int gpio_wl_reg_on = adapter->gpio_wl_reg_on;
+    int err = 0;
+
+#ifdef CUSTOMER_HW_AMLOGIC
+    printf("######### dhd_pwr_ctrl=%d #########\n", dhd_pwr_ctrl);
+#endif
+
+    if (on) {
+        printf("======== PULL WL_REG_ON(%d) HIGH! ========\n", gpio_wl_reg_on);
+        if (gpio_wl_reg_on >= 0) {
+            err = gpio_direction_output(gpio_wl_reg_on, 1);
+            if (err) {
+                printf("%s: WL_REG_ON didn't output high\n", __FUNCTION__);
+                return -EIO;
+            }
+        }
+#ifdef CUSTOMER_HW_AMLOGIC
+#ifdef BCMSDIO
+        extern_wifi_set_enable(0);
+        mdelay(200);
+        extern_wifi_set_enable(1);
+        mdelay(200);
+//        sdio_reinit();
+#endif
+#ifdef BCMDBUS
+        if (dhd_pwr_ctrl) {
+            set_usb_wifi_power(0);
+            mdelay(200);
+            set_usb_wifi_power(1);
+            mdelay(200);
+        }
+#endif
+#ifdef BCMPCIE
+//        extern_wifi_set_enable(0);
+//        mdelay(200);
+//        extern_wifi_set_enable(1);
+//        mdelay(200);
+//        amlogic_pcie_power_on_atu_fixup();
+#endif
+#endif
+#ifdef BUS_POWER_RESTORE
+#ifdef BCMSDIO
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+        if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
+            mdelay(100);
+            printf("======== mmc_power_restore_host! ========\n");
+            mmc_power_restore_host(adapter->sdio_func->card->host);
+        }
+#endif
+#elif defined(BCMPCIE)
+        if (adapter->pci_dev) {
+            mdelay(100);
+            printf("======== pci_set_power_state PCI_D0! ========\n");
+            pci_set_power_state(adapter->pci_dev, PCI_D0);
+            if (adapter->pci_saved_state)
+                pci_load_and_free_saved_state(adapter->pci_dev, &adapter->pci_saved_state);
+            pci_restore_state(adapter->pci_dev);
+            err = pci_enable_device(adapter->pci_dev);
+            if (err < 0)
+                printf("%s: PCI enable device failed", __FUNCTION__);
+            pci_set_master(adapter->pci_dev);
+        }
+#endif /* BCMPCIE */
+#endif /* BUS_POWER_RESTORE */
+        /* Let the customer power stabilize */
+        mdelay(100);
+    } else {
+#ifdef BUS_POWER_RESTORE
+#ifdef BCMSDIO
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+        if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
+            printf("======== mmc_power_save_host! ========\n");
+            mmc_power_save_host(adapter->sdio_func->card->host);
+        }
+#endif
+#elif defined(BCMPCIE)
+        if (adapter->pci_dev) {
+            printf("======== pci_set_power_state PCI_D3hot! 
========\n"); + pci_save_state(adapter->pci_dev); + adapter->pci_saved_state = pci_store_saved_state(adapter->pci_dev); + if (pci_is_enabled(adapter->pci_dev)) + pci_disable_device(adapter->pci_dev); + pci_set_power_state(adapter->pci_dev, PCI_D3hot); + } +#endif /* BCMPCIE */ +#endif /* BUS_POWER_RESTORE */ + printf("======== PULL WL_REG_ON(%d) LOW! ========\n", gpio_wl_reg_on); + if (gpio_wl_reg_on >= 0) { + err = gpio_direction_output(gpio_wl_reg_on, 0); + if (err) { + printf("%s: WL_REG_ON didn't output low\n", __FUNCTION__); + return -EIO; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +#ifdef BCMSDIO + extern_wifi_set_enable(0); + mdelay(200); +#endif +#ifdef BCMDBUS + if (dhd_pwr_ctrl) { + set_usb_wifi_power(0); + mdelay(200); + } +#endif +#ifdef BCMPCIE +// extern_wifi_set_enable(0); +// mdelay(200); +#endif +#endif + } + + return err; +} + +static int dhd_wlan_set_reset(int onoff) +{ + return 0; +} + +static int dhd_wlan_set_carddetect(int present) +{ + int err = 0; + +#if !defined(BUS_POWER_RESTORE) + if (present) { +#if defined(BCMSDIO) + printf("======== Card detection to detect SDIO card! ========\n"); +#ifdef CUSTOMER_HW_PLATFORM + err = sdhci_force_presence_change(&sdmmc_channel, 1); +#endif /* CUSTOMER_HW_PLATFORM */ +#ifdef CUSTOMER_HW_AMLOGIC + sdio_reinit(); +#endif +#elif defined(BCMPCIE) + printf("======== Card detection to detect PCIE card! ========\n"); +#endif + } else { +#if defined(BCMSDIO) + printf("======== Card detection to remove SDIO card! ========\n"); +#ifdef CUSTOMER_HW_PLATFORM + err = sdhci_force_presence_change(&sdmmc_channel, 0); +#endif /* CUSTOMER_HW_PLATFORM */ +#ifdef CUSTOMER_HW_AMLOGIC + extern_wifi_set_enable(0); + mdelay(200); +#endif +#elif defined(BCMPCIE) + printf("======== Card detection to remove PCIE card! ========\n"); +#ifdef CUSTOMER_HW_AMLOGIC +// extern_wifi_set_enable(0); +// mdelay(200); +#endif +#endif + } +#endif /* BUS_POWER_RESTORE */ + + return err; +} + +static int dhd_wlan_get_mac_addr(unsigned char *buf, int ifidx) +{ + int err = 0; + + if (ifidx == 1) { +#ifdef EXAMPLE_GET_MAC + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); +#endif /* EXAMPLE_GET_MAC */ +#ifdef CUSTOMER_HW_AMLOGIC +#ifdef CUSTOM_AP_MAC + bcopy((char *)wifi_get_ap_mac(), buf, sizeof(struct ether_addr)); + if (buf[0] == 0xff) { + printf("custom wifi ap mac is not set\n"); + err = -1; + } else + printf("custom wifi ap mac-addr: %02x:%02x:%02x:%02x:%02x:%02x\n", + buf[0], buf[1], buf[2], + buf[3], buf[4], buf[5]); +#else + err = -1; +#endif +#endif + } else { +#ifdef EXAMPLE_GET_MAC + struct ether_addr ea_example = {{0x02, 0x11, 0x22, 0x33, 0x44, 0x55}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); +#endif /* EXAMPLE_GET_MAC */ +#ifdef CUSTOMER_HW_AMLOGIC + bcopy((char *)wifi_get_mac(), buf, sizeof(struct ether_addr)); + if (buf[0] == 0xff) { + printf("custom wifi mac is not set\n"); + err = -1; + } else + printf("custom wifi mac-addr: %02x:%02x:%02x:%02x:%02x:%02x\n", + buf[0], buf[1], buf[2], + buf[3], buf[4], buf[5]); +#endif + } + +#ifdef EXAMPLE_GET_MAC_VER2 + /* EXAMPLE code */ + { + char macpad[56]= { + 0x00,0xaa,0x9c,0x84,0xc7,0xbc,0x9b,0xf6, + 0x02,0x33,0xa9,0x4d,0x5c,0xb4,0x0a,0x5d, + 0xa8,0xef,0xb0,0xcf,0x8e,0xbf,0x24,0x8a, + 0x87,0x0f,0x6f,0x0d,0xeb,0x83,0x6a,0x70, + 0x4a,0xeb,0xf6,0xe6,0x3c,0xe7,0x5f,0xfc, + 0x0e,0xa7,0xb3,0x0f,0x00,0xe4,0x4a,0xaf, + 0x87,0x08,0x16,0x6d,0x3a,0xe3,0xc7,0x80}; + bcopy(macpad, buf+6, sizeof(macpad)); + } +#endif /* 
EXAMPLE_GET_MAC_VER2 */
+
+    printf("======== %s err=%d ========\n", __FUNCTION__, err);
+
+    return err;
+}
+
+static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = {
+    /* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+    {"",   "XT", 49},  /* Universal if Country code is unknown or empty */
+    {"US", "US", 0},
+#endif /* EXAMPLE_TABLE */
+};
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+struct cntry_locales_custom brcm_wlan_translate_nodfs_table[] = {
+#ifdef EXAMPLE_TABLE
+    {"",   "XT", 50},  /* Universal if Country code is unknown or empty */
+    {"US", "US", 0},
+#endif /* EXAMPLE_TABLE */
+};
+#endif
+
+static void *dhd_wlan_get_country_code(char *ccode
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+    , u32 flags
+#endif
+)
+{
+    struct cntry_locales_custom *locales;
+    int size;
+    int i;
+
+    if (!ccode)
+        return NULL;
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+    if (flags & WLAN_PLAT_NODFS_FLAG) {
+        locales = brcm_wlan_translate_nodfs_table;
+        size = ARRAY_SIZE(brcm_wlan_translate_nodfs_table);
+    } else {
+#endif
+        locales = brcm_wlan_translate_custom_table;
+        size = ARRAY_SIZE(brcm_wlan_translate_custom_table);
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+    }
+#endif
+
+    for (i = 0; i < size; i++)
+        if (strcmp(ccode, locales[i].iso_abbrev) == 0)
+            return &locales[i];
+    return NULL;
+}
+
+struct wifi_platform_data dhd_wlan_control = {
+    .set_power    = dhd_wlan_set_power,
+    .set_reset    = dhd_wlan_set_reset,
+    .set_carddetect    = dhd_wlan_set_carddetect,
+    .get_mac_addr    = dhd_wlan_get_mac_addr,
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+    .mem_prealloc    = bcmdhd_mem_prealloc,
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+    .get_country_code = dhd_wlan_get_country_code,
+};
+
+int dhd_wlan_init_gpio(wifi_adapter_info_t *adapter)
+{
+#ifdef BCMDHD_DTS
+    char wlan_node[32];
+    struct device_node *root_node = NULL;
+#endif
+    int err = 0;
+    int gpio_wl_reg_on;
+#ifdef CUSTOMER_OOB
+    int gpio_wl_host_wake;
+    int host_oob_irq = -1;
+    uint host_oob_irq_flags = 0;
+#endif
+
+    /* Please check your schematic and fill in the correct GPIO numbers
+     * connected to WL_REG_ON and WL_HOST_WAKE.
+     */
+#ifdef BCMDHD_DTS
+    strcpy(wlan_node, DHD_DT_COMPAT_ENTRY);
+    printf("======== Get GPIO from DTS(%s) ========\n", wlan_node);
+    root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+    if (root_node) {
+        gpio_wl_reg_on = of_get_named_gpio(root_node, GPIO_WL_REG_ON_PROPNAME, 0);
+#ifdef CUSTOMER_OOB
+        gpio_wl_host_wake = of_get_named_gpio(root_node, GPIO_WL_HOST_WAKE_PROPNAME, 0);
+#endif
+    } else
+#endif
+    {
+        gpio_wl_reg_on = -1;
+#ifdef CUSTOMER_OOB
+        gpio_wl_host_wake = -1;
+#endif
+    }
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#if defined(BCMPCIE)
+    printf("======== Card detection to detect PCIE card! 
========\n"); +// pci_remove_reinit(0x14e4, 0x43ec, 1); +#endif +#endif + + if (gpio_wl_reg_on >= 0) { + err = gpio_request(gpio_wl_reg_on, "WL_REG_ON"); + if (err < 0) { + printf("%s: gpio_request(%d) for WL_REG_ON failed\n", + __FUNCTION__, gpio_wl_reg_on); + gpio_wl_reg_on = -1; + } + } + adapter->gpio_wl_reg_on = gpio_wl_reg_on; + +#ifdef CUSTOMER_OOB + if (gpio_wl_host_wake >= 0) { + err = gpio_request(gpio_wl_host_wake, "bcmdhd"); + if (err < 0) { + printf("%s: gpio_request(%d) for WL_HOST_WAKE failed\n", + __FUNCTION__, gpio_wl_host_wake); + return -1; + } + adapter->gpio_wl_host_wake = gpio_wl_host_wake; + err = gpio_direction_input(gpio_wl_host_wake); + if (err < 0) { + printf("%s: gpio_direction_input(%d) for WL_HOST_WAKE failed\n", + __FUNCTION__, gpio_wl_host_wake); + gpio_free(gpio_wl_host_wake); + return -1; + } + host_oob_irq = gpio_to_irq(gpio_wl_host_wake); + if (host_oob_irq < 0) { + printf("%s: gpio_to_irq(%d) for WL_HOST_WAKE failed\n", + __FUNCTION__, gpio_wl_host_wake); + gpio_free(gpio_wl_host_wake); + return -1; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + host_oob_irq = INT_GPIO_4; +#else + host_oob_irq = wifi_irq_num(); +#endif +#endif + +#ifdef HW_OOB +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + if (wifi_irq_trigger_level() == GPIO_IRQ_LOW) + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE; + else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else +#ifdef HW_OOB_LOW_LEVEL + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#endif +#endif +#else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_SHAREABLE; +#endif + host_oob_irq_flags &= IRQF_TRIGGER_MASK; + + adapter->irq_num = host_oob_irq; + adapter->intr_flags = host_oob_irq_flags; + printf("%s: WL_HOST_WAKE=%d, oob_irq=%d, oob_irq_flags=0x%x\n", __FUNCTION__, + gpio_wl_host_wake, host_oob_irq, host_oob_irq_flags); +#endif /* CUSTOMER_OOB */ + printf("%s: WL_REG_ON=%d\n", __FUNCTION__, gpio_wl_reg_on); + + return 0; +} + +static void dhd_wlan_deinit_gpio(wifi_adapter_info_t *adapter) +{ + int gpio_wl_reg_on = adapter->gpio_wl_reg_on; +#ifdef CUSTOMER_OOB + int gpio_wl_host_wake = adapter->gpio_wl_host_wake; +#endif + + if (gpio_wl_reg_on >= 0) { + printf("%s: gpio_free(WL_REG_ON %d)\n", __FUNCTION__, gpio_wl_reg_on); + gpio_free(gpio_wl_reg_on); + gpio_wl_reg_on = -1; + } +#ifdef CUSTOMER_OOB + if (gpio_wl_host_wake >= 0) { + printf("%s: gpio_free(WL_HOST_WAKE %d)\n", __FUNCTION__, gpio_wl_host_wake); + gpio_free(gpio_wl_host_wake); + gpio_wl_host_wake = -1; + } +#endif /* CUSTOMER_OOB */ +} + +int dhd_wlan_init_plat_data(wifi_adapter_info_t *adapter) +{ + int err = 0; + + printf("======== %s ========\n", __FUNCTION__); + if (adapter->index == -1) { + adapter->index = 0; + } + err = dhd_wlan_init_gpio(adapter); + +#ifdef DHD_STATIC_IN_DRIVER + dhd_static_buf_init(); +#endif + return err; +} + +void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter) +{ + printf("======== %s ========\n", __FUNCTION__); +#ifdef DHD_STATIC_IN_DRIVER + dhd_static_buf_exit(); +#endif + dhd_wlan_deinit_gpio(adapter); +} diff --git a/bcmdhd.101.10.361.x/dhd_ip.c b/bcmdhd.101.10.361.x/dhd_ip.c new file mode 100755 index 0000000..3f9b625 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_ip.c @@ -0,0 +1,1425 @@ +/* + * IP 
Packet Parser Module. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ +#include +#include + +#include +#include +#include <802.3.h> +#include +#include + +#include + +#include +#include + +#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK) +#include +#include +#include +#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */ + +/* special values */ +/* 802.3 llc/snap header */ +static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; + +pkt_frag_t pkt_frag_info(osl_t *osh, void *p) +{ + uint8 *frame; + int length; + uint8 *pt; /* Pointer to type field */ + uint16 ethertype; + struct ipv4_hdr *iph; /* IP frame pointer */ + int ipl; /* IP frame length */ + uint16 iph_frag; + + ASSERT(osh && p); + + frame = PKTDATA(osh, p); + length = PKTLEN(osh, p); + + /* Process Ethernet II or SNAP-encapsulated 802.3 frames */ + if (length < ETHER_HDR_LEN) { + DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) { + /* Frame is Ethernet II */ + pt = frame + ETHER_TYPE_OFFSET; + } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN && + !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) { + pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN; + } else { + DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + + /* Skip VLAN tag, if any */ + if (ethertype == ETHER_TYPE_8021Q) { + pt += VLAN_TAG_LEN; + + if (pt + ETHER_TYPE_LEN > frame + length) { + DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + } + + if (ethertype != ETHER_TYPE_IP) { + DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n", + __FUNCTION__, ethertype, length)); + return DHD_PKT_FRAG_NONE; + } + + iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN); + ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame)); + + /* We support IPv4 only */ + if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) { + DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl)); + return DHD_PKT_FRAG_NONE; + } + + iph_frag = ntoh16(iph->frag); + + if (iph_frag & IPV4_FRAG_DONT) { + return DHD_PKT_FRAG_NONE; + } else if ((iph_frag & IPV4_FRAG_MORE) == 0) { + return DHD_PKT_FRAG_LAST; + } else { + return (iph_frag & IPV4_FRAG_OFFSET_MASK)? 
DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST; + } +} + +#ifdef DHDTCPACK_SUPPRESS + +typedef struct { + void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */ + void *pkt_ether_hdr; /* Ethernet header pointer of pkt_in_q */ + int ifidx; + uint8 supp_cnt; + dhd_pub_t *dhdp; +#ifndef TCPACK_SUPPRESS_HOLD_HRT + timer_list_compat_t timer; +#else + struct tasklet_hrtimer timer; +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ +} tcpack_info_t; + +typedef struct _tdata_psh_info_t { + uint32 end_seq; /* end seq# of a received TCP PSH DATA pkt */ + struct _tdata_psh_info_t *next; /* next pointer of the link chain */ +} tdata_psh_info_t; + +typedef struct { + struct { + uint8 src[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */ + uint8 dst[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */ + } ip_addr; + struct { + uint8 src[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */ + uint8 dst[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */ + } tcp_port; + tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */ + tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */ + uint32 last_used_time; /* The last time this tcpdata_info was used(in ms) */ +} tcpdata_info_t; + +/* TCPACK SUPPRESS module */ +typedef struct { + int tcpack_info_cnt; + tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM]; /* Info of TCP ACK to send */ + int tcpdata_info_cnt; + tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM]; /* Info of received TCP DATA */ + tdata_psh_info_t *tdata_psh_info_pool; /* Pointer to tdata_psh_info elements pool */ + tdata_psh_info_t *tdata_psh_info_free; /* free tdata_psh_info elements chain in pool */ +#ifdef DHDTCPACK_SUP_DBG + int psh_info_enq_num; /* Number of free TCP PSH DATA info elements in pool */ +#endif /* DHDTCPACK_SUP_DBG */ +} tcpack_sup_module_t; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) +counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1}; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + +static void +_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod, + tdata_psh_info_t *tdata_psh_info) +{ + if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) { + DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod, tdata_psh_info)); + return; + } + + ASSERT(tdata_psh_info->next == NULL); + tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free; + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num++; +#endif +} + +static tdata_psh_info_t* +_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info = NULL; + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod)); + return NULL; + } + + tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free; + if (tdata_psh_info == NULL) + DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__)); + else { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num--; +#endif /* DHDTCPACK_SUP_DBG */ + } + + return tdata_psh_info; +} + +#ifdef BCMSDIO +static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp, + tcpack_sup_module_t *tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info_pool = NULL; + uint i; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) + return BCME_ERROR; + + ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL); + 
ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL); + + tdata_psh_info_pool = + MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); + + if (tdata_psh_info_pool == NULL) + return BCME_NOMEM; + bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num = 0; +#endif /* DHDTCPACK_SUP_DBG */ + + /* Enqueue newly allocated tcpdata psh info elements to the pool */ + for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++) + _tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]); + + ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL); + tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool; + + return BCME_OK; +} + +static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp, + tcpack_sup_module_t *tcpack_sup_mod) +{ + uint i; + tdata_psh_info_t *tdata_psh_info; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n", + __FUNCTION__, __LINE__)); + return; + } + + for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) { + tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + /* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */ + while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) { + tcpdata_info->tdata_psh_info_head = tdata_psh_info->next; + tdata_psh_info->next = NULL; + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info); + } + tcpdata_info->tdata_psh_info_tail = NULL; + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + i = 0; + /* Be sure we recollected all tdata_psh_info elements */ + while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; + i++; + } + ASSERT(i == TCPDATA_PSH_INFO_MAXNUM); + MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool, + sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); + + return; +} +#endif /* BCMSDIO */ + +#ifdef BCMPCIE +#ifndef TCPACK_SUPPRESS_HOLD_HRT +static void dhd_tcpack_send(ulong data) +#else +static enum hrtimer_restart dhd_tcpack_send(struct hrtimer *timer) +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ +{ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *cur_tbl; + dhd_pub_t *dhdp; + int ifidx; + void* pkt; + unsigned long flags; + +#ifndef TCPACK_SUPPRESS_HOLD_HRT + cur_tbl = (tcpack_info_t *)data; +#else + cur_tbl = container_of(timer, tcpack_info_t, timer.timer); +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ + + if (!cur_tbl) { + goto done; + } + + dhdp = cur_tbl->dhdp; + if (!dhdp) { + goto done; + } + + flags = dhd_os_tcpacklock(dhdp); + + if (unlikely(dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD)) { + dhd_os_tcpackunlock(dhdp, flags); + goto done; + } + + tcpack_sup_mod = dhdp->tcpack_sup_module; + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + goto done; + } + pkt = cur_tbl->pkt_in_q; + ifidx = cur_tbl->ifidx; + if (!pkt) { + dhd_os_tcpackunlock(dhdp, flags); + goto done; + } + cur_tbl->pkt_in_q = NULL; + cur_tbl->pkt_ether_hdr = NULL; + cur_tbl->ifidx = 0; + cur_tbl->supp_cnt = 0; + if (--tcpack_sup_mod->tcpack_info_cnt < 0) { + DHD_ERROR(("%s %d: ERROR!!! 
tcp_ack_info_cnt %d\n",
+            __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+    }
+
+    dhd_os_tcpackunlock(dhdp, flags);
+
+    dhd_sendpkt(dhdp, ifidx, pkt);
+
+done:
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+    return;
+#else
+    return HRTIMER_NORESTART;
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+}
+#endif /* BCMPCIE */
+
+int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
+{
+    int ret = BCME_OK;
+    unsigned long flags;
+    tcpack_sup_module_t *tcpack_sup_module;
+    uint8 invalid_mode = FALSE;
+    int prev_mode;
+    int i = 0;
+
+    flags = dhd_os_tcpacklock(dhdp);
+    tcpack_sup_module = dhdp->tcpack_sup_module;
+    prev_mode = dhdp->tcpack_sup_mode;
+
+    if (prev_mode == mode) {
+        DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
+        goto exit;
+    }
+
+    invalid_mode |= (mode >= TCPACK_SUP_LAST_MODE);
+#ifdef BCMSDIO
+    invalid_mode |= (mode == TCPACK_SUP_HOLD);
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+    invalid_mode |= ((mode == TCPACK_SUP_REPLACE) || (mode == TCPACK_SUP_DELAYTX));
+#endif /* BCMPCIE */
+
+    if (invalid_mode) {
+        DHD_ERROR(("%s %d: Invalid TCP ACK Suppress mode %d\n",
+            __FUNCTION__, __LINE__, mode));
+        ret = BCME_BADARG;
+        goto exit;
+    }
+
+    printf("%s: TCP ACK Suppress mode %d -> mode %d\n",
+        __FUNCTION__, dhdp->tcpack_sup_mode, mode);
+    printf("%s: TCPACK_INFO_MAXNUM=%d, TCPDATA_INFO_MAXNUM=%d\n",
+        __FUNCTION__, TCPACK_INFO_MAXNUM, TCPDATA_INFO_MAXNUM);
+
+    /* Pre-processing for the new mode, based on the previous mode */
+    switch (prev_mode) {
+        case TCPACK_SUP_OFF:
+            if (tcpack_sup_module == NULL) {
+                tcpack_sup_module = MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t));
+                if (tcpack_sup_module == NULL) {
+                    DHD_ERROR(("%s[%d]: Failed to allocate memory for "
+                        "tcpack_sup_module\n", __FUNCTION__, __LINE__));
+                    dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+                    ret = BCME_NOMEM;
+                    goto exit;
+                }
+                dhdp->tcpack_sup_module = tcpack_sup_module;
+            }
+            bzero(tcpack_sup_module, sizeof(tcpack_sup_module_t));
+            break;
+#ifdef BCMSDIO
+        case TCPACK_SUP_DELAYTX:
+            if (tcpack_sup_module) {
+                /* We won't need the tdata_psh_info pool and
+                 * tcpdata_info_tbl anymore
+                 */
+                _tdata_psh_info_pool_deinit(dhdp, tcpack_sup_module);
+                tcpack_sup_module->tcpdata_info_cnt = 0;
+                bzero(tcpack_sup_module->tcpdata_info_tbl,
+                    sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM);
+            }
+
+            /* For a half-duplex bus interface, tx precedes rx by default */
+            if (dhdp->bus) {
+                dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+            }
+
+            if (tcpack_sup_module == NULL) {
+                DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n",
+                    __FUNCTION__, __LINE__));
+                dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+                goto exit;
+            }
+            break;
+#endif /* BCMSDIO */
+    }
+
+    /* Update to the new mode */
+    dhdp->tcpack_sup_mode = mode;
+
+    /* Processing for the new mode */
+    switch (mode) {
+        case TCPACK_SUP_OFF:
+            ASSERT(tcpack_sup_module != NULL);
+            /* Clean up timer/data structure for
+             * any remaining/pending packet or timer.
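+             *
+             * For reference, the per-bus mode validity enforced by the
+             * invalid_mode checks above (sketch of valid calls):
+             *
+             *    dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);     // any bus
+             *    dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_REPLACE); // not on PCIe
+             *    dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_DELAYTX); // not on PCIe
+             *    dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);    // not on SDIO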
+             */
+            if (tcpack_sup_module) {
+                /* Check if the previous mode was TCPACK_SUP_HOLD */
+                if (prev_mode == TCPACK_SUP_HOLD) {
+                    for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+                        tcpack_info_t *tcpack_info_tbl =
+                            &tcpack_sup_module->tcpack_info_tbl[i];
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+                        del_timer(&tcpack_info_tbl->timer);
+#else
+                        hrtimer_cancel(&tcpack_info_tbl->timer.timer);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+                        if (tcpack_info_tbl->pkt_in_q) {
+                            PKTFREE(dhdp->osh,
+                                tcpack_info_tbl->pkt_in_q, TRUE);
+                            tcpack_info_tbl->pkt_in_q = NULL;
+                        }
+                    }
+                }
+                MFREE(dhdp->osh, tcpack_sup_module, sizeof(tcpack_sup_module_t));
+                dhdp->tcpack_sup_module = NULL;
+            } else {
+                DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n",
+                    __FUNCTION__, __LINE__));
+            }
+            break;
+#ifdef BCMSDIO
+        case TCPACK_SUP_REPLACE:
+            /* There is nothing to configure for this mode */
+            break;
+        case TCPACK_SUP_DELAYTX:
+            ret = _tdata_psh_info_pool_init(dhdp, tcpack_sup_module);
+            if (ret != BCME_OK) {
+                DHD_ERROR(("%s %d: pool init fail with %d\n",
+                    __FUNCTION__, __LINE__, ret));
+                break;
+            }
+            if (dhdp->bus) {
+                dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
+            }
+            break;
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+        case TCPACK_SUP_HOLD:
+            dhdp->tcpack_sup_ratio = dhdp->conf->tcpack_sup_ratio;
+            dhdp->tcpack_sup_delay = dhdp->conf->tcpack_sup_delay;
+            for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+                tcpack_info_t *tcpack_info_tbl =
+                    &tcpack_sup_module->tcpack_info_tbl[i];
+                tcpack_info_tbl->dhdp = dhdp;
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+                init_timer_compat(&tcpack_info_tbl->timer, dhd_tcpack_send,
+                    tcpack_info_tbl);
+#else
+                tasklet_hrtimer_init(&tcpack_info_tbl->timer,
+                    dhd_tcpack_send, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+            }
+            break;
+#endif /* BCMPCIE */
+    }
+
+exit:
+    dhd_os_tcpackunlock(dhdp, flags);
+    return ret;
+}
+
+void
+dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
+{
+    tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+    int i;
+    unsigned long flags;
+
+    if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+        goto exit;
+
+    flags = dhd_os_tcpacklock(dhdp);
+
+    if (!tcpack_sup_mod) {
+        DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+            __FUNCTION__, __LINE__));
+        dhd_os_tcpackunlock(dhdp, flags);
+        goto exit;
+    }
+
+    if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+        for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+            if (tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q) {
+                PKTFREE(dhdp->osh, tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q,
+                    TRUE);
+                tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q = NULL;
+                tcpack_sup_mod->tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+                tcpack_sup_mod->tcpack_info_tbl[i].ifidx = 0;
+                tcpack_sup_mod->tcpack_info_tbl[i].supp_cnt = 0;
+            }
+        }
+    } else {
+        tcpack_sup_mod->tcpack_info_cnt = 0;
+        bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+    }
+
+    dhd_os_tcpackunlock(dhdp, flags);
+
+    if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+        for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+            del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer);
+#else
+            hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+        }
+    }
+
+exit:
+    return;
+}
+
+inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
+{
+    uint8 i;
+    tcpack_sup_module_t *tcpack_sup_mod;
+    tcpack_info_t *tcpack_info_tbl;
+    int tbl_cnt;
+    int ret = BCME_OK;
+    void *pdata;
+    uint32 pktlen;
+    unsigned long flags;
+
+    if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+        goto exit;
+
+    pdata = PKTDATA(dhdp->osh, pkt);
+    pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata);
+
+    if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) {
+        DHD_TRACE(("%s %d: Length %d too short or too long to be a TCP ACK\n",
+            __FUNCTION__, __LINE__, pktlen));
+        goto exit;
+    }
+
+    flags = dhd_os_tcpacklock(dhdp);
+    tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+    if (!tcpack_sup_mod) {
+        DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+        ret = BCME_ERROR;
+        dhd_os_tcpackunlock(dhdp, flags);
+        goto exit;
+    }
+    tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
+    tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+    ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM);
+
+    for (i = 0; i < tbl_cnt; i++) {
+        if (tcpack_info_tbl[i].pkt_in_q == pkt) {
+            DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n",
+                __FUNCTION__, __LINE__, pkt, i, tbl_cnt));
+            /* This pkt is being transmitted, so remove its tcp_ack_info. */
+            if (i < tbl_cnt - 1) {
+                bcopy(&tcpack_info_tbl[tbl_cnt - 1],
+                    &tcpack_info_tbl[i], sizeof(tcpack_info_t));
+            }
+            bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t));
+            if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+                DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+                    __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+                ret = BCME_ERROR;
+            }
+            break;
+        }
+    }
+    dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+    return ret;
+}
+
+static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr,
+    uint8 *tcp_hdr, uint32 tcp_ack_num)
+{
+    tcpack_sup_module_t *tcpack_sup_mod;
+    int i;
+    tcpdata_info_t *tcpdata_info = NULL;
+    tdata_psh_info_t *tdata_psh_info = NULL;
+    bool ret = FALSE;
+
+    if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+        goto exit;
+
+    tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+    if (!tcpack_sup_mod) {
+        DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+        goto exit;
+    }
+
+    DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+        " TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__,
+        IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+        IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+        ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+        ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+        tcp_ack_num));
+
+    for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+        tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+        DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+            " TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+            IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)),
+            IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)),
+            ntoh16_ua(tcpdata_info_tmp->tcp_port.src),
+            ntoh16_ua(tcpdata_info_tmp->tcp_port.dst)));
+
+        /* If either the IP addresses or the TCP ports do not match, skip.
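+         * Note the reversed match below: the outgoing ACK's src must equal
+         * the tracked data stream's dst and vice versa. Worked example
+         * (illustrative addresses only):
+         *
+         *    tracked data:  10.0.0.2:5001  -> 10.0.0.1:43210
+         *    outgoing ACK:  10.0.0.1:43210 -> 10.0.0.2:5001   (matches)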
+         */
+        if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+            tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 &&
+            memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
+            tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 &&
+            memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+            tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 &&
+            memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
+            tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) {
+            tcpdata_info = tcpdata_info_tmp;
+            break;
+        }
+    }
+
+    if (tcpdata_info == NULL) {
+        DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__));
+        goto exit;
+    }
+
+    if (tcpdata_info->tdata_psh_info_head == NULL) {
+        DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__));
+    }
+
+    while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+        if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) {
+            DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n",
+                __FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq));
+            tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+            tdata_psh_info->next = NULL;
+            _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+            ret = TRUE;
+        } else
+            break;
+    }
+    if (tdata_psh_info == NULL)
+        tcpdata_info->tdata_psh_info_tail = NULL;
+
+#ifdef DHDTCPACK_SUP_DBG
+    DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+        __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+exit:
+    return ret;
+}
+
+bool
+dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
+{
+    uint8 *new_ether_hdr;    /* Ethernet header of the new packet */
+    uint16 new_ether_type;    /* Ethernet type of the new packet */
+    uint8 *new_ip_hdr;        /* IP header of the new packet */
+    uint8 *new_tcp_hdr;        /* TCP header of the new packet */
+    uint32 new_ip_hdr_len;    /* IP header length of the new packet */
+    uint32 cur_framelen;
+    uint32 new_tcp_ack_num;    /* TCP acknowledge number of the new packet */
+    uint16 new_ip_total_len;    /* Total length of IP packet for the new packet */
+    uint32 new_tcp_hdr_len;    /* TCP header length of the new packet */
+    tcpack_sup_module_t *tcpack_sup_mod;
+    tcpack_info_t *tcpack_info_tbl;
+    int i;
+    bool ret = FALSE;
+    bool set_dotxinrx = TRUE;
+    unsigned long flags;
+
+    if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+        goto exit;
+
+    new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+    cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+    if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+        DHD_TRACE(("%s %d: Length %d too short or too long to be a TCP ACK\n",
+            __FUNCTION__, __LINE__, cur_framelen));
+        goto exit;
+    }
+
+    new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+    if (new_ether_type != ETHER_TYPE_IP) {
+        DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+            __FUNCTION__, __LINE__, new_ether_type));
+        goto exit;
+    }
+
+    DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+    new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+    cur_framelen -= ETHER_HDR_LEN;
+
+    ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+    new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+    if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+        DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+            __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+        goto exit;
+    }
+
+    new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+    cur_framelen -= new_ip_hdr_len;
+
+    ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+    DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+    /* Is it an ACK? Allow only the ACK flag, so as not to suppress other flags.
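+     * With the standard TCP flag encoding, a pure ACK has flags == 0x10
+     * (TCP_FLAG_ACK) and is a suppression candidate; e.g. SYN|ACK (0x12),
+     * FIN|ACK (0x11) and PSH|ACK (0x18) fail the equality test below and
+     * are always transmitted unmodified.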
+     */
+    if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+        DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+            __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+        goto exit;
+    }
+
+    new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+    new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+    /* This packet has TCP data, so just send it */
+    if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+        DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+        goto exit;
+    }
+
+    ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+    new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+    DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+        " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+        __FUNCTION__, __LINE__,
+        IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+        IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+        ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+        ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+    /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+    flags = dhd_os_tcpacklock(dhdp);
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+    counter_printlog(&tack_tbl);
+    tack_tbl.cnt[0]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+    tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+    if (!tcpack_sup_mod) {
+        DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+        ret = BCME_ERROR;
+        dhd_os_tcpackunlock(dhdp, flags);
+        goto exit;
+    }
+    /* Dereference only after the NULL check above */
+    tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+    if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) {
+        /* This TCPACK is an ACK to a TCPDATA PSH pkt, so keep set_dotxinrx TRUE */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+        tack_tbl.cnt[5]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+    } else
+        set_dotxinrx = FALSE;
+
+    for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) {
+        void *oldpkt;    /* TCPACK packet that is already in txq or DelayQ */
+        uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+        uint32 old_ip_hdr_len, old_tcp_hdr_len;
+        uint32 old_tcpack_num;    /* TCP ACK number of old TCPACK packet in Q */
+
+        if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+            DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n",
+                __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+            break;
+        }
+
+        if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+            DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n",
+                __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+            break;
+        }
+
+        old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+        old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+        old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+        old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+        old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+        DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+            " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+            IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+            IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+            ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+            ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+        /* If either the IP addresses or the TCP ports do not match, skip.
+         * Note that the src/dst addr fields in the IP header are contiguous, 8 bytes in total.
+         * Also, the src/dst port fields in the TCP header are contiguous, 4 bytes in total.
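+         * In other words, the single 8-byte memcmp below is equivalent to the
+         * two 4-byte compares in this sketch, but relies on the fixed header
+         * layout to halve the calls:
+         *
+         *    memcmp(new_src_ip, old_src_ip, IPV4_ADDR_LEN) ||
+         *    memcmp(new_dst_ip, old_dst_ip, IPV4_ADDR_LEN)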
+         */
+        if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+            &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+            memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+            &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2))
+            continue;
+
+        old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+        if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) {
+            /* The new packet has a higher TCP ACK number, so it replaces the old packet */
+            if (new_ip_hdr_len == old_ip_hdr_len &&
+                new_tcp_hdr_len == old_tcp_hdr_len) {
+                ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0);
+                bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len);
+                PKTFREE(dhdp->osh, pkt, FALSE);
+                DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n",
+                    __FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num));
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+                tack_tbl.cnt[2]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+                ret = TRUE;
+            } else {
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+                tack_tbl.cnt[6]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+                DHD_TRACE(("%s %d: length mismatch %d != %d || %d != %d"
+                    " ACK %u -> %u\n", __FUNCTION__, __LINE__,
+                    new_ip_hdr_len, old_ip_hdr_len,
+                    new_tcp_hdr_len, old_tcp_hdr_len,
+                    old_tcpack_num, new_tcp_ack_num));
+            }
+        } else if (new_tcp_ack_num == old_tcpack_num) {
+            set_dotxinrx = TRUE;
+            /* TCPACK retransmission */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+            tack_tbl.cnt[3]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+        } else {
+            DHD_TRACE(("%s %d: ACK number reversed: old %u(0x%p) new %u(0x%p)\n",
+                __FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
+                new_tcp_ack_num, pkt));
+        }
+        dhd_os_tcpackunlock(dhdp, flags);
+        goto exit;
+    }
+
+    if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) {
+        /* No TCPACK packet with the same IP addr and TCP port is found
+         * in tcp_ack_info_tbl, so add this packet to the table.
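+         *
+         * If the table is already full (i == TCPACK_INFO_MAXNUM), the else
+         * branch below leaves the packet as-is and it is sent out
+         * immediately; suppression here is purely opportunistic.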
+ */ + DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n", + __FUNCTION__, __LINE__, pkt, new_ether_hdr, + tcpack_sup_mod->tcpack_info_cnt)); + + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt; + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr; + tcpack_sup_mod->tcpack_info_cnt++; +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[1]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else { + ASSERT(i == tcpack_sup_mod->tcpack_info_cnt); + DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", + __FUNCTION__, __LINE__)); + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + /* Unless TCPACK_SUP_DELAYTX, dotxinrx is alwasy TRUE, so no need to set here */ + if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx) + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + + return ret; +} + +bool +dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *ether_hdr; /* Ethernet header of the new packet */ + uint16 ether_type; /* Ethernet type of the new packet */ + uint8 *ip_hdr; /* IP header of the new packet */ + uint8 *tcp_hdr; /* TCP header of the new packet */ + uint32 ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint16 ip_total_len; /* Total length of IP packet for the new packet */ + uint32 tcp_hdr_len; /* TCP header length of the new packet */ + uint32 tcp_seq_num; /* TCP sequence number of the new packet */ + uint16 tcp_data_len; /* TCP DATA length that excludes IP and TCP headers */ + uint32 end_tcp_seq_num; /* TCP seq number of the last byte in the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpdata_info_t *tcpdata_info = NULL; + tdata_psh_info_t *tdata_psh_info; + + int i; + bool ret = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) + goto exit; + + ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + ether_type = ether_hdr[12] << 8 | ether_hdr[13]; + + if (ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type)); + + ip_hdr = ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + ip_hdr_len = IPV4_HLEN(ip_hdr); + if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! 
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+		goto exit;
+	}
+
+	tcp_hdr = ip_hdr + ip_hdr_len;
+	cur_framelen -= ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]);
+	tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet is mere TCP ACK, so do nothing */
+	if (ip_total_len == ip_hdr_len + tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len);
+
+	if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) {
+		DHD_TRACE(("%s %d: Not an interesting TCP DATA packet\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_hdr[TCP_FLAGS_OFFSET]));
+
+	flags = dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+
+	/* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */
+	i = 0;
+	while (i < tcpack_sup_mod->tcpdata_info_cnt) {
+		tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		uint32 now_in_ms = OSL_SYSUPTIME();
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)),
+			ntoh16_ua(tdata_info_tmp->tcp_port.src),
+			ntoh16_ua(tdata_info_tmp->tcp_port.dst)));
+
+		/* If both IP address and TCP port number match, we found it so break.
+		 * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+		 * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+ */ + if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], + (void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 && + memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], + (void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) { + tcpdata_info = tdata_info_tmp; + tcpdata_info->last_used_time = now_in_ms; + break; + } + + if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) { + tdata_psh_info_t *tdata_psh_info_tmp; + tcpdata_info_t *last_tdata_info; + + while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) { + tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next; + tdata_psh_info_tmp->next = NULL; + DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n", + __FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq)); + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp); + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + tcpack_sup_mod->tcpdata_info_cnt--; + ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0); + + last_tdata_info = + &tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt]; + if (i < tcpack_sup_mod->tcpdata_info_cnt) { + ASSERT(last_tdata_info != tdata_info_tmp); + bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t)); + } + bzero(last_tdata_info, sizeof(tcpdata_info_t)); + DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt)); + /* Don't increase "i" here, so that the prev last tcpdata_info is checked */ + } else + i++; + } + + tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]); + tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len; + end_tcp_seq_num = tcp_seq_num + tcp_data_len; + + if (tcpdata_info == NULL) { + ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt); + if (i >= TCPDATA_INFO_MAXNUM) { + DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + + /* No TCP flow with the same IP addr and TCP port is found + * in tcp_data_info_tbl. So add this flow to the table. + */ + DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + /* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
+ */ + bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr, + IPV4_ADDR_LEN * 2); + bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port, + TCP_PORT_LEN * 2); + + tcpdata_info->last_used_time = OSL_SYSUPTIME(); + tcpack_sup_mod->tcpdata_info_cnt++; + } + + ASSERT(tcpdata_info != NULL); + + tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod); +#ifdef DHDTCPACK_SUP_DBG + DHD_TRACE(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + if (tdata_psh_info == NULL) { + DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tdata_psh_info->end_seq = end_tcp_seq_num; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[4]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + + DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n", + __FUNCTION__, __LINE__, tdata_psh_info->end_seq)); + + ASSERT(tdata_psh_info->next == NULL); + + if (tcpdata_info->tdata_psh_info_head == NULL) + tcpdata_info->tdata_psh_info_head = tdata_psh_info; + else { + ASSERT(tcpdata_info->tdata_psh_info_tail); + tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info; + } + tcpdata_info->tdata_psh_info_tail = tdata_psh_info; + + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return ret; +} + +bool +dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + uint8 *new_ether_hdr; /* Ethernet header of the new packet */ + uint16 new_ether_type; /* Ethernet type of the new packet */ + uint8 *new_ip_hdr; /* IP header of the new packet */ + uint8 *new_tcp_hdr; /* TCP header of the new packet */ + uint32 new_ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */ + uint16 new_ip_total_len; /* Total length of IP packet for the new packet */ + uint32 new_tcp_hdr_len; /* TCP header length of the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int i, free_slot = TCPACK_INFO_MAXNUM; + bool hold = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) { + goto exit; + } + + if (dhdp->tcpack_sup_ratio == 1) { + goto exit; + } + + new_ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, cur_framelen)); + goto exit; + } + + new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13]; + + if (new_ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, new_ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type)); + + new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + new_ip_hdr_len = IPV4_HLEN(new_ip_hdr); + if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr))); + goto exit; + } + + new_tcp_hdr = new_ip_hdr + new_ip_hdr_len; + cur_framelen -= new_ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + /* is it an ack ? Allow only ACK flag, not to suppress others. 
*/
+	if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+		DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+			__FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+		goto exit;
+	}
+
+	new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+	new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet has TCP data, so just send */
+	if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+	new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+	DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+	/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+	flags = dhd_os_tcpacklock(dhdp);
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+	/* Read the table pointer only after the NULL check above */
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	hold = TRUE;
+
+	for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+		void *oldpkt;	/* TCPACK packet that is already in txq or DelayQ */
+		uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+		uint32 old_ip_hdr_len;
+		uint32 old_tcpack_num;	/* TCP ACK number of old TCPACK packet in Q */
+
+		if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+			if (free_slot == TCPACK_INFO_MAXNUM) {
+				free_slot = i;
+			}
+			continue;
+		}
+
+		if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+			DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n",
+				__FUNCTION__, __LINE__, i));
+			hold = FALSE;
+			dhd_os_tcpackunlock(dhdp, flags);
+			goto exit;
+		}
+
+		old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+		old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+		old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+		old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+
+		DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either of IP address or TCP port number does not match, skip.
*/
+		if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+			&old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+			memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+			&old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) {
+			continue;
+		}
+
+		old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+		if (IS_TCPSEQ_GE(new_tcp_ack_num, old_tcpack_num)) {
+			tcpack_info_tbl[i].supp_cnt++;
+			if (tcpack_info_tbl[i].supp_cnt >= dhdp->tcpack_sup_ratio) {
+				tcpack_info_tbl[i].pkt_in_q = NULL;
+				tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+				tcpack_info_tbl[i].ifidx = 0;
+				tcpack_info_tbl[i].supp_cnt = 0;
+				hold = FALSE;
+			} else {
+				tcpack_info_tbl[i].pkt_in_q = pkt;
+				tcpack_info_tbl[i].pkt_ether_hdr = new_ether_hdr;
+				tcpack_info_tbl[i].ifidx = ifidx;
+			}
+			PKTFREE(dhdp->osh, oldpkt, TRUE);
+		} else {
+			PKTFREE(dhdp->osh, pkt, TRUE);
+		}
+		dhd_os_tcpackunlock(dhdp, flags);
+
+		if (!hold) {
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+			del_timer_sync(&tcpack_info_tbl[i].timer);
+#else
+			hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+		}
+		goto exit;
+	}
+
+	if (free_slot < TCPACK_INFO_MAXNUM) {
+		/* No TCPACK packet with the same IP addr and TCP port is found
+		 * in tcp_ack_info_tbl. So add this packet to the table.
+		 */
+		DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+			__FUNCTION__, __LINE__, pkt, new_ether_hdr,
+			free_slot));
+
+		tcpack_info_tbl[free_slot].pkt_in_q = pkt;
+		tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr;
+		tcpack_info_tbl[free_slot].ifidx = ifidx;
+		tcpack_info_tbl[free_slot].supp_cnt = 1;
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+		mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
+			jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay));
+#else
+		tasklet_hrtimer_start(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
+			ktime_set(0, dhdp->tcpack_sup_delay*1000000),
+			HRTIMER_MODE_REL);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+		tcpack_sup_mod->tcpack_info_cnt++;
+	} else {
+		DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+			__FUNCTION__, __LINE__));
+	}
+	dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+	return hold;
+}
+#endif /* DHDTCPACK_SUPPRESS */
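[Editor's note] dhd_tcpack_hold() above implements the TCPACK_SUP_HOLD policy: per matching stream it counts held pure ACKs and only forwards one (clearing the slot) once the count reaches dhdp->tcpack_sup_ratio; otherwise the newest ACK replaces the held one until the delay timer fires. A toy model of that bookkeeping, with hypothetical names, just to make the ratio semantics concrete:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy per-stream state: forward one of every 'ratio' consecutive pure ACKs
     * (ratio == 3 keeps 1 in 3, matching DEFAULT_TCPACK_SUPP_RATIO). */
    struct ack_slot {
        uint32_t supp_cnt;
        uint32_t ratio;
    };

    static bool ack_should_forward(struct ack_slot *s)
    {
        if (++s->supp_cnt >= s->ratio) {
            s->supp_cnt = 0;    /* slot emptied; the next ACK starts a new round */
            return true;
        }
        return false;           /* keep holding; newest ACK replaces the held one */
    }

    int main(void)
    {
        struct ack_slot s = { 0, 3 };
        int i;

        for (i = 1; i <= 9; i++)
            printf("ack %d -> %s\n", i, ack_should_forward(&s) ? "forward" : "hold");
        return 0;   /* forwards ACKs 3, 6 and 9; holds the rest */
    }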
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+tcp_hdr_flag_t
+dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *ether_hdr;	/* Ethernet header of the new packet */
+	uint16 ether_type;	/* Ethernet type of the new packet */
+	uint8 *ip_hdr;		/* IP header of the new packet */
+	uint8 *tcp_hdr;		/* TCP header of the new packet */
+	uint32 ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint8 flags;
+
+	ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+	if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, ether_type));
+		return FLAG_OTHERS;
+	}
+
+	ip_hdr = ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	if (cur_framelen < IPV4_MIN_HEADER_LEN) {
+		return FLAG_OTHERS;
+	}
+
+	ip_hdr_len = IPV4_HLEN(ip_hdr);
+	if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+		return FLAG_OTHERS;
+	}
+
+	tcp_hdr = ip_hdr + ip_hdr_len;
+
+	flags = (uint8)tcp_hdr[TCP_FLAGS_OFFSET];
+
+	if (flags & TCP_FLAG_SYN) {
+		if (flags & TCP_FLAG_ACK) {
+			return FLAG_SYNCACK;
+		}
+		return FLAG_SYNC;
+	}
+	return FLAG_OTHERS;
+}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
diff --git a/bcmdhd.101.10.361.x/dhd_ip.h b/bcmdhd.101.10.361.x/dhd_ip.h
new file mode 100755
index 0000000..a478c89
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_ip.h
@@ -0,0 +1,96 @@
+/*
+ * Header file describing the common ip parser function.
+ *
+ * Provides type definitions and function prototypes used to parse ip packet.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_ip_h_
+#define _dhd_ip_h_
+
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
+#include
+#include
+#include
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
+
+typedef enum pkt_frag
+{
+	DHD_PKT_FRAG_NONE = 0,
+	DHD_PKT_FRAG_FIRST,
+	DHD_PKT_FRAG_CONT,
+	DHD_PKT_FRAG_LAST
+} pkt_frag_t;
+
+extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+typedef enum tcp_hdr_flags {
+	FLAG_SYNC,
+	FLAG_SYNCACK,
+	FLAG_RST,
+	FLAG_OTHERS
+} tcp_hdr_flag_t;
+
+extern tcp_hdr_flag_t dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHDTCPACK_SUPPRESS
+#define TCPACKSZMIN	(ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
+/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
+#define TCPACKSZMAX	(TCPACKSZMIN + 100)
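[Editor's note] As a quick sanity check on these bounds, assuming the conventional minimum header sizes (14-byte 802.3 header, 20-byte IPv4 header, 20-byte TCP header; the real constants come from the protocol headers this file includes):

    /* Illustrative only: a pure ACK with no options is 54 bytes on the wire,
     * and the +100 slack for IP/TCP options caps candidates at 154 bytes. */
    enum { ETH_HDR = 14, IPV4_MIN = 20, TCP_MIN = 20 };
    _Static_assert(ETH_HDR + IPV4_MIN + TCP_MIN == 54, "min pure-ACK frame");
    _Static_assert(ETH_HDR + IPV4_MIN + TCP_MIN + 100 == 154, "max candidate frame");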
+
+/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */
+#ifndef TCPACK_INFO_MAXNUM
+#define TCPACK_INFO_MAXNUM 4
+#endif
+#ifndef TCPDATA_INFO_MAXNUM
+#define TCPDATA_INFO_MAXNUM 4
+#endif
+#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM)
+
+#define TCPDATA_INFO_TIMEOUT	5000	/* Remove tcpdata_info if inactive for this time (in ms) */
+
+#define DEFAULT_TCPACK_SUPP_RATIO	3
+#ifndef CUSTOM_TCPACK_SUPP_RATIO
+#define CUSTOM_TCPACK_SUPP_RATIO	DEFAULT_TCPACK_SUPP_RATIO
+#endif /* CUSTOM_TCPACK_SUPP_RATIO */
+
+#define DEFAULT_TCPACK_DELAY_TIME	10	/* ms */
+#ifndef CUSTOM_TCPACK_DELAY_TIME
+#define CUSTOM_TCPACK_DELAY_TIME	DEFAULT_TCPACK_DELAY_TIME
+#endif /* CUSTOM_TCPACK_DELAY_TIME */
+
+extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
+extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
+extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx);
+/* #define DHDTCPACK_SUP_DBG */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+extern counter_tbl_t tack_tbl;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#endif /* _dhd_ip_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_linux.c b/bcmdhd.101.10.361.x/dhd_linux.c
new file mode 100755
index 0000000..f2ab283
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux.c
@@ -0,0 +1,29878 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface.
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + * + * $Id$ + */ + +#include +#include +#include +#ifdef SHOW_LOGTRACE +#include +#include +#endif /* SHOW_LOGTRACE */ + +#if defined(PCIE_FULL_DONGLE) || defined(SHOW_LOGTRACE) +#include +#endif /* PCIE_FULL_DONGLE */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_TIZEN) +#include +#endif /* CONFIG_TIZEN */ +#include +#ifdef ENABLE_ADAPTIVE_SCHED +#include +#endif /* ENABLE_ADAPTIVE_SCHED */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include /* need to still support chips no longer in trunk firmware */ +#include +#include + +#include +#include +#include +#include <802.3.h> + +#ifdef WL_NANHO +#include +#endif /* WL_NANHO */ +#include +#include +#include +#include +#ifdef DHD_WET +#include +#endif /* DHD_WET */ +#ifdef PCIE_FULL_DONGLE +#include +#endif +#include +#include +#include +#ifdef WL_ESCAN +#include +#endif +#include +#include +#include +#if defined(WL_CFG80211) +#include +#ifdef WL_BAM +#include +#endif /* WL_BAM */ +#endif /* WL_CFG80211 */ +#ifdef PNO_SUPPORT +#include +#endif +#ifdef RTT_SUPPORT +#include +#endif +#ifdef DHD_TIMESYNC +#include +#include +#include +#endif /* DHD_TIMESYNC */ + +#include + +#ifdef CSI_SUPPORT +#include +#endif /* CSI_SUPPORT */ + +#ifdef CONFIG_COMPAT +#include +#endif + +#ifdef CONFIG_ARCH_EXYNOS +#ifndef SUPPORT_EXYNOS7420 +#include +#endif /* SUPPORT_EXYNOS7420 */ +#endif /* CONFIG_ARCH_EXYNOS */ + +#ifdef DHD_WMF +#include +#endif /* DHD_WMF */ + +#ifdef DHD_L2_FILTER +#include +#include +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + +#ifdef AMPDU_VO_ENABLE +/* XXX: Enabling VO AMPDU to reduce FER */ +#include <802.1d.h> +#endif /* AMPDU_VO_ENABLE */ + +#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK) +#include +#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */ +#include +#ifdef DHD_PKT_LOGGING +#include +#endif /* DHD_PKT_LOGGING */ +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +#include +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ +#ifdef DHD_DEBUG_PAGEALLOC +typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); +void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); +extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#if defined(DHD_TCP_WINSIZE_ADJUST) +#include +#include +#endif /* DHD_TCP_WINSIZE_ADJUST */ + +#ifdef ENABLE_DHD_GRO +#include +#endif /* ENABLE_DHD_GRO */ + +#define IP_PROT_RESERVED 0xFF + +#ifdef DHD_MQ +#define MQ_MAX_QUEUES AC_COUNT +#define MQ_MAX_CPUS 16 +int enable_mq = TRUE; +module_param(enable_mq, int, 0644); +int mq_select_disable = FALSE; +#endif + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE +#include +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +#if defined(DHD_LB) +#if !defined(PCIE_FULL_DONGLE) +#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE" +#endif /* !PCIE_FULL_DONGLE */ +#endif /* DHD_LB */ + +#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP) || defined(DHD_LB_STATS) +#if !defined(DHD_LB) +#error "DHD loadbalance derivatives are supported only if DHD_LB is defined" +#endif /* !DHD_LB */ +#endif /* DHD_LB_RXP || DHD_LB_TXP || DHD_LB_STATS */ + +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +static void dhd_m4_state_handler(struct work_struct * work); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ + +#if defined(WL_CFG80211) && 
defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP) +static int dhd_wait_for_file_dump(dhd_pub_t *dhdp); +#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */ + +#ifdef FIX_CPU_MIN_CLOCK +#include +#endif /* FIX_CPU_MIN_CLOCK */ + +#ifdef ENABLE_ADAPTIVE_SCHED +#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */ +#ifndef CUSTOM_CPUFREQ_THRESH +#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH +#endif /* CUSTOM_CPUFREQ_THRESH */ +#endif /* ENABLE_ADAPTIVE_SCHED */ + +/* enable HOSTIP cache update from the host side when an eth0:N is up */ +#define AOE_IP_ALIAS_SUPPORT 1 + +#ifdef PROP_TXSTATUS +#include +#include +#endif + +#if defined(OEM_ANDROID) +#include +#endif + +/* Maximum STA per radio */ +#if defined(BCM_ROUTER_DHD) +#define DHD_MAX_STA 128 +#else +#define DHD_MAX_STA 32 +#endif /* BCM_ROUTER_DHD */ + +#ifdef CUSTOMER_HW_AMLOGIC +#include +#endif + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) +#include + +#ifdef CTFPOOL +#define RXBUFPOOLSZ 2048 +#define RXBUFSZ DHD_FLOWRING_RX_BUFPOST_PKTSZ /* packet data buffer size */ +#endif /* CTFPOOL */ +#endif /* BCM_ROUTER_DHD && HNDCTF */ + +#ifdef BCMDBG +#include +#endif /* BCMDBG */ + +#ifdef DHD_EVENT_LOG_FILTER +#include +#endif /* DHD_EVENT_LOG_FILTER */ + +#ifdef DHDTCPSYNC_FLOOD_BLK +static void dhd_blk_tsfl_handler(struct work_struct * work); +#endif /* DHDTCPSYNC_FLOOD_BLK */ + +#ifdef WL_NATOE +#include +#endif /* WL_NATOE */ + +#ifdef DHD_TX_PROFILE +#include +#include +#include +#include +#endif /* defined(DHD_TX_PROFILE) */ + +#if defined(DHD_TCP_WINSIZE_ADJUST) +static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0}; +static uint dhd_use_tcp_window_size_adjust = FALSE; +static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb); +#endif /* DHD_TCP_WINSIZE_ADJUST */ + +#ifdef SET_RANDOM_MAC_SOFTAP +#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL +#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11 +#endif +static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL; +#endif /* SET_RANDOM_MAC_SOFTAP */ + +#if defined(BCM_ROUTER_DHD) +/* + * Queue budget: Minimum number of packets that a queue must be allowed to hold + * to prevent starvation. + */ +#define DHD_QUEUE_BUDGET_DEFAULT (256) +int dhd_queue_budget = DHD_QUEUE_BUDGET_DEFAULT; + +module_param(dhd_queue_budget, int, 0); + +/* + * Per station pkt threshold: Sum total of all packets in the backup queues of + * flowrings belonging to the station, not including packets already admitted + * to flowrings. + */ +#define DHD_STA_THRESHOLD_DEFAULT (2048) +int dhd_sta_threshold = DHD_STA_THRESHOLD_DEFAULT; +module_param(dhd_sta_threshold, int, 0); + +/* + * Per interface pkt threshold: Sum total of all packets in the backup queues of + * flowrings belonging to the interface, not including packets already admitted + * to flowrings. + */ +#define DHD_IF_THRESHOLD_DEFAULT (2048 * 32) +int dhd_if_threshold = DHD_IF_THRESHOLD_DEFAULT; +module_param(dhd_if_threshold, int, 0); +#endif /* BCM_ROUTER_DHD */ + +/* XXX: where does this belong? */ +/* XXX: this needs to reviewed for host OS. 
*/ +const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; +const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; +#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]] + +#ifdef ARP_OFFLOAD_SUPPORT +void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx); +static int dhd_inetaddr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inetaddr_notifier = { + .notifier_call = dhd_inetaddr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in the kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inetaddr_notifier_registered = FALSE; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +int dhd_inet6addr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inet6addr_notifier = { + .notifier_call = dhd_inet6addr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inet6addr_notifier_registered = FALSE; +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + +#if defined (CONFIG_PM_SLEEP) +#include +volatile bool dhd_mmc_suspend = FALSE; +DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); +#ifdef ENABLE_WAKEUP_PKT_DUMP +volatile bool dhd_mmc_wake = FALSE; +long long temp_raw; +#endif /* ENABLE_WAKEUP_PKT_DUMP */ +#endif /* defined(CONFIG_PM_SLEEP) */ + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) +extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ +#if defined(OEM_ANDROID) +static void dhd_hang_process(struct work_struct *work_data); +#endif /* OEM_ANDROID */ +MODULE_LICENSE("GPL and additional rights"); + +#if defined(MULTIPLE_SUPPLICANT) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) +DEFINE_MUTEX(_dhd_mutex_lock_); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +#endif +static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force); + +#include + +/* XXX Set up an MTU change notifier per linux/notifier.h? 
*/ +#ifndef PROP_TXSTATUS +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) +#else +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) +#endif + +#ifdef PROP_TXSTATUS +extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx); +extern void dhd_wlfc_plat_init(void *dhd); +extern void dhd_wlfc_plat_deinit(void *dhd); +#endif /* PROP_TXSTATUS */ +#ifdef USE_DYNAMIC_F2_BLKSIZE +extern uint sd_f2_blocksize; +extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef CONFIG_PARTIALSUSPEND_SLP +/* XXX SLP use defferent earlysuspend header file and some functions + * But most of meaning is same as Android + */ +#include +#define CONFIG_HAS_EARLYSUSPEND +#define DHD_USE_EARLYSUSPEND +#define register_early_suspend register_pre_suspend +#define unregister_early_suspend unregister_pre_suspend +#define early_suspend pre_suspend +#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50 +#else +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ +#endif /* CONFIG_PARTIALSUSPEND_SLP */ + +#ifdef CONFIG_IRQ_HISTORY +#include +#endif /* CONFIG_IRQ_HISTORY */ + +#if defined(OEM_ANDROID) +#include +#endif /* OEM_ANDROID */ + +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, + u8* program, uint32 program_len); +static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, + uint32 mode, uint32 enable); +static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id); +#endif /* PKT_FILTER_SUPPORT && APF */ + +#ifdef DHD_FW_COREDUMP +static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); +#endif /* DHD_FW_COREDUMP */ + +#ifdef DHD_LOG_DUMP + +struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM]; + +/* Only header for log dump buffers is stored in array + * header for sections like 'dhd dump', 'ext trap' + * etc, is not in the array, because they are not log + * ring buffers + */ +dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = { + {GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL}, + {PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE}, + {SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL} +}; +static int dld_buf_size[DLD_BUFFER_NUM] = { + LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */ + LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */ + LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */ +}; + +static void dhd_log_dump_init(dhd_pub_t *dhd); +static void dhd_log_dump_deinit(dhd_pub_t *dhd); +static void dhd_log_dump(void *handle, void *event_info, u8 event); +static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type); +static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size); +static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type); +char *dhd_dbg_get_system_timestamp(void); +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#include +#include +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef DHD_DEBUG_UART +#include +#define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu" +static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event); +static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd); +#endif /* DHD_DEBUG_UART */ + +static int 
dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); +static struct notifier_block dhd_reboot_notifier = { + .notifier_call = dhd_reboot_callback, + .priority = 1, +}; + +#ifdef OEM_ANDROID +#ifdef BCMPCIE +static int is_reboot = 0; +#endif /* BCMPCIE */ +#endif /* OEM_ANDROID */ + +dhd_pub_t *g_dhd_pub = NULL; + +#if defined(BT_OVER_SDIO) +#include "dhd_bt_interface.h" +#endif /* defined (BT_OVER_SDIO) */ + +#ifdef WL_NANHO +static int dhd_nho_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf); +static int dhd_nho_ioctl_cb(void *drv_ctx, int ifidx, wl_ioctl_t *ioc, bool drv_lock); +static int dhd_nho_evt_cb(void *drv_ctx, int ifidx, bcm_event_t *evt, uint16 evt_len); +#endif /* WL_NANHO */ + +#ifdef WL_STATIC_IF +bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev); +#endif /* WL_STATIC_IF */ + +atomic_t exit_in_progress = ATOMIC_INIT(0); + +static void dhd_process_daemon_msg(struct sk_buff *skb); +static void dhd_destroy_to_notifier_skt(void); +static int dhd_create_to_notifier_skt(void); +static struct sock *nl_to_event_sk = NULL; +int sender_pid = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +struct netlink_kernel_cfg dhd_netlink_cfg = { + .groups = 1, + .input = dhd_process_daemon_msg, +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */ + +#ifdef DHD_PKTTS +static int dhd_create_to_notifier_ts(void); +static void dhd_destroy_to_notifier_ts(void); + +static struct sock *nl_to_ts = NULL; +int sender_pid_ts = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +static void dhd_recv_msg_from_ts(struct sk_buff *skb); + +struct netlink_kernel_cfg dhd_netlink_ts = { + .groups = 1, + .input = dhd_recv_msg_from_ts, +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */ + +#define GET_METADATA_VER(val) ((uint16)((val & 0xffff0000) >> 16)) +#define GET_METADATA_BUFLEN(val) ((uint16)(val & 0x0000ffff)) +#endif /* DHD_PKTTS */ + +#if defined(BT_OVER_SDIO) +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = TRUE; +#else +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = FALSE; +#endif +/* Flag to indicate if we should download firmware on driver load */ +uint dhd_download_fw_on_driverload = TRUE; + +/* Definitions to provide path to the firmware and nvram + * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" + */ +char firmware_path[MOD_PARAM_PATHLEN]; +char nvram_path[MOD_PARAM_PATHLEN]; +char clm_path[MOD_PARAM_PATHLEN]; +char config_path[MOD_PARAM_PATHLEN]; +#ifdef DHD_UCODE_DOWNLOAD +char ucode_path[MOD_PARAM_PATHLEN]; +#endif /* DHD_UCODE_DOWNLOAD */ + +module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660); + +/* backup buffer for firmware and nvram path */ +char fw_bak_path[MOD_PARAM_PATHLEN]; +char nv_bak_path[MOD_PARAM_PATHLEN]; + +/* information string to keep firmware, chio, cheip version info visiable from log */ +char info_string[MOD_PARAM_INFOLEN]; +module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); +int op_mode = 0; +int disable_proptx = 0; +module_param(op_mode, int, 0644); +#if defined(OEM_ANDROID) +extern int wl_control_wl_start(struct net_device *dev); +#if defined(BCMLXSDMMC) || defined(BCMDBUS) +struct semaphore dhd_registration_sem; +#endif /* BCMXSDMMC */ +#endif /* defined(OEM_ANDROID) */ +void dhd_generate_rand_mac_addr(struct ether_addr *ea_addr); + +#ifdef DHD_LOG_DUMP +int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE; +module_param(logdump_max_filesize, int, 
0644); +int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE; +module_param(logdump_max_bufsize, int, 0644); +int logdump_periodic_flush = FALSE; +module_param(logdump_periodic_flush, int, 0644); +#ifdef EWP_ECNTRS_LOGGING +int logdump_ecntr_enable = TRUE; +#else +int logdump_ecntr_enable = FALSE; +#endif /* EWP_ECNTRS_LOGGING */ +module_param(logdump_ecntr_enable, int, 0644); +#ifdef EWP_RTT_LOGGING +int logdump_rtt_enable = TRUE; +#else +int logdump_rtt_enable = FALSE; +#endif /* EWP_RTT_LOGGING */ +int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE; +#endif /* DHD_LOG_DUMP */ + +#ifdef EWP_EDL +int host_edl_support = TRUE; +module_param(host_edl_support, int, 0644); +#endif + +/* deferred handlers */ +static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event); +static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event); +#ifdef BCM_ROUTER_DHD +static void dhd_inform_dhd_monitor_handler(void *handle, void *event_info, u8 event); +#endif +#ifdef WL_NATOE +static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event); +static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event); +#endif /* WL_NATOE */ + +#ifdef DHD_UPDATE_INTF_MAC +static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event); +#endif /* DHD_UPDATE_INTF_MAC */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#ifdef WL_CFG80211 +extern void dhd_netdev_free(struct net_device *ndev); +#endif /* WL_CFG80211 */ +static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev); + +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) +static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev); +#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */ + +#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER)) +/* update rx_pkt_chainable state of dhd interface */ +static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx); +#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */ + +/* Error bits */ +module_param(dhd_msg_level, int, 0); +#if defined(WL_WIRELESS_EXT) +module_param(iw_msg_level, int, 0); +#endif +#ifdef WL_CFG80211 +module_param(wl_dbg_level, int, 0); +#endif +module_param(android_msg_level, int, 0); +module_param(config_msg_level, int, 0); + +#ifdef ARP_OFFLOAD_SUPPORT +/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ +/* XXX ARP HOST Auto Reply can cause dongle trap at VSDB situation */ +/* XXX ARP OL SNOOP can be used to more good quility */ + +#ifdef ENABLE_ARP_SNOOP_MODE +uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY | + ARP_OL_UPDATE_HOST_CACHE); +#else +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE; +#endif /* ENABLE_ARP_SNOOP_MODE */ + +module_param(dhd_arp_mode, uint, 0); +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* Disable Prop tx */ +module_param(disable_proptx, int, 0644); +/* load firmware and/or nvram values from the filesystem */ +module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); +module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660); 
+module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0); +#ifdef DHD_UCODE_DOWNLOAD +module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660); +#endif /* DHD_UCODE_DOWNLOAD */ + +/* wl event forwarding */ +#ifdef WL_EVENT_ENAB +uint wl_event_enable = true; +#else +uint wl_event_enable = false; +#endif /* WL_EVENT_ENAB */ +module_param(wl_event_enable, uint, 0660); + +/* wl event forwarding */ +#ifdef LOGTRACE_PKT_SENDUP +uint logtrace_pkt_sendup = true; +#else +uint logtrace_pkt_sendup = false; +#endif /* LOGTRACE_PKT_SENDUP */ +module_param(logtrace_pkt_sendup, uint, 0660); + +/* Watchdog interval */ +/* extend watchdog expiration to 2 seconds when DPC is running */ +#define WATCHDOG_EXTEND_INTERVAL (2000) + +uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS; +module_param(dhd_watchdog_ms, uint, 0); + +#ifdef DHD_PCIE_RUNTIMEPM +uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS; +#endif /* DHD_PCIE_RUNTIMEPMT */ +#if defined(DHD_DEBUG) +/* Console poll interval */ +#if defined(OEM_ANDROID) +uint dhd_console_ms = 0; /* XXX andrey by default no fw msg prints */ +#else +uint dhd_console_ms = 250; +#endif /* OEM_ANDROID */ +module_param(dhd_console_ms, uint, 0644); +#else +uint dhd_console_ms = 0; +#endif /* DHD_DEBUG */ + +uint dhd_slpauto = TRUE; +module_param(dhd_slpauto, uint, 0); + +#ifdef PKT_FILTER_SUPPORT +/* Global Pkt filter enable control */ +uint dhd_pkt_filter_enable = TRUE; +module_param(dhd_pkt_filter_enable, uint, 0); +#endif + +/* Pkt filter init setup */ +uint dhd_pkt_filter_init = 0; +module_param(dhd_pkt_filter_init, uint, 0); + +/* Pkt filter mode control */ +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER +uint dhd_master_mode = FALSE; +#else +uint dhd_master_mode = FALSE; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ +module_param(dhd_master_mode, uint, 0); + +int dhd_watchdog_prio = 0; +module_param(dhd_watchdog_prio, int, 0); + +/* DPC thread priority */ +int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING; +module_param(dhd_dpc_prio, int, 0); + +/* RX frame thread priority */ +int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING; +module_param(dhd_rxf_prio, int, 0); + +#if !defined(BCMDBUS) +extern int dhd_dongle_ramsize; +module_param(dhd_dongle_ramsize, int, 0); +#endif /* !BCMDBUS */ + +#ifdef WL_CFG80211 +int passive_channel_skip = 0; +module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); +#endif /* WL_CFG80211 */ +static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev); + +#ifdef DHD_MSI_SUPPORT +uint enable_msi = TRUE; +module_param(enable_msi, uint, 0); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_SSSR_DUMP +int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len); +module_param(sssr_enab, uint, 0); +module_param(fis_enab, uint, 0); +#endif /* DHD_SSSR_DUMP */ + +/* Keep track of number of instances */ +static int dhd_found = 0; +static int instance_base = 0; /* Starting instance number */ +module_param(instance_base, int, 0644); + +#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE) +/* + * Rx path process budget(dhd_napi_weight) number of packets in one go and hands over + * the packets to network stack. + * + * dhd_dpc tasklet is the producer(packets received from dongle) and dhd_napi_poll() + * is the consumer. The maximum number of packets that can be received from the dongle + * at any given point of time are D2HRING_RXCMPLT_MAX_ITEM. + * Also DHD will always post fresh rx buffers to dongle while processing rx completions. + * + * The consumer must consume the packets at equal are better rate than the producer. 
+ * i.e if dhd_napi_poll() does not process at the same rate as the producer(dhd_dpc), + * rx_process_queue depth increases, which can even consume the entire system memory. + * Such situation will be tacken care by rx flow control. + * + * Device drivers are strongly advised to not use bigger value than NAPI_POLL_WEIGHT + */ +static int dhd_napi_weight = NAPI_POLL_WEIGHT; +module_param(dhd_napi_weight, int, 0644); +#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */ + +#ifdef PCIE_FULL_DONGLE +extern int h2d_max_txpost; +module_param(h2d_max_txpost, int, 0644); + +#if defined(DHD_HTPUT_TUNABLES) +extern int h2d_htput_max_txpost; +module_param(h2d_htput_max_txpost, int, 0644); +#endif /* DHD_HTPUT_TUNABLES */ + +#ifdef AGG_H2D_DB +extern bool agg_h2d_db_enab; +module_param(agg_h2d_db_enab, bool, 0644); +extern uint agg_h2d_db_timeout; +module_param(agg_h2d_db_timeout, uint, 0644); +extern uint agg_h2d_db_inflight_thresh; +module_param(agg_h2d_db_inflight_thresh, uint, 0644); +#endif /* AGG_H2D_DB */ + +extern uint dma_ring_indices; +module_param(dma_ring_indices, uint, 0644); + +extern bool h2d_phase; +module_param(h2d_phase, bool, 0644); +extern bool force_trap_bad_h2d_phase; +module_param(force_trap_bad_h2d_phase, bool, 0644); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef FORCE_TPOWERON +/* + * On Fire's reference platform, coming out of L1.2, + * there is a constant delay of 45us between CLKREQ# and stable REFCLK + * Due to this delay, with tPowerOn < 50 + * there is a chance of the refclk sense to trigger on noise. + * + * 0x29 when written to L1SSControl2 translates to 50us. + */ +#define FORCE_TPOWERON_50US 0x29 +uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */ +module_param(tpoweron_scale, uint, 0644); +#endif /* FORCE_TPOWERON */ + +#ifdef SHOW_LOGTRACE +#if defined(CUSTOMER_HW4_DEBUG) +#define WIFI_PATH "/etc/wifi/" +static char *logstrs_path = VENDOR_PATH WIFI_PATH"logstrs.bin"; +char *st_str_file_path = VENDOR_PATH WIFI_PATH"rtecdc.bin"; +static char *map_file_path = VENDOR_PATH WIFI_PATH"rtecdc.map"; +static char *rom_st_str_file_path = VENDOR_PATH WIFI_PATH"roml.bin"; +static char *rom_map_file_path = VENDOR_PATH WIFI_PATH"roml.map"; +#else +static char *logstrs_path = PLATFORM_PATH"logstrs.bin"; +char *st_str_file_path = PLATFORM_PATH"rtecdc.bin"; +static char *map_file_path = PLATFORM_PATH"rtecdc.map"; +static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin"; +static char *rom_map_file_path = PLATFORM_PATH"roml.map"; +#endif /* CUSTOMER_HW4_DEBUG */ + +static char *ram_file_str = "rtecdc"; +static char *rom_file_str = "roml"; + +module_param(logstrs_path, charp, S_IRUGO); +module_param(st_str_file_path, charp, S_IRUGO); +module_param(map_file_path, charp, S_IRUGO); +module_param(rom_st_str_file_path, charp, S_IRUGO); +module_param(rom_map_file_path, charp, S_IRUGO); + +static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp); +static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end); +static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, + char *map_file); +#endif /* SHOW_LOGTRACE */ + +#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) +static void gdb_proxy_fs_try_create(dhd_info_t *dhd, const char *dev_name); +static void gdb_proxy_fs_remove(dhd_info_t *dhd); +#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */ + +#ifdef D2H_MINIDUMP +void dhd_d2h_minidump(dhd_pub_t *dhdp); +#endif /* D2H_MINIDUMP */ + 
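[Editor's note] The dhd_napi_weight comment above describes the standard NAPI contract: the poll callback consumes at most `budget` packets per invocation and only completes (re-arming interrupts) when it runs out of work, which is what bounds the producer/consumer imbalance it discusses. A generic, hedged sketch of that contract (driver names are illustrative; the 4-argument netif_napi_add() shown in the trailing comment is the pre-5.19 kernel signature):

    #include <linux/netdevice.h>

    static struct sk_buff *demo_next_rx_skb(void);	/* hypothetical rx-queue helper */

    /* Illustrative poll callback: process up to 'budget' rx completions, then
     * either stay scheduled (budget exhausted) or complete and re-enable irqs. */
    static int demo_napi_poll(struct napi_struct *napi, int budget)
    {
        int work_done = 0;

        while (work_done < budget) {
            struct sk_buff *skb = demo_next_rx_skb();
            if (!skb)
                break;
            napi_gro_receive(napi, skb);
            work_done++;
        }

        /* Fewer than 'budget' processed: backlog drained, so stop polling. */
        if (work_done < budget)
            napi_complete_done(napi, work_done);

        return work_done;
    }

    /* Registration ties the poll function to the weight discussed above:
     *	netif_napi_add(ndev, &demo_napi, demo_napi_poll, NAPI_POLL_WEIGHT);
     */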
+#define DHD_MEMDUMP_TYPE_STR_LEN 32 +#define DHD_MEMDUMP_PATH_STR_LEN 128 + +#ifdef DHD_TX_PROFILE +/* process layer 3 headers, to ultimately determine if a + * dhd_tx_profile_protocol_t matches + */ +static int process_layer3_headers(uint8 **p, int plen, uint16 *type); + +/* process layer 2 headers, to ultimately determine if a + * dhd_tx_profile_protocol_t matches + */ +static int process_layer2_headers(uint8 **p, int *plen, uint16 *type, bool is_host_sfhllc); + +/* whether or not a dhd_tx_profile_protocol_t matches with data in a packet */ +bool dhd_protocol_matches_profile(uint8 *p, int plen, const + dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc); +#endif /* defined(DHD_TX_PROFILE) */ + +#define PATH_BANDLOCK_INFO PLATFORM_PATH".bandlock.info" + +static void dhd_set_bandlock(dhd_pub_t * dhd); + +static void +dhd_tx_stop_queues(struct net_device *net) +{ +#ifdef DHD_MQ + netif_tx_stop_all_queues(net); +#else + netif_stop_queue(net); +#endif +} + +static void +dhd_tx_start_queues(struct net_device *net) +{ +#ifdef DHD_MQ + netif_tx_wake_all_queues(net); +#else + netif_wake_queue(net); +#endif +} + +#ifdef USE_WFA_CERT_CONF +int g_frameburst = 1; +#endif /* USE_WFA_CERT_CONF */ + +static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd); + +#ifdef PCIE_FULL_DONGLE +#define DHD_IF_STA_LIST_LOCK_INIT(lock) spin_lock_init(lock) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, + struct list_head *snapshot_list); +static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ +#endif /* PCIE_FULL_DONGLE */ + +/* Control fw roaming */ +#ifdef BCMCCX +uint dhd_roam_disable = 0; +#else +#ifdef OEM_ANDROID +uint dhd_roam_disable = 0; +#else +uint dhd_roam_disable = 1; +#endif +#endif /* BCMCCX */ + +#ifdef BCMDBGFS +extern void dhd_dbgfs_init(dhd_pub_t *dhdp); +extern void dhd_dbgfs_remove(void); +#endif + +/* Enable TX status metadta report: 0=disable 1=enable 2=debug */ +static uint pcie_txs_metadata_enable = 0; +module_param(pcie_txs_metadata_enable, int, 0); + +/* Control radio state */ +uint dhd_radio_up = 1; + +/* Network inteface name */ +char iface_name[IFNAMSIZ] = {'\0'}; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + +/* The following are specific to the SDIO dongle */ + +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + +/* DS Exit response timeout */ +int ds_exit_timeout_msec = DS_EXIT_TIMEOUT; + +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); + +/* Use polling */ +uint dhd_poll = FALSE; +module_param(dhd_poll, uint, 0); + +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); + +/* SDIO Drive Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); + +#ifdef BCMSDIO +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0); +module_param(dhd_rxbound, uint, 0); + +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, uint, 0); + +#ifdef BCMINTERNAL +extern uint dhd_anychip; +module_param(dhd_anychip, uint, 0); +#endif /* BCMINTERNAL */ +#endif /* BCMSDIO */ 
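[Editor's note] The firmware/nvram path strings and the SDIO tunables above all rely on the same Linux module-parameter plumbing. A minimal standalone module showing the pattern (all names here are hypothetical, not part of bcmdhd); parameters set at load time, e.g. insmod demo.ko fw_path=/vendor/firmware/fw.bin demo_watchdog_ms=20:

    #include <linux/init.h>
    #include <linux/module.h>

    #define DEMO_PATHLEN 256

    static char fw_path[DEMO_PATHLEN];	/* filled in from the insmod command line */
    module_param_string(fw_path, fw_path, DEMO_PATHLEN, 0660);
    MODULE_PARM_DESC(fw_path, "firmware image path (illustrative)");

    static uint demo_watchdog_ms = 10;
    module_param(demo_watchdog_ms, uint, 0);
    MODULE_PARM_DESC(demo_watchdog_ms, "watchdog interval in ms (illustrative)");

    static int __init demo_init(void)
    {
        pr_info("demo: fw_path=%s watchdog=%ums\n", fw_path, demo_watchdog_ms);
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");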
+ +#ifdef BCMSLTGT +#ifdef BCMFPGA_HW +/* For FPGA use fixed htclkration as 30 */ +uint htclkratio = 30; +#else +uint htclkratio = 1; +#endif /* BCMFPGA_HW */ +module_param(htclkratio, uint, 0); + +int dngl_xtalfreq = 0; +module_param(dngl_xtalfreq, int, 0); +#endif /* BCMSLTGT */ + +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); + +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif /* SDTEST */ + +#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING +uint dhd_dscpmap_enable = 1; +module_param(dhd_dscpmap_enable, uint, 0644); +#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */ + +#if defined(BCMSUP_4WAY_HANDSHAKE) +/* Use in dongle supplicant for 4-way handshake */ +#if defined(WLFBT) || defined(WL_ENABLE_IDSUP) +/* Enable idsup by default (if supported in fw) */ +uint dhd_use_idsup = 1; +#else +uint dhd_use_idsup = 0; +#endif /* WLFBT || WL_ENABLE_IDSUP */ +module_param(dhd_use_idsup, uint, 0); +#endif /* BCMSUP_4WAY_HANDSHAKE */ + +#ifndef BCMDBUS +#if defined(OEM_ANDROID) +/* Allow delayed firmware download for debug purpose */ +int allow_delay_fwdl = FALSE; +#elif defined(BCM_ROUTER_DHD) +/* Allow delayed firmware download for debug purpose */ +int allow_delay_fwdl = FALSE; +#else +int allow_delay_fwdl = TRUE; +#endif /* OEM_ANDROID */ +module_param(allow_delay_fwdl, int, 0); +#endif /* !BCMDBUS */ + +#ifdef GDB_PROXY +/* Adds/replaces deadman_to= in NVRAM file with deadman_to=0 */ +static uint nodeadman = 0; +module_param(nodeadman, uint, 0); +#endif /* GDB_PROXY */ + +#ifdef ECOUNTER_PERIODIC_DISABLE +uint enable_ecounter = FALSE; +#else +uint enable_ecounter = TRUE; +#endif +module_param(enable_ecounter, uint, 0); + +#ifdef BCMQT_HW +int qt_flr_reset = FALSE; +module_param(qt_flr_reset, int, 0); + +int qt_dngl_timeout = 0; // dongle attach timeout in ms +module_param(qt_dngl_timeout, int, 0); +#endif /* BCMQT_HW */ + +/* TCM verification flag */ +uint dhd_tcm_test_enable = FALSE; +module_param(dhd_tcm_test_enable, uint, 0644); + +extern char dhd_version[]; +extern char fw_version[]; +extern char clm_version[]; + +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +static void dhd_net_if_lock_local(dhd_info_t *dhd); +static void dhd_net_if_unlock_local(dhd_info_t *dhd); +static void dhd_suspend_lock(dhd_pub_t *dhdp); +static void dhd_suspend_unlock(dhd_pub_t *dhdp); + +/* Monitor interface */ +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + +#ifdef DHD_PM_CONTROL_FROM_FILE +bool g_pm_control; +#ifdef DHD_EXPORT_CNTL_FILE +uint32 pmmode_val = 0xFF; +#endif /* DHD_EXPORT_CNTL_FILE */ +#ifdef CUSTOMER_HW10 +void dhd_control_pm(dhd_pub_t *dhd, uint *); +#else +void sec_control_pm(dhd_pub_t *dhd, uint *); +#endif /* CUSTOMER_HW10 */ +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef DHD_PM_OVERRIDE +bool g_pm_override; +#endif /* DHD_PM_OVERRIDE */ + +#ifndef BCMDBUS +static void dhd_dpc(ulong data); +#endif /* !BCMDBUS */ +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); +void dhd_os_wd_timer_extend(void *bus, bool extend); + +#ifdef TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ + +static int dhd_wl_host_event(dhd_info_t 
*dhd, int ifidx, void *pktdata, uint16 pktlen, + wl_event_msg_t *event_ptr, void **data_ptr); + +#if defined(CONFIG_PM_SLEEP) +static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +{ + int ret = NOTIFY_DONE; + bool suspend = FALSE; + dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, const dhd_info_t, pm_notifier); + dhd_pub_t *dhd = &dhdinfo->pub; + struct dhd_conf *conf = dhd->conf; + int suspend_mode = conf->suspend_mode; + + BCM_REFERENCE(dhdinfo); + BCM_REFERENCE(suspend); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + suspend = TRUE; + break; + + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + suspend = FALSE; + break; + } + + if(!dhd->early_suspended && suspend_mode != PM_NOTIFIER) { + suspend_mode = PM_NOTIFIER; + conf->suspend_mode = PM_NOTIFIER; + conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND); + printf("%s: switch suspend_mode to %d\n", __FUNCTION__, suspend_mode); + } + printf("%s: action=%ld, suspend=%d, suspend_mode=%d\n", + __FUNCTION__, action, suspend, suspend_mode); + if (suspend) { + DHD_OS_WAKE_LOCK_WAIVE(dhd); + if (suspend_mode == PM_NOTIFIER) + dhd_suspend_resume_helper(dhdinfo, suspend, 0); +#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) + dhd_wlfc_suspend(dhd); +#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */ + if (suspend_mode == PM_NOTIFIER || suspend_mode == SUSPEND_MODE_2) + dhd_conf_set_suspend_resume(dhd, suspend); + DHD_OS_WAKE_LOCK_RESTORE(dhd); + } else { + if (suspend_mode == PM_NOTIFIER || suspend_mode == SUSPEND_MODE_2) + dhd_conf_set_suspend_resume(dhd, suspend); +#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) + dhd_wlfc_resume(dhd); +#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */ + if (suspend_mode == PM_NOTIFIER) + dhd_suspend_resume_helper(dhdinfo, suspend, 0); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 39)) + dhd_mmc_suspend = suspend; + smp_mb(); +#endif + + return ret; +} + +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_pm_notifier_registered = FALSE; + +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); +#endif /* CONFIG_PM_SLEEP */ + +/* Request scheduling of the bus rx frame */ +static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb); +static void dhd_os_rxflock(dhd_pub_t *pub); +static void dhd_os_rxfunlock(dhd_pub_t *pub); + +#if defined(DHD_H2D_LOG_TIME_SYNC) +static void +dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event); +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +/** priv_link is the link between netdev and the dhdif and dhd_info structs. 
*/ +typedef struct dhd_dev_priv { + dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ + dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ + int ifidx; /* interface index */ + void * lkup; +} dhd_dev_priv_t; + +#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) +#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) +#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) +#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) +#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup) + +/** Clear the dhd net_device's private structure. */ +static inline void +dhd_dev_priv_clear(struct net_device * dev) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = (dhd_info_t *)NULL; + dev_priv->ifp = (dhd_if_t *)NULL; + dev_priv->ifidx = DHD_BAD_IF; + dev_priv->lkup = (void *)NULL; +} + +/** Setup the dhd net_device's private structure. */ +static inline void +dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp, + int ifidx) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = dhd; + dev_priv->ifp = ifp; + dev_priv->ifidx = ifidx; +} + +/* Return interface pointer */ +struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx) +{ + ASSERT(ifidx < DHD_MAX_IFS); + + if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS) + return NULL; + + return dhdp->info->iflist[ifidx]; +} + +#ifdef WLEASYMESH +int +dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast) +{ + dhd_if_t *ifp; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp(dhdp, ifidx); + if (ifp == NULL) { + return BCME_ERROR; + } + if (mcast) { + memcpy(ifp->_1905_al_mcast, ea, ETHER_ADDR_LEN); + } else { + memcpy(ifp->_1905_al_ucast, ea, ETHER_ADDR_LEN); + } + return BCME_OK; +} +int +dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast) +{ + dhd_if_t *ifp; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp(dhdp, ifidx); + if (ifp == NULL) { + return BCME_ERROR; + } + if (mcast) { + memcpy(ea, ifp->_1905_al_mcast, ETHER_ADDR_LEN); + } else { + memcpy(ea, ifp->_1905_al_ucast, ETHER_ADDR_LEN); + } + return BCME_OK; +} +#endif /* WLEASYMESH */ + +#ifdef PCIE_FULL_DONGLE + +/** Dummy objects are defined with state representing bad|down. + * Performance gains from reducing branch conditionals, instruction parallelism, + * dual issue, reducing load shadows, avail of larger pipelines. + * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer + * is accessed via the dhd_sta_t. 
+ */ + +/* Dummy dhd_info object */ +dhd_info_t dhd_info_null = { + .pub = { + .info = &dhd_info_null, +#ifdef DHDTCPACK_SUPPRESS + .tcpack_sup_mode = TCPACK_SUP_REPLACE, +#endif /* DHDTCPACK_SUPPRESS */ +#if defined(BCM_ROUTER_DHD) + .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE }, +#endif + .up = FALSE, + .busstate = DHD_BUS_DOWN + } +}; +#define DHD_INFO_NULL (&dhd_info_null) +#define DHD_PUB_NULL (&dhd_info_null.pub) + +/* Dummy netdevice object */ +struct net_device dhd_net_dev_null = { + .reg_state = NETREG_UNREGISTERED +}; +#define DHD_NET_DEV_NULL (&dhd_net_dev_null) + +/* Dummy dhd_if object */ +dhd_if_t dhd_if_null = { +#ifdef WMF + .wmf = { .wmf_enable = TRUE }, +#endif + .info = DHD_INFO_NULL, + .net = DHD_NET_DEV_NULL, + .idx = DHD_BAD_IF +}; +#define DHD_IF_NULL (&dhd_if_null) + +/* XXX should we use the sta_pool[0] object as DHD_STA_NULL? */ +#define DHD_STA_NULL ((dhd_sta_t *)NULL) + +/** Interface STA list management. */ + +/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */ +static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta); +static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp); + +/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */ +static void dhd_if_del_sta_list(dhd_if_t * ifp); + +/* Construct/Destruct a sta pool. */ +static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); +static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); + +/** Reset a dhd_sta object and free into the dhd pool. */ +static void +dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) +{ + int prio; + + ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + /* + * Flush and free all packets in all flowring's queues belonging to sta. + * Packets in flow ring will be flushed later. + */ + for (prio = 0; prio < (int)NUMPRIO; prio++) { + uint16 flowid = sta->flowid[prio]; + + if (flowid != FLOWID_INVALID) { + unsigned long flags; + flow_ring_node_t * flow_ring_node; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + if (flow_ring_node) { + flow_queue_t *queue = &flow_ring_node->queue; + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; + + if (!DHD_FLOW_QUEUE_EMPTY(queue)) { + void * pkt; + while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != + NULL) { + PKTFREE(dhdp->osh, pkt, TRUE); + } + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + } + } + + sta->flowid[prio] = FLOWID_INVALID; + } + + id16_map_free(dhdp->staid_allocator, sta->idx); + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ + sta->ifidx = DHD_BAD_IF; + bzero(sta->ea.octet, ETHER_ADDR_LEN); + INIT_LIST_HEAD(&sta->list); + sta->idx = ID16_INVALID; /* implying free */ +} + +/** Allocate a dhd_sta object from the dhd pool. 
*/ +static dhd_sta_t * +dhd_sta_alloc(dhd_pub_t * dhdp) +{ + uint16 idx; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + idx = id16_map_alloc(dhdp->staid_allocator); + if (idx == ID16_INVALID) { + DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); + sta = &sta_pool[idx]; + + ASSERT((sta->idx == ID16_INVALID) && + (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); + + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + + sta->idx = idx; /* implying allocated */ + + return sta; +} + +/** Delete all STAs in an interface's STA list. */ +static void +dhd_if_del_sta_list(dhd_if_t *ifp) +{ + dhd_sta_t *sta, *next; + unsigned long flags; + + DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + GCC_DIAGNOSTIC_POP(); + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + + DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags); + + return; +} + +/** Construct a pool of dhd_sta_t objects to be used by interfaces. */ +static int +dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void * staid_allocator; + + ASSERT(dhdp != (dhd_pub_t *)NULL); + ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ + staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); + if (staid_allocator == NULL) { + DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Pre allocate a pool of dhd_sta objects (one extra). */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */ + sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); + if (sta_pool == NULL) { + DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__)); + id16_map_fini(dhdp->osh, staid_allocator); + return BCME_ERROR; + } + + dhdp->sta_pool = sta_pool; + dhdp->staid_allocator = staid_allocator; + + /* Initialize all sta(s) for the pre-allocated free pool. */ + bzero((uchar *)sta_pool, sta_pool_memsz); + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } + + return BCME_OK; +} + +/** Destruct the pool of dhd_sta_t objects. + * Caller must ensure that no STA objects are currently associated with an if. 
+ */ +static void +dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) +{ + dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + + if (sta_pool) { + int idx; + int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + for (idx = 1; idx <= max_sta; idx++) { + ASSERT(sta_pool[idx].ifp == DHD_IF_NULL); + ASSERT(sta_pool[idx].idx == ID16_INVALID); + } + MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); + } + + id16_map_fini(dhdp->osh, dhdp->staid_allocator); + dhdp->staid_allocator = NULL; +} + +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void +dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void *staid_allocator; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + staid_allocator = dhdp->staid_allocator; + + if (!sta_pool) { + DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__)); + return; + } + + if (!staid_allocator) { + DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__)); + return; + } + + /* clear free pool */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + bzero((uchar *)sta_pool, sta_pool_memsz); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ + id16_map_clear(staid_allocator, max_sta, 1); + + /* Initialize all sta(s) for the pre-allocated free pool. */ + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } +} + +/** Find STA with MAC address ea in an interface's STA list. */ +dhd_sta_t * +dhd_find_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry(sta, &ifp->sta_list, list) { + GCC_DIAGNOSTIC_POP(); + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_INFO(("%s: Found STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG((char *)ea))); + DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags); + return sta; + } + } + + DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags); + + return DHD_STA_NULL; +} + +/** Add STA into the interface's STA list. 
*/ +dhd_sta_t * +dhd_add_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea)); + return DHD_STA_NULL; + } + + sta = dhd_sta_alloc((dhd_pub_t *)pub); + if (sta == DHD_STA_NULL) { + DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); + + /* link the sta and the dhd interface */ + sta->ifp = ifp; + sta->ifidx = ifidx; +#ifdef DHD_WMF + sta->psta_prim = NULL; +#endif + INIT_LIST_HEAD(&sta->list); + + DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags); + + list_add_tail(&sta->list, &ifp->sta_list); + + DHD_ERROR(("%s: Adding STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG((char *)ea))); + + DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags); + + return sta; +} + +/** Delete all STAs from the interface's STA list. */ +void +dhd_del_all_sta(void *pub, int ifidx) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + GCC_DIAGNOSTIC_POP(); + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, + ifp->phnd_arp_table, FALSE, + sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + } + + DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags); + + return; +} + +/** Delete STA from the interface's STA list. */ +void +dhd_del_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + GCC_DIAGNOSTIC_POP(); + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: Deleting STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(sta->ea.octet))); + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + } + + DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, + ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + return; +} + +/** Add STA if it doesn't exist. Not reentrant. 
*/ +dhd_sta_t* +dhd_findadd_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + + sta = dhd_find_sta(pub, ifidx, ea); + + if (!sta) { + /* Add entry */ + sta = dhd_add_sta(pub, ifidx, ea); + } + + return sta; +} + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * +dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) +{ + unsigned long flags; + dhd_sta_t *sta, *snapshot; + + INIT_LIST_HEAD(snapshot_list); + + DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags); + + list_for_each_entry(sta, &ifp->sta_list, list) { + /* allocate one and add to snapshot */ + snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); + if (snapshot == NULL) { + DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); + continue; + } + + memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); + + INIT_LIST_HEAD(&snapshot->list); + list_add_tail(&snapshot->list, snapshot_list); + } + + DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags); + + return snapshot_list; +} + +static void +dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) +{ + dhd_sta_t *sta, *next; + + list_for_each_entry_safe(sta, next, snapshot_list, list) { + list_del(&sta->list); + MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); + } +} +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else +static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} +static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } +static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} +static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} +dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } +dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; } +void dhd_del_sta(void *pub, int ifidx, void *ea) {} +#endif /* PCIE_FULL_DONGLE */ + +#ifdef BCM_ROUTER_DHD +/** Bind a flowid to the dhd_sta's flowid table. */ +void +dhd_add_flowid(dhd_pub_t * dhdp, int ifidx, uint8 ac_prio, void * ea, + uint16 flowid) +{ + int prio; + dhd_if_t * ifp; + dhd_sta_t * sta; + flow_queue_t * queue; + + ASSERT((dhdp != (dhd_pub_t *)NULL) && (ea != NULL)); + + /* Fetch the dhd_if object given the if index */ + ifp = dhd_get_ifp(dhdp, ifidx); + if (ifp == (dhd_if_t *)NULL) /* ifp fetched from dhdp iflist[] */ + return; + + /* Intializing the backup queue parameters */ + if (DHD_IF_ROLE_WDS(dhdp, ifidx) || +#ifdef DHD_WET + WET_ENABLED(dhdp) || +#endif /* DHD_WET */ + 0) { + queue = dhd_flow_queue(dhdp, flowid); + dhd_flow_ring_config_thresholds(dhdp, flowid, + dhd_queue_budget, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue), + dhd_if_threshold, (void *)&ifp->cumm_ctr); + return; + } else if ((sta = dhd_find_sta(dhdp, ifidx, ea)) == DHD_STA_NULL) { + /* Fetch the station with a matching Mac address. */ + /* Update queue's grandparent cummulative length threshold */ + if (ETHER_ISMULTI((char *)ea)) { + queue = dhd_flow_queue(dhdp, flowid); + if (ifidx != 0 && DHD_IF_ROLE_STA(dhdp, ifidx)) { + /* Use default dhdp->cumm_ctr and dhdp->l2cumm_ctr, + * in PSTA mode the ifp will be deleted but we don't delete + * the PSTA flowring. 
+				 */
+				dhd_flow_ring_config_thresholds(dhdp, flowid,
+					queue->max, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
+					dhd_if_threshold, DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+			}
+			else if (DHD_FLOW_QUEUE_L2CLEN_PTR(queue) != (void *)&ifp->cumm_ctr) {
+				dhd_flow_ring_config_thresholds(dhdp, flowid,
+					queue->max, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
+					dhd_if_threshold, (void *)&ifp->cumm_ctr);
+			}
+		}
+		return;
+	}
+
+	/* Set queue's min budget and queue's parent cumulative length threshold */
+	dhd_flow_ring_config_thresholds(dhdp, flowid, dhd_queue_budget,
+		dhd_sta_threshold, (void *)&sta->cumm_ctr,
+		dhd_if_threshold, (void *)&ifp->cumm_ctr);
+
+	/* Populate the flowid into the station's flowid table, for all packet
+	 * priorities that would match the given flow's ac priority.
+	 */
+	for (prio = 0; prio < (int)NUMPRIO; prio++) {
+		if (dhdp->flow_prio_map[prio] == ac_prio) {
+			/* flowring shared for all these pkt prio */
+			sta->flowid[prio] = flowid;
+		}
+	}
+}
+
+/** Unbind a flowid from the sta's flowid table. */
+void
+dhd_del_flowid(dhd_pub_t * dhdp, int ifidx, uint16 flowid)
+{
+	int prio;
+	dhd_if_t * ifp;
+	dhd_sta_t * sta;
+	unsigned long flags;
+
+	/* Fetch the dhd_if object given the if index */
+	ifp = dhd_get_ifp(dhdp, ifidx);
+	if (ifp == (dhd_if_t *)NULL) /* ifp fetched from dhdp iflist[] */
+		return;
+
+	/* Walk all stations and clear any station's reference to the flowid */
+	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+
+	list_for_each_entry(sta, &ifp->sta_list, list) {
+		for (prio = 0; prio < (int)NUMPRIO; prio++) {
+			if (sta->flowid[prio] == flowid) {
+				sta->flowid[prio] = FLOWID_INVALID;
+			}
+		}
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+}
+#endif /* BCM_ROUTER_DHD */
+
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+void
+dhd_axi_error_dispatch(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	schedule_work(&dhd->axi_error_dispatcher_work);
+}
+
+static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
+{
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, axi_error_dispatcher_work);
+	dhd_axi_error(&dhd->pub);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
+/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+	dhd_if_t *ifp;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	ASSERT(bssidx < DHD_MAX_IFS);
+	ASSERT(dhdp);
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ifp = dhd->iflist[i];
+		if (ifp && (ifp->bssidx == bssidx)) {
+			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+				ifp->name, bssidx, i));
+			break;
+		}
+	}
+	return i;
+}
+
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+
+	if (!skb) {
+		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+		return BCME_ERROR;
+	}
+
+	dhd_os_rxflock(dhdp);
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	if (dhdp->skbbuf[store_idx] != NULL) {
+		/* Make sure the previous packets are processed */
+		dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
+		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
+		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		/* removed msleep here, should use wait_event_timeout if we
+		 * want to give rx frame thread a chance to run
+		 */
+#if defined(WAIT_DEQUEUE)
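+		/* Illustrative note on the ring arithmetic used by this queue
+		 * (numbers are examples, not from a real trace): skbbuf[] is a
+		 * MAXSKBPEND-entry ring, so MAXSKBPEND must be a power of two
+		 * for the wrap
+		 *
+		 *   next_idx = (idx + 1) & (MAXSKBPEND - 1);
+		 *
+		 * to be correct. With MAXSKBPEND == 32: 30 -> 31 -> 0 -> 1.
+		 * The ring is full when skbbuf[store_idx] is still occupied,
+		 * which is the branch being handled here.
+		 */
+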
OSL_SLEEP(1);
#endif
+		return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+	}
+	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+	dhdp->skbbuf[store_idx] = skb;
+	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+	dhd_os_rxfunlock(dhdp);
+
+	return BCME_OK;
+}
+
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+	void *skb;
+
+	dhd_os_rxflock(dhdp);
+
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	skb = dhdp->skbbuf[sent_idx];
+
+	if (skb == NULL) {
+		dhd_os_rxfunlock(dhdp);
+		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+			store_idx, sent_idx));
+		return NULL;
+	}
+
+	dhdp->skbbuf[sent_idx] = NULL;
+	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
+
+	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+		skb, sent_idx));
+
+	dhd_os_rxfunlock(dhdp);
+
+	return skb;
+}
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+{
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	uint chipid = dhd_bus_chip_id(dhdp);
+	int ret = BCME_OK;
+	if (prepost) { /* pre process */
+		ret = dhd_alloc_cis(dhdp);
+		if (ret != BCME_OK) {
+			return ret;
+		}
+		switch (chipid) {
+#ifndef DHD_READ_CIS_FROM_BP
+		case BCM4389_CHIP_GRPID:
+			/* BCM4389B0 and later revisions use the new otp iovar */
+			dhd_read_otp_sw_rgn(dhdp);
+			break;
+#endif /* !DHD_READ_CIS_FROM_BP */
+		default:
+			dhd_read_cis(dhdp);
+			break;
+		}
+		dhd_check_module_cid(dhdp);
+		dhd_check_module_mac(dhdp);
+		dhd_set_macaddr_from_file(dhdp);
+	} else { /* post process */
+		dhd_write_macaddr(&dhdp->mac);
+		dhd_clear_cis(dhdp);
+	}
+#endif
+
+	return BCME_OK;
+}
+
+// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
+#if defined(PKT_FILTER_SUPPORT)
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
+{
+	bool _apply = FALSE;
+	/* In case of IBSS mode, apply arp pkt filter */
+	if (op_mode_param & DHD_FLAG_IBSS_MODE) {
+		_apply = TRUE;
+		goto exit;
+	}
+	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+	if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
+		_apply = TRUE;
+		goto exit;
+	}
+
+exit:
+	return _apply;
+}
+#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+void
+dhd_set_packet_filter(dhd_pub_t *dhd)
+{
+	int i;
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+	if (dhd_pkt_filter_enable) {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+		}
+	}
+}
+
+void
+dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+{
+	int i;
+
+	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
+	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value &&
+			!dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) {
+		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
+		return;
+	}
+	/* 1 - Enable packet filter, only allow unicast packet to send up */
+	/* 0 - Disable packet filter */
+	if (dhd_pkt_filter_enable && (!value ||
+			(dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) ||
+			dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)))
+	{
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+			if (value && (i == DHD_ARP_FILTER_NUM) &&
+					!_turn_on_arp_filter(dhd, dhd->op_mode)) {
+				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
+					"val %d, cnt 
%d, op_mode 0x%x\n", + value, i, dhd->op_mode)); + continue; + } +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ +#ifdef APSTA_BLOCK_ARP_DURING_DHCP + if (value && (i == DHD_BROADCAST_ARP_FILTER_NUM) && + dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM]) { + /* XXX: BROADCAST_ARP_FILTER is only for the + * STA/SoftAP concurrent mode (Please refer to RB:90348) + * Remove the filter for other cases explicitly + */ + DHD_ERROR(("%s: Remove the DHD_BROADCAST_ARP_FILTER\n", + __FUNCTION__)); + dhd_packet_filter_add_remove(dhd, FALSE, + DHD_BROADCAST_ARP_FILTER_NUM); + } +#endif /* APSTA_BLOCK_ARP_DURING_DHCP */ + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + value, dhd_master_mode); + } + } +} + +int +dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num) +{ + char *filterp = NULL; + int filter_id = 0; + + switch (num) { + case DHD_BROADCAST_FILTER_NUM: + filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + filter_id = 101; + break; + case DHD_MULTICAST4_FILTER_NUM: + filter_id = 102; + if (FW_SUPPORTED((dhdp), pf6)) { + if (dhdp->pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + if (!add_remove) { + filterp = DISCARD_IPV4_MCAST; + add_remove = 1; + break; + } + } /* XXX: intend omitting else case */ + filterp = "102 0 0 0 0xFFFFFF 0x01005E"; + break; + case DHD_MULTICAST6_FILTER_NUM: + filter_id = 103; + if (FW_SUPPORTED((dhdp), pf6)) { + if (dhdp->pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + if (!add_remove) { + filterp = DISCARD_IPV6_MCAST; + add_remove = 1; + break; + } + } /* XXX: intend omitting else case */ + filterp = "103 0 0 0 0xFFFF 0x3333"; + break; + case DHD_MDNS_FILTER_NUM: + filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; + filter_id = 104; + break; + case DHD_ARP_FILTER_NUM: + filterp = "105 0 0 12 0xFFFF 0x0806"; + filter_id = 105; + break; + case DHD_BROADCAST_ARP_FILTER_NUM: + filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806" + " 0xFFFFFFFFFFFF0000000000000806"; + filter_id = 106; + break; + default: + return -EINVAL; + } + + /* Add filter */ + if (add_remove) { + dhdp->pktfilter[num] = filterp; + dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]); + } else { /* Delete filter */ + if (dhdp->pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + } + + return 0; +} +#endif /* PKT_FILTER_SUPPORT */ + +static int dhd_set_suspend(int value, dhd_pub_t *dhd) +{ +#ifndef SUPPORT_PM2_ONLY + int power_mode = PM_MAX; +#endif /* SUPPORT_PM2_ONLY */ + /* wl_pkt_filter_enable_t enable_parm; */ + int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */ + int ret = 0; +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + int roam_time_thresh = 0; /* (ms) */ +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + uint roamvar = 1; +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + int bcn_li_bcn = 1; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint nd_ra_filter = 0; +#ifdef ENABLE_IPMCAST_FILTER + int ipmcast_l2filter; +#endif /* ENABLE_IPMCAST_FILTER */ +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ +#endif /* DHD_USE_EARLYSUSPEND */ +#ifdef PASS_ALL_MCAST_PKTS + struct dhd_info *dhdinfo; + uint32 allmulti; + uint i; +#endif /* PASS_ALL_MCAST_PKTS */ +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + 
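+/* The suspend/resume paths below all follow one iovar pattern: an integer
+ * value set by name through dhd_iovar(). A minimal sketch of that call shape
+ * (illustrative only; "roam_off" is just one of the iovars used below):
+ *
+ *   int val = 1;
+ *   int err = dhd_iovar(dhd, 0, "roam_off", (char *)&val, sizeof(val),
+ *           NULL, 0, TRUE);
+ *   if (err < 0)
+ *       DHD_ERROR(("roam_off failed %d\n", err));
+ *
+ * ifidx 0 targets the primary interface; the trailing TRUE marks a set.
+ */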
int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH */ +#endif /* DYNAMIC_SWOOB_DURATION */ + +#if defined(DHD_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND) + /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND in suspend, otherwise CUSTOM_BCN_TIMEOUT */ + int bcn_timeout = CUSTOM_BCN_TIMEOUT; +#endif /* DHD_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */ +#if defined(OEM_ANDROID) && defined(BCMPCIE) + int lpas = 0; + int dtim_period = 0; + int bcn_interval = 0; + int bcn_to_dly = 0; +#endif /* OEM_ANDROID && BCMPCIE */ + + if (!dhd) + return -ENODEV; + +#ifdef PASS_ALL_MCAST_PKTS + dhdinfo = dhd->info; +#endif /* PASS_ALL_MCAST_PKTS */ + + DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n", + __FUNCTION__, value, dhd->in_suspend)); + + dhd_suspend_lock(dhd); + +#ifdef CUSTOM_SET_CPUCORE + DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value)); + /* set specific cpucore */ + dhd_set_cpucore(dhd, TRUE); +#endif /* CUSTOM_SET_CPUCORE */ + if (dhd->up) { + if (value && dhd->in_suspend) { + dhd->early_suspended = 1; + /* Kernel suspended */ + DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__)); + +#ifndef SUPPORT_PM2_ONLY + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter, + * only allow unicast packet to send up + */ + dhd_enable_packet_filter(1, dhd); +#ifdef APF + dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd)); +#endif /* APF */ +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arpoe_enable) { + dhd_arp_offload_enable(dhd, TRUE); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 0; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) { + ret = dhd_iovar(dhd, i, "allmulti", + (char *)&allmulti, + sizeof(allmulti), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s allmulti failed %d\n", + __FUNCTION__, ret)); + } + } + } +#endif /* PASS_ALL_MCAST_PKTS */ + + /* If DTIM skip is set up as default, force it to wake + * each third DTIM for better power savings. Note that + * one side effect is a chance to miss BC/MC packet. + */ +#ifdef WLTDLS + /* Do not set bcn_li_ditm on WFD mode */ + if (dhd->tdls_mode) { + bcn_li_dtim = 0; + } else +#endif /* WLTDLS */ +#if defined(OEM_ANDROID) && defined(BCMPCIE) + bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period, + &bcn_interval); + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_li_dtim failed %d\n", + __FUNCTION__, ret)); + } + if ((bcn_li_dtim * dtim_period * bcn_interval) >= + MIN_DTIM_FOR_ROAM_THRES_EXTEND) { + /* + * Increase max roaming threshold from 2 secs to 8 secs + * the real roam threshold is MIN(max_roam_threshold, + * bcn_timeout/2) + */ + lpas = 1; + ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), + NULL, 0, TRUE); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s lpas, UNSUPPORTED\n", + __FUNCTION__)); + } else { + DHD_ERROR(("%s set lpas failed %d\n", + __FUNCTION__, ret)); + } + } + bcn_to_dly = 1; + /* + * if bcn_to_dly is 1, the real roam threshold is + * MIN(max_roam_threshold, bcn_timeout -1); + * notify link down event after roaming procedure complete + * if we hit bcn_timeout while we are in roaming progress. 
+ */ + ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly, + sizeof(bcn_to_dly), NULL, 0, TRUE); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s bcn_to_dly, UNSUPPORTED\n", + __FUNCTION__)); + } else { + DHD_ERROR(("%s set bcn_to_dly failed %d\n", + __FUNCTION__, ret)); + } + } + } +#else + bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd); + if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0) + DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); +#endif /* OEM_ANDROID && BCMPCIE */ + +#ifdef DHD_USE_EARLYSUSPEND +#ifdef DHD_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND; + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__, + ret)); + } +#endif /* DHD_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND; + ret = dhd_iovar(dhd, 0, "roam_time_thresh", + (char *)&roam_time_thresh, + sizeof(roam_time_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s roam_time_thresh failed %d\n", + __FUNCTION__, ret)); + } +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + /* Disable firmware roaming during suspend */ + ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, + sizeof(roamvar), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s roam_off failed %d\n", + __FUNCTION__, ret)); + } +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + if (bcn_li_dtim) { + bcn_li_bcn = 0; + } + ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, + sizeof(bcn_li_bcn), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret)); + } +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ +#if defined(WL_CFG80211) && defined(WL_BCNRECV) + ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd)); + if (ret != BCME_OK) { + DHD_ERROR(("failed to stop beacon recv event on" + " suspend state (%d)\n", ret)); + } +#endif /* WL_CFG80211 && WL_BCNRECV */ +#ifdef NDO_CONFIG_SUPPORT + if (dhd->ndo_enable) { + if (!dhd->ndo_host_ip_overflow) { + /* enable ND offload on suspend */ + ret = dhd_ndo_enable(dhd, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: failed to enable NDO\n", + __FUNCTION__)); + } + } else { + DHD_INFO(("%s: NDO disabled on suspend due to" + "HW capacity\n", __FUNCTION__)); + } + } +#endif /* NDO_CONFIG_SUPPORT */ +#ifndef APF + if (FW_SUPPORTED(dhd, ndoe)) +#else + if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) +#endif /* APF */ + { + /* enable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 1; + ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable", + (char *)&nd_ra_filter, sizeof(nd_ra_filter), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } + dhd_os_suppress_logging(dhd, TRUE); +#ifdef ENABLE_IPMCAST_FILTER + ipmcast_l2filter = 1; + ret = dhd_iovar(dhd, 0, "ipmcast_l2filter", + (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret)); + } +#endif /* ENABLE_IPMCAST_FILTER */ +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = CUSTOM_INTR_WIDTH; + ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width, + sizeof(intr_width), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#ifdef CUSTOM_EVENT_PM_WAKE + 
pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4; + ret = dhd_iovar(dhd, 0, "const_awake_thresh", + (char *)&pm_awake_thresh, + sizeof(pm_awake_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", + __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef CONFIG_SILENT_ROAM + if (!dhd->sroamed) { + ret = dhd_sroam_set_mon(dhd, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set sroam failed %d\n", + __FUNCTION__, ret)); + } + } + dhd->sroamed = FALSE; +#endif /* CONFIG_SILENT_ROAM */ +#endif /* DHD_USE_EARLYSUSPEND */ + } else { + dhd->early_suspended = 0; + /* Kernel resumed */ + DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__)); +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = 0; + ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width, + sizeof(intr_width), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#ifndef SUPPORT_PM2_ONLY + power_mode = PM_FAST; + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ +#if defined(WL_CFG80211) && defined(WL_BCNRECV) + ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd)); + if (ret != BCME_OK) { + DHD_ERROR(("failed to resume beacon recv state (%d)\n", + ret)); + } +#endif /* WL_CF80211 && WL_BCNRECV */ +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arpoe_enable) { + dhd_arp_offload_enable(dhd, FALSE); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef PKT_FILTER_SUPPORT + /* disable pkt filter */ + dhd_enable_packet_filter(0, dhd); +#ifdef APF + dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd)); +#endif /* APF */ +#endif /* PKT_FILTER_SUPPORT */ +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 1; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + ret = dhd_iovar(dhd, i, "allmulti", + (char *)&allmulti, + sizeof(allmulti), NULL, + 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: allmulti failed:%d\n", + __FUNCTION__, ret)); + } + } +#endif /* PASS_ALL_MCAST_PKTS */ +#if defined(OEM_ANDROID) && defined(BCMPCIE) + /* restore pre-suspend setting */ + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_li_ditm failed:%d\n", + __FUNCTION__, ret)); + } + ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, + 0, TRUE); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s lpas, UNSUPPORTED\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s set lpas failed %d\n", + __FUNCTION__, ret)); + } + } + ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly, + sizeof(bcn_to_dly), NULL, 0, TRUE); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s bcn_to_dly UNSUPPORTED\n", + __FUNCTION__)); + } else { + DHD_ERROR(("%s set bcn_to_dly failed %d\n", + __FUNCTION__, ret)); + } + } +#else + /* restore pre-suspend setting for dtim_skip */ + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret)); + } +#endif /* OEM_ANDROID && BCMPCIE */ +#ifdef DHD_USE_EARLYSUSPEND +#ifdef DHD_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT; + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_timeout failed:%d\n", + __FUNCTION__, ret)); + } +#endif /* DHD_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef 
CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = 2000; + ret = dhd_iovar(dhd, 0, "roam_time_thresh", + (char *)&roam_time_thresh, + sizeof(roam_time_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:roam_time_thresh failed:%d\n", + __FUNCTION__, ret)); + } + +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + roamvar = dhd_roam_disable; + ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, + sizeof(roamvar), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret)); + } +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, + sizeof(bcn_li_bcn), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: bcn_li_bcn failed:%d\n", + __FUNCTION__, ret)); + } +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ +#ifdef NDO_CONFIG_SUPPORT + if (dhd->ndo_enable) { + /* Disable ND offload on resume */ + ret = dhd_ndo_enable(dhd, FALSE); + if (ret < 0) { + DHD_ERROR(("%s: failed to disable NDO\n", + __FUNCTION__)); + } + } +#endif /* NDO_CONFIG_SUPPORT */ +#ifndef APF + if (FW_SUPPORTED(dhd, ndoe)) +#else + if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) +#endif /* APF */ + { + /* disable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 0; + ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable", + (char *)&nd_ra_filter, sizeof(nd_ra_filter), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } + } + dhd_os_suppress_logging(dhd, FALSE); +#ifdef ENABLE_IPMCAST_FILTER + ipmcast_l2filter = 0; + ret = dhd_iovar(dhd, 0, "ipmcast_l2filter", + (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret)); + } +#endif /* ENABLE_IPMCAST_FILTER */ +#ifdef CUSTOM_EVENT_PM_WAKE + ret = dhd_iovar(dhd, 0, "const_awake_thresh", + (char *)&pm_awake_thresh, + sizeof(pm_awake_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", + __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef CONFIG_SILENT_ROAM + ret = dhd_sroam_set_mon(dhd, FALSE); + if (ret < 0) { + DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret)); + } +#endif /* CONFIG_SILENT_ROAM */ +#endif /* DHD_USE_EARLYSUSPEND */ + } + } + dhd_suspend_unlock(dhd); + + return 0; +} + +static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force) +{ + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_OS_WAKE_LOCK(dhdp); + + /* Set flag when early suspend was called */ + dhdp->in_suspend = val; + if ((force || !dhdp->suspend_disable_flag) && + (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND))) + { + ret = dhd_set_suspend(val, dhdp); + } + + DHD_OS_WAKE_UNLOCK(dhdp); + return ret; +} + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +static void dhd_early_suspend(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd && (dhd->pub.conf->suspend_mode == EARLY_SUSPEND || + dhd->pub.conf->suspend_mode == SUSPEND_MODE_2)) { + dhd_suspend_resume_helper(dhd, 1, 0); + if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND) + dhd_conf_set_suspend_resume(&dhd->pub, 1); + } +} + +static void dhd_late_resume(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd && 
(dhd->pub.conf->suspend_mode == EARLY_SUSPEND || + dhd->pub.conf->suspend_mode == SUSPEND_MODE_2)) { + dhd_conf_set_suspend_resume(&dhd->pub, 0); + if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND) + dhd_suspend_resume_helper(dhd, 0, 0); + } +} +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +/* + * Generalized timeout mechanism. Uses spin sleep with exponential back-off until + * the sleep time reaches one jiffy, then switches over to task delay. Usage: + * + * dhd_timeout_start(&tmo, usec); + * while (!dhd_timeout_expired(&tmo)) + * if (poll_something()) + * break; + * if (dhd_timeout_expired(&tmo)) + * fatal(); + */ + +void +dhd_timeout_start(dhd_timeout_t *tmo, uint usec) +{ +#ifdef BCMQT + tmo->limit = usec * htclkratio; +#else + tmo->limit = usec; +#endif + tmo->increment = 0; + tmo->elapsed = 0; + tmo->tick = 10 * USEC_PER_MSEC; /* 10 msec */ +} + +int +dhd_timeout_expired(dhd_timeout_t *tmo) +{ + /* Does nothing the first call */ + if (tmo->increment == 0) { + tmo->increment = USEC_PER_MSEC; /* Start with 1 msec */ + return 0; + } + + if (tmo->elapsed >= tmo->limit) + return 1; + + DHD_INFO(("%s: CAN_SLEEP():%d tmo->increment=%ld msec\n", + __FUNCTION__, CAN_SLEEP(), tmo->increment / USEC_PER_MSEC)); + + CAN_SLEEP() ? OSL_SLEEP(tmo->increment / USEC_PER_MSEC) : OSL_DELAY(tmo->increment); + + /* Till tmo->tick, the delay will be in 2x, after that delay will be constant + * tmo->tick (10 msec), till timer elapses. + */ + tmo->increment = (tmo->increment >= tmo->tick) ? tmo->tick : (tmo->increment * 2); + + /* Add the delay that's about to take place */ +#ifdef BCMQT + tmo->elapsed += tmo->increment * htclkratio; +#else + tmo->elapsed += tmo->increment; +#endif + + return 0; +} + +int +dhd_net2idx(dhd_info_t *dhd, struct net_device *net) +{ + int i = 0; + + if (!dhd) { + DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__)); + return DHD_BAD_IF; + } + + while (i < DHD_MAX_IFS) { + if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net)) + return i; + i++; + } + + return DHD_BAD_IF; +} + +struct net_device * dhd_idx2net(void *pub, int ifidx) +{ + struct dhd_pub *dhd_pub = (struct dhd_pub *)pub; + struct dhd_info *dhd_info; + + if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS) + return NULL; + dhd_info = dhd_pub->info; + if (dhd_info && dhd_info->iflist[ifidx]) + return dhd_info->iflist[ifidx]->net; + return NULL; +} + +int +dhd_ifname2idx(dhd_info_t *dhd, char *name) +{ + int i = DHD_MAX_IFS; + + ASSERT(dhd); + + if (name == NULL || *name == '\0') + return 0; + + while (--i > 0) + if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ)) + break; + + DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); + + return i; /* default - the primary interface */ +} + +char * +dhd_ifname(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + ASSERT(dhd); + + if (ifidx < 0 || ifidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx]->net) + return dhd->iflist[ifidx]->net->name; + + return ""; +} + +uint8 * +dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx) +{ + int i; + dhd_info_t *dhd = (dhd_info_t *)dhdp; + + ASSERT(dhd); + for (i = 0; i < DHD_MAX_IFS; i++) + if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx) + return dhd->iflist[i]->mac_addr; + + return NULL; +} + +static void +_dhd_set_multicast_list(dhd_info_t *dhd, 
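+/* Worked example for the dhd_timeout helpers above (values derived from the
+ * code, not measured): dhd_timeout_expired() starts increment at 1 ms and
+ * doubles it each pass, so successive waits run 1, 2, 4, 8, 16 ms, after
+ * which increment is clamped to tmo->tick and stays at 10 ms until
+ * tmo->limit is consumed. CAN_SLEEP() picks OSL_SLEEP() (scheduled sleep)
+ * when sleeping is allowed and OSL_DELAY() (busy-wait) in atomic context,
+ * so the same polling loop works from both.
+ */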
int ifidx) +{ + struct net_device *dev; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *mclist; +#endif + uint32 allmulti, cnt; + + wl_ioctl_t ioc; + char *buf, *bufp; + uint buflen; + int ret; + +#ifdef MCAST_LIST_ACCUMULATION + int i; + uint32 cnt_iface[DHD_MAX_IFS]; + cnt = 0; + allmulti = 0; + + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + dev = dhd->iflist[i]->net; + if (!dev) + continue; + netif_addr_lock_bh(dev); + cnt_iface[i] = netdev_mc_count(dev); + cnt += cnt_iface[i]; + netif_addr_unlock_bh(dev); + + /* Determine initial value of allmulti flag */ + allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; + } + } +#else /* !MCAST_LIST_ACCUMULATION */ + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx)); + return; + } + dev = dhd->iflist[ifidx]->net; + if (!dev) + return; + netif_addr_lock_bh(dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + cnt = netdev_mc_count(dev); +#else + cnt = dev->mc_count; +#endif /* LINUX_VERSION_CODE */ + + netif_addr_unlock_bh(dev); + + /* Determine initial value of allmulti flag */ + allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; +#endif /* MCAST_LIST_ACCUMULATION */ + +#ifdef PASS_ALL_MCAST_PKTS +#ifdef PKT_FILTER_SUPPORT + if (!dhd->pub.early_suspended) +#endif /* PKT_FILTER_SUPPORT */ + allmulti = TRUE; +#endif /* PASS_ALL_MCAST_PKTS */ + + /* Send down the multicast list first. */ + + /* XXX Not using MAXMULTILIST to avoid including wlc_pub.h; but + * maybe we should? (Or should that be in wlioctl.h instead?) + */ + + buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); + if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + return; + } + + strlcpy(bufp, "mcast_list", buflen); + bufp += strlen("mcast_list") + 1; + + cnt = htol32(cnt); + memcpy(bufp, &cnt, sizeof(cnt)); + bufp += sizeof(cnt); + +#ifdef MCAST_LIST_ACCUMULATION + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i)); + dev = dhd->iflist[i]->net; + + netif_addr_lock_bh(dev); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + netdev_for_each_mc_addr(ha, dev) { + GCC_DIAGNOSTIC_POP(); + if (!cnt_iface[i]) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + DHD_TRACE(("_dhd_set_multicast_list: cnt " + "%d " MACDBG "\n", + cnt_iface[i], MAC2STRDBG(ha->addr))); + cnt_iface[i]--; + } + netif_addr_unlock_bh(dev); + } + } +#else /* !MCAST_LIST_ACCUMULATION */ + netif_addr_lock_bh(dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + netdev_for_each_mc_addr(ha, dev) { + GCC_DIAGNOSTIC_POP(); + if (!cnt) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + cnt--; + } +#else + for (mclist = dev->mc_list; (mclist && (cnt > 0)); + cnt--, mclist = mclist->next) { + memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + } +#endif /* LINUX_VERSION_CODE */ + netif_addr_unlock_bh(dev); +#endif /* MCAST_LIST_ACCUMULATION */ + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set mcast_list failed, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + allmulti = cnt ? 
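+	/* Shape of the "mcast_list" iovar buffer assembled above (derived
+	 * from this function, shown here for clarity):
+	 *
+	 *   "mcast_list\0" | cnt (LE uint32) | cnt * ETHER_ADDR_LEN bytes
+	 *
+	 * i.e. buflen = sizeof("mcast_list") + sizeof(cnt) +
+	 * cnt * ETHER_ADDR_LEN, exactly the MALLOC size at the top.
+	 */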
TRUE : allmulti; + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Now send the allmulti setting. This is based on the setting in the + * net_device flags, but might be modified above to be turned on if we + * were trying to set some addresses and dongle rejected it... + */ + + allmulti = htol32(allmulti); + ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti, + sizeof(allmulti), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: set allmulti %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } + + /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ + +#ifdef MCAST_LIST_ACCUMULATION + allmulti = 0; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + dev = dhd->iflist[i]->net; + allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE; + } + } +#else + allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE; +#endif /* MCAST_LIST_ACCUMULATION */ + + allmulti = htol32(allmulti); + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_PROMISC; + ioc.buf = &allmulti; + ioc.len = sizeof(allmulti); + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set promisc %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } +} + +int +_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr, bool skip_stop) +{ + int ret; + +#ifdef DHD_NOTIFY_MAC_CHANGED + if (skip_stop) { + WL_MSG(dhd_ifname(&dhd->pub, ifidx), "close dev for mac changing\n"); + dhd->pub.skip_dhd_stop = TRUE; + dev_close(dhd->iflist[ifidx]->net); + } +#endif /* DHD_NOTIFY_MAC_CHANGED */ + + ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr, + ETHER_ADDR_LEN, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: set cur_etheraddr %pM failed ret=%d\n", + dhd_ifname(&dhd->pub, ifidx), addr, ret)); + goto exit; + } else { + memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN); + if (ifidx == 0) + memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN); + WL_MSG(dhd_ifname(&dhd->pub, ifidx), "MACID %pM is overwritten\n", addr); + } + +exit: +#ifdef DHD_NOTIFY_MAC_CHANGED + if (skip_stop) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + dev_open(dhd->iflist[ifidx]->net, NULL); +#else + dev_open(dhd->iflist[ifidx]->net); +#endif + dhd->pub.skip_dhd_stop = FALSE; + WL_MSG(dhd_ifname(&dhd->pub, ifidx), "notify mac changed done\n"); + } +#endif /* DHD_NOTIFY_MAC_CHANGED */ + + return ret; +} + +int dhd_update_rand_mac_addr(dhd_pub_t *dhd) +{ + struct ether_addr mac_addr; + dhd_generate_rand_mac_addr(&mac_addr); + if (_dhd_set_mac_address(dhd->info, 0, mac_addr.octet, TRUE) != 0) { + DHD_ERROR(("randmac setting failed\n")); +#ifdef STA_RANDMAC_ENFORCED + return BCME_BADADDR; +#endif /* STA_RANDMAC_ENFORCED */ + } + return BCME_OK; +} + +#ifdef BCM_ROUTER_DHD +void dhd_update_dpsta_interface_for_sta(dhd_pub_t* dhdp, int ifidx, void* event_data) +{ + struct wl_dpsta_intf_event *dpsta_prim_event = (struct wl_dpsta_intf_event *)event_data; + dhd_if_t *ifp = dhdp->info->iflist[ifidx]; + + if (dpsta_prim_event->intf_type == WL_INTF_DWDS) { + ifp->primsta_dwds = TRUE; + } else { + ifp->primsta_dwds = FALSE; + } +} +#endif /* BCM_ROUTER_DHD */ + +#ifdef DHD_WMF +void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea, + void* event_data) +{ + struct wl_psta_primary_intf_event *psta_prim_event = + (struct wl_psta_primary_intf_event*)event_data; + dhd_sta_t *psta_interface = NULL; + dhd_sta_t *sta = NULL; + uint8 ifindex; + ASSERT(ifname); + ASSERT(psta_prim_event); + 
ASSERT(ea);
+
+	ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname);
+	sta = dhd_find_sta(dhdp, ifindex, ea);
+	if (sta != NULL) {
+		psta_interface = dhd_find_sta(dhdp, ifindex,
+			(void *)(psta_prim_event->prim_ea.octet));
+		if (psta_interface != NULL) {
+			sta->psta_prim = psta_interface;
+		}
+	}
+}
+
+/* Get wmf_psta_disable configuration */
+int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+	return ifp->wmf_psta_disable;
+}
+
+/* Set wmf_psta_disable configuration */
+int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+	ifp->wmf_psta_disable = val;
+	return 0;
+}
+#endif /* DHD_WMF */
+
+#ifdef DHD_PSTA
+/* Get psta/psr configuration */
+int dhd_get_psta_mode(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	return (int)dhd->psta_mode;
+}
+/* Set psta/psr configuration */
+int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd->psta_mode = val;
+	return 0;
+}
+#endif /* DHD_PSTA */
+
+#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
+static void
+dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	if (
+#ifdef DHD_L2_FILTER
+		(ifp->block_ping) ||
+#endif
+#ifdef DHD_WET
+		(dhd->wet_mode) ||
+#endif
+#ifdef DHD_MCAST_REGEN
+		(ifp->mcast_regen_bss_enable) ||
+#endif
+		FALSE) {
+		ifp->rx_pkt_chainable = FALSE;
+	}
+}
+#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
+
+#ifdef DHD_WET
+/* Get wet configuration */
+int dhd_get_wet_mode(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	return (int)dhd->wet_mode;
+}
+
+/* Set wet configuration */
+int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd->wet_mode = val;
+	dhd_update_rx_pkt_chainable_state(dhdp, 0);
+	return 0;
+}
+#endif /* DHD_WET */
+
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+int32 dhd_role_to_nl80211_iftype(int32 role)
+{
+	switch (role) {
+	case WLC_E_IF_ROLE_STA:
+		return NL80211_IFTYPE_STATION;
+	case WLC_E_IF_ROLE_AP:
+		return NL80211_IFTYPE_AP;
+	case WLC_E_IF_ROLE_WDS:
+		return NL80211_IFTYPE_WDS;
+	case WLC_E_IF_ROLE_P2P_GO:
+		return NL80211_IFTYPE_P2P_GO;
+	case WLC_E_IF_ROLE_P2P_CLIENT:
+		return NL80211_IFTYPE_P2P_CLIENT;
+	case WLC_E_IF_ROLE_IBSS:
+	case WLC_E_IF_ROLE_NAN:
+		return NL80211_IFTYPE_ADHOC;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+static void
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_event_t *if_event = event_info;
+	int ifidx, bssidx;
+	int ret = 0;
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	struct wl_if_event_info info;
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+	struct net_device *ndev = NULL;
+#endif
+#else
+	struct net_device *ndev;
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#ifdef DHD_AWDL
+	bool is_awdl_iface = FALSE;
+#endif /* DHD_AWDL */
+
+	BCM_REFERENCE(ret);
+	if (event != DHD_WQ_WORK_IF_ADD) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if
(!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + bssidx = if_event->event.bssidx; + DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx)); + +#ifdef DHD_AWDL + if (if_event->event.opcode == WLC_E_IF_ADD && + if_event->event.role == WLC_E_IF_ROLE_AWDL) { + dhd->pub.awdl_ifidx = ifidx; + is_awdl_iface = TRUE; + } +#endif /* DHD_AWDL */ + +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (if_event->event.ifidx > 0) { + u8 *mac_addr; + bzero(&info, sizeof(info)); + info.ifidx = ifidx; + info.bssidx = bssidx; + info.role = if_event->event.role; + strlcpy(info.name, if_event->name, sizeof(info.name)); + if (is_valid_ether_addr(if_event->mac)) { + mac_addr = if_event->mac; + } else { + mac_addr = NULL; + } + +#ifdef WLEASYMESH + if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net, + &info, mac_addr, if_event->name, true)) == NULL) +#else + if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net, + &info, mac_addr, NULL, true) == NULL) +#endif + { + /* Do the post interface create ops */ + DHD_ERROR(("Post ifcreate ops failed. Returning \n")); + ret = BCME_ERROR; + goto done; + } + } +#else + /* This path is for non-android case */ + /* The interface name in host and in event msg are same */ + /* if name in event msg is used to create dongle if list on host */ + ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name, + if_event->mac, bssidx, TRUE, if_event->name); + if (!ndev) { + DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__)); + ret = BCME_NOMEM; + goto done; + } + + ret = dhd_register_if(&dhd->pub, ifidx, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + goto done; + } + +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + +#ifndef PCIE_FULL_DONGLE + /* Turn on AP isolation in the firmware for interfaces operating in AP mode */ + if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) { + uint32 var_int = 1; + ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int), + NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + } + } +#endif /* PCIE_FULL_DONGLE */ + +done: +#ifdef DHD_AWDL + if (ret != BCME_OK && is_awdl_iface) { + dhd->pub.awdl_ifidx = 0; + } +#endif /* DHD_AWDL */ + + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) + if (dhd->pub.info->iflist[ifidx]) { + dhd_bridge_dev_set(dhd, ifidx, ndev); + } +#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */ + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_ifdel_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx; + dhd_if_event_t *if_event = event_info; + + if (event != DHD_WQ_WORK_IF_DEL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + + ifidx = 
if_event->event.ifidx; + DHD_TRACE(("Removing interface with idx %d\n", ifidx)); +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) + if (dhd->pub.info->iflist[ifidx]) { + dhd_bridge_dev_set(dhd, ifidx, NULL); + } +#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */ + + if (!dhd->pub.info->iflist[ifidx]) { + /* No matching netdev found */ + DHD_ERROR(("Netdev not found! Do nothing.\n")); + goto done; + } +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (if_event->event.ifidx > 0) { + /* Do the post interface del ops */ + if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net, + true, if_event->event.ifidx) != 0) { + DHD_TRACE(("Post ifdel ops failed. Returning \n")); + goto done; + } + } +#else + /* For non-cfg80211 drivers */ + dhd_remove_if(&dhd->pub, ifidx, TRUE); +#ifdef DHD_AWDL + if (if_event->event.opcode == WLC_E_IF_DEL && + if_event->event.role == WLC_E_IF_ROLE_AWDL) { + dhd->pub.awdl_ifidx = 0; + } +#endif /* DHD_AWDL */ + +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + +done: + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +#ifdef DHD_UPDATE_INTF_MAC +static void +dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx; + dhd_if_event_t *if_event = event_info; + + if (event != DHD_WQ_WORK_IF_UPDATE) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx)); + + dhd_op_if_update(&dhd->pub, ifidx); + + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx) +{ + dhd_info_t * dhdinfo = NULL; + dhd_if_t * ifp = NULL; + int ret = 0; + char buf[128]; + + if ((NULL==dhdpub)||(NULL==dhdpub->info)) { + DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__)); + return -1; + } else { + dhdinfo = (dhd_info_t *)dhdpub->info; + ifp = dhdinfo->iflist[ifidx]; + if (NULL==ifp) { + DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__)); + return -2; + } + } + + DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx)); + // Get MAC address + strcpy(buf, "cur_etheraddr"); + ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx); + if (0>ret) { + DHD_ERROR(("Failed to update the MAC address for itf=%s, ret=%d\n", ifp->name, ret)); + // avoid collision + dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1; + // force locally administered address + ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr); + } else { + DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n", + ifp->name, ifp->idx, + (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2], + (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5])); + memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN); + if (dhdinfo->iflist[ifp->idx]->net) { + memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN); + } + } + + return ret; +} +#endif /* DHD_UPDATE_INTF_MAC */ + +static void +dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event) +{ + 
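+	/* Work item handler: applies the MAC address staged by dhd_set_mac_address() from process context (editorial note). */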
dhd_info_t *dhd = handle; + dhd_if_t *ifp = event_info; + + if (event != DHD_WQ_WORK_SET_MAC) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + + // terence 20160907: fix for not able to set mac when wlan0 is down + if (ifp == NULL || !ifp->set_macaddress) { + goto done; + } + if (!dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + ifp->set_macaddress = FALSE; + +#ifdef DHD_NOTIFY_MAC_CHANGED + rtnl_lock(); +#endif /* DHD_NOTIFY_MAC_CHANGED */ + + if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr, TRUE) == 0) + DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__)); + else + DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__)); + +#ifdef DHD_NOTIFY_MAC_CHANGED + rtnl_unlock(); +#endif /* DHD_NOTIFY_MAC_CHANGED */ + +done: + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx = (int)((long int)event_info); + dhd_if_t *ifp = NULL; + + if (event != DHD_WQ_WORK_SET_MCAST_LIST) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + + ifp = dhd->iflist[ifidx]; + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + ifidx = ifp->idx; + +#ifdef MCAST_LIST_ACCUMULATION + ifidx = 0; +#endif /* MCAST_LIST_ACCUMULATION */ + + _dhd_set_multicast_list(dhd, ifidx); + DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx)); + +done: + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static int +dhd_set_mac_address(struct net_device *dev, void *addr) +{ + int ret = 0; + + dhd_info_t *dhd = DHD_DEV_INFO(dev); + struct sockaddr *sa = (struct sockaddr *)addr; + int ifidx; + dhd_if_t *dhdif; +#ifdef WL_STATIC_IF + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif /* WL_STATIC_IF */ + dhd_pub_t *dhdp = &dhd->pub; + + BCM_REFERENCE(ifidx); + + DHD_TRACE(("%s \n", __func__)); + + dhdif = dhd_get_ifp_by_ndev(dhdp, dev); + if (!dhdif) { + return -ENODEV; + } + ifidx = dhdif->idx; + dhd_net_if_lock_local(dhd); + memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN); + dhdif->set_macaddress = TRUE; + dhd_net_if_unlock_local(dhd); + + WL_MSG(dev->name, "macaddr = %pM\n", dhdif->mac_addr); +#ifdef WL_CFG80211 + /* Check wdev->iftype for the role */ + if (wl_cfg80211_macaddr_sync_reqd(dev)) { + /* Supplicant and certain user layer applications expect macaddress to be + * set once the context returns, so set it from the same context + */ +#ifdef WL_STATIC_IF + if (wl_cfg80211_static_if(cfg, dev) && !(dev->flags & IFF_UP)) { + /* In softap case, the macaddress will be applied before interface up + * and hence curether_addr can't be done at this stage (no fw iface + * available). Store the address and return. macaddr will be applied + * from interface create context. 
+ */ + (void)memcpy_s(dev->dev_addr, ETH_ALEN, dhdif->mac_addr, ETH_ALEN); +#ifdef DHD_NOTIFY_MAC_CHANGED +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + dev_open(dev, NULL); +#else + dev_open(dev); +#endif +#endif /* DHD_NOTIFY_MAC_CHANGED */ + return ret; + } +#endif /* WL_STATIC_IF */ + wl_cfg80211_handle_macaddr_change(dev, dhdif->mac_addr); + return _dhd_set_mac_address(dhd, ifidx, dhdif->mac_addr, TRUE); + } +#endif /* WL_CFG80211 */ + + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC, + dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW); + return ret; +} + +static void +dhd_set_multicast_list(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return; + + dhd->iflist[ifidx]->set_multicast = TRUE; + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx), + DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW); + + // terence 20160907: fix for not able to set mac when wlan0 is down + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], + DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW); +} + +#ifdef DHD_UCODE_DOWNLOAD +/* Get ucode path */ +char * +dhd_get_ucode_path(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return dhd->uc_path; +} +#endif /* DHD_UCODE_DOWNLOAD */ + +#ifdef PROP_TXSTATUS +int +dhd_os_wlfc_block(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + ASSERT(di != NULL); + /* terence 20161229: don't do spin lock if proptx not enabled */ + if (disable_proptx) + return 1; +#ifdef BCMDBUS + spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags); +#else + spin_lock_bh(&di->wlfc_spinlock); +#endif /* BCMDBUS */ + return 1; +} + +int +dhd_os_wlfc_unblock(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + + ASSERT(di != NULL); + /* terence 20161229: don't do spin lock if proptx not enabled */ + if (disable_proptx) + return 1; +#ifdef BCMDBUS + spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags); +#else + spin_unlock_bh(&di->wlfc_spinlock); +#endif /* BCMDBUS */ + return 1; +} + +#endif /* PROP_TXSTATUS */ + +#if defined(WL_MONITOR) && defined(BCMSDIO) +static void +dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx); +bool +dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx); +#endif /* WL_MONITOR && BCMSDIO */ + +/* This routine does not support the packet chain feature; currently tested for + * the proxy ARP feature + */ +int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p) +{ + struct sk_buff *skb; + void *skbhead = NULL; + void *skbprev = NULL; + dhd_if_t *ifp; + ASSERT(!PKTISCHAINED(p)); + skb = PKTTONATIVE(dhdp->osh, p); + + ifp = dhdp->info->iflist[ifidx]; + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx(skb); + } else { + if (dhdp->info->rxthread_enabled) { + if (!skbhead) { + skbhead = skb; + } else { + PKTSETNEXT(dhdp->osh, skbprev, skb); + } + skbprev = skb; + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. 
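+			 * (Editorial note: netif_rx_ni() was removed in Linux 5.18, where netif_rx() became safe to call from any context; newer kernels call netif_rx() directly here.)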
+ */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); +#if defined(WL_MONITOR) && defined(BCMSDIO) + if (dhd_monitor_enabled(dhdp, ifidx)) + dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx); + else +#endif /* WL_MONITOR && BCMSDIO */ + netif_rx_ni(skb); + } + } + + if (dhdp->info->rxthread_enabled && skbhead) + dhd_sched_rxf(dhdp, skbhead); + + return BCME_OK; +} + +void +dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata, uint32 pktid, + uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake, bool pkt_log) +{ + struct ether_header *eh; + uint16 ether_type; + uint32 pkthash; + uint8 pkt_type = PKT_TYPE_DATA; + + if (!pktdata || pktlen < ETHER_HDR_LEN) { + return; + } + + eh = (struct ether_header *)pktdata; + ether_type = ntoh16(eh->ether_type); + + /* Check packet type */ + if (dhd_check_ip_prot(pktdata, ether_type)) { + if (dhd_check_dhcp(pktdata)) { + pkt_type = PKT_TYPE_DHCP; + } else if (dhd_check_icmp(pktdata)) { + pkt_type = PKT_TYPE_ICMP; + } else if (dhd_check_dns(pktdata)) { + pkt_type = PKT_TYPE_DNS; + } + } + else if (dhd_check_arp(pktdata, ether_type)) { + pkt_type = PKT_TYPE_ARP; + } + else if (ether_type == ETHER_TYPE_802_1X) { + pkt_type = PKT_TYPE_EAP; + } + +#ifdef DHD_SBN + /* Set UDR based on packet type */ + if (dhd_udr && (pkt_type == PKT_TYPE_DHCP || + pkt_type == PKT_TYPE_DNS || + pkt_type == PKT_TYPE_ARP)) { + *dhd_udr = TRUE; + } +#endif /* DHD_SBN */ + +#ifdef DHD_PKT_LOGGING +#ifdef DHD_SKIP_PKTLOGGING_FOR_DATA_PKTS + if (pkt_type != PKT_TYPE_DATA) +#endif + { + if (pkt_log) { + if (tx) { + if (pktfate) { + /* Tx status */ + DHD_PKTLOG_TXS(dhdp, pkt, pktdata, pktid, *pktfate); + } else { + /* Tx packet */ + DHD_PKTLOG_TX(dhdp, pkt, pktdata, pktid); + } + pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid); + } else { + struct sk_buff *skb = (struct sk_buff *)pkt; + if (pkt_wake) { + DHD_PKTLOG_WAKERX(dhdp, skb, pktdata); + } else { + DHD_PKTLOG_RX(dhdp, skb, pktdata); + } + } + } + } +#endif /* DHD_PKT_LOGGING */ + + /* Dump packet data */ + if (!tx) { + switch (pkt_type) { + case PKT_TYPE_DHCP: + dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate); + break; + case PKT_TYPE_ICMP: + dhd_icmp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate); + break; + case PKT_TYPE_DNS: + dhd_dns_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate); + break; + case PKT_TYPE_ARP: + dhd_arp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate); + break; + case PKT_TYPE_EAP: + dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen, tx, &pkthash, pktfate); + break; + default: + break; + } + } +} + +int +BCMFASTPATH(__dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = BCME_OK; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh = NULL; + uint8 pkt_flow_prio; + +#if (defined(DHD_L2_FILTER) || (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))) + dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx); +#endif /* DHD_L2_FILTER || (BCM_ROUTER_DHD && QOS_MAP_SET) */ + + /* Reject if down */ + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + /* free the packet here since the caller won't */ + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + +#ifdef PCIE_FULL_DONGLE + if (dhdp->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return NETDEV_TX_BUSY; + } +#endif /* PCIE_FULL_DONGLE */ + + /* Reject if pktlen > MAX_MTU_SZ */ + if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) { + /* free the packet here since the caller 
won't */ + dhdp->tx_big_packets++; + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + +#ifdef DHD_L2_FILTER + /* if dhcp_unicast is enabled, we need to convert the */ + /* broadcast DHCP ACK/REPLY packets to Unicast. */ + if (ifp->dhcp_unicast) { + uint8* mac_addr; + uint8* ehptr = NULL; + int ret; + ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr); + if (ret == BCME_OK) { + /* if given mac address having valid entry in sta list + * copy the given mac address, and return with BCME_OK + */ + if (dhd_find_sta(dhdp, ifidx, mac_addr)) { + ehptr = PKTDATA(dhdp->osh, pktbuf); + bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + } + } + } + + if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } + + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } +#endif /* DHD_L2_FILTER */ + /* Update multicast statistic */ + if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + eh = (struct ether_header *)pktdata; + + if (ETHER_ISMULTI(eh->ether_dhost)) + dhdp->tx_multicast++; + if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) { +#ifdef DHD_LOSSLESS_ROAMING + uint8 prio = (uint8)PKTPRIO(pktbuf); + + /* back up 802.1x's priority */ + dhdp->prio_8021x = prio; +#endif /* DHD_LOSSLESS_ROAMING */ + DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED); + atomic_inc(&dhd->pend_8021x_cnt); +#if defined(WL_CFG80211) && defined (WL_WPS_SYNC) + wl_handle_wps_states(dhd_idx2net(dhdp, ifidx), + pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE); +#endif /* WL_CFG80211 && WL_WPS_SYNC */ +#ifdef EAPOL_RESEND + wl_ext_backup_eapol_txpkt(dhdp, ifidx, pktbuf); +#endif /* EAPOL_RESEND */ + } + dhd_dump_pkt(dhdp, ifidx, pktdata, + (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL); + } else { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + +#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)) + if (ifp->qosmap_up_table_enable) { + pktsetprio_qms(pktbuf, ifp->qosmap_up_table, FALSE); + } + else +#endif + { + /* Look into the packet and update the packet priority */ +#ifndef PKTPRIO_OVERRIDE + /* XXX RB:6270 Ignore skb->priority from TCP/IP stack */ + if (PKTPRIO(pktbuf) == 0) +#endif /* !PKTPRIO_OVERRIDE */ + { +#if (!defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)) + pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE); +#else + /* For LLR, pkt prio will be changed to 7(NC) here */ + pktsetprio(pktbuf, FALSE); +#endif /* QOS_MAP_SET */ + } +#ifndef PKTPRIO_OVERRIDE + else { + /* Some protocols like OZMO use priority values from 256..263. + * these are magic values to indicate a specific 802.1d priority. 
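+		 * (Editorial note: the & 0x7 mask below folds 256..263 back onto 0..7, e.g. 260 & 0x7 == 4;)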
+ * make sure that priority field is in range of 0..7 + */ + PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7); + } +#endif /* !PKTPRIO_OVERRIDE */ + } + +#if defined(BCM_ROUTER_DHD) + traffic_mgmt_pkt_set_prio(dhdp, pktbuf); + +#endif /* BCM_ROUTER_DHD */ + + BCM_REFERENCE(pkt_flow_prio); + /* Intercept and create Socket level statistics */ + /* + * TODO: Somehow moving this code block above the pktsetprio code + * is resetting the priority back to 0, but this does not happen for + * packets generated from iperf using the -S option. Can't understand why. + */ + dhd_update_sock_flows(dhd, pktbuf); + +#ifdef SUPPORT_SET_TID + dhd_set_tid_based_on_uid(dhdp, pktbuf); +#endif /* SUPPORT_SET_TID */ + +#ifdef PCIE_FULL_DONGLE + /* + * Look up the per-interface hash table for a matching flowring. If one is not + * available, allocate a unique flowid and add a flowring entry. + * The found or newly created flowid is placed into the pktbuf's tag. + */ + +#ifdef DHD_TX_PROFILE + if (dhdp->tx_profile_enab && dhdp->num_profiles > 0 && + dhd_protocol_matches_profile(PKTDATA(dhdp->osh, pktbuf), + PKTLEN(dhdp->osh, pktbuf), dhdp->protocol_filters, + dhdp->host_sfhllc_supported)) { + /* we only have support for one tx_profile at the moment */ + + /* tagged packets must be put into TID 6 */ + pkt_flow_prio = PRIO_8021D_VO; + } else +#endif /* defined(DHD_TX_PROFILE) */ + { + pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))]; + } + + ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf); + if (ret != BCME_OK) { + PKTCFREE(dhd->pub.osh, pktbuf, TRUE); + return ret; + } +#endif /* PCIE_FULL_DONGLE */ + /* terence 20150901: Micky add to adjust the 802.1X priority */ + /* Set the 802.1X packet with the highest priority 7 */ + if (dhdp->conf->pktprio8021x >= 0) + pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x); + +#ifdef PROP_TXSTATUS + if (dhd_wlfc_is_supported(dhdp)) { + /* store the interface ID */ + DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx); + + /* store destination MAC in the tag as well */ + DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost); + + /* decide which FIFO this packet belongs to */ + if (ETHER_ISMULTI(eh->ether_dhost)) + /* one additional queue index (highest AC + 1) is used for bc/mc queue */ + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT); + else + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf))); + } else +#endif /* PROP_TXSTATUS */ + { + /* If the protocol uses a data header, apply it */ + dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + } + + /* Use bus module to send data frame */ +#ifdef PROP_TXSTATUS + { + if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata, + dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) { + /* non-proptxstatus way */ +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ + } + } +#else +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ +#endif /* PROP_TXSTATUS */ +#ifdef BCMDBUS + if (ret) + PKTCFREE(dhdp->osh, pktbuf, TRUE); +#endif /* BCMDBUS */ + + return ret; +} + +int +BCMFASTPATH(dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = 0; + unsigned long flags; + dhd_if_t *ifp; + + DHD_GENERAL_LOCK(dhdp, flags); + ifp = dhd_get_ifp(dhdp, ifidx); + if (!ifp || ifp->del_in_progress) { + DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n", + __FUNCTION__, ifp, ifp ? 
ifp->del_in_progress : 0)); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhdp->busstate)); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT); + DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + ret = -EBUSY; + goto exit; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp); + DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT); + dhd_os_tx_completion_wake(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + DHD_GENERAL_UNLOCK(dhdp, flags); + + ret = __dhd_sendpkt(dhdp, ifidx, pktbuf); + +#ifdef DHD_PCIE_RUNTIMEPM +exit: +#endif + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp); + DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT); + dhd_os_tx_completion_wake(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + return ret; +} + +#ifdef DHD_MQ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +static uint16 +BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +#else +static uint16 +BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb) +#endif /* LINUX_VERSION_CODE */ +{ + dhd_info_t *dhd_info = DHD_DEV_INFO(net); + dhd_pub_t *dhdp = &dhd_info->pub; + uint16 prio = 0; + + BCM_REFERENCE(dhd_info); + BCM_REFERENCE(dhdp); + BCM_REFERENCE(prio); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + if (mq_select_disable) { + /* if driver side queue selection is disabled via sysfs, call the kernel + * supplied fallback function to select the queue, which is usually + * '__netdev_pick_tx()' in net/core/dev.c + */ + return fallback(net, skb); + } +#endif /* LINUX_VERSION */ + + prio = dhdp->flow_prio_map[skb->priority]; + if (prio < AC_COUNT) + return prio; + else + return AC_BK; +} +#endif /* DHD_MQ */ + +netdev_tx_t +BCMFASTPATH(dhd_start_xmit)(struct sk_buff *skb, struct net_device *net) +{ + int ret; + uint datalen; + void *pktbuf; + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp = NULL; + int ifidx; + unsigned long flags; +#if !defined(BCM_ROUTER_DHD) + uint8 htsfdlystat_sz = 0; +#endif /* ! 
BCM_ROUTER_DHD */ +#ifdef DHD_WMF + struct ether_header *eh; + uint8 *iph; +#endif /* DHD_WMF */ +#if defined(DHD_MQ) && defined(DHD_MQ_STATS) + int qidx = 0; + int cpuid = 0; + int prio = 0; +#endif /* DHD_MQ && DHD_MQ_STATS */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#if defined(DHD_MQ) && defined(DHD_MQ_STATS) + qidx = skb_get_queue_mapping(skb); + /* in a non-preemptible context smp_processor_id() can be used; + * otherwise get_cpu()/put_cpu() should be used + */ + if (!CAN_SLEEP()) { + cpuid = smp_processor_id(); + } + else { + cpuid = get_cpu(); + put_cpu(); + } + prio = dhd->pub.flow_prio_map[skb->priority]; + DHD_TRACE(("%s: Q idx = %d, CPU = %d, prio = %d \n", __FUNCTION__, + qidx, cpuid, prio)); + dhd->pktcnt_qac_histo[qidx][prio]++; + dhd->pktcnt_per_ac[prio]++; + dhd->cpu_qstats[qidx][cpuid]++; +#endif /* DHD_MQ && DHD_MQ_STATS */ + + if (dhd_query_bus_erros(&dhd->pub)) { + return -ENODEV; + } + + DHD_GENERAL_LOCK(&dhd->pub, flags); + DHD_BUS_BUSY_SET_IN_TX(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) { + /* In order to avoid pkt loss, return NETDEV_TX_BUSY until runtime resume is done. */ + /* stop the network queue temporarily until resume done */ + DHD_GENERAL_LOCK(&dhd->pub, flags); + if (!dhdpcie_is_resume_done(&dhd->pub)) { + dhd_bus_stop_queue(dhd->pub.bus); + } + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + return NETDEV_TX_BUSY; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_GENERAL_LOCK(&dhd->pub, flags); +#ifdef BCMPCIE + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); +#ifdef PCIE_FULL_DONGLE + /* Stop tx queues if suspend is in progress */ + if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) { + dhd_bus_stop_queue(dhd->pub.bus); + } +#endif /* PCIE_FULL_DONGLE */ + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + return NETDEV_TX_BUSY; + } +#else + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state)); + } +#endif + + DHD_OS_WAKE_LOCK(&dhd->pub); + +#if defined(DHD_HANG_SEND_UP_TEST) + if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) { + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + dhd->pub.busstate = DHD_BUS_DOWN; + } +#endif /* DHD_HANG_SEND_UP_TEST */ + + /* Reject if down */ + /* XXX kernel panic issue at first bootup time, + * rmmod without interface down makes an unnecessary hang event. 
+ */ + if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) { + DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", + __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); + dhd_tx_stop_queues(net); +#if defined(OEM_ANDROID) + /* Send Event when bus down detected during data session */ + if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) { + DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__)); + dhd->pub.hang_reason = HANG_REASON_BUS_DOWN; + net_os_send_hang_message(net); + } +#endif /* OEM_ANDROID */ + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return NETDEV_TX_BUSY; + } + + ifp = DHD_DEV_IFP(net); + ifidx = DHD_DEV_IFIDX(net); +#ifdef DHD_BUZZZ_LOG_ENABLED + BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb); +#endif /* DHD_BUZZZ_LOG_ENABLED */ + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); + dhd_tx_stop_queues(net); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return NETDEV_TX_BUSY; + } + + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + /* If tput test is in progress */ + if (dhd->pub.tput_data.tput_test_running) { + return NETDEV_TX_BUSY; + } + + ASSERT(ifidx == dhd_net2idx(dhd, net)); + ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx]))); + + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + + /* re-align socket buffer if "skb->data" is odd address */ + if (((unsigned long)(skb->data)) & 0x1) { + unsigned char *data = skb->data; + uint32 length = skb->len; + PKTPUSH(dhd->pub.osh, skb, 1); + memmove(skb->data, data, length); + PKTSETLEN(dhd->pub.osh, skb, length); + } + + datalen = PKTLEN(dhd->pub.osh, skb); + +#ifdef TPUT_MONITOR + if (dhd->pub.conf->tput_monitor_ms) { + dhd_os_sdlock_txq(&dhd->pub); + dhd->pub.conf->net_len += datalen; + dhd_os_sdunlock_txq(&dhd->pub); + if ((dhd->pub.conf->data_drop_mode == XMIT_DROP) && + (PKTLEN(dhd->pub.osh, skb) > 500)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + } +#endif + /* Make sure there's enough room for any header */ +#if !defined(BCM_ROUTER_DHD) + if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) { + struct sk_buff *skb2; + + DHD_INFO(("%s: insufficient headroom\n", + dhd_ifname(&dhd->pub, ifidx))); + dhd->pub.tx_realloc++; + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz); + + dev_kfree_skb(skb); + if ((skb = skb2) == NULL) { + DHD_ERROR(("%s: skb_realloc_headroom failed\n", + dhd_ifname(&dhd->pub, ifidx))); + ret = -ENOMEM; + goto done; + } + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + } +#endif /* !BCM_ROUTER_DHD */ + + /* move from dhdsdio_sendfromq(), try to orphan skb early */ + if (dhd->pub.conf->orphan_move == 2) + PKTORPHAN(skb, dhd->pub.conf->tsq); + else if (dhd->pub.conf->orphan_move == 3) + skb_orphan(skb); + + /* Convert to packet */ + if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { + DHD_ERROR(("%s: PKTFRMNATIVE failed\n", + dhd_ifname(&dhd->pub, ifidx))); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto done; + } + +#ifdef DHD_WET + /* wet related packet proto manipulation should be done in DHD + since dongle doesn't have complete payload + */ + if 
(WET_ENABLED(&dhd->pub) && + (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) { + DHD_INFO(("%s:%s: wet send proc failed\n", + __FUNCTION__, dhd_ifname(&dhd->pub, ifidx))); + PKTFREE(dhd->pub.osh, pktbuf, FALSE); + ret = -EFAULT; + goto done; + } +#endif /* DHD_WET */ + +#ifdef DHD_WMF + eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf); + iph = (uint8 *)eh + ETHER_HDR_LEN; + + /* WMF processing for multicast packets + * Only IPv4 packets are handled + */ + if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) && + (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) || + ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) { +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) + void *sdu_clone; + bool ucast_convert = FALSE; +#ifdef DHD_UCAST_UPNP + uint32 dest_ip; + + dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); + ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip); +#endif /* DHD_UCAST_UPNP */ +#ifdef DHD_IGMP_UCQUERY + ucast_convert |= dhd->pub.wmf_ucast_igmp_query && + (IPV4_PROT(iph) == IP_PROT_IGMP) && + (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY); +#endif /* DHD_IGMP_UCQUERY */ + if (ucast_convert) { + dhd_sta_t *sta; + unsigned long flags; + struct list_head snapshot_list; + struct list_head *wmf_ucforward_list; + + ret = NETDEV_TX_OK; + + /* For non BCM_GMAC3 platform we need a snapshot sta_list to + * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue. + */ + wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list); + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + /* Convert upnp/igmp query to unicast for each assoc STA */ + list_for_each_entry(sta, wmf_ucforward_list, list) { + GCC_DIAGNOSTIC_POP(); + /* Skip sending to proxy interfaces of proxySTA */ + if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) { + continue; + } + if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) { + ret = WMF_NOP; + break; + } + dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1); + } + DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list); + + DHD_GENERAL_LOCK(&dhd->pub, flags); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + if (ret == NETDEV_TX_OK) + PKTFREE(dhd->pub.osh, pktbuf, TRUE); + + return ret; + } else +#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */ + { + /* There will be no STA info if the packet is coming from LAN host + * Pass as NULL + */ + ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0); + switch (ret) { + case WMF_TAKEN: + case WMF_DROP: + /* Either taken by WMF or we should drop it. 
+ * Exiting send path */ + + DHD_GENERAL_LOCK(&dhd->pub, flags); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return NETDEV_TX_OK; + default: + /* Continue the transmit path */ + break; + } + } + } +#endif /* DHD_WMF */ +#ifdef DHD_PSTA + /* PSR related packet proto manipulation should be done in DHD + * since dongle doesn't have complete payload + */ + if (PSR_ENABLED(&dhd->pub) && +#ifdef BCM_ROUTER_DHD + !(ifp->primsta_dwds) && +#endif /* BCM_ROUTER_DHD */ + (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) { + + DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__, + dhd_ifname(&dhd->pub, ifidx))); + } +#endif /* DHD_PSTA */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_PACING_SHIFT) +#ifndef DHD_DEFAULT_TCP_PACING_SHIFT +#define DHD_DEFAULT_TCP_PACING_SHIFT 7 +#endif /* DHD_DEFAULT_TCP_PACING_SHIFT */ + if (skb->sk) { + sk_pacing_shift_update(skb->sk, DHD_DEFAULT_TCP_PACING_SHIFT); + } +#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_PACING_SHIFT */ + +#ifdef DHDTCPSYNC_FLOOD_BLK + if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) { + ifp->tsyncack_txed ++; + } +#endif /* DHDTCPSYNC_FLOOD_BLK */ + +#ifdef DHDTCPACK_SUPPRESS + if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) { + /* If this packet has been held or got freed, just return */ + if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) { + ret = 0; + goto done; + } + } else { + /* If this packet has replaced another packet and got freed, just return */ + if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) { + ret = 0; + goto done; + } + } +#endif /* DHDTCPACK_SUPPRESS */ + + /* + * If Load Balance is enabled queue the packet + * else send directly from here. + */ +#if defined(DHD_LB_TXP) + ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf); +#else + ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf); +#endif + +done: + /* XXX Bus modules may have different "native" error spaces? */ + /* XXX USB is native linux and it'd be nice to retain errno */ + /* XXX meaning, but SDIO is not so we'd need an OSL_ERROR. */ + if (ret) { + ifp->stats.tx_dropped++; + dhd->pub.tx_dropped++; + } else { +#ifdef PROP_TXSTATUS + /* tx_packets counter can be counted only when wlfc is disabled */ + if (!dhd_wlfc_is_supported(&dhd->pub)) +#endif + { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } + dhd->pub.actual_tx_pkts++; + } + + DHD_GENERAL_LOCK(&dhd->pub, flags); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT); + dhd_os_tx_completion_wake(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_OS_WAKE_UNLOCK(&dhd->pub); +#ifdef DHD_BUZZZ_LOG_ENABLED + BUZZZ_LOG(START_XMIT_END, 0); +#endif /* DHD_BUZZZ_LOG_ENABLED */ + /* Return ok: we always eat the packet */ + return NETDEV_TX_OK; +} + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +void dhd_rx_wq_wakeup(struct work_struct *ptr) +{ + struct dhd_rx_tx_work *work; + struct dhd_pub * pub; + + work = container_of(ptr, struct dhd_rx_tx_work, work); + + pub = work->pub; + + DHD_RPM(("%s: ENTER. \n", __FUNCTION__)); + + if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) { + return; + } + + DHD_OS_WAKE_LOCK(pub); + if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) { + + // do nothing but wake up the bus. 
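+		// (Editorial note: pm_runtime_get_sync() has resumed the bus at this point; the mark_last_busy/put_autosuspend pair below re-arms the runtime-PM autosuspend timer.)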
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus)); + } + DHD_OS_WAKE_UNLOCK(pub); + kfree(work); +} + +void dhd_start_xmit_wq_adapter(struct work_struct *ptr) +{ + struct dhd_rx_tx_work *work; + netdev_tx_t ret = NETDEV_TX_OK; + dhd_info_t *dhd; + struct dhd_bus * bus; + + work = container_of(ptr, struct dhd_rx_tx_work, work); + + dhd = DHD_DEV_INFO(work->net); + + bus = dhd->pub.bus; + + if (atomic_read(&dhd->pub.block_bus)) { + kfree_skb(work->skb); + kfree(work); + dhd_netif_start_queue(bus); + return; + } + + if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) { + ret = dhd_start_xmit(work->skb, work->net); + pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(bus)); + } + + /* log before freeing the work item */ + if (ret) + netdev_err(work->net, + "error: dhd_start_xmit():%d\n", ret); + + kfree(work); + dhd_netif_start_queue(bus); +} + +netdev_tx_t +BCMFASTPATH(dhd_start_xmit_wrapper)(struct sk_buff *skb, struct net_device *net) +{ + struct dhd_rx_tx_work *start_xmit_work; + netdev_tx_t ret; + dhd_info_t *dhd = DHD_DEV_INFO(net); + + if (dhd->pub.busstate == DHD_BUS_SUSPEND) { + DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__)); + + dhd_netif_stop_queue(dhd->pub.bus); + + start_xmit_work = (struct dhd_rx_tx_work*) + kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC); + + if (!start_xmit_work) { + netdev_err(net, + "error: failed to alloc start_xmit_work\n"); + ret = -ENOMEM; + goto exit; + } + + INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter); + start_xmit_work->skb = skb; + start_xmit_work->net = net; + queue_work(dhd->tx_wq, &start_xmit_work->work); + ret = NET_XMIT_SUCCESS; + + } else if (dhd->pub.busstate == DHD_BUS_DATA) { + ret = dhd_start_xmit(skb, net); + } else { + /* when bus is down */ + ret = -ENODEV; + } + +exit: + return ret; +} +void +dhd_bus_wakeup_work(dhd_pub_t *dhdp) +{ + struct dhd_rx_tx_work *rx_work; + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC); + if (!rx_work) { + DHD_ERROR(("%s: start_rx_work alloc error. 
\n", __FUNCTION__)); + return; + } + + INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup); + rx_work->pub = dhdp; + queue_work(dhd->rx_wq, &rx_work->work); + +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +static void +__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state) +{ + if (state == ON) { + if (!netif_queue_stopped(net)) { + DHD_INFO(("%s: Stop Netif Queue\n", __FUNCTION__)); + netif_stop_queue(net); + } else { + DHD_INFO(("%s: Netif Queue already stopped\n", __FUNCTION__)); + } + } + + if (state == OFF) { + if (netif_queue_stopped(net)) { + DHD_INFO(("%s: Start Netif Queue\n", __FUNCTION__)); + netif_wake_queue(net); + } else { + DHD_INFO(("%s: Netif Queue already started\n", __FUNCTION__)); + } + } +} + +void +dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) +{ + struct net_device *net; + dhd_info_t *dhd = dhdp->info; + unsigned long flags; + int i; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(dhd); + +#ifdef DHD_LOSSLESS_ROAMING + /* block flowcontrol during roaming */ + if ((dhdp->dequeue_prec_map == (1 << dhdp->flow_prio_map[PRIO_8021D_NC])) && (state == ON)) + { + DHD_ERROR_RLMT(("%s: Roaming in progress, cannot stop network queue (0x%x:%d)\n", + __FUNCTION__, dhdp->dequeue_prec_map, dhdp->flow_prio_map[PRIO_8021D_NC])); + return; + } +#endif + + flags = dhd_os_sdlock_txoff(&dhd->pub); + if (ifidx == ALL_INTERFACES) { + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + net = dhd->iflist[i]->net; + __dhd_txflowcontrol(dhdp, net, state); + } + } + } else { + if (dhd->iflist[ifidx]) { + net = dhd->iflist[ifidx]->net; + __dhd_txflowcontrol(dhdp, net, state); + } + } + dhdp->txoff = state; + dhd_os_sdunlock_txoff(&dhd->pub, flags); +} + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) + +/* Dump CTF stats */ +void +dhd_ctf_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_info_t *dhd = dhdp->info; + + bcm_bprintf(strbuf, "CTF stats:\n"); + ctf_dump(dhd->cih, strbuf); +} + +bool +BCMFASTPATH(dhd_rx_pkt_chainable)(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp = dhd->iflist[ifidx]; + + return ifp->rx_pkt_chainable; +} + +/* Returns FALSE if block ping is enabled */ +bool +BCMFASTPATH(dhd_l2_filter_chainable)(dhd_pub_t *dhdp, uint8 *eh, int ifidx) +{ +#ifdef DHD_L2_FILTER + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp = dhd->iflist[ifidx]; + ASSERT(ifp != NULL); + return ifp->block_ping ? FALSE : TRUE; +#else + return TRUE; +#endif /* DHD_L2_FILTER */ +} +/* Returns FALSE if WET is enabled */ +bool +BCMFASTPATH(dhd_wet_chainable)(dhd_pub_t *dhdp) +{ +#ifdef DHD_WET + return (!WET_ENABLED(dhdp)); +#else + return TRUE; +#endif +} + +/* Returns TRUE if hot bridge entry for this da is present */ +bool +BCMFASTPATH(dhd_ctf_hotbrc_check)(dhd_pub_t *dhdp, uint8 *eh, int ifidx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp = dhd->iflist[ifidx]; + + ASSERT(ifp != NULL); + + if (!dhd->brc_hot) + return FALSE; + + return CTF_HOTBRC_CMP(dhd->brc_hot, (eh), (void *)(ifp->net)); +} + +/* + * Try to forward the complete packet chain through CTF. 
+ * If unsuccessful, + * - link the chain by skb->next + * - change the pnext to the 2nd packet of the chain + * - the chained packets will be sent up to the n/w stack + */ +static inline int32 +BCMFASTPATH(dhd_ctf_forward)(dhd_info_t *dhd, struct sk_buff *skb, void **pnext) +{ + dhd_pub_t *dhdp = &dhd->pub; + void *p, *n; + void *old_pnext; + + /* try cut thru first */ + if (!CTF_ENAB(dhd->cih) || (ctf_forward(dhd->cih, skb, skb->dev) == BCME_ERROR)) { + /* Fall back to slow path if ctf is disabled or if ctf_forward fails */ + + /* clear skipct flag before sending up */ + PKTCLRSKIPCT(dhdp->osh, skb); + +#ifdef CTFPOOL + /* allocate and add a new skb to the pkt pool */ + if (PKTISFAST(dhdp->osh, skb)) + osl_ctfpool_add(dhdp->osh); + + /* clear fast buf flag before sending up */ + PKTCLRFAST(dhdp->osh, skb); + + /* re-init the hijacked field */ + CTFPOOLPTR(dhdp->osh, skb) = NULL; +#endif /* CTFPOOL */ + + /* link the chained packets by skb->next */ + if (PKTISCHAINED(skb)) { + old_pnext = *pnext; + PKTFRMNATIVE(dhdp->osh, skb); + p = (void *)skb; + FOREACH_CHAINED_PKT(p, n) { + PKTCLRCHAINED(dhdp->osh, p); + PKTCCLRFLAGS(p); + if (p == (void *)skb) + PKTTONATIVE(dhdp->osh, p); + if (n) + PKTSETNEXT(dhdp->osh, p, n); + else + PKTSETNEXT(dhdp->osh, p, old_pnext); + } + *pnext = PKTNEXT(dhdp->osh, skb); + PKTSETNEXT(dhdp->osh, skb, NULL); + } + return (BCME_ERROR); + } + + return (BCME_OK); +} +#endif /* BCM_ROUTER_DHD && HNDCTF */ + +#ifdef DHD_WMF +bool +dhd_is_rxthread_enabled(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + + return dhd->rxthread_enabled; +} +#endif /* DHD_WMF */ + +#ifdef DHD_MCAST_REGEN +/* + * Description: This function is called to do the reverse translation + * + * Input eh - pointer to the ethernet header + */ +int32 +dhd_mcast_reverse_translation(struct ether_header *eh) +{ + uint8 *iph; + uint32 dest_ip; + + iph = (uint8 *)eh + ETHER_HDR_LEN; + dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); + + /* Only IP packets are handled */ + if (eh->ether_type != hton16(ETHER_TYPE_IP)) + return BCME_ERROR; + + /* Non-IPv4 multicast packets are not handled */ + if (IP_VER(iph) != IP_VER_4) + return BCME_ERROR; + + /* + * The packet has a multicast IP and unicast MAC. 
That means + * we have to do the reverse translation + */ + if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) { + ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip); + return BCME_OK; + } + + return BCME_ERROR; +} +#endif /* DHD_MCAST_REGEN */ + +void +dhd_dpc_tasklet_dispatcher_work(struct work_struct * work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct dhd_info *dhd; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd = container_of(dw, struct dhd_info, dhd_dpc_dispatcher_work); + GCC_DIAGNOSTIC_POP(); + + DHD_INFO(("%s:\n", __FUNCTION__)); + + tasklet_schedule(&dhd->tasklet); +} + +void +dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int dpc_cpu = atomic_read(&dhd->dpc_cpu); + DHD_INFO(("%s:\n", __FUNCTION__)); + + /* scheduler will take care of scheduling to appropriate cpu if dpc_cpu is not online */ + schedule_delayed_work_on(dpc_cpu, &dhd->dhd_dpc_dispatcher_work, delay); + + return; +} + +#ifdef SHOW_LOGTRACE +static void +dhd_netif_rx_ni(struct sk_buff * skb) +{ + /* Do not call netif_receive_skb as this workqueue scheduler is + * not from NAPI. Also, as we are not in INTR context, do not call + * netif_rx; instead call netif_rx_ni (for kernel >= 2.6) which + * does netif_rx, disables irq, raises the NET_RX softirq and + * enables interrupts back + */ + netif_rx_ni(skb); +} + +static int +dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = BCME_OK; + uint datalen; + bcm_event_msg_u_t evu; + void *data = NULL; + void *pktdata = NULL; + bcm_event_t *pvt_data; + uint pktlen; + + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + /* In dhd_rx_frame, header is stripped using skb_pull + * of size ETH_HLEN, so adjust pktlen accordingly + */ + pktlen = skb->len + ETH_HLEN; + + pktdata = (void *)skb_mac_header(skb); + ret = wl_host_event_get_data(pktdata, pktlen, &evu); + + if (ret != BCME_OK) { + DHD_ERROR(("%s: wl_host_event_get_data err = %d\n", + __FUNCTION__, ret)); + goto exit; + } + + datalen = ntoh32(evu.event.datalen); + + pvt_data = (bcm_event_t *)pktdata; + data = &pvt_data[1]; + + dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen); + +exit: + return ret; +} + +/* + * dhd_event_logtrace_process_items processes + * each skb from evt_trace_queue. 
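+ * (Editorial note: each call handles at most DHD_EVENT_LOGTRACE_BOUND skbs, so long queues are drained in bounded batches.)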
+ * Returns TRUE if more packets to be processed + * else returns FALSE + */ + +static int +dhd_event_logtrace_process_items(dhd_info_t *dhd) +{ + dhd_pub_t *dhdp; + struct sk_buff *skb; + uint32 qlen; + uint32 process_len; + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return 0; + } + + dhdp = &dhd->pub; + + if (!dhdp) { + DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__)); + return 0; + } + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE + /* Check if there is any update in the firmware trace buffer */ + process_fw_trace_data(dhdp); +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + qlen = skb_queue_len(&dhd->evt_trace_queue); + process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND); + + /* Run while loop till bound is reached or skb queue is empty */ + while (process_len--) { + int ifid = 0; + skb = skb_dequeue(&dhd->evt_trace_queue); + if (skb == NULL) { + DHD_ERROR(("%s: skb is NULL, which is not a valid case\n", + __FUNCTION__)); + break; + } + BCM_REFERENCE(ifid); +#ifdef PCIE_FULL_DONGLE + /* Check if pkt is from INFO ring or WLC_E_TRACE */ + ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); + if (ifid == DHD_DUMMY_INFO_IF) { + /* Process logtrace from info rings */ + dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data); + } else +#endif /* PCIE_FULL_DONGLE */ + { + /* Processing the WLC_E_TRACE case OR the non-PCIE_FULL_DONGLE case */ + dhd_event_logtrace_pkt_process(dhdp, skb); + } + + /* Dummy sleep so that scheduler kicks in after processing any logprints */ + OSL_SLEEP(0); + + /* Send packet up if logtrace_pkt_sendup is TRUE */ + if (dhdp->logtrace_pkt_sendup) { +#ifdef DHD_USE_STATIC_CTRLBUF + /* If bufs are allocated via static buf pool + * and logtrace_pkt_sendup enabled, make a copy, + * free the local one and send the copy up. + */ + void *npkt = PKTDUP(dhdp->osh, skb); + /* Clone event and send it up */ + PKTFREE_STATIC(dhdp->osh, skb, FALSE); + if (npkt) { + skb = npkt; + } else { + DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n")); + /* Packet is already freed, go to next packet */ + continue; + } +#endif /* DHD_USE_STATIC_CTRLBUF */ +#ifdef PCIE_FULL_DONGLE + /* For infobuf packets, as ifid is DHD_DUMMY_INFO_IF, + * to send the skb to the network layer, assign skb->dev with + * the primary interface's n/w device + */ + if (ifid == DHD_DUMMY_INFO_IF) { + skb = PKTTONATIVE(dhdp->osh, skb); + skb->dev = dhd->iflist[0]->net; + } +#endif /* PCIE_FULL_DONGLE */ + /* Send pkt UP */ + dhd_netif_rx_ni(skb); + } else { + /* Don't send up. Free up the packet. 
*/ +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } + } + + /* Reschedule if more packets to be processed */ + return (qlen >= DHD_EVENT_LOGTRACE_BOUND); +} + +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE +static int +dhd_logtrace_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub; + int ret; + + while (1) { + dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS(); + if (!binary_sema_down(tsk)) { + dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS(); + SMP_RD_BARRIER_DEPENDS(); + if (dhd->pub.dongle_reset == FALSE) { + do { + /* Check terminated before processing the items */ + if (tsk->terminated) { + DHD_ERROR(("%s: task terminated\n", __FUNCTION__)); + goto exit; + } +#ifdef EWP_EDL + /* check if EDL is being used */ + if (dhd->pub.dongle_edl_support) { + ret = dhd_prot_process_edl_complete(&dhd->pub, + &dhd->event_data); + } else { + ret = dhd_event_logtrace_process_items(dhd); + } +#else + ret = dhd_event_logtrace_process_items(dhd); +#endif /* EWP_EDL */ + /* if ret > 0, the bound has been reached, so to be fair to other + * processes we need to yield the scheduler. + * The comment above yield()'s definition says: + * If you want to use yield() to wait for something, + * use wait_event(). + * If you want to use yield() to be 'nice' for others, + * use cond_resched(). + * If you still want to use yield(), do not! + */ + if (ret > 0) { + cond_resched(); + OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS); + } else if (ret < 0) { + DHD_ERROR(("%s: ERROR should not reach here\n", + __FUNCTION__)); + } + } while (ret > 0); + } + if (tsk->flush_ind) { + DHD_ERROR(("%s: flushed\n", __FUNCTION__)); + dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS(); + tsk->flush_ind = 0; + complete(&tsk->flushed); + } + } else { + DHD_ERROR(("%s: unexpected break\n", __FUNCTION__)); + dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS(); + break; + } + } +exit: + complete_and_exit(&tsk->completed, 0); + dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS(); +} +#else +static void +dhd_event_logtrace_process(struct work_struct * work) +{ +/* Ignore compiler warnings due to -Werror=cast-qual */ + struct delayed_work *dw = to_delayed_work(work); + struct dhd_info *dhd; + int ret; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd = container_of(dw, struct dhd_info, event_log_dispatcher_work); + GCC_DIAGNOSTIC_POP(); + +#ifdef EWP_EDL + if (dhd->pub.dongle_edl_support) { + ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data); + } else { + ret = dhd_event_logtrace_process_items(dhd); + } +#else + ret = dhd_event_logtrace_process_items(dhd); +#endif /* EWP_EDL */ + + if (ret > 0) { + schedule_delayed_work(&(dhd)->event_log_dispatcher_work, + msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS)); + } + return; +} +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + +void +dhd_schedule_logtrace(void *dhd_info) +{ + dhd_info_t *dhd = (dhd_info_t *)dhd_info; + +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + if (dhd->thr_logtrace_ctl.thr_pid >= 0) { + binary_sema_up(&dhd->thr_logtrace_ctl); + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + schedule_delayed_work(&dhd->event_log_dispatcher_work, 0); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + return; +} + +void +dhd_cancel_logtrace_process_sync(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + if 
(dhd->thr_logtrace_ctl.thr_pid >= 0) { + PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl); + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + cancel_delayed_work_sync(&dhd->event_log_dispatcher_work); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +void +dhd_flush_logtrace_process(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + if (dhd->thr_logtrace_ctl.thr_pid >= 0) { + PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl); + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + flush_delayed_work(&dhd->event_log_dispatcher_work); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +int +dhd_init_logtrace_process(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID; + PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread"); + if (dhd->thr_logtrace_ctl.thr_pid < 0) { + DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__)); + return BCME_ERROR; + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + return BCME_OK; +} + +int +dhd_reinit_logtrace_process(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + /* Re-init only if PROC_STOP from dhd_stop was called + * which can be checked via thr_pid + */ + if (dhd->thr_logtrace_ctl.thr_pid < 0) { + PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, + 0, "dhd_logtrace_thread"); + if (dhd->thr_logtrace_ctl.thr_pid < 0) { + DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__)); + return BCME_ERROR; + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } + } +#else + /* No need to re-init for WQ as cancel_delayed_work_sync will + * not delete the WQ + */ +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + return BCME_OK; +} + +void +dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + +#ifdef PCIE_FULL_DONGLE + /* Add ifidx in the PKTTAG */ + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx); +#endif /* PCIE_FULL_DONGLE */ + skb_queue_tail(&dhd->evt_trace_queue, pktbuf); + + dhd_schedule_logtrace(dhd); +} + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE +void +dhd_event_logtrace_enqueue_fwtrace(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *) dhdp->info; + + /* Schedule a kernel thread */ + dhd_schedule_logtrace(dhd); + + return; +} +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +void +dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } +} + +#ifdef EWP_EDL +void +dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg) +{ + struct sk_buff *skb = NULL; + uint32 pktsize = 0; + void *pkt = NULL; + info_buf_payload_hdr_t *infobuf = NULL; + dhd_info_t *dhd = dhdp->info; + uint8 *pktdata = NULL; + + if (!msg) + return; + + /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|| */ + infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32)); + pktsize = 
(uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) + + sizeof(uint32)); + pkt = PKTGET(dhdp->osh, pktsize, FALSE); + if (!pkt) { + DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__)); + } else { + PKTSETLEN(dhdp->osh, pkt, pktsize); + pktdata = PKTDATA(dhdp->osh, pkt); + memcpy(pktdata, msg, pktsize); + /* For infobuf packets assign skb->dev with + * Primary interface n/w device + */ + skb = PKTTONATIVE(dhdp->osh, pkt); + skb->dev = dhd->iflist[0]->net; + /* Send pkt UP */ + dhd_netif_rx_ni(skb); + } +} +#endif /* EWP_EDL */ +#endif /* SHOW_LOGTRACE */ + +#ifdef BTLOG +static void +dhd_bt_log_process(struct work_struct *work) +{ + struct dhd_info *dhd; + dhd_pub_t *dhdp; + struct sk_buff *skb; + + /* Ignore compiler warnings due to -Werror=cast-qual */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd = container_of(work, struct dhd_info, bt_log_dispatcher_work); + GCC_DIAGNOSTIC_POP(); + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + if (!dhdp) { + DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__)); + return; + } + + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + /* Run while(1) loop till all skbs are dequeued */ + while ((skb = skb_dequeue(&dhd->bt_log_queue)) != NULL) { + dhd_bt_log_pkt_process(dhdp, skb); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } +} + +void +dhd_rx_bt_log(dhd_pub_t *dhdp, void *pkt) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + skb_queue_tail(&dhd->bt_log_queue, pkt); + + /* schedule workqueue to process bt logs */ + schedule_work(&dhd->bt_log_dispatcher_work); +} +#endif /* BTLOG */ + +#ifdef EWP_EDL +static void +dhd_edl_process_work(struct work_struct *work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct dhd_info *dhd_info; + /* Ignore compiler warnings due to -Werror=cast-qual */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd_info = container_of(dw, struct dhd_info, edl_dispatcher_work); + GCC_DIAGNOSTIC_POP(); + + if (dhd_info) + dhd_prot_process_edl_complete(&dhd_info->pub, &dhd_info->event_data); +} + +void +dhd_schedule_edl_work(dhd_pub_t *dhdp, uint delay_ms) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + schedule_delayed_work(&dhd->edl_dispatcher_work, msecs_to_jiffies(delay_ms)); +} +#endif /* EWP_EDL */ + +#ifdef WL_NANHO +/* forward NAN event to NANHO host module. 
API returns TRUE if event is consumed by NANHO */ +static bool +dhd_nho_evt_process(dhd_pub_t *pub, int ifidx, wl_event_msg_t *evt_msg, + void *pktdata, uint16 pktlen) +{ + uint32 evt_type = ntoh32_ua(&evt_msg->event_type); + bool consumed = FALSE; + + if ((evt_type == WLC_E_NAN_CRITICAL) || (evt_type == WLC_E_NAN_NON_CRITICAL)) { + bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + uint32 event_len = sizeof(wl_event_msg_t) + ntoh32_ua(&evt_msg->datalen); + + bcm_nanho_evt(pub->nanhoi, &pvt_data->event, event_len, &consumed); + } + return consumed; +} + +static int +dhd_nho_evt_cb(void *drv_ctx, int ifidx, bcm_event_t *evt, uint16 evt_len) +{ + struct sk_buff *p, *skb; + dhd_if_t *ifp; + dhd_pub_t *dhdp = (dhd_pub_t *)drv_ctx; + + if ((p = PKTGET(dhdp->osh, evt_len, FALSE))) { + memcpy(PKTDATA(dhdp->osh, p), (uint8 *)evt, evt_len); + skb = PKTTONATIVE(dhdp->osh, p); + + ifp = dhdp->info->iflist[ifidx]; + if (ifp == NULL) { + /* default to main interface */ + ifp = dhdp->info->iflist[0]; + } + ASSERT(ifp); + + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + + /* strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + /* send the packet */ + if (in_interrupt()) { + netif_rx(skb); + } else { + netif_rx_ni(skb); + } + } else { + DHD_ERROR(("NHO: dhd_nho_evt_cb: unable to alloc sk_buf")); + return BCME_NOMEM; + } + + return BCME_OK; +} +#endif /* WL_NANHO */ + +#ifdef ENABLE_WAKEUP_PKT_DUMP +static void +update_wake_pkt_info(struct sk_buff *skb) +{ + struct iphdr *ip_header; + struct ipv6hdr *ipv6_header; + struct udphdr *udp_header; + struct tcphdr *tcp_header; + uint16 dport = 0; + + ip_header = (struct iphdr *)(skb->data); + + temp_raw |= ((long long)ntoh16(skb->protocol)) << 48; + + DHD_INFO(("eth_hdr(skb)->h_dest : %pM\n", eth_hdr(skb)->h_dest)); + if (eth_hdr(skb)->h_dest[0] & 0x01) { + temp_raw |= (long long)1 << 39; + } + + if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) { + wl_event_msg_t event; + bcm_event_msg_u_t evu; + int ret; + uint event_type; + + ret = wl_host_event_get_data( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + skb_mac_header(skb), +#else + skb->mac.raw, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ + skb->len, &evu); + if (ret != BCME_OK) { + DHD_ERROR(("%s: wl_host_event_get_data err = %d\n", + __FUNCTION__, ret)); + } + + memcpy(&event, &evu.event, sizeof(wl_event_msg_t)); + event_type = ntoh32_ua((void *)&event.event_type); + + temp_raw |= (long long)event_type << 40; + } else if (ntoh16(skb->protocol) == ETHER_TYPE_IP || + ntoh16(skb->protocol) == ETHER_TYPE_IPV6) { + if (ip_header->version == 6) { + ipv6_header = (struct ipv6hdr *)ip_header; + temp_raw |= ((long long)ipv6_header->nexthdr) << 40; + dport = 0; + + if (ipv6_header->daddr.s6_addr[0] & 0xff) { + temp_raw |= (long long)1 << 38; + } + + DHD_INFO(("IPv6 [%x]%pI6c > %pI6c:%d\n", + ip_header->protocol, &(ipv6_header->saddr.s6_addr), + &(ipv6_header->daddr.s6_addr), dport)); + } else if (ip_header->version == 4) { + temp_raw |= ((long long)ip_header->protocol) << 40; + +#define IP_HDR_OFFSET ((char *)ip_header + IPV4_HLEN(ip_header)) + if (ip_header->protocol == IPPROTO_TCP) { + tcp_header = (struct tcphdr *)IP_HDR_OFFSET; + dport = ntohs(tcp_header->dest); + } + else if (ip_header->protocol == IPPROTO_UDP) { + udp_header = (struct udphdr *)IP_HDR_OFFSET; + dport = ntohs(udp_header->dest); + } + + if (ipv4_is_multicast(ip_header->daddr)) { + temp_raw |= (long long)1 << 38; + } + + DHD_INFO(("IP [%x] %pI4 > %pI4:%d\n", + ip_header->protocol, 
&(ip_header->saddr),
+				&(ip_header->daddr), dport));
+		}
+
+		temp_raw |= (long long)dport << 16;
+	}
+}
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+#if defined(BCMPCIE)
+int
+dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf,
+	dmaaddr_t *pa, uint32 pktid)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *shinfo;
+
+	if (!pktbuf)
+		return BCME_ERROR;
+
+	skb = PKTTONATIVE(dhdp->osh, pktbuf);
+	shinfo = skb_shinfo(skb);
+
+	if (shinfo->nr_frags) {
+#ifdef CONFIG_64BIT
+		DHD_ERROR(("!!Invalid nr_frags: %u pa.loaddr: 0x%llx pa.hiaddr: 0x%llx "
+			"skb: 0x%llx skb_data: 0x%llx skb_head: 0x%llx skb_tail: 0x%llx "
+			"skb_end: 0x%llx skb_len: %u shinfo: 0x%llx pktid: %u\n",
+			shinfo->nr_frags, (uint64)(pa->loaddr), (uint64)(pa->hiaddr),
+			(uint64)skb, (uint64)(skb->data), (uint64)(skb->head), (uint64)(skb->tail),
+			(uint64)(skb->end), skb->len, (uint64)shinfo, pktid));
+#else
+		DHD_ERROR(("!!Invalid nr_frags: %u "
+			"skb: 0x%x skb_data: 0x%x skb_head: 0x%x skb_tail: 0x%x "
+			"skb_end: 0x%x skb_len: %u shinfo: 0x%x pktid: %u\n",
+			shinfo->nr_frags,
+			(uint)skb, (uint)(skb->data), (uint)(skb->head), (uint)(skb->tail),
+			(uint)(skb->end), skb->len, (uint)shinfo, pktid));
+#endif
+		prhex("shinfo", (char*)shinfo, sizeof(struct skb_shared_info));
+		if (!dhd_query_bus_erros(dhdp)) {
+#ifdef DHD_FW_COREDUMP
+			/* Collect socram dump */
+			if (dhdp->memdump_enabled) {
+				/* collect core dump */
+				dhdp->memdump_type = DUMP_TYPE_INVALID_SHINFO_NRFRAGS;
+				dhd_bus_mem_dump(dhdp);
+			} else
+#endif /* DHD_FW_COREDUMP */
+			{
+				shinfo->nr_frags = 0;
+				/* In production case, free the packet and continue
+				 * if nfrags is corrupted. Whereas in non-production
+				 * case collect memdump and call BUG_ON().
+				 */
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			}
+		}
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
+#endif /* BCMPCIE */
+
+/** Called when a frame is received by the dongle on interface 'ifidx' */
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void *data, *pnext = NULL;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+#if defined(OEM_ANDROID)
+	int tout_rx = 0;
+	int tout_ctrl = 0;
+#endif /* OEM_ANDROID */
+	void *skbhead = NULL;
+	void *skbprev = NULL;
+	uint16 protocol;
+	unsigned char *dump_data;
+#ifdef DHD_MCAST_REGEN
+	uint8 interface_role;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+#endif
+#ifdef DHD_WAKE_STATUS
+	wake_counts_t *wcp = NULL;
+#endif /* DHD_WAKE_STATUS */
+	int pkt_wake = 0;
+#ifdef ENABLE_DHD_GRO
+	bool dhd_gro_enable = TRUE;
+	struct Qdisc *qdisc = NULL;
+#endif /* ENABLE_DHD_GRO */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	BCM_REFERENCE(dump_data);
+	BCM_REFERENCE(pkt_wake);
+
+#ifdef DHD_TPUT_PATCH
+	if (dhdp->conf->pktsetsum)
+		PKTSETSUMGOOD(pktbuf, TRUE);
+#endif
+
+#ifdef ENABLE_DHD_GRO
+	if (ifidx < DHD_MAX_IFS) {
+		ifp = dhd->iflist[ifidx];
+		if (ifp && ifp->net->qdisc) {
+			if (ifp->net->qdisc->ops->cl_ops) {
+				dhd_gro_enable = FALSE;
+				DHD_TRACE(("%s: disable sw gro because of"
+					" qdisc tx traffic control\n", __FUNCTION__));
+			}
+
+			if (dev_ingress_queue(ifp->net)) {
+				qdisc = dev_ingress_queue(ifp->net)->qdisc_sleeping;
+				if (qdisc != NULL && (qdisc->flags & TCQ_F_INGRESS)) {
+					dhd_gro_enable = FALSE;
+					DHD_TRACE(("%s: disable sw gro because of"
+						" qdisc rx traffic control\n", __FUNCTION__));
+				}
+			}
+		}
+	}
+#ifdef DHD_GRO_ENABLE_HOST_CTRL
+	if (!dhdp->permitted_gro && dhd_gro_enable) {
+		dhd_gro_enable = FALSE;
+	}
+#endif /* DHD_GRO_ENABLE_HOST_CTRL */
+#endif /* ENABLE_DHD_GRO */
+
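+	/*
+	 * Illustrative summary (commentary only, not driver logic): software GRO
+	 * stays enabled only when neither a classful egress qdisc (ops->cl_ops)
+	 * nor an ingress qdisc (TCQ_F_INGRESS) is attached, presumably so that
+	 * per-packet traffic control still sees individual frames rather than
+	 * GRO-aggregated super-packets. With DHD_GRO_ENABLE_HOST_CTRL,
+	 * dhdp->permitted_gro acts as a final host-side veto.
+	 */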
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+		struct ether_header *eh;
+
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+		/* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
+		 * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
+		 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
+		 */
+		if (ifidx == DHD_DUMMY_INFO_IF) {
+			/* Event msg printing is called from dhd_rx_frame which is in Tasklet
+			 * context in case of PCIe FD, in case of other bus this will be from
+			 * DPC context. If we get bunch of events from Dongle then printing all
+			 * of them from Tasklet/DPC context that too in data path is costly.
+			 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
+			 * events with type WLC_E_TRACE.
+			 * We'll print these console logs from the WorkQueue context by enqueuing
+			 * the SKB here; dequeuing is done in the WorkQueue and the SKB is freed
+			 * only if logtrace_pkt_sendup is TRUE
+			 */
+#ifdef SHOW_LOGTRACE
+			dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+#else /* !SHOW_LOGTRACE */
+			/* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
+			 * free the PKT here itself
+			 */
+#ifdef DHD_USE_STATIC_CTRLBUF
+			PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#endif /* SHOW_LOGTRACE */
+			continue;
+		}
+#ifdef DHD_WAKE_STATUS
+#ifdef BCMDBUS
+		wcp = NULL;
+#else
+		pkt_wake = dhd_bus_get_bus_wake(dhdp);
+		wcp = dhd_bus_get_wakecount(dhdp);
+#endif /* BCMDBUS */
+		if (wcp == NULL) {
+			/* If wakeinfo count buffer is null do not update wake count values */
+			pkt_wake = 0;
+		}
+#endif /* DHD_WAKE_STATUS */
+
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+#ifdef DHD_AWDL
+		if (dhdp->awdl_llc_enabled &&
+			dhdp->awdl_ifidx && ifidx == dhdp->awdl_ifidx) {
+			if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+				dhd_awdl_llc_to_eth_hdr(dhdp, eh, pktbuf);
+			}
+		}
+#endif /* DHD_AWDL */
+
+		if (dhd->pub.tput_data.tput_test_running &&
+			dhd->pub.tput_data.direction == TPUT_DIR_RX &&
+			ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+			dhd_tput_test_rx(dhdp, pktbuf);
+			PKTFREE(dhd->pub.osh, pktbuf, FALSE);
+			continue;
+		}
+
+		if (ifidx >= DHD_MAX_IFS) {
+			DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
+				__FUNCTION__, ifidx));
+			if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			} else {
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			}
+			continue;
+		}
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL) {
+			DHD_ERROR_RLMT(("%s: ifp is NULL. drop packet\n",
+				__FUNCTION__));
+			if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			} else {
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			}
+			continue;
+		}
+
+		/* Dropping only data packets before registering net device to avoid kernel panic */
+#ifndef PROP_TXSTATUS_VSDB
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#else
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#endif /* PROP_TXSTATUS_VSDB */
+		{
+			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+				__FUNCTION__));
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+#ifdef PROP_TXSTATUS
+		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
+			/* WLFC may send header only packet when
+			there is an urgent message but no packet to
+			piggy-back on
+			*/
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+#endif
+#ifdef DHD_L2_FILTER
+		/* If block_ping is enabled drop the ping packet */
+		if (ifp->block_ping) {
+			if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+			}
+		}
+		if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
+			if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+			}
+		}
+		if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+			int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
+
+			/* Drop the packets if l2 filter has processed it already
+			 * otherwise continue with the normal path
+			 */
+			if (ret == BCME_OK) {
+				PKTCFREE(dhdp->osh, pktbuf, TRUE);
+				continue;
+			}
+		}
+		if (ifp->block_tdls) {
+			if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+			}
+		}
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_MCAST_REGEN
+		DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+		ASSERT(if_flow_lkup);
+
+		interface_role = if_flow_lkup[ifidx].role;
+		DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+		if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
+			!DHD_IF_ROLE_AP(dhdp, ifidx) &&
+			ETHER_ISUCAST(eh->ether_dhost)) {
+			if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
+#ifdef DHD_PSTA
+				/* Change bsscfg to primary bsscfg for unicast-multicast packets */
+				if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
+					(dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
+					if (ifidx != 0) {
+						/* Let the primary in PSTA interface handle this
+						 * frame after unicast to Multicast conversion
+						 */
+						ifp = dhd_get_ifp(dhdp, 0);
+						ASSERT(ifp);
+					}
+				}
+			}
+#endif /* PSTA */
+		}
+#endif /* MCAST_REGEN */
+
+#ifdef DHD_WMF
+		/* WMF processing for multicast packets */
+		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
+			dhd_sta_t *sta;
+			int ret;
+
+			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
+			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
+			switch (ret) {
+			case WMF_TAKEN:
+				/* The packet is taken by WMF. Continue to next iteration */
+				continue;
+			case WMF_DROP:
+				/* Packet DROP decision by WMF. Toss it */
+				DHD_ERROR(("%s: WMF decides to drop packet\n",
+					__FUNCTION__));
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+			default:
+				/* Continue the transmit path */
+				break;
+			}
+		}
+#endif /* DHD_WMF */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+		if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
+			int delta_sec;
+			int delta_sync;
+			int sync_per_sec;
+			u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+			ifp->tsync_rcvd++;
+			delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
+			delta_sec = curr_time - ifp->last_sync;
+			if (delta_sec > 1) {
+				sync_per_sec = delta_sync/delta_sec;
+				if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
+					schedule_work(&ifp->blk_tsfl_work);
+					DHD_ERROR(("ifidx %d TCP SYNC Flood attack suspected! "
+						"sync received %d pkt/sec\n",
+						ifidx, sync_per_sec));
+					ifp->tsync_per_sec = sync_per_sec;
+				}
+				dhd_reset_tcpsync_info_by_ifp(ifp);
+			}
+
+		}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+#ifdef DHD_WET
+		/* wet related packet proto manipulation should be done in DHD
+		 * since dongle doesn't have complete payload
+		 */
+		if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
+			pktbuf) < 0)) {
+			DHD_INFO(("%s:%s: wet recv proc failed\n",
+				__FUNCTION__, dhd_ifname(dhdp, ifidx)));
+		}
+#endif /* DHD_WET */
+
+#ifdef DHD_PSTA
+		if (PSR_ENABLED(dhdp) &&
+#ifdef BCM_ROUTER_DHD
+			!(ifp->primsta_dwds) &&
+#endif /* BCM_ROUTER_DHD */
+			(dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
+			DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
+				dhd_ifname(dhdp, ifidx)));
+		}
+#endif /* DHD_PSTA */
+
+#if defined(BCM_ROUTER_DHD)
+		/* XXX Use WOFA for both dhdap and dhdap-atlas router. */
+		/* XXX dhd_sendpkt verify pkt accounting (TO/FRM NATIVE) and PKTCFREE */
+
+		if (DHD_IF_ROLE_AP(dhdp, ifidx) && (!ifp->ap_isolate)) {
+			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+			if (ETHER_ISUCAST(eh->ether_dhost)) {
+				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+					dhd_sendpkt(dhdp, ifidx, pktbuf);
+					continue;
+				}
+			} else {
+				void *npkt;
+#if defined(HNDCTF)
+				if (PKTISCHAINED(pktbuf)) { /* XXX WAR */
+					DHD_ERROR(("Error: %s():%d Chained non unicast pkt<%p>\n",
+						__FUNCTION__, __LINE__, pktbuf));
+					PKTFRMNATIVE(dhdp->osh, pktbuf);
+					PKTCFREE(dhdp->osh, pktbuf, FALSE);
+					continue;
+				}
+#endif /* HNDCTF */
+				if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
+					((npkt = PKTDUP(dhdp->osh, pktbuf)) != NULL))
+					dhd_sendpkt(dhdp, ifidx, npkt);
+			}
+		}
+
+#if defined(HNDCTF)
+		/* try cut thru' before sending up */
+		if (dhd_ctf_forward(dhd, skb, &pnext) == BCME_OK) {
+			continue;
+		}
+#endif /* HNDCTF */
+
+#else /* !BCM_ROUTER_DHD */
+#ifdef PCIE_FULL_DONGLE
+		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+			(!ifp->ap_isolate)) {
+			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+			if (ETHER_ISUCAST(eh->ether_dhost)) {
+				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+					dhd_sendpkt(dhdp, ifidx, pktbuf);
+					continue;
+				}
+			} else {
+				if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE)) {
+					void *npktbuf = NULL;
+					/*
+					 * If host_sfhllc_supported is enabled, do skb_copy as
+					 * the SFHLLC header will be inserted during Tx, due to
+					 * which the network stack will not decode the Rx packet.
+					 * Else PKTDUP(skb_clone) is enough.
+					 */
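+					/*
+					 * Illustrative note (commentary only): skb_clone()
+					 * shares the data buffer between the two skbs, so a
+					 * header inserted on the Tx duplicate would also
+					 * corrupt the skb delivered to the network stack;
+					 * skb_copy() duplicates the data buffer as well, at
+					 * the cost of an extra memcpy, hence it is used only
+					 * when host_sfhllc_supported is set.
+					 */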
+					if (dhdp->host_sfhllc_supported) {
+						npktbuf = skb_copy(skb, GFP_ATOMIC);
+					} else {
+						npktbuf = PKTDUP(dhdp->osh, pktbuf);
+					}
+					if (npktbuf != NULL) {
+						dhd_sendpkt(dhdp, ifidx, npktbuf);
+					}
+				}
+			}
+		}
+#endif /* PCIE_FULL_DONGLE */
+#endif /* BCM_ROUTER_DHD */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+		if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
+			(ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
+			(dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
+			DHD_ERROR(("%s: Reassoc is in progress. "
+				"Drop EAPOL M1 frame\n", __FUNCTION__));
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#ifdef WLEASYMESH
+		if ((dhdp->conf->fw_type == FW_TYPE_EZMESH) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+			uint16 *da = (uint16 *)(eh->ether_dhost);
+			ASSERT(ISALIGNED(da, 2));
+
+			/* XXX: Special handling for 1905 messages
+			 * if DA matches with configured 1905 AL MAC addresses
+			 * bypass fwder and forward it to linux stack
+			 */
+			if (ntoh16(eh->ether_type) == ETHER_TYPE_1905_1) {
+				if (!eacmp(da, ifp->_1905_al_ucast) || !eacmp(da, ifp->_1905_al_mcast)) {
+					//skb->fwr_flood = 0;
+				} else {
+					//skb->fwr_flood = 1;
+				}
+			}
+		}
+#endif /* WLEASYMESH */
+		/* Get the protocol, maintain skb around eth_type_trans()
+		 * The main reason for this hack is for the limitation of
+		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
+		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
+		 * copying of the packet coming from the network stack to add
+		 * BDC, Hardware header etc, during network interface registration
+		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
+		 * for BDC, Hardware header etc. and not just the ETH_HLEN
+		 */
+		eth = skb->data;
+		len = skb->len;
+		dump_data = skb->data;
+		protocol = (skb->data[12] << 8) | skb->data[13];
+
+		if (protocol == ETHER_TYPE_802_1X) {
+			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
+#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
+			wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
+#endif /* WL_CFG80211 && WL_WPS_SYNC */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+			if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
+				OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
+			}
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef EAPOL_RESEND
+			wl_ext_release_eapol_txpkt(dhdp, ifidx, TRUE);
+#endif /* EAPOL_RESEND */
+		}
+		dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
+
+#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
+		if (pkt_wake) {
+			dhd_prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 64), DHD_ERROR_VAL);
+			DHD_ERROR(("config check in_suspend: %d ", dhdp->in_suspend));
+#ifdef ARP_OFFLOAD_SUPPORT
+			DHD_ERROR(("arp hmac_update:%d \n", dhdp->hmac_updated));
+#endif /* ARP_OFFLOAD_SUPPORT */
+		}
+#endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
+
+#ifdef BCMINTERNAL
+		if (dhd->pub.loopback) {
+			struct ether_header *local_eh = (struct ether_header *)eth;
+			if (ntoh16(local_eh->ether_type) == ETHER_TYPE_IP) {
+				uint8 *myp = (uint8 *)local_eh;
+				struct ipv4_hdr *iph = (struct ipv4_hdr *)(myp + ETHER_HDR_LEN);
+				uint16 iplen = (iph->version_ihl & 0xf) * sizeof(uint32);
+				if (iph->prot == 1) {
+					uint8 *icmph = (uint8 *)iph + iplen;
+					if (icmph[0] == 8) {
+						uint8 temp_addr[ETHER_ADDR_LEN];
+						uint8 temp_ip[IPV4_ADDR_LEN];
+						/* Ether header flip */
+						memcpy(temp_addr, local_eh->ether_dhost,
+							ETHER_ADDR_LEN);
+						memcpy(local_eh->ether_dhost,
+							local_eh->ether_shost, ETHER_ADDR_LEN);
+						memcpy(local_eh->ether_shost, temp_addr,
+							ETHER_ADDR_LEN);
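+						/*
+						 * Illustrative note: this BCMINTERNAL loopback
+						 * path reflects an ICMP echo request back at
+						 * its sender: the MAC addresses were swapped
+						 * above, the IP addresses are swapped next, and
+						 * the ICMP type is rewritten from 8 (echo
+						 * request) to 0 (echo reply).
+						 */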
+
+						/* IP header flip */
+						memcpy(temp_ip, iph->src_ip, IPV4_ADDR_LEN);
+						memcpy(iph->src_ip, iph->dst_ip, IPV4_ADDR_LEN);
+						memcpy(iph->dst_ip, temp_ip, IPV4_ADDR_LEN);
+
+						/* ICMP header flip */
+						icmph[0] = 0;
+					}
+				} else if (iph->prot == 17) {
+					uint8 *udph = (uint8 *)iph + iplen;
+					uint16 destport = ntoh16(*((uint16 *)udph + 1));
+					if (destport == 8888) {
+						uint8 temp_addr[ETHER_ADDR_LEN];
+						uint8 temp_ip[IPV4_ADDR_LEN];
+						/* Ether header flip */
+						memcpy(temp_addr, local_eh->ether_dhost,
+							ETHER_ADDR_LEN);
+						memcpy(local_eh->ether_dhost,
+							local_eh->ether_shost, ETHER_ADDR_LEN);
+						memcpy(local_eh->ether_shost, temp_addr,
+							ETHER_ADDR_LEN);
+
+						/* IP header flip */
+						memcpy(temp_ip, iph->src_ip, IPV4_ADDR_LEN);
+						memcpy(iph->src_ip, iph->dst_ip, IPV4_ADDR_LEN);
+						memcpy(iph->dst_ip, temp_ip, IPV4_ADDR_LEN);
+
+						/* Reset UDP checksum to 0 */
+						*((uint16 *)udph + 3) = 0;
+					}
+				}
+			}
+		}
+#endif /* BCMINTERNAL */
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+			ifp->stats.multicast++;
+		}
+
+		skb->data = eth;
+		skb->len = len;
+
+		/* TODO: XXX: re-look into dropped packets. */
+		DHD_DBG_PKT_MON_RX(dhdp, skb);
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+		if (dhd_mmc_wake) {
+			DHD_INFO(("wake_pkt %s(%d)\n", __FUNCTION__, __LINE__));
+			if (DHD_INFO_ON()) {
+				prhex("wake_pkt", (char*) eth, MIN(len, 48));
+			}
+			update_wake_pkt_info(skb);
+#ifdef CONFIG_IRQ_HISTORY
+			add_irq_history(0, "WIFI");
+#endif
+			dhd_mmc_wake = FALSE;
+		}
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+		/* Process special event packets and then discard them */
+		/* XXX Decide on a better way to fit this in */
+		memset(&event, 0, sizeof(event));
+
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			bcm_event_msg_u_t evu;
+			int ret_event, event_type;
+			void *pkt_data = skb_mac_header(skb);
+
+			ret_event = wl_host_event_get_data(pkt_data, len, &evu);
+
+			if (ret_event != BCME_OK) {
+				DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+					__FUNCTION__, ret_event));
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+				continue;
+			}
+
+			memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
+			event_type = ntoh32_ua((void *)&event.event_type);
+#ifdef SHOW_LOGTRACE
+			/* Event msg printing is called from dhd_rx_frame which is in Tasklet
+			 * context in case of PCIe FD, in case of other bus this will be from
+			 * DPC context. If we get bunch of events from Dongle then printing all
+			 * of them from Tasklet/DPC context that too in data path is costly.
+			 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
+			 * events with type WLC_E_TRACE.
+			 * We'll print these console logs from the WorkQueue context by enqueuing
+			 * the SKB here; dequeuing is done in the WorkQueue and the SKB is freed
+			 * only if logtrace_pkt_sendup is true
+			 */
+			if (event_type == WLC_E_TRACE) {
+				DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
+				dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+				continue;
+			}
+#endif /* SHOW_LOGTRACE */
+
+#ifdef WL_NANHO
+			/* Process firmware NAN event by NANHO host module */
+			if (dhd_nho_evt_process(dhdp, ifidx, &event, pkt_data, len)) {
+				/* NANHO host module consumed NAN event. Free pkt here. */
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+				continue;
+			}
+#endif /* WL_NANHO */
+
+			ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
+
+			wl_event_to_host_order(&event);
+#if defined(OEM_ANDROID)
+			if (!tout_ctrl)
+				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
+#endif /* OEM_ANDROID */
+
+#if (defined(OEM_ANDROID) && defined(PNO_SUPPORT))
+			if (event_type == WLC_E_PFN_NET_FOUND) {
+				/* enforce custom wake lock to guarantee that the kernel is not suspended */
+				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+			}
+#endif /* PNO_SUPPORT */
+			if (numpkt != 1) {
+				DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
+					__FUNCTION__));
+			}
+
+#ifdef DHD_WAKE_STATUS
+			if (unlikely(pkt_wake)) {
+#ifdef DHD_WAKE_EVENT_STATUS
+				if (event.event_type < WLC_E_LAST) {
+					wcp->rc_event[event.event_type]++;
+					wcp->rcwake++;
+					pkt_wake = 0;
+				}
+#endif /* DHD_WAKE_EVENT_STATUS */
+			}
+#endif /* DHD_WAKE_STATUS */
+
+			/* For delete virtual interface event, wl_host_event returns positive
+			 * i/f index; do not proceed, just free the pkt.
+			 */
+			if ((event_type == WLC_E_IF) && (ret_event > 0)) {
+				DHD_ERROR(("%s: interface is deleted. Free event packet\n",
+					__FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+				continue;
+			}
+
+			/*
+			 * For the event packets, there is a possibility
+			 * of ifidx getting modified. Thus update the ifp
+			 * once again.
+			 */
+			ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+			ifp = dhd->iflist[ifidx];
+#ifndef PROP_TXSTATUS_VSDB
+			if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
+#else
+			if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
+				dhd->pub.up))
+#endif /* PROP_TXSTATUS_VSDB */
+			{
+				DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
+					__FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+				continue;
+			}
+
+#ifdef SENDPROB
+			if (dhdp->wl_event_enabled ||
+				(dhdp->recv_probereq && (event.event_type == WLC_E_PROBREQ_MSG)))
+#else
+			if (dhdp->wl_event_enabled)
+#endif
+			{
+#ifdef DHD_USE_STATIC_CTRLBUF
+				/* If event bufs are allocated via static buf pool
+				 * and wl events are enabled, make a copy, free the
+				 * local one and send the copy up.
+				 */
+				struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+				/* Copy event and send it up */
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+				if (nskb) {
+					skb = nskb;
+				} else {
+					DHD_ERROR(("skb copy failed, dropping event.\n"));
+					continue;
+				}
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			} else {
+				/* If event forwarding is not explicitly enabled, drop events */
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+				continue;
+			}
+		} else {
+#if defined(OEM_ANDROID)
+			tout_rx = DHD_PACKET_TIMEOUT_MS;
+#endif /* OEM_ANDROID */
+
+#ifdef PROP_TXSTATUS
+			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
+#endif /* PROP_TXSTATUS */
+
+#ifdef DHD_WAKE_STATUS
+			if (unlikely(pkt_wake)) {
+				wcp->rxwake++;
+#ifdef DHD_WAKE_RX_STATUS
+#define ETHER_ICMP6_HEADER	20
+#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
+#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
+#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
+
+				if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
+					wcp->rx_arp++;
+				if (dump_data[0] == 0xFF) { /* Broadcast */
+					wcp->rx_bcast++;
+				} else if (dump_data[0] & 0x01) { /* Multicast */
+					wcp->rx_mcast++;
+					if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
+						wcp->rx_multi_ipv6++;
+						if ((skb->len > ETHER_ICMP6_HEADER) &&
+							(dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
+							wcp->rx_icmpv6++;
+							if (skb->len > ETHER_ICMPV6_TYPE) {
+								switch (dump_data[ETHER_ICMPV6_TYPE]) {
+								case NDISC_ROUTER_ADVERTISEMENT:
+									wcp->rx_icmpv6_ra++;
+									break;
+								case NDISC_NEIGHBOUR_ADVERTISEMENT:
+									wcp->rx_icmpv6_na++;
+									break;
+								case NDISC_NEIGHBOUR_SOLICITATION:
+									wcp->rx_icmpv6_ns++;
+									break;
+								}
+							}
+						}
+					} else if (dump_data[2] == 0x5E) {
+						wcp->rx_multi_ipv4++;
+					} else {
+						wcp->rx_multi_other++;
+					}
+				} else { /* Unicast */
+					wcp->rx_ucast++;
+				}
+#undef ETHER_ICMP6_HEADER
+#undef ETHER_IPV6_SADDR
+#undef ETHER_IPV6_DAADR
+#undef ETHER_ICMPV6_TYPE
+#endif /* DHD_WAKE_RX_STATUS */
+				pkt_wake = 0;
+			}
+#endif /* DHD_WAKE_STATUS */
+		}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+		ifp->net->last_rx = jiffies;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
+
+		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+			dhdp->dstats.rx_bytes += skb->len;
+			dhdp->rx_packets++; /* Local count */
+			ifp->stats.rx_bytes += skb->len;
+			ifp->stats.rx_packets++;
+		}
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+		if (dhd_use_tcp_window_size_adjust) {
+			if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
+				dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
+			}
+		}
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+		/* XXX WL here makes sure data is 4-byte aligned? */
+		if (in_interrupt()) {
+			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+				__FUNCTION__, __LINE__);
+#if defined(DHD_LB_RXP)
+#ifdef ENABLE_DHD_GRO
+			/* The pktlog module clones a skb using skb_clone and
+			 * stores the skb pointer in the pktlog module's ring buffer.
+			 * Once the buffer is full,
+			 * PKTFREE is called to remove the oldest skb.
+			 * A kernel panic occurred when the pktlog module freed
+			 * an rx frame that had been handed to napi_gro_receive().
+			 * As a fix, DHD does not use napi_gro_receive() for
+			 * packets in use by the pktlog module.
+			 */
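+			/*
+			 * Illustrative summary (commentary only): in the in-interrupt
+			 * path below, a frame goes to napi_gro_receive() only when GRO
+			 * is enabled, the skb is not cloned (i.e. not held by pktlog)
+			 * and it is not a BRCM event; everything else falls back to
+			 * netif_receive_skb().
+			 */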
+			if (dhd_gro_enable && !skb_cloned(skb) &&
+				ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+				napi_gro_receive(&dhd->rx_napi_struct, skb);
+			} else {
+				netif_receive_skb(skb);
+			}
+#else
+#if defined(WL_MONITOR) && defined(BCMSDIO)
+			if (dhd_monitor_enabled(dhdp, ifidx))
+				dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx);
+			else
+#endif /* WL_MONITOR && BCMSDIO */
+			netif_receive_skb(skb);
+#endif /* ENABLE_DHD_GRO */
+#else /* !defined(DHD_LB_RXP) */
+			netif_rx(skb);
+#endif /* !defined(DHD_LB_RXP) */
+		} else {
+			if (dhd->rxthread_enabled) {
+				if (!skbhead)
+					skbhead = skb;
+				else
+					PKTSETNEXT(dhdp->osh, skbprev, skb);
+				skbprev = skb;
+			} else {
+
+				/* If the receive is not processed inside an ISR,
+				 * the softirqd must be woken explicitly to service
+				 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+				 * by netif_rx_ni(), but in earlier kernels, we need
+				 * to do it manually.
+				 */
+				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+					__FUNCTION__, __LINE__);
+
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+				dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB_RXP)
+#ifdef ENABLE_DHD_GRO
+				if (dhd_gro_enable && !skb_cloned(skb) &&
+					ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+					napi_gro_receive(&dhd->rx_napi_struct, skb);
+				} else {
+					netif_receive_skb(skb);
+				}
+#else
+				netif_receive_skb(skb);
+#endif /* ENABLE_DHD_GRO */
+#else /* !defined(DHD_LB_RXP) */
+				netif_rx_ni(skb);
+#endif /* !defined(DHD_LB_RXP) */
+			}
+		}
+	}
+
+	if (dhd->rxthread_enabled && skbhead)
+		dhd_sched_rxf(dhdp, skbhead);
+
+#if defined(OEM_ANDROID)
+	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
+	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
+#endif /* OEM_ANDROID */
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+	return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+
+	if (dhdp->tput_data.tput_test_running) {
+
+		dhdp->batch_tx_pkts_cmpl++;
+
+		/* don't count the stop pkt */
+		if (success &&
+			dhdp->batch_tx_pkts_cmpl <= dhdp->batch_tx_num_pkts)
+			dhdp->tput_data.pkts_good++;
+		else if (!success)
+			dhdp->tput_data.pkts_bad++;
+
+		/* we don't care about the stop packet in the tput test */
+		if (dhdp->batch_tx_pkts_cmpl == dhdp->batch_tx_num_pkts) {
+			dhdp->tput_stop_ts = OSL_SYSUPTIME_US();
+			dhdp->tput_data.pkts_cmpl += dhdp->batch_tx_pkts_cmpl;
+			dhdp->tput_data.num_pkts += dhdp->batch_tx_num_pkts;
+			dhd_os_tput_test_wake(dhdp);
+		}
+	}
+	/* XXX where does this stuff belong to? */
+	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+	/* XXX Use packet tag when it is available to identify its type */
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type = ntoh16(eh->ether_type);
+
+	if (type == ETHER_TYPE_802_1X) {
+		atomic_dec(&dhd->pend_8021x_cnt);
+	}
+
+#ifdef PROP_TXSTATUS
+	if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
+		dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
+		uint datalen = PKTLEN(dhd->pub.osh, txp);
+		if (ifp != NULL) {
+			if (success) {
+				dhd->pub.tx_packets++;
+				ifp->stats.tx_packets++;
+				ifp->stats.tx_bytes += datalen;
+			} else {
+				ifp->stats.tx_dropped++;
+			}
+		}
+	}
+#endif
+	if (success) {
+		dhd->pub.tot_txcpl++;
+	}
+}
+
+int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
+	uint timeout_ms)
+{
+	int timeout;
+
+	/* Convert timeout from milliseconds to jiffies */
+	timeout = msecs_to_jiffies(timeout_ms);
+	pub->tput_test_done = FALSE;
+	condition = (uint *)&pub->tput_test_done;
+	timeout = wait_event_timeout(pub->tx_tput_test_wait,
+		(*condition), timeout);
+
+	return timeout;
+}
+
+int dhd_os_tput_test_wake(dhd_pub_t *pub)
+{
+	OSL_SMP_WMB();
+	pub->tput_test_done = TRUE;
+	OSL_SMP_WMB();
+	wake_up(&(pub->tx_tput_test_wait));
+	return 0;
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		goto error;
+	}
+
+	ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
+	if (!ifp) {
+		/* return empty stats */
+		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+		goto error;
+	}
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+	return &ifp->stats;
+
+error:
+	memset(&net->stats, 0, sizeof(net->stats));
+	return &net->stats;
+}
+
+#ifndef BCMDBUS
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
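+		/*
+		 * Illustrative note: the clamp above keeps the priority inside the
+		 * valid SCHED_FIFO range; e.g. with MAX_RT_PRIO == 100, a configured
+		 * dhd_watchdog_prio of 120 is clamped to 99.
+		 */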
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+#ifdef BCMPCIE
+			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+#endif /* BCMPCIE */
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+#ifdef BCMPCIE
+				DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+#endif /* BCMPCIE */
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+				dhd_analyze_sock_flows(dhd, dhd_watchdog_ms);
+				dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+				/* Call the timesync module watchdog */
+				dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+#if defined(BCM_ROUTER_DHD) && defined(CTFPOOL)
+				/* allocate and add a new skb to the pkt pool */
+				if (CTF_ENAB(dhd->cih))
+					osl_ctfpool_replenish(dhd->pub.osh, CTFPOOL_REFILL_THRESH);
+#endif /* BCM_ROUTER_DHD && CTFPOOL */
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+#ifdef DHD_L2_FILTER
+				dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid) {
+					mod_timer(&dhd->timer,
+						jiffies +
+						msecs_to_jiffies(dhd_watchdog_ms) -
+						min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+				}
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			}
+#ifdef BCMPCIE
+			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+#endif /* BCMPCIE */
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	unsigned long flags;
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+
+#ifdef BCMPCIE
+	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+#endif /* BCMPCIE */
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+	/* Call the timesync module watchdog */
+	dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+#ifdef DHD_L2_FILTER
+	dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#ifdef BCMPCIE
+	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+#endif /* BCMPCIE */
+#if defined(BCM_ROUTER_DHD) && defined(CTFPOOL)
+	/* allocate and add a new skb to the pkt pool */
+	if (CTF_ENAB(dhd->cih))
+		osl_ctfpool_replenish(dhd->pub.osh, CTFPOOL_REFILL_THRESH);
+#endif /* BCM_ROUTER_DHD && CTFPOOL */
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+static int
+dhd_rpm_state_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+				if (dhd->pub.up) {
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+					dhd_bus_dw_deassert(&dhd->pub);
+#endif /* PCIE_OOB || PCIE_INB_DW */
+					if (dhd_get_rpm_state(&dhd->pub)) {
+						dhd_runtimepm_state(&dhd->pub);
+					}
+				}
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				time_lapse = jiffies - jiffies_at_start;
+
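+				/*
+				 * Illustrative worked example: the re-arm below
+				 * subtracts the time this pass took, e.g. with
+				 * dhd_runtimepm_ms == 100 and a 30 ms pass the timer
+				 * fires 70 ms out; min() keeps the delay from going
+				 * negative when a pass overruns the period.
+				 */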
+				/* Reschedule the RPM timer */
+				if (dhd->rpm_timer_valid) {
+					mod_timer(&dhd->rpm_timer,
+						jiffies +
+						msecs_to_jiffies(dhd_runtimepm_ms) -
+						min(msecs_to_jiffies(dhd_runtimepm_ms),
+						time_lapse));
+				}
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			}
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_runtimepm(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rpm_ctl.sema);
+		return;
+	}
+}
+
+void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
+{
+	dhd_set_rpm_state(dhdp, FALSE);
+	dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
+}
+
+void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
+{
+	/* Enable Runtime PM except for MFG Mode */
+	if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
+		if (dhd_get_idletime(dhdp)) {
+			dhd_set_rpm_state(dhdp, TRUE);
+		}
+	}
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+static void
+dhd_sched_policy(int prio)
+{
+	struct sched_param param;
+	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+		param.sched_priority = 0;
+		setScheduler(current, SCHED_NORMAL, &param);
+	} else {
+		if (get_scheduler_policy(current) != SCHED_FIFO) {
+			param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+			setScheduler(current, SCHED_FIFO, &param);
+		}
+	}
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+	struct cpufreq_freqs *freq = data;
+	if (dhd) {
+		if (!dhd->new_freq)
+			goto exit;
+		if (val == CPUFREQ_POSTCHANGE) {
+			DHD_ERROR(("cpu freq is changed to %u kHz on CPU %d\n",
+				freq->new, freq->cpu));
+			*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+		}
+	}
+exit:
+	return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
+
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_DPC_CPUCORE
+	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+	/* Run until signal received */
+	while (1) {
+		if (dhd->pub.conf->dpc_cpucore >= 0) {
+			printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
+			dhd->pub.conf->dpc_cpucore = -1;
+		}
+		if (dhd->pub.conf->dhd_dpc_prio >= 0) {
+			struct sched_param param;
+			printf("%s: set dhd_dpc_prio %d\n", __FUNCTION__, dhd->pub.conf->dhd_dpc_prio);
+			param.sched_priority = (dhd->pub.conf->dhd_dpc_prio < MAX_RT_PRIO)?
+				dhd->pub.conf->dhd_dpc_prio:(MAX_RT_PRIO-1);
+			setScheduler(current, SCHED_FIFO, &param);
+			dhd->pub.conf->dhd_dpc_prio = -1;
+		}
+		if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+				int resched_cnt = 0;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+				while (dhd_bus_dpc(dhd->pub.bus)) {
+					/* process all data */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+					resched_cnt++;
+					if (resched_cnt > MAX_RESCHED_CNT) {
+						DHD_INFO(("%s Calling msleep to "
+							"let other processes run.\n",
+							__FUNCTION__));
+						dhd->pub.dhd_bug_on = true;
+						resched_cnt = 0;
+						OSL_SLEEP(1);
+					}
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+				}
+				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		} else {
+			break;
+		}
+	}
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_rxf_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
+	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+	dhd_pub_t *pub = &dhd->pub;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_rxf_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+	/* Run until signal received */
+	while (1) {
+		if (dhd->pub.conf->rxf_cpucore >= 0) {
+			printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
+			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
+			dhd->pub.conf->rxf_cpucore = -1;
+		}
+		if (down_interruptible(&tsk->sema) == 0) {
+			void *skb;
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+			SMP_RD_BARRIER_DEPENDS();
+
+			if (tsk->terminated) {
+				DHD_OS_WAKE_UNLOCK(pub);
+				break;
+			}
+			skb = dhd_rxf_dequeue(pub);
+
+			if (skb == NULL) {
+				continue;
+			}
+			while (skb) {
+				void *skbnext = PKTNEXT(pub->osh, skb);
+				PKTSETNEXT(pub->osh, skb, NULL);
+				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+					__FUNCTION__, __LINE__);
+#if defined(WL_MONITOR) && defined(BCMSDIO)
+				if (dhd_monitor_enabled(pub, 0))
+					dhd_rx_mon_pkt_sdio(pub, skb, 0);
+				else
+#endif /* WL_MONITOR && BCMSDIO */
+				netif_rx_ni(skb);
+				skb = skbnext;
+			}
+#if defined(WAIT_DEQUEUE)
+			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+				OSL_SLEEP(1);
+				watchdogTime = OSL_SYSUPTIME();
+			}
+#endif
+
+			DHD_OS_WAKE_UNLOCK(pub);
+		} else {
+			break;
+		}
+	}
+	complete_and_exit(&tsk->completed, 0);
+}
+
+#ifdef BCMPCIE
+void dhd_dpc_enable(dhd_pub_t *dhdp)
+{
+#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
+	dhd_info_t *dhd;
+
+	if (!dhdp || !dhdp->info)
+		return;
+	dhd = dhdp->info;
+#endif /* DHD_LB_RXP || DHD_LB_TXP */
+
+#ifdef DHD_LB_RXP
+	__skb_queue_head_init(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_TXP
+	skb_queue_head_init(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+}
+#endif /* BCMPCIE */
+
+#ifdef BCMPCIE
+void
+dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		return;
+	}
+
+	dhd = dhdp->info;
+
+	if (!dhd) {
+		return;
+	}
+
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		tasklet_kill(&dhd->tasklet);
+		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+	}
+
+	cancel_delayed_work_sync(&dhd->dhd_dpc_dispatcher_work);
+#ifdef DHD_LB
+#ifdef DHD_LB_RXP
+	cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+	__skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+	cancel_work_sync(&dhd->tx_dispatcher_work);
+	skb_queue_purge(&dhd->tx_pend_queue);
+	tasklet_kill(&dhd->tx_tasklet);
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+}
+
+void
+dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		return;
+	}
+
+	dhd = dhdp->info;
+
+	if (!dhd) {
+		return;
+	}
+
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		tasklet_kill(&dhd->tasklet);
+	}
+}
+#endif /* BCMPCIE */
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+
+	int curr_cpu = get_cpu();
+	put_cpu();
+
+	/* Store current cpu as dpc_cpu */
+	atomic_set(&dhd->dpc_cpu, curr_cpu);
+
+	/* This tasklet can be scheduled from dhd_sched_dpc() below,
+	 * where the wake lock is taken; the tasklet itself is
+	 * initialized in dhd_attach().
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
+		DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
+#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
+		if (dhd_bus_dpc(dhd->pub.bus)) {
+			tasklet_schedule(&dhd->tasklet);
+		}
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+	}
+
+	/* Store as prev_dpc_cpu, which will be used in Rx load balancing for deciding candidacy */
+	atomic_set(&dhd->prev_dpc_cpu, curr_cpu);
+
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+		DHD_OS_WAKE_LOCK(dhdp);
+		/* If the semaphore does not get up,
+		 * wake unlock should be done here
+		 */
+		if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
+			DHD_OS_WAKE_UNLOCK(dhdp);
+		}
+		return;
+	} else {
+		tasklet_schedule(&dhd->tasklet);
+	}
+}
+#endif /* BCMDBUS */
+
+static void
+dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+	int ret = BCME_OK;
+	int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+
+	DHD_OS_WAKE_LOCK(dhdp);
+
+	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
+#ifdef RXF_DEQUEUE_ON_BUSY
+	do {
+		ret = dhd_rxf_enqueue(dhdp, skb);
+		if (ret == BCME_OK || ret == BCME_ERROR)
+			break;
+		else
+			OSL_SLEEP(50); /* waiting for dequeueing */
+	} while (retry-- > 0);
+
+	if (retry <= 0 && ret == BCME_BUSY) {
+		void *skbp = skb;
+
+		while (skbp) {
+			void *skbnext = PKTNEXT(dhdp->osh, skbp);
+			PKTSETNEXT(dhdp->osh, skbp, NULL);
+			bcm_object_trace_opr(skbp, BCM_OBJDBG_REMOVE,
+				__FUNCTION__, __LINE__);
+			netif_rx_ni(skbp);
+			skbp = skbnext;
+		}
+		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
+	} else {
+		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+			up(&dhd->thr_rxf_ctl.sema);
+		}
+	}
+#else /* RXF_DEQUEUE_ON_BUSY */
+	do {
+		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
+			break;
+	} while (1);
+	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rxf_ctl.sema);
+	} else {
+		DHD_OS_WAKE_UNLOCK(dhdp);
+	}
+	return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+}
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef TOE
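+/*
+ * Illustrative note: "toe_ol" holds the TCP-offload-engine component enables
+ * as a bitmap (e.g. TOE_TX_CSUM_OL and TOE_RX_CSUM_OL used by the ethtool
+ * handlers below); dhd_toe_set() also turns the global "toe" iovar on exactly
+ * when at least one component bit is set.
+ */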
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	char buf[32];
+	int ret;
+
+	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+
+	if (ret < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
+				ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	int toe, ret;
+
+	/* Set toe_ol as requested */
+	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+	toe = (toe_ol != 0);
+	ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
+void dhd_set_scb_probe(dhd_pub_t *dhd)
+{
+	wl_scb_probe_t scb_probe;
+	char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
+	int ret;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		return;
+	}
+
+	ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
+	}
+
+	memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
+
+	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
+
+	ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
+		TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
+		return;
+	}
+}
+#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
+
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+	snprintf(info->driver, sizeof(info->driver), "wl");
+	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof(uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any requested driver name */
+		bzero(&info.driver, sizeof(info.driver));
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		if (info.driver[sizeof(info.driver) - 1] != '\0') {
+			DHD_ERROR(("%s: Exceeds the size of info.driver, "
+				"truncating last byte with null\n", __FUNCTION__));
+			info.driver[sizeof(info.driver) - 1] = '\0';
+		}
+		strlcpy(drvname, info.driver, sizeof(drvname));
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		if (strcmp(drvname, "?dhd") == 0) {
+			snprintf(info.driver, sizeof(info.driver),
"dhd"); + strlcpy(info.version, EPI_VERSION_STR, sizeof(info.version)); + } + + /* otherwise, require dongle to be up */ + else if (!dhd->pub.up) { + DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__)); + return -ENODEV; + } + + /* finally, report dongle driver type */ + else if (dhd->pub.iswl) + snprintf(info.driver, sizeof(info.driver), "wl"); + else + snprintf(info.driver, sizeof(info.driver), "xx"); + + snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version); + if (copy_to_user(uaddr, &info, sizeof(info))) + return -EFAULT; + DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__, + (int)sizeof(drvname), drvname, info.driver)); + break; + +#ifdef TOE + /* Get toe offload components from dongle */ + case ETHTOOL_GRXCSUM: + case ETHTOOL_GTXCSUM: + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + edata.cmd = cmd; + edata.data = (toe_cmpnt & csum_dir) ? 1 : 0; + + if (copy_to_user(uaddr, &edata, sizeof(edata))) + return -EFAULT; + break; + + /* Set toe offload components in dongle */ + case ETHTOOL_SRXCSUM: + case ETHTOOL_STXCSUM: + if (copy_from_user(&edata, uaddr, sizeof(edata))) + return -EFAULT; + + /* Read the current settings, update and write back */ + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + if (edata.data != 0) + toe_cmpnt |= csum_dir; + else + toe_cmpnt &= ~csum_dir; + + if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0) + return ret; + + /* If setting TX checksum mode, tell Linux the new mode */ + if (cmd == ETHTOOL_STXCSUM) { + if (edata.data) + dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM; + } + + break; +#endif /* TOE */ + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +/* XXX function to detect that FW is dead and send Event up */ +static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) +{ +#if defined(OEM_ANDROID) + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return FALSE; + } + + if (!dhdp->up) + return FALSE; + +#if (!defined(BCMDBUS) && !defined(BCMPCIE)) + if (dhdp->info->thr_dpc_ctl.thr_pid < 0) { + DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__)); + return FALSE; + } +#endif /* BCMDBUS */ + + if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) || + ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) { +#ifdef BCMPCIE + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n", + __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, + dhdp->d3ackcnt_timeout, error, dhdp->busstate)); +#else + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate)); +#endif /* BCMPCIE */ + if (dhdp->hang_reason == 0) { + if (dhdp->dongle_trap_occured) { + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; +#ifdef BCMPCIE + } else if (dhdp->d3ackcnt_timeout) { + dhdp->hang_reason = dhdp->is_sched_error ? + HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR : + HANG_REASON_D3_ACK_TIMEOUT; +#endif /* BCMPCIE */ + } else { + dhdp->hang_reason = dhdp->is_sched_error ? 
+ HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR : + HANG_REASON_IOCTL_RESP_TIMEOUT; + } + } + printf("%s\n", info_string); + printf("MAC %pM\n", &dhdp->mac); + net_os_send_hang_message(net); + return TRUE; + } +#endif /* OEM_ANDROID */ + return FALSE; +} + +#ifdef WL_MONITOR +bool +dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx) +{ + return (dhd->info->monitor_type != 0); +} + +#ifdef BCMSDIO +static void +dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL) + return; + } + + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dhd->monitor_skb = NULL; + return; + } + + dhd->monitor_skb->protocol = + eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); + dhd->monitor_len = 0; + + netif_rx_ni(dhd->monitor_skb); + + dhd->monitor_skb = NULL; +} +#elif defined(BCMPCIE) + +void +dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; +#ifdef HOST_RADIOTAP_CONV + if (dhd->host_radiotap_conv) { + uint16 len = 0, offset = 0; + monitor_pkt_info_t pkt_info; + + memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker)); + memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t)); + + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL) + return; + } + + len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt), + PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset); + + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dev_kfree_skb(dhd->monitor_skb); + return; + } + + PKTFREE(dhdp->osh, pkt, FALSE); + + if (!len) { + return; + } + + skb_put(dhd->monitor_skb, len); + skb_pull(dhd->monitor_skb, offset); + + dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb, + dhd->monitor_skb->dev); + } + else +#endif /* HOST_RADIOTAP_CONV */ + { + uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >> + BCMPCIE_PKT_FLAGS_MONITOR_SHIFT; + switch (amsdu_flag) { + case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU: + default: + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) + == NULL) + return; + } + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dhd->monitor_skb = NULL; + return; + } + dhd->monitor_skb->protocol = + eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); + dhd->monitor_len = 0; + break; + + case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT: + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) + == NULL) + return; + dhd->monitor_len = 0; + } + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dev_kfree_skb(dhd->monitor_skb); + return; + } + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb), + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len = PKTLEN(dhdp->osh, pkt); + PKTFREE(dhdp->osh, pkt, FALSE); + return; + + case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT: + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len, + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len += PKTLEN(dhdp->osh, pkt); + PKTFREE(dhdp->osh, pkt, FALSE); + return; + + case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT: + 
memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len, + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len += PKTLEN(dhdp->osh, pkt); + PKTFREE(dhdp->osh, pkt, FALSE); + skb_put(dhd->monitor_skb, dhd->monitor_len); + dhd->monitor_skb->protocol = + eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); + dhd->monitor_len = 0; + break; + } + } + + if (skb_headroom(dhd->monitor_skb) < ETHER_HDR_LEN) { + struct sk_buff *skb2; + + DHD_INFO(("%s: insufficient headroom\n", + dhd_ifname(&dhd->pub, ifidx))); + + skb2 = skb_realloc_headroom(dhd->monitor_skb, ETHER_HDR_LEN); + + dev_kfree_skb(dhd->monitor_skb); + if ((dhd->monitor_skb = skb2) == NULL) { + DHD_ERROR(("%s: skb_realloc_headroom failed\n", + dhd_ifname(&dhd->pub, ifidx))); + return; + } + } + PKTPUSH(dhd->pub.osh, dhd->monitor_skb, ETHER_HDR_LEN); + + /* XXX WL here makes sure data is 4-byte aligned? */ + if (in_interrupt()) { + bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx(dhd->monitor_skb); + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + + netif_rx_ni(dhd->monitor_skb); + } + + dhd->monitor_skb = NULL; +} +#endif + +typedef struct dhd_mon_dev_priv { + struct net_device_stats stats; +} dhd_mon_dev_priv_t; + +#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t)) +#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats) + +static int +dhd_monitor_start(struct sk_buff *skb, struct net_device *dev) +{ + PKTFREE(NULL, skb, FALSE); + return 0; +} + +#if defined(BT_OVER_SDIO) + +void +dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp) +{ + dhdp->info->bus_user_count++; +} + +void +dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp) +{ + dhdp->info->bus_user_count--; +} + +/* Return values: + * Success: Returns 0 + * Failure: Returns -1 or errno code + */ +int +dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = 0; + + mutex_lock(&dhd->bus_user_lock); + ++dhd->bus_user_count; + if (dhd->bus_user_count < 0) { + DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (dhd->bus_user_count == 1) { + + dhd->pub.hang_was_sent = 0; + + /* First user, turn on WL_REG, start the bus */ + DHD_ERROR(("%s(): First user, Turn On WL_REG & start the bus", __FUNCTION__)); + + if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) { + /* Enable F1 */ + ret = dhd_bus_resume(dhdp, 0); + if (ret) { + DHD_ERROR(("%s(): Failed to enable F1, err=%d\n", + __FUNCTION__, ret)); + goto exit; + } + } + + /* XXX Some DHD modules (e.g. cfg80211) configure operation mode based on firmware + * name.
This is indeed a hack, but we have to make it work properly until we have + * a better solution + */ + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path); + /* download the firmware, Enable F2 */ + /* TODO: Should be done only in case of FW switch */ + ret = dhd_bus_devreset(dhdp, FALSE); + dhd_bus_resume(dhdp, 1); + if (!ret) { + if (dhd_sync_with_dongle(&dhd->pub) < 0) { + DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__)); + ret = -EFAULT; + } + } else { + DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret)); + } + } else { + DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n", + __FUNCTION__, dhd->bus_user_count)); + } +exit: + mutex_unlock(&dhd->bus_user_lock); + return ret; +} +EXPORT_SYMBOL(dhd_bus_get); + +/* Return values: + * Success: Returns 0 + * Failure: Returns -1 or errno code + */ +int +dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = 0; + BCM_REFERENCE(owner); + + mutex_lock(&dhd->bus_user_lock); + --dhd->bus_user_count; + if (dhd->bus_user_count < 0) { + DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__)); + dhd->bus_user_count = 0; + ret = -1; + goto exit; + } + + if (dhd->bus_user_count == 0) { + /* Last user, stop the bus and turn Off WL_REG */ + DHD_ERROR(("%s(): There are no owners left, Turn Off WL_REG & stop the bus \r\n", + __FUNCTION__)); +#ifdef PROP_TXSTATUS + if (dhd->pub.wlfc_enabled) { + dhd_wlfc_deinit(&dhd->pub); + } +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + if (dhd->pub.pno_state) { + dhd_pno_deinit(&dhd->pub); + } +#endif /* PNO_SUPPORT */ +#ifdef RTT_SUPPORT + if (dhd->pub.rtt_state) { + dhd_rtt_deinit(&dhd->pub); + } +#endif /* RTT_SUPPORT */ + ret = dhd_bus_devreset(dhdp, TRUE); + if (!ret) { + dhd_bus_suspend(dhdp); + wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY); + } + } else { + DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n", + __FUNCTION__, dhd->bus_user_count)); + } +exit: + mutex_unlock(&dhd->bus_user_lock); + return ret; +} +EXPORT_SYMBOL(dhd_bus_put); + +int +dhd_net_bus_get(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_get(&dhd->pub, WLAN_MODULE); +} + +int +dhd_net_bus_put(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_put(&dhd->pub, WLAN_MODULE); +} + +/* + * Function to enable the Bus Clock + * Returns BCME_OK on success and BCME_xxx on failure + * + * This function must be called from a sleepable context + */ +int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + int ret; + + dhd_os_sdlock(dhdp); + /* + * The second argument is TRUE, which means we expect + * the function to "wait" until the clocks are really + * available + */ + ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE); + dhd_os_sdunlock(dhdp); + + return ret; +} +EXPORT_SYMBOL(dhd_bus_clk_enable); + +/* + * Function to disable the Bus Clock + * Returns BCME_OK on success and BCME_xxx on failure + * + * This function must be called from a sleepable context + */ +int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + int ret; + + dhd_os_sdlock(dhdp); + /* + * The second argument is TRUE, which means we expect + * the function to "wait" until the
clocks are really + * disabled + */ + ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE); + dhd_os_sdunlock(dhdp); + + return ret; +} +EXPORT_SYMBOL(dhd_bus_clk_disable); + +/* + * Function to reset bt_use_count counter to zero. + * + * This function must be called from a sleepable context + */ +void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + /* take the lock and reset bt use count */ + dhd_os_sdlock(dhdp); + dhdsdio_reset_bt_use_count(dhdp->bus); + dhd_os_sdunlock(dhdp); +} +EXPORT_SYMBOL(dhd_bus_reset_bt_use_count); + +void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + + dhdp->hang_was_sent = 0; + + dhd_os_send_hang_message(&dhd->pub); +} +EXPORT_SYMBOL(dhd_bus_retry_hang_recovery); + +#endif /* BT_OVER_SDIO */ + +static int +dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return 0; +} + +static struct net_device_stats* +dhd_monitor_get_stats(struct net_device *dev) +{ + return &DHD_MON_DEV_STATS(dev); +} + +static const struct net_device_ops netdev_monitor_ops = +{ + .ndo_start_xmit = dhd_monitor_start, + .ndo_get_stats = dhd_monitor_get_stats, + .ndo_do_ioctl = dhd_monitor_ioctl +}; + +static void +dhd_add_monitor_if(dhd_info_t *dhd) +{ + struct net_device *dev; + char *devname; +#ifdef HOST_RADIOTAP_CONV + dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub; +#endif /* HOST_RADIOTAP_CONV */ + uint32 scan_suppress = FALSE; + int ret = BCME_OK; + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (dhd->monitor_dev) { + DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__)); + return; + } + + dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE); + if (!dev) { + DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__)); + return; + } + + devname = "radiotap"; + + snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit); + +#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */ +#define ARPHRD_IEEE80211_PRISM 802 +#endif + +#ifndef ARPHRD_IEEE80211_RADIOTAP +#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ +#endif /* ARPHRD_IEEE80211_RADIOTAP */ + + dev->type = ARPHRD_IEEE80211_RADIOTAP; + + dev->netdev_ops = &netdev_monitor_ops; + + /* XXX: This is called from the IOCTL path, where rtnl_lock is already held. + * register_netdev() must not be called there: it takes rtnl_lock again and + * would deadlock, so register_netdevice() should be used instead.
+ */ + if (register_netdevice(dev)) { + DHD_ERROR(("%s, register_netdevice failed for %s\n", + __FUNCTION__, dev->name)); + free_netdev(dev); + return; + } + + if (FW_SUPPORTED((&dhd->pub), monitor)) { +#ifdef DHD_PCIE_RUNTIMEPM + /* Disable RuntimePM in monitor mode */ + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__)); +#endif /* DHD_PCIE_RUNTIMEPM */ + scan_suppress = TRUE; + /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */ + ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress, + sizeof(scan_suppress), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret)); + } + } + +#ifdef HOST_RADIOTAP_CONV + bcmwifi_monitor_create(&dhd->monitor_info); + bcmwifi_set_corerev_major(dhd->monitor_info, dhdpcie_get_corerev_major(dhdp)); + bcmwifi_set_corerev_minor(dhd->monitor_info, dhdpcie_get_corerev_minor(dhdp)); +#endif /* HOST_RADIOTAP_CONV */ + dhd->monitor_dev = dev; +} + +static void +dhd_del_monitor_if(dhd_info_t *dhd) +{ + int ret = BCME_OK; + uint32 scan_suppress = FALSE; + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!dhd->monitor_dev) { + DHD_ERROR(("%s: monitor i/f doesn't exist\n", __FUNCTION__)); + return; + } + + if (FW_SUPPORTED((&dhd->pub), monitor)) { +#ifdef DHD_PCIE_RUNTIMEPM + /* Enable RuntimePM */ + DHD_ENABLE_RUNTIME_PM(&dhd->pub); + DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__)); +#endif /* DHD_PCIE_RUNTIMEPM */ + scan_suppress = FALSE; + /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */ + ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress, + sizeof(scan_suppress), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret)); + } + } + + if (dhd->monitor_dev) { + if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) { + free_netdev(dhd->monitor_dev); + } else { + if (rtnl_is_locked()) { + unregister_netdevice(dhd->monitor_dev); + } else { + unregister_netdev(dhd->monitor_dev); + } + } + dhd->monitor_dev = NULL; + } +#ifdef HOST_RADIOTAP_CONV + if (dhd->monitor_info) { + bcmwifi_monitor_delete(dhd->monitor_info); + dhd->monitor_info = NULL; + } +#endif /* HOST_RADIOTAP_CONV */ +} + +void +dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val) +{ + dhd_info_t *dhd = pub->info; + + DHD_TRACE(("%s: val %d\n", __FUNCTION__, val)); + + dhd_net_if_lock_local(dhd); + if (!val) { + /* Delete monitor */ + dhd_del_monitor_if(dhd); + } else { + /* Add monitor */ + dhd_add_monitor_if(dhd); + } + dhd->monitor_type = val; + dhd_net_if_unlock_local(dhd); +} +#endif /* WL_MONITOR */ + +#if defined(DHD_H2D_LOG_TIME_SYNC) +/* + * Helper function: + * Used for RTE console message time syncing with Host printk + */ +void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp) +{ + dhd_info_t *info = dhdp->info; + + /* Ideally the "state" should be always TRUE */ + dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, + DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH, + dhd_deferred_work_rte_log_time_sync, + DHD_WQ_WORK_PRIORITY_LOW); +} + +void +dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd_info = handle; + dhd_pub_t *dhd; + + if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd_info) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd
= &dhd_info->pub; + + /* + * Function to send IOVAR for console timesyncing + * between Host and Dongle. + * If the IOVAR fails, + * 1. dhd_rte_time_sync_ms is set to 0 and + * 2. HOST Dongle console time sync will *not* happen. + */ + dhd_h2d_log_time_sync(dhd); +} +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf) +{ + int bcmerror = BCME_OK; + int buflen = 0; + struct net_device *net; + + net = dhd_idx2net(pub, ifidx); + if (!net) { + bcmerror = BCME_BADARG; + /* + * A bad netdev pointer means the DHD can't communicate + * with higher layers, so just return from here + */ + return bcmerror; + } + + /* check for local dhd ioctl and handle it */ + if (ioc->driver == DHD_IOCTL_MAGIC) { + if (data_buf) { + /* Return error if nvram size is too big */ + if (!bcmstricmp((char *)data_buf, "vars")) { + DHD_ERROR(("%s: nvram len(%d) MAX_NVRAMBUF_SIZE(%d)\n", + __FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE)); + if (ioc->len > MAX_NVRAMBUF_SIZE) { + DHD_ERROR(("%s: nvram len(%d) > MAX_NVRAMBUF_SIZE(%d)\n", + __FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE)); + bcmerror = BCME_BUFTOOLONG; + goto done; + } + buflen = ioc->len; + } else if (!bcmstricmp((char *)data_buf, "dump")) { + buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN_32K); + } else { + /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */ + buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN); + } + } + bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen); + if (bcmerror) + pub->bcmerror = bcmerror; + goto done; + } + + /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */ + if (data_buf) + buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN); + +#ifndef BCMDBUS + /* send to dongle (must be up, and wl). */ + if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) { + if ((!pub->dongle_trap_occured) && allow_delay_fwdl) { + int ret; + if (atomic_read(&exit_in_progress)) { + DHD_ERROR(("%s module exit in progress\n", __func__)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + ret = dhd_bus_start(pub); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } else { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } + + if (!pub->iswl) { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } +#endif /* !BCMDBUS */ + + /* + * Flush the TX queue if required for proper message serialization: + * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to + * prevent M4 encryption and + * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to + * prevent disassoc frame being sent before WPS-DONE frame. + */ + if (ioc->cmd == WLC_SET_KEY || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("wsec_key", data_buf, 9) == 0) || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("bsscfg:wsec_key", data_buf, 15) == 0) || + ioc->cmd == WLC_DISASSOC) + dhd_wait_pend8021x(net); + + if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) && + data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) { + bcmerror = BCME_UNSUPPORTED; + goto done; + } + + /* XXX this typecast is BAD !!!
*/ + bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); + +#ifdef REPORT_FATAL_TIMEOUTS + /* ensure that the timeouts/flags are started/set after the ioctl returns success */ + if (bcmerror == BCME_OK) { + if (ioc->cmd == WLC_SET_WPA_AUTH) { + int wpa_auth; + + wpa_auth = *((int *)ioc->buf); + DHD_INFO(("wpa_auth:%d\n", wpa_auth)); + if (wpa_auth != WPA_AUTH_DISABLED) { + /* If AP uses security then enable + * WLC_E_PSK_SUP event checking + */ + pub->secure_join = TRUE; + } else { + /* If AP is open then disable + * WLC_E_PSK_SUP event checking + */ + pub->secure_join = FALSE; + } + } + + if (ioc->cmd == WLC_SET_AUTH) { + int auth; + auth = *((int *)ioc->buf); + DHD_INFO(("Auth:%d\n", auth)); + + if (auth != WL_AUTH_OPEN_SYSTEM) { + /* If AP uses security then enable + * WLC_E_PSK_SUP event checking + */ + pub->secure_join = TRUE; + } else { + /* If AP is open then disable WLC_E_PSK_SUP event checking */ + pub->secure_join = FALSE; + } + } + + if (ioc->cmd == WLC_SET_SSID) { + bool set_ssid_rcvd = OSL_ATOMIC_READ(pub->osh, &pub->set_ssid_rcvd); + if ((!set_ssid_rcvd) && (!pub->secure_join)) { + dhd_start_join_timer(pub); + } else { + DHD_ERROR(("%s: did not start join timer. " + "open join, set_ssid_rcvd: %d secure_join: %d\n", + __FUNCTION__, set_ssid_rcvd, pub->secure_join)); + OSL_ATOMIC_SET(pub->osh, &pub->set_ssid_rcvd, FALSE); + } + } + + if (ioc->cmd == WLC_SCAN) { + dhd_start_scan_timer(pub, 0); + } + } +#endif /* REPORT_FATAL_TIMEOUTS */ + +done: +#if defined(OEM_ANDROID) + dhd_check_hang(net, pub, bcmerror); +#endif /* OEM_ANDROID */ + + return bcmerror; +} + +#ifdef WL_NANHO +static bool +dhd_nho_iovar_filter(dhd_ioctl_t *ioc) +{ + bool forward_to_nanho = FALSE; + + if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { + if ((ioc->len >= sizeof("nan")) && !strcmp(ioc->buf, "nan")) { + /* forward nan iovar to nanho module */ + forward_to_nanho = TRUE; + } else if ((ioc->len >= sizeof("slot_bss")) && !strcmp(ioc->buf, "slot_bss")) { + /* forward slot_bss iovar to nanho module */ + forward_to_nanho = TRUE; + } + } + return forward_to_nanho; +} + +static int +dhd_nho_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf) +{ + int err; + + if (dhd_nho_iovar_filter(ioc)) { + /* forward iovar to nanho module */ + err = bcm_nanho_iov(pub->nanhoi, ifidx, (wl_ioctl_t *)ioc); + } else { + /* all other iovars bypass nanho and issued through normal path */ + err = dhd_ioctl_process(pub, ifidx, ioc, data_buf); + } + return err; +} + +static int +dhd_nho_ioctl_cb(void *drv_ctx, int ifidx, wl_ioctl_t *ioc, bool drv_lock) +{ + int err; + + if (drv_lock) { + DHD_OS_WAKE_LOCK((dhd_pub_t *)drv_ctx); + } + + err = dhd_ioctl_process((dhd_pub_t *)drv_ctx, ifidx, (dhd_ioctl_t *)ioc, ioc->buf); + + if (drv_lock) { + DHD_OS_WAKE_UNLOCK((dhd_pub_t *)drv_ctx); + } + + return err; +} +#endif /* WL_NANHO */ + +/* XXX For the moment, local ioctls will return BCM errors */ +/* XXX Others return linux codes, need to be changed... */ +/** + * Called by the OS (optionally via a wrapper function). + * @param net Linux per dongle instance + * @param ifr Linux request structure + * @param cmd e.g.
SIOCETHTOOL + */ +static int +dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + void __user *data, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ + int cmd) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_ioctl_t ioc; + int bcmerror = 0; + int ifidx; + int ret; + void *local_buf = NULL; /**< buffer in kernel space */ + void __user *ioc_buf_user = NULL; /**< buffer in user space */ + u16 buflen = 0; + + if (atomic_read(&exit_in_progress)) { + DHD_ERROR(("%s module exit in progress\n", __func__)); + bcmerror = BCME_DONGLE_DOWN; + return OSL_ERROR(bcmerror); + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + +#if defined(OEM_ANDROID) + /* Interface up check for built-in type */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) { + DHD_ERROR(("%s: Interface is down \n", __FUNCTION__)); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return OSL_ERROR(BCME_NOTUP); + } +#endif /* (OEM_ANDROID) */ + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); + +#if defined(WL_STATIC_IF) + /* skip for static ndev when it is down */ + if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) { + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -1; + } +#endif /* WL_STATIC_IF */ + + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -1; + } + +#if defined(WL_WIRELESS_EXT) + /* linux wireless extensions */ + if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { + /* may recurse, do NOT lock */ + ret = wl_iw_ioctl(net, ifr, cmd); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#endif /* defined(WL_WIRELESS_EXT) */ + + if (cmd == SIOCETHTOOL) { + ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } + +#if defined(OEM_ANDROID) + if (cmd == SIOCDEVPRIVATE+1) { + ret = wl_android_priv_cmd(net, ifr); + dhd_check_hang(net, &dhd->pub, ret); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } + +#endif /* OEM_ANDROID */ + + if (cmd != SIOCDEVPRIVATE) { + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -EOPNOTSUPP; + } + + memset(&ioc, 0, sizeof(ioc)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, data, sizeof(wl_ioctl_t))) { + bcmerror = BCME_BADADDR; + goto done; + } + /* To differentiate between wl and dhd read 4 more bytes */ + if ((copy_from_user(&ioc.driver, (char *)data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = BCME_BADADDR; + goto done; + } +#else +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif /* LINUX_VER >= 4.6 */ + { + compat_wl_ioctl_t compat_ioc; + if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) { + bcmerror = BCME_BADADDR; + goto done; + } + ioc.cmd = compat_ioc.cmd; + if (ioc.cmd & WLC_SPEC_FLAG) { + memset(&ioc, 0, sizeof(ioc)); + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + bcmerror = BCME_BADADDR; + goto done; + } + ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */ + + /* To differentiate between wl and dhd read 4 more bytes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = BCME_BADADDR; + goto done; + } + + } else { /* ioc.cmd & WLC_SPEC_FLAG */ + ioc.buf = compat_ptr(compat_ioc.buf); + ioc.len = compat_ioc.len;
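+ /* plain compat path (no WLC_SPEC_FLAG): carry over the remaining fields of the 32-bit ioctl */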
+ ioc.set = compat_ioc.set; + ioc.used = compat_ioc.used; + ioc.needed = compat_ioc.needed; + /* To differentiate between wl and dhd read 4 more bytes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = BCME_BADADDR; + goto done; + } + } /* ioc.cmd & WLC_SPEC_FLAG */ + } else +#endif /* CONFIG_COMPAT */ + { + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + bcmerror = BCME_BADADDR; + goto done; + } +#ifdef CONFIG_COMPAT + ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task */ +#endif + + /* To differentiate between wl and dhd read 4 more bytes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = BCME_BADADDR; + goto done; + } + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ + + if (!capable(CAP_NET_ADMIN)) { + bcmerror = BCME_EPERM; + goto done; + } + + /* Take backup of ioc.buf and restore later */ + ioc_buf_user = ioc.buf; + + if (ioc.len > 0) { + /* + * some IOVARs in DHD require 32K user memory. So allocate the + * maximum local buffer. + * + * For IOVARs which do not require 32K user memory, dhd_ioctl_process() + * takes care of trimming the length to DHD_IOCTL_MAXLEN (16K), so that DHD + * will not overflow the buffer while updating it. + */ + buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN_32K); + if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) { + bcmerror = BCME_NOMEM; + goto done; + } + + if (copy_from_user(local_buf, ioc.buf, buflen)) { + bcmerror = BCME_BADADDR; + goto done; + } + + *((char *)local_buf + buflen) = '\0'; + + /* On some platforms accessing userspace memory + * via ioc.buf causes a kernel panic, so to avoid that + * make ioc.buf point at the kernel space buffer local_buf + */ + ioc.buf = local_buf; + } + +#if defined(OEM_ANDROID) + /* Skip all the non DHD iovars (wl iovars) after f/w hang */ + if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) { + DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } +#endif /* OEM_ANDROID */ + +#ifdef WL_NANHO + bcmerror = dhd_nho_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); +#else + bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); +#endif /* WL_NANHO */ + + /* Restore back userspace pointer to ioc.buf */ + ioc.buf = ioc_buf_user; + if (!bcmerror && buflen && local_buf && ioc.buf) { + if (copy_to_user(ioc.buf, local_buf, buflen)) + bcmerror = -EFAULT; + } + +done: + if (local_buf) + MFREE(dhd->pub.osh, local_buf, buflen+1); + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return OSL_ERROR(bcmerror); +} + +#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP) +/* Flags to indicate if we distinguish power off policy when + * user sets the menu "Keep Wi-Fi on during sleep" to "Never" + */ +int trigger_deep_sleep = 0; +#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */ + +#ifdef FIX_CPU_MIN_CLOCK +static int dhd_init_cpufreq_fix(dhd_info_t *dhd) +{ + if (dhd) { + mutex_init(&dhd->cpufreq_fix); + dhd->cpufreq_fix_status = FALSE; + } + return 0; +} + +static void dhd_fix_cpu_freq(dhd_info_t *dhd) +{ + mutex_lock(&dhd->cpufreq_fix); + if (dhd && !dhd->cpufreq_fix_status) { + pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT,
400000); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_add_requests called\n")); + + dhd->cpufreq_fix_status = TRUE; + } + mutex_unlock(&dhd->cpufreq_fix); +} + +static void dhd_rollback_cpu_freq(dhd_info_t *dhd) +{ + mutex_lock(&dhd->cpufreq_fix); + if (dhd && dhd->cpufreq_fix_status != TRUE) { + mutex_unlock(&dhd->cpufreq_fix); + return; + } + + pm_qos_remove_request(&dhd->dhd_cpu_qos); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_remove_request(&dhd->dhd_bus_qos); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_remove_requests called\n")); + + dhd->cpufreq_fix_status = FALSE; + mutex_unlock(&dhd->cpufreq_fix); +} +#endif /* FIX_CPU_MIN_CLOCK */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int +dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + void __user *data, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ + int cmd) +{ + int error; + dhd_info_t *dhd = DHD_DEV_INFO(net); + + if (atomic_read(&dhd->pub.block_bus)) + return -EHOSTDOWN; + + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0) + return BCME_ERROR; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + error = dhd_ioctl_entry(net, ifr, data, cmd); +#else + error = dhd_ioctl_entry(net, ifr, cmd); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ + + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); + + return error; +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef CONFIG_HAS_WAKELOCK +#define dhd_wake_lock_unlock_destroy(wlock) \ +{ \ + if (dhd_wake_lock_active(wlock)) { \ + dhd_wake_unlock(wlock); \ + } \ + dhd_wake_lock_destroy(wlock); \ +} +#endif /* CONFIG_HAS_WAKELOCK */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT) +#define DHD_TCP_LIMIT_OUTPUT_BYTES (4 * 1024 * 1024) +#ifndef TCP_DEFAULT_LIMIT_OUTPUT +#define TCP_DEFAULT_LIMIT_OUTPUT (256 * 1024) +#endif /* TCP_DEFAULT_LIMIT_OUTPUT */ +void +dhd_ctrl_tcp_limit_output_bytes(int level) +{ + if (level == 0) { + init_net.ipv4.sysctl_tcp_limit_output_bytes = TCP_DEFAULT_LIMIT_OUTPUT; + } else if (level == 1) { + init_net.ipv4.sysctl_tcp_limit_output_bytes = DHD_TCP_LIMIT_OUTPUT_BYTES; + } +} +#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */ + +static int +dhd_stop(struct net_device *net) +{ + int ifidx = 0; + bool skip_reset = false; +#ifdef WL_CFG80211 + unsigned long flags = 0; +#ifdef WL_STATIC_IF + struct bcm_cfg80211 *cfg = wl_get_cfg(net); +#endif /* WL_STATIC_IF */ +#endif /* WL_CFG80211 */ + dhd_info_t *dhd = DHD_DEV_INFO(net); + + DHD_OS_WAKE_LOCK(&dhd->pub); + WL_MSG(net->name, "Enter\n"); + dhd->pub.rxcnt_timeout = 0; + dhd->pub.txcnt_timeout = 0; + +#ifdef BCMPCIE + dhd->pub.d3ackcnt_timeout = 0; +#endif /* BCMPCIE */ + + mutex_lock(&dhd->pub.ndev_op_sync); + if (dhd->pub.up == 0) { + goto exit; + } +#if defined(DHD_HANG_SEND_UP_TEST) + if (dhd->pub.req_hang_type) { + DHD_ERROR(("%s, Clear HANG test request 0x%x\n", + __FUNCTION__, dhd->pub.req_hang_type)); + dhd->pub.req_hang_type = 0; + } +#endif /* DHD_HANG_SEND_UP_TEST */ + +#if defined(WLAN_ACCEL_BOOT) + if (!dhd->wl_accel_force_reg_on && dhd_query_bus_erros(&dhd->pub)) { + DHD_ERROR(("%s: set force reg on\n", __FUNCTION__)); + dhd->wl_accel_force_reg_on = TRUE; + } +#endif /* WLAN_ACCEL_BOOT */ + +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) + dhd_rollback_cpu_freq(dhd); +#endif /* FIX_CPU_MIN_CLOCK */ + + ifidx = dhd_net2idx(dhd, net); +
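/* ifidx is only consumed by the logs below; BCM_REFERENCE keeps it from being flagged as unused when logging is compiled out */ +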
BCM_REFERENCE(ifidx); + + DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx)); + +#if defined(WL_STATIC_IF) && defined(WL_CFG80211) + /* If static if is operational, don't reset the chip */ + if (wl_cfg80211_static_if_active(cfg)) { + WL_MSG(net->name, "static if operational. skip chip reset.\n"); + skip_reset = true; + wl_cfg80211_sta_ifdown(net); + goto exit; + } +#endif /* WL_STATIC_IF && WL_CFG80211 */ +#ifdef DHD_NOTIFY_MAC_CHANGED + if (dhd->pub.skip_dhd_stop) { + WL_MSG(net->name, "skip chip reset.\n"); + skip_reset = true; +#if defined(WL_CFG80211) + wl_cfg80211_sta_ifdown(net); +#endif /* WL_CFG80211 */ + goto exit; + } +#endif /* DHD_NOTIFY_MAC_CHANGED */ + +#ifdef WL_CFG80211 + if (ifidx == 0) { + dhd_if_t *ifp; + wl_cfg80211_down(net); + + DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__)); +#ifdef WL_CFG80211 + /* Disable Runtime PM before interface down */ + DHD_STOP_RPM_TIMER(&dhd->pub); + + DHD_UP_LOCK(&dhd->pub.up_lock, flags); + dhd->pub.up = 0; + DHD_UP_UNLOCK(&dhd->pub.up_lock, flags); +#else + dhd->pub.up = 0; +#endif /* WL_CFG80211 */ +#if defined(BCMPCIE) && defined(CONFIG_ARCH_MSM) + dhd_bus_inform_ep_loaded_to_rc(&dhd->pub, dhd->pub.up); +#endif /* BCMPCIE && CONFIG_ARCH_MSM */ + + ifp = dhd->iflist[0]; + /* + * For CFG80211: Clean up all the left over virtual interfaces + * when the primary Interface is brought down. [ifconfig wlan0 down] + */ + if (!dhd_download_fw_on_driverload) { + DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0); + if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && + (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { + int i; +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + dhd_cleanup_m4_state_work(&dhd->pub, ifidx); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ +#ifdef DHD_PKTDUMP_ROAM + dhd_dump_pkt_clear(&dhd->pub); +#endif /* DHD_PKTDUMP_ROAM */ + + dhd_net_if_lock_local(dhd); + for (i = 1; i < DHD_MAX_IFS; i++) + dhd_remove_if(&dhd->pub, i, FALSE); + + if (ifp && ifp->net) { + dhd_if_del_sta_list(ifp); + } +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = FALSE; + unregister_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = FALSE; + unregister_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + dhd_net_if_unlock_local(dhd); + } +#if 0 + // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process + cancel_work_sync(dhd->dhd_deferred_wq); +#endif + +#ifdef SHOW_LOGTRACE + /* Wait till event logs work/kthread finishes */ + dhd_cancel_logtrace_process_sync(dhd); +#endif /* SHOW_LOGTRACE */ + +#ifdef BTLOG + /* Wait till bt_log_dispatcher_work finishes */ + cancel_work_sync(&dhd->bt_log_dispatcher_work); +#endif /* BTLOG */ + +#ifdef EWP_EDL + cancel_delayed_work_sync(&dhd->edl_dispatcher_work); +#endif + +#if defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + } +#ifdef DHDTCPACK_SUPPRESS + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB_RXP) + if (ifp && ifp->net == dhd->rx_napi_netdev) { + DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + skb_queue_purge(&dhd->rx_napi_queue); + 
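/* drop any frames still queued for NAPI before it is disabled below */ +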
napi_disable(&dhd->rx_napi_struct); + netif_napi_del(&dhd->rx_napi_struct); + dhd->rx_napi_netdev = NULL; + } +#endif /* DHD_LB_RXP */ + } +#endif /* WL_CFG80211 */ + +#ifdef PROP_TXSTATUS + dhd_wlfc_cleanup(&dhd->pub, NULL, 0); +#endif +#ifdef SHOW_LOGTRACE + if (!dhd_download_fw_on_driverload) { + /* Release the skbs from queue for WLC_E_TRACE event */ + dhd_event_logtrace_flush_queue(&dhd->pub); + if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) { + if (dhd->event_data.fmts) { + MFREE(dhd->pub.osh, dhd->event_data.fmts, + dhd->event_data.fmts_size); + } + if (dhd->event_data.raw_fmts) { + MFREE(dhd->pub.osh, dhd->event_data.raw_fmts, + dhd->event_data.raw_fmts_size); + } + if (dhd->event_data.raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.raw_sstr, + dhd->event_data.raw_sstr_size); + } + if (dhd->event_data.rom_raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr, + dhd->event_data.rom_raw_sstr_size); + } + dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT; + } + } +#endif /* SHOW_LOGTRACE */ +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING + /* Stop all ring buffers */ + dhd_os_reset_logging(&dhd->pub); +#endif +#ifdef APF + dhd_dev_apf_delete_filter(net); +#endif /* APF */ + + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + OLD_MOD_DEC_USE_COUNT; +exit: + if (skip_reset == false) { +#ifdef WL_ESCAN + if (ifidx == 0) { + wl_escan_down(net); + } +#endif /* WL_ESCAN */ + if (ifidx == 0 && !dhd_download_fw_on_driverload) { +#if defined(WLAN_ACCEL_BOOT) + wl_android_wifi_accel_off(net, dhd->wl_accel_force_reg_on); +#else +#if defined (BT_OVER_SDIO) + dhd_bus_put(&dhd->pub, WLAN_MODULE); + wl_android_set_wifi_on_flag(FALSE); +#else + wl_android_wifi_off(net, TRUE); +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_dettach_netdev(net, ifidx); +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_ESCAN + wl_escan_event_dettach(net, ifidx); +#endif /* WL_ESCAN */ +#ifdef WL_EVENT + wl_ext_event_dettach_netdev(net, ifidx); +#endif /* WL_EVENT */ +#endif /* BT_OVER_SDIO */ +#endif /* WLAN_ACCEL_BOOT */ + } +#ifdef SUPPORT_DEEP_SLEEP + else { + /* CSP#505233: Flags to indicate if we distinguish + * power off policy when user sets the menu + * "Keep Wi-Fi on during sleep" to "Never" + */ + if (trigger_deep_sleep) { + dhd_deepsleep(net, 1); + trigger_deep_sleep = 0; + } + } +#endif /* SUPPORT_DEEP_SLEEP */ + dhd->pub.hang_was_sent = 0; + dhd->pub.hang_was_pending = 0; + + /* Clear country spec for built-in type driver */ + if (!dhd_download_fw_on_driverload) { + dhd->pub.dhd_cspec.country_abbrev[0] = 0x00; + dhd->pub.dhd_cspec.rev = 0; + dhd->pub.dhd_cspec.ccode[0] = 0x00; + } + +#ifdef BCMDBGFS + dhd_dbgfs_remove(); +#endif + } + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + /* Destroy wakelock */ + if (!dhd_download_fw_on_driverload && + (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) && + (skip_reset == false)) { + DHD_OS_WAKE_LOCK_DESTROY(dhd); + dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT) + dhd_ctrl_tcp_limit_output_bytes(0); +#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */ + WL_MSG(net->name, "Exit\n"); + + mutex_unlock(&dhd->pub.ndev_op_sync); + return 0; +} + +#if defined(OEM_ANDROID) && defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \ + defined(USE_INITIAL_SHORT_DWELL_TIME)) +extern bool g_first_broadcast_scan; +#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */ + +#ifdef WL11U +static int dhd_interworking_enable(dhd_pub_t *dhd) +{ + uint32
enable = true; + int ret = BCME_OK; + + ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret)); + } + + return ret; +} +#endif /* WL11U */ + +#if defined(WLAN_ACCEL_BOOT) +void +dhd_verify_firmware_mode_change(dhd_info_t *dhd) +{ + int current_mode = 0; + + /* + * check for the FW change + * previous FW mode - dhd->pub.op_mode remembers the previous mode + * current mode - update fw/nv path, get current FW mode from dhd->fw_path + */ + dhd_update_fw_nv_path(dhd); +#ifdef WL_MONITOR + DHD_INFO(("%s : check monitor mode with fw_path : %s\n", __FUNCTION__, dhd->fw_path)); + + if (strstr(dhd->fw_path, "_mon") != NULL) { + DHD_ERROR(("%s : monitor mode is enabled, set force reg on", __FUNCTION__)); + dhd->wl_accel_force_reg_on = TRUE; + return; + } else if (dhd->pub.monitor_enable == TRUE) { + DHD_ERROR(("%s : monitor was enabled, changed to other fw_mode", __FUNCTION__)); + dhd->wl_accel_force_reg_on = TRUE; + return; + } +#endif /* WL_MONITOR */ + current_mode = dhd_get_fw_mode(dhd); + + DHD_ERROR(("%s: current_mode 0x%x, prev_opmode 0x%x", __FUNCTION__, + current_mode, dhd->pub.op_mode)); + + if (!(dhd->pub.op_mode & current_mode)) { + DHD_ERROR(("%s: firmware path has changed, set force reg on", __FUNCTION__)); + dhd->wl_accel_force_reg_on = TRUE; + } +} + +#ifndef DHD_FS_CHECK_RETRY_DELAY_MS +#define DHD_FS_CHECK_RETRY_DELAY_MS 3000 +#endif + +#ifndef DHD_FS_CHECK_RETRIES +#define DHD_FS_CHECK_RETRIES 3 +#endif + +static bool +dhd_check_filesystem_is_up(void) +{ + struct file *fp; + const char *clm = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH; + fp = filp_open(clm, O_RDONLY, 0); + + if (IS_ERR(fp)) { + DHD_ERROR(("%s: filp_open(%s) failed(%d) schedule wl_accel_work\n", + __FUNCTION__, clm, (int)IS_ERR(fp))); + return FALSE; + } + filp_close(fp, NULL); + + return TRUE; +} + +static void +dhd_wifi_accel_on_work_cb(struct work_struct *work) +{ + int ret = 0; + struct delayed_work *dw = to_delayed_work(work); + struct dhd_info *dhd; + struct net_device *net; + + /* Ignore compiler warnings due to -Werror=cast-qual */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd = container_of(dw, struct dhd_info, wl_accel_work); + GCC_DIAGNOSTIC_POP(); + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* Initialise wl_accel_force_reg_on to TRUE; it will be made FALSE at the end */ + dhd->wl_accel_force_reg_on = TRUE; + + if (!dhd_check_filesystem_is_up()) { + if (!dhd->fs_check_retry--) { + DHD_ERROR(("%s: max retry reached, BACKOFF\n", __FUNCTION__)); + return; + } + schedule_delayed_work(&dhd->wl_accel_work, + msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS)); + return; + } + + net = dhd->iflist[0]->net; + + /* + * Keep wlan turned on and download firmware during bootup + * by making g_wifi_on = FALSE + */ + ret = wl_android_wifi_on(net); + if (ret) { + DHD_ERROR(("%s: wl_android_wifi_on failed(%d)\n", __FUNCTION__, ret)); + goto fail; + } + + /* Disable host access from dongle */ + ret = dhd_wl_ioctl_set_intiovar(&dhd->pub, "bus:host_access", 0, WLC_SET_VAR, TRUE, 0); + if (ret) { + /* Proceed even if iovar fails for backward compatibility */ + DHD_ERROR(("%s: bus:host_access(0) failed(%d)\n", __FUNCTION__, ret)); + } + + /* After bootup keep in suspend state */ + ret = dhd_net_bus_suspend(net); + if (ret) { + DHD_ERROR(("%s: dhd_net_bus_suspend failed(%d)\n", __FUNCTION__, ret)); + goto fail; + } + + /* Set wl_accel_force_reg_on to FALSE; it will be set again for the Big Hammer case */ + dhd->wl_accel_force_reg_on = FALSE;
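+ /* note: the success path falls through to the fail label below, which only marks accel boot-on as done */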
+ +fail: + /* mark wl_accel_boot_on_done for dhd_open to proceed */ + dhd->wl_accel_boot_on_done = TRUE; + return; + +} +#endif /* WLAN_ACCEL_BOOT */ + +int +dhd_open(struct net_device *net) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); +#ifdef TOE + uint32 toe_ol; +#endif + int ifidx; + int32 ret = 0; +#if defined(OOB_INTR_ONLY) + uint32 bus_type = -1; + uint32 bus_num = -1; + uint32 slot_num = -1; + wifi_adapter_info_t *adapter = NULL; +#endif +#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT) + int bytes_written = 0; +#endif + int retry = POWERUP_MAX_RETRY; + +#if defined(PREVENT_REOPEN_DURING_HANG) + /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */ + if (dhd->pub.hang_was_sent == 1) { + DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); + /* Force to bring down WLAN interface in case dhd_stop() is not called + * from the upper layer when HANG event is triggered. + */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) { + DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__)); + dhd_stop(net); + } else { + return -1; + } + } +#endif /* PREVENT_REOPEN_DURING_HANG */ + + mutex_lock(&dhd->pub.ndev_op_sync); + +#ifdef SCAN_SUPPRESS + wl_ext_reset_scan_busy(&dhd->pub); +#endif + + if (dhd->pub.up == 1) { + /* already up */ + WL_MSG(net->name, "Primary net_device is already up\n"); + mutex_unlock(&dhd->pub.ndev_op_sync); + return BCME_OK; + } + + if (!dhd_download_fw_on_driverload) { +#if defined(WLAN_ACCEL_BOOT) + if (dhd->wl_accel_boot_on_done == FALSE) { +#if defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH) + dhd_wifi_accel_on_work_cb(&dhd->wl_accel_work.work); +#else + DHD_ERROR(("%s: WLAN accel boot not done yet\n", __FUNCTION__)); + mutex_unlock(&dhd->pub.ndev_op_sync); + return -1; +#endif /* WLAN_ACCEL_SKIP_WQ_IN_ATTACH */ + } + if (!dhd->wl_accel_force_reg_on && dhd_query_bus_erros(&dhd->pub)) { + DHD_ERROR(("%s: set force reg on\n", __FUNCTION__)); + dhd->wl_accel_force_reg_on = TRUE; + } +#endif /* WLAN_ACCEL_BOOT */ + if (!dhd_driver_init_done) { + DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__)); + mutex_unlock(&dhd->pub.ndev_op_sync); + return -1; + } + } + + WL_MSG(net->name, "Enter\n"); + DHD_ERROR(("%s\n", dhd_version)); + /* Init wakelock */ + if (!dhd_download_fw_on_driverload) { + if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + +#ifdef SHOW_LOGTRACE + skb_queue_head_init(&dhd->evt_trace_queue); + + if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) { + ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data); + if (ret == BCME_OK) { + dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data, + st_str_file_path, map_file_path); + dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data, + rom_st_str_file_path, rom_map_file_path); + dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT; + } + } +#endif /* SHOW_LOGTRACE */ + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + dhd->pub.dongle_trap_occured = 0; +#ifdef BT_OVER_PCIE + dhd->pub.dongle_trap_due_to_bt = 0; +#endif /* BT_OVER_PCIE */ + dhd->pub.hang_was_sent = 0; + dhd->pub.hang_was_pending = 0; + dhd->pub.hang_reason = 0; + dhd->pub.iovar_timeout_occured = 0; +#ifdef PCIE_FULL_DONGLE + dhd->pub.d3ack_timeout_occured = 0; + dhd->pub.livelock_occured = 0; + dhd->pub.pktid_audit_failed = 0; +#endif /* PCIE_FULL_DONGLE */ + dhd->pub.iface_op_failed = 0; + dhd->pub.scan_timeout_occurred = 0; + dhd->pub.scan_busy_occurred = 0; + dhd->pub.smmu_fault_occurred = 0; +#ifdef 
DHD_LOSSLESS_ROAMING + dhd->pub.dequeue_prec_map = ALLPRIO; +#endif +#ifdef DHD_GRO_ENABLE_HOST_CTRL + dhd->pub.permitted_gro = TRUE; +#endif /* DHD_GRO_ENABLE_HOST_CTRL */ +#if 0 + /* + * Force start if ifconfig_up gets called before START command. + * We keep WEXT's wl_control_wl_start to provide backward compatibility. + * This should be removed in the future. + */ + ret = wl_control_wl_start(net); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + +#endif /* 0 */ + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + if (ifidx < 0) { + DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx)); + +#if defined(WLAN_ACCEL_BOOT) + dhd_verify_firmware_mode_change(dhd); +#endif /* WLAN_ACCEL_BOOT */ + + if (ifidx == 0) { + atomic_set(&dhd->pend_8021x_cnt, 0); + if (!dhd_download_fw_on_driverload) { + DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0); +#ifdef WL_EVENT + wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx); +#endif /* WL_EVENT */ +#ifdef WL_ESCAN + wl_escan_event_attach(net, ifidx); +#endif /* WL_ESCAN */ +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx); +#endif /* WL_EXT_IAPSTA */ +#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME) + g_first_broadcast_scan = TRUE; +#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */ +#ifdef SHOW_LOGTRACE + /* dhd_cancel_logtrace_process_sync is called in dhd_stop + * for built-in models.
Need to start logtrace kthread before + * calling wifi on, because once wifi is on, EDL will be in action + * any moment, and if kthread is not active, FW event logs will + * not be available + */ + if (dhd_reinit_logtrace_process(dhd) != BCME_OK) { + goto exit; + } +#endif /* SHOW_LOGTRACE */ +#if defined(WLAN_ACCEL_BOOT) + ret = wl_android_wifi_accel_on(net, dhd->wl_accel_force_reg_on); + /* Enable wl_accel_force_reg_on if ON fails, else disable it */ + if (ret) { + dhd->wl_accel_force_reg_on = TRUE; + } else { + dhd->wl_accel_force_reg_on = FALSE; + } +#else +#if defined(BT_OVER_SDIO) + ret = dhd_bus_get(&dhd->pub, WLAN_MODULE); + wl_android_set_wifi_on_flag(TRUE); +#else + do { + dhd->pub.hang_reason = 0; + ret = wl_android_wifi_on(net); + if (!dhd->pub.hang_reason) { + break; + } + DHD_ERROR(("%s: hang_reason=%d, retry %d\n", + __FUNCTION__, dhd->pub.hang_reason, retry)); + wl_android_wifi_off(net, TRUE); + } while (retry-- > 0); +#endif /* BT_OVER_SDIO */ +#endif /* WLAN_ACCEL_BOOT */ + if (ret != 0) { + DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n", + __FUNCTION__, ret)); + ret = -1; + goto exit; + } + } +#ifdef SUPPORT_DEEP_SLEEP + else { + /* Flags to indicate if we distingish + * power off policy when user set the memu + * "Keep Wi-Fi on during sleep" to "Never" + */ + if (trigger_deep_sleep) { +#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME) + g_first_broadcast_scan = TRUE; +#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */ + dhd_deepsleep(net, 0); + trigger_deep_sleep = 0; + } + } +#endif /* SUPPORT_DEEP_SLEEP */ +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) { + dhd_init_cpufreq_fix(dhd); + dhd_fix_cpu_freq(dhd); + } +#endif /* FIX_CPU_MIN_CLOCK */ +#if defined(OOB_INTR_ONLY) + if (dhd->pub.conf->dpc_cpucore >= 0) { + dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num); + adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); + if (adapter) { + printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore); + irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore)); + } + } +#endif + + if (dhd->pub.busstate != DHD_BUS_DATA) { +#ifdef BCMDBUS + dhd_set_path(&dhd->pub); + DHD_MUTEX_UNLOCK(); + wait_event_interruptible_timeout(dhd->adapter->status_event, + wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY), + msecs_to_jiffies(DHD_FW_READY_TIMEOUT)); + DHD_MUTEX_LOCK(); + if ((ret = dbus_up(dhd->pub.bus)) != 0) { + DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret)); + goto exit; + } else { + dhd->pub.busstate = DHD_BUS_DATA; + } + if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + goto exit; + } +#else + /* try to bring up bus */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) { + ret = dhd_bus_start(&dhd->pub); + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); + } +#else + ret = dhd_bus_start(&dhd->pub); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + if (ret) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } +#endif /* !BCMDBUS */ + + } +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_attach_name(net, ifidx); +#endif + +#ifdef BT_OVER_SDIO + if (dhd->pub.is_bt_recovery_required) { + DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__)); + bcmsdh_btsdio_process_dhd_hang_notification(TRUE); 
+ } + dhd->pub.is_bt_recovery_required = FALSE; +#endif + + /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */ + memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + +#ifdef TOE + /* Get current TOE mode from dongle */ + if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) { + dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; + } else { + dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; + } +#endif /* TOE */ + +#ifdef DHD_LB +#ifdef ENABLE_DHD_GRO + dhd->iflist[ifidx]->net->features |= NETIF_F_GRO; +#endif /* ENABLE_DHD_GRO */ + +#ifdef HOST_SFH_LLC + dhd->iflist[ifidx]->net->needed_headroom = DOT11_LLC_SNAP_HDR_LEN; +#endif + +#if defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + if (dhd->rx_napi_netdev == NULL) { + dhd->rx_napi_netdev = dhd->iflist[ifidx]->net; + memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct)); + netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct, + dhd_napi_poll, dhd_napi_weight); + DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s> dhd_napi_weight: %d\n", + __FUNCTION__, &dhd->rx_napi_struct, net, + net->name, dhd_napi_weight)); + napi_enable(&dhd->rx_napi_struct); + DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__)); + skb_queue_head_init(&dhd->rx_napi_queue); + __skb_queue_head_init(&dhd->rx_process_queue); + } /* rx_napi_netdev == NULL */ +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + /* Use the variant that uses locks */ + skb_queue_head_init(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + dhd->dhd_lb_candidacy_override = FALSE; +#endif /* DHD_LB */ + netdev_update_features(net); +#ifdef DHD_PM_OVERRIDE + g_pm_override = FALSE; +#endif /* DHD_PM_OVERRIDE */ +#if defined(WL_CFG80211) + if (unlikely(wl_cfg80211_up(net))) { + DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__)); + ret = -1; + goto exit; + } + if (!dhd_download_fw_on_driverload) { +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + } + +#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON) + dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE); +#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */ +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) + dhd_irq_set_affinity(&dhd->pub, cpumask_of(0)); +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ +#if defined(NUM_SCB_MAX_PROBE) + dhd_set_scb_probe(&dhd->pub); +#endif /* NUM_SCB_MAX_PROBE */ +#endif /* WL_CFG80211 */ +#ifdef WL_ESCAN + if (unlikely(wl_escan_up(net))) { + DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__)); + ret = -1; + goto exit; + } +#endif /* WL_ESCAN */ +#if defined(ISAM_PREINIT) + if (!dhd_download_fw_on_driverload) { + if (dhd->pub.conf) { + wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written); + wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written); + wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written); + } + } +#endif + } + + dhd->pub.up = 1; +#if defined(BCMPCIE) && defined(CONFIG_ARCH_MSM) + dhd_bus_inform_ep_loaded_to_rc(&dhd->pub, dhd->pub.up); +#endif /* BCMPCIE && CONFIG_ARCH_MSM */ + 
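/* primary interface is now up: start the runtime PM timer and then apply the wl_event/logtrace forwarding flags below */ +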
DHD_START_RPM_TIMER(&dhd->pub); + + if (wl_event_enable) { + /* For wl utility to receive events */ + dhd->pub.wl_event_enabled = true; + } else { + dhd->pub.wl_event_enabled = false; + } + + if (logtrace_pkt_sendup) { + /* For any daemon to receive logtrace */ + dhd->pub.logtrace_pkt_sendup = true; + } else { + dhd->pub.logtrace_pkt_sendup = false; + } + + OLD_MOD_INC_USE_COUNT; + +#ifdef BCMDBGFS + dhd_dbgfs_init(&dhd->pub); +#endif + +exit: + mutex_unlock(&dhd->pub.ndev_op_sync); +#if defined(ENABLE_INSMOD_NO_FW_LOAD) && defined(NO_POWER_OFF_AFTER_OPEN) + dhd_download_fw_on_driverload = TRUE; + dhd_driver_init_done = TRUE; +#elif defined(ENABLE_INSMOD_NO_FW_LOAD) && defined(ENABLE_INSMOD_NO_POWER_OFF) + dhd_download_fw_on_driverload = FALSE; + dhd_driver_init_done = TRUE; +#endif + if (ret) { + dhd_stop(net); + } + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + WL_MSG(net->name, "Exit ret=%d\n", ret); + return ret; +} + +/* + * ndo_open handler for primary ndev + */ +static int +dhd_pri_open(struct net_device *net) +{ + s32 ret; + + DHD_MUTEX_IS_LOCK_RETURN(); + DHD_MUTEX_LOCK(); + ret = dhd_open(net); + if (unlikely(ret)) { + DHD_ERROR(("Failed to open primary dev ret %d\n", ret)); + DHD_MUTEX_UNLOCK(); + return ret; + } + + /* Allow transmit calls */ + dhd_tx_start_queues(net); + WL_MSG(net->name, "tx queue started\n"); + +#if defined(SET_RPS_CPUS) + dhd_rps_cpus_enable(net, TRUE); +#endif + +#if defined(SET_XPS_CPUS) + dhd_xps_cpus_enable(net, TRUE); +#endif + DHD_MUTEX_UNLOCK(); + + return ret; +} + +/* + * ndo_stop handler for primary ndev + */ +static int +dhd_pri_stop(struct net_device *net) +{ + s32 ret; + + /* Set state and stop OS transmissions */ + dhd_tx_stop_queues(net); + WL_MSG(net->name, "tx queue stopped\n"); + + ret = dhd_stop(net); + if (unlikely(ret)) { + DHD_ERROR(("dhd_stop failed: %d\n", ret)); + return ret; + } + + return ret; +} + +#ifdef PCIE_INB_DW +bool +dhd_check_cfg_in_progress(dhd_pub_t *dhdp) +{ +#if defined(WL_CFG80211) + return wl_cfg80211_check_in_progress(dhd_linux_get_primary_netdev(dhdp)); +#endif /* WL_CFG80211 */ + return FALSE; +} +#endif + +#if defined(WL_STATIC_IF) && defined(WL_CFG80211) +/* + * For static I/Fs, the firmware interface init + * is done from the IFF_UP context. + */ +static int +dhd_static_if_open(struct net_device *net) +{ + s32 ret = 0; + struct bcm_cfg80211 *cfg; + struct net_device *primary_netdev = NULL; +#ifdef WLEASYMESH + dhd_info_t *dhd = DHD_DEV_INFO(net); +#endif /* WLEASYMESH */ + + DHD_MUTEX_LOCK(); + cfg = wl_get_cfg(net); + primary_netdev = bcmcfg_to_prmry_ndev(cfg); + + if (!wl_cfg80211_static_if(cfg, net)) { + WL_MSG(net->name, "non-static interface ..do nothing\n"); + ret = BCME_OK; + goto done; + } + + WL_MSG(net->name, "Enter\n"); + /* Ensure fw is initialized. If it is already initialized, + * dhd_open will return success.
+ */ +#ifdef WLEASYMESH + WL_MSG(net->name, "switch to EasyMesh fw\n"); + dhd->pub.conf->fw_type = FW_TYPE_EZMESH; + ret = dhd_stop(primary_netdev); + if (unlikely(ret)) { + printf("===>%s, Failed to close primary dev ret %d\n", __FUNCTION__, ret); + goto done; + } + OSL_SLEEP(1); +#endif /* WLEASYMESH */ + ret = dhd_open(primary_netdev); + if (unlikely(ret)) { + DHD_ERROR(("Failed to open primary dev ret %d\n", ret)); + goto done; + } + + ret = wl_cfg80211_static_if_open(net); + if (ret == BCME_OK) { + /* Allow transmit calls */ + netif_start_queue(net); + } +done: + WL_MSG(net->name, "Exit ret=%d\n", ret); + DHD_MUTEX_UNLOCK(); + return ret; +} + +static int +dhd_static_if_stop(struct net_device *net) +{ + struct bcm_cfg80211 *cfg; + struct net_device *primary_netdev = NULL; + int ret = BCME_OK; + dhd_info_t *dhd = DHD_DEV_INFO(net); + + WL_MSG(net->name, "Enter\n"); + + cfg = wl_get_cfg(net); + if (!wl_cfg80211_static_if(cfg, net)) { + DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name)); + return BCME_OK; + } +#ifdef DHD_NOTIFY_MAC_CHANGED + if (dhd->pub.skip_dhd_stop) { + WL_MSG(net->name, "Exit skip stop\n"); + return BCME_OK; + } +#endif /* DHD_NOTIFY_MAC_CHANGED */ + + /* Ensure queue is disabled */ + netif_tx_disable(net); + + dhd_net_if_lock_local(dhd); + ret = wl_cfg80211_static_if_close(net); + dhd_net_if_unlock_local(dhd); + + if (dhd->pub.up == 0) { + /* If fw is down, return */ + DHD_ERROR(("fw down\n")); + return BCME_OK; + } + /* If the STA iface is not operational, invoke dhd_stop from this + * context. + */ + primary_netdev = bcmcfg_to_prmry_ndev(cfg); +#ifdef WLEASYMESH + if (dhd->pub.conf->fw_type == FW_TYPE_EZMESH) { + WL_MSG(net->name, "switch to STA fw\n"); + dhd->pub.conf->fw_type = FW_TYPE_STA; + } else +#endif /* WLEASYMESH */ + if (!(primary_netdev->flags & IFF_UP)) { + ret = dhd_stop(primary_netdev); + } else { + DHD_ERROR(("Skipped dhd_stop, as sta is operational\n")); + } + WL_MSG(net->name, "Exit ret=%d\n", ret); + + return ret; +} +#endif /* WL_STATIC_IF && WL_CFG80211 */ + +int dhd_do_driver_init(struct net_device *net) +{ + dhd_info_t *dhd = NULL; + int ret = 0; + + if (!net) { + DHD_ERROR(("Primary Interface not initialized \n")); + return -EINVAL; + } + + DHD_MUTEX_IS_LOCK_RETURN(); + DHD_MUTEX_LOCK(); + + /* && defined(OEM_ANDROID) && defined(BCMSDIO) */ + dhd = DHD_DEV_INFO(net); + + /* If driver is already initialized, do nothing + */ + if (dhd->pub.busstate == DHD_BUS_DATA) { + DHD_TRACE(("Driver already Initialized. Nothing to do")); + goto exit; + } + + if (dhd_open(net) < 0) { + DHD_ERROR(("Driver Init Failed \n")); + ret = -1; + goto exit; + } + +exit: + DHD_MUTEX_UNLOCK(); + return ret; +} + +int +dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ + +#ifdef WL_CFG80211 + if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK) + return BCME_OK; +#endif + + /* handle IF event caused by wl commands, SoftAP, WEXT and + * anything else.
This has to be done asynchronously otherwise + * DPC will be blocked (and iovars will timeout as DPC has no chance + * to read the response back) + */ + if (ifevent->ifidx > 0) { + dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strlcpy(if_event->name, name, sizeof(if_event->name)); + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, + DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW); + } + + return BCME_OK; +} + +int +dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ + dhd_if_event_t *if_event; + +#ifdef WL_CFG80211 + if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) + return BCME_OK; +#endif /* WL_CFG80211 */ + + /* handle IF event caused by wl commands, SoftAP, WEXT and + * anything else + */ + if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strlcpy(if_event->name, name, sizeof(if_event->name)); + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL, + dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW); + + return BCME_OK; +} + +int +dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ +#ifdef DHD_UPDATE_INTF_MAC + dhd_if_event_t *if_event; +#endif /* DHD_UPDATE_INTF_MAC */ + +#ifdef WL_CFG80211 + wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx); +#endif /* WL_CFG80211 */ + +#ifdef DHD_UPDATE_INTF_MAC + /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and + * anything else + */ + if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + // construct a change event + if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name); + if_event->event.opcode = WLC_E_IF_CHANGE; + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strncpy(if_event->name, name, IFNAMSIZ); + if_event->name[IFNAMSIZ - 1] = '\0'; + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE, + dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW); +#endif /* DHD_UPDATE_INTF_MAC */ + + return BCME_OK; +} + +#ifdef WL_NATOE +/* Handler to update natoe info and bind with new subscriptions if there is change in config */ +static void +dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + wl_event_data_natoe_t *natoe = event_info; + dhd_nfct_info_t *nfct = dhd->pub.nfct; + + if (event != DHD_WQ_WORK_NATOE_EVENT) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && 
natoe->end_port && + (natoe->start_port < natoe->end_port)) { + /* Rebind subscriptions to start receiving notifications from groups */ + if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) { + dhd_ct_close(nfct); + } + dhd_ct_send_dump_req(nfct); + } else if (!natoe->natoe_active) { + /* Rebind subscriptions to stop receiving notifications from groups */ + if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) { + dhd_ct_close(nfct); + } + } +} + +/* As NATOE enable/disbale event is received, we have to bind with new NL subscriptions. + * Scheduling workq to switch from tasklet context as bind call may sleep in handler + */ +int +dhd_natoe_ct_event(dhd_pub_t *dhd, char *data) +{ + wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data; + + if (dhd->nfct) { + wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info; + uint8 prev_enable = natoe->natoe_active; + + spin_lock_bh(&dhd->nfct_lock); + memcpy(natoe, event_data, sizeof(*event_data)); + spin_unlock_bh(&dhd->nfct_lock); + + if (prev_enable != event_data->natoe_active) { + dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, + (void *)natoe, DHD_WQ_WORK_NATOE_EVENT, + dhd_natoe_ct_event_hanlder, DHD_WQ_WORK_PRIORITY_LOW); + } + return BCME_OK; + } + DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__)); + return BCME_ERROR; +} + +/* Handler to send natoe ioctl to dongle */ +static void +dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event) +{ + dhd_info_t *dhd = handle; + dhd_ct_ioc_t *ct_ioc = event_info; + + if (event != DHD_WQ_WORK_NATOE_IOCTL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) { + DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__)); + } +} + +/* When Netlink message contains port collision info, the info must be sent to dongle FW + * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl + */ +void +dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc) +{ + + dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc, + DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler, + DHD_WQ_WORK_PRIORITY_HIGH); +} +#endif /* WL_NATOE */ + +/* This API maps ndev to ifp inclusive of static IFs */ +static dhd_if_t * +dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev) +{ + dhd_if_t *ifp = NULL; +#ifdef WL_STATIC_IF + u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1); +#else + u32 ifidx = (DHD_MAX_IFS - 1); +#endif /* WL_STATIC_IF */ + + dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info; + do { + ifp = dhdinfo->iflist[ifidx]; + if (ifp && (ifp->net == ndev)) { + DHD_TRACE(("match found for %s. ifidx:%d\n", + ndev->name, ifidx)); + return ifp; + } + } while (ifidx--); + + DHD_ERROR(("no entry found for %s\n", ndev->name)); + return NULL; +} + +bool +dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev) +{ + dhd_if_t *ifp = NULL; + + if (!dhdp || !ndev) { + DHD_ERROR(("wrong input\n")); + ASSERT(0); + return false; + } + + ifp = dhd_get_ifp_by_ndev(dhdp, ndev); + return (ifp && (ifp->static_if == true)); +} + +#ifdef WL_STATIC_IF +/* In some cases, while registering I/F, the actual ifidx, bssidx and dngl_name + * are not known. For e.g: static i/f case. This function lets to update it once + * it is known. 
+ */ +s32 +dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx, + uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info; + dhd_if_t *ifp, *ifp_new; + s32 cur_idx; + dhd_dev_priv_t * dev_priv; + + DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n", + if_state, ifidx)); + + ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS))); + + if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) { + return -ENODEV; + } + cur_idx = ifp->idx; + + if (if_state == NDEV_STATE_OS_IF_CREATED) { + /* mark static if */ + ifp->static_if = TRUE; + return BCME_OK; + } + + ifp_new = dhdinfo->iflist[ifidx]; + if (ifp_new && (ifp_new != ifp)) { + /* There should be only one entry for a given ifidx. */ + DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx)); + ASSERT(0); + dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE; + net_os_send_hang_message(ifp->net); + return -EINVAL; + } + + /* For static if delete case, cleanup the if before ifidx update */ + if ((if_state == NDEV_STATE_FW_IF_DELETED) || + (if_state == NDEV_STATE_FW_IF_FAILED)) { + dhd_cleanup_if(ifp->net); + dev_priv = DHD_DEV_PRIV(ndev); + dev_priv->ifidx = ifidx; + } + + /* update the iflist ifidx slot with cached info */ + dhdinfo->iflist[ifidx] = ifp; + dhdinfo->iflist[cur_idx] = NULL; + + /* update the values */ + ifp->idx = ifidx; + ifp->bssidx = bssidx; + + if (if_state == NDEV_STATE_FW_IF_CREATED) { + dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx); + /* initialize the dongle provided if name */ + if (dngl_name) { + strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ); + } else if (ndev->name[0] != '\0') { + strncpy(ifp->dngl_name, ndev->name, IFNAMSIZ); + } + if (mac != NULL && ifp->set_macaddress == FALSE) { + /* To and fro locations have same size - ETHER_ADDR_LEN */ + (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN); + } +#ifdef WL_EVENT + wl_ext_event_attach_netdev(ndev, ifidx, bssidx); +#endif /* WL_EVENT */ +#ifdef WL_ESCAN + wl_escan_event_attach(ndev, ifidx); +#endif /* WL_ESCAN */ +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_ifadding(ndev, ifidx); + wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx); + wl_ext_iapsta_attach_name(ndev, ifidx); +#endif /* WL_EXT_IAPSTA */ + } + else if (if_state == NDEV_STATE_FW_IF_DELETED) { +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_dettach_netdev(ndev, cur_idx); +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_ESCAN + wl_escan_event_dettach(ndev, cur_idx); +#endif /* WL_ESCAN */ +#ifdef WL_EVENT + wl_ext_event_dettach_netdev(ndev, cur_idx); +#endif /* WL_EVENT */ + } + DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n", + ifidx, cur_idx, if_state)); + return BCME_OK; +} +#endif /* WL_STATIC_IF */ + +/* unregister and free the existing net_device interface (if any) in iflist and + * allocate a new one. the slot is reused. this function does NOT register the + * new interface to linux kernel. 
dhd_register_if does the job + */ +struct net_device* +dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; + dhd_if_t *ifp; + + ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS))); + if (!dhdinfo || ifidx < 0 || ifidx >= (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)) { + return NULL; + } + + ifp = dhdinfo->iflist[ifidx]; + + if (ifp != NULL) { + if (ifp->net != NULL) { + DHD_ERROR(("%s: free existing IF %s ifidx:%d \n", + __FUNCTION__, ifp->net->name, ifidx)); + + if (ifidx == 0) { + /* For primary ifidx (0), there shouldn't be + * any netdev present already. + */ + DHD_ERROR(("Primary ifidx populated already\n")); + ASSERT(0); + return NULL; + } + + dhd_dev_priv_clear(ifp->net); /* clear net_device private */ + + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ +#if defined(CONFIG_TIZEN) + net_stat_tizen_unregister(ifp->net); +#endif /* CONFIG_TIZEN */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + dhd_tx_stop_queues(ifp->net); + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); + } + ifp->net = NULL; + } + } else { + ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t)); + if (ifp == NULL) { + DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t))); + return NULL; + } + } + + memset(ifp, 0, sizeof(dhd_if_t)); + ifp->info = dhdinfo; + ifp->idx = ifidx; + ifp->bssidx = bssidx; +#ifdef DHD_MCAST_REGEN + ifp->mcast_regen_bss_enable = FALSE; +#endif + /* set to TRUE rx_pkt_chainable at alloc time */ + ifp->rx_pkt_chainable = TRUE; + + if (mac != NULL) + memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN); + + /* Allocate etherdev, including space for private structure */ +#ifdef DHD_MQ + if (enable_mq) { + ifp->net = alloc_etherdev_mq(DHD_DEV_PRIV_SIZE, MQ_MAX_QUEUES); + } else { + ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE); + } +#else + ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE); +#endif /* DHD_MQ */ + + if (ifp->net == NULL) { + DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo))); + goto fail; + } + + /* Setup the dhd interface's netdevice private structure. 
 */
+	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
+
+	if (name && name[0]) {
+		strlcpy(ifp->net->name, name, IFNAMSIZ);
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
+	/* as priv_destructor calls free_netdev, no need to set need_free_netdev */
+	ifp->net->needs_free_netdev = 0;
+#ifdef WL_CFG80211
+	if (ifidx == 0)
+		ifp->net->priv_destructor = free_netdev;
+	else
+		ifp->net->priv_destructor = dhd_netdev_free;
+#else
+	ifp->net->priv_destructor = free_netdev;
+#endif /* WL_CFG80211 */
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
+#ifdef WL_CFG80211
+	if (ifidx == 0)
+		ifp->net->destructor = free_netdev;
+	else
+		ifp->net->destructor = dhd_netdev_free;
+#else
+	ifp->net->destructor = free_netdev;
+#endif /* WL_CFG80211 */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
+	strlcpy(ifp->name, ifp->net->name, sizeof(ifp->name));
+	dhdinfo->iflist[ifidx] = ifp;
+
+	/* initialize the dongle provided if name */
+	if (dngl_name) {
+		strlcpy(ifp->dngl_name, dngl_name, sizeof(ifp->dngl_name));
+	} else if (name) {
+		strlcpy(ifp->dngl_name, name, sizeof(ifp->dngl_name));
+	}
+
+#ifdef PCIE_FULL_DONGLE
+	/* Initialize STA info list */
+	INIT_LIST_HEAD(&ifp->sta_list);
+	DHD_IF_STA_LIST_LOCK_INIT(&ifp->sta_list_lock);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_L2_FILTER
+	ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
+	ifp->parp_allnode = TRUE;
+#endif /* DHD_L2_FILTER */
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+	ifp->qosmap_up_table = ((uint8*)MALLOCZ(dhdpub->osh, UP_TABLE_MAX));
+	ifp->qosmap_up_table_enable = FALSE;
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+	DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+	INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+	ifp->recv_reassoc_evt = FALSE;
+	ifp->post_roam_evt = FALSE;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+	INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
+	dhd_reset_tcpsync_info_by_ifp(ifp);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+	return ifp->net;
+
+fail:
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+			if (ifp->net == dhdinfo->rx_napi_netdev) {
+				napi_disable(&dhdinfo->rx_napi_struct);
+				netif_napi_del(&dhdinfo->rx_napi_struct);
+				skb_queue_purge(&dhdinfo->rx_napi_queue);
+				dhdinfo->rx_napi_netdev = NULL;
+			}
+#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
+			dhd_dev_priv_clear(ifp->net);
+			free_netdev(ifp->net);
+			ifp->net = NULL;
+		}
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+	}
+	dhdinfo->iflist[ifidx] = NULL;
+	return NULL;
+}
+
+static void
+dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
+{
+#ifdef PCIE_FULL_DONGLE
+	s32 ifidx = 0;
+	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+#endif /* PCIE_FULL_DONGLE */
+
+	if (ifp != NULL) {
+		if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
+			DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
+			ASSERT(0);
+			return;
+		}
+#ifdef DHD_L2_FILTER
+		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+			NULL, FALSE, dhdp->tickcnt);
+		deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+		ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+		MFREE(dhdp->osh, ifp->qosmap_up_table, UP_TABLE_MAX);
+		ifp->qosmap_up_table = NULL;
+		ifp->qosmap_up_table_enable = FALSE;
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
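+		/* The remaining teardown below flushes the per-interface STA
+		 * list and, on PCIE_FULL_DONGLE builds, deletes the flowrings
+		 * of non-AP virtual interfaces.
+		 */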
+
+		dhd_if_del_sta_list(ifp);
+#ifdef PCIE_FULL_DONGLE
+		/* Delete flowrings of virtual interface */
+		ifidx = ifp->idx;
+		if ((ifidx != 0) &&
+			((if_flow_lkup != NULL) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP))) {
+			dhd_flow_rings_delete(dhdp, ifidx);
+		}
+#endif /* PCIE_FULL_DONGLE */
+	}
+}
+
+void
+dhd_cleanup_if(struct net_device *net)
+{
+	dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
+	dhd_pub_t *dhdp = &dhdinfo->pub;
+	dhd_if_t *ifp;
+
+	ifp = dhd_get_ifp_by_ndev(dhdp, net);
+	if (ifp) {
+		if (ifp->idx >= DHD_MAX_IFS) {
+			DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp->idx));
+			ASSERT(0);
+			return;
+		}
+		dhd_cleanup_ifp(dhdp, ifp);
+	}
+}
+
+/* unregister and free the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+#define DHD_TX_COMPLETION_TIMEOUT 5000
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+	unsigned long flags;
+	long timeout;
+
+	ifp = dhdinfo->iflist[ifidx];
+
+	if (ifp != NULL) {
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+		cancel_delayed_work_sync(&ifp->m4state_work);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+		cancel_work_sync(&ifp->blk_tsfl_work);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+		dhd_cleanup_ifp(dhdpub, ifp);
+#ifdef WL_STATIC_IF
+		if (ifp->static_if) {
+			/* static IF will be handled in detach */
+			DHD_TRACE(("Skip del iface for static interface\n"));
+			return BCME_OK;
+		}
+#endif /* WL_STATIC_IF */
+		if (ifp->net != NULL) {
+			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+			DHD_GENERAL_LOCK(dhdpub, flags);
+			ifp->del_in_progress = true;
+			DHD_GENERAL_UNLOCK(dhdpub, flags);
+
+			/* If TX is in progress, hold the if del */
+			if (DHD_IF_IS_TX_ACTIVE(ifp)) {
+				DHD_INFO(("TX in progress. Wait for it to complete."));
+				timeout = wait_event_timeout(dhdpub->tx_completion_wait,
+					((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
+					msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
+				if (!timeout) {
+					/* Tx completion timeout.
Attempt proceeding ahead */ + DHD_ERROR(("Tx completion timed out!\n")); + ASSERT(0); + } + } else { + DHD_TRACE(("No outstanding TX!\n")); + } + dhdinfo->iflist[ifidx] = NULL; + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + netif_tx_disable(ifp->net); + +#if defined(SET_RPS_CPUS) + custom_rps_map_clear(ifp->net->_rx); +#endif /* SET_RPS_CPUS */ +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) + if (dhdinfo->cih) + ctf_dev_unregister(dhdinfo->cih, ifp->net); +#endif /* BCM_ROUTER_DHD && HNDCTF */ + +#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) + dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_dettach_netdev(ifp->net, ifidx); +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_ESCAN + wl_escan_event_dettach(ifp->net, ifidx); +#endif /* WL_ESCAN */ +#ifdef WL_EVENT + wl_ext_event_dettach_netdev(ifp->net, ifidx); +#endif /* WL_EVENT */ + } + ifp->net = NULL; + DHD_GENERAL_LOCK(dhdpub, flags); + ifp->del_in_progress = false; + DHD_GENERAL_UNLOCK(dhdpub, flags); + } +#ifdef DHD_WMF + dhd_wmf_cleanup(dhdpub, ifidx); +#endif /* DHD_WMF */ + DHD_CUMM_CTR_INIT(&ifp->cumm_ctr); + + MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); + ifp = NULL; + } + + return BCME_OK; +} + +#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)) +int +dhd_set_qosmap_up_table(dhd_pub_t *dhdp, uint32 idx, bcm_tlv_t *qos_map_ie) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + if (!ifp) + return BCME_ERROR; + + wl_set_up_table(ifp->qosmap_up_table, qos_map_ie); + ifp->qosmap_up_table_enable = TRUE; + + return BCME_OK; +} +#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */ + +static struct net_device_ops dhd_ops_pri = { + .ndo_open = dhd_pri_open, + .ndo_stop = dhd_pri_stop, + .ndo_get_stats = dhd_get_stats, +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + .ndo_siocdevprivate = dhd_ioctl_entry_wrapper, +#else + .ndo_do_ioctl = dhd_ioctl_entry_wrapper, +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0) */ + .ndo_start_xmit = dhd_start_xmit_wrapper, +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + .ndo_siocdevprivate = dhd_ioctl_entry, +#else + .ndo_do_ioctl = dhd_ioctl_entry, +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0) */ + .ndo_start_xmit = dhd_start_xmit, +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_set_multicast_list, +#endif +#ifdef DHD_MQ + .ndo_select_queue = dhd_select_queue +#endif +}; + +static struct net_device_ops dhd_ops_virt = { +#if defined(WL_CFG80211) && defined(WL_STATIC_IF) + .ndo_open = dhd_static_if_open, + .ndo_stop = dhd_static_if_stop, +#endif + .ndo_get_stats = dhd_get_stats, +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + .ndo_siocdevprivate = dhd_ioctl_entry_wrapper, +#else + .ndo_do_ioctl = dhd_ioctl_entry_wrapper, +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0) */ + .ndo_start_xmit = dhd_start_xmit_wrapper, +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + .ndo_siocdevprivate = dhd_ioctl_entry, +#else + 
.ndo_do_ioctl = dhd_ioctl_entry, +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0) */ + .ndo_start_xmit = dhd_start_xmit, +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_set_multicast_list, +#endif +}; + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) +static void +dhd_ctf_detach(ctf_t *ci, void *arg) +{ + dhd_info_t *dhd = (dhd_info_t *)arg; + dhd->cih = NULL; + +#ifdef CTFPOOL + /* free the buffers in fast pool */ + osl_ctfpool_cleanup(dhd->pub.osh); +#endif /* CTFPOOL */ + + return; +} +#endif /* BCM_ROUTER_DHD && HNDCTF */ + +int +dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf, + unsigned long buflen) +{ + loff_t wr_posn = *posn; + + if (!fp || !buf || buflen == 0) + return -1; + + if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0) + return -1; + + *posn = wr_posn; + return 0; +} + +#ifdef SHOW_LOGTRACE +int +dhd_os_read_file(void *file, char *buf, uint32 size) +{ + struct file *filep = (struct file *)file; + + if (!file || !buf) + return -1; + + return vfs_read(filep, buf, size, &filep->f_pos); +} + +int +dhd_os_seek_file(void *file, int64 offset) +{ + struct file *filep = (struct file *)file; + if (!file) + return -1; + + /* offset can be -ve */ + filep->f_pos = filep->f_pos + offset; + + return 0; +} + +static int +dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp) +{ + struct file *filep = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + struct kstat stat; + mm_segment_t fs; + int error = 0; +#endif + char *raw_fmts = NULL; + int logstrs_size = 0; + + if (control_logtrace != LOGTRACE_PARSED_FMT) { + DHD_ERROR_NO_HW4(("%s : turned off logstr parsing\n", __FUNCTION__)); + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + filep = filp_open(logstrs_path, O_RDONLY, 0); + + if (IS_ERR(filep)) { + DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + error = vfs_stat(logstrs_path, &stat); + if (error) { + DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } + logstrs_size = (int) stat.size; +#else + logstrs_size = dhd_os_get_image_size(filep); +#endif + if (logstrs_size <= 0) { + DHD_ERROR(("%s: get file size fails %d! 
\n", __FUNCTION__, logstrs_size)); + goto fail; + } + + if (temp->raw_fmts != NULL) { + raw_fmts = temp->raw_fmts; /* reuse already malloced raw_fmts */ + } else { + raw_fmts = MALLOC(osh, logstrs_size); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__)); + goto fail; + } + } + + if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) { + DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path)); + goto fail; + } + + if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp) + == BCME_OK) { + filp_close(filep, NULL); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(fs); +#endif + return BCME_OK; + } + +fail: + if (raw_fmts) { + MFREE(osh, raw_fmts, logstrs_size); + } + if (temp->fmts != NULL) { + MFREE(osh, temp->fmts, temp->num_fmts * sizeof(char *)); + } + +fail1: + if (!IS_ERR(filep)) + filp_close(filep, NULL); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(fs); +#endif + temp->fmts = NULL; + temp->raw_fmts = NULL; + + return BCME_ERROR; +} + +static int +dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end) +{ + struct file *filep = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + mm_segment_t fs; +#endif + int err = BCME_ERROR; + + if (fname == NULL) { + DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__)); + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + filep = filp_open(fname, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname)); + goto fail; + } + + if ((err = dhd_parse_map_file(osh, filep, ramstart, + rodata_start, rodata_end)) < 0) + goto fail; + +fail: + if (!IS_ERR(filep)) + filp_close(filep, NULL); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(fs); +#endif + + return err; +} +#ifdef DHD_COREDUMP +#define PC_FOUND_BIT 0x01 +#define LR_FOUND_BIT 0x02 +#define ALL_ADDR_VAL (PC_FOUND_BIT | LR_FOUND_BIT) +#define READ_NUM_BYTES 1000 +#define DHD_FUNC_STR_LEN 80 +static int +dhd_lookup_map(osl_t *osh, char *fname, uint32 pc, char *pc_fn, + uint32 lr, char *lr_fn) +{ + struct file *filep = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + mm_segment_t fs; +#endif + char *raw_fmts = NULL, *raw_fmts_loc = NULL, *cptr = NULL; + uint32 read_size = READ_NUM_BYTES; + int err = BCME_ERROR; + uint32 addr = 0, addr1 = 0, addr2 = 0; + char type = '?', type1 = '?', type2 = '?'; + char func[DHD_FUNC_STR_LEN] = "\0"; + char func1[DHD_FUNC_STR_LEN] = "\0"; + char func2[DHD_FUNC_STR_LEN] = "\0"; + uint8 count = 0; + int num, len = 0, offset; + + DHD_TRACE(("%s: fname %s pc 0x%x lr 0x%x \n", + __FUNCTION__, fname, pc, lr)); + if (fname == NULL) { + DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Allocate 1 byte more than read_size to terminate it with NULL */ + raw_fmts = MALLOCZ(osh, read_size + 1); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", + __FUNCTION__)); + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + filep = filp_open(fname, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname)); + goto fail; + } + + if (pc_fn == NULL) { + count |= PC_FOUND_BIT; + } + if (lr_fn == NULL) { + count |= LR_FOUND_BIT; + } + while (count != ALL_ADDR_VAL) + { + err = 
dhd_os_read_file(filep, raw_fmts, read_size); + if (err < 0) { + DHD_ERROR(("%s: map file read failed err:%d \n", + __FUNCTION__, err)); + goto fail; + } + + /* End raw_fmts with NULL as strstr expects NULL terminated + * strings + */ + raw_fmts[read_size] = '\0'; + raw_fmts_loc = raw_fmts; + offset = 0; + + while ((count != ALL_ADDR_VAL) && (offset < read_size)) + { + cptr = bcmstrtok(&raw_fmts_loc, "\n", 0); + if (cptr == NULL) { + DHD_TRACE(("%s: cptr is NULL, offset %d" + " raw_fmts_loc %s \n", + __FUNCTION__, offset, raw_fmts_loc)); + break; + } + DHD_TRACE(("%s: %s \n", __FUNCTION__, cptr)); + if ((type2 == 'A') || + (type2 == 'T') || + (type2 == 'W')) { + addr1 = addr2; + type1 = type2; + (void)memcpy_s(func1, DHD_FUNC_STR_LEN, + func2, DHD_FUNC_STR_LEN); + DHD_TRACE(("%s: %x %c %s \n", + __FUNCTION__, addr1, type1, func1)); + } + len = strlen(cptr); + num = sscanf(cptr, "%x %c %79s", &addr, &type, func); + DHD_TRACE(("%s: num %d addr %x type %c func %s \n", + __FUNCTION__, num, addr, type, func)); + if (num == 3) { + addr2 = addr; + type2 = type; + (void)memcpy_s(func2, DHD_FUNC_STR_LEN, + func, DHD_FUNC_STR_LEN); + } + + if (!(count & PC_FOUND_BIT) && + (pc >= addr1 && pc < addr2)) { + if ((cptr = strchr(func1, '$')) != NULL) { + (void)strncpy(func, cptr + 1, + DHD_FUNC_STR_LEN - 1); + } else { + (void)memcpy_s(func, DHD_FUNC_STR_LEN, + func1, DHD_FUNC_STR_LEN); + } + if ((cptr = strstr(func, "__bcmromfn")) + != NULL) { + *cptr = 0; + } + if (pc > addr1) { + sprintf(pc_fn, "%.68s+0x%x", + func, pc - addr1); + } else { + (void)memcpy_s(pc_fn, DHD_FUNC_STR_LEN, + func, DHD_FUNC_STR_LEN); + } + count |= PC_FOUND_BIT; + DHD_INFO(("%s: found addr1 %x pc %x" + " addr2 %x \n", + __FUNCTION__, addr1, pc, addr2)); + } + if (!(count & LR_FOUND_BIT) && + (lr >= addr1 && lr < addr2)) { + if ((cptr = strchr(func1, '$')) != NULL) { + (void)strncpy(func, cptr + 1, + DHD_FUNC_STR_LEN - 1); + } else { + (void)memcpy_s(func, DHD_FUNC_STR_LEN, + func1, DHD_FUNC_STR_LEN); + } + if ((cptr = strstr(func, "__bcmromfn")) + != NULL) { + *cptr = 0; + } + if (lr > addr1) { + sprintf(lr_fn, "%.68s+0x%x", + func, lr - addr1); + } else { + (void)memcpy_s(lr_fn, DHD_FUNC_STR_LEN, + func, DHD_FUNC_STR_LEN); + } + count |= LR_FOUND_BIT; + DHD_INFO(("%s: found addr1 %x lr %x" + " addr2 %x \n", + __FUNCTION__, addr1, lr, addr2)); + } + offset += (len + 1); + } + + if (err < (int)read_size) { + /* + * since we reset file pos back to earlier pos by + * bytes of one line we won't reach EOF. + * The reason for this is if string is spreaded across + * bytes, the read function should not miss it. + * So if ret value is less than read_size, reached EOF + * don't read further + */ + break; + } + memset(raw_fmts, 0, read_size); + /* + * go back to bytes of one line so that we won't miss + * the string and addr even if it comes as splited in next read. 
+ */ + dhd_os_seek_file(filep, -(len + 1)); + DHD_TRACE(("%s: seek %d \n", __FUNCTION__, -(len + 1))); + } + +fail: + if (!IS_ERR(filep)) + filp_close(filep, NULL); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(fs); +#endif + + if (!(count & PC_FOUND_BIT)) { + sprintf(pc_fn, "0x%08x", pc); + } + if (!(count & LR_FOUND_BIT)) { + sprintf(lr_fn, "0x%08x", lr); + } + return err; +} +#endif /* DHD_COREDUMP */ + +static int +dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file) +{ + struct file *filep = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + mm_segment_t fs; +#endif + char *raw_fmts = NULL; + uint32 logstrs_size = 0; + int error = 0; + uint32 ramstart = 0; + uint32 rodata_start = 0; + uint32 rodata_end = 0; + uint32 logfilebase = 0; + + error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end); + if (error != BCME_OK) { + DHD_ERROR(("readmap Error!! \n")); + /* don't do event log parsing in actual case */ + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = NULL; + } + return error; + } + DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + ramstart, rodata_start, rodata_end)); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + filep = filp_open(str_file, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file)); + goto fail; + } + + if (TRUE) { + /* Full file size is huge. Just read required part */ + logstrs_size = rodata_end - rodata_start; + logfilebase = rodata_start - ramstart; + } + + if (logstrs_size == 0) { + DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__)); + goto fail1; + } + + if (strstr(str_file, ram_file_str) != NULL && temp->raw_sstr != NULL) { + raw_fmts = temp->raw_sstr; /* reuse already malloced raw_fmts */ + } else if (strstr(str_file, rom_file_str) != NULL && temp->rom_raw_sstr != NULL) { + raw_fmts = temp->rom_raw_sstr; /* reuse already malloced raw_fmts */ + } else { + raw_fmts = MALLOC(osh, logstrs_size); + + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + } + + if (TRUE) { + error = generic_file_llseek(filep, logfilebase, SEEK_SET); + if (error < 0) { + DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + } + + error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos)); + if (error != logstrs_size) { + DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = raw_fmts; + temp->raw_sstr_size = logstrs_size; + temp->rodata_start = rodata_start; + temp->rodata_end = rodata_end; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = raw_fmts; + temp->rom_raw_sstr_size = logstrs_size; + temp->rom_rodata_start = rodata_start; + temp->rom_rodata_end = rodata_end; + } + + filp_close(filep, NULL); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(fs); +#endif + + return BCME_OK; + +fail: + if (raw_fmts) { + MFREE(osh, raw_fmts, logstrs_size); + } + +fail1: + if (!IS_ERR(filep)) + filp_close(filep, NULL); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(fs); +#endif + + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { 
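+		/* any partially-read buffer was released above; clearing the
+		 * cached pointer forces a fresh allocation on the next call
+		 */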
+ temp->rom_raw_sstr = NULL; + } + + return error; +} /* dhd_init_static_strs_array */ + +#endif /* SHOW_LOGTRACE */ + +#ifdef BT_OVER_PCIE +void request_bt_quiesce(bool quiesce) __attribute__ ((weak)); +void response_bt_quiesce(bool quiesce); + +static void (*request_bt_quiesce_ptr)(bool); +typedef void (*response_bt_quiesce_ptr)(bool); + +response_bt_quiesce_ptr +register_request_bt_quiesce(void (*fnc)(bool)) +{ + request_bt_quiesce_ptr = fnc; + return response_bt_quiesce; +} +EXPORT_SYMBOL(register_request_bt_quiesce); + +void +unregister_request_bt_quiesce(void) +{ + request_bt_quiesce_ptr = NULL; + return; +} +EXPORT_SYMBOL(unregister_request_bt_quiesce); +#endif /* BT_OVER_PCIE */ + +#ifdef DHD_ERPOM +uint enable_erpom = 0; +module_param(enable_erpom, int, 0); + +int +dhd_wlan_power_off_handler(void *handler, unsigned char reason) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handler; + bool dongle_isolation = dhdp->dongle_isolation; + + DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason)); + + if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) { +#if defined(DHD_FW_COREDUMP) + /* save core dump to a file */ + if (dhdp->memdump_enabled) { +#ifdef DHD_SSSR_DUMP + DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__)); + dhdp->collect_sssr = TRUE; +#endif /* DHD_SSSR_DUMP */ + dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT; + dhd_bus_mem_dump(dhdp); + } +#endif /* DHD_FW_COREDUMP */ + } + + /* pause data on all the interfaces */ + dhd_bus_stop_queue(dhdp->bus); + + /* Devreset function will perform FLR again, to avoid it set dongle_isolation */ + dhdp->dongle_isolation = TRUE; + dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */ + dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */ + return 0; +} + +int +dhd_wlan_power_on_handler(void *handler, unsigned char reason) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handler; + bool dongle_isolation = dhdp->dongle_isolation; + + DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason)); + /* Devreset function will perform FLR again, to avoid it set dongle_isolation */ + dhdp->dongle_isolation = TRUE; + dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */ + dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */ + /* resume data on all the interfaces */ + dhd_bus_start_queue(dhdp->bus); + return 0; + +} + +#endif /* DHD_ERPOM */ + +#ifdef BCMDBUS +uint +dhd_get_rxsz(dhd_pub_t *pub) +{ + struct net_device *net = NULL; + dhd_info_t *dhd = NULL; + uint rxsz; + + /* Assign rxsz for dbus_attach */ + dhd = pub->info; + net = dhd->iflist[0]->net; + net->hard_header_len = ETH_HLEN + pub->hdrlen; + rxsz = DBUS_RX_BUFFER_SIZE_DHD(net); + + return rxsz; +} + +void +dhd_set_path(dhd_pub_t *pub) +{ + dhd_info_t *dhd = NULL; + + dhd = pub->info; + + /* try to download image and nvram to the dongle */ + if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) { + DHD_INFO(("%s: fw %s, nv %s, conf %s\n", + __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path)); + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); + } +} +#endif + +/** Called once for each hardware (dongle) instance that this DHD manages */ +dhd_pub_t * +dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen +#ifdef BCMDBUS + , void *data +#endif +) +{ + dhd_info_t *dhd = NULL; + struct net_device *net = NULL; + char if_name[IFNAMSIZ] = {'\0'}; +#ifdef SHOW_LOGTRACE + int ret; +#endif /* SHOW_LOGTRACE */ +#ifdef DHD_ERPOM + pom_func_handler_t *pom_handler; +#endif /* DHD_ERPOM 
 */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	uint32 bus_type = -1;
+	uint32 bus_num = -1;
+	uint32 slot_num = -1;
+	wifi_adapter_info_t *adapter = NULL;
+#elif defined(BCMDBUS)
+	wifi_adapter_info_t *adapter = data;
+#endif
+
+	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef PCIE_FULL_DONGLE
+	ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
+	ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
+#endif /* PCIE_FULL_DONGLE */
+
+	/* will implement get_ids for DBUS later */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+#endif
+
+	/* Allocate primary dhd_info */
+	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+	if (dhd == NULL) {
+		dhd = MALLOC(osh, sizeof(dhd_info_t));
+		if (dhd == NULL) {
+			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+			goto dhd_null_flag;
+		}
+	}
+	memset(dhd, 0, sizeof(dhd_info_t));
+	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
+
+	dhd->pub.osh = osh;
+#ifdef DUMP_IOCTL_IOV_LIST
+	dll_init(&(dhd->pub.dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+	dhd->pub.dhd_console_ms = dhd_console_ms; /* assigns default value */
+
+	dhd->adapter = adapter;
+	dhd->pub.adapter = (void *)adapter;
+#ifdef BT_OVER_SDIO
+	dhd->pub.is_bt_recovery_required = FALSE;
+	mutex_init(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
+
+	g_dhd_pub = &dhd->pub;
+
+#ifdef DHD_DEBUG
+	dll_init(&(dhd->pub.mw_list_head));
+#endif /* DHD_DEBUG */
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+	dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+	dhd->pub.force_country_change = TRUE;
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef CUSTOM_COUNTRY_CODE
+	get_customized_country_code(dhd->adapter,
+		dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
+		dhd->pub.dhd_cflags);
+#endif /* CUSTOM_COUNTRY_CODE */
+#ifndef BCMDBUS
+	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#ifdef DHD_WET
+	dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
+#endif /* DHD_WET */
+#ifdef WL_NANHO
+	/* initialize NANHO host module */
+	if (bcm_nanho_init(&dhd->pub.nanhoi, &dhd->pub,
+		dhd_nho_ioctl_cb, dhd_nho_evt_cb, NULL) != BCME_OK) {
+		goto fail;
+	}
+#endif /* WL_NANHO */
+	/* Initialize thread based operation and lock */
+	sema_init(&dhd->sdsem, 1);
+#endif /* BCMDBUS */
+#if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV)
+	dhd->host_radiotap_conv = FALSE;
+#endif /* WL_MONITOR */
+	dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
+
+	/* Link to info module */
+	dhd->pub.info = dhd;
+
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+	dhd->pub.txoff = FALSE;
+#ifdef CHECK_TRAP_ROT
+	dhd->pub.check_trap_rot = TRUE;
+#else
+	dhd->pub.check_trap_rot = FALSE;
+#endif /* CHECK_TRAP_ROT */
+
+	/* dhd_conf must be attached after linking dhd to dhd->pub.info,
+	 * because dhd_detach will check whether .info is NULL.
+	 */
+	if (dhd_conf_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_conf_attach failed\n"));
+		goto fail;
+	}
+#ifndef BCMDBUS
+	dhd_conf_reset(&dhd->pub);
+	dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
+	dhd_conf_preinit(&dhd->pub);
+#endif /* !BCMDBUS */
+
+	/* Some DHD modules (e.g. cfg80211) configure operation mode based on firmware name.
+ * This is indeed a hack but we have to make it work properly before we have a better + * solution + */ + dhd_update_fw_nv_path(dhd); + + /* Set network interface name if it was provided as module parameter */ + if (iface_name[0]) { + int len; + char ch; + strlcpy(if_name, iface_name, sizeof(if_name)); + len = strlen(if_name); + ch = if_name[len - 1]; + if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) { + strncat(if_name, "%d", sizeof(if_name) - len - 1); + } + } + + /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */ + net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL); + if (net == NULL) { + goto fail; + } + mutex_init(&dhd->pub.ndev_op_sync); + + dhd_state |= DHD_ATTACH_STATE_ADD_IF; +#ifdef DHD_L2_FILTER + /* initialize the l2_filter_cnt */ + dhd->pub.l2_filter_cnt = 0; +#endif + net->netdev_ops = NULL; + + mutex_init(&dhd->dhd_iovar_mutex); + sema_init(&dhd->proto_sem, 1); + +#if defined(DHD_HANG_SEND_UP_TEST) + dhd->pub.req_hang_type = 0; +#endif /* DHD_HANG_SEND_UP_TEST */ + +#ifdef PROP_TXSTATUS + spin_lock_init(&dhd->wlfc_spinlock); + + dhd->pub.skip_fc = dhd_wlfc_skip_fc; + dhd->pub.plat_init = dhd_wlfc_plat_init; + dhd->pub.plat_deinit = dhd_wlfc_plat_deinit; + +#ifdef DHD_WLFC_THREAD + init_waitqueue_head(&dhd->pub.wlfc_wqhead); + dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread"); + if (IS_ERR(dhd->pub.wlfc_thread)) { + DHD_ERROR(("create wlfc thread failed\n")); + goto fail; + } else { + wake_up_process(dhd->pub.wlfc_thread); + } +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + + /* Initialize other structure content */ + /* XXX Some of this goes away, leftover from USB */ + /* XXX Some could also move to bus_init()? */ + init_waitqueue_head(&dhd->ioctl_resp_wait); + init_waitqueue_head(&dhd->pub.tx_tput_test_wait); + init_waitqueue_head(&dhd->d3ack_wait); +#ifdef PCIE_INB_DW + init_waitqueue_head(&dhd->ds_exit_wait); +#endif /* PCIE_INB_DW */ + init_waitqueue_head(&dhd->ctrl_wait); + init_waitqueue_head(&dhd->dhd_bus_busy_state_wait); + init_waitqueue_head(&dhd->dmaxfer_wait); +#ifdef BT_OVER_PCIE + init_waitqueue_head(&dhd->quiesce_wait); +#endif /* BT_OVER_PCIE */ + init_waitqueue_head(&dhd->pub.tx_completion_wait); + dhd->pub.dhd_bus_busy_state = 0; + /* Initialize the spinlocks */ + spin_lock_init(&dhd->sdlock); + spin_lock_init(&dhd->txqlock); + spin_lock_init(&dhd->dhd_lock); + spin_lock_init(&dhd->txoff_lock); + spin_lock_init(&dhd->rxf_lock); +#ifdef WLTDLS + spin_lock_init(&dhd->pub.tdls_lock); +#endif /* WLTDLS */ +#if defined(RXFRAME_THREAD) + dhd->rxthread_enabled = TRUE; +#endif /* defined(RXFRAME_THREAD) */ + +#ifdef DHDTCPACK_SUPPRESS + spin_lock_init(&dhd->tcpack_lock); +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef DHD_HP2P + spin_lock_init(&dhd->hp2p_lock); +#endif + /* Initialize Wakelock stuff */ + spin_lock_init(&dhd->wakelock_spinlock); + spin_lock_init(&dhd->wakelock_evt_spinlock); + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->wakelock_counter = 0; + /* wakelocks prevent a system from going into a low power state */ +#ifdef CONFIG_HAS_WAKELOCK + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + dhd_wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); + dhd_wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake"); +#endif /* CONFIG_HAS_WAKELOCK */ + +#if defined(OEM_ANDROID) + mutex_init(&dhd->dhd_net_if_mutex); + mutex_init(&dhd->dhd_suspend_mutex); +#if defined(PKT_FILTER_SUPPORT) && 
defined(APF) + mutex_init(&dhd->dhd_apf_mutex); +#endif /* PKT_FILTER_SUPPORT && APF */ +#endif /* defined(OEM_ANDROID) */ + dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + + /* Attach and link in the protocol */ + if (dhd_prot_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_prot_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH; + +#ifdef DHD_TIMESYNC + /* attach the timesync module */ + if (dhd_timesync_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_timesync_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE; +#endif /* DHD_TIMESYNC */ + +#ifdef WL_CFG80211 + spin_lock_init(&dhd->pub.up_lock); + /* Attach and link in the cfg80211 */ + if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) { + DHD_ERROR(("wl_cfg80211_attach failed\n")); + goto fail; + } + + dhd_monitor_init(&dhd->pub); + dhd_state |= DHD_ATTACH_STATE_CFG80211; +#endif + +#ifdef WL_EVENT + if (wl_ext_event_attach(net) != 0) { + DHD_ERROR(("wl_ext_event_attach failed\n")); + goto fail; + } +#endif /* WL_EVENT */ +#ifdef WL_ESCAN + /* Attach and link in the escan */ + if (wl_escan_attach(net) != 0) { + DHD_ERROR(("wl_escan_attach failed\n")); + goto fail; + } +#endif /* WL_ESCAN */ +#ifdef WL_EXT_IAPSTA + if (wl_ext_iapsta_attach(net) != 0) { + DHD_ERROR(("wl_ext_iapsta_attach failed\n")); + goto fail; + } +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_EXT_GENL + if (wl_ext_genl_init(net)) { + DHD_ERROR(("wl_ext_genl_init failed\n")); + goto fail; + } +#endif +#if defined(WL_WIRELESS_EXT) + /* Attach and link in the iw */ + if (wl_iw_attach(net) != 0) { + DHD_ERROR(("wl_iw_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef SHOW_LOGTRACE + ret = dhd_init_logstrs_array(osh, &dhd->event_data); + if (ret == BCME_OK) { + dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path); + dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path, + rom_map_file_path); + dhd_state |= DHD_ATTACH_LOGTRACE_INIT; + } +#endif /* SHOW_LOGTRACE */ + + /* attach debug if support */ + if (dhd_os_dbg_attach(&dhd->pub)) { + DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__)); + goto fail; + } +#ifdef DEBUGABILITY +#if !defined(OEM_ANDROID) && defined(SHOW_LOGTRACE) + /* enable verbose ring to support dump_trace_buf */ + dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0); +#endif /* !OEM_ANDROID && SHOW_LOGTRACE */ + +#if !defined(OEM_ANDROID) && defined(BTLOG) + /* enable bt log ring to support dump_bt_log */ + dhd_os_start_logging(&dhd->pub, BT_LOG_RING_NAME, 3, 0, 0, 0); +#endif /* !OEM_ANDROID && BTLOG */ +#ifdef DBG_PKT_MON + dhd->pub.dbg->pkt_mon_lock = osl_spin_lock_init(dhd->pub.osh); +#ifdef DBG_PKT_MON_INIT_DEFAULT + dhd_os_dbg_attach_pkt_monitor(&dhd->pub); +#endif /* DBG_PKT_MON_INIT_DEFAULT */ +#endif /* DBG_PKT_MON */ + +#endif /* DEBUGABILITY */ + +#ifdef DHD_MEM_STATS + dhd->pub.mem_stats_lock = osl_spin_lock_init(dhd->pub.osh); + dhd->pub.txpath_mem = 0; + dhd->pub.rxpath_mem = 0; +#endif /* DHD_MEM_STATS */ + +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) + dhd->pub.awdl_stats_lock = osl_spin_lock_init(dhd->pub.osh); +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ + +#ifdef DHD_STATUS_LOGGING + dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM, + MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN); + if (dhd->pub.statlog == NULL) { + DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__)); + } +#endif /* DHD_STATUS_LOGGING */ + +#ifdef DHD_LOG_DUMP 
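+	/* dhd_log_dump_init below sets up the debug log-dump state for this
+	 * instance, ahead of the optional packet-dump/logging helpers
+	 */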
+ dhd_log_dump_init(&dhd->pub); +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_PKTDUMP_ROAM + dhd_dump_pkt_init(&dhd->pub); +#endif /* DHD_PKTDUMP_ROAM */ +#ifdef DHD_PKT_LOGGING + dhd_os_attach_pktlog(&dhd->pub); +#endif /* DHD_PKT_LOGGING */ + +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT + dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN); + if (dhd->pub.hang_info == NULL) { + DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__)); + } +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */ + if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) { + DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA)); + goto fail; + } + +#ifdef BCM_ROUTER_DHD +#if defined(HNDCTF) + dhd->cih = ctf_attach(dhd->pub.osh, "dhd", &dhd_msg_level, dhd_ctf_detach, dhd); + if (!dhd->cih) { + DHD_ERROR(("%s: ctf_attach() failed\n", __FUNCTION__)); + } +#ifdef CTFPOOL + { + int poolsz = RXBUFPOOLSZ; + if (CTF_ENAB(dhd->cih) && (osl_ctfpool_init(dhd->pub.osh, + poolsz, RXBUFSZ + BCMEXTRAHDROOM) < 0)) { + DHD_ERROR(("%s: osl_ctfpool_init() failed\n", __FUNCTION__)); + } + } +#endif /* CTFPOOL */ +#endif /* HNDCTF */ +#endif /* BCM_ROUTER_DHD */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!dhd->tx_wq) { + DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__)); + goto fail; + } + dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!dhd->rx_wq) { + DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__)); + destroy_workqueue(dhd->tx_wq); + dhd->tx_wq = NULL; + goto fail; + } +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifndef BCMDBUS + /* Set up the watchdog timer */ + init_timer_compat(&dhd->timer, dhd_watchdog, dhd); + dhd->default_wd_interval = dhd_watchdog_ms; + + if (dhd_watchdog_prio >= 0) { + /* Initialize watchdog thread */ + PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread"); + if (dhd->thr_wdt_ctl.thr_pid < 0) { + goto fail; + } + + } else { + dhd->thr_wdt_ctl.thr_pid = -1; + } + +#ifdef DHD_PCIE_RUNTIMEPM + /* Setup up the runtime PM Idlecount timer */ + init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd); + dhd->rpm_timer_valid = FALSE; + + dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID; + PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread"); + if (dhd->thr_rpm_ctl.thr_pid < 0) { + goto fail; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + +#ifdef SHOW_LOGTRACE + skb_queue_head_init(&dhd->evt_trace_queue); + + /* Create ring proc entries */ + dhd_dbg_ring_proc_create(&dhd->pub); +#endif /* SHOW_LOGTRACE */ + +#ifdef BTLOG + skb_queue_head_init(&dhd->bt_log_queue); +#endif /* BTLOG */ + +#ifdef BT_OVER_PCIE + mutex_init(&dhd->quiesce_flr_lock); + mutex_init(&dhd->quiesce_lock); +#endif + + /* Set up the bottom half handler */ + if (dhd_dpc_prio >= 0) { + /* Initialize DPC thread */ + PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc"); + if (dhd->thr_dpc_ctl.thr_pid < 0) { + goto fail; + } + } else { + /* use tasklet for dpc */ + tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); + dhd->thr_dpc_ctl.thr_pid = -1; + } + + if (dhd->rxthread_enabled) { + bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND); + /* Initialize RXF thread */ + PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf"); + if (dhd->thr_rxf_ctl.thr_pid < 0) { + goto fail; + } + } +#endif /* !BCMDBUS */ + + dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED; + +#if defined(CONFIG_PM_SLEEP) + if 
(!dhd_pm_notifier_registered) {
+		dhd_pm_notifier_registered = TRUE;
+		dhd->pm_notifier.notifier_call = dhd_pm_callback;
+		dhd->pm_notifier.priority = 10;
+		register_pm_notifier(&dhd->pm_notifier);
+	}
+
+#endif /* CONFIG_PM_SLEEP */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	dhd->pend_ipaddr = 0;
+	if (!dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = TRUE;
+		register_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+	if (!dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = TRUE;
+		register_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#if defined (OEM_ANDROID)
+	INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
+#endif /* OEM_ANDROID */
+#ifdef DEBUG_CPU_FREQ
+	dhd->new_freq = alloc_percpu(int);
+	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMSDIO
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
+#elif defined(BCMPCIE)
+	/* xxx : In case of PCIe-based Samsung Android project, enable TCP ACK Suppress
+	 * when throughput is higher than threshold, following rps_cpus setting.
+	 */
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
+#else
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMSDIO */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef DHD_DEBUG_PAGEALLOC
+	register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+	INIT_DELAYED_WORK(&dhd->dhd_dpc_dispatcher_work, dhd_dpc_tasklet_dispatcher_work);
+
+#if defined(DHD_LB)
+#if defined(DHD_LB_HOST_CTRL)
+	dhd->permitted_primary_cpu = FALSE;
+#endif /* DHD_LB_HOST_CTRL */
+	dhd_lb_set_default_cpus(dhd);
+	DHD_LB_STATS_INIT(&dhd->pub);
+
+	/* Initialize the CPU Masks */
+	if (dhd_cpumasks_init(dhd) == 0) {
+		/* Now we have the current CPU maps, run through candidacy */
+		dhd_select_cpu_candidacy(dhd);
+
+		/* Register the callbacks with the CPU hotplug sub-system */
+		dhd_register_cpuhp_callback(dhd);
+
+	} else {
+		/*
+		 * We are unable to initialize CPU masks, so the candidacy
+		 * algorithm won't run; Load Balancing will still be honoured
+		 * based on the CPUs statically allocated for a given job at init
+		 */
+		dhd->cpu_notifier.notifier_call = NULL;
+		DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
+			__FUNCTION__));
+	}
+
+#ifdef DHD_LB_TXP
+#ifdef DHD_LB_TXP_DEFAULT_ENAB
+	/* Turn ON the feature by default */
+	atomic_set(&dhd->lb_txp_active, 1);
+#else
+	/* Turn OFF the feature by default */
+	atomic_set(&dhd->lb_txp_active, 0);
+#endif /* DHD_LB_TXP_DEFAULT_ENAB */
+#endif /* DHD_LB_TXP */
+
+#ifdef DHD_LB_RXP
+	/* Turn ON the feature by default */
+	atomic_set(&dhd->lb_rxp_active, 1);
+#endif /* DHD_LB_RXP */
+
+	/* Initialize the Load Balancing Tasklets and
Napi object */ +#if defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + skb_queue_head_init(&dhd->rx_napi_queue); + __skb_queue_head_init(&dhd->rx_process_queue); + /* Initialize the work that dispatches NAPI job to a given core */ + INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_work); + DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__)); + /* Initialize the work that dispatches DPC tasklet to a given core */ +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work); + skb_queue_head_init(&dhd->tx_pend_queue); + /* Initialize the work that dispatches TX job to a given core */ + tasklet_init(&dhd->tx_tasklet, + dhd_lb_tx_handler, (ulong)(dhd)); + DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__)); +#endif /* DHD_LB_TXP */ + + dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE; +#endif /* DHD_LB */ + +#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR) + INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn); +#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ + +#ifdef BCMDBG + if (dhd_macdbg_attach(&dhd->pub) != BCME_OK) { + DHD_ERROR(("%s: dhd_macdbg_attach fail\n", __FUNCTION__)); + goto fail; + } +#endif /* BCMDBG */ + +#ifdef REPORT_FATAL_TIMEOUTS + init_dhd_timeouts(&dhd->pub); +#endif /* REPORT_FATAL_TIMEOUTS */ +#if defined(BCMPCIE) + dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN); + if (dhd->pub.extended_trap_data == NULL) { + DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__)); + } +#ifdef DNGL_AXI_ERROR_LOGGING + dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t)); + if (dhd->pub.axi_err_dump == NULL) { + DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__)); + } +#endif /* DNGL_AXI_ERROR_LOGGING */ +#endif /* BCMPCIE */ + +#ifdef SHOW_LOGTRACE + if (dhd_init_logtrace_process(dhd) != BCME_OK) { + goto fail; + } +#endif /* SHOW_LOGTRACE */ + +#ifdef BTLOG + INIT_WORK(&dhd->bt_log_dispatcher_work, dhd_bt_log_process); +#endif /* BTLOG */ + +#ifdef EWP_EDL + INIT_DELAYED_WORK(&dhd->edl_dispatcher_work, dhd_edl_process_work); +#endif + + DHD_SSSR_MEMPOOL_INIT(&dhd->pub); + DHD_SSSR_REG_INFO_INIT(&dhd->pub); + +#ifdef DHD_SDTC_ETB_DUMP + dhd_sdtc_etb_mempool_init(&dhd->pub); +#endif /* DHD_SDTC_ETB_DUMP */ + +#ifdef EWP_EDL + if (host_edl_support) { + if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) { + host_edl_support = FALSE; + } + } +#endif /* EWP_EDL */ + + dhd_init_sock_flows_buf(dhd, dhd_watchdog_ms); + + (void)dhd_sysfs_init(dhd); + +#ifdef WL_NATOE + /* Open Netlink socket for NF_CONNTRACK notifications */ + dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP, + CT_ALL); +#endif /* WL_NATOE */ +#ifdef GDB_PROXY + dhd->pub.gdb_proxy_nodeadman = nodeadman != 0; +#endif /* GDB_PROXY */ + dhd_state |= DHD_ATTACH_STATE_DONE; + dhd->dhd_state = dhd_state; + + dhd_found++; + +#ifdef CSI_SUPPORT + dhd_csi_init(&dhd->pub); +#endif /* CSI_SUPPORT */ + +#ifdef DHD_FW_COREDUMP + /* Set memdump default values */ +#ifdef CUSTOMER_HW4_DEBUG + dhd->pub.memdump_enabled = DUMP_DISABLED; +#elif defined(OEM_ANDROID) +#ifdef DHD_COREDUMP + dhd->pub.memdump_enabled = DUMP_MEMFILE; +#else + dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON; +#endif /* DHD_COREDUMP */ +#else + dhd->pub.memdump_enabled = DUMP_MEMFILE; +#endif /* CUSTOMER_HW4_DEBUG */ + /* Check the memdump capability */ + dhd_get_memdump_info(&dhd->pub); +#endif /* 
DHD_FW_COREDUMP */ + +#ifdef DHD_ERPOM + if (enable_erpom) { + pom_handler = &dhd->pub.pom_wlan_handler; + pom_handler->func_id = WLAN_FUNC_ID; + pom_handler->handler = (void *)g_dhd_pub; + pom_handler->power_off = dhd_wlan_power_off_handler; + pom_handler->power_on = dhd_wlan_power_on_handler; + + dhd->pub.pom_func_register = NULL; + dhd->pub.pom_func_deregister = NULL; + dhd->pub.pom_toggle_reg_on = NULL; + + dhd->pub.pom_func_register = symbol_get(pom_func_register); + dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister); + dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on); + + symbol_put(pom_func_register); + symbol_put(pom_func_deregister); + symbol_put(pom_toggle_reg_on); + + if (!dhd->pub.pom_func_register || + !dhd->pub.pom_func_deregister || + !dhd->pub.pom_toggle_reg_on) { + DHD_ERROR(("%s, enable_erpom enabled through module parameter but " + "POM is not loaded\n", __FUNCTION__)); + ASSERT(0); + goto fail; + } + dhd->pub.pom_func_register(pom_handler); + dhd->pub.enable_erpom = TRUE; + + } +#endif /* DHD_ERPOM */ + +#ifdef DHD_DUMP_MNGR + dhd->pub.dump_file_manage = + (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t)); + if (unlikely(!dhd->pub.dump_file_manage)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dump_file_manage_t\n", __FUNCTION__)); + } +#endif /* DHD_DUMP_MNGR */ + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE + /* Attach the fwtrace */ + if (dhd_fwtrace_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_fwtrace_attach has failed\n")); + goto fail; + } +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +#ifdef RTT_SUPPORT + if (dhd_rtt_attach(&dhd->pub)) { + DHD_ERROR(("dhd_rtt_attach has failed\n")); + goto fail; + } +#endif /* RTT_SUPPORT */ + +#ifdef DHD_TX_PROFILE + if (dhd_tx_profile_attach(&dhd->pub) != BCME_OK) { + DHD_ERROR(("%s:\tdhd_tx_profile_attach has failed\n", __FUNCTION__)); + goto fail; + } +#endif /* defined(DHD_TX_PROFILE) */ + + return &dhd->pub; + +fail: + if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) { + DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n", + __FUNCTION__, dhd_state, &dhd->pub)); + dhd->dhd_state = dhd_state; + dhd_detach(&dhd->pub); + dhd_free(&dhd->pub); + } + +dhd_null_flag: + return NULL; +} + +int dhd_get_fw_mode(dhd_info_t *dhdinfo) +{ + if (strstr(dhdinfo->fw_path, "_apsta") != NULL) + return DHD_FLAG_HOSTAP_MODE; + if (strstr(dhdinfo->fw_path, "_p2p") != NULL) + return DHD_FLAG_P2P_MODE; + if (strstr(dhdinfo->fw_path, "_ibss") != NULL) + return DHD_FLAG_IBSS_MODE; + if (strstr(dhdinfo->fw_path, "_mfg") != NULL) + return DHD_FLAG_MFG_MODE; + + return DHD_FLAG_STA_MODE; +} + +int dhd_bus_get_fw_mode(dhd_pub_t *dhdp) +{ + return dhd_get_fw_mode(dhdp->info); +} + +extern char * nvram_get(const char *name); +bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) +{ + int fw_len; + int nv_len; + int clm_len; + int conf_len; + const char *fw = NULL; + const char *nv = NULL; + const char *clm = NULL; + const char *conf = NULL; +#ifdef DHD_UCODE_DOWNLOAD + int uc_len; + const char *uc = NULL; +#endif /* DHD_UCODE_DOWNLOAD */ + wifi_adapter_info_t *adapter = dhdinfo->adapter; + int fw_path_len = sizeof(dhdinfo->fw_path); + int nv_path_len = sizeof(dhdinfo->nv_path); + + /* Update firmware and nvram path. The path may be from adapter info or module parameter + * The path from adapter info is used for initialization only (as it won't change). + * + * The firmware_path/nvram_path module parameter may be changed by the system at run + * time. 
When it changes we need to copy it to dhdinfo->fw_path. Also Android private + * command may change dhdinfo->fw_path. As such we need to clear the path info in + * module parameter after it is copied. We won't update the path until the module parameter + * is changed again (first character is not '\0') + */ + + /* set default firmware and nvram path for built-in type driver */ +// if (!dhd_download_fw_on_driverload) { +#ifdef CONFIG_BCMDHD_FW_PATH + fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH; +#endif /* CONFIG_BCMDHD_FW_PATH */ +#ifdef CONFIG_BCMDHD_NVRAM_PATH + nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH; +#endif /* CONFIG_BCMDHD_NVRAM_PATH */ +// } + + /* check if we need to initialize the path */ + if (dhdinfo->fw_path[0] == '\0') { + if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0') + fw = adapter->fw_path; + + } + if (dhdinfo->nv_path[0] == '\0') { + if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0') + nv = adapter->nv_path; + } + if (dhdinfo->clm_path[0] == '\0') { + if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0') + clm = adapter->clm_path; + } + if (dhdinfo->conf_path[0] == '\0') { + if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0') + conf = adapter->conf_path; + } + + /* Use module parameter if it is valid, EVEN IF the path has not been initialized + * + * TODO: need a solution for multi-chip, can't use the same firmware for all chips + */ + if (firmware_path[0] != '\0') + fw = firmware_path; + + if (nvram_path[0] != '\0') + nv = nvram_path; + if (clm_path[0] != '\0') + clm = clm_path; + if (config_path[0] != '\0') + conf = config_path; + +#ifdef DHD_UCODE_DOWNLOAD + if (ucode_path[0] != '\0') + uc = ucode_path; +#endif /* DHD_UCODE_DOWNLOAD */ + +#ifdef BCM_ROUTER_DHD + if (!fw) { + char var[32]; + + snprintf(var, sizeof(var), "firmware_path%d", dhdinfo->unit); + fw = nvram_get(var); + } + if (!nv) { + char var[32]; + + snprintf(var, sizeof(var), "nvram_path%d", dhdinfo->unit); + nv = nvram_get(var); + } + DHD_ERROR(("dhd:%d: fw path:%s nv path:%s\n", dhdinfo->unit, fw, nv)); +#endif + + if (fw && fw[0] != '\0') { + fw_len = strlen(fw); + if (fw_len >= fw_path_len) { + DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n")); + return FALSE; + } + strlcpy(dhdinfo->fw_path, fw, fw_path_len); + } + if (nv && nv[0] != '\0') { + nv_len = strlen(nv); + if (nv_len >= nv_path_len) { + DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n")); + return FALSE; + } + memset(dhdinfo->nv_path, 0, nv_path_len); + strlcpy(dhdinfo->nv_path, nv, nv_path_len); +#ifdef DHD_USE_SINGLE_NVRAM_FILE + /* Remove "_net" or "_mfg" tag from current nvram path */ + { + char *nvram_tag = "nvram_"; + char *ext_tag = ".txt"; + char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len); + bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) + + strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len); + if (valid_buf) { + char *sp = sp_nvram + strlen(nvram_tag) - 1; + uint32 padding_size = (uint32)(dhdinfo->nv_path + + nv_path_len - sp); + memset(sp, 0, padding_size); + strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag)); + nv_len = strlen(dhdinfo->nv_path); + DHD_INFO(("%s: new nvram path = %s\n", + __FUNCTION__, dhdinfo->nv_path)); + } else if (sp_nvram) { + DHD_ERROR(("%s: buffer space for nvram path is not enough\n", + __FUNCTION__)); + return FALSE; + } else { + DHD_ERROR(("%s: Couldn't find the nvram tag. 
current" + " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path)); + } + } +#endif /* DHD_USE_SINGLE_NVRAM_FILE */ + } + if (clm && clm[0] != '\0') { + clm_len = strlen(clm); + if (clm_len >= sizeof(dhdinfo->clm_path)) { + DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n")); + return FALSE; + } + strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path)); + if (dhdinfo->clm_path[clm_len-1] == '\n') + dhdinfo->clm_path[clm_len-1] = '\0'; + } + if (conf && conf[0] != '\0') { + conf_len = strlen(conf); + if (conf_len >= sizeof(dhdinfo->conf_path)) { + DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n")); + return FALSE; + } + strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path)); + if (dhdinfo->conf_path[conf_len-1] == '\n') + dhdinfo->conf_path[conf_len-1] = '\0'; + } +#ifdef DHD_UCODE_DOWNLOAD + if (uc && uc[0] != '\0') { + uc_len = strlen(uc); + if (uc_len >= sizeof(dhdinfo->uc_path)) { + DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n")); + return FALSE; + } + strlcpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path)); + } +#endif /* DHD_UCODE_DOWNLOAD */ + +#if 0 + /* clear the path in module parameter */ + if (dhd_download_fw_on_driverload) { + firmware_path[0] = '\0'; + nvram_path[0] = '\0'; + clm_path[0] = '\0'; + config_path[0] = '\0'; + } +#endif +#ifdef DHD_UCODE_DOWNLOAD + ucode_path[0] = '\0'; + DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path)); +#endif /* DHD_UCODE_DOWNLOAD */ + +#ifndef BCMEMBEDIMAGE + /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */ + if (dhdinfo->fw_path[0] == '\0') { + DHD_ERROR(("firmware path not found\n")); + return FALSE; + } + if (dhdinfo->nv_path[0] == '\0') { + DHD_ERROR(("nvram path not found\n")); + return FALSE; + } +#endif /* BCMEMBEDIMAGE */ + + return TRUE; +} + +#if defined(BT_OVER_SDIO) +extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path) +{ + int fw_len; + const char *fw = NULL; + wifi_adapter_info_t *adapter = dhdinfo->adapter; + + /* Update bt firmware path. The path may be from adapter info or module parameter + * The path from adapter info is used for initialization only (as it won't change). + * + * The btfw_path module parameter may be changed by the system at run + * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private + * command may change dhdinfo->btfw_path. As such we need to clear the path info in + * module parameter after it is copied. 
We won't update the path until the module parameter + is changed again (first character is not '\0') + */ + + /* set default firmware and nvram path for built-in type driver */ + if (!dhd_download_fw_on_driverload) { +#ifdef CONFIG_BCMDHD_BTFW_PATH + fw = CONFIG_BCMDHD_BTFW_PATH; +#endif /* CONFIG_BCMDHD_BTFW_PATH */ + } + + /* check if we need to initialize the path */ + if (dhdinfo->btfw_path[0] == '\0') { + if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0') + fw = adapter->btfw_path; + } + + /* Use module parameter if it is valid, EVEN IF the path has not been initialized + */ + if (btfw_path[0] != '\0') + fw = btfw_path; + + if (fw && fw[0] != '\0') { + fw_len = strlen(fw); + if (fw_len >= sizeof(dhdinfo->btfw_path)) { + DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n")); + return FALSE; + } + strlcpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path)); + } + + /* clear the path in module parameter */ + btfw_path[0] = '\0'; + + if (dhdinfo->btfw_path[0] == '\0') { + DHD_ERROR(("bt firmware path not found\n")); + return FALSE; + } + + return TRUE; +} +#endif /* defined (BT_OVER_SDIO) */ + +#ifdef CUSTOMER_HW4_DEBUG +bool dhd_validate_chipid(dhd_pub_t *dhdp) +{ + uint chipid = dhd_bus_chip_id(dhdp); + uint config_chipid; + +#ifdef BCM4389_CHIP_DEF + config_chipid = BCM4389_CHIP_ID; +#elif defined(BCM4375_CHIP) + config_chipid = BCM4375_CHIP_ID; +#elif defined(BCM4361_CHIP) + config_chipid = BCM4361_CHIP_ID; +#elif defined(BCM4359_CHIP) + config_chipid = BCM4359_CHIP_ID; +#elif defined(BCM4358_CHIP) + config_chipid = BCM4358_CHIP_ID; +#elif defined(BCM4354_CHIP) + config_chipid = BCM4354_CHIP_ID; +#elif defined(BCM4339_CHIP) + config_chipid = BCM4339_CHIP_ID; +#elif defined(BCM4335_CHIP) + config_chipid = BCM4335_CHIP_ID; +#elif defined(BCM43430_CHIP) + config_chipid = BCM43430_CHIP_ID; +#elif defined(BCM43018_CHIP) + config_chipid = BCM43018_CHIP_ID; +#elif defined(BCM43455_CHIP) + config_chipid = BCM4345_CHIP_ID; +#elif defined(BCM43454_CHIP) + config_chipid = BCM43454_CHIP_ID; +#elif defined(BCM43012_CHIP_) + config_chipid = BCM43012_CHIP_ID; +#elif defined(BCM43013_CHIP) + config_chipid = BCM43012_CHIP_ID; +#else + DHD_ERROR(("%s: Unknown chip id, if you use new chipset," + " please add CONFIG_BCMXXXX into the Kernel and" + " BCMXXXX_CHIP definition into the DHD driver\n", + __FUNCTION__)); + config_chipid = 0; + + return FALSE; +#endif /* BCM4389_CHIP_DEF */ + +#if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION) + if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) { + return TRUE; + } +#endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */ +#if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION) + if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) { + return TRUE; + } +#endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */ +#if defined(BCM4359_CHIP) + if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) { + return TRUE; + } +#endif /* BCM4359_CHIP */ +#if defined(BCM4361_CHIP) + if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) { + return TRUE; + } +#endif /* BCM4361_CHIP */ + + return config_chipid == chipid; +} +#endif /* CUSTOMER_HW4_DEBUG */ + +#if defined(BT_OVER_SDIO) +wlan_bt_handle_t dhd_bt_get_pub_hndl(void) +{ + DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub)); + /* assuming that dhd_pub_t type pointer is available from a global variable */ + return (wlan_bt_handle_t) g_dhd_pub; +} EXPORT_SYMBOL(dhd_bt_get_pub_hndl); + +int 
dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path) +{ + int ret = -1; + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + + /* Download BT firmware image to the dongle */ + if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) { + DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path)); + ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path); + if (ret < 0) { + DHD_ERROR(("%s: failed to download btfw from: %s\n", + __FUNCTION__, dhd->btfw_path)); + return ret; + } + } + return ret; +} EXPORT_SYMBOL(dhd_download_btfw); +#endif /* defined (BT_OVER_SDIO) */ + +#ifndef BCMDBUS +int +dhd_bus_start(dhd_pub_t *dhdp) +{ + int ret = -1; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + unsigned long flags; + +#if defined(DHD_DEBUG) && defined(BCMSDIO) + int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0; +#endif /* DHD_DEBUG && BCMSDIO */ + ASSERT(dhd); + + DHD_TRACE(("Enter %s:\n", __FUNCTION__)); + dhdp->memdump_type = 0; + dhdp->dongle_trap_occured = 0; +#if defined(BCMPCIE) + if (dhdp->extended_trap_data) { + memset(dhdp->extended_trap_data, 0, BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } +#endif /* BCMPCIE */ +#ifdef DHD_SSSR_DUMP + /* Flag to indicate sssr dump is collected */ + dhdp->sssr_dump_collected = 0; +#endif /* DHD_SSSR_DUMP */ +#ifdef BT_OVER_PCIE + dhd->pub.dongle_trap_due_to_bt = 0; +#endif /* BT_OVER_PCIE */ + dhdp->iovar_timeout_occured = 0; +#ifdef PCIE_FULL_DONGLE + dhdp->d3ack_timeout_occured = 0; + dhdp->livelock_occured = 0; + dhdp->pktid_audit_failed = 0; +#endif /* PCIE_FULL_DONGLE */ + dhd->pub.iface_op_failed = 0; + dhd->pub.scan_timeout_occurred = 0; + dhd->pub.scan_busy_occurred = 0; + /* Retain BH induced errors and clear induced error during initialize */ + if (dhd->pub.dhd_induce_error) { + dhd->pub.dhd_induce_bh_error = dhd->pub.dhd_induce_error; + } + dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR; +#ifdef DHD_PKTTS + dhd->latency = 0; +#endif + dhd->pub.tput_test_done = FALSE; + +#if defined(BCMINTERNAL) && defined(BCMPCIE) + { + /* JIRA:SW4349-436 JIRA:HW4349-302 Work around for 4349a0 PCIE-D11 DMA bug */ + uint chipid = dhd_bus_chip_id(&dhd->pub); + uint revid = dhd_bus_chiprev_id(&dhd->pub); + + if ((chipid == BCM4349_CHIP_ID) && (revid == 1)) { + DHD_INFO(("%s:Detected 4349 A0 enable 16MB Mem restriction Flag", + __FUNCTION__)); + osl_flag_set(dhd->pub.osh, OSL_PHYS_MEM_LESS_THAN_16MB); + } + } +#endif /* BCMINTERNAL && BCMPCIE */ + /* try to download image and nvram to the dongle */ + if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) { + /* Indicate FW Download is not yet done */ + dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS; + DHD_INFO(("%s download fw %s, nv %s, conf %s\n", + __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path)); +#if defined(DHD_DEBUG) && defined(BCMSDIO) + fw_download_start = OSL_SYSUPTIME(); +#endif /* DHD_DEBUG && BCMSDIO */ + ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); +#if defined(DHD_DEBUG) && defined(BCMSDIO) + fw_download_end = OSL_SYSUPTIME(); +#endif /* DHD_DEBUG && BCMSDIO */ + if (ret < 0) { + DHD_ERROR(("%s: failed to download firmware %s\n", + __FUNCTION__, dhd->fw_path)); + return ret; + } + /* Indicate FW Download has succeeded */ + dhd->pub.fw_download_status = FW_DOWNLOAD_DONE; + } + if (dhd->pub.busstate != DHD_BUS_LOAD) { + return -ENETDOWN; + } + +#ifdef BCMSDIO + 
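/*
 * Annotation, not part of the original patch: a minimal sketch of the
 * bring-up contract dhd_bus_start() implements above, assuming the usual
 * DHD bus states (DOWN -> LOAD -> DATA). The wrapper function is
 * hypothetical; the calls inside it mirror the real ones used above.
 *
 *   static int bring_up_sketch(dhd_pub_t *dhdp)
 *   {
 *       dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
 *       int ret;
 *
 *       if (dhdp->busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
 *           // DHD_BUS_DOWN: push fw/nvram; success moves the bus to LOAD
 *           ret = dhd_bus_download_firmware(dhdp->bus, dhdp->osh,
 *               dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
 *           if (ret < 0)
 *               return ret;
 *       }
 *       if (dhdp->busstate != DHD_BUS_LOAD)
 *           return -ENETDOWN;
 *       // DHD_BUS_LOAD: init the bus; success moves it to DHD_BUS_DATA
 *       return dhd_bus_init(dhdp, FALSE);
 *   }
 */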
dhd_os_sdlock(dhdp); +#endif /* BCMSDIO */ + + /* Start the watchdog timer */ + dhd->pub.tickcnt = 0; + dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); + + /* Bring up the bus */ + if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { + + DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); +#ifdef BCMSDIO + dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ + return ret; + } +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE) + /* Host registration for OOB interrupt */ + if (dhd_bus_oob_intr_register(dhdp)) { + /* deactivate timer and wait for the handler to finish */ +#if !defined(BCMPCIE_OOB_HOST_WAKE) + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + DHD_STOP_RPM_TIMER(&dhd->pub); + + DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return -ENODEV; + } + +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhd_bus_oob_intr_set(dhdp, TRUE); +#else + /* Enable oob at firmware */ + dhd_enable_oob_intr(dhd->pub.bus, TRUE); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#elif defined(FORCE_WOWLAN) + /* Enable oob at firmware */ + dhd_enable_oob_intr(dhd->pub.bus, TRUE); +#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */ +#ifdef PCIE_FULL_DONGLE + { + /* max_h2d_rings includes H2D common rings */ + uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus); + + DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__, + max_h2d_rings)); + if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) { +#ifdef BCMSDIO + dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ + return ret; + } + } +#endif /* PCIE_FULL_DONGLE */ + + /* set default value for now. Will be updated again in dhd_preinit_ioctls() + * after querying FW + */ + dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS; + dhdp->event_log_max_sets_queried = FALSE; + + dhdp->smmu_fault_occurred = 0; +#ifdef DNGL_AXI_ERROR_LOGGING + dhdp->axi_error = FALSE; +#endif /* DNGL_AXI_ERROR_LOGGING */ + + /* Do protocol initialization necessary for IOCTL/IOVAR */ + ret = dhd_prot_init(&dhd->pub); + if (unlikely(ret) != BCME_OK) { + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return ret; + } + + /* If bus is not ready, can't come up */ + if (dhd->pub.busstate != DHD_BUS_DATA) { + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); + DHD_STOP_RPM_TIMER(&dhd->pub); +#ifdef BCMSDIO + dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return -ENODEV; + } + +#ifdef BCMSDIO + dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ + + /* Bus is ready, query any dongle information */ + /* XXX Since dhd_sync_with_dongle can sleep, should module count surround it? 
*/ +#if defined(DHD_DEBUG) && defined(BCMSDIO) + f2_sync_start = OSL_SYSUPTIME(); +#endif /* DHD_DEBUG && BCMSDIO */ + if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) { + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__)); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return ret; + } + +#ifdef BT_OVER_PCIE + /* Enable L1SS of RC and EP */ + dhd_bus_l1ss_enable_rc_ep(dhdp->bus, TRUE); +#endif /* BT_OVER_PCIE */ + +#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE) +#if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420) + /* XXX: JIRA SWWLAN-139454: Added L1ss enable + * after firmware download completion due to link down issue + * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point + */ + DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__)); +#if defined(CONFIG_SOC_GS101) + exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, 1); +#else + exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI); +#endif /* CONFIG_SOC_GS101 */ +#endif /* !CONFIG_SOC_EXYNOS8890 && !SUPPORT_EXYNOS7420 */ +#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */ +#if defined(DHD_DEBUG) && defined(BCMSDIO) + f2_sync_end = OSL_SYSUPTIME(); + DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n", + (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start))); +#endif /* DHD_DEBUG && BCMSDIO */ + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->pend_ipaddr) { +#ifdef AOE_IP_ALIAS_SUPPORT + /* XXX Assume pending ip address is belong to primary interface */ + aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0); +#endif /* AOE_IP_ALIAS_SUPPORT */ + dhd->pend_ipaddr = 0; + } +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(BCM_ROUTER_DHD) + bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t)); +#endif /* BCM_ROUTER_DHD */ + return 0; +} +#endif /* !BCMDBUS */ + +#ifdef WLTDLS +int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac) +{ + uint32 tdls = tdls_on; + int ret = 0; + uint32 tdls_auto_op = 0; + uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING; + int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH; + int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW; + uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH; + uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW; + + BCM_REFERENCE(mac); + if (!FW_SUPPORTED(dhd, tdls)) + return BCME_ERROR; + + if (dhd->tdls_enable == tdls_on) + goto auto_mode; + ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret)); + goto exit; + } + dhd->tdls_enable = tdls_on; +auto_mode: + + tdls_auto_op = auto_on; + ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL, + 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret)); + goto exit; + } + + if (tdls_auto_op) { + ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time, + sizeof(tdls_idle_time), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret)); + goto exit; + } + ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high, + sizeof(tdls_rssi_high), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret)); + goto exit; + } + ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low, + sizeof(tdls_rssi_low), NULL, 0, 
TRUE); + if (ret < 0) { + DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret)); + goto exit; + } + ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high, + sizeof(tdls_pktcnt_high), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret)); + goto exit; + } + ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low, + sizeof(tdls_pktcnt_low), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret)); + goto exit; + } + } + +exit: + return ret; +} + +int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + if (dhd) + ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac); + else + ret = BCME_ERROR; + return ret; +} + +int +dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode) +{ + int ret = 0; + bool auto_on = false; + uint32 mode = wfd_mode; + +#ifdef ENABLE_TDLS_AUTO_MODE + if (wfd_mode) { + auto_on = false; + } else { + auto_on = true; + } +#else + auto_on = false; +#endif /* ENABLE_TDLS_AUTO_MODE */ + ret = _dhd_tdls_enable(dhd, false, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret)); + return ret; + } + + ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE); + if ((ret < 0) && (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret)); + return ret; + } + + ret = _dhd_tdls_enable(dhd, true, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret)); + return ret; + } + + dhd->tdls_mode = mode; + return ret; +} +#ifdef PCIE_FULL_DONGLE +int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event) +{ + dhd_pub_t *dhd_pub = dhdp; + tdls_peer_node_t *cur = dhd_pub->peer_tbl.node; + tdls_peer_node_t *new = NULL, *prev = NULL; + int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname); + uint8 *da = (uint8 *)&event->addr.octet[0]; + bool connect = FALSE; + uint32 reason = ntoh32(event->reason); + unsigned long flags; + + /* No handling needed for peer discovered reason */ + if (reason == WLC_E_TDLS_PEER_DISCOVERED) { + return BCME_ERROR; + } + if (reason == WLC_E_TDLS_PEER_CONNECTED) + connect = TRUE; + else if (reason == WLC_E_TDLS_PEER_DISCONNECTED) + connect = FALSE; + else + { + DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__)); + return BCME_ERROR; + } + if (ifindex == DHD_BAD_IF) + return BCME_ERROR; + + if (connect) { + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: TDLS Peer exists already %d\n", + __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + cur = cur->next; + } + + new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t)); + if (new == NULL) { + DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__)); + return BCME_ERROR; + } + memcpy(new->addr, da, ETHER_ADDR_LEN); + DHD_TDLS_LOCK(&dhdp->tdls_lock, flags); + new->next = dhd_pub->peer_tbl.node; + dhd_pub->peer_tbl.node = new; + dhd_pub->peer_tbl.tdls_peer_count++; + DHD_ERROR(("%s: Add TDLS peer, count=%d " MACDBG "\n", + __FUNCTION__, dhd_pub->peer_tbl.tdls_peer_count, + MAC2STRDBG((char *)da))); + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); + + } else { + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da); + DHD_TDLS_LOCK(&dhdp->tdls_lock, flags); + if (prev) + prev->next = cur->next; + else 
+ dhd_pub->peer_tbl.node = cur->next; + MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t)); + dhd_pub->peer_tbl.tdls_peer_count--; + DHD_ERROR(("%s: Remove TDLS peer, count=%d " MACDBG "\n", + __FUNCTION__, dhd_pub->peer_tbl.tdls_peer_count, + MAC2STRDBG((char *)da))); + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); + return BCME_OK; + } + prev = cur; + cur = cur->next; + } + DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__)); + } + return BCME_OK; +} +#endif /* PCIE_FULL_DONGLE */ +#endif /* BCMDBUS */ + +bool dhd_is_concurrent_mode(dhd_pub_t *dhd) +{ + if (!dhd) + return FALSE; + + if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE) + return TRUE; + else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) == + DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) + return TRUE; + else + return FALSE; +} +#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) +/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware + * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA + * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware + * would still be named as fw_bcmdhd_apsta. + */ +uint32 +dhd_get_concurrent_capabilites(dhd_pub_t *dhd) +{ + int32 ret = 0; + char buf[WLC_IOCTL_SMLEN]; + bool mchan_supported = FALSE; + /* if dhd->op_mode is already set for HOSTAP and Manufacturing + * test mode, that means we only will use the mode as it is + */ + if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)) + return 0; + if (FW_SUPPORTED(dhd, vsdb)) { + mchan_supported = TRUE; + } + if (!FW_SUPPORTED(dhd, p2p)) { + DHD_TRACE(("Chip does not support p2p\n")); + return 0; + } else { + /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */ + memset(buf, 0, sizeof(buf)); + ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf, + sizeof(buf), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret)); + return 0; + } else { + if (buf[0] == 1) { + /* By default, chip supports single chan concurrency, + * now lets check for mchan + */ + ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE; + if (mchan_supported) + ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE; + if (FW_SUPPORTED(dhd, rsdb)) { + ret |= DHD_FLAG_RSDB_MODE; + } +#ifdef WL_SUPPORT_MULTIP2P + if (FW_SUPPORTED(dhd, mp2p)) { + ret |= DHD_FLAG_MP2P_MODE; + } +#endif /* WL_SUPPORT_MULTIP2P */ +#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF) + return ret; +#else + return 0; +#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */ + } + } + } + return 0; +} +#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */ + +#ifdef SUPPORT_AP_POWERSAVE +#define RXCHAIN_PWRSAVE_PPS 10 +#define RXCHAIN_PWRSAVE_QUIET_TIME 10 +#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0 +int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable) +{ + int32 pps = RXCHAIN_PWRSAVE_PPS; + int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME; + int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK; + int ret; + + if (enable) { + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable), + NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("Failed to enable AP power save")); + } + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0, + TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("Failed to set pps")); + } + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time, + sizeof(quiet_time), NULL, 0, TRUE); + if (ret != BCME_OK) { + 
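/*
 * Annotation, not part of the original patch: dhd_set_ap_powersave()
 * repeats one pattern, "set a single int32 iovar, log on failure". A
 * hypothetical helper collapsing that pattern could look like:
 *
 *   static int set_int_iovar_sketch(dhd_pub_t *dhdp, char *name, int32 val)
 *   {
 *       int err = dhd_iovar(dhdp, 0, name, (char *)&val, sizeof(val),
 *               NULL, 0, TRUE);
 *       if (err != BCME_OK)
 *           DHD_ERROR(("Failed to set %s (%d)\n", name, err));
 *       return err;
 *   }
 *
 * e.g. set_int_iovar_sketch(dhdp, "rxchain_pwrsave_pps", RXCHAIN_PWRSAVE_PPS);
 */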
DHD_ERROR(("Failed to set quiet time")); + } + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check", + (char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("Failed to set stas assoc check")); + } + } else { + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable), + NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("Failed to disable AP power save")); + } + } + + return 0; +} +#endif /* SUPPORT_AP_POWERSAVE */ + +#if defined(READ_CONFIG_FROM_FILE) +#include +#include + +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +bool PM_control = TRUE; + +static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value) +{ + int var_int; + wl_country_t cspec = {{0}, -1, {0}}; + char *revstr; + char *endptr = NULL; +#ifdef ROAM_AP_ENV_DETECTION + int roam_env_mode = AP_ENV_INDETERMINATE; +#endif /* ROAM_AP_ENV_DETECTION */ + + if (!strcmp(name, "country")) { + revstr = strchr(value, '/'); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (dhd->is_blob) { + cspec.rev = 0; + memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ); + memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ); + } else +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { + if (revstr) { + cspec.rev = strtoul(revstr + 1, &endptr, 10); + memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ); + cspec.country_abbrev[2] = '\0'; + memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ); + } else { + cspec.rev = -1; + memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ); + memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ); + get_customized_country_code(dhd->info->adapter, + (char *)&cspec.country_abbrev, &cspec); + } + + } + DHD_ERROR(("config country code is country : %s, rev : %d !!\n", + cspec.country_abbrev, cspec.rev)); + return dhd_iovar(dhd, 0, "country", (char*)&cspec, sizeof(cspec), NULL, 0, TRUE); + } else if (!strcmp(name, "roam_scan_period")) { + var_int = (int)simple_strtol(value, NULL, 0); + return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, + &var_int, sizeof(var_int), TRUE, 0); + } else if (!strcmp(name, "roam_delta")) { + struct { + int val; + int band; + } x; + x.val = (int)simple_strtol(value, NULL, 0); + /* x.band = WLC_BAND_AUTO; */ + x.band = WLC_BAND_ALL; + return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0); + } else if (!strcmp(name, "roam_trigger")) { + int ret = 0; + int roam_trigger[2]; + + roam_trigger[0] = (int)simple_strtol(value, NULL, 0); + roam_trigger[1] = WLC_BAND_ALL; + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger, + sizeof(roam_trigger), TRUE, 0); + +#ifdef ROAM_AP_ENV_DETECTION + if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) { + if (dhd_iovar(dhd, 0, "roam_env_detection", + (char *)&roam_env_mode, sizeof(roam_env_mode), NULL, + 0, TRUE) == BCME_OK) { + dhd->roam_env_detection = TRUE; + } else { + dhd->roam_env_detection = FALSE; + } + } +#endif /* ROAM_AP_ENV_DETECTION */ + return ret; + } else if (!strcmp(name, "PM")) { + int ret = 0; + var_int = (int)simple_strtol(value, NULL, 0); + + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, + &var_int, sizeof(var_int), TRUE, 0); + +#if defined(DHD_PM_CONTROL_FROM_FILE) || defined(CONFIG_PM_LOCK) + if (var_int == 0) { + g_pm_control = TRUE; + printk("%s var_int=%d don't control PM\n", __func__, var_int); + } else { + g_pm_control = FALSE; + printk("%s var_int=%d do control PM\n", __func__, var_int); + } +#endif + + return ret; + } + else if (!strcmp(name, "band")) { + int ret; + if (!strcmp(value, "auto")) + var_int 
= WLC_BAND_AUTO; + else if (!strcmp(value, "a")) + var_int = WLC_BAND_5G; + else if (!strcmp(value, "b")) + var_int = WLC_BAND_2G; + else if (!strcmp(value, "all")) + var_int = WLC_BAND_ALL; + else { + printk(" set band value should be one of the a or b or all\n"); + var_int = WLC_BAND_AUTO; + } + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int, + sizeof(var_int), TRUE, 0)) < 0) + printk(" set band err=%d\n", ret); + return ret; + } else if (!strcmp(name, "cur_etheraddr")) { + struct ether_addr ea; + int ret; + + bcm_ether_atoe(value, &ea); + + ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN); + if (ret == 0) { + DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__)); + return 0; + } + + DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__, + ea.octet[0], ea.octet[1], ea.octet[2], + ea.octet[3], ea.octet[4], ea.octet[5])); + + ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + return ret; + } else { + memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN); + return ret; + } + } else if (!strcmp(name, "lpc")) { + int ret = 0; + var_int = (int)simple_strtol(value, NULL, 0); + if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { + DHD_ERROR(("%s: wl down failed\n", __FUNCTION__)); + } + ret = dhd_iovar(dhd, 0, "lpc", (char *)&var_int, sizeof(var_int), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); + } + if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) { + DHD_ERROR(("%s: wl up failed\n", __FUNCTION__)); + } + return ret; + } else if (!strcmp(name, "vht_features")) { + int ret = 0; + var_int = (int)simple_strtol(value, NULL, 0); + + if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { + DHD_ERROR(("%s: wl down failed\n", __FUNCTION__)); + } + ret = dhd_iovar(dhd, 0, "vht_features", (char *)&var_int, sizeof(var_int), NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret)); + } + if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) { + DHD_ERROR(("%s: wl up failed\n", __FUNCTION__)); + } + return ret; + } else { + /* wlu_iovar_setint */ + var_int = (int)simple_strtol(value, NULL, 0); + + /* Setup timeout bcm_timeout from dhd driver 4.217.48 */ + + DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int)); + + return dhd_iovar(dhd, 0, name, (char *)&var_int, + sizeof(var_int), NULL, 0, TRUE); + } + + return 0; +} + +static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx) +{ + mm_segment_t old_fs; + struct kstat stat; + struct file *fp = NULL; + unsigned int len; + char *buf = NULL, *p, *name, *value; + int ret = 0; + char *config_path; + + config_path = CONFIG_BCMDHD_CONFIG_PATH; + + if (!config_path) + { + printk(KERN_ERR "config_path can't read. 
\n"); + return 0; + } + + old_fs = get_fs(); + set_fs(get_ds()); + if ((ret = vfs_stat(config_path, &stat))) { + set_fs(old_fs); + printk(KERN_ERR "%s: Failed to get information (%d)\n", + config_path, ret); + return ret; + } + set_fs(old_fs); + + if (!(buf = MALLOC(dhd->osh, stat.size + 1))) { + printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size); + return -ENOMEM; + } + memset(buf, 0x0, stat.size + 1); + printk("dhd_preinit_config : config path : %s \n", config_path); + + if (!(fp = dhd_os_open_image1(dhd, config_path)) || + (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0) + goto err; + + if (len != stat.size) { + printk("dhd_preinit_config : Error - read length mismatched len = %d\n", len); + goto err; + } + + buf[stat.size] = '\0'; + for (p = buf; *p; p++) { + if (isspace(*p)) + continue; + for (name = p++; *p && !isspace(*p); p++) { + if (*p == '=') { + *p = '\0'; + p++; + for (value = p; *p && !isspace(*p); p++); + *p = '\0'; + if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) { + printk(KERN_ERR "%s: %s=%s\n", + bcmerrorstr(ret), name, value); + } + break; + } + } + } + ret = 0; + +out: + if (fp) + dhd_os_close_image1(dhd, fp); + if (buf) + MFREE(dhd->osh, buf, stat.size+1); + return ret; + +err: + ret = -1; + goto out; +} +#endif /* READ_CONFIG_FROM_FILE */ + +#ifdef WLAIBSS +int +dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen) +{ + int ret = BCME_OK; + aibss_bcn_force_config_t bcn_config; + uint32 aibss; +#ifdef WLAIBSS_PS + uint32 aibss_ps; + s32 atim; +#endif /* WLAIBSS_PS */ + int ibss_coalesce; + + aibss = 1; + ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s aibss , UNSUPPORTED\n", __FUNCTION__)); + return BCME_OK; + } else { + DHD_ERROR(("%s Set aibss to %d err(%d)\n", __FUNCTION__, aibss, ret)); + return ret; + } + } + +#ifdef WLAIBSS_PS + aibss_ps = 1; + ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set aibss PS to %d failed %d\n", + __FUNCTION__, aibss, ret)); + return ret; + } + + atim = 10; + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM, + (char *)&atim, sizeof(atim), TRUE, 0)) < 0) { + DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n", + __FUNCTION__, ret)); + return ret; + } +#endif /* WLAIBSS_PS */ + + memset(&bcn_config, 0, sizeof(bcn_config)); + bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR; + bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR; + bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR; + bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0; + bcn_config.len = sizeof(bcn_config); + + ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config, + sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n", + __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR, + AIBSS_BCN_FLOOD_DUR, ret)); + return ret; + } + + ibss_coalesce = IBSS_COALESCE_DEFAULT; + ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce, + sizeof(ibss_coalesce), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n", + __FUNCTION__, ret)); + return ret; + } + + dhd->op_mode |= DHD_FLAG_IBSS_MODE; + return BCME_OK; +} +#endif /* WLAIBSS */ + +#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD) +#ifdef WL_BAM +static int +dhd_check_adps_bad_ap(dhd_pub_t *dhd) +{ + struct net_device *ndev; + struct bcm_cfg80211 *cfg; + 
struct wl_profile *profile; + struct ether_addr bssid; + + if (!dhd_is_associated(dhd, 0, NULL)) { + DHD_ERROR(("%s - not associated\n", __FUNCTION__)); + return BCME_OK; + } + + ndev = dhd_linux_get_primary_netdev(dhd); + if (!ndev) { + DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__)); + return -ENODEV; + } + + cfg = wl_get_cfg(ndev); + if (!cfg) { + DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__)); + return -EINVAL; + } + + profile = wl_get_profile_by_netdev(cfg, ndev); + memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN); + if (wl_adps_bad_ap_check(cfg, &bssid)) { + if (wl_adps_enabled(cfg, ndev)) { + wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND); + } + } + + return BCME_OK; +} +#endif /* WL_BAM */ + +int +dhd_enable_adps(dhd_pub_t *dhd, uint8 on) +{ + int i; + int len; + int ret = BCME_OK; + + bcm_iov_buf_t *iov_buf = NULL; + wl_adps_params_v1_t *data = NULL; + + len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data); + iov_buf = MALLOC(dhd->osh, len); + if (iov_buf == NULL) { + DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len)); + ret = BCME_NOMEM; + goto exit; + } + + iov_buf->version = WL_ADPS_IOV_VER; + iov_buf->len = sizeof(*data); + iov_buf->id = WL_ADPS_IOV_MODE; + + data = (wl_adps_params_v1_t *)iov_buf->data; + data->version = ADPS_SUB_IOV_VERSION_1; + data->length = sizeof(*data); + data->mode = on; + + for (i = 1; i <= MAX_BANDS; i++) { + data->band = i; + ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s adps, UNSUPPORTED\n", __FUNCTION__)); + ret = BCME_OK; + goto exit; + } + else { + DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n", + __FUNCTION__, on ? "On" : "Off", i, ret)); + goto exit; + } + } + } + +#ifdef WL_BAM + if (on) { + dhd_check_adps_bad_ap(dhd); + } +#endif /* WL_BAM */ + +exit: + if (iov_buf) { + MFREE(dhd->osh, iov_buf, len); + } + return ret; +} +#endif /* WLADPS || WLADPS_PRIVATE_CMD */ + +int +dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask) +{ + wl_el_set_type_t logset_type, logset_op; + wl_el_set_all_type_v1_t *logset_all_type_op = NULL; + bool use_logset_all_type = FALSE; + int ret = BCME_ERROR; + int err = 0; + uint8 i = 0; + int el_set_all_type_len; + + if (!dhd || !logset_mask) + return BCME_BADARG; + + el_set_all_type_len = OFFSETOF(wl_el_set_all_type_v1_t, set_type) + + (sizeof(wl_el_set_type_v1_t) * dhd->event_log_max_sets); + + logset_all_type_op = (wl_el_set_all_type_v1_t *) MALLOC(dhd->osh, el_set_all_type_len); + if (logset_all_type_op == NULL) { + DHD_ERROR(("%s: failed to allocate %d bytes for logset_all_type_op\n", + __FUNCTION__, el_set_all_type_len)); + return BCME_NOMEM; + } + + *logset_mask = 0; + memset(&logset_type, 0, sizeof(logset_type)); + memset(&logset_op, 0, sizeof(logset_op)); + logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION); + logset_type.len = htod16(sizeof(wl_el_set_type_t)); + + /* Try with set = event_log_max_sets, if fails, use legacy event_log_set_type */ + logset_type.set = dhd->event_log_max_sets; + err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type, sizeof(logset_type), + (char *)logset_all_type_op, el_set_all_type_len, FALSE); + if (err == BCME_OK) { + DHD_ERROR(("%s: use optimised use_logset_all_type\n", __FUNCTION__)); + use_logset_all_type = TRUE; + } + + for (i = 0; i < dhd->event_log_max_sets; i++) { + if (use_logset_all_type) { + logset_op.type = logset_all_type_op->set_type[i].type_val; + } else { + logset_type.set = i; + 
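/*
 * Annotation, not part of the original patch: two query shapes are in play
 * here. The bulk form above sets logset_type.set = event_log_max_sets and
 * reads back a wl_el_set_all_type_v1_t carrying one type per log set;
 * older firmware only answers the per-set form used below, one
 * wl_el_set_type_t per iteration. Sketch of the per-set GET:
 *
 *   logset_type.set = i;
 *   err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
 *           sizeof(logset_type), (char *)&logset_op, sizeof(logset_op),
 *           FALSE);
 *   if (err == BCME_OK && logset_op.type == EVENT_LOG_SET_TYPE_PRSRV)
 *       *logset_mask |= 0x01u << i;
 */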
err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type, + sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE); + } + /* the iovar may return 'unsupported' error if a log set number is not present + * in the fw, so we should not return on error ! + */ + if (err == BCME_OK && + logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) { + *logset_mask |= 0x01u << i; + ret = BCME_OK; + DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i)); + } + } + + MFREE(dhd->osh, logset_all_type_op, el_set_all_type_len); + return ret; +} + +#ifndef OEM_ANDROID +/* For non-android FC modular builds, override firmware preinited values */ +void +dhd_override_fwprenit(dhd_pub_t * dhd) +{ + int ret = 0; + + { + /* Disable bcn_li_bcn */ + uint32 bcn_li_bcn = 0; + ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, + sizeof(bcn_li_bcn), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: bcn_li_bcn failed:%d\n", + __FUNCTION__, ret)); + } + } + + { + /* Disable apsta */ + uint32 apsta = 0; + ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, + sizeof(apsta), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: apsta failed:%d\n", + __FUNCTION__, ret)); + } + } + + { + int ap_mode = 0; + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, (char *)&ap_mode, + sizeof(ap_mode), TRUE, 0)) < 0) { + DHD_ERROR(("%s: set apmode failed :%d\n", __FUNCTION__, ret)); + } + } +} +#endif /* !OEM_ANDROID */ + +int +dhd_get_fw_capabilities(dhd_pub_t * dhd) +{ + + int ret = 0; + uint32 cap_buf_size = sizeof(dhd->fw_capabilities); + memset(dhd->fw_capabilities, 0, cap_buf_size); + ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1), + FALSE); + + if (ret < 0) { + DHD_ERROR(("%s: Get Capability failed (error=%d)\n", + __FUNCTION__, ret)); + return ret; + } + + memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1)); + dhd->fw_capabilities[0] = ' '; + dhd->fw_capabilities[cap_buf_size - 2] = ' '; + dhd->fw_capabilities[cap_buf_size - 1] = '\0'; + + return 0; +} + +int +dhd_optimised_preinit_ioctls(dhd_pub_t * dhd) +{ + int ret = 0; + /* Room for "event_msgs_ext" + '\0' + bitvec */ + char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16]; +#ifdef DHD_PKTTS + uint32 val = 0; +#endif + uint32 event_log_max_sets = 0; + char* iov_buf = NULL; + /* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED, + * based on FW build tag. 
+ */ + int ret2 = 0; +#if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV) + uint monitor = 0; + dhd_info_t *dhdinfo = (dhd_info_t*)dhd->info; +#endif /* WL_MONITOR && HOST_RADIOTAP_CONV */ +#if defined(BCMSUP_4WAY_HANDSHAKE) + uint32 sup_wpa = 1; +#endif /* BCMSUP_4WAY_HANDSHAKE */ + + uint32 frameburst = CUSTOM_FRAMEBURST_SET; + uint wnm_bsstrans_resp = 0; +#ifdef DHD_BUS_MEM_ACCESS + uint32 enable_memuse = 1; +#endif /* DHD_BUS_MEM_ACCESS */ +#ifdef DHD_PM_CONTROL_FROM_FILE + uint power_mode = PM_FAST; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + char buf[WLC_IOCTL_SMLEN]; + char *ptr; +#ifdef ROAM_ENABLE + uint roamvar = 0; +#ifdef ROAM_AP_ENV_DETECTION + int roam_env_mode = 0; +#endif /* ROAM_AP_ENV_DETECTION */ +#endif /* ROAM_ENABLE */ +#if defined(SOFTAP) + uint dtim = 1; +#endif +/* xxx andrey tmp fix for dk8000 build error */ + struct ether_addr p2p_ea; +#ifdef GET_CUSTOM_MAC_ENABLE + struct ether_addr ea_addr; +#endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef BCMPCIE_OOB_HOST_WAKE + uint32 hostwake_oob = 0; +#endif /* BCMPCIE_OOB_HOST_WAKE */ + wl_wlc_version_t wlc_ver; + +#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME) + uint32 rrm_bcn_req_thrtl_win = RRM_BCNREQ_MAX_CHAN_TIME * 2; + uint32 rrm_bcn_req_max_off_chan_time = RRM_BCNREQ_MAX_CHAN_TIME; +#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */ +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = TRUE; +#ifdef APF + dhd->apf_set = FALSE; +#endif /* APF */ +#endif /* PKT_FILTER_SUPPORT */ + dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM; +#ifdef ENABLE_MAX_DTIM_IN_SUSPEND + dhd->max_dtim_enable = TRUE; +#else + dhd->max_dtim_enable = FALSE; +#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ + dhd->disable_dtim_in_suspend = FALSE; +#ifdef CUSTOM_SET_OCLOFF + dhd->ocl_off = FALSE; +#endif /* CUSTOM_SET_OCLOFF */ +#ifdef SUPPORT_SET_TID + dhd->tid_mode = SET_TID_OFF; + dhd->target_uid = 0; + dhd->target_tid = 0; +#endif /* SUPPORT_SET_TID */ + DHD_TRACE(("Enter %s\n", __FUNCTION__)); + dhd->op_mode = 0; + +#ifdef ARP_OFFLOAD_SUPPORT + /* arpoe will be applied from the suspend context */ + dhd->arpoe_enable = TRUE; + dhd->arpol_configured = FALSE; +#endif /* ARP_OFFLOAD_SUPPORT */ + + /* clear AP flags */ +#if defined(CUSTOM_COUNTRY_CODE) + dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG; +#endif /* CUSTOM_COUNTRY_CODE */ + +#ifdef CUSTOMER_HW4_DEBUG + if (!dhd_validate_chipid(dhd)) { + DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) are mismatched\n", + __FUNCTION__, dhd_bus_chip_id(dhd))); +#ifndef SUPPORT_MULTIPLE_CHIPS + ret = BCME_BADARG; + goto done; +#endif /* !SUPPORT_MULTIPLE_CHIPS */ + } +#endif /* CUSTOMER_HW4_DEBUG */ + + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + if (ret < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + bcmstrtok(&ptr, "\n", 0); + /* Print fw version info */ + DHD_ERROR(("Firmware version = %s\n", buf)); + strncpy(fw_version, buf, FW_VER_STR_LEN); + fw_version[FW_VER_STR_LEN-1] = '\0'; +#if defined(BCMSDIO) || defined(BCMPCIE) + dhd_set_version_info(dhd, buf); +#endif /* BCMSDIO || BCMPCIE */ + } + + /* query for 'wlc_ver' to get version info from firmware */ + /* memsetting to zero */ + memset_s(&wlc_ver, sizeof(wl_wlc_version_t), 0, + sizeof(wl_wlc_version_t)); + ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver, + sizeof(wl_wlc_version_t), FALSE); + if (ret < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + dhd->wlc_ver_major = wlc_ver.wlc_ver_major; + 
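/*
 * Annotation, not part of the original patch: "ver" and "wlc_ver" above use
 * dhd_iovar() in GET mode (last argument FALSE): the param buffer is NULL/0
 * and the response is written into the caller-supplied buffer. A minimal
 * sketch of that pattern:
 *
 *   char resp[WLC_IOCTL_SMLEN];
 *   memset(resp, 0, sizeof(resp));
 *   if (dhd_iovar(dhd, 0, "ver", NULL, 0, resp, sizeof(resp), FALSE) >= 0)
 *       DHD_ERROR(("ver: %s\n", resp));
 */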
dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor; + } +#ifdef BOARD_HIKEY + /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */ + if (strstr(fw_version, "WLTEST") != NULL) { + DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n", + __FUNCTION__)); + op_mode = DHD_FLAG_MFG_MODE; + } +#endif /* BOARD_HIKEY */ + /* get capabilities from firmware */ + ret = dhd_get_fw_capabilities(dhd); + + if (ret < 0) { + DHD_ERROR(("%s: Get Capability failed (error=%d)\n", + __FUNCTION__, ret)); + goto done; + } + + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || + (op_mode == DHD_FLAG_MFG_MODE)) { + dhd->op_mode = DHD_FLAG_MFG_MODE; +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + /* disable runtimePM by default in MFG mode. */ + pm_runtime_disable(dhd_bus_to_dev(dhd->bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#ifdef DHD_PCIE_RUNTIMEPM + /* Disable RuntimePM in mfg mode */ + DHD_DISABLE_RUNTIME_PM(dhd); + DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__)); +#endif /* DHD_PCIE_RUNTIMEPM */ + /* Check and adjust IOCTL response timeout for Manufacturing firmware */ + dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT); + DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n", + __FUNCTION__)); + +#if defined(ARP_OFFLOAD_SUPPORT) + dhd->arpoe_enable = FALSE; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif /* PKT_FILTER_SUPPORT */ +#ifndef CUSTOM_SET_ANTNPM + if (FW_SUPPORTED(dhd, rsdb)) { + wl_config_t rsdb_mode; + memset(&rsdb_mode, 0, sizeof(rsdb_mode)); + ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Disable rsdb_mode failed ret= %d\n", + __FUNCTION__, ret)); + } + } +#endif /* !CUSTOM_SET_ANTNPM */ + } else { + uint32 concurrent_mode = 0; + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__)); + + BCM_REFERENCE(concurrent_mode); + + dhd->op_mode = DHD_FLAG_STA_MODE; + + BCM_REFERENCE(p2p_ea); +#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) + if ((concurrent_mode = dhd_get_concurrent_capabilites(dhd))) { + dhd->op_mode |= concurrent_mode; + } + + /* Check if we are enabling p2p */ + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN); + ETHER_SET_LOCALADDR(&p2p_ea); + ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret)); + else + DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n")); + } +#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */ + + } + +#ifdef BCMPCIE_OOB_HOST_WAKE + ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob, + sizeof(hostwake_oob), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__)); + } else { + if (hostwake_oob == 0) { + DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n", + __FUNCTION__)); + ret = BCME_UNSUPPORTED; + goto done; + } else { + DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__)); + } + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DNGL_AXI_ERROR_LOGGING + ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr, + sizeof(dhd->axierror_logbuf_addr), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__)); + 
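/*
 * Annotation, not part of the original patch: probe-style GETs such as
 * bus:hostwake_oob and axierror_logbuf_addr above treat a failing iovar as
 * "feature absent in this firmware" and fall back to a safe default rather
 * than aborting init, e.g. (addr is a stand-in variable):
 *
 *   uint32 addr;
 *   if (dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0,
 *           (char *)&addr, sizeof(addr), FALSE) < 0)
 *       addr = 0;  // iovar not present: disable AXI error log parsing
 */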
dhd->axierror_logbuf_addr = 0; + } else { + DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", + __FUNCTION__, dhd->axierror_logbuf_addr)); + } +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#ifdef GET_CUSTOM_MAC_ENABLE + ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet, 0); + if (!ret) { + ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + ret = BCME_NOTUP; + goto done; + } + memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN); + } else +#endif /* GET_CUSTOM_MAC_ENABLE */ + { + /* Get the default device MAC address directly from firmware */ + ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret)); + ret = BCME_NOTUP; + goto done; + } + + DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n", + __FUNCTION__, MAC2STRDBG(&buf))); + +#ifdef MACADDR_PROVISION_ENFORCED + if (ETHER_IS_LOCALADDR(buf)) { + DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__)); + ret = BCME_BADADDR; + goto done; + } +#endif /* MACADDR_PROVISION_ENFORCED */ + + /* Update public MAC address after reading from Firmware */ + memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); + } + + if (ETHER_ISNULLADDR(dhd->mac.octet)) { + DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__)); + ret = BCME_BADADDR; + goto done; + } else { + (void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN, + dhd->mac.octet, ETHER_ADDR_LEN); + } + + if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) { + DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__)); + goto done; + } + + DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n", + dhd->op_mode, MAC2STRDBG(dhd->mac.octet))); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (!dhd->is_blob) +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { + /* get a ccode and revision for the country code */ +#if defined(CUSTOM_COUNTRY_CODE) + get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev, + &dhd->dhd_cspec, dhd->dhd_cflags); +#else + get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev, + &dhd->dhd_cspec); +#endif /* CUSTOM_COUNTRY_CODE */ + } + +#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA) + if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) + dhd->info->rxthread_enabled = FALSE; + else + dhd->info->rxthread_enabled = TRUE; +#endif + /* Set Country code */ + if (dhd->dhd_cspec.ccode[0] != 0) { + ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__)); + } + +#if defined(ROAM_ENABLE) + BCM_REFERENCE(roamvar); +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) { + DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar)); + } + /* roamvar is set to 0 by preinit fw, change only if roamvar is non-zero */ + if (roamvar != 0) { + /* Disable built-in roaming to allowed ext supplicant to take care of roaming */ + ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret)); + } + } +#endif /* USE_WFA_CERT_CONF */ + +#ifdef ROAM_AP_ENV_DETECTION + /* Changed to GET iovar to read roam_env_mode */ + dhd->roam_env_detection = FALSE; + ret = 
dhd_iovar(dhd, 0, "roam_env_detection", NULL, 0, (char *)&roam_env_mode, + sizeof(roam_env_mode), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: roam_env_detection IOVAR not present\n", __FUNCTION__)); + } else { + if (roam_env_mode == AP_ENV_INDETERMINATE) { + dhd->roam_env_detection = TRUE; + } + } +#endif /* ROAM_AP_ENV_DETECTION */ +#ifdef CONFIG_ROAM_RSSI_LIMIT + ret = dhd_roam_rssi_limit_set(dhd, CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G); + if (ret < 0) { + DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret)); + } +#endif /* CONFIG_ROAM_RSSI_LIMIT */ +#ifdef CONFIG_ROAM_MIN_DELTA + ret = dhd_roam_min_delta_set(dhd, CUSTOM_ROAM_MIN_DELTA, CUSTOM_ROAM_MIN_DELTA); + if (ret < 0) { + DHD_ERROR(("%s set roam_min_delta failed ret %d\n", __FUNCTION__, ret)); + } +#endif /* CONFIG_ROAM_MIN_DELTA */ +#endif /* ROAM_ENABLE */ + +#ifdef WLTDLS + dhd->tdls_enable = FALSE; + /* query tdls_eable */ + ret = dhd_iovar(dhd, 0, "tdls_enable", NULL, 0, (char *)&dhd->tdls_enable, + sizeof(dhd->tdls_enable), FALSE); + DHD_ERROR(("%s: tdls_enable=%d ret=%d\n", __FUNCTION__, dhd->tdls_enable, ret)); +#endif /* WLTDLS */ + +#ifdef DHD_PM_CONTROL_FROM_FILE +#ifdef CUSTOMER_HW10 + dhd_control_pm(dhd, &power_mode); +#else + sec_control_pm(dhd, &power_mode); +#endif /* CUSTOMER_HW10 */ +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#ifdef MIMO_ANT_SETTING + dhd_sel_ant_from_file(dhd); +#endif /* MIMO_ANT_SETTING */ + +#if defined(OEM_ANDROID) && defined(SOFTAP) + if (ap_fw_loaded == TRUE) { + dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0); + } +#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */ + +#if defined(KEEP_ALIVE) + /* Set Keep Alive : be sure to use FW with -keepalive */ + if (!(dhd->op_mode & + (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) { + if ((ret = dhd_keep_alive_onoff(dhd)) < 0) + DHD_ERROR(("%s set keeplive failed %d\n", + __FUNCTION__, ret)); + } +#endif /* defined(KEEP_ALIVE) */ + + ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets, + sizeof(event_log_max_sets), FALSE); + if (ret == BCME_OK) { + dhd->event_log_max_sets = event_log_max_sets; + } else { + dhd->event_log_max_sets = NUM_EVENT_LOG_SETS; + } + BCM_REFERENCE(iovbuf); + /* Make sure max_sets is set first with wmb and then sets_queried, + * this will be used during parsing the logsets in the reverse order. 
+	 */
+	OSL_SMP_WMB();
+	dhd->event_log_max_sets_queried = TRUE;
+	DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
+		__FUNCTION__, dhd->event_log_max_sets, ret));
+#ifdef DHD_BUS_MEM_ACCESS
+	ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
+		sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: enable_memuse failed ret=%d\n",
+			__FUNCTION__, ret));
+	} else {
+		DHD_ERROR(("%s: enable_memuse = %d\n",
+			__FUNCTION__, enable_memuse));
+	}
+#endif /* DHD_BUS_MEM_ACCESS */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef USE_WL_FRAMEBURST
+	if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
+		DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
+	}
+#endif /* USE_WL_FRAMEBURST */
+	g_frameburst = frameburst;
+#endif /* USE_WFA_CERT_CONF */
+
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+	/* Disable Framebursting for SoftAP */
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		frameburst = 0;
+	}
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+
+	BCM_REFERENCE(frameburst);
+#if defined(USE_WL_FRAMEBURST) || defined(DISABLE_WL_FRAMEBURST_SOFTAP)
+	/* frameburst is set to 1 by preinit fw, change if otherwise */
+	if (frameburst != 1) {
+		/* Set frameburst to value */
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+			sizeof(frameburst), TRUE, 0)) < 0) {
+			DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
+		}
+	}
+#endif /* USE_WL_FRAMEBURST || DISABLE_WL_FRAMEBURST_SOFTAP */
+
+	iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+	if (iov_buf == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+		ret = BCME_NOMEM;
+		goto done;
+	}
+
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+	/* Read 4-way handshake requirements */
+	if (dhd_use_idsup == 1) {
+		ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
+			(char *)&iovbuf, sizeof(iovbuf), FALSE);
+		/* sup_wpa iovar returns NOTREADY status on some platforms using modularized
+		 * in-dongle supplicant.
+		 */
+		if (ret >= 0 || ret == BCME_NOTREADY)
+			dhd->fw_4way_handshake = TRUE;
+		DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
+	}
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+
+#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
+	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
+
+#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
+	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
+		dhd->arpoe_enable, dhd->arpol_configured));
+#endif /* ARP_OFFLOAD_SUPPORT */
+	/*
+	 * Retaining pktfilter for now; once fw preinit includes this,
+	 * it will be removed. Take care to skip the pktfilter check during
+	 * each pktfilter removal.
+	 */
+#ifdef PKT_FILTER_SUPPORT
+	/* Setup default definitions for pktfilter, enable in suspend */
+	dhd->pktfilter_count = 6;
+	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+	if (!FW_SUPPORTED(dhd, pf6)) {
+		dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+		dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+	} else {
+		/* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
+		dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
+		dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
+	}
+	/* apply APP pktfilter */
+	dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
+
+#ifdef BLOCK_IPV6_PACKET
+	/* Setup filter to allow only IPv4 unicast frames */
+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
+		HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
+		" "
+		HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
+#else
+	/* Setup filter to allow only unicast */
+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+#endif /* BLOCK_IPV6_PACKET */
+
+#ifdef PASS_IPV4_SUSPEND
+	/* XXX customer wants to get IPv4 multicast packets */
+	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
+#else
+	/* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
+	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+#endif /* PASS_IPV4_SUSPEND */
+	if (FW_SUPPORTED(dhd, pf6)) {
+		/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
+		dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
+		/* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
+		dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
+		/* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
+		dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
+		/* Immediately pkt filter TYPE 6 Discard NETBIOS packet (port 137) */
+		dhd->pktfilter[DHD_UDPNETBIOS_DROP_FILTER_NUM] = DISCARD_UDPNETBIOS;
+		dhd->pktfilter_count = 11;
+	}
+
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+	dhd->pktfilter_count = 4;
+	/* Setup filter to block broadcast and NAT Keepalive packets */
+	/* discard all broadcast packets */
+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
+	/* discard NAT Keepalive packets */
+	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
+	/* discard NAT Keepalive packets */
+	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
+	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded) {
+		/* XXX Andrey: for SOFTAP disable pkt filters (if there were any) */
+		dhd_enable_packet_filter(0, dhd);
+	}
+#endif /* defined(SOFTAP) */
+	dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+
+	/* query for 'clmver' to get clm version info from firmware */
+	bzero(buf, sizeof(buf));
+	ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
+	if (ret < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		char *ver_temp_buf = NULL;
+
+		if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+			DHD_ERROR(("Couldn't find \"Data:\"\n"));
+		} else {
+			ptr = (ver_temp_buf + strlen("Data:"));
+			if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+				DHD_ERROR(("Couldn't find New line character\n"));
+			} else {
+				bzero(clm_version, CLM_VER_STR_LEN);
+				strlcpy(clm_version, ver_temp_buf,
+					MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN));
+				DHD_INFO(("CLM version = %s\n", clm_version));
+			}
+		}
+
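+		/* Note, for reference only (not a parse contract): the "clmver"
+		 * output is a multi-line tagged string, typically containing a
+		 * "Data: <clm data version>" line and, on some builds,
+		 * "Customization:"/"Creation:" lines; the parsing here and
+		 * below anchors on those tags.
+		 */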
+#if defined(CUSTOMER_HW4_DEBUG)
+		if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
+			DHD_ERROR(("Couldn't find \"Customization:\"\n"));
+		} else {
+			char tokenlim;
+			ptr = (ver_temp_buf + strlen("Customization:"));
+			if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
+				DHD_ERROR(("Couldn't find project blob version "
+					"or New line character\n"));
+			} else if (tokenlim == '(') {
+				snprintf(clm_version,
+					CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
+					clm_version, ver_temp_buf);
+				DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
+				if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
+					DHD_ERROR(("Couldn't find New line character\n"));
+				} else {
+					snprintf(clm_version,
+						strlen(clm_version) + strlen(ver_temp_buf),
+						"%s%s", clm_version, ver_temp_buf);
+					DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
+						clm_version));
+
+				}
+			} else if (tokenlim == '\n') {
+				snprintf(clm_version,
+					strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
+					"%s, Blob ver = Major : ", clm_version);
+				snprintf(clm_version,
+					strlen(clm_version) + strlen(ver_temp_buf) + 1,
+					"%s%s", clm_version, ver_temp_buf);
+				DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
+			}
+		}
+#endif /* CUSTOMER_HW4_DEBUG */
+		if (strlen(clm_version)) {
+			DHD_ERROR(("CLM version = %s\n", clm_version));
+		} else {
+			DHD_ERROR(("Couldn't find CLM version!\n"));
+		}
+
+	}
+
+#ifdef WRITE_WLANINFO
+	sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
+
+#ifdef GEN_SOFTAP_INFO_FILE
+	sec_save_softap_info();
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#ifdef PNO_SUPPORT
+	if (!dhd->pno_state) {
+		dhd_pno_init(dhd);
+	}
+#endif
+
+#ifdef DHD_PKTTS
+	/* get the pkt metadata buffer length supported by FW */
+	if (dhd_wl_ioctl_get_intiovar(dhd, "bus:metadata_info", &val,
+		WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+		DHD_ERROR(("%s: failed to get pkt metadata buflen, use IPC pkt TS.\n",
+			__FUNCTION__));
+		/*
+		 * if the iovar fails, the IPC method of collecting
+		 * TS should be used, hence set metadata_buflen to
+		 * 0 here. This will be checked later on Tx completion
+		 * to decide whether the IPC or metadata method of reading TS
+		 * should be used.
+		 */
+		dhd->pkt_metadata_version = 0;
+		dhd->pkt_metadata_buflen = 0;
+	} else {
+		dhd->pkt_metadata_version = GET_METADATA_VER(val);
+		dhd->pkt_metadata_buflen = GET_METADATA_BUFLEN(val);
+	}
+
+	/* Check if FW supports pktlat; if so, enable the pktts_enab iovar */
+	ret = dhd_set_pktts_enab(dhd, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: Enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
+	}
+#endif /* DHD_PKTTS */
+
+#ifdef RTT_SUPPORT
+	if (dhd->rtt_state) {
+		ret = dhd_rtt_init(dhd);
+		if (ret < 0) {
+			DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
+		}
+	}
+#endif
+
+#ifdef FILTER_IE
+	/* Failure to configure filter IE is not a fatal error, ignore it.
*/ + if (FW_SUPPORTED(dhd, fie) && + !(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) { + dhd_read_from_file(dhd); + } +#endif /* FILTER_IE */ + +#ifdef NDO_CONFIG_SUPPORT + dhd->ndo_enable = FALSE; + dhd->ndo_host_ip_overflow = FALSE; + dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES; +#endif /* NDO_CONFIG_SUPPORT */ + + /* ND offload version supported */ + dhd->ndo_version = dhd_ndo_get_version(dhd); + + /* check dongle supports wbtext (product policy) or not */ + dhd->wbtext_support = FALSE; + if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp, + WLC_GET_VAR, FALSE, 0) != BCME_OK) { + DHD_ERROR(("failed to get wnm_bsstrans_resp\n")); + } + dhd->wbtext_policy = wnm_bsstrans_resp; + if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) { + dhd->wbtext_support = TRUE; + } +#ifndef WBTEXT + /* driver can turn off wbtext feature through makefile */ + if (dhd->wbtext_support) { + if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp", + WL_BSSTRANS_POLICY_ROAM_ALWAYS, + WLC_SET_VAR, FALSE, 0) != BCME_OK) { + DHD_ERROR(("failed to disable WBTEXT\n")); + } + } +#endif /* !WBTEXT */ + +#ifdef DHD_NON_DMA_M2M_CORRUPTION + /* check pcie non dma loopback */ + if (dhd->op_mode == DHD_FLAG_MFG_MODE && + (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) { + goto done; + } +#endif /* DHD_NON_DMA_M2M_CORRUPTION */ + +#ifdef CUSTOM_ASSOC_TIMEOUT + /* set recreate_bi_timeout to increase assoc timeout : + * 20 * 100TU * 1024 / 1000 = 2 secs + * (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000) + */ + if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout", + CUSTOM_ASSOC_TIMEOUT, + WLC_SET_VAR, TRUE, 0) != BCME_OK) { + DHD_ERROR(("failed to set assoc timeout\n")); + } +#endif /* CUSTOM_ASSOC_TIMEOUT */ + + BCM_REFERENCE(ret2); +#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME) + if (dhd_iovar(dhd, 0, "rrm_bcn_req_thrtl_win", + (char *)&rrm_bcn_req_thrtl_win, sizeof(rrm_bcn_req_thrtl_win), + NULL, 0, TRUE) < 0) { + DHD_ERROR(("failed to set RRM BCN request thrtl_win\n")); + } + if (dhd_iovar(dhd, 0, "rrm_bcn_req_max_off_chan_time", + (char *)&rrm_bcn_req_max_off_chan_time, sizeof(rrm_bcn_req_max_off_chan_time), + NULL, 0, TRUE) < 0) { + DHD_ERROR(("failed to set RRM BCN Request max_off_chan_time\n")); + } +#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */ +#ifdef WL_MONITOR +#ifdef HOST_RADIOTAP_CONV + /* 'Wl monitor' IOVAR is fired to check whether the FW supports radiotap conversion or not. + * This is indicated through MSB(1<<31) bit, based on which host radiotap conversion + * will be enabled or disabled. + * 0 - Host supports Radiotap conversion. + * 1 - FW supports Radiotap conversion. + */ + bcm_mkiovar("monitor", (char *)&monitor, sizeof(monitor), iovbuf, sizeof(iovbuf)); + if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_MONITOR, iovbuf, + sizeof(iovbuf), FALSE, 0)) == 0) { + memcpy(&monitor, iovbuf, sizeof(monitor)); + dhdinfo->host_radiotap_conv = (monitor & HOST_RADIOTAP_CONV_BIT) ? 
TRUE : FALSE; + } else { + DHD_ERROR(("%s Failed to get monitor mode, err %d\n", + __FUNCTION__, ret2)); + } +#endif /* HOST_RADIOTAP_CONV */ + if (FW_SUPPORTED(dhd, monitor)) { + dhd->monitor_enable = TRUE; + DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__)); + } else { + dhd->monitor_enable = FALSE; + DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__)); + } +#endif /* WL_MONITOR */ + + /* store the preserve log set numbers */ + if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask) + != BCME_OK) { + DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__)); + } + +#ifdef CONFIG_SILENT_ROAM + dhd->sroam_turn_on = TRUE; + dhd->sroamed = FALSE; +#endif /* CONFIG_SILENT_ROAM */ + +#ifndef OEM_ANDROID + /* For non-android FC modular builds, override firmware preinited values */ + dhd_override_fwprenit(dhd); +#endif /* !OEM_ANDROID */ + dhd_set_bandlock(dhd); + +done: + if (iov_buf) { + MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN); + } + return ret; +} + +int +dhd_legacy_preinit_ioctls(dhd_pub_t *dhd) +{ + int ret = 0; + /* Room for "event_msgs_ext" + '\0' + bitvec */ + char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16]; + char *mask; + uint32 buf_key_b4_m4 = 1; +#ifdef DHD_PKTTS + uint32 val = 0; +#endif + uint8 msglen; + eventmsgs_ext_t *eventmask_msg = NULL; + uint32 event_log_max_sets = 0; + char* iov_buf = NULL; + /* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED, + * based on FW build tag. + */ + int ret2 = 0; + uint32 wnm_cap = 0; +#if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV) + uint monitor = 0; + dhd_info_t *dhdinfo = (dhd_info_t*)dhd->info; +#endif /* WL_MONITOR */ +#if defined(BCMSUP_4WAY_HANDSHAKE) + uint32 sup_wpa = 1; +#endif /* BCMSUP_4WAY_HANDSHAKE */ +#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \ + defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)) + uint32 ampdu_ba_wsize = 0; +#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */ +#if defined(CUSTOM_AMPDU_MPDU) + int32 ampdu_mpdu = 0; +#endif +#if defined(CUSTOM_AMPDU_RELEASE) + int32 ampdu_release = 0; +#endif +#if defined(CUSTOM_AMSDU_AGGSF) + int32 amsdu_aggsf = 0; +#endif + +#if defined(BCMSDIO) || defined(BCMDBUS) +#ifdef PROP_TXSTATUS + int wlfc_enable = TRUE; +#ifndef DISABLE_11N + uint32 hostreorder = 1; + uint wl_down = 1; +#endif /* DISABLE_11N */ +#endif /* PROP_TXSTATUS */ +#endif /* defined(BCMSDIO) || defined(BCMDBUS) */ + +#ifndef PCIE_FULL_DONGLE + uint32 wl_ap_isolate; +#endif /* PCIE_FULL_DONGLE */ + uint32 frameburst = CUSTOM_FRAMEBURST_SET; + uint wnm_bsstrans_resp = 0; +#ifdef SUPPORT_SET_CAC + uint32 cac = 1; +#endif /* SUPPORT_SET_CAC */ +#ifdef DHD_BUS_MEM_ACCESS + uint32 enable_memuse = 1; +#endif /* DHD_BUS_MEM_ACCESS */ +#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT) + uint32 vht_features = 0; /* init to 0, will be set based on each support */ +#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */ + +#ifdef OEM_ANDROID +#ifdef DHD_ENABLE_LPC + uint32 lpc = 1; +#endif /* DHD_ENABLE_LPC */ + uint power_mode = PM_FAST; +#if defined(BCMSDIO) + uint32 dongle_align = DHD_SDALIGN; + uint32 glom = CUSTOM_GLOM_SETTING; +#endif /* defined(BCMSDIO) */ + uint bcn_timeout = CUSTOM_BCN_TIMEOUT; + uint scancache_enab = TRUE; +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + uint32 bcn_li_bcn = 1; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint retry_max = CUSTOM_ASSOC_RETRY_MAX; + int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME; + int scan_unassoc_time = 
DHD_SCAN_UNASSOC_ACTIVE_TIME; + int scan_passive_time = DHD_SCAN_PASSIVE_TIME; + char buf[WLC_IOCTL_SMLEN]; + char *ptr; + uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ +#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE) + wl_el_tag_params_t *el_tag = NULL; +#endif /* DHD_8021X_DUMP */ +#ifdef DHD_RANDMAC_LOGGING + uint privacy_mask = 0; +#endif /* DHD_RANDMAC_LOGGING */ +#ifdef ROAM_ENABLE + uint roamvar = 0; + int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL}; + int roam_scan_period[2] = {10, WLC_BAND_ALL}; + int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL}; +#ifdef ROAM_AP_ENV_DETECTION + int roam_env_mode = AP_ENV_INDETERMINATE; +#endif /* ROAM_AP_ENV_DETECTION */ +#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC + int roam_fullscan_period = 60; +#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ + int roam_fullscan_period = 120; +#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#ifdef DISABLE_BCNLOSS_ROAM + uint roam_bcnloss_off = 1; +#endif /* DISABLE_BCNLOSS_ROAM */ +#else +#ifdef DISABLE_BUILTIN_ROAM + uint roamvar = 1; +#endif /* DISABLE_BUILTIN_ROAM */ +#endif /* ROAM_ENABLE */ + +#if defined(SOFTAP) + uint dtim = 1; +#endif +/* xxx andrey tmp fix for dk8000 build error */ +#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211)) + struct ether_addr p2p_ea; +#endif +#ifdef BCMCCX + uint32 ccx = 1; +#endif +#ifdef SOFTAP_UAPSD_OFF + uint32 wme_apsd = 0; +#endif /* SOFTAP_UAPSD_OFF */ +#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) + uint32 apsta = 1; /* Enable APSTA mode */ +#elif defined(SOFTAP_AND_GC) + uint32 apsta = 0; + int ap_mode = 1; +#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */ +#ifdef GET_CUSTOM_MAC_ENABLE + struct ether_addr ea_addr; + char hw_ether[62]; +#endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef OKC_SUPPORT + uint32 okc = 1; +#endif + +#ifdef DISABLE_11N + uint32 nmode = 0; +#endif /* DISABLE_11N */ + +#if defined(DISABLE_11AC) + uint32 vhtmode = 0; +#endif /* DISABLE_11AC */ +#ifdef USE_WL_TXBF + uint32 txbf = 1; +#endif /* USE_WL_TXBF */ +#ifdef DISABLE_TXBFR + uint32 txbf_bfr_cap = 0; +#endif /* DISABLE_TXBFR */ +#ifdef AMPDU_VO_ENABLE + /* XXX: Enabling VO AMPDU to reduce FER */ + struct ampdu_tid_control tid; +#endif +#if defined(PROP_TXSTATUS) +#ifdef USE_WFA_CERT_CONF + uint32 proptx = 0; +#endif /* USE_WFA_CERT_CONF */ +#endif /* PROP_TXSTATUS */ +#ifdef DHD_SET_FW_HIGHSPEED + uint32 ack_ratio = 250; + uint32 ack_ratio_depth = 64; +#endif /* DHD_SET_FW_HIGHSPEED */ +#ifdef DISABLE_11N_PROPRIETARY_RATES + uint32 ht_features = 0; +#endif /* DISABLE_11N_PROPRIETARY_RATES */ +#ifdef CUSTOM_PSPRETEND_THR + uint32 pspretend_thr = CUSTOM_PSPRETEND_THR; +#endif +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef DISABLE_PRUNED_SCAN + uint32 scan_features = 0; +#endif /* DISABLE_PRUNED_SCAN */ +#ifdef BCMPCIE_OOB_HOST_WAKE + uint32 hostwake_oob = 0; +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef EVENT_LOG_RATE_HC + /* threshold number of lines per second */ +#define EVENT_LOG_RATE_HC_THRESHOLD 1000 + uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD; +#endif /* EVENT_LOG_RATE_HC */ +#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA) + uint32 btmdelta = WBTEXT_BTMDELTA; +#endif /* WBTEXT && WBTEXT_BTMDELTA */ +#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME) + uint32 rrm_bcn_req_thrtl_win = RRM_BCNREQ_MAX_CHAN_TIME * 2; + uint32 rrm_bcn_req_max_off_chan_time = 
RRM_BCNREQ_MAX_CHAN_TIME;
+#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
+#endif /* OEM_ANDROID */
+
+	BCM_REFERENCE(iovbuf);
+	DHD_TRACE(("Enter %s\n", __FUNCTION__));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* arpoe will be applied from the suspend context */
+	dhd->arpoe_enable = TRUE;
+	dhd->arpol_configured = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef OEM_ANDROID
+#ifdef PKT_FILTER_SUPPORT
+	dhd_pkt_filter_enable = TRUE;
+#ifdef APF
+	dhd->apf_set = FALSE;
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
+	dhd->max_dtim_enable = TRUE;
+#else
+	dhd->max_dtim_enable = FALSE;
+#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
+	dhd->disable_dtim_in_suspend = FALSE;
+#ifdef CUSTOM_SET_OCLOFF
+	dhd->ocl_off = FALSE;
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef SUPPORT_SET_TID
+	dhd->tid_mode = SET_TID_OFF;
+	dhd->target_uid = 0;
+	dhd->target_tid = 0;
+#endif /* SUPPORT_SET_TID */
+#ifdef DHDTCPACK_SUPPRESS
+	dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
+#endif
+	dhd->op_mode = 0;
+
+	/* clear AP flags */
+#if defined(CUSTOM_COUNTRY_CODE)
+	dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE */
+
+#ifdef CUSTOMER_HW4_DEBUG
+	if (!dhd_validate_chipid(dhd)) {
+		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) are mismatched\n",
+			__FUNCTION__, dhd_bus_chip_id(dhd)));
+#ifndef SUPPORT_MULTIPLE_CHIPS
+		ret = BCME_BADARG;
+		goto done;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+	}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+	if (ret < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		bcmstrtok(&ptr, "\n", 0);
+		/* Print fw version info */
+		strncpy(fw_version, buf, FW_VER_STR_LEN);
+		fw_version[FW_VER_STR_LEN-1] = '\0';
+	}
+
+#ifdef BOARD_HIKEY
+	/* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
+	if (strstr(fw_version, "WLTEST") != NULL) {
+		DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
+			__FUNCTION__));
+		op_mode = DHD_FLAG_MFG_MODE;
+	}
+#endif /* BOARD_HIKEY */
+
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+		dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+		/* disable runtimePM by default in MFG mode.
+		 */
+		pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DHD_PCIE_RUNTIMEPM
+		/* Disable RuntimePM in mfg mode */
+		DHD_DISABLE_RUNTIME_PM(dhd);
+		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
+		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
+		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+			__FUNCTION__));
+	} else {
+		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+	}
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
+		sizeof(hostwake_oob), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
+	} else {
+		if (hostwake_oob == 0) {
+			DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
+				__FUNCTION__));
+			ret = BCME_UNSUPPORTED;
+			goto done;
+		} else {
+			DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
+		}
+	}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+	ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
+		sizeof(dhd->axierror_logbuf_addr), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
+		dhd->axierror_logbuf_addr = 0;
+	} else {
+		DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
+			__FUNCTION__, dhd->axierror_logbuf_addr));
+	}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef EVENT_LOG_RATE_HC
+	ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
+		sizeof(event_log_rate_hc), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
+	} else {
+		DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
+			event_log_rate_hc));
+	}
+#endif /* EVENT_LOG_RATE_HC */
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	memset(hw_ether, 0, sizeof(hw_ether));
+	ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, 0);
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+	if (!memcmp(&ether_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) {
+		ret = 0;
+	} else
+#endif
+	if (!ret) {
+		memset(buf, 0, sizeof(buf));
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+		memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether));
+#endif
+		bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			memset(buf, 0, sizeof(buf));
+			bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+			if (ret) {
+				DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
+					__FUNCTION__, MAC2STRDBG(hw_ether), ret));
+				prhex("MACPAD", &hw_ether[ETHER_ADDR_LEN],
+					sizeof(hw_ether)-ETHER_ADDR_LEN);
+				ret = BCME_NOTUP;
+				goto done;
+			}
+		}
+	} else {
+		DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
+		ret = BCME_NOTUP;
+		goto done;
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+	/* Get the default device MAC address directly from firmware */
+	ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
+		ret = BCME_NOTUP;
+		goto done;
+	}
+
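+	/* At this point buf holds the MAC the dongle reports via cur_etheraddr;
+	 * it is adopted as the host-visible primary MAC below.
+	 */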
+	DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
+		__FUNCTION__, MAC2STRDBG(&buf)));
+
+#ifdef MACADDR_PROVISION_ENFORCED
+	if (ETHER_IS_LOCALADDR(buf)) {
+		DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__));
+		ret = BCME_BADADDR;
+		goto done;
+	}
+#endif /* MACADDR_PROVISION_ENFORCED */
+
+	/* Update public MAC address after reading from Firmware */
+	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+	if (ETHER_ISNULLADDR(dhd->mac.octet)) {
+		DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__));
+		ret = BCME_BADADDR;
+		goto done;
+	} else {
+		(void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN,
+			dhd->mac.octet, ETHER_ADDR_LEN);
+	}
+#if defined(WL_STA_ASSOC_RAND) && defined(WL_STA_INIT_RAND)
+	/* Set cur_etheraddr of primary interface to a randomized address to ensure
+	 * that any action frame transmission will happen using the randomized macaddr;
+	 * primary netdev->perm_addr will hold the original factory MAC.
+	 */
+	{
+		if ((ret = dhd_update_rand_mac_addr(dhd)) < 0) {
+			DHD_ERROR(("%s: failed to set macaddress\n", __FUNCTION__));
+			goto done;
+		}
+	}
+#endif /* WL_STA_ASSOC_RAND && WL_STA_INIT_RAND */
+
+	if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
+		DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
+		goto done;
+	}
+
+	/* get capabilities from firmware */
+	{
+		uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
+		memset(dhd->fw_capabilities, 0, cap_buf_size);
+		ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
+			FALSE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+				__FUNCTION__, ret));
+			return 0;
+		}
+
+		memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
+		dhd->fw_capabilities[0] = ' ';
+		dhd->fw_capabilities[cap_buf_size - 2] = ' ';
+		dhd->fw_capabilities[cap_buf_size - 1] = '\0';
+	}
+
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+		(op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+		uint rand_mac;
+#endif /* SET_RANDOM_MAC_SOFTAP */
+		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#ifdef PKT_FILTER_SUPPORT
+		if (dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))
+			dhd_pkt_filter_enable = TRUE;
+		else
+			dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+		SRANDOM32((uint)jiffies);
+		rand_mac = RANDOM32();
+		iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02;	/* local admin bit */
+		iovbuf[1] = (unsigned char)(vendor_oui >> 8);
+		iovbuf[2] = (unsigned char)vendor_oui;
+		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+		iovbuf[4] = (unsigned char)(rand_mac >> 8);
+		iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+		ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
+			TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+#endif /* SET_RANDOM_MAC_SOFTAP */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+		dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+#ifdef SUPPORT_AP_POWERSAVE
+		dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE */
+#ifdef SOFTAP_UAPSD_OFF
+		ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
+			TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
+				__FUNCTION__, ret));
+		}
+#endif /* SOFTAP_UAPSD_OFF */
+
+		/* set AP flag for specific country code of SOFTAP */
+#if defined(CUSTOM_COUNTRY_CODE)
+		dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE */
+	} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+		dhd->arpoe_enable = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+		dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+		/* XXX The 'wl counters' command triggers an SDIO bus error
+		 * if the F2 block size is greater than 128 bytes using 4354A1
+		 * manufacturing firmware. To avoid this problem, the F2 block
+		 * size is set to 128 bytes only for DHD_FLAG_MFG_MODE.
+		 * There is no problem for other chipsets, since no big data
+		 * transaction happens over the SDIO bus during
+		 * manufacturing tests.
+		 */
+		dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+#ifndef CUSTOM_SET_ANTNPM
+		if (FW_SUPPORTED(dhd, rsdb)) {
+			wl_config_t rsdb_mode;
+			memset(&rsdb_mode, 0, sizeof(rsdb_mode));
+			ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
+				NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s Disable rsdb_mode failed ret=%d\n",
+					__FUNCTION__, ret));
+			}
+		}
+#endif /* !CUSTOM_SET_ANTNPM */
+	} else {
+		uint32 concurrent_mode = 0;
+		if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
+			(op_mode == DHD_FLAG_P2P_MODE)) {
+#ifdef PKT_FILTER_SUPPORT
+			dhd_pkt_filter_enable = FALSE;
+#endif
+			dhd->op_mode = DHD_FLAG_P2P_MODE;
+		} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+			(op_mode == DHD_FLAG_IBSS_MODE)) {
+			dhd->op_mode = DHD_FLAG_IBSS_MODE;
+		} else
+			dhd->op_mode = DHD_FLAG_STA_MODE;
+#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
+		if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
+			(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+			dhd->op_mode |= concurrent_mode;
+		}
+
+		/* Check if we are enabling p2p */
+		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+			ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
+				TRUE);
+			if (ret < 0)
+				DHD_ERROR(("%s APSTA for P2P failed ret=%d\n", __FUNCTION__, ret));
+
+#if defined(SOFTAP_AND_GC)
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+				(char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
+			}
+#endif
+			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+			ETHER_SET_LOCALADDR(&p2p_ea);
+			ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
+				NULL, 0, TRUE);
+			if (ret < 0)
+				DHD_ERROR(("%s p2p_da_override ret=%d\n", __FUNCTION__, ret));
+			else
+				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
+		}
+#else
+		(void)concurrent_mode;
+#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
+	}
+
+#ifdef DISABLE_PRUNED_SCAN
+	if (FW_SUPPORTED(dhd, rsdb)) {
+		ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+			sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
+		if (ret < 0) {
+			if (ret == BCME_UNSUPPORTED) {
+				DHD_ERROR(("%s get scan_features, UNSUPPORTED\n",
+					__FUNCTION__));
+			} else {
+				DHD_ERROR(("%s get scan_features err(%d)\n",
+					__FUNCTION__, ret));
+			}
+		} else {
+			memcpy(&scan_features, iovbuf, 4);
+			scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
+			ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+				sizeof(scan_features), NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s set scan_features err(%d)\n",
+					__FUNCTION__, ret));
+			}
+		}
+	}
+#endif /* DISABLE_PRUNED_SCAN */
+
+	DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+		dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+	if (!dhd->is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+	{
+		/* get a ccode and revision for the country code */
+#if defined(CUSTOM_COUNTRY_CODE)
+		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+			&dhd->dhd_cspec, dhd->dhd_cflags);
+#else
+		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+			&dhd->dhd_cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+	}
+
+#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
+	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
+		dhd->info->rxthread_enabled = FALSE;
+	else
+		dhd->info->rxthread_enabled = TRUE;
+#endif
+	/* Set Country code */
+	if (dhd->dhd_cspec.ccode[0] != 0) {
+		ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
+			NULL, 0, TRUE);
+		if (ret < 0)
+			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+	}
+
+#if defined(DISABLE_11AC)
+	ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode, sizeof(vhtmode), NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11AC */
+
+	/* Set Listen Interval */
+	ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
+		NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+#ifdef USE_WFA_CERT_CONF
+	if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
+		DHD_ERROR(("%s: read roam_off param=%d\n", __FUNCTION__, roamvar));
+	}
+#endif /* USE_WFA_CERT_CONF */
+	/* Disable built-in roaming to allow the external supplicant to take care of roaming */
+	ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#if defined(ROAM_ENABLE)
+#ifdef DISABLE_BCNLOSS_ROAM
+	ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
+		sizeof(roam_bcnloss_off), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s roam_bcnloss_off failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* DISABLE_BCNLOSS_ROAM */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
+		sizeof(roam_trigger), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
+		sizeof(roam_scan_period), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
+	if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
+		sizeof(roam_delta), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
+	ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
+		sizeof(roam_fullscan_period), NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
+#ifdef ROAM_AP_ENV_DETECTION
+	if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
+		if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
+			sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
+			dhd->roam_env_detection = TRUE;
+		else
+			dhd->roam_env_detection = FALSE;
+	}
+#endif /* ROAM_AP_ENV_DETECTION */
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+	ret = dhd_roam_rssi_limit_set(dhd,
CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G); + if (ret < 0) { + DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret)); + } +#endif /* CONFIG_ROAM_RSSI_LIMIT */ +#ifdef CONFIG_ROAM_MIN_DELTA + ret = dhd_roam_min_delta_set(dhd, CUSTOM_ROAM_MIN_DELTA, CUSTOM_ROAM_MIN_DELTA); + if (ret < 0) { + DHD_ERROR(("%s set roam_min_delta failed ret %d\n", __FUNCTION__, ret)); + } +#endif /* CONFIG_ROAM_MIN_DELTA */ +#endif /* ROAM_ENABLE */ + +#ifdef CUSTOM_EVENT_PM_WAKE + /* XXX need to check time value */ + ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh, + sizeof(pm_awake_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef OKC_SUPPORT + dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE); +#endif +#ifdef BCMCCX + dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE); +#endif /* BCMCCX */ + +#ifdef WLTDLS + dhd->tdls_enable = FALSE; + dhd_tdls_set_mode(dhd, false); +#endif /* WLTDLS */ + +#ifdef DHD_ENABLE_LPC + /* Set lpc 1 */ + ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, + (char *)&wl_down, sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc)); + + ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE); + DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret)); + } + } +#endif /* DHD_ENABLE_LPC */ + +#ifdef WLADPS + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK && + (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("%s dhd_enable_adps failed %d\n", + __FUNCTION__, ret)); + } + } +#endif /* WLADPS */ + +#ifdef DHD_PM_CONTROL_FROM_FILE +#ifdef CUSTOMER_HW10 + dhd_control_pm(dhd, &power_mode); +#else + sec_control_pm(dhd, &power_mode); +#endif /* CUSTOMER_HW10 */ +#else + /* Set PowerSave mode */ + (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#if defined(BCMSDIO) + /* Match Host and Dongle rx alignment */ + ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set bus:txglomalign failed %d\n", __FUNCTION__, ret)); + } + +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) { + DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom)); + } +#endif /* USE_WFA_CERT_CONF */ + if (glom != DEFAULT_GLOM_VALUE) { + DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom)); + ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set bus:txglom failed %d\n", __FUNCTION__, ret)); + } + } +#endif /* defined(BCMSDIO) */ + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set bcn_timeout failed %d\n", __FUNCTION__, ret)); + } + + /* Setup assoc_retry_max count to reconnect target AP in dongle */ + ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set assoc_retry_max failed %d\n", 
__FUNCTION__, ret));
+	}
+
+#if defined(AP) && !defined(WLP2P)
+	ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s set apsta failed %d\n", __FUNCTION__, ret));
+	}
+
+#endif /* defined(AP) && !defined(WLP2P) */
+
+#ifdef MIMO_ANT_SETTING
+	dhd_sel_ant_from_file(dhd);
+#endif /* MIMO_ANT_SETTING */
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+	if (ap_fw_loaded == TRUE) {
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+	}
+#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
+
+#if defined(KEEP_ALIVE)
+	{
+		/* Set Keep Alive : be sure to use FW with -keepalive */
+		int res;
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+		if (ap_fw_loaded == FALSE)
+#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
+			if (!(dhd->op_mode &
+				(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+				if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+					DHD_ERROR(("%s set keepalive failed %d\n",
+						__FUNCTION__, res));
+			}
+	}
+#endif /* defined(KEEP_ALIVE) */
+
+#ifdef USE_WL_TXBF
+	ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
+
+#endif /* USE_WL_TXBF */
+
+	ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
+		0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
+	}
+
+#else /* OEM_ANDROID */
+	if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
+		DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
+		goto done;
+	}
+
+#if defined(KEEP_ALIVE)
+	if (!(dhd->op_mode &
+		(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+		if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
+			DHD_ERROR(("%s set keepalive failed %d\n",
+				__FUNCTION__, ret));
+	}
+#endif
+
+	/* get capabilities from firmware */
+	memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
+	ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities),
+		FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+			__FUNCTION__, ret));
+		goto done;
+	}
+#endif /* OEM_ANDROID */
+
+	ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
+		sizeof(event_log_max_sets), FALSE);
+	if (ret == BCME_OK) {
+		dhd->event_log_max_sets = event_log_max_sets;
+	} else {
+		dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
+	}
+	/* Make sure max_sets is written first, with a wmb, and only then
+	 * sets_queried; readers parse the logsets using these in the reverse order.
+	 */
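+	/* (Readers are expected to pair with this barrier: observe
+	 * event_log_max_sets_queried first, then consume event_log_max_sets.)
+	 */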
+	OSL_SMP_WMB();
+	dhd->event_log_max_sets_queried = TRUE;
+	DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
+		__FUNCTION__, dhd->event_log_max_sets, ret));
+#ifdef DHD_BUS_MEM_ACCESS
+	ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
+		sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: enable_memuse failed ret=%d\n",
+			__FUNCTION__, ret));
+	} else {
+		DHD_ERROR(("%s: enable_memuse = %d\n",
+			__FUNCTION__, enable_memuse));
+	}
+#endif /* DHD_BUS_MEM_ACCESS */
+
+#ifdef DISABLE_TXBFR
+	ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
+		0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* DISABLE_TXBFR */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef USE_WL_FRAMEBURST
+	if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
+		DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
+	}
+#endif /* USE_WL_FRAMEBURST */
+	g_frameburst = frameburst;
+#endif /* USE_WFA_CERT_CONF */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+	/* Disable Framebursting for SoftAP */
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		frameburst = 0;
+	}
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+	/* Set frameburst to value */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+		sizeof(frameburst), TRUE, 0)) < 0) {
+		DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
+	}
+#ifdef DHD_SET_FW_HIGHSPEED
+	/* Set ack_ratio */
+	ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
+	}
+
+	/* Set ack_ratio_depth */
+	ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
+		sizeof(ack_ratio_depth), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* DHD_SET_FW_HIGHSPEED */
+
+	iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+	if (iov_buf == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+		ret = BCME_NOMEM;
+		goto done;
+	}
+
+	BCM_REFERENCE(ret2);
+
+#ifdef WLAIBSS
+	/* Apply AIBSS configurations */
+	if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
+		DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
+			__FUNCTION__, ret));
+		goto done;
+	}
+#endif /* WLAIBSS */
+
+#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
+	defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+	/* Set ampdu ba wsize to 64 or 16 */
+#ifdef CUSTOM_AMPDU_BA_WSIZE
+	ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+#endif
+#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
+	if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
+		ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
+#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
+	if (ampdu_ba_wsize != 0) {
+		ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
+			sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
+				__FUNCTION__, ampdu_ba_wsize, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+
+#if defined(CUSTOM_AMPDU_MPDU)
+	ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+	if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+		ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
+			NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_MPDU */
+
+#if defined(CUSTOM_AMPDU_RELEASE)
+	ampdu_release = CUSTOM_AMPDU_RELEASE;
+	if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
+		ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
+			sizeof(ampdu_release), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_RELEASE */
+
+#if defined(CUSTOM_AMSDU_AGGSF)
+	amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
+	if (amsdu_aggsf != 0) {
+		ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
+			NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
+				__FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
+		}
+	}
+#endif /* CUSTOM_AMSDU_AGGSF */
+
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+	/* Read 4-way handshake requirements */
+	if (dhd_use_idsup == 1) {
+		ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
+			(char *)&iovbuf, sizeof(iovbuf), FALSE);
+		/* sup_wpa iovar returns NOTREADY status on some platforms using modularized
+		 * in-dongle supplicant.
+		 */
+		if (ret >= 0 || ret == BCME_NOTREADY)
+			dhd->fw_4way_handshake = TRUE;
+		DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
+	}
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
+	ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
+		(char *)&vht_features, sizeof(vht_features), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
+		vht_features = 0;
+	} else {
+#ifdef SUPPORT_2G_VHT
+		vht_features |= 0x3; /* 2G support */
+#endif /* SUPPORT_2G_VHT */
+#ifdef SUPPORT_5G_1024QAM_VHT
+		vht_features |= 0x6; /* 5G 1024 QAM support */
+#endif /* SUPPORT_5G_1024QAM_VHT */
+	}
+	if (vht_features) {
+		ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
+			NULL, 0, TRUE);
+		if (ret < 0) {
+			if (ret == BCME_NOTDOWN) {
+				uint wl_down = 1;
+				ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+					(char *)&wl_down, sizeof(wl_down), TRUE, 0);
+				DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
+					" vht_features = 0x%x\n",
+					__FUNCTION__, ret, vht_features));
+
+				ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
+					sizeof(vht_features), NULL, 0, TRUE);
+
+				DHD_ERROR(("%s vht_features set. 
ret --> %d\n", __FUNCTION__, ret)); + } + if (ret != BCME_BADOPTION) { + DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret)); + } else { + DHD_INFO(("%s vht_features ret(%d) - need to check BANDLOCK\n", + __FUNCTION__, ret)); + } + } + } +#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */ +#ifdef DISABLE_11N_PROPRIETARY_RATES + ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret)); + } +#endif /* DISABLE_11N_PROPRIETARY_RATES */ +#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB) +#if defined(DISABLE_HE_ENAB) + /* XXX DISABLE_HE_ENAB has higher priority than CUSTOM_CONTROL_HE_ENAB */ + control_he_enab = 0; +#endif /* DISABLE_HE_ENAB */ + dhd_control_he_enab(dhd, control_he_enab); +#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */ + +#ifdef CUSTOM_PSPRETEND_THR + /* Turn off MPC in AP mode */ + ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr, + sizeof(pspretend_thr), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n", + __FUNCTION__, ret)); + } +#endif + + /* XXX Enable firmware key buffering before sent 4-way M4 */ + ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret)); + } +#ifdef SUPPORT_SET_CAC + ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret)); + } +#endif /* SUPPORT_SET_CAC */ + /* make up event mask ext message iovar for event larger than 128 */ + msglen = WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE; + eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen); + if (eventmask_msg == NULL) { + DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen)); + ret = BCME_NOMEM; + goto done; + } + bzero(eventmask_msg, msglen); + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; + + /* Read event_msgs_ext mask */ + ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, + WLC_IOCTL_SMLEN, FALSE); + + /* event_msgs_ext must be supported */ + if (ret != BCME_OK) { + DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret)); + goto done; + } + + bcopy(iov_buf, eventmask_msg, msglen); + /* make up event mask ext message iovar for event larger than 128 */ + mask = eventmask_msg->mask; + + /* Setup event_msgs */ + setbit(mask, WLC_E_SET_SSID); + setbit(mask, WLC_E_PRUNE); + setbit(mask, WLC_E_AUTH); + setbit(mask, WLC_E_AUTH_IND); + setbit(mask, WLC_E_ASSOC); + setbit(mask, WLC_E_REASSOC); + setbit(mask, WLC_E_REASSOC_IND); + if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE)) + setbit(mask, WLC_E_DEAUTH); + setbit(mask, WLC_E_DEAUTH_IND); + setbit(mask, WLC_E_DISASSOC_IND); + setbit(mask, WLC_E_DISASSOC); + setbit(mask, WLC_E_JOIN); + setbit(mask, WLC_E_START); + setbit(mask, WLC_E_ASSOC_IND); + setbit(mask, WLC_E_PSK_SUP); + setbit(mask, WLC_E_LINK); + setbit(mask, WLC_E_MIC_ERROR); + setbit(mask, WLC_E_ASSOC_REQ_IE); + setbit(mask, WLC_E_ASSOC_RESP_IE); +#ifdef LIMIT_BORROW + setbit(mask, WLC_E_ALLOW_CREDIT_BORROW); +#endif +#ifndef WL_CFG80211 + setbit(mask, WLC_E_PMKID_CACHE); +// setbit(mask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event +#endif + setbit(mask, WLC_E_JOIN_START); +// setbit(mask, 
WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event +#ifdef DHD_DEBUG + setbit(mask, WLC_E_SCAN_CONFIRM_IND); +#endif +#ifdef PNO_SUPPORT + setbit(mask, WLC_E_PFN_NET_FOUND); + setbit(mask, WLC_E_PFN_BEST_BATCHING); + setbit(mask, WLC_E_PFN_BSSID_NET_FOUND); + setbit(mask, WLC_E_PFN_BSSID_NET_LOST); +#endif /* PNO_SUPPORT */ + /* enable dongle roaming event */ +#ifdef WL_CFG80211 +#if !defined(ROAM_EVT_DISABLE) + setbit(mask, WLC_E_ROAM); +#endif /* !ROAM_EVT_DISABLE */ + setbit(mask, WLC_E_BSSID); +#endif /* WL_CFG80211 */ +#ifdef BCMCCX + setbit(mask, WLC_E_ADDTS_IND); + setbit(mask, WLC_E_DELTS_IND); +#endif /* BCMCCX */ +#ifdef WLTDLS + setbit(mask, WLC_E_TDLS_PEER_EVENT); +#endif /* WLTDLS */ +#ifdef WL_ESCAN + setbit(mask, WLC_E_ESCAN_RESULT); +#endif /* WL_ESCAN */ +#ifdef CSI_SUPPORT + setbit(mask, WLC_E_CSI); +#endif /* CSI_SUPPORT */ +#ifdef RTT_SUPPORT + setbit(mask, WLC_E_PROXD); +#endif /* RTT_SUPPORT */ +#if !defined(WL_CFG80211) && !defined(OEM_ANDROID) + setbit(mask, WLC_E_ESCAN_RESULT); +#endif +#ifdef WL_CFG80211 + setbit(mask, WLC_E_ESCAN_RESULT); + setbit(mask, WLC_E_AP_STARTED); + setbit(mask, WLC_E_ACTION_FRAME_RX); + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + setbit(mask, WLC_E_P2P_DISC_LISTEN_COMPLETE); + } +#endif /* WL_CFG80211 */ +#ifdef WLAIBSS + setbit(mask, WLC_E_AIBSS_TXFAIL); +#endif /* WLAIBSS */ + +#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) + if (dhd_logtrace_from_file(dhd)) { + setbit(mask, WLC_E_TRACE); + } else { + clrbit(mask, WLC_E_TRACE); + } +#elif defined(SHOW_LOGTRACE) + setbit(mask, WLC_E_TRACE); +#else + clrbit(mask, WLC_E_TRACE); +#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */ + + setbit(mask, WLC_E_CSA_COMPLETE_IND); +#ifdef DHD_WMF + setbit(mask, WLC_E_PSTA_PRIMARY_INTF_IND); +#endif +#ifdef CUSTOM_EVENT_PM_WAKE + setbit(mask, WLC_E_EXCESS_PM_WAKE_EVENT); +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef DHD_LOSSLESS_ROAMING + setbit(mask, WLC_E_ROAM_PREP); +#endif + /* nan events */ + setbit(mask, WLC_E_NAN); +#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */ + +#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */ + +#ifdef RSSI_MONITOR_SUPPORT + setbit(mask, WLC_E_RSSI_LQM); +#endif /* RSSI_MONITOR_SUPPORT */ +#ifdef GSCAN_SUPPORT + setbit(mask, WLC_E_PFN_GSCAN_FULL_RESULT); + setbit(mask, WLC_E_PFN_SCAN_COMPLETE); + setbit(mask, WLC_E_PFN_SSID_EXT); + setbit(mask, WLC_E_ROAM_EXP_EVENT); +#endif /* GSCAN_SUPPORT */ + setbit(mask, WLC_E_RSSI_LQM); +#ifdef BT_WIFI_HANDOVER + setbit(mask, WLC_E_BT_WIFI_HANDOVER_REQ); +#endif /* BT_WIFI_HANDOVER */ +#ifdef DBG_PKT_MON + setbit(mask, WLC_E_ROAM_PREP); +#endif /* DBG_PKT_MON */ +#ifdef WL_NATOE + setbit(mask, WLC_E_NATOE_NFCT); +#endif /* WL_NATOE */ +#ifdef BCM_ROUTER_DHD + setbit(mask, WLC_E_DPSTA_INTF_IND); +#endif /* BCM_ROUTER_DHD */ + setbit(mask, WLC_E_SLOTTED_BSS_PEER_OP); +#ifdef WL_BCNRECV + setbit(mask, WLC_E_BCNRECV_ABORTED); +#endif /* WL_BCNRECV */ +#ifdef WL_MBO + setbit(mask, WLC_E_MBO); +#endif /* WL_MBO */ +#ifdef WL_CLIENT_SAE + setbit(mask, WLC_E_JOIN_START); +#endif /* WL_CLIENT_SAE */ +#ifdef WL_CAC_TS + setbit(mask, WLC_E_ADDTS_IND); + setbit(mask, WLC_E_DELTS_IND); +#endif /* WL_BCNRECV */ +#ifdef CUSTOMER_HW6 + setbit(mask, WLC_E_COUNTRY_CODE_CHANGED); +#endif /* CUSTOMER_HW6 */ + + /* 
Write updated Event mask */
+	eventmask_msg->ver = EVENTMSGS_VER;
+	eventmask_msg->command = EVENTMSGS_SET_MASK;
+	eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+	ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
+		TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+	/* Enabling event log trace for EAP events */
+	el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
+	if (el_tag == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for el_tag\n",
+			(int)sizeof(wl_el_tag_params_t)));
+		ret = BCME_NOMEM;
+		goto done;
+	}
+	el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
+	el_tag->set = 1;
+	el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
+	ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL,
+		0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s set event_log_tag_control fail %d\n", __FUNCTION__, ret));
+	}
+#endif /* DHD_8021X_DUMP */
+#ifdef DHD_RANDMAC_LOGGING
+	if (FW_SUPPORTED((dhd), event_log)) {
+		if (dhd_iovar(dhd, 0, "privacy_mask", (char *)&privacy_mask, sizeof(privacy_mask),
+			NULL, 0, TRUE) < 0) {
+			DHD_ERROR(("failed to set privacy mask\n"));
+		}
+	} else {
+		/* Don't enable the feature, to prevent macaddr print in clear text */
+		DHD_ERROR(("skip privacy_mask set. event_log not enabled\n"));
+	}
+#endif /* DHD_RANDMAC_LOGGING */
+
+#ifdef OEM_ANDROID
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+		sizeof(scan_assoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+		sizeof(scan_unassoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+		sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
+		dhd->arpoe_enable, dhd->arpol_configured));
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	/* Setup default definitions for pktfilter, enable in suspend */
+	if (dhd_master_mode) {
+		dhd->pktfilter_count = 6;
+		dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+		if (!FW_SUPPORTED(dhd, pf6)) {
+			dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+			dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+		} else {
+			/* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
+			dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
+			dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
+		}
+		/* apply APP pktfilter */
+		dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
+
+#ifdef BLOCK_IPV6_PACKET
+		/* Setup filter to allow only IPv4 unicast frames */
+		dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
+			HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
+			" "
+			HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
+#else
+		/* Setup filter to allow only unicast */
+		dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+#endif /* BLOCK_IPV6_PACKET */
+
+#ifdef PASS_IPV4_SUSPEND
+		/* XXX customer wants to get IPv4 multicast packets */
+		dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
+#else
+		/* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
+		dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+#endif /* PASS_IPV4_SUSPEND */
+		if (FW_SUPPORTED(dhd, pf6)) {
+			/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
+			dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
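+			/* A type 6 (immediate drop) entry was added above, so grow the
+			 * filter table to cover it (indexes 0..7 in use, assuming
+			 * DHD_IP4BCAST_DROP_FILTER_NUM == 7).
+			 */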
+            dhd->pktfilter_count = 8;
+        }
+
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+        dhd->pktfilter_count = 4;
+        /* Set up filters to block broadcast and NAT keepalive packets */
+        /* discard all broadcast packets */
+        dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
+        /* discard NAT keepalive packets */
+        dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
+        /* discard NAT keepalive packets */
+        dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
+        dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+    } else
+        dhd_conf_discard_pkt_filter(dhd);
+    dhd_conf_add_pkt_filter(dhd);
+
+#if defined(SOFTAP)
+    if (ap_fw_loaded) {
+        /* XXX Andrey: for SOFTAP, disable pkt filters (if there were any) */
+        dhd_enable_packet_filter(0, dhd);
+    }
+#endif /* defined(SOFTAP) */
+    dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_11N
+    ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
+    if (ret < 0)
+        DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11N */
+
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+    ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn),
+        NULL, 0, TRUE);
+    if (ret < 0) {
+        DHD_ERROR(("%s: set bcn_li_bcn failed %d\n", __FUNCTION__, ret));
+    }
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#ifdef AMPDU_VO_ENABLE
+    /* XXX: Enabling VO AMPDU to reduce FER */
+    tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
+    tid.enable = TRUE;
+    ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
+    if (ret < 0) {
+        DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
+    }
+
+    tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
+    tid.enable = TRUE;
+    ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
+    if (ret < 0) {
+        DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
+    }
+#endif /* AMPDU_VO_ENABLE */
+#if defined(SOFTAP_TPUT_ENHANCE)
+    if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+#if defined(BCMSDIO)
+        dhd_bus_setidletime(dhd, (int)100);
+#endif /* BCMSDIO */
+#ifdef DHDTCPACK_SUPPRESS
+        dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+#endif
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+        dhd_use_tcp_window_size_adjust = TRUE;
+#endif
+
+#if defined(BCMSDIO)
+        memset(buf, 0, sizeof(buf));
+        ret = dhd_iovar(dhd, 0, "bus:txglom_auto_control", NULL, 0, buf, sizeof(buf),
+            FALSE);
+        if (ret < 0) {
+            glom = 0;
+            ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom),
+                NULL, 0, TRUE);
+            if (ret < 0) {
+                DHD_ERROR(("%s bus:txglom failed %d\n", __FUNCTION__, ret));
+            }
+        } else {
+            if (buf[0] == 0) {
+                glom = 1;
+                ret = dhd_iovar(dhd, 0, "bus:txglom_auto_control", (char *)&glom,
+                    sizeof(glom), NULL, 0, TRUE);
+                if (ret < 0) {
+                    DHD_ERROR(("%s bus:txglom_auto_control failed %d\n",
+                        __FUNCTION__, ret));
+                }
+            }
+        }
+#endif /* BCMSDIO */
+    }
+#endif /* SOFTAP_TPUT_ENHANCE */
+    /* query 'clmver' to get the CLM version info from the firmware */
+    bzero(buf, sizeof(buf));
+    ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
+    if (ret < 0)
+        DHD_ERROR(("%s clmver failed %d\n", __FUNCTION__, ret));
+    else {
+        char *ver_temp_buf = NULL, *ver_date_buf = NULL;
+        int len;
+
+        if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+            DHD_ERROR(("Couldn't find \"Data:\"\n"));
+        } else {
+            ver_date_buf = bcmstrstr(buf, "Creation:");
+            ptr = (ver_temp_buf + strlen("Data:"));
+            if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+                DHD_ERROR(("Couldn't find New line character\n"));
+            } else {
+                memset(clm_version, 0, CLM_VER_STR_LEN);
+                len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf);
+                if (ver_date_buf) {
+                    ptr = (ver_date_buf + strlen("Creation:"));
+                    ver_date_buf = bcmstrtok(&ptr, "\n", 0);
+                    if (ver_date_buf)
+                        snprintf(clm_version+len, CLM_VER_STR_LEN-1-len,
+                            " (%s)", ver_date_buf);
+                }
+                DHD_INFO(("CLM version = %s\n", clm_version));
+            }
+        }
+
+#if defined(CUSTOMER_HW4_DEBUG)
+        if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
+            DHD_ERROR(("Couldn't find \"Customization:\"\n"));
+        } else {
+            char tokenlim;
+            ptr = (ver_temp_buf + strlen("Customization:"));
+            if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
+                DHD_ERROR(("Couldn't find project blob version "
+                    "or New line character\n"));
+            } else if (tokenlim == '(') {
+                snprintf(clm_version,
+                    CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
+                    clm_version, ver_temp_buf);
+                DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
+                if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
+                    DHD_ERROR(("Couldn't find New line character\n"));
+                } else {
+                    snprintf(clm_version,
+                        strlen(clm_version) + strlen(ver_temp_buf),
+                        "%s%s", clm_version, ver_temp_buf);
+                    DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
+                        clm_version));
+
+                }
+            } else if (tokenlim == '\n') {
+                snprintf(clm_version,
+                    strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
+                    "%s, Blob ver = Major : ", clm_version);
+                snprintf(clm_version,
+                    strlen(clm_version) + strlen(ver_temp_buf) + 1,
+                    "%s%s", clm_version, ver_temp_buf);
+                DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
+            }
+        }
+#endif /* CUSTOMER_HW4_DEBUG */
+        if (strlen(clm_version)) {
+            DHD_INFO(("CLM version = %s\n", clm_version));
+        } else {
+            DHD_ERROR(("Couldn't find CLM version!\n"));
+        }
+    }
+    dhd_set_version_info(dhd, fw_version);
+
+#ifdef WRITE_WLANINFO
+    sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
+
+#endif /* defined(OEM_ANDROID) */
+#ifdef GEN_SOFTAP_INFO_FILE
+    sec_save_softap_info();
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#if defined(BCMSDIO)
+    dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
+#endif /* defined(BCMSDIO) */
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+    if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+        /* enable WLFC only if the firmware is VSDB when it is in STA mode */
+        (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+         dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+        FALSE) {
+        wlfc_enable = FALSE;
+    }
+    ret = dhd_conf_get_disable_proptx(dhd);
+    if (ret == 0) {
+        disable_proptx = 0;
+        wlfc_enable = TRUE;
+    } else if (ret >= 1) {
+        disable_proptx = 1;
+        wlfc_enable = FALSE;
+        /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
+        hostreorder = 0;
+    }
+
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+    if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
+        DHD_ERROR(("%s: read proptx param=%d\n", __FUNCTION__, proptx));
+        wlfc_enable = proptx;
+    }
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
+
+#ifndef DISABLE_11N
+    ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+    ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
+        NULL, 0, TRUE);
+    if (ret2 < 0) {
+        DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+        if (ret2 != BCME_UNSUPPORTED)
+            ret = ret2;
+
+        if (ret == BCME_NOTDOWN) {
+            uint wl_down = 1;
+            ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
+                sizeof(wl_down), TRUE, 0);
+            DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
+                __FUNCTION__, ret2, hostreorder));
+
+            ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
+                sizeof(hostreorder), NULL, 0, TRUE);
+            DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
+            if (ret2 != BCME_UNSUPPORTED)
+                ret = ret2;
+        }
+        if (ret2 != BCME_OK)
+            hostreorder = 0;
+    }
+#endif /* DISABLE_11N */
+
+#ifdef READ_CONFIG_FROM_FILE
+    dhd_preinit_config(dhd, 0);
+#endif /* READ_CONFIG_FROM_FILE */
+
+    if (wlfc_enable) {
+        dhd_wlfc_init(dhd);
+        /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
+        dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
+    }
+#ifndef DISABLE_11N
+    else if (hostreorder)
+        dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+#else
+    /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS is not defined */
+    printf("%s: PROP_TXSTATUS not defined\n", __FUNCTION__);
+    dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMDBUS */
+#ifndef PCIE_FULL_DONGLE
+    /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
+    if (FW_SUPPORTED(dhd, ap)) {
+        wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
+        ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
+            NULL, 0, TRUE);
+        if (ret < 0)
+            DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+    }
+#endif /* PCIE_FULL_DONGLE */
+#ifdef PNO_SUPPORT
+    if (!dhd->pno_state) {
+        dhd_pno_init(dhd);
+    }
+#endif /* PNO_SUPPORT */
+
+#ifdef DHD_PKTTS
+    /* get the pkt metadata buffer length supported by FW */
+    if (dhd_wl_ioctl_get_intiovar(dhd, "bus:metadata_info", &val,
+        WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+        DHD_ERROR(("%s: failed to get pkt metadata buflen, use IPC pkt TS.\n",
+            __FUNCTION__));
+        /*
+         * If the iovar fails, the IPC method of collecting TS
+         * should be used, hence set metadata_buflen to 0 here.
+         * This is checked later on Tx completion to decide whether
+         * the IPC or the metadata method of reading TS should be used.
+         */
+        dhd->pkt_metadata_version = 0;
+        dhd->pkt_metadata_buflen = 0;
+    } else {
+        dhd->pkt_metadata_version = GET_METADATA_VER(val);
+        dhd->pkt_metadata_buflen = GET_METADATA_BUFLEN(val);
+    }
+
+    /* Check whether FW supports pktlat; if so, enable the pktts_enab iovar */
+    ret = dhd_set_pktts_enab(dhd, TRUE);
+    if (ret < 0) {
+        DHD_ERROR(("%s: Enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
+    }
+#endif /* DHD_PKTTS */
+
+#ifdef RTT_SUPPORT
+    if (dhd->rtt_state) {
+        ret = dhd_rtt_init(dhd);
+        if (ret < 0) {
+            DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
+        }
+    }
+#endif /* RTT_SUPPORT */
+#ifdef FILTER_IE
+    /* Failure to configure filter IE is not a fatal error; ignore it. */
+    if (FW_SUPPORTED(dhd, fie) &&
+        !(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+        dhd_read_from_file(dhd);
+    }
+#endif /* FILTER_IE */
+#ifdef WL11U
+    dhd_interworking_enable(dhd);
+#endif /* WL11U */
+
+#ifdef NDO_CONFIG_SUPPORT
+    dhd->ndo_enable = FALSE;
+    dhd->ndo_host_ip_overflow = FALSE;
+    dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
+#endif /* NDO_CONFIG_SUPPORT */
+
+    /* ND offload version supported */
+    dhd->ndo_version = dhd_ndo_get_version(dhd);
+    if (dhd->ndo_version > 0) {
+        DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
+
+#ifdef NDO_CONFIG_SUPPORT
+        /* enable Unsolicited NA filter */
+        ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
+        if (ret < 0) {
+            DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
+        }
+#endif /* NDO_CONFIG_SUPPORT */
+    }
+
+    /* check whether the dongle supports wbtext (product policy) */
+    dhd->wbtext_support = FALSE;
+    if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
+        WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+        DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
+    }
+    dhd->wbtext_policy = wnm_bsstrans_resp;
+    if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
+        dhd->wbtext_support = TRUE;
+    }
+#ifndef WBTEXT
+    /* the driver can turn off the wbtext feature through the makefile */
+    if (dhd->wbtext_support) {
+        if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
+            WL_BSSTRANS_POLICY_ROAM_ALWAYS,
+            WLC_SET_VAR, FALSE, 0) != BCME_OK) {
+            DHD_ERROR(("failed to disable WBTEXT\n"));
+        }
+    }
+#endif /* !WBTEXT */
+
+#ifdef DHD_NON_DMA_M2M_CORRUPTION
+    /* check pcie non-DMA loopback */
+    if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
+        (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
+        goto done;
+    }
+#endif /* DHD_NON_DMA_M2M_CORRUPTION */
+
+    /* WNM capabilities */
+    wnm_cap = 0
+#ifdef WL11U
+        | WL_WNM_BSSTRANS | WL_WNM_NOTIF
+#endif
+#ifdef WBTEXT
+        | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
+#endif
+        ;
+#if defined(WL_MBO) && defined(WL_OCE)
+    if (FW_SUPPORTED(dhd, estm)) {
+        wnm_cap |= WL_WNM_ESTM;
+    }
+#endif /* WL_MBO && WL_OCE */
+    if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
+        DHD_ERROR(("failed to set WNM capabilities\n"));
+    }
+
+#ifdef CUSTOM_ASSOC_TIMEOUT
+    /* set recreate_bi_timeout to increase the assoc timeout:
+     * 20 * 100TU * 1024 / 1000 = 2 secs
+     * (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000)
+     */
+    if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout",
+        CUSTOM_ASSOC_TIMEOUT,
+        WLC_SET_VAR, TRUE, 0) != BCME_OK) {
+        DHD_ERROR(("failed to set assoc timeout\n"));
+    }
+#endif /* CUSTOM_ASSOC_TIMEOUT */
+
+#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
+    if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
+        NULL, 0, TRUE) < 0) {
+        DHD_ERROR(("failed to set BTM delta\n"));
+    }
+#endif /* WBTEXT && WBTEXT_BTMDELTA */
+#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
+    if (dhd_iovar(dhd, 0, "rrm_bcn_req_thrtl_win",
+        (char *)&rrm_bcn_req_thrtl_win, sizeof(rrm_bcn_req_thrtl_win),
+        NULL, 0, TRUE) < 0) {
+        DHD_ERROR(("failed to set RRM BCN request thrtl_win\n"));
+    }
+    if (dhd_iovar(dhd, 0, "rrm_bcn_req_max_off_chan_time",
+        (char *)&rrm_bcn_req_max_off_chan_time, sizeof(rrm_bcn_req_max_off_chan_time),
+        NULL, 0, TRUE) < 0) {
+        DHD_ERROR(("failed to set RRM BCN Request max_off_chan_time\n"));
+    }
+#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
+
+#ifdef WL_MONITOR
+#ifdef HOST_RADIOTAP_CONV
+    /* The 'monitor' IOVAR is queried to check whether the FW supports radiotap conversion.
+     * This is indicated through the MSB (1<<31) bit, based on which host radiotap
+     * conversion will be enabled or disabled.
+     * 0 - Host supports Radiotap conversion.
+     * 1 - FW supports Radiotap conversion.
+     */
+    bcm_mkiovar("monitor", (char *)&monitor, sizeof(monitor), iovbuf, sizeof(iovbuf));
+    if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_MONITOR, iovbuf,
+        sizeof(iovbuf), FALSE, 0)) == 0) {
+        memcpy(&monitor, iovbuf, sizeof(monitor));
+        dhdinfo->host_radiotap_conv = (monitor & HOST_RADIOTAP_CONV_BIT) ? TRUE : FALSE;
+    } else {
+        DHD_ERROR(("%s Failed to get monitor mode, err %d\n",
+            __FUNCTION__, ret2));
+    }
+#endif /* HOST_RADIOTAP_CONV */
+    if (FW_SUPPORTED(dhd, monitor)) {
+        dhd->monitor_enable = TRUE;
+        DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
+    } else {
+        dhd->monitor_enable = FALSE;
+        DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
+    }
+#endif /* WL_MONITOR */
+
+    /* store the preserve log set numbers */
+    if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
+        != BCME_OK) {
+        DHD_ERROR(("%s: Failed to get preserve log #!\n", __FUNCTION__));
+    }
+
+    if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
+        dhd_ecounter_configure(dhd, TRUE);
+    }
+
+#ifdef CONFIG_SILENT_ROAM
+    dhd->sroam_turn_on = TRUE;
+    dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
+    dhd_set_bandlock(dhd);
+
+    dhd_conf_postinit_ioctls(dhd);
+done:
+
+    if (eventmask_msg) {
+        MFREE(dhd->osh, eventmask_msg, msglen);
+    }
+    if (iov_buf) {
+        MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
+    }
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+    if (el_tag) {
+        MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
+    }
+#endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
+    return ret;
+}
+
+/* Enable preinit optimisation by default */
+#define DHD_PREINIT_OPTIMISATION
+
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+    int ret = 0;
+
+#ifdef DHD_PREINIT_OPTIMISATION
+    int preinit_status = 0;
+    ret = dhd_iovar(dhd, 0, "preinit_status", NULL, 0, (char *)&preinit_status,
+        sizeof(preinit_status), FALSE);
+
+    if (ret == BCME_OK) {
+        DHD_ERROR(("%s: preinit_status IOVAR present, use optimised preinit\n",
+            __FUNCTION__));
+        dhd->fw_preinit = TRUE;
+        ret = dhd_optimised_preinit_ioctls(dhd);
+    } else if (ret == BCME_UNSUPPORTED) {
+        DHD_ERROR(("%s: preinit_status IOVAR not supported, use legacy preinit\n",
+            __FUNCTION__));
+        dhd->fw_preinit = FALSE;
+        ret = dhd_legacy_preinit_ioctls(dhd);
+    } else {
+        DHD_ERROR(("%s: preinit_status IOVAR returned err(%d), ABORT\n",
+            __FUNCTION__, ret));
+    }
+#else
+    dhd->fw_preinit = FALSE;
+    ret = dhd_legacy_preinit_ioctls(dhd);
+#endif /* DHD_PREINIT_OPTIMISATION */
+    return ret;
+}
+
+int
+dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
+    uint cmd_len, char **resptr, uint resp_len)
+{
+    int len = resp_len;
+    int ret;
+    char *buf = *resptr;
+    wl_ioctl_t ioc;
+    if (resp_len > WLC_IOCTL_MAXLEN)
+        return BCME_BADARG;
+
+    memset(buf, 0, resp_len);
+
+    ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+    if (ret == 0) {
+        return BCME_BUFTOOSHORT;
+    }
+
+    memset(&ioc, 0, sizeof(ioc));
+
+    ioc.cmd = WLC_GET_VAR;
+    ioc.buf = buf;
+    ioc.len = len;
+    ioc.set = 0;
+
+    ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+
+    return ret;
+}
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+    struct dhd_info *dhd = dhdp->info;
+    struct net_device *dev = NULL;
+
+    ASSERT(dhd && dhd->iflist[ifidx]);
+    dev = dhd->iflist[ifidx]->net;
+    ASSERT(dev);
+
+#ifndef DHD_TPUT_PATCH
+    if (netif_running(dev)) {
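+        /* Illustrative note: unless a DHD_TPUT_PATCH build relaxes this
+         * check, the MTU can only be changed while the interface is
+         * administratively down (interface name assumed), e.g.
+         *   ip link set wlan0 down
+         *   ip link set wlan0 mtu 1752
+         *   ip link set wlan0 up
+         * Values outside the DHD_MIN_MTU..DHD_MAX_MTU window defined
+         * below are rejected with BCME_BADARG.
+         */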
+        DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
+        return BCME_NOTDOWN;
+    }
+#endif /* !DHD_TPUT_PATCH */
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+    if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+        DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+        return BCME_BADARG;
+    }
+
+    dev->mtu = new_mtu;
+    return 0;
+}
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
+static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
+{
+    int ret = BCME_OK;
+    struct net_device *primary_ndev;
+    struct bcm_cfg80211 *cfg;
+    unsigned long flags = 0;
+    primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+
+    if (!primary_ndev) {
+        DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+    cfg = wl_get_cfg(primary_ndev);
+
+    if (!cfg) {
+        DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    DHD_GENERAL_LOCK(dhdp, flags);
+    if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+        DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+        dhd_os_busbusy_wake(dhdp);
+        DHD_GENERAL_UNLOCK(dhdp, flags);
+        DHD_ERROR(("%s: bus is down! can't collect log dump.\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+    DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
+    DHD_GENERAL_UNLOCK(dhdp, flags);
+
+    DHD_OS_WAKE_LOCK(dhdp);
+    /* Check whether the HAL has started; only then send the event, otherwise
+     * clear the dump state here.
+     */
+    if (wl_cfg80211_is_hal_started(cfg)) {
+        int timeleft = 0;
+
+        DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
+        dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
+
+        DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
+            __FUNCTION__, dhdp->dhd_bus_busy_state));
+        timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
+            &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
+        if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
+            DHD_ERROR(("%s: Timed out(%d) dhd_bus_busy_state=0x%x\n",
+                __FUNCTION__, timeleft, dhdp->dhd_bus_busy_state));
+            ret = BCME_ERROR;
+        }
+    } else {
+        DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
+        ret = BCME_ERROR;
+    }
+
+    DHD_OS_WAKE_UNLOCK(dhdp);
+    /* In case of a dhd_os_busbusy_wait_bitmask() timeout, the
+     * hal dump bit will not be cleared. Hence clearing it here.
+     */
+    DHD_GENERAL_LOCK(dhdp, flags);
+    DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+    dhd_os_busbusy_wake(dhdp);
+    DHD_GENERAL_UNLOCK(dhdp, flags);
+
+    return ret;
+}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
+/* XXX the add operation is more efficient */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
+{
+    u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+    int i;
+    int ret;
+
+    bzero(ipv4_buf, sizeof(ipv4_buf));
+
+    /* display what we've got */
+    ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+    DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+    dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif /* AOE_DBG */
+    /* now that we have saved the host_ip table, clear it in the dongle AOE */
+    dhd_aoe_hostip_clr(dhd_pub, idx);
+
+    if (ret) {
+        DHD_ERROR(("%s failed\n", __FUNCTION__));
+        return;
+    }
+
+    for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+        if (add && (ipv4_buf[i] == 0)) {
+            ipv4_buf[i] = ipa;
+            add = FALSE; /* added ipa to local table */
+            DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+                __FUNCTION__, i));
+        } else if (ipv4_buf[i] == ipa) {
+            ipv4_buf[i] = 0;
+            DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+                __FUNCTION__, ipa, i));
+        }
+
+        if (ipv4_buf[i] != 0) {
+            /* add back host_ip entries from our local cache */
+            dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
+            DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+                __FUNCTION__, ipv4_buf[i], i));
+        }
+    }
+#ifdef AOE_DBG
+    /* see the resulting hostip table */
+    dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+    DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+    dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif /* AOE_DBG */
+}
+
+/* XXX this function is only for IP address */
+/*
+ * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
+ * whenever there is an event related to an IP address.
+ * ptr : kernel-provided pointer to the IP address that has changed
+ */
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+    unsigned long event,
+    void *ptr)
+{
+    struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+    dhd_info_t *dhd;
+    dhd_pub_t *dhd_pub;
+    int idx;
+
+    if (!ifa || !(ifa->ifa_dev->dev))
+        return NOTIFY_DONE;
+
+    /* Filter notifications meant for non-Broadcom devices */
+    if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
+        (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
+#if defined(WL_ENABLE_P2P_IF)
+        if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
+#endif /* WL_ENABLE_P2P_IF */
+            return NOTIFY_DONE;
+    }
+
+    dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
+    if (!dhd)
+        return NOTIFY_DONE;
+
+    dhd_pub = &dhd->pub;
+
+    if (!dhd_pub->arpoe_enable) {
+        DHD_ERROR(("arpoe_enable not set\n"));
+        return NOTIFY_DONE;
+    }
+
+    if (dhd_pub->arp_version == 1) {
+        idx = 0;
+    } else {
+        for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+            if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+                break;
+        }
+        if (idx < DHD_MAX_IFS)
+            DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+                dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+        else {
+            DHD_ERROR(("Cannot find ifidx for (%s), set to 0\n", ifa->ifa_label));
+            idx = 0;
+        }
+    }
+
+    switch (event) {
+    case NETDEV_UP:
+        DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+            __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+        /*
+         * Skip if the Bus is not in a state to transport the IOVAR
+         * (or) the Dongle is not ready.
+         */
+        if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
+            dhd->pub.busstate == DHD_BUS_LOAD) {
+            DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
+                __FUNCTION__, dhd->pub.busstate));
+            if (dhd->pend_ipaddr) {
+                DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+                    __FUNCTION__, dhd->pend_ipaddr));
+            }
+            dhd->pend_ipaddr = ifa->ifa_address;
+            break;
+        }
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+        /* XXX HOSTAPD will be returned at first */
+        DHD_ARPOE(("%s: add aliased IP to AOE hostip cache\n",
+            __FUNCTION__));
+        aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+        dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE);
+        break;
+
+    case NETDEV_DOWN:
+        DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+            __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+        dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+        /* XXX HOSTAPD will be returned at first */
+        DHD_ARPOE(("%s: interface is down, AOE clr all for this if\n",
+            __FUNCTION__));
+        if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
+            (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
+            aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+        } else
+#endif /* AOE_IP_ALIAS_SUPPORT */
+        {
+            /* XXX clear ALL arp and hostip tables */
+            dhd_aoe_hostip_clr(&dhd->pub, idx);
+            dhd_aoe_arp_clr(&dhd->pub, idx);
+        }
+        dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE);
+        break;
+
+    default:
+        DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
+            __func__, ifa->ifa_label, event));
+        break;
+    }
+    return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
+{
+    struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+    dhd_info_t *dhd = (dhd_info_t *)dhd_info;
+    dhd_pub_t *dhdp;
+    int ret;
+
+    if (!dhd) {
+        DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
+        goto done;
+    }
+    dhdp = &dhd->pub;
+
+    if (event != DHD_WQ_WORK_IPV6_NDO) {
+        DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
+        goto done;
+    }
+
+    if (!ndo_work) {
+        DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
+        return;
+    }
+
+    switch (ndo_work->event) {
+    case NETDEV_UP:
+#ifndef NDO_CONFIG_SUPPORT
+        DHD_TRACE(("%s: Enable NDO\n", __FUNCTION__));
+        ret = dhd_ndo_enable(dhdp, TRUE);
+        if (ret < 0) {
+            DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+        }
+#endif /* !NDO_CONFIG_SUPPORT */
+        DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
+        if (dhdp->ndo_version > 0) {
+            /* the inet6 addr notifier is called only for unicast addresses */
+            ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
+                WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
+        } else {
+            ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
+                ndo_work->if_idx);
+        }
+        if (ret < 0) {
+            DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
+                __FUNCTION__, ret));
+        }
+        break;
+    case NETDEV_DOWN:
+        if (dhdp->ndo_version > 0) {
+            DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
+            ret = dhd_ndo_remove_ip_by_addr(dhdp,
+                &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+        } else {
+            DHD_TRACE(("%s: Clear host ip table for NDO\n", __FUNCTION__));
+            ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
+        }
+        if (ret < 0) {
+            DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+                __FUNCTION__, ret));
+            goto done;
+        }
+#ifdef NDO_CONFIG_SUPPORT
+        if (dhdp->ndo_host_ip_overflow) {
+            ret = dhd_dev_ndo_update_inet6addr(
+                dhd_idx2net(dhdp, ndo_work->if_idx));
+            if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+                DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
+                    __FUNCTION__, ret));
+                goto done;
+            }
+        }
+#else /* !NDO_CONFIG_SUPPORT */
+        DHD_TRACE(("%s: Disable NDO\n", __FUNCTION__));
+        ret = dhd_ndo_enable(dhdp, FALSE);
+        if (ret < 0) {
+            DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+            goto done;
+        }
+#endif /* NDO_CONFIG_SUPPORT */
+        break;
+
+    default:
+        DHD_ERROR(("%s: unknown notifier event\n", __FUNCTION__));
+        break;
+    }
+done:
+
+    /* free ndo_work, allocated while scheduling the work */
+    if (ndo_work) {
+        kfree(ndo_work);
+    }
+
+    return;
+} /* dhd_inet6_work_handler */
+
+/*
+ * Neighbor Discovery Offload: Called when an interface
+ * is assigned an ipv6 address.
+ * Handles only the primary interface
+ */
+int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
+{
+    dhd_info_t *dhd;
+    dhd_pub_t *dhdp;
+    struct inet6_ifaddr *inet6_ifa = ptr;
+    struct ipv6_work_info_t *ndo_info;
+    int idx;
+
+    /* Filter notifications meant for non-Broadcom devices */
+    if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+        return NOTIFY_DONE;
+    }
+
+    dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
+    if (!dhd) {
+        return NOTIFY_DONE;
+    }
+    dhdp = &dhd->pub;
+
+    /* Supports only the primary interface */
+    idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
+    if (idx != 0) {
+        return NOTIFY_DONE;
+    }
+
+    /* FW capability */
+    if (!FW_SUPPORTED(dhdp, ndoe)) {
+        return NOTIFY_DONE;
+    }
+
+    ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+    if (!ndo_info) {
+        DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+        return NOTIFY_DONE;
+    }
+
+    /* fill up ndo_info */
+    ndo_info->event = event;
+    ndo_info->if_idx = idx;
+    memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
+
+    /* defer the work to a thread as it may block the kernel */
+    dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+        dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
+    return NOTIFY_DONE;
+}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+
+/* Network attach, to be invoked from the bus probe handlers */
+int
+dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
+{
+    struct net_device *primary_ndev;
+#ifdef GET_CUSTOM_MAC_ENABLE
+    char hw_ether[62];
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#if defined(GET_CUSTOM_MAC_ENABLE) || defined(GET_OTP_MAC_ENABLE)
+    int ret = BCME_ERROR;
+#endif /* GET_CUSTOM_MAC_ENABLE || GET_OTP_MAC_ENABLE */
+
+    BCM_REFERENCE(primary_ndev);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+    ret = wifi_platform_get_mac_addr(dhdp->adapter, hw_ether, 0);
+    if (!ret)
+        bcopy(hw_ether, dhdp->mac.octet, ETHER_ADDR_LEN);
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef GET_OTP_MAC_ENABLE
+    if (ret && memcmp(&ether_null, &dhdp->conf->otp_mac, ETHER_ADDR_LEN))
+        bcopy(&dhdp->conf->otp_mac, &dhdp->mac, ETHER_ADDR_LEN);
+#endif /* GET_OTP_MAC_ENABLE */
+
+    /* Register the primary net device */
+    if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
+        return BCME_ERROR;
+    }
+
+#if defined(WL_CFG80211)
+    primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+    if (wl_cfg80211_net_attach(primary_ndev) < 0) {
+        /* fail the init */
+        dhd_remove_if(dhdp, 0, TRUE);
+        return BCME_ERROR;
+    }
+#endif /* WL_CFG80211 */
+    return BCME_OK;
+}
+
+int
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
+{
+    dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+    dhd_if_t *ifp;
+    struct net_device *net = NULL;
+    int err = 0;
+    uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+    DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+    if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
+        DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    ASSERT(dhd && dhd->iflist[ifidx]);
+    ifp = dhd->iflist[ifidx];
+    net = ifp->net;
+    ASSERT(net && (ifp->idx == ifidx));
+
+    ASSERT(!net->netdev_ops);
+    net->netdev_ops = &dhd_ops_virt;
+
+    /* Ok, link into the network layer... */
+    if (ifidx == 0) {
+        /*
+         * device functions for the primary interface only
+         */
+        net->netdev_ops = &dhd_ops_pri;
+        if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
+            memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+    } else {
+        /*
+         * We have to use the primary MAC for virtual interfaces
+         */
+        memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
+#if defined(OEM_ANDROID)
+        /*
+         * Android sets the locally administered bit to indicate that this is a
+         * portable hotspot. This will not work in simultaneous AP/STA mode,
+         * nor with P2P. Need to set the Dongle's MAC address, and then use that.
+         */
+        if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+            ETHER_ADDR_LEN)) {
+            DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+                __func__, net->name));
+            temp_addr[0] |= 0x02;
+        }
+#endif /* defined(OEM_ANDROID) */
+    }
+
+    net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#ifdef HOST_SFH_LLC
+    net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
+#endif
+
+#ifdef DHD_AWDL
+    if (dhdp->awdl_ifidx &&
+        ifidx == dhdp->awdl_ifidx) {
+        /* A total of 30 bytes are required for the
+         * ethernet + AWDL LLC header. Out of this, 14
+         * bytes in the form of the ethernet header are already
+         * present in the skb handed over by the stack.
+         * So we need to reserve an additional 16 bytes as
+         * headroom. Out of these 16 bytes, if the host
+         * sfh llc feature is being used, then an additional
+         * 8 bytes are already being reserved
+         * during dhd_register_if (below), hence reserving
+         * only an additional 8 bytes is enough. If the host
+         * sfh llc feature is not used, then all of the 16
+         * bytes need to be reserved from here.
+         */
+        net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
+#ifndef HOST_SFH_LLC
+        net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
+#endif /* HOST_SFH_LLC */
+    }
+#endif /* DHD_AWDL */
+
+    net->ethtool_ops = &dhd_ethtool_ops;
+
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+    net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+    net->wireless_handlers = &wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+    /* XXX Set up an MTU change notifier as per linux/notifier.h? */
+    dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+#ifdef WLMESH
+    if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
+        temp_addr[4] ^= 0x80;
+        temp_addr[4] += ifidx;
+        temp_addr[5] += ifidx;
+    }
+#endif /* WLMESH */
+    /*
+     * XXX Linux 2.6.25 does not like a blank MAC address, so use a
+     * dummy address until the interface is brought up.
+     */
+    memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+    if (ifidx == 0)
+        printf("%s\n", dhd_version);
+    else {
+#ifdef WL_EXT_IAPSTA
+        wl_ext_iapsta_update_net_device(net, ifidx);
+#endif /* WL_EXT_IAPSTA */
+        if (dhd->pub.up == 1) {
+            if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr, FALSE) == 0)
+                DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
+            else
+                DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+        }
+    }
+
+    if (need_rtnl_lock)
+        err = register_netdev(net);
+    else {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) && defined(WL_CFG80211)
+        err = cfg80211_register_netdevice(net);
+#else
+        err = register_netdevice(net);
+#endif
+    }
+
+    if (err != 0) {
+        DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
+        goto fail;
+    }
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+    if ((ctf_dev_register(dhd->cih, net, FALSE) != BCME_OK) ||
+        (ctf_enable(dhd->cih, net, TRUE, &dhd->brc_hot) != BCME_OK)) {
+        DHD_ERROR(("%s:%d: ctf_dev_register/ctf_enable failed for interface %d\n",
+            __FUNCTION__, __LINE__, ifidx));
+        goto fail;
+    }
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#ifdef WL_EVENT
+    wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
+#endif /* WL_EVENT */
+#ifdef WL_ESCAN
+    wl_escan_event_attach(net, ifidx);
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+    wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
+    wl_ext_iapsta_attach_name(net, ifidx);
+#endif /* WL_EXT_IAPSTA */
+
+#if defined(CONFIG_TIZEN)
+    net_stat_tizen_register(net);
+#endif /* CONFIG_TIZEN */
+
+    printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
+#if defined(CUSTOMER_HW4_DEBUG)
+        MAC2STRDBG(dhd->pub.mac.octet));
+#else
+        MAC2STRDBG(net->dev_addr));
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#if defined(OEM_ANDROID) && (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS))
+    if (ifidx == 0) {
+#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+        up(&dhd_registration_sem);
+#endif /* BCMLXSDMMC */
+        if (!dhd_download_fw_on_driverload) {
+#ifdef WL_CFG80211
+            wl_terminate_event_handler(net);
+#endif /* WL_CFG80211 */
+#if defined(DHD_LB_RXP)
+            __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+            skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+
+#ifdef SHOW_LOGTRACE
+            /* Release the skbs from queue for WLC_E_TRACE event */
+            dhd_event_logtrace_flush_queue(dhdp);
+#endif /* SHOW_LOGTRACE */
+
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+            dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+
+#if defined(WLAN_ACCEL_BOOT)
+            dhd->fs_check_retry = DHD_FS_CHECK_RETRIES;
+            dhd->wl_accel_boot_on_done = FALSE;
+            INIT_DELAYED_WORK(&dhd->wl_accel_work, dhd_wifi_accel_on_work_cb);
+#if !defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH)
+            /* If the WLAN_ACCEL_SKIP_WQ_IN_ATTACH feature is enabled,
+             * the dhd_wifi_accel_on_work_cb() is called in dhd_open()
+             * to skip dongle firmware downloading during insmod and dhd_attach.
+             */
+            schedule_delayed_work(&dhd->wl_accel_work,
+                msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS));
+#endif /* !defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH) */
+#else
+            /* Turn off Wifi after boot up */
+#if defined(BT_OVER_SDIO)
+            dhd_bus_put(&dhd->pub, WLAN_MODULE);
+            wl_android_set_wifi_on_flag(FALSE);
+#else
+            wl_android_wifi_off(net, TRUE);
+#endif /* BT_OVER_SDIO */
+#endif /* WLAN_ACCEL_BOOT */
+
+        }
+    }
+#endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC || BCMDBUS) */
+#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
+    gdb_proxy_fs_try_create(ifp->info, net->name);
+#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
+    return 0;
+
+fail:
+    net->netdev_ops = NULL;
+    return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+    dhd_info_t *dhd;
+
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    if (dhdp) {
+        dhd = (dhd_info_t *)dhdp->info;
+        if (dhd) {
+
+            /*
+             * In case of the Android cfg80211 driver, the bus is down in dhd_stop,
+             * and calling stop again will cause SD read/write errors.
+             */
+            if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
+                /* Stop the protocol module */
+                dhd_prot_stop(&dhd->pub);
+
+                /* Stop the bus module */
+#ifdef BCMDBUS
+                /* Force Dongle terminated */
+                if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
+                    DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
+                        __FUNCTION__));
+                dbus_stop(dhd->pub.bus);
+                DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+                dhd->pub.busstate = DHD_BUS_DOWN;
+#else
+                dhd_bus_stop(dhd->pub.bus, TRUE);
+#endif /* BCMDBUS */
+            }
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
+            dhd_bus_oob_intr_unregister(dhdp);
+#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
+        }
+    }
+}
+
+void dhd_detach(dhd_pub_t *dhdp)
+{
+    dhd_info_t *dhd;
+    unsigned long flags;
+    int timer_valid = FALSE;
+    struct net_device *dev = NULL;
+    dhd_if_t *ifp;
+#ifdef WL_CFG80211
+    struct bcm_cfg80211 *cfg = NULL;
+#endif
+    if (!dhdp)
+        return;
+
+    dhd = (dhd_info_t *)dhdp->info;
+    if (!dhd)
+        return;
+
+#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
+    gdb_proxy_fs_remove(dhd);
+#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
+
+    /* primary interface 0 */
+    ifp = dhd->iflist[0];
+    if (ifp && ifp->net) {
+        dev = ifp->net;
+    }
+
+    if (dev) {
+        rtnl_lock();
+#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
+        if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+            wl_cfg80211_static_if_dev_close(dev);
+        }
+#endif /* WL_CFG80211 && WL_STATIC_IF */
+        if (dev->flags & IFF_UP) {
+            /* If IFF_UP is still set, it indicates that
+             * "ifconfig wlan0 down" hasn't been called.
+             * So invoke dev_close explicitly here to
+             * bring down the interface.
+             */
+            DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach\n"));
+            dev_close(dev);
+        }
+        rtnl_unlock();
+    }
+
+    DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+    /* XXX kernel panic issue at first boot:
+     * rmmod without interface down makes an unnecessary hang event.
+     */
+    DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
+    dhd->pub.up = 0;
+    if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+        /* Give sufficient time for threads to start running in case
+         * dhd_attach() has failed
+         */
+        OSL_SLEEP(100);
+    }
+#ifdef DHD_WET
+    dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
+#endif /* DHD_WET */
+#ifdef WL_NANHO
+    /* deinit NANHO host module */
+    bcm_nanho_deinit(dhd->pub.nanhoi);
+#endif /* WL_NANHO */
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef PROP_TXSTATUS
+#ifdef DHD_WLFC_THREAD
+    if (dhd->pub.wlfc_thread) {
+        kthread_stop(dhd->pub.wlfc_thread);
+        dhdp->wlfc_thread_go = TRUE;
+        wake_up_interruptible(&dhdp->wlfc_wqhead);
+    }
+    dhd->pub.wlfc_thread = NULL;
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+
+#ifdef DHD_TIMESYNC
+    if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
+        dhd_timesync_detach(dhdp);
+    }
+#endif /* DHD_TIMESYNC */
+
+    if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+
+#if defined(OEM_ANDROID) || !defined(BCMSDIO)
+        dhd_bus_detach(dhdp);
+#endif /* OEM_ANDROID || !BCMSDIO */
+#ifdef OEM_ANDROID
+#ifdef BCMPCIE
+        if (is_reboot == SYS_RESTART) {
+            extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
+            if (dhd_wifi_platdata && !dhdp->dongle_reset) {
+                dhdpcie_bus_stop_host_dev(dhdp->bus);
+                wifi_platform_set_power(dhd_wifi_platdata->adapters,
+                    FALSE, WIFI_TURNOFF_DELAY);
+            }
+        }
+#endif /* BCMPCIE */
+#endif /* OEM_ANDROID */
+#ifndef PCIE_FULL_DONGLE
+#if defined(OEM_ANDROID) || !defined(BCMSDIO)
+        if (dhdp->prot)
+            dhd_prot_detach(dhdp);
+#endif /* OEM_ANDROID || !BCMSDIO */
+#endif /* !PCIE_FULL_DONGLE */
+    }
+
+#ifdef ARP_OFFLOAD_SUPPORT
+    if (dhd_inetaddr_notifier_registered) {
+        dhd_inetaddr_notifier_registered = FALSE;
+        unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+    }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+    if (dhd_inet6addr_notifier_registered) {
+        dhd_inet6addr_notifier_registered = FALSE;
+        unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+    }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+    if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+        if (dhd->early_suspend.suspend)
+            unregister_early_suspend(&dhd->early_suspend);
+    }
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#if defined(WL_WIRELESS_EXT)
+    if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+        /* Detach and unlink in the iw */
+        wl_iw_detach(dev);
+    }
+#endif /* defined(WL_WIRELESS_EXT) */
+#ifdef WL_EXT_GENL
+    wl_ext_genl_deinit(dev);
+#endif
+#ifdef WL_EXT_IAPSTA
+    wl_ext_iapsta_dettach(dev);
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+    wl_escan_detach(dev);
+#endif /* WL_ESCAN */
+#ifdef WL_EVENT
+    wl_ext_event_dettach(dhdp);
+#endif /* WL_EVENT */
+
+    /* delete all interfaces, starting with the virtual ones */
+    if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+        int i = 1;
+
+        /* Cleanup virtual interfaces */
+        dhd_net_if_lock_local(dhd);
+        for (i = 1; i < DHD_MAX_IFS; i++) {
+            if (dhd->iflist[i]) {
+                dhd_remove_if(&dhd->pub, i, TRUE);
+            }
+        }
+        dhd_net_if_unlock_local(dhd);
+
+        /* 'ifp' indicates primary interface 0, clean it up. */
+        if (ifp && ifp->net) {
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+            if (dhd->cih)
+                ctf_dev_unregister(dhd->cih, ifp->net);
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#ifdef WL_CFG80211
+            cfg = wl_get_cfg(ifp->net);
+#endif
+            /* in unregister_netdev case, the interface gets freed by net->destructor
+             * (which is set to free_netdev)
+             */
+            if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+                free_netdev(ifp->net);
+            } else {
+#ifdef SET_RPS_CPUS
+                custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
+                netif_tx_disable(ifp->net);
+                unregister_netdev(ifp->net);
+            }
+#ifdef PCIE_FULL_DONGLE
+            ifp->net = DHD_NET_DEV_NULL;
+#else
+            ifp->net = NULL;
+#endif /* PCIE_FULL_DONGLE */
+#if defined(BCMSDIO) && !defined(OEM_ANDROID)
+            dhd_bus_detach(dhdp);
+
+            if (dhdp->prot)
+                dhd_prot_detach(dhdp);
+#endif /* BCMSDIO && !OEM_ANDROID */
+
+#ifdef DHD_WMF
+            dhd_wmf_cleanup(dhdp, 0);
+#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+            bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+                NULL, FALSE, dhdp->tickcnt);
+            deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+            ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+            MFREE(dhdp->osh, ifp->qosmap_up_table, UP_TABLE_MAX);
+            ifp->qosmap_up_table_enable = FALSE;
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+            dhd_if_del_sta_list(ifp);
+
+            MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+            ifp = NULL;
+#ifdef WL_CFG80211
+            if (cfg && cfg->wdev)
+                cfg->wdev->netdev = NULL;
+#endif
+        }
+    }
+
+    /* Clear the watchdog timer */
+    DHD_GENERAL_LOCK(&dhd->pub, flags);
+    timer_valid = dhd->wd_timer_valid;
+    dhd->wd_timer_valid = FALSE;
+    DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+    if (timer_valid)
+        del_timer_sync(&dhd->timer);
+    DHD_STOP_RPM_TIMER(&dhd->pub);
+
+#ifdef BCMDBUS
+    tasklet_kill(&dhd->tasklet);
+#else
+    if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHD_PCIE_RUNTIMEPM
+        if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+            PROC_STOP(&dhd->thr_rpm_ctl);
+        }
+#endif /* DHD_PCIE_RUNTIMEPM */
+        if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+            PROC_STOP(&dhd->thr_wdt_ctl);
+        }
+
+        if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
+            PROC_STOP(&dhd->thr_rxf_ctl);
+        }
+
+        if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+            PROC_STOP(&dhd->thr_dpc_ctl);
+        } else {
+            tasklet_kill(&dhd->tasklet);
+        }
+    }
+#endif /* BCMDBUS */
+
+#ifdef WL_NATOE
+    if (dhd->pub.nfct) {
+        dhd_ct_close(dhd->pub.nfct);
+    }
+#endif /* WL_NATOE */
+
+    cancel_delayed_work_sync(&dhd->dhd_dpc_dispatcher_work);
+#ifdef DHD_LB
+    if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
+        /* Clear the flag first to avoid calling the cpu notifier */
+        dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
+
+        /* Kill the Load Balancing Tasklets */
+#ifdef DHD_LB_RXP
+        cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+        __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+        cancel_work_sync(&dhd->tx_dispatcher_work);
+        tasklet_kill(&dhd->tx_tasklet);
+        __skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+
+        /* Unregister from CPU Hotplug framework */
+        dhd_unregister_cpuhp_callback(dhd);
+
+        dhd_cpumasks_deinit(dhd);
+        DHD_LB_STATS_DEINIT(&dhd->pub);
+    }
+#endif /* DHD_LB */
+
+#ifdef CSI_SUPPORT
+    dhd_csi_deinit(dhdp);
+#endif /* CSI_SUPPORT */
+
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+    cancel_work_sync(&dhd->axi_error_dispatcher_work);
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
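+    /* Ordering note: the work cancellations above (dispatcher works,
+     * tasklets) must complete before the state their handlers touch
+     * (the SSSR/ETB pools, EDL memory and debug rings torn down below)
+     * is freed; a work item running after its buffers are released
+     * would be a use-after-free.
+     */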
+    DHD_SSSR_REG_INFO_DEINIT(&dhd->pub);
+    DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
+
+#ifdef DHD_SDTC_ETB_DUMP
+    dhd_sdtc_etb_mempool_deinit(&dhd->pub);
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef EWP_EDL
+    if (host_edl_support) {
+        DHD_EDL_MEM_DEINIT(dhdp);
+        host_edl_support = FALSE;
+    }
+#endif /* EWP_EDL */
+
+#ifdef WL_CFG80211
+    if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+        if (!cfg) {
+            DHD_ERROR(("cfg NULL!\n"));
+            ASSERT(0);
+        } else {
+            wl_cfg80211_detach(cfg);
+            dhd_monitor_uninit();
+        }
+    }
+#endif
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+    destroy_workqueue(dhd->tx_wq);
+    dhd->tx_wq = NULL;
+    destroy_workqueue(dhd->rx_wq);
+    dhd->rx_wq = NULL;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DEBUGABILITY
+    if (dhdp->dbg) {
+#ifdef DBG_PKT_MON
+        dhd_os_dbg_detach_pkt_monitor(dhdp);
+        osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
+#endif /* DBG_PKT_MON */
+    }
+#endif /* DEBUGABILITY */
+    if (dhdp->dbg) {
+        dhd_os_dbg_detach(dhdp);
+    }
+#ifdef DHD_MEM_STATS
+    osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.mem_stats_lock);
+#endif /* DHD_MEM_STATS */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+    osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.awdl_stats_lock);
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+#ifdef DHD_PKT_LOGGING
+    dhd_os_detach_pktlog(dhdp);
+#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_STATUS_LOGGING
+    dhd_detach_statlog(dhdp);
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_PKTDUMP_ROAM
+    dhd_dump_pkt_deinit(dhdp);
+#endif /* DHD_PKTDUMP_ROAM */
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+    if (dhd->pub.hang_info) {
+        MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
+    }
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+#ifdef SHOW_LOGTRACE
+    /* Release the skbs from queue for WLC_E_TRACE event */
+    dhd_event_logtrace_flush_queue(dhdp);
+
+    /* Wait till the event logtrace context finishes */
+    dhd_cancel_logtrace_process_sync(dhd);
+
+    /* Remove ring proc entries */
+    dhd_dbg_ring_proc_destroy(&dhd->pub);
+
+    if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
+        if (dhd->event_data.fmts) {
+            MFREE(dhd->pub.osh, dhd->event_data.fmts,
+                dhd->event_data.fmts_size);
+        }
+        if (dhd->event_data.raw_fmts) {
+            MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
+                dhd->event_data.raw_fmts_size);
+        }
+        if (dhd->event_data.raw_sstr) {
+            MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
+                dhd->event_data.raw_sstr_size);
+        }
+        if (dhd->event_data.rom_raw_sstr) {
+            MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
+                dhd->event_data.rom_raw_sstr_size);
+        }
+        dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
+    }
+#endif /* SHOW_LOGTRACE */
+#ifdef BTLOG
+    skb_queue_purge(&dhd->bt_log_queue);
+#endif /* BTLOG */
+#ifdef PNO_SUPPORT
+    if (dhdp->pno_state)
+        dhd_pno_deinit(dhdp);
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+    if (dhdp->rtt_state) {
+        dhd_rtt_detach(dhdp);
+    }
+#endif /* RTT_SUPPORT */
+#if defined(CONFIG_PM_SLEEP)
+    if (dhd_pm_notifier_registered) {
+        unregister_pm_notifier(&dhd->pm_notifier);
+        dhd_pm_notifier_registered = FALSE;
+    }
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef DEBUG_CPU_FREQ
+    if (dhd->new_freq)
+        free_percpu(dhd->new_freq);
+    dhd->new_freq = NULL;
+    cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif /* DEBUG_CPU_FREQ */
+    DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
+#ifdef CONFIG_HAS_WAKELOCK
+    dhd->wakelock_wd_counter = 0;
+    dhd_wake_lock_unlock_destroy(&dhd->wl_wdwake);
+    /* terence 20161023: do not destroy wl_wifi while wlan is down; it would
+     * cause a null pointer dereference in dhd_ioctl_entry
+     */
+    dhd_wake_lock_unlock_destroy(&dhd->wl_wifi);
+#endif /* CONFIG_HAS_WAKELOCK */
+    if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+        DHD_OS_WAKE_LOCK_DESTROY(dhd);
+    }
+
+#ifdef DHDTCPACK_SUPPRESS
+    /* This will free all MEM allocated for TCPACK SUPPRESS */
+    dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef PCIE_FULL_DONGLE
+    dhd_flow_rings_deinit(dhdp);
+    if (dhdp->prot)
+        dhd_prot_detach(dhdp);
+#endif /* PCIE_FULL_DONGLE */
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+    dhd_free_tdls_peer_list(dhdp);
+#endif
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+    /* Release the CTF pool ONLY after the prot layer is detached and
+     * pkts, possibly from the fast ctfpool, are freed into ctfpool/kernel
+     */
+#ifdef CTFPOOL
+    /* free the buffers in the fast pool */
+    osl_ctfpool_cleanup(dhd->pub.osh);
+#endif /* CTFPOOL */
+
+    /* free ctf resources */
+    if (dhd->cih)
+        ctf_detach(dhd->cih);
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+#ifdef BCMDBG
+    dhd_macdbg_detach(dhdp);
+#endif /* BCMDBG */
+
+#ifdef DUMP_IOCTL_IOV_LIST
+    dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef DHD_DEBUG
+    /* memory waste feature list cleanup */
+    dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
+#endif /* DHD_DEBUG */
+#ifdef WL_MONITOR
+    dhd_del_monitor_if(dhd);
+#endif /* WL_MONITOR */
+
+#ifdef DHD_ERPOM
+    if (dhdp->enable_erpom) {
+        dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
+    }
+#endif /* DHD_ERPOM */
+
+    cancel_work_sync(&dhd->dhd_hang_process_work);
+
+    /* Prefer adding de-init code above this comment unless necessary.
+     * The idea is to cancel work queue, sysfs and flags at the end.
+     */
+    dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+    dhd->dhd_deferred_wq = NULL;
+
+    /* log dump related buffers should be freed after the wq is purged */
+#ifdef DHD_LOG_DUMP
+    dhd_log_dump_deinit(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
+#if defined(BCMPCIE)
+    if (dhdp->extended_trap_data) {
+        MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+        dhdp->extended_trap_data = NULL;
+    }
+#ifdef DNGL_AXI_ERROR_LOGGING
+    if (dhdp->axi_err_dump) {
+        MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
+        dhdp->axi_err_dump = NULL;
+    }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* BCMPCIE */
+
+#ifdef BTLOG
+    /* Wait till bt_log_dispatcher_work finishes */
+    cancel_work_sync(&dhd->bt_log_dispatcher_work);
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+    cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
+#endif /* EWP_EDL */
+
+    (void)dhd_deinit_sock_flows_buf(dhd);
+
+#ifdef DHD_DUMP_MNGR
+    if (dhd->pub.dump_file_manage) {
+        MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
+            sizeof(dhd_dump_file_manage_t));
+    }
+#endif /* DHD_DUMP_MNGR */
+
+    dhd_sysfs_exit(dhd);
+    dhd->pub.fw_download_status = FW_UNLOADED;
+
+#if defined(BT_OVER_SDIO)
+    mutex_destroy(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+    (void)dhd_fwtrace_detach(dhdp);
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_TX_PROFILE
+    (void)dhd_tx_profile_detach(dhdp);
+#endif /* defined(DHD_TX_PROFILE) */
+    dhd_conf_detach(dhdp);
+
+} /* dhd_detach */
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+    dhd_info_t *dhd;
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    if (dhdp) {
+        int i;
+        for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+            if (dhdp->reorder_bufs[i]) {
+                reorder_info_t *ptr;
+                uint32 buf_size = sizeof(struct reorder_info);
+
+                ptr = dhdp->reorder_bufs[i];
+
+                buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+                DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+                    i, ptr->max_idx, buf_size));
+
+                MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+            }
+        }
+
+        dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
+
+        dhd = (dhd_info_t *)dhdp->info;
+        if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+            DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+            if (is_vmalloc_addr(dhdp->soc_ram)) {
+                VMFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+            } else {
+                MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+            }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+            dhdp->soc_ram = NULL;
+        }
+#ifdef CACHE_FW_IMAGES
+        if (dhdp->cached_fw) {
+            MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
+        }
+
+        if (dhdp->cached_nvram) {
+            MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
+        }
+#endif /* CACHE_FW_IMAGES */
+        if (dhd != NULL) {
+#ifdef REPORT_FATAL_TIMEOUTS
+            deinit_dhd_timeouts(&dhd->pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+            /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
+            if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
+                DHD_PREALLOC_DHD_INFO, 0, FALSE))
+                MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+            dhd = NULL;
+        }
+    }
+}
+
+void
+dhd_clear(dhd_pub_t *dhdp)
+{
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    if (dhdp) {
+        int i;
+#ifdef DHDTCPACK_SUPPRESS
+        /* Clean up timer/data structure for any remaining/pending packet or timer. */
+        dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
+        for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+            if (dhdp->reorder_bufs[i]) {
+                reorder_info_t *ptr;
+                uint32 buf_size = sizeof(struct reorder_info);
+
+                ptr = dhdp->reorder_bufs[i];
+
+                buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+                DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+                    i, ptr->max_idx, buf_size));
+
+                MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+            }
+        }
+
+        dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
+
+        if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+            DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+            if (is_vmalloc_addr(dhdp->soc_ram)) {
+                VMFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+            } else {
+                MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+            }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+            dhdp->soc_ram = NULL;
+        }
+    }
+}
+
+static void
+dhd_module_cleanup(void)
+{
+    printf("%s: Enter\n", __FUNCTION__);
+
+    dhd_bus_unregister();
+
+#if defined(OEM_ANDROID)
+    wl_android_exit();
+#endif /* OEM_ANDROID */
+
+    dhd_wifi_platform_unregister_drv();
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+    wifi_teardown_dt();
+#endif
+#endif /* CUSTOMER_HW_AMLOGIC */
+    printf("%s: Exit\n", __FUNCTION__);
+}
+
+static void __exit
+dhd_module_exit(void)
+{
+    atomic_set(&exit_in_progress, 1);
+#ifdef DHD_BUZZZ_LOG_ENABLED
+    dhd_buzzz_detach();
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+    dhd_module_cleanup();
+    unregister_reboot_notifier(&dhd_reboot_notifier);
+    dhd_destroy_to_notifier_skt();
+#ifdef DHD_PKTTS
+    dhd_destroy_to_notifier_ts();
+#endif /* DHD_PKTTS */
+}
+
+static int
+_dhd_module_init(void)
+{
+    int err;
+    int retry = POWERUP_MAX_RETRY;
+
+    printk(KERN_ERR PERCENT_S DHD_LOG_PREFIXS "%s: in %s\n",
+        PRINTF_SYSTEM_TIME, __FUNCTION__, dhd_version);
+    if (ANDROID_VERSION > 0)
+        printf("ANDROID_VERSION = %d\n", ANDROID_VERSION);
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+    if (wifi_setup_dt()) {
+        printf("wifi_dt : fail to setup dt\n");
+    }
+#endif
+#endif /* CUSTOMER_HW_AMLOGIC */
+
+#ifdef DHD_BUZZZ_LOG_ENABLED
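+    /* dhd_buzzz is the optional event-trace facility shipped with this
+     * driver (see dhd_buzzz.h); it is attached here, before the platform
+     * driver registers, presumably so that tracing covers the earliest
+     * init path.
+     */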
+    dhd_buzzz_attach();
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+
+#if defined(BCM_ROUTER_DHD)
+    { /* XXX Should we maintain nvram budget/thresholds per 5G|2G radio? */
+        char *var;
+        if ((var = getvar(NULL, "dhd_queue_budget")) != NULL) {
+            dhd_queue_budget = bcm_strtoul(var, NULL, 0);
+        }
+        DHD_ERROR(("dhd_queue_budget = %d\n", dhd_queue_budget));
+
+        if ((var = getvar(NULL, "dhd_sta_threshold")) != NULL) {
+            dhd_sta_threshold = bcm_strtoul(var, NULL, 0);
+        }
+        DHD_ERROR(("dhd_sta_threshold = %d\n", dhd_sta_threshold));
+
+        if ((var = getvar(NULL, "dhd_if_threshold")) != NULL) {
+            dhd_if_threshold = bcm_strtoul(var, NULL, 0);
+        }
+        DHD_ERROR(("dhd_if_threshold = %d\n", dhd_if_threshold));
+    }
+#endif /* BCM_ROUTER_DHD */
+
+    if (firmware_path[0] != '\0') {
+        strlcpy(fw_bak_path, firmware_path, sizeof(fw_bak_path));
+    }
+
+    if (nvram_path[0] != '\0') {
+        strlcpy(nv_bak_path, nvram_path, sizeof(nv_bak_path));
+    }
+
+    do {
+        err = dhd_wifi_platform_register_drv();
+        if (!err) {
+            register_reboot_notifier(&dhd_reboot_notifier);
+            break;
+        } else {
+            DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
+                __FUNCTION__, retry));
+            strlcpy(firmware_path, fw_bak_path, sizeof(firmware_path));
+            strlcpy(nvram_path, nv_bak_path, sizeof(nvram_path));
+        }
+    } while (retry--);
+
+    dhd_create_to_notifier_skt();
+
+#ifdef DHD_PKTTS
+    dhd_create_to_notifier_ts();
+#endif /* DHD_PKTTS */
+
+    if (err) {
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+        wifi_teardown_dt();
+#endif
+#endif /* CUSTOMER_HW_AMLOGIC */
+        DHD_ERROR(("%s: Failed to load driver, max retry reached\n", __FUNCTION__));
+    } else {
+        if (!dhd_download_fw_on_driverload) {
+            dhd_driver_init_done = TRUE;
+        }
+    }
+
+    printf("%s: Exit err=%d\n", __FUNCTION__, err);
+    return err;
+}
+
+static int __init
+dhd_module_init(void)
+{
+    int err;
+
+    err = _dhd_module_init();
+#ifdef DHD_SUPPORT_HDM
+    if (err && !dhd_download_fw_on_driverload) {
+        dhd_hdm_wlan_sysfs_init();
+    }
+#endif /* DHD_SUPPORT_HDM */
+    return err;
+}
+
+#ifdef DHD_SUPPORT_HDM
+bool hdm_trigger_init = FALSE;
+struct delayed_work hdm_sysfs_wq;
+
+int
+dhd_module_init_hdm(void)
+{
+    int err = 0;
+
+    hdm_trigger_init = TRUE;
+
+    if (dhd_driver_init_done) {
+        DHD_INFO(("%s : Module is already initialized\n", __FUNCTION__));
+        return err;
+    }
+
+    err = _dhd_module_init();
+
+    /* remove the sysfs file after the module has loaded properly */
+    if (!err && !dhd_download_fw_on_driverload) {
+        INIT_DELAYED_WORK(&hdm_sysfs_wq, dhd_hdm_wlan_sysfs_deinit);
+        schedule_delayed_work(&hdm_sysfs_wq, msecs_to_jiffies(SYSFS_DEINIT_MS));
+    }
+
+    hdm_trigger_init = FALSE;
+    return err;
+}
+#endif /* DHD_SUPPORT_HDM */
+
+static int
+dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
+{
+    DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
+    if (code == SYS_RESTART) {
+#ifdef OEM_ANDROID
+#ifdef BCMPCIE
+        is_reboot = code;
+#endif /* BCMPCIE */
+#else
+        dhd_module_cleanup();
+#endif /* OEM_ANDROID */
+    }
+    return NOTIFY_DONE;
+}
+
+#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
+/* XXX To decrease the device boot time, the deferred_module_init() macro can be
+ * used. The detailed principle and implementation of deferred_module_init()
+ * can be found at http://elinux.org/Deferred_Initcalls
+ * To enable this feature for a module build, add another
+ * deferred_module_init() definition to include/linux/init.h in the Linux kernel:
+ * #define deferred_module_init(fn) module_init(fn)
+ */
+#if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
+deferred_module_init_sync(dhd_module_init);
+#else
+deferred_module_init(dhd_module_init);
+#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
+#else
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+
+module_exit(dhd_module_exit);
+
+/*
+ * OS-specific functions required to implement the DHD driver in an OS-independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+        down(&dhd->proto_sem);
+
+        return 1;
+    }
+
+    return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+        up(&dhd->proto_sem);
+        return 1;
+    }
+
+    return 0;
+}
+
+void
+dhd_os_dhdiovar_lock(dhd_pub_t *pub)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+        mutex_lock(&dhd->dhd_iovar_mutex);
+    }
+}
+
+void
+dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+        mutex_unlock(&dhd->dhd_iovar_mutex);
+    }
+}
+
+void
+dhd_os_logdump_lock(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd = NULL;
+
+    if (!pub)
+        return;
+
+    dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+        mutex_lock(&dhd->logdump_lock);
+    }
+}
+
+void
+dhd_os_logdump_unlock(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd = NULL;
+
+    if (!pub)
+        return;
+
+    dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+        mutex_unlock(&dhd->logdump_lock);
+    }
+}
+
+unsigned long
+dhd_os_dbgring_lock(void *lock)
+{
+    if (!lock)
+        return 0;
+
+    mutex_lock((struct mutex *)lock);
+
+    return 0;
+}
+
+void
+dhd_os_dbgring_unlock(void *lock, unsigned long flags)
+{
+    BCM_REFERENCE(flags);
+
+    if (!lock)
+        return;
+
+    mutex_unlock((struct mutex *)lock);
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+    return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+    dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+    int timeout;
+
+    /* Convert the timeout in milliseconds to jiffies */
+    timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+
+#ifdef BCMQT_HW
+    DHD_ERROR(("%s, Timeout wait until %d mins (%d ms) in QT mode\n",
+        __FUNCTION__, (dhd_ioctl_timeout_msec / (60 * 1000)), dhd_ioctl_timeout_msec));
+#endif /* BCMQT_HW */
+
+    timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+
+    return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+    wake_up(&dhd->ioctl_resp_wait);
+    return 0;
+}
+
+int
+dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+    int timeout;
+
+    /* Convert the timeout in milliseconds to jiffies */
+    timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
+#ifdef BCMSLTGT
+    timeout *= htclkratio;
+#endif /* BCMSLTGT */
+
+    timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
+
+    return timeout;
+}
+
+#ifdef PCIE_INB_DW
+int
+dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+    int timeout;
+
+    /* Convert the timeout in milliseconds to jiffies */
+    timeout = msecs_to_jiffies(ds_exit_timeout_msec);
+#ifdef BCMSLTGT
+    timeout *= htclkratio;
+#endif /* BCMSLTGT */
+
+    timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);
+
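+/* XXX Hedged sketch, hypothetical names: the dhd_os_proto_block()/
+ * dhd_os_proto_unblock() pair above returns 1 when the lock was taken and
+ * 0 when the private struct is already gone, so callers can bail out
+ * instead of touching a freed handle. The same guard shape in isolation:
+ */
+#if 0
+#include <linux/semaphore.h>
+
+struct my_priv {
+    struct semaphore proto_sem; /* assume sema_init() was done at attach */
+};
+
+static int my_proto_block(struct my_priv *priv)
+{
+    if (!priv)
+        return 0;   /* detached: tell the caller to give up */
+    down(&priv->proto_sem); /* may sleep, so process context only */
+    return 1;
+}
+
+static void my_proto_unblock(struct my_priv *priv)
+{
+    if (priv)
+        up(&priv->proto_sem);
+}
+#endif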
+    return timeout;
+}
+
+int
+dhd_os_ds_exit_wake(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+    wake_up_all(&dhd->ds_exit_wait);
+    return 0;
+}
+
+#endif /* PCIE_INB_DW */
+
+int
+dhd_os_d3ack_wake(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+    wake_up(&dhd->d3ack_wait);
+    return 0;
+}
+
+int
+dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+    int timeout;
+
+    /* Wait for bus usage contexts to gracefully exit within some timeout value.
+     * Set the timeout a little higher than dhd_ioctl_timeout_msec
+     * so that the IOCTL timeout is not affected.
+     */
+    /* Convert the timeout in milliseconds to jiffies */
+    timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+
+    timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
+
+    return timeout;
+}
+
+/*
+ * Wait until the condition *var == condition is met.
+ * Returns 0 if the @condition evaluated to false after the timeout elapsed
+ * Returns a non-zero value if the @condition evaluated to true
+ */
+int
+dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+    int timeout;
+
+    /* Convert the timeout in milliseconds to jiffies */
+    timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+
+    timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
+
+    return timeout;
+}
+
+/*
+ * Wait until the '(*var & bitmask) == condition' is met.
+ * Returns 0 if the @condition evaluated to false after the timeout elapsed
+ * Returns a non-zero value if the @condition evaluated to true
+ */
+int
+dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
+        uint bitmask, uint condition)
+{
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+    int timeout;
+
+    /* Convert the timeout in milliseconds to jiffies */
+    timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+
+    timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
+            ((*var & bitmask) == condition), timeout);
+
+    return timeout;
+}
+
+int
+dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
+{
+    int ret = 0;
+    dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+    int timeout;
+
+    timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
+
+    ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
+
+    return ret;
+
+}
+
+int
+dhd_os_dmaxfer_wake(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+    wake_up(&dhd->dmaxfer_wait);
+    return 0;
+}
+
+void
+dhd_os_tx_completion_wake(dhd_pub_t *dhd)
+{
+    /* Call wmb() to make sure the event value is updated before waking up the waiter */
+    OSL_SMP_WMB();
+    wake_up(&dhd->tx_completion_wait);
+}
+
+/* Fix compilation error for FC11 */
+INLINE int
+dhd_os_busbusy_wake(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+    /* Call wmb() to make sure the event value is updated before waking up the waiter */
+    OSL_SMP_WMB();
+    wake_up(&dhd->dhd_bus_busy_state_wait);
+    return 0;
+}
+
+void
+dhd_os_wd_timer_extend(void *bus, bool extend)
+{
+#ifndef BCMDBUS
+    dhd_pub_t *pub = bus;
+    dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+    if (extend)
+        dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+    else
+        dhd_os_wd_timer(bus, dhd->default_wd_interval);
+#endif /* !BCMDBUS */
+}
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+#ifndef BCMDBUS
+    dhd_pub_t *pub = bus;
+    dhd_info_t *dhd = (dhd_info_t *)pub->info;
+    unsigned long flags;
+
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    if (!dhd) {
+        DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+        return;
+    }
+
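+/* XXX Hedged sketch (hypothetical names) of the wait/wake handshake that
+ * the dhd_os_*_wait()/dhd_os_*_wake() helpers above all follow: convert a
+ * millisecond budget to jiffies, sleep on a condition, and have the
+ * producer set the condition before calling wake_up().
+ * wait_event_timeout() returns 0 on timeout, remaining jiffies otherwise.
+ */
+#if 0
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+
+static DECLARE_WAIT_QUEUE_HEAD(my_resp_wait);
+static unsigned int my_resp_ready;
+
+static long my_wait_for_resp(unsigned int budget_ms)
+{
+    long timeout = msecs_to_jiffies(budget_ms);
+
+    /* returns 0 if my_resp_ready is still false after the timeout */
+    return wait_event_timeout(my_resp_wait, my_resp_ready, timeout);
+}
+
+static void my_resp_arrived(void)
+{
+    my_resp_ready = 1;      /* update the condition first... */
+    wake_up(&my_resp_wait); /* ...then release the waiter */
+}
+#endif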
+    DHD_GENERAL_LOCK(pub, flags);
+
+    /* don't start the wd until fw is loaded */
+    if (pub->busstate == DHD_BUS_DOWN) {
+        DHD_GENERAL_UNLOCK(pub, flags);
+#ifdef BCMSDIO
+        if (!wdtick) {
+            DHD_OS_WD_WAKE_UNLOCK(pub);
+        }
+#endif /* BCMSDIO */
+        return;
+    }
+
+    /* Totally stop the timer */
+    if (!wdtick && dhd->wd_timer_valid == TRUE) {
+        dhd->wd_timer_valid = FALSE;
+        DHD_GENERAL_UNLOCK(pub, flags);
+        del_timer_sync(&dhd->timer);
+#ifdef BCMSDIO
+        DHD_OS_WD_WAKE_UNLOCK(pub);
+#endif /* BCMSDIO */
+        return;
+    }
+
+    if (wdtick) {
+#ifdef BCMSDIO
+        DHD_OS_WD_WAKE_LOCK(pub);
+        dhd_watchdog_ms = (uint)wdtick;
+#endif /* BCMSDIO */
+        /* Re-arm the timer at the last watchdog period */
+        mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+        dhd->wd_timer_valid = TRUE;
+    }
+    DHD_GENERAL_UNLOCK(pub, flags);
+#endif /* BCMDBUS */
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+void
+dhd_os_runtimepm_timer(void *bus, uint tick)
+{
+    dhd_pub_t *pub = bus;
+    dhd_info_t *dhd = (dhd_info_t *)pub->info;
+    unsigned long flags;
+
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    if (!dhd) {
+        DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+        return;
+    }
+
+    DHD_GENERAL_LOCK(pub, flags);
+
+    /* don't start the RPM until fw is loaded */
+    if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
+        DHD_GENERAL_UNLOCK(pub, flags);
+        return;
+    }
+
+    /* If tick is non-zero, the request is to start the timer */
+    if (tick) {
+        /* Start the timer only if it's not already running */
+        if (dhd->rpm_timer_valid == FALSE) {
+            mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
+            dhd->rpm_timer_valid = TRUE;
+            DHD_ERROR(("DHD Runtime PM Timer ON\n"));
+        }
+    } else {
+        /* tick is zero, we have to stop the timer */
+        /* Stop the timer only if it's running, otherwise we don't have to do anything */
+        if (dhd->rpm_timer_valid == TRUE) {
+            dhd->rpm_timer_valid = FALSE;
+            DHD_GENERAL_UNLOCK(pub, flags);
+            del_timer_sync(&dhd->rpm_timer);
+            DHD_ERROR(("DHD Runtime PM Timer OFF \n"));
+            /* we have already released the lock, so just go to exit */
+            goto exit;
+        }
+    }
+
+    DHD_GENERAL_UNLOCK(pub, flags);
+exit:
+    return;
+
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+void *
+dhd_os_open_image1(dhd_pub_t *pub, char *filename)
+{
+    struct file *fp;
+    int size;
+
+    fp = filp_open(filename, O_RDONLY, 0);
+    /*
+     * 2.6.11 (FC4) supports filp_open() but later revs don't?
+     * Alternative:
+     * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+     * ???
+ */ + if (IS_ERR(fp)) { + fp = NULL; + goto err; + } + + if (!S_ISREG(file_inode(fp)->i_mode)) { + DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename)); + fp = NULL; + goto err; + } + + size = i_size_read(file_inode(fp)); + if (size <= 0) { + DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size)); + fp = NULL; + goto err; + } + + DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size)); + +err: + return fp; +} + +int +dhd_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + int size; + + if (!image) { + return 0; + } + + size = i_size_read(file_inode(fp)); + rdlen = kernel_read_compat(fp, fp->f_pos, buf, MIN(len, size)); + + if (len >= size && size != rdlen) { + return -EIO; + } + + if (rdlen > 0) { + fp->f_pos += rdlen; + } + + return rdlen; +} + +#if defined(BT_OVER_SDIO) +int +dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rd_len; + uint str_len = 0; + char *str_end = NULL; + + if (!image) + return 0; + + rd_len = kernel_read_compat(fp, fp->f_pos, str, len); + str_end = strnchr(str, len, '\n'); + if (str_end == NULL) { + goto err; + } + str_len = (uint)(str_end - str); + + /* Advance file pointer past the string length */ + fp->f_pos += str_len + 1; + bzero(str_end, rd_len - str_len); + +err: + return str_len; +} +#endif /* defined (BT_OVER_SDIO) */ + +int +dhd_os_get_image_size(void *image) +{ + struct file *fp = (struct file *)image; + int size; + if (!image) { + return 0; + } + + size = i_size_read(file_inode(fp)); + + return size; +} + +void +dhd_os_close_image1(dhd_pub_t *pub, void *image) +{ + if (image) { + filp_close((struct file *)image, NULL); + } +} + +void +dhd_os_sdlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef BCMDBUS + spin_lock_bh(&dhd->sdlock); +#else + if (dhd_dpc_prio >= 0) + down(&dhd->sdsem); + else + spin_lock_bh(&dhd->sdlock); +#endif /* !BCMDBUS */ +} + +void +dhd_os_sdunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef BCMDBUS + spin_unlock_bh(&dhd->sdlock); +#else + if (dhd_dpc_prio >= 0) + up(&dhd->sdsem); + else + spin_unlock_bh(&dhd->sdlock); +#endif /* !BCMDBUS */ +} + +void +dhd_os_sdlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); +#ifdef BCMDBUS + spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags); +#else + spin_lock_bh(&dhd->txqlock); +#endif /* BCMDBUS */ +} + +void +dhd_os_sdunlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); +#ifdef BCMDBUS + spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags); +#else + spin_unlock_bh(&dhd->txqlock); +#endif /* BCMDBUS */ +} + +unsigned long +dhd_os_sdlock_txoff(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + unsigned long flags = 0; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_irqsave(&dhd->txoff_lock, flags); + + return flags; +} + +void +dhd_os_sdunlock_txoff(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_irqrestore(&dhd->txoff_lock, flags); +} + +void +dhd_os_sdlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdunlock_rxq(dhd_pub_t *pub) +{ +} + +static void +dhd_os_rxflock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->rxf_lock); + +} + +static void +dhd_os_rxfunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + 
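+/* XXX Hedged sketch of the dual-mode locking used by dhd_os_sdlock()
+ * above: a sleeping semaphore is safe when the DPC runs in a schedulable
+ * thread (dhd_dpc_prio >= 0), while the tasklet path must use a
+ * bottom-half spinlock. Hypothetical names; same prio convention assumed.
+ */
+#if 0
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+struct my_bus {
+    int dpc_prio;       /* >= 0: threaded DPC, < 0: tasklet DPC */
+    struct semaphore sdsem;
+    spinlock_t sdlock;
+};
+
+static void my_sdlock(struct my_bus *bus)
+{
+    if (bus->dpc_prio >= 0)
+        down(&bus->sdsem);          /* may sleep: thread context */
+    else
+        spin_lock_bh(&bus->sdlock); /* tasklet-safe, disables BHs */
+}
+
+static void my_sdunlock(struct my_bus *bus)
+{
+    if (bus->dpc_prio >= 0)
+        up(&bus->sdsem);
+    else
+        spin_unlock_bh(&bus->sdlock);
+}
+#endif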
+    spin_unlock_bh(&dhd->rxf_lock);
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+unsigned long
+dhd_os_tcpacklock(dhd_pub_t *pub)
+{
+    dhd_info_t *dhd;
+    unsigned long flags = 0;
+
+    dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+#ifdef BCMSDIO
+        spin_lock_bh(&dhd->tcpack_lock);
+#else
+        flags = osl_spin_lock(&dhd->tcpack_lock);
+#endif /* BCMSDIO */
+    }
+
+    return flags;
+}
+
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
+{
+    dhd_info_t *dhd;
+
+#ifdef BCMSDIO
+    BCM_REFERENCE(flags);
+#endif /* BCMSDIO */
+
+    dhd = (dhd_info_t *)(pub->info);
+
+    if (dhd) {
+#ifdef BCMSDIO
+        spin_unlock_bh(&dhd->tcpack_lock);
+#else
+        osl_spin_unlock(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
+    }
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
+{
+    uint8* buf;
+    gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+
+    buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+    if (buf == NULL && kmalloc_if_fail)
+        buf = kmalloc(size, flags);
+
+    return buf;
+}
+
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+{
+}
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+    int res = 0;
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    if (!dhd->pub.up) {
+        return NULL;
+    }
+
+    if (!(dev->flags & IFF_UP)) {
+        return NULL;
+    }
+
+    res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+    if (res == 0)
+        return &dhd->iw.wstats;
+    else
+        return NULL;
+}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+    wl_event_msg_t *event, void **data)
+{
+    int bcmerror = 0;
+#ifdef WL_CFG80211
+    unsigned long flags = 0;
+#endif /* WL_CFG80211 */
+    ASSERT(dhd != NULL);
+
+#ifdef SHOW_LOGTRACE
+    bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+        &dhd->event_data);
+#else
+    bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+        NULL);
+#endif /* SHOW_LOGTRACE */
+    if (unlikely(bcmerror != BCME_OK)) {
+        return bcmerror;
+    }
+
+    if (ntoh32(event->event_type) == WLC_E_IF) {
+        /* WLC_E_IF event types are consumed by wl_process_host_event.
+         * For ifadd/del ops, the netdev ptr may not be valid at this
+         * point, so return before invoking the cfg80211/wext handlers.
+         */
+        return BCME_OK;
+    }
+
+#ifdef WL_EVENT
+    wl_ext_event_send(dhd->pub.event_params, event, *data);
+#endif
+
+#ifdef WL_CFG80211
+    if (dhd->iflist[ifidx]->net) {
+        DHD_UP_LOCK(&dhd->pub.up_lock, flags);
+        if (dhd->pub.up) {
+            wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
+        }
+        DHD_UP_UNLOCK(&dhd->pub.up_lock, flags);
+    }
+#endif /* defined(WL_CFG80211) */
+
+    return (bcmerror);
+}
+
+/* send up a locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+    switch (ntoh32(event->event_type)) {
+    /* Handle error case or further events here */
+    default:
+        break;
+    }
+}
+
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+    struct sk_buff *p, *skb;
+    uint32 pktlen;
+    int len;
+    dhd_if_t *ifp;
+    dhd_info_t *dhd;
+    uchar *skb_data;
+    int ifidx = 0;
+    struct ether_header eth;
+
+    pktlen = sizeof(eth) + data_len;
+    dhd = dhdp->info;
+
+    if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+        ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+        bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
+        bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
+        ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
+        eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+        bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
+        bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+        skb = PKTTONATIVE(dhdp->osh, p);
+        skb_data = skb->data;
+        len = skb->len;
+
+        ifidx = dhd_ifname2idx(dhd, "wlan0");
+        ifp = dhd->iflist[ifidx];
+        if (ifp == NULL)
+            ifp = dhd->iflist[0];
+
+        ASSERT(ifp);
+        skb->dev = ifp->net;
+        skb->protocol = eth_type_trans(skb, skb->dev);
+        skb->data = skb_data;
+        skb->len = len;
+
+        /* Strip header, count, deliver upward */
+        skb_pull(skb, ETH_HLEN);
+
+        bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+            __FUNCTION__, __LINE__);
+        /* Send the packet */
+        if (in_interrupt()) {
+            netif_rx(skb);
+        } else {
+            netif_rx_ni(skb);
+        }
+    } else {
+        /* Could not allocate an sk_buff */
+        DHD_ERROR(("%s: unable to alloc sk_buff\n", __FUNCTION__));
+    }
+}
+#endif /* LOG_INTO_TCPDUMP */
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if defined(BCMSDIO)
+    struct dhd_info *dhdinfo = dhd->info;
+
+    int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
+
+    dhd_os_sdunlock(dhd);
+    wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+    dhd_os_sdlock(dhd);
+#endif /* defined(BCMSDIO) */
+    return;
+} /* dhd_wait_for_event */
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if defined(BCMSDIO)
+    struct dhd_info *dhdinfo = dhd->info;
+    if (waitqueue_active(&dhdinfo->ctrl_wait))
+        wake_up(&dhdinfo->ctrl_wait);
+#endif
+    return;
+}
+
+#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
+{
+    int ret;
+
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+    if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
+        return BCME_ERROR;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+    if (flag == TRUE) {
+#ifndef WL_CFG80211
+        /* Issue wl down command for non-cfg before resetting the chip */
+        if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+            DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
+        }
+#endif /* !WL_CFG80211 */
+#ifdef PROP_TXSTATUS
+        if (dhd->pub.wlfc_enabled) {
+            dhd_wlfc_deinit(&dhd->pub);
+        }
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+        if (dhd->pub.pno_state) {
+            dhd_pno_deinit(&dhd->pub);
+        }
+#endif
+#ifdef RTT_SUPPORT
+        if (dhd->pub.rtt_state) {
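+/* XXX Hedged sketch (hypothetical helper, trimmed error handling) of the
+ * send-up pattern dhd_sendup_log() uses above: build an Ethernet frame in
+ * a fresh skb, let eth_type_trans() set skb->protocol and strip the
+ * header, then hand the packet to the stack with netif_rx().
+ */
+#if 0
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/string.h>
+
+static int my_sendup(struct net_device *ndev, const void *payload, int len)
+{
+    struct sk_buff *skb = dev_alloc_skb(ETH_HLEN + len);
+    struct ethhdr *eh;
+
+    if (!skb)
+        return -ENOMEM;
+
+    eh = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+    memcpy(eh->h_dest, ndev->dev_addr, ETH_ALEN);
+    memcpy(eh->h_source, ndev->dev_addr, ETH_ALEN);
+    eh->h_proto = htons(ETH_P_802_EX1); /* placeholder ethertype */
+    memcpy(skb_put(skb, len), payload, len);
+
+    skb->dev = ndev;
+    skb->protocol = eth_type_trans(skb, ndev); /* pulls ETH_HLEN too */
+    return netif_rx(skb);
+}
+#endif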
dhd_rtt_deinit(&dhd->pub); + } +#endif /* RTT_SUPPORT */ + + DHD_SSSR_DUMP_DEINIT(&dhd->pub); +#ifdef DHD_SDTC_ETB_DUMP + if (dhd->pub.sdtc_etb_inited) { + dhd_sdtc_etb_deinit(&dhd->pub); + } +#endif /* DHD_SDTC_ETB_DUMP */ +/* + * XXX Detach only if the module is not attached by default at dhd_attach. + * If attached by default, we need to keep it till dhd_detach, so that + * module is not detached at wifi on/off + */ +#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT) + dhd_os_dbg_detach_pkt_monitor(&dhd->pub); +#endif /* DBG_PKT_MON */ + } + +#ifdef BCMSDIO + /* XXX Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name. + * This is indeed a hack but we have to make it work properly before we have a better + * solution + */ + if (!flag) { + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); + } +#endif /* BCMSDIO */ +#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE) +#if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420) + /* XXX: JIRA SWWLAN-139454: Added L1ss enable + * after firmware download completion due to link down issue + * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point + */ + DHD_ERROR(("%s Disable L1ss EP side\n", __FUNCTION__)); + if (flag == FALSE && dhd->pub.busstate == DHD_BUS_DOWN) { +#if defined(CONFIG_SOC_GS101) + exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, 1); +#else + exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI); +#endif /* CONFIG_SOC_GS101 */ + } +#endif /* !CONFIG_SOC_EXYNOS8890 && !defined(SUPPORT_EXYNOS7420) */ +#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */ + + ret = dhd_bus_devreset(&dhd->pub, flag); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + if (flag) { + /* Clear some flags for recovery logic */ + dhd->pub.dongle_trap_occured = 0; +#ifdef BT_OVER_PCIE + dhd->pub.dongle_trap_due_to_bt = 0; +#endif /* BT_OVER_PCIE */ + dhd->pub.iovar_timeout_occured = 0; +#ifdef PCIE_FULL_DONGLE + dhd->pub.d3ack_timeout_occured = 0; + dhd->pub.livelock_occured = 0; + dhd->pub.pktid_audit_failed = 0; +#endif /* PCIE_FULL_DONGLE */ + dhd->pub.smmu_fault_occurred = 0; + dhd->pub.iface_op_failed = 0; + dhd->pub.scan_timeout_occurred = 0; + dhd->pub.scan_busy_occurred = 0; + } + + if (ret) { + DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); + } + + return ret; +} + +#if defined(BCMSDIO) || defined(BCMPCIE) +int +dhd_net_bus_suspend(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_suspend(&dhd->pub); +} + +int +dhd_net_bus_resume(struct net_device *dev, uint8 stage) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_resume(&dhd->pub, stage); +} + +#endif /* BCMSDIO || BCMPCIE */ +#endif /* BCMSDIO || BCMPCIE || BCMDBUS */ + +int net_os_set_suspend_disable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + ret = dhd->pub.suspend_disable_flag; + dhd->pub.suspend_disable_flag = val; + } + return ret; +} + +int net_os_set_suspend(struct net_device *dev, int val, int force) +{ + int ret = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd && (dhd->pub.conf->suspend_mode == EARLY_SUSPEND || + dhd->pub.conf->suspend_mode == SUSPEND_MODE_2)) { + if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND && !val) + 
dhd_conf_set_suspend_resume(&dhd->pub, val);
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+        ret = dhd_set_suspend(val, &dhd->pub);
+#else
+        ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif
+#ifdef WL_CFG80211
+        wl_cfg80211_update_power_mode(dev);
+#endif
+        if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND && val)
+            dhd_conf_set_suspend_resume(&dhd->pub, val);
+    }
+    return ret;
+}
+
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    if (dhd) {
+        DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
+            __FUNCTION__, val));
+        dhd->pub.suspend_bcn_li_dtim = val;
+    }
+
+    return 0;
+}
+
+int net_os_set_max_dtim_enable(struct net_device *dev, int val)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    if (dhd) {
+        DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
+            __FUNCTION__, (val ? "Enable" : "Disable")));
+        if (val) {
+            dhd->pub.max_dtim_enable = TRUE;
+        } else {
+            dhd->pub.max_dtim_enable = FALSE;
+        }
+    } else {
+        return -1;
+    }
+
+    return 0;
+}
+
+#ifdef DISABLE_DTIM_IN_SUSPEND
+int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    if (dhd) {
+        DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
+            __FUNCTION__, (val ? "Enable" : "Disable")));
+        if (val) {
+            dhd->pub.disable_dtim_in_suspend = TRUE;
+        } else {
+            dhd->pub.disable_dtim_in_suspend = FALSE;
+        }
+    } else {
+        return BCME_ERROR;
+    }
+
+    return BCME_OK;
+}
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+
+#ifdef PKT_FILTER_SUPPORT
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+    int ret = 0;
+
+#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    if (!dhd_master_mode)
+        add_remove = !add_remove;
+    DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
+    if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
+        return 0;
+    }
+
+#ifdef BLOCK_IPV6_PACKET
+    /* the customer wants to block all IPv6 packets */
+    if (num == DHD_MULTICAST6_FILTER_NUM) {
+        return 0;
+    }
+#endif /* BLOCK_IPV6_PACKET */
+
+    if (num >= dhd->pub.pktfilter_count) {
+        return -EINVAL;
+    }
+
+    ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+    return ret;
+}
+
+/* XXX RB:4238 Change net_os_set_packet_filter() function name to net_os_enable_packet_filter()
+ * The previous code did 'set' & 'enable' in one function,
+ * but from now on, we are going to separate the 'set' and 'enable' features.
+ * - set : net_os_rxfilter_add_remove() -> dhd_set_packet_filter() -> dhd_pktfilter_offload_set()
+ * - enable : net_os_enable_packet_filter() -> dhd_enable_packet_filter()
+ *            -> dhd_pktfilter_offload_enable()
+ */
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+
+{
+    int ret = 0;
+
+    /* Packet filtering is set only if we are still in early-suspend and
+     * we need either to turn it ON or turn it OFF.
+     * We can always turn it OFF in case of early-suspend, but we turn it
+     * back ON only if suspend_disable_flag was not set
+     */
+    if (dhdp && dhdp->up) {
+        if (dhdp->in_suspend) {
+            if (!val || (val && !dhdp->suspend_disable_flag))
+                dhd_enable_packet_filter(val, dhdp);
+        }
+    }
+    return ret;
+}
+
+/* function to enable/disable packet filtering for a network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
+    return dhd_os_enable_packet_filter(&dhd->pub, val);
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+int
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+    int ret;
+
+    if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
+        goto done;
+
+done:
+    return ret;
+}
+
+int
+dhd_dev_get_feature_set(struct net_device *dev)
+{
+    dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
+    dhd_pub_t *dhd = (&ptr->pub);
+    int feature_set = 0;
+
+    /* the tdls capability or others can be missed because of initialization */
+    if (dhd_get_fw_capabilities(dhd) < 0) {
+        DHD_ERROR(("Capabilities rechecking fail\n"));
+    }
+
+    if (FW_SUPPORTED(dhd, sta))
+        feature_set |= WIFI_FEATURE_INFRA;
+    if (FW_SUPPORTED(dhd, dualband))
+        feature_set |= WIFI_FEATURE_INFRA_5G;
+    if (FW_SUPPORTED(dhd, p2p))
+        feature_set |= WIFI_FEATURE_P2P;
+    if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+        feature_set |= WIFI_FEATURE_SOFT_AP;
+    if (FW_SUPPORTED(dhd, tdls))
+        feature_set |= WIFI_FEATURE_TDLS;
+    if (FW_SUPPORTED(dhd, vsdb))
+        feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
+    if (FW_SUPPORTED(dhd, nan)) {
+        feature_set |= WIFI_FEATURE_NAN;
+        /* NAN is essential for d2d rtt */
+        if (FW_SUPPORTED(dhd, rttd2d))
+            feature_set |= WIFI_FEATURE_D2D_RTT;
+    }
+#ifdef RTT_SUPPORT
+    if (dhd->rtt_supported) {
+        feature_set |= WIFI_FEATURE_D2D_RTT;
+        feature_set |= WIFI_FEATURE_D2AP_RTT;
+    }
+#endif /* RTT_SUPPORT */
+#ifdef LINKSTAT_SUPPORT
+    feature_set |= WIFI_FEATURE_LINKSTAT;
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef CUSTOMER_HW_AMLOGIC
+    feature_set |= WIFI_FEATURE_SET_LATENCY_MODE;
+#endif
+
+#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
+    if (dhd_is_pno_supported(dhd)) {
+        feature_set |= WIFI_FEATURE_PNO;
+#ifdef BATCH_SCAN
+        /* Deprecated */
+        feature_set |= WIFI_FEATURE_BATCH_SCAN;
+#endif /* BATCH_SCAN */
+#ifdef GSCAN_SUPPORT
+        /* terence 20171115: remove to get GTS PASS
+         * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
+         */
+//      feature_set |= WIFI_FEATURE_GSCAN;
+//      feature_set |= WIFI_FEATURE_HAL_EPNO;
+#endif /* GSCAN_SUPPORT */
+    }
+#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
+#ifdef RSSI_MONITOR_SUPPORT
+    if (FW_SUPPORTED(dhd, rssi_mon)) {
+        feature_set |= WIFI_FEATURE_RSSI_MONITOR;
+    }
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef WL11U
+    feature_set |= WIFI_FEATURE_HOTSPOT;
+#endif /* WL11U */
+#ifdef KEEP_ALIVE
+    feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
+#endif /* KEEP_ALIVE */
+#ifdef NDO_CONFIG_SUPPORT
+    feature_set |= WIFI_FEATURE_CONFIG_NDO;
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef SUPPORT_RANDOM_MAC_SCAN
+    feature_set |= WIFI_FEATURE_SCAN_RAND;
+#endif
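+/* XXX Hedged sketch of the capability-bitmask idiom dhd_dev_get_feature_set()
+ * implements above: each supported feature ORs one bit into a plain int and
+ * callers test membership with a mask. The bit values below are
+ * hypothetical, not the real WIFI_FEATURE_* encoding.
+ */
+#if 0
+#include <stdio.h>
+
+#define MY_FEATURE_INFRA    (1 << 0)
+#define MY_FEATURE_P2P      (1 << 1)
+#define MY_FEATURE_TDLS     (1 << 2)
+
+static int my_get_feature_set(int fw_has_p2p, int fw_has_tdls)
+{
+    int feature_set = MY_FEATURE_INFRA; /* always-on baseline */
+
+    if (fw_has_p2p)
+        feature_set |= MY_FEATURE_P2P;
+    if (fw_has_tdls)
+        feature_set |= MY_FEATURE_TDLS;
+    return feature_set;
+}
+
+int main(void)
+{
+    int set = my_get_feature_set(1, 0);
+
+    printf("p2p %s\n", (set & MY_FEATURE_P2P) ? "yes" : "no");
+    return 0;
+}
+#endif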
/* SUPPORT_RANDOM_MAC_SCAN */ +#ifdef FILTER_IE + if (FW_SUPPORTED(dhd, fie)) { + feature_set |= WIFI_FEATURE_FILTER_IE; + } +#endif /* FILTER_IE */ +#ifdef ROAMEXP_SUPPORT + feature_set |= WIFI_FEATURE_CONTROL_ROAMING; +#endif /* ROAMEXP_SUPPORT */ +#ifdef WL_LATENCY_MODE + feature_set |= WIFI_FEATURE_SET_LATENCY_MODE; +#endif /* WL_LATENCY_MODE */ +#ifdef WL_P2P_RAND + feature_set |= WIFI_FEATURE_P2P_RAND_MAC; +#endif /* WL_P2P_RAND */ +#ifdef WL_SAR_TX_POWER + feature_set |= WIFI_FEATURE_SET_TX_POWER_LIMIT; + feature_set |= WIFI_FEATURE_USE_BODY_HEAD_SAR; +#endif /* WL_SAR_TX_POWER */ +#ifdef WL_STATIC_IF + feature_set |= WIFI_FEATURE_AP_STA; +#endif /* WL_STATIC_IF */ + return feature_set; +} + +int +dhd_dev_get_feature_set_matrix(struct net_device *dev, int num) +{ + int feature_set_full; + int ret = 0; + + feature_set_full = dhd_dev_get_feature_set(dev); + + /* Common feature set for all interface */ + ret = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) | + (feature_set_full & WIFI_FEATURE_EPR); + + /* Specific feature group for each interface */ + switch (num) { + case 0: + ret |= (feature_set_full & WIFI_FEATURE_P2P) | + /* Not supported yet */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_PNO) | + (feature_set_full & WIFI_FEATURE_HAL_EPNO) | + (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | + (feature_set_full & WIFI_FEATURE_GSCAN) | + (feature_set_full & WIFI_FEATURE_HOTSPOT) | + (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA); + break; + + case 1: + ret |= (feature_set_full & WIFI_FEATURE_P2P); + /* Not yet verified NAN with P2P */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + break; + + case 2: + ret |= (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL); + break; + + default: + ret = WIFI_FEATURE_INVALID; + DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num)); + break; + } + + return ret; +} + +#ifdef CUSTOM_FORCE_NODFS_FLAG +int +dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (nodfs) + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + else + dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; + return 0; +} +#endif /* CUSTOM_FORCE_NODFS_FLAG */ + +#ifdef NDO_CONFIG_SUPPORT +int +dhd_dev_ndo_cfg(struct net_device *dev, u8 enable) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + if (enable) { + /* enable ND offload feature (will be enabled in FW on suspend) */ + dhdp->ndo_enable = TRUE; + + /* Update changes of anycast address & DAD failed address */ + ret = dhd_dev_ndo_update_inet6addr(dev); + if ((ret < 0) && (ret != BCME_NORESOURCE)) { + DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret)); + return ret; + } + } else { + /* disable ND offload feature */ + dhdp->ndo_enable = FALSE; + + /* disable ND offload in FW */ + ret = dhd_ndo_enable(dhdp, FALSE); + if (ret < 0) { + DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret)); + } + } + return ret; +} + +static int +dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6) +{ + struct inet6_ifaddr *ifa; + struct ifacaddr6 *acaddr = NULL; + int addr_count = 0; + + /* lock */ + 
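+/* XXX Hedged sketch of the locked-walk idiom used just below: take
+ * inet6->lock as a bottom-half read lock, iterate the unicast list with
+ * list_for_each_entry() and the anycast list via aca_next, and never issue
+ * ioctls while the lock is held (copy first, act after unlock).
+ * Hypothetical counter; the real code also filters on DAD state.
+ */
+#if 0
+#include <net/if_inet6.h>
+#include <linux/list.h>
+
+static int my_count_unicast(struct inet6_dev *in6)
+{
+    struct inet6_ifaddr *ifa;
+    int n = 0;
+
+    read_lock_bh(&in6->lock);
+    list_for_each_entry(ifa, &in6->addr_list, if_list)
+        n++;    /* the real walk skips IFA_F_DADFAILED entries */
+    read_unlock_bh(&in6->lock);
+    return n;
+}
+#endif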
+    read_lock_bh(&inet6->lock);
+
+    GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+    /* Count valid unicast addresses */
+    list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+        GCC_DIAGNOSTIC_POP();
+        if ((ifa->flags & IFA_F_DADFAILED) == 0) {
+            addr_count++;
+        }
+    }
+
+    /* Count anycast addresses */
+    acaddr = inet6->ac_list;
+    while (acaddr) {
+        addr_count++;
+        acaddr = acaddr->aca_next;
+    }
+
+    /* unlock */
+    read_unlock_bh(&inet6->lock);
+
+    return addr_count;
+}
+
+int
+dhd_dev_ndo_update_inet6addr(struct net_device *dev)
+{
+    dhd_info_t *dhd;
+    dhd_pub_t *dhdp;
+    struct inet6_dev *inet6;
+    struct inet6_ifaddr *ifa;
+    struct ifacaddr6 *acaddr = NULL;
+    struct in6_addr *ipv6_addr = NULL;
+    int cnt, i;
+    int ret = BCME_OK;
+
+    /*
+     * This function evaluates the host IP addresses in struct inet6_dev:
+     * unicast addresses in inet6_dev->addr_list and
+     * anycast addresses in inet6_dev->ac_list.
+     * While evaluating inet6_dev, read_lock_bh() is required to prevent
+     * access to a null (freed) pointer.
+     */
+
+    if (dev) {
+        inet6 = dev->ip6_ptr;
+        if (!inet6) {
+            DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
+            return BCME_ERROR;
+        }
+
+        dhd = DHD_DEV_INFO(dev);
+        if (!dhd) {
+            DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
+            return BCME_ERROR;
+        }
+        dhdp = &dhd->pub;
+
+        if (dhd_net2idx(dhd, dev) != 0) {
+            DHD_ERROR(("%s: Not a primary interface\n", __FUNCTION__));
+            return BCME_ERROR;
+        }
+    } else {
+        DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    /* Check for host IP overflow */
+    cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
+    if (cnt > dhdp->ndo_max_host_ip) {
+        if (!dhdp->ndo_host_ip_overflow) {
+            dhdp->ndo_host_ip_overflow = TRUE;
+            /* Disable ND offload in FW */
+            DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
+            ret = dhd_ndo_enable(dhdp, FALSE);
+        }
+
+        return ret;
+    }
+
+    /*
+     * Allocate an ipv6 addr buffer to store the addresses to be added/removed.
+     * The driver needs to lock inet6_dev while accessing the structure, but it
+     * cannot use an ioctl while inet6_dev is locked since that requires scheduling;
+     * hence, copy the addresses to the buffer and do the ioctl after unlocking.
+     */
+    ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
+        sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+    if (!ipv6_addr) {
+        DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
+        return BCME_NOMEM;
+    }
+
+    /* Find DAD failed unicast addresses to be removed */
+    cnt = 0;
+    read_lock_bh(&inet6->lock);
+    GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+    list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+        GCC_DIAGNOSTIC_POP();
+        /* DAD failed unicast address */
+        if ((ifa->flags & IFA_F_DADFAILED) &&
+            (cnt < dhdp->ndo_max_host_ip)) {
+            memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
+            cnt++;
+        }
+    }
+    read_unlock_bh(&inet6->lock);
+
+    /* Remove DAD failed unicast addresses */
+    for (i = 0; i < cnt; i++) {
+        DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
+        ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
+        if (ret < 0) {
+            goto done;
+        }
+    }
+
+    /* Remove all anycast addresses */
+    ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+    if (ret < 0) {
+        goto done;
+    }
+
+    /*
+     * If ND offload was disabled due to host IP overflow,
+     * attempt to add the valid unicast addresses.
+     */
+    if (dhdp->ndo_host_ip_overflow) {
+        /* Find valid unicast addresses */
+        cnt = 0;
+        read_lock_bh(&inet6->lock);
+        GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+        list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+            GCC_DIAGNOSTIC_POP();
+            /* valid unicast address */
+            if (!(ifa->flags & IFA_F_DADFAILED) &&
+                (cnt < dhdp->ndo_max_host_ip)) {
+                memcpy(&ipv6_addr[cnt], &ifa->addr,
+                    sizeof(struct in6_addr));
+                cnt++;
+            }
+        }
+        read_unlock_bh(&inet6->lock);
+
+        /* Add valid unicast addresses */
+        for (i = 0; i < cnt; i++) {
+            ret = dhd_ndo_add_ip_with_type(dhdp,
+                (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
+            if (ret < 0) {
+                goto done;
+            }
+        }
+    }
+
+    /* Find anycast addresses */
+    cnt = 0;
+    read_lock_bh(&inet6->lock);
+    acaddr = inet6->ac_list;
+    while (acaddr) {
+        if (cnt < dhdp->ndo_max_host_ip) {
+            memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
+            cnt++;
+        }
+        acaddr = acaddr->aca_next;
+    }
+    read_unlock_bh(&inet6->lock);
+
+    /* Add anycast addresses */
+    for (i = 0; i < cnt; i++) {
+        ret = dhd_ndo_add_ip_with_type(dhdp,
+            (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+        if (ret < 0) {
+            goto done;
+        }
+    }
+
+    /* Now all host IP addresses have been added successfully */
+    if (dhdp->ndo_host_ip_overflow) {
+        dhdp->ndo_host_ip_overflow = FALSE;
+        if (dhdp->in_suspend) {
+            /* the driver is in (early) suspend state, need to enable ND offload in FW */
+            DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
+            ret = dhd_ndo_enable(dhdp, TRUE);
+        }
+    }
+
+done:
+    if (ipv6_addr) {
+        MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+    }
+
+    return ret;
+}
+
+#endif /* NDO_CONFIG_SUPPORT */
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    return (dhd_pno_stop_for_ssid(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+    uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+        pno_repeat, pno_freq_expo_max, channel_list, nchan));
+}
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int enable)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+    return (dhd_pno_enable(&dhd->pub, enable));
+}
+
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
+int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+    struct dhd_pno_hotlist_params *hotlist_params)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+    return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+    return (dhd_pno_stop_for_batch(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+    return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
+
+/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+    return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize,
PNO_STATUS_NORMAL)); +} +#endif /* PNO_SUPPORT */ + +#if defined(OEM_ANDROID) && defined(PNO_SUPPORT) +#ifdef GSCAN_SUPPORT +bool +dhd_dev_is_legacy_pno_enabled(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_is_legacy_pno_enabled(&dhd->pub)); +} + +int +dhd_dev_set_epno(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (!dhd) { + return BCME_ERROR; + } + return dhd_pno_set_epno(&dhd->pub); +} +int +dhd_dev_flush_fw_epno(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (!dhd) { + return BCME_ERROR; + } + return dhd_pno_flush_fw_epno(&dhd->pub); +} + +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush)); +} + +/* Linux wrapper to call common dhd_pno_get_gscan */ +void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); +} + +/* Linux wrapper to call common dhd_wait_batch_results_complete */ +int +dhd_dev_wait_batch_results_complete(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_wait_batch_results_complete(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_lock_batch_results */ +int +dhd_dev_pno_lock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_lock_batch_results(&dhd->pub)); +} +/* Linux wrapper to call common dhd_pno_unlock_batch_results */ +void +dhd_dev_pno_unlock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_unlock_batch_results(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_initiate_gscan_request */ +int +dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush)); +} + +/* Linux wrapper to call common dhd_pno_enable_full_scan_result */ +int +dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag)); +} + +/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */ +void * +dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len)); +} + +/* Linux wrapper to call common dhd_process_full_gscan_result */ +void * +dhd_dev_process_full_gscan_result(struct net_device *dev, +const void *data, uint32 len, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes)); +} + +void +dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type); + + return; +} + +int +dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t 
**)netdev_priv(dev); + + return (dhd_gscan_batch_cache_cleanup(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_retreive_batch_scan_results */ +int +dhd_dev_retrieve_batch_scan(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_retreive_batch_scan_results(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_process_epno_result */ +void * dhd_dev_process_epno_result(struct net_device *dev, + const void *data, uint32 event, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes)); +} + +int +dhd_dev_set_lazy_roam_cfg(struct net_device *dev, + wlc_roam_exp_params_t *roam_param) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_roam_exp_cfg_t roam_exp_cfg; + int err; + + if (!roam_param) { + return BCME_BADARG; + } + + DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n", + roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold)); + DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n", + roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor, + roam_param->cur_bssid_boost)); + DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n", + roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost)); + + memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param)); + roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; + roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT; + if (dhd->pub.lazy_roam_enable) { + roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG; + } + err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", + (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, + TRUE); + if (err < 0) { + DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err)); + } + return err; +} + +int +dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_roam_exp_cfg_t roam_exp_cfg; + + memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg)); + roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; + if (enable) { + roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG; + } + + err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", + (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, + TRUE); + if (err < 0) { + DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err)); + } else { + dhd->pub.lazy_roam_enable = (enable != 0); + } + return err; +} + +int +dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev, + wl_bssid_pref_cfg_t *bssid_pref, uint32 flush) +{ + int err; + uint len; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + bssid_pref->version = BSSID_PREF_LIST_VERSION; + /* By default programming bssid pref flushes out old values */ + bssid_pref->flags = (flush && !bssid_pref->count) ? 
ROAM_EXP_CLEAR_BSSID_PREF: 0; + len = sizeof(wl_bssid_pref_cfg_t); + if (bssid_pref->count) { + len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t); + } + err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref", + (char *)bssid_pref, len, NULL, 0, TRUE); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err)); + } + return err; +} +#endif /* GSCAN_SUPPORT */ + +#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT) +int +dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist, + uint32 len, uint32 flush) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int macmode; + + if (blacklist) { + err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist, + len, TRUE, 0); + if (err != BCME_OK) { + DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err)); + return err; + } + } + /* By default programming blacklist flushes out old values */ + macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY; + err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode, + sizeof(macmode), TRUE, 0); + if (err != BCME_OK) { + DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err)); + } + return err; +} + +int +dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist, + uint32 len, uint32 flush) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_ssid_whitelist_t whitelist_ssid_flush; + + if (!ssid_whitelist) { + if (flush) { + ssid_whitelist = &whitelist_ssid_flush; + ssid_whitelist->ssid_count = 0; + } else { + DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__)); + return BCME_BADARG; + } + } + ssid_whitelist->version = SSID_WHITELIST_VERSION; + ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0; + err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL, + 0, TRUE); + if (err != BCME_OK) { + if (err == BCME_UNSUPPORTED) { + DHD_ERROR(("%s : roam_exp_bssid_pref, UNSUPPORTED \n", __FUNCTION__)); + } else { + DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", + __FUNCTION__, err)); + } + } + return err; +} +#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */ +#endif /* defined(OEM_ANDROID) && defined(PNO_SUPPORT) */ + +#ifdef RSSI_MONITOR_SUPPORT +int +dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start, + int8 max_rssi, int8 min_rssi) +{ + int err; + wl_rssi_monitor_cfg_t rssi_monitor; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + rssi_monitor.version = RSSI_MONITOR_VERSION; + rssi_monitor.max_rssi = max_rssi; + rssi_monitor.min_rssi = min_rssi; + rssi_monitor.flags = start ? 
0: RSSI_MONITOR_STOP; + err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor), + NULL, 0, TRUE); + if (err < 0 && err != BCME_UNSUPPORTED) { + DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err)); + } + return err; +} +#endif /* RSSI_MONITOR_SUPPORT */ + +#ifdef DHDTCPACK_SUPPRESS +int +dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + err = dhd_tcpack_suppress_set(&dhd->pub, enable); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err)); + } + return err; +} +#endif /* DHDTCPACK_SUPPRESS */ + +int +dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + + if (!dhdp || !oui) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return BCME_ERROR; + } + if (ETHER_ISMULTI(oui)) { + DHD_ERROR(("Expected unicast OUI\n")); + return BCME_ERROR; + } else { + uint8 *rand_mac_oui = dhdp->rand_mac_oui; + memcpy(rand_mac_oui, oui, DOT11_OUI_LEN); + DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n", + MACOUI2STRDBG(rand_mac_oui))); + } + return BCME_OK; +} + +int +dhd_set_rand_mac_oui(dhd_pub_t *dhd) +{ + int err; + wl_pfn_macaddr_cfg_t wl_cfg; + uint8 *rand_mac_oui = dhd->rand_mac_oui; + + memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN); + memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN); + wl_cfg.version = WL_PFN_MACADDR_CFG_VER; + if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) { + wl_cfg.flags = 0; + } else { + wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); + } + + DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n", + MACOUI2STRDBG(rand_mac_oui))); + + err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err)); + } + return err; +} + +#if defined(RTT_SUPPORT) && defined(WL_CFG80211) +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_set_cfg(&dhd->pub, buf)); +} + +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); +} + +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn)); +} + +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn)); +} + +int +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_capability(&dhd->pub, capa)); +} + +int +dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_avail_channel(&dhd->pub, channel_info)); +} + +int +dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_enable_responder(&dhd->pub, 
channel_info)); +} + +int dhd_dev_rtt_cancel_responder(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_cancel_responder(&dhd->pub)); +} + +#endif /* RTT_SUPPORT */ +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +static void _dhd_apf_lock_local(dhd_info_t *dhd) +{ + if (dhd) { + mutex_lock(&dhd->dhd_apf_mutex); + } +} + +static void _dhd_apf_unlock_local(dhd_info_t *dhd) +{ + if (dhd) { + mutex_unlock(&dhd->dhd_apf_mutex); + } +} + +static int +__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, + u8* program, uint32 program_len) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + wl_pkt_filter_t * pkt_filterp; + wl_apf_program_t *apf_program; + char *buf; + u32 cmd_len, buf_len; + int ifidx, ret; + char cmd[] = "pkt_filter_add"; + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + cmd_len = sizeof(cmd); + + /* Check if the program_len is more than the expected len + * and if the program is NULL return from here. + */ + if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) { + DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n", + __FUNCTION__, program_len, program)); + return -EINVAL; + } + buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN + + WL_APF_PROGRAM_FIXED_LEN + program_len; + + buf = MALLOCZ(dhdp->osh, buf_len); + if (unlikely(!buf)) { + DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); + return -ENOMEM; + } + + memcpy(buf, cmd, cmd_len); + + pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len); + pkt_filterp->id = htod32(filter_id); + pkt_filterp->negate_match = htod32(FALSE); + pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH); + + apf_program = &pkt_filterp->u.apf_program; + apf_program->version = htod16(WL_APF_INTERNAL_VERSION); + apf_program->instr_len = htod16(program_len); + memcpy(apf_program->instrs, program, program_len); + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + + if (buf) { + MFREE(dhdp->osh, buf, buf_len); + } + return ret; +} + +static int +__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, + uint32 mode, uint32 enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + wl_pkt_filter_enable_t * pkt_filterp; + char *buf; + u32 cmd_len, buf_len; + int ifidx, ret; + char cmd[] = "pkt_filter_enable"; + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + cmd_len = sizeof(cmd); + buf_len = cmd_len + sizeof(*pkt_filterp); + + buf = MALLOCZ(dhdp->osh, buf_len); + if (unlikely(!buf)) { + DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); + return -ENOMEM; + } + + memcpy(buf, cmd, cmd_len); + + pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len); + pkt_filterp->id = htod32(filter_id); + pkt_filterp->enable = htod32(enable); + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + goto exit; + } + + ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode, + WLC_SET_VAR, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + 
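+/* XXX Hedged sketch of the iovar-buffer layout built above: the
+ * NUL-terminated iovar name goes first, the binary parameter struct starts
+ * right after the terminator, and the whole blob is sent with WLC_SET_VAR.
+ * Hypothetical struct and names; userspace model for clarity.
+ */
+#if 0
+#include <string.h>
+#include <stdlib.h>
+
+struct my_filter_enable {
+    unsigned int id;
+    unsigned int enable;
+};
+
+static void *my_pack_iovar(const char *name,
+    const struct my_filter_enable *arg, size_t *out_len)
+{
+    size_t name_len = strlen(name) + 1; /* keep the NUL separator */
+    char *buf = calloc(1, name_len + sizeof(*arg));
+
+    if (!buf)
+        return NULL;
+    memcpy(buf, name, name_len);
+    memcpy(buf + name_len, arg, sizeof(*arg)); /* payload after NUL */
+    *out_len = name_len + sizeof(*arg);
+    return buf; /* caller frees after the ioctl completes */
+}
+#endif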
+exit: + if (buf) { + MFREE(dhdp->osh, buf, buf_len); + } + return ret; +} + +static int +__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete", + htod32(filter_id), WLC_SET_VAR, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + + return ret; +} + +void dhd_apf_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + _dhd_apf_lock_local(dhd); +} + +void dhd_apf_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + _dhd_apf_unlock_local(dhd); +} + +int +dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; + + if (!FW_SUPPORTED(dhdp, apf)) { + DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); + + /* + * Notify Android framework that APF is not supported by setting + * version as zero. + */ + *version = 0; + return BCME_OK; + } + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version, + WLC_GET_VAR, FALSE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to get APF version, ret=%d\n", + __FUNCTION__, ret)); + } + + return ret; +} + +int +dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; + + if (!FW_SUPPORTED(dhdp, apf)) { + DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); + *max_len = 0; + return BCME_OK; + } + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len, + WLC_GET_VAR, FALSE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n", + __FUNCTION__, ret)); + } + + return ret; +} + +int +dhd_dev_apf_add_filter(struct net_device *ndev, u8* program, + uint32 program_len) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret; + + DHD_APF_LOCK(ndev); + + /* delete, if filter already exists */ + if (dhdp->apf_set) { + ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); + if (unlikely(ret)) { + goto exit; + } + dhdp->apf_set = FALSE; + } + + ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len); + if (ret) { + goto exit; + } + dhdp->apf_set = TRUE; + + if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { + /* Driver is still in (early) suspend state, enable APF filter back */ + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } +exit: + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_enable_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + bool nan_dp_active = false; + + DHD_APF_LOCK(ndev); +#ifdef WL_NAN + nan_dp_active = wl_cfgnan_is_dp_active(ndev); +#endif /* WL_NAN */ + if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) && + !nan_dp_active)) { + 
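+/* XXX Hedged sketch (hypothetical names) of the replace-under-lock flow
+ * that dhd_dev_apf_add_filter() implements above: one mutex serializes
+ * delete -> add -> optional re-enable, and the apf_set flag only flips
+ * after the firmware call succeeds, so a failed add never leaves a stale
+ * "installed" state behind.
+ */
+#if 0
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct my_ctx {
+    struct mutex apf_mutex;
+    bool apf_set;
+};
+
+static int my_apf_delete(struct my_ctx *ctx);
+static int my_apf_add(struct my_ctx *ctx, const u8 *prog, u32 len);
+
+static int my_apf_replace(struct my_ctx *ctx, const u8 *prog, u32 len)
+{
+    int ret = 0;
+
+    mutex_lock(&ctx->apf_mutex);
+    if (ctx->apf_set) {
+        ret = my_apf_delete(ctx);
+        if (ret)
+            goto out;
+        ctx->apf_set = false;
+    }
+    ret = my_apf_add(ctx, prog, len);
+    if (!ret)
+        ctx->apf_set = true;    /* flip only on success */
+out:
+    mutex_unlock(&ctx->apf_mutex);
+    return ret;
+}
+#endif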
ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_disable_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_APF_LOCK(ndev); + + if (dhdp->apf_set) { + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE); + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_delete_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_APF_LOCK(ndev); + + if (dhdp->apf_set) { + ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); + if (!ret) { + dhdp->apf_set = FALSE; + } + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} +#endif /* PKT_FILTER_SUPPORT && APF */ + +#if defined(OEM_ANDROID) +static void dhd_hang_process(struct work_struct *work_data) +{ + struct net_device *dev; +#ifdef IFACE_HANG_FORCE_DEV_CLOSE + struct net_device *ndev; + uint8 i = 0; +#endif /* IFACE_HANG_FORCE_DEV_CLOSE */ + struct dhd_info *dhd; + /* Ignore compiler warnings due to -Werror=cast-qual */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd = container_of(work_data, dhd_info_t, dhd_hang_process_work); + GCC_DIAGNOSTIC_POP(); + + if (!dhd || !dhd->iflist[0]) + return; + dev = dhd->iflist[0]->net; + + if (dev) { +#if defined(WL_WIRELESS_EXT) + wl_iw_send_priv_event(dev, "HANG"); +#endif +#if defined(WL_CFG80211) + wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif + } +#ifdef IFACE_HANG_FORCE_DEV_CLOSE + /* + * For HW2, dev_close need to be done to recover + * from upper layer after hang. For Interposer skip + * dev_close so that dhd iovars can be used to take + * socramdump after crash, also skip for HW4 as + * handling of hang event is different + */ + + rtnl_lock(); + for (i = 0; i < DHD_MAX_IFS; i++) { + ndev = dhd->iflist[i] ? 
dhd->iflist[i]->net : NULL;
+		if (ndev && (ndev->flags & IFF_UP)) {
+			DHD_ERROR(("ndev->name : %s dev close\n",
+				ndev->name));
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+			dhd_download_fw_on_driverload = FALSE;
+#endif
+			dev_close(ndev);
+		}
+	}
+	rtnl_unlock();
+#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
+}
+
+#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
+extern dhd_pub_t *link_recovery;
+void dhd_host_recover_link(void)
+{
+	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
+	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
+	dhd_bus_set_linkdown(link_recovery, TRUE);
+	dhd_os_send_hang_message(link_recovery);
+}
+EXPORT_SYMBOL(dhd_host_recover_link);
+#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
+
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+#define MAX_CONSECUTIVE_MFG_HANG_COUNT 2
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+{
+	int ret = 0;
+	dhd_info_t *dhd_info = NULL;
+#ifdef WL_CFG80211
+	struct net_device *primary_ndev;
+	struct bcm_cfg80211 *cfg;
+#endif /* WL_CFG80211 */
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_info = (dhd_info_t *)dhdp->info;
+	BCM_REFERENCE(dhd_info);
+
+#if defined(WLAN_ACCEL_BOOT)
+	if (!dhd_info->wl_accel_force_reg_on) {
+		DHD_ERROR(("%s: set force reg on\n", __FUNCTION__));
+		dhd_info->wl_accel_force_reg_on = TRUE;
+	}
+#endif /* WLAN_ACCEL_BOOT */
+
+	if (!dhdp->hang_report) {
+		DHD_ERROR(("%s: hang_report is disabled\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+	if (dhd_info->scheduled_memdump) {
+		DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
+		dhdp->hang_was_pending = 1;
+		return BCME_OK;
+	}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+#ifdef WL_CFG80211
+	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+	if (!primary_ndev) {
+		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+		return -ENODEV;
+	}
+	cfg = wl_get_cfg(primary_ndev);
+	if (!cfg) {
+		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	/* Skip sending HANG event to framework if driver is not ready */
+	if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
+		DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
+		return -ENODEV;
+	}
+#endif /* WL_CFG80211 */
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+	if (dhdp->req_hang_type) {
+		DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+			__FUNCTION__, dhdp->req_hang_type));
+		dhdp->req_hang_type = 0;
+	}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+	if (!dhdp->hang_was_sent) {
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+		if (dhdp->op_mode & DHD_FLAG_MFG_MODE) {
+			dhdp->hang_count++;
+			if (dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT) {
+				DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
+					__FUNCTION__, dhdp->hang_count));
+				BUG_ON(1);
+			}
+		}
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+#ifdef DHD_DEBUG_UART
+		/* If the PCIe link is broken, run the debug UART application
+		 * to gather ramdump data from the dongle via UART
+		 */
+		if (!dhdp->info->duart_execute) {
+			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+				(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
+				dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
+		}
+#endif /* DHD_DEBUG_UART */
+		dhdp->hang_was_sent = 1;
+#ifdef BT_OVER_SDIO
+		dhdp->is_bt_recovery_required = TRUE;
+#endif
+		schedule_work(&dhdp->info->dhd_hang_process_work);
+		DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
+			dhdp->rxcnt_timeout,
dhdp->txcnt_timeout, dhdp->busstate)); + printf("%s\n", info_string); + printf("MAC %pM\n", &dhdp->mac); + } + return ret; +} + +int net_os_send_hang_message(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + /* Report FW problem when enabled */ + if (dhd->pub.hang_report) { +#ifdef BT_OVER_SDIO + if (netif_running(dev)) { +#endif /* BT_OVER_SDIO */ + ret = dhd_os_send_hang_message(&dhd->pub); +#ifdef BT_OVER_SDIO + } + DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__)); + bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev)); +#endif /* BT_OVER_SDIO */ + } else { + DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n", + __FUNCTION__)); + } + } + return ret; +} + +int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num) +{ + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + int reason; + + dhd = DHD_DEV_INFO(dev); + if (dhd) { + dhdp = &dhd->pub; + } + + if (!dhd || !dhdp) { + return 0; + } + + reason = bcm_strtoul(string_num, NULL, 0); + DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason)); + + if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + reason = 0; + } + + dhdp->hang_reason = reason; + + return net_os_send_hang_message(dev); +} +#endif /* OEM_ANDROID */ + +int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return wifi_platform_set_power(dhd->adapter, on, delay_msec); +} + +int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long delay_msec = on ? WIFI_TURNON_DELAY : WIFI_TURNOFF_DELAY; + return wifi_platform_set_power(dhd->adapter, on, delay_msec); +} + +bool dhd_force_country_change(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd && dhd->pub.up) + return dhd->pub.force_country_change; + return FALSE; +} + +void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, + wl_country_t *cspec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (!dhd->pub.is_blob) +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { +#if defined(CUSTOM_COUNTRY_CODE) + get_customized_country_code(dhd->adapter, country_iso_code, cspec, + dhd->pub.dhd_cflags); +#else + get_customized_country_code(dhd->adapter, country_iso_code, cspec); +#endif /* CUSTOM_COUNTRY_CODE */ + } +#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE) + else { + /* Replace the ccode to XZ if ccode is undefined country */ + if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) { + strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ); + strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ); + strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ); + DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code)); + } + } +#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */ + +#ifdef KEEP_JP_REGREV +/* XXX Needed by customer's request */ + if (strncmp(country_iso_code, "JP", 3) == 0) { +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (dhd->pub.is_blob) { + if (strncmp(dhd->pub.vars_ccode, "J1", 3) == 0) { + memcpy(cspec->ccode, dhd->pub.vars_ccode, + sizeof(dhd->pub.vars_ccode)); + } + } else +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { + if (strncmp(dhd->pub.vars_ccode, "JP", 3) == 0) { + cspec->rev = dhd->pub.vars_regrev; + } + } + } +#endif /* KEEP_JP_REGREV */ + BCM_REFERENCE(dhd); +} + +void dhd_bus_country_set(struct 
net_device *dev, wl_country_t *cspec, bool notify) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif + + if (dhd && dhd->pub.up) { + memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); +#ifdef WL_CFG80211 + wl_update_wiphybands(cfg, notify); +#endif + } +} + +void dhd_bus_band_set(struct net_device *dev, uint band) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif + if (dhd && dhd->pub.up) { +#ifdef WL_CFG80211 + wl_update_wiphybands(cfg, true); +#endif + } +} + +int dhd_net_set_fw_path(struct net_device *dev, char *fw) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!fw || fw[0] == '\0') + return -EINVAL; + + strlcpy(dhd->fw_path, fw, sizeof(dhd->fw_path)); + +#if defined(OEM_ANDROID) && defined(SOFTAP) + if (strstr(fw, "apsta") != NULL) { + DHD_INFO(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } else { + DHD_INFO(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; + } +#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */ + return 0; +} + +void dhd_net_if_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_lock_local(dhd); +} + +void dhd_net_if_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_unlock_local(dhd); +} + +static void dhd_net_if_lock_local(dhd_info_t *dhd) +{ +#if defined(OEM_ANDROID) + if (dhd) + mutex_lock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_net_if_unlock_local(dhd_info_t *dhd) +{ +#if defined(OEM_ANDROID) + if (dhd) + mutex_unlock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_suspend_lock(dhd_pub_t *pub) +{ +#if defined(OEM_ANDROID) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_lock(&dhd->dhd_suspend_mutex); +#endif +} + +static void dhd_suspend_unlock(dhd_pub_t *pub) +{ +#if defined(OEM_ANDROID) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_unlock(&dhd->dhd_suspend_mutex); +#endif +} + +unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags = 0; + + if (dhd) { + flags = osl_spin_lock(&dhd->dhd_lock); + } + + return flags; +} + +void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + osl_spin_unlock(&dhd->dhd_lock, flags); + } +} + +void * +dhd_os_dbgring_lock_init(osl_t *osh) +{ + struct mutex *mtx = NULL; + + mtx = MALLOCZ(osh, sizeof(*mtx)); + if (mtx) + mutex_init(mtx); + + return mtx; +} + +void +dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx) +{ + if (mtx) { + mutex_destroy(mtx); + MFREE(osh, mtx, sizeof(struct mutex)); + } +} + +static int +dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +{ + return (atomic_read(&dhd->pend_8021x_cnt)); +} + +#define MAX_WAIT_FOR_8021X_TX 100 + +int +dhd_wait_pend8021x(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int timeout = msecs_to_jiffies(10); + int ntimes = MAX_WAIT_FOR_8021X_TX; + int pend = dhd_get_pend_8021x_cnt(dhd); + + while (ntimes && pend) { + if (pend) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(timeout); + set_current_state(TASK_RUNNING); + ntimes--; + } + pend = dhd_get_pend_8021x_cnt(dhd); + } + if (ntimes == 0) + { + atomic_set(&dhd->pend_8021x_cnt, 0); + WL_MSG(dev->name, "TIMEOUT\n"); + } + return pend; +} + +#if defined(BCM_ROUTER_DHD) || defined(DHD_DEBUG) +int write_file(const char * file_name, uint32 flags, uint8 *buf, int size) +{ + int ret = 0; + 
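/* Kernels before 5.10 need the get_fs()/set_fs(KERNEL_DS) address-limit
+	 * override so that vfs_write() accepts a kernel-space buffer; set_fs()
+	 * was removed in 5.10, hence the version guards in this function.
+	 */
+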
struct file *fp = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + mm_segment_t old_fs; +#endif + loff_t pos = 0; + + /* change to KERNEL_DS address limit */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + old_fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + /* open file to write */ + fp = filp_open(file_name, flags, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp))); + goto exit; + } + + /* Write buf to file */ + ret = vfs_write(fp, buf, size, &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; + } + + /* Sync file from filesystem to physical media */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + ret = vfs_fsync(fp, 0); +#else + ret = vfs_fsync(fp, fp->f_path.dentry, 0); +#endif + if (ret < 0) { + DHD_ERROR(("sync file error, error = %d\n", ret)); + goto exit; + } + ret = BCME_OK; + +exit: + /* close file before return */ + if (!IS_ERR(fp)) + filp_close(fp, current->files); + + /* restore previous address limit */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(old_fs); +#endif + + return ret; +} +#endif /* BCM_ROUTER_DHD || DHD_DEBUG */ + +#ifdef DHD_DEBUG +static void +dhd_convert_memdump_type_to_str(uint32 type, char *buf, size_t buf_len, int substr_type) +{ + char *type_str = NULL; + + switch (type) { + case DUMP_TYPE_RESUMED_ON_TIMEOUT: + type_str = "resumed_on_timeout"; + break; + case DUMP_TYPE_D3_ACK_TIMEOUT: + type_str = "D3_ACK_timeout"; + break; + case DUMP_TYPE_DONGLE_TRAP: + type_str = "Dongle_Trap"; + break; + case DUMP_TYPE_MEMORY_CORRUPTION: + type_str = "Memory_Corruption"; + break; + case DUMP_TYPE_PKTID_AUDIT_FAILURE: + type_str = "PKTID_AUDIT_Fail"; + break; + case DUMP_TYPE_PKTID_INVALID: + type_str = "PKTID_INVALID"; + break; + case DUMP_TYPE_SCAN_TIMEOUT: + type_str = "SCAN_timeout"; + break; + case DUMP_TYPE_SCAN_BUSY: + type_str = "SCAN_Busy"; + break; + case DUMP_TYPE_BY_SYSDUMP: + if (substr_type == CMD_UNWANTED) { + type_str = "BY_SYSDUMP_FORUSER_unwanted"; + } else if (substr_type == CMD_DISCONNECTED) { + type_str = "BY_SYSDUMP_FORUSER_disconnected"; + } else { + type_str = "BY_SYSDUMP_FORUSER"; + } + break; + case DUMP_TYPE_BY_LIVELOCK: + type_str = "BY_LIVELOCK"; + break; + case DUMP_TYPE_AP_LINKUP_FAILURE: + type_str = "BY_AP_LINK_FAILURE"; + break; + case DUMP_TYPE_AP_ABNORMAL_ACCESS: + type_str = "INVALID_ACCESS"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX: + type_str = "ERROR_RX_TIMED_OUT"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX: + type_str = "ERROR_TX_TIMED_OUT"; + break; + case DUMP_TYPE_CFG_VENDOR_TRIGGERED: + type_str = "CFG_VENDOR_TRIGGERED"; + break; + case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR: + type_str = "BY_INVALID_RING_RDWR"; + break; + case DUMP_TYPE_IFACE_OP_FAILURE: + type_str = "BY_IFACE_OP_FAILURE"; + break; + case DUMP_TYPE_TRANS_ID_MISMATCH: + type_str = "BY_TRANS_ID_MISMATCH"; + break; +#ifdef DEBUG_DNGL_INIT_FAIL + case DUMP_TYPE_DONGLE_INIT_FAILURE: + type_str = "DONGLE_INIT_FAIL"; + break; +#endif /* DEBUG_DNGL_INIT_FAIL */ +#ifdef SUPPORT_LINKDOWN_RECOVERY + case DUMP_TYPE_READ_SHM_FAIL: + type_str = "READ_SHM_FAIL"; + break; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + case DUMP_TYPE_DONGLE_HOST_EVENT: + type_str = "BY_DONGLE_HOST_EVENT"; + break; + case DUMP_TYPE_SMMU_FAULT: + type_str = "SMMU_FAULT"; + break; +#ifdef DHD_ERPOM + case DUMP_TYPE_DUE_TO_BT: + type_str = "DUE_TO_BT"; + break; +#endif /* DHD_ERPOM */ + case DUMP_TYPE_BY_USER: + type_str = "BY_USER"; + break; + case 
DUMP_TYPE_LOGSET_BEYOND_RANGE: + type_str = "LOGSET_BEYOND_RANGE"; + break; + case DUMP_TYPE_CTO_RECOVERY: + type_str = "CTO_RECOVERY"; + break; + case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR: + type_str = "SEQUENTIAL_PRIVCMD_ERROR"; + break; + case DUMP_TYPE_PROXD_TIMEOUT: + type_str = "PROXD_TIMEOUT"; + break; + case DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE: + type_str = "INBAND_DEVICE_WAKE_FAILURE"; + break; + case DUMP_TYPE_PKTID_POOL_DEPLETED: + type_str = "PKTID_POOL_DEPLETED"; + break; + case DUMP_TYPE_ESCAN_SYNCID_MISMATCH: + type_str = "ESCAN_SYNCID_MISMATCH"; + break; + case DUMP_TYPE_INVALID_SHINFO_NRFRAGS: + type_str = "INVALID_SHINFO_NRFRAGS"; + break; + default: + type_str = "Unknown_type"; + break; + } + + strlcpy(buf, type_str, buf_len); +} + +void +dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname) +{ + char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN]; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); + dhd_pub_t *dhdp = &dhd->pub; + + /* Init file name */ + memset(memdump_path, 0, len); + memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN); + dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN, + dhdp->debug_dump_subcmd); + clear_debug_dump_time(dhdp->debug_dump_time_str); + get_debug_dump_time(dhdp->debug_dump_time_str); + snprintf(memdump_path, len, "%s%s_%s_" "%s", + DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str); + + if (strstr(fname, "sssr_dump")) { + DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path); + } else { + DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__, + memdump_path, FILE_NAME_HAL_TAG)); + } +} + +int +write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname) +{ + int ret = 0; + char memdump_path[DHD_MEMDUMP_PATH_STR_LEN]; + char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN]; + uint32 file_mode; + + /* Init file name */ + memset(memdump_path, 0, DHD_MEMDUMP_PATH_STR_LEN); + memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN); + dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN, + dhd->debug_dump_subcmd); + clear_debug_dump_time(dhd->debug_dump_time_str); + get_debug_dump_time(dhd->debug_dump_time_str); + + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s", + DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str); +#ifdef CUSTOMER_HW4_DEBUG + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY) + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__) + file_mode = O_CREAT | O_WRONLY; +#elif defined(OEM_ANDROID) + /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are + * calling BUG_ON immediately after collecting the socram dump. + * So the file write operation should directly write the contents into the + * file instead of caching it. O_TRUNC flag ensures that file will be re-written + * instead of appending. 
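+	 * (Note: only O_SYNC is actually OR-ed into file_mode in this build;
+	 * O_DIRECT and O_TRUNC are not set despite being mentioned above.)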
+ */ + file_mode = O_CREAT | O_WRONLY | O_SYNC; + { + struct file *fp = filp_open(memdump_path, file_mode, 0664); + /* Check if it is live Brix image having /installmedia, else use /data */ + if (IS_ERR(fp)) { + DHD_ERROR(("open file %s, try /data/\n", memdump_path)); + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s", + "/data/", fname, memdump_type, dhd->debug_dump_time_str); + } else { + filp_close(fp, NULL); + } + } +#else + file_mode = O_CREAT | O_WRONLY; +#endif /* CUSTOMER_HW4_DEBUG */ + + /* print SOCRAM dump file path */ + DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path)); + +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size); +#endif /* DHD_LOG_DUMP */ + + /* Write file */ + ret = write_file(memdump_path, file_mode, buf, size); + +#ifdef DHD_DUMP_MNGR + if (ret == BCME_OK) { + dhd_dump_file_manage_enqueue(dhd, memdump_path, fname); + } +#endif /* DHD_DUMP_MNGR */ + + return ret; +} +#endif /* DHD_DEBUG */ + +int dhd_os_wake_lock_timeout(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? + dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd->wakelock_rx_timeout_enable) + dhd_wake_lock_timeout(&dhd->wl_rxwake, + msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); + if (dhd->wakelock_ctrl_timeout_enable) + dhd_wake_lock_timeout(&dhd->wl_ctrlwake, + msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); +#endif + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int net_os_wake_lock_timeout(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_timeout(&dhd->pub); + return ret; +} + +int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_rx_timeout_enable) + dhd->wakelock_rx_timeout_enable = val; + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_ctrl_timeout_enable) + dhd->wakelock_ctrl_timeout_enable = val; + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd_wake_lock_active(&dhd->wl_ctrlwake)) + dhd_wake_unlock(&dhd->wl_ctrlwake); +#endif + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = 
dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#include <linux/hashtable.h>
+#else
+#include <linux/hash.h>
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+/* Define 2^5 = 32 bucket size hash table */
+DEFINE_HASHTABLE(wklock_history, 5);
+#else
+/* Define 2^5 = 32 bucket size hash table */
+struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+atomic_t trace_wklock_onoff;
+typedef enum dhd_wklock_type {
+	DHD_WAKE_LOCK,
+	DHD_WAKE_UNLOCK,
+	DHD_WAIVE_LOCK,
+	DHD_RESTORE_LOCK
+} dhd_wklock_t;
+
+struct wk_trace_record {
+	unsigned long addr;		/* Address of the instruction */
+	dhd_wklock_t lock_type;		/* lock_type */
+	unsigned long long counter;	/* counter information */
+	struct hlist_node wklock_node;	/* hash node */
+};
+
+static struct wk_trace_record *find_wklock_entry(unsigned long addr)
+{
+	struct wk_trace_record *wklock_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
+#else
+	struct hlist_node *entry;
+	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+	{
+		GCC_DIAGNOSTIC_POP();
+		if (wklock_info->addr == addr) {
+			return wklock_info;
+		}
+	}
+	return NULL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define HASH_ADD(hashtable, node, key) \
+	do { \
+		hash_add(hashtable, node, key); \
+	} while (0);
+#else
+#define HASH_ADD(hashtable, node, key) \
+	do { \
+		int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
+		hlist_add_head(node, &hashtable[index]); \
+	} while (0);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
+
+/* Each record is keyed by the caller's return address, so repeated
+ * lock/unlock calls from the same call site share a single counter.
+ */
+#define STORE_WKLOCK_RECORD(wklock_type) \
+	do { \
+		struct wk_trace_record *wklock_info = NULL; \
+		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
+		wklock_info = find_wklock_entry(func_addr); \
+		if (wklock_info) { \
+			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
+				wklock_info->counter = dhd->wakelock_counter; \
+			} else { \
+				wklock_info->counter++; \
+			} \
+		} else { \
+			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
+			if (!wklock_info) {\
+				printk("Can't allocate wk_trace_record \n"); \
+			} else { \
+				wklock_info->addr = func_addr; \
+				wklock_info->lock_type = wklock_type; \
+				if (wklock_type == DHD_WAIVE_LOCK || \
+						wklock_type == DHD_RESTORE_LOCK) { \
+					wklock_info->counter = dhd->wakelock_counter; \
+				} else { \
+					wklock_info->counter++; \
+				} \
+				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
+			} \
+		} \
+	} while (0);
+
+static inline void dhd_wk_lock_rec_dump(void)
+{
+	int bkt;
+	struct wk_trace_record *wklock_info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#else
+	struct hlist_node *entry = NULL;
+	int max_index = ARRAY_SIZE(wklock_history);
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	for (bkt = 0; bkt
< max_index; bkt++) + hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + GCC_DIAGNOSTIC_POP(); + switch (wklock_info->lock_type) { + case DHD_WAKE_LOCK: + DHD_ERROR(("wakelock lock : %pS lock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAKE_UNLOCK: + DHD_ERROR(("wakelock unlock : %pS," + " unlock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAIVE_LOCK: + DHD_ERROR(("wakelock waive : %pS before_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_RESTORE_LOCK: + DHD_ERROR(("wakelock restore : %pS, after_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + } + } +} + +static void dhd_wk_lock_trace_init(struct dhd_info *dhd) +{ + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + int i; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_init(wklock_history); +#else + for (i = 0; i < ARRAY_SIZE(wklock_history); i++) + INIT_HLIST_HEAD(&wklock_history[i]); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + atomic_set(&trace_wklock_onoff, 1); +} + +static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd) +{ + int bkt; + struct wk_trace_record *wklock_info; + struct hlist_node *tmp; + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node) +#else + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry_safe(wklock_info, entry, tmp, + &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + { + GCC_DIAGNOSTIC_POP(); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_del(&wklock_info->wklock_node); +#else + hlist_del_init(&wklock_info->wklock_node); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + kfree(wklock_info); + } + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); +} + +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + unsigned long flags; + + DHD_ERROR(("DHD Printing wl_wake Lock/Unlock Record \r\n")); + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + dhd_wk_lock_rec_dump(); + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + +} +#else +#define STORE_WKLOCK_RECORD(wklock_type) +#endif /* ! 
DHD_TRACE_WAKE_LOCK */ + +int dhd_os_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + dhd_wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_stay_awake(pub); +#endif + } +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_WAKE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + dhd->wakelock_counter++; + ret = dhd->wakelock_counter; + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + + return ret; +} + +void dhd_event_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef CONFIG_HAS_WAKELOCK + dhd_wake_lock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_stay_awake(pub); +#endif + } +} + +void +dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + dhd_wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ +} + +void +dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + dhd_wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ +} + +void +dhd_nan_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + dhd_wake_lock_timeout(&dhd->wl_nanwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ +} + +int net_os_wake_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock(&dhd->pub); + return ret; +} + +int dhd_os_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + dhd_os_wake_lock_timeout(pub); + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + + if (dhd->wakelock_counter > 0) { + dhd->wakelock_counter--; +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + dhd_wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_relax(pub); +#endif + } + ret = dhd->wakelock_counter; + } + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +void dhd_event_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef CONFIG_HAS_WAKELOCK + dhd_wake_unlock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_relax(pub); +#endif + } +} + +void dhd_pm_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_pmwake is active, unlock it */ + if (dhd_wake_lock_active(&dhd->wl_pmwake)) { + dhd_wake_unlock(&dhd->wl_pmwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void dhd_txfl_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_txflwake is active, unlock it */ + if (dhd_wake_lock_active(&dhd->wl_txflwake)) { + 
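/* Release the wl_txflwake wakeup source immediately rather than
+			 * letting the timeout armed in dhd_txfl_wake_lock_timeout()
+			 * run out.
+			 */
+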
dhd_wake_unlock(&dhd->wl_txflwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void dhd_nan_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_nanwake is active, unlock it */ + if (dhd_wake_lock_active(&dhd->wl_nanwake)) { + dhd_wake_unlock(&dhd->wl_nanwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +int dhd_os_check_wakelock(dhd_pub_t *pub) +{ +#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO) +#if defined(CONFIG_HAS_WAKELOCK) + int l1, l2; + int c, lock_active; +#endif /* CONFIG_HAS_WAKELOCK */ + dhd_info_t *dhd; + + if (!pub) + return 0; + dhd = (dhd_info_t *)(pub->info); + if (!dhd) { + return 0; + } +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + c = dhd->wakelock_counter; + l1 = dhd_wake_lock_active(&dhd->wl_wifi); + l2 = dhd_wake_lock_active(&dhd->wl_wdwake); + lock_active = (l1 || l2); + /* Indicate to the SD Host to avoid going to suspend if internal locks are up */ + if (lock_active) { + DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d\n", + __FUNCTION__, c, l1, l2)); + return 1; + } +#elif defined(BCMSDIO) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { + DHD_ERROR(("%s wakelock c-%d\n", __FUNCTION__, dhd->wakelock_counter)); + return 1; + } +#endif + return 0; +} + +int +dhd_os_check_wakelock_all(dhd_pub_t *pub) +{ +#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO) +#if defined(CONFIG_HAS_WAKELOCK) + int l1, l2, l3, l4, l7, l8, l9, l10; + int l5 = 0, l6 = 0; + int c, lock_active; +#endif /* CONFIG_HAS_WAKELOCK */ + dhd_info_t *dhd; + + if (!pub) { + return 0; + } + if (pub->up == 0) { + DHD_ERROR(("%s: skip as down in progress\n", __FUNCTION__)); + return 0; + } + dhd = (dhd_info_t *)(pub->info); + if (!dhd) { + return 0; + } +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + c = dhd->wakelock_counter; + l1 = dhd_wake_lock_active(&dhd->wl_wifi); + l2 = dhd_wake_lock_active(&dhd->wl_wdwake); + l3 = dhd_wake_lock_active(&dhd->wl_rxwake); + l4 = dhd_wake_lock_active(&dhd->wl_ctrlwake); + l7 = dhd_wake_lock_active(&dhd->wl_evtwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + l5 = dhd_wake_lock_active(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + l6 = dhd_wake_lock_active(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ + l8 = dhd_wake_lock_active(&dhd->wl_pmwake); + l9 = dhd_wake_lock_active(&dhd->wl_txflwake); + l10 = dhd_wake_lock_active(&dhd->wl_nanwake); + lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9 || l10); + + /* Indicate to the Host to avoid going to suspend if internal locks are up */ + if (lock_active) { + DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " + "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d nan-%d\n", + __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10)); + return 1; + } +#elif defined(BCMSDIO) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { + DHD_ERROR(("%s wakelock c-%d\n", __FUNCTION__, dhd->wakelock_counter)); + return 1; + } +#endif /* defined(BCMSDIO) */ + return 0; +} + +int net_os_wake_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_unlock(&dhd->pub); + return ret; +} + +int dhd_os_wd_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter == 0 && 
!dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + /* if wakelock_wd_counter was never used : lock it at once */ + dhd_wake_lock(&dhd->wl_wdwake); +#endif + } + dhd->wakelock_wd_counter++; + ret = dhd->wakelock_wd_counter; + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wd_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter > 0) { + dhd->wakelock_wd_counter = 0; + if (!dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + dhd_wake_unlock(&dhd->wl_wdwake); +#endif + } + } + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +void +dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + dhd_wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void +dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_intrwake is active, unlock it */ + if (dhd_wake_lock_active(&dhd->wl_intrwake)) { + dhd_wake_unlock(&dhd->wl_intrwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DHD_USE_SCAN_WAKELOCK +void +dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + dhd_wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void +dhd_os_scan_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_scanwake is active, unlock it */ + if (dhd_wake_lock_active(&dhd->wl_scanwake)) { + dhd_wake_unlock(&dhd->wl_scanwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* DHD_USE_SCAN_WAKELOCK */ + +/* waive wakelocks for operations such as IOVARs in suspend function, must be closed + * by a paired function call to dhd_wakelock_restore. 
returns current wakelock counter + */ +int dhd_os_wake_lock_waive(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (dhd->waive_wakelock == FALSE) { +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + /* record current lock status */ + dhd->wakelock_before_waive = dhd->wakelock_counter; + dhd->waive_wakelock = TRUE; + } + ret = dhd->wakelock_wd_counter; + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wake_lock_restore(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (!dhd) + return 0; + if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0) + return 0; + + DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (!dhd->waive_wakelock) + goto exit; + + dhd->waive_wakelock = FALSE; + /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore, + * we need to make it up by calling dhd_wake_lock or pm_stay_awake. or if somebody releases + * the lock in between, do the same by calling dhd_wake_unlock or pm_relax + */ +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + + if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) { +#ifdef CONFIG_HAS_WAKELOCK + dhd_wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_stay_awake(&dhd->pub); +#endif + } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + dhd_wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_relax(&dhd->pub); +#endif + } + dhd->wakelock_before_waive = 0; +exit: + ret = dhd->wakelock_wd_counter; + DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags); + return ret; +} + +void dhd_os_wake_lock_init(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + /* wakelocks prevent a system from going into a low power state */ +#ifdef CONFIG_HAS_WAKELOCK + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + dhd_wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); + dhd_wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); + dhd_wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); + dhd_wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake"); + dhd_wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake"); +#ifdef BCMPCIE_OOB_HOST_WAKE + dhd_wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + dhd_wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake"); +#endif /* DHD_USE_SCAN_WAKELOCK */ + dhd_wake_lock_init(&dhd->wl_nanwake, WAKE_LOCK_SUSPEND, "wlan_nan_wake"); +#endif /* CONFIG_HAS_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_init(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +} + +void dhd_os_wake_lock_destroy(struct dhd_info *dhd) 
+{ + DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + dhd_wake_lock_unlock_destroy(&dhd->wl_rxwake); + dhd_wake_lock_unlock_destroy(&dhd->wl_ctrlwake); + dhd_wake_lock_unlock_destroy(&dhd->wl_evtwake); + dhd_wake_lock_unlock_destroy(&dhd->wl_pmwake); + dhd_wake_lock_unlock_destroy(&dhd->wl_txflwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + dhd_wake_lock_unlock_destroy(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + dhd_wake_lock_unlock_destroy(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ + dhd_wake_lock_unlock_destroy(&dhd->wl_nanwake); +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_deinit(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +#else /* !CONFIG_HAS_WAKELOCK */ + if (dhd->wakelock_counter > 0) { + DHD_ERROR(("%s: wake lock count=%d\n", + __FUNCTION__, dhd->wakelock_counter)); + while (dhd_os_wake_unlock(&dhd->pub)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +bool dhd_os_check_if_up(dhd_pub_t *pub) +{ + if (!pub) + return FALSE; + return pub->up; +} + +/* function to collect firmware, chip id and chip version info */ +void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) +{ + int i; + + i = snprintf(info_string, sizeof(info_string), + " Driver: %s\n%s Firmware: %s\n%s CLM: %s ", + EPI_VERSION_STR, + DHD_LOG_PREFIXS, fw, + DHD_LOG_PREFIXS, clm_version); + printf("%s\n", info_string); + + if (!dhdp) + return; + + i = snprintf(&info_string[i], sizeof(info_string) - i, + "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp), + dhd_conf_get_chiprev(dhdp)); +} + +int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) +{ + int ifidx; + int ret = 0; + dhd_info_t *dhd = NULL; + + if (!net || !DEV_PRIV(net)) { + DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n", + __FUNCTION__, net, DEV_PRIV(net))); + return -EINVAL; + } + + dhd = DHD_DEV_INFO(net); + if (!dhd) + return -EINVAL; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len); + dhd_check_hang(net, &dhd->pub, ret); + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return ret; +} + +bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret) +{ + struct net_device *net; + + net = dhd_idx2net(dhdp, ifidx); + if (!net) { + DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + return dhd_check_hang(net, dhdp, ret); +} + +/* Return instance */ +int dhd_get_instance(dhd_pub_t *dhdp) +{ + return dhdp->info->unit; +} + +#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP) +#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */ +int dhd_deepsleep(struct net_device *dev, int flag) +{ + char iovbuf[20]; + uint powervar = 0; + dhd_info_t *dhd; + dhd_pub_t *dhdp; + int cnt = 0; + int ret = 0; + + dhd = DHD_DEV_INFO(dev); + dhdp = &dhd->pub; + + switch (flag) { + case 1 : /* Deepsleep on */ + DHD_ERROR(("[WiFi] Deepsleep On\n")); + /* give some time to sysioc_work before deepsleep */ + OSL_SLEEP(200); +#ifdef PKT_FILTER_SUPPORT + /* disable pkt filter */ + dhd_enable_packet_filter(0, dhdp); +#endif /* PKT_FILTER_SUPPORT */ + /* Disable MPC */ + powervar = 0; + ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), 
NULL,
+		0, TRUE);
+		if (ret) {
+			DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
+		}
+		/* Enable Deepsleep */
+		powervar = 1;
+		ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
+			NULL, 0, TRUE);
+		if (ret) {
+			DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
+		}
+		break;
+
+	case 0: /* Deepsleep Off */
+		DHD_ERROR(("[WiFi] Deepsleep Off\n"));
+
+		/* Disable Deepsleep */
+		for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
+			powervar = 0;
+			ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
+				sizeof(powervar), NULL, 0, TRUE);
+			if (ret) {
+				DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
+			}
+
+			ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
+				sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
+			if (ret < 0) {
+				DHD_ERROR(("failed to read dhd deepsleep status,"
+					" ret value :%d\n", ret));
+			} else {
+				if (!(*(int *)iovbuf)) {
+					DHD_ERROR(("deepsleep mode is 0,"
+						" count: %d\n", cnt));
+					break;
+				}
+			}
+		}
+
+		/* Enable MPC */
+		powervar = 1;
+		ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar),
+			NULL, 0, TRUE);
+		if (ret) {
+			DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
+		}
+		break;
+	}
+
+	return 0;
+}
+#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+	return;
+}
+
+void dhd_wlfc_plat_deinit(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+	return;
+}
+
+bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
+{
+#ifdef SKIP_WLFC_ON_CONCURRENT
+
+#ifdef WL_CFG80211
+	struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
+	if (net)
+		/* enable flow control in vsdb mode */
+		return !(wl_cfg80211_is_concurrent_mode(net));
+#else
+	return TRUE; /* skip flow control */
+#endif /* WL_CFG80211 */
+
+#else
+	return FALSE;
+#endif /* SKIP_WLFC_ON_CONCURRENT */
+	return FALSE;
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+#include <linux/debugfs.h>
+
+typedef struct dhd_dbgfs {
+	struct dentry	*debugfs_dir;
+	struct dentry	*debugfs_mem;
+	dhd_pub_t	*dhdp;
+	uint32		size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	ssize_t rval;
+	uint32 tmp;
+	loff_t pos = *ppos;
+	size_t ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	/* XXX: The user can request any length they want, but they are getting 4 bytes */
+	/* Basically enforce aligned 4 byte reads.
It's up to the user to work out the details */ + tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3)); + + ret = copy_to_user(ubuf, &tmp, 4); + if (ret == count) + return -EFAULT; + + count -= ret; + *ppos = pos + count; + rval = count; + + return rval; +} + +static ssize_t +dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t ret; + uint32 buf; + + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; + + ret = copy_from_user(&buf, ubuf, sizeof(uint32)); + if (ret == count) + return -EFAULT; + + /* XXX: The user can request any length they want, but they are getting 4 bytes */ + /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */ + dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf); + + return count; +} + +loff_t +dhd_debugfs_lseek(struct file *file, loff_t off, int whence) +{ + loff_t pos = -1; + + switch (whence) { + case 0: + pos = off; + break; + case 1: + pos = file->f_pos + off; + break; + case 2: + pos = g_dbgfs.size - off; + } + return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos); +} + +static const struct file_operations dhd_dbg_state_ops = { + .read = dhd_dbg_state_read, + .write = dhd_debugfs_write, + .open = dhd_dbg_state_open, + .llseek = dhd_debugfs_lseek +}; + +static void dhd_dbgfs_create(void) +{ + if (g_dbgfs.debugfs_dir) { + g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, + NULL, &dhd_dbg_state_ops); + } +} + +void dhd_dbgfs_init(dhd_pub_t *dhdp) +{ + g_dbgfs.dhdp = dhdp; + g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ + + g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); + if (IS_ERR(g_dbgfs.debugfs_dir)) { + g_dbgfs.debugfs_dir = NULL; + return; + } + + dhd_dbgfs_create(); + + return; +} + +void dhd_dbgfs_remove(void) +{ + debugfs_remove(g_dbgfs.debugfs_mem); + debugfs_remove(g_dbgfs.debugfs_dir); + + bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); +} +#endif /* BCMDBGFS */ + +#ifdef CUSTOM_SET_CPUCORE +void dhd_set_cpucore(dhd_pub_t *dhd, int set) +{ + int e_dpc = 0, e_rxf = 0, retry_set = 0; + + if (!(dhd->chan_isvht80)) { + DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80)); + return; + } + + if (DPC_CPUCORE) { + do { + if (set == TRUE) { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(DPC_CPUCORE)); + } else { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(PRIMARY_CPUCORE)); + } + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc)); + return; + } + if (e_dpc < 0) + OSL_SLEEP(1); + } while (e_dpc < 0); + } + if (RXF_CPUCORE) { + do { + if (set == TRUE) { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + cpumask_of(RXF_CPUCORE)); + } else { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + cpumask_of(PRIMARY_CPUCORE)); + } + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf)); + return; + } + if (e_rxf < 0) + OSL_SLEEP(1); + } while (e_rxf < 0); + } + DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set)); + + return; +} +#endif /* CUSTOM_SET_CPUCORE */ +#if defined(DHD_TCP_WINSIZE_ADJUST) +static int dhd_port_list_match(int port) +{ + int i; + for (i = 0; i < MAX_TARGET_PORTS; i++) { + if (target_ports[i] == port) + return 1; + } + return 0; +} +static void dhd_adjust_tcp_winsize(int op_mode, struct 
sk_buff *skb) +{ + struct iphdr *ipheader; + struct tcphdr *tcpheader; + uint16 win_size; + int32 incremental_checksum; + + if (!(op_mode & DHD_FLAG_HOSTAP_MODE)) + return; + if (skb == NULL || skb->data == NULL) + return; + + ipheader = (struct iphdr*)(skb->data); + + if (ipheader->protocol == IPPROTO_TCP) { + tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2); + if (tcpheader) { + win_size = ntoh16(tcpheader->window); + if (win_size < MIN_TCP_WIN_SIZE && + dhd_port_list_match(ntoh16(tcpheader->dest))) { + incremental_checksum = ntoh16(tcpheader->check); + incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR; + if (incremental_checksum < 0) + --incremental_checksum; + tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR); + tcpheader->check = hton16((unsigned short)incremental_checksum); + } + } + skb_push(skb, (ipheader->ihl)<<2); + } +} +#endif /* DHD_TCP_WINSIZE_ADJUST */ + +#ifdef DHD_MCAST_REGEN +/* Get interface specific ap_isolate configuration */ +int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->mcast_regen_bss_enable; +} + +/* Set interface specific mcast_regen configuration */ +int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ifp->mcast_regen_bss_enable = val; + + /* Disable rx_pkt_chain feature for interface, if mcast_regen feature + * is enabled + */ + dhd_update_rx_pkt_chainable_state(dhdp, idx); + return BCME_OK; +} +#endif /* DHD_MCAST_REGEN */ + +/* Get interface specific ap_isolate configuration */ +int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->ap_isolate; +} + +/* Set interface specific ap_isolate configuration */ +int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + ifp->ap_isolate = val; + + return 0; +} + +#ifdef DHD_RND_DEBUG +/* + * XXX The filename to store .rnd.(in/out) is defined for each platform. + * - The default path of CUSTOMER_HW4 device is "PLATFORM_PATH/.memdump.info" + * - Brix platform will take default path "/installmedia/.memdump.info" + * New platforms can add their ifdefs accordingly below. 
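+ * The RND_IN (".rnd.in") and RND_OUT (".rnd.out") paths consumed by
+ * dhd_get_rnd_info()/dhd_dump_rnd_info() are derived from RNDINFO below.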
+ */ + +#ifdef CUSTOMER_HW4_DEBUG +#define RNDINFO PLATFORM_PATH".rnd" +#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY) +#define RNDINFO "/data/misc/wifi/.rnd" +#elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__) +#define RNDINFO "/data/misc/wifi/.rnd" +#elif defined(OEM_ANDROID) +#define RNDINFO_LIVE "/installmedia/.rnd" +#define RNDINFO_INST "/data/.rnd" +#define RNDINFO RNDINFO_LIVE +#else /* FC19 and Others */ +#define RNDINFO "/root/.rnd" +#endif /* CUSTOMER_HW4_DEBUG */ + +#define RND_IN RNDINFO".in" +#define RND_OUT RNDINFO".out" + +int +dhd_get_rnd_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + int ret = BCME_ERROR; + char *filepath = RND_IN; + uint32 file_mode = O_RDONLY; + mm_segment_t old_fs; + loff_t pos = 0; + + /* Read memdump info from the file */ + fp = filp_open(filepath, file_mode, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); +#if defined(CONFIG_X86) && defined(OEM_ANDROID) + /* Check if it is Live Brix Image */ + if (bcmstrstr(filepath, RNDINFO_LIVE)) { + goto err1; + } + /* Try if it is Installed Brix Image */ + filepath = RNDINFO_INST".in"; + DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath)); + fp = filp_open(filepath, file_mode, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto err1; + } +#else /* Non Brix Android platform */ + goto err1; +#endif /* CONFIG_X86 && OEM_ANDROID */ + } + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Handle success case */ + ret = vfs_read(fp, (char *)&dhd->rnd_len, sizeof(dhd->rnd_len), &pos); + if (ret < 0) { + DHD_ERROR(("%s: rnd_len read error, ret=%d\n", __FUNCTION__, ret)); + goto err2; + } + + dhd->rnd_buf = MALLOCZ(dhd->osh, dhd->rnd_len); + if (!dhd->rnd_buf) { + DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__)); + goto err2; + } + + ret = vfs_read(fp, (char *)dhd->rnd_buf, dhd->rnd_len, &pos); + if (ret < 0) { + DHD_ERROR(("%s: rnd_buf read error, ret=%d\n", __FUNCTION__, ret)); + goto err3; + } + + set_fs(old_fs); + filp_close(fp, NULL); + + DHD_ERROR(("%s: RND read from %s\n", __FUNCTION__, filepath)); + return BCME_OK; + +err3: + MFREE(dhd->osh, dhd->rnd_buf, dhd->rnd_len); + dhd->rnd_buf = NULL; +err2: + set_fs(old_fs); + filp_close(fp, NULL); +err1: + return BCME_ERROR; +} + +int +dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len) +{ + struct file *fp = NULL; + int ret = BCME_OK; + char *filepath = RND_OUT; + uint32 file_mode = O_CREAT | O_WRONLY | O_SYNC; + mm_segment_t old_fs; + loff_t pos = 0; + + /* Read memdump info from the file */ + fp = filp_open(filepath, file_mode, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); +#if defined(CONFIG_X86) && defined(OEM_ANDROID) + /* Check if it is Live Brix Image */ + if (bcmstrstr(filepath, RNDINFO_LIVE)) { + goto err1; + } + /* Try if it is Installed Brix Image */ + filepath = RNDINFO_INST".out"; + DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath)); + fp = filp_open(filepath, file_mode, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto err1; + } +#else /* Non Brix Android platform */ + goto err1; +#endif /* CONFIG_X86 && OEM_ANDROID */ + } + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Handle success case */ + ret = vfs_write(fp, (char *)&rnd_len, sizeof(rnd_len), &pos); + if (ret < 0) { + DHD_ERROR(("%s: rnd_len write error, ret=%d\n", __FUNCTION__, ret)); + goto err2; + } + + ret = vfs_write(fp, (char *)rnd_buf, rnd_len, 
&pos);
+	if (ret < 0) {
+		DHD_ERROR(("%s: rnd_buf write error, ret=%d\n", __FUNCTION__, ret));
+		goto err2;
+	}
+
+	set_fs(old_fs);
+	filp_close(fp, NULL);
+	DHD_ERROR(("%s: RND written to %s\n", __FUNCTION__, filepath));
+	return BCME_OK;
+
+err2:
+	set_fs(old_fs);
+	filp_close(fp, NULL);
+err1:
+	return BCME_ERROR;
+
+}
+#endif /* DHD_RND_DEBUG */
+
+#ifdef DHD_FW_COREDUMP
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
+{
+	dhd_dump_t *dump = NULL;
+	unsigned long flags = 0;
+	dhd_info_t *dhd_info = NULL;
+#if defined(DHD_LOG_DUMP) && !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+	log_dump_type_t type = DLD_BUF_TYPE_ALL;
+#endif /* DHD_LOG_DUMP && !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+	dhd_info = (dhd_info_t *)dhdp->info;
+	dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
+	if (dump == NULL) {
+		DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
+		return;
+	}
+	dump->buf = buf;
+	dump->bufsize = size;
+#ifdef BCMPCIE
+	dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
+		(uint32 *)(&dump->hscb_bufsize));
+#else
+	dump->hscb_bufsize = 0;
+#endif /* BCMPCIE */
+
+#ifdef DHD_LOG_DUMP
+	dhd_print_buf_addr(dhdp, "memdump", buf, size);
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+	/* Print out buffer information */
+	dhd_log_dump_buf_addr(dhdp, &type);
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+#endif /* DHD_LOG_DUMP */
+
+	if (dhdp->memdump_enabled == DUMP_MEMONLY) {
+		BUG_ON(1);
+	}
+
+	if ((dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
+		(dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
+		(dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT))
+	{
+		dhd_info->scheduled_memdump = FALSE;
+		dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
+		/* No need to collect debug dump for init failure */
+		if (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) {
+			return;
+		}
+#ifdef DHD_LOG_DUMP
+		{
+			log_dump_type_t *flush_type = NULL;
+			/* for dongle init fail cases, 'dhd_mem_dump' does
+			 * not call 'dhd_log_dump', so call it here.
+			 */
+			flush_type = MALLOCZ(dhdp->osh,
+				sizeof(log_dump_type_t));
+			if (flush_type) {
+				*flush_type = DLD_BUF_TYPE_ALL;
+				DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+				dhd_log_dump(dhdp->info, flush_type, 0);
+			}
+		}
+#endif /* DHD_LOG_DUMP */
+		return;
+	}
+
+	dhd_info->scheduled_memdump = TRUE;
+
+	/* bus busy bit for mem dump will be cleared in mem dump
+	 * work item context, after mem dump file is written
+	 */
+	DHD_GENERAL_LOCK(dhdp, flags);
+	DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+	DHD_ERROR(("%s: scheduling mem dump.. 
\n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, + DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} + +static void +dhd_mem_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp = NULL; + unsigned long flags = 0; + +#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) + int ret = 0; +#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */ + dhd_dump_t *dump = NULL; +#ifdef DHD_COREDUMP + char pc_fn[DHD_FUNC_STR_LEN] = "\0"; + char lr_fn[DHD_FUNC_STR_LEN] = "\0"; + char *map_path = VENDOR_PATH CONFIG_BCMDHD_MAP_PATH; + trap_t *tr; +#endif /* DHD_COREDUMP */ + + DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__)); + goto exit; + } + DHD_GENERAL_UNLOCK(dhdp, flags); + + dump = (dhd_dump_t *)event_info; + if (!dump) { + DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__)); + goto exit; + } + +#ifdef DHD_SDTC_ETB_DUMP + if (dhdp->collect_sdtc) { + dhd_sdtc_etb_dump(dhdp); + dhdp->collect_sdtc = FALSE; + } +#endif /* DHD_SDTC_ETB_DUMP */ + +#ifdef DHD_SSSR_DUMP + DHD_ERROR(("%s: sssr_enab=%d dhdp->sssr_inited=%d dhdp->collect_sssr=%d\n", + __FUNCTION__, sssr_enab, dhdp->sssr_inited, dhdp->collect_sssr)); + if (sssr_enab && dhdp->sssr_inited && dhdp->collect_sssr) { + if (fis_enab && dhdp->sssr_reg_info->rev3.fis_enab) { + int bcmerror = dhd_bus_fis_trigger(dhdp); + + if (bcmerror == BCME_OK) { + dhd_bus_fis_dump(dhdp); + } else { + DHD_ERROR(("%s: FIS trigger failed: %d\n", + __FUNCTION__, bcmerror)); + } + } else { + DHD_ERROR(("%s: FIS not enabled (%d:%d), collect legacy sssr\n", + __FUNCTION__, fis_enab, dhdp->sssr_reg_info->rev3.fis_enab)); + dhdpcie_sssr_dump(dhdp); + } + } + dhdp->collect_sssr = FALSE; +#endif /* DHD_SSSR_DUMP */ + +#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) + ret = dhd_wait_for_file_dump(dhdp); +#ifdef BOARD_HIKEY + /* For Hikey do force kernel write of socram if HAL dump fails */ + if (ret) { + if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) { + DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); + } + } +#endif /* BOARD_HIKEY */ +#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */ + +#ifdef DHD_COREDUMP + memset_s(dhdp->memdump_str, DHD_MEMDUMP_LONGSTR_LEN, 0, DHD_MEMDUMP_LONGSTR_LEN); + dhd_convert_memdump_type_to_str(dhdp->memdump_type, dhdp->memdump_str, + DHD_MEMDUMP_LONGSTR_LEN, dhdp->debug_dump_subcmd); + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_TRAP && + dhdp->dongle_trap_occured == TRUE) { + tr = &dhdp->last_trap_info; + dhd_lookup_map(dhdp->osh, map_path, + ltoh32(tr->epc), pc_fn, ltoh32(tr->r14), lr_fn); + sprintf(&dhdp->memdump_str[strlen(dhdp->memdump_str)], "_%.79s_%.79s", + pc_fn, lr_fn); + } + DHD_ERROR(("%s: dump reason: %s\n", __FUNCTION__, dhdp->memdump_str)); + if (wifi_platform_set_coredump(dhd->adapter, dump->buf, dump->bufsize, dhdp->memdump_str)) { + DHD_ERROR(("%s: writing SoC_RAM dump failed\n", __FUNCTION__)); +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success = FALSE; +#endif /* DHD_DEBUG_UART */ + } +#endif /* DHD_COREDUMP */ + + /* + * If kernel does not have file write access enabled + * then skip writing dumps to 
files.
+	 * The dumps will be pushed to the HAL layer, which will
+	 * write them into files.
+	 */
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+
+#ifdef D2H_MINIDUMP
+	/* dump minidump */
+	if (dhd_bus_is_minidump_enabled(dhdp)) {
+		dhd_d2h_minidump(&dhd->pub);
+	} else {
+		DHD_ERROR(("minidump is not enabled\n"));
+	}
+#endif /* D2H_MINIDUMP */
+
+	if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
+		DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+		dhd->pub.memdump_success = FALSE;
+#endif	/* DHD_DEBUG_UART */
+	}
+
+	if (dump->hscb_buf && dump->hscb_bufsize) {
+		if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
+			dump->hscb_bufsize, "mem_dump_hscb")) {
+			DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+			dhd->pub.memdump_success = FALSE;
+#endif	/* DHD_DEBUG_UART */
+		}
+	}
+
+#ifndef DHD_PKT_LOGGING
+	clear_debug_dump_time(dhdp->debug_dump_time_str);
+#endif /* !DHD_PKT_LOGGING */
+
+	/* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
+	 * context, no need to schedule another work queue for log dump. In case of a
+	 * user-initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP), the cfg
+	 * layer itself schedules the log_dump work queue, so that path is not
+	 * disturbed. If 'dhd_mem_dump' is called directly then we will not collect
+	 * debug_dump, as it may be called from a non-sleepable context.
+	 */
+#ifdef DHD_LOG_DUMP
+	if (dhd->scheduled_memdump &&
+		dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
+		log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
+			sizeof(log_dump_type_t));
+		if (flush_type) {
+			*flush_type = DLD_BUF_TYPE_ALL;
+			DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+			dhd_log_dump(dhd, flush_type, 0);
+		}
+	}
+#endif /* DHD_LOG_DUMP */
+
+	/* before calling BUG_ON, wait for other logs to be dumped.
+ * we cannot wait in case dhd_mem_dump is called directly + * as it may not be from a sleepable context + */ + if (dhd->scheduled_memdump) { + uint bitmask = 0; + int timeleft = 0; +#ifdef DHD_SSSR_DUMP + bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP; +#endif + if (bitmask != 0) { + DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + timeleft = dhd_os_busbusy_wait_bitmask(dhdp, + &dhdp->dhd_bus_busy_state, bitmask, 0); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + } + } + } +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */ + + if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && +#ifdef WLAN_ACCEL_BOOT + /* BUG_ON only if wlan accel boot up is done */ + dhd->wl_accel_boot_on_done == TRUE && +#endif /* WLAN_ACCEL_BOOT */ +#ifdef DHD_LOG_DUMP + dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && +#endif /* DHD_LOG_DUMP */ + dhd->pub.memdump_type != DUMP_TYPE_BY_USER && +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success == TRUE && +#endif /* DHD_DEBUG_UART */ +#ifdef DNGL_EVENT_SUPPORT + dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT && +#endif /* DNGL_EVENT_SUPPORT */ + dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) { +#ifdef SHOW_LOGTRACE + /* Wait till logtrace context is flushed */ + dhd_flush_logtrace_process(dhd); +#endif /* SHOW_LOGTRACE */ + +#ifdef BTLOG + /* Wait till bt_log_dispatcher_work finishes */ + cancel_work_sync(&dhd->bt_log_dispatcher_work); +#endif /* BTLOG */ + +#ifdef EWP_EDL + cancel_delayed_work_sync(&dhd->edl_dispatcher_work); +#endif + + printf("%s\n", info_string); + printf("MAC %pM\n", &dhdp->mac); + DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__)); +// BUG_ON(1); + } + +exit: + if (dump) { + MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); + } + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + dhd->scheduled_memdump = FALSE; + +#ifdef OEM_ANDROID + if (dhdp->hang_was_pending) { + DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__)); + dhd_os_send_hang_message(dhdp); + dhdp->hang_was_pending = 0; + } +#endif /* OEM_ANDROID */ + DHD_ERROR(("%s: EXIT \n", __FUNCTION__)); + + return; +} +#endif /* DHD_FW_COREDUMP */ + +#ifdef D2H_MINIDUMP +void +dhd_d2h_minidump(dhd_pub_t *dhdp) +{ + char d2h_minidump[128]; + dhd_dma_buf_t *minidump_buf; + + minidump_buf = dhd_prot_get_minidump_buf(dhdp); + if (minidump_buf->va == NULL) { + DHD_ERROR(("%s: minidump_buf is NULL\n", __FUNCTION__)); + return; + } + + /* Init file name */ + memset(d2h_minidump, 0, sizeof(d2h_minidump)); + snprintf(d2h_minidump, sizeof(d2h_minidump), "%s", "d2h_minidump"); + + if (write_dump_to_file(dhdp, (uint8 *)minidump_buf->va, minidump_buf->len, d2h_minidump)) { + DHD_ERROR(("%s: failed to dump d2h_minidump to file\n", __FUNCTION__)); + } +} +#endif /* D2H_MINIDUMP */ + +#ifdef DHD_SSSR_DUMP +uint +dhd_sssr_dig_buf_size(dhd_pub_t *dhdp) +{ + uint dig_buf_size = 0; + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + switch (dhdp->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3: + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + if ((dhdp->sssr_reg_info->rev2.length > + OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) && + dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) { + dig_buf_size = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size; + } + break; + case SSSR_REG_INFO_VER_1 : + if 
(dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) { + dig_buf_size = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size; + } else if ((dhdp->sssr_reg_info->rev1.length > + OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) { + dig_buf_size = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size; + } + break; + case SSSR_REG_INFO_VER_0 : + if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) { + dig_buf_size = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size; + } + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + return dig_buf_size; +} + +uint +dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp) +{ + uint dig_buf_addr = 0; + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + switch (dhdp->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + if ((dhdp->sssr_reg_info->rev2.length > + OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) && + dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) { + dig_buf_addr = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr; + } + break; + case SSSR_REG_INFO_VER_1 : + if (dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) { + dig_buf_addr = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_addr; + } else if ((dhdp->sssr_reg_info->rev1.length > + OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) { + dig_buf_addr = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_addr; + } + break; + case SSSR_REG_INFO_VER_0 : + if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) { + dig_buf_addr = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_addr; + } + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + return dig_buf_addr; +} + +uint +dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx) +{ + uint mac_buf_size = 0; + uint8 num_d11cores; + + num_d11cores = dhd_d11_slices_num_get(dhdp); + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + if (core_idx < num_d11cores) { + switch (dhdp->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + mac_buf_size = dhdp->sssr_reg_info->rev2.mac_regs[core_idx].sr_size; + break; + case SSSR_REG_INFO_VER_1 : + mac_buf_size = dhdp->sssr_reg_info->rev1.mac_regs[core_idx].sr_size; + break; + case SSSR_REG_INFO_VER_0 : + mac_buf_size = dhdp->sssr_reg_info->rev0.mac_regs[core_idx].sr_size; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + } + + return mac_buf_size; +} + +uint +dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx) +{ + uint xmtaddress = 0; + uint8 num_d11cores; + + num_d11cores = dhd_d11_slices_num_get(dhdp); + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + if (core_idx < num_d11cores) { + switch (dhdp->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + xmtaddress = dhdp->sssr_reg_info->rev2. + mac_regs[core_idx].base_regs.xmtaddress; + break; + case SSSR_REG_INFO_VER_1 : + xmtaddress = dhdp->sssr_reg_info->rev1. + mac_regs[core_idx].base_regs.xmtaddress; + break; + case SSSR_REG_INFO_VER_0 : + xmtaddress = dhdp->sssr_reg_info->rev0. 
+ mac_regs[core_idx].base_regs.xmtaddress; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + } + + return xmtaddress; +} + +uint +dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx) +{ + uint xmtdata = 0; + uint8 num_d11cores; + + num_d11cores = dhd_d11_slices_num_get(dhdp); + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + if (core_idx < num_d11cores) { + switch (dhdp->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + xmtdata = dhdp->sssr_reg_info->rev2. + mac_regs[core_idx].base_regs.xmtdata; + break; + case SSSR_REG_INFO_VER_1 : + xmtdata = dhdp->sssr_reg_info->rev1. + mac_regs[core_idx].base_regs.xmtdata; + break; + case SSSR_REG_INFO_VER_0 : + xmtdata = dhdp->sssr_reg_info->rev0. + mac_regs[core_idx].base_regs.xmtdata; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + } + + return xmtdata; +} + +#ifdef DHD_SSSR_DUMP_BEFORE_SR +int +dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len) +{ + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; + uint dig_buf_size = 0; + + dig_buf_size = dhd_sssr_dig_buf_size(dhdp); + + if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before, + NULL, user_buf, dig_buf_size, &pos); + } + return ret; +} + +int +dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core) +{ + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; + + if (dhdp->sssr_d11_before[core] && + dhdp->sssr_d11_outofreset[core] && + (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core], + NULL, user_buf, len, &pos); + } + return ret; +} +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + +int +dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len) +{ + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; + uint dig_buf_size = 0; + + dig_buf_size = dhd_sssr_dig_buf_size(dhdp); + + if (dhdp->sssr_dig_buf_after) { + ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after, + NULL, user_buf, dig_buf_size, &pos); + } + return ret; +} + +int +dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core) +{ + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; + + if (dhdp->sssr_d11_after[core] && + dhdp->sssr_d11_outofreset[core]) { + ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core], + NULL, user_buf, len, &pos); + } + return ret; +} + +void +dhd_sssr_dump_to_file(dhd_info_t* dhdinfo) +{ + dhd_info_t *dhd = dhdinfo; + dhd_pub_t *dhdp; + int i; +#ifdef DHD_SSSR_DUMP_BEFORE_SR + char before_sr_dump[128]; +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + char after_sr_dump[128]; + unsigned long flags = 0; + uint dig_buf_size = 0; + uint8 num_d11cores = 0; + uint d11_buf_size = 0; + + DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp); + if 
(DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+		DHD_GENERAL_UNLOCK(dhdp, flags);
+		DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
+		goto exit;
+	}
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+
+	num_d11cores = dhd_d11_slices_num_get(dhdp);
+
+	for (i = 0; i < num_d11cores; i++) {
+		/* Init file name */
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+		memset(before_sr_dump, 0, sizeof(before_sr_dump));
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+		memset(after_sr_dump, 0, sizeof(after_sr_dump));
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+		snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
+			"sssr_dump_core", i, "before_SR");
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+		snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
+			"sssr_dump_core", i, "after_SR");
+
+		d11_buf_size = dhd_sssr_mac_buf_size(dhdp, i);
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+		if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
+			(dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
+				d11_buf_size, before_sr_dump)) {
+				DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
+					__FUNCTION__));
+			}
+		}
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+		if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
+			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
+				d11_buf_size, after_sr_dump)) {
+				DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
+					__FUNCTION__));
+			}
+		}
+	}
+
+	dig_buf_size = dhd_sssr_dig_buf_size(dhdp);
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+	if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
+			dig_buf_size, "sssr_dump_dig_before_SR")) {
+			DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
+				__FUNCTION__));
+		}
+	}
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+	if (dhdp->sssr_dig_buf_after) {
+		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
+			dig_buf_size, "sssr_dump_dig_after_SR")) {
+			DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
+				__FUNCTION__));
+		}
+	}
+
+exit:
+	DHD_GENERAL_LOCK(dhdp, flags);
+	DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
+	dhd_os_busbusy_wake(dhdp);
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+}
+
+void
+dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
+{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+	dhdp->sssr_dump_mode = dump_mode;
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+	/*
+	 * If the kernel does not have file write access enabled,
+	 * skip writing dumps to files. The dumps will be pushed
+	 * to the HAL layer, which will write them into files.
+	 */
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+	return;
+#else
+	/*
+	 * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
+	 * Without workqueue -
+	 * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT
+	 * : These are called in their own handler, not in interrupt context
+	 * With workqueue - all other DUMP_TYPEs : dhd_mem_dump is called in workqueue
+	 * Thus, there is no need to dump SSSR from a separate workqueue
+	 */
+	DHD_ERROR(("%s: writing sssr dump to file... 
\n", __FUNCTION__));
+	dhd_sssr_dump_to_file(dhdp->info);
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+}
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_SDTC_ETB_DUMP
+void
+dhd_sdtc_etb_dump(dhd_pub_t *dhd)
+{
+	etb_info_t etb_info;
+	uint8 *sdtc_etb_dump;
+	uint8 *sdtc_etb_mempool;
+	uint etb_dump_len;
+	int ret = 0;
+
+	if (!dhd->sdtc_etb_inited) {
+		DHD_ERROR(("%s: SDTC ETB dump not supported\n", __FUNCTION__));
+		return;
+	}
+
+	bzero(&etb_info, sizeof(etb_info));
+
+	if ((ret = dhd_bus_get_etb_info(dhd, dhd->etb_addr_info.etbinfo_addr, &etb_info))) {
+		DHD_ERROR(("%s: failed to get etb info %d\n", __FUNCTION__, ret));
+		return;
+	}
+
+	if (etb_info.read_bytes == 0) {
+		DHD_ERROR(("%s: ETB is of zero size. Hence do not collect SDTC ETB\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_ERROR(("%s etb_info ver:%d len:%d rwp:%d etb_full:%d etb:addr:0x%x, len:%d\n",
+		__FUNCTION__, etb_info.version, etb_info.len,
+		etb_info.read_write_p, etb_info.etb_full,
+		etb_info.addr, etb_info.read_bytes));
+
+	/*
+	 * etb mempool format = etb_info + etb
+	 */
+	etb_dump_len = etb_info.read_bytes + sizeof(etb_info);
+	if (etb_dump_len > DHD_SDTC_ETB_MEMPOOL_SIZE) {
+		DHD_ERROR(("%s etb_dump_len: %d is more than the allocated %d. Hence cannot collect\n",
+			__FUNCTION__, etb_dump_len, DHD_SDTC_ETB_MEMPOOL_SIZE));
+		return;
+	}
+	sdtc_etb_mempool = dhd->sdtc_etb_mempool;
+	memcpy(sdtc_etb_mempool, &etb_info, sizeof(etb_info));
+	sdtc_etb_dump = sdtc_etb_mempool + sizeof(etb_info);
+	if ((ret = dhd_bus_get_sdtc_etb(dhd, sdtc_etb_dump, etb_info.addr, etb_info.read_bytes))) {
+		DHD_ERROR(("%s: failed to get SDTC ETB, ret: %d\n", __FUNCTION__, ret));
+		return;
+	}
+
+	if (write_dump_to_file(dhd, (uint8 *)sdtc_etb_mempool,
+		etb_dump_len, "sdtc_etb_dump")) {
+		DHD_ERROR(("%s: failed to dump sdtc_etb to file\n",
+			__FUNCTION__));
+	}
+}
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef DHD_LOG_DUMP
+static void
+dhd_log_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	log_dump_type_t *type = (log_dump_type_t *)event_info;
+
+	if (!dhd || !type) {
+		DHD_ERROR(("%s: dhd/type is NULL\n", __FUNCTION__));
+		return;
+	}
+
+#ifdef WL_CFG80211
+	/* flush the fw preserve logs */
+	wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
+		FW_LOGSET_MASK_ALL);
+#endif
+
+	/* there are currently 3 possible contexts from which
+	 * log dump can be scheduled -
+	 * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command
+	 * 3.HEALTH CHECK event
+	 * The concise debug info buffer is a shared resource
+	 * and in case a trap is one of the contexts then both the
+	 * scheduled work queues need to run because trap data is
+	 * essential for debugging. Hence a mutex lock is acquired
+	 * before calling do_dhd_log_dump().
+	 */
+	DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+	dhd_os_logdump_lock(&dhd->pub);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
+		DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
+	}
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_os_logdump_unlock(&dhd->pub);
+}
+
+void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
+{
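+	/* 'type' points to a heap allocated log_dump_type_t; it travels
+	 * through the deferred work item into dhd_log_dump() and is
+	 * eventually freed by do_dhd_log_dump().
+	 */
+	DHD_ERROR(("%s: scheduling log dump.. 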
\n", __FUNCTION__)); + + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + type, DHD_WQ_WORK_DHD_LOG_DUMP, + dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} + +static void +dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size) +{ +#ifdef DHD_FW_COREDUMP + if ((dhdp->memdump_enabled == DUMP_MEMONLY) || + (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) || + (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) || +#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG + (dhdp->op_mode & DHD_FLAG_MFG_MODE && + (dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT-1)) || +#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */ + FALSE) +#else + if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) +#endif + { +#if defined(CONFIG_ARM64) + DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", + name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size)); +#elif defined(__ARM_ARCH_7A__) + DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", + name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size)); +#endif /* __ARM_ARCH_7A__ */ + } +} + +static void +dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type) +{ + int i; + unsigned long wr_size = 0; + struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0]; + size_t log_size = 0; + char buf_name[DHD_PRINT_BUF_NAME_LEN]; + dhd_dbg_ring_t *ring = NULL; + + BCM_REFERENCE(ring); + + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + log_size = (unsigned long)dld_buf->max - + (unsigned long)dld_buf->buffer; + if (dld_buf->wraparound) { + wr_size = log_size; + } else { + wr_size = (unsigned long)dld_buf->present - + (unsigned long)dld_buf->front; + } + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size); + } + +#ifdef DEBUGABILITY_ECNTRS_LOGGING + /* periodic flushing of ecounters is NOT supported */ + if (*type == DLD_BUF_TYPE_ALL && + logdump_ecntr_enable && + dhdp->ecntr_dbg_ring) { + + ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring; + dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE); + dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf, + LOG_DUMP_ECNTRS_MAX_BUFSIZE); + } +#endif /* DEBUGABILITY_ECNTRS_LOGGING */ + +#if defined(BCMPCIE) + if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) { + dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } +#endif /* BCMPCIE */ + +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) + /* if health check event was received */ + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data, + HEALTH_CHK_BUF_SIZE); + } +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ + + /* append the concise debug information */ + if (dhdp->concise_dbg_buf) { + dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf, + CONCISE_DUMP_BUFLEN); + } +} + +#ifdef CUSTOMER_HW4_DEBUG +static void +dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len) +{ + char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1]; + char *end = NULL; + unsigned long plen = 0; + 
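+	/* The source buffer is not necessarily NUL terminated, so print it
+	 * in DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE sized chunks: each chunk is
+	 * copied into tmp_buf and explicitly terminated before being handed
+	 * to printf. E.g. with a (hypothetical) 4KB chunk size, a 10KB log
+	 * comes out as two full chunks plus a 2KB remainder.
+	 */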
+	if (!bufptr || !len)
+		return;
+
+	memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
+	end = bufptr + len;
+	while (bufptr < end) {
+		if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
+			memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
+			tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
+			printf("%s", tmp_buf);
+			bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
+		} else {
+			plen = (unsigned long)end - (unsigned long)bufptr;
+			memcpy(tmp_buf, bufptr, plen);
+			tmp_buf[plen] = '\0';
+			printf("%s", tmp_buf);
+			bufptr += plen;
+		}
+	}
+}
+
+static void
+dhd_log_dump_print_tail(dhd_pub_t *dhdp,
+		struct dhd_log_dump_buf *dld_buf,
+		uint tail_len)
+{
+	char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
+	unsigned long len_flush1 = 0, len_flush2 = 0;
+	unsigned long flags = 0;
+
+	/* need to hold the lock before accessing 'present' and 'remain' ptrs */
+	DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
+	flush_ptr1 = dld_buf->present - tail_len;
+	if (flush_ptr1 >= dld_buf->front) {
+		/* tail content is within the buffer */
+		flush_ptr2 = NULL;
+		len_flush1 = tail_len;
+	} else if (dld_buf->wraparound) {
+		/* tail content spans the buffer length i.e., wrap around */
+		flush_ptr1 = dld_buf->front;
+		len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
+		len_flush2 = (unsigned long)tail_len - len_flush1;
+		flush_ptr2 = (char *)((unsigned long)dld_buf->max -
+			(unsigned long)len_flush2);
+	} else {
+		/* amount of logs in the buffer is less than the tail size */
+		flush_ptr1 = dld_buf->front;
+		flush_ptr2 = NULL;
+		len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
+	}
+	DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
+
+	printf("\n================= LOG_DUMP tail =================\n");
+	if (flush_ptr2) {
+		dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
+	}
+	dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
+	printf("\n===================================================\n");
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#ifdef DHD_SSSR_DUMP
+int
+dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
+{
+	int i = 0;
+	uint dig_buf_size = 0;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	/* core 0 */
+	i = 0;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+
+		arr_len[SSSR_C0_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
+		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
+			arr_len[SSSR_C0_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+		dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
+			dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+	}
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+	if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+		arr_len[SSSR_C0_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i);
+		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
+			arr_len[SSSR_C0_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+		dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
+			dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+	}
+
+	/* core 1 */
+	i = 1;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+		arr_len[SSSR_C1_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
+		DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
+			arr_len[SSSR_C1_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+		dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
+			dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
+#endif
/* DHD_LOG_DUMP */ + } +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) { + arr_len[SSSR_C1_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i); + DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__, + arr_len[SSSR_C1_D11_AFTER])); +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER", + dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]); +#endif /* DHD_LOG_DUMP */ + } + + /* core 2 scan core */ + if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_2) { + i = 2; +#ifdef DHD_SSSR_DUMP_BEFORE_SR + if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] && + (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + arr_len[SSSR_C2_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i); + DHD_ERROR(("%s: arr_len[SSSR_C2_D11_BEFORE] : %d\n", __FUNCTION__, + arr_len[SSSR_C2_D11_BEFORE])); +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "SSSR_C2_D11_BEFORE", + dhd->sssr_d11_before[i], arr_len[SSSR_C2_D11_BEFORE]); +#endif /* DHD_LOG_DUMP */ + } +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) { + arr_len[SSSR_C2_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i); + DHD_ERROR(("%s: arr_len[SSSR_C2_D11_AFTER] : %d\n", __FUNCTION__, + arr_len[SSSR_C2_D11_AFTER])); +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "SSSR_C2_D11_AFTER", + dhd->sssr_d11_after[i], arr_len[SSSR_C2_D11_AFTER]); +#endif /* DHD_LOG_DUMP */ + } + } + + /* DIG core or VASIP */ + dig_buf_size = dhd_sssr_dig_buf_size(dhd); +#ifdef DHD_SSSR_DUMP_BEFORE_SR + arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_dig_buf_before) ? dig_buf_size : 0; + DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__, + arr_len[SSSR_DIG_BEFORE])); +#ifdef DHD_LOG_DUMP + if (dhd->sssr_dig_buf_before) { + dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE", + dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]); + } +#endif /* DHD_LOG_DUMP */ +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + + arr_len[SSSR_DIG_AFTER] = (dhd->sssr_dig_buf_after) ? 
dig_buf_size : 0; + DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__, + arr_len[SSSR_DIG_AFTER])); +#ifdef DHD_LOG_DUMP + if (dhd->sssr_dig_buf_after) { + dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER", + dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]); + } +#endif /* DHD_LOG_DUMP */ + + return BCME_OK; +} + +void +dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len) +{ + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhd_pub_t *dhdp = &dhd_info->pub; + + if (dhdp->sssr_dump_collected) { + dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len); + } +} +#endif /* DHD_SSSR_DUMP */ + +uint32 +dhd_get_time_str_len() +{ + char *ts = NULL, time_str[128]; + + ts = dhd_log_dump_get_timestamp(); + snprintf(time_str, sizeof(time_str), + "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); + return strlen(time_str); +} + +#if defined(BCMPCIE) +uint32 +dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp) +{ + int length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return length; + + if (dhdp->extended_trap_data) { + length = (strlen(EXT_TRAP_LOG_HDR) + + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } + return length; +} +#endif /* BCMPCIE */ + +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) +uint32 +dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp) +{ + int length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return length; + + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + length = (strlen(HEALTH_CHK_LOG_HDR) + + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE); + } + return length; +} +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ + +uint32 +dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp) +{ + uint32 length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + int remain_len = 0; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return length; + + if (dhdp->concise_dbg_buf) { + remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) { + DHD_ERROR(("%s: error getting concise debug info ! 
remain_len: %d\n", + __FUNCTION__, remain_len)); + return length; + } + + length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len); + } + + length += (uint32)(strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr)); + return length; +} + +uint32 +dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp) +{ + int length = 0; + dhd_info_t *dhd_info; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return length; + + if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) { + length = dhd_log_dump_cookie_len(dhdp); + } + return length; + +} + +#ifdef DHD_DUMP_PCIE_RINGS +uint32 +dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp) +{ + uint32 length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + uint16 h2d_flowrings_total; + int remain_len = 0; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return length; + + if (dhdp->concise_dbg_buf) { + remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) { + DHD_ERROR(("%s: error getting concise debug info ! remain_len: %d\n", + __FUNCTION__, remain_len)); + return length; + } + + length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len); + } + + length += (uint32) strlen(FLOWRING_DUMP_HDR); + length += (uint32) sizeof(sec_hdr); + h2d_flowrings_total = dhd_get_max_flow_rings(dhdp); + length += ((D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM) + + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM) + + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM) + + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM) + + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM) +#ifdef EWP_EDL + + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM)); +#else + + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM) + + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM)); +#endif /* EWP_EDL */ + +#if defined(DHD_HTPUT_TUNABLES) + /* flowring lengths are different for HTPUT rings, handle accordingly */ + length += ((H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_htput_max_txpost(dhdp) * + HTPUT_TOTAL_FLOW_RINGS) + + (H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) * + (h2d_flowrings_total - HTPUT_TOTAL_FLOW_RINGS))); +#else + length += (H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) * + h2d_flowrings_total); +#endif /* DHD_HTPUT_TUNABLES */ + + return length; +} +#endif /* DHD_DUMP_PCIE_RINGS */ + +#ifdef EWP_ECNTRS_LOGGING +uint32 +dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp) +{ + dhd_info_t *dhd_info; + log_dump_section_hdr_t sec_hdr; + int length = 0; + dhd_dbg_ring_t *ring; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return length; + + if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) { + ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring; + length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr); + } + return length; +} +#endif /* EWP_ECNTRS_LOGGING */ + +int +dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, int type, void *pos) +{ + int ret = BCME_OK; + struct dhd_log_dump_buf *dld_buf; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + dld_buf = &g_dld_buf[type]; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } else if (!dhdp) { + return BCME_ERROR; + 
}
+
+	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+
+	dhd_init_sec_hdr(&sec_hdr);
+
+	/* write the section header first */
+	ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
+		strlen(dld_hdrs[type].hdr_str), pos);
+	if (ret < 0)
+		goto exit;
+	len -= (uint32)strlen(dld_hdrs[type].hdr_str);
+	len -= (uint32)sizeof(sec_hdr);
+	sec_hdr.type = dld_hdrs[type].sec_type;
+	sec_hdr.length = len;
+	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+	if (ret < 0)
+		goto exit;
+	ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
+	if (ret < 0)
+		goto exit;
+
+exit:
+	return ret;
+}
+
+static int
+dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
+{
+	unsigned long flags = 0;
+#ifdef EWP_EDL
+	int i = 0;
+#endif /* EWP_EDL */
+	dhd_info_t *dhd_info = NULL;
+
+	BCM_REFERENCE(dhd_info);
+
+	/* if dhdp is null, it's extremely unlikely that log dump will be scheduled,
+	 * so not freeing 'type' here is OK; even if we wanted to free 'type'
+	 * we could not do so, since 'dhdp->osh' is unavailable
+	 * as dhdp is null
+	 */
+	if (!dhdp || !type) {
+		if (dhdp) {
+			DHD_GENERAL_LOCK(dhdp, flags);
+			DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
+			dhd_os_busbusy_wake(dhdp);
+			DHD_GENERAL_UNLOCK(dhdp, flags);
+		}
+		return BCME_ERROR;
+	}
+
+#if defined(BCMPCIE)
+	if (dhd_bus_get_linkdown(dhdp)) {
+		/* As the link is down, do not collect any data over PCIe.
+		 * Also return BCME_OK to the caller, so that the caller can
+		 * dump all the outstanding data to the file
+		 */
+		return BCME_OK;
+	}
+#endif /* BCMPCIE */
+
+	dhd_info = (dhd_info_t *)dhdp->info;
+	/* in case of trap get preserve logs from ETD */
+#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
+	if (dhdp->dongle_trap_occured &&
+		dhdp->extended_trap_data) {
+		dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
+			&dhd_info->event_data);
+	}
+#endif /* BCMPCIE */
+
+	/* flush the event work items to get any fw events/logs
+	 * flush_work is a blocking call
+	 */
+#ifdef SHOW_LOGTRACE
+#ifdef EWP_EDL
+	if (dhd_info->pub.dongle_edl_support) {
+		/* wait till existing edl items are processed */
+		dhd_flush_logtrace_process(dhd_info);
+		/* dhd_flush_logtrace_process will ensure the work items in the ring
+		 * (EDL ring) from rd to wr are processed. But if wr had
+		 * wrapped around, only the work items from rd to ring-end are processed.
+ * So to ensure that the work items at the + * beginning of ring are also processed in the wrap around case, call + * it twice + */ + for (i = 0; i < 2; i++) { + /* blocks till the edl items are processed */ + dhd_flush_logtrace_process(dhd_info); + } + } else { + dhd_flush_logtrace_process(dhd_info); + } +#else + dhd_flush_logtrace_process(dhd_info); +#endif /* EWP_EDL */ +#endif /* SHOW_LOGTRACE */ + +#ifdef CUSTOMER_HW4_DEBUG + /* print last 'x' KB of preserve buffer data to kmsg console + * this is to address cases where debug_dump is not + * available for debugging + */ + dhd_log_dump_print_tail(dhdp, + &g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize); +#endif /* CUSTOMER_HW4_DEBUG */ + return BCME_OK; +} + +int +dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size) +{ + int ret; + int len = 0; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + memset(dump_path, 0, size); + + ret = snprintf(dump_path, size, "%s", + DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE); + len += ret; + + /* Keep the same timestamp across different dump logs */ + if (!dhdp->logdump_periodic_flush) { + struct rtc_time tm; + clear_debug_dump_time(dhdp->debug_dump_time_str); + get_debug_dump_time(dhdp->debug_dump_time_str); + sscanf(dhdp->debug_dump_time_str, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS, + &tm.tm_year, &tm.tm_mon, &tm.tm_mday, + &tm.tm_hour, &tm.tm_min, &tm.tm_sec); + ret = snprintf(dump_path + len, size - len, "_" DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS, + tm.tm_year, tm.tm_mon, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); + len += ret; + } + + ret = 0; + switch (dhdp->debug_dump_subcmd) { + case CMD_UNWANTED: + ret = snprintf(dump_path + len, size - len, "%s", DHD_DUMP_SUBSTR_UNWANTED); + break; + case CMD_DISCONNECTED: + ret = snprintf(dump_path + len, size - len, "%s", DHD_DUMP_SUBSTR_DISCONNECTED); + break; + default: + break; + } + len += ret; + + return BCME_OK; +} + +uint32 +dhd_get_dld_len(int log_type) +{ + unsigned long wr_size = 0; + unsigned long buf_size = 0; + unsigned long flags = 0; + struct dhd_log_dump_buf *dld_buf; + log_dump_section_hdr_t sec_hdr; + + /* calculate the length of the log */ + dld_buf = &g_dld_buf[log_type]; + buf_size = (unsigned long)dld_buf->max - + (unsigned long)dld_buf->buffer; + + if (dld_buf->wraparound) { + wr_size = buf_size; + } else { + /* need to hold the lock before accessing 'present' and 'remain' ptrs */ + DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags); + wr_size = (unsigned long)dld_buf->present - + (unsigned long)dld_buf->front; + DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags); + } + return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str)); +} + +static void +dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size) +{ + char *ts = NULL; + memset(time_str, 0, size); + ts = dhd_log_dump_get_timestamp(); + snprintf(time_str, size, + "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); +} + +int +dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos) +{ + char *ts = NULL; + int ret = 0; + char time_str[128]; + + memset_s(time_str, sizeof(time_str), 0, sizeof(time_str)); + ts = dhd_log_dump_get_timestamp(); + snprintf(time_str, sizeof(time_str), + "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); + + /* write the timestamp hdr to the file first */ + ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos); + if (ret < 0) { + 
DHD_ERROR(("write file error, err = %d\n", ret)); + } + return ret; +} + +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) +int +dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + int ret = BCME_OK; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + dhd_init_sec_hdr(&sec_hdr); + + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + /* write the section header first */ + ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf, + strlen(HEALTH_CHK_LOG_HDR), pos); + if (ret < 0) + goto exit; + + len -= (uint32)strlen(HEALTH_CHK_LOG_HDR); + sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK; + sec_hdr.length = HEALTH_CHK_BUF_SIZE; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + + len -= (uint32)sizeof(sec_hdr); + /* write the log */ + ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp, + user_buf, len, pos); + if (ret < 0) + goto exit; + } +exit: + return ret; +} +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ + +#if defined(BCMPCIE) +int +dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + int ret = BCME_OK; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + dhd_init_sec_hdr(&sec_hdr); + + /* append extended trap data to the file in case of traps */ + if (dhdp->dongle_trap_occured && + dhdp->extended_trap_data) { + /* write the section header first */ + ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf, + strlen(EXT_TRAP_LOG_HDR), pos); + if (ret < 0) + goto exit; + + len -= (uint32)strlen(EXT_TRAP_LOG_HDR); + sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP; + sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN; + ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + + len -= (uint32)sizeof(sec_hdr); + /* write the log */ + ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp, + user_buf, len, pos); + if (ret < 0) + goto exit; + } +exit: + return ret; +} +#endif /* BCMPCIE */ + +int +dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + int ret = BCME_OK; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + dhd_init_sec_hdr(&sec_hdr); + + ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos); + if (ret < 0) + goto exit; + + len -= (uint32)strlen(DHD_DUMP_LOG_HDR); + sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP; + sec_hdr.length = len; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + + len -= (uint32)sizeof(sec_hdr); + + if (dhdp->concise_dbg_buf) { + dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos); + if (ret < 0) + goto exit; + } + +exit: + return ret; +} + +int +dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + int ret = 
BCME_OK; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) { + ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos); + } + return ret; +} + +#ifdef DHD_DUMP_PCIE_RINGS +int +dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + log_dump_section_hdr_t sec_hdr; + int ret = BCME_OK; + int remain_len = 0; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + dhd_init_sec_hdr(&sec_hdr); + + remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) { + DHD_ERROR(("%s: error getting concise debug info !\n", + __FUNCTION__)); + return BCME_ERROR; + } + memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN); + + /* write the section header first */ + ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf, + strlen(FLOWRING_DUMP_HDR), pos); + if (ret < 0) + goto exit; + + /* Write the ring summary */ + ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, + (CONCISE_DUMP_BUFLEN - remain_len), pos); + if (ret < 0) + goto exit; + + sec_hdr.type = LOG_DUMP_SECTION_FLOWRING; + sec_hdr.length = len; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + + /* write the log */ + ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE); + if (ret < 0) + goto exit; + +exit: + return ret; +} +#endif /* DHD_DUMP_PCIE_RINGS */ + +#ifdef EWP_ECNTRS_LOGGING +int +dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + log_dump_section_hdr_t sec_hdr; + int ret = BCME_OK; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + dhd_init_sec_hdr(&sec_hdr); + + if (logdump_ecntr_enable && + dhdp->ecntr_dbg_ring) { + sec_hdr.type = LOG_DUMP_SECTION_ECNTRS; + ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring, + user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS); + } + return ret; + +} +#endif /* EWP_ECNTRS_LOGGING */ + +#ifdef EWP_RTT_LOGGING +int +dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + log_dump_section_hdr_t sec_hdr; + int ret = BCME_OK; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return BCME_ERROR; + + dhd_init_sec_hdr(&sec_hdr); + + if (logdump_rtt_enable && dhdp->rtt_dbg_ring) { + ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring, + user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT); + } + return ret; + +} +#endif /* EWP_RTT_LOGGING */ + +#ifdef DHD_STATUS_LOGGING +int +dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) { + return BCME_ERROR; + } + + return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos); +} + +uint32 +dhd_get_status_log_len(void *ndev, 
dhd_pub_t *dhdp) +{ + dhd_info_t *dhd_info; + uint32 length = 0; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (dhdp) { + length = dhd_statlog_get_logbuf_len(dhdp); + } + + return length; +} +#endif /* DHD_STATUS_LOGGING */ + +void +dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr) +{ + /* prep the section header */ + memset(sec_hdr, 0, sizeof(*sec_hdr)); + sec_hdr->magic = LOG_DUMP_MAGIC; + sec_hdr->timestamp = local_clock(); +} + +/* Must hold 'dhd_os_logdump_lock' before calling this function ! */ +static int +do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type) +{ + int ret = 0, i = 0; + struct file *fp = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + mm_segment_t old_fs; + struct kstat stat; +#endif + loff_t pos = 0; + char dump_path[128]; + uint32 file_mode; + unsigned long flags = 0; + size_t log_size = 0; + size_t fspace_remain = 0; + char time_str[128]; + unsigned int len = 0; + log_dump_section_hdr_t sec_hdr; + uint32 file_size = 0; + + DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__)); + goto exit1; + } + DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + + if ((ret = dhd_log_flush(dhdp, type)) < 0) { + goto exit1; + } + /* change to KERNEL_DS address limit */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + old_fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path)); + + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); + DHD_ERROR(("DHD version: %s\n", dhd_version)); + DHD_ERROR(("F/W version: %s\n", fw_version)); + + dhd_log_dump_buf_addr(dhdp, type); + + dhd_get_time_str(dhdp, time_str, 128); + + /* if this is the first time after dhd is loaded, + * or, if periodic flush is disabled, clear the log file + */ + if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0) + file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC; + else + file_mode = O_CREAT | O_RDWR | O_SYNC; + + fp = filp_open(dump_path, file_mode, 0664); + if (IS_ERR(fp)) { + /* If android installed image, try '/data' directory */ +#if defined(CONFIG_X86) && defined(OEM_ANDROID) + DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n", + __FUNCTION__)); + snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE); + if (!dhdp->logdump_periodic_flush) { + snprintf(dump_path + strlen(dump_path), + sizeof(dump_path) - strlen(dump_path), + "_%s", dhdp->debug_dump_time_str); + } + fp = filp_open(dump_path, file_mode, 0664); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + DHD_ERROR(("open file error, err = %d\n", ret)); + goto exit2; + } + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); +#else + ret = PTR_ERR(fp); + DHD_ERROR(("open file error, err = %d\n", ret)); + goto exit2; +#endif /* CONFIG_X86 && OEM_ANDROID */ + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + ret = vfs_stat(dump_path, &stat); + if (ret < 0) { + DHD_ERROR(("file stat error, err = %d\n", ret)); + goto exit2; + } + file_size = stat.size; +#else + file_size = dhd_os_get_image_size(fp); + if (file_size <= 0) { + DHD_ERROR(("%s: get file size fails ! 
%d\n", __FUNCTION__, file_size));
+		goto exit2;
+	}
+#endif
+
+	/* if someone else has changed the file */
+	if (dhdp->last_file_posn != 0 &&
+		file_size < dhdp->last_file_posn) {
+		dhdp->last_file_posn = 0;
+	}
+
+	/* XXX: periodic flush is disabled by default; if enabled,
+	 * only periodic flushing of the 'GENERAL' log dump buffer
+	 * is supported, and it's not recommended to turn on periodic
+	 * flushing, except for developer unit test.
+	 */
+	if (dhdp->logdump_periodic_flush) {
+		log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
+		/* calculate the amount of space required to dump all logs */
+		for (i = 0; i < DLD_BUFFER_NUM; ++i) {
+			if (*type != DLD_BUF_TYPE_ALL && i != *type)
+				continue;
+
+			if (g_dld_buf[i].wraparound) {
+				log_size += (unsigned long)g_dld_buf[i].max
+					- (unsigned long)g_dld_buf[i].buffer;
+			} else {
+				DHD_LOG_DUMP_BUF_LOCK(&g_dld_buf[i].lock, flags);
+				log_size += (unsigned long)g_dld_buf[i].present -
+					(unsigned long)g_dld_buf[i].front;
+				DHD_LOG_DUMP_BUF_UNLOCK(&g_dld_buf[i].lock, flags);
+			}
+			log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
+
+			if (*type != DLD_BUF_TYPE_ALL && i == *type)
+				break;
+		}
+
+		ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
+		if (ret < 0) {
+			DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
+			goto exit2;
+		}
+		pos = fp->f_pos;
+
+		/* if the max file size is reached, wrap around to the beginning of the file;
+		 * we're treating the file as a large ring buffer
+		 */
+		fspace_remain = logdump_max_filesize - pos;
+		if (log_size > fspace_remain) {
+			fp->f_pos -= pos;
+			pos = fp->f_pos;
+		}
+	}
+
+	dhd_print_time_str(0, fp, len, &pos);
+
+	for (i = 0; i < DLD_BUFFER_NUM; ++i) {
+
+		if (*type != DLD_BUF_TYPE_ALL && i != *type)
+			continue;
+
+		len = dhd_get_dld_len(i);
+		dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
+		if (*type != DLD_BUF_TYPE_ALL)
+			break;
+	}
+
+#ifdef EWP_ECNTRS_LOGGING
+	if (*type == DLD_BUF_TYPE_ALL &&
+		logdump_ecntr_enable &&
+		dhdp->ecntr_dbg_ring) {
+		dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
+			fp, (unsigned long *)&pos,
+			&sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
+	}
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+	if (dhdp->statlog) {
+		/* write the statlog */
+		len = dhd_get_status_log_len(NULL, dhdp);
+		if (len) {
+			if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
+				len, &pos) < 0) {
+				goto exit2;
+			}
+		}
+	}
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+	if (dhdp->statlog) {
+		dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
+			dhd_statlog_get_logbuf_len(dhdp));
+	}
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+	if (*type == DLD_BUF_TYPE_ALL &&
+		logdump_rtt_enable &&
+		dhdp->rtt_dbg_ring) {
+		dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
+			fp, (unsigned long *)&pos,
+			&sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
+	}
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef EWP_BCM_TRACE
+	if (*type == DLD_BUF_TYPE_ALL &&
+		dhdp->bcm_trace_dbg_ring) {
+		dhd_log_dump_ring_to_file(dhdp, dhdp->bcm_trace_dbg_ring,
+			fp, (unsigned long *)&pos,
+			&sec_hdr, BCM_TRACE_LOG_HDR, LOG_DUMP_SECTION_BCM_TRACE);
+	}
+#endif /* EWP_BCM_TRACE */
+
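+	/* Each of the sections below is emitted as
+	 * <ASCII header string><log_dump_section_hdr_t><payload>, where the
+	 * section header carries LOG_DUMP_MAGIC, a timestamp, the section
+	 * type and the payload length (see dhd_init_sec_hdr() and the
+	 * dhd_print_*_data() writers).
+	 */
+#ifdef BCMPCIE
+	len = dhd_get_ext_trap_len(NULL, dhdp);
+	if (len) {
+		if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+			goto exit2;
+	}
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined (DNGL_EVENT_SUPPORT)
+	len = dhd_get_health_chk_len(NULL, dhdp);
+	if (len) {
+		if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len,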
+#if defined(DHD_FW_COREDUMP) && defined (DNGL_EVENT_SUPPORT)
+	len = dhd_get_health_chk_len(NULL, dhdp);
+	if (len) {
+		if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len,
+			&pos) < 0)
+			goto exit2;
+	}
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+	len = dhd_get_dhd_dump_len(NULL, dhdp);
+	if (len) {
+		if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+			goto exit2;
+	}
+
+	len = dhd_get_cookie_log_len(NULL, dhdp);
+	if (len) {
+		if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+			goto exit2;
+	}
+
+#ifdef DHD_DUMP_PCIE_RINGS
+	len = dhd_get_flowring_len(NULL, dhdp);
+	if (len) {
+		if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+			goto exit2;
+	}
+#endif
+
+	if (dhdp->logdump_periodic_flush) {
+		/* store the last position written to in the file for future use */
+		dhdp->last_file_posn = pos;
+	}
+
+exit2:
+	if (!IS_ERR(fp) && fp != NULL) {
+		filp_close(fp, NULL);
+		DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
+			__FUNCTION__, dump_path));
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+	set_fs(old_fs);
+#endif
+exit1:
+	if (type) {
+		MFREE(dhdp->osh, type, sizeof(*type));
+	}
+	DHD_GENERAL_LOCK(dhdp, flags);
+	DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
+	dhd_os_busbusy_wake(dhdp);
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+
+#ifdef DHD_DUMP_MNGR
+	if (ret >= 0) {
+		dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
+	}
+#endif /* DHD_DUMP_MNGR */
+
+	return (ret < 0) ? BCME_ERROR : BCME_OK;
+}
+#endif /* DHD_LOG_DUMP */
+
+/* This function writes data to the file pointed to by fp, OR
+ * copies it to the user buffer provided by the upper layer (HAL).
+ */
+int
+dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, uint32 buf_len, void *pos)
+{
+	int ret = BCME_OK;
+
+	if (fp) {
+		ret = vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
+		if (ret < 0) {
+			DHD_ERROR(("write file error, err = %d\n", ret));
+			goto exit;
+		}
+	} else {
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+		if (in_compat_syscall())
+#else
+		if (is_compat_task())
+#endif /* LINUX_VER >= 4.6 */
+		{
+			void * usr_ptr = compat_ptr((uintptr_t) user_buf);
+			ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)),
+				mem_buf, buf_len);
+			if (ret) {
+				DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
+				goto exit;
+			}
+		}
+		else
+#endif /* CONFIG_COMPAT */
+		{
+			ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
+				mem_buf, buf_len);
+			if (ret) {
+				DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
+				goto exit;
+			}
+		}
+		(*(int *)pos) += buf_len;
+	}
+exit:
+	return ret;
+}
+
+#ifdef BCM_ROUTER_DHD
+void dhd_schedule_trap_log_dump(dhd_pub_t *dhdp,
+	uint8 *buf, uint32 size)
+{
+	dhd_write_file_t *wf = NULL;
+	wf = (dhd_write_file_t *)MALLOC(dhdp->osh, sizeof(dhd_write_file_t));
+	if (wf == NULL) {
+		DHD_ERROR(("%s: dhd write file memory allocation failed\n", __FUNCTION__));
+		return;
+	}
+	snprintf(wf->file_path, sizeof(wf->file_path), "%s", "/tmp/failed_if.txt");
+	wf->file_flags = O_CREAT | O_WRONLY | O_SYNC;
+	wf->buf = buf;
+	wf->bufsize = size;
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)wf,
+		DHD_WQ_WORK_INFORM_DHD_MON, dhd_inform_dhd_monitor_handler,
+		DHD_WQ_WORK_PRIORITY_HIGH);
+}
+
+/* Returns the task_struct of the userspace process running with the given name */
+static struct task_struct *
+_get_task_info(const char *pname)
+{
+	struct task_struct *task;
+	if (!pname)
+		return NULL;
+
+	for_each_process(task) {
+		if (strcmp(pname, task->comm) == 0)
+			return task;
+	}
+
+	return NULL;
+}
+
+#define DHD_MONITOR_NS "dhd_monitor"
+extern void emergency_restart(void);
+
+static void
+dhd_inform_dhd_monitor_handler(void *handle, void
*event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_write_file_t *wf = event_info; + struct task_struct *monitor_task; + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + if (!event_info) { + DHD_ERROR(("%s: File info is NULL\n", __FUNCTION__)); + return; + } + if (!wf->buf) { + DHD_ERROR(("%s: Unable to get failed interface name\n", __FUNCTION__)); + goto exit; + } + if (write_file(wf->file_path, wf->file_flags, wf->buf, wf->bufsize)) { + DHD_ERROR(("%s: writing to the file failed\n", __FUNCTION__)); + } +exit: + MFREE(dhd->pub.osh, wf, sizeof(dhd_write_file_t)); + + /* check if dhd_monitor is running */ + monitor_task = _get_task_info(DHD_MONITOR_NS); + if (monitor_task == NULL) { + /* If dhd_monitor is not running, handle recovery from here */ + + char *val = nvram_get("watchdog"); + if (val && bcm_atoi(val)) { + /* watchdog enabled, so reboot */ + DHD_ERROR(("%s: Dongle(wl%d) trap detected. Restarting the system\n", + __FUNCTION__, dhd->unit)); + + mdelay(1000); + emergency_restart(); + while (1) + cpu_relax(); + } else { + DHD_ERROR(("%s: Dongle(wl%d) trap detected. No watchdog.\n", + __FUNCTION__, dhd->unit)); + } + + return; + } + + /* If monitor daemon is running, let's signal the monitor for recovery */ + DHD_ERROR(("%s: Dongle(wl%d) trap detected. Send signal to dhd_monitor.\n", + __FUNCTION__, dhd->unit)); + + send_sig_info(SIGUSR1, (void *)1L, monitor_task); +} +#endif /* BCM_ROUTER_DHD */ + +#ifdef BCMDBG +#define DUMPMAC_BUF_SZ (128 * 1024) +#define DUMPMAC_FILENAME_SZ 32 + +static void +_dhd_schedule_macdbg_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp = &dhd->pub; +#ifndef BCM_ROUTER_DHD + char *dumpbuf = NULL; + int dumpbuf_len = 0; + uint16 dump_signature; + char dumpfilename[DUMPMAC_FILENAME_SZ] = {0, }; +#endif /* BCM_ROUTER_DHD */ + + ASSERT(event == DHD_WQ_WORK_MACDBG); + BCM_REFERENCE(event_info); + + DHD_ERROR(("%s: Dongle(wl%d) macreg dump scheduled\n", + __FUNCTION__, dhd->unit)); + + DHD_OS_WAKE_LOCK(dhdp); + + /* Make sure dongle stops running to avoid race condition in reading mac registers */ + (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0); + + /* In router, skip macregs dump as dhd_monitor will dump them */ +#ifndef BCM_ROUTER_DHD + dumpbuf = (char *)MALLOCZ(dhdp->osh, DUMPMAC_BUF_SZ); + if (dumpbuf) { + /* Write macdump to a file */ + + /* Get dump file signature */ + dump_signature = (uint16)OSL_RAND(); + + /* PSMr */ + if (dhd_macdbg_dumpmac(dhdp, dumpbuf, DUMPMAC_BUF_SZ, + &dumpbuf_len, FALSE) == BCME_OK) { + snprintf(dumpfilename, DUMPMAC_FILENAME_SZ, + "/tmp/d11reg_dump_%04X.txt", dump_signature); + DHD_ERROR(("%s: PSMr macreg dump to %s\n", __FUNCTION__, dumpfilename)); + /* Write to a file */ + if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC), + dumpbuf, dumpbuf_len)) { + DHD_ERROR(("%s: writing mac dump to the file failed\n", + __FUNCTION__)); + } + memset(dumpbuf, 0, DUMPMAC_BUF_SZ); + memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ); + dumpbuf_len = 0; + } + + /* PSMx */ + if (dhd_macdbg_dumpmac(dhdp, dumpbuf, DUMPMAC_BUF_SZ, + &dumpbuf_len, TRUE) == BCME_OK) { + snprintf(dumpfilename, DUMPMAC_FILENAME_SZ, + "/tmp/d11regx_dump_%04X.txt", dump_signature); + DHD_ERROR(("%s: PSMx macreg dump to %s\n", __FUNCTION__, dumpfilename)); + /* Write to a file */ + if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC), + dumpbuf, dumpbuf_len)) { + DHD_ERROR(("%s: writing mac dump to the file failed\n", + __FUNCTION__)); + } + 
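+			/* the dump buffer and file name are reused for each
+			 * section (PSMr, PSMx, SVMP); clear them before the
+			 * next dump so stale data cannot leak between files
+			 */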
+			memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
+			memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
+			dumpbuf_len = 0;
+		}
+
+		/* SVMP */
+		if (dhd_macdbg_dumpsvmp(dhdp, dumpbuf, DUMPMAC_BUF_SZ,
+			&dumpbuf_len) == BCME_OK) {
+			snprintf(dumpfilename, DUMPMAC_FILENAME_SZ,
+				"/tmp/svmp_dump_%04X.txt", dump_signature);
+			DHD_ERROR(("%s: SVMP mems dump to %s\n", __FUNCTION__, dumpfilename));
+			/* Write to a file */
+			if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC),
+				dumpbuf, dumpbuf_len)) {
+				DHD_ERROR(("%s: writing svmp dump to the file failed\n",
+					__FUNCTION__));
+			}
+			memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
+			memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
+			dumpbuf_len = 0;
+		}
+
+		MFREE(dhdp->osh, dumpbuf, DUMPMAC_BUF_SZ);
+	} else {
+		DHD_ERROR(("%s: print macdump\n", __FUNCTION__));
+		/* Just printf the dumps */
+		(void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, FALSE); /* PSMr */
+		(void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, TRUE); /* PSMx */
+		(void) dhd_macdbg_dumpsvmp(dhdp, NULL, 0, NULL);
+	}
+#endif /* BCM_ROUTER_DHD */
+
+	DHD_OS_WAKE_UNLOCK(dhdp);
+	dhd_deferred_work_set_skip(dhd->dhd_deferred_wq,
+		DHD_WQ_WORK_MACDBG, FALSE);
+}
+
+void
+dhd_schedule_macdbg_dump(dhd_pub_t *dhdp)
+{
+	DHD_ERROR(("%s: Dongle(wl%d) schedule macreg dump\n",
+		__FUNCTION__, dhdp->info->unit));
+
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
+		DHD_WQ_WORK_MACDBG, _dhd_schedule_macdbg_dump, DHD_WQ_WORK_PRIORITY_LOW);
+	dhd_deferred_work_set_skip(dhdp->info->dhd_deferred_wq,
+		DHD_WQ_WORK_MACDBG, TRUE);
+}
+#endif /* BCMDBG */
+
+/*
+ * This call returns the memdump size so that
+ * halutil can allocate a buffer of that size in user space.
+ */
+int
+dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+
+	if (dhdp->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+		return BCME_ERROR;
+	}
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+	ret = dhd_common_socram_dump(dhdp);
+	if (ret == BCME_OK) {
+		*dump_size = dhdp->soc_ram_length;
+	}
+	return ret;
+}
+
+/*
+ * This returns the actual memdump after the memdump size has been queried.
+ */
+int
+dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
+{
+	int ret = BCME_OK;
+	int orig_len = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	if (buf == NULL)
+		return BCME_ERROR;
+	orig_len = *size;
+	if (dhdp->soc_ram) {
+		if (orig_len >= dhdp->soc_ram_length) {
+			*buf = dhdp->soc_ram;
+			*size = dhdp->soc_ram_length;
+		} else {
+			ret = BCME_BUFTOOSHORT;
+			DHD_ERROR(("The length of the buffer is too short"
+				" to save the memory dump with %d\n", dhdp->soc_ram_length));
+		}
+	} else {
+		DHD_ERROR(("socram_dump is not ready to get\n"));
+		ret = BCME_NOTREADY;
+	}
+	return ret;
+}
+
+#ifdef EWP_RTT_LOGGING
+uint32
+dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd_info;
+	log_dump_section_hdr_t sec_hdr;
+	int length = 0;
+	dhd_dbg_ring_t *ring;
+
+	if (ndev) {
+		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+		dhdp = &dhd_info->pub;
+	}
+
+	if (!dhdp)
+		return length;
+
+	if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
+		ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
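+		/* XXX: the reported section length is the ring payload plus
+		 * the section tag string plus the framing header; every
+		 * dhd_get_*_len()/dhd_print_*_data() pair follows this pattern
+		 * so the caller (HAL) can size its buffer before the dump.
+		 */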
+		length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
+	}
+	return length;
+}
+#endif /* EWP_RTT_LOGGING */
+
+int
+dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
+{
+	char *fw_str;
+
+	if (size == 0)
+		return BCME_BADARG;
+
+	fw_str = strstr(info_string, "Firmware: ");
+	if (fw_str == NULL) {
+		return BCME_ERROR;
+	}
+
+	bzero(*buf, size);
+	if (dhd_ver) {
+		strlcpy(*buf, dhd_version, size);
+	} else {
+		strlcpy(*buf, fw_str, size);
+	}
+	return BCME_OK;
+}
+
+#ifdef DHD_PKT_LOGGING
+int
+dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	if (user_buf == NULL) {
+		DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	ret = dhd_pktlog_dump_write_memory(dhdp, user_buf, len);
+	if (ret < 0) {
+		DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	return ret;
+}
+
+uint32
+dhd_os_get_pktlog_dump_size(struct net_device *dev)
+{
+	uint32 size = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+
+	size = dhd_pktlog_get_dump_length(dhdp);
+	if (size == 0) {
+		DHD_ERROR(("%s(): fail to get pktlog size, err = %d\n", __FUNCTION__, size));
+	}
+	return size;
+}
+
+void
+dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+
+	dhd_pktlog_get_filename(dhdp, dump_path, len);
+}
+#endif /* DHD_PKT_LOGGING */
+#ifdef DNGL_AXI_ERROR_LOGGING
+int
+dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	loff_t pos = 0;
+	if (user_buf == NULL) {
+		DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
+		NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
+
+	if (ret < 0) {
+		DHD_ERROR(("%s(): fail to dump axi error, err = %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	return ret;
+}
+
+int
+dhd_os_get_axi_error_dump_size(struct net_device *dev)
+{
+	int size = -1;
+
+	size = sizeof(dhd_axi_error_dump_t);
+	if (size < 0) {
+		DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
+	}
+	return size;
+}
+
+void
+dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
+{
+	snprintf(dump_path, len, "%s",
+		DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+	return &ifp->wmf;
+}
+#endif /* DHD_WMF */
+
+#if defined(BCM_ROUTER_DHD)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata, *ip_body;
+	uint8 dwm_filter;
+	uint8 tos_tc = 0;
+	uint8 dscp = 0;
+	pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+	eh = (struct ether_header *) pktdata;
+	ip_body = NULL;
+
+	if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) {
+		if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+			evh = (struct ethervlan_header *)eh;
+			if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+				(evh->ether_type ==
hton16(ETHER_TYPE_IPV6))) { + ip_body = pktdata + sizeof(struct ethervlan_header); + } + } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || + (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { + ip_body = pktdata + sizeof(struct ether_header); + } + if (ip_body) { + tos_tc = IP_TOS46(ip_body); + dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT; + } + + if (dscp < DHD_DWM_TBL_SIZE) { + dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp]; + if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) { + PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter)); + } + } + } +} +#endif /* BCM_ROUTER_DHD */ + +bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac) +{ + return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE; +} + +#ifdef DHD_L2_FILTER +arp_table_t* +dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(bssidx < DHD_MAX_IFS); + + ifp = dhd->iflist[bssidx]; + return ifp->phnd_arp_table; +} + +int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + return ifp->parp_enable; + else + return FALSE; +} + +/* Set interface specific proxy arp configuration */ +int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + if (!ifp) + return BCME_ERROR; + + /* At present all 3 variables are being + * handled at once + */ + ifp->parp_enable = val; + ifp->parp_discard = val; + ifp->parp_allnode = val; + + /* Flush ARP entries when disabled */ + if (val == FALSE) { + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL, + FALSE, dhdp->tickcnt); + } + return BCME_OK; +} + +bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + return ifp->parp_discard; +} + +bool +dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->parp_allnode; +} + +int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->dhcp_unicast; +} + +int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->dhcp_unicast = val; + return BCME_OK; +} + +int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->block_ping; +} + +int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->block_ping = val; + /* Disable rx_pkt_chain feature for interface if block_ping option is + * enabled + */ + dhd_update_rx_pkt_chainable_state(dhdp, idx); + return BCME_OK; +} + +int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->grat_arp; +} + +int 
dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->grat_arp = val; + + return BCME_OK; +} + +int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->block_tdls; +} + +int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->block_tdls = val; + + return BCME_OK; +} +#endif /* DHD_L2_FILTER */ + +#if defined(SET_XPS_CPUS) +int dhd_xps_cpus_enable(struct net_device *net, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + char * XPS_CPU_SETBUF; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + if (!dhd->pub.conf->xps_cpus) + return -ENODEV; + + if (ifidx == PRIMARY_INF) { + if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) { + DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__)); + XPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS; + } else { + DHD_INFO(("%s : set for BSS.\n", __FUNCTION__)); + XPS_CPU_SETBUF = RPS_CPUS_MASK; + } + } else if (ifidx == VIRTUAL_INF) { + DHD_INFO(("%s : set for P2P.\n", __FUNCTION__)); + XPS_CPU_SETBUF = RPS_CPUS_MASK_P2P; + } else { + DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + ifp = dhd->iflist[ifidx]; + if (ifp) { + if (enable) { + DHD_INFO(("%s : set xps_cpus as [%s]\n", __FUNCTION__, XPS_CPU_SETBUF)); + custom_xps_map_set(ifp->net, XPS_CPU_SETBUF, strlen(XPS_CPU_SETBUF)); + } else { + custom_xps_map_clear(ifp->net); + } + } else { + DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__)); + return -ENODEV; + } + return BCME_OK; +} + +int custom_xps_map_set(struct net_device *net, char *buf, size_t len) +{ + cpumask_var_t mask; + int err; + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); + if (err) { + free_cpumask_var(mask); + DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__)); + return err; + } + + err = netif_set_xps_queue(net, mask, 0); + + free_cpumask_var(mask); + + if (0 == err) + WL_MSG(net->name, "Done. 
mapping cpu\n"); + + return err; +} + +void custom_xps_map_clear(struct net_device *net) +{ + struct xps_dev_maps *dev_maps; + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + rcu_read_lock(); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + dev_maps = rcu_dereference(net->xps_cpus_map); +#else + dev_maps = rcu_dereference(net->xps_maps); +#endif + rcu_read_unlock(); + + if (dev_maps) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + RCU_INIT_POINTER(net->xps_cpus_map, NULL); +#else + RCU_INIT_POINTER(net->xps_maps, NULL); +#endif + kfree_rcu(dev_maps, rcu); + DHD_INFO(("%s : xps_cpus map clear.\n", __FUNCTION__)); + } +} +#endif // endif + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + char * RPS_CPU_SETBUF; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + if (!dhd->pub.conf->rps_cpus) + return -ENODEV; + + if (ifidx == PRIMARY_INF) { + if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) { + DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS; + } else { + DHD_INFO(("%s : set for BSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK; + } + } else if (ifidx == VIRTUAL_INF) { + DHD_INFO(("%s : set for P2P.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P; + } else { + DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + ifp = dhd->iflist[ifidx]; + if (ifp) { + if (enable) { + DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF)); + custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF)); + } else { + custom_rps_map_clear(ifp->net->_rx); + } + } else { + DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__)); + return -ENODEV; + } + return BCME_OK; +} + +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len) +{ + struct rps_map *old_map, *map; + cpumask_var_t mask; + int err, cpu, i; + static DEFINE_SPINLOCK(rps_map_lock); + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); + if (err) { + free_cpumask_var(mask); + DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__)); + return err; + } + + map = kzalloc(max_t(unsigned int, + RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), + GFP_KERNEL); + if (!map) { + free_cpumask_var(mask); + DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + i = 0; + for_each_cpu(cpu, mask) { + map->cpus[i++] = cpu; + } + + if (i) { + map->len = i; + } else { + kfree(map); + map = NULL; + free_cpumask_var(mask); + DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__)); + return -1; + } + + spin_lock(&rps_map_lock); + old_map = rcu_dereference_protected(queue->rps_map, + lockdep_is_held(&rps_map_lock)); + rcu_assign_pointer(queue->rps_map, map); + spin_unlock(&rps_map_lock); + + if (map) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) + static_key_slow_inc(&rps_needed.key); +#else + static_key_slow_inc(&rps_needed); +#endif + } + if (old_map) { + kfree_rcu(old_map, rcu); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) + static_key_slow_dec(&rps_needed.key); +#else + static_key_slow_dec(&rps_needed); +#endif + } + free_cpumask_var(mask); + + DHD_INFO(("%s : Done. 
mapping cpu nummber : %d\n", __FUNCTION__, map->len)); + return map->len; +} + +void custom_rps_map_clear(struct netdev_rx_queue *queue) +{ + struct rps_map *map; + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + map = rcu_dereference_protected(queue->rps_map, 1); + if (map) { + RCU_INIT_POINTER(queue->rps_map, NULL); + kfree_rcu(map, rcu); + DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__)); + } +} +#endif // endif + +#ifdef DHD_BUZZZ_LOG_ENABLED + +static int +dhd_buzzz_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + + DAEMONIZE("dhd_buzzz"); + + /* signal: thread has started */ + complete(&tsk->completed); + + /* Run until signal received */ + while (1) { + if (down_interruptible(&tsk->sema) == 0) { + if (tsk->terminated) { + break; + } + printk("%s: start to dump...\n", __FUNCTION__); + dhd_buzzz_dump(); + } else { + break; + } + } + complete_and_exit(&tsk->completed, 0); +} + +void* dhd_os_create_buzzz_thread(void) +{ + tsk_ctl_t *thr_buzzz_ctl = NULL; + + thr_buzzz_ctl = kmalloc(sizeof(tsk_ctl_t), GFP_KERNEL); + if (!thr_buzzz_ctl) { + return NULL; + } + + PROC_START(dhd_buzzz_thread, NULL, thr_buzzz_ctl, 0, "dhd_buzzz"); + + return (void *)thr_buzzz_ctl; +} + +void dhd_os_destroy_buzzz_thread(void *thr_hdl) +{ + tsk_ctl_t *thr_buzzz_ctl = (tsk_ctl_t *)thr_hdl; + + if (!thr_buzzz_ctl) { + return; + } + + PROC_STOP(thr_buzzz_ctl); + kfree(thr_buzzz_ctl); +} + +void dhd_os_sched_buzzz_thread(void *thr_hdl) +{ + tsk_ctl_t *thr_buzzz_ctl = (tsk_ctl_t *)thr_hdl; + + if (!thr_buzzz_ctl) { + return; + } + + if (thr_buzzz_ctl->thr_pid >= 0) { + up(&thr_buzzz_ctl->sema); + } +} +#endif /* DHD_BUZZZ_LOG_ENABLED */ + +#ifdef DHD_DEBUG_PAGEALLOC +/* XXX Additional Kernel implemenation is needed to use this function at + * the top of the check_poison_mem() function in mm/debug-pagealloc.c file. + * Please check if below codes are implemenated your Linux Kernel first. + * + * - mm/debug-pagealloc.c + * + * // for DHD_DEBUG_PAGEALLOC + * typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, uint addr_len); + * page_corrupt_cb_t corrupt_cb = NULL; + * void *corrupt_cb_handle = NULL; + * + * void register_page_corrupt_cb(page_corrupt_cb_t cb, void *handle) + * { + * corrupt_cb = cb; + * corrupt_cb_handle = handle; + * } + * EXPORT_SYMBOL(register_page_corrupt_cb); + * + * extern void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); + * + * static void check_poison_mem(unsigned char *mem, size_t bytes) + * { + * ...... 
+ * + * if (!__ratelimit(&ratelimit)) + * return; + * else if (start == end && single_bit_flip(*start, PAGE_POISON)) + * printk(KERN_ERR "pagealloc: single bit error\n"); + * else + * printk(KERN_ERR "pagealloc: memory corruption\n"); + * + * print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start, + * end - start + 1, 1); + * + * // for DHD_DEBUG_PAGEALLOC + * dhd_page_corrupt_cb(corrupt_cb_handle, start, end - start + 1); + * + * dump_stack(); + * } + * + */ + +void +dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n", + __FUNCTION__, addr_corrupt, (uint32)len)); + + DHD_OS_WAKE_LOCK(dhdp); + prhex("Page Corruption:", addr_corrupt, len); + dhd_dump_to_kernelog(dhdp); +#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + /* Load the dongle side dump to host memory and then BUG_ON() */ + dhdp->memdump_enabled = DUMP_MEMONLY; + dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); +} +EXPORT_SYMBOL(dhd_page_corrupt_cb); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED) +void +dhd_pktid_error_handler(dhd_pub_t *dhdp) +{ + DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(dhdp); + dhd_dump_to_kernelog(dhdp); +#ifdef DHD_FW_COREDUMP + /* Load the dongle side dump to host memory */ + if (dhdp->memdump_enabled == DUMP_DISABLED) { + dhdp->memdump_enabled = DUMP_MEMFILE; + } + dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE; + dhd_bus_mem_dump(dhdp); +#endif /* DHD_FW_COREDUMP */ +#ifdef OEM_ANDROID + /* XXX Send HANG event to Android Framework for recovery */ + dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR; + dhd_os_check_hang(dhdp, 0, -EREMOTEIO); +#endif /* OEM_ANDROID */ + DHD_OS_WAKE_UNLOCK(dhdp); +} +#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */ + +struct net_device * +dhd_linux_get_primary_netdev(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + + if (dhd->iflist[0] && dhd->iflist[0]->net) + return dhd->iflist[0]->net; + else + return NULL; +} + +#ifdef DHD_PKTTS +/** + * dhd_msgbuf_get_ipv6_id - return ipv6 identification number + * return 0 in case of error + * + * @pkt: packet pointer + */ +uint +dhd_msgbuf_get_ipv6_id(void *pkt) +{ + struct frag_hdr _frag; + const struct sk_buff *skb; + const struct frag_hdr *fh; + unsigned int offset = 0; + int err; + + skb = (struct sk_buff *)pkt; + err = ipv6_find_hdr(skb, &offset, NEXTHDR_FRAGMENT, NULL, NULL); + if (err < 0) { + return 0; + } + + fh = skb_header_pointer(skb, offset, sizeof(_frag), &_frag); + if (fh == NULL) { + return 0; + } + + return ntohl(fh->identification); +} + +/** + * dhd_create_to_notifier_ts - create BCM_NL_TS netlink socket + * + * @void: + */ +int +dhd_create_to_notifier_ts(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + /* Kernel 3.6 onwards this API accepts only 3 arguments. 
*/ + nl_to_ts = netlink_kernel_create(&init_net, BCM_NL_TS, &dhd_netlink_ts); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */ + if (!nl_to_ts) { + DHD_ERROR(("Error creating ts socket.\n")); + return -1; + } + DHD_INFO(("nl_to socket created successfully...\n")); + return 0; +} + +/** + * dhd_destroy_to_notifier_ts - destroy BCM_NL_TS netlink socket + * + * @void: + */ +void +dhd_destroy_to_notifier_ts(void) +{ + DHD_INFO(("Destroying nl_to_ts socket\n")); + if (nl_to_ts) { + netlink_kernel_release(nl_to_ts); + nl_to_ts = NULL; + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +/** + * dhd_recv_msg_from_ts - this is called on BCM_NL_TS netlink recv message + * this api updates app pid of app which is currenty using this netlink socket + * + * @skb: rx packet socket buffer + */ +static void +dhd_recv_msg_from_ts(struct sk_buff *skb) +{ + sender_pid_ts = ((struct nlmsghdr *)(skb->data))->nlmsg_pid; + DHD_INFO(("DHD Daemon Started, PID:%d\n", sender_pid_ts)); +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */ + +/** + * dhd_send_msg_to_ts - send data to BCM_NL_TS netlink socket + * + * @skb: socket buffer (unused) + * @data: output data + * @size: size of output data + */ +int +dhd_send_msg_to_ts(struct sk_buff *skb, void *data, int size) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb_out = NULL; + int ret = BCME_ERROR; + + BCM_REFERENCE(skb); + if (sender_pid_ts == 0) { + goto err; + } + + if ((skb_out = nlmsg_new(size, GFP_ATOMIC)) == NULL) { + DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__)); + goto err; + } + + nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0); + if (nlh == NULL) { + DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__)); + goto err; + } + + NETLINK_CB(skb_out).dst_group = 0; /* Unicast */ + (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size); + + if ((ret = nlmsg_unicast(nl_to_ts, skb_out, sender_pid_ts)) < 0) { + DHD_ERROR(("Error sending message, ret:%d\n", ret)); + /* skb is already freed inside nlmsg_unicast() on error case */ + /* explicitly making skb_out to NULL to avoid double free */ + skb_out = NULL; + goto err; + } + return BCME_OK; + +err: + if (skb_out) { + nlmsg_free(skb_out); + } + return ret; +} +#endif /* DHD_PKTTS */ + +static int +dhd_create_to_notifier_skt(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + /* Kernel 3.7 onwards this API accepts only 3 arguments. 
*/ + /* Kernel version 3.6 is a special case which accepts 4 arguments */ + nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + /* Kernel version 3.5 and below use this old API format */ + nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0, + dhd_process_daemon_msg, NULL, THIS_MODULE); +#else + nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE, + &dhd_netlink_cfg); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */ + if (!nl_to_event_sk) + { + printf("Error creating socket.\n"); + return -1; + } + DHD_INFO(("nl_to socket created successfully...\n")); + return 0; +} + +void +dhd_destroy_to_notifier_skt(void) +{ + DHD_INFO(("Destroying nl_to socket\n")); + netlink_kernel_release(nl_to_event_sk); +} + +static void +dhd_recv_msg_from_daemon(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + bcm_to_info_t *cmd; + + nlh = (struct nlmsghdr *)skb->data; + cmd = (bcm_to_info_t *)nlmsg_data(nlh); + if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) { + sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid; + DHD_INFO(("DHD Daemon Started\n")); + } +} + +int +dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb_out; + int ret = BCME_ERROR; + + BCM_REFERENCE(skb); + if (sender_pid == 0) { + DHD_INFO(("Invalid PID 0\n")); + skb_out = NULL; + goto err; + } + + if ((skb_out = nlmsg_new(size, 0)) == NULL) { + DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__)); + ret = BCME_NOMEM; + goto err; + } + nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0); + if (nlh == NULL) { + DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__)); + goto err; + } + NETLINK_CB(skb_out).dst_group = 0; /* Unicast */ + (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size); + + if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) { + DHD_ERROR(("Error sending message, ret:%d\n", ret)); + /* skb is already freed inside nlmsg_unicast() on error case */ + /* explicitly making skb_out to NULL to avoid double free */ + skb_out = NULL; + goto err; + } + return BCME_OK; +err: + if (skb_out) { + nlmsg_free(skb_out); + } + return ret; +} + +static void +dhd_process_daemon_msg(struct sk_buff *skb) +{ + bcm_to_info_t to_info; + + to_info.magic = BCM_TO_MAGIC; + to_info.reason = REASON_DAEMON_STARTED; + to_info.trap = NO_TRAP; + + dhd_recv_msg_from_daemon(skb); + dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info)); +} + +#ifdef REPORT_FATAL_TIMEOUTS +static void +dhd_send_trap_to_fw(dhd_pub_t * pub, int reason, int trap) +{ + bcm_to_info_t to_info; + + to_info.magic = BCM_TO_MAGIC; + to_info.reason = reason; + to_info.trap = trap; + + DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap)); + dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t)); +} + +void +dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason) +{ + int to_reason; + int trap = NO_TRAP; + switch (reason) { + case DHD_REASON_COMMAND_TO: + to_reason = REASON_COMMAND_TO; + trap = DO_TRAP; + break; + case DHD_REASON_JOIN_TO: + to_reason = REASON_JOIN_TO; + trap = DO_TRAP; + break; + case DHD_REASON_SCAN_TO: + to_reason = REASON_SCAN_TO; + trap = DO_TRAP; + break; + case DHD_REASON_OQS_TO: + to_reason = REASON_OQS_TO; + trap = DO_TRAP; + break; + default: + to_reason = REASON_UNKOWN; + } + dhd_send_trap_to_fw(pub, to_reason, trap); +} +#endif /* REPORT_FATAL_TIMEOUTS */ + +char* 
+dhd_dbg_get_system_timestamp(void)
+{
+	static char timebuf[DEBUG_DUMP_TIME_BUF_LEN];
+	struct osl_timespec tv;
+	unsigned long local_time;
+	struct rtc_time tm;
+
+	memset_s(timebuf, DEBUG_DUMP_TIME_BUF_LEN, 0, DEBUG_DUMP_TIME_BUF_LEN);
+	osl_do_gettimeofday(&tv);
+	local_time = (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60));
+	rtc_time_to_tm(local_time, &tm);
+	scnprintf(timebuf, DEBUG_DUMP_TIME_BUF_LEN,
+		"%02d:%02d:%02d.%06lu",
+		tm.tm_hour, tm.tm_min, tm.tm_sec, tv.tv_usec);
+	return timebuf;
+}
+
+char*
+dhd_log_dump_get_timestamp(void)
+{
+	static char buf[32];
+	u64 ts_nsec;
+	unsigned long rem_nsec;
+
+	ts_nsec = local_clock();
+	rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
+	snprintf(buf, sizeof(buf), "%5lu.%06lu",
+		(unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
+
+	return buf;
+}
+
+#ifdef DHD_LOG_DUMP
+bool
+dhd_log_dump_ecntr_enabled(void)
+{
+	return (bool)logdump_ecntr_enable;
+}
+
+bool
+dhd_log_dump_rtt_enabled(void)
+{
+	return (bool)logdump_rtt_enable;
+}
+
+void
+dhd_log_dump_init(dhd_pub_t *dhd)
+{
+	struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
+	int i = 0;
+	uint8 *prealloc_buf = NULL, *bufptr = NULL;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+	int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+	int ret;
+	dhd_dbg_ring_t *ring = NULL;
+	unsigned long flags = 0;
+	dhd_info_t *dhd_info = dhd->info;
+#if defined(EWP_ECNTRS_LOGGING)
+	void *cookie_buf = NULL;
+#endif
+
+	BCM_REFERENCE(ret);
+	BCM_REFERENCE(ring);
+	BCM_REFERENCE(flags);
+
+	/* sanity check */
+	if (logdump_prsrv_tailsize <= 0 ||
+		logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
+		logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
+	}
+	/* now adjust the preserve log flush size based on the
+	 * kernel printk log buffer size
+	 */
+#ifdef CONFIG_LOG_BUF_SHIFT
+	DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
+		" limit prsrv tail size to = %uKB\n",
+		__FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
+		logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
+
+	if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
+		logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
+	}
+#else
+	DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
+		__FUNCTION__, logdump_prsrv_tailsize/1024));
+#endif /* CONFIG_LOG_BUF_SHIFT */
+
+	mutex_init(&dhd_info->logdump_lock);
+	/* initialize log dump buf structures */
+	memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
+
+	/* set the log dump buffer size based on the module_param */
+	if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
+		logdump_max_bufsize <= 0)
+		dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
+	else
+		dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
+
+	/* pre-alloc the memory for the log buffers & 'special' buffer */
+	dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+	prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
+	dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
+		dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+#else
+	prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
+	dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+
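+	/* XXX: the preallocation is one contiguous LOG_DUMP_TOTAL_BUFSIZE
+	 * region; 'bufptr' walks it below, handing a slice to each
+	 * non-special dld buffer and then to the ecntrs/rtt/bcm_trace debug
+	 * rings and the event log filter. Only the 'special' buffer has its
+	 * own allocation. Rough layout:
+	 *
+	 *   prealloc_buf: [dld bufs except SPECIAL][ECNTRS ring][RTT ring]
+	 *                 [BCM_TRACE ring][event log filter]
+	 */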
+	if (!prealloc_buf) {
+		DHD_ERROR(("Failed to allocate memory for log buffers\n"));
+		goto fail;
+	}
+	if (!dld_buf_special->buffer) {
+		DHD_ERROR(("Failed to allocate memory for special buffer\n"));
+		goto fail;
+	}
+#ifdef BCMINTERNAL
+	DHD_ERROR(("prealloc_buf:%p dld_buf_special->buffer:%p\n",
+		prealloc_buf, dld_buf_special->buffer));
+#endif /* BCMINTERNAL */
+
+	bufptr = prealloc_buf;
+	for (i = 0; i < DLD_BUFFER_NUM; i++) {
+		dld_buf = &g_dld_buf[i];
+		dld_buf->dhd_pub = dhd;
+		spin_lock_init(&dld_buf->lock);
+		dld_buf->wraparound = 0;
+		if (i != DLD_BUF_TYPE_SPECIAL) {
+			dld_buf->buffer = bufptr;
+			dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
+			bufptr = (uint8 *)dld_buf->max;
+		} else {
+			dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
+		}
+		dld_buf->present = dld_buf->front = dld_buf->buffer;
+		dld_buf->remain = dld_buf_size[i];
+		dld_buf->enable = 1;
+	}
+
+	/* now use the rest of the pre-alloc'd memory for other rings */
+#ifdef EWP_ECNTRS_LOGGING
+	dhd->ecntr_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
+		ECNTR_RING_ID, ECNTR_RING_NAME,
+		LOG_DUMP_ECNTRS_MAX_BUFSIZE,
+		bufptr, TRUE);
+	if (!dhd->ecntr_dbg_ring) {
+		DHD_ERROR(("%s: unable to init ecounters dbg ring !\n",
+			__FUNCTION__));
+		goto fail;
+	}
+	bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+	dhd->rtt_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
+		RTT_RING_ID, RTT_RING_NAME,
+		LOG_DUMP_RTT_MAX_BUFSIZE,
+		bufptr, TRUE);
+	if (!dhd->rtt_dbg_ring) {
+		DHD_ERROR(("%s: unable to init rtt dbg ring !\n",
+			__FUNCTION__));
+		goto fail;
+	}
+	bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef EWP_BCM_TRACE
+	dhd->bcm_trace_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
+		BCM_TRACE_RING_ID, BCM_TRACE_RING_NAME,
+		LOG_DUMP_BCM_TRACE_MAX_BUFSIZE,
+		bufptr, TRUE);
+	if (!dhd->bcm_trace_dbg_ring) {
+		DHD_ERROR(("%s: unable to init bcm trace dbg ring !\n",
+			__FUNCTION__));
+		goto fail;
+	}
+	bufptr += LOG_DUMP_BCM_TRACE_MAX_BUFSIZE;
+#endif /* EWP_BCM_TRACE */
+
+	/* The concise buffer is used as an intermediate buffer for two purposes:
+	 * a) to temporarily hold ecounters records before
+	 *    they are written to the file
+	 * b) to store 'dhd dump' data before putting it in the file
+	 * It should therefore have a size equal to
+	 * MAX(largest possible ecntr record, 'dhd dump' data size)
+	 */
+	dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
+	if (!dhd->concise_dbg_buf) {
+		DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
+			__FUNCTION__));
+		goto fail;
+	}
+
+#if defined(DHD_EVENT_LOG_FILTER)
+	/* XXX init filter last, because the filter uses a buffer allocated by log dump */
+	ret = dhd_event_log_filter_init(dhd,
+		bufptr,
+		LOG_DUMP_FILTER_MAX_BUFSIZE);
+	if (ret != BCME_OK) {
+		goto fail;
+	}
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#if defined(EWP_ECNTRS_LOGGING)
+	cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
+	if (!cookie_buf) {
+		DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
+			__FUNCTION__));
+		goto fail;
+	}
+
+	ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
+	if (ret != BCME_OK) {
+		MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
+		goto fail;
+	}
+#endif /* EWP_ECNTRS_LOGGING */
+	return;
+
+fail:
+
+#if defined(DHD_EVENT_LOG_FILTER)
+	/* XXX deinit filter first, because the filter uses a buffer allocated by log dump */
+	if (dhd->event_log_filter) {
+		dhd_event_log_filter_deinit(dhd);
+	}
+#endif /* DHD_EVENT_LOG_FILTER */
+
+	if (dhd->concise_dbg_buf) {
+		MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+	}
+
+#ifdef
EWP_ECNTRS_LOGGING + if (dhd->logdump_cookie) { + dhd_logdump_cookie_deinit(dhd); + MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE); + dhd->logdump_cookie = NULL; + } +#endif /* EWP_ECNTRS_LOGGING */ + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + if (prealloc_buf) { + DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + DHD_OS_PREFREE(dhd, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#else + if (prealloc_buf) { + MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + MFREE(dhd->osh, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + dld_buf->enable = 0; + dld_buf->buffer = NULL; + } + mutex_destroy(&dhd_info->logdump_lock); +} + +void +dhd_log_dump_deinit(dhd_pub_t *dhd) +{ + struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL; + int i = 0; + dhd_info_t *dhd_info = dhd->info; + dhd_dbg_ring_t *ring = NULL; + + BCM_REFERENCE(ring); + + if (dhd->concise_dbg_buf) { + MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + dhd->concise_dbg_buf = NULL; + } + +#ifdef EWP_ECNTRS_LOGGING + if (dhd->logdump_cookie) { + dhd_logdump_cookie_deinit(dhd); + MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE); + dhd->logdump_cookie = NULL; + } + + if (dhd->ecntr_dbg_ring) { + dhd_dbg_ring_dealloc_deinit(&dhd->ecntr_dbg_ring, dhd); + } +#endif /* EWP_ECNTRS_LOGGING */ + +#ifdef EWP_RTT_LOGGING + if (dhd->rtt_dbg_ring) { + dhd_dbg_ring_dealloc_deinit(&dhd->rtt_dbg_ring, dhd); + } +#endif /* EWP_RTT_LOGGING */ + +#ifdef EWP_BCM_TRACE + if (dhd->bcm_trace_dbg_ring) { + dhd_dbg_ring_dealloc_deinit(&dhd->bcm_trace_dbg_ring, dhd); + } +#endif /* EWP_BCM_TRACE */ + + /* 'general' buffer points to start of the pre-alloc'd memory */ + dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL]; + dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL]; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + if (dld_buf->buffer) { + DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + DHD_OS_PREFREE(dhd, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#else + if (dld_buf->buffer) { + MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + MFREE(dhd->osh, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + dld_buf->enable = 0; + dld_buf->buffer = NULL; + } + mutex_destroy(&dhd_info->logdump_lock); +} + +void +dhd_log_dump_write(int type, char *binary_data, + int binary_len, const char *fmt, ...) 
+{ + int len = 0; + char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, }; + va_list args; + unsigned long flags = 0; + struct dhd_log_dump_buf *dld_buf = NULL; + bool flush_log = FALSE; + + if (type < 0 || type >= DLD_BUFFER_NUM) { + DHD_INFO(("%s: Unsupported DHD_LOG_DUMP_BUF_TYPE(%d).\n", + __FUNCTION__, type)); + return; + } + + dld_buf = &g_dld_buf[type]; + if (dld_buf->enable != 1) { + return; + } + + va_start(args, fmt); + len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args); + /* Non ANSI C99 compliant returns -1, + * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + */ + va_end(args); + if (len < 0) { + return; + } + + if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) { + len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1; + tmp_buf[len] = '\0'; + } + + /* make a critical section to eliminate race conditions */ + DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags); + if (dld_buf->remain < len) { + dld_buf->wraparound = 1; + dld_buf->present = dld_buf->front; + dld_buf->remain = dld_buf_size[type]; + /* if wrap around happens, flush the ring buffer to the file */ + flush_log = TRUE; + } + + memcpy(dld_buf->present, tmp_buf, len); + dld_buf->remain -= len; + dld_buf->present += len; + DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags); + + /* double check invalid memory operation */ + ASSERT((unsigned long)dld_buf->present <= dld_buf->max); + + if (dld_buf->dhd_pub) { + dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub; + dhdp->logdump_periodic_flush = + logdump_periodic_flush; + if (logdump_periodic_flush && flush_log) { + log_dump_type_t *flush_type = MALLOCZ(dhdp->osh, + sizeof(log_dump_type_t)); + if (flush_type) { + *flush_type = type; + dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type); + } + } + } +} + +#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING +extern struct dhd_dbg_ring_buf g_ring_buf; +void +dhd_dbg_ring_write(int type, char *binary_data, + int binary_len, const char *fmt, ...) 
+{ + int len = 0; + va_list args; + struct dhd_dbg_ring_buf *ring_buf = NULL; + char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, }; + + ring_buf = &g_ring_buf; + + va_start(args, fmt); + len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args); + /* Non ANSI C99 compliant returns -1, + * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + */ + va_end(args); + if (len < 0) { + return; + } + + if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) { + len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1; + tmp_buf[len] = '\0'; + } + + if (ring_buf->dhd_pub) { + dhd_pub_t *dhdp = (dhd_pub_t *)ring_buf->dhd_pub; + if (type == DRIVER_LOG_RING_ID || type == FW_VERBOSE_RING_ID || + type == ROAM_STATS_RING_ID) { + if (DBG_RING_ACTIVE(dhdp, type)) { + dhd_os_push_push_ring_data(dhdp, type, + tmp_buf, strlen(tmp_buf)); + return; + } + } + } + return; +} +#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */ +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +void +dhd_flush_rx_tx_wq(dhd_pub_t *dhdp) +{ + dhd_info_t * dhd; + + if (dhdp) { + dhd = dhdp->info; + if (dhd) { + flush_workqueue(dhd->tx_wq); + flush_workqueue(dhd->rx_wq); + } + } + + return; +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef DHD_DEBUG_UART +bool +dhd_debug_uart_is_running(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd->duart_execute) { + return TRUE; + } + + return FALSE; +} + +static void +dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event) +{ + dhd_pub_t *dhdp = handle; + dhd_debug_uart_exec(dhdp, "rd"); +} + +static void +dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd) +{ + int ret; + + char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL}; + char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL}; + +#ifdef DHD_FW_COREDUMP + if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) +#endif + { + if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT || + dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT || +#ifdef DHD_FW_COREDUMP + dhdp->memdump_success == FALSE || +#endif + FALSE) { + dhdp->info->duart_execute = TRUE; + DHD_ERROR(("DHD: %s - execute %s %s\n", + __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd)); + ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); + DHD_ERROR(("DHD: %s - %s %s ret = %d\n", + __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret)); + dhdp->info->duart_execute = FALSE; + +#ifdef DHD_LOG_DUMP + if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) +#endif + { + BUG_ON(1); + } + } + } +} +#endif /* DHD_DEBUG_UART */ + +#if defined(DHD_BLOB_EXISTENCE_CHECK) +void +dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path) +{ + struct file *fp; + char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH; + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__, + filepath)); + dhdp->is_blob = FALSE; + } else { + DHD_ERROR(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath)); + dhdp->is_blob = TRUE; +#if defined(CONCATE_BLOB) + strncat(fw_path, "_blob", strlen("_blob")); +#else + BCM_REFERENCE(fw_path); +#endif /* SKIP_CONCATE_BLOB */ + filp_close(fp, NULL); + } +} +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + +#if defined(PCIE_FULL_DONGLE) +/** test / loopback */ +void +dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event) +{ + dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info; + dhd_info_t *dhd_info = (dhd_info_t *)handle; + + if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) { + DHD_ERROR(("%s: unexpected event \n", 
__FUNCTION__)); + return; + } + if (dhd_info == NULL) { + DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__)); + return; + } + if (dmmap == NULL) { + DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__)); + return; + } + dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap); +} + +void +dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) +{ + dhd_info_t *dhd_info = dhdp->info; + + dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap, + DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW); +} +#endif /* PCIE_FULL_DONGLE */ +/* ---------------------------- End of sysfs implementation ------------------------------------- */ +#ifdef SET_PCIE_IRQ_CPU_CORE +void +dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd) +{ + unsigned int pcie_irq = 0; +#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL) + struct dhd_info *dhd = NULL; +#endif /* DHD_LB && DHD_LB_HOST_CTRL */ + + if (!dhdp) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + + if (!dhdp->bus) { + DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__)); + return; + } + + if (affinity_cmd < DHD_AFFINITY_OFF || affinity_cmd > DHD_AFFINITY_LAST) { + DHD_ERROR(("Wrong Affinity cmds:%d, %s\n", affinity_cmd, __FUNCTION__)); + return; + } + + DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd)); + + if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) { + DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__)); + return; + } + +#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL) + dhd = dhdp->info; + + if (affinity_cmd == DHD_AFFINITY_OFF) { + dhd->permitted_primary_cpu = FALSE; + } else if (affinity_cmd == DHD_AFFINITY_TPUT_150MBPS || + affinity_cmd == DHD_AFFINITY_TPUT_300MBPS) { + dhd->permitted_primary_cpu = TRUE; + } + dhd_select_cpu_candidacy(dhd); + /* + * It needs to NAPI disable -> enable to raise NET_RX napi CPU core + * during Rx traffic + * NET_RX does not move to NAPI CPU core if continusly calling napi polling + * function + */ + napi_disable(&dhd->rx_napi_struct); + napi_enable(&dhd->rx_napi_struct); +#endif /* DHD_LB && DHD_LB_HOST_CTRL */ + + /* + irq_set_affinity() assign dedicated CPU core PCIe interrupt + If dedicated CPU core is not on-line, + PCIe interrupt scheduled on CPU core 0 + */ +#if defined(CONFIG_ARCH_SM8150) || defined(CONFIG_ARCH_KONA) + /* For SDM platform */ + switch (affinity_cmd) { + case DHD_AFFINITY_OFF: +#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL) + irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_secondary); + irq_set_affinity(pcie_irq, dhdp->info->cpumask_secondary); +#endif /* DHD_LB && DHD_LB_HOST_CTRL */ + break; + case DHD_AFFINITY_TPUT_150MBPS: + case DHD_AFFINITY_TPUT_300MBPS: + irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary); + irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary); + break; + default: + DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n", + __FUNCTION__, affinity_cmd)); + } +#elif defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \ + defined(CONFIG_SOC_EXYNOS9830) + /* For Exynos platform */ + switch (affinity_cmd) { + case DHD_AFFINITY_OFF: +#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL) + irq_set_affinity(pcie_irq, dhdp->info->cpumask_secondary); +#endif /* DHD_LB && DHD_LB_HOST_CTRL */ + break; + case DHD_AFFINITY_TPUT_150MBPS: + irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary); + break; + case DHD_AFFINITY_TPUT_300MBPS: + DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n", + __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE)); + irq_set_affinity(pcie_irq, 
cpumask_of(PCIE_IRQ_CPU_CORE)); + break; + default: + DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n", + __FUNCTION__, affinity_cmd)); + } +#else /* For Undefined platform */ + DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n", + __FUNCTION__, affinity_cmd)); +#endif /* End of Platfrom define */ + +} +#endif /* SET_PCIE_IRQ_CPU_CORE */ + +int +dhd_write_file(const char *filepath, char *buf, int buf_len) +{ + struct file *fp = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + mm_segment_t old_fs; +#endif + int ret = 0; + + /* change to KERNEL_DS address limit */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + old_fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + /* File is always created. */ + fp = filp_open(filepath, O_RDWR | O_CREAT, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n", + __FUNCTION__, filepath, PTR_ERR(fp))); + ret = BCME_ERROR; + } else { + if (fp->f_mode & FMODE_WRITE) { + ret = vfs_write(fp, buf, buf_len, &fp->f_pos); + if (ret < 0) { + DHD_ERROR(("%s: Couldn't write file '%s'\n", + __FUNCTION__, filepath)); + ret = BCME_ERROR; + } else { + ret = BCME_OK; + } + } + filp_close(fp, NULL); + } + + /* restore previous address limit */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(old_fs); +#endif + + return ret; +} + +int +dhd_read_file(const char *filepath, char *buf, int buf_len) +{ + struct file *fp = NULL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + mm_segment_t old_fs; +#endif + int ret; + + /* change to KERNEL_DS address limit */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + old_fs = get_fs(); + set_fs(KERNEL_DS); +#endif + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(old_fs); +#endif + DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath)); + return BCME_ERROR; + } + + ret = kernel_read_compat(fp, 0, buf, buf_len); + filp_close(fp, NULL); + + /* restore previous address limit */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + set_fs(old_fs); +#endif + + /* Return the number of bytes read */ + if (ret > 0) { + /* Success to read */ + ret = 0; + } else { + DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n", + __FUNCTION__, filepath, ret)); + ret = BCME_ERROR; + } + + return ret; +} + +int +dhd_write_file_and_check(const char *filepath, char *buf, int buf_len) +{ + int ret; + + ret = dhd_write_file(filepath, buf, buf_len); + if (ret < 0) { + return ret; + } + + /* Read the file again and check if the file size is not zero */ + memset(buf, 0, buf_len); + ret = dhd_read_file(filepath, buf, buf_len); + + return ret; +} + +#ifdef FILTER_IE +int dhd_read_from_file(dhd_pub_t *dhd) +{ + int ret = 0, nread = 0; + void *fd; + uint8 *buf; + NULL_CHECK(dhd, "dhd is NULL", ret); + + buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE); + if (!buf) { + DHD_ERROR(("error: failed to alllocate buf.\n")); + return BCME_NOMEM; + } + + /* open file to read */ + fd = dhd_os_open_image1(dhd, FILTER_IE_PATH); + if (!fd) { + DHD_ERROR(("No filter file(not an error), filter path%s\n", FILTER_IE_PATH)); + ret = BCME_EPERM; + goto exit; + } + nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd); + if (nread > 0) { + buf[nread] = '\0'; + if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) { + DHD_ERROR(("error: failed to parse filter ie\n")); + } + } else { + DHD_ERROR(("error: zero length file.failed to read\n")); + ret = BCME_ERROR; + } + dhd_os_close_image1(dhd, fd); +exit: + if (buf) { + MFREE(dhd->osh, buf, 
FILE_BLOCK_READ_SIZE);
+ }
+ return ret;
+}
+
+int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
+{
+ uint8* pstr = buf;
+ int element_count = 0;
+
+ if (buf == NULL) {
+ return BCME_ERROR;
+ }
+
+ while (*pstr != '\0') {
+ if (*pstr == '\n') {
+ element_count++;
+ }
+ pstr++;
+ }
+ /*
+ * No newline character is present after the last line, so count
+ * the last line here.
+ */
+ element_count++;
+
+ return element_count;
+}
+
+int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
+{
+ uint8 i, j, msb, lsb, oui_len = 0;
+ /*
+ * The OUI can vary from 3 to 5 bytes. Read from the file as ASCII
+ * input, it can take a maximum of 14 bytes and a minimum of 8 bytes,
+ * including the ":" separators.
+ * Example 5byte OUI
+ * Example 3byte OUI
+ */
+
+ if ((inbuf == NULL) || (len < 8) || (len > 14)) {
+ DHD_ERROR(("error: failed to parse OUI \n"));
+ return BCME_ERROR;
+ }
+
+ for (j = 0, i = 0; i < len; i += 3, ++j) {
+ if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
+ DHD_ERROR(("error: invalid OUI format \n"));
+ return BCME_ERROR;
+ }
+ msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
+ lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
+ 'A' + 10 : inbuf[i + 1] - '0';
+ oui[j] = (msb << 4) | lsb;
+ }
+ /* Size of the oui; it can vary from 3 to 5 bytes. */
+ oui_len = j;
+
+ return oui_len;
+}
+
+int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
+{
+ int i = 0;
+
+ while (i < len) {
+ if (!bcm_isdigit(buf[i])) {
+ DHD_ERROR(("error: non-digit value found in filter_ie \n"));
+ return BCME_ERROR;
+ }
+ i++;
+ }
+ if (bcm_atoi((char*)buf) > 255) {
+ DHD_ERROR(("error: element id cannot be greater than 255 \n"));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
+{
+ int element_count = 0, i = 0, oui_size = 0, ret = 0;
+ uint16 bufsize, buf_space_left, id = 0, len = 0;
+ uint16 filter_iovsize, all_tlvsize;
+ wl_filter_ie_tlv_t *p_ie_tlv = NULL;
+ wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
+ char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
+ uint8 data[20];
+
+ element_count = dhd_get_filter_ie_count(dhd, buf);
+ DHD_INFO(("total element count %d \n", element_count));
+ /* Calculate the whole buffer size */
+ filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
+ p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
+
+ if (p_filter_iov == NULL) {
+ DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
+ return BCME_ERROR;
+ }
+
+ /* setup filter iovar header */
+ p_filter_iov->version = WL_FILTER_IE_VERSION;
+ p_filter_iov->len = filter_iovsize;
+ p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
+ p_filter_iov->pktflag = FC_PROBE_REQ;
+ p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
+ /* setup TLVs */
+ bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
+ p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
+ buf_space_left = bufsize;
+
+ while ((i < element_count) && (buf != NULL)) {
+ len = 0;
+ /* token contains one line of input data */
+ token = bcmstrtok((char**)&buf, "\n", NULL);
+ if (token == NULL) {
+ break;
+ }
+ if ((ele_token = bcmstrstr(token, ",")) == NULL) {
+ /* only element id is present */
+ if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
+ DHD_ERROR(("error: Invalid element id \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ id = bcm_atoi((char*)token);
+ data[len++] = WL_FILTER_IE_SET;
+ } else {
+ /* 
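more than the element id alone is present on this line; a
+ * hypothetical example line "221,00:50:f2,4" (element id 221,
+ * OUI 00:50:f2, type 4) takes this branch because an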
oui is present */ + ele_token = bcmstrtok(&token, ",", NULL); + if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token, + strlen(ele_token)) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid element id \n")); + ret = BCME_ERROR; + goto exit; + } + id = bcm_atoi((char*)ele_token); + data[len++] = WL_FILTER_IE_SET; + if ((oui_token = bcmstrstr(token, ",")) == NULL) { + oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token)); + if (oui_size == BCME_ERROR) { + DHD_ERROR(("error: Invalid OUI \n")); + ret = BCME_ERROR; + goto exit; + } + len += oui_size; + } else { + /* type is present */ + oui_token = bcmstrtok(&token, ",", NULL); + if ((oui_token == NULL) || ((oui_size = + dhd_parse_oui(dhd, oui_token, + &(data[len]), strlen(oui_token))) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid OUI \n")); + ret = BCME_ERROR; + goto exit; + } + len += oui_size; + if ((type = bcmstrstr(token, ",")) == NULL) { + if (dhd_check_valid_ie(dhd, token, + strlen(token)) == BCME_ERROR) { + DHD_ERROR(("error: Invalid type \n")); + ret = BCME_ERROR; + goto exit; + } + data[len++] = bcm_atoi((char*)token); + } else { + /* subtype is present */ + type = bcmstrtok(&token, ",", NULL); + if ((type == NULL) || (dhd_check_valid_ie(dhd, type, + strlen(type)) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid type \n")); + ret = BCME_ERROR; + goto exit; + } + data[len++] = bcm_atoi((char*)type); + /* subtype is last element */ + if ((token == NULL) || (*token == '\0') || + (dhd_check_valid_ie(dhd, token, + strlen(token)) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid subtype \n")); + ret = BCME_ERROR; + goto exit; + } + data[len++] = bcm_atoi((char*)token); + } + } + } + ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv, + &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ," + "status=%d\n", __FUNCTION__, ret)); + goto exit; + } + i++; + } + if (i == 0) { + /* file is empty or first line is blank */ + DHD_ERROR(("error: filter_ie file is empty or first line is blank \n")); + ret = BCME_ERROR; + goto exit; + } + /* update the iov header, set len to include all TLVs + header */ + all_tlvsize = (bufsize - buf_space_left); + p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE); + ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov, + p_filter_iov->len, NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("error: IOVAR failed, status=%d\n", ret)); + } +exit: + /* clean up */ + if (p_filter_iov) { + MFREE(dhd->osh, p_filter_iov, filter_iovsize); + } + return ret; +} +#endif /* FILTER_IE */ +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_get_wakecount(dhd_pub_t *dhdp) +{ +#ifdef BCMDBUS + return NULL; +#else + return dhd_bus_get_wakecount(dhdp); +#endif /* BCMDBUS */ +} +#endif /* DHD_WAKE_STATUS */ + +int +dhd_get_random_bytes(uint8 *buf, uint len) +{ +#ifdef BCMPCIE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + int rndlen = get_random_bytes_arch(buf, len); + if (rndlen != len) { + bzero(buf, len); + get_random_bytes(buf, len); + } +#else + get_random_bytes_arch(buf, len); +#endif +#endif /* BCMPCIE */ + return BCME_OK; +} + +#if defined(DHD_HANG_SEND_UP_TEST) +void +dhd_make_hang_with_reason(struct net_device *dev, const char *string_num) +{ + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + uint reason = HANG_REASON_MAX; + uint32 fw_test_code = 0; + dhd = DHD_DEV_INFO(dev); + + if (dhd) { + dhdp = &dhd->pub; + } + + if (!dhd || !dhdp) { + return; + } + + reason = (uint) bcm_strtoul(string_num, NULL, 0); + 
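/* base 0: bcm_strtoul() accepts decimal as well as "0x"-prefixed
+ * hex input for the hang reason code. */
+ 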
DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason)); + + if (reason == 0) { + if (dhdp->req_hang_type) { + DHD_ERROR(("%s, Clear HANG test request 0x%x\n", + __FUNCTION__, dhdp->req_hang_type)); + dhdp->req_hang_type = 0; + return; + } else { + DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__)); + return; + } + } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason)); + return; + } + + if (dhdp->req_hang_type != 0) { + DHD_ERROR(("Already HANG requested for test\n")); + return; + } + + switch (reason) { + case HANG_REASON_IOCTL_RESP_TIMEOUT: + DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason)); + dhdp->req_hang_type = reason; + fw_test_code = 102; /* resumed on timeour */ + (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code, + WLC_SET_VAR, TRUE, 0); + break; + case HANG_REASON_DONGLE_TRAP: + DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason)); + dhdp->req_hang_type = reason; + fw_test_code = 99; /* dongle trap */ + (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code, + WLC_SET_VAR, TRUE, 0); + break; + case HANG_REASON_D3_ACK_TIMEOUT: + DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason)); + dhdp->req_hang_type = reason; + break; + case HANG_REASON_BUS_DOWN: + DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason)); + dhdp->req_hang_type = reason; + break; + case HANG_REASON_PCIE_LINK_DOWN_RC_DETECT: + case HANG_REASON_PCIE_LINK_DOWN_EP_DETECT: + case HANG_REASON_MSGBUF_LIVELOCK: + dhdp->req_hang_type = 0; + DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason)); + break; + case HANG_REASON_IFACE_DEL_FAILURE: + dhdp->req_hang_type = 0; + DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason)); + break; + case HANG_REASON_HT_AVAIL_ERROR: + dhdp->req_hang_type = 0; + DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason)); + break; + case HANG_REASON_PCIE_RC_LINK_UP_FAIL: + DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason)); + dhdp->req_hang_type = reason; + break; + default: + dhdp->req_hang_type = 0; + DHD_ERROR(("Unknown HANG request (0x%x)\n", reason)); + break; + } +} +#endif /* DHD_HANG_SEND_UP_TEST */ + +#ifdef BT_OVER_PCIE +#define BT_QUIESCE TRUE +#define BT_RESUME FALSE +#define BT_QUIESCE_RESPONSE_TIMEOUT 4000 + +int +dhd_request_bt_quiesce(dhd_pub_t *dhdp) +{ + dhd_info_t * dhd = (dhd_info_t *)(dhdp->info); + long timeout = BT_QUIESCE_RESPONSE_TIMEOUT; + + if (request_bt_quiesce_ptr == NULL) { + DHD_ERROR(("%s: BT not loaded\n", __FUNCTION__)); + return BCME_OK; + } + + mutex_lock(&dhd->quiesce_lock); + DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state)); + if (dhd->dhd_quiesce_state != DHD_QUIESCE_INIT) { + DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state)); + mutex_unlock(&dhd->quiesce_lock); + return BCME_ERROR; + } + dhd->dhd_quiesce_state = REQUEST_BT_QUIESCE; + request_bt_quiesce_ptr(BT_QUIESCE); + + timeout = wait_event_timeout(dhd->quiesce_wait, + (dhd->dhd_quiesce_state == RESPONSE_BT_QUIESCE), timeout); + + DHD_ERROR(("%s: after wait quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state)); + + mutex_unlock(&dhd->quiesce_lock); + if (!timeout) { + DHD_ERROR(("%s: timeout quiesce_state = %d\n", + __FUNCTION__, dhd->dhd_quiesce_state)); + return BCME_BUSY; + } + return BCME_OK; +} + +int +dhd_request_bt_resume(dhd_pub_t *dhdp) +{ + dhd_info_t * dhd = (dhd_info_t *)(dhdp->info); + long timeout = 
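/* passed to wait_event_timeout(), which interprets it in jiffies */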
BT_QUIESCE_RESPONSE_TIMEOUT; + + if (request_bt_quiesce_ptr == NULL) { + DHD_ERROR(("%s: BT not loaded\n", __FUNCTION__)); + return BCME_OK; + } + + DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state)); + mutex_lock(&dhd->quiesce_lock); + if (dhd->dhd_quiesce_state != RESPONSE_BT_QUIESCE) { + mutex_unlock(&dhd->quiesce_lock); + return BCME_ERROR; + } + dhd->dhd_quiesce_state = REQUEST_BT_RESUME; + request_bt_quiesce_ptr(BT_RESUME); + + timeout = wait_event_timeout(dhd->quiesce_wait, + (dhd->dhd_quiesce_state == RESPONSE_BT_RESUME), timeout); + + DHD_ERROR(("%s: after wait quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state)); + + dhd->dhd_quiesce_state = DHD_QUIESCE_INIT; + mutex_unlock(&dhd->quiesce_lock); + if (!timeout) { + DHD_ERROR(("%s: timeout quiesce_state = %d\n", + __FUNCTION__, dhd->dhd_quiesce_state)); + return BCME_BUSY; + } + return BCME_OK; +} + +void +response_bt_quiesce(bool quiesce) +{ + dhd_pub_t *dhdp = g_dhd_pub; + dhd_info_t * dhd = (dhd_info_t *)(dhdp->info); + if (quiesce == BT_QUIESCE) { + if (dhd->dhd_quiesce_state == REQUEST_BT_QUIESCE) { + dhd->dhd_quiesce_state = RESPONSE_BT_QUIESCE; + wake_up(&dhd->quiesce_wait); + return; + } + } else if (quiesce == BT_RESUME) { + if (dhd->dhd_quiesce_state == REQUEST_BT_RESUME) { + dhd->dhd_quiesce_state = RESPONSE_BT_RESUME; + wake_up(&dhd->quiesce_wait); + return; + } + } + DHD_ERROR(("%s: Wrong Queisce Response=%d in State=%d\n", + __FUNCTION__, quiesce, dhd->dhd_quiesce_state)); + return; +} + +int +dhd_bus_perform_flr_with_quiesce(dhd_pub_t *dhdp, struct dhd_bus *bus, + bool init_deinit_path) +{ + int ret; + dhd_info_t * dhd = (dhd_info_t *)(dhdp->info); + bool dongle_isolation = dhdp->dongle_isolation; + mutex_lock(&dhd->quiesce_flr_lock); + dhd->dhd_quiesce_state = DHD_QUIESCE_INIT; + + /* pause data on all the interfaces */ + dhd_bus_stop_queue(dhdp->bus); + + /* Since we are about to do FLR advertise that bus down is in progress + * to other bus user contexts like Tx, Rx, IOVAR, WD etc + */ + dhdpcie_advertise_bus_cleanup(dhdp); + +#ifdef BT_OVER_PCIE + /* Disable L1SS of RC and EP + * L1SS is enabled again in dhd_bus_start if dhd_sync_with_dongle succeed + */ + dhd_bus_l1ss_enable_rc_ep(dhdp->bus, FALSE); +#endif /* BT_OVER_PCIE */ + + if (dhd_bus_force_bt_quiesce_enabled(dhdp->bus)) { + DHD_ERROR(("%s: Request Quiesce\n", __FUNCTION__)); + /* Request BT quiesce right before F0 FLR to minimise latency */ + ret = dhd_request_bt_quiesce(dhdp); /* Handle return value */ + if (ret != BCME_OK) { + DHD_ERROR(("%s: Error(%d) in Request Quiesce\n", __FUNCTION__, ret)); + /* TODO: plugin API for Toggle REGON Here */ + mutex_unlock(&dhd->quiesce_flr_lock); + return ret; + } + } + + dhd_bus_pcie_pwr_req_reload_war(dhdp->bus); + + DHD_ERROR(("%s: Perform FLR\n", __FUNCTION__)); + + ret = dhd_bus_perform_flr(dhdp->bus, dhd_bus_get_flr_force_fail(dhdp->bus)); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Error(%d) in Performing FLR\n", __FUNCTION__, ret)); + /* TODO: Ensure that BT Host Driver is out of Quiesce state before REGON + * Either by sending an unquiesce message Here OR as a part of ON/OFF API. 
+ */ + /* TODO: plugin API for Toggle REGON Here */ + mutex_unlock(&dhd->quiesce_flr_lock); + return ret; + } + + if (dhd_bus_force_bt_quiesce_enabled(dhdp->bus)) { + DHD_ERROR(("%s: Request Resume\n", __FUNCTION__)); + /* Resume BT right after F0 FLR to minimise latency */ + ret = dhd_request_bt_resume(dhdp); /* Handle return value */ + if (ret != BCME_OK) { + DHD_ERROR(("%s: Error(%d) in Request Resume\n", __FUNCTION__, ret)); + /* TODO: plugin API for Toggle REGON Here */ + mutex_unlock(&dhd->quiesce_flr_lock); + return ret; + } + } + + /* Devreset function will perform FLR again, to avoid it set dongle_isolation */ + dhdp->dongle_isolation = TRUE; + + DHD_ERROR(("%s: Devreset ON\n", __FUNCTION__)); + dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */ + + DHD_ERROR(("%s: Devreset OFF\n", __FUNCTION__)); + dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */ + + dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */ + + /* resume data on all the interfaces */ + dhd_bus_start_queue(dhdp->bus); + mutex_unlock(&dhd->quiesce_flr_lock); + + DHD_ERROR(("%s: done\n", __FUNCTION__)); + return BCME_DNGL_DEVRESET; +} +#endif /* BT_OVER_PCIE */ + +#ifdef DHD_TX_PROFILE +static int +process_layer2_headers(uint8 **p, int *plen, uint16 *type, bool is_host_sfhllc) +{ + int err = BCME_OK; + + if (*type < ETHER_TYPE_MIN) { + struct dot3_mac_llc_snap_header *sh = (struct dot3_mac_llc_snap_header *)*p; + + if (bcmp(&sh->dsap, llc_snap_hdr, SNAP_HDR_LEN) == 0) { + *type = ntoh16(sh->type); + if (*type == ETHER_TYPE_8021Q || + (is_host_sfhllc && *type != ETHER_TYPE_8021Q)) { + *p += sizeof(struct dot3_mac_llc_snap_header); + if ((*plen -= sizeof(struct dot3_mac_llc_snap_header)) <= 0) { + err = BCME_ERROR; + } + } + else { + struct dot3_mac_llc_snapvlan_header *svh = (struct + dot3_mac_llc_snapvlan_header *)*p; + + *type = ntoh16(svh->ether_type); + *p += sizeof(struct dot3_mac_llc_snapvlan_header); + if ((*plen -= sizeof(struct dot3_mac_llc_snapvlan_header)) <= 0) { + err = BCME_ERROR; + } + } + } + else { + err = BCME_ERROR; + } + } + else { + if (*type == ETHER_TYPE_8021Q) { + struct ethervlan_header *evh = (struct ethervlan_header *)*p; + + *type = ntoh16(evh->ether_type); + *p += ETHERVLAN_HDR_LEN; + if ((*plen -= ETHERVLAN_HDR_LEN) <= 0) { + err = BCME_ERROR; + } + } + else { + *p += ETHER_HDR_LEN; + if ((*plen -= ETHER_HDR_LEN) <= 0) { + err = BCME_ERROR; + } + } + } + + return err; +} + +static int +process_layer3_headers(uint8 **p, int plen, uint16 *type) +{ + int err = BCME_OK; + + if (*type == ETHER_TYPE_IP) { + struct ipv4_hdr *iph = (struct ipv4_hdr *)*p; + uint16 len = IPV4_HLEN(iph); + if ((plen -= len) <= 0) { + err = BCME_ERROR; + } else if (IP_VER(iph) == IP_VER_4 && len >= IPV4_MIN_HEADER_LEN) { + *type = IPV4_PROT(iph); + *p += len; + } else { + err = BCME_ERROR; + } + } else if (*type == ETHER_TYPE_IPV6) { + struct ipv6_hdr *ip6h = (struct ipv6_hdr *)*p; + if ((plen -= IPV6_MIN_HLEN) <= 0) { + err = BCME_ERROR; + } else if (IP_VER(ip6h) == IP_VER_6) { + *type = IPV6_PROT(ip6h); + *p += IPV6_MIN_HLEN; + if (IPV6_EXTHDR(*type)) { + uint8 proto_6 = 0; + int32 exth_len = ipv6_exthdr_len(*p, &proto_6); + if (exth_len < 0 || ((plen -= exth_len) <= 0)) { + err = BCME_ERROR; + } else { + *type = proto_6; + *p += exth_len; + } + } + } else { + err = BCME_ERROR; + } + } + + return err; +} + +bool +dhd_protocol_matches_profile(uint8 *p, int plen, const dhd_tx_profile_protocol_t + *proto, bool is_host_sfhllc) +{ + struct ether_header *eh = NULL; + bool result = FALSE; + uint16 
type = 0, ether_type = 0; + + ASSERT(proto != NULL); + ASSERT(p != NULL); + + if (plen <= 0) { + result = FALSE; + } else { + eh = (struct ether_header *)p; + type = ntoh16(eh->ether_type); + if (type < ETHER_TYPE_MIN && is_host_sfhllc) { + struct dot3_mac_llc_snap_header *dot3 = + (struct dot3_mac_llc_snap_header *)p; + ether_type = ntoh16(dot3->type); + } else { + ether_type = type; + } + + if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER && + proto->protocol_number == ether_type) { + result = TRUE; + } else if (process_layer2_headers(&p, &plen, &type, is_host_sfhllc) != BCME_OK) { + /* pass 'type' instead of 'ether_type' to process_layer2_headers + * because process_layer2_headers will take care of extraction + * of protocol types if llc snap header is present, based on + * the condition (type < ETHER_TYPE_MIN) + */ + result = FALSE; + } else if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER) { + result = proto->protocol_number == type; + } else if (proto->layer != DHD_TX_PROFILE_NETWORK_LAYER) { + result = FALSE; + } else if (process_layer3_headers(&p, plen, &type) != BCME_OK) { + result = FALSE; + } else if (proto->protocol_number == type) { + /* L4, only check TCP/UDP case */ + if ((type == IP_PROT_TCP) || (type == IP_PROT_UDP)) { + /* src/dst port are the first two uint16 fields in both tcp/udp + * hdr + */ + struct bcmudp_hdr *hdr = (struct bcmudp_hdr *)p; + + /* note that a src_port or dest_port of zero counts as a match + */ + result = ((proto->src_port == 0) || (proto->src_port == + ntoh16(hdr->src_port))) && ((proto->dest_port == 0) || + (proto->dest_port == ntoh16(hdr->dst_port))); + } else { + /* at this point we know we are dealing with layer 3, and we + * know we are not dealing with TCP or UDP; this is considered a + * match + */ + result = TRUE; + } + } + } + + return result; +} +#endif /* defined(DHD_TX_PROFILE) */ + +#ifdef DHD_TIMESYNC +void +BCMFASTPATH(dhd_parse_proto)(uint8 *pktdata, dhd_pkt_parse_t *parse) +{ + uint8 *pkt = NULL; + struct iphdr *iph = NULL; + struct ether_header *eh = (struct ether_header *)pktdata; + + if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) { + pkt = (uint8 *)&pktdata[ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN]; + } else { + pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + } + + iph = (struct iphdr *)pkt; + + parse->proto = IP_PROT_RESERVED; + parse->t1 = 0; + parse->t2 = 0; + + /* check IP header */ + if ((IPV4_HLEN(iph) != IPV4_HLEN_MIN) || (IP_VER(iph) != IP_VER_4)) { + return; + } + + if (iph->protocol == IP_PROT_ICMP) { + struct icmphdr *icmph; + + parse->proto = iph->protocol; + icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr)); + + if ((icmph->type == ICMP_ECHO) || (icmph->type == ICMP_ECHOREPLY)) { + parse->t1 = icmph->type; + parse->t2 = ntoh16(icmph->un.echo.sequence); + } else { + parse->t1 = icmph->type; + parse->t2 = icmph->code; + } + } else { + parse->proto = iph->protocol; + } + + return; +} +#endif /* DHD_TIMESYNC */ + +#ifdef BCMPCIE +#define KIRQ_PRINT_BUF_LEN 256 + +void +dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num) +{ + unsigned long flags = 0; + struct irq_desc *desc; + int i; /* cpu iterator */ + struct bcmstrbuf strbuf; + char tmp_buf[KIRQ_PRINT_BUF_LEN]; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) + desc = irq_to_desc(irq_num); + if (!desc) { + DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__)); + return; + } + bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN); + raw_spin_lock_irqsave(&desc->lock, flags); + bcm_bprintf(&strbuf, "dhd irq %u:", irq_num); + for_each_online_cpu(i) + 
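/* one count column per online CPU, like a /proc/interrupts row */
+ 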
bcm_bprintf(&strbuf, "%10u ", + desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0); + if (desc->irq_data.chip) { + if (desc->irq_data.chip->name) + bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name); + else + bcm_bprintf(&strbuf, " %8s", "-"); + } else { + bcm_bprintf(&strbuf, " %8s", "None"); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) + if (desc->irq_data.domain) + bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq); +#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL + bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); +#endif +#endif /* LINUX VERSION > 3.1.0 */ + + if (desc->name) + bcm_bprintf(&strbuf, "-%-8s", desc->name); + + DHD_ERROR(("%s\n", strbuf.origbuf)); + raw_spin_unlock_irqrestore(&desc->lock, flags); +#endif /* LINUX VERSION > 2.6.28 */ +} +#endif /* BCMPCIE */ + +void +dhd_show_kirqstats(dhd_pub_t *dhd) +{ + unsigned int irq = -1; +#ifdef BCMPCIE + dhdpcie_get_pcieirq(dhd->bus, &irq); +#endif /* BCMPCIE */ +#ifdef BCMSDIO + irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num; +#endif /* BCMSDIO */ + if (irq != -1) { +#ifdef BCMPCIE + DHD_ERROR(("DUMP data kernel irq stats : \n")); + dhd_print_kirqstats(dhd, irq); +#endif /* BCMPCIE */ +#ifdef BCMSDIO + DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n")); +#endif /* BCMSDIO */ + } +#ifdef BCMPCIE_OOB_HOST_WAKE + irq = dhd_bus_get_oob_irq_num(dhd); + if (irq) { + DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n")); + dhd_print_kirqstats(dhd, irq); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ +} + +void +dhd_print_tasklet_status(dhd_pub_t *dhd) +{ + dhd_info_t *dhdinfo; + + if (!dhd) { + DHD_ERROR(("%s : DHD is null\n", __FUNCTION__)); + return; + } + + dhdinfo = dhd->info; + + if (!dhdinfo) { + DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__)); + return; + } + + DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state)); +} + +#if defined(DHD_MQ) && defined(DHD_MQ_STATS) +void +dhd_mqstats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_info_t *dhd = NULL; + int i = 0, j = 0; + + if (!dhdp || !strbuf) + return; + + dhd = dhdp->info; + bcm_bprintf(strbuf, "\nMQ STATS:\n=========\n"); + + bcm_bprintf(strbuf, "\nTx packet arrival AC histogram:\n"); + bcm_bprintf(strbuf, "AC_BE \tAC_BK \tAC_VI \tAC_VO\n"); + bcm_bprintf(strbuf, "----- \t----- \t----- \t-----\n"); + for (i = 0; i < AC_COUNT; i++) + bcm_bprintf(strbuf, "%-10d\t", dhd->pktcnt_per_ac[i]); + + bcm_bprintf(strbuf, "\n\nTx packet arrival Q-AC histogram:\n"); + bcm_bprintf(strbuf, "\tAC_BE \tAC_BK \tAC_VI \tAC_VO\n"); + bcm_bprintf(strbuf, "\t----- \t----- \t----- \t-----"); + for (i = 0; i < MQ_MAX_QUEUES; i++) { + bcm_bprintf(strbuf, "\nQ%d\t", i); + for (j = 0; j < AC_COUNT; j++) + bcm_bprintf(strbuf, "%-8d\t", dhd->pktcnt_qac_histo[i][j]); + } + + bcm_bprintf(strbuf, "\n\nTx Q-CPU scheduling histogram:\n"); + bcm_bprintf(strbuf, "\t"); + for (i = 0; i < nr_cpu_ids; i++) + bcm_bprintf(strbuf, "CPU%d \t", i); + for (i = 0; i < MQ_MAX_QUEUES; i++) { + bcm_bprintf(strbuf, "\nQ%d\t", i); + for (j = 0; j < nr_cpu_ids; j++) + bcm_bprintf(strbuf, "%-8d\t", dhd->cpu_qstats[i][j]); + } + bcm_bprintf(strbuf, "\n"); +} +#endif /* DHD_MQ && DHD_MQ_STATS */ + +#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) +/* Procfs that provides to GDB Proxy asynchronous access to "sbreg", "membytes", + * "gdb_proxy_probe", "gdb_proxy_stop_count" iovars. + * Procfs is comprised of the root directory, + * /proc/dhd_gdb_proxy_ (here is like 'eth0', + * etc.) 
that contains files: "sbreg", "membytes", "gdb_proxy_probe", + * "gdb_proxy_stop_count". These files are to be used to access respective + * iovars. Difference from iovar is that access to these files is not blocked + * by current iovar processing (i.e. file might be accessed while wl iovar is + * stuck on breakpoint inside firmware) + * Setting address for "membytes" and "sbreg" files is performed by means of + * seek position + * For now "membytes" and "sbreg" may only be used to read/write 1, 2 or 4 + * bytes - this may be expanded later. + * For now "gdb_proxy_probe" only returns current Proxy ID, but does not set + * a new one (unlike iovar that may do both things) + */ + +/* Size of firmware address space */ +#define GDB_PROXY_FS_MEM_SIZE ((loff_t)1 << 32) + +/* Common part of 'llseek' routine for all files */ +static loff_t +gdb_proxy_fs_llseek(struct file *fp, loff_t off, int whence, loff_t file_len) +{ + loff_t pos = -1; + + switch (whence) { + case SEEK_SET: + pos = off; + break; + case SEEK_CUR: + pos = fp->f_pos + off; + break; + case SEEK_END: + pos = file_len - off; + break; + } + if ((pos < 0) || (pos > file_len)) { + return -EINVAL; + } + fp->f_pos = pos; + return pos; +} + +/* Common read/write procedure for "gdb_proxy_probe" and "gdb_proxy_stop_count" + * procfs files + * fp: file descriptor + * user_buffer_in: userspace buffer address for write operation, NULL for read + * operation + * user_buffer_out: userspace buffer address for read operation, NULL for write + * operation + * count: maximum number of bytes to read/write + * position: seek position incremented by length of data read/written + * iovar: name of iovar being accessed + * iovar_data_buf: intermediate buffer to store iovar data + * iovar_data_len: length of data, corresponded to iovar + * read_params: NULL or address of input parameter for iovar read + * read_plen: 0 or length of input parameter for iovar read + * Returns number of bytes read/written or error code + */ +static ssize_t +gdb_proxy_fs_iovar_data_op(struct file *fp, const char __user *user_buffer_in, + char __user *user_buffer_out, size_t count, loff_t *position, + const char *iovar, void *iovar_data_buf, size_t iovar_data_len, + void *read_params, size_t read_plen) +{ + dhd_info_t *dhd = (dhd_info_t *)PDE_DATA(file_inode(fp)); + int err; + if (count == 0) { + return 0; + } + /* If position out of data length - read nothing */ + if ((*position < 0) || (*position >= (loff_t)iovar_data_len)) { + return 0; + } + /* If buffer end is past structure lenght - truncate it */ + if ((*position + count) > (loff_t)iovar_data_len) { + count = (size_t)((loff_t)iovar_data_len - *position); + } + if (user_buffer_in) { + /* SET operation */ + /* Read/modify/write if not whole-buffer-operation */ + if ((*position != 0) || (count < iovar_data_len)) { + err = dhd_bus_iovar_op(&(dhd->pub), iovar, + (char *)read_params, (uint)read_plen, + (char *)iovar_data_buf, (uint)iovar_data_len, IOV_GET); + if (err) { + return -EPERM; + } + } + if (copy_from_user((char *)iovar_data_buf + (uint)*position, user_buffer_in, count)) + { + return -EPERM; + } + /* This params/plen of NULL/0 is a 'legal fiction', imposed by + * strange assert in dhd_bus_iovar_op(). After this strange + * assert, arg/arglen is copied to params/plen - and even used + * inside iovar handler! 
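+ * The SET path below therefore hands the whole (read-modified)
+ * buffer to the iovar through arg/arglen alone.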
+ */ + err = dhd_bus_iovar_op(&(dhd->pub), iovar, NULL, 0, + (char *)iovar_data_buf, (uint)iovar_data_len, IOV_SET); + } else { + /* GET operation */ + err = dhd_bus_iovar_op(&(dhd->pub), iovar, (char *)read_params, (uint)read_plen, + (char *)iovar_data_buf, (uint)iovar_data_len, IOV_GET); + } + if (err) { + return -EPERM; + } + if (user_buffer_out) { + if (copy_to_user(user_buffer_out, (char *)iovar_data_buf + (uint)*position, count)) + { + return -EPERM; + } + } + *position += count; + return count; +} + +/* Read for "gdb_proxy_probe" procfs file */ +static ssize_t +gdb_proxy_fs_probe_read(struct file *fp, char __user *user_buffer, size_t count, + loff_t *position) +{ + uint32 proxy_id = 0; + dhd_gdb_proxy_probe_data_t probe_data; + return gdb_proxy_fs_iovar_data_op(fp, NULL, user_buffer, count, position, "gdb_proxy_probe", + &probe_data, sizeof(probe_data), &proxy_id, sizeof(proxy_id)); +} + +/* Seek for "gdb_proxy_probe" file */ +static loff_t +gdb_proxy_fs_probe_llseek(struct file *fp, loff_t off, int whence) +{ + return gdb_proxy_fs_llseek(fp, off, whence, sizeof(dhd_gdb_proxy_probe_data_t)); +} + +/* File operations for "gdb_proxy_probe" procfs file */ +static const struct file_operations +gdb_proxy_fs_probe_file_ops = { + .read = gdb_proxy_fs_probe_read, + .llseek = gdb_proxy_fs_probe_llseek, +}; + +/* Read for "gdb_proxy_stop_count" procfs file */ +static ssize_t +gdb_proxy_fs_stop_count_read(struct file *fp, char __user *user_buffer, size_t count, + loff_t *position) +{ + uint32 stop_count; + return gdb_proxy_fs_iovar_data_op(fp, NULL, user_buffer, count, position, + "gdb_proxy_stop_count", &stop_count, sizeof(stop_count), NULL, 0); +} + +/* Write for "gdb_proxy_stop_count" procfs file */ +static ssize_t +gdb_proxy_fs_stop_count_write(struct file *fp, const char __user *user_buffer, size_t count, + loff_t *position) +{ + uint32 stop_count; + return gdb_proxy_fs_iovar_data_op(fp, user_buffer, NULL, count, position, + "gdb_proxy_stop_count", &stop_count, sizeof(stop_count), NULL, 0); +} + +/* Seek for "gdb_proxy_stop_count" file */ +static loff_t +gdb_proxy_fs_stop_count_llseek(struct file *fp, loff_t off, int whence) +{ + return gdb_proxy_fs_llseek(fp, off, whence, sizeof(uint32)); +} + +/* File operations for "gdb_proxy_stop_count" procfs file */ +static const struct file_operations +gdb_proxy_fs_stop_count_file_ops = { + .read = gdb_proxy_fs_stop_count_read, + .write = gdb_proxy_fs_stop_count_write, + .llseek = gdb_proxy_fs_stop_count_llseek, +}; + +/* Common read/write procedure for "membytes" and "sbreg" procfs files + * fp: file descriptor + * buffer_in: userspace buffer address for write operation, NULL for read + * operation + * buffer_out: userspace buffer address for read operation, NULL for write + * operation + * count: maximum number of bytes to read/write + * position: seek position (interpreted as memory address in firmware address + * space), + * incremented by length of data read/written + * iovar: name of iovar being accessed + * address_first: TRUE if address shall be packed first, FALSE if width + * Returns number of bytes read/written or error code + */ +static ssize_t +gdb_proxy_fs_iovar_mem_op(struct file *fp, const char __user *user_buffer_in, + char __user *user_buffer_out, size_t count, loff_t *position, + const char *iovar, bool address_first) +{ + dhd_info_t *dhd = (dhd_info_t *)PDE_DATA(file_inode(fp)); + uint32 buf[3]; + int err; + if (count == 0) { + return 0; + } + if ((count > sizeof(uint32)) || (count & (count - 1))) { + return -EINVAL; + } + 
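/*
+ * Only power-of-two accesses of 1, 2 or 4 bytes reach this point.
+ * buf[] is packed as (address, width, data) for "membytes" and as
+ * (width, address, data) for "sbreg".
+ *
+ * Hypothetical userspace sketch (interface name and firmware address
+ * are illustrative only), reading 4 bytes of dongle memory:
+ *
+ *   int fd = open("/proc/dhd_gdb_proxy_eth0/membytes", O_RDONLY);
+ *   uint32_t val;
+ *   pread(fd, &val, sizeof(val), 0x18000000);
+ */
+ 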
buf[address_first ? 0 : 1] = (uint32)(*position); + buf[address_first ? 1 : 0] = (uint32)count; + if (user_buffer_in) { + /* SET operation */ + if (copy_from_user(&buf[2], user_buffer_in, count)) { + return -EPERM; + } + /* This params/plen of NULL/0 is a 'legal fiction', imposed by + * strange assert in dhd_bus_iovar_op(). After this strange + * assert, arg/arglen is copied to params/plen - and even used + * inside iovar handler! + */ + err = dhd_bus_iovar_op(&(dhd->pub), iovar, NULL, 0, (char *)buf, sizeof(*buf) * 3, + IOV_SET); + } else { + /* GET operation */ + /* This arglen of 8 bytes (where 4 would suffice) is due to + * strange requirement of minimum arglen to be 8, hardcoded into + * "membytes" iovar definition + */ + err = dhd_bus_iovar_op(&(dhd->pub), iovar, (char *)buf, sizeof(*buf) * 2, + (char *)buf, sizeof(*buf) * 2, IOV_GET); + } + if (err) { + return -EPERM; + } + *position += count; + if (user_buffer_out) { + if (copy_to_user(user_buffer_out, &buf[0], count)) { + return -EPERM; + } + } + return count; +} + +/* Common seek procedure for "membytes" and "sbreg" procfs files */ +static loff_t +gdb_proxy_fs_memory_llseek(struct file *fp, loff_t off, int whence) +{ + return gdb_proxy_fs_llseek(fp, off, whence, GDB_PROXY_FS_MEM_SIZE); +} + +/* Read for "membytes" procfs file */ +static ssize_t +gdb_proxy_fs_membytes_read(struct file *fp, char __user *user_buffer, size_t count, + loff_t *position) +{ + return gdb_proxy_fs_iovar_mem_op(fp, NULL, user_buffer, count, position, "membytes", TRUE); +} + +/* Write for "membytes" procfs file */ +static ssize_t +gdb_proxy_fs_membytes_write(struct file *fp, const char __user *user_buffer, size_t count, + loff_t *position) +{ + return gdb_proxy_fs_iovar_mem_op(fp, user_buffer, NULL, count, position, "membytes", TRUE); +} + +/* File operations for "membytes" procfs file */ +static const struct file_operations +gdb_proxy_fs_membytes_file_ops = { + .read = gdb_proxy_fs_membytes_read, + .write = gdb_proxy_fs_membytes_write, + .llseek = gdb_proxy_fs_memory_llseek, +}; + +/* Read for "sbreg" procfs file */ +static ssize_t +gdb_proxy_fs_sbreg_read(struct file *fp, char __user *user_buffer, size_t count, loff_t *position) +{ + return gdb_proxy_fs_iovar_mem_op(fp, NULL, user_buffer, count, position, "sbreg", FALSE); +} + +/* Write for "sbreg" procfs file */ +static ssize_t +gdb_proxy_fs_sbreg_write(struct file *fp, const char __user *user_buffer, size_t count, + loff_t *position) +{ + return gdb_proxy_fs_iovar_mem_op(fp, user_buffer, NULL, count, position, "sbreg", FALSE); +} + +/* File operations for "sbreg" procfs file */ +static const struct file_operations +gdb_proxy_fs_sbreg_file_ops = { + .read = gdb_proxy_fs_sbreg_read, + .write = gdb_proxy_fs_sbreg_write, + .llseek = gdb_proxy_fs_memory_llseek, +}; + +/* If GDB Proxy procfs files set not yet created for given dhd instance - creates it */ +static void +gdb_proxy_fs_try_create(dhd_info_t *dhd, const char *dev_name) +{ + char dir_name[sizeof(dhd->gdb_proxy_fs_root_name)] = "dhd_gdb_proxy_"; + struct proc_dir_entry *root_dentry; + int i; + static const struct { + const char *file_name; + const struct file_operations *fops; + } fileinfos[] = { + {"gdb_proxy_probe", &gdb_proxy_fs_probe_file_ops}, + {"gdb_proxy_stop_count", &gdb_proxy_fs_stop_count_file_ops}, + {"membytes", &gdb_proxy_fs_membytes_file_ops}, + {"sbreg", &gdb_proxy_fs_sbreg_file_ops}, + }; + if (!dev_name || !*dev_name || dhd->gdb_proxy_fs_root) { + return; + } + strlcat_s(dir_name, dev_name, sizeof(dir_name)); + 
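/* defensively NUL-terminate the generated procfs directory name */
+ 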
dir_name[sizeof(dir_name) - 1] = 0; + root_dentry = proc_mkdir(dir_name, NULL); + if ((root_dentry == NULL) || IS_ERR(root_dentry)) { + return; + } + for (i = 0; i < ARRAYSIZE(fileinfos); ++i) { + struct proc_dir_entry *file_dentry = proc_create_data(fileinfos[i].file_name, + S_IRUGO | (fileinfos[i].fops->write ? S_IWUGO : 0), root_dentry, + fileinfos[i].fops, dhd); + if ((file_dentry == NULL) || IS_ERR(file_dentry)) { + goto fail; + } + } + dhd->gdb_proxy_fs_root = root_dentry; + memcpy_s(dhd->gdb_proxy_fs_root_name, sizeof(dhd->gdb_proxy_fs_root_name), + dir_name, sizeof(dhd->gdb_proxy_fs_root_name)); + return; +fail: + if (root_dentry) { + remove_proc_subtree(dir_name, NULL); + } +} + +/* If GDB Proxy procfs files set created for given dhd instance - removes it */ +static void +gdb_proxy_fs_remove(dhd_info_t *dhd) +{ + if (dhd->gdb_proxy_fs_root) { + remove_proc_subtree(dhd->gdb_proxy_fs_root_name, NULL); + dhd->gdb_proxy_fs_root = NULL; + bzero(dhd->gdb_proxy_fs_root_name, sizeof(dhd->gdb_proxy_fs_root_name)); + } +} +#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */ + +#ifdef DHD_MAP_LOGGING +/* Will be called from SMMU fault handler */ +void +dhd_smmu_fault_handler(uint32 axid, ulong fault_addr) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub; + uint32 irq = (uint32)-1; + + DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__)); + DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx", __FUNCTION__, axid, fault_addr)); + dhdp->smmu_fault_occurred = TRUE; +#ifdef DNGL_AXI_ERROR_LOGGING + dhdp->axi_error = TRUE; + dhdp->axi_err_dump->axid = axid; + dhdp->axi_err_dump->fault_address = fault_addr; +#endif /* DNGL_AXI_ERROR_LOGGING */ + + /* Disable PCIe IRQ */ + dhdpcie_get_pcieirq(dhdp->bus, &irq); + if (irq != (uint32)-1) { + disable_irq_nosync(irq); + } + + /* Take debug information first */ + DHD_OS_WAKE_LOCK(dhdp); + dhd_prot_smmu_fault_dump(dhdp); + DHD_OS_WAKE_UNLOCK(dhdp); + + /* Take AXI information if possible */ +#ifdef DNGL_AXI_ERROR_LOGGING +#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR + dhd_axi_error_dispatch(dhdp); +#else + dhd_axi_error(dhdp); +#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ +#endif /* DNGL_AXI_ERROR_LOGGING */ +} +EXPORT_SYMBOL(dhd_smmu_fault_handler); +#endif /* DHD_MAP_LOGGING */ + +#ifdef DHD_PKTTS +/* Get pktts flow configuration */ +int +dhd_get_pktts_flow(dhd_pub_t *dhdp, void *arg, int len) +{ + dhd_info_t *dhd = dhdp->info; + + if (!arg || len <= (sizeof(pktts_flow_t) * PKTTS_CONFIG_MAX)) { + return BCME_BADARG; + } + + return memcpy_s(arg, len, &dhd->config[0], sizeof(dhd->config)); +} + +/* Set pktts flow configuration */ +int +dhd_set_pktts_flow(dhd_pub_t *dhdp, void *params, int plen) +{ + dhd_info_t *dhd = dhdp->info; + pktts_flow_t *config; + uint32 checksum = 0; + int ret = BCME_OK; + uint32 temp; + uint32 idx = PKTTS_CONFIG_MAX; + uint32 num_config = 0; + + if (plen < sizeof(*config)) { + DHD_ERROR(("dhd_set_pktts_flow: invalid buffer length (%d)\n", plen)); + return BCME_BADLEN; + } + + config = (pktts_flow_t *)params; + + temp = htonl(config->src_ip); + checksum ^= bcm_compute_xor32((volatile uint32 *)&temp, + sizeof(temp) / sizeof(uint32)); + temp = htonl(config->dst_ip); + checksum ^= bcm_compute_xor32((volatile uint32 *)&temp, + sizeof(temp) / sizeof(uint32)); + + temp = (hton16(config->dst_port) << 16) | hton16(config->src_port); + checksum ^= bcm_compute_xor32((volatile uint32 *)&temp, + sizeof(temp) / sizeof(uint32)); + temp = config->proto; + checksum ^= bcm_compute_xor32((volatile uint32 *)&temp, + sizeof(temp) / 
sizeof(uint32)); + + /* Look for checksum match: for delete or update */ + dhd_match_pktts_flow(dhdp, checksum, &idx, &num_config); + + /* no matching config */ + if (idx == PKTTS_CONFIG_MAX) { + if (config->pkt_offset == PKTTS_OFFSET_INVALID) { + /* no matching config found for deletion */ + return BCME_NOTFOUND; + } + + /* look for free config space */ + for (idx = 0; idx < PKTTS_CONFIG_MAX; idx++) { + if (dhd->config[idx].chksum == 0) { + break; + } + } + + if (idx == PKTTS_CONFIG_MAX) { + /* no config space left */ + return BCME_NORESOURCE; + } + } + + if (config->pkt_offset == PKTTS_OFFSET_INVALID) { + /* reset if pkt_offset is zero */ + memset(&dhd->config[idx], 0, sizeof(dhd->config[idx])); + } else { + ret = memcpy_s(&dhd->config[idx], sizeof(dhd->config[idx]), + config, sizeof(*config)); + if (ret == BCME_OK) { + dhd->config[idx].chksum = checksum; + } + } + + return ret; +} + +/** + * dhd_match_pktts_flow - this api returns matching pktts config against checksum + * + * @dhdp: pointer to dhd_pub object + * @checksum: five tuple checksum + * @idx: returns index of matching pktts config + * @num_config: returns number of valid pktts config + */ +pktts_flow_t * +dhd_match_pktts_flow(dhd_pub_t *dhdp, uint32 checksum, uint32 *idx, uint32 *num_config) +{ + dhd_info_t *dhd = dhdp->info; + pktts_flow_t *flow = NULL; + uint8 i; + + for (i = 0; i < PKTTS_CONFIG_MAX; i++) { + if (dhd->config[i].chksum) { + (*num_config)++; + } + + if (checksum && (dhd->config[i].chksum == checksum)) { + flow = &dhd->config[i]; + break; + } + } + + /* update matching config index */ + if (idx) { + *idx = i; + } + + /* countinue with valid config count */ + for (; i < PKTTS_CONFIG_MAX; i++) { + if (dhd->config[i].chksum) { + (*num_config)++; + } + } + + return flow; +} + +/* Get pktts enab configuration */ +int dhd_get_pktts_enab(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + + return dhd->latency; +} + +/* Set pktts enable configuration */ +int dhd_set_pktts_enab(dhd_pub_t *dhdp, bool val) +{ + dhd_info_t *dhd = dhdp->info; + uint32 var_int = val; + int ret = BCME_OK; + uint power_val; + + /* check FW supports pktlat_ipc or pktlat_meta */ + if (!FW_SUPPORTED(dhdp, pktlat_ipc) && !FW_SUPPORTED(dhdp, pktlat_meta)) { + BCM_REFERENCE(power_val); + DHD_INFO(("Chip does not support pktlat\n")); + return ret; + } + power_val = 0; + /* Disabling mpc and PM mode for pktlat */ + ret = dhd_iovar(dhdp, 0, "mpc", (char *)&power_val, sizeof(power_val), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: Unable to set mpc 0, ret=%d\n", __FUNCTION__, ret)); + return ret; + } + power_val = PM_OFF; + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_PM, (char *)&power_val, sizeof(power_val), + TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: Unable to set PM 0, ret=%d\n", __FUNCTION__, ret)); + return ret; + } + + ret = dhd_iovar(dhdp, 0, "pktts_enab", (char *)&var_int, sizeof(var_int), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret)); + return ret; + } + + dhd->latency = val; + + return 0; +} +#endif /* DHD_PKTTS */ + +#ifdef DHD_ERPOM +static void +dhd_error_recovery(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp; + int ret = 0; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { + DHD_ERROR(("%s: init not completed, cannot initiate recovery\n", + __FUNCTION__)); + return; + } + +#ifdef BT_OVER_PCIE + if 
(dhdp->dongle_trap_due_to_bt) { + DHD_ERROR(("WLAN trapped due to BT, toggle REG_ON\n")); + /* toggle REG_ON */ + dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_BT); + return; + } +#endif /* BT_OVER_PCIE */ + + ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE); + if (ret != BCME_DNGL_DEVRESET) { + DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d," + "toggle REG_ON\n", __FUNCTION__, ret)); + /* toggle REG_ON */ + dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN); + return; + } +} + +void +dhd_schedule_reset(dhd_pub_t *dhdp) +{ + if (dhdp->enable_erpom) { + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL, + DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH); + } +} +#endif /* DHD_ERPOM */ + +#ifdef DHD_PKT_LOGGING +int +dhd_pktlog_debug_dump(dhd_pub_t *dhdp) +{ + struct net_device *primary_ndev; + struct bcm_cfg80211 *cfg; + unsigned long flags = 0; + + primary_ndev = dhd_linux_get_primary_netdev(dhdp); + if (!primary_ndev) { + DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__)); + return BCME_ERROR; + } + + cfg = wl_get_cfg(primary_ndev); + if (!cfg) { + DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__)); + return BCME_ERROR; + } + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: HAL dump is already triggered \n", __FUNCTION__)); + return BCME_ERROR; + } + + DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_OS_WAKE_LOCK(dhdp); + + if (wl_cfg80211_is_hal_started(cfg)) { + dhdp->pktlog_debug = TRUE; + dhd_dbg_send_urgent_evt(dhdp, NULL, 0); + } else { + DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__)); + } + DHD_OS_WAKE_UNLOCK(dhdp); + /* In case of dhd_os_busbusy_wait_bitmask() timeout, + * hal dump bit will not be cleared. Hence clearing it here. 
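+ * Clearing a bit that is already clear is a no-op, so the
+ * unconditional clear is safe on the normal path as well.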
+ */ + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + + return BCME_OK; +} + +void +dhd_pktlog_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (dhd_pktlog_dump_write_file(&dhd->pub)) { + DHD_ERROR(("%s: writing pktlog dump file failed\n", __FUNCTION__)); + return; + } +} + +void +dhd_schedule_pktlog_dump(dhd_pub_t *dhdp) +{ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP, + dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} +#endif /* DHD_PKT_LOGGING */ + +#ifdef DHDTCPSYNC_FLOOD_BLK +static void dhd_blk_tsfl_handler(struct work_struct * work) +{ + dhd_if_t *ifp = NULL; + dhd_pub_t *dhdp = NULL; + /* Ignore compiler warnings due to -Werror=cast-qual */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + ifp = container_of(work, dhd_if_t, blk_tsfl_work); + GCC_DIAGNOSTIC_POP(); + + if (ifp) { + dhdp = &ifp->info->pub; + if (dhdp) { + if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)|| + (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { + DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n")); + wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED); + } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)|| + (dhdp->op_mode & DHD_FLAG_STA_MODE)) { + DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n")); + wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED); + } + ifp->disconnect_tsync_flood = TRUE; + } + } +} +void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp) +{ + ifp->tsync_rcvd = 0; + ifp->tsyncack_txed = 0; + ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); +} +void dhd_reset_tcpsync_info_by_dev(struct net_device *dev) +{ + dhd_if_t *ifp = NULL; + if (dev) { + ifp = DHD_DEV_IFP(dev); + } + if (ifp) { + ifp->tsync_rcvd = 0; + ifp->tsyncack_txed = 0; + ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); + ifp->tsync_per_sec = 0; + ifp->disconnect_tsync_flood = FALSE; + } +} +#endif /* DHDTCPSYNC_FLOOD_BLK */ + +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +static void dhd_m4_state_handler(struct work_struct *work) +{ + dhd_if_t *ifp = NULL; + /* Ignore compiler warnings due to -Werror=cast-qual */ + struct delayed_work *dw = to_delayed_work(work); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + ifp = container_of(dw, dhd_if_t, m4state_work); + GCC_DIAGNOSTIC_POP(); + + if (ifp && ifp->net && + (OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) { + DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n", + ifp->net->name)); + wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT); + } +} + +void +dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh; + uint16 type; + + if (!success) { + /* XXX where does this stuff belong to? 
*/ + dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL); + + /* XXX Use packet tag when it is available to identify its type */ + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); + type = ntoh16(eh->ether_type); + if (type == ETHER_TYPE_802_1X) { + if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) { + dhd_if_t *ifp = NULL; + ifp = dhd->iflist[ifidx]; + if (!ifp || !ifp->net) { + return; + } + + DHD_INFO(("%s: M4 TX failed on %d.\n", + __FUNCTION__, ifidx)); + + OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED); + schedule_delayed_work(&ifp->m4state_work, + msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS)); + } + } + } +} + +void +dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhdinfo; + dhd_if_t *ifp; + + if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) { + DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx)); + return; + } + + dhdinfo = (dhd_info_t *)(dhdp->info); + if (!dhdinfo) { + DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__)); + return; + } + + ifp = dhdinfo->iflist[ifidx]; + if (ifp) { + cancel_delayed_work_sync(&ifp->m4state_work); + } +} +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ + +#ifdef BIGDATA_SOFTAP +void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e) +{ + struct bcm_cfg80211 *cfg; + dhd_pub_t *dhdp; + ap_sta_wq_data_t *p_wq_data; + + if (!bcm_cfg || !ndev || !e) { + WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e)); + return; + } + + cfg = (struct bcm_cfg80211 *)bcm_cfg; + dhdp = (dhd_pub_t *)cfg->pub; + + if (!dhdp || !cfg->ap_sta_info) { + WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info)); + return; + } + + p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t)); + if (unlikely(!p_wq_data)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "ap_sta_wq_data_t\n", __FUNCTION__)); + return; + } + + mutex_lock(&cfg->ap_sta_info->wq_data_sync); + + memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t)); + p_wq_data->dhdp = dhdp; + p_wq_data->bcm_cfg = cfg; + p_wq_data->ndev = (struct net_device *)ndev; + + mutex_unlock(&cfg->ap_sta_info->wq_data_sync); + + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP, + wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH); + +} +#endif /* BIGDATA_SOFTAP */ + +void +get_debug_dump_time(char *str) +{ + struct osl_timespec curtime; + unsigned long local_time; + struct rtc_time tm; + + if (!strlen(str)) { + osl_do_gettimeofday(&curtime); + local_time = (u32)(curtime.tv_sec - + (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE)); + rtc_time_to_tm(local_time, &tm); + snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS, + tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, + tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC)); + } +} + +void +clear_debug_dump_time(char *str) +{ + memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN); +} +#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING) +void +copy_debug_dump_time(char *dest, char *src) +{ + memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN); +} +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */ + +/* + * DHD RING + */ +#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__)) +#define DHD_RING_TRACE_INTERNAL(fmt, ...) 
DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__)) + +#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x +#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x + +#define DHD_RING_MAGIC 0x20170910 +#define DHD_RING_IDX_INVALID 0xffffffff + +#define DHD_RING_SYNC_LOCK_INIT(osh) osl_spin_lock_init(osh) +#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, lock) +#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock) +#define DHD_RING_SYNC_UNLOCK(lock, flags) osl_spin_unlock(lock, flags) + +typedef struct { + uint32 elem_size; + uint32 elem_cnt; + uint32 write_idx; /* next write index, -1 : not started */ + uint32 read_idx; /* next read index, -1 : not start */ + + /* protected elements during serialization */ + int lock_idx; /* start index of locked, element will not be overried */ + int lock_count; /* number of locked, from lock idx */ + + /* saved data elements */ + void *elem; +} dhd_fixed_ring_info_t; + +typedef struct { + uint32 elem_size; + uint32 elem_cnt; + uint32 idx; /* -1 : not started */ + uint32 rsvd; /* reserved for future use */ + + /* protected elements during serialization */ + atomic_t ring_locked; + /* check the overwriting */ + uint32 ring_overwrited; + + /* saved data elements */ + void *elem; +} dhd_singleidx_ring_info_t; + +typedef struct { + uint32 magic; + uint32 type; + void *ring_sync; /* spinlock for sync */ + union { + dhd_fixed_ring_info_t fixed; + dhd_singleidx_ring_info_t single; + }; +} dhd_ring_info_t; + +uint32 +dhd_ring_get_hdr_size(void) +{ + return sizeof(dhd_ring_info_t); +} + +void * +dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size, + uint32 elem_cnt, uint32 type) +{ + dhd_ring_info_t *ret_ring; + + if (!buf) { + DHD_RING_ERR(("NO RING BUFFER\n")); + return NULL; + } + + if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) { + DHD_RING_ERR(("RING SIZE IS TOO SMALL\n")); + return NULL; + } + + if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) { + DHD_RING_ERR(("UNSUPPORTED RING TYPE\n")); + return NULL; + } + + ret_ring = (dhd_ring_info_t *)buf; + ret_ring->type = type; + ret_ring->ring_sync = (void *)DHD_RING_SYNC_LOCK_INIT(dhdp->osh); + ret_ring->magic = DHD_RING_MAGIC; + + if (type == DHD_RING_TYPE_FIXED) { + ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t); + ret_ring->fixed.elem_size = elem_size; + ret_ring->fixed.elem_cnt = elem_cnt; + } else { + ret_ring->single.idx = DHD_RING_IDX_INVALID; + atomic_set(&ret_ring->single.ring_locked, 0); + ret_ring->single.ring_overwrited = 0; + ret_ring->single.rsvd = 0; + ret_ring->single.elem = buf + sizeof(dhd_ring_info_t); + ret_ring->single.elem_size = elem_size; + ret_ring->single.elem_cnt = elem_cnt; + } + + return ret_ring; +} + +void +dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + if (!ring) { + return; + } + + if (ring->magic != DHD_RING_MAGIC) { + return; + } + + if (ring->type != DHD_RING_TYPE_FIXED && + ring->type != DHD_RING_TYPE_SINGLE_IDX) { + return; + } + + DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync); + ring->ring_sync = NULL; + if (ring->type == DHD_RING_TYPE_FIXED) { + dhd_fixed_ring_info_t *fixed = &ring->fixed; + memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt); + fixed->elem_size = fixed->elem_cnt = 0; + } else { + dhd_singleidx_ring_info_t *single = &ring->single; + memset(single->elem, 
0, single->elem_size * single->elem_cnt); + single->elem_size = single->elem_cnt = 0; + } + ring->type = 0; + ring->magic = 0; +} + +static inline uint32 +__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type) +{ + uint32 diff; + uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID; + uint32 elem_size, elem_cnt; + void *elem; + + if (type == DHD_RING_TYPE_FIXED) { + dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring; + elem_size = fixed->elem_size; + elem_cnt = fixed->elem_cnt; + elem = fixed->elem; + } else if (type == DHD_RING_TYPE_SINGLE_IDX) { + dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring; + elem_size = single->elem_size; + elem_cnt = single->elem_cnt; + elem = single->elem; + } else { + DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type)); + return ret_idx; + } + + if (ptr < elem) { + DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem)); + return ret_idx; + } + diff = (uint32)((uint8 *)ptr - (uint8 *)elem); + if (diff % elem_size != 0) { + DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem)); + return ret_idx; + } + ret_idx = diff / elem_size; + if (ret_idx >= elem_cnt) { + DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx)); + } + return ret_idx; +} + +/* Sub functions for fixed ring */ +/* get counts between two indexes of ring buffer (internal only) */ +static inline int +__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end) +{ + if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) { + return 0; + } + + return (ring->elem_cnt + end - start) % ring->elem_cnt + 1; +} + +static inline int +__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring) +{ + return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx); +} + +static inline void * +__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + return NULL; + } + return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx); +} + +static inline void +__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring) +{ + uint32 next_idx; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return; + } + + next_idx = (ring->read_idx + 1) % ring->elem_cnt; + if (ring->read_idx == ring->write_idx) { + /* Become empty */ + ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID; + return; + } + + ring->read_idx = next_idx; + return; +} + +static inline void * +__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + return NULL; + } + return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx); +} + +static inline void * +__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring) +{ + uint32 tmp_idx; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + ring->read_idx = ring->write_idx = 0; + return (uint8 *)ring->elem; + } + + /* check next index is not locked */ + tmp_idx = (ring->write_idx + 1) % ring->elem_cnt; + if (ring->lock_idx == tmp_idx) { + return NULL; + } + + ring->write_idx = tmp_idx; + if (ring->write_idx == ring->read_idx) { + /* record is full, drop oldest one */ + ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt; + + } + return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx); +} + +static inline void * +__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type) +{ + uint32 cur_idx; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + + cur_idx = 
+
+static inline int
+__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
+{
+	return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
+}
+
+static inline void *
+__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
+{
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		return NULL;
+	}
+	return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
+}
+
+static inline void
+__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
+{
+	uint32 next_idx;
+
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return;
+	}
+
+	next_idx = (ring->read_idx + 1) % ring->elem_cnt;
+	if (ring->read_idx == ring->write_idx) {
+		/* Become empty */
+		ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
+		return;
+	}
+
+	ring->read_idx = next_idx;
+	return;
+}
+
+static inline void *
+__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
+{
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		return NULL;
+	}
+	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
+}
+
+static inline void *
+__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
+{
+	uint32 tmp_idx;
+
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		ring->read_idx = ring->write_idx = 0;
+		return (uint8 *)ring->elem;
+	}
+
+	/* check next index is not locked */
+	tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
+	if (ring->lock_idx == tmp_idx) {
+		return NULL;
+	}
+
+	ring->write_idx = tmp_idx;
+	if (ring->write_idx == ring->read_idx) {
+		/* record is full, drop oldest one */
+		ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
+
+	}
+	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
+}
+
+static inline void *
+__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
+{
+	uint32 cur_idx;
+
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return NULL;
+	}
+
+	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
+	if (cur_idx >= ring->elem_cnt) {
+		return NULL;
+	}
+
+	if (cur_idx == ring->write_idx) {
+		/* no more new record */
+		return NULL;
+	}
+
+	cur_idx = (cur_idx + 1) % ring->elem_cnt;
+	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void *
+__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
+{
+	uint32 cur_idx;
+
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return NULL;
+	}
+	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
+	if (cur_idx >= ring->elem_cnt) {
+		return NULL;
+	}
+	if (cur_idx == ring->read_idx) {
+		/* no more new record */
+		return NULL;
+	}
+
+	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
+	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void
+__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
+{
+	uint32 first_idx;
+	uint32 last_idx;
+	uint32 ring_filled_cnt;
+	uint32 tmp_cnt;
+
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return;
+	}
+
+	if (first_ptr) {
+		first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
+		if (first_idx >= ring->elem_cnt) {
+			return;
+		}
+	} else {
+		first_idx = ring->read_idx;
+	}
+
+	if (last_ptr) {
+		last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
+		if (last_idx >= ring->elem_cnt) {
+			return;
+		}
+	} else {
+		last_idx = ring->write_idx;
+	}
+
+	ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
+	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
+	if (tmp_cnt > ring_filled_cnt) {
+		DHD_RING_ERR(("LOCK FIRST POINTS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
+			ring->write_idx, ring->read_idx, first_idx));
+		return;
+	}
+
+	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
+	if (tmp_cnt > ring_filled_cnt) {
+		DHD_RING_ERR(("LOCK LAST POINTS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
+			ring->write_idx, ring->read_idx, last_idx));
+		return;
+	}
+
+	ring->lock_idx = first_idx;
+	ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
+	return;
+}
+
+static inline void
+__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
+{
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return;
+	}
+
+	ring->lock_idx = DHD_RING_IDX_INVALID;
+	ring->lock_count = 0;
+	return;
+}
+static inline void *
+__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
+{
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return NULL;
+	}
+	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("NO LOCK POINT\n"));
+		return NULL;
+	}
+	return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
+}
+
+static inline void *
+__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
+{
+	int lock_last_idx;
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return NULL;
+	}
+	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("NO LOCK POINT\n"));
+		return NULL;
+	}
+
+	lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
+	return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
+}
+
+static inline int
+__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
+{
+	if (ring->read_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return BCME_ERROR;
+	}
+	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("NO LOCK 
POINT\n")); + return BCME_ERROR; + } + return ring->lock_count; +} + +static inline void +__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return; + } + if (ring->lock_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("NO LOCK POINT\n")); + return; + } + + ring->lock_count--; + if (ring->lock_count <= 0) { + ring->lock_idx = DHD_RING_IDX_INVALID; + } else { + ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt; + } + return; +} + +static inline void +__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx) +{ + ring->read_idx = idx; +} + +static inline void +__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx) +{ + ring->write_idx = idx; +} + +static inline uint32 +__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring) +{ + return ring->read_idx; +} + +static inline uint32 +__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring) +{ + return ring->write_idx; +} + +/* Sub functions for single index ring */ +static inline void * +__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring) +{ + uint32 tmp_idx = 0; + + if (ring->idx == DHD_RING_IDX_INVALID) { + return NULL; + } + + if (ring->ring_overwrited) { + tmp_idx = (ring->idx + 1) % ring->elem_cnt; + } + + return (uint8 *)ring->elem + (ring->elem_size * tmp_idx); +} + +static inline void * +__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring) +{ + if (ring->idx == DHD_RING_IDX_INVALID) { + return NULL; + } + + return (uint8 *)ring->elem + (ring->elem_size * ring->idx); +} + +static inline void * +__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring) +{ + if (ring->idx == DHD_RING_IDX_INVALID) { + ring->idx = 0; + return (uint8 *)ring->elem; + } + + /* check the lock is held */ + if (atomic_read(&ring->ring_locked)) { + return NULL; + } + + /* check the index rollover */ + if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) { + ring->ring_overwrited = 1; + } + + ring->idx = (ring->idx + 1) % ring->elem_cnt; + + return (uint8 *)ring->elem + (ring->elem_size * ring->idx); +} + +static inline void * +__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type) +{ + uint32 cur_idx; + + if (ring->idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + + cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type); + if (cur_idx >= ring->elem_cnt) { + return NULL; + } + + if (cur_idx == ring->idx) { + /* no more new record */ + return NULL; + } + + cur_idx = (cur_idx + 1) % ring->elem_cnt; + + return (uint8 *)ring->elem + ring->elem_size * cur_idx; +} + +static inline void * +__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type) +{ + uint32 cur_idx; + + if (ring->idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type); + if (cur_idx >= ring->elem_cnt) { + return NULL; + } + + if (!ring->ring_overwrited && cur_idx == 0) { + /* no more new record */ + return NULL; + } + + cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt; + if (ring->ring_overwrited && cur_idx == ring->idx) { + /* no more new record */ + return NULL; + } + + return (uint8 *)ring->elem + ring->elem_size * cur_idx; +} + +static inline void +__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring) +{ + if (!atomic_read(&ring->ring_locked)) { + atomic_set(&ring->ring_locked, 1); + } +} + +static inline void 
+__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring) +{ + if (atomic_read(&ring->ring_locked)) { + atomic_set(&ring->ring_locked, 0); + } +} + +/* Get first element : oldest element */ +void * +dhd_ring_get_first(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_get_first(&ring->fixed); + } + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + ret = __dhd_singleidx_ring_get_first(&ring->single); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); + return ret; +} + +/* Free first element : oldest element */ +void +dhd_ring_free_first(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_free_first(&ring->fixed); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +void +dhd_ring_set_read_idx(void *_ring, uint32 read_idx) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +void +dhd_ring_set_write_idx(void *_ring, uint32 write_idx) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +uint32 +dhd_ring_get_read_idx(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + uint32 read_idx = DHD_RING_IDX_INVALID; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return read_idx; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); + + return read_idx; +} + +uint32 +dhd_ring_get_write_idx(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + uint32 write_idx = DHD_RING_IDX_INVALID; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return write_idx; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); + + return write_idx; +} + +/* Get latest element */ +void * +dhd_ring_get_last(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + 
DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		ret = __dhd_fixed_ring_get_last(&ring->fixed);
+	}
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		ret = __dhd_singleidx_ring_get_last(&ring->single);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return ret;
+}
+
+/* Get the next slot that can be written to.
+ * Entries that have not been read yet may be overwritten;
+ * returns NULL if the next slot is locked.
+ */
+void *
+dhd_ring_get_empty(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return NULL;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		ret = __dhd_fixed_ring_get_empty(&ring->fixed);
+	}
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		ret = __dhd_singleidx_ring_get_empty(&ring->single);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return ret;
+}
+
+void *
+dhd_ring_get_next(void *_ring, void *cur)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return NULL;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
+	}
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return ret;
+}
+
+void *
+dhd_ring_get_prev(void *_ring, void *cur)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return NULL;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
+	}
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return ret;
+}
+
+int
+dhd_ring_get_cur_size(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	int cnt = 0;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return cnt;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return cnt;
+}
+
+/* protect elements from first_ptr to last_ptr (NULL defaults to read_idx / write_idx) */
+void
+dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		__dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+/* free all locks */
+void
+dhd_ring_lock_free(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		__dhd_fixed_ring_lock_free(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void *
+dhd_ring_lock_get_first(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return NULL;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return ret;
+}
+
+void *
+dhd_ring_lock_get_last(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return NULL;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return ret;
+}
+
+int
+dhd_ring_lock_get_count(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	int ret = BCME_ERROR;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return ret;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+	return ret;
+}
+
+/* free first locked element */
+void
+dhd_ring_lock_free_first(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		__dhd_fixed_ring_lock_free_first(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_whole_lock(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		__dhd_singleidx_ring_whole_lock(&ring->single);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_whole_unlock(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		__dhd_singleidx_ring_whole_unlock(&ring->single);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+/* END of DHD RING */
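+/*
+ * Illustrative usage sketch for the ring API above (hypothetical caller:
+ * "my_rec_t", the 16-element count and the backing "buf" are assumptions
+ * made for the example, not part of the driver):
+ *
+ *	uint32 bufsz = dhd_ring_get_hdr_size() + sizeof(my_rec_t) * 16;
+ *	void *ring = dhd_ring_init(dhdp, buf, bufsz, sizeof(my_rec_t), 16,
+ *			DHD_RING_TYPE_FIXED);
+ *	my_rec_t *slot = (my_rec_t *)dhd_ring_get_empty(ring);	// slot to fill;
+ *						// the oldest unread entry may be dropped
+ *	my_rec_t *rec = (my_rec_t *)dhd_ring_get_first(ring);	// oldest entry, or NULL
+ *	rec = (my_rec_t *)dhd_ring_get_next(ring, rec);		// walk toward newest
+ *	dhd_ring_free_first(ring);				// consume the oldest entry
+ *	dhd_ring_deinit(dhdp, ring);
+ */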
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+#define DHD_VFS_INODE(dir) (dir->d_inode)
+#else
+#define DHD_VFS_INODE(dir) d_inode(dir)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
+#else
+#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+
+#if ((defined DHD_DUMP_MNGR) || (defined DNGL_AXI_ERROR_LOGGING))
+int
+dhd_file_delete(char *path)
+{
+	struct path file_path;
+	int err;
+	struct dentry *dir;
+
+	err = kern_path(path, 0, &file_path);
+
+	if (err < 0) {
+		DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
+		return err;
+	}
+	if (
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+		!d_is_file(file_path.dentry) ||
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
+		d_really_is_negative(file_path.dentry) ||
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
+		FALSE)
+	{
+		err = -EINVAL;
+	} else {
+		dir = dget_parent(file_path.dentry);
+
+		if (!IS_ERR(dir)) {
+			err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
+			dput(dir);
+		} else {
+			err = PTR_ERR(dir);
+		}
+	}
+
+	path_put(&file_path);
+
+	if (err < 0) {
+		DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
+	}
+
+	return err;
+}
+#endif
+
+#ifdef DHD_DUMP_MNGR
+static int
+dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
+{
+	int i;
+	int fm_idx = -1;
+
+	for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
+		/* XXX the dump file manager enqueues a type name into the first
+		 * empty slot, so an empty slot can never appear in the middle.
+		 */
+		if (strlen(fm_ptr->elems[i].type_name) == 0) {
+			fm_idx = i;
+			break;
+		}
+		if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
+			fm_idx = i;
+			break;
+		}
+	}
+
+	if (fm_idx == -1) {
+		return fm_idx;
+	}
+
+	if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
+		strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
+		fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
+		fm_ptr->elems[fm_idx].file_idx = 0;
+	}
+
+	return fm_idx;
+}
+
+/*
+ * dhd_dump_file_manage_enqueue - enqueue a dump file path
+ * and delete the oldest file if the file count is at its maximum.
+ */
+void
+dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
+{
+	int fm_idx;
+	int fp_idx;
+	dhd_dump_file_manage_t *fm_ptr;
+	DFM_elem_t *elem;
+
+	if (!dhd || !dhd->dump_file_manage) {
+		DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
+			__FUNCTION__, dhd, (dhd ?
dhd->dump_file_manage : NULL)));
+		return;
+	}
+
+	fm_ptr = dhd->dump_file_manage;
+
+	/* find file_manage idx */
+	DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
+	if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
+		DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
+			__FUNCTION__, fname));
+		return;
+	}
+
+	elem = &fm_ptr->elems[fm_idx];
+	fp_idx = elem->file_idx;
+	DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
+		__FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
+
+	/* delete oldest file */
+	if (strlen(elem->file_path[fp_idx]) != 0) {
+		if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
+			DHD_ERROR(("%s(): Failed to delete file: %s\n",
+				__FUNCTION__, elem->file_path[fp_idx]));
+		} else {
+			DHD_ERROR(("%s(): Succeeded in deleting file: %s\n",
+				__FUNCTION__, elem->file_path[fp_idx]));
+		}
+	}
+
+	/* save dump file path */
+	strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
+	elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
+
+	/* change file index to next file index */
+	elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
+}
+#endif /* DHD_DUMP_MNGR */
+
+#ifdef DHD_HP2P
+unsigned long
+dhd_os_hp2plock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+	unsigned long flags = 0;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		flags = osl_spin_lock(&dhd->hp2p_lock);
+	}
+
+	return flags;
+}
+
+void
+dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		osl_spin_unlock(&dhd->hp2p_lock, flags);
+	}
+}
+#endif /* DHD_HP2P */
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void
+dhd_axi_error_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = (dhd_info_t *)handle;
+	dhd_pub_t *dhdp = NULL;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		goto exit;
+	}
+
+	dhdp = &dhd->pub;
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/**
+	 * First save the AXI error information to a file,
+	 * because a panic should happen right after this.
+	 * After the DHD reset, DHD reads the file and runs the hang-event
+	 * processing to send the AXI error stored in the file to the Bigdata server.
+	 */
+	if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
+		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
+			__FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
+	}
+
+	DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHD_FW_COREDUMP
+#ifdef DHD_SSSR_DUMP
+	DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
+	dhdp->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+	DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
+	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* DHD_FW_COREDUMP */
+	DHD_OS_WAKE_UNLOCK(dhdp);
+
+exit:
+	/* Trigger kernel panic after taking necessary dumps */
+	BUG_ON(1);
+}
+
+void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
+{
+	DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+		type, DHD_WQ_WORK_AXI_ERROR_DUMP,
+		dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef SUPPORT_SET_TID
+/*
+ * Set a custom TID value for UDP frames based on the UID value.
+ * This is triggered by the android private command below.
+ * DRIVER SET_TID 
+ * Mode 0 (SET_TID_OFF)          : Disable changing the TID
+ * Mode 1 (SET_TID_ALL_UDP)      : Change the TID for all UDP frames
+ * Mode 2 (SET_TID_BASED_ON_UID) : Change the TID for UDP frames based on the target UID
+ */
+void
+dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
+{
+	struct ether_header *eh = NULL;
+	struct sock *sk = NULL;
+	uint8 *pktdata = NULL;
+	uint8 *ip_hdr = NULL;
+	uint8 cur_prio;
+	uint8 prio;
+	uint32 uid;
+
+	if (dhdp->tid_mode == SET_TID_OFF) {
+		return;
+	}
+
+	pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
+	eh = (struct ether_header *) pktdata;
+	ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
+
+	if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
+		return;
+	}
+
+	cur_prio = PKTPRIO(pkt);
+	prio = dhdp->target_tid;
+	uid = dhdp->target_uid;
+
+	if ((cur_prio == prio) ||
+		(cur_prio != PRIO_8021D_BE)) {
+		return;
+	}
+
+	sk = ((struct sk_buff*)(pkt))->sk;
+
+	if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
+		(sk && (uid == __kuid_val(sock_i_uid(sk))))) {
+		PKTSETPRIO(pkt, prio);
+	}
+}
+#endif /* SUPPORT_SET_TID */
+
+#ifdef BCMPCIE
+static void
+dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_pub_t *dhdp = NULL;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		BUG_ON(1);
+		return;
+	}
+
+	dhdp = &dhd->pub;
+	if (dhdp->dhd_induce_error == DHD_INDUCE_BH_CBP_HANG) {
+		DHD_ERROR(("%s: skip cto recovery for DHD_INDUCE_BH_CBP_HANG\n",
+			__FUNCTION__));
+		return;
+	}
+	dhdpcie_cto_recovery_handler(dhdp);
+}
+
+void
+dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
+{
+	DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+		NULL, DHD_WQ_WORK_CTO_RECOVERY,
+		dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* BCMPCIE */
+
+#ifdef DHD_WIFI_SHUTDOWN
+void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
+{
+	dhd_pub_t *dhd_pub = NULL;
+	dhd_info_t *dhd_info = NULL;
+	dhd_if_t *dhd_if = NULL;
+
+	DHD_ERROR(("%s enter\n", __FUNCTION__));
+	dhd_pub = g_dhd_pub;
+
+	if (dhd_os_check_if_up(dhd_pub)) {
+		dhd_info = (dhd_info_t *)dhd_pub->info;
+		dhd_if = dhd_info->iflist[0];
+		ASSERT(dhd_if);
+		ASSERT(dhd_if->net);
+		if (dhd_if && dhd_if->net) {
+			dhd_stop(dhd_if->net);
+		}
+	}
+}
+#endif /* DHD_WIFI_SHUTDOWN */
+#ifdef WL_AUTO_QOS
+void
+dhd_wl_sock_qos_set_status(dhd_pub_t *dhdp, unsigned long on_off)
+{
+	dhd_sock_qos_set_status(dhdp->info, on_off);
+}
+#endif /* WL_AUTO_QOS */
+
+#ifdef DHD_CFG80211_SUSPEND_RESUME
+void
+dhd_cfg80211_suspend(dhd_pub_t *dhdp)
+{
+	struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+	wl_cfg80211_suspend(cfg);
+}
+
+void
+dhd_cfg80211_resume(dhd_pub_t *dhdp)
+{
+	struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+	wl_cfg80211_resume(cfg);
+}
+#endif /* DHD_CFG80211_SUSPEND_RESUME */
+
+void
+dhd_generate_rand_mac_addr(struct ether_addr *ea_addr)
+{
+	RANDOM_BYTES(ea_addr->octet, ETHER_ADDR_LEN);
+	/* force the multicast bit to 0 and the locally administered bit to 1 */
+	ETHER_SET_UNICAST(ea_addr->octet);
+	ETHER_SET_LOCALADDR(ea_addr->octet);
+	DHD_ERROR(("%s:generated new MAC="MACDBG" \n",
+		__FUNCTION__, MAC2STRDBG(ea_addr->octet)));
+	return;
+}
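+/*
+ * Bit arithmetic behind the generator above: ETHER_SET_UNICAST clears the
+ * I/G (multicast) bit, bit 0 of octet[0], and ETHER_SET_LOCALADDR sets the
+ * U/L (locally administered) bit, bit 1. With an illustrative random first
+ * octet of 0x5d:
+ *
+ *	0x5d & ~0x01 = 0x5c	// unicast
+ *	0x5c |  0x02 = 0x5e	// locally administered
+ *
+ * so the second hex digit of every generated address is 2, 6, A or E.
+ */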
+
+void *
+dhd_get_roam_evt(dhd_pub_t *dhdp)
+{
+#if defined(DHD_PUB_ROAM_EVT)
+	return (void *)&(dhdp->roam_evt);
+#else
+	return NULL;
+#endif /* DHD_PUB_ROAM_EVT */
+}
+
+/* BANDLOCK_FILE is for Hikey only; BANDLOCK has priority over BANDLOCK_FILE */
+static void
+dhd_set_bandlock(dhd_pub_t * dhd)
+{
+#if defined(BANDLOCK)
+	int band = BANDLOCK;
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
+	}
+#elif defined(BANDLOCK_FILE)
+	int band;
+	char val[2] = {0, 0};
+	if (dhd_read_file(PATH_BANDLOCK_INFO, (char *)val, sizeof(char)) == BCME_OK) {
+		band = bcm_atoi(val);
+		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
+			DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
+		}
+	}
+#endif /* BANDLOCK */
+}
+
+#ifdef PCIE_FULL_DONGLE
+/* API to delete the flowrings and stations
+ * corresponding to the interface (ndev)
+ */
+void
+dhd_net_del_flowrings_sta(dhd_pub_t *dhd, struct net_device *ndev)
+{
+	dhd_if_t *ifp = NULL;
+
+	ifp = dhd_get_ifp_by_ndev(dhd, ndev);
+	if (ifp == NULL) {
+		DHD_ERROR(("DHD Iface Info corresponding to %s not found\n", ndev->name));
+		return;
+	}
+
+	/* For now called only in the iface delete path..
+	 * Add reason codes if this API needs to be reused in any other paths.
+	 */
+	DHD_ERROR(("%s:Clean up IFACE idx %d due to interface delete\n",
+		__FUNCTION__, ifp->idx));
+
+	dhd_del_all_sta(dhd, ifp->idx);
+	dhd_flow_rings_delete(dhd, ifp->idx);
+}
+#endif /* PCIE_FULL_DONGLE */
+
+#ifndef BCMDBUS
+static void
+dhd_deferred_socram_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)event_info;
+	DHD_ERROR(("%s ... scheduled to collect memdump over bus\n", __FUNCTION__));
+	dhd_socram_dump(dhdp->bus);
+}
+
+int
+dhd_schedule_socram_dump(dhd_pub_t *dhdp)
+{
+	int ret = 0;
+	ret = dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
+		DHD_WQ_WORK_SOC_RAM_COLLECT, dhd_deferred_socram_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+	return ret;
+}
+#endif
+
+void *dhd_get_pub(struct net_device *dev)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	if (dhdinfo)
+		return (void *)&dhdinfo->pub;
+	else {
+		printf("%s: null dhdinfo\n", __FUNCTION__);
+		return NULL;
+	}
+}
+
+void *dhd_get_conf(struct net_device *dev)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	if (dhdinfo)
+		return (void *)dhdinfo->pub.conf;
+	else {
+		printf("%s: null dhdinfo\n", __FUNCTION__);
+		return NULL;
+	}
+}
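+/*
+ * Note on the two accessors above: the net_device private area stores only
+ * a pointer to dhd_info_t, so netdev_priv() yields a dhd_info_t ** that is
+ * dereferenced once. A hypothetical caller (the concrete conf type name is
+ * an assumption for this example):
+ *
+ *	dhd_pub_t *dhdp = (dhd_pub_t *)dhd_get_pub(net);
+ *	struct dhd_conf *conf = (struct dhd_conf *)dhd_get_conf(net);
+ */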
+
+bool dhd_os_wd_timer_enabled(void *bus)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+		return FALSE;
+	}
+	return dhd->wd_timer_valid;
+}
+
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+/* This function automatically adds/deletes an interface to/from the bridge
+ * device that the primary device is in.
+ */
+static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev)
+{
+	struct net_device *primary_ndev = NULL, *br_dev = NULL;
+	int cmd;
+	struct ifreq ifr;
+
+	/* add new interface to bridge dev */
+	if (dev) {
+		int found = 0, i;
+		DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx));
+		for (i = 0 ; i < ifidx; i++) {
+			DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx));
+			/* search the primary interface */
+			if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) {
+				primary_ndev = dhd->pub.info->iflist[i]->net;
+				DHD_ERROR(("%d is primary dev %s\n", i, primary_ndev->name));
+				found = 1;
+				break;
+			}
+		}
+		if (found == 0) {
+			DHD_ERROR(("Cannot find primary dev %s\n", dev->name));
+			return;
+		}
+		cmd = SIOCBRADDIF;
+		ifr.ifr_ifindex = dev->ifindex;
+	} else { /* del interface from bridge dev */
+		primary_ndev = dhd->pub.info->iflist[ifidx]->net;
+		cmd = SIOCBRDELIF;
+		ifr.ifr_ifindex = primary_ndev->ifindex;
+	}
+	/* if primary net device is bridged */
+	if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) {
+		rtnl_lock();
+		/* get bridge device */
+		br_dev = netdev_master_upper_dev_get(primary_ndev);
+		if (br_dev) {
+			const struct net_device_ops *ops = br_dev->netdev_ops;
+			DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name));
+			if (ops) {
+				if (cmd == SIOCBRADDIF) {
+					DHD_ERROR(("br call ndo_add_slave\n"));
+					ops->ndo_add_slave(br_dev, dev);
+					/* Also bring the wds0.x interface up automatically */
+					dev_change_flags(dev, dev->flags | IFF_UP);
+				}
+				else {
+					DHD_ERROR(("br call ndo_del_slave\n"));
+					ops->ndo_del_slave(br_dev, primary_ndev);
+				}
+			}
+		}
+		else {
+			DHD_ERROR(("no br dev\n"));
+		}
+		rtnl_unlock();
+	}
+	else {
+		DHD_ERROR(("device %s is not bridged\n", primary_ndev->name));
+	}
+}
+#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
diff --git a/bcmdhd.101.10.361.x/dhd_linux.h b/bcmdhd.101.10.361.x/dhd_linux.h
new file mode 100755
index 0000000..531505d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux.h
@@ -0,0 +1,523 @@
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+/* wifi platform functions for power, interrupt and pre-alloc, either
+ * from Android-like platform device data, or Broadcom wifi platform
+ * device data.
+ *
+ */
+#ifndef __DHD_LINUX_H__
+#define __DHD_LINUX_H__
+
+#include
+#include
+#include
+#include
+#include
+#ifdef DHD_WMF
+#include
+#endif
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include
+#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+#ifdef BCMPCIE
+#include
+#endif /* BCMPCIE */
+
+#ifdef PCIE_FULL_DONGLE
+#include
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef WL_MONITOR
+#ifdef HOST_RADIOTAP_CONV
+#include
+#else
+#define MAX_RADIOTAP_SIZE	256 /* Maximum size to hold HE Radiotap header format */
+#define MAX_MON_PKT_SIZE	(4096 + MAX_RADIOTAP_SIZE)
+#endif /* HOST_RADIOTAP_CONV */
+#endif /* WL_MONITOR */
+
+/* dongle status */
+enum wifi_adapter_status {
+	WIFI_STATUS_POWER_ON = 0,
+	WIFI_STATUS_ATTACH,
+	WIFI_STATUS_FW_READY,
+	WIFI_STATUS_DETTACH
+};
+#define wifi_chk_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status))
+#define wifi_get_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status))
+#define wifi_set_adapter_status(adapter, stat) (set_bit(stat, &(adapter)->status))
+#define wifi_clr_adapter_status(adapter, stat) (clear_bit(stat, &(adapter)->status))
+#define wifi_chg_adapter_status(adapter, stat) (change_bit(stat, &(adapter)->status))
+
+#define DHD_REGISTRATION_TIMEOUT	12000 /* msec : allowed time to finish DHD registration */
+#define DHD_FW_READY_TIMEOUT	5000 /* msec : allowed time to finish FW download */
+
+typedef struct wifi_adapter_info {
+	const char	*name;
+	uint		irq_num;
+	uint		intr_flags;
+	const char	*fw_path;
+	const char	*nv_path;
+	const char	*clm_path;
+	const char	*conf_path;
+	void		*wifi_plat_data;	/* wifi ctrl func, for backward compatibility */
+	uint		bus_type;
+	uint		bus_num;
+	uint		slot_num;
+	int		index;
+	int		gpio_wl_reg_on;
+#ifdef CUSTOMER_OOB
+	int		gpio_wl_host_wake;
+#endif
+	wait_queue_head_t status_event;
+	unsigned long status;
+#if defined (BT_OVER_SDIO)
+	const char	*btfw_path;
+#endif /* defined (BT_OVER_SDIO) */
+#if defined(BCMSDIO)
+	struct sdio_func *sdio_func;
+#endif /* BCMSDIO */
+#if defined(BCMPCIE)
+	struct pci_dev *pci_dev;
+	struct pci_saved_state *pci_saved_state;
+#endif /* BCMPCIE */
+} wifi_adapter_info_t;
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC) || defined(CUSTOMER_HW4)
+#include
+#else
+#include
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+typedef struct bcmdhd_wifi_platdata {
+	uint			num_adapters;
+	wifi_adapter_info_t	*adapters;
+} bcmdhd_wifi_platdata_t;
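+/*
+ * Illustrative use of the adapter status helpers above (a sketch, not code
+ * from this driver): a probe path publishes a state bit and wakes waiters
+ * on status_event, while a waiter blocks until firmware download finishes:
+ *
+ *	wifi_set_adapter_status(adapter, WIFI_STATUS_FW_READY);
+ *	wake_up_interruptible(&adapter->status_event);
+ *
+ *	wait_event_interruptible_timeout(adapter->status_event,
+ *		wifi_get_adapter_status(adapter, WIFI_STATUS_FW_READY),
+ *		msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
+ */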
+
+/** Per-STA params. A list of dhd_sta objects is managed in dhd_if */
+typedef struct dhd_sta {
+	cumm_ctr_t cumm_ctr;	/* cumulative queue length of child flowrings */
+	uint16 flowid[NUMPRIO];	/* allocated flow ring ids (by priority) */
+	void * ifp;		/* associated dhd_if */
+	struct ether_addr ea;	/* station's ethernet MAC address */
+	struct list_head list;	/* link into dhd_if::sta_list */
+	int idx;		/* index of self in dhd_pub::sta_pool[] */
+	int ifidx;		/* index of interface in dhd */
+#ifdef DHD_WMF
+	struct dhd_sta *psta_prim;	/* primary index of psta interface */
+#endif /* DHD_WMF */
+} dhd_sta_t;
+typedef dhd_sta_t dhd_sta_pool_t;
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+typedef enum {
+	NONE_4WAY,
+	M1_4WAY,
+	M2_4WAY,
+	M3_4WAY,
+	M4_4WAY
+} msg_4way_t;
+typedef enum {
+	M3_RXED,
+	M4_TXFAILED
+} msg_4way_state_t;
+#define MAX_4WAY_TIMEOUT_MS 2000
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#if defined(DHD_LB)
+/* Dynamic CPU selection for load balancing. */
+#include
+#include
+#include
+#include
+#include
+
+/* FIXME: Make this a module param or a sysfs. */
+#if !defined(DHD_LB_PRIMARY_CPUS)
+#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
+#endif
+#if !defined(DHD_LB_SECONDARY_CPUS)
+#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
+#endif
+
+#define HIST_BIN_SIZE	9
+
+#if defined(DHD_LB_TXP)
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_tx_lb_pkttag_fr {
+	struct net_device *net;
+	int ifidx;
+} dhd_tx_lb_pkttag_fr_t;
+
+#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
+#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
+
+#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
+#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+
+#define FILE_DUMP_MAX_WAIT_TIME 4000
+
+#ifdef IL_BIGENDIAN
+#include
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* IL_BIGENDIAN */
+
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+#define MIN_TCP_WIN_SIZE 18000
+#define WIN_SIZE_SCALE_FACTOR 2
+#define MAX_TARGET_PORTS 5
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+#ifdef BLOCK_IPV6_PACKET
+#define HEX_PREF_STR "0x"
+#define UNI_FILTER_STR "010000000000"
+#define ZERO_ADDR_STR "000000000000"
+#define ETHER_TYPE_STR "0000"
+#define IPV6_FILTER_STR "20"
+#define ZERO_TYPE_STR "00"
+#endif /* BLOCK_IPV6_PACKET */
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
+#else
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+#endif /* OEM_ANDROID && BCMPCIE */
+
+#ifdef CUSTOMER_HW4
+#ifdef MIMO_ANT_SETTING
+#ifdef DHD_EXPORT_CNTL_FILE
+extern unsigned long antsel;
+#endif /* DHD_EXPORT_CNTL_FILE */
+extern int dhd_sel_ant_from_file(dhd_pub_t *dhd);
+#endif /* MIMO_ANT_SETTING */
+#ifdef WRITE_WLANINFO
+#define MAX_VERSION_LEN 512
+#ifdef DHD_EXPORT_CNTL_FILE
+extern char version_info[MAX_VERSION_LEN];
+#endif /* DHD_EXPORT_CNTL_FILE */
+extern uint32 sec_save_wlinfo(char *firm_ver, char *dhd_ver, char *nvram_p, char *clm_ver);
+#endif /* WRITE_WLANINFO */
+#ifdef LOGTRACE_FROM_FILE
+extern int dhd_logtrace_from_file(dhd_pub_t *dhd);
+#ifdef DHD_EXPORT_CNTL_FILE
+extern unsigned long logtrace_val;
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* LOGTRACE_FROM_FILE */
+#ifdef GEN_SOFTAP_INFO_FILE
+#define SOFTAP_INFO_BUF_SZ 512
+#ifdef DHD_EXPORT_CNTL_FILE
+extern char softapinfostr[SOFTAP_INFO_BUF_SZ];
+#endif /* DHD_EXPORT_CNTL_FILE */
+extern uint32 sec_save_softap_info(void);
+#endif /* GEN_SOFTAP_INFO_FILE */
+#endif /* CUSTOMER_HW4 */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+extern uint32 report_hang_privcmd_err;
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#if defined(SOFTAP_TPUT_ENHANCE)
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
+#endif /* SOFTAP_TPUT_ENHANCE */
+
+#if defined(BCM_ROUTER_DHD)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf);
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef DHD_LOG_DUMP
+/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE
+ * 2: DLD_BUF_TYPE_SPECIAL
+ */
+#define DLD_BUFFER_NUM 3
+
+#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
+#define CUSTOM_LOG_DUMP_BUFSIZE_MB	4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
+#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
+
+#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+/*
+ * Below are the different sections that use the preallocated buffer;
+ * the sum of their sizes must not exceed LOG_DUMP_TOTAL_BUFSIZE
+ */
+#ifdef EWP_BCM_TRACE
+#define LOG_DUMP_GENERAL_MAX_BUFSIZE (192 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_BCM_TRACE_MAX_BUFSIZE (64 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#else
+#define LOG_DUMP_GENERAL_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_BCM_TRACE_MAX_BUFSIZE 0
+#endif /* EWP_BCM_TRACE */
+#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_RTT_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+#if LOG_DUMP_TOTAL_BUFSIZE < \
+	(LOG_DUMP_GENERAL_MAX_BUFSIZE + LOG_DUMP_PRESERVE_MAX_BUFSIZE + \
+	LOG_DUMP_ECNTRS_MAX_BUFSIZE + LOG_DUMP_RTT_MAX_BUFSIZE + \
+	LOG_DUMP_BCM_TRACE_MAX_BUFSIZE + LOG_DUMP_FILTER_MAX_BUFSIZE)
+#error "LOG_DUMP_TOTAL_BUFSIZE is less than the sum of all rings"
+#endif
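+/*
+ * Worked example of the size check above, with the default
+ * CUSTOM_LOG_DUMP_BUFSIZE_MB of 4 and EWP_BCM_TRACE defined:
+ * general 768KB + bcm_trace 256KB + preserve 512KB + ecntrs 1024KB +
+ * rtt 1024KB + filter 512KB = 4096KB, which exactly fills the 4MB
+ * LOG_DUMP_TOTAL_BUFSIZE, so the #error does not trigger.
+ */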
+
+/* The special buffer is allocated separately in prealloc */
+#define LOG_DUMP_SPECIAL_MAX_BUFSIZE	(8 * 1024)
+
+#define LOG_DUMP_MAX_FILESIZE (8 * 1024 * 1024) /* 8 MB default */
+
+#ifdef CONFIG_LOG_BUF_SHIFT
+/* 15% of the kernel log buf size; if for example the klog buf size is 512KB,
+ * 15% of 512KB ~= 80KB
+ */
+#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \
+	(15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100))
+#endif /* CONFIG_LOG_BUF_SHIFT */
+
+#define LOG_DUMP_COOKIE_BUFSIZE	1024u
+typedef struct {
+	char *hdr_str;
+	log_dump_section_type_t sec_type;
+} dld_hdr_t;
+
+#define DHD_PRINT_BUF_NAME_LEN 30
+void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
+void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
+#endif /* DHD_LOG_DUMP */
+
+typedef struct dhd_if_event {
+	struct list_head	list;
+	wl_event_data_if_t	event;
+	char			name[IFNAMSIZ+1];
+	uint8			mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+	struct dhd_info *info;	/* back pointer to dhd_info */
+	/* OS/stack specifics */
+	struct net_device *net;
+	int			idx;	/* iface idx in dongle */
+	uint			subunit;	/* subunit */
+	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	bool			set_macaddress;
+	bool			set_multicast;
+	uint8			bssidx;	/* bsscfg index for the interface */
+	bool			attached;	/* Delayed attachment when unset */
+	bool			txflowcontrol;	/* Per interface flow control indicator */
+	char			name[IFNAMSIZ+1];	/* linux interface name */
+	char			dngl_name[IFNAMSIZ+1];	/* corresponding dongle interface name */
+	struct net_device_stats stats;
+#ifdef DHD_WMF
+	dhd_wmf_t		wmf;	/* per bsscfg wmf setting */
+	bool	wmf_psta_disable;	/* enable/disable MC pkt to each mac
+					 * of MC group behind PSTA
+					 */
+#endif /* DHD_WMF */
+#ifdef PCIE_FULL_DONGLE
+	struct list_head sta_list;	/* sll of associated stations */
+	spinlock_t	sta_list_lock;	/* lock for manipulating sll */
+#endif /* PCIE_FULL_DONGLE */
+	uint32		ap_isolate;	/* ap-isolation settings */
+#ifdef DHD_L2_FILTER
+	bool parp_enable;
+	bool parp_discard;
+	bool parp_allnode;
+	arp_table_t *phnd_arp_table;
+	/* for Per BSS modification */
+	bool dhcp_unicast;
+	bool block_ping;
+	bool grat_arp;
+	bool block_tdls;
+#endif /* DHD_L2_FILTER */
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+	uint8 *qosmap_up_table;	/* user priority table, size is UP_TABLE_MAX */
+	bool qosmap_up_table_enable;	/* flag set only when the app wants to set an additional UP */
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+#ifdef DHD_MCAST_REGEN
+	bool mcast_regen_bss_enable;
+#endif
+	bool rx_pkt_chainable;	/* set all rx packets to the chainable config by default */
+	cumm_ctr_t cumm_ctr;	/* cumulative queue length of child flowrings */
+#ifdef BCM_ROUTER_DHD
+	bool	primsta_dwds;	/* DWDS status of the primary sta interface */
+#endif /* BCM_ROUTER_DHD */
+	uint8 tx_paths_active;
+	bool del_in_progress;
+	bool static_if;	/* used to avoid some operations on static_if */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+	struct delayed_work m4state_work;
+	atomic_t m4state;
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+	uint32 tsync_rcvd;
+	uint32 tsyncack_txed;
+	u64 last_sync;
+	struct work_struct	blk_tsfl_work;
+	uint32 tsync_per_sec;
+	bool disconnect_tsync_flood;
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+	bool recv_reassoc_evt;
+	bool post_roam_evt;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#ifdef WLEASYMESH
+	uint8 _1905_al_ucast[ETHER_ADDR_LEN];
+	uint8 _1905_al_mcast[ETHER_ADDR_LEN];
+#endif /* WLEASYMESH */
+} dhd_if_t;
+
+struct ipv6_work_info_t {
+	uint8	if_idx;
+	char	ipv6_addr[IPV6_ADDR_LEN];
+	unsigned long	event;
+};
+
+typedef struct dhd_dump {
+	uint8 *buf;
+	int bufsize;
+	uint8 *hscb_buf;
+	int hscb_bufsize;
+} dhd_dump_t;
+#ifdef DNGL_AXI_ERROR_LOGGING
+typedef struct dhd_axi_error_dump {
+	ulong fault_address;
+	uint32 axid;
+	struct hnd_ext_trap_axi_error_v1 etd_axi_error_v1;
+} dhd_axi_error_dump_t;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef BCM_ROUTER_DHD
+typedef struct dhd_write_file {
+	char file_path[64];
+	uint32 file_flags;
+	uint8 *buf;
+	int bufsize;
+} dhd_write_file_t;
+#endif
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+struct dhd_rx_tx_work {
+	struct work_struct work;
+	struct sk_buff *skb;
+	struct net_device *net;
+	struct dhd_pub *pub;
+};
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef FILTER_IE
+#define FILTER_IE_PATH "/vendor/etc/wifi/filter_ie"
+#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */
+#define FILE_BLOCK_READ_SIZE 256
+#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs)
+#endif /* 
FILTER_IE */ + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) + +int dhd_wifi_platform_register_drv(void); +void dhd_wifi_platform_unregister_drv(void); +wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type, + uint32 bus_num, uint32 slot_num, unsigned long status); +wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, + uint32 slot_num); +int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec); +int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present); +int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr); +int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf, int ifidx); +#ifdef DHD_COREDUMP +int wifi_platform_set_coredump(wifi_adapter_info_t *adapter, const char *buf, int buf_len, + const char *info); +#endif /* DHD_COREDUMP */ +#ifdef CUSTOM_COUNTRY_CODE +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, + u32 flags); +#else +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode); +#endif /* CUSTOM_COUNTRY_CODE */ +void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size); +void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter); + +int dhd_get_fw_mode(struct dhd_info *dhdinfo); +bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo); +#ifdef BCM_ROUTER_DHD +void dhd_update_dpsta_interface_for_sta(dhd_pub_t* dhdp, int ifidx, void* event_data); +#endif /* BCM_ROUTER_DHD */ +#ifdef DHD_WMF +dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx); +int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx); +int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val); +void dhd_update_psta_interface_for_sta(dhd_pub_t *dhdp, char* ifname, + void* mac_addr, void* event_data); +#endif /* DHD_WMF */ + +#if defined (BT_OVER_SDIO) +int dhd_net_bus_get(struct net_device *dev); +int dhd_net_bus_put(struct net_device *dev); +#endif /* BT_OVER_SDIO */ +#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD) +#define ADPS_ENABLE 1 +#define ADPS_DISABLE 0 + +int dhd_enable_adps(dhd_pub_t *dhd, uint8 on); +#endif /* WLADPS || WLADPS_PRIVATE_CMD */ +#ifdef DHDTCPSYNC_FLOOD_BLK +extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp); +extern void dhd_reset_tcpsync_info_by_dev(struct net_device *dev); +#endif /* DHDTCPSYNC_FLOOD_BLK */ +#ifdef PCIE_FULL_DONGLE +extern void dhd_net_del_flowrings_sta(dhd_pub_t * dhd, struct net_device * ndev); +#endif /* PCIE_FULL_DONGLE */ +int dhd_get_fw_capabilities(dhd_pub_t * dhd); +#endif /* __DHD_LINUX_H__ */ diff --git a/bcmdhd.101.10.361.x/dhd_linux_exportfs.c b/bcmdhd.101.10.361.x/dhd_linux_exportfs.c new file mode 100755 index 0000000..ef5b0cc --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_exportfs.c @@ -0,0 +1,2994 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM) +#include +#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */ +#ifdef PWRSTATS_SYSFS +#include +#endif /* PWRSTATS_SYSFS */ +#ifdef WL_CFG80211 +#include +#endif /* WL_CFG80211 */ +#ifdef CSI_SUPPORT +#include +#endif /* CSI_SUPPORT */ + +#ifdef SHOW_LOGTRACE +extern dhd_pub_t* g_dhd_pub; +static int dhd_ring_proc_open(struct inode *inode, struct file *file); +ssize_t dhd_ring_proc_read(struct file *file, char *buffer, size_t tt, loff_t *loff); + +static const struct file_operations dhd_ring_proc_fops = { + .open = dhd_ring_proc_open, + .read = dhd_ring_proc_read, + .release = single_release, +}; + +static int +dhd_ring_proc_open(struct inode *inode, struct file *file) +{ + int ret = BCME_ERROR; + if (inode) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) + ret = single_open(file, 0, PDE_DATA(inode)); +#else + /* This feature is not supported for lower kernel versions */ + ret = single_open(file, 0, NULL); +#endif + } else { + DHD_ERROR(("%s: inode is NULL\n", __FUNCTION__)); + } + return ret; +} + +ssize_t +dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff) +{ + trace_buf_info_t *trace_buf_info; + int ret = BCME_ERROR; + dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)((struct seq_file *)(file->private_data))->private; + + if (ring == NULL) { + DHD_ERROR(("%s: ring is NULL\n", __FUNCTION__)); + return ret; + } + + ASSERT(g_dhd_pub); + + trace_buf_info = (trace_buf_info_t *)MALLOCZ(g_dhd_pub->osh, sizeof(trace_buf_info_t)); + if (trace_buf_info) { + dhd_dbg_read_ring_into_trace_buf(ring, trace_buf_info); + if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt))) + { + ret = -EFAULT; + goto exit; + } + if (trace_buf_info->availability == BUF_NOT_AVAILABLE) + ret = BUF_NOT_AVAILABLE; + else + ret = trace_buf_info->size; + } else + DHD_ERROR(("Memory allocation Failed\n")); + +exit: + if (trace_buf_info) { + MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t)); + } + return ret; +} + +void +dhd_dbg_ring_proc_create(dhd_pub_t *dhdp) +{ +#ifdef DEBUGABILITY + dhd_dbg_ring_t *dbg_verbose_ring = NULL; + + dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhdp, FW_VERBOSE_RING_ID); + if (dbg_verbose_ring) { + if (!proc_create_data("dhd_trace", S_IRUSR, NULL, &dhd_ring_proc_fops, + dbg_verbose_ring)) { + DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n")); + } else { + DHD_ERROR(("Created /proc/dhd_trace procfs interface\n")); + } + } else { + DHD_ERROR(("dbg_verbose_ring is NULL, /proc/dhd_trace not created\n")); + } +#endif /* 
DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+	if (!proc_create_data("dhd_ecounters", S_IRUSR, NULL, &dhd_ring_proc_fops,
+		dhdp->ecntr_dbg_ring)) {
+		DHD_ERROR(("Failed to create /proc/dhd_ecounters procfs interface\n"));
+	} else {
+		DHD_ERROR(("Created /proc/dhd_ecounters procfs interface\n"));
+	}
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+	if (!proc_create_data("dhd_rtt", S_IRUSR, NULL, &dhd_ring_proc_fops,
+		dhdp->rtt_dbg_ring)) {
+		DHD_ERROR(("Failed to create /proc/dhd_rtt procfs interface\n"));
+	} else {
+		DHD_ERROR(("Created /proc/dhd_rtt procfs interface\n"));
+	}
+#endif /* EWP_RTT_LOGGING */
+}
+
+void
+dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp)
+{
+#ifdef DEBUGABILITY
+	remove_proc_entry("dhd_trace", NULL);
+#endif /* DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+	remove_proc_entry("dhd_ecounters", NULL);
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+	remove_proc_entry("dhd_rtt", NULL);
+#endif /* EWP_RTT_LOGGING */
+
+}
+#endif /* SHOW_LOGTRACE */
+
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is the sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why a sysfs interface?
+ * This is the Linux-standard way of changing/configuring run-time parameters
+ * for a driver. We can use this interface to control "linux"-specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+extern atomic_t trace_wklock_onoff;
+
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	buf[ret] = '\n';
+	buf[ret+1] = 0;
+
+	dhd_wk_lock_stats_dump(&dhd->pub);
+	return ret+1;
+}
+
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+	BCM_REFERENCE(dhd);
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+
+	atomic_set(&trace_wklock_onoff, onoff);
+	if (atomic_read(&trace_wklock_onoff)) {
+		DHD_ERROR(("ENABLE WAKELOCK TRACE\n"));
+	} else {
+		DHD_ERROR(("DISABLE WAKELOCK TRACE\n"));
+	}
+
+	return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+#ifdef DHD_LOG_DUMP
+extern int logdump_periodic_flush;
+extern int logdump_ecntr_enable;
+static ssize_t
+show_logdump_periodic_flush(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long val;
+
+	val = logdump_periodic_flush;
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+	return ret;
+}
+
+static ssize_t
+logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	val = bcm_strtoul(buf, NULL, 10);
+
+	sscanf(buf, "%lu", &val);
+	if (val != 0 && val != 1) {
+		return -EINVAL;
+	}
+	logdump_periodic_flush = val;
+	return count;
+}
+
+static ssize_t
+show_logdump_ecntr(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long val;
+
+	val = logdump_ecntr_enable;
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+	return ret;
+}
+
+static ssize_t
+logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	val = bcm_strtoul(buf, NULL, 10);
+
+	sscanf(buf, "%lu", &val);
+	if (val != 0 && val != 1) {
+		return -EINVAL;
+	}
+	
logdump_ecntr_enable = val; + return count; +} + +#endif /* DHD_LOG_DUMP */ + +extern uint enable_ecounter; +static ssize_t +show_enable_ecounter(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + + onoff = enable_ecounter; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + dhd_pub_t *dhdp; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return count; + } + dhdp = &dhd->pub; + if (!FW_SUPPORTED(dhdp, ecounters)) { + DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__)); + return count; + } + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + if (enable_ecounter == onoff) { + DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter)); + return count; + } + + enable_ecounter = onoff; + dhd_ecounter_configure(dhdp, enable_ecounter); + + return count; +} + +#if defined(DHD_QOS_ON_SOCK_FLOW) +#include + +static ssize_t +show_sock_qos_onoff(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = dhd_sock_qos_get_status(dhd); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +update_sock_qos_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + dhd_sock_qos_set_status(dhd, onoff); + + return count; +} + +static ssize_t +show_sock_qos_upgrade(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = dhd_sock_qos_get_force_upgrade(dhd); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +update_sock_qos_upgrade(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + dhd_sock_qos_set_force_upgrade(dhd, onoff); + + return count; +} + +static ssize_t +show_sock_qos_numfl_upgrd_thresh(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + int upgrade_thresh; + dhd_info_t *dhd = (dhd_info_t *)dev; + + upgrade_thresh = dhd_sock_qos_get_numfl_upgrd_thresh(dhd); + ret = scnprintf(buf, PAGE_SIZE - 1, "%d \n", + upgrade_thresh); + return ret; +} + +static ssize_t +update_sock_qos_numfl_upgrd_thresh(struct dhd_info *dev, const char *buf, size_t count) +{ + int upgrade_thresh; + dhd_info_t *dhd = (dhd_info_t *)dev; + + sscanf(buf, "%d", &upgrade_thresh); + if (upgrade_thresh < 0) { + return -EINVAL; + } + + dhd_sock_qos_set_numfl_upgrd_thresh(dhd, upgrade_thresh); + + return count; +} + +static ssize_t +show_sock_qos_avgpktsize_thresh(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long avgpktsize_low, avgpktsize_high; + dhd_info_t *dhd = (dhd_info_t *)dev; + + dhd_sock_qos_get_avgpktsize_thresh(dhd, &avgpktsize_low, &avgpktsize_high); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu %lu\n", + avgpktsize_low, avgpktsize_high); + + return ret; +} + +static ssize_t +update_sock_qos_avgpktsize_thresh(struct dhd_info *dev, const char *buf, size_t count) +{ + 
unsigned long avgpktsize_low, avgpktsize_high; + dhd_info_t *dhd = (dhd_info_t *)dev; + + sscanf(buf, "%lu %lu", &avgpktsize_low, &avgpktsize_high); + + dhd_sock_qos_set_avgpktsize_thresh(dhd, avgpktsize_low, avgpktsize_high); + + return count; +} + +static ssize_t +show_sock_qos_numpkts_thresh(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long numpkts_low, numpkts_high; + dhd_info_t *dhd = (dhd_info_t *)dev; + + dhd_sock_qos_get_numpkts_thresh(dhd, &numpkts_low, &numpkts_high); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu %lu\n", + numpkts_low, numpkts_high); + + return ret; +} + +static ssize_t +update_sock_qos_numpkts_thresh(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long numpkts_low, numpkts_high; + dhd_info_t *dhd = (dhd_info_t *)dev; + + sscanf(buf, "%lu %lu", &numpkts_low, &numpkts_high); + + dhd_sock_qos_set_numpkts_thresh(dhd, numpkts_low, numpkts_high); + + return count; +} + +static ssize_t +show_sock_qos_detectcnt_thresh(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned char detectcnt_inc, detectcnt_dec; + dhd_info_t *dhd = (dhd_info_t *)dev; + + dhd_sock_qos_get_detectcnt_thresh(dhd, &detectcnt_inc, &detectcnt_dec); + ret = scnprintf(buf, PAGE_SIZE - 1, "%d %d\n", + detectcnt_inc, detectcnt_dec); + + return ret; +} + +static ssize_t +update_sock_qos_detectcnt_thresh(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned int detectcnt_inc, detectcnt_dec; + dhd_info_t *dhd = (dhd_info_t *)dev; + + sscanf(buf, "%u %u", &detectcnt_inc, &detectcnt_dec); + + dhd_sock_qos_set_detectcnt_thresh(dhd, detectcnt_inc, detectcnt_dec); + + return count; +} + +static ssize_t +show_sock_qos_detectcnt_upgrd_thresh(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned int detectcnt_upgrd_thresh; + dhd_info_t *dhd = (dhd_info_t *)dev; + + detectcnt_upgrd_thresh = dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd); + ret = scnprintf(buf, PAGE_SIZE - 1, "%d \n", detectcnt_upgrd_thresh); + + return ret; +} + +static ssize_t +update_sock_qos_detectcnt_upgrd_thresh(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned int detectcnt_upgrd_thresh; + dhd_info_t *dhd = (dhd_info_t *)dev; + + sscanf(buf, "%u", &detectcnt_upgrd_thresh); + + dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd, detectcnt_upgrd_thresh); + + return count; +} + +static ssize_t +show_sock_qos_maxfl(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned int maxfl; + dhd_info_t *dhd = (dhd_info_t *)dev; + + maxfl = dhd_sock_qos_get_maxfl(dhd); + ret = scnprintf(buf, PAGE_SIZE - 1, "%u \n", maxfl); + + return ret; +} + +static ssize_t +update_sock_qos_maxfl(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned int maxfl; + dhd_info_t *dhd = (dhd_info_t *)dev; + + sscanf(buf, "%u", &maxfl); + + dhd_sock_qos_set_maxfl(dhd, maxfl); + + return count; +} + +static ssize_t +show_sock_qos_stats(struct dhd_info *dev, char *buf) +{ + dhd_info_t *dhd = (dhd_info_t *)dev; + + dhd_sock_qos_show_stats(dhd, buf, PAGE_SIZE); + + return PAGE_SIZE - 1; +} + +static ssize_t +clear_sock_qos_stats(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long clear; + dhd_info_t *dhd = (dhd_info_t *)dev; + + clear = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &clear); + if (clear != 0) { + return -EINVAL; + } + + dhd_sock_qos_clear_stats(dhd); + + return count; +} + +#ifdef DHD_QOS_ON_SOCK_FLOW_UT + +/* + * test_id sub_id Description + * ------ ------ ----------- + * 1 0 psk_qos->sk_fl + * The number of free sk_fl entries in the 
Table is exhausted + * and more sockets are still getting created + * + * 1 1 psk_qos->sk_fl + * is Full for more than x seconds, there are lot of periodic + * flows, but none of them are detected for upgrade for more + * than 'x' seconds + * + * 2 Force upgrade the socket flows to reach skfl_upgrade_thresh + * check the behaviour + * + * Downgrade one of the sk_fls and check if the 'next' pending + * sk_fl is getting upgraded. The sk_fl getting upgraded + * should follow FIFO scheme. + * + * 3 Upgrade a socket flow ... after some time downgrade the + * same and check if the sk_fl is actually getting downgraded + * Keep switching the behavior every 'x' seconds and observe + * the switches + */ +static ssize_t +do_sock_qos_unit_test(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned int test_id = 0; + unsigned int sub_id = 0; + dhd_info_t *dhd = (dhd_info_t *)dev; + int ret; + + BCM_REFERENCE(dhd); + + ret = sscanf(buf, "%d %d", &test_id, &sub_id); + if (ret < 1) { + return -EINVAL; + } + + return count; +} + +#endif /* DHD_QOS_ON_SOCK_FLOW_UT */ +#endif /* DHD_QOS_ON_SOCK_FLOW */ + +#ifdef DHD_SSSR_DUMP +static ssize_t +show_sssr_enab(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + + onoff = sssr_enab; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +set_sssr_enab(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + sssr_enab = (uint)onoff; + + return count; +} + +static ssize_t +show_fis_enab(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + + onoff = fis_enab; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +set_fis_enab(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + fis_enab = (uint)onoff; + + return count; +} +#endif /* DHD_SSSR_DUMP */ + +#define FMT_BUFSZ 32 +extern char firmware_path[]; + +static ssize_t +show_firmware_path(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", firmware_path); + + return ret; +} + +static ssize_t +store_firmware_path(struct dhd_info *dev, const char *buf, size_t count) +{ + char fmt_spec[FMT_BUFSZ] = ""; + + if ((int)strlen(buf) >= MOD_PARAM_PATHLEN) { + return -EINVAL; + } + + snprintf(fmt_spec, FMT_BUFSZ, "%%%ds", MOD_PARAM_PATHLEN - 1); + sscanf(buf, fmt_spec, firmware_path); + + return count; +} + +extern char nvram_path[]; + +static ssize_t +show_nvram_path(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", nvram_path); + + return ret; +} + +static ssize_t +store_nvram_path(struct dhd_info *dev, const char *buf, size_t count) +{ + char fmt_spec[FMT_BUFSZ] = ""; + + if ((int)strlen(buf) >= MOD_PARAM_PATHLEN) { + return -EINVAL; + } + + snprintf(fmt_spec, FMT_BUFSZ, "%%%ds", MOD_PARAM_PATHLEN - 1); + sscanf(buf, fmt_spec, nvram_path); + + return count; +} + +#ifdef PWRSTATS_SYSFS +typedef struct wl_pwrstats_sysfs { + uint64 current_ts; + uint64 pm_cnt; + uint64 pm_dur; + uint64 pm_last_entry_us; + uint64 awake_cnt; + uint64 awake_dur; + uint64 awake_last_entry_us; + uint64 l0_cnt; + uint64 l0_dur_us; + uint64 l1_cnt; + uint64 l1_dur_us; + uint64 l1_1_cnt; + uint64 
l1_1_dur_us; + uint64 l1_2_cnt; + uint64 l1_2_dur_us; + uint64 l2_cnt; + uint64 l2_dur_us; +} wl_pwrstats_sysfs_t; + +uint64 last_delta = 0; +wl_pwrstats_sysfs_t accumstats = {0, }; +wl_pwrstats_sysfs_t laststats = {0, }; +static const char pwrstr_cnt[] = "count:"; +static const char pwrstr_dur[] = "duration_usec:"; +static const char pwrstr_ts[] = "last_entry_timestamp_usec:"; + +void update_pwrstats_cum(uint64 *accum, uint64 *last, uint64 *now, bool force) +{ + if (accum) { /* accumulation case, ex; counts, duration */ + if (*now < *last) { + if (force || ((*last - *now) > USEC_PER_MSEC)) { + /* not to update accum for pm_dur/awake_dur case */ + *accum += *now; + *last = *now; + } + } else { + *accum += (*now - *last); + *last = *now; + } + } else if (*now != 0) { /* last entry timestamp case */ + *last = *now + last_delta; + } +} + +static const uint16 pwrstats_req_type[] = { + WL_PWRSTATS_TYPE_PCIE, + WL_PWRSTATS_TYPE_PM_ACCUMUL +}; +#define PWRSTATS_REQ_TYPE_NUM sizeof(pwrstats_req_type) / sizeof(uint16) +#define PWRSTATS_IOV_BUF_LEN OFFSETOF(wl_pwrstats_t, data) \ + + sizeof(uint32) * PWRSTATS_REQ_TYPE_NUM \ + + sizeof(wl_pwr_pcie_stats_t) \ + + sizeof(wl_pwr_pm_accum_stats_v1_t) \ + + (uint)strlen("pwrstats") + 1 + +static ssize_t +show_pwrstats_path(struct dhd_info *dev, char *buf) +{ + int err = 0; + void *p_data = NULL; + ssize_t ret = 0; + dhd_info_t *dhd = (dhd_info_t *)dev; + struct net_device *ndev = dhd_linux_get_primary_netdev(&dhd->pub); + char *iovar_buf = NULL; + wl_pwrstats_query_t *p_query = NULL; + wl_pwrstats_sysfs_t pwrstats_sysfs = {0, }; + wl_pwrstats_t *pwrstats; + uint len, taglen, i; + uint16 type; + uint64 ts_sec, ts_usec, time_delta; + + ASSERT(g_dhd_pub); + + len = PWRSTATS_IOV_BUF_LEN; + iovar_buf = (char *)MALLOCZ(g_dhd_pub->osh, len); + if (iovar_buf == NULL) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + goto done; + } + + /* Alloc req buffer */ + len = OFFSETOF(wl_pwrstats_query_t, type) + + PWRSTATS_REQ_TYPE_NUM * sizeof(uint16); + p_query = (wl_pwrstats_query_t *)MALLOCZ(g_dhd_pub->osh, len); + if (p_query == NULL) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + goto done; + } + + /* Build a list of types */ + p_query->length = PWRSTATS_REQ_TYPE_NUM; + for (i = 0; i < PWRSTATS_REQ_TYPE_NUM; i++) { + p_query->type[i] = pwrstats_req_type[i]; + } + + /* Query with desired type list */ + err = wldev_iovar_getbuf(ndev, "pwrstats", p_query, len, + iovar_buf, PWRSTATS_IOV_BUF_LEN, NULL); + if (err != BCME_OK) { + DHD_ERROR(("error (%d) - size = %zu\n", err, sizeof(wl_pwrstats_t))); + goto done; + } + + /* Check version */ + pwrstats = (wl_pwrstats_t *) iovar_buf; + if (dtoh16(pwrstats->version) != WL_PWRSTATS_VERSION) { + DHD_ERROR(("PWRSTATS Version mismatch\n")); + goto done; + } + + /* Parse TLVs */ + len = dtoh16(pwrstats->length) - WL_PWR_STATS_HDRLEN; + p_data = pwrstats->data; + do { + type = dtoh16(((uint16*)p_data)[0]); + taglen = dtoh16(((uint16*)p_data)[1]); + + if ((taglen < BCM_XTLV_HDR_SIZE) || (taglen > len)) { + DHD_ERROR(("Bad len %d for tag %d, remaining len %d\n", + taglen, type, len)); + goto done; + } + + if (taglen & 0xF000) { + DHD_ERROR(("Resrved bits in len %d for tag %d, remaining len %d\n", + taglen, type, len)); + goto done; + } + + switch (type) { + case WL_PWRSTATS_TYPE_PCIE: + { + wl_pwr_pcie_stats_t *stats = + (wl_pwr_pcie_stats_t *)p_data; + + if (taglen < sizeof(wl_pwr_pcie_stats_t)) { + DHD_ERROR(("Short len for %d: %d < %d\n", + type, taglen, (int)sizeof(wl_pwr_pcie_stats_t))); + goto done; + 
} + + if (dtoh32(stats->pcie.l0_cnt) == 0) { + DHD_ERROR(("link stats are not supported for this pcie core\n")); + } + + pwrstats_sysfs.l0_cnt = dtoh32(stats->pcie.l0_cnt); + pwrstats_sysfs.l0_dur_us = dtoh32(stats->pcie.l0_usecs); + pwrstats_sysfs.l1_cnt = dtoh32(stats->pcie.l1_cnt); + pwrstats_sysfs.l1_dur_us = dtoh32(stats->pcie.l1_usecs); + pwrstats_sysfs.l1_1_cnt = dtoh32(stats->pcie.l1_1_cnt); + pwrstats_sysfs.l1_1_dur_us = dtoh32(stats->pcie.l1_1_usecs); + pwrstats_sysfs.l1_2_cnt = dtoh32(stats->pcie.l1_2_cnt); + pwrstats_sysfs.l1_2_dur_us = dtoh32(stats->pcie.l1_2_usecs); + pwrstats_sysfs.l2_cnt = dtoh32(stats->pcie.l2_cnt); + pwrstats_sysfs.l2_dur_us = dtoh32(stats->pcie.l2_usecs); + } + break; + + case WL_PWRSTATS_TYPE_PM_ACCUMUL: + { + wl_pwr_pm_accum_stats_v1_t *stats = + (wl_pwr_pm_accum_stats_v1_t *)p_data; + + if (taglen < sizeof(wl_pwr_pm_accum_stats_v1_t)) { + DHD_ERROR(("Short len for %d: %d < %d\n", type, + taglen, (int)sizeof(wl_pwr_pm_accum_stats_v1_t))); + goto done; + } + + pwrstats_sysfs.current_ts = + dtoh64(stats->accum_data.current_ts); + pwrstats_sysfs.pm_cnt = + dtoh64(stats->accum_data.pm_cnt); + pwrstats_sysfs.pm_dur = + dtoh64(stats->accum_data.pm_dur); + pwrstats_sysfs.pm_last_entry_us = + dtoh64(stats->accum_data.pm_last_entry_us); + pwrstats_sysfs.awake_cnt = + dtoh64(stats->accum_data.awake_cnt); + pwrstats_sysfs.awake_dur = + dtoh64(stats->accum_data.awake_dur); + pwrstats_sysfs.awake_last_entry_us = + dtoh64(stats->accum_data.awake_last_entry_us); + } + break; + + default: + DHD_ERROR(("Skipping uknown %d-byte tag %d\n", taglen, type)); + break; + } + + /* Adjust length to account for padding, but don't exceed total len */ + taglen = (ROUNDUP(taglen, 4) > len) ? len : ROUNDUP(taglen, 4); + len -= taglen; + *(uint8**)&p_data += taglen; + } while (len >= BCM_XTLV_HDR_SIZE); + + OSL_GET_LOCALTIME(&ts_sec, &ts_usec); + time_delta = ts_sec * USEC_PER_SEC + ts_usec - pwrstats_sysfs.current_ts; + if ((time_delta > last_delta) && + ((time_delta - last_delta) > USEC_PER_SEC)) { + last_delta = time_delta; + } + + update_pwrstats_cum(&accumstats.awake_cnt, &laststats.awake_cnt, + &pwrstats_sysfs.awake_cnt, TRUE); + update_pwrstats_cum(&accumstats.awake_dur, &laststats.awake_dur, + &pwrstats_sysfs.awake_dur, FALSE); + update_pwrstats_cum(&accumstats.pm_cnt, &laststats.pm_cnt, &pwrstats_sysfs.pm_cnt, + TRUE); + update_pwrstats_cum(&accumstats.pm_dur, &laststats.pm_dur, &pwrstats_sysfs.pm_dur, + FALSE); + update_pwrstats_cum(NULL, &laststats.awake_last_entry_us, + &pwrstats_sysfs.awake_last_entry_us, TRUE); + update_pwrstats_cum(NULL, &laststats.pm_last_entry_us, + &pwrstats_sysfs.pm_last_entry_us, TRUE); + + ret += scnprintf(buf, PAGE_SIZE - 1, "AWAKE:\n"); + ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt, + accumstats.awake_cnt); + ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur, + accumstats.awake_dur); + ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_ts, + laststats.awake_last_entry_us); + ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "ASLEEP:\n"); + ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt, + accumstats.pm_cnt); + ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur, + accumstats.pm_dur); + ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_ts, + laststats.pm_last_entry_us); + + update_pwrstats_cum(&accumstats.l0_cnt, &laststats.l0_cnt, &pwrstats_sysfs.l0_cnt, + TRUE); + 
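/* PCIe link-state residency counters: force accumulation; only the pm_dur/awake_dur pair above needs the non-forced guard */ +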
update_pwrstats_cum(&accumstats.l0_dur_us, &laststats.l0_dur_us,
+		&pwrstats_sysfs.l0_dur_us, TRUE);
+	update_pwrstats_cum(&accumstats.l1_cnt, &laststats.l1_cnt, &pwrstats_sysfs.l1_cnt,
+		TRUE);
+	update_pwrstats_cum(&accumstats.l1_dur_us, &laststats.l1_dur_us,
+		&pwrstats_sysfs.l1_dur_us, TRUE);
+	update_pwrstats_cum(&accumstats.l1_1_cnt, &laststats.l1_1_cnt,
+		&pwrstats_sysfs.l1_1_cnt, TRUE);
+	update_pwrstats_cum(&accumstats.l1_1_dur_us, &laststats.l1_1_dur_us,
+		&pwrstats_sysfs.l1_1_dur_us, TRUE);
+	update_pwrstats_cum(&accumstats.l1_2_cnt, &laststats.l1_2_cnt,
+		&pwrstats_sysfs.l1_2_cnt, TRUE);
+	update_pwrstats_cum(&accumstats.l1_2_dur_us, &laststats.l1_2_dur_us,
+		&pwrstats_sysfs.l1_2_dur_us, TRUE);
+	update_pwrstats_cum(&accumstats.l2_cnt, &laststats.l2_cnt, &pwrstats_sysfs.l2_cnt,
+		TRUE);
+	update_pwrstats_cum(&accumstats.l2_dur_us, &laststats.l2_dur_us,
+		&pwrstats_sysfs.l2_dur_us, TRUE);
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L0:\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+		accumstats.l0_cnt);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+		accumstats.l0_dur_us);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L1:\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+		accumstats.l1_cnt);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+		accumstats.l1_dur_us);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L1_1:\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+		accumstats.l1_1_cnt);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+		accumstats.l1_1_dur_us);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L1_2:\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+		accumstats.l1_2_cnt);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+		accumstats.l1_2_dur_us);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L2:\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+		accumstats.l2_cnt);
+	ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+		accumstats.l2_dur_us);
+
+done:
+	if (p_query) {
+		/* 'len' was consumed by the TLV parse loop above, so recompute
+		 * the original query size for MFREE
+		 */
+		MFREE(g_dhd_pub->osh, p_query, OFFSETOF(wl_pwrstats_query_t, type) +
+			PWRSTATS_REQ_TYPE_NUM * sizeof(uint16));
+	}
+	if (iovar_buf) {
+		MFREE(g_dhd_pub->osh, iovar_buf, PWRSTATS_IOV_BUF_LEN);
+	}
+
+	return ret;
+}
+#endif /* PWRSTATS_SYSFS */
+
+/*
+ * Generic Attribute Structure for DHD.
+ * If we have to add a new sysfs entry under /sys/wifi/, we have
+ * to instantiate an object of type dhd_attr, populate it with
+ * the required show/store functions (ex:- dhd_attr_cpumask_primary)
+ * and add the object to the default_file_attrs[] array, which gets
+ * registered to the dhd kobject (created as "wifi" BUS_TYPE in dhd_sysfs_init()).
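+ *
+ * A minimal sketch (foo, show_foo and store_foo are illustrative names,
+ * not existing driver symbols):
+ *     static struct dhd_attr dhd_attr_foo =
+ *         __ATTR(foo, 0660, show_foo, store_foo);
+ * with &dhd_attr_foo.attr then appended to default_file_attrs[].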
+ */ + +struct dhd_attr { + struct attribute attr; + ssize_t(*show)(struct dhd_info *, char *); + ssize_t(*store)(struct dhd_info *, const char *, size_t count); +}; + +#if defined(DHD_TRACE_WAKE_LOCK) +static struct dhd_attr dhd_attr_wklock = + __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff); +#endif /* defined(DHD_TRACE_WAKE_LOCK */ + +#ifdef DHD_LOG_DUMP +static struct dhd_attr dhd_attr_logdump_periodic_flush = + __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush, + logdump_periodic_flush_onoff); +static struct dhd_attr dhd_attr_logdump_ecntr = + __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr, + logdump_ecntr_onoff); +#endif /* DHD_LOG_DUMP */ + +static struct dhd_attr dhd_attr_ecounters = + __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff); + +#if defined(DHD_QOS_ON_SOCK_FLOW) +static struct dhd_attr dhd_attr_sock_qos_onoff = + __ATTR(sock_qos_onoff, 0660, show_sock_qos_onoff, update_sock_qos_onoff); + +static struct dhd_attr dhd_attr_sock_qos_stats = + __ATTR(sock_qos_stats, 0660, show_sock_qos_stats, clear_sock_qos_stats); + +static struct dhd_attr dhd_attr_sock_qos_upgrade = + __ATTR(sock_qos_upgrade, 0660, show_sock_qos_upgrade, update_sock_qos_upgrade); + +static struct dhd_attr dhd_attr_sock_qos_numfl_upgrd_thresh = + __ATTR(sock_qos_numfl_upgrd_thresh, 0660, show_sock_qos_numfl_upgrd_thresh, + update_sock_qos_numfl_upgrd_thresh); + +static struct dhd_attr dhd_attr_sock_qos_avgpktsize_thresh = + __ATTR(sock_qos_avgpktsize_thresh, 0660, show_sock_qos_avgpktsize_thresh, + update_sock_qos_avgpktsize_thresh); + +static struct dhd_attr dhd_attr_sock_qos_numpkts_thresh = + __ATTR(sock_qos_numpkts_thresh, 0660, show_sock_qos_numpkts_thresh, + update_sock_qos_numpkts_thresh); + +static struct dhd_attr dhd_attr_sock_qos_detectcnt_thresh = + __ATTR(sock_qos_detectcnt_thresh, 0660, show_sock_qos_detectcnt_thresh, + update_sock_qos_detectcnt_thresh); + +static struct dhd_attr dhd_attr_sock_qos_detectcnt_upgrd_thresh = + __ATTR(sock_qos_detectcnt_upgrd_thresh, 0660, show_sock_qos_detectcnt_upgrd_thresh, + update_sock_qos_detectcnt_upgrd_thresh); + +static struct dhd_attr dhd_attr_sock_qos_maxfl = + __ATTR(sock_qos_maxfl, 0660, show_sock_qos_maxfl, + update_sock_qos_maxfl); +#if defined(DHD_QOS_ON_SOCK_FLOW_UT) +static struct dhd_attr dhd_attr_sock_qos_unit_test = + __ATTR(sock_qos_unit_test, 0660, NULL, do_sock_qos_unit_test); +#endif +#endif /* DHD_QOS_ON_SOCK_FLOW */ + +#ifdef DHD_SSSR_DUMP +static struct dhd_attr dhd_attr_sssr_enab = + __ATTR(sssr_enab, 0660, show_sssr_enab, set_sssr_enab); +static struct dhd_attr dhd_attr_fis_enab = + __ATTR(fis_enab, 0660, show_fis_enab, set_fis_enab); +#endif /* DHD_SSSR_DUMP */ + +static struct dhd_attr dhd_attr_firmware_path = + __ATTR(firmware_path, 0660, show_firmware_path, store_firmware_path); + +static struct dhd_attr dhd_attr_nvram_path = + __ATTR(nvram_path, 0660, show_nvram_path, store_nvram_path); + +#ifdef PWRSTATS_SYSFS +static struct dhd_attr dhd_attr_pwrstats_path = + __ATTR(power_stats, 0660, show_pwrstats_path, NULL); +#endif /* PWRSTATS_SYSFS */ + +#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj) +#define to_attr(a) container_of(a, struct dhd_attr, attr) + +#ifdef DHD_MAC_ADDR_EXPORT +struct ether_addr sysfs_mac_addr; +static ssize_t +show_mac_addr(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, MACF, + (uint32)sysfs_mac_addr.octet[0], (uint32)sysfs_mac_addr.octet[1], + (uint32)sysfs_mac_addr.octet[2], 
(uint32)sysfs_mac_addr.octet[3], + (uint32)sysfs_mac_addr.octet[4], (uint32)sysfs_mac_addr.octet[5]); + + return ret; +} + +static ssize_t +set_mac_addr(struct dhd_info *dev, const char *buf, size_t count) +{ + if (!bcm_ether_atoe(buf, &sysfs_mac_addr)) { + DHD_ERROR(("Invalid Mac Address \n")); + return -EINVAL; + } + + DHD_ERROR(("Mac Address set with "MACDBG"\n", MAC2STRDBG(&sysfs_mac_addr))); + + return count; +} + +static struct dhd_attr dhd_attr_macaddr = + __ATTR(mac_addr, 0660, show_mac_addr, set_mac_addr); +#endif /* DHD_MAC_ADDR_EXPORT */ + +#ifdef DHD_FW_COREDUMP +/* + * XXX The filename to store memdump is defined for each platform. + * - The default path of CUSTOMER_HW4 device is "PLATFORM_PATH/.memdump.info" + * - Brix platform will take default path "/installmedia/.memdump.info" + * New platforms can add their ifdefs accordingly below. + */ + +#ifdef CONFIG_X86 +#if defined(OEM_ANDROID) +#define MEMDUMPINFO_LIVE PLATFORM_PATH".memdump.info" +#define MEMDUMPINFO_INST "/data/.memdump.info" +#define MEMDUMPINFO MEMDUMPINFO_LIVE +#else /* FC19 and Others */ +#define MEMDUMPINFO PLATFORM_PATH".memdump.info" +#endif /* OEM_ANDROID */ +#else /* For non x86 platforms */ +#define MEMDUMPINFO PLATFORM_PATH".memdump.info" +#endif /* CONFIG_X86 */ + +uint32 +get_mem_val_from_file(void) +{ + struct file *fp = NULL; + uint32 mem_val = DUMP_MEMFILE_MAX; + char *p_mem_val = NULL; + char *filepath = MEMDUMPINFO; + int ret = 0; + + /* Read memdump info from the file */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); +#if defined(CONFIG_X86) && defined(OEM_ANDROID) + /* Check if it is Live Brix Image */ + if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) { + goto done; + } + /* Try if it is Installed Brix Image */ + filepath = MEMDUMPINFO_INST; + DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath)); + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } +#else /* Non Brix Android platform */ + goto done; +#endif /* CONFIG_X86 && OEM_ANDROID */ + } + + /* Handle success case */ + ret = kernel_read_compat(fp, 0, (char *)&mem_val, sizeof(uint32)); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + p_mem_val = (char*)&mem_val; + p_mem_val[sizeof(uint32) - 1] = '\0'; + mem_val = bcm_atoi(p_mem_val); + + filp_close(fp, NULL); + +done: + return mem_val; +} + +void dhd_get_memdump_info(dhd_pub_t *dhd) +{ +#ifndef DHD_EXPORT_CNTL_FILE + uint32 mem_val = DUMP_MEMFILE_MAX; + + mem_val = get_mem_val_from_file(); + if (mem_val != DUMP_MEMFILE_MAX) + dhd->memdump_enabled = mem_val; +#ifdef DHD_INIT_DEFAULT_MEMDUMP + if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX) + mem_val = DUMP_MEMFILE_BUGON; +#endif /* DHD_INIT_DEFAULT_MEMDUMP */ +#else +#ifdef DHD_INIT_DEFAULT_MEMDUMP + if (dhd->memdump_enabled == 0 || dhd->memdump_enabled == DUMP_MEMFILE_MAX) + dhd->memdump_enabled = DUMP_MEMFILE; +#endif /* DHD_INIT_DEFAULT_MEMDUMP */ +#endif /* !DHD_EXPORT_CNTL_FILE */ +#ifdef BCMQT + /* In QT environment collecting memdump on FW TRAP, IOVAR timeouts, + * is taking more time and makes system unresponsive so disabling it. + * if needed memdump can be collected through 'dhd upload' command. 
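+ * (For that reason memdump_enabled is forced to DUMP_DISABLED just below.)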
+ */ + dhd->memdump_enabled = DUMP_DISABLED; +#endif +#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG + /* override memdump_enabled value to avoid once trap issues */ + if (dhd_bus_get_fw_mode(dhd) == DHD_FLAG_MFG_MODE && + (dhd->memdump_enabled == DUMP_MEMONLY || + dhd->memdump_enabled == DUMP_MEMFILE_BUGON)) { + dhd->memdump_enabled = DUMP_MEMFILE; + DHD_ERROR(("%s : Override memdump_value to %d\n", + __FUNCTION__, dhd->memdump_enabled)); + } +#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */ + DHD_ERROR(("%s: MEMDUMP ENABLED = %u\n", __FUNCTION__, dhd->memdump_enabled)); +} + +#ifdef DHD_EXPORT_CNTL_FILE +static ssize_t +show_memdump_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + dhd_pub_t *dhdp; + + if (!dev) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return ret; + } + + dhdp = &dev->pub; + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", dhdp->memdump_enabled); + return ret; +} + +static ssize_t +set_memdump_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long memval; + dhd_pub_t *dhdp; + + if (!dev) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return count; + } + dhdp = &dev->pub; + + memval = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &memval); + + dhdp->memdump_enabled = (uint32)memval; + + DHD_ERROR(("%s: MEMDUMP ENABLED = %u\n", __FUNCTION__, dhdp->memdump_enabled)); + return count; +} + +static struct dhd_attr dhd_attr_memdump = + __ATTR(memdump, 0660, show_memdump_info, set_memdump_info); +#endif /* DHD_EXPORT_CNTL_FILE */ +#endif /* DHD_FW_COREDUMP */ + +#ifdef BCMASSERT_LOG +/* + * XXX The filename to store assert type is defined for each platform. + * New platforms can add their ifdefs accordingly below. + */ +#define ASSERTINFO PLATFORM_PATH".assert.info" + +int +get_assert_val_from_file(void) +{ + struct file *fp = NULL; + char *filepath = ASSERTINFO; + char *p_mem_val = NULL; + int mem_val = -1; + + /* + * Read assert info from the file + * 0: Trigger Kernel crash by panic() + * 1: Print out the logs and don't trigger Kernel panic. (default) + * 2: Trigger Kernel crash by BUG() + * File doesn't exist: Keep default value (1). + */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + } else { + int ret = kernel_read_compat(fp, 0, (char *)&mem_val, sizeof(uint32)); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + } else { + p_mem_val = (char *)&mem_val; + p_mem_val[sizeof(uint32) - 1] = '\0'; + mem_val = bcm_atoi(p_mem_val); + DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); + } + filp_close(fp, NULL); + } + +#ifdef CUSTOMER_HW4_DEBUG + mem_val = (mem_val >= 0) ? mem_val : 1; +#else + mem_val = (mem_val >= 0) ? 
mem_val : 0; +#endif /* CUSTOMER_HW4_DEBUG */ + return mem_val; +} + +void dhd_get_assert_info(dhd_pub_t *dhd) +{ +#ifndef DHD_EXPORT_CNTL_FILE + int mem_val = -1; + + mem_val = get_assert_val_from_file(); + + g_assert_type = mem_val; +#endif /* !DHD_EXPORT_CNTL_FILE */ +} + +#ifdef DHD_EXPORT_CNTL_FILE +static ssize_t +show_assert_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (!dev) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return ret; + } + + ret = scnprintf(buf, PAGE_SIZE -1, "%d\n", g_assert_type); + return ret; + +} + +static ssize_t +set_assert_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long assert_val; + + assert_val = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &assert_val); + + g_assert_type = (uint32)assert_val; + + DHD_ERROR(("%s: ASSERT ENABLED = %lu\n", __FUNCTION__, assert_val)); + return count; + +} + +static struct dhd_attr dhd_attr_assert = + __ATTR(assert, 0660, show_assert_info, set_assert_info); +#endif /* DHD_EXPORT_CNTL_FILE */ +#endif /* BCMASSERT_LOG */ + +#ifdef DHD_EXPORT_CNTL_FILE +#if defined(WRITE_WLANINFO) +static ssize_t +show_wifiver_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%s", version_info); + return ret; +} + +static ssize_t +set_wifiver_info(struct dhd_info *dev, const char *buf, size_t count) +{ + DHD_ERROR(("Do not set version info\n")); + return -EINVAL; +} + +static struct dhd_attr dhd_attr_wifiver = + __ATTR(wifiver, 0660, show_wifiver_info, set_wifiver_info); +#endif /* WRITE_WLANINFO */ + +#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG) +char cidinfostr[MAX_VNAME_LEN]; + +static ssize_t +show_cid_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + +#ifdef USE_DIRECT_VID_TAG + ret = scnprintf(buf, PAGE_SIZE -1, "%x%x", cidinfostr[VENDOR_OFF], cidinfostr[MD_REV_OFF]); +#endif /* USE_DIRECT_VID_TAG */ +#ifdef USE_CID_CHECK + ret = scnprintf(buf, PAGE_SIZE -1, "%s", cidinfostr); +#endif /* USE_CID_CHECK */ + return ret; +} + +static ssize_t +set_cid_info(struct dhd_info *dev, const char *buf, size_t count) +{ +#ifdef USE_DIRECT_VID_TAG + uint32 stored_vid = 0, md_rev = 0, vendor = 0; + uint32 vendor_mask = 0x00FF; + + stored_vid = bcm_strtoul(buf, NULL, 16); + + DHD_ERROR(("%s : stored_vid : 0x%x\n", __FUNCTION__, stored_vid)); + md_rev = stored_vid & vendor_mask; + vendor = stored_vid >> 8; + + memset(cidinfostr, 0, sizeof(cidinfostr)); + + cidinfostr[MD_REV_OFF] = (char)md_rev; + cidinfostr[VENDOR_OFF] = (char)vendor; + DHD_INFO(("CID string %x%x\n", cidinfostr[VENDOR_OFF], cidinfostr[MD_REV_OFF])); +#endif /* USE_DIRECT_VID_TAG */ +#ifdef USE_CID_CHECK + int len = strlen(buf) + 1; + int maxstrsz; + maxstrsz = MAX_VNAME_LEN; + + scnprintf(cidinfostr, ((len > maxstrsz) ? 
maxstrsz : len), "%s", buf); + DHD_INFO(("%s : CID info string\n", cidinfostr)); +#endif /* USE_CID_CHECK */ + return count; +} + +static struct dhd_attr dhd_attr_cidinfo = + __ATTR(cid, 0660, show_cid_info, set_cid_info); +#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */ + +#if defined(GEN_SOFTAP_INFO_FILE) +char softapinfostr[SOFTAP_INFO_BUF_SZ]; +static ssize_t +show_softap_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%s", softapinfostr); + return ret; +} + +static ssize_t +set_softap_info(struct dhd_info *dev, const char *buf, size_t count) +{ + DHD_ERROR(("Do not set sofap related info\n")); + return -EINVAL; +} + +static struct dhd_attr dhd_attr_softapinfo = + __ATTR(softap, 0660, show_softap_info, set_softap_info); +#endif /* GEN_SOFTAP_INFO_FILE */ + +#if defined(MIMO_ANT_SETTING) +unsigned long antsel; + +static ssize_t +show_ant_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", antsel); + return ret; +} + +static ssize_t +set_ant_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long ant_val; + + ant_val = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &ant_val); + + /* + * Check value + * 0 - Not set, handle same as file not exist + */ + if (ant_val > 3) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n", + __FUNCTION__, ant_val)); + return -EINVAL; + } + + antsel = ant_val; + DHD_ERROR(("[WIFI_SEC] %s: Set Antinfo val = %lu \n", __FUNCTION__, antsel)); + return count; +} + +static struct dhd_attr dhd_attr_antinfo = + __ATTR(ant, 0660, show_ant_info, set_ant_info); +#endif /* MIMO_ANT_SETTING */ + +#ifdef DHD_PM_CONTROL_FROM_FILE +extern uint32 pmmode_val; +static ssize_t +show_pm_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (pmmode_val == 0xFF) { + ret = scnprintf(buf, PAGE_SIZE -1, "PM mode is not set\n"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", pmmode_val); + } + return ret; +} + +static ssize_t +set_pm_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long pm_val; + + pm_val = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &pm_val); + + if (pm_val > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n", + __FUNCTION__, pm_val)); + return -EINVAL; + } + + pmmode_val = (uint32)pm_val; + DHD_ERROR(("[WIFI_SEC] %s: Set pminfo val = %u\n", __FUNCTION__, pmmode_val)); + return count; +} + +static struct dhd_attr dhd_attr_pminfo = + __ATTR(pm, 0660, show_pm_info, set_pm_info); +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#ifdef LOGTRACE_FROM_FILE +unsigned long logtrace_val = 1; + +static ssize_t +show_logtrace_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", logtrace_val); + return ret; +} + +static ssize_t +set_logtrace_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + + onoff = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + logtrace_val = onoff; + DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from sysfs = %lu\n", + __FUNCTION__, logtrace_val)); + return count; +} + +static struct dhd_attr dhd_attr_logtraceinfo = + __ATTR(logtrace, 0660, show_logtrace_info, set_logtrace_info); +#endif /* LOGTRACE_FROM_FILE */ + +#ifdef USE_WFA_CERT_CONF +#ifdef BCMSDIO +uint32 bus_txglom = VALUENOTSET; + +static ssize_t +show_bustxglom(struct dhd_info *dev, char 
*buf) +{ + ssize_t ret = 0; + + if (bus_txglom == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", "bustxglom not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", bus_txglom); + } + return ret; +} + +static ssize_t +set_bustxglom(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = (uint32)bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + bus_txglom = onoff; + DHD_ERROR(("[WIFI_SEC] %s: BUS TXGLOM On/Off from sysfs = %u\n", + __FUNCTION__, bus_txglom)); + return count; +} + +static struct dhd_attr dhd_attr_bustxglom = + __ATTR(bustxglom, 0660, show_bustxglom, set_bustxglom); +#endif /* BCMSDIO */ + +#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) +uint32 roam_off = VALUENOTSET; + +static ssize_t +show_roamoff(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (roam_off == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "roam_off not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", roam_off); + } + return ret; +} + +static ssize_t +set_roamoff(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + roam_off = onoff; + DHD_ERROR(("[WIFI_SEC] %s: ROAM On/Off from sysfs = %u\n", + __FUNCTION__, roam_off)); + return count; +} + +static struct dhd_attr dhd_attr_roamoff = + __ATTR(roamoff, 0660, show_roamoff, set_roamoff); +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ + +#ifdef USE_WL_FRAMEBURST +uint32 frameburst = VALUENOTSET; + +static ssize_t +show_frameburst(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (frameburst == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "frameburst not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", frameburst); + } + return ret; +} + +static ssize_t +set_frameburst(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + frameburst = onoff; + DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n", + __FUNCTION__, frameburst)); + return count; +} + +static struct dhd_attr dhd_attr_frameburst = + __ATTR(frameburst, 0660, show_frameburst, set_frameburst); +#endif /* USE_WL_FRAMEBURST */ + +#ifdef USE_WL_TXBF +uint32 txbf = VALUENOTSET; + +static ssize_t +show_txbf(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (txbf == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "txbf not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", txbf); + } + return ret; +} + +static ssize_t +set_txbf(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + txbf = onoff; + DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n", + __FUNCTION__, txbf)); + return count; +} + +static struct dhd_attr dhd_attr_txbf = + __ATTR(txbf, 0660, show_txbf, set_txbf); +#endif /* USE_WL_TXBF */ + +#ifdef PROP_TXSTATUS +uint32 proptx = VALUENOTSET; + 
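+/* "proptx" node: VALUENOTSET (default) means proptxstatus was never
+ * configured through sysfs; set_proptx() below accepts 0, 1 or 2.
+ */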
+static ssize_t
+show_proptx(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (proptx == VALUENOTSET) {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "proptx not set from sysfs");
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", proptx);
+	}
+	return ret;
+}
+
+static ssize_t
+set_proptx(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 onoff;
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+	sscanf(buf, "%u", &onoff);
+
+	if (onoff > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+			__FUNCTION__, onoff));
+		return -EINVAL;
+	}
+
+	proptx = onoff;
+	DHD_ERROR(("[WIFI_SEC] %s: PROPTX On/Off from sysfs = %u\n",
+		__FUNCTION__, proptx));
+	return count;
+}
+
+static struct dhd_attr dhd_attr_proptx =
+	__ATTR(proptx, 0660, show_proptx, set_proptx);
+
+#endif /* PROP_TXSTATUS */
+#endif /* USE_WFA_CERT_CONF */
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM)
+#define BAD_AP_MAC_ADDR_ELEMENT_NUM 6
+wl_bad_ap_mngr_t *g_bad_ap_mngr = NULL;
+
+static ssize_t
+show_adps_bam_list(struct dhd_info *dev, char *buf)
+{
+	int offset = 0;
+	ssize_t ret = 0;
+
+	wl_bad_ap_info_t *bad_ap;
+	wl_bad_ap_info_entry_t *entry;
+
+	if (g_bad_ap_mngr == NULL)
+		return ret;
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	list_for_each_entry(entry, &g_bad_ap_mngr->list, list) {
+		bad_ap = &entry->bad_ap;
+
+		/* bound each print by the space remaining in the page */
+		ret = scnprintf(buf + offset, PAGE_SIZE - 1 - offset, MACF"\n",
+			bad_ap->bssid.octet[0], bad_ap->bssid.octet[1],
+			bad_ap->bssid.octet[2], bad_ap->bssid.octet[3],
+			bad_ap->bssid.octet[4], bad_ap->bssid.octet[5]);
+
+		offset += ret;
+	}
+	GCC_DIAGNOSTIC_POP();
+
+	return offset;
+}
+
+static ssize_t
+store_adps_bam_list(struct dhd_info *dev, const char *buf, size_t count)
+{
+	int ret;
+	int i;
+	size_t len;
+	int offset;
+	char tmp[128];
+	uint32 mac[BAD_AP_MAC_ADDR_ELEMENT_NUM];
+	wl_bad_ap_info_t bad_ap;
+
+	if (g_bad_ap_mngr == NULL)
+		return count;
+
+	len = count;
+	offset = 0;
+	do {
+		/* parse into full-width temporaries; scanning through (uint32 *)
+		 * casts of the 1-byte octet[] slots would clobber adjacent bytes
+		 */
+		ret = sscanf(buf + offset, MACF"\n",
+			&mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]);
+		if (ret != BAD_AP_MAC_ADDR_ELEMENT_NUM) {
+			DHD_ERROR(("%s - fail to parse bad ap data\n", __FUNCTION__));
+			return -EINVAL;
+		}
+
+		for (i = 0; i < BAD_AP_MAC_ADDR_ELEMENT_NUM; i++) {
+			bad_ap.bssid.octet[i] = (uint8)mac[i];
+		}
+
+		ret = wl_bad_ap_mngr_add(g_bad_ap_mngr, &bad_ap);
+		if (ret < 0)
+			return ret;
+
+		ret = snprintf(tmp, ARRAYSIZE(tmp), MACF"\n",
+			bad_ap.bssid.octet[0], bad_ap.bssid.octet[1],
+			bad_ap.bssid.octet[2], bad_ap.bssid.octet[3],
+			bad_ap.bssid.octet[4], bad_ap.bssid.octet[5]);
+		if (ret < 0) {
+			DHD_ERROR(("%s - fail to get bad ap data length(%d)\n", __FUNCTION__, ret));
+			return ret;
+		}
+
+		len -= ret;
+		offset += ret;
+	} while (len > 0);
+
+	return count;
+}
+
+static struct dhd_attr dhd_attr_adps_bam =
+	__ATTR(bad_ap_list, 0660, show_adps_bam_list, store_adps_bam_list);
+#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+uint32 report_hang_privcmd_err = 1;
+
+static ssize_t
+show_hang_privcmd_err(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%u\n", report_hang_privcmd_err);
+	return ret;
+}
+
+static ssize_t
+set_hang_privcmd_err(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 val;
+
+	val = bcm_atoi(buf);
+	sscanf(buf, "%u", &val);
+
+	report_hang_privcmd_err = val ?
1 : 0; + DHD_INFO(("%s: Set report HANG for private cmd error: %d\n", + __FUNCTION__, report_hang_privcmd_err)); + return count; +} + +static struct dhd_attr dhd_attr_hang_privcmd_err = + __ATTR(hang_privcmd_err, 0660, show_hang_privcmd_err, set_hang_privcmd_err); +#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */ + +#if defined(SHOW_LOGTRACE) +static ssize_t +show_control_logtrace(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_logtrace); + return ret; +} + +static ssize_t +set_control_logtrace(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 val; + + val = bcm_atoi(buf); + + control_logtrace = val; + DHD_ERROR(("%s: Set control logtrace: %d\n", __FUNCTION__, control_logtrace)); + return count; +} + +static struct dhd_attr dhd_attr_control_logtrace = +__ATTR(control_logtrace, 0660, show_control_logtrace, set_control_logtrace); +#endif /* SHOW_LOGTRACE */ + +#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB) +uint8 control_he_enab = 1; +#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */ + +#if defined(CUSTOM_CONTROL_HE_ENAB) +static ssize_t +show_control_he_enab(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_he_enab); + return ret; +} + +static ssize_t +set_control_he_enab(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 val; + + val = bcm_atoi(buf); + + control_he_enab = val ? 1 : 0; + DHD_ERROR(("%s: Set control he enab: %d\n", __FUNCTION__, control_he_enab)); + return count; +} + +static struct dhd_attr dhd_attr_control_he_enab= +__ATTR(control_he_enab, 0660, show_control_he_enab, set_control_he_enab); +#endif /* CUSTOM_CONTROL_HE_ENAB */ + +#if defined(WLAN_ACCEL_BOOT) +static ssize_t +show_wl_accel_force_reg_on(struct dhd_info *dhd, char *buf) +{ + ssize_t ret = 0; + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return ret; + } + + ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", dhd->wl_accel_force_reg_on); + return ret; +} + +static ssize_t +set_wl_accel_force_reg_on(struct dhd_info *dhd, const char *buf, size_t count) +{ + uint32 val; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return count; + } + + val = bcm_atoi(buf); + + dhd->wl_accel_force_reg_on = val ? 1 : 0; + DHD_ERROR(("%s: wl_accel_force_reg_on: %d\n", __FUNCTION__, dhd->wl_accel_force_reg_on)); + return count; +} + +static struct dhd_attr dhd_attr_wl_accel_force_reg_on= +__ATTR(wl_accel_force_reg_on, 0660, show_wl_accel_force_reg_on, set_wl_accel_force_reg_on); +#endif /* WLAN_ACCEL_BOOT */ + +#if defined(AGG_H2D_DB) +extern bool agg_h2d_db_enab; +extern uint32 agg_h2d_db_timeout; +extern uint32 agg_h2d_db_inflight_thresh; + +static ssize_t +show_agg_h2d_db_enab(struct dhd_info *dhd, char *buf) +{ + ssize_t ret = 0; + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return ret; + } + + ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", agg_h2d_db_enab); + return ret; +} + +static ssize_t +set_agg_h2d_db_enab(struct dhd_info *dhd, const char *buf, size_t count) +{ + uint32 val; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return count; + } + + val = bcm_atoi(buf); + + agg_h2d_db_enab = val ? 
TRUE : FALSE;
+	DHD_ERROR(("%s: agg_h2d_db_enab: %d\n", __FUNCTION__, agg_h2d_db_enab));
+	return count;
+}
+
+static struct dhd_attr dhd_attr_agg_h2d_db_enab =
+__ATTR(agg_h2d_db_enab, 0660, show_agg_h2d_db_enab, set_agg_h2d_db_enab);
+
+static ssize_t
+show_agg_h2d_db_inflight_thresh(struct dhd_info *dhd, char *buf)
+{
+	ssize_t ret = 0;
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return ret;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", agg_h2d_db_inflight_thresh);
+	return ret;
+}
+
+static ssize_t
+set_agg_h2d_db_inflight_thresh(struct dhd_info *dhd, const char *buf, size_t count)
+{
+	uint32 val;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return count;
+	}
+
+	val = bcm_atoi(buf);
+
+	agg_h2d_db_inflight_thresh = val;
+	DHD_ERROR(("%s: agg_h2d_db_inflight_thresh: %d\n", __FUNCTION__,
+		agg_h2d_db_inflight_thresh));
+	return count;
+}
+
+static struct dhd_attr dhd_attr_agg_h2d_db_inflight_thresh =
+__ATTR(agg_h2d_db_inflight_thresh, 0660, show_agg_h2d_db_inflight_thresh,
+	set_agg_h2d_db_inflight_thresh);
+
+static ssize_t
+show_agg_h2d_db_timeout(struct dhd_info *dhd, char *buf)
+{
+	ssize_t ret = 0;
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return ret;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", agg_h2d_db_timeout);
+	return ret;
+}
+
+static ssize_t
+set_agg_h2d_db_timeout(struct dhd_info *dhd, const char *buf, size_t count)
+{
+	uint32 val;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return count;
+	}
+
+	val = bcm_atoi(buf);
+
+	agg_h2d_db_timeout = val;
+	DHD_ERROR(("%s: agg_h2d_db_timeout: %d\n", __FUNCTION__, agg_h2d_db_timeout));
+	return count;
+}
+
+static struct dhd_attr dhd_attr_agg_h2d_db_timeout =
+__ATTR(agg_h2d_db_timeout, 0660, show_agg_h2d_db_timeout, set_agg_h2d_db_timeout);
+#endif /* AGG_H2D_DB */
+/*
+ * Dumps the lock and other state information useful for debug
+ */
+static ssize_t
+dhd_debug_dump_stateinfo(struct dhd_info *dhd, char *buf)
+{
+	u32 buf_size = PAGE_SIZE - 1;
+	u8 *ptr = buf;
+	ssize_t len = 0;
+
+	len += scnprintf(ptr, buf_size, "[DHD]\nlock info:\n");
+#ifdef BT_OVER_SDIO
+	len += scnprintf((ptr+len), (buf_size-len), "bus_user_lock: %d\n",
+		mutex_is_locked(&dhd->bus_user_lock));
+#endif /* BT_OVER_SDIO */
+
+#ifdef WL_CFG80211
+	len += wl_cfg80211_debug_data_dump(dhd_linux_get_primary_netdev(&dhd->pub),
+		(ptr + len), (buf_size - len));
+#endif /* WL_CFG80211 */
+
+	/* Ensure buffer ends with null char */
+	buf[len] = '\0';
+	return len + 1;
+}
+static struct dhd_attr dhd_attr_dhd_debug_data =
+__ATTR(dump_stateinfo, 0660, dhd_debug_dump_stateinfo, NULL);
+
+#ifdef WL_CFG80211
+#define _S(x) #x
+#define S(x) _S(x)
+#define SUBLOGLEVEL 20
+#define SUBLOGLEVELZ ((SUBLOGLEVEL) + (1))
+static const struct {
+	u32 log_level;
+	char *sublogname;
+} sublogname_map[] = {
+	{WL_DBG_ERR, "ERR"},
+	{WL_DBG_INFO, "INFO"},
+	{WL_DBG_DBG, "DBG"},
+	{WL_DBG_SCAN, "SCAN"},
+	{WL_DBG_TRACE, "TRACE"},
+	{WL_DBG_P2P_ACTION, "P2PACTION"}
+};
+
+/**
+* Format : echo "SCAN:1 DBG:1" > /sys/wifi/wl_dbg_level
+* to turn on SCAN and DBG log.
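+* Several SUBMODULE:LEVEL pairs may be combined in one write, separated by spaces.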
+* To turn off SCAN partially, echo "SCAN:0" > /sys/wifi/wl_dbg_level +* To see current setting of debug level, +* cat /sys/wifi/wl_dbg_level +*/ +static ssize_t +show_wl_debug_level(struct dhd_info *dhd, char *buf) +{ + char *param; + char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)]; + uint i; + ssize_t ret = 0; + + bzero(tbuf, sizeof(tbuf)); + param = &tbuf[0]; + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ", + sublogname_map[i].sublogname, + (wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0); + } + ret = scnprintf(buf, PAGE_SIZE - 1, "%s \n", tbuf); + return ret; +} + +static ssize_t +set_wl_debug_level(struct dhd_info *dhd, const char *buf, size_t count) +{ + char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)], sublog[SUBLOGLEVELZ]; + char *params, *token, *colon; + uint i, tokens, log_on = 0; + size_t minsize = min_t(size_t, (sizeof(tbuf) - 1), count); + + bzero(tbuf, sizeof(tbuf)); + bzero(sublog, sizeof(sublog)); + strlcpy(tbuf, buf, minsize); + + DHD_INFO(("current wl_dbg_level %d \n", wl_dbg_level)); + + tbuf[minsize] = '\0'; + params = &tbuf[0]; + colon = strchr(params, '\n'); + if (colon != NULL) + *colon = '\0'; + while ((token = strsep(¶ms, " ")) != NULL) { + bzero(sublog, sizeof(sublog)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + colon = strchr(token, ':'); + if (colon != NULL) { + *colon = ' '; + } + tokens = sscanf(token, "%"S(SUBLOGLEVEL)"s %u", sublog, &log_on); + if (colon != NULL) + *colon = ':'; + + if (tokens == 2) { + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + if (!strncmp(sublog, sublogname_map[i].sublogname, + strlen(sublogname_map[i].sublogname))) { + if (log_on) + wl_dbg_level |= + (sublogname_map[i].log_level); + else + wl_dbg_level &= + ~(sublogname_map[i].log_level); + } + } + } else + WL_ERR(("%s: can't parse '%s' as a " + "SUBMODULE:LEVEL (%d tokens)\n", + tbuf, token, tokens)); + + } + DHD_INFO(("changed wl_dbg_level %d \n", wl_dbg_level)); + return count; +} + +static struct dhd_attr dhd_attr_wl_dbg_level = +__ATTR(wl_dbg_level, 0660, show_wl_debug_level, set_wl_debug_level); +#endif /* WL_CFG80211 */ + +/* Attribute object that gets registered with "wifi" kobject tree */ +static struct attribute *default_file_attrs[] = { +#ifdef DHD_MAC_ADDR_EXPORT + &dhd_attr_macaddr.attr, +#endif /* DHD_MAC_ADDR_EXPORT */ +#ifdef DHD_EXPORT_CNTL_FILE +#ifdef DHD_FW_COREDUMP + &dhd_attr_memdump.attr, +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG + &dhd_attr_assert.attr, +#endif /* BCMASSERT_LOG */ +#ifdef WRITE_WLANINFO + &dhd_attr_wifiver.attr, +#endif /* WRITE_WLANINFO */ +#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG) + &dhd_attr_cidinfo.attr, +#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */ +#ifdef GEN_SOFTAP_INFO_FILE + &dhd_attr_softapinfo.attr, +#endif /* GEN_SOFTAP_INFO_FILE */ +#ifdef MIMO_ANT_SETTING + &dhd_attr_antinfo.attr, +#endif /* MIMO_ANT_SETTING */ +#ifdef DHD_PM_CONTROL_FROM_FILE + &dhd_attr_pminfo.attr, +#endif /* DHD_PM_CONTROL_FROM_FILE */ +#ifdef LOGTRACE_FROM_FILE + &dhd_attr_logtraceinfo.attr, +#endif /* LOGTRACE_FROM_FILE */ +#ifdef USE_WFA_CERT_CONF +#ifdef BCMSDIO + &dhd_attr_bustxglom.attr, +#endif /* BCMSDIO */ + &dhd_attr_roamoff.attr, +#ifdef USE_WL_FRAMEBURST + &dhd_attr_frameburst.attr, +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + &dhd_attr_txbf.attr, +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + &dhd_attr_proptx.attr, +#endif /* PROP_TXSTATUS */ +#endif /* USE_WFA_CERT_CONF */ +#endif /* 
DHD_EXPORT_CNTL_FILE */
+#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM)
+	&dhd_attr_adps_bam.attr,
+#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+	&dhd_attr_hang_privcmd_err.attr,
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+#if defined(SHOW_LOGTRACE)
+	&dhd_attr_control_logtrace.attr,
+#endif /* SHOW_LOGTRACE */
+#if defined(DHD_TRACE_WAKE_LOCK)
+	&dhd_attr_wklock.attr,
+#endif
+#ifdef DHD_LOG_DUMP
+	&dhd_attr_logdump_periodic_flush.attr,
+	&dhd_attr_logdump_ecntr.attr,
+#endif
+	&dhd_attr_ecounters.attr,
+#ifdef DHD_QOS_ON_SOCK_FLOW
+	&dhd_attr_sock_qos_onoff.attr,
+	&dhd_attr_sock_qos_stats.attr,
+	&dhd_attr_sock_qos_upgrade.attr,
+	&dhd_attr_sock_qos_numfl_upgrd_thresh.attr,
+	&dhd_attr_sock_qos_avgpktsize_thresh.attr,
+	&dhd_attr_sock_qos_numpkts_thresh.attr,
+	&dhd_attr_sock_qos_detectcnt_thresh.attr,
+	&dhd_attr_sock_qos_detectcnt_upgrd_thresh.attr,
+	&dhd_attr_sock_qos_maxfl.attr,
+#ifdef DHD_QOS_ON_SOCK_FLOW_UT
+	&dhd_attr_sock_qos_unit_test.attr,
+#endif /* DHD_QOS_ON_SOCK_FLOW_UT */
+#endif /* DHD_QOS_ON_SOCK_FLOW */
+#ifdef DHD_SSSR_DUMP
+	&dhd_attr_sssr_enab.attr,
+	&dhd_attr_fis_enab.attr,
+#endif /* DHD_SSSR_DUMP */
+	&dhd_attr_firmware_path.attr,
+	&dhd_attr_nvram_path.attr,
+#if defined(CUSTOM_CONTROL_HE_ENAB)
+	&dhd_attr_control_he_enab.attr,
+#endif /* CUSTOM_CONTROL_HE_ENAB */
+#if defined(WLAN_ACCEL_BOOT)
+	&dhd_attr_wl_accel_force_reg_on.attr,
+#endif /* WLAN_ACCEL_BOOT */
+#ifdef PWRSTATS_SYSFS
+	&dhd_attr_pwrstats_path.attr,
+#endif /* PWRSTATS_SYSFS */
+#if defined(WL_CFG80211)
+	&dhd_attr_wl_dbg_level.attr,
+#endif /* WL_CFG80211 */
+	&dhd_attr_dhd_debug_data.attr,
+#if defined(AGG_H2D_DB)
+	&dhd_attr_agg_h2d_db_enab.attr,
+	&dhd_attr_agg_h2d_db_inflight_thresh.attr,
+	&dhd_attr_agg_h2d_db_timeout.attr,
+#endif /* AGG_H2D_DB */
+	NULL
+};
+
+/*
+ * wifi kobject show function, the "attr" attribute specifies to which
+ * node under "sys/wifi" the show function is called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	dhd_info_t *dhd;
+	struct dhd_attr *d_attr;
+	int ret;
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	dhd = to_dhd(kobj);
+	d_attr = to_attr(attr);
+	GCC_DIAGNOSTIC_POP();
+
+	if (d_attr->show)
+		ret = d_attr->show(dhd, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+/*
+ * wifi kobject store function, the "attr" attribute specifies to which
+ * node under "sys/wifi" the store function is called.
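+ * Returns -EIO if the matched attribute has no store handler.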
+ */ +static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + dhd_info_t *dhd; + struct dhd_attr *d_attr; + int ret; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd = to_dhd(kobj); + d_attr = to_attr(attr); + GCC_DIAGNOSTIC_POP(); + + if (d_attr->store) + ret = d_attr->store(dhd, buf, count); + else + ret = -EIO; + + return ret; + +} + +static struct sysfs_ops dhd_sysfs_ops = { + .show = dhd_show, + .store = dhd_store, +}; + +static struct kobj_type dhd_ktype = { + .sysfs_ops = &dhd_sysfs_ops, + .default_attrs = default_file_attrs, +}; + +#ifdef CSI_SUPPORT +/* Function to show current ccode */ +static ssize_t read_csi_data(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) +{ + dhd_info_t *dhd = to_dhd(kobj); + int n = 0; + + n = dhd_csi_dump_list(&dhd->pub, buf); + DHD_INFO(("Dump data to file, size %d\n", n)); + dhd_csi_clean_list(&dhd->pub); + + return n; +} + +static struct bin_attribute dhd_attr_csi = { + .attr = { .name = "csi" BUS_TYPE, + .mode = 0660, }, + .size = MAX_CSI_FILESZ, + .read = read_csi_data, +}; +#endif /* CSI_SUPPORT */ + +/* + * sysfs for dhd_lb + */ +#ifdef DHD_LB +#if defined(DHD_LB_TXP) +static ssize_t +show_lbtxp(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = atomic_read(&dhd->lb_txp_active); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + int i; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + atomic_set(&dhd->lb_txp_active, onoff); + + /* Since the scheme is changed clear the counters */ + for (i = 0; i < NR_CPUS; i++) { + DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); + } + + return count; +} + +static struct dhd_attr dhd_attr_lbtxp = + __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff); +#endif /* DHD_LB_TXP */ + +#if defined(DHD_LB_RXP) +static ssize_t +show_lbrxp(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = atomic_read(&dhd->lb_rxp_active); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +lbrxp_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + atomic_set(&dhd->lb_rxp_active, onoff); + + return count; +} +static struct dhd_attr dhd_attr_lbrxp = + __ATTR(lbrxp, 0660, show_lbrxp, lbrxp_onoff); + +static ssize_t +get_lb_rxp_stop_thr(struct dhd_info *dev, char *buf) +{ + dhd_info_t *dhd = (dhd_info_t *)dev; + dhd_pub_t *dhdp; + ssize_t ret = 0; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return -EINVAL; + } + dhdp = &dhd->pub; + + ret = scnprintf(buf, PAGE_SIZE - 1, "%u \n", + (dhdp->lb_rxp_stop_thr / D2HRING_RXCMPLT_MAX_ITEM)); + return ret; +} + +#define ONE_GB (1024 * 1024 * 1024) + +static ssize_t +set_lb_rxp_stop_thr(struct dhd_info *dev, const char *buf, size_t count) +{ + dhd_info_t *dhd = (dhd_info_t *)dev; + dhd_pub_t *dhdp; + uint32 lb_rxp_stop_thr; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", 
__FUNCTION__)); + return -EINVAL; + } + dhdp = &dhd->pub; + + lb_rxp_stop_thr = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%u", &lb_rxp_stop_thr); + + /* disable lb_rxp flow ctrl */ + if (lb_rxp_stop_thr == 0) { + dhdp->lb_rxp_stop_thr = 0; + dhdp->lb_rxp_strt_thr = 0; + atomic_set(&dhd->pub.lb_rxp_flow_ctrl, FALSE); + return count; + } + /* 1. by the time lb_rxp_stop_thr gets into picture, + * DHD RX path should not consume more than 1GB + * 2. lb_rxp_stop_thr should always be more than dhdp->lb_rxp_strt_thr + */ + if (((lb_rxp_stop_thr * + D2HRING_RXCMPLT_MAX_ITEM * + dhd_prot_get_rxbufpost_sz(dhdp)) > ONE_GB) || + (lb_rxp_stop_thr <= (dhdp->lb_rxp_strt_thr / D2HRING_RXCMPLT_MAX_ITEM))) { + return -EINVAL; + } + + dhdp->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * lb_rxp_stop_thr); + return count; +} + +static struct dhd_attr dhd_attr_lb_rxp_stop_thr = + __ATTR(lbrxp_stop_thr, 0660, get_lb_rxp_stop_thr, set_lb_rxp_stop_thr); + +static ssize_t +get_lb_rxp_strt_thr(struct dhd_info *dev, char *buf) +{ + dhd_info_t *dhd = (dhd_info_t *)dev; + dhd_pub_t *dhdp; + ssize_t ret = 0; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return -EINVAL; + } + dhdp = &dhd->pub; + + ret = scnprintf(buf, PAGE_SIZE - 1, "%u \n", + (dhdp->lb_rxp_strt_thr / D2HRING_RXCMPLT_MAX_ITEM)); + return ret; +} + +static ssize_t +set_lb_rxp_strt_thr(struct dhd_info *dev, const char *buf, size_t count) +{ + dhd_info_t *dhd = (dhd_info_t *)dev; + dhd_pub_t *dhdp; + uint32 lb_rxp_strt_thr; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return -EINVAL; + } + dhdp = &dhd->pub; + + lb_rxp_strt_thr = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%u", &lb_rxp_strt_thr); + + /* disable lb_rxp flow ctrl */ + if (lb_rxp_strt_thr == 0) { + dhdp->lb_rxp_strt_thr = 0; + dhdp->lb_rxp_stop_thr = 0; + atomic_set(&dhd->pub.lb_rxp_flow_ctrl, FALSE); + return count; + } + /* should be less than dhdp->lb_rxp_stop_thr */ + if ((lb_rxp_strt_thr <= 0) || + (lb_rxp_strt_thr >= (dhdp->lb_rxp_stop_thr / D2HRING_RXCMPLT_MAX_ITEM))) { + return -EINVAL; + } + dhdp->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * lb_rxp_strt_thr); + return count; +} +static struct dhd_attr dhd_attr_lb_rxp_strt_thr = + __ATTR(lbrxp_strt_thr, 0660, get_lb_rxp_strt_thr, set_lb_rxp_strt_thr); + +#endif /* DHD_LB_RXP */ + +static ssize_t +show_candidacy_override(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, + "%d\n", (int)dev->dhd_lb_candidacy_override); + return ret; +} + +static ssize_t +set_candidacy_override(struct dhd_info *dev, const char *buf, size_t count) +{ + + int val = 0; + val = bcm_atoi(buf); + + if (val > 0) { + dev->dhd_lb_candidacy_override = TRUE; + } else { + dev->dhd_lb_candidacy_override = FALSE; + } + + DHD_ERROR(("set dhd_lb_candidacy_override %d\n", dev->dhd_lb_candidacy_override)); + return count; +} + +static struct dhd_attr dhd_candidacy_override = +__ATTR(candidacy_override, 0660, show_candidacy_override, set_candidacy_override); + +static ssize_t +show_primary_mask(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, + "%02lx\n", *cpumask_bits(dev->cpumask_primary)); + return ret; +} + +static ssize_t +set_primary_mask(struct dhd_info *dev, const char *buf, size_t count) +{ + int ret; + + cpumask_var_t primary_mask; + + if (!alloc_cpumask_var(&primary_mask, GFP_KERNEL)) { + DHD_ERROR(("Can't allocate cpumask vars\n")); + return count; + } + + cpumask_clear(primary_mask); + ret = cpumask_parse(buf, primary_mask); + if 
+static ssize_t
+show_candidacy_override(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1,
+		"%d\n", (int)dev->dhd_lb_candidacy_override);
+	return ret;
+}
+
+static ssize_t
+set_candidacy_override(struct dhd_info *dev, const char *buf, size_t count)
+{
+	int val = bcm_atoi(buf);
+
+	if (val > 0) {
+		dev->dhd_lb_candidacy_override = TRUE;
+	} else {
+		dev->dhd_lb_candidacy_override = FALSE;
+	}
+
+	DHD_ERROR(("set dhd_lb_candidacy_override %d\n", dev->dhd_lb_candidacy_override));
+	return count;
+}
+
+static struct dhd_attr dhd_candidacy_override =
+__ATTR(candidacy_override, 0660, show_candidacy_override, set_candidacy_override);
+
+static ssize_t
+show_primary_mask(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1,
+		"%02lx\n", *cpumask_bits(dev->cpumask_primary));
+	return ret;
+}
+
+static ssize_t
+set_primary_mask(struct dhd_info *dev, const char *buf, size_t count)
+{
+	int ret;
+	cpumask_var_t primary_mask;
+
+	if (!alloc_cpumask_var(&primary_mask, GFP_KERNEL)) {
+		DHD_ERROR(("Can't allocate cpumask vars\n"));
+		return count;
+	}
+
+	cpumask_clear(primary_mask);
+	ret = cpumask_parse(buf, primary_mask);
+	if (ret < 0) {
+		DHD_ERROR(("Setting cpumask failed ret = %d\n", ret));
+		free_cpumask_var(primary_mask);
+		return count;
+	}
+
+	cpumask_clear(dev->cpumask_primary);
+	cpumask_or(dev->cpumask_primary, dev->cpumask_primary, primary_mask);
+	/* free the temporary mask now that it has been applied */
+	free_cpumask_var(primary_mask);
+
+	DHD_ERROR(("set cpumask results cpumask_primary 0x%2lx\n",
+		*cpumask_bits(dev->cpumask_primary)));
+
+	dhd_select_cpu_candidacy(dev);
+	return count;
+}
+
+static struct dhd_attr dhd_primary_mask =
+__ATTR(primary_mask, 0660, show_primary_mask, set_primary_mask);
+
+static ssize_t
+show_secondary_mask(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1,
+		"%02lx\n", *cpumask_bits(dev->cpumask_secondary));
+	return ret;
+}
+
+static ssize_t
+set_secondary_mask(struct dhd_info *dev, const char *buf, size_t count)
+{
+	int ret;
+	cpumask_var_t secondary_mask;
+
+	if (!alloc_cpumask_var(&secondary_mask, GFP_KERNEL)) {
+		DHD_ERROR(("Can't allocate cpumask vars\n"));
+		return count;
+	}
+
+	cpumask_clear(secondary_mask);
+
+	ret = cpumask_parse(buf, secondary_mask);
+	if (ret < 0) {
+		DHD_ERROR(("Setting cpumask failed ret = %d\n", ret));
+		free_cpumask_var(secondary_mask);
+		return count;
+	}
+
+	cpumask_clear(dev->cpumask_secondary);
+	cpumask_or(dev->cpumask_secondary, dev->cpumask_secondary, secondary_mask);
+	/* free the temporary mask now that it has been applied */
+	free_cpumask_var(secondary_mask);
+
+	DHD_ERROR(("set cpumask results cpumask_secondary 0x%2lx\n",
+		*cpumask_bits(dev->cpumask_secondary)));
+
+	dhd_select_cpu_candidacy(dev);
+
+	return count;
+}
+
+static struct dhd_attr dhd_secondary_mask =
+__ATTR(secondary_mask, 0660, show_secondary_mask, set_secondary_mask);
+
+static ssize_t
+show_rx_cpu(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", atomic_read(&dev->rx_napi_cpu));
+	return ret;
+}
+
+static ssize_t
+set_rx_cpu(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 val;
+
+	if (!dev->dhd_lb_candidacy_override) {
+		DHD_ERROR(("dhd_lb_candidacy_override is required %d\n",
+			dev->dhd_lb_candidacy_override));
+		return count;
+	}
+
+	val = (uint32)bcm_atoi(buf);
+	if (val >= nr_cpu_ids) {
+		DHD_ERROR(("%s : can't set the value out of number of cpus, val = %u\n",
+			__FUNCTION__, val));
+		/* reject out-of-range cpu ids instead of falling through */
+		return count;
+	}
+
+	atomic_set(&dev->rx_napi_cpu, val);
+	DHD_ERROR(("%s: rx_napi_cpu = %d\n", __FUNCTION__, atomic_read(&dev->rx_napi_cpu)));
+	return count;
+}
+
+static struct dhd_attr dhd_rx_cpu =
+__ATTR(rx_cpu, 0660, show_rx_cpu, set_rx_cpu);
+
+static ssize_t
+show_tx_cpu(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", atomic_read(&dev->tx_cpu));
+	return ret;
+}
+
+static ssize_t
+set_tx_cpu(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 val;
+
+	if (!dev->dhd_lb_candidacy_override) {
+		DHD_ERROR(("dhd_lb_candidacy_override is required %d\n",
+			dev->dhd_lb_candidacy_override));
+		return count;
+	}
+
+	val = (uint32)bcm_atoi(buf);
+	if (val >= nr_cpu_ids) {
+		DHD_ERROR(("%s : can't set the value out of number of cpus, val = %u\n",
+			__FUNCTION__, val));
+		return count;
+	}
+
+	atomic_set(&dev->tx_cpu, val);
+	DHD_ERROR(("%s: tx_cpu = %d\n", __FUNCTION__, atomic_read(&dev->tx_cpu)));
+	return count;
+}
+
+static struct dhd_attr dhd_tx_cpu =
+__ATTR(tx_cpu, 0660, show_tx_cpu, set_tx_cpu);
+
+static struct attribute *debug_lb_attrs[] = {
+#if defined(DHD_LB_TXP)
+	&dhd_attr_lbtxp.attr,
+#endif /* DHD_LB_TXP */
+#if defined(DHD_LB_RXP)
+	&dhd_attr_lbrxp.attr,
+	&dhd_attr_lb_rxp_stop_thr.attr,
+	&dhd_attr_lb_rxp_strt_thr.attr,
+#endif /* DHD_LB_RXP */
+	&dhd_candidacy_override.attr,
+	&dhd_primary_mask.attr,
+	&dhd_secondary_mask.attr,
+	&dhd_rx_cpu.attr,
+	&dhd_tx_cpu.attr,
+	NULL
+};
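For context, a userspace caller might drive the mask attributes above as in this sketch. The sysfs path is an assumption: the "lb" kobject is registered below under a parent named "wifi" BUS_TYPE, so the exact directory depends on how BUS_TYPE expands on a given build:

    /* Illustration only: write a hex cpumask (CPUs 4-7) to the primary mask.
     * The path is hypothetical and depends on the BUS_TYPE suffix. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/wifi/lb/primary_mask", "w");
        if (!f) {
            perror("open primary_mask");
            return 1;
        }
        fputs("f0", f);   /* hex mask parsed by cpumask_parse() in the store hook */
        fclose(f);
        return 0;
    }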
+
+#define to_dhd_lb(k) container_of(k, struct dhd_info, dhd_lb_kobj)
+
+/*
+ * wifi/lb kobject show function, the "attr" attribute specifies to which
+ * node under "sys/wifi/lb" the show function is called.
+ */
+static ssize_t dhd_lb_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	dhd_info_t *dhd;
+	struct dhd_attr *d_attr;
+	int ret;
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	dhd = to_dhd_lb(kobj);
+	d_attr = to_attr(attr);
+	GCC_DIAGNOSTIC_POP();
+
+	if (d_attr->show)
+		ret = d_attr->show(dhd, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+/*
+ * wifi/lb kobject store function, the "attr" attribute specifies to which
+ * node under "sys/wifi/lb" the store function is called.
+ */
+static ssize_t dhd_lb_store(struct kobject *kobj, struct attribute *attr,
+	const char *buf, size_t count)
+{
+	dhd_info_t *dhd;
+	struct dhd_attr *d_attr;
+	int ret;
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	dhd = to_dhd_lb(kobj);
+	d_attr = to_attr(attr);
+	GCC_DIAGNOSTIC_POP();
+
+	if (d_attr->store)
+		ret = d_attr->store(dhd, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static struct sysfs_ops dhd_sysfs_lb_ops = {
+	.show = dhd_lb_show,
+	.store = dhd_lb_store,
+};
+
+static struct kobj_type dhd_lb_ktype = {
+	.sysfs_ops = &dhd_sysfs_lb_ops,
+	.default_attrs = debug_lb_attrs,
+};
+#endif /* DHD_LB */
+
+/* Create a kobject and attach to sysfs interface */
+int dhd_sysfs_init(dhd_info_t *dhd)
+{
+	int ret = -1;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/* Initialize the kobject */
+	ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "wifi" BUS_TYPE);
+	if (ret) {
+		kobject_put(&dhd->dhd_kobj);
+		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+		return ret;
+	}
+
+#ifdef CSI_SUPPORT
+	ret = sysfs_create_bin_file(&dhd->dhd_kobj, &dhd_attr_csi);
+	if (ret) {
+		DHD_ERROR(("%s: can't create %s\n", __FUNCTION__, dhd_attr_csi.attr.name));
+		kobject_put(&dhd->dhd_kobj);
+		return ret;
+	}
+#endif /* CSI_SUPPORT */
+
+	/*
+	 * We are always responsible for sending the uevent that the kobject
+	 * was added to the system.
+	 */
+	kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
+
+#ifdef DHD_LB
+	ret = kobject_init_and_add(&dhd->dhd_lb_kobj,
+		&dhd_lb_ktype, &dhd->dhd_kobj, "lb");
+	if (ret) {
+		kobject_put(&dhd->dhd_lb_kobj);
+		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	kobject_uevent(&dhd->dhd_lb_kobj, KOBJ_ADD);
+#endif /* DHD_LB */
+
+	return ret;
+}
+
+/* Done with the kobject and detach the sysfs interface */
+void dhd_sysfs_exit(dhd_info_t *dhd)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return;
+	}
+
+#ifdef DHD_LB
+	kobject_put(&dhd->dhd_lb_kobj);
+#endif /* DHD_LB */
+
+	/* Release the kobject */
+	if (dhd->dhd_kobj.state_initialized)
+		kobject_put(&dhd->dhd_kobj);
+}
+
+#ifdef DHD_SUPPORT_HDM
+static ssize_t
+hdm_load_module(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int val = bcm_atoi(buf);
+
+	if (val == 1) {
+		DHD_ERROR(("%s : Load module from the hdm %d\n", __FUNCTION__, val));
+		dhd_module_init_hdm();
+	} else {
+		DHD_ERROR(("Module load triggered with invalid value : %d\n", val));
+	}
+
+	return count;
+}
+
+static struct kobj_attribute hdm_wlan_attr =
+	__ATTR(hdm_wlan_loader, 0660, NULL, hdm_load_module);
+
+void
+dhd_hdm_wlan_sysfs_init(void)
+{
+	DHD_ERROR(("export hdm_wlan_loader\n"));
+	if (sysfs_create_file(kernel_kobj, &hdm_wlan_attr.attr)) {
+		DHD_ERROR(("export hdm_load failed\n"));
+	}
+}
+
+void
+dhd_hdm_wlan_sysfs_deinit(struct work_struct *work)
+{
+	sysfs_remove_file(kernel_kobj, &hdm_wlan_attr.attr);
+}
+#endif /* DHD_SUPPORT_HDM */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_lb.c b/bcmdhd.101.10.361.x/dhd_linux_lb.c
new file mode 100755
index 0000000..69b3081
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_lb.c
@@ -0,0 +1,1402 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#include
+
+extern dhd_pub_t* g_dhd_pub;
+
+#if defined(DHD_LB)
+
+#ifdef DHD_LB_STATS
+#define DHD_NUM_NAPI_LATENCY_ROWS	(17u)
+#define DHD_NAPI_LATENCY_SIZE		(sizeof(uint64) * DHD_NUM_NAPI_LATENCY_ROWS)
+#endif /* DHD_LB_STATS */
+
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_LB_INFO DHD_TRACE
+#else
+#define DHD_LB_INFO DHD_INFO
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
+void
+dhd_lb_set_default_cpus(dhd_info_t *dhd)
+{
+	/* Default CPU allocation for the jobs */
+	atomic_set(&dhd->rx_napi_cpu, 1);
+	atomic_set(&dhd->tx_cpu, 2);
+	atomic_set(&dhd->net_tx_cpu, 0);
+	atomic_set(&dhd->dpc_cpu, 0);
+}
+
+void
+dhd_cpumasks_deinit(dhd_info_t *dhd)
+{
+	free_cpumask_var(dhd->cpumask_curr_avail);
+	free_cpumask_var(dhd->cpumask_primary);
+	free_cpumask_var(dhd->cpumask_primary_new);
+	free_cpumask_var(dhd->cpumask_secondary);
+	free_cpumask_var(dhd->cpumask_secondary_new);
+}
+
+int
+dhd_cpumasks_init(dhd_info_t *dhd)
+{
+	int id;
+	uint32 cpus, num_cpus = num_possible_cpus();
+	int ret = 0;
+
+	DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
+		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
+
+	/* FIXME: If one alloc fails we must free_cpumask_var the previous */
+	if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
+		DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
+	cpumask_clear(dhd->cpumask_primary);
+	cpumask_clear(dhd->cpumask_secondary);
+
+	if (num_cpus > 32) {
+		DHD_ERROR(("%s max cpus must be 32, %u too big\n", __FUNCTION__, num_cpus));
+		ASSERT(0);
+	}
+
+	cpus = DHD_LB_PRIMARY_CPUS;
+	for (id = 0; id < num_cpus; id++) {
+		if (isset(&cpus, id))
+			cpumask_set_cpu(id, dhd->cpumask_primary);
+	}
+
+	cpus = DHD_LB_SECONDARY_CPUS;
+	for (id = 0; id < num_cpus; id++) {
+		if (isset(&cpus, id))
+			cpumask_set_cpu(id, dhd->cpumask_secondary);
+	}
+
+	return ret;
+fail:
+	dhd_cpumasks_deinit(dhd);
+	return ret;
+}
+
+/*
+ * The CPU Candidacy Algorithm
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The available CPUs for selection are divided into two groups:
+ * Primary Set - A CPU mask that carries the First Choice CPUs
+ * Secondary Set - A CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of jobs that need to be assigned to
+ * the CPUs, from one of the above mentioned CPU groups. The jobs are
+ * 1) Rx Packet Processing - napi_cpu
+ *
+ * To begin with, napi_cpu is on CPU0. Whenever a CPU goes
+ * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
+ * algo tries to pick the first available non-boot CPU (i.e. not CPU0) for napi_cpu.
+ */
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+	uint32 primary_available_cpus; /* count of primary available cpus */
+	uint32 secondary_available_cpus; /* count of secondary available cpus */
+	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+	uint32 tx_cpu = 0; /* cpu selected for tx processing job */
+	uint32 dpc_cpu = atomic_read(&dhd->dpc_cpu);
+	uint32 net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
+
+	cpumask_clear(dhd->cpumask_primary_new);
+	cpumask_clear(dhd->cpumask_secondary_new);
+
+	/*
+	 * Now select from the primary mask.
Even if a Job is
+	 * already running on a CPU in the secondary group, we still move
+	 * to a primary CPU. So no conditional checks.
+	 */
+	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+		dhd->cpumask_curr_avail);
+
+	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+		dhd->cpumask_curr_avail);
+
+	/* Clear DPC cpu from new masks so that dpc cpu is not chosen for LB */
+	cpumask_clear_cpu(dpc_cpu, dhd->cpumask_primary_new);
+	cpumask_clear_cpu(dpc_cpu, dhd->cpumask_secondary_new);
+
+	/* Clear net_tx_cpu from new masks so that same is not chosen for LB */
+	cpumask_clear_cpu(net_tx_cpu, dhd->cpumask_primary_new);
+	cpumask_clear_cpu(net_tx_cpu, dhd->cpumask_secondary_new);
+
+	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
+
+#if defined(DHD_LB_HOST_CTRL)
+	/* Do not use primary cpus if DHD received an affinity-off command
+	 * from the framework
+	 */
+	if (primary_available_cpus > 0 && dhd->permitted_primary_cpu)
+#else
+	if (primary_available_cpus > 0)
+#endif /* DHD_LB_HOST_CTRL */
+	{
+		napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+
+		/* If no further CPU is available,
+		 * cpumask_next returns >= nr_cpu_ids
+		 */
+		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+		if (tx_cpu >= nr_cpu_ids)
+			tx_cpu = 0;
+	}
+
+	DHD_INFO(("%s After primary CPU check napi_cpu %d tx_cpu %d\n",
+		__FUNCTION__, napi_cpu, tx_cpu));
+
+	/* -- Now check for the CPUs from the secondary mask -- */
+	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
+
+	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));
+
+	if (secondary_available_cpus > 0) {
+		/* At this point if napi_cpu is unassigned it means no CPU
+		 * is online from the Primary Group
+		 */
+#if defined(DHD_LB_TXP_LITTLE_CORE_CTRL)
+		/* Clear tx_cpu, so that it can be picked from a little core */
+		tx_cpu = 0;
+#endif /* DHD_LB_TXP_LITTLE_CORE_CTRL */
+		if (napi_cpu == 0) {
+			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+		} else if (tx_cpu == 0) {
+			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
+		}
+
+		/* If no CPU was available for tx processing, choose CPU 0 */
+		if (tx_cpu >= nr_cpu_ids)
+			tx_cpu = 0;
+	}
+
+	if ((primary_available_cpus == 0) &&
+		(secondary_available_cpus == 0)) {
+		/* No CPUs available from primary or secondary mask */
+		napi_cpu = 1;
+		tx_cpu = 2;
+	}
+
+	DHD_INFO(("%s After secondary CPU check napi_cpu %d tx_cpu %d\n",
+		__FUNCTION__, napi_cpu, tx_cpu));
+
+	ASSERT(napi_cpu < nr_cpu_ids);
+	ASSERT(tx_cpu < nr_cpu_ids);
+
+	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+	atomic_set(&dhd->tx_cpu, tx_cpu);
+
+	return;
+}
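To make the selection concrete, here is a worked example in standalone C. The masks and excluded CPUs are hypothetical (a big.LITTLE layout with primary = CPUs 4-7, dpc_cpu = 4, net_tx_cpu = 0), and `ffs()` stands in for `cpumask_first()`/`cpumask_next()`:

    /* Illustration only: first-set-bit selection as done by the candidacy algo */
    #include <stdio.h>
    #include <strings.h>  /* ffs() */

    int main(void)
    {
        /* primary mask 0xf0 with dpc_cpu(4) and net_tx_cpu(0) cleared -> 0xe0 */
        unsigned primary_new = 0xf0 & ~((1u << 4) | (1u << 0));
        int napi_cpu = ffs(primary_new) - 1;                    /* CPU 5 */
        int tx_cpu = ffs(primary_new & ~(1u << napi_cpu)) - 1;  /* CPU 6 */

        printf("napi_cpu=%d tx_cpu=%d\n", napi_cpu, tx_cpu);
        return 0;
    }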
+
+/*
+ * Function to handle CPU Hotplug notifications.
+ * One of the tasks it does is to trigger the CPU Candidacy algorithm
+ * for load balancing.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+int dhd_cpu_startup_callback(unsigned int cpu)
+{
+	dhd_info_t *dhd = g_dhd_pub->info;
+
+	DHD_INFO(("%s(): cpu:%d\r\n", __FUNCTION__, cpu));
+	DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+	cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+	dhd_select_cpu_candidacy(dhd);
+
+	return 0;
+}
+
+int dhd_cpu_teardown_callback(unsigned int cpu)
+{
+	dhd_info_t *dhd = g_dhd_pub->info;
+
+	DHD_INFO(("%s(): cpu:%d\r\n", __FUNCTION__, cpu));
+	DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+	cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+	dhd_select_cpu_candidacy(dhd);
+
+	return 0;
+}
+#else
+int
+dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+	unsigned long int cpu = (unsigned long int)hcpu;
+	dhd_info_t *dhd;
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	dhd = container_of(nfb, dhd_info_t, cpu_notifier);
+	GCC_DIAGNOSTIC_POP();
+
+	if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
+		DHD_INFO(("%s(): LB data is not initialized yet.\n",
+			__FUNCTION__));
+		return NOTIFY_BAD;
+	}
+
+	/* XXX: Do we need other action types ? */
+	switch (action)
+	{
+		case CPU_ONLINE:
+		case CPU_ONLINE_FROZEN:
+			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+			dhd_select_cpu_candidacy(dhd);
+			break;
+
+		case CPU_DOWN_PREPARE:
+		case CPU_DOWN_PREPARE_FROZEN:
+			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+			dhd_select_cpu_candidacy(dhd);
+			break;
+		default:
+			break;
+	}
+
+	return NOTIFY_OK;
+}
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+
+int dhd_register_cpuhp_callback(dhd_info_t *dhd)
+{
+	int cpuhp_ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
+		dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
+
+	if (cpuhp_ret < 0) {
+		DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n",
+			__FUNCTION__, cpuhp_ret));
+	}
+#else
+	/*
+	 * If we are able to initialize CPU masks, let's register to the
+	 * CPU Hotplug framework to change the CPU for each job dynamically
+	 * using the candidacy algorithm.
+	 */
+	dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
+	register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+	return cpuhp_ret;
+}
+
+int dhd_unregister_cpuhp_callback(dhd_info_t *dhd)
+{
+	int ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	/* Don't want to call tear down while unregistering */
+	cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#else
+	if (dhd->cpu_notifier.notifier_call != NULL) {
+		unregister_cpu_notifier(&dhd->cpu_notifier);
+	}
+#endif
+	return ret;
+}
+
+#if defined(DHD_LB_STATS)
+void dhd_lb_stats_reset(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int i, j, num_cpus = num_possible_cpus();
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s dhd pub pointer is NULL \n",
+			__FUNCTION__));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+	DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+
+	/* reset NAPI latency stats */
+	if (dhd->napi_latency) {
+		bzero(dhd->napi_latency, DHD_NAPI_LATENCY_SIZE);
+	}
+	/* reset NAPI per cpu stats */
+	if (dhd->napi_percpu_run_cnt) {
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+		}
+	}
+
+	DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+
+	if (dhd->rxc_percpu_run_cnt) {
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+		}
+	}
+
+	DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+	if (dhd->txc_percpu_run_cnt) {
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+		}
+	}
+
+	if (dhd->txp_percpu_run_cnt) {
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+		}
+	}
+
+	if (dhd->tx_start_percpu_run_cnt) {
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+		}
+	}
+
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+		}
+	}
+
+	dhd->pub.lb_rxp_strt_thr_hitcnt = 0;
+	dhd->pub.lb_rxp_stop_thr_hitcnt = 0;
+
+	dhd->pub.lb_rxp_napi_sched_cnt = 0;
+	dhd->pub.lb_rxp_napi_complete_cnt = 0;
+	return;
+}
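The init routine that follows allocates each of these counters as a flat per-CPU array. A minimal userspace model of that layout, with `calloc` standing in for the driver's `MALLOC`/`DHD_LB_STATS_CLR` pair and an assumed CPU count:

    /* Illustration only: one uint32 slot per possible CPU, indexed by cpu id */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int num_cpus = 8;  /* stand-in for num_possible_cpus() */
        unsigned *napi_percpu_run_cnt = calloc(num_cpus, sizeof(*napi_percpu_run_cnt));
        if (!napi_percpu_run_cnt)
            return 1;

        napi_percpu_run_cnt[3]++;  /* models an increment while running on CPU 3 */
        for (int i = 0; i < num_cpus; i++)
            printf("cpu%d: %u\n", i, napi_percpu_run_cnt[i]);

        free(napi_percpu_run_cnt);
        return 0;
    }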
+void dhd_lb_stats_init(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int i, j, num_cpus = num_possible_cpus();
+	int alloc_size = sizeof(uint32) * num_cpus;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
+			__FUNCTION__));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+	DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+
+	/* NAPI latency stats */
+	dhd->napi_latency = (uint64 *)MALLOCZ(dhdp->osh, DHD_NAPI_LATENCY_SIZE);
+	/* NAPI per cpu stats */
+	dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->napi_percpu_run_cnt) {
+		DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+
+	DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+
+	dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->rxc_percpu_run_cnt) {
+		DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+
+	DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+	dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->txc_percpu_run_cnt) {
+		DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+
+	dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->cpu_online_cnt) {
+		DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
+
+	dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->cpu_offline_cnt) {
+		DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
+
+	dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->txp_percpu_run_cnt) {
+		DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+
+	dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->tx_start_percpu_run_cnt) {
+		DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+		if (!dhd->napi_rx_hist[j]) {
+			DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
+				__FUNCTION__, j));
+			return;
+		}
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+		}
+	}
+
+	dhd->pub.lb_rxp_strt_thr_hitcnt = 0;
+	dhd->pub.lb_rxp_stop_thr_hitcnt = 0;
+
+	dhd->pub.lb_rxp_napi_sched_cnt = 0;
+	dhd->pub.lb_rxp_napi_complete_cnt = 0;
+	return;
+}
+
+void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int j, num_cpus = num_possible_cpus();
+	int alloc_size = sizeof(uint32) * num_cpus;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
+			__FUNCTION__));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd->napi_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
+	}
+	if (dhd->rxc_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
+	}
+	if (dhd->txc_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
+	}
+	if (dhd->cpu_online_cnt) {
+		MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
+	}
+	if (dhd->cpu_offline_cnt) {
+		MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
+	}
+
+	if (dhd->txp_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
+	}
+	if (dhd->tx_start_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
+	}
+	if (dhd->napi_latency) {
+		MFREE(dhdp->osh, dhd->napi_latency, DHD_NAPI_LATENCY_SIZE);
+	}
+
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		if (dhd->napi_rx_hist[j]) {
+			MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
+		}
+	}
+
+	return;
+}
+
+void dhd_lb_stats_dump_napi_latency(dhd_pub_t *dhdp,
+	struct bcmstrbuf *strbuf, uint64 *napi_latency)
+{
+	uint32 i;
+
+	bcm_bprintf(strbuf, "napi-latency(us): \t count\n");
+	for (i = 0; i < DHD_NUM_NAPI_LATENCY_ROWS; i++) {
+		bcm_bprintf(strbuf, "%16u: \t %llu\n", 1U<<i, *(napi_latency + i));
+	}
+	return;
+}
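The latency rows printed above are power-of-two buckets, filled by dhd_lb_stats_update_napi_latency() further down. A small standalone model of that binning (the treatment of exact powers of two is an assumption, since next_larger_power2() is not shown in this hunk):

    /* Illustration only: latency N us lands in the bucket for the next
     * power of two >= N; row 16 collects everything above 32768 us. */
    #include <stdio.h>
    #include <stdint.h>

    static int latency_bin(uint32_t us)
    {
        int bin = 0;
        while (bin < 16 && (1u << bin) < us)
            bin++;
        return bin;
    }

    int main(void)
    {
        printf("300us   -> bin %d (512us bucket)\n", latency_bin(300));   /* 9 */
        printf("70000us -> bin %d (overflow)\n", latency_bin(70000));     /* 16 */
        return 0;
    }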
+void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
+	struct bcmstrbuf *strbuf, uint32 **hist)
+{
+	int i, j;
+	uint32 *per_cpu_total;
+	uint32 total = 0;
+	int num_cpus = num_possible_cpus();
+
+	per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
+	if (!per_cpu_total) {
+		DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
+		return;
+	}
+	bzero(per_cpu_total, sizeof(uint32) * num_cpus);
+
+	bcm_bprintf(strbuf, "CPU: \t\t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%d\t", i);
+	bcm_bprintf(strbuf, "\nBin\n");
+
+	for (i = 0; i < HIST_BIN_SIZE; i++) {
+		bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
+		for (j = 0; j < num_cpus; j++) {
+			per_cpu_total[j] += *(hist[i] + j);
+			bcm_bprintf(strbuf, "%d\t", *(hist[i] + j));
+		}
+		bcm_bprintf(strbuf, "\n");
+	}
+	bcm_bprintf(strbuf, "Per CPU Total \t");
+	total = 0;
+	for (i = 0; i < num_cpus; i++) {
+		bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+		total += per_cpu_total[i];
+	}
+	bcm_bprintf(strbuf, "\nTotal\t\t%d\n", total);
+
+	if (per_cpu_total) {
+		MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
+	}
+	return;
+}
+
+void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+	int i, num_cpus = num_possible_cpus();
+
+	bcm_bprintf(strbuf, "CPU: \t\t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%d\t", i);
+	bcm_bprintf(strbuf, "\n");
+
+	bcm_bprintf(strbuf, "Val: \t\t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%u\t", *(p+i));
+	bcm_bprintf(strbuf, "\n");
+	return;
+}
+
+#ifdef DHD_MEM_STATS
+uint64 dhd_lb_mem_usage(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_info_t *dhd;
+	uint16 rxbufpost_sz;
+	uint16 rx_post_active = 0;
+	uint16 rx_cmpl_active = 0;
+	uint64 rx_path_memory_usage = 0;
+
+	if (dhdp == NULL || strbuf == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+			__FUNCTION__, dhdp, strbuf));
+		return 0;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return 0;
+	}
+	rxbufpost_sz = dhd_prot_get_rxbufpost_sz(dhdp);
+	if (rxbufpost_sz == 0) {
+		rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+	}
+	rx_path_memory_usage = rxbufpost_sz * (skb_queue_len(&dhd->rx_pend_queue) +
+		skb_queue_len(&dhd->rx_napi_queue) +
+		skb_queue_len(&dhd->rx_process_queue));
+	rx_post_active = dhd_prot_get_h2d_rx_post_active(dhdp);
+	if (rx_post_active != 0) {
+		rx_path_memory_usage += (rxbufpost_sz * rx_post_active);
+	}
+
+	rx_cmpl_active = dhd_prot_get_d2h_rx_cpln_active(dhdp);
+	if (rx_cmpl_active != 0) {
+		rx_path_memory_usage += (rxbufpost_sz * rx_cmpl_active);
+	}
+
+	dhdp->rxpath_mem = rx_path_memory_usage;
+	bcm_bprintf(strbuf, "\nrxbufpost_sz: %d rx_post_active: %d rx_cmpl_active: %d "
+		"pend_queue_len: %d napi_queue_len: %d process_queue_len: %d\n",
+		rxbufpost_sz, rx_post_active, rx_cmpl_active,
+		skb_queue_len(&dhd->rx_pend_queue),
+		skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_process_queue));
+	bcm_bprintf(strbuf, "DHD rx-path memory_usage: %llu bytes %llu KB \n",
+		rx_path_memory_usage, (rx_path_memory_usage / 1024));
+	return rx_path_memory_usage;
+}
+#endif /* DHD_MEM_STATS */
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_info_t *dhd;
+
+	if (dhdp == NULL || strbuf == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+			__FUNCTION__, dhdp, strbuf));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+
+	bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+
+	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+		dhd->txc_sched_cnt);
+
+	bcm_bprintf(strbuf, "\nCPUs: dpc_cpu %u napi_cpu %u net_tx_cpu %u tx_cpu %u\n",
+		atomic_read(&dhd->dpc_cpu),
+		atomic_read(&dhd->rx_napi_cpu),
+		atomic_read(&dhd->net_tx_cpu),
+		atomic_read(&dhd->tx_cpu));
+
+#ifdef DHD_LB_RXP
+	bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
+	bcm_bprintf(strbuf, "\nNAPI poll latency stats, i.e., from napi schedule to 
napi execution\n"); + dhd_lb_stats_dump_napi_latency(dhdp, strbuf, dhd->napi_latency); +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LB_TXP + bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt); + + bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt); +#endif /* DHD_LB_TXP */ +} + +void dhd_lb_stats_update_napi_latency(uint64 *bin, uint32 latency) +{ + uint64 *p; + uint32 bin_power; + bin_power = next_larger_power2(latency); + + switch (bin_power) { + case 1: p = bin + 0; break; + case 2: p = bin + 1; break; + case 4: p = bin + 2; break; + case 8: p = bin + 3; break; + case 16: p = bin + 4; break; + case 32: p = bin + 5; break; + case 64: p = bin + 6; break; + case 128: p = bin + 7; break; + case 256: p = bin + 8; break; + case 512: p = bin + 9; break; + case 1024: p = bin + 10; break; + case 2048: p = bin + 11; break; + case 4096: p = bin + 12; break; + case 8192: p = bin + 13; break; + case 16384: p = bin + 14; break; + case 32768: p = bin + 15; break; + default : p = bin + 16; break; + } + ASSERT((p - bin) < DHD_NUM_NAPI_LATENCY_ROWS); + *p = *p + 1; + return; + +} + +void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu) +{ + uint32 bin_power; + uint32 *p; + bin_power = next_larger_power2(count); + + switch (bin_power) { + case 1: p = bin[0] + cpu; break; + case 2: p = bin[1] + cpu; break; + case 4: p = bin[2] + cpu; break; + case 8: p = bin[3] + cpu; break; + case 16: p = bin[4] + cpu; break; + case 32: p = bin[5] + cpu; break; + case 64: p = bin[6] + cpu; break; + case 128: p = bin[7] + cpu; break; + default : p = bin[8] + cpu; break; + } + + *p = *p + 1; + return; +} + +void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu); + + return; +} + +void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu); + + return; +} + +void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu); + + return; +} + +void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt); +} + +void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); +} +#endif /* DHD_LB_STATS */ + +/** + * dhd_tasklet_schedule - Function that runs in IPI context of the destination + * CPU and schedules a tasklet. + * @tasklet: opaque pointer to the tasklet + */ +INLINE void +dhd_tasklet_schedule(void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); +} + +/** + * dhd_work_schedule_on - Executes the passed work in a given CPU + * @work: work to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * schedule_work_on and the work function + * will be invoked to schedule the specified work on the requested CPU. 
+ */
+
+INLINE void
+dhd_work_schedule_on(struct work_struct *work, int on_cpu)
+{
+	schedule_work_on(on_cpu, work);
+}
+
+INLINE void
+dhd_delayed_work_schedule_on(struct delayed_work *dwork, int on_cpu, ulong delay)
+{
+	schedule_delayed_work_on(on_cpu, dwork, delay);
+}
+
+#if defined(DHD_LB_TXP)
+void dhd_tx_dispatcher_work(struct work_struct * work)
+{
+	struct dhd_info *dhd;
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	dhd = container_of(work, struct dhd_info, tx_dispatcher_work);
+	GCC_DIAGNOSTIC_POP();
+
+	dhd_tasklet_schedule(&dhd->tx_tasklet);
+}
+
+/**
+ * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
+ * on another cpu. The tx_tasklet will take care of actually putting
+ * the skbs into the appropriate flow ring and ringing the H2D interrupt
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu;
+	int tx_cpu;
+	int prev_net_tx_cpu;
+
+	/*
+	 * get_cpu() disables pre-emption and will not allow any cpu to go
+	 * offline; put_cpu() is called only after scheduling
+	 * tx_dispatcher_work.
+	 */
+	curr_cpu = get_cpu();
+
+	/* Record the CPU on which the TX request from the network stack came */
+	prev_net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
+	atomic_set(&dhd->net_tx_cpu, curr_cpu);
+
+	tx_cpu = atomic_read(&dhd->tx_cpu);
+
+	/*
+	 * Avoid cpu candidacy if override is set via sysfs for changing the cpu manually
+	 */
+	if (dhd->dhd_lb_candidacy_override) {
+		if (!cpu_online(tx_cpu)) {
+			tx_cpu = curr_cpu;
+		}
+	} else {
+		/*
+		 * Now if the NET TX has been scheduled on the same CPU
+		 * that is chosen for Tx processing,
+		 * OR scheduled on a different cpu than it was previously,
+		 * OR if tx_cpu is offline,
+		 * call the cpu candidacy algorithm to recompute tx_cpu.
+		 */
+		if ((curr_cpu == tx_cpu) || (curr_cpu != prev_net_tx_cpu) ||
+			!cpu_online(tx_cpu)) {
+			/* Recompute LB CPUs */
+			dhd_select_cpu_candidacy(dhd);
+			/* Use updated tx cpu */
+			tx_cpu = atomic_read(&dhd->tx_cpu);
+		}
+	}
+	/*
+	 * Schedule tx_dispatcher_work on the chosen cpu, which
+	 * in turn will schedule the tx_tasklet.
+	 */
+	dhd_work_schedule_on(&dhd->tx_dispatcher_work, tx_cpu);
+
+	put_cpu();
+}
+#endif /* DHD_LB_TXP */
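The dispatch above is a thin wrapper over the kernel workqueue API. A minimal sketch of the same pattern in isolation (kernel context, so it builds only inside a module; `example_fn` and `example_work` are illustrative names):

    /* Illustration only: initialize a work item once, then queue it to a
     * specific CPU, as dhd_work_schedule_on() does. */
    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *work)
    {
        /* runs in process context on the CPU the work was queued to */
    }

    static DECLARE_WORK(example_work, example_fn);

    static void queue_on(int target_cpu)
    {
        schedule_work_on(target_cpu, &example_work);
    }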
+#if defined(DHD_LB_RXP)
+
+/**
+ * dhd_napi_poll - load balance napi poll function to process received
+ * packets and send them up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock, queue move, unlock).
+ * Dequeue each packet from the head of rx_process_queue, fetch the ifid from
+ * the packet tag, and send it up.
+ */
+int
+dhd_napi_poll(struct napi_struct *napi, int budget)
+{
+	int ifid;
+	const int pkt_count = 1;
+	const int chan = 0;
+	struct sk_buff * skb;
+	unsigned long flags;
+	struct dhd_info *dhd;
+	int processed = 0;
+	int dpc_cpu;
+#ifdef DHD_LB_STATS
+	uint32 napi_latency;
+#endif /* DHD_LB_STATS */
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+	GCC_DIAGNOSTIC_POP();
+
+#ifdef DHD_LB_STATS
+	napi_latency = (uint32)(OSL_SYSUPTIME_US() - dhd->napi_schedule_time);
+	dhd_lb_stats_update_napi_latency(dhd->napi_latency, napi_latency);
+#endif /* DHD_LB_STATS */
+	DHD_LB_INFO(("%s napi_queue<%d> budget<%d>\n",
+		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+
+	/*
+	 * Extract the entire rx_napi_queue into another rx_process_queue
+	 * and process only 'budget' number of skbs from rx_process_queue.
+	 * If there are more items to be processed, napi poll will be rescheduled.
+	 * During the next iteration, the next set of skbs from
+	 * rx_napi_queue will be extracted and attached to the tail of rx_process_queue.
+	 * Again, budget number of skbs will be processed from rx_process_queue.
+	 * If there are fewer than budget skbs in rx_process_queue,
+	 * call napi_complete to stop rescheduling napi poll.
+	 */
+	DHD_RX_NAPI_QUEUE_LOCK(&dhd->rx_napi_queue.lock, flags);
+	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &dhd->rx_process_queue);
+	DHD_RX_NAPI_QUEUE_UNLOCK(&dhd->rx_napi_queue.lock, flags);
+
+	while ((processed < budget) && (skb = __skb_dequeue(&dhd->rx_process_queue)) != NULL) {
+		OSL_PREFETCH(skb->data);
+
+		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+
+		DHD_LB_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+			__FUNCTION__, skb, ifid));
+
+		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+		processed++;
+	}
+
+	if (atomic_read(&dhd->pub.lb_rxp_flow_ctrl) &&
+		(dhd_lb_rxp_process_qlen(&dhd->pub) <= dhd->pub.lb_rxp_strt_thr)) {
+		/*
+		 * If the dpc CPU is online, schedule dhd_dpc_dispatcher_work on it,
+		 * which in turn will schedule the dpc tasklet. Else schedule the
+		 * dpc tasklet directly.
+		 */
+		get_cpu();
+		dpc_cpu = atomic_read(&dhd->dpc_cpu);
+		if (!cpu_online(dpc_cpu)) {
+			dhd_tasklet_schedule(&dhd->tasklet);
+		} else {
+			dhd_delayed_work_schedule_on(&dhd->dhd_dpc_dispatcher_work, dpc_cpu, 0);
+		}
+		put_cpu();
+	}
+	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
+
+	DHD_LB_INFO(("%s processed %d\n", __FUNCTION__, processed));
+
+	/*
+	 * Signal napi complete only when no more packets are processed and
+	 * none are left in the enqueued queue.
+	 */
+	if ((processed == 0) && (skb_queue_len(&dhd->rx_napi_queue) == 0)) {
+		napi_complete(napi);
+#ifdef DHD_LB_STATS
+		dhd->pub.lb_rxp_napi_complete_cnt++;
+#endif /* DHD_LB_STATS */
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		DHD_BUS_BUSY_CLEAR_IN_NAPI(&dhd->pub);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		return 0;
+	}
+
+#ifdef DHD_LB_STATS
+	dhd->napi_schedule_time = OSL_SYSUPTIME_US();
+#endif /* DHD_LB_STATS */
+
+	/* Return budget so that it gets rescheduled immediately */
+	return budget;
+}
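For readers less familiar with NAPI, this is the generic shape of the contract the poll function above follows (DHD returns 0 together with napi_complete() rather than the processed count, but the scheduling semantics are the same). A minimal kernel-context sketch:

    /* Illustration only: the standard NAPI poll contract */
    #include <linux/netdevice.h>

    static int example_poll(struct napi_struct *napi, int budget)
    {
        int done = 0;
        /* ... dequeue and process up to 'budget' packets, incrementing done ... */
        if (done < budget)
            napi_complete(napi);  /* no more work: leave the softnet poll list */
        return done;              /* returning 'budget' keeps the NAPI scheduled */
    }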
+/**
+ * dhd_napi_schedule - Place the napi struct into the current cpu's softnet napi
+ * poll list. This function may be invoked via smp_call_function_single()
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+	dhd_info_t *dhd = (dhd_info_t *)info;
+	unsigned long flags;
+
+	DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+		__FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
+
+	/* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+	if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+
+		/*
+		 * Set the bus busy state in NAPI; it will be cleared after
+		 * napi_complete from the napi_poll context
+		 */
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		DHD_BUS_BUSY_SET_IN_NAPI(&dhd->pub);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+#ifdef DHD_LB_STATS
+		dhd->napi_schedule_time = OSL_SYSUPTIME_US();
+		dhd->pub.lb_rxp_napi_sched_cnt++;
+#endif /* DHD_LB_STATS */
+		__napi_schedule(&dhd->rx_napi_struct);
+#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
+		raise_softirq(NET_RX_SOFTIRQ);
+#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
+	}
+
+	/*
+	 * If the rx_napi_struct was already running, then we let it complete
+	 * processing all its packets. The rx_napi_struct may only run on one
+	 * core at a time, to avoid out-of-order handling.
+	 */
+}
+
+/**
+ * dhd_napi_schedule_on - API to schedule a NET_RX_SOFTIRQ action on a desired
+ * CPU core after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
+{
+	int wait = 0; /* asynchronous IPI */
+	DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+		__FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
+
+	if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+		DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+			__FUNCTION__, on_cpu));
+	}
+
+	DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+	return 0;
+}
+
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
+ * Why should we do this?
+ * The candidacy algorithm is run from the callback function
+ * registered with the CPU hotplug notifier. That callback happens in worker
+ * context. dhd_napi_schedule_on also runs in worker context.
+ * Note that both of these can run on two different CPUs at the same time,
+ * so we can have a window where a given CPUn is being brought
+ * down from CPUm while we try to run a function on CPUn.
+ * To prevent this it is better to execute the whole SMP
+ * function under get_online_cpus.
+ * This call ensures that the hotplug mechanism does not kick in
+ * until we are done dealing with online CPUs.
+ * If the hotplug worker is already running, no worries, because the
+ * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
+ *
+ * The code structure below is proposed in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ * work specific to this cpu in progress
+ *
+ * According to the documentation, calling get_online_cpus is NOT required if
+ * we are running from tasklet context. Since dhd_rx_napi_dispatcher_work can
+ * run from Work Queue context we have to call these functions.
+ */
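The hotplug-safe pattern that comment describes looks roughly like the following sketch (kernel context; `do_on_cpu` is a hypothetical callback, and on recent kernels get_online_cpus()/put_online_cpus() are spelled cpus_read_lock()/cpus_read_unlock()):

    /* Illustration only: hold off CPU hotplug while targeting a specific CPU */
    #include <linux/cpu.h>
    #include <linux/smp.h>

    static void do_on_cpu(void *info)
    {
        /* runs on the target CPU in IPI context */
    }

    static void safe_cross_cpu_call(int target_cpu, void *arg)
    {
        get_online_cpus();            /* block hotplug transitions */
        if (cpu_online(target_cpu))
            smp_call_function_single(target_cpu, do_on_cpu, arg, 0);
        put_online_cpus();
    }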
Since dhd_rx_napi_dispatcher_work can + * run from Work Queue context we have to call these functions + */ +void dhd_rx_napi_dispatcher_work(struct work_struct * work) +{ + struct dhd_info *dhd; + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + dhd = container_of(work, struct dhd_info, rx_napi_dispatcher_work); + GCC_DIAGNOSTIC_POP(); + + dhd_napi_schedule(dhd); +} + +/** + * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct + * to run on another CPU. The rx_napi_struct's poll function will retrieve all + * the packets enqueued into the rx_napi_queue and sendup. + * The producer's rx packet queue is appended to the rx_napi_queue before + * dispatching the rx_napi_struct. + */ +void +dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) +{ + unsigned long flags; + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + int rx_napi_cpu; + int prev_dpc_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__, + skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue))); + + /* append the producer's queue of packets to the napi's rx process queue */ + DHD_RX_NAPI_QUEUE_LOCK(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue); + DHD_RX_NAPI_QUEUE_UNLOCK(&dhd->rx_napi_queue.lock, flags); + + /* If sysfs lb_rxp_active is not set, schedule on current cpu */ + if (!atomic_read(&dhd->lb_rxp_active)) + { + dhd_napi_schedule(dhd); + return; + } + + /* + * Get cpu will disable pre-ermption and will not allow any cpu to go offline + * and call put_cpu() only after scheduling rx_napi_dispatcher_work. + */ + curr_cpu = get_cpu(); + + prev_dpc_cpu = atomic_read(&dhd->prev_dpc_cpu); + + rx_napi_cpu = atomic_read(&dhd->rx_napi_cpu); + + /* + * Avoid cpu candidacy, if override is set via sysfs for changing cpu mannually + */ + if (dhd->dhd_lb_candidacy_override) { + if (!cpu_online(rx_napi_cpu)) { + rx_napi_cpu = curr_cpu; + } + } else { + /* + * Now if the DPC has scheduled in the same CPU + * that is chosen for Rx napi processing + * OR scheduled on different cpu than previously it was scheduled, + * OR if rx_napi_cpu is offline, + * Call cpu candidacy algorithm to recompute napi_cpu. 
+ */ + if ((curr_cpu == rx_napi_cpu) || (curr_cpu != prev_dpc_cpu) || + !cpu_online(rx_napi_cpu)) { + /* Re compute LB CPUs */ + dhd_select_cpu_candidacy(dhd); + /* Use updated napi cpu */ + rx_napi_cpu = atomic_read(&dhd->rx_napi_cpu); + } + + } + + DHD_LB_INFO(("%s : schedule to curr_cpu : %d, rx_napi_cpu : %d\n", + __FUNCTION__, curr_cpu, rx_napi_cpu)); + dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, rx_napi_cpu); + DHD_LB_STATS_INCR(dhd->napi_sched_cnt); + + put_cpu(); +} + +/** + * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue + */ +void +dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + dhd_info_t *dhd = dhdp->info; + + DHD_LB_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__, + pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue))); + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx); + __skb_queue_tail(&dhd->rx_pend_queue, pkt); + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt); +} + +unsigned long +dhd_read_lb_rxp(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return atomic_read(&dhd->lb_rxp_active); +} + +uint32 +dhd_lb_rxp_process_qlen(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return skb_queue_len(&dhd->rx_process_queue); +} +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) +int +BCMFASTPATH(dhd_lb_sendpkt)(dhd_info_t *dhd, struct net_device *net, + int ifidx, void *skb) +{ + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt); + + /* If the feature is disabled run-time do TX from here */ + if (atomic_read(&dhd->lb_txp_active) == 0) { + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + return __dhd_sendpkt(&dhd->pub, ifidx, skb); + } + + /* Store the address of net device and interface index in the Packet tag */ + DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net); + DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx); + + /* Enqueue the skb into tx_pend_queue */ + skb_queue_tail(&dhd->tx_pend_queue, skb); + + DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net)); + + /* Dispatch the Tx job to be processed by the tx_tasklet */ + dhd_lb_tx_dispatch(&dhd->pub); + + return NETDEV_TX_OK; +} +#endif /* DHD_LB_TXP */ + +#ifdef DHD_LB_TXP +#define DHD_LB_TXBOUND 64 +/* + * Function that performs the TX processing on a given CPU + */ +bool +dhd_lb_tx_process(dhd_info_t *dhd) +{ + struct sk_buff *skb; + int cnt = 0; + struct net_device *net; + int ifidx; + bool resched = FALSE; + + DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__)); + if (dhd == NULL) { + DHD_ERROR((" Null pointer DHD \r\n")); + return resched; + } + + BCM_REFERENCE(net); + + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + + /* Base Loop to perform the actual Tx */ + do { + skb = skb_dequeue(&dhd->tx_pend_queue); + if (skb == NULL) { + DHD_TRACE(("Dequeued a Null Packet \r\n")); + break; + } + cnt++; + + net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + + DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb, + net, ifidx)); + + __dhd_sendpkt(&dhd->pub, ifidx, skb); + + if (cnt >= DHD_LB_TXBOUND) { + resched = TRUE; + break; + } + + } while (1); + + DHD_LB_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt)); + + return resched; +} + +void +dhd_lb_tx_handler(unsigned long data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + + if (dhd_lb_tx_process(dhd)) { + dhd_tasklet_schedule(&dhd->tx_tasklet); + } +} + +#endif /* DHD_LB_TXP */ +#endif 
/* DHD_LB */ + +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) +void +dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask) +{ + unsigned int irq = (unsigned int)-1; + int err = BCME_OK; + + if (!dhdp) { + DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__)); + return; + } + + if (!dhdp->bus) { + DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__)); + return; + } + + DHD_ERROR(("%s : irq set affinity cpu:0x%lx\n", + __FUNCTION__, *cpumask_bits(cpumask))); + + dhdpcie_get_pcieirq(dhdp->bus, &irq); +#ifdef BCMDHD_MODULAR + err = irq_set_affinity_hint(irq, cpumask); +#else + err = irq_set_affinity(irq, cpumask); +#endif /* BCMDHD_MODULAR */ + if (err) + DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n", + __FUNCTION__, *cpumask_bits(cpumask))); +} +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ diff --git a/bcmdhd.101.10.361.x/dhd_linux_pktdump.c b/bcmdhd.101.10.361.x/dhd_linux_pktdump.c new file mode 100755 index 0000000..66e0b44 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_pktdump.c @@ -0,0 +1,1578 @@ +/* + * Packet dump helper functions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DHD_PKTDUMP(arg)	printf arg
+#define DHD_PKTDUMP_MEM(arg)	printf arg
+#define PACKED_STRUCT __attribute__ ((packed))
+
+#define EAPOL_HDR_LEN		4
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* EAPOL-Key header field size */
+#define AKW_BLOCK_LEN		8
+#define WPA_KEY_REPLAY_LEN	8
+#define WPA_KEY_NONCE_LEN	32
+#define WPA_KEY_IV_LEN		16
+#define WPA_KEY_RSC_LEN		8
+#define WPA_KEY_ID_LEN		8
+#define WPA_KEY_MIC_LEN		16
+#define WPA_MAX_KEY_SIZE	32
+#define WPA_KEY_DATA_LEN	(WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+
+/* Key information bit */
+#define KEYINFO_TYPE_MASK	(1 << 3)
+#define KEYINFO_INSTALL_MASK	(1 << 6)
+#define KEYINFO_KEYACK_MASK	(1 << 7)
+#define KEYINFO_KEYMIC_MASK	(1 << 8)
+#define KEYINFO_SECURE_MASK	(1 << 9)
+#define KEYINFO_ERROR_MASK	(1 << 10)
+#define KEYINFO_REQ_MASK	(1 << 11)
+
+/* EAP Code */
+#define EAP_CODE_REQUEST	1	/* Request */
+#define EAP_CODE_RESPONSE	2	/* Response */
+#define EAP_CODE_SUCCESS	3	/* Success */
+#define EAP_CODE_FAILURE	4	/* Failure */
+
+/* EAP Type */
+#define EAP_TYPE_RSVD		0	/* Reserved */
+#define EAP_TYPE_IDENT		1	/* Identity */
+#define EAP_TYPE_NOTI		2	/* Notification */
+#define EAP_TYPE_TLS		13	/* EAP-TLS */
+#define EAP_TYPE_LEAP		17	/* Cisco-LEAP */
+#define EAP_TYPE_TTLS		21	/* EAP-TTLS */
+#define EAP_TYPE_AKA		23	/* EAP-AKA */
+#define EAP_TYPE_PEAP		25	/* EAP-PEAP */
+#define EAP_TYPE_FAST		43	/* EAP-FAST */
+#define EAP_TYPE_PSK		47	/* EAP-PSK */
+#define EAP_TYPE_AKAP		50	/* EAP-AKA' */
+#define EAP_TYPE_EXP		254	/* Reserved for Expanded Types */
+
+/* WSC */
+#define EAP_HDR_LEN		5
+#define EAP_WSC_NONCE_OFFSET	10
+#define EAP_WSC_DATA_OFFSET	(OFFSETOF(eap_wsc_fmt_t, data))
+#define EAP_WSC_MIN_DATA_LEN	((EAP_HDR_LEN) + (EAP_WSC_DATA_OFFSET))
+#define WFA_VID			"\x00\x37\x2A"	/* WFA SMI code */
+#define WFA_VID_LEN		3		/* WFA VID length */
+#define WFA_VTYPE		1u		/* WFA Vendor type */
+
+/* WSC opcode */
+#define WSC_OPCODE_UPNP		0
+#define WSC_OPCODE_START	1
+#define WSC_OPCODE_ACK		2
+#define WSC_OPCODE_NACK		3
+#define WSC_OPCODE_MSG		4
+#define WSC_OPCODE_DONE		5
+#define WSC_OPCODE_FRAG_ACK	6
+
+/* WSC flag */
+#define WSC_FLAG_MF		1	/* more fragments */
+#define WSC_FLAG_LF		2	/* length field */
+
+/* WSC message code */
+#define WSC_ATTR_MSG		0x1022
+#define WSC_MSG_M1		0x04
+#define WSC_MSG_M2		0x05
+#define WSC_MSG_M3		0x07
+#define WSC_MSG_M4		0x08
+#define WSC_MSG_M5		0x09
+#define WSC_MSG_M6		0x0A
+#define WSC_MSG_M7		0x0B
+#define WSC_MSG_M8		0x0C
+
+/* Debug prints */
+typedef enum pkt_cnt_type {
+	PKT_CNT_TYPE_INVALID	= 0,
+	PKT_CNT_TYPE_ARP	= 1,
+	PKT_CNT_TYPE_DNS	= 2,
+	PKT_CNT_TYPE_MAX	= 3
+} pkt_cnt_type_t;
+
+typedef struct pkt_cnt {
+	uint32 tx_cnt;
+	uint32 tx_err_cnt;
+	uint32 rx_cnt;
+} pkt_cnt_t;
+
+typedef struct pkt_cnt_log {
+	bool enabled;
+	uint16 reason;
+	timer_list_compat_t pktcnt_timer;
+	pkt_cnt_t arp_cnt;
+	pkt_cnt_t dns_cnt;
+} pkt_cnts_log_t;
+
+#define PKT_CNT_TIMER_INTERNVAL_MS	5000	/* packet count timeout(ms) */
+#define PKT_CNT_RSN_VALID(rsn)	\
+	(((rsn) > (PKT_CNT_RSN_INVALID)) && ((rsn) < (PKT_CNT_RSN_MAX)))
+
+#ifdef DHD_PKTDUMP_ROAM
+static const char pkt_cnt_msg[][20] = {
+	"INVALID",
"ROAM_SUCCESS", + "GROUP_KEY_UPDATE", + "CONNECT_SUCCESS", + "INVALID" +}; +#endif + +static const char tx_pktfate[][30] = { + "TX_PKT_FATE_ACKED", /* 0: WLFC_CTL_PKTFLAG_DISCARD */ + "TX_PKT_FATE_FW_QUEUED", /* 1: WLFC_CTL_PKTFLAG_D11SUPPRESS */ + "TX_PKT_FATE_FW_QUEUED", /* 2: WLFC_CTL_PKTFLAG_WLSUPPRESS */ + "TX_PKT_FATE_FW_DROP_INVALID", /* 3: WLFC_CTL_PKTFLAG_TOSSED_BYWLC */ + "TX_PKT_FATE_SENT", /* 4: WLFC_CTL_PKTFLAG_DISCARD_NOACK */ + "TX_PKT_FATE_FW_DROP_OTHER", /* 5: WLFC_CTL_PKTFLAG_SUPPRESS_ACKED */ + "TX_PKT_FATE_FW_DROP_EXPTIME", /* 6: WLFC_CTL_PKTFLAG_EXPIRED */ + "TX_PKT_FATE_FW_DROP_OTHER", /* 7: WLFC_CTL_PKTFLAG_DROPPED */ + "TX_PKT_FATE_FW_PKT_FREE", /* 8: WLFC_CTL_PKTFLAG_MKTFREE */ +}; + +#define DBGREPLAY " Replay Counter: %02x%02x%02x%02x%02x%02x%02x%02x" +#define REPLAY_FMT(key) ((const eapol_key_hdr_t *)(key))->replay[0], \ + ((const eapol_key_hdr_t *)(key))->replay[1], \ + ((const eapol_key_hdr_t *)(key))->replay[2], \ + ((const eapol_key_hdr_t *)(key))->replay[3], \ + ((const eapol_key_hdr_t *)(key))->replay[4], \ + ((const eapol_key_hdr_t *)(key))->replay[5], \ + ((const eapol_key_hdr_t *)(key))->replay[6], \ + ((const eapol_key_hdr_t *)(key))->replay[7] +#define TXFATE_FMT " TX_PKTHASH:0x%X TX_PKT_FATE:%s" +#define TX_PKTHASH(pkthash) ((pkthash) ? (*pkthash) : (0)) +#define TX_FATE_STR(fate) (((*fate) <= (WLFC_CTL_PKTFLAG_MKTFREE)) ? \ + (tx_pktfate[(*fate)]) : "TX_PKT_FATE_FW_DROP_OTHER") +#define TX_FATE(fate) ((fate) ? (TX_FATE_STR(fate)) : "N/A") +#define TX_FATE_ACKED(fate) ((fate) ? ((*fate) == (WLFC_CTL_PKTFLAG_DISCARD)) : (0)) + +#define EAP_PRINT(x, args...) \ + do { \ + if (dump_msg_level & DUMP_EAPOL_VAL) { \ + if (tx) { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s)"TXFATE_FMT"\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s)\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf)); \ + } \ + } \ + } while (0) + +#define EAP_PRINT_REPLAY(x, args...) \ + do { \ + if (dump_msg_level & DUMP_EAPOL_VAL) { \ + if (tx) { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s)"DBGREPLAY TXFATE_FMT"\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \ + REPLAY_FMT(eap_key), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s)"DBGREPLAY"\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \ + REPLAY_FMT(eap_key))); \ + } \ + } \ + } while (0) + +#define EAP_PRINT_OTHER(x, args...) \ + do { \ + if (dump_msg_level & DUMP_EAPOL_VAL) { \ + if (tx) { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s) " \ + "ver %d, type %d"TXFATE_FMT"\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \ + eapol_hdr->version, eapol_hdr->type, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s) " \ + "ver %d, type %d\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \ + eapol_hdr->version, eapol_hdr->type)); \ + } \ + } \ + } while (0) + +#define EAP_PRINT_OTHER_4WAY(x, args...) 
\ + do { \ + if (dump_msg_level & DUMP_EAPOL_VAL) { \ + if (tx) { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s) " \ + "ver %d type %d keytype %d keyinfo 0x%02X"TXFATE_FMT"\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \ + eapol_hdr->version, eapol_hdr->type, eap_key->type, \ + (uint32)hton16(eap_key->key_info), \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s) " \ + "ver %d type %d keytype %d keyinfo 0x%02X\n", \ + ifname, ## args, \ + tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \ + eapol_hdr->version, eapol_hdr->type, eap_key->type, \ + (uint32)hton16(eap_key->key_info))); \ + } \ + } \ + } while (0) + +#define UDP_PORT_DNS 53 /* UDP DNS port */ + +/* EAPOL header */ +typedef struct eapol_header { + struct ether_header eth; /* 802.3/Ethernet header */ + uint8 version; /* EAPOL protocol version */ + uint8 type; /* EAPOL type */ + uint16 length; /* Length of body */ + uint8 body[1]; /* Body (optional) */ +} PACKED_STRUCT eapol_header_t; + +/* EAP header */ +typedef struct eap_header_fmt { + uint8 code; + uint8 id; + uint16 len; + uint8 type; + uint8 data[1]; +} PACKED_STRUCT eap_header_fmt_t; + +/* WSC EAP format */ +typedef struct eap_wsc_fmt { + uint8 oui[3]; + uint32 ouitype; + uint8 opcode; + uint8 flags; + uint8 data[1]; +} PACKED_STRUCT eap_wsc_fmt_t; + +/* EAPOL-Key */ +typedef struct eapol_key_hdr { + uint8 type; /* Key Descriptor Type */ + uint16 key_info; /* Key Information (unaligned) */ + uint16 key_len; /* Key Length (unaligned) */ + uint8 replay[WPA_KEY_REPLAY_LEN]; /* Replay Counter */ + uint8 nonce[WPA_KEY_NONCE_LEN]; /* Nonce */ + uint8 iv[WPA_KEY_IV_LEN]; /* Key IV */ + uint8 rsc[WPA_KEY_RSC_LEN]; /* Key RSC */ + uint8 id[WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ + uint8 mic[WPA_KEY_MIC_LEN]; /* Key MIC */ + uint16 data_len; /* Key Data Length */ + uint8 data[WPA_KEY_DATA_LEN]; /* Key data */ +} PACKED_STRUCT eapol_key_hdr_t; + +typedef struct hdr_fmt { + struct ipv4_hdr iph; + struct bcmudp_hdr udph; +} PACKED_STRUCT hdr_fmt_t; + +msg_eapol_t +dhd_is_4way_msg(uint8 *pktdata) +{ + eapol_header_t *eapol_hdr; + eapol_key_hdr_t *eap_key; + msg_eapol_t type = EAPOL_OTHER; + bool pair, ack, mic, kerr, req, sec, install; + uint16 key_info; + + if (!pktdata) { + DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__)); + return type; + } + + eapol_hdr = (eapol_header_t *)pktdata; + eap_key = (eapol_key_hdr_t *)(eapol_hdr->body); + if (eap_key->type != EAPOL_WPA2_KEY && eap_key->type != EAPOL_WPA_KEY) { + return type; + } + + key_info = hton16(eap_key->key_info); + pair = !!(key_info & KEYINFO_TYPE_MASK); + ack = !!(key_info & KEYINFO_KEYACK_MASK); + mic = !!(key_info & KEYINFO_KEYMIC_MASK); + kerr = !!(key_info & KEYINFO_ERROR_MASK); + req = !!(key_info & KEYINFO_REQ_MASK); + sec = !!(key_info & KEYINFO_SECURE_MASK); + install = !!(key_info & KEYINFO_INSTALL_MASK); + + if (eap_key->type == EAPOL_WPA2_KEY) { + if (pair && !install && ack && !mic && !sec && !kerr && !req) { + type = EAPOL_4WAY_M1; + } else if (pair && !install && !ack && mic && !sec && !kerr && !req) { + type = EAPOL_4WAY_M2; + } else if (pair && ack && mic && sec && !kerr && !req) { + type = EAPOL_4WAY_M3; + } else if (pair && !install && !ack && mic && sec && !req && !kerr) { + type = EAPOL_4WAY_M4; + } else if (!pair && !install && ack && mic && sec && !req && !kerr) { + type = EAPOL_GROUPKEY_M1; + } else if (!pair && !install && !ack && mic && sec && !req && !kerr) { + type = 
EAPOL_GROUPKEY_M2; + } else { + type = EAPOL_OTHER; + if (dump_msg_level & DUMP_EAPOL_VAL) { + printf("WPA2: key_info=0x%x, pair=%d, ack=%d, mic=%d, sec=%d, kerr=%d, req=%d\n", + key_info, pair, ack, mic, sec, kerr, req); + } + } + } + else if (eap_key->type == EAPOL_WPA_KEY) { + if (pair && !install && ack && !mic && !sec && !kerr && !req) { + type = EAPOL_4WAY_M1; + } else if (pair && !install && !ack && mic && !sec && !kerr && !req && eap_key->data_len) { + type = EAPOL_4WAY_M2; + } else if (pair && install && ack && mic && !sec && !kerr && !req) { + type = EAPOL_4WAY_M3; + } else if (pair && !install && !ack && mic && !sec && !req && !kerr) { + type = EAPOL_4WAY_M4; + } else if (!pair && !install && ack && mic && sec && !req && !kerr) { + type = EAPOL_GROUPKEY_M1; + } else if (!pair && !install && !ack && mic && sec && !req && !kerr) { + type = EAPOL_GROUPKEY_M2; + } else { + type = EAPOL_OTHER; + if (dump_msg_level & DUMP_EAPOL_VAL) { + printf("WPA: key_info=0x%x, pair=%d, ack=%d, mic=%d, sec=%d, kerr=%d, req=%d\n", + key_info, pair, ack, mic, sec, kerr, req); + } + } + } + else { + type = EAPOL_OTHER; + if (dump_msg_level & DUMP_EAPOL_VAL) { + printf("OTHER: key_info=0x%x, pair=%d, ack=%d, mic=%d, sec=%d, kerr=%d, req=%d\n", + key_info, pair, ack, mic, sec, kerr, req); + } + } + + return type; +} + +void +dhd_dump_pkt(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen, + bool tx, uint32 *pkthash, uint16 *pktfate) +{ + struct ether_header *eh; + uint16 ether_type; + + if (!pktdata || pktlen < ETHER_HDR_LEN) { + return; + } + +#if defined(BCMPCIE) && defined(DHD_PKT_LOGGING) + if (tx && !pkthash && !pktfate) { + return; + } +#endif /* BCMPCIE && DHD_PKT_LOGGING */ + + eh = (struct ether_header *)pktdata; + ether_type = ntoh16(eh->ether_type); + if (ether_type == ETHER_TYPE_802_1X) { + dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen, + tx, pkthash, pktfate); + } + if (ether_type == ETHER_TYPE_IP) { + if (dhd_check_dhcp(pktdata)) { + dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + } else if (dhd_check_icmp(pktdata)) { + dhd_icmp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + } else if (dhd_check_dns(pktdata)) { + dhd_dns_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + } + } + if (ether_type == ETHER_TYPE_ARP) { + if (dhd_check_arp(pktdata, ether_type)) { + dhd_arp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + } + } + dhd_trx_pkt_dump(dhdp, ifidx, pktdata, pktlen, tx); +} + +#ifdef DHD_PKTDUMP_ROAM +static void +dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) +{ + pkt_cnts_log_t *pktcnts; + pkt_cnt_t *cnt; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + if (!pktcnts) { + DHD_ERROR(("%s: pktcnts is NULL\n", __FUNCTION__)); + return; + } + + if (!pktcnts->enabled || (tx && !pktfate)) { + return; + } + + if (pkttype == PKT_CNT_TYPE_ARP) { + cnt = (pkt_cnt_t *)&pktcnts->arp_cnt; + } else if (pkttype == PKT_CNT_TYPE_DNS) { + cnt = (pkt_cnt_t *)&pktcnts->dns_cnt; + } else { + /* invalid packet type */ + return; + } + + if (tx) { + TX_FATE_ACKED(pktfate) ? 
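+/* Accounting sketch: for TX the firmware-reported fate picks the
+ * bucket. Only WLFC_CTL_PKTFLAG_DISCARD (0, reported as
+ * TX_PKT_FATE_ACKED) counts as a successful transmission; every other
+ * fate (suppressed, tossed, expired, freed) lands in tx_err_cnt, and
+ * RX frames, which carry no fate, simply bump rx_cnt.
+ */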
cnt->tx_cnt++ : cnt->tx_err_cnt++; + } else { + cnt->rx_cnt++; + } +} + +static void +dhd_dump_pkt_timer(unsigned long data) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)data; + pkt_cnts_log_t *pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + + pktcnts->enabled = FALSE; + + /* print out the packet counter value */ + DHD_PKTDUMP(("============= PACKET COUNT SUMMARY ============\n")); + DHD_PKTDUMP(("- Reason: %s\n", pkt_cnt_msg[pktcnts->reason])); + DHD_PKTDUMP(("- Duration: %d msec(s)\n", PKT_CNT_TIMER_INTERNVAL_MS)); + DHD_PKTDUMP(("- ARP PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n", + pktcnts->arp_cnt.tx_cnt, pktcnts->arp_cnt.tx_err_cnt, + pktcnts->arp_cnt.rx_cnt)); + DHD_PKTDUMP(("- DNS PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n", + pktcnts->dns_cnt.tx_cnt, pktcnts->dns_cnt.tx_err_cnt, + pktcnts->dns_cnt.rx_cnt)); + DHD_PKTDUMP(("============= END OF COUNT SUMMARY ============\n")); +} + +void +dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + DHD_ERROR(("%s: dhdp or dhdp->pktcnts is NULL\n", + __FUNCTION__)); + return; + } + + if (!PKT_CNT_RSN_VALID(rsn)) { + DHD_ERROR(("%s: invalid reason code %d\n", + __FUNCTION__, rsn)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + if (timer_pending(&pktcnts->pktcnt_timer)) { + del_timer_sync(&pktcnts->pktcnt_timer); + } + + bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t)); + bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t)); + pktcnts->reason = rsn; + pktcnts->enabled = TRUE; + mod_timer(&pktcnts->pktcnt_timer, + jiffies + msecs_to_jiffies(PKT_CNT_TIMER_INTERNVAL_MS)); + DHD_PKTDUMP(("%s: Arm the pktcnt timer. reason=%d\n", + __FUNCTION__, rsn)); +} + +void +dhd_dump_pkt_init(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)MALLOCZ(dhdp->osh, sizeof(pkt_cnts_log_t)); + if (!pktcnts) { + DHD_ERROR(("%s: failed to allocate memory for pktcnts\n", + __FUNCTION__)); + return; + } + + /* init timers */ + init_timer_compat(&pktcnts->pktcnt_timer, dhd_dump_pkt_timer, dhdp); + dhdp->pktcnts = pktcnts; +} + +void +dhd_dump_pkt_deinit(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + pktcnts->enabled = FALSE; + del_timer_sync(&pktcnts->pktcnt_timer); + MFREE(dhdp->osh, dhdp->pktcnts, sizeof(pkt_cnts_log_t)); + dhdp->pktcnts = NULL; +} + +void +dhd_dump_pkt_clear(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + pktcnts->enabled = FALSE; + del_timer_sync(&pktcnts->pktcnt_timer); + pktcnts->reason = 0; + bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t)); + bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t)); +} + +bool +dhd_dump_pkt_enabled(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + return FALSE; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + + return pktcnts->enabled; +} +#else +static INLINE void +dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) { } +static INLINE bool +dhd_dump_pkt_enabled(dhd_pub_t *dhdp) { return FALSE; } +#endif /* DHD_PKTDUMP_ROAM */ + +#ifdef DHD_8021X_DUMP +static void +dhd_dump_wsc_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, 
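+/* Parse sketch for the function below: a WSC (WPS) frame is EAP type
+ * EAP_TYPE_EXP carrying the WFA vendor OUI/type, one opcode
+ * (Start/Msg/Done) and, for WSC_OPCODE_MSG, big-endian XTLVs from
+ * which WSC_ATTR_MSG yields the M1..M8 message number:
+ *
+ *   eapol hdr | eap(code,id,len,type=EXP) | oui,ouitype | opcode,flags | xtlvs
+ *
+ * Fragmented frames (nonzero flags field) are skipped.
+ */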
uint16 *pktfate) +{ + eapol_header_t *eapol_hdr; + eap_header_fmt_t *eap_hdr; + eap_wsc_fmt_t *eap_wsc; + char *ifname; + uint16 eap_len; + bool cond; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!pktdata) { + DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + if (pktlen < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) { + DHD_ERROR(("%s: invalid pkt length\n", __FUNCTION__)); + return; + } + + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + + eapol_hdr = (eapol_header_t *)pktdata; + eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body); + if (eap_hdr->type != EAP_TYPE_EXP) { + return; + } + + eap_len = ntoh16(eap_hdr->len); + if (eap_len < EAP_WSC_MIN_DATA_LEN) { + return; + } + + eap_wsc = (eap_wsc_fmt_t *)(eap_hdr->data); + if (bcmp(eap_wsc->oui, (const uint8 *)WFA_VID, WFA_VID_LEN) || + (ntoh32(eap_wsc->ouitype) != WFA_VTYPE)) { + return; + } + + if (eap_wsc->flags) { + return; + } + + ifname = dhd_ifname(dhd, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + + if (eap_wsc->opcode == WSC_OPCODE_MSG) { + const uint8 *tlv_buf = (const uint8 *)(eap_wsc->data); + const uint8 *msg; + uint16 msglen; + uint16 wsc_data_len = (uint16)(eap_len - EAP_HDR_LEN - EAP_WSC_DATA_OFFSET); + bcm_xtlv_opts_t opt = BCM_XTLV_OPTION_IDBE | BCM_XTLV_OPTION_LENBE; + + msg = bcm_get_data_from_xtlv_buf(tlv_buf, wsc_data_len, + WSC_ATTR_MSG, &msglen, opt); + if (msg && msglen) { + switch (*msg) { + case WSC_MSG_M1: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M1); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M1), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M1"); + break; + case WSC_MSG_M2: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M2); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M2), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M2"); + break; + case WSC_MSG_M3: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M3); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M3), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M3"); + break; + case WSC_MSG_M4: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M4); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M4), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M4"); + break; + case WSC_MSG_M5: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M5); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M5), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M5"); + break; + case WSC_MSG_M6: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M6); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M6), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M6"); + break; + case WSC_MSG_M7: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M7); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M7), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M7"); + break; + case WSC_MSG_M8: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M8); +#endif + DHD_STATLOG_DATA(dhd, ST(WPS_M8), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M8"); + break; + default: + EAP_PRINT("EAP Packet, WPS MSG TYPE %d", *msg); + break; + } + } + } else if (eap_wsc->opcode == WSC_OPCODE_START) { +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WSC_START); +#endif + DHD_STATLOG_DATA(dhd, ST(WSC_START), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WSC Start"); + } else if (eap_wsc->opcode == WSC_OPCODE_DONE) { +#ifdef WL_EXT_IAPSTA + 
wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WSC_DONE); +#endif + DHD_STATLOG_DATA(dhd, ST(WSC_DONE), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WSC Done"); + } else { + EAP_PRINT("EAP Packet, WSC opcode=%d", eap_wsc->opcode); + } +} + +static void +dhd_dump_eap_packet(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) +{ + eapol_header_t *eapol_hdr; + eap_header_fmt_t *eap_hdr; + char *ifname; + bool cond; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!pktdata) { + DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + + eapol_hdr = (eapol_header_t *)pktdata; + eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body); + ifname = dhd_ifname(dhd, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + + if (eap_hdr->code == EAP_CODE_REQUEST || + eap_hdr->code == EAP_CODE_RESPONSE) { + bool isreq = (eap_hdr->code == EAP_CODE_REQUEST); + switch (eap_hdr->type) { + case EAP_TYPE_IDENT: + if (isreq) { +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_REQID); +#endif + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_IDENTITY), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, Identity"); + } else { +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_RSPID); +#endif + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_IDENTITY), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, Identity"); + } + break; + case EAP_TYPE_TLS: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, TLS"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, TLS"); + } + break; + case EAP_TYPE_LEAP: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_LEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, LEAP"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_LEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, LEAP"); + } + break; + case EAP_TYPE_TTLS: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TTLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, TTLS"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TTLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, TTLS"); + } + break; + case EAP_TYPE_AKA: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKA), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, AKA"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKA), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, AKA"); + } + break; + case EAP_TYPE_PEAP: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, PEAP"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, PEAP"); + } + break; + case EAP_TYPE_FAST: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_FAST), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, FAST"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_FAST), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, FAST"); + } + break; + case EAP_TYPE_PSK: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PSK), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, PSK"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PSK), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, PSK"); + } + break; + case EAP_TYPE_AKAP: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, AKAP"); + } else { + 
DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, AKAP"); + } + break; + case EAP_TYPE_EXP: + dhd_dump_wsc_message(dhd, ifidx, pktdata, pktlen, tx, + pkthash, pktfate); + break; + default: + EAP_PRINT("EAP Packet, EAP TYPE %d", eap_hdr->type); + break; + } + } else if (eap_hdr->code == EAP_CODE_SUCCESS) { + DHD_STATLOG_DATA(dhd, ST(EAP_SUCCESS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Success"); + } else if (eap_hdr->code == EAP_CODE_FAILURE) { + DHD_STATLOG_DATA(dhd, ST(EAP_FAILURE), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Failure"); + } else { + EAP_PRINT("EAP Packet, EAP CODE %d", eap_hdr->code); + } +} + +static void +dhd_dump_eapol_4way_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + eapol_header_t *eapol_hdr; + eapol_key_hdr_t *eap_key; + msg_eapol_t type; + char *ifname; + bool cond; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!pktdata) { + DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + + type = dhd_is_4way_msg(pktdata); + ifname = dhd_ifname(dhd, ifidx); + eapol_hdr = (eapol_header_t *)pktdata; + eap_key = (eapol_key_hdr_t *)(eapol_hdr->body); + cond = (tx && pktfate) ? FALSE : TRUE; + + if (eap_key->type != EAPOL_WPA2_KEY && eap_key->type != EAPOL_WPA_KEY) { + EAP_PRINT_OTHER("NON EAPOL_WPA2_KEY %d", eap_key->type); + return; + } + + switch (type) { + case EAPOL_4WAY_M1: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M1); +#endif + DHD_STATLOG_DATA(dhd, ST(EAPOL_M1), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M1"); + break; + case EAPOL_4WAY_M2: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M2); +#endif + DHD_STATLOG_DATA(dhd, ST(EAPOL_M2), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M2"); + break; + case EAPOL_4WAY_M3: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M3); +#endif + DHD_STATLOG_DATA(dhd, ST(EAPOL_M3), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M3"); + break; + case EAPOL_4WAY_M4: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M4); +#endif + DHD_STATLOG_DATA(dhd, ST(EAPOL_M4), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M4"); + break; + case EAPOL_GROUPKEY_M1: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_GROUPKEY_M1); +#endif + DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M1), ifidx, tx, cond); + EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M1"); + break; + case EAPOL_GROUPKEY_M2: +#ifdef WL_EXT_IAPSTA + wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_GROUPKEY_M2); +#endif + DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M2), ifidx, tx, cond); + EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M2"); + if (ifidx == 0 && tx && pktfate) { + dhd_dump_mod_pkt_timer(dhd, PKT_CNT_RSN_GRPKEY_UP); + } + break; + default: + DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond); + EAP_PRINT_OTHER("OTHER 4WAY type=%d", type); + break; + } +} + +void +dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) +{ + char *ifname; + eapol_header_t *eapol_hdr = (eapol_header_t *)pktdata; + bool cond; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!pktdata) { + DHD_ERROR(("%s: pktdata 
is NULL\n", __FUNCTION__)); + return; + } + + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + + eapol_hdr = (eapol_header_t *)pktdata; + ifname = dhd_ifname(dhd, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + + if (eapol_hdr->type == EAP_PACKET) { + dhd_dump_eap_packet(dhd, ifidx, pktdata, pktlen, tx, + pkthash, pktfate); + } else if (eapol_hdr->type == EAPOL_START) { + DHD_STATLOG_DATA(dhd, ST(EAPOL_START), ifidx, tx, cond); + EAP_PRINT("EAP Packet, EAPOL-Start"); + } else if (eapol_hdr->type == EAPOL_KEY) { + dhd_dump_eapol_4way_message(dhd, ifidx, pktdata, tx, + pkthash, pktfate); + } else { + DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond); + EAP_PRINT_OTHER("OTHER 8021X"); + } +} +#endif /* DHD_8021X_DUMP */ + +bool +dhd_check_ip_prot(uint8 *pktdata, uint16 ether_type) +{ + hdr_fmt_t *b = (hdr_fmt_t *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = &b->iph; + + /* check IP header */ + if ((ether_type != ETHER_TYPE_IP) || + (IPV4_HLEN(iph) < IPV4_HLEN_MIN) || + (IP_VER(iph) != IP_VER_4)) { + return FALSE; + } + + return TRUE; +} + +bool +dhd_check_dhcp(uint8 *pktdata) +{ + hdr_fmt_t *b = (hdr_fmt_t *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = &b->iph; + + if (IPV4_PROT(iph) != IP_PROT_UDP) { + return FALSE; + } + + /* check UDP port for bootp (67, 68) */ + if (b->udph.src_port != htons(DHCP_PORT_SERVER) && + b->udph.src_port != htons(DHCP_PORT_CLIENT) && + b->udph.dst_port != htons(DHCP_PORT_SERVER) && + b->udph.dst_port != htons(DHCP_PORT_CLIENT)) { + return FALSE; + } + + /* check header length */ + if (ntohs(iph->tot_len) < ntohs(b->udph.len) + sizeof(struct bcmudp_hdr)) { + return FALSE; + } + return TRUE; +} + +#ifdef DHD_DHCP_DUMP +#define BOOTP_CHADDR_LEN 16 +#define BOOTP_SNAME_LEN 64 +#define BOOTP_FILE_LEN 128 +#define BOOTP_MIN_DHCP_OPT_LEN 312 +#define BOOTP_MAGIC_COOKIE_LEN 4 + +#define DHCP_MSGTYPE_DISCOVER 1 +#define DHCP_MSGTYPE_OFFER 2 +#define DHCP_MSGTYPE_REQUEST 3 +#define DHCP_MSGTYPE_DECLINE 4 +#define DHCP_MSGTYPE_ACK 5 +#define DHCP_MSGTYPE_NAK 6 +#define DHCP_MSGTYPE_RELEASE 7 +#define DHCP_MSGTYPE_INFORM 8 + +#define DHCP_PRINT(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP(("[%s] " str " %8s[%8s] [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \ + ifname, typestr, opstr, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[%s] " str " %8s[%8s] [RX] : %s(%s) %s %s(%s)\n", \ + ifname, typestr, opstr, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \ + } \ + } while (0) + +typedef struct bootp_fmt { + struct ipv4_hdr iph; + struct bcmudp_hdr udph; + uint8 op; + uint8 htype; + uint8 hlen; + uint8 hops; + uint32 transaction_id; + uint16 secs; + uint16 flags; + uint32 client_ip; + uint32 assigned_ip; + uint32 server_ip; + uint32 relay_ip; + uint8 hw_address[BOOTP_CHADDR_LEN]; + uint8 server_name[BOOTP_SNAME_LEN]; + uint8 file_name[BOOTP_FILE_LEN]; + uint8 options[BOOTP_MIN_DHCP_OPT_LEN]; +} PACKED_STRUCT bootp_fmt_t; + +static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 }; +static char dhcp_ops[][10] = { + "NA", "REQUEST", "REPLY" +}; +static char dhcp_types[][10] = { + "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM" +}; + +#ifdef DHD_STATUS_LOGGING +static const int dhcp_types_stat[9] = { + ST(INVALID), ST(DHCP_DISCOVER), ST(DHCP_OFFER), ST(DHCP_REQUEST), + ST(DHCP_DECLINE), ST(DHCP_ACK), 
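+/* Index sketch: this table is kept parallel to dhcp_types[] above, so
+ * the message type parsed from DHCP option 53 (1=DISCOVER .. 8=INFORM)
+ * indexes both the print string and the status-log event, e.g.
+ * dhcp_type 5 -> "ACK" / ST(DHCP_ACK); slot 0 is ST(INVALID) because
+ * message type 0 is undefined.
+ */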
ST(DHCP_NAK), ST(DHCP_RELEASE), + ST(DHCP_INFORM) +}; +#endif /* DHD_STATUS_LOGGING */ + +void +dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + bootp_fmt_t *b = (bootp_fmt_t *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = &b->iph; + uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->iph.tot_len); + int dhcp_type = 0, len, opt_len; + char *ifname = NULL, *typestr = NULL, *opstr = NULL; + bool cond; + char sabuf[20]="", dabuf[20]=""; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!(dump_msg_level & DUMP_DHCP_VAL)) + return; + bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf); + bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf); + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + + ifname = dhd_ifname(dhdp, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + len = ntohs(b->udph.len) - sizeof(struct bcmudp_hdr); + opt_len = len - (sizeof(*b) - sizeof(struct ipv4_hdr) - + sizeof(struct bcmudp_hdr) - sizeof(b->options)); + + /* parse bootp options */ + if (opt_len >= BOOTP_MAGIC_COOKIE_LEN && + !memcmp(b->options, bootp_magic_cookie, BOOTP_MAGIC_COOKIE_LEN)) { + ptr = &b->options[BOOTP_MAGIC_COOKIE_LEN]; + while (ptr < end && *ptr != 0xff) { + opt = ptr++; + if (*opt == 0) { + continue; + } + ptr += *ptr + 1; + if (ptr >= end) { + break; + } + if (*opt == DHCP_OPT_MSGTYPE) { + if (opt[1]) { + dhcp_type = opt[2]; + typestr = dhcp_types[dhcp_type]; + opstr = dhcp_ops[b->op]; + DHD_STATLOG_DATA(dhdp, dhcp_types_stat[dhcp_type], + ifidx, tx, cond); + DHCP_PRINT("DHCP"); + break; + } + } + } + } +} +#endif /* DHD_DHCP_DUMP */ + +bool +dhd_check_icmp(uint8 *pktdata) +{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt; + + if (IPV4_PROT(iph) != IP_PROT_ICMP) { + return FALSE; + } + + /* check header length */ + if (ntohs(iph->tot_len) - IPV4_HLEN(iph) < sizeof(struct bcmicmp_hdr)) { + return FALSE; + } + return TRUE; +} + +#ifdef DHD_ICMP_DUMP +#define ICMP_TYPE_DEST_UNREACH 3 +#define ICMP_ECHO_SEQ_OFFSET 6 +#define ICMP_ECHO_SEQ(h) (*(uint16 *)((uint8 *)(h) + (ICMP_ECHO_SEQ_OFFSET))) +#define ICMP_PING_PRINT(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s) SEQNUM=%d" \ + TXFATE_FMT"\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s) SEQNUM=%d\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum)); \ + } \ + } while (0) + +#define ICMP_PRINT(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s) TYPE=%d, CODE=%d" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s) TYPE=%d," \ + " CODE=%d\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code)); \ + } \ + } while (0) + +void +dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt; + struct bcmicmp_hdr *icmph; + char *ifname; + bool cond; + uint16 seqnum, type, code; 
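+/* Layout sketch: for echo frames the ICMP header is type(1) code(1)
+ * cksum(2) id(2) seq(2), so ICMP_ECHO_SEQ() reads the big-endian
+ * sequence number at byte offset 6 (ICMP_ECHO_SEQ_OFFSET) and the dump
+ * prints it after ntoh16(); other types use the TYPE/CODE form.
+ */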
+ char sabuf[20]="", dabuf[20]=""; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!(dump_msg_level & DUMP_ICMP_VAL)) + return; + + ifname = dhd_ifname(dhdp, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + icmph = (struct bcmicmp_hdr *)((uint8 *)pkt + sizeof(struct ipv4_hdr)); + seqnum = 0; + type = icmph->type; + code = icmph->code; + bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf); + bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf); + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + if (type == ICMP_TYPE_ECHO_REQUEST) { + seqnum = ntoh16(ICMP_ECHO_SEQ(icmph)); + DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_REQ), ifidx, tx, cond); + ICMP_PING_PRINT("PING REQUEST"); + } else if (type == ICMP_TYPE_ECHO_REPLY) { + seqnum = ntoh16(ICMP_ECHO_SEQ(icmph)); + DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_RESP), ifidx, tx, cond); + ICMP_PING_PRINT("PING REPLY "); + } else if (type == ICMP_TYPE_DEST_UNREACH) { + DHD_STATLOG_DATA(dhdp, ST(ICMP_DEST_UNREACH), ifidx, tx, cond); + ICMP_PRINT("ICMP DEST UNREACH"); + } else { + DHD_STATLOG_DATA(dhdp, ST(ICMP_OTHER), ifidx, tx, cond); + ICMP_PRINT("ICMP OTHER"); + } +} +#endif /* DHD_ICMP_DUMP */ + +bool +dhd_check_arp(uint8 *pktdata, uint16 ether_type) +{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct bcmarp *arph = (struct bcmarp *)pkt; + + /* validation check */ + if ((ether_type != ETHER_TYPE_ARP) || + (arph->htype != hton16(HTYPE_ETHERNET)) || + (arph->hlen != ETHER_ADDR_LEN) || + (arph->plen != 4)) { + return FALSE; + } + return TRUE; +} + +#ifdef DHD_ARP_DUMP +#ifdef BOARD_HIKEY +/* On Hikey, due to continuous ARP prints + * DPC not scheduled. Hence rate limit the prints. + */ +#define DHD_PKTDUMP_ARP DHD_ERROR_RLMT +#else +#define DHD_PKTDUMP_ARP DHD_PKTDUMP +#endif /* BOARD_HIKEY */ + +#define ARP_PRINT(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[%s] "str " [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s)\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \ + } \ + } while (0) \ + +#define ARP_PRINT_OTHER(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[%s] "str " [TX] : %s(%s) %s %s(%s) op_code=%d" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s) op_code=%d" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s) op_code=%d\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode)); \ + } \ + } while (0) + +void +dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) 
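+/* Flow sketch for the function below: the ARP opcode selects the
+ * REQUEST/RESPONSE/OTHER format, the printed addresses come from the
+ * ARP payload rather than the Ethernet header, and on the primary
+ * interface (ifidx 0) every frame also feeds the roam packet counters
+ * through dhd_dump_pkt_cnts_inc(..., PKT_CNT_TYPE_ARP).
+ */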
+{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct bcmarp *arph = (struct bcmarp *)pkt; + char *ifname; + uint16 opcode; + bool cond, dump_enabled; + char sabuf[20]="", dabuf[20]=""; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!(dump_msg_level & DUMP_ARP_VAL)) + return; + + ifname = dhd_ifname(dhdp, ifidx); + opcode = ntoh16(arph->oper); + cond = (tx && pktfate) ? FALSE : TRUE; + dump_enabled = dhd_dump_pkt_enabled(dhdp); + bcm_ip_ntoa((struct ipv4_addr *)arph->src_ip, sabuf); + bcm_ip_ntoa((struct ipv4_addr *)arph->dst_ip, dabuf); + bcm_ether_ntoa((struct ether_addr *)arph->dst_eth, deabuf); + bcm_ether_ntoa((struct ether_addr *)arph->src_eth, seabuf); + if (opcode == ARP_OPC_REQUEST) { + DHD_STATLOG_DATA(dhdp, ST(ARP_REQ), ifidx, tx, cond); + ARP_PRINT("ARP REQUEST "); + } else if (opcode == ARP_OPC_REPLY) { + DHD_STATLOG_DATA(dhdp, ST(ARP_RESP), ifidx, tx, cond); + ARP_PRINT("ARP RESPONSE"); + } else { + ARP_PRINT_OTHER("ARP OTHER"); + } + + if (ifidx == 0) { + dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_ARP); + } +} +#endif /* DHD_ARP_DUMP */ + +bool +dhd_check_dns(uint8 *pktdata) +{ + hdr_fmt_t *dnsh = (hdr_fmt_t *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = &dnsh->iph; + + if (IPV4_PROT(iph) != IP_PROT_UDP) { + return FALSE; + } + + /* check UDP port for DNS */ + if (dnsh->udph.src_port != hton16(UDP_PORT_DNS) && + dnsh->udph.dst_port != hton16(UDP_PORT_DNS)) { + return FALSE; + } + + /* check header length */ + if (ntoh16(iph->tot_len) < (ntoh16(dnsh->udph.len) + + sizeof(struct bcmudp_hdr))) { + return FALSE; + } + return TRUE; +} + +#ifdef DHD_DNS_DUMP +typedef struct dns_fmt { + struct ipv4_hdr iph; + struct bcmudp_hdr udph; + uint16 id; + uint16 flags; + uint16 qdcount; + uint16 ancount; + uint16 nscount; + uint16 arcount; +} PACKED_STRUCT dns_fmt_t; + +#define DNS_QR_LOC 15 +#define DNS_OPCODE_LOC 11 +#define DNS_RCODE_LOC 0 +#define DNS_QR_MASK ((0x1) << (DNS_QR_LOC)) +#define DNS_OPCODE_MASK ((0xF) << (DNS_OPCODE_LOC)) +#define DNS_RCODE_MASK ((0xF) << (DNS_RCODE_LOC)) +#define GET_DNS_QR(flags) (((flags) & (DNS_QR_MASK)) >> (DNS_QR_LOC)) +#define GET_DNS_OPCODE(flags) (((flags) & (DNS_OPCODE_MASK)) >> (DNS_OPCODE_LOC)) +#define GET_DNS_RCODE(flags) (((flags) & (DNS_RCODE_MASK)) >> (DNS_RCODE_LOC)) +#define DNS_UNASSIGNED_OPCODE(flags) ((GET_DNS_OPCODE(flags) >= (6))) + +static const char dns_opcode_types[][11] = { + "QUERY", "IQUERY", "STATUS", "UNASSIGNED", "NOTIFY", "UPDATE" +}; + +#define DNSOPCODE(op) \ + (DNS_UNASSIGNED_OPCODE(flags) ? 
"UNASSIGNED" : dns_opcode_types[op]) + +#define DNS_REQ_PRINT(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + id, DNSOPCODE(opcode), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + id, DNSOPCODE(opcode), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] " str " [RX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, tx?"->":"<-", \ + tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode))); \ + } \ + } while (0) + +#define DNS_RESP_PRINT(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode), \ + GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode), \ + GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[%s] " str " [RX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + id, DNSOPCODE(opcode), GET_DNS_RCODE(flags))); \ + } \ + } while (0) + +void +dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + dns_fmt_t *dnsh = (dns_fmt_t *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = &dnsh->iph; + uint16 flags, opcode, id; + char *ifname; + bool cond, dump_enabled; + char sabuf[20]="", dabuf[20]=""; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + if (!(dump_msg_level & DUMP_DNS_VAL)) + return; + + ifname = dhd_ifname(dhdp, ifidx); + cond = (tx && pktfate) ? 
FALSE : TRUE; + dump_enabled = dhd_dump_pkt_enabled(dhdp); + flags = hton16(dnsh->flags); + opcode = GET_DNS_OPCODE(flags); + id = hton16(dnsh->id); + bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf); + bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf); + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + if (GET_DNS_QR(flags)) { + /* Response */ + DHD_STATLOG_DATA(dhdp, ST(DNS_RESP), ifidx, tx, cond); + DNS_RESP_PRINT("DNS RESPONSE"); + } else { + /* Request */ + DHD_STATLOG_DATA(dhdp, ST(DNS_QUERY), ifidx, tx, cond); + DNS_REQ_PRINT("DNS REQUEST"); + } + + if (ifidx == 0) { + dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_DNS); + } +} +#endif /* DHD_DNS_DUMP */ + +#ifdef DHD_TRX_DUMP +void +dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen, bool tx) +{ + struct ether_header *eh; + uint16 protocol; + char *pkttype = "UNKNOWN"; + + if (!(dump_msg_level & DUMP_TRX_VAL)) + return; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + if (!pktdata) { + DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + eh = (struct ether_header *)pktdata; + protocol = hton16(eh->ether_type); + BCM_REFERENCE(pktlen); + + switch (protocol) { + case ETHER_TYPE_IP: + pkttype = "IP"; + break; + case ETHER_TYPE_ARP: + pkttype = "ARP"; + break; + case ETHER_TYPE_BRCM: + pkttype = "BRCM"; + break; + case ETHER_TYPE_802_1X: + pkttype = "802.1X"; + break; + case ETHER_TYPE_WAI: + pkttype = "WAPI"; + break; + default: + break; + } + + if (protocol != ETHER_TYPE_BRCM) { + if (pktdata[0] == 0xFF) { + DHD_PKTDUMP(("[%s] %s BROADCAST DUMP - %s\n", + dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype)); + } else if (pktdata[0] & 1) { + DHD_PKTDUMP(("[%s] %s MULTICAST DUMP " MACDBG " - %s\n", + dhd_ifname(dhdp, ifidx), tx?"TX":"RX", MAC2STRDBG(pktdata), pkttype)); + } else { + DHD_PKTDUMP(("[%s] %s DUMP - %s\n", + dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype)); + } +#ifdef DHD_RX_FULL_DUMP + prhex("Data", pktdata, pktlen); +#endif /* DHD_RX_FULL_DUMP */ + } + else { + DHD_PKTDUMP(("[%s] %s DUMP - %s\n", + dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype)); + } +} +#endif /* DHD_TRX_DUMP */ diff --git a/bcmdhd.101.10.361.x/dhd_linux_pktdump.h b/bcmdhd.101.10.361.x/dhd_linux_pktdump.h new file mode 100755 index 0000000..7d7ce72 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_pktdump.h @@ -0,0 +1,132 @@ +/* + * Header file for the Packet dump helper functions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software.
+ * + * + * <> + * + * $Id$ + */ + +#ifndef __DHD_LINUX_PKTDUMP_H_ +#define __DHD_LINUX_PKTDUMP_H_ + +#include +#include + +typedef enum { + EAPOL_OTHER = 0, + EAPOL_4WAY_M1, + EAPOL_4WAY_M2, + EAPOL_4WAY_M3, + EAPOL_4WAY_M4, + EAPOL_GROUPKEY_M1, + EAPOL_GROUPKEY_M2 +} msg_eapol_t; + +typedef enum pkt_cnt_rsn { + PKT_CNT_RSN_INVALID = 0, + PKT_CNT_RSN_ROAM = 1, + PKT_CNT_RSN_GRPKEY_UP = 2, + PKT_CNT_RSN_CONNECT = 3, + PKT_CNT_RSN_MAX = 4 +} pkt_cnt_rsn_t; + +enum pkt_type { + PKT_TYPE_DATA = 0, + PKT_TYPE_DHCP = 1, + PKT_TYPE_ICMP = 2, + PKT_TYPE_DNS = 3, + PKT_TYPE_ARP = 4, + PKT_TYPE_EAP = 5 +}; + +extern msg_eapol_t dhd_is_4way_msg(uint8 *pktdata); +extern void dhd_dump_pkt(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate); +#ifdef DHD_PKTDUMP_ROAM +extern void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn); +extern void dhd_dump_pkt_init(dhd_pub_t *dhdp); +extern void dhd_dump_pkt_deinit(dhd_pub_t *dhdp); +extern void dhd_dump_pkt_clear(dhd_pub_t *dhdp); +#else +static INLINE void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) { } +static INLINE void dhd_dump_pkt_init(dhd_pub_t *dhdp) { } +static INLINE void dhd_dump_pkt_deinit(dhd_pub_t *dhdp) { } +static INLINE void dhd_dump_pkt_clear(dhd_pub_t *dhdp) { } +#endif /* DHD_PKTDUMP_ROAM */ + +/* Rx packet dump */ +#ifdef DHD_TRX_DUMP +extern void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx); +#else +static INLINE void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx) { } +#endif /* DHD_TRX_DUMP */ + +/* DHCP packet dump */ +#ifdef DHD_DHCP_DUMP +extern void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_DHCP_DUMP */ + +/* DNS packet dump */ +#ifdef DHD_DNS_DUMP +extern void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_DNS_DUMP */ + +/* ICMP packet dump */ +#ifdef DHD_ICMP_DUMP +extern void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_ICMP_DUMP */ + +/* ARP packet dump */ +#ifdef DHD_ARP_DUMP +extern void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_ARP_DUMP */ + +/* 802.1X packet dump */ +#ifdef DHD_8021X_DUMP +extern void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_8021X_DUMP */ +extern bool dhd_check_ip_prot(uint8 *pktdata, uint16 ether_type); +extern bool dhd_check_arp(uint8 *pktdata, uint16 ether_type); +extern bool dhd_check_dhcp(uint8 *pktdata); +extern bool dhd_check_icmp(uint8 *pktdata); +extern bool dhd_check_dns(uint8 *pktdata); 
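+/* Usage sketch (hypothetical call site): the TX/RX data paths hand
+ * each frame to dhd_dump_pkt(), which classifies it and fans out to
+ * the per-protocol dumpers declared above; pkthash/pktfate are only
+ * meaningful for PCIe TX packet logging and may be NULL elsewhere:
+ *
+ *   dhd_dump_pkt(dhdp, ifidx, PKTDATA(osh, pkt), PKTLEN(osh, pkt),
+ *                TRUE, NULL, NULL);
+ *
+ * When a DHD_*_DUMP feature is compiled out, the static INLINE stubs
+ * above keep call sites unconditional at no cost.
+ */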
+#endif /* __DHD_LINUX_PKTDUMP_H_ */ diff --git a/bcmdhd.101.10.361.x/dhd_linux_platdev.c b/bcmdhd.101.10.361.x/dhd_linux_platdev.c new file mode 100755 index 0000000..8410748 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_platdev.c @@ -0,0 +1,1108 @@ +/* + * Linux platform device for DHD WLAN adapter + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(OEM_ANDROID) +#include +#endif +#if defined(CONFIG_WIFI_CONTROL_FUNC) || defined(CUSTOMER_HW4) +#include +#else +#include +#endif /* CONFIG_WIFI_CONTROL_FUNC */ +#ifdef BCMDBUS +#include +#endif +#ifdef CONFIG_DTS +#include +#include +#endif /* CONFIG_DTS */ + +#if defined(CUSTOMER_HW) +extern int dhd_wlan_init_plat_data(wifi_adapter_info_t *adapter); +extern void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter); +#endif /* CUSTOMER_HW */ + +#define WIFI_PLAT_NAME "bcmdhd_wlan" +#define WIFI_PLAT_NAME2 "bcm4329_wlan" +#define WIFI_PLAT_EXT "bcmdhd_wifi_platform" + +#ifdef DHD_WIFI_SHUTDOWN +extern void wifi_plat_dev_drv_shutdown(struct platform_device *pdev); +#endif + +#ifdef CONFIG_DTS +struct regulator *wifi_regulator = NULL; +#endif /* CONFIG_DTS */ + +bool cfg_multichip = FALSE; +bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL; +static int wifi_plat_dev_probe_ret = 0; +static bool is_power_on = FALSE; +/* XXX Some Qualcomm based CUSTOMER_HW4 platforms are using platform + * device structure even if the Kernel uses device tree structure. + * Therefore, the CONFIG_ARCH_MSM condition is temporarly remained + * to support in this case. 
+ */ +#if !defined(CONFIG_DTS) +#if defined(DHD_OF_SUPPORT) +static bool dts_enabled = TRUE; +extern struct resource dhd_wlan_resources; +extern struct wifi_platform_data dhd_wlan_control; +#else +static bool dts_enabled = FALSE; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif +struct resource dhd_wlan_resources = {0}; +struct wifi_platform_data dhd_wlan_control = {0}; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif +#endif /* defined(DHD_OF_SUPPORT) */ +#endif /* !defined(CONFIG_DTS) */ + +static int dhd_wifi_platform_load(void); + +extern void* wl_cfg80211_get_dhdp(struct net_device *dev); + +#ifdef BCMDHD_MODULAR +//extern int dhd_wlan_init(void); +//extern int dhd_wlan_deinit(void); +#ifdef WBRC +extern int wbrc_init(void); +extern void wbrc_exit(void); +#endif /* WBRC */ +#endif /* BCMDHD_MODULAR */ + +#ifdef ENABLE_4335BT_WAR +extern int bcm_bt_lock(int cookie); +extern void bcm_bt_unlock(int cookie); +static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */ +#endif /* ENABLE_4335BT_WAR */ + +#ifdef BCM4335_XTAL_WAR +extern bool check_bcm4335_rev(void); +#endif /* BCM4335_XTAL_WAR */ + +wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type, + uint32 bus_num, uint32 slot_num, unsigned long status) +{ + int i; + + if (dhd_wifi_platdata == NULL) + return NULL; + + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i]; + if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) && + (adapter->bus_num == -1 || adapter->bus_num == bus_num) && + (adapter->slot_num == -1 || adapter->slot_num == slot_num) +#if defined(ENABLE_INSMOD_NO_FW_LOAD) + && (wifi_chk_adapter_status(adapter, status)) +#endif + ) { + DHD_ERROR(("attach adapter info '%s'\n", adapter->name)); + return adapter; + } + } + return NULL; +} + +wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num) +{ + int i; + + if (dhd_wifi_platdata == NULL) + return NULL; + + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i]; + if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) && + (adapter->bus_num == -1 || adapter->bus_num == bus_num) && + (adapter->slot_num == -1 || adapter->slot_num == slot_num)) { + DHD_TRACE(("found adapter info '%s'\n", adapter->name)); + return adapter; + } + } + return NULL; +} + +void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size) +{ + void *alloc_ptr = NULL; + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + if (plat_data->mem_prealloc) { +#ifdef BCMDHD_MDRIVER + alloc_ptr = plat_data->mem_prealloc(adapter->bus_type, adapter->index, section, size); +#else + alloc_ptr = plat_data->mem_prealloc(section, size); +#endif + if (alloc_ptr) { + DHD_INFO(("success alloc section %d\n", section)); + if (size != 0L) + bzero(alloc_ptr, size); + return alloc_ptr; + } + } else + return NULL; + + DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section)); + return NULL; +} + +void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter) +{ + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return NULL; + plat_data =
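+/* Prealloc sketch: wifi_platform_prealloc() above defers to the
+ * platform's mem_prealloc() hook, which hands back long-lived buffers
+ * reserved at boot and identified by a numeric section id; a hit is
+ * zeroed and survives driver reloads, while a NULL return typically
+ * lets the caller fall back to regular kernel allocation. Under
+ * BCMDHD_MDRIVER the hook is additionally keyed by bus type and
+ * adapter index.
+ */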
adapter->wifi_plat_data; + return plat_data->mem_prealloc; +} + +int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr) +{ + if (adapter == NULL) + return -1; + if (irq_flags_ptr) + *irq_flags_ptr = adapter->intr_flags; + return adapter->irq_num; +} + +int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec) +{ + int err = 0; +#ifndef CONFIG_DTS + struct wifi_platform_data *plat_data; +#endif +#ifdef BT_OVER_SDIO + if (is_power_on == on) { + return -EINVAL; + } +#endif /* BT_OVER_SDIO */ + if (on) { + wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } else { + wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } +#ifdef CONFIG_DTS + if (on) { + printf("======== PULL WL_REG_ON HIGH! ========\n"); + err = regulator_enable(wifi_regulator); + is_power_on = TRUE; + } + else { + printf("======== PULL WL_REG_ON LOW! ========\n"); + err = regulator_disable(wifi_regulator); + is_power_on = FALSE; + } + if (err < 0) { + DHD_ERROR(("%s: regulator enable/disable failed\n", __FUNCTION__)); + goto fail; + } +#else + if (!adapter || !adapter->wifi_plat_data) { + err = -EINVAL; + goto fail; + } + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s = %d, delay: %lu msec\n", __FUNCTION__, on, msec)); + if (plat_data->set_power) { +#ifdef ENABLE_4335BT_WAR + if (on) { + printk("WiFi: trying to acquire BT lock\n"); + if (bcm_bt_lock(lock_cookie_wifi) != 0) + printk("** WiFi: timeout in acquiring bt lock**\n"); + printk("%s: btlock acquired\n", __FUNCTION__); + } + else { + /* For an exceptional case, release btlock */ + bcm_bt_unlock(lock_cookie_wifi); + } +#endif /* ENABLE_4335BT_WAR */ + + err = plat_data->set_power(on, adapter); + } + + if (msec && !err) + OSL_SLEEP(msec); + + if (on && !err) + is_power_on = TRUE; + else + is_power_on = FALSE; + +#endif /* CONFIG_DTS */ + + return err; +fail: + if (on) { + wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } else { + wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } + return err; +} + +int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present) +{ + int err = 0; + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present)); + if (plat_data->set_carddetect) { + err = plat_data->set_carddetect(device_present); + } + return err; + +} + +int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf, + int ifidx) +{ + struct wifi_platform_data *plat_data; + + DHD_ERROR(("%s\n", __FUNCTION__)); + if (!buf || !adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + if (plat_data->get_mac_addr) { + return plat_data->get_mac_addr(buf, ifidx); + } + return -EOPNOTSUPP; +} + +#ifdef DHD_COREDUMP +int wifi_platform_set_coredump(wifi_adapter_info_t *adapter, const char *buf, + int buf_len, const char *info) +{ + struct wifi_platform_data *plat_data; + + DHD_ERROR(("%s\n", __FUNCTION__)); + if (!buf || !adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + if (plat_data->set_coredump) { + return plat_data->set_coredump(buf, buf_len, info); + } + return -EOPNOTSUPP; +} +#endif /* DHD_COREDUMP */ + +void * +#ifdef CUSTOM_COUNTRY_CODE +wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags) +#else +wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+#endif /* CUSTOM_COUNTRY_CODE */ +{ + /* get_country_code was added after 2.6.39 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct wifi_platform_data *plat_data; + + if (!ccode || !adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + + DHD_TRACE(("%s\n", __FUNCTION__)); + if (plat_data->get_country_code) { +#ifdef CUSTOM_FORCE_NODFS_FLAG + return plat_data->get_country_code(ccode, flags); +#else + return plat_data->get_country_code(ccode); +#endif /* CUSTOM_COUNTRY_CODE */ + } +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ + + return NULL; +} + +#ifndef CUSTOMER_HW +static int wifi_plat_dev_drv_probe(struct platform_device *pdev) +{ + struct resource *resource; + wifi_adapter_info_t *adapter; +#if defined(CONFIG_DTS) && defined(CUSTOMER_OOB) + int irq, gpio; +#endif /* CONFIG_DTS */ + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; +#if defined(CONFIG_WIFI_CONTROL_FUNC) + adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data); +#else + adapter->wifi_plat_data = (void *)&dhd_wlan_control; +#endif + + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq"); + if (resource == NULL) + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq"); + if (resource) { + adapter->irq_num = resource->start; + adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; +#ifdef DHD_ISR_NO_SUSPEND + adapter->intr_flags |= IRQF_NO_SUSPEND; +#endif + } + +#ifdef CONFIG_DTS + wifi_regulator = regulator_get(&pdev->dev, "wlreg_on"); + if (wifi_regulator == NULL) { + DHD_ERROR(("%s regulator is null\n", __FUNCTION__)); + return -1; + } + +#if defined(CUSTOMER_OOB) + /* This is to get the irq for the OOB */ + gpio = of_get_gpio(pdev->dev.of_node, 0); + + if (gpio < 0) { + DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__)); + return -1; + } + irq = gpio_to_irq(gpio); + if (irq < 0) { + DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__)); + return -1; + } + adapter->irq_num = irq; + + /* need to change the flags according to our requirement */ +#ifdef HW_OOB + adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | + IORESOURCE_IRQ_SHAREABLE; +#else + adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | + IORESOURCE_IRQ_SHAREABLE; +#endif +#endif +#endif /* CONFIG_DTS */ + + wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); + return wifi_plat_dev_probe_ret; +} + +static int wifi_plat_dev_drv_remove(struct platform_device *pdev) +{ + wifi_adapter_info_t *adapter; + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; + if (is_power_on) { +#ifdef BCMPCIE + wifi_platform_bus_enumerate(adapter, FALSE); + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); +#else + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); +#endif /* BCMPCIE */ + } + +#ifdef CONFIG_DTS + regulator_put(wifi_regulator); +#endif /* CONFIG_DTS */ + return 0; +} + +static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, 
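+/* PM sketch: on kernels <= 2.6.39 built with OOB_INTR_ONLY and
+ * BCMSDIO, the suspend/resume callbacks below just gate the
+ * out-of-band host-wake interrupt via bcmsdh_oob_intr_set(0/1), with
+ * resume re-arming it only when the interface is up; on newer kernels
+ * both callbacks reduce to trace-only no-ops.
+ */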
pm_message_t state) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + bcmsdh_oob_intr_set(0); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +static int wifi_plat_dev_drv_resume(struct platform_device *pdev) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + if (dhd_os_check_if_up(wl_cfg80211_get_dhdp())) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +#ifdef CONFIG_DTS +static const struct of_device_id wifi_device_dt_match[] = { + { .compatible = "android,bcmdhd_wlan", }, + {}, +}; +#endif /* CONFIG_DTS */ + +static struct platform_driver wifi_platform_dev_driver = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, +#ifdef DHD_WIFI_SHUTDOWN + .shutdown = wifi_plat_dev_drv_shutdown, +#endif /* DHD_WIFI_SHUTDOWN */ + .driver = { + .name = WIFI_PLAT_NAME, +#ifdef CONFIG_DTS + .of_match_table = wifi_device_dt_match, +#endif /* CONFIG_DTS */ + } +}; + +static struct platform_driver wifi_platform_dev_driver_legacy = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, +#ifdef DHD_WIFI_SHUTDOWN + .shutdown = wifi_plat_dev_drv_shutdown, +#endif /* DHD_WIFI_SHUTDOWN */ + .driver = { + .name = WIFI_PLAT_NAME2, + } +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) +static int wifi_platdev_match(struct device *dev, const void *data) +#else +static int wifi_platdev_match(struct device *dev, void *data) +#endif /* LINUX_VER >= 5.3.0 */ +{ + char *name = (char*)data; + const struct platform_device *pdev; + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + pdev = to_platform_device(dev); + GCC_DIAGNOSTIC_POP(); + + if (strcmp(pdev->name, name) == 0) { + DHD_ERROR(("found wifi platform device %s\n", name)); + return TRUE; + } + + return FALSE; +} +#endif + +static int wifi_ctrlfunc_register_drv(void) +{ + wifi_adapter_info_t *adapter; + +#ifndef CUSTOMER_HW + int err = 0; + struct device *dev1, *dev2; + dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match); + dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match); +#endif + +#ifdef BCMDHD_MODULAR +// dhd_wlan_init(); +#ifdef WBRC + wbrc_init(); +#endif /* WBRC */ +#endif /* BCMDHD_MODULAR */ + +#if !defined(CONFIG_DTS) && !defined(CUSTOMER_HW) + if (!dts_enabled) { + if (dev1 == NULL && dev2 == NULL) { + DHD_ERROR(("no wifi platform data, skip\n")); + return -ENXIO; + } + } +#endif /* !defined(CONFIG_DTS) */ + + /* multi-chip support not enabled, build one adapter information for + * DHD (either SDIO, USB or PCIe) + */ + adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL); + if (adapter == NULL) { + DHD_ERROR(("%s:adapter alloc failed", __FUNCTION__)); + return -ENOMEM; + } + adapter->name = "DHD generic adapter"; + adapter->index = -1; +#ifdef BCMDHD_MDRIVER +#ifdef BCMSDIO + adapter->index = 0; +#elif defined(BCMPCIE) + adapter->index = 1; +#elif defined(BCMDBUS) + adapter->index = 2; +#endif +#endif + adapter->bus_type = -1; + adapter->bus_num = -1; + adapter->slot_num = -1; + adapter->irq_num = -1; + is_power_on = FALSE; + wifi_plat_dev_probe_ret = 0; + dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL); + dhd_wifi_platdata->num_adapters = 1; + 
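+/* Registration sketch: without multi-chip support exactly one
+ * wildcard adapter is built (bus/slot/irq all -1, so it matches any
+ * probe) and then one of three paths runs: an Android-style
+ * "bcmdhd_wlan"/"bcm4329_wlan" platform device, statically linked
+ * dts_enabled platform data, or a CONFIG_DTS device-tree regulator.
+ * All of them end up in dhd_wifi_platform_load().
+ */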
dhd_wifi_platdata->adapters = adapter; + init_waitqueue_head(&adapter->status_event); + +#ifndef CUSTOMER_HW + if (dev1) { + err = platform_driver_register(&wifi_platform_dev_driver); + if (err) { + DHD_ERROR(("%s: failed to register wifi ctrl func driver\n", + __FUNCTION__)); + return err; + } + } + if (dev2) { + err = platform_driver_register(&wifi_platform_dev_driver_legacy); + if (err) { + DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n", + __FUNCTION__)); + return err; + } + } +#endif + +#if !defined(CONFIG_DTS) + if (dts_enabled) { + adapter->wifi_plat_data = (void *)&dhd_wlan_control; +#ifdef CUSTOMER_HW + wifi_plat_dev_probe_ret = dhd_wlan_init_plat_data(adapter); + if (wifi_plat_dev_probe_ret) + return wifi_plat_dev_probe_ret; +#endif +#ifdef DHD_ISR_NO_SUSPEND + adapter->intr_flags |= IRQF_NO_SUSPEND; +#endif + wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); + } +#endif /* !defined(CONFIG_DTS) */ + +#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW) + wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver); +#endif /* CONFIG_DTS */ + + /* return probe function's return value if registration succeeded */ + return wifi_plat_dev_probe_ret; +} + +void wifi_ctrlfunc_unregister_drv(void) +{ +#ifndef CONFIG_DTS + wifi_adapter_info_t *adapter = NULL; +#endif + +#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW) + DHD_ERROR(("unregister wifi platform drivers\n")); + platform_driver_unregister(&wifi_platform_dev_driver); +#else +#ifndef CUSTOMER_HW + struct device *dev1, *dev2; + dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match); + dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match); + if (!dts_enabled) + if (dev1 == NULL && dev2 == NULL) + return; +#endif + DHD_ERROR(("unregister wifi platform drivers\n")); +#ifndef CUSTOMER_HW + if (dev1) + platform_driver_unregister(&wifi_platform_dev_driver); + if (dev2) + platform_driver_unregister(&wifi_platform_dev_driver_legacy); +#endif + if (dts_enabled) { + adapter = &dhd_wifi_platdata->adapters[0]; + if (is_power_on) { + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + } + wifi_platform_bus_enumerate(adapter, FALSE); + } +#ifdef BCMDHD_MODULAR +// dhd_wlan_deinit(); +#ifdef WBRC + wbrc_exit(); +#endif /* WBRC */ +#endif /* BCMDHD_MODULAR */ + +#endif /* !defined(CONFIG_DTS) */ + +#if defined(CUSTOMER_HW) + dhd_wlan_deinit_plat_data(adapter); +#endif + + kfree(dhd_wifi_platdata->adapters); + dhd_wifi_platdata->adapters = NULL; + dhd_wifi_platdata->num_adapters = 0; + kfree(dhd_wifi_platdata); + dhd_wifi_platdata = NULL; +} + +#ifndef CUSTOMER_HW +static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev) +{ + dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data); + + return dhd_wifi_platform_load(); +} + +static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev) +{ + int i; + wifi_adapter_info_t *adapter; + ASSERT(dhd_wifi_platdata != NULL); + + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } + return 0; +} + +static struct platform_driver dhd_wifi_platform_dev_driver = { + .probe = bcmdhd_wifi_plat_dev_drv_probe, + .remove = bcmdhd_wifi_plat_dev_drv_remove, + .driver = { + .name = WIFI_PLAT_EXT, + } +}; +#endif + +int dhd_wifi_platform_register_drv(void) +{ + int err = 0; +#ifndef
CUSTOMER_HW + struct device *dev; + + /* register Broadcom wifi platform data driver if multi-chip is enabled, + * otherwise use Android style wifi platform data (aka wifi control function) + * if it exists + * + * to support multi-chip DHD, Broadcom wifi platform data device must + * be added in kernel early boot (e.g. board config file). + */ + if (cfg_multichip) { + dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match); + if (dev == NULL) { + DHD_ERROR(("bcmdhd wifi platform data device not found!!\n")); + return -ENXIO; + } + err = platform_driver_register(&dhd_wifi_platform_dev_driver); + } else +#endif + { + err = wifi_ctrlfunc_register_drv(); + + /* no wifi ctrl func either, load bus directly and ignore this error */ + if (err) { + if (err == -ENXIO) { + /* wifi ctrl function does not exist */ + err = dhd_wifi_platform_load(); + } else { + /* unregister driver due to initialization failure */ + wifi_ctrlfunc_unregister_drv(); + } + } + } + + return err; +} + +#ifdef BCMPCIE +static int dhd_wifi_platform_load_pcie(void) +{ + int err = 0; + int i; + wifi_adapter_info_t *adapter; + + BCM_REFERENCE(i); + BCM_REFERENCE(adapter); + + if (dhd_wifi_platdata == NULL) { + /* XXX For x86 Bringup PC or BRIX */ + err = dhd_bus_register(); + } else { +#ifdef DHD_SUPPORT_HDM + if (dhd_download_fw_on_driverload || hdm_trigger_init) { +#else + if (dhd_download_fw_on_driverload) { +#endif /* DHD_SUPPORT_HDM */ + /* power up all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + int retry = POWERUP_MAX_RETRY; + adapter = &dhd_wifi_platdata->adapters[i]; + + DHD_ERROR(("Power-up adapter '%s'\n", adapter->name)); + DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n", + adapter->irq_num, adapter->intr_flags, adapter->fw_path, + adapter->nv_path)); + DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n", + adapter->bus_type, adapter->bus_num, adapter->slot_num)); + + do { + err = wifi_platform_set_power(adapter, + TRUE, WIFI_TURNON_DELAY); + if (err) { + DHD_ERROR(("failed to power up %s," + " %d retry left\n", + adapter->name, retry)); + /* WL_REG_ON state unknown, Power off forcely */ + wifi_platform_set_power(adapter, + FALSE, WIFI_TURNOFF_DELAY); + continue; + } else { + err = wifi_platform_bus_enumerate(adapter, TRUE); + if (err) { + DHD_ERROR(("failed to enumerate bus %s, " + "%d retry left\n", + adapter->name, retry)); + wifi_platform_set_power(adapter, FALSE, + WIFI_TURNOFF_DELAY); + } else { + break; + } + } + } while (retry--); + + if (retry < 0) { + DHD_ERROR(("failed to power up %s, max retry reached**\n", + adapter->name)); + return -ENODEV; + } + } + } + + err = dhd_bus_register(); + + if (err) { + DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__)); + if (dhd_download_fw_on_driverload) { + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_bus_enumerate(adapter, FALSE); + wifi_platform_set_power(adapter, + FALSE, WIFI_TURNOFF_DELAY); + } + } + } + } + + return err; +} +#else +static int dhd_wifi_platform_load_pcie(void) +{ + return 0; +} +#endif /* BCMPCIE */ + +void dhd_wifi_platform_unregister_drv(void) +{ +#ifndef CUSTOMER_HW + if (cfg_multichip) + platform_driver_unregister(&dhd_wifi_platform_dev_driver); + else +#endif + wifi_ctrlfunc_unregister_drv(); +} + +extern int dhd_watchdog_prio; +extern int dhd_dpc_prio; +extern uint dhd_deferred_tx; +#if defined(OEM_ANDROID) && (defined(BCMLXSDMMC) || defined(BCMDBUS)) +extern 
struct semaphore dhd_registration_sem; +#endif /* defined(OEM_ANDROID) && defined(BCMLXSDMMC) */ + +#ifdef BCMSDIO +static int dhd_wifi_platform_load_sdio(void) +{ + int i; + int err = 0; + wifi_adapter_info_t *adapter; + + BCM_REFERENCE(i); + BCM_REFERENCE(adapter); + /* Sanity check on the module parameters + * - Both watchdog and DPC as tasklets are ok + * - If both watchdog and DPC are threads, TX must be deferred + */ + if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) && + !(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx)) + return -EINVAL; + +#if defined(OEM_ANDROID) && defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) + sema_init(&dhd_registration_sem, 0); +#endif + + if (dhd_wifi_platdata == NULL) { + DHD_ERROR(("DHD wifi platform data is required for Android build\n")); + DHD_ERROR(("DHD registering bus directly\n")); + /* x86 bring-up PC needs no power-up operations */ + err = dhd_bus_register(); + return err; + } + +#if defined(OEM_ANDROID) && defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) + /* power up all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + bool chip_up = FALSE; + int retry = POWERUP_MAX_RETRY; + struct semaphore dhd_chipup_sem; + + adapter = &dhd_wifi_platdata->adapters[i]; + + DHD_ERROR(("Power-up adapter '%s'\n", adapter->name)); + DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n", + adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path)); + DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n", + adapter->bus_type, adapter->bus_num, adapter->slot_num)); + + do { +#ifndef CUSTOMER_HW_AMLOGIC + sema_init(&dhd_chipup_sem, 0); + err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem); + if (err) { + DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n", + __FUNCTION__, err)); + return err; + } +#endif + err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY); + if (err) { + DHD_ERROR(("%s: wifi pwr on error ! \n", __FUNCTION__)); + dhd_bus_unreg_sdio_notify(); + /* WL_REG_ON state unknown, Power off forcely */ + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + continue; + } else { + wifi_platform_bus_enumerate(adapter, TRUE); + } +#ifdef CUSTOMER_HW_AMLOGIC + sema_init(&dhd_chipup_sem, 0); + err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem); + if (err) { + DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n", + __FUNCTION__, err)); + return err; + } +#endif + + if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) { + dhd_bus_unreg_sdio_notify(); + chip_up = TRUE; + break; + } + + DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry)); + dhd_bus_unreg_sdio_notify(); + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } while (retry--); + + if (!chip_up) { + DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name)); + return -ENODEV; + } + + } + + err = dhd_bus_register(); + + if (err) { + DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__)); + goto fail; + } + + /* + * Wait till MMC sdio_register_driver callback called and made driver attach. 
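+	 * In outline, the handshake is (simplified, illustrative only):
+	 *
+	 *	sema_init(&dhd_registration_sem, 0);
+	 *	dhd_bus_register();	// ends up in sdio_register_driver()
+	 *	// ... MMC core probes the card and DHD's probe callback
+	 *	// eventually posts the semaphore ...
+	 *	down_timeout(&dhd_registration_sem,
+	 *		msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+	 *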
+	 * This is needed to synchronize the exit of dhd insmod with the
+	 * kernel MMC sdio device callback registration.
+	 */
+	err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver timeout or error\n", __FUNCTION__));
+		dhd_bus_unregister();
+		goto fail;
+	}
+
+	return err;
+
+fail:
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+#else
+	/* x86 bring-up PC needs no power-up operations */
+	err = dhd_bus_register();
+#endif /* OEM_ANDROID && BCMLXSDMMC && !DHD_PRELOAD */
+
+	return err;
+}
+#else /* BCMSDIO */
+static int dhd_wifi_platform_load_sdio(void)
+{
+	return 0;
+}
+#endif /* BCMSDIO */
+
+#ifdef BCMDBUS
+static int dhd_wifi_platform_load_usb(void)
+{
+	int err = 0;
+#if !defined(DHD_PRELOAD)
+	wifi_adapter_info_t *adapter;
+	s32 timeout = -1;
+	int i;
+	enum wifi_adapter_status wait_status;
+#endif
+
+#if !defined(DHD_PRELOAD)
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		/* capture the return value; without the assignment the
+		 * error check below always sees the stale 'err = 0'
+		 */
+		err = wifi_platform_set_power(adapter, FALSE, 0);
+		if (err) {
+			DHD_ERROR(("failed to wifi_platform_set_power on %s\n", adapter->name));
+			goto exit;
+		}
+	}
+	OSL_SLEEP(200);
+#endif
+
+	err = dhd_bus_register();
+	if (err) {
+		DHD_ERROR(("%s: usb_register failed\n", __FUNCTION__));
+		goto exit;
+	}
+
+#if !defined(DHD_PRELOAD)
+	/* power up all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+		DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+			adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+		DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+			adapter->bus_type, adapter->bus_num, adapter->slot_num));
+		err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+		if (err) {
+			DHD_ERROR(("failed to wifi_platform_set_power on %s\n", adapter->name));
+			goto fail;
+		}
+		if (dhd_download_fw_on_driverload)
+			wait_status = WIFI_STATUS_ATTACH;
+		else
+			wait_status = WIFI_STATUS_DETTACH;
+		timeout = wait_event_interruptible_timeout(adapter->status_event,
+			wifi_get_adapter_status(adapter, wait_status),
+			msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+		if (timeout <= 0) {
+			err = -1;
+			DHD_ERROR(("%s: usb_register_driver timeout\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+#endif
+
+exit:
+	return err;
+
+#if !defined(DHD_PRELOAD)
+fail:
+	dhd_bus_unregister();
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+	}
+
+	return err;
+#endif
+}
+#else /* BCMDBUS */
+static int dhd_wifi_platform_load_usb(void)
+{
+	return 0;
+}
+#endif /* BCMDBUS */
+
+static int dhd_wifi_platform_load(void)
+{
+	int err = 0;
+	printf("%s: Enter\n", __FUNCTION__);
+
+#if defined(OEM_ANDROID)
+	wl_android_init();
+#endif /* OEM_ANDROID */
+
+	if ((err = dhd_wifi_platform_load_usb()))
+		goto end;
+	else if ((err = dhd_wifi_platform_load_sdio()))
+		goto end;
+	else
+		err = dhd_wifi_platform_load_pcie();
+
+end:
+#if defined(OEM_ANDROID)
+	if (err)
+		wl_android_exit();
+#if !defined(MULTIPLE_SUPPLICANT)
+	else
+		wl_android_post_init();
+#endif
+#endif /* OEM_ANDROID */
+
+	return
err; +} diff --git a/bcmdhd.101.10.361.x/dhd_linux_priv.h b/bcmdhd.101.10.361.x/dhd_linux_priv.h new file mode 100755 index 0000000..1eeb27e --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_priv.h @@ -0,0 +1,518 @@ +/* + * DHD Linux header file - contains private structure definition of the Linux specific layer + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#ifndef __DHD_LINUX_PRIV_H__ +#define __DHD_LINUX_PRIV_H__ + +#include + +#ifdef SHOW_LOGTRACE +#include +#include +#endif /* SHOW_LOGTRACE */ +#include +#include +#include +#ifdef CONFIG_COMPAT +#include +#endif /* CONFIG COMPAT */ +#ifdef CONFIG_HAS_WAKELOCK +#include +#endif /* CONFIG_HAS_WAKELOCK */ +#include +#include +#include +#include +#include +#include + +#ifdef PCIE_FULL_DONGLE +#include +#include +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_QOS_ON_SOCK_FLOW +struct dhd_sock_qos_info; +#endif /* DHD_QOS_ON_SOCK_FLOW */ + +/* + * Do not include this header except for the dhd_linux.c dhd_linux_sysfs.c + * Local private structure (extension of pub) + */ +typedef struct dhd_info { +#if defined(WL_WIRELESS_EXT) + wl_iw_t iw; /* wireless extensions state (must be first) */ +#endif /* defined(WL_WIRELESS_EXT) */ + dhd_pub_t pub; + /* for supporting multiple interfaces. + * static_ifs hold the net ifaces without valid FW IF + */ + dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS]; + wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. 
*/ + char fw_path[PATH_MAX]; /* path to firmware image */ + char nv_path[PATH_MAX]; /* path to nvram vars file */ + char clm_path[PATH_MAX]; /* path to clm vars file */ + char conf_path[PATH_MAX]; /* path to config vars file */ +#ifdef DHD_UCODE_DOWNLOAD + char uc_path[PATH_MAX]; /* path to ucode image */ +#endif /* DHD_UCODE_DOWNLOAD */ + + /* serialize dhd iovars */ + struct mutex dhd_iovar_mutex; + + struct semaphore proto_sem; +#ifdef PROP_TXSTATUS + spinlock_t wlfc_spinlock; + +#ifdef BCMDBUS + ulong wlfc_lock_flags; + ulong wlfc_pub_lock_flags; +#endif /* BCMDBUS */ +#endif /* PROP_TXSTATUS */ + wait_queue_head_t ioctl_resp_wait; + wait_queue_head_t d3ack_wait; + wait_queue_head_t dhd_bus_busy_state_wait; + wait_queue_head_t dmaxfer_wait; +#ifdef BT_OVER_PCIE + wait_queue_head_t quiesce_wait; +#endif /* BT_OVER_PCIE */ + uint32 default_wd_interval; + + timer_list_compat_t timer; + bool wd_timer_valid; +#ifdef DHD_PCIE_RUNTIMEPM + timer_list_compat_t rpm_timer; + bool rpm_timer_valid; + tsk_ctl_t thr_rpm_ctl; +#endif /* DHD_PCIE_RUNTIMEPM */ + struct tasklet_struct tasklet; + spinlock_t sdlock; + spinlock_t txqlock; + spinlock_t dhd_lock; + spinlock_t txoff_lock; +#ifdef BCMDBUS + ulong txqlock_flags; +#endif /* BCMDBUS */ + +#ifndef BCMDBUS + struct semaphore sdsem; + tsk_ctl_t thr_dpc_ctl; + tsk_ctl_t thr_wdt_ctl; +#endif /* BCMDBUS */ + + tsk_ctl_t thr_rxf_ctl; + spinlock_t rxf_lock; + bool rxthread_enabled; + + /* Wakelocks */ +#if defined(CONFIG_HAS_WAKELOCK) + struct wakeup_source wl_wifi; /* Wifi wakelock */ + struct wakeup_source wl_rxwake; /* Wifi rx wakelock */ + struct wakeup_source wl_ctrlwake; /* Wifi ctrl wakelock */ + struct wakeup_source wl_wdwake; /* Wifi wd wakelock */ + struct wakeup_source wl_evtwake; /* Wifi event wakelock */ + struct wakeup_source wl_pmwake; /* Wifi pm handler wakelock */ + struct wakeup_source wl_txflwake; /* Wifi tx flow wakelock */ +#ifdef BCMPCIE_OOB_HOST_WAKE + struct wakeup_source wl_intrwake; /* Host wakeup wakelock */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + struct wakeup_source wl_scanwake; /* Wifi scan wakelock */ +#endif /* DHD_USE_SCAN_WAKELOCK */ + struct wakeup_source wl_nanwake; /* NAN wakelock */ +#endif /* CONFIG_HAS_WAKELOCK */ + +#if defined(OEM_ANDROID) + /* net_device interface lock, prevent race conditions among net_dev interface + * calls and wifi_on or wifi_off + */ + struct mutex dhd_net_if_mutex; + struct mutex dhd_suspend_mutex; +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + struct mutex dhd_apf_mutex; +#endif /* PKT_FILTER_SUPPORT && APF */ +#endif /* OEM_ANDROID */ + spinlock_t wakelock_spinlock; + spinlock_t wakelock_evt_spinlock; + uint32 wakelock_counter; + int wakelock_wd_counter; + int wakelock_rx_timeout_enable; + int wakelock_ctrl_timeout_enable; + bool waive_wakelock; + uint32 wakelock_before_waive; + + /* Thread to issue ioctl for multicast */ + wait_queue_head_t ctrl_wait; + atomic_t pend_8021x_cnt; + dhd_attach_states_t dhd_state; +#ifdef SHOW_LOGTRACE + dhd_event_log_t event_data; +#endif /* SHOW_LOGTRACE */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + u32 pend_ipaddr; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef DHDTCPACK_SUPPRESS + spinlock_t tcpack_lock; +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef FIX_CPU_MIN_CLOCK + bool cpufreq_fix_status; + struct mutex cpufreq_fix; + struct pm_qos_request dhd_cpu_qos; +#ifdef 
FIX_BUS_MIN_CLOCK + struct pm_qos_request dhd_bus_qos; +#endif /* FIX_BUS_MIN_CLOCK */ +#endif /* FIX_CPU_MIN_CLOCK */ + void *dhd_deferred_wq; +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) + ctf_t *cih; /* ctf instance handle */ + ctf_brc_hot_t *brc_hot; /* hot ctf bridge cache entry */ +#endif /* BCM_ROUTER_DHD && HNDCTF */ +#ifdef DEBUG_CPU_FREQ + struct notifier_block freq_trans; + int __percpu *new_freq; +#endif + unsigned int unit; + struct notifier_block pm_notifier; +#ifdef DHD_PSTA + uint32 psta_mode; /* PSTA or PSR */ +#endif /* DHD_PSTA */ +#ifdef DHD_WET + uint32 wet_mode; +#endif /* DHD_WET */ +#ifdef DHD_DEBUG + dhd_dump_t *dump; + timer_list_compat_t join_timer; + u32 join_timeout_val; + bool join_timer_active; + uint scan_time_count; + timer_list_compat_t scan_timer; + bool scan_timer_active; +#endif + struct delayed_work dhd_dpc_dispatcher_work; + + /* CPU on which the DHD DPC is running */ + atomic_t dpc_cpu; + atomic_t prev_dpc_cpu; +#if defined(DHD_LB) +#if defined(DHD_LB_HOST_CTRL) + bool permitted_primary_cpu; +#endif /* DHD_LB_HOST_CTRL */ + /* CPU Load Balance dynamic CPU selection */ + + /* Variable that tracks the currect CPUs available for candidacy */ + cpumask_var_t cpumask_curr_avail; + + /* Primary and secondary CPU mask */ + cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */ + cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */ + + struct notifier_block cpu_notifier; + + /* Napi struct for handling rx packet sendup. Packets are removed from + * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then + * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled + * to run to rx_napi_cpu. + */ + struct sk_buff_head rx_pend_queue ____cacheline_aligned; + struct sk_buff_head rx_napi_queue ____cacheline_aligned; + struct sk_buff_head rx_process_queue ____cacheline_aligned; + struct napi_struct rx_napi_struct ____cacheline_aligned; + atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ + struct net_device *rx_napi_netdev; /* netdev of primary interface */ + + struct work_struct rx_napi_dispatcher_work; + struct work_struct tx_compl_dispatcher_work; + struct work_struct tx_dispatcher_work; + struct work_struct rx_compl_dispatcher_work; + + /* Number of times DPC Tasklet ran */ + uint32 dhd_dpc_cnt; + /* Number of times NAPI processing got scheduled */ + uint32 napi_sched_cnt; + /* NAPI latency stats */ + uint64 *napi_latency; + uint64 napi_schedule_time; + /* Number of times NAPI processing ran on each available core */ + uint32 *napi_percpu_run_cnt; + /* Number of times RX Completions got scheduled */ + uint32 rxc_sched_cnt; + /* Number of times RX Completion ran on each available core */ + uint32 *rxc_percpu_run_cnt; + /* Number of times TX Completions got scheduled */ + uint32 txc_sched_cnt; + /* Number of times TX Completions ran on each available core */ + uint32 *txc_percpu_run_cnt; + /* CPU status */ + /* Number of times each CPU came online */ + uint32 *cpu_online_cnt; + /* Number of times each CPU went offline */ + uint32 *cpu_offline_cnt; + + /* Number of times TX processing run on each core */ + uint32 *txp_percpu_run_cnt; + /* Number of times TX start run on each core */ + uint32 *tx_start_percpu_run_cnt; + + /* Tx load balancing */ + + /* TODO: Need to see if batch processing is really required in case of TX + * processing. 
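+	 * As a reference point, the RX dispatch step described above
+	 * amounts to a single locked splice plus a NAPI kick;
+	 * schematically (illustrative, cf. the load-balance code in
+	 * dhd_linux_lb.c):
+	 *
+	 *	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+	 *	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+	 *	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+	 *	napi_schedule(&dhd->rx_napi_struct);
+	 *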
In case of RX the Dongle can send a bunch of rx completions, + * hence we took a 3 queue approach + * enque - adds the skbs to rx_pend_queue + * dispatch - uses a lock and adds the list of skbs from pend queue to + * napi queue + * napi processing - copies the pend_queue into a local queue and works + * on it. + * But for TX its going to be 1 skb at a time, so we are just thinking + * of using only one queue and use the lock supported skb queue functions + * to add and process it. If its in-efficient we'll re-visit the queue + * design. + */ + + /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */ + /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */ + /* + * From the Tasklet that actually sends out data + * copy the list tx_pend_queue into tx_active_queue. There by we need + * to spinlock to only perform the copy the rest of the code ie to + * construct the tx_pend_queue and the code to process tx_active_queue + * can be lockless. The concept is borrowed as is from RX processing + */ + /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */ + + /* Control TXP in runtime, enable by default */ + atomic_t lb_txp_active; + + /* Control RXP in runtime, enable by default */ + atomic_t lb_rxp_active; + + /* + * When the NET_TX tries to send a TX packet put it into tx_pend_queue + * For now, the processing tasklet will also direcly operate on this + * queue + */ + struct sk_buff_head tx_pend_queue ____cacheline_aligned; + + /* cpu on which the DHD Tx is happenning */ + atomic_t tx_cpu; + + /* CPU on which the Network stack is calling the DHD's xmit function */ + atomic_t net_tx_cpu; + + /* Tasklet context from which the DHD's TX processing happens */ + struct tasklet_struct tx_tasklet; + + /* + * Consumer Histogram - NAPI RX Packet processing + * ----------------------------------------------- + * On Each CPU, when the NAPI RX Packet processing call back was invoked + * how many packets were processed is captured in this data structure. + * Now its difficult to capture the "exact" number of packets processed. + * So considering the packet counter to be a 32 bit one, we have a + * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets + * processed is rounded off to the next power of 2 and put in the + * approriate "bin" the value in the bin gets incremented. 
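+	 * A sketch of that update (hypothetical helper; fls() comes from
+	 * <linux/bitops.h>):
+	 *
+	 *	bin = (npkts > 1) ? fls(npkts - 1) : 0;	// ceil(log2(npkts))
+	 *	if (bin >= HIST_BIN_SIZE)
+	 *		bin = HIST_BIN_SIZE - 1;	// clamp to last bin
+	 *	hist[bin]++;
+	 *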
+ * For example, assume that in CPU 1 if NAPI Rx runs 3 times + * and the packet count processed is as follows (assume the bin counters are 0) + * iteration 1 - 10 (the bin counter 2^4 increments to 1) + * iteration 2 - 30 (the bin counter 2^5 increments to 1) + * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) + */ + uint32 *napi_rx_hist[HIST_BIN_SIZE]; + uint32 *txc_hist[HIST_BIN_SIZE]; + uint32 *rxc_hist[HIST_BIN_SIZE]; + struct kobject dhd_lb_kobj; + bool dhd_lb_candidacy_override; +#endif /* DHD_LB */ +#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR) + struct work_struct axi_error_dispatcher_work; +#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ +#ifdef SHOW_LOGTRACE +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + tsk_ctl_t thr_logtrace_ctl; +#else + struct delayed_work event_log_dispatcher_work; +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ +#endif /* SHOW_LOGTRACE */ + +#ifdef BTLOG + struct work_struct bt_log_dispatcher_work; +#endif /* SHOW_LOGTRACE */ +#ifdef EWP_EDL + struct delayed_work edl_dispatcher_work; +#endif +#if defined(WLAN_ACCEL_BOOT) + int fs_check_retry; + struct delayed_work wl_accel_work; + bool wl_accel_force_reg_on; + bool wl_accel_boot_on_done; +#endif +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#if defined(BCMDBUS) + struct task_struct *fw_download_task; + struct semaphore fw_download_lock; +#endif /* BCMDBUS */ +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + struct kobject dhd_kobj; + timer_list_compat_t timesync_timer; +#if defined(BT_OVER_SDIO) + char btfw_path[PATH_MAX]; +#endif /* defined (BT_OVER_SDIO) */ +#ifdef WL_MONITOR + struct net_device *monitor_dev; /* monitor pseudo device */ + struct sk_buff *monitor_skb; + uint monitor_len; + uint monitor_type; /* monitor pseudo device */ +#ifdef HOST_RADIOTAP_CONV + monitor_info_t *monitor_info; + uint host_radiotap_conv; +#endif /* HOST_RADIOTAP_CONV */ +#endif /* WL_MONITOR */ +#if defined (BT_OVER_SDIO) + struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */ + int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */ +#endif /* BT_OVER_SDIO */ +#ifdef SHOW_LOGTRACE + struct sk_buff_head evt_trace_queue ____cacheline_aligned; +#endif +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + struct workqueue_struct *tx_wq; + struct workqueue_struct *rx_wq; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#ifdef BTLOG + struct sk_buff_head bt_log_queue ____cacheline_aligned; +#endif /* BTLOG */ +#ifdef PCIE_INB_DW + wait_queue_head_t ds_exit_wait; +#endif /* PCIE_INB_DW */ +#ifdef DHD_DEBUG_UART + bool duart_execute; +#endif /* DHD_DEBUG_UART */ +#ifdef BT_OVER_PCIE + struct mutex quiesce_flr_lock; + struct mutex quiesce_lock; + enum dhd_bus_quiesce_state dhd_quiesce_state; +#endif /* BT_OVER_PCIE */ + struct mutex logdump_lock; +#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) + /* Root directory for GDB Proxy's (proc)fs files, used by first (default) interface */ + struct proc_dir_entry *gdb_proxy_fs_root; + /* Name of procfs root directory */ + char gdb_proxy_fs_root_name[100]; +#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */ +#if defined(DHD_MQ) && defined(DHD_MQ_STATS) + uint64 pktcnt_qac_histo[MQ_MAX_QUEUES][AC_COUNT]; + uint64 pktcnt_per_ac[AC_COUNT]; + uint64 cpu_qstats[MQ_MAX_QUEUES][MQ_MAX_CPUS]; +#endif /* DHD_MQ && DHD_MQ_STATS */ + /* indicates mem_dump was scheduled as work queue or called directly */ + bool 
scheduled_memdump; +#ifdef DHD_PKTTS + bool latency; /* pktts enab flag */ + pktts_flow_t config[PKTTS_CONFIG_MAX]; /* pktts user config */ +#endif /* DHD_PKTTS */ + struct work_struct dhd_hang_process_work; +#ifdef DHD_HP2P + spinlock_t hp2p_lock; +#endif /* DHD_HP2P */ +#ifdef DHD_QOS_ON_SOCK_FLOW + struct dhd_sock_qos_info *psk_qos; +#endif +} dhd_info_t; + +#ifdef WL_MONITOR +#define MONPKT_EXTRA_LEN 48u +#endif /* WL_MONITOR */ + +extern int dhd_sysfs_init(dhd_info_t *dhd); +extern void dhd_sysfs_exit(dhd_info_t *dhd); +extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp); +extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp); + +int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf); + +void dhd_dpc_tasklet_dispatcher_work(struct work_struct * work); +#if defined(DHD_LB) +#if defined(DHD_LB_TXP) +int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb); +void dhd_tx_dispatcher_work(struct work_struct * work); +void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp); +void dhd_lb_tx_dispatch(dhd_pub_t *dhdp); +void dhd_lb_tx_handler(unsigned long data); +#endif /* DHD_LB_TXP */ + +#if defined(DHD_LB_RXP) +int dhd_napi_poll(struct napi_struct *napi, int budget); +void dhd_rx_napi_dispatcher_work(struct work_struct * work); +void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp); +void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx); +unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp); +#endif /* DHD_LB_RXP */ + +void dhd_lb_set_default_cpus(dhd_info_t *dhd); +void dhd_cpumasks_deinit(dhd_info_t *dhd); +int dhd_cpumasks_init(dhd_info_t *dhd); + +void dhd_select_cpu_candidacy(dhd_info_t *dhd); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int dhd_cpu_startup_callback(unsigned int cpu); +int dhd_cpu_teardown_callback(unsigned int cpu); +#else +int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu); +#endif /* LINUX_VERSION_CODE < 4.10.0 */ + +int dhd_register_cpuhp_callback(dhd_info_t *dhd); +int dhd_unregister_cpuhp_callback(dhd_info_t *dhd); +#endif /* DHD_LB */ + +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) +void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask); +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ +#ifdef DHD_SSSR_DUMP +extern uint sssr_enab; +extern uint fis_enab; +#endif /* DHD_SSSR_DUMP */ + +#ifdef CONFIG_HAS_WAKELOCK +enum { + WAKE_LOCK_SUSPEND, /* Prevent suspend */ + WAKE_LOCK_TYPE_COUNT +}; +#define dhd_wake_lock_init(wakeup_source, type, name) wakeup_source_add(wakeup_source) +#define dhd_wake_lock_destroy(wakeup_source) wakeup_source_remove(wakeup_source) +#define dhd_wake_lock(wakeup_source) __pm_stay_awake(wakeup_source) +#define dhd_wake_unlock(wakeup_source) __pm_relax(wakeup_source) +#define dhd_wake_lock_active(wakeup_source) ((wakeup_source)->active) +#define dhd_wake_lock_timeout(wakeup_source, timeout) \ + __pm_wakeup_event(wakeup_source, jiffies_to_msecs(timeout)) +#endif /* CONFIG_HAS_WAKELOCK */ + +#endif /* __DHD_LINUX_PRIV_H__ */ diff --git a/bcmdhd.101.10.361.x/dhd_linux_sched.c b/bcmdhd.101.10.361.x/dhd_linux_sched.c new file mode 100755 index 0000000..3d6786a --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_sched.c @@ -0,0 +1,47 @@ +/* + * Expose some of the kernel scheduler routines + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ +#include +#include +#include +#include +#include + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param) +{ + int rc = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) + sched_set_fifo_low(p); +#else + rc = sched_setscheduler(p, policy, param); +#endif + return rc; +} + +int get_scheduler_policy(struct task_struct *p) +{ + int rc = SCHED_NORMAL; + rc = p->policy; + return rc; +} diff --git a/bcmdhd.101.10.361.x/dhd_linux_sock_qos.c b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.c new file mode 100755 index 0000000..3348d89 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.c @@ -0,0 +1,1034 @@ +/* + * Source file for DHD QOS on Socket Flow. + * + * Defines a socket flow and maintains a table of socket flows + * for further analysis in order to upgrade the QOS of the flow. + + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Maximum number of Socket Flows supported */ +#define MAX_SOCK_FLOW (1024UL) + +#define SOCK_FLOW_UPGRADE_THRESHOLD (3) +/* + * Mark a Socket Flow as inactive and free the resources + * if there is no packet receied for SOCK_IDLE_THREASHOLD_MS + * of time. Note that this parameter is in milli seconds. 
+ */ +#define SOCK_IDLE_THRESHOLD_MS (2000UL) + +#define DSCP_TOS_CS7 0XE0u + +extern uint dhd_watchdog_ms; + +/* Defines Socket Flow */ +struct dhd_sock_flow_info +{ + /* Unique identifiers */ + struct sock *sk; + unsigned long ino; + + /* statistics */ + qos_stat_t stats; + u64 last_pkt_ns; + kuid_t uid; + + /* Elements related to upgrade management */ + + /* 0 - No upgrade + * 1 - Upgrade + */ + unsigned int cur_up_state; + unsigned int rcm_up_state; + unsigned int bus_flow_id; + + /* TODO: + * Handling Out Of Order during upgrade + * Once an upgrade is decided we cannot handover the skb to + * FW in the upgraded Flow Ring ... it will create Out of Order Packets. + * Instead we can have a output_q per socket flow. Once the upgrade is + * decided, we can start adding skbs to the output_q. The last 'skb' given + * to the actual Flow ring should be remembered in 'last_skb_orig_fl'. + * Once we get a Tx completion for last_skb_orig_fl we can flush the + * contents of output_q to the 'upgraded flowring'. In this solution, + * we should also handle the case where output_q hits the watermark + * before the completion for 'last_skb_orig_fl' is received. If this condition + * happens, not to worry about OOO and flush the contents of output_q. + * Probably the last_skb_orig_fl is not sent out due latency in the + * existing flow ... the actual problem we are trying to solve. + */ + + /* Management elements */ + struct list_head list; + unsigned int in_use; +}; + +typedef enum _frameburst_state +{ + FRMBRST_DISABLED = 0, + FRMBRST_ENABLED = 1 +} frameburst_state_t; + +/* Sock QOS Module Structure */ +typedef struct dhd_sock_qos_info +{ + /* Table of Socket Flows */ + struct dhd_sock_flow_info *sk_fl; + /* maximum number for socket flows supported */ + uint32 max_sock_fl; + + /* TODO: need to make it per flow later on */ + /* global qos algo parameters */ + qos_algo_params_t qos_params; + /* List in which active Socket Flows live */ + struct list_head sk_fl_list_head; + void *list_lock; + + /* Time interval a socket flow resource is moved out of the active list */ + uint32 sock_idle_thresh; + /* + * Keep track of number of flows upgraded. + * If it reaches a threshold we should stop ugrading + * This is to avoid the problem where we overwhelm + * the Dongle with upgraded traffic. + */ + int num_skfl_upgraded; + int skfl_upgrade_thresh; + + /* flag that is set to true when the first flow is upgraded + * so that FW frameburst is disabled, and set to false + * when no more flows are in upgraded state, so that + * FW frameburst is re-enabled + */ + bool upgrade_active; + /* fw frameburst state */ + frameburst_state_t frmbrst_state; + + atomic_t on_off; + atomic_t force_upgrade; + + /* required for enabling/disabling watchdog timer at runtime */ + uint watchdog_ms; +} dhd_sock_qos_info_t; + +#define SK_FL_LIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock) +#define SK_FL_LIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags)) + +int +dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms) +{ + unsigned long sz; + unsigned int i; + struct dhd_sock_flow_info *sk_fl = NULL; + int val = 0, ret = 0; + + if (dhd == NULL) + return BCME_BADARG; + + dhd->psk_qos = MALLOCZ(dhd->pub.osh, sizeof(dhd_sock_qos_info_t)); + if (dhd->psk_qos == NULL) { + DHD_ERROR(("%s(): Failed to alloc psk_qos ! 
\n", __FUNCTION__)); + return BCME_NOMEM; + } + dhd->psk_qos->max_sock_fl = MAX_SOCK_FLOW; + sz = sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW; + dhd->psk_qos->sk_fl = MALLOCZ(dhd->pub.osh, sz); + if (dhd->psk_qos->sk_fl == NULL) { + DHD_ERROR(("%s(): Failed to allocated sk_fl \r\n", __FUNCTION__)); + return BCME_NOMEM; + } + + sk_fl = dhd->psk_qos->sk_fl; + for (i = 0; i < MAX_SOCK_FLOW; i++, sk_fl++) { + sk_fl->in_use = 0; + } + + dhd->psk_qos->sock_idle_thresh = SOCK_IDLE_THRESHOLD_MS; + + dhd->psk_qos->skfl_upgrade_thresh = SOCK_FLOW_UPGRADE_THRESHOLD; + + INIT_LIST_HEAD(&dhd->psk_qos->sk_fl_list_head); + dhd->psk_qos->list_lock = osl_spin_lock_init(dhd->pub.osh); + + dhd->psk_qos->watchdog_ms = watchdog_ms; + /* feature is DISABLED by default */ + dhd_sock_qos_set_status(dhd, 0); + + qos_algo_params_init(&dhd->psk_qos->qos_params); + + dhd->psk_qos->frmbrst_state = FRMBRST_ENABLED; + /* read the initial state of frameburst from FW, cannot + * assume that it will always be in enabled state by default. + * We will cache the FW frameburst state in host and change + * it everytime we change it from host during QoS upgrade. + * This decision is taken, because firing an iovar everytime + * to query FW frameburst state before deciding whether to + * changing the frameburst state or not from host, is sub-optimal, + * especially in the Tx path. + */ + ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_FAKEFRAG, (char *)&val, + sizeof(val), FALSE, 0); + if (ret != BCME_OK) { + DHD_ERROR(("%s: get fw frameburst failed," + " err=%d\n", __FUNCTION__, ret)); + } else { + DHD_INFO(("%s:fw frameburst = %d", __FUNCTION__, val)); + dhd->psk_qos->frmbrst_state = + (val == 1) ? FRMBRST_ENABLED : FRMBRST_DISABLED; + } + return BCME_OK; +} + +int +dhd_deinit_sock_flows_buf(dhd_info_t *dhd) +{ + if (dhd == NULL) + return BCME_BADARG; + + if (dhd->psk_qos->sk_fl) { + MFREE(dhd->pub.osh, dhd->psk_qos->sk_fl, + sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW); + dhd->psk_qos->sk_fl = NULL; + } + + osl_spin_lock_deinit(dhd->pub.osh, dhd->psk_qos->list_lock); + MFREE(dhd->pub.osh, dhd->psk_qos, sizeof(dhd_sock_qos_info_t)); + dhd->psk_qos = NULL; + + return BCME_OK; +} + +/* Caller should hold list_lock */ +static inline struct dhd_sock_flow_info * +__dhd_find_sock_stream_info(dhd_sock_qos_info_t *psk_qos, unsigned long ino) +{ + struct dhd_sock_flow_info *sk_fl = NULL; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, + list) { + if (sk_fl && (sk_fl->ino == ino)) { + return sk_fl; + } + } /* end of list iteration */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + /* If control comes here, the ino is not found */ + DHD_INFO(("%s(): ino:%lu not found \r\n", __FUNCTION__, ino)); + + return NULL; +} + +static struct dhd_sock_flow_info * +dhd_alloc_sock_stream_info(dhd_sock_qos_info_t *psk_qos) +{ + struct dhd_sock_flow_info *sk_fl = psk_qos->sk_fl; + int i; + + for (i = 0; i < psk_qos->max_sock_fl; i++, sk_fl++) { + if (sk_fl->in_use == 0) { + DHD_ERROR(("%s: Use sk_fl %p \r\n", __FUNCTION__, sk_fl)); + return sk_fl; + } + } + DHD_INFO(("No Free Socket Stream info \r\n")); + return NULL; +} + +/* Caller should hold list_lock */ +static inline void +__dhd_free_sock_stream_info(dhd_sock_qos_info_t *psk_qos, + struct dhd_sock_flow_info *sk_fl) +{ + /* + * If the socket flow getting freed is an upgraded socket flow, + * we can upgrade 
one more flow. + */ + if (sk_fl->cur_up_state == 1) { + --psk_qos->num_skfl_upgraded; + ASSERT(psk_qos->num_skfl_upgraded >= 0); + } + + /* Remove the flow from active list */ + list_del(&sk_fl->list); + + DHD_ERROR(("%s(): Cleaning Socket Flow ino:%lu psk_qos->num_skfl_upgraded=%d\r\n", + __FUNCTION__, sk_fl->ino, psk_qos->num_skfl_upgraded)); + + /* Clear its content */ + memset_s(sk_fl, sizeof(*sk_fl), 0, sizeof(*sk_fl)); + + return; +} + +static void +dhd_clean_idle_sock_streams(dhd_sock_qos_info_t *psk_qos) +{ + struct dhd_sock_flow_info *sk_fl = NULL, *next = NULL; + u64 now; + u64 diff; + unsigned long flags = 0; + now = local_clock(); + + SK_FL_LIST_LOCK(psk_qos->list_lock, flags); + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_entry_safe(sk_fl, next, &psk_qos->sk_fl_list_head, list) { + if (sk_fl) { + + if (sk_fl->in_use == 0) { + DHD_ERROR_RLMT(("%s:Something wrong," + " a free sk_fl living in active stream\n", + __FUNCTION__)); + DHD_ERROR_RLMT(("sk_fl:%p sk:%p ino:%lu \r\n", + sk_fl, sk_fl->sk, sk_fl->ino)); + continue; + } + + /* XXX: TODO: need to investigate properly in future. + * it is observed that in some hosts (FC25), the + * current timestamp is lesser than previous timestamp + * leading to false cleanups + */ + if (now <= sk_fl->last_pkt_ns) + continue; + + diff = now - sk_fl->last_pkt_ns; + + /* Convert diff which is in ns to ms */ + diff = div64_u64(diff, 1000000UL); + if (diff >= psk_qos->sock_idle_thresh) { + DHD_ERROR(("sk_fl->sk:%p sk_fl->i_no:%lu \r\n", + sk_fl->sk, sk_fl->ino)); + if (sk_fl->cur_up_state == 1 && + psk_qos->num_skfl_upgraded == 1) { + psk_qos->upgrade_active = FALSE; + } + __dhd_free_sock_stream_info(psk_qos, sk_fl); + } + } + } /* end of list iteration */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags); + +} + +static inline int +__dhd_upgrade_sock_flow(dhd_info_t *dhd, + struct dhd_sock_flow_info *sk_fl, + struct sk_buff *skb) +{ + dhd_sock_qos_info_t *psk_qos = dhd->psk_qos; +#ifdef DHD_HP2P + dhd_pub_t *dhdp = &dhd->pub; +#endif + uint8 *pktdat = NULL; + struct ether_header *eh = NULL; + struct iphdr *iph = NULL; + + /* Before upgrading a flow, + * Check the bound to control the number of flows getting upgraded + */ + if ((sk_fl->rcm_up_state == 1) && (sk_fl->cur_up_state == 0)) { + if (psk_qos->num_skfl_upgraded >= psk_qos->skfl_upgrade_thresh) { + DHD_ERROR_RLMT(("%s(): Thresh hit num_skfl_upgraded:%d" + "skfl_upgrade_thresh:%d \r\n", + __FUNCTION__, psk_qos->num_skfl_upgraded, + psk_qos->skfl_upgrade_thresh)); + return BCME_ERROR; + } else { + if (psk_qos->num_skfl_upgraded == 0) { + /* if no flows upgraded till now, and this is the + * first flow to be upgraded, + * then disable frameburst in FW. + * The actual iovar to disable frameburst cannot + * be fired here because Tx can happen in atomic context + * and dhd_iovar can sleep due to proto_block lock being + * held. 
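+				 * (Schematically: this atomic path only sets
+				 *	psk_qos->upgrade_active = TRUE;
+				 * and dhd_analyze_sock_flows(), which runs from
+				 * the watchdog in a context that may sleep,
+				 * later does
+				 *	if (psk_qos->upgrade_active)
+				 *		dhd_change_frameburst_state(FRMBRST_DISABLED, dhd);
+				 * )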
Instead the flag is checked from + * 'dhd_analyze_sock_flows' which execs in non-atomic context + * and the iovar is fired from there + */ + DHD_TRACE(("%s: disable frameburst ..", __FUNCTION__)); + psk_qos->upgrade_active = TRUE; + } + ++psk_qos->num_skfl_upgraded; + DHD_ERROR_RLMT(("%s(): upgrade flow sk_fl %p," + "num_skfl_upgraded:%d skfl_upgrade_thresh:%d \r\n", + __FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded, + psk_qos->skfl_upgrade_thresh)); + } + } + + /* Upgrade the skb */ +#ifdef DHD_HP2P + if (dhdp->hp2p_capable) + skb->priority = TC_PRIO_CONTROL; + else + skb->priority = TC_PRIO_INTERACTIVE; +#else + skb->priority = TC_PRIO_INTERACTIVE; +#endif /* DHD_HP2P */ + + pktdat = PKTDATA(dhd->pub.osh, skb); + eh = (struct ether_header *) pktdat; + if (pktdat && (eh->ether_type == hton16(ETHER_TYPE_IP))) { + /* 'upgrade' DSCP also, else it is observed that on + * AP side if DSCP value is not in sync with L2 prio + * then out of order packets are observed + */ + iph = (struct iphdr *)(pktdat + sizeof(struct ether_header)); + iph->tos = DSCP_TOS_CS7; + /* re-compute ip hdr checksum + * NOTE: this takes around 1us, need to profile more + * accurately to get the number of cpu cycles it takes + * in order to get a better idea of the impact of + * re computing ip hdr chksum in data path + */ + ip_send_check(iph); + } + + /* Mark the Flow as 'upgraded' */ + if (sk_fl->cur_up_state == 0) + sk_fl->cur_up_state = 1; + + return BCME_OK; +} + +static inline int +__dhd_downgrade_sock_flow(dhd_info_t *dhd, + struct dhd_sock_flow_info *sk_fl, + struct sk_buff *skb) +{ + dhd_sock_qos_info_t *psk_qos = dhd->psk_qos; + + if ((sk_fl->rcm_up_state == 0) && (sk_fl->cur_up_state == 1)) { + /* sanity check */ + ASSERT(psk_qos->num_skfl_upgraded > 0); + if (psk_qos->num_skfl_upgraded <= 0) { + DHD_ERROR_RLMT(("%s(): FATAL ! no upgraded flows !\n", + __FUNCTION__)); + return BCME_ERROR; + } + + if (psk_qos->num_skfl_upgraded == 1) { + /* if this is the + * last flow to be downgraded, + * then re-enable frameburst in FW. + * The actual iovar to enable frameburst cannot + * be fired here because Tx can happen in atomic context + * and dhd_iovar can sleep due to proto_block lock being + * held. Instead the flag is checked from + * 'dhd_analyze_sock_flows' which execs in non-atomic context + * and the iovar is fired from there + */ + DHD_TRACE(("%s: enable frameburst ..", __FUNCTION__)); + psk_qos->upgrade_active = FALSE; + } + --psk_qos->num_skfl_upgraded; + DHD_ERROR_RLMT(("%s(): downgrade flow sk_fl %p," + "num_skfl_upgraded:%d \r\n", + __FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded)); + } + + /* Mark the Flow as 'downgraded' */ + if (sk_fl->cur_up_state == 1) + sk_fl->cur_up_state = 0; + + return BCME_OK; +} + +/* + * Update the stats of a Socket flow. + * Create a new flow if need be. + * If a socket flow has been recommended for upgrade, do so. + */ +void +dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb) +{ + struct sock *sk = NULL; + unsigned long ino = 0; + struct dhd_sock_flow_info *sk_fl = NULL; + dhd_sock_qos_info_t *psk_qos = NULL; + unsigned long flags = 0; + uint8 prio; + + BCM_REFERENCE(prio); + + if ((dhd == NULL) || (skb == NULL)) { + DHD_ERROR_RLMT(("%s: Invalid args \n", __FUNCTION__)); + return; + } + + /* If the Feature is disabled, return */ + if (dhd_sock_qos_get_status(dhd) == 0) + return; + + psk_qos = dhd->psk_qos; + sk = (struct sock *)PKTSOCK(dhd->pub.osh, skb); + + /* TODO: + * Some times sk is NULL, what does that mean ... 
+ * is it a broadcast packet generated by Network Stack ???? + */ + if (sk == NULL) { + return; + } + ino = sock_i_ino(sk); + + /* TODO: + * List Lock need not be held for allocating sock stream .. optimize + */ + SK_FL_LIST_LOCK(psk_qos->list_lock, flags); + + sk_fl = __dhd_find_sock_stream_info(psk_qos, ino); + if (sk_fl == NULL) { + /* Allocate new sock stream */ + sk_fl = dhd_alloc_sock_stream_info(psk_qos); + if (sk_fl == NULL) { + SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags); + goto done; + } + else { + /* SK Flow elements updated first time */ + sk_fl->in_use = 1; + sk_fl->sk = sk; + sk_fl->ino = ino; + /* TODO: Seeing a Kernel Warning ... check */ + /* sk_fl->uid = sock_i_uid(sk); */ + sk_fl->cur_up_state = 0; + list_add_tail(&sk_fl->list, &psk_qos->sk_fl_list_head); + DHD_ERROR(("%s(): skb %p sk %p sk_fl %p ino %lu" + " prio 0x%x \r\n", __FUNCTION__, skb, + sk, sk_fl, ino, skb->priority)); + } /* end of new sk flow allocation */ + } /* end of case when sk flow is found */ + + sk_fl->stats.tx_pkts++; + sk_fl->stats.tx_bytes += skb->len; + sk_fl->last_pkt_ns = local_clock(); + + SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags); + + if (sk_fl->rcm_up_state == 1) { + __dhd_upgrade_sock_flow(dhd, sk_fl, skb); + } else { + __dhd_downgrade_sock_flow(dhd, sk_fl, skb); + } + + prio = PKTPRIO(skb); + DHD_INFO(("%s(): skb:%p skb->priority 0x%x prio %d sk_fl %p\r\n", __FUNCTION__, skb, + skb->priority, prio, sk_fl)); +done: + return; +} + +static int +dhd_change_frameburst_state(frameburst_state_t newstate, dhd_info_t *dhd) +{ + int ret = 0, val = 0; + dhd_sock_qos_info_t *psk_qos = NULL; + + if (!dhd) + return BCME_BADARG; + if (!dhd->psk_qos) + return BCME_BADARG; + + psk_qos = dhd->psk_qos; + + /* Check with the cached frameburst state on host + * instead of querying FW frameburst state. + * This decision is taken, because firing an iovar everytime + * to query FW frameburst state before deciding whether to + * changing the frameburst state or not is sub-optimal, + * especially in the Tx path. + */ + if (psk_qos->frmbrst_state == newstate) + return BCME_BADOPTION; + + val = (newstate == FRMBRST_ENABLED) ? 
1 : 0;
+	ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_FAKEFRAG, (char *)&val,
+			sizeof(val), TRUE, 0);
+	if (ret != BCME_OK) {
+		DHD_ERROR_RLMT(("%s: set frameburst=%d failed,"
+			" err=%d\n", __FUNCTION__, val, ret));
+	} else {
+		/* change the cached state */
+		DHD_INFO(("%s: set frameburst=%d\n", __FUNCTION__, val));
+		psk_qos->frmbrst_state = newstate;
+	}
+
+	return ret;
+}
+
+void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms)
+{
+	struct dhd_sock_flow_info *sk_fl = NULL;
+	dhd_sock_qos_info_t *psk_qos = NULL;
+	unsigned long flags = 0;
+
+	if (dhd == NULL) {
+		DHD_ERROR_RLMT(("%s: Bad argument \r\n", __FUNCTION__));
+		return;
+	}
+
+	/* Check whether the feature is disabled */
+	if (dhd_sock_qos_get_status(dhd) == 0)
+		return;
+
+	psk_qos = dhd->psk_qos;
+
+	dhd_clean_idle_sock_streams(dhd->psk_qos);
+
+	/* TODO: Plug in the QoS Algorithm here */
+	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
+
+		sk_fl->rcm_up_state = dhd_qos_algo(dhd, &sk_fl->stats, &psk_qos->qos_params);
+
+		/* TODO: Handle downgrades */
+
+		/* update sk_flow previous elements on every sampling interval */
+		sk_fl->stats.tx_pkts_prev = sk_fl->stats.tx_pkts;
+		sk_fl->stats.tx_bytes_prev = sk_fl->stats.tx_bytes;
+
+		/* TODO: Handle the condition where num_skfl_upgraded reaches the threshold */
+
+		/* TODO: Handle the condition where we upgrade all the socket flows
+		 * of the uid on which one flow is detected to be upgraded.
+		 */
+
+	} /* end of list iteration */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+	/* disable frameburst in FW on the first flow upgraded */
+	if (psk_qos->upgrade_active) {
+		dhd_change_frameburst_state(FRMBRST_DISABLED, dhd);
+	} else {
+		/* if no upgraded flows remain, either after cleanup
+		 * or after a downgrade,
+		 * then re-enable frameburst in FW
+		 */
+		dhd_change_frameburst_state(FRMBRST_ENABLED, dhd);
+	}
+
+	return;
+}
+
+void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
+	uint32 bus_flow_id)
+{
+	BCM_REFERENCE(dhd);
+	BCM_REFERENCE(pktbuf);
+	BCM_REFERENCE(bus_flow_id);
+	return;
+}
+
+/* ================= Sysfs interface support functions ======================== */
+
+unsigned long dhd_sock_qos_get_status(dhd_info_t *dhd)
+{
+	if (dhd == NULL)
+		return 0;
+
+	return (atomic_read(&dhd->psk_qos->on_off));
+}
+
+void dhd_sock_qos_set_status(dhd_info_t *dhd, unsigned long on_off)
+{
+	if (dhd == NULL)
+		return;
+
+	atomic_set(&dhd->psk_qos->on_off, on_off);
+	if (on_off) {
+		dhd_watchdog_ms = QOS_SAMPLING_INTVL_MS;
+		/* enable watchdog to monitor the socket flows */
+		dhd_os_wd_timer(&dhd->pub, QOS_SAMPLING_INTVL_MS);
+	} else {
+		dhd_watchdog_ms = dhd->psk_qos->watchdog_ms;
+		/* disable watchdog or set it back to the original value */
+		dhd_os_wd_timer(&dhd->pub, dhd->psk_qos->watchdog_ms);
+	}
+	return;
+}
+
+ssize_t dhd_sock_qos_show_stats(dhd_info_t *dhd, char *buf,
+	ssize_t sz)
+{
+	dhd_sock_qos_info_t *psk_qos = NULL;
+	struct dhd_sock_flow_info *sk_fl = NULL;
+	unsigned long flags = 0;
+	ssize_t ret = 0;
+	char *p = buf;
+
+	/* TODO: Should be the actual record length */
+	unsigned long rec_len = 100;
+
+	if (dhd == NULL)
+		return -1;
+
+	psk_qos = dhd->psk_qos;
+
+	ret += scnprintf(p, sz-ret-1, "\nino\t sk\t\t\t tx_pkts\t tx_bytes\t"
+		"last_pkt_ns\r\n");
+	p = buf + ret;
+
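+	/*
+	 * Note: 'ret' accumulates the total bytes written, so 'buf + ret'
+	 * is always the next free byte; the loop below relies on that
+	 * invariant when it recomputes 'p'. The general accumulation idiom
+	 * is:
+	 *
+	 *	len = scnprintf(buf + ret, sz - ret, ...);	// len excludes '\0'
+	 *	ret += len;
+	 */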
+	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
+		/* Protect the buffer from overrun */
+		if (ret + rec_len >= sz)
+			break;
+
+		ret += scnprintf(p, sz-ret-1, "%lu\t %p\t %lu\t %lu\t %llu\t \r\n",
+			sk_fl->ino, sk_fl->sk, sk_fl->stats.tx_pkts, sk_fl->stats.tx_bytes,
+			sk_fl->last_pkt_ns);
+
+		/* recompute the write position from the running total;
+		 * 'p += ret' would over-advance because 'ret' is cumulative
+		 */
+		p = buf + ret;
+
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+	return ret + 1;
+}
+
+void dhd_sock_qos_clear_stats(dhd_info_t *dhd)
+{
+	dhd_sock_qos_info_t *psk_qos = NULL;
+	struct dhd_sock_flow_info *sk_fl = NULL;
+	unsigned long flags = 0;
+
+	if (dhd == NULL)
+		return;
+
+	psk_qos = dhd->psk_qos;
+
+	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
+		sk_fl->stats.tx_pkts = 0;
+		sk_fl->stats.tx_bytes = 0;
+		sk_fl->stats.tx_pkts_prev = 0;
+		sk_fl->stats.tx_bytes_prev = 0;
+		sk_fl->last_pkt_ns = 0;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+	return;
+}
+
+unsigned long dhd_sock_qos_get_force_upgrade(dhd_info_t *dhd)
+{
+	if (dhd == NULL)
+		return 0;
+
+	return (atomic_read(&dhd->psk_qos->force_upgrade));
+}
+
+void dhd_sock_qos_set_force_upgrade(dhd_info_t *dhd, unsigned long force_upgrade)
+{
+	if (dhd == NULL)
+		return;
+
+	atomic_set(&dhd->psk_qos->force_upgrade, force_upgrade);
+	return;
+}
+
+int dhd_sock_qos_get_numfl_upgrd_thresh(dhd_info_t *dhd)
+{
+	if (dhd == NULL)
+		return 0;
+
+	return dhd->psk_qos->skfl_upgrade_thresh;
+}
+
+void dhd_sock_qos_set_numfl_upgrd_thresh(dhd_info_t *dhd,
+	int upgrade_thresh)
+{
+	if (dhd == NULL)
+		return;
+
+	dhd->psk_qos->skfl_upgrade_thresh = upgrade_thresh;
+	return;
+}
+
+void dhd_sock_qos_get_avgpktsize_thresh(dhd_info_t *dhd,
+	unsigned long *avgpktsize_low,
+	unsigned long *avgpktsize_high)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL || avgpktsize_low == NULL ||
+		avgpktsize_high == NULL) {
+		return;
+	}
+
+	pqos_params = QOS_PARAMS(dhd);
+	*avgpktsize_low = pqos_params->avg_pkt_size_low_thresh;
+	*avgpktsize_high = pqos_params->avg_pkt_size_high_thresh;
+	return;
+}
+
+void dhd_sock_qos_set_avgpktsize_thresh(dhd_info_t *dhd,
+	unsigned long avgpktsize_low,
+	unsigned long avgpktsize_high)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL)
+		return;
+
+	pqos_params = QOS_PARAMS(dhd);
+	pqos_params->avg_pkt_size_low_thresh = avgpktsize_low;
+	pqos_params->avg_pkt_size_high_thresh = avgpktsize_high;
+	return;
+}
+
+void dhd_sock_qos_get_numpkts_thresh(dhd_info_t *dhd,
+	unsigned long *numpkts_low,
+	unsigned long *numpkts_high)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL || numpkts_low == NULL ||
+		numpkts_high == NULL) {
+		return;
+	}
+
+	pqos_params = QOS_PARAMS(dhd);
+	*numpkts_low = pqos_params->num_pkts_low_thresh;
+	*numpkts_high = pqos_params->num_pkts_high_thresh;
+}
+
+void dhd_sock_qos_set_numpkts_thresh(dhd_info_t *dhd,
+	unsigned long numpkts_low,
+	unsigned long numpkts_high)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL)
+		return;
+	pqos_params = QOS_PARAMS(dhd);
+	pqos_params->num_pkts_low_thresh = numpkts_low;
+	pqos_params->num_pkts_high_thresh = numpkts_high;
+	return;
+}
+
+void dhd_sock_qos_get_detectcnt_thresh(dhd_info_t *dhd,
+	unsigned char *detectcnt_inc,
+	unsigned char *detectcnt_dec)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL || detectcnt_inc == NULL ||
+		detectcnt_dec == NULL) {
+		return;
+	}
+
+	pqos_params = QOS_PARAMS(dhd);
+	*detectcnt_inc = pqos_params->detect_cnt_inc_thresh;
+	*detectcnt_dec = pqos_params->detect_cnt_dec_thresh;
+}
+
+void dhd_sock_qos_set_detectcnt_thresh(dhd_info_t *dhd,
+	unsigned char detectcnt_inc,
+	unsigned char detectcnt_dec)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL)
+		return;
+
+	pqos_params = QOS_PARAMS(dhd);
+	pqos_params->detect_cnt_inc_thresh = detectcnt_inc;
+	pqos_params->detect_cnt_dec_thresh = detectcnt_dec;
+	return;
+}
+
+int dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd_info_t *dhd)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL)
+		return 0;
+
+	pqos_params = QOS_PARAMS(dhd);
+	return pqos_params->detect_cnt_upgrade_thresh;
+}
+
+void dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd_info_t *dhd,
+	unsigned char detect_upgrd_thresh)
+{
+	qos_algo_params_t *pqos_params = NULL;
+
+	if (dhd == NULL)
+		return;
+
+	pqos_params = QOS_PARAMS(dhd);
+	pqos_params->detect_cnt_upgrade_thresh = detect_upgrd_thresh;
+}
+
+int dhd_sock_qos_get_maxfl(dhd_info_t *dhd)
+{
+	if (dhd == NULL)
+		return 0;
+
+	return dhd->psk_qos->max_sock_fl;
+}
+
+void dhd_sock_qos_set_maxfl(dhd_info_t *dhd,
+	unsigned int maxfl)
+{
+	if (dhd == NULL)
+		return;
+
+	/* the sk_fl table is allocated for MAX_SOCK_FLOW entries at init
+	 * time and dhd_alloc_sock_stream_info() walks max_sock_fl entries
+	 * of it, so never raise the limit beyond the allocation
+	 */
+	if (maxfl > MAX_SOCK_FLOW)
+		maxfl = MAX_SOCK_FLOW;
+	dhd->psk_qos->max_sock_fl = maxfl;
+}
+/* ================= End of Sysfs interface support functions ======================== */
+
+/* ================= QOS Algorithm ======================== */
+
+/*
+ * Operates on a flow and returns 1 for upgrade, 0 for no upgrade;
+ * has the potential of moving into a separate file.
+ * Takes the dhd pointer too in case it has to access any platform
+ * functions like MALLOC, which take dhd->pub.osh as an argument.
+ */ +int dhd_qos_algo(dhd_info_t *dhd, qos_stat_t *qos, qos_algo_params_t *pqos_params) +{ + unsigned long tx_bytes, tx_pkts, tx_avg_pkt_size; + + if (!dhd || !qos || !pqos_params) { + return 0; + } + + /* if the user has set the sysfs variable to force upgrade */ + if (atomic_read(&dhd->psk_qos->force_upgrade) == 1) { + return 1; + } + + DHD_TRACE(("%s(): avgpktsize_thrsh %lu:%lu; " + "numpkts_thrs %lu:%lu; detectcnt_thrs %d:%d;" + " detectcnt_upgrd_thrs %d\n", __FUNCTION__, + pqos_params->avg_pkt_size_low_thresh, + pqos_params->avg_pkt_size_high_thresh, + pqos_params->num_pkts_low_thresh, + pqos_params->num_pkts_high_thresh, + pqos_params->detect_cnt_inc_thresh, + pqos_params->detect_cnt_dec_thresh, + pqos_params->detect_cnt_upgrade_thresh)); + + tx_bytes = qos->tx_bytes - qos->tx_bytes_prev; + tx_pkts = qos->tx_pkts - qos->tx_pkts_prev; + if ((tx_bytes == 0) || (tx_pkts == 0)) { + return 0; + } + + tx_avg_pkt_size = tx_bytes / tx_pkts; + + if ((tx_avg_pkt_size > pqos_params->avg_pkt_size_low_thresh) && + (tx_avg_pkt_size < pqos_params->avg_pkt_size_high_thresh) && + (tx_pkts > pqos_params->num_pkts_low_thresh) && + (tx_pkts < pqos_params->num_pkts_high_thresh)) { + if (qos->lowlat_detect_count < pqos_params->detect_cnt_inc_thresh) { + qos->lowlat_detect_count++; + } + } else if (qos->lowlat_detect_count > pqos_params->detect_cnt_dec_thresh) { + qos->lowlat_detect_count--; + } + + if (qos->lowlat_detect_count > pqos_params->detect_cnt_upgrade_thresh) { + qos->lowlat_flow = TRUE; + } else if (qos->lowlat_detect_count == 0) { + qos->lowlat_flow = FALSE; + } + + DHD_TRACE(("%s(): TX:%lu:%lu:%lu, PUBG:%d::%d\n", + __FUNCTION__, tx_avg_pkt_size, tx_bytes, tx_pkts, + qos->lowlat_detect_count, qos->lowlat_flow)); + + return (qos->lowlat_flow == TRUE) ? 1 : 0; +} + +int qos_algo_params_init(qos_algo_params_t *pqos_params) +{ + if (!pqos_params) + return BCME_BADARG; + + memset(pqos_params, 0, sizeof(*pqos_params)); + pqos_params->avg_pkt_size_low_thresh = LOWLAT_AVG_PKT_SIZE_LOW; + pqos_params->avg_pkt_size_high_thresh = LOWLAT_AVG_PKT_SIZE_HIGH; + pqos_params->num_pkts_low_thresh = LOWLAT_NUM_PKTS_LOW; + pqos_params->num_pkts_high_thresh = LOWLAT_NUM_PKTS_HIGH; + pqos_params->detect_cnt_inc_thresh = LOWLAT_DETECT_CNT_INC_THRESH; + pqos_params->detect_cnt_dec_thresh = LOWLAT_DETECT_CNT_DEC_THRESH; + pqos_params->detect_cnt_upgrade_thresh = LOWLAT_DETECT_CNT_UPGRADE_THRESH; + + return BCME_OK; +} +/* ================= End of QOS Algorithm ======================== */ diff --git a/bcmdhd.101.10.361.x/dhd_linux_sock_qos.h b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.h new file mode 100755 index 0000000..641469d --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.h @@ -0,0 +1,118 @@ +/* + * Header file for DHD TPA (Traffic Pattern Analyzer) + * + * Provides type definitions and function prototypes to call into + * DHD's QOS on Socket Flow module. + * + * Copyright (C) 2020, Broadcom. 
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ *
+ */
+
+#ifndef _DHD_LINUX_TPA_H_
+#define _DHD_LINUX_TPA_H_
+
+struct dhd_sock_flow_info;
+
+#if defined(DHD_QOS_ON_SOCK_FLOW)
+#define QOS_SAMPLING_INTVL_MS 100
+/* Feature Enabled original implementation */
+int dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms);
+int dhd_deinit_sock_flows_buf(dhd_info_t *dhd);
+void dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb);
+void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms);
+
+/* sysfs callbacks */
+unsigned long dhd_sock_qos_get_status(dhd_info_t *dhd);
+void dhd_sock_qos_set_status(dhd_info_t *dhd, unsigned long on_off);
+ssize_t dhd_sock_qos_show_stats(dhd_info_t *dhd, char *buf, ssize_t sz);
+void dhd_sock_qos_clear_stats(dhd_info_t *dhd);
+unsigned long dhd_sock_qos_get_force_upgrade(dhd_info_t *dhd);
+void dhd_sock_qos_set_force_upgrade(dhd_info_t *dhd, unsigned long force_upgrade);
+int dhd_sock_qos_get_numfl_upgrd_thresh(dhd_info_t *dhd);
+void dhd_sock_qos_set_numfl_upgrd_thresh(dhd_info_t *dhd, int upgrade_thresh);
+void dhd_sock_qos_get_avgpktsize_thresh(dhd_info_t *dhd,
+	unsigned long *avgpktsize_low,
+	unsigned long *avgpktsize_high);
+void dhd_sock_qos_set_avgpktsize_thresh(dhd_info_t *dhd,
+	unsigned long avgpktsize_low,
+	unsigned long avgpktsize_high);
+void dhd_sock_qos_get_numpkts_thresh(dhd_info_t *dhd,
+	unsigned long *numpkts_low,
+	unsigned long *numpkts_high);
+void dhd_sock_qos_set_numpkts_thresh(dhd_info_t *dhd,
+	unsigned long numpkts_low,
+	unsigned long numpkts_high);
+void dhd_sock_qos_get_detectcnt_thresh(dhd_info_t *dhd,
+	unsigned char *detectcnt_inc,
+	unsigned char *detectcnt_dec);
+void dhd_sock_qos_set_detectcnt_thresh(dhd_info_t *dhd,
+	unsigned char detectcnt_inc,
+	unsigned char detectcnt_dec);
+int dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd_info_t *dhd);
+void dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd_info_t *dhd,
+	unsigned char detect_upgrd_thresh);
+int dhd_sock_qos_get_maxfl(dhd_info_t *dhd);
+void dhd_sock_qos_set_maxfl(dhd_info_t *dhd, unsigned int maxfl);
+
+/* Update from Bus Layer */
+void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
+	uint32 bus_flow_id);
+
+#else
+/* Feature Disabled dummy implementations */
+
+inline int dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms)
+{
+	BCM_REFERENCE(dhd);
+	BCM_REFERENCE(watchdog_ms);
+	return BCME_UNSUPPORTED;
+}
+
+inline int dhd_deinit_sock_flows_buf(dhd_info_t *dhd)
+{
+	BCM_REFERENCE(dhd);
+	return BCME_UNSUPPORTED;
+}
+
+inline void dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb)
+{
+	BCM_REFERENCE(dhd);
+	BCM_REFERENCE(skb);
+	return;
+}
+
+inline void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms)
+{
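+	/*
+	 * No-op stub for builds without DHD_QOS_ON_SOCK_FLOW; the
+	 * BCM_REFERENCE() calls below only silence unused-parameter
+	 * warnings.
+	 */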
+	BCM_REFERENCE(dhd);
+	BCM_REFERENCE(watchdog_ms);
+	return;
+}
+
+inline void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
+	uint32 bus_flow_id)
+{
+	BCM_REFERENCE(dhd);
+	BCM_REFERENCE(pktbuf);
+	BCM_REFERENCE(bus_flow_id);
+}
+#endif /* End of !DHD_QOS_ON_SOCK_FLOW */
+
+#endif /* _DHD_LINUX_TPA_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_wq.c b/bcmdhd.101.10.361.x/dhd_linux_wq.c
new file mode 100755
index 0000000..4bfe72b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_wq.c
@@ -0,0 +1,413 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * XXX: always make sure that the size of this structure is aligned to
+ * the power of 2 (2^n), i.e., if any new variable has to be added then
+ * modify the padding accordingly
+ */
+typedef struct dhd_deferred_event {
+	u8 event;		/* holds the event */
+	void *event_data;	/* holds event specific data */
+	event_handler_t event_handler;
+	unsigned long pad;	/* for memory alignment to power of 2 */
+} dhd_deferred_event_t;
+
+#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))
+
+/*
+ * work events may occur simultaneously.
+ * Can hold up to 64 low-priority events and 16 high-priority events.
+ */
+#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
+#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32))
+#define kfifo_avail(fifo) (fifo->size - kfifo_len(fifo))
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)) */
+
+#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
+	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
+#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
+	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))
+
+struct dhd_deferred_wq {
+	struct work_struct deferred_work; /* should be the first member */
+
+	struct kfifo *prio_fifo;
+	struct kfifo *work_fifo;
+	u8 *prio_fifo_buf;
+	u8 *work_fifo_buf;
+	spinlock_t work_lock;
+	void *dhd_info; /* review: is this required? */
+	u32 event_skip_mask;
+};
+
+static inline struct kfifo*
+dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
+{
+	struct kfifo *fifo;
+	gfp_t flags = CAN_SLEEP()?
GFP_KERNEL : GFP_ATOMIC; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) + fifo = kfifo_init(buf, size, flags, lock); +#else + fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags); + if (!fifo) { + return NULL; + } + kfifo_init(fifo, buf, size); +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ + return fifo; +} + +static inline void +dhd_kfifo_free(struct kfifo *fifo) +{ + kfifo_free(fifo); + kfree(fifo); +} + +/* deferred work functions */ +static void dhd_deferred_work_handler(struct work_struct *data); + +void* +dhd_deferred_work_init(void *dhd_info) +{ + struct dhd_deferred_wq *work = NULL; + u8* buf; + unsigned long fifo_size = 0; + gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; + + if (!dhd_info) { + DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__)); + goto return_null; + } + + work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), + flags); + if (!work) { + DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__)); + goto return_null; + } + + INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler); + + /* initialize event fifo */ + spin_lock_init(&work->work_lock); + + /* allocate buffer to hold prio events */ + fifo_size = DHD_PRIO_WORK_FIFO_SIZE; + fifo_size = is_power_of_2(fifo_size) ? fifo_size : + roundup_pow_of_two(fifo_size); + buf = (u8*)kzalloc(fifo_size, flags); + if (!buf) { + DHD_ERROR(("%s: prio work fifo allocation failed\n", + __FUNCTION__)); + goto return_null; + } + + /* Initialize prio event fifo */ + work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); + if (!work->prio_fifo) { + kfree(buf); + goto return_null; + } + + /* allocate buffer to hold work events */ + fifo_size = DHD_WORK_FIFO_SIZE; + fifo_size = is_power_of_2(fifo_size) ? fifo_size : + roundup_pow_of_two(fifo_size); + buf = (u8*)kzalloc(fifo_size, flags); + if (!buf) { + DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__)); + goto return_null; + } + + /* Initialize event fifo */ + work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); + if (!work->work_fifo) { + kfree(buf); + goto return_null; + } + + work->dhd_info = dhd_info; + work->event_skip_mask = 0; + DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__)); + return work; + +return_null: + if (work) { + dhd_deferred_work_deinit(work); + } + + return NULL; +} + +void +dhd_deferred_work_deinit(void *work) +{ + struct dhd_deferred_wq *deferred_work = work; + + if (!deferred_work) { + DHD_ERROR(("%s: deferred work has been freed already\n", + __FUNCTION__)); + return; + } + + /* cancel the deferred work handling */ + cancel_work_sync((struct work_struct *)deferred_work); + + /* + * free work event fifo. 
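+	 * (note: dhd_kfifo_free() also kfree()s the struct kfifo wrapper
+	 * that dhd_kfifo_init() allocated)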
+	 * kfifo_free frees locally allocated fifo buffer
+	 */
+	if (deferred_work->prio_fifo) {
+		dhd_kfifo_free(deferred_work->prio_fifo);
+	}
+
+	if (deferred_work->work_fifo) {
+		dhd_kfifo_free(deferred_work->work_fifo);
+	}
+
+	kfree(deferred_work);
+}
+
+/* select kfifo according to priority */
+static inline struct kfifo *
+dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
+	u8 priority)
+{
+	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
+		return deferred_wq->prio_fifo;
+	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
+		return deferred_wq->work_fifo;
+	} else {
+		return NULL;
+	}
+}
+
+/*
+ * Prepares event to be queued
+ * Schedules the event
+ */
+int
+dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t event_handler, u8 priority)
+{
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
+	struct kfifo *fifo;
+	dhd_deferred_event_t deferred_event;
+	int bytes_copied = 0;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		ASSERT(0);
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
+			event));
+		return DHD_WQ_STS_UNKNOWN_EVENT;
+	}
+
+	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
+		DHD_ERROR(("%s: unknown priority, priority=%d\n",
+			__FUNCTION__, priority));
+		return DHD_WQ_STS_UNKNOWN_PRIORITY;
+	}
+
+	if ((deferred_wq->event_skip_mask & (1 << event))) {
+		DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n",
+			__FUNCTION__, deferred_wq->event_skip_mask));
+		return DHD_WQ_STS_EVENT_SKIPPED;
+	}
+
+	/*
+	 * default element size is 1, which can be changed
+	 * using kfifo_esize(). Older kernels (FC11) don't support
+	 * changing the element size, so for compatibility changing
+	 * the element size is not preferred.
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	deferred_event.event = event;
+	deferred_event.event_data = event_data;
+	deferred_event.event_handler = event_handler;
+
+	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
+	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
+		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+	if (bytes_copied != DEFRD_EVT_SIZE) {
+		DHD_ERROR(("%s: failed to schedule deferred work, "
+			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
+			priority, bytes_copied));
+		return DHD_WQ_STS_SCHED_FAILED;
+	}
+	schedule_work((struct work_struct *)deferred_wq);
+	return DHD_WQ_STS_OK;
+}
+
+static bool
+dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
+	dhd_deferred_event_t *event)
+{
+	int bytes_copied = 0;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	/*
+	 * default element size is 1 byte, which can be changed
+	 * using kfifo_esize(). Older kernels (FC11) don't support
+	 * changing element size.
+	 * For compatibility, changing the element size is not preferred.
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	/* handle priority work */
+	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
+		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
+			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	/* handle normal work if priority work doesn't have enough data */
+	if ((bytes_copied != DEFRD_EVT_SIZE) &&
+		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
+		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
+			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	return (bytes_copied == DEFRD_EVT_SIZE);
+}
+
+static inline void
+dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
+{
+	if (!work_event) {
+		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
+		work_event->event));
+	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
+		work_event->event_data));
+	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
+		work_event->event_handler));
+}
+
+/*
+ * Called when work is scheduled
+ */
+static void
+dhd_deferred_work_handler(struct work_struct *work)
+{
+	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
+	dhd_deferred_event_t work_event;
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		return;
+	}
+
+	do {
+		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
+			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
+			break;
+		}
+
+		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
+			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
+			dhd_deferred_dump_work_event(&work_event);
+			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
+			continue;
+		}
+
+		/*
+		 * XXX: don't do NULL check for 'work_event.event_data'
+		 * as for some events like DHD_WQ_WORK_DHD_LOG_DUMP the
+		 * event data is always NULL even though rest of the
+		 * event parameters are valid
+		 */
+
+		if (work_event.event_handler) {
+			work_event.event_handler(deferred_work->dhd_info,
+				work_event.event_data, work_event.event);
+		} else {
+			DHD_ERROR(("%s: event handler is null\n",
+				__FUNCTION__));
+			dhd_deferred_dump_work_event(&work_event);
+			ASSERT(work_event.event_handler != NULL);
+		}
+	} while (1);
+
+	return;
+}
+
+void
+dhd_deferred_work_set_skip(void *work, u8 event, bool set)
+{
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;
+
+	if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: Invalid!!\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		/* Set */
+		deferred_wq->event_skip_mask |= (1 << event);
+	} else {
+		/* Clear */
+		deferred_wq->event_skip_mask &= ~(1 << event);
+	}
+}
diff --git a/bcmdhd.101.10.361.x/dhd_linux_wq.h b/bcmdhd.101.10.361.x/dhd_linux_wq.h
new file mode 100755
index 0000000..42c5a88
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_wq.h
@@ -0,0 +1,89 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ +#ifndef _dhd_linux_wq_h_ +#define _dhd_linux_wq_h_ +/* + * Work event definitions + */ +enum _wq_event { + DHD_WQ_WORK_IF_ADD = 1, + DHD_WQ_WORK_IF_DEL, + DHD_WQ_WORK_SET_MAC, + DHD_WQ_WORK_SET_MCAST_LIST, + DHD_WQ_WORK_IPV6_NDO, + DHD_WQ_WORK_HANG_MSG, + DHD_WQ_WORK_DHD_LOG_DUMP, + DHD_WQ_WORK_PKTLOG_DUMP, + DHD_WQ_WORK_INFORM_DHD_MON, + DHD_WQ_WORK_EVENT_LOGTRACE, + DHD_WQ_WORK_DMA_LB_MEM_REL, + DHD_WQ_WORK_NATOE_EVENT, + DHD_WQ_WORK_NATOE_IOCTL, + DHD_WQ_WORK_MACDBG, + DHD_WQ_WORK_DEBUG_UART_DUMP, + DHD_WQ_WORK_GET_BIGDATA_AP, + DHD_WQ_WORK_SOC_RAM_DUMP, + DHD_WQ_WORK_SOC_RAM_COLLECT, +#ifdef DHD_ERPOM + DHD_WQ_WORK_ERROR_RECOVERY, +#endif /* DHD_ERPOM */ + DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH, + DHD_WQ_WORK_AXI_ERROR_DUMP, + DHD_WQ_WORK_CTO_RECOVERY, +#ifdef DHD_UPDATE_INTF_MAC + DHD_WQ_WORK_IF_UPDATE, +#endif /* DHD_UPDATE_INTF_MAC */ + DHD_MAX_WQ_EVENTS +}; + +/* + * Work event priority + */ +enum wq_priority { + DHD_WQ_WORK_PRIORITY_LOW = 1, + DHD_WQ_WORK_PRIORITY_HIGH, + DHD_WQ_MAX_PRIORITY +}; + +/* + * Error definitions + */ +#define DHD_WQ_STS_OK 0 +#define DHD_WQ_STS_FAILED -1 /* General failure */ +#define DHD_WQ_STS_UNINITIALIZED -2 +#define DHD_WQ_STS_SCHED_FAILED -3 +#define DHD_WQ_STS_UNKNOWN_EVENT -4 +#define DHD_WQ_STS_UNKNOWN_PRIORITY -5 +#define DHD_WQ_STS_EVENT_SKIPPED -6 + +typedef void (*event_handler_t)(void *handle, void *event_data, u8 event); + +void *dhd_deferred_work_init(void *dhd); +void dhd_deferred_work_deinit(void *workq); +int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, + event_handler_t evt_handler, u8 priority); +void dhd_deferred_work_set_skip(void *work, u8 event, bool set); +#endif /* _dhd_linux_wq_h_ */ diff --git a/bcmdhd.101.10.361.x/dhd_macdbg.c b/bcmdhd.101.10.361.x/dhd_macdbg.c new file mode 100755 index 0000000..dd145df --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_macdbg.c @@ -0,0 +1,746 @@ +/* D11 macdbg functions for Broadcom 802.11abgn + * Networking Adapter Device Drivers. + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. 
+ * + * + * <> + * + * $Id: dhd_macdbg.c 670412 2016-11-15 20:01:18Z shinuk $ + */ + +#ifdef BCMDBG +#include +#include + +#include +#include +#include +#include "d11reglist_proto.h" +#include "dhdioctl.h" +#include + +#ifdef BCMDBUS +#include +#define BUS_IOVAR_OP(a, b, c, d, e, f, g) dbus_iovar_op(a->dbus, b, c, d, e, f, g) +#else +#include +#define BUS_IOVAR_OP dhd_bus_iovar_op +#endif + +typedef struct _macdbg_info_t { + dhd_pub_t *dhdp; + d11regs_list_t *pd11regs; + uint16 d11regs_sz; + d11regs_list_t *pd11regs_x; + uint16 d11regsx_sz; + svmp_list_t *psvmpmems; + uint16 svmpmems_sz; +} macdbg_info_t; + +#define SVMPLIST_HARDCODE + +int +dhd_macdbg_attach(dhd_pub_t *dhdp) +{ + macdbg_info_t *macdbg_info = MALLOCZ(dhdp->osh, sizeof(*macdbg_info)); +#ifdef SVMPLIST_HARDCODE + svmp_list_t svmpmems[] = { + {0x20000, 256}, + {0x21e10, 16}, + {0x20300, 16}, + {0x20700, 16}, + {0x20b00, 16}, + {0x20be0, 16}, + {0x20bff, 16}, + {0xc000, 32}, + {0xe000, 32}, + {0x10000, 0x8000}, + {0x18000, 0x8000} + }; +#endif /* SVMPLIST_HARDCODE */ + + if (macdbg_info == NULL) { + return BCME_NOMEM; + } + dhdp->macdbg_info = macdbg_info; + macdbg_info->dhdp = dhdp; + +#ifdef SVMPLIST_HARDCODE + macdbg_info->psvmpmems = MALLOCZ(dhdp->osh, sizeof(svmpmems)); + if (macdbg_info->psvmpmems == NULL) { + return BCME_NOMEM; + } + + macdbg_info->svmpmems_sz = ARRAYSIZE(svmpmems); + memcpy(macdbg_info->psvmpmems, svmpmems, sizeof(svmpmems)); + + DHD_ERROR(("%s: psvmpmems %p svmpmems_sz %d\n", + __FUNCTION__, macdbg_info->psvmpmems, macdbg_info->svmpmems_sz)); +#endif + return BCME_OK; +} + +void +dhd_macdbg_detach(dhd_pub_t *dhdp) +{ + macdbg_info_t *macdbg_info = dhdp->macdbg_info; + ASSERT(macdbg_info); + + if (macdbg_info->pd11regs) { + ASSERT(macdbg_info->d11regs_sz > 0); + MFREE(dhdp->osh, macdbg_info->pd11regs, + (macdbg_info->d11regs_sz * sizeof(macdbg_info->pd11regs[0]))); + macdbg_info->d11regs_sz = 0; + } + if (macdbg_info->pd11regs_x) { + ASSERT(macdbg_info->d11regsx_sz > 0); + MFREE(dhdp->osh, macdbg_info->pd11regs_x, + (macdbg_info->d11regsx_sz * sizeof(macdbg_info->pd11regs_x[0]))); + macdbg_info->d11regsx_sz = 0; + } + if (macdbg_info->psvmpmems) { + ASSERT(macdbg_info->svmpmems_sz > 0); + MFREE(dhdp->osh, macdbg_info->psvmpmems, + (macdbg_info->svmpmems_sz * sizeof(macdbg_info->psvmpmems[0]))); + macdbg_info->svmpmems_sz = 0; + } + MFREE(dhdp->osh, macdbg_info, sizeof(*macdbg_info)); +} + +void +dhd_macdbg_event_handler(dhd_pub_t *dhdp, uint32 reason, + uint8 *event_data, uint32 datalen) +{ + d11regs_list_t *pd11regs; + macdbg_info_t *macdbg_info = dhdp->macdbg_info; + uint d11regs_sz; + + DHD_TRACE(("%s: reason %d datalen %d\n", __FUNCTION__, reason, datalen)); + switch (reason) { + case WLC_E_MACDBG_LIST_PSMX: + /* Fall through */ + case WLC_E_MACDBG_LIST_PSM: + pd11regs = MALLOCZ(dhdp->osh, datalen); + if (pd11regs == NULL) { + DHD_ERROR(("%s: NOMEM for len %d\n", __FUNCTION__, datalen)); + return; + } + memcpy(pd11regs, event_data, datalen); + d11regs_sz = datalen / sizeof(pd11regs[0]); + DHD_ERROR(("%s: d11regs %p d11regs_sz %d\n", + __FUNCTION__, pd11regs, d11regs_sz)); + if (reason == WLC_E_MACDBG_LIST_PSM) { + macdbg_info->pd11regs = pd11regs; + macdbg_info->d11regs_sz = (uint16)d11regs_sz; + } else { + macdbg_info->pd11regs_x = pd11regs; + macdbg_info->d11regsx_sz = (uint16)d11regs_sz; + } + break; + case WLC_E_MACDBG_REGALL: +#ifdef LINUX + /* Schedule to work queue as this context could be ISR */ + dhd_schedule_macdbg_dump(dhdp); +#else + /* Dump PSMr */ + (void) dhd_macdbg_dumpmac(dhdp, 
NULL, 0, NULL, FALSE); + /* Dump PSMx */ + (void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, TRUE); + /* Dump SVMP mems */ + (void) dhd_macdbg_dumpsvmp(dhdp, NULL, 0, NULL); +#endif + break; + default: + DHD_ERROR(("%s: Unknown reason %d\n", + __FUNCTION__, reason)); + } + return; +} + +static uint16 +_dhd_get_ihr16(macdbg_info_t *macdbg_info, uint16 addr, struct bcmstrbuf *b, bool verbose) +{ + sdreg_t sdreg; + uint16 val; + + sdreg.func = 2; + sdreg.offset = (0x1000 | addr); + BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg", + &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET); + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: IHR16: read 0x%08x, size 2, value 0x%04x\n", + (addr + 0x18001000), val); + } else { + printf("DEBUG: IHR16: read 0x%08x, size 2, value 0x%04x\n", + (addr + 0x18001000), val); + } + } + return val; +} + +static uint32 +_dhd_get_ihr32(macdbg_info_t *macdbg_info, uint16 addr, struct bcmstrbuf *b, bool verbose) +{ + sdreg_t sdreg; + uint32 val; + + sdreg.func = 4; + sdreg.offset = (0x1000 | addr); + BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg", + &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET); + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: IHR32: read 0x%08x, size 4, value 0x%08x\n", + (addr + 0x18001000), val); + } else { + printf("DEBUG: IHR32: read 0x%08x, size 4, value 0x%08x\n", + (addr + 0x18001000), val); + } + } + return val; +} + +static void +_dhd_set_ihr16(macdbg_info_t *macdbg_info, uint16 addr, uint16 val, + struct bcmstrbuf *b, bool verbose) +{ + sdreg_t sdreg; + + sdreg.func = 2; + sdreg.offset = (0x1000 | addr); + sdreg.value = val; + + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: IHR16: write 0x%08x, size 2, value 0x%04x\n", + (addr + 0x18001000), val); + } else { + printf("DEBUG: IHR16: write 0x%08x, size 2, value 0x%04x\n", + (addr + 0x18001000), val); + } + } + BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg", + NULL, 0, &sdreg, sizeof(sdreg), IOV_SET); +} + +static void +_dhd_set_ihr32(macdbg_info_t *macdbg_info, uint16 addr, uint32 val, + struct bcmstrbuf *b, bool verbose) +{ + sdreg_t sdreg; + + sdreg.func = 4; + sdreg.offset = (0x1000 | addr); + sdreg.value = val; + + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: IHR32: write 0x%08x, size 4, value 0x%08x\n", + (addr + 0x18001000), val); + } else { + printf("DEBUG: IHR32: write 0x%08x, size 4, value 0x%08x\n", + (addr + 0x18001000), val); + } + } + BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg", + NULL, 0, &sdreg, sizeof(sdreg), IOV_SET); +} + +static uint32 +_dhd_get_d11obj32(macdbg_info_t *macdbg_info, uint16 objaddr, uint32 sel, + struct bcmstrbuf *b, bool verbose) +{ + uint32 val; + sdreg_t sdreg; + sdreg.func = 4; // 4bytes by default. + sdreg.offset = 0x1160; + + if (objaddr == 0xffff) { + if (verbose) { + goto objaddr_read; + } else { + goto objdata_read; + } + } + + if (objaddr & 0x3) { + printf("%s: ERROR! Invalid addr 0x%x\n", __FUNCTION__, objaddr); + } + + sdreg.value = (sel | (objaddr >> 2)); + + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: %s: Indirect: write 0x%08x, size %d, value 0x%08x\n", + (sel & 0x00020000) ? "SCR":"SHM", + (sdreg.offset + 0x18000000), sdreg.func, sdreg.value); + } else { + printf("DEBUG: %s: Indirect: write 0x%08x, size %d, value 0x%08x\n", + (sel & 0x00020000) ? 
"SCR":"SHM", + (sdreg.offset + 0x18000000), sdreg.func, sdreg.value); + } + } + BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg", + NULL, 0, &sdreg, sizeof(sdreg), IOV_SET); + +objaddr_read: + /* Give some time to obj addr register */ + BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg", + &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET); + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%08x\n", + (sel & 0x00020000) ? "SCR":"SHM", + (sdreg.offset + 0x18000000), sdreg.func, val); + } else { + printf("DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%08x\n", + (sel & 0x00020000) ? "SCR":"SHM", + (sdreg.offset + 0x18000000), sdreg.func, val); + } + } + +objdata_read: + sdreg.offset = 0x1164; + BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg", + &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET); + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%04x\n", + (sel & 0x00020000) ? "SCR":"SHM", + (sdreg.offset + 0x18000000), sdreg.func, val); + } else { + printf("DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%04x\n", + (sel & 0x00020000) ? "SCR":"SHM", + (sdreg.offset + 0x18000000), sdreg.func, val); + } + } + return val; +} + +static uint16 +_dhd_get_d11obj16(macdbg_info_t *macdbg_info, uint16 objaddr, + uint32 sel, d11obj_cache_t *obj_cache, struct bcmstrbuf *b, bool verbose) +{ + uint32 val; + if (obj_cache && obj_cache->cache_valid && ((obj_cache->sel ^ sel) & (0xffffff)) == 0) { + if (obj_cache->addr32 == (objaddr & ~0x3)) { + /* XXX: Same objaddr read as the previous one */ + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: %s: Read cache value: " + "addr32 0x%04x, sel 0x%08x, value 0x%08x\n", + (sel & 0x00020000) ? "SCR":"SHM", + obj_cache->addr32, obj_cache->sel, obj_cache->val); + } else { + printf("DEBUG: %s: Read cache value: " + "addr32 0x%04x, sel 0x%08x, value 0x%08x\n", + (sel & 0x00020000) ? "SCR":"SHM", + obj_cache->addr32, obj_cache->sel, obj_cache->val); + } + } + val = obj_cache->val; + goto exit; + } else if ((obj_cache->sel & 0x02000000) && + (obj_cache->addr32 + 4 == (objaddr & ~0x3))) { + /* XXX: objaddr is auto incrementing, so just read objdata */ + if (verbose) { + if (b) { + bcm_bprintf(b, "DEBUG: %s: Read objdata only: " + "addr32 0x%04x, sel 0x%08x, value 0x%08x\n", + (sel & 0x00020000) ? "SCR":"SHM", + obj_cache->addr32, obj_cache->sel, obj_cache->val); + } else { + printf("DEBUG: %s: Read objdata only: " + "addr32 0x%04x, sel 0x%08x, value 0x%08x\n", + (sel & 0x00020000) ? "SCR":"SHM", + obj_cache->addr32, obj_cache->sel, obj_cache->val); + } + } + val = _dhd_get_d11obj32(macdbg_info, 0xffff, sel, b, verbose); + goto exit; + } + } + val = _dhd_get_d11obj32(macdbg_info, (objaddr & ~0x2), sel, b, verbose); +exit: + if (obj_cache) { + obj_cache->addr32 = (objaddr & ~0x3); + obj_cache->sel = sel; + obj_cache->val = val; + obj_cache->cache_valid = TRUE; + } + return (uint16)((objaddr & 0x2) ? (val >> 16) : val); +} + +static int +_dhd_print_d11reg(macdbg_info_t *macdbg_info, int idx, int type, uint16 addr, struct bcmstrbuf *b, + d11obj_cache_t *obj_cache, bool verbose) +{ + const char *regname[D11REG_TYPE_MAX] = D11REGTYPENAME; + uint32 val; + + if (type == D11REG_TYPE_IHR32) { + if ((addr & 0x3)) { + printf("%s: ERROR! 
Invalid addr 0x%x\n", __FUNCTION__, addr); + addr &= ~0x3; + } + val = _dhd_get_ihr32(macdbg_info, addr, b, verbose); + if (b) { + bcm_bprintf(b, "%-3d %s 0x%-4x = 0x%-8x\n", + idx, regname[type], addr, val); + } else { + printf("%-3d %s 0x%-4x = 0x%-8x\n", + idx, regname[type], addr, val); + } + } else { + switch (type) { + case D11REG_TYPE_IHR16: { + if ((addr & 0x1)) { + printf("%s: ERROR! Invalid addr 0x%x\n", __FUNCTION__, addr); + addr &= ~0x1; + } + val = _dhd_get_ihr16(macdbg_info, addr, b, verbose); + break; + } + case D11REG_TYPE_IHRX16: + val = _dhd_get_d11obj16(macdbg_info, (addr - 0x400) << 1, 0x020b0000, + obj_cache, b, verbose); + break; + case D11REG_TYPE_SCR: + val = _dhd_get_d11obj16(macdbg_info, addr << 2, 0x02020000, + obj_cache, b, verbose); + break; + case D11REG_TYPE_SCRX: + val = _dhd_get_d11obj16(macdbg_info, addr << 2, 0x020a0000, + obj_cache, b, verbose); + break; + case D11REG_TYPE_SHM: + val = _dhd_get_d11obj16(macdbg_info, addr, 0x02010000, + obj_cache, b, verbose); + break; + case D11REG_TYPE_SHMX: + val = _dhd_get_d11obj16(macdbg_info, addr, 0x02090000, + obj_cache, b, verbose); + break; + default: + printf("Unrecognized type %d!\n", type); + return 0; + } + if (b) { + bcm_bprintf(b, "%-3d %s 0x%-4x = 0x%-4x\n", + idx, regname[type], addr, val); + } else { + printf("%-3d %s 0x%-4x = 0x%-4x\n", + idx, regname[type], addr, val); + } + } + return 1; +} + +static int +_dhd_print_d11regs(macdbg_info_t *macdbg_info, d11regs_list_t *pregs, + int start_idx, struct bcmstrbuf *b, bool verbose) +{ + uint16 addr; + int idx = 0; + d11obj_cache_t obj_cache = {0, 0, 0, FALSE}; + + addr = pregs->addr; + if (pregs->type >= D11REG_TYPE_MAX) { + printf("%s: wrong type %d\n", __FUNCTION__, pregs->type); + return 0; + } + if (pregs->bitmap) { + while (pregs->bitmap) { + if (pregs->bitmap && (pregs->bitmap & 0x1)) { + _dhd_print_d11reg(macdbg_info, (idx + start_idx), pregs->type, + addr, b, &obj_cache, verbose); + idx++; + } + pregs->bitmap = pregs->bitmap >> 1; + addr += pregs->step; + } + } else { + for (; idx < pregs->cnt; idx++) { + _dhd_print_d11reg(macdbg_info, (idx + start_idx), pregs->type, + addr, b, &obj_cache, verbose); + addr += pregs->step; + } + } + return idx; +} + +static int +_dhd_pd11regs_bylist(macdbg_info_t *macdbg_info, d11regs_list_t *reglist, + uint16 reglist_sz, struct bcmstrbuf *b) +{ + uint i, idx = 0; + + if (reglist != NULL && reglist_sz > 0) { + for (i = 0; i < reglist_sz; i++) { + DHD_TRACE(("%s %d %p %d\n", __FUNCTION__, __LINE__, + ®list[i], reglist_sz)); + idx += _dhd_print_d11regs(macdbg_info, ®list[i], idx, b, FALSE); + } + } + return idx; +} + +int +dhd_macdbg_dumpmac(dhd_pub_t *dhdp, char *buf, int buflen, + int *outbuflen, bool dump_x) +{ + macdbg_info_t *macdbg_info = dhdp->macdbg_info; + struct bcmstrbuf *b = NULL; + struct bcmstrbuf bcmstrbuf; + uint cnt = 0; + + DHD_TRACE(("%s %d %p %d %p %d %p %d\n", __FUNCTION__, __LINE__, + buf, buflen, macdbg_info->pd11regs, macdbg_info->d11regs_sz, + macdbg_info->pd11regs_x, macdbg_info->d11regsx_sz)); + + if (buf && buflen > 0) { + bcm_binit(&bcmstrbuf, buf, buflen); + b = &bcmstrbuf; + } + if (!dump_x) { + /* Dump PSMr */ + cnt += _dhd_pd11regs_bylist(macdbg_info, macdbg_info->pd11regs, + macdbg_info->d11regs_sz, b); + } else { + /* Dump PSMx */ + cnt += _dhd_pd11regs_bylist(macdbg_info, macdbg_info->pd11regs_x, + macdbg_info->d11regsx_sz, b); + } + + if (b && outbuflen) { + if ((uint)buflen > BCMSTRBUF_LEN(b)) { + *outbuflen = buflen - BCMSTRBUF_LEN(b); + } else { + DHD_ERROR(("%s: buflen 
insufficient!\n", __FUNCTION__)); + *outbuflen = buflen; + /* Do not return buftooshort to allow printing macregs we have got */ + } + } + + return ((cnt > 0) ? BCME_OK : BCME_UNSUPPORTED); +} + +int +dhd_macdbg_pd11regs(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen) +{ + macdbg_info_t *macdbg_info = dhdp->macdbg_info; + dhd_pd11regs_param *pd11regs = (void *)params; + dhd_pd11regs_buf *pd11regs_buf = (void *)buf; + uint16 start_idx; + bool verbose; + d11regs_list_t reglist; + struct bcmstrbuf *b = NULL; + struct bcmstrbuf bcmstrbuf; + + start_idx = pd11regs->start_idx; + verbose = pd11regs->verbose; + memcpy(®list, pd11regs->plist, sizeof(reglist)); + memset(buf, '\0', buflen); + bcm_binit(&bcmstrbuf, (char *)(pd11regs_buf->pbuf), + (buflen - OFFSETOF(dhd_pd11regs_buf, pbuf))); + b = &bcmstrbuf; + pd11regs_buf->idx = (uint16)_dhd_print_d11regs(macdbg_info, ®list, + start_idx, b, verbose); + + return ((pd11regs_buf->idx > 0) ? BCME_OK : BCME_ERROR); +} + +int +dhd_macdbg_reglist(dhd_pub_t *dhdp, char *buf, int buflen) +{ + int err, desc_idx = 0; + dhd_maclist_t *maclist = (dhd_maclist_t *)buf; + macdbg_info_t *macdbg_info = dhdp->macdbg_info; + void *xtlvbuf_p = maclist->plist; + uint16 xtlvbuflen = (uint16)buflen; + xtlv_desc_t xtlv_desc[] = { + {0, 0, NULL}, + {0, 0, NULL}, + {0, 0, NULL}, + {0, 0, NULL} + }; + + if (!macdbg_info->pd11regs) { + err = BCME_NOTFOUND; + goto exit; + } + ASSERT(macdbg_info->d11regs_sz > 0); + xtlv_desc[desc_idx].type = DHD_MACLIST_XTLV_R; + xtlv_desc[desc_idx].len = + macdbg_info->d11regs_sz * (uint16)sizeof(*(macdbg_info->pd11regs)); + xtlv_desc[desc_idx].ptr = macdbg_info->pd11regs; + desc_idx++; + + if (macdbg_info->pd11regs_x) { + ASSERT(macdbg_info->d11regsx_sz); + xtlv_desc[desc_idx].type = DHD_MACLIST_XTLV_X; + xtlv_desc[desc_idx].len = macdbg_info->d11regsx_sz * + (uint16)sizeof(*(macdbg_info->pd11regs_x)); + xtlv_desc[desc_idx].ptr = macdbg_info->pd11regs_x; + desc_idx++; + } + + if (macdbg_info->psvmpmems) { + ASSERT(macdbg_info->svmpmems_sz); + xtlv_desc[desc_idx].type = DHD_SVMPLIST_XTLV; + xtlv_desc[desc_idx].len = macdbg_info->svmpmems_sz * + (uint16)sizeof(*(macdbg_info->psvmpmems)); + xtlv_desc[desc_idx].ptr = macdbg_info->psvmpmems; + desc_idx++; + } + + err = bcm_pack_xtlv_buf_from_mem((uint8 **)&xtlvbuf_p, &xtlvbuflen, + xtlv_desc, BCM_XTLV_OPTION_ALIGN32); + + maclist->version = 0; /* No version control for now anyway */ + maclist->bytes_len = (buflen - xtlvbuflen); + +exit: + return err; +} + +static int +_dhd_print_svmps(macdbg_info_t *macdbg_info, svmp_list_t *psvmp, + int start_idx, struct bcmstrbuf *b, bool verbose) +{ + int idx; + uint32 addr, mem_id, offset, prev_mem_id, prev_offset; + uint16 cnt, val; + + BCM_REFERENCE(start_idx); + + /* Set tbl ID and tbl offset. */ + _dhd_set_ihr32(macdbg_info, 0x3fc, 0x30000d, b, verbose); + _dhd_set_ihr32(macdbg_info, 0x3fc, 0x8000000e, b, verbose); + + addr = psvmp->addr; + cnt = psvmp->cnt; + + /* In validate previous mem_id and offset */ + prev_mem_id = (uint32)(-1); + prev_offset = (uint32)(-1); + + for (idx = 0; idx < cnt; idx++, addr++) { + mem_id = (addr >> 15); + offset = (addr & 0x7fff) >> 1; + + if (mem_id != prev_mem_id) { + /* Set mem_id */ + _dhd_set_ihr32(macdbg_info, 0x3fc, ((mem_id & 0xffff0000) | 0x10), + b, verbose); + _dhd_set_ihr32(macdbg_info, 0x3fc, ((mem_id << 16) | 0xf), + b, verbose); + } + + if (offset != prev_offset) { + /* XXX: Is this needed? 
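+			 * i.e., whether the table-select write (0x30000d)
+			 * needs to be re-issued before each new offset: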
+			 * _dhd_set_ihr32(macdbg_info, 0x3fc, 0x30000d, b, verbose);
+			 */
+			/* svmp offset */
+			_dhd_set_ihr32(macdbg_info, 0x3fc, ((offset << 16) | 0xe),
+				b, verbose);
+		}
+		/* Read hi or lo */
+		_dhd_set_ihr16(macdbg_info, 0x3fc, ((addr & 0x1) ? 0x10 : 0xf), b, verbose);
+		val = _dhd_get_ihr16(macdbg_info, 0x3fe, b, verbose);
+		if (b) {
+			bcm_bprintf(b, "0x%-4x 0x%-4x\n",
+				addr, val);
+
+		} else {
+			printf("0x%-4x 0x%-4x\n",
+				addr, val);
+		}
+		prev_mem_id = mem_id;
+		prev_offset = offset;
+	}
+	return idx;
+}
+
+static int
+_dhd_psvmps_bylist(macdbg_info_t *macdbg_info, svmp_list_t *svmplist,
+	uint16 svmplist_sz, struct bcmstrbuf *b)
+{
+	uint i, idx = 0;
+
+	if (svmplist != NULL && svmplist_sz > 0) {
+		for (i = 0; i < svmplist_sz; i++) {
+			DHD_TRACE(("%s %d %p %d\n", __FUNCTION__, __LINE__,
+				&svmplist[i], svmplist_sz));
+			idx += _dhd_print_svmps(macdbg_info, &svmplist[i], idx, b, FALSE);
+		}
+	}
+	return idx;
+}
+
+int
+dhd_macdbg_dumpsvmp(dhd_pub_t *dhdp, char *buf, int buflen,
+	int *outbuflen)
+{
+	macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+	struct bcmstrbuf *b = NULL;
+	struct bcmstrbuf bcmstrbuf;
+	uint cnt = 0;
+
+	DHD_TRACE(("%s %d %p %d %p %d\n", __FUNCTION__, __LINE__,
+		buf, buflen, macdbg_info->psvmpmems, macdbg_info->svmpmems_sz));
+
+	if (buf && buflen > 0) {
+		bcm_binit(&bcmstrbuf, buf, buflen);
+		b = &bcmstrbuf;
+	}
+	cnt = _dhd_psvmps_bylist(macdbg_info, macdbg_info->psvmpmems,
+		macdbg_info->svmpmems_sz, b);
+
+	if (b && outbuflen) {
+		if ((uint)buflen > BCMSTRBUF_LEN(b)) {
+			*outbuflen = buflen - BCMSTRBUF_LEN(b);
+		} else {
+			DHD_ERROR(("%s: buflen insufficient!\n", __FUNCTION__));
+			*outbuflen = buflen;
+			/* Do not return buftooshort to allow printing macregs we have got */
+		}
+	}
+
+	return ((cnt > 0) ? BCME_OK : BCME_UNSUPPORTED);
+}
+
+int
+dhd_macdbg_psvmpmems(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen)
+{
+	macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+	dhd_pd11regs_param *pd11regs = (void *)params;
+	dhd_pd11regs_buf *pd11regs_buf = (void *)buf;
+	uint16 start_idx;
+	bool verbose;
+	svmp_list_t reglist;
+	struct bcmstrbuf *b = NULL;
+	struct bcmstrbuf bcmstrbuf;
+
+	start_idx = pd11regs->start_idx;
+	verbose = pd11regs->verbose;
+	memcpy(&reglist, pd11regs->plist, sizeof(reglist));
+	memset(buf, '\0', buflen);
+	bcm_binit(&bcmstrbuf, (char *)(pd11regs_buf->pbuf),
+		(buflen - OFFSETOF(dhd_pd11regs_buf, pbuf)));
+	b = &bcmstrbuf;
+	pd11regs_buf->idx = (uint16)_dhd_print_svmps(macdbg_info, &reglist,
+		start_idx, b, verbose);
+
+	return ((pd11regs_buf->idx > 0) ? BCME_OK : BCME_ERROR);
+}
+
+#endif /* BCMDBG */
diff --git a/bcmdhd.101.10.361.x/dhd_macdbg.h b/bcmdhd.101.10.361.x/dhd_macdbg.h
new file mode 100755
index 0000000..2175137
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_macdbg.h
@@ -0,0 +1,34 @@
+/* D11 macdbg function prototypes for Broadcom 802.11abgn
+ * Networking Adapter Device Drivers.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ * + * + * <> + * + * $Id: dhd_macdbg.h 649388 2016-07-15 22:54:42Z shinuk $ + */ + +#ifndef _dhd_macdbg_h_ +#define _dhd_macdbg_h_ +#ifdef BCMDBG +#include +#include + +extern int dhd_macdbg_attach(dhd_pub_t *dhdp); +extern void dhd_macdbg_detach(dhd_pub_t *dhdp); +extern void dhd_macdbg_event_handler(dhd_pub_t *dhdp, uint32 reason, + uint8 *event_data, uint32 datalen); +extern int dhd_macdbg_dumpmac(dhd_pub_t *dhdp, char *buf, int buflen, int *outbuflen, bool dump_x); +extern int dhd_macdbg_pd11regs(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen); +extern int dhd_macdbg_reglist(dhd_pub_t *dhdp, char *buf, int buflen); +extern int dhd_macdbg_dumpsvmp(dhd_pub_t *dhdp, char *buf, int buflen, int *outbuflen); +extern int dhd_macdbg_psvmpmems(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen); +#endif /* BCMDBG */ +#endif /* _dhd_macdbg_h_ */ diff --git a/bcmdhd.101.10.361.x/dhd_mschdbg.c b/bcmdhd.101.10.361.x/dhd_mschdbg.c new file mode 100755 index 0000000..5865b6d --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_mschdbg.c @@ -0,0 +1,796 @@ +/* + * DHD debugability support + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id: dhd_mschdbg.c 639872 2016-05-25 05:39:30Z sjadhav $ + */ +#ifdef SHOW_LOGTRACE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static const char *head_log = ""; +#define MSCH_EVENT_HEAD(space) \ + do { \ + MSCH_EVENT(("%s_E: ", head_log)); \ + if (space > 0) { \ + int ii; \ + for (ii = 0; ii < space; ii += 4) MSCH_EVENT((" ")); \ + } \ + } while (0) + +#ifdef DHD_EFI +#define MSCH_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE_FW args; \ + } \ +} while (0) +#else +#define MSCH_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#endif /* DHD_EFI */ + +static uint64 solt_start_time[4], req_start_time[4], profiler_start_time[4]; +static uint32 solt_chanspec[4] = {0, }, req_start[4] = {0, }; +static bool lastMessages = FALSE; + +#define US_PRE_SEC 1000000 +#define DATA_UNIT_FOR_LOG_CNT 4 + +static void dhd_mschdbg_us_to_sec(uint32 time_h, uint32 time_l, uint32 *sec, uint32 *remain) +{ + uint64 cur_time = ((uint64)(ntoh32(time_h)) << 32) | ntoh32(time_l); + uint64 r, u = 0; + + r = cur_time; + while (time_h != 0) { + u += (uint64)((0xffffffff / US_PRE_SEC)) * time_h; + r = cur_time - u * US_PRE_SEC; + time_h = (uint32)(r >> 32); + } + + *sec = (uint32)(u + ((uint32)(r) / US_PRE_SEC)); + *remain = (uint32)(r) % US_PRE_SEC; +} + +static char *dhd_mschdbg_display_time(uint32 time_h, uint32 time_l) +{ + static char display_time[32]; + uint32 s, ss; + + if (time_h == 0xffffffff && time_l == 0xffffffff) { + snprintf(display_time, 31, "-1"); + } else { + dhd_mschdbg_us_to_sec(time_h, time_l, &s, &ss); + snprintf(display_time, 31, "%d.%06d", s, ss); + } + return display_time; +} + +static void +dhd_mschdbg_chanspec_list(int sp, char *data, uint16 ptr, uint16 chanspec_cnt) +{ + int i, cnt = (int)ntoh16(chanspec_cnt); + uint16 *chanspec_list = (uint16 *)(data + ntoh16(ptr)); + char buf[CHANSPEC_STR_LEN]; + chanspec_t c; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((":")); + for (i = 0; i < cnt; i++) { + c = (chanspec_t)ntoh16(chanspec_list[i]); + MSCH_EVENT((" %s", wf_chspec_ntoa(c, buf))); + } + MSCH_EVENT(("\n")); +} + +static void +dhd_mschdbg_elem_list(int sp, char *title, char *data, uint16 ptr, uint16 list_cnt) +{ + int i, cnt = (int)ntoh16(list_cnt); + uint32 *list = (uint32 *)(data + ntoh16(ptr)); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("%s_list: ", title)); + for (i = 0; i < cnt; i++) { + MSCH_EVENT(("0x%08x->", ntoh32(list[i]))); + } + MSCH_EVENT(("null\n")); +} + +static void +dhd_mschdbg_req_param_profiler_event_data(int sp, int ver, char *data, uint16 ptr) +{ + int sn = sp + 4; + msch_req_param_profiler_event_data_t *p = + (msch_req_param_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 type, flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_type: ")); + + type = p->req_type; + if (type < 4) { + char *req_type[] = {"fixed", "start-flexible", "duration-flexible", + "both-flexible"}; + MSCH_EVENT(("%s", req_type[type])); + } + else + MSCH_EVENT(("unknown(%d)", type)); + + flags = ntoh16(p->flags); + if (flags & WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS) + MSCH_EVENT((", CHAN_CONTIGUOUS")); + if (flags & WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS) + MSCH_EVENT((", MERGE_CONT_SLOTS")); + if (flags & WL_MSCH_REQ_FLAGS_PREMTABLE) + MSCH_EVENT((", PREMTABLE")); + if (flags & WL_MSCH_REQ_FLAGS_PREMT_CURTS) + MSCH_EVENT((", PREMT_CURTS")); + 
if (flags & WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE) + MSCH_EVENT((", PREMT_IMMEDIATE")); + MSCH_EVENT((", priority: %d\n", p->priority)); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("start-time: %s, duration: %d(us), interval: %d(us)\n", + dhd_mschdbg_display_time(p->start_time_h, p->start_time_l), + ntoh32(p->duration), ntoh32(p->interval))); + + if (type == WL_MSCH_RT_DUR_FLEX) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("dur_flex: %d(us)\n", ntoh32(p->flex.dur_flex))); + } else if (type == WL_MSCH_RT_BOTH_FLEX) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("min_dur: %d(us), max_away_dur: %d(us)\n", + ntoh32(p->flex.bf.min_dur), ntoh32(p->flex.bf.max_away_dur))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("hi_prio_time: %s, hi_prio_interval: %d(us)\n", + dhd_mschdbg_display_time(p->flex.bf.hi_prio_time_h, + p->flex.bf.hi_prio_time_l), + ntoh32(p->flex.bf.hi_prio_interval))); + } +} + +static void +dhd_mschdbg_timeslot_profiler_event_data(int sp, int ver, char *title, char *data, + uint16 ptr, bool empty) +{ + int s, sn = sp + 4; + msch_timeslot_profiler_event_data_t *p = + (msch_timeslot_profiler_event_data_t *)(data + ntoh16(ptr)); + char *state[] = {"NONE", "CHN_SW", "ONCHAN_FIRE", "OFF_CHN_PREP", + "OFF_CHN_DONE", "TS_COMPLETE"}; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("<%s timeslot>: ", title)); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x\n", ntoh32(p->p_timeslot))); + + s = (int)(ntoh32(p->state)); + if (s < 0 || s > 5) s = 0; + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("id: %d, state[%d]: %s, chan_ctxt: [0x%08x]\n", + ntoh32(p->timeslot_id), ntoh32(p->state), state[s], ntoh32(p->p_chan_ctxt))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("fire_time: %s", + dhd_mschdbg_display_time(p->fire_time_h, p->fire_time_l))); + + MSCH_EVENT((", pre_start_time: %s", + dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l))); + + MSCH_EVENT((", end_time: %s", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l))); + + MSCH_EVENT((", sch_dur: %s\n", + dhd_mschdbg_display_time(p->sch_dur_h, p->sch_dur_l))); +} + +static void +dhd_mschdbg_req_timing_profiler_event_data(int sp, int ver, char *title, char *data, + uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_timing_profiler_event_data_t *p = + (msch_req_timing_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 type; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("<%s req_timing>: ", title)); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_timing), ntoh32(p->p_prev), ntoh32(p->p_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("flags:")); + type = ntoh16(p->flags); + if ((type & 0x7f) == 0) + MSCH_EVENT((" NONE")); + else { + if (type & WL_MSCH_RC_FLAGS_ONCHAN_FIRE) + MSCH_EVENT((" ONCHAN_FIRE")); + if (type & WL_MSCH_RC_FLAGS_START_FIRE_DONE) + MSCH_EVENT((" START_FIRE")); + if (type & WL_MSCH_RC_FLAGS_END_FIRE_DONE) + MSCH_EVENT((" END_FIRE")); + if (type & WL_MSCH_RC_FLAGS_ONFIRE_DONE) + MSCH_EVENT((" ONFIRE_DONE")); + if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_START) + MSCH_EVENT((" SPLIT_SLOT_START")); + if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_END) + MSCH_EVENT((" SPLIT_SLOT_END")); + if (type & WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE) + MSCH_EVENT((" PRE_ONFIRE_DONE")); + } + MSCH_EVENT(("\n")); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("pre_start_time: %s", + dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l))); + + MSCH_EVENT((", start_time: %s", + dhd_mschdbg_display_time(p->start_time_h, p->start_time_l))); + + MSCH_EVENT((", 
end_time: %s\n", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l))); + + if (p->p_timeslot && (p->timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("<%s timeslot>: 0x%08x\n", title, ntoh32(p->p_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sn, ver, title, data, p->timeslot_ptr, + (p->timeslot_ptr == 0)); +} + +static void +dhd_mschdbg_chan_ctxt_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_chan_ctxt_profiler_event_data_t *p = + (msch_chan_ctxt_profiler_event_data_t *)(data + ntoh16(ptr)); + chanspec_t c; + char buf[CHANSPEC_STR_LEN]; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_chan_ctxt), ntoh32(p->p_prev), ntoh32(p->p_next))); + + c = (chanspec_t)ntoh16(p->chanspec); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("channel: %s, bf_sch_pending: %s, bf_skipped: %d\n", + wf_chspec_ntoa(c, buf), p->bf_sch_pending? "TRUE" : "FALSE", + ntoh32(p->bf_skipped_count))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("bf_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->bf_link_prev), ntoh32(p->bf_link_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("onchan_time: %s", + dhd_mschdbg_display_time(p->onchan_time_h, p->onchan_time_l))); + MSCH_EVENT((", actual_onchan_dur: %s", + dhd_mschdbg_display_time(p->actual_onchan_dur_h, p->actual_onchan_dur_l))); + MSCH_EVENT((", pend_onchan_dur: %s\n", + dhd_mschdbg_display_time(p->pend_onchan_dur_h, p->pend_onchan_dur_l))); + + dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr, + p->req_entity_list_cnt); + dhd_mschdbg_elem_list(sn, "bf_entity", data, p->bf_entity_list_ptr, + p->bf_entity_list_cnt); +} + +static void +dhd_mschdbg_req_entity_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_entity_profiler_event_data_t *p = + (msch_req_entity_profiler_event_data_t *)(data + ntoh16(ptr)); + char buf[CHANSPEC_STR_LEN]; + chanspec_t c; + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_entity), ntoh32(p->req_hdl_link_prev), + ntoh32(p->req_hdl_link_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_hdl: [0x%08x]\n", ntoh32(p->p_req_hdl))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("chan_ctxt_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->chan_ctxt_link_prev), ntoh32(p->chan_ctxt_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("rt_specific_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->rt_specific_link_prev), ntoh32(p->rt_specific_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("start_fixed_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->start_fixed_link_prev), ntoh32(p->start_fixed_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("both_flex_list: prev 0x%08x, next 0x%08x\n", + ntoh32(p->both_flex_list_prev), ntoh32(p->both_flex_list_next))); + + c = (chanspec_t)ntoh16(p->chanspec); + MSCH_EVENT_HEAD(sn); + if (ver >= 2) { + MSCH_EVENT(("channel: %s, onchan Id %d, current chan Id %d, priority %d", + wf_chspec_ntoa(c, buf), ntoh16(p->onchan_chn_idx), ntoh16(p->cur_chn_idx), + ntoh16(p->priority))); + flags = ntoh32(p->flags); + if (flags & WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE) + MSCH_EVENT((" : MULTI_INSTANCE\n")); + else + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("actual_start_time: %s, ", + 
dhd_mschdbg_display_time(p->actual_start_time_h, p->actual_start_time_l))); + MSCH_EVENT(("curts_fire_time: %s, ", + dhd_mschdbg_display_time(p->curts_fire_time_h, p->curts_fire_time_l))); + } else { + MSCH_EVENT(("channel: %s, priority %d, ", wf_chspec_ntoa(c, buf), + ntoh16(p->priority))); + } + MSCH_EVENT(("bf_last_serv_time: %s\n", + dhd_mschdbg_display_time(p->bf_last_serv_time_h, p->bf_last_serv_time_l))); + + dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "current", data, p->cur_slot_ptr, + (p->cur_slot_ptr == 0)); + dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "pending", data, p->pend_slot_ptr, + (p->pend_slot_ptr == 0)); + + if (p->p_chan_ctxt && (p->chan_ctxt_ptr == 0)) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT((": 0x%08x\n", ntoh32(p->p_chan_ctxt))); + } + else + dhd_mschdbg_chan_ctxt_profiler_event_data(sn, ver, data, p->chan_ctxt_ptr, + (p->chan_ctxt_ptr == 0)); +} + +static void +dhd_mschdbg_req_handle_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_handle_profiler_event_data_t *p = + (msch_req_handle_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_handle), ntoh32(p->p_prev), ntoh32(p->p_next))); + + dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr, + p->req_entity_list_cnt); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("cb_func: [0x%08x], cb_func: [0x%08x]", + ntoh32(p->cb_func), ntoh32(p->cb_ctxt))); + if (ver < 2) { + MSCH_EVENT((", chan_cnt: %d", ntoh16(p->chan_cnt))); + } + flags = ntoh32(p->flags); + if (flags & WL_MSCH_REQ_HDL_FLAGS_NEW_REQ) + MSCH_EVENT((", NEW_REQ")); + MSCH_EVENT(("\n")); + + dhd_mschdbg_req_param_profiler_event_data(sn, ver, data, p->req_param_ptr); + + if (ver >= 2) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_time: %s\n", + dhd_mschdbg_display_time(p->req_time_h, p->req_time_l))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("chan_cnt: %d, chan idx %d, last chan idx %d\n", + ntoh16(p->chan_cnt), ntoh16(p->chan_idx), ntoh16(p->last_chan_idx))); + if (p->chanspec_list && p->chanspec_cnt) { + dhd_mschdbg_chanspec_list(sn, data, p->chanspec_list, p->chanspec_cnt); + } + } +} + +static void +dhd_mschdbg_profiler_profiler_event_data(int sp, int ver, char *data, uint16 ptr) +{ + msch_profiler_profiler_event_data_t *p = + (msch_profiler_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("free list: req_hdl 0x%08x, req_entity 0x%08x," + " chan_ctxt 0x%08x, chanspec 0x%08x\n", + ntoh32(p->free_req_hdl_list), ntoh32(p->free_req_entity_list), + ntoh32(p->free_chan_ctxt_list), ntoh32(p->free_chanspec_list))); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("alloc count: chanspec %d, req_entity %d, req_hdl %d, " + "chan_ctxt %d, timeslot %d\n", + ntoh16(p->msch_chanspec_alloc_cnt), ntoh16(p->msch_req_entity_alloc_cnt), + ntoh16(p->msch_req_hdl_alloc_cnt), ntoh16(p->msch_chan_ctxt_alloc_cnt), + ntoh16(p->msch_timeslot_alloc_cnt))); + + dhd_mschdbg_elem_list(sp, "req_hdl", data, p->msch_req_hdl_list_ptr, + p->msch_req_hdl_list_cnt); + dhd_mschdbg_elem_list(sp, "chan_ctxt", data, p->msch_chan_ctxt_list_ptr, + p->msch_chan_ctxt_list_cnt); + dhd_mschdbg_elem_list(sp, "req_timing", data, p->msch_req_timing_list_ptr, + p->msch_req_timing_list_cnt); + dhd_mschdbg_elem_list(sp, "start_fixed", data, p->msch_start_fixed_list_ptr, + p->msch_start_fixed_list_cnt); + 
dhd_mschdbg_elem_list(sp, "both_flex_req_entity", data, + p->msch_both_flex_req_entity_list_ptr, + p->msch_both_flex_req_entity_list_cnt); + dhd_mschdbg_elem_list(sp, "start_flex", data, p->msch_start_flex_list_ptr, + p->msch_start_flex_list_cnt); + dhd_mschdbg_elem_list(sp, "both_flex", data, p->msch_both_flex_list_ptr, + p->msch_both_flex_list_cnt); + + if (p->p_cur_msch_timeslot && (p->cur_msch_timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": 0x%08x\n", + ntoh32(p->p_cur_msch_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "cur_msch", data, + p->cur_msch_timeslot_ptr, (p->cur_msch_timeslot_ptr == 0)); + + if (p->p_next_timeslot && (p->next_timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": 0x%08x\n", + ntoh32(p->p_next_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "next", data, + p->next_timeslot_ptr, (p->next_timeslot_ptr == 0)); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("ts_id: %d, ", ntoh32(p->ts_id))); + flags = ntoh32(p->flags); + if (flags & WL_MSCH_STATE_IN_TIEMR_CTXT) + MSCH_EVENT(("IN_TIEMR_CTXT, ")); + if (flags & WL_MSCH_STATE_SCHD_PENDING) + MSCH_EVENT(("SCHD_PENDING, ")); + MSCH_EVENT(("slotskip_flags: %d, cur_armed_timeslot: 0x%08x\n", + (ver >= 2)? ntoh32(p->slotskip_flag) : 0, ntoh32(p->cur_armed_timeslot))); + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("flex_list_cnt: %d, service_interval: %d, " + "max_lo_prio_interval: %d\n", + ntoh16(p->flex_list_cnt), ntoh32(p->service_interval), + ntoh32(p->max_lo_prio_interval))); +} + +static void dhd_mschdbg_dump_data(dhd_pub_t *dhdp, void *raw_event_ptr, int type, + char *data, int len) +{ + uint64 t = 0, tt = 0; + uint32 s = 0, ss = 0; + int wlc_index, ver; + + ver = (type & WL_MSCH_PROFILER_VER_MASK) >> WL_MSCH_PROFILER_VER_SHIFT; + wlc_index = (type & WL_MSCH_PROFILER_WLINDEX_MASK) >> WL_MSCH_PROFILER_WLINDEX_SHIFT; + if (wlc_index >= 4) + return; + + type &= WL_MSCH_PROFILER_TYPE_MASK; + if (type <= WL_MSCH_PROFILER_PROFILE_END) { + msch_profiler_event_data_t *pevent = (msch_profiler_event_data_t *)data; + tt = ((uint64)(ntoh32(pevent->time_hi)) << 32) | ntoh32(pevent->time_lo); + dhd_mschdbg_us_to_sec(pevent->time_hi, pevent->time_lo, &s, &ss); + } + + if (lastMessages && (type != WL_MSCH_PROFILER_MESSAGE) && + (type != WL_MSCH_PROFILER_EVENT_LOG)) { + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + lastMessages = FALSE; + } + + switch (type) { + case WL_MSCH_PROFILER_START: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d START\n", s, ss)); + break; + + case WL_MSCH_PROFILER_EXIT: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d EXIT\n", s, ss)); + break; + + case WL_MSCH_PROFILER_REQ: + { + msch_req_profiler_event_data_t *p = (msch_req_profiler_event_data_t *)data; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("===============================\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] REGISTER:\n", s, ss, wlc_index)); + dhd_mschdbg_req_param_profiler_event_data(4, ver, data, p->req_param_ptr); + dhd_mschdbg_chanspec_list(4, data, p->chanspec_ptr, p->chanspec_cnt); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("===============================\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + } + break; + + case WL_MSCH_PROFILER_CALLBACK: + { + msch_callback_profiler_event_data_t *p = + (msch_callback_profiler_event_data_t *)data; + char buf[CHANSPEC_STR_LEN]; + chanspec_t chanspec; + uint16 cbtype; + + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] CALLBACK: ", s, ss, wlc_index)); + chanspec = 
(chanspec_t)ntoh16(p->chanspec); + MSCH_EVENT(("req_hdl[0x%08x], channel %s --", + ntoh32(p->p_req_hdl), wf_chspec_ntoa(chanspec, buf))); + + cbtype = ntoh16(p->type); + if (cbtype & WL_MSCH_CT_ON_CHAN) + MSCH_EVENT((" ON_CHAN")); + if (cbtype & WL_MSCH_CT_OFF_CHAN) + MSCH_EVENT((" OFF_CHAN")); + if (cbtype & WL_MSCH_CT_REQ_START) + MSCH_EVENT((" REQ_START")); + if (cbtype & WL_MSCH_CT_REQ_END) + MSCH_EVENT((" REQ_END")); + if (cbtype & WL_MSCH_CT_SLOT_START) + MSCH_EVENT((" SLOT_START")); + if (cbtype & WL_MSCH_CT_SLOT_SKIP) + MSCH_EVENT((" SLOT_SKIP")); + if (cbtype & WL_MSCH_CT_SLOT_END) + MSCH_EVENT((" SLOT_END")); + if (cbtype & WL_MSCH_CT_OFF_CHAN_DONE) + MSCH_EVENT((" OFF_CHAN_DONE")); + if (cbtype & WL_MSCH_CT_PARTIAL) + MSCH_EVENT((" PARTIAL")); + if (cbtype & WL_MSCH_CT_PRE_ONCHAN) + MSCH_EVENT((" PRE_ONCHAN")); + if (cbtype & WL_MSCH_CT_PRE_REQ_START) + MSCH_EVENT((" PRE_REQ_START")); + + if (cbtype & WL_MSCH_CT_REQ_START) { + req_start[wlc_index] = 1; + req_start_time[wlc_index] = tt; + } else if (cbtype & WL_MSCH_CT_REQ_END) { + if (req_start[wlc_index]) { + MSCH_EVENT((" : REQ duration %d", + (uint32)(tt - req_start_time[wlc_index]))); + req_start[wlc_index] = 0; + } + } + + if (cbtype & WL_MSCH_CT_SLOT_START) { + solt_chanspec[wlc_index] = p->chanspec; + solt_start_time[wlc_index] = tt; + } else if (cbtype & WL_MSCH_CT_SLOT_END) { + if (p->chanspec == solt_chanspec[wlc_index]) { + MSCH_EVENT((" : SLOT duration %d", + (uint32)(tt - solt_start_time[wlc_index]))); + solt_chanspec[wlc_index] = 0; + } + } + MSCH_EVENT(("\n")); + + if (cbtype & (WL_MSCH_CT_ON_CHAN | WL_MSCH_CT_SLOT_SKIP)) { + MSCH_EVENT_HEAD(4); + if (cbtype & WL_MSCH_CT_ON_CHAN) { + MSCH_EVENT(("ID %d onchan idx %d cur_chan_seq_start %s ", + ntoh32(p->timeslot_id), ntoh32(p->onchan_idx), + dhd_mschdbg_display_time(p->cur_chan_seq_start_time_h, + p->cur_chan_seq_start_time_l))); + } + t = ((uint64)(ntoh32(p->start_time_h)) << 32) | + ntoh32(p->start_time_l); + MSCH_EVENT(("start %s ", + dhd_mschdbg_display_time(p->start_time_h, + p->start_time_l))); + tt = ((uint64)(ntoh32(p->end_time_h)) << 32) | ntoh32(p->end_time_l); + MSCH_EVENT(("end %s duration %d\n", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l), + (p->end_time_h == 0xffffffff && p->end_time_l == 0xffffffff)? + -1 : (int)(tt - t))); + } + + } + break; + + case WL_MSCH_PROFILER_EVENT_LOG: + { + while (len >= (int)WL_MSCH_EVENT_LOG_HEAD_SIZE) { + msch_event_log_profiler_event_data_t *p = + (msch_event_log_profiler_event_data_t *)data; + /* TODO: How to parse MSCH if extended event tag is present ??? 
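+			 * (Descriptive note: in this drop the header is rebuilt below as a
+			 * plain prcd_event_log_hdr_t and handed to dhd_dbg_verboselog_printf();
+			 * extended event tags are not decoded here.)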
*/ + prcd_event_log_hdr_t hdr; + int size = WL_MSCH_EVENT_LOG_HEAD_SIZE + p->hdr.count * sizeof(uint32); + if (len < size || size > sizeof(msch_event_log_profiler_event_data_t)) { + break; + } + data += size; + len -= size; + dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + bzero(&hdr, sizeof(hdr)); + hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; + hdr.count = p->hdr.count + 1; + /* exclude LSB 2 bits which indicate binary/non-binary data */ + hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2; + hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num); + if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) { + hdr.binary_payload = TRUE; + } + dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0); + } + lastMessages = TRUE; + break; + } + + case WL_MSCH_PROFILER_MESSAGE: + { + msch_message_profiler_event_data_t *p = (msch_message_profiler_event_data_t *)data; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: %s", s, ss, wlc_index, p->message)); + lastMessages = TRUE; + break; + } + + case WL_MSCH_PROFILER_PROFILE_START: + profiler_start_time[wlc_index] = tt; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("-------------------------------\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] PROFILE DATA:\n", s, ss, wlc_index)); + dhd_mschdbg_profiler_profiler_event_data(4, ver, data, 0); + break; + + case WL_MSCH_PROFILER_PROFILE_END: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] PROFILE END: take time %d\n", s, ss, + wlc_index, (uint32)(tt - profiler_start_time[wlc_index]))); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("-------------------------------\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + break; + + case WL_MSCH_PROFILER_REQ_HANDLE: + dhd_mschdbg_req_handle_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_REQ_ENTITY: + dhd_mschdbg_req_entity_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_CHAN_CTXT: + dhd_mschdbg_chan_ctxt_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_REQ_TIMING: + dhd_mschdbg_req_timing_profiler_event_data(4, ver, "msch", data, 0, FALSE); + break; + + default: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("[wl%d] ERROR: unsupported EVENT reason code:%d; ", + wlc_index, type)); + break; + } +} + +void +wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, void *data, int len) +{ + head_log = "MSCH"; + dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, (char *)data, len); +} + +void +wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, prcd_event_log_hdr_t *plog_hdr, + uint32 *log_ptr) +{ + uint32 log_pyld_len; + head_log = "CONSOLE"; + + if (plog_hdr->count == 0) { + return; + } + log_pyld_len = (plog_hdr->count - 1) * DATA_UNIT_FOR_LOG_CNT; + + if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) { + msch_event_log_profiler_event_data_t *p = + (msch_event_log_profiler_event_data_t *)log_ptr; + /* TODO: How to parse MSCH if extended event tag is present ??? 
*/ + prcd_event_log_hdr_t hdr; + uint32 s, ss; + + if (log_pyld_len < OFFSETOF(msch_event_log_profiler_event_data_t, data) || + log_pyld_len > sizeof(msch_event_log_profiler_event_data_t)) { + return; + } + + dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + bzero(&hdr, sizeof(hdr)); + hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; + hdr.count = p->hdr.count + 1; + /* exclude LSB 2 bits which indicate binary/non-binary data */ + hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2; + hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num); + if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) { + hdr.binary_payload = TRUE; + } + dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0); + } else { + msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr; + int type = ntoh16(p->type); + int len = ntoh16(p->size); + + if (log_pyld_len < OFFSETOF(msch_collect_tlv_t, value) + len) { + return; + } + + dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, p->value, len); + } +} +#endif /* SHOW_LOGTRACE */ diff --git a/bcmdhd.101.10.361.x/dhd_mschdbg.h b/bcmdhd.101.10.361.x/dhd_mschdbg.h new file mode 100755 index 0000000..12ba3ab --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_mschdbg.h @@ -0,0 +1,36 @@ +/* + * DHD debugability header file + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id: dhd_mschdbg.h 571265 2015-07-14 20:50:18Z eccopark $ + */ + +#ifndef _dhd_mschdbg_h_ +#define _dhd_mschdbg_h_ + +#ifdef SHOW_LOGTRACE +extern void wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, + void *data, int len); +extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, + prcd_event_log_hdr_t *plog_hdr, uint32 *log_ptr); +#endif /* SHOW_LOGTRACE */ + +#endif /* _dhd_mschdbg_h_ */ diff --git a/bcmdhd.101.10.361.x/dhd_msgbuf.c b/bcmdhd.101.10.361.x/dhd_msgbuf.c new file mode 100755 index 0000000..12eb4e0 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_msgbuf.c @@ -0,0 +1,15512 @@ +/** + * @file definition of host message ring functionality + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +/** XXX Twiki: [PCIeFullDongleArchitecture] */ + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#ifdef BCMDBUS +#include +#else +#include +#endif /* BCMDBUS */ + +#include +#include +#include +#ifdef EXT_STA +#include +#include +#include +#endif /* EXT_STA */ + +#include + +#include +#include +#include +#ifdef DHD_TIMESYNC +#include +#endif /* DHD_TIMESYNC */ +#ifdef DHD_PKTTS +#include +#include +#endif /* DHD_PKTTS */ +#include + +#if defined(DHD_LB) +#if !defined(LINUX) && !defined(linux) && !defined(OEM_ANDROID) +#error "DHD Loadbalancing only supported on LINUX | OEM_ANDROID" +#endif /* !LINUX && !OEM_ANDROID */ +#include +#include +#define DHD_LB_WORKQ_SZ (8192) +#define DHD_LB_WORKQ_SYNC (16) +#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2) +#endif /* DHD_LB */ + +#include +#include +#include +#include +#include + +#ifdef DHD_PKT_LOGGING +#include +#include +#endif /* DHD_PKT_LOGGING */ +#ifdef DHD_EWPR_VER2 +#include +#endif /* DHD_EWPR_VER2 */ + +extern char dhd_version[]; +extern char fw_version[]; + +/** + * Host configures a soft doorbell for d2h rings, by specifying a 32bit host + * address where a value must be written. Host may also interrupt coalescing + * on this soft doorbell. + * Use Case: Hosts with network processors, may register with the dongle the + * network processor's thread wakeup register and a value corresponding to the + * core/thread context. Dongle will issue a write transaction + * to the PCIE RC which will need to be routed to the mapped register space, by + * the host. 
+ */ +/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */ + +/* Dependency Check */ +#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF" +#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */ + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ + +#if defined(DHD_HTPUT_TUNABLES) +#define DEFAULT_RX_BUFFERS_TO_POST 1024 +#define RX_BUF_BURST 64 /* Rx buffers for MSDU Data */ +#define RXBUFPOST_THRESHOLD 64 /* Rxbuf post threshold */ +#else +#define DEFAULT_RX_BUFFERS_TO_POST 256 +#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */ +#define RXBUFPOST_THRESHOLD 32 /* Rxbuf post threshold */ +#endif /* DHD_HTPUT_TUNABLES */ + +/* Read index update Magic sequence */ +#define DHD_DMA_INDX_SEQ_H2D_DB_MAGIC 0xDDDDDDDDAu +#define DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring) (0xDD000000 | (ring->idx << 16u) | ring->rd) +/* Write index update Magic sequence */ +#define DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring) (0xFF000000 | (ring->idx << 16u) | ring->wr) +#define DHD_AGGR_H2D_DB_MAGIC 0xFFFFFFFAu + +#define DHD_STOP_QUEUE_THRESHOLD 200 +#define DHD_START_QUEUE_THRESHOLD 100 + +#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */ +#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) + +/* flags for ioctl pending status */ +#define MSGBUF_IOCTL_ACK_PENDING (1<<0) +#define MSGBUF_IOCTL_RESP_PENDING (1<<1) + +#define DHD_IOCTL_REQ_PKTBUFSZ 2048 +#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE) + +/** + * XXX: DMA_ALIGN_LEN use is overloaded: + * - as align bits: in DMA_ALLOC_CONSISTENT 1 << 4 + * - in ensuring that a buffer's va is 4 Byte aligned + * - in rounding up a buffer length to 4 Bytes. + */ +#define DMA_ALIGN_LEN 4 + +#define DMA_D2H_SCRATCH_BUF_LEN 8 +#define DMA_XFER_LEN_LIMIT 0x400000 + +#ifdef BCM_HOST_BUF +#ifndef DMA_HOST_BUFFER_LEN +#define DMA_HOST_BUFFER_LEN 0x200000 +#endif +#endif /* BCM_HOST_BUF */ + +#if defined(CUSTOMER_HW_AMLOGIC) && defined(USE_AML_PCIE_TEE_MEM) +#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 4096 +#else +#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 +#endif + +#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1 +#define DHD_FLOWRING_MAX_EVENTBUF_POST 32 +#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 +#define DHD_H2D_INFORING_MAX_BUF_POST 32 +#ifdef BTLOG +#define DHD_H2D_BTLOGRING_MAX_BUF_POST 32 +#endif /* BTLOG */ +#define DHD_MAX_TSBUF_POST 8 + +#define DHD_PROT_FUNCS 43 + +/* Length of buffer in host for bus throughput measurement */ +#define DHD_BUS_TPUT_BUF_LEN 2048 + +#define TXP_FLUSH_NITEMS + +/* optimization to write "n" tx items at a time to ring */ +#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 + +#define RING_NAME_MAX_LENGTH 24 +#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024 +/* Giving room before ioctl_trans_id rollsover. 
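+ * (ioctl_trans_id is a 16-bit sequence number; keeping a margin of ~300
+ * transactions before it wraps presumably prevents a stale response from
+ * aliasing a fresh request id. This is an inference from the surrounding
+ * code, not vendor documentation.)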
*/
+#define BUFFER_BEFORE_ROLLOVER 300
+
+/* 512K memory + 32K registers */
+#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
+
+struct msgbuf_ring; /* ring context for common and flow rings */
+
+#ifdef DHD_HMAPTEST
+/* 5 * DMA_CONSISTENT_ALIGN as different tests use up to the 4th page */
+#define HMAP_SANDBOX_BUFFER_LEN	(DMA_CONSISTENT_ALIGN * 5) /* for a 4k page this is 20K */
+/**
+ * For the D11 DMA HMAPTEST the states are as follows:
+ * an iovar sets the ACTIVE state,
+ * the next TXPOST / RXPOST sets the POSTED state,
+ * on TXCPL / RXCPL a POSTED + pktid match frees the buffer and the state
+ * changes to INACTIVE.
+ * This ensures that per iovar only one buffer is replaced from the sandbox area.
+ */
+#define HMAPTEST_D11_TX_INACTIVE 0
+#define HMAPTEST_D11_TX_ACTIVE 1
+#define HMAPTEST_D11_TX_POSTED 2
+
+#define HMAPTEST_D11_RX_INACTIVE 0
+#define HMAPTEST_D11_RX_ACTIVE 1
+#define HMAPTEST_D11_RX_POSTED 2
+#endif /* DHD_HMAPTEST */
+
+#define PCIE_DMA_LOOPBACK	0
+#define D11_DMA_LOOPBACK	1
+#define BMC_DMA_LOOPBACK	2
+
+/**
+ * PCIE D2H DMA Complete Sync Modes
+ *
+ * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
+ * host system memory. A WAR using one of the following approaches is needed:
+ * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
+ * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
+ *    and writes it in the last word of each work item. Each work item carries a
+ *    seqnum = sequence num % 253.
+ * 3. Read Barrier: Dongle does a host memory read access prior to posting an
+ *    interrupt, ensuring that the D2H data transfer indeed completed.
+ * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
+ *    ring contents before the indices.
+ *
+ * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
+ * callback (see dhd_prot_d2h_sync_none) may be bound.
+ *
+ * Dongle advertizes host side sync mechanism requirements.
+ */
+
+#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
+#define PCIE_D2H_SYNC_DELAY (100UL)	/* in terms of usecs */
+
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_MSGBUF_INFO DHD_TRACE
+#else
+#define DHD_MSGBUF_INFO DHD_INFO
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+	volatile cmn_msg_hdr_t *msg, int msglen);
+
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
+ * For EDL messages.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+#ifdef EWP_EDL
+typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+	volatile cmn_msg_hdr_t *msg);
+#endif /* EWP_EDL */
+
+/*
+ * +----------------------------------------------------------------------------
+ *
+ * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
+ * flowids do not.
+ *
+ * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
+ * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
+ *
+ * Here is a sample mapping (based on PCIE Full Dongle Rev5) where
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, and
+ * BCMPCIE_COMMON_MSGRINGS = 5, i.e. including the 3 D2H common rings.
+ *
+ * H2D Control Submit   RingId = 0        FlowId = 0 reserved never allocated
+ * H2D RxPost Submit    RingId = 1        FlowId = 1 reserved never allocated
+ *
+ * D2H Control Complete  RingId = 2
+ * D2H Transmit Complete RingId = 3
+ * D2H Receive Complete  RingId = 4
+ *
+ * H2D TxPost FLOWRING RingId = 5         FlowId = 2         (1st flowring)
+ * H2D TxPost FLOWRING RingId = 6         FlowId = 3         (2nd flowring)
+ * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = 2 + (N-1) (Nth flowring)
+ *
+ * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
+ * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
+ *
+ * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
+ * FlowId values would be in the range [2..133] and the corresponding
+ * RingId values would be in the range [5..136].
+ *
+ * The flowid allocator may choose to allocate flowids:
+ *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
+ *   X# of uc flowids in consecutive ranges (per station Id), where X is the
+ *   packet's access category (e.g. 4 uc flowids per station).
+ *
+ * CAUTION:
+ * When the DMA indices array feature is used, RingId=5, corresponding to the
+ * 0th FLOWRING, will actually use the FlowId as the index into the H2D DMA
+ * indices array, since the FlowId truly represents the index in that array.
+ *
+ * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS
+ * will represent the index in the D2H DMA indices array.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* First TxPost Flowring Id */
+#define DHD_FLOWRING_START_FLOWID	BCMPCIE_H2D_COMMON_MSGRINGS
+
+/* Determine whether a ringid belongs to a TxPost flowring */
+#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
+	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
+	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
+
+/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
+#define DHD_FLOWID_TO_RINGID(flowid) \
+	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
+
+/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
+#define DHD_RINGID_TO_FLOWID(ringid) \
+	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
+
+/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
+ * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
+ * any array of H2D rings.
+ */
+#define DHD_H2D_RING_OFFSET(ringid) \
+	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
+
+/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
+ * This may be used for IFRM.
+ */
+#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
+	((ringid) - BCMPCIE_COMMON_MSGRINGS)
+
+/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
+ * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
+ * any array of D2H rings.
+ * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
+ * max_h2d_rings: total number of h2d rings
+ */
+#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
+	((ringid) > (max_h2d_rings) ? \
+		((ringid) - max_h2d_rings) : \
+		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
+
+/* Convert a D2H DMA Indices Offset to a RingId */
+#define DHD_D2H_RINGID(offset) \
+	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* XXX: The ringid, flowid and dma indices array index idiosyncrasy is error
+ * prone. While a simplification is possible, the backward compatibility
+ * requirement (DHD should operate with any PCIE rev version of firmware)
+ * limits what may be accomplished.
+ *
+ * At the minimum, the implementation should use macros for any conversions,
+ * facilitating introduction of future PCIE FD revs that need more "common" or
+ * other dynamic rings.
+ */
+
+/* XXX: Presently there is no need for maintaining both a dmah and a secdmah */
+#define DHD_DMAH_NULL      ((void*)NULL)
+
+/*
+ * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
+ * buffer does not occupy the entire cacheline, and another object is placed
+ * following the DMA-able buffer, data corruption may occur if the DMA-able
+ * buffer is used to DMA into (e.g. D2H direction), when HW cache coherency
+ * is not available.
+ */
+#if defined(L1_CACHE_BYTES)
+#define DHD_DMA_PAD        (L1_CACHE_BYTES)
+#else
+#define DHD_DMA_PAD        (128)
+#endif
+
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span the 0x00000000FFFFFFFF boundary (avoid dongle side 64bit ptr
+ * arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
+ * determine the number of flowrings required, and a pool of msgbuf_rings is
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
+ * state as-if upon an attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
+	    DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
+	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+		(flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
+		(ring)++, (flowid)++)
+
+/* Used in loopback tests */
+typedef struct dhd_dmaxfer {
+	dhd_dma_buf_t srcmem;
+	dhd_dma_buf_t dstmem;
+	uint32 srcdelay;
+	uint32 destdelay;
+	uint32 len;
+	bool in_progress;
+	uint64 start_usec;
+	uint64 time_taken;
+	uint32 d11_lpbk;
+	int status;
+} dhd_dmaxfer_t;
+
+#ifdef DHD_HMAPTEST
+/* Used in HMAP test */
+typedef struct dhd_hmaptest {
+	dhd_dma_buf_t mem;
+	uint32 len;
+	bool in_progress;
+	uint32 is_write;
+	uint32 accesstype;
+	uint64 start_usec;
+	uint32 offset;
+} dhd_hmaptest_t;
+#endif /* DHD_HMAPTEST */
+/**
+ * msgbuf_ring : This object manages the host side ring that includes a DMA-able
+ * buffer, the WR and RD indices, ring parameters such as the max number of items
+ * and the length of each item, and other miscellaneous runtime state.
+ * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
+ * H2D TxPost ring as specified in the PCIE FullDongle Spec.
+ * Ring parameters are conveyed to the dongle, which maintains its own peer end
+ * ring state. Depending on whether the DMA Indices feature is supported, the
+ * host will update the WR/RD index in the DMA indices array in host memory or
+ * directly in dongle memory.
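+ *
+ * Illustrative occupancy math (a sketch, assuming the circular-buffer
+ * convention that NTXPACTIVE() expresses elsewhere in this file):
+ *   items pending = (wr - rd + max_items) % max_items
+ *   e.g. max_items = 256, rd = 250, wr = 4 gives 10 items pending.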
+ */ +typedef struct msgbuf_ring { + bool inited; + uint16 idx; /* ring id */ + uint16 rd; /* read index */ + uint16 curr_rd; /* read index for debug */ + uint16 wr; /* write index */ + uint16 max_items; /* maximum number of items in ring */ + uint16 item_len; /* length of each item in the ring */ + sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */ + dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */ + uint32 seqnum; /* next expected item's sequence number */ +#ifdef TXP_FLUSH_NITEMS + void *start_addr; + /* # of messages on ring not yet announced to dongle */ + uint16 pend_items_count; +#ifdef AGG_H2D_DB + osl_atomic_t inflight; +#endif /* AGG_H2D_DB */ +#endif /* TXP_FLUSH_NITEMS */ + + uint8 ring_type; + uint8 n_completion_ids; + bool create_pending; + uint16 create_req_id; + uint8 current_phase; + uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED]; + uchar name[RING_NAME_MAX_LENGTH]; + uint32 ring_mem_allocated; + void *ring_lock; +} msgbuf_ring_t; + +#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va) +#define DHD_RING_END_VA(ring) \ + ((uint8 *)(DHD_RING_BGN_VA((ring))) + \ + (((ring)->max_items - 1) * (ring)->item_len)) + +#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) +#define MAX_IOCTL_TRACE_SIZE 50 +#define MAX_IOCTL_BUF_SIZE 64 +typedef struct _dhd_ioctl_trace_t { + uint32 cmd; + uint16 transid; + char ioctl_buf[MAX_IOCTL_BUF_SIZE]; + uint64 timestamp; +} dhd_ioctl_trace_t; +#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */ + +#ifdef DHD_PKTTS +struct pktts_fwtx_v1 { + uint32 ts[PKTTS_MAX_FWTX]; +}; + +struct pktts_fwtx_v2 { + uint32 ts[PKTTS_MAX_FWTX]; + uint32 ut[PKTTS_MAX_UCTX]; + uint32 uc[PKTTS_MAX_UCCNT]; +}; + +static void dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhd, void *pkt, + void *fw_ts, uint16 version); +static void dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhd, void *pkt, + uint fwr1, uint fwr2); +#endif /* DHD_PKTTS */ + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) +/** D2H WLAN Rx Packet Chaining context */ +typedef struct rxchain_info { + uint pkt_count; + uint ifidx; + void *pkthead; + void *pkttail; + uint8 *h_da; /* pointer to da of chain head */ + uint8 *h_sa; /* pointer to sa of chain head */ + uint8 h_prio; /* prio of chain head */ +} rxchain_info_t; +#endif /* BCM_ROUTER_DHD && HNDCTF */ + +/* This can be overwritten by module parameter defined in dhd_linux.c + * or by dhd iovar h2d_max_txpost. + */ +int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM; +#if defined(DHD_HTPUT_TUNABLES) +int h2d_htput_max_txpost = H2DRING_HTPUT_TXPOST_MAX_ITEM; +#endif /* DHD_HTPUT_TUNABLES */ + +#ifdef AGG_H2D_DB +bool agg_h2d_db_enab = TRUE; + +#define AGG_H2D_DB_TIMEOUT_USEC (1000u) /* 1 msec */ +uint32 agg_h2d_db_timeout = AGG_H2D_DB_TIMEOUT_USEC; + +#ifndef AGG_H2D_DB_INFLIGHT_THRESH +/* Keep inflight threshold same as txp_threshold */ +#define AGG_H2D_DB_INFLIGHT_THRESH TXP_FLUSH_MAX_ITEMS_FLUSH_CNT +#endif /* !AGG_H2D_DB_INFLIGHT_THRESH */ + +uint32 agg_h2d_db_inflight_thresh = AGG_H2D_DB_INFLIGHT_THRESH; + +#define DHD_NUM_INFLIGHT_HISTO_ROWS (14u) +#define DHD_INFLIGHT_HISTO_SIZE (sizeof(uint64) * DHD_NUM_INFLIGHT_HISTO_ROWS) + +typedef struct _agg_h2d_db_info { + void *dhd; + struct hrtimer timer; + bool init; + uint32 direct_db_cnt; + uint32 timer_db_cnt; + uint64 *inflight_histo; +} agg_h2d_db_info_t; +#endif /* AGG_H2D_DB */ + +/** DHD protocol handle. Is an opaque type to other DHD software layers. 
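+ * Allocated at protocol attach time and reached via dhd_pub_t::prot, as the
+ * accessor functions below (e.g. dhd_prot_get_rxbufpost_sz()) illustrate.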
*/ +typedef struct dhd_prot { + osl_t *osh; /* OSL handle */ + uint16 rxbufpost_sz; + uint16 rxbufpost; + uint16 max_rxbufpost; + uint32 tot_rxbufpost; + uint32 tot_rxcpl; + uint16 max_eventbufpost; + uint16 max_ioctlrespbufpost; + uint16 max_tsbufpost; + uint16 max_infobufpost; + uint16 infobufpost; + uint16 cur_event_bufs_posted; + uint16 cur_ioctlresp_bufs_posted; + uint16 cur_ts_bufs_posted; + + /* Flow control mechanism based on active transmits pending */ + osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */ + uint16 h2d_max_txpost; +#if defined(DHD_HTPUT_TUNABLES) + uint16 h2d_htput_max_txpost; +#endif /* DHD_HTPUT_TUNABLES */ + uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */ + + /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */ + msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */ + msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */ + msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */ + msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */ + msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */ + msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */ + msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */ + msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */ + + msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */ + dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */ + uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */ + + uint32 rx_dataoffset; + + dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */ + dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */ + + /* ioctl related resources */ + uint8 ioctl_state; + int16 ioctl_status; /* status returned from dongle */ + uint16 ioctl_resplen; + dhd_ioctl_recieved_status_t ioctl_received; + uint curr_ioctl_cmd; + dhd_dma_buf_t retbuf; /* For holding ioctl response */ + dhd_dma_buf_t ioctbuf; /* For holding ioctl request */ + + dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */ + + /* DMA-able arrays for holding WR and RD indices */ + uint32 rw_index_sz; /* Size of a RD or WR index in dongle */ + dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */ + dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */ + dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */ + dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */ + dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */ + + dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */ + + dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */ +#ifdef DHD_DMA_INDICES_SEQNUM + char *h2d_dma_indx_rd_copy_buf; /* Local copy of H2D WR indices array */ + char *d2h_dma_indx_wr_copy_buf; /* Local copy of D2H WR indices array */ + uint32 h2d_dma_indx_rd_copy_bufsz; /* H2D WR indices array size */ + uint32 d2h_dma_indx_wr_copy_bufsz; /* D2H WR indices array size */ + uint32 host_seqnum; /* Seqence number for D2H DMA Indices sync */ +#endif /* DHD_DMA_INDICES_SEQNUM */ + uint32 flowring_num; + + d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ +#ifdef EWP_EDL + d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */ +#endif /* EWP_EDL */ + ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ + ulong d2h_sync_wait_tot; /* total 
wait loops */ + + dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */ + + uint16 ioctl_seq_no; + uint16 data_seq_no; /* XXX this field is obsolete */ + uint16 ioctl_trans_id; + void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */ + void *pktid_rx_map; /* pktid map for rx path */ + void *pktid_tx_map; /* pktid map for tx path */ + bool metadata_dbg; + void *pktid_map_handle_ioctl; +#ifdef DHD_MAP_PKTID_LOGGING + void *pktid_dma_map; /* pktid map for DMA MAP */ + void *pktid_dma_unmap; /* pktid map for DMA UNMAP */ +#endif /* DHD_MAP_PKTID_LOGGING */ + uint32 pktid_depleted_cnt; /* pktid depleted count */ + /* netif tx queue stop count */ + uint8 pktid_txq_stop_cnt; + /* netif tx queue start count */ + uint8 pktid_txq_start_cnt; + uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */ + uint64 ioctl_ack_time; /* timestamp for ioctl ack */ + uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */ + + /* Applications/utilities can read tx and rx metadata using IOVARs */ + uint16 rx_metadata_offset; + uint16 tx_metadata_offset; + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) + rxchain_info_t rxchain; /* chain of rx packets */ +#endif + +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + /* Host's soft doorbell configuration */ + bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS]; +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ + + /* Work Queues to be used by the producer and the consumer, and threshold + * when the WRITE index must be synced to consumer's workq + */ + dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */ + + uint32 host_ipc_version; /* Host sypported IPC rev */ + uint32 device_ipc_version; /* FW supported IPC rev */ + uint32 active_ipc_version; /* Host advertised IPC rev */ +#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) + dhd_ioctl_trace_t ioctl_trace[MAX_IOCTL_TRACE_SIZE]; + uint32 ioctl_trace_count; +#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */ + dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */ + bool hostts_req_buf_inuse; + bool rx_ts_log_enabled; + bool tx_ts_log_enabled; +#ifdef BTLOG + msgbuf_ring_t *h2dring_btlog_subn; /* H2D btlog submission ring */ + msgbuf_ring_t *d2hring_btlog_cpln; /* D2H btlog completion ring */ + uint16 btlogbufpost; + uint16 max_btlogbufpost; +#endif /* BTLOG */ +#ifdef DHD_HMAPTEST + uint32 hmaptest_rx_active; + uint32 hmaptest_rx_pktid; + char *hmap_rx_buf_va; + dmaaddr_t hmap_rx_buf_pa; + uint32 hmap_rx_buf_len; + + uint32 hmaptest_tx_active; + uint32 hmaptest_tx_pktid; + char *hmap_tx_buf_va; + dmaaddr_t hmap_tx_buf_pa; + uint32 hmap_tx_buf_len; + dhd_hmaptest_t hmaptest; /* for hmaptest */ + bool hmap_enabled; /* TRUE = hmap is enabled */ +#endif /* DHD_HMAPTEST */ +#ifdef SNAPSHOT_UPLOAD + dhd_dma_buf_t snapshot_upload_buf; /* snapshot upload buffer */ + uint32 snapshot_upload_len; /* snapshot uploaded len */ + uint8 snapshot_type; /* snaphot uploaded type */ + bool snapshot_cmpl_pending; /* snapshot completion pending */ +#endif /* SNAPSHOT_UPLOAD */ + bool no_retry; + bool no_aggr; + bool fixed_rate; + dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */ +#ifdef DHD_HP2P + msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */ + msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */ +#endif /* DHD_HP2P */ + bool no_tx_resource; + uint32 txcpl_db_cnt; +#ifdef AGG_H2D_DB + agg_h2d_db_info_t agg_h2d_db_info; +#endif /* AGG_H2D_DB */ + uint64 tx_h2d_db_cnt; +} dhd_prot_t; + +#ifdef DHD_EWPR_VER2 +#define HANG_INFO_BASE64_BUFFER_SIZE 
640 +#endif + +#ifdef DHD_DUMP_PCIE_RINGS +static +int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, + const void *user_buf, unsigned long *file_posn); +#ifdef EWP_EDL +static +int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf, + unsigned long *file_posn); +#endif /* EWP_EDL */ +#endif /* DHD_DUMP_PCIE_RINGS */ +extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp); +extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap); +/* Convert a dmaaddr_t to a base_addr with htol operations */ +static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa); + +/* APIs for managing a DMA-able buffer */ +static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); + +/* msgbuf ring management */ +static int dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot); +static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, + const char *name, uint16 max_items, uint16 len_item, uint16 ringid); +static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf); + +/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */ +static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd); + +/* Fetch and Release a flowring msgbuf_ring from flowring pool */ +static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, + uint16 flowid); +/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */ + +/* Producer: Allocate space in a msgbuf ring */ +static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 *alloced, bool exactly_nitems); +static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, + uint16 *alloced, bool exactly_nitems); + +/* Consumer: Determine the location where the next message may be consumed */ +static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 *available_len); + +/* Producer (WR index update) or Consumer (RD index update) indication */ +static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring, + void *p, uint16 len); + +#ifdef AGG_H2D_DB +static void dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring, + void* p, uint16 len); +static void dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db); +static void dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid); +#endif /* AGG_H2D_DB */ +static void dhd_prot_ring_doorbell(dhd_pub_t *dhd, uint32 value); +static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz); + +/* Set/Get a RD or WR index in the array of indices */ +/* See also: dhd_prot_dma_indx_init() */ +void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, + uint16 ringid); +static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid); + +/* Locate a packet given a pktid */ +static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, + bool free_pktid); +/* Locate a packet given a PktId and 
free it. */ +static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send); + +static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf); +static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, + void *buf, int ifidx); + +/* Post buffers for Rx, control ioctl response and events */ +static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post); +static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid); +static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid); +static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub); + +static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid, uint32 rxcnt); + +#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) +static void dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len); +static void dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf); +#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */ + +/* D2H Message handling */ +static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len); + +/* D2H Message handlers */ +static void dhd_prot_noop(dhd_pub_t *dhd, void *msg); +static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg); + +/* Loopback test with dongle */ +static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma); +static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, + uint destdelay, dhd_dmaxfer_t *dma); +static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg); + +/* Flowring management communication with dongle */ +static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg); +static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg); + +/* Monitor Mode */ +#ifdef WL_MONITOR +extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx); +extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx); +#endif /* WL_MONITOR */ + +/* Configure a soft doorbell per D2H ring */ +static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd); +static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg); +static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf); +#if !defined(BCM_ROUTER_DHD) +static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf); +static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf); +#endif /* !BCM_ROUTER_DHD */ +static void 
dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf); +static void dhd_prot_detach_info_rings(dhd_pub_t *dhd); +#ifdef BTLOG +static void dhd_prot_process_btlog_complete(dhd_pub_t *dhd, void* buf); +static void dhd_prot_detach_btlog_rings(dhd_pub_t *dhd); +#endif /* BTLOG */ +#ifdef DHD_HP2P +static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd); +#endif /* DHD_HP2P */ +#ifdef EWP_EDL +static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd); +#endif +static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf); +static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf); + +#ifdef DHD_TIMESYNC +extern void dhd_parse_proto(uint8 *pktdata, dhd_pkt_parse_t *parse); +#endif + +#ifdef DHD_FLOW_RING_STATUS_TRACE +void dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf); +void dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf); +#endif /* DHD_FLOW_RING_STATUS_TRACE */ + +#ifdef DHD_TX_PROFILE +extern bool dhd_protocol_matches_profile(uint8 *p, int plen, const + dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc); +#endif /* defined(DHD_TX_PROFILE) */ + +#ifdef DHD_HP2P +static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus); +static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus); +static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid); +static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc); +#endif +typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg); + +/** callback functions for messages generated by the dongle */ +#define MSG_TYPE_INVALID 0 + +static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { + dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */ + dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */ + dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */ + NULL, + dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */ + NULL, + dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */ + NULL, + dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */ + NULL, + dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */ + NULL, + dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */ + NULL, + dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */ + NULL, + dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ + NULL, + NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */ + NULL, + dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_RESUME */ + dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */ + dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */ + NULL, /* MSG_TYPE_INFO_BUF_POST */ +#if defined(BCM_ROUTER_DHD) + NULL, /* MSG_TYPE_INFO_BUF_CMPLT */ +#else + dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */ +#endif /* BCM_ROUTER_DHD */ + NULL, /* MSG_TYPE_H2D_RING_CREATE */ + NULL, /* MSG_TYPE_D2H_RING_CREATE */ +#if defined(BCM_ROUTER_DHD) + NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ +#else + dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ +#endif /* BCM_ROUTER_DHD */ + dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG */ + NULL, /* MSG_TYPE_D2H_RING_CONFIG */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */ + dhd_prot_process_d2h_ring_config_complete, /* 
MSG_TYPE_D2H_RING_CONFIG_CMPLT */ + NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */ + dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */ + NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */ + NULL, /* MSG_TYPE_HOSTTIMSTAMP */ + dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */ + dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */ + NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */ + dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */ +}; + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) +/* Related to router CPU mapping per radio core */ +#define DHD_RX_CHAINING +#endif /* BCM_ROUTER_DHD && HNDCTF */ + +#ifdef DHD_RX_CHAINING + +#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ + (dhd_wet_chainable(dhd) && \ + dhd_rx_pkt_chainable((dhd), (ifidx)) && \ + !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \ + !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ + ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ + ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ + (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6)))) + +static INLINE void dhd_rxchain_reset(rxchain_info_t *rxchain); +static void dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); +static void dhd_rxchain_commit(dhd_pub_t *dhd); + +#define DHD_PKT_CTF_MAX_CHAIN_LEN 64 + +#endif /* DHD_RX_CHAINING */ + +#ifdef DHD_EFI +#define DHD_LPBKDTDUMP_ON() (1) +#else +#define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL) +#endif + +static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd); + +#ifdef D2H_MINIDUMP +dhd_dma_buf_t * +dhd_prot_get_minidump_buf(dhd_pub_t *dhd) +{ + return &dhd->prot->fw_trap_buf; +} +#endif /* D2H_MINIDUMP */ + +uint16 +dhd_prot_get_rxbufpost_sz(dhd_pub_t *dhd) +{ + return dhd->prot->rxbufpost_sz; +} + +uint16 +dhd_prot_get_h2d_rx_post_active(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *flow_ring = &prot->h2dring_rxp_subn; + uint16 rd, wr; + + /* Since wr is owned by host in h2d direction, directly read wr */ + wr = flow_ring->wr; + + if (dhd->dma_d2h_ring_upd_support) { + rd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + } + return NTXPACTIVE(rd, wr, flow_ring->max_items); +} + +uint16 +dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *flow_ring = &prot->d2hring_rx_cpln; + uint16 rd, wr; + + if (dhd->dma_d2h_ring_upd_support) { + wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + } + + /* Since rd is owned by host in d2h direction, directly read rd */ + rd = flow_ring->rd; + + return NTXPACTIVE(rd, wr, flow_ring->max_items); +} + +bool +dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info) +{ + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info; + uint16 rd, wr; + bool ret; + + if (dhd->dma_d2h_ring_upd_support) { + wr = flow_ring->wr; + } else { + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + } + if (dhd->dma_h2d_ring_upd_support) { + rd = flow_ring->rd; + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + } + ret = (wr == rd) ? 
TRUE : FALSE; + return ret; +} + +void +dhd_prot_dump_ring_ptrs(void *prot_info) +{ + msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info; + DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__, + ring->curr_rd, ring->rd, ring->wr)); +} + +uint16 +dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd) +{ + return (uint16)h2d_max_txpost; +} +void +dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost) +{ + h2d_max_txpost = max_txpost; +} +#if defined(DHD_HTPUT_TUNABLES) +uint16 +dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t *dhd) +{ + return (uint16)h2d_htput_max_txpost; +} +void +dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t *dhd, uint16 htput_max_txpost) +{ + h2d_htput_max_txpost = htput_max_txpost; +} + +#endif /* DHD_HTPUT_TUNABLES */ +/** + * D2H DMA to completion callback handlers. Based on the mode advertised by the + * dongle through the PCIE shared region, the appropriate callback will be + * registered in the proto layer to be invoked prior to precessing any message + * from a D2H DMA ring. If the dongle uses a read barrier or another mode that + * does not require host participation, then a noop callback handler will be + * bound that simply returns the msg_type. + */ +static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, + uint32 tries, volatile uchar *msg, int msglen); +static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd); +static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, + uint16 ring_type, uint32 id); +static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, + uint8 type, uint32 id); + +/** + * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has + * not completed, a livelock condition occurs. Host will avert this livelock by + * dropping this message and moving to the next. This dropped message can lead + * to a packet leak, or even something disastrous in the case the dropped + * message happens to be a control response. + * Here we will log this condition. One may choose to reboot the dongle. 
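+ * (This path is reached only after a sync callback below exhausts its full
+ * retry budget of PCIE_D2H_SYNC_NUM_OF_STEPS * PCIE_D2H_SYNC_WAIT_TRIES polls.)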
+ * + */ +static void +dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries, + volatile uchar *msg, int msglen) +{ + uint32 ring_seqnum = ring->seqnum; + + if (dhd_query_bus_erros(dhd)) { + return; + } + + DHD_ERROR(( + "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>" + " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n", + dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries, + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot, + ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr)); + + dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL); + + /* Try to resume if already suspended or suspend in progress */ +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* Skip if still in suspended or suspend in progress */ + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", + __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); + goto exit; + } + + dhd_bus_dump_console_buffer(dhd->bus); + dhd_prot_debug_info_print(dhd); + +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ + +exit: + dhd_schedule_reset(dhd); + +#ifdef OEM_ANDROID +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + /* XXX Trigger HANG event for recovery */ + dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; + dhd_os_send_hang_message(dhd); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#endif /* OEM_ANDROID */ + dhd->livelock_occured = TRUE; +} + +/** + * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM + * mode. Sequence number is always in the last word of a message. + */ +static uint8 +BCMFASTPATH(dhd_prot_d2h_sync_seqnum)(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */ + dhd_prot_t *prot = dhd->prot; + uint32 msg_seqnum; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from Host CPU some other bus master is + * accessing the DDR port, probably page close to the ring + * so, PCIE does not get a change to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there are no + * unwanted extra delay introdcued in normal conditions. 
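+ *
+ * With the defaults above, the total busy-wait is bounded by roughly
+ * PCIE_D2H_SYNC_WAIT_TRIES * PCIE_D2H_SYNC_DELAY * (1+2+3+4+5)
+ * = 512 * 100us * 15 = ~768ms, ignoring cache-invalidate and CPU-relax
+ * overhead (an estimate, not a vendor-specified bound).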
+ */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + msg_seqnum = *marker; + if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */ + ring->seqnum++; /* next expected sequence number */ + /* Check for LIVELOCK induce flag, which is set by firing + * dhd iovar to induce LIVELOCK error. If flag is set, + * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error. + */ + if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) { + goto dma_completed; + } + } + + total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries); + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ + OSL_DELAY(delay * step); /* Add stepper delay */ + + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */ + + dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries, + (volatile uchar *) msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM + * mode. The xorcsum is placed in the last word of a message. Dongle will also + * place a seqnum in the epoch field of the cmn_msg_hdr. + */ +static uint8 +BCMFASTPATH(dhd_prot_d2h_sync_xorcsum)(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 prot_checksum = 0; /* computed checksum */ + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from Host CPU some other bus master is + * accessing the DDR port, probably page close to the ring + * so, PCIE does not get a change to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there are no + * unwanted extra delay introdcued in normal conditions. + */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + /* First verify if the seqnumber has been update, + * if yes, then only check xorcsum. + * Once seqnum and xorcsum is proper that means + * complete message has arrived. + */ + if (msg->epoch == ring_seqnum) { + prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, + num_words); + if (prot_checksum == 0U) { /* checksum is OK */ + ring->seqnum++; /* next expected sequence number */ + /* Check for LIVELOCK induce flag, which is set by firing + * dhd iovar to induce LIVELOCK error. 
+/**
+ * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
+ * mode. The xorcsum is placed in the last word of a message. The dongle will
+ * also place a seqnum in the epoch field of the cmn_msg_hdr.
+ */
+static uint8
+BCMFASTPATH(dhd_prot_d2h_sync_xorcsum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+    volatile cmn_msg_hdr_t *msg, int msglen)
+{
+    uint32 tries;
+    uint32 prot_checksum = 0; /* computed checksum */
+    int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+    uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+    dhd_prot_t *prot = dhd->prot;
+    uint32 step = 0;
+    uint32 delay = PCIE_D2H_SYNC_DELAY;
+    uint32 total_tries = 0;
+
+    ASSERT(msglen == ring->item_len);
+
+    BCM_REFERENCE(delay);
+    /*
+     * For retries we use a stepper algorithm: every time the dongle comes
+     * out of the D3 cold state, the first D2H mem2mem DMA takes longer to
+     * complete, leading to livelock issues.
+     *
+     * Case 1 - Apart from the Host CPU, some other bus master is accessing
+     * the DDR port, probably a page close to the ring, so PCIe does not
+     * get a chance to update the memory.
+     * Solution - Increase the number of tries.
+     *
+     * Case 2 - The 50usec breathing time given by the Host CPU is not
+     * sufficient for the PCIe RC to start its work.
+     * Solution: Increase the delay in a stepper fashion, so that no
+     * unwanted extra delay is introduced in normal conditions.
+     */
+    for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+        for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+            /* First verify that the seqnum has been updated; only then
+             * check the xorcsum. Once both seqnum and xorcsum are valid,
+             * the complete message has arrived.
+             */
+            if (msg->epoch == ring_seqnum) {
+                prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
+                    num_words);
+                if (prot_checksum == 0U) { /* checksum is OK */
+                    ring->seqnum++; /* next expected sequence number */
+                    /* Check for the LIVELOCK induce flag, which is set by
+                     * firing a dhd iovar to induce a LIVELOCK error. If the
+                     * flag is set, MSG_TYPE_INVALID is returned, which
+                     * results in a LIVELOCK error.
+                     */
+                    if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+                        goto dma_completed;
+                    }
+                }
+            }
+
+            total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+            if (total_tries > prot->d2h_sync_wait_max)
+                prot->d2h_sync_wait_max = total_tries;
+
+            OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+            OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+            OSL_DELAY(delay * step); /* Add stepper delay */
+
+        } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+    } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+    DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
+    dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+        (volatile uchar *) msg, msglen);
+
+    ring->seqnum++; /* skip this message ... leak of a pktid */
+    return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
+
+dma_completed:
+
+    prot->d2h_sync_wait_tot += tries;
+    return msg->msg_type;
+}
+
+/**
+ * dhd_prot_d2h_sync_none - The dongle ensures that the DMA will complete, so
+ * the host does not need to sync. This noop sync handler is bound when the
+ * dongle advertises that neither the SEQNUM nor the XORCSUM mode of DMA sync
+ * is required.
+ */
+static uint8
+BCMFASTPATH(dhd_prot_d2h_sync_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+    volatile cmn_msg_hdr_t *msg, int msglen)
+{
+    /* Check for the LIVELOCK induce flag, which is set by firing a dhd iovar
+     * to induce a LIVELOCK error. If the flag is set, MSG_TYPE_INVALID is
+     * returned, which results in a LIVELOCK error.
+     */
+    if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+        DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+        return MSG_TYPE_INVALID;
+    } else {
+        return msg->msg_type;
+    }
+}
+
+#ifdef EWP_EDL
+/**
+ * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the
+ * cmn_msg_hdr_t header values at both the beginning and end of the payload.
+ * The cmn_msg_hdr_t is placed at the start and end of the payload
+ * in each work item in the EDL ring.
+ * The dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
+ * and the length of the payload in the 'request_id' field.
+ * Structure of each work item in the EDL ring:
+ * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
+ * NOTE: calculating an xorcsum over the entire payload (max length of 1648
+ * bytes) was felt to be too costly on the dongle side, taking up too many
+ * ARM cycles, hence the xorcsum sync method is not used for the EDL ring.
+ */
+static int
+BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+    volatile cmn_msg_hdr_t *msg)
+{
+    uint32 tries;
+    int msglen = 0, len = 0;
+    uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+    dhd_prot_t *prot = dhd->prot;
+    uint32 step = 0;
+    uint32 delay = PCIE_D2H_SYNC_DELAY;
+    uint32 total_tries = 0;
+    volatile cmn_msg_hdr_t *trailer = NULL;
+    volatile uint8 *buf = NULL;
+    bool valid_msg = FALSE;
+
+    BCM_REFERENCE(delay);
+    /*
+     * For retries we use a stepper algorithm: every time the dongle comes
+     * out of the D3 cold state, the first D2H mem2mem DMA takes longer to
+     * complete, leading to livelock issues.
+     *
+     * Case 1 - Apart from the Host CPU, some other bus master is accessing
+     * the DDR port, probably a page close to the ring, so PCIe does not
+     * get a chance to update the memory.
+     * Solution - Increase the number of tries.
+     *
+     * Case 2 - The 50usec breathing time given by the Host CPU is not
+     * sufficient for the PCIe RC to start its work.
+     * Solution: Increase the delay in a stepper fashion, so that no
+     * unwanted extra delay is introduced in normal conditions.
+     */
+    for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+        for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+            /* First verify that the seqnum has been updated; only then
+             * validate the header and trailer. Once seqnum, header and
+             * trailer have been validated, the complete message has arrived.
+             */
+            valid_msg = FALSE;
+            if (msg->epoch == ring_seqnum &&
+                msg->msg_type == MSG_TYPE_INFO_PYLD &&
+                msg->request_id > 0 &&
+                msg->request_id <= ring->item_len) {
+                /* proceed to check the trailer only if the header is valid */
+                buf = (volatile uint8 *)msg;
+                msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
+                buf += msglen;
+                if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
+                    trailer = (volatile cmn_msg_hdr_t *)buf;
+                    valid_msg = (trailer->epoch == ring_seqnum) &&
+                        (trailer->msg_type == msg->msg_type) &&
+                        (trailer->request_id == msg->request_id);
+                    if (!valid_msg) {
+                        DHD_TRACE(("%s: invalid trailer! seqnum=%u; reqid=%u;"
+                            " expected seqnum=%u; reqid=%u. Retrying...\n",
+                            __FUNCTION__, trailer->epoch, trailer->request_id,
+                            msg->epoch, msg->request_id));
+                    }
+                } else {
+                    DHD_TRACE(("%s: invalid payload length (%u)! Retrying...\n",
+                        __FUNCTION__, msg->request_id));
+                }
+
+                if (valid_msg) {
+                    /* data is OK */
+                    ring->seqnum++; /* next expected sequence number */
+                    if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+                        goto dma_completed;
+                    }
+                }
+            } else {
+                DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
+                    " msg_type=0x%x, request_id=%u."
+                    " Retrying...\n",
+                    __FUNCTION__, ring_seqnum, msg->epoch,
+                    msg->msg_type, msg->request_id));
+            }
+
+            total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+            if (total_tries > prot->d2h_sync_wait_max)
+                prot->d2h_sync_wait_max = total_tries;
+
+            OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+#if !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3))
+            OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+            OSL_DELAY(delay * step); /* Add stepper delay */
+#endif /* !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)) */
+
+        } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+    } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+    DHD_ERROR(("%s: EDL header check fails!\n", __FUNCTION__));
+    DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
+        " msgtype=0x%x; expected-msgtype=0x%x"
+        " length=%u; expected-max-length=%u", __FUNCTION__,
+        msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
+        msg->request_id, ring->item_len));
+    dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
+    if (trailer && msglen > 0 &&
+        (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
+        DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
+            " msgtype=0x%x; expected-msgtype=0x%x"
+            " length=%u; expected-length=%u", __FUNCTION__,
+            trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
+            trailer->request_id, msg->request_id));
+        dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
+            sizeof(*trailer), DHD_ERROR_VAL);
+    }
+
+    if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
+        len = msglen + sizeof(cmn_msg_hdr_t);
+    else
+        len = ring->item_len;
+
+    dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+        (volatile uchar *) msg, len);
+
+    ring->seqnum++; /* skip this message */
+    return BCME_ERROR; /* skip this work item; the caller treats it as a noop */
+
+dma_completed:
+    DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
+        msg->epoch, msg->request_id));
+
+    prot->d2h_sync_wait_tot += tries;
+    return BCME_OK;
+}
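+/*
+ * Where the trailer sits in an EDL work item, restated as a sketch. This
+ * mirrors the pointer arithmetic in dhd_prot_d2h_sync_edl() above; 'hdr'
+ * is a hypothetical local standing in for the work-item header:
+ *
+ *     volatile uint8 *p = (volatile uint8 *)hdr;
+ *     volatile cmn_msg_hdr_t *trl =
+ *         (volatile cmn_msg_hdr_t *)(p + sizeof(*hdr) + hdr->request_id);
+ *     valid = (trl->epoch == hdr->epoch) &&
+ *             (trl->msg_type == hdr->msg_type) &&
+ *             (trl->request_id == hdr->request_id);
+ */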
+/**
+ * dhd_prot_d2h_sync_edl_none - The dongle ensures that the DMA will complete,
+ * so the host does not need to sync. This noop sync handler is bound when the
+ * dongle advertises that neither the SEQNUM nor the XORCSUM mode of DMA sync
+ * is required.
+ */
+static int BCMFASTPATH
+(dhd_prot_d2h_sync_edl_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+    volatile cmn_msg_hdr_t *msg)
+{
+    /* Check for the LIVELOCK induce flag, which is set by firing a dhd iovar
+     * to induce a LIVELOCK error. If the flag is set, BCME_ERROR is
+     * returned, which results in a LIVELOCK error.
+     */
+    if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+        DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+        return BCME_ERROR;
+    } else {
+        if (msg->msg_type == MSG_TYPE_INFO_PYLD)
+            return BCME_OK;
+        else
+            return msg->msg_type;
+    }
+}
+#endif /* EWP_EDL */
+
+INLINE void
+dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
+{
+    /* To synchronize with the previous memory operations, call wmb() */
+    OSL_SMP_WMB();
+    dhd->prot->ioctl_received = reason;
+    /* Call another wmb() to make sure the event value is updated before waking up */
+    OSL_SMP_WMB();
+    dhd_os_ioctl_resp_wake(dhd);
+}
+
+/**
+ * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
+ * the dongle advertises.
+ */
+static void
+dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
+{
+    dhd_prot_t *prot = dhd->prot;
+    prot->d2h_sync_wait_max = 0UL;
+    prot->d2h_sync_wait_tot = 0UL;
+
+    prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+    prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+    prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+    prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+    prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+    prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+    if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
+        prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
+#ifdef EWP_EDL
+        prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
+        DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
+    } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
+        prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
+#ifdef EWP_EDL
+        prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
+        DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
+    } else {
+        prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+#ifdef EWP_EDL
+        prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
+#endif /* EWP_EDL */
+        DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
+    }
+}
+
+/**
+ * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
+ */
+static void
+dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
+{
+    dhd_prot_t *prot = dhd->prot;
+    prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
+
+    prot->h2dring_rxp_subn.current_phase = 0;
+
+    prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
+    prot->h2dring_ctrl_subn.current_phase = 0;
+}
+
+/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
+ * virtual and physical address, the buffer length and the DMA handler.
+ * A secdma handler is also included in the dhd_dma_buf object.
+ * +---------------------------------------------------------------------------+
+ */
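+/*
+ * A minimal usage sketch of the dhd_dma_buf helpers below, assuming a caller
+ * that needs a 4KB DMA-able region (error handling elided; this is an
+ * illustration, not a call sequence taken from the driver):
+ *
+ *     dhd_dma_buf_t buf;
+ *     memset(&buf, 0, sizeof(buf));
+ *     if (dhd_dma_buf_alloc(dhd, &buf, 4096) == BCME_OK) {
+ *         ... program buf.pa into the dongle; CPU accesses go via buf.va ...
+ *         dhd_dma_buf_free(dhd, &buf);
+ *     }
+ */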
+
+static INLINE void
+dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
+{
+    base_addr->low_addr = htol32(PHYSADDRLO(pa));
+    base_addr->high_addr = htol32(PHYSADDRHI(pa));
+}
+
+/**
+ * dhd_dma_buf_audit - Run sanity checks on a DHD DMA buffer.
+ */
+static int
+dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+    uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
+    ASSERT(dma_buf);
+    pa_lowaddr = PHYSADDRLO(dma_buf->pa);
+    ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
+    ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
+    ASSERT(dma_buf->len != 0);
+
+    /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
+    end = (pa_lowaddr + dma_buf->len); /* end address */
+
+    if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
+        DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
+            __FUNCTION__, pa_lowaddr, dma_buf->len));
+        return BCME_ERROR;
+    }
+
+    return BCME_OK;
+}
+
+/**
+ * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
+ * Returns BCME_OK=0 on success and a non-zero negative error value on failure.
+ */
+int
+dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
+{
+    uint32 dma_pad = 0;
+    osl_t *osh = dhd->osh;
+    uint16 dma_align = DMA_ALIGN_LEN;
+    uint32 rem = 0;
+
+    ASSERT(dma_buf != NULL);
+    ASSERT(dma_buf->va == NULL);
+    ASSERT(dma_buf->len == 0);
+
+    /* Pad the buffer length to align to cacheline size. */
+    rem = (buf_len % DHD_DMA_PAD);
+    dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
+
+    dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
+        dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
+
+    if (dma_buf->va == NULL) {
+        DHD_ERROR(("%s: buf_len %d, no memory available\n",
+            __FUNCTION__, buf_len));
+        return BCME_NOMEM;
+    }
+
+    dma_buf->len = buf_len; /* not including padded len */
+
+    if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
+        dhd_dma_buf_free(dhd, dma_buf);
+        return BCME_ERROR;
+    }
+
+    dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
+
+    return BCME_OK;
+}
+
+/**
+ * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
+ */
+static void
+dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+    if ((dma_buf == NULL) || (dma_buf->va == NULL))
+        return;
+
+    (void)dhd_dma_buf_audit(dhd, dma_buf);
+
+    /* Zero out the entire buffer and cache flush */
+    memset((void*)dma_buf->va, 0, dma_buf->len);
+    OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
+}
+
+void
+dhd_local_buf_reset(char *buf, uint32 len)
+{
+    /* Zero out the entire buffer and cache flush */
+    memset((void*)buf, 0, len);
+    OSL_CACHE_FLUSH((void *)buf, len);
+}
+
+/**
+ * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
+ * dhd_dma_buf_alloc().
+ */
+void
+dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+    osl_t *osh = dhd->osh;
+
+    ASSERT(dma_buf);
+
+    if (dma_buf->va == NULL)
+        return; /* Allow for free invocation, when alloc failed */
+
+    /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
+    (void)dhd_dma_buf_audit(dhd, dma_buf);
+
+    /* dma buffer may have been padded at allocation */
+    DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
+        dma_buf->pa, dma_buf->dmah);
+
+    memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
+}
+
+/**
+ * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
+ * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0. + */ +void +dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma) +{ + dhd_dma_buf_t *dma_buf; + ASSERT(dhd_dma_buf); + dma_buf = (dhd_dma_buf_t *)dhd_dma_buf; + dma_buf->va = va; + dma_buf->len = len; + dma_buf->pa = pa; + dma_buf->dmah = dmah; + dma_buf->secdma = secdma; + + /* Audit user defined configuration */ + (void)dhd_dma_buf_audit(dhd, dma_buf); +} + +/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */ + +/* + * +---------------------------------------------------------------------------+ + * DHD_MAP_PKTID_LOGGING + * Logging the PKTID and DMA map/unmap information for the SMMU fault issue + * debugging in customer platform. + * +---------------------------------------------------------------------------+ + */ + +#ifdef DHD_MAP_PKTID_LOGGING +typedef struct dhd_pktid_log_item { + dmaaddr_t pa; /* DMA bus address */ + uint64 ts_nsec; /* Timestamp: nsec */ + uint32 size; /* DMA map/unmap size */ + uint32 pktid; /* Packet ID */ + uint8 pkttype; /* Packet Type */ + uint8 rsvd[7]; /* Reserved for future use */ +} dhd_pktid_log_item_t; + +typedef struct dhd_pktid_log { + uint32 items; /* number of total items */ + uint32 index; /* index of pktid_log_item */ + dhd_pktid_log_item_t map[0]; /* metadata storage */ +} dhd_pktid_log_t; + +typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */ + +#define MAX_PKTID_LOG (2048) +#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t)) +#define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \ + ((DHD_PKTID_LOG_ITEM_SZ) * (items))) + +#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl)) +#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl)) +#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \ + dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype)) +#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd)) + +static dhd_pktid_log_handle_t * +dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items) +{ + dhd_pktid_log_t *log; + uint32 log_size; + + log_size = DHD_PKTID_LOG_SZ(num_items); + log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size); + if (log == NULL) { + DHD_ERROR(("%s: MALLOC failed for size %d\n", + __FUNCTION__, log_size)); + return (dhd_pktid_log_handle_t *)NULL; + } + + log->items = num_items; + log->index = 0; + + return (dhd_pktid_log_handle_t *)log; /* opaque handle */ +} + +static void +dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle) +{ + dhd_pktid_log_t *log; + uint32 log_size; + + if (handle == NULL) { + DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__)); + return; + } + + log = (dhd_pktid_log_t *)handle; + log_size = DHD_PKTID_LOG_SZ(log->items); + MFREE(dhd->osh, handle, log_size); +} + +static void +dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa, + uint32 pktid, uint32 len, uint8 pkttype) +{ + dhd_pktid_log_t *log; + uint32 idx; + + if (handle == NULL) { + DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__)); + return; + } + + log = (dhd_pktid_log_t *)handle; + idx = log->index; + log->map[idx].ts_nsec = OSL_LOCALTIME_NS(); + log->map[idx].pa = pa; + log->map[idx].pktid = pktid; + log->map[idx].size = len; + log->map[idx].pkttype = pkttype; + log->index = (idx + 1) % (log->items); /* update index */ +} + +void +dhd_pktid_logging_dump(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + dhd_pktid_log_t *map_log, 
*unmap_log;
+    uint64 ts_sec, ts_usec;
+
+    if (prot == NULL) {
+        DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
+        return;
+    }
+
+    map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
+    unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
+    OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
+    if (map_log && unmap_log) {
+        DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
+            "current time=[%5lu.%06lu]\n", __FUNCTION__,
+            map_log->index, unmap_log->index,
+            (unsigned long)ts_sec, (unsigned long)ts_usec));
+        DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
+            "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+            (uint64)__virt_to_phys((ulong)(map_log->map)),
+            (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
+            (uint64)__virt_to_phys((ulong)(unmap_log->map)),
+            (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
+    }
+}
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * Its main purpose is to save memory on the dongle, though it has other uses
+ * as well. The packet id map also includes storage for some packet parameters
+ * that may be saved. A native packet pointer along with the parameters may be
+ * saved, and a unique 32bit pkt id will be returned. Later, the saved packet
+ * pointer and the metadata may be retrieved using the previously allocated
+ * packet id.
+ * +---------------------------------------------------------------------------+
+ */
+#define DHD_PCIE_PKTID
+
+/* On Router, the pktptr serves as a pktid. */
+#if defined(BCM_ROUTER_DHD) && !defined(BCA_HNDROUTER)
+#undef DHD_PCIE_PKTID /* Comment this undef, to reenable PKTIDMAP */
+#endif /* BCM_ROUTER_DHD && !BCA_HNDROUTER */
+
+#if defined(BCM_ROUTER_DHD) && defined(DHD_PCIE_PKTID)
+#undef MAX_TX_PKTID
+#define MAX_TX_PKTID ((36 * 1024) - 1) /* Extend for 64 clients support. */
+#endif /* BCM_ROUTER_DHD && DHD_PCIE_PKTID */
+
+/* XXX: PROP_TXSTATUS: WLFC defines a private pkttag layout.
+ * Hence the dma parameters cannot be stored in the pkttag, and the pktidmap
+ * locker is required.
+ */ +#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID) +#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC" +#endif + +/* Enum for marking the buffer color based on usage */ +typedef enum dhd_pkttype { + PKTTYPE_DATA_TX = 0, + PKTTYPE_DATA_RX, + PKTTYPE_IOCTL_RX, + PKTTYPE_EVENT_RX, + PKTTYPE_INFO_RX, + /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */ + PKTTYPE_NO_CHECK, + PKTTYPE_TSBUF_RX +} dhd_pkttype_t; + +#define DHD_PKTID_MIN_AVAIL_COUNT 512U +#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U) +#define DHD_PKTID_INVALID (0U) +#define DHD_IOCTL_REQ_PKTID (0xFFFE) +#define DHD_FAKE_PKTID (0xFACE) +#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD +#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC +#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB +#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA +#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9 +#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8 +#ifdef DHD_HP2P +#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7 +#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6 +#endif /* DHD_HP2P */ + +#define IS_FLOWRING(ring) \ + ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0)) + +typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ + +/* Construct a packet id mapping table, returning an opaque map handle */ +static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items); + +/* Destroy a packet id mapping table, freeing all packets active in the table */ +static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map); + +#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items)) +#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map)) +#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map)) +#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map)) + +#if defined(DHD_PCIE_PKTID) +#if defined(NDIS) || defined(DHD_EFI) +/* XXX: for NDIS, using consistent memory instead of buffer from PKTGET for + * up to 8K ioctl response + */ +#define IOCTLRESP_USE_CONSTMEM +static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf); +static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf); +#endif /* NDIS || DHD_EFI */ + +/* Determine number of pktids that are available */ +static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle); + +/* Allocate a unique pktid against which a pkt and some metadata is saved */ +static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, dhd_pkttype_t pkttype); +static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); +static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + void *pkt, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); + +/* Return an allocated pktid, retrieving previously saved pkt and metadata */ +static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah, + void **secdma, dhd_pkttype_t pkttype, bool rsv_locker); + +#ifdef DHD_PKTTS +/* Store the Metadata buffer to the locker */ +static INLINE void +dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt, + dmaaddr_t mpkt_pa, + uint16 mpkt_len, + void *dmah, + uint32 nkey); + +/* Return the Metadata 
buffer from the locker */
+static void * dhd_pktid_map_retreive_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+    dmaaddr_t *pmpkt_pa, uint32 *pmpkt_len, void **pdmah, uint32 nkey);
+#endif /* DHD_PKTTS */
+
+/*
+ * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
+ *
+ * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
+ * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
+ *
+ * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
+ * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
+ */
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+#define USE_DHD_PKTID_AUDIT_LOCK 1
+/* Audit the pktidmap allocator */
+/* #define DHD_PKTID_AUDIT_MAP */
+
+/* Audit the pktid during production/consumption of workitems */
+#define DHD_PKTID_AUDIT_RING
+
+#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
+#error "Only one of DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be enabled at a time."
+#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
+
+#define DHD_DUPLICATE_ALLOC 1
+#define DHD_DUPLICATE_FREE 2
+#define DHD_TEST_IS_ALLOC 3
+#define DHD_TEST_IS_FREE 4
+
+typedef enum dhd_pktid_map_type {
+    DHD_PKTID_MAP_TYPE_CTRL = 1,
+    DHD_PKTID_MAP_TYPE_TX,
+    DHD_PKTID_MAP_TYPE_RX,
+    DHD_PKTID_MAP_TYPE_UNKNOWN
+} dhd_pktid_map_type_t;
+
+#ifdef USE_DHD_PKTID_AUDIT_LOCK
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh) osl_spin_lock_init(osh)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_AUDIT_LOCK(lock) osl_spin_lock(lock)
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) osl_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
+#define DHD_PKTID_AUDIT_LOCK(lock) 0
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
+#endif /* !USE_DHD_PKTID_AUDIT_LOCK */
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+#define USE_DHD_PKTID_LOCK 1
+
+#ifdef USE_DHD_PKTID_LOCK
+#define DHD_PKTID_LOCK_INIT(osh) osl_spin_lock_init(osh)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKTID_UNLOCK(lock, flags) osl_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock) \
+    do { \
+        BCM_REFERENCE(osh); \
+        BCM_REFERENCE(lock); \
+    } while (0)
+/* match the two-argument call sites below */
+#define DHD_PKTID_LOCK(lock, flags) \
+    do { \
+        BCM_REFERENCE(lock); \
+        BCM_REFERENCE(flags); \
+    } while (0)
+#define DHD_PKTID_UNLOCK(lock, flags) \
+    do { \
+        BCM_REFERENCE(lock); \
+        BCM_REFERENCE(flags); \
+    } while (0)
+#endif /* !USE_DHD_PKTID_LOCK */
+
+typedef enum dhd_locker_state {
+    LOCKER_IS_FREE,
+    LOCKER_IS_BUSY,
+    LOCKER_IS_RSVD
+} dhd_locker_state_t;
+
+/* Packet metadata saved in packet id mapper */
+
+typedef struct dhd_pktid_item {
+    dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
+    uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
+    dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
+    uint16 len; /* length of mapped packet's buffer */
+    void *pkt; /* opaque native pointer to a packet */
+    dmaaddr_t pa; /* physical address of mapped packet's buffer */
+    void *dmah; /* handle to OS specific DMA map */
+    void *secdma;
+#ifdef DHD_PKTTS
+    void *mpkt; /* VA of Metadata */
+    dmaaddr_t mpkt_pa; /* PA of Metadata */
+    uint16 mpkt_len; /* Length of Metadata */
+#endif /* DHD_PKTTS */
+} dhd_pktid_item_t;
+
+typedef uint32 dhd_pktid_key_t;
+
+typedef struct dhd_pktid_map {
+    uint32 items; /* total
items in map */ + uint32 avail; /* total available items */ + int failures; /* lockers unavailable count */ + /* Spinlock to protect dhd_pktid_map in process/tasklet context */ + void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + void *pktid_audit_lock; + struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */ + dhd_pktid_item_t lockers[0]; /* metadata storage */ +} dhd_pktid_map_t; + +/* + * PktId (Locker) #0 is never allocated and is considered invalid. + * + * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a + * depleted pktid pool and must not be used by the caller. + * + * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. + */ + +#define DHD_PKTID_FREE_LOCKER (FALSE) +#define DHD_PKTID_RSV_LOCKER (TRUE) + +#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) +#define DHD_PKIDMAP_ITEMS(items) (items) +#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ + (DHD_PKTID_ITEM_SZ * ((items) + 1))) +#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1)) + +#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map)) + +/* Convert a packet to a pktid, and save pkt pointer in busy locker */ +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \ + dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype)) +/* Reuse a previously reserved locker to save packet params */ +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) +/* Convert a packet to a pktid, and save packet params in locker */ +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert pktid to a packet, and free the locker */ +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) + +/* Convert the pktid to a packet, empty locker, but keep it reserved */ +#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) + +#ifdef DHD_PKTTS +#define DHD_PKTID_SAVE_METADATA(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey) \ + dhd_pktid_map_save_metadata(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey) + +#define DHD_PKTID_RETREIVE_METADATA(dhd, map, mpkt_pa, mpkt_len, dmah, nkey) \ + dhd_pktid_map_retreive_metadata(dhd, map, (dmaaddr_t *)&mpkt_pa, (uint32 *)&mpkt_len, \ + (void **) &dmah, nkey) +#endif /* DHD_PKTTS */ + +#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) + +#if defined(DHD_PKTID_AUDIT_ENABLED) + +static int +dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map) +{ + dhd_prot_t *prot = dhd->prot; + int pktid_map_type; + + if (pktid_map == prot->pktid_ctrl_map) { + pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL; + } else if (pktid_map == prot->pktid_tx_map) { + 
pktid_map_type = DHD_PKTID_MAP_TYPE_TX; + } else if (pktid_map == prot->pktid_rx_map) { + pktid_map_type = DHD_PKTID_MAP_TYPE_RX; + } else { + pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN; + } + + return pktid_map_type; +} + +/** +* __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid. +*/ +static int +__dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg) +{ +#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: " + struct bcm_mwbmap *handle; + uint32 flags; + bool ignore_audit; + int error = BCME_OK; + + if (pktid_map == (dhd_pktid_map_t *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg)); + return BCME_OK; + } + + flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock); + + handle = pktid_map->pktid_audit; + if (handle == (struct bcm_mwbmap *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg)); + goto out; + } + + /* Exclude special pktids from audit */ + ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID); + if (ignore_audit) { + goto out; + } + + if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid)); + error = BCME_ERROR; + goto out; + } + + /* Perform audit */ + switch (test_for) { + case DHD_DUPLICATE_ALLOC: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n", + errmsg, pktid)); + error = BCME_ERROR; + } else { + bcm_mwbmap_force(handle, pktid); + } + break; + + case DHD_DUPLICATE_FREE: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n", + errmsg, pktid)); + error = BCME_ERROR; + } else { + bcm_mwbmap_free(handle, pktid); + } + break; + + case DHD_TEST_IS_ALLOC: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n", + errmsg, pktid)); + error = BCME_ERROR; + } + break; + + case DHD_TEST_IS_FREE: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free", + errmsg, pktid)); + error = BCME_ERROR; + } + break; + + default: + DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for)); + error = BCME_ERROR; + break; + } + +out: + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + + if (error != BCME_OK) { + dhd->pktid_audit_failed = TRUE; + } + + return error; +} + +static int +dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg) +{ + int ret = BCME_OK; + ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg); + if (ret == BCME_ERROR) { + DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n", + __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map))); + dhd_pktid_error_handler(dhd); +#ifdef DHD_MAP_PKTID_LOGGING + DHD_PKTID_LOG_DUMP(dhd); +#endif /* DHD_MAP_PKTID_LOGGING */ + } + + return ret; +} + +#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \ + dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__) + +static int +dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid, + const int test_for, void *msg, uint32 msg_len, const char *func) +{ + int ret = BCME_OK; + + if (dhd_query_bus_erros(dhdp)) { + return BCME_ERROR; + } + + ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func); + if (ret == BCME_ERROR) { + DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n", + __FUNCTION__, pktid, 
dhd_get_pktid_map_type(dhdp, map)));
+        prhex(func, (uchar *)msg, msg_len);
+        dhd_pktid_error_handler(dhdp);
+#ifdef DHD_MAP_PKTID_LOGGING
+        DHD_PKTID_LOG_DUMP(dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+    }
+    return ret;
+}
+#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
+    dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
+        (pktid), (test_for), msg, msg_len, __FUNCTION__)
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+/**
+ * +---------------------------------------------------------------------------+
+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
+ *
+ * dhd_pktid_map manages a set of unique Packet Ids in the range
+ * [1..MAX_xxx_PKTID].
+ *
+ * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
+ * packet id is returned. This unique packet id may be used to retrieve the
+ * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
+ * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
+ * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
+ *
+ * Implementation Note:
+ * Convert this into a locker abstraction and place it into bcmutils!
+ * The locker abstraction should treat contents as opaque storage, and a
+ * callback should be registered to handle busy lockers on destructor.
+ *
+ * +---------------------------------------------------------------------------+
+ */
+
+/** Allocate and initialize a mapper of num_items */
+
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
+{
+    void* osh;
+    uint32 nkey;
+    dhd_pktid_map_t *map;
+    uint32 dhd_pktid_map_sz;
+    uint32 map_items;
+    uint32 map_keys_sz;
+    osh = dhd->osh;
+
+    dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
+
+    map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
+    if (map == NULL) {
+        DHD_ERROR(("%s:%d: VMALLOC failed for size %d\n",
+            __FUNCTION__, __LINE__, dhd_pktid_map_sz));
+        return (dhd_pktid_map_handle_t *)NULL;
+    }
+
+    map->items = num_items;
+    map->avail = num_items;
+
+    map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+    map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+    /* Initialize the lock that protects this structure */
+    map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
+    if (map->pktid_lock == NULL) {
+        DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
+        goto error;
+    }
+
+    map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
+    if (map->keys == NULL) {
+        DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
+            __FUNCTION__, __LINE__, map_keys_sz));
+        goto error;
+    }
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+    /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
+    map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
+    if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
+        DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
+        goto error;
+    } else {
+        DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
+            __FUNCTION__, __LINE__, map_items + 1));
+    }
+    map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+    for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
+        map->keys[nkey] = nkey; /* populate with unique keys */
+        map->lockers[nkey].state = LOCKER_IS_FREE;
+        map->lockers[nkey].pkt = NULL; /* bzero: redundant */
+        map->lockers[nkey].len = 0;
+    }
+
+    /* Reserve pktid #0, i.e. DHD_PKTID_INVALID, as in use */
+    map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
+    map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
+    map->lockers[DHD_PKTID_INVALID].len = 0;
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+    /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
+    bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+    return (dhd_pktid_map_handle_t *)map; /* opaque handle */
+
+error:
+    if (map) {
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+        if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+            bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+            map->pktid_audit = (struct bcm_mwbmap *)NULL;
+            if (map->pktid_audit_lock)
+                DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+        }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+        if (map->keys) {
+            MFREE(osh, map->keys, map_keys_sz);
+        }
+
+        if (map->pktid_lock) {
+            DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
+        }
+
+        VMFREE(osh, map, dhd_pktid_map_sz);
+    }
+    return (dhd_pktid_map_handle_t *)NULL;
+}
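+/*
+ * Size arithmetic behind the allocations above, restated as a sketch:
+ * lockers and keys are indexed 1..items with index 0 reserved for
+ * DHD_PKTID_INVALID, hence the "+ 1" in both macros defined earlier:
+ *
+ *     DHD_PKTID_MAP_SZ(items)     == sizeof(dhd_pktid_map_t) +
+ *                                    sizeof(dhd_pktid_item_t) * (items + 1)
+ *     DHD_PKTIDMAP_KEYS_SZ(items) == sizeof(dhd_pktid_key_t) * (items + 1)
+ */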
+/**
+ * Retrieve all allocated keys and free all of them.
+ * Freeing implies: unmapping the buffers and freeing the native packets.
+ * This could have been a callback registered with the pktid mapper.
+ */
+static void
+dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+    void *osh;
+    uint32 nkey;
+    dhd_pktid_map_t *map;
+    dhd_pktid_item_t *locker;
+    uint32 map_items;
+    unsigned long flags;
+    bool data_tx = FALSE;
+
+    map = (dhd_pktid_map_t *)handle;
+    DHD_PKTID_LOCK(map->pktid_lock, flags);
+    osh = dhd->osh;
+
+    map_items = DHD_PKIDMAP_ITEMS(map->items);
+    /* skip reserved KEY #0, and start from 1 */
+
+    for (nkey = 1; nkey <= map_items; nkey++) {
+        if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
+            locker = &map->lockers[nkey];
+            locker->state = LOCKER_IS_FREE;
+            data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
+            if (data_tx) {
+                OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
+            }
+
+#ifdef DHD_PKTID_AUDIT_RING
+            DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_RING */
+#ifdef DHD_MAP_PKTID_LOGGING
+            DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
+                locker->pa, nkey, locker->len,
+                locker->pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+            DMA_UNMAP(osh, locker->pa, locker->len, locker->dir, 0, locker->dmah);
+            dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
+                locker->pkttype, data_tx);
+        }
+        else {
+#ifdef DHD_PKTID_AUDIT_RING
+            DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
+        }
+        map->keys[nkey] = nkey; /* populate with unique keys */
+    }
+
+    map->avail = map_items;
+    memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
+    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+/** Called in detach scenario. Releasing IOCTL buffers.
*/ +static void +dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + uint32 nkey; + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + uint32 map_items; + unsigned long flags; + + map = (dhd_pktid_map_t *)handle; + DHD_PKTID_LOCK(map->pktid_lock, flags); + + map_items = DHD_PKIDMAP_ITEMS(map->items); + /* skip reserved KEY #0, and start from 1 */ + for (nkey = 1; nkey <= map_items; nkey++) { + if (map->lockers[nkey].state == LOCKER_IS_BUSY) { + dhd_dma_buf_t retbuf; + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_RING */ + + locker = &map->lockers[nkey]; + retbuf.va = locker->pkt; + retbuf.len = locker->len; + retbuf.pa = locker->pa; + retbuf.dmah = locker->dmah; + retbuf.secdma = locker->secdma; + + free_ioctl_return_buffer(dhd, &retbuf); + } + else { +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_RING */ + } + map->keys[nkey] = nkey; /* populate with unique keys */ + } + + map->avail = map_items; + memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +/** + * Free the pktid map. + */ +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_keys_sz; + + if (handle == NULL) + return; + + /* Free any pending packets */ + dhd_pktid_map_reset(dhd, handle); + + map = (dhd_pktid_map_t *)handle; + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + + DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + MFREE(dhd->osh, map->keys, map_keys_sz); + VMFREE(dhd->osh, handle, dhd_pktid_map_sz); +} + +#ifdef IOCTLRESP_USE_CONSTMEM +static void +dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_keys_sz; + + if (handle == NULL) + return; + + /* Free any pending packets */ + dhd_pktid_map_reset_ioctl(dhd, handle); + + map = (dhd_pktid_map_t *)handle; + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + + DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + MFREE(dhd->osh, map->keys, map_keys_sz); + VMFREE(dhd->osh, handle, dhd_pktid_map_sz); +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +/** Get the pktid free count */ +static INLINE uint32 +BCMFASTPATH(dhd_pktid_map_avail_cnt)(dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 avail; + unsigned long flags; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + DHD_PKTID_LOCK(map->pktid_lock, flags); + avail = map->avail; + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return avail; 
+}
+
+/**
+ * dhd_pktid_map_reserve - Reserve a unique numbered key. The reserved locker
+ * is not yet populated; invoke the pktid save api to populate the packet
+ * parameters into the locker. This function is not reentrant; serializing
+ * calls is the caller's responsibility. A caller must treat a returned value
+ * of DHD_PKTID_INVALID as a failure case, implying a depleted pool of pktids.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+    void *pkt, dhd_pkttype_t pkttype)
+{
+    uint32 nkey;
+    dhd_pktid_map_t *map;
+    dhd_pktid_item_t *locker;
+    unsigned long flags;
+
+    ASSERT(handle != NULL);
+    map = (dhd_pktid_map_t *)handle;
+
+    DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+    if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
+        map->failures++;
+        DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+        return DHD_PKTID_INVALID; /* failed alloc request */
+    }
+
+    ASSERT(map->avail <= map->items);
+    nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+
+    if ((map->avail > map->items) || (nkey > map->items)) {
+        map->failures++;
+        DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
+            " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
+            __FUNCTION__, __LINE__, map->avail, nkey,
+            pkttype));
+        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+        return DHD_PKTID_INVALID; /* failed alloc request */
+    }
+
+    locker = &map->lockers[nkey]; /* save packet metadata in locker */
+    map->avail--;
+    locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
+    locker->len = 0;
+    locker->state = LOCKER_IS_BUSY; /* reserve this locker */
+
+    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+    ASSERT(nkey != DHD_PKTID_INVALID);
+
+    return nkey; /* return locker's numbered key */
+}
+
+#ifdef DHD_PKTTS
+/*
+ * dhd_pktid_map_save_metadata - Save metadata information in a locker
+ * that has a reserved unique numbered key.
+ */
+static INLINE void
+dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
+    dmaaddr_t mpkt_pa,
+    uint16 mpkt_len,
+    void *dmah,
+    uint32 nkey)
+{
+    dhd_pktid_map_t *map;
+    dhd_pktid_item_t *locker;
+    unsigned long flags;
+
+    ASSERT(handle != NULL);
+    map = (dhd_pktid_map_t *)handle;
+
+    DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+    if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+        DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u>\n",
+            __FUNCTION__, __LINE__, nkey));
+        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+        if (dhd->memdump_enabled) {
+            /* collect core dump */
+            dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+            dhd_bus_mem_dump(dhd);
+        }
+#else
+        ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+        return;
+    }
+
+    locker = &map->lockers[nkey];
+
+    /*
+     * TODO: checking the locker state for BUSY would prevent
+     * us from storing metadata in an already allocated
+     * locker, but not checking may lead to overwriting
+     * existing data.
+     */
+    locker->mpkt = mpkt;
+    locker->mpkt_pa = mpkt_pa;
+    locker->mpkt_len = mpkt_len;
+    locker->dmah = dmah;
+
+    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+#endif /* DHD_PKTTS */
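+/*
+ * Locker state machine driven by the pktid routines in this section, shown
+ * as a sketch (states are from dhd_locker_state_t defined earlier):
+ *
+ *     LOCKER_IS_FREE --dhd_pktid_map_reserve()-------------> LOCKER_IS_BUSY
+ *     LOCKER_IS_BUSY --dhd_pktid_map_free(FREE_LOCKER)-----> LOCKER_IS_FREE
+ *     LOCKER_IS_BUSY --dhd_pktid_map_free(RSV_LOCKER)------> LOCKER_IS_RSVD
+ *     LOCKER_IS_RSVD --dhd_pktid_map_save()----------------> LOCKER_IS_BUSY
+ */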
+/*
+ * dhd_pktid_map_save - Save a packet's parameters into the locker
+ * corresponding to a previously reserved unique numbered key.
+ */
+static INLINE void
+dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+    uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+    dhd_pkttype_t pkttype)
+{
+    dhd_pktid_map_t *map;
+    dhd_pktid_item_t *locker;
+    unsigned long flags;
+
+    ASSERT(handle != NULL);
+    map = (dhd_pktid_map_t *)handle;
+
+    DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+    if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+        DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
+            __FUNCTION__, __LINE__, nkey, pkttype));
+        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+        if (dhd->memdump_enabled) {
+            /* collect core dump */
+            dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+            dhd_bus_mem_dump(dhd);
+        }
+#else
+        ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+        return;
+    }
+
+    locker = &map->lockers[nkey];
+
+    ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
+        ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
+
+    /* store contents in locker */
+    locker->dir = dir;
+    locker->pa = pa;
+    locker->len = (uint16)len; /* 16bit len */
+    locker->dmah = dmah; /* DMA map handle */
+    locker->secdma = secdma;
+    locker->pkttype = pkttype;
+    locker->pkt = pkt;
+    locker->state = LOCKER_IS_BUSY; /* make this locker busy */
+#ifdef DHD_MAP_PKTID_LOGGING
+    DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
+    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+
+/**
+ * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
+ * contents into the corresponding locker. Return the numbered key.
+ */
+static uint32
+BCMFASTPATH(dhd_pktid_map_alloc)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+    dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+    dhd_pkttype_t pkttype)
+{
+    uint32 nkey;
+
+    nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
+    if (nkey != DHD_PKTID_INVALID) {
+        dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
+            len, dir, dmah, secdma, pkttype);
+    }
+
+    return nkey;
+}
+
+#ifdef DHD_PKTTS
+static void *
+BCMFASTPATH(dhd_pktid_map_retreive_metadata)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+    dmaaddr_t *pmpkt_pa,
+    uint32 *pmpkt_len,
+    void **pdmah,
+    uint32 nkey)
+{
+    dhd_pktid_map_t *map;
+    dhd_pktid_item_t *locker;
+    void *mpkt;
+    unsigned long flags;
+
+    ASSERT(handle != NULL);
+
+    map = (dhd_pktid_map_t *)handle;
+
+    DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+    /* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
+    if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+        DHD_ERROR(("%s:%d: Error! Trying to free invalid pktid<%u>\n",
+            __FUNCTION__, __LINE__, nkey));
+        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+        if (dhd->memdump_enabled) {
+            /* collect core dump */
+            dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+            dhd_bus_mem_dump(dhd);
+        }
+#else
+        ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+        return NULL;
+    }
+
+    locker = &map->lockers[nkey];
+    mpkt = locker->mpkt;
+    *pmpkt_pa = locker->mpkt_pa;
+    *pmpkt_len = locker->mpkt_len;
+    if (pdmah)
+        *pdmah = locker->dmah;
+    locker->mpkt = NULL;
+    locker->mpkt_len = 0;
+    locker->dmah = NULL;
+
+    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+    return mpkt;
+}
+#endif /* DHD_PKTTS */
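+/*
+ * A hedged usage sketch of the free path, built from the DHD_PKTID_TO_NATIVE
+ * macro defined earlier; 'pa', 'len', 'dmah' and 'secdma' are output holders,
+ * and the DMA_TX direction and PKTFREE call are assumptions about the caller,
+ * not taken from this file:
+ *
+ *     pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
+ *             PKTTYPE_DATA_TX);
+ *     if (pkt != NULL) {
+ *         DMA_UNMAP(dhd->osh, pa, len, DMA_TX, 0, dmah);
+ *         PKTFREE(dhd->osh, pkt, TRUE);
+ *     }
+ */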
+/**
+ * dhd_pktid_map_free - Given a numbered key, return the locker contents.
+ * dhd_pktid_map_free() is not reentrant; serializing calls is the caller's
+ * responsibility. A caller may not free a pktid value of DHD_PKTID_INVALID
+ * or an arbitrary pktid value; only a previously allocated pktid may be freed.
+ */
+static void *
+BCMFASTPATH(dhd_pktid_map_free)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
+    dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
+    bool rsv_locker)
+{
+    dhd_pktid_map_t *map;
+    dhd_pktid_item_t *locker;
+    void * pkt;
+    unsigned long long locker_addr;
+    unsigned long flags;
+
+    ASSERT(handle != NULL);
+
+    map = (dhd_pktid_map_t *)handle;
+
+    DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+    /* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
+    if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+        DHD_ERROR(("%s:%d: Error! Trying to free invalid pktid<%u>, pkttype<%d>\n",
+            __FUNCTION__, __LINE__, nkey, pkttype));
+        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+        if (dhd->memdump_enabled) {
+            /* collect core dump */
+            dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+            dhd_bus_mem_dump(dhd);
+        }
+#else
+        ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+        return NULL;
+    }
+
+    locker = &map->lockers[nkey];
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+    DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+    /* Debug check for cloned numbered key */
+    if (locker->state == LOCKER_IS_FREE) {
+        DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
+            __FUNCTION__, __LINE__, nkey));
+        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+        /* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
+#ifdef DHD_FW_COREDUMP
+        if (dhd->memdump_enabled) {
+            /* collect core dump */
+            dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+            dhd_bus_mem_dump(dhd);
+        }
+#else
+        ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+        return NULL;
+    }
+
+    /* Check the colour of the buffer, i.e. the buffer posted for TX should
+     * be freed on TX completion; similarly, the buffer posted for IOCTL
+     * should be freed on IOCTL completion, etc.
+     */
+    if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
+        DHD_ERROR(("%s:%d: Error!
Invalid Buffer Free for pktid<%u> \n", + __FUNCTION__, __LINE__, nkey)); +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(locker->pa, locker_addr); +#else + locker_addr = PHYSADDRLO(locker->pa); +#endif /* BCMDMA64OSL */ + DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>," + "pkttype <%d> locker->pa <0x%llx> \n", + __FUNCTION__, __LINE__, locker->state, locker->pkttype, + pkttype, locker_addr)); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return NULL; + } + + if (rsv_locker == DHD_PKTID_FREE_LOCKER) { + map->avail++; + map->keys[map->avail] = nkey; /* make this numbered key available */ + locker->state = LOCKER_IS_FREE; /* open and free Locker */ + } else { + /* pktid will be reused, but the locker does not have a valid pkt */ + locker->state = LOCKER_IS_RSVD; + } + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_MAP */ +#ifdef DHD_MAP_PKTID_LOGGING + DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey, + (uint32)locker->len, pkttype); +#endif /* DHD_MAP_PKTID_LOGGING */ + + *pa = locker->pa; /* return contents of locker */ + *len = (uint32)locker->len; + *dmah = locker->dmah; + *secdma = locker->secdma; + + pkt = locker->pkt; + locker->pkt = NULL; /* Clear pkt */ + locker->len = 0; + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return pkt; +} + +#else /* ! DHD_PCIE_PKTID */ + +#ifndef linux +#error "DHD_PCIE_PKTID has to be defined for non-linux/android platforms" +#endif + +typedef struct pktlist { + PKT_LIST *tx_pkt_list; /* list for tx packets */ + PKT_LIST *rx_pkt_list; /* list for rx packets */ + PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */ +} pktlists_t; + +/* + * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail + * of a one to one mapping 32bit pktptr and a 32bit pktid. + * + * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail. + * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by + * a lock. + * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined. 
+ */ +#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32)) +#define DHD_PKTPTR32(pktid32) ((void *)(pktid32)) + +static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype); +static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype); + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = NULL; + + if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(pktlists_t))); + goto error_done; + } + + if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + PKTLIST_INIT(handle->tx_pkt_list); + PKTLIST_INIT(handle->rx_pkt_list); + PKTLIST_INIT(handle->ctrl_pkt_list); + + return (dhd_pktid_map_handle_t *) handle; + +error: + if (handle->ctrl_pkt_list) { + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } + +error_done: + return (dhd_pktid_map_handle_t *)NULL; +} + +static void +dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle) +{ + osl_t *osh = dhd->osh; + + if (handle->ctrl_pkt_list) { + PKTLIST_FINI(handle->ctrl_pkt_list); + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + PKTLIST_FINI(handle->rx_pkt_list); + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + PKTLIST_FINI(handle->tx_pkt_list); + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } +} + +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = (pktlists_t *) map; + + ASSERT(handle != NULL); + if (handle == (pktlists_t *)NULL) { + return; + } + + dhd_pktid_map_reset(dhd, handle); + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } +} + +/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */ +static INLINE uint32 +dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + ASSERT(pktptr32 != NULL); + DHD_PKT_SET_DMA_LEN(pktptr32, dma_len); + DHD_PKT_SET_DMAH(pktptr32, dmah); + DHD_PKT_SET_PA(pktptr32, pa); + DHD_PKT_SET_SECDMA(pktptr32, secdma); + + /* XXX optimize these branch conditionals */ + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_ENQ(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_ENQ(handle->rx_pkt_list, 
pktptr32); + } else { + PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32); + } + + return DHD_PKTID32(pktptr32); +} + +/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */ +static INLINE void * +dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + void *pktptr32; + + ASSERT(pktid32 != 0U); + pktptr32 = DHD_PKTPTR32(pktid32); + *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32); + *dmah = DHD_PKT_GET_DMAH(pktptr32); + *pa = DHD_PKT_GET_PA(pktptr32); + *secdma = DHD_PKT_GET_SECDMA(pktptr32); + + /* XXX optimize these branch conditionals */ + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32); + } else { + PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32); + } + + return pktptr32; +} + +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt) + +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \ + dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&secdma, (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_AVAIL(map) (~0) + +#endif /* ! DHD_PCIE_PKTID */ + +/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */ + +/* + * Allocating buffers for common rings. 
+ * also allocating Buffers for hmaptest, Scratch buffer for dma rx offset, + * bus_throughput_measurement and snapshot upload + */ +static int +dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot) +{ + + /* Common Ring Allocations */ + + /* Ring 0: H2D Control Submission */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl", + H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE, + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 1: H2D Receive Buffer Post */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp", + H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE, + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 2: D2H Control Completion */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl", + D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 3: D2H Transmit Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl", + D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* Ring 4: D2H Receive Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl", + D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* + * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able + * buffers for flowrings will be instantiated, in dhd_prot_init() . 
+	 * See dhd_prot_flowrings_pool_attach()
+	 */
+	/* ioctl response buffer */
+	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
+		goto fail;
+	}
+
+	/* IOCTL request buffer */
+	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
+		goto fail;
+	}
+
+	/* Host TS request buffer; one buffer for now */
+	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
+		goto fail;
+	}
+	prot->hostts_req_buf_inuse = FALSE;
+
+	/* Scratch buffer for dma rx offset */
+#ifdef BCM_HOST_BUF
+	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
+		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
+#else
+	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
+
+#endif /* BCM_HOST_BUF */
+	{
+		goto fail;
+	}
+
+#ifdef DHD_HMAPTEST
+	/* Allocate buffer for hmaptest */
+	DHD_ERROR(("allocating memory for hmaptest \n"));
+	if (dhd_dma_buf_alloc(dhd, &prot->hmaptest.mem, HMAP_SANDBOX_BUFFER_LEN)) {
+
+		goto fail;
+	} else {
+		uint32 scratch_len;
+		uint64 scratch_lin, w1_start;
+		dmaaddr_t scratch_pa;
+
+		scratch_pa = prot->hmaptest.mem.pa;
+		scratch_len = prot->hmaptest.mem.len;
+		scratch_lin = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
+			| (((uint64)PHYSADDRHI(scratch_pa) & 0xffffffff) << 32);
+		w1_start = scratch_lin + scratch_len;
+		DHD_ERROR(("hmap: NOTE Buffer alloc for HMAPTEST Start=0x%0llx len=0x%08x"
+			" End=0x%0llx\n", (uint64) scratch_lin, scratch_len, (uint64) w1_start));
+	}
+#endif /* DHD_HMAPTEST */
+
+	/* scratch buffer for bus throughput measurement */
+	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
+		goto fail;
+	}
+
+#ifdef SNAPSHOT_UPLOAD
+	/* snapshot upload buffer */
+	if (dhd_dma_buf_alloc(dhd, &prot->snapshot_upload_buf, SNAPSHOT_UPLOAD_BUF_SIZE)) {
+		goto fail;
+	}
+#endif /* SNAPSHOT_UPLOAD */
+
+	return BCME_OK;
+
+fail:
+	return BCME_NOMEM;
+}
+
+/**
+ * The PCIE FD protocol layer is constructed in two phases:
+ * Phase 1. dhd_prot_attach()
+ * Phase 2. dhd_prot_init()
+ *
+ * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
+ * All Common rings are also attached (msgbuf_ring_t objects are allocated
+ * with DMA-able buffers).
+ * All dhd_dma_buf_t objects are also allocated here.
+ *
+ * As dhd_prot_attach() is invoked before the pcie_shared object is read, any
+ * initialization of objects that requires information advertised by the dongle
+ * may not be performed here.
+ * E.g. the number of TxPost flowrings is not known at this point, nor do we
+ * know which form of D2H DMA sync mechanism is advertised by the dongle, or
+ * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
+ * rings (common + flow).
+ *
+ * dhd_prot_init() is invoked after the bus layer has fetched the information
+ * advertised by the dongle in the pcie_shared_t.
+ */
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	osl_t *osh = dhd->osh;
+	dhd_prot_t *prot;
+	uint32 trap_buf_len;
+
+	/* Allocate prot structure */
+	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
+		sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(prot, 0, sizeof(*prot));
+
+	prot->osh = osh;
+	dhd->prot = prot;
+
+	/* DMAing ring completes supported?
+	 * FALSE by default */
+	dhd->dma_d2h_ring_upd_support = FALSE;
+	dhd->dma_h2d_ring_upd_support = FALSE;
+	dhd->dma_ring_upd_overwrite = FALSE;
+
+	dhd->idma_inited = 0;
+	dhd->ifrm_inited = 0;
+	dhd->dar_inited = 0;
+
+	if (dhd_prot_allocate_bufs(dhd, prot) != BCME_OK) {
+		goto fail;
+	}
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_CTRL);
+	if (prot->pktid_ctrl_map == NULL) {
+		goto fail;
+	}
+
+	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_RX);
+	if (prot->pktid_rx_map == NULL)
+		goto fail;
+
+	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_TX);
+	if (prot->pktid_tx_map == NULL)
+		goto fail;
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
+		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
+	if (prot->pktid_map_handle_ioctl == NULL) {
+		goto fail;
+	}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+	prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+	if (prot->pktid_dma_map == NULL) {
+		DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
+			__FUNCTION__));
+	}
+
+	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+	if (prot->pktid_dma_unmap == NULL) {
+		DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
+			__FUNCTION__));
+	}
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+#ifdef D2H_MINIDUMP
+	if (dhd->bus->sih->buscorerev < 71) {
+		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
+	} else {
+		/* buscorerev >= 71, supports minidump of len 96KB */
+		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN;
+	}
+#else
+	/* FW is going to DMA extended trap data,
+	 * allocate buffer for the maximum extended trap data.
+	 */
+	trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
+#endif /* D2H_MINIDUMP */
+
+	/* Initialize trap buffer */
+	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
+		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	return BCME_OK;
+
+fail:
+
+	if (prot) {
+		/* Free up all allocated memories */
+		dhd_prot_detach(dhd);
+	}
+
+	return BCME_NOMEM;
+} /* dhd_prot_attach */
+
+static int
+dhd_alloc_host_scbs(dhd_pub_t *dhd)
+{
+	int ret = BCME_OK;
+	sh_addr_t base_addr;
+	dhd_prot_t *prot = dhd->prot;
+	uint32 host_scb_size = 0;
+
+	if (dhd->hscb_enable) {
+		/* read number of bytes to allocate from F/W */
+		dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
+		if (host_scb_size) {
+			/* In the fw reload scenario the buffer could have been allocated for a
+			 * previous run. Check whether the existing buffer can accommodate the
+			 * new firmware requirement and reuse it if possible.
+			 */
+			if (prot->host_scb_buf.va) {
+				if (prot->host_scb_buf.len >= host_scb_size) {
+					prot->host_scb_buf.len = host_scb_size;
+				} else {
+					dhd_dma_buf_free(dhd, &prot->host_scb_buf);
+				}
+			}
+			/* alloc array of host scbs */
+			if (prot->host_scb_buf.va == NULL) {
+				ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
+			}
+			/* write host scb address to F/W */
+			if (ret == BCME_OK) {
+				dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
+				dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+					HOST_SCB_ADDR, 0);
+			}
+		}
+	} else {
+		DHD_TRACE(("%s: Host scb not supported in F/W.\n", __FUNCTION__));
+	}
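+	/*
+	 * Editorial sketch (not part of the original patch): the reuse-or-realloc
+	 * pattern above generalizes to any dhd_dma_buf_t that must survive a
+	 * firmware reload. Assuming a generic buffer 'buf' and a new required
+	 * size 'need' (both names illustrative only):
+	 *
+	 *	if (buf->va && buf->len >= need)
+	 *		buf->len = need;                  // shrink in place, keep mapping
+	 *	else {
+	 *		if (buf->va)
+	 *			dhd_dma_buf_free(dhd, buf);   // too small: release old buffer
+	 *		ret = dhd_dma_buf_alloc(dhd, buf, need);
+	 *	}
+	 */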
\n", __FUNCTION__)); + } + + if (ret != BCME_OK) { + DHD_ERROR(("%s dhd_alloc_host_scbs, alloc failed: Err Code %d\n", + __FUNCTION__, ret)); + } + return ret; +} + +void +dhd_set_host_cap(dhd_pub_t *dhd) +{ + uint32 data = 0; + dhd_prot_t *prot = dhd->prot; +#ifdef D2H_MINIDUMP + uint16 host_trap_addr_len; +#endif /* D2H_MINIDUMP */ + + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { + if (dhd->h2d_phase_supported) { + data |= HOSTCAP_H2D_VALID_PHASE; + if (dhd->force_dongletrap_on_bad_h2d_phase) + data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE; + } + if (prot->host_ipc_version > prot->device_ipc_version) + prot->active_ipc_version = prot->device_ipc_version; + else + prot->active_ipc_version = prot->host_ipc_version; + + data |= prot->active_ipc_version; + + if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) { + DHD_INFO(("Advertise Hostready Capability\n")); + data |= HOSTCAP_H2D_ENABLE_HOSTRDY; + } +#ifdef PCIE_INB_DW + if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) { + DHD_INFO(("Advertise Inband-DW Capability\n")); + data |= HOSTCAP_DS_INBAND_DW; + data |= HOSTCAP_DS_NO_OOB_DW; + dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB); + if (!dhd->dma_h2d_ring_upd_support || !dhd->dma_d2h_ring_upd_support) { + dhd_init_dongle_ds_lock(dhd->bus); + dhdpcie_set_dongle_deepsleep(dhd->bus, FALSE); + } + } else +#endif /* PCIE_INB_DW */ +#ifdef PCIE_OOB + if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) { + dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB); + } else +#endif /* PCIE_OOB */ + { + /* Disable DS altogether */ + data |= HOSTCAP_DS_NO_OOB_DW; + dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE); + } + + /* Indicate support for extended trap data */ + data |= HOSTCAP_EXTENDED_TRAP_DATA; + + /* Indicate support for TX status metadata */ + if (dhd->pcie_txs_metadata_enable != 0) + data |= HOSTCAP_TXSTATUS_METADATA; + +#ifdef BTLOG + /* Indicate support for BT logging */ + if (dhd->bt_logging) { + if (dhd->bt_logging_enabled) { + data |= HOSTCAP_BT_LOGGING; + DHD_ERROR(("BT LOGGING enabled\n")); + } + else { + DHD_ERROR(("BT logging upported in FW, BT LOGGING disabled\n")); + } + } + else { + DHD_ERROR(("BT LOGGING not enabled in FW !!\n")); + } +#endif /* BTLOG */ + + /* Enable fast delete ring in firmware if supported */ + if (dhd->fast_delete_ring_support) { + data |= HOSTCAP_FAST_DELETE_RING; + } + + if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) { + DHD_ERROR(("IDMA inited\n")); + data |= HOSTCAP_H2D_IDMA; + dhd->idma_inited = TRUE; + } else { + DHD_ERROR(("IDMA not enabled in FW !!\n")); + dhd->idma_inited = FALSE; + } + + if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) { + DHD_ERROR(("IFRM Inited\n")); + data |= HOSTCAP_H2D_IFRM; + dhd->ifrm_inited = TRUE; + dhd->dma_h2d_ring_upd_support = FALSE; + dhd_prot_dma_indx_free(dhd); + } else { + DHD_ERROR(("IFRM not enabled in FW !!\n")); + dhd->ifrm_inited = FALSE; + } + + if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) { + DHD_ERROR(("DAR doorbell Use\n")); + data |= HOSTCAP_H2D_DAR; + dhd->dar_inited = TRUE; + } else { + DHD_ERROR(("DAR not enabled in FW !!\n")); + dhd->dar_inited = FALSE; + } + + /* FW Checks for HOSTCAP_UR_FW_NO_TRAP and Does not TRAP if set + * Radar 36403220 JIRA SWWLAN-182145 + */ + data |= HOSTCAP_UR_FW_NO_TRAP; + +#ifdef SNAPSHOT_UPLOAD + /* Indicate support for snapshot upload */ + if (dhd->snapshot_upload) { + data |= HOSTCAP_SNAPSHOT_UPLOAD; + DHD_ERROR(("ALLOW SNAPSHOT UPLOAD!!\n")); + } +#endif /* SNAPSHOT_UPLOAD */ + + if (dhd->hscb_enable) { + data |= HOSTCAP_HSCB; + } 
+
+#ifdef EWP_EDL
+		if (dhd->dongle_edl_support) {
+			data |= HOSTCAP_EDL_RING;
+			DHD_ERROR(("Enable EDL host cap\n"));
+		} else {
+			DHD_ERROR(("DO NOT SET EDL host cap\n"));
+		}
+#endif /* EWP_EDL */
+
+#ifdef D2H_MINIDUMP
+		if (dhd_bus_is_minidump_enabled(dhd)) {
+			data |= HOSTCAP_EXT_TRAP_DBGBUF;
+			DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
+		}
+#endif /* D2H_MINIDUMP */
+#ifdef DHD_HP2P
+		if (dhdpcie_bus_get_hp2p_supported(dhd->bus)) {
+			data |= HOSTCAP_PKT_TIMESTAMP;
+			data |= HOSTCAP_PKT_HP2P;
+			DHD_ERROR(("Enable HP2P in host cap\n"));
+		} else {
+			DHD_ERROR(("HP2P not enabled in host cap\n"));
+		}
+#endif /* DHD_HP2P */
+
+#ifdef DHD_DB0TS
+		if (dhd->db0ts_capable) {
+			data |= HOSTCAP_DB0_TIMESTAMP;
+			DHD_ERROR(("Enable DB0 TS in host cap\n"));
+		} else {
+			DHD_ERROR(("DB0 TS not enabled in host cap\n"));
+		}
+#endif /* DHD_DB0TS */
+		if (dhd->extdtxs_in_txcpl) {
+			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+			data |= HOSTCAP_PKT_TXSTATUS;
+		}
+		else {
+			DHD_ERROR(("Do not enable hostcap: EXTD TXS in txcpl\n"));
+		}
+
+		DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
+			__FUNCTION__,
+			prot->active_ipc_version, prot->host_ipc_version,
+			prot->device_ipc_version));
+
+		dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
+			sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
+#ifdef D2H_MINIDUMP
+		if (dhd_bus_is_minidump_enabled(dhd)) {
+			/* Dongle expects the host_trap_addr_len in terms of words */
+			host_trap_addr_len = prot->fw_trap_buf.len / 4;
+			dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
+				sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
+		}
+#endif /* D2H_MINIDUMP */
+	}
+
+#ifdef DHD_TIMESYNC
+	dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
+#endif /* DHD_TIMESYNC */
+}
+
+#ifdef AGG_H2D_DB
+void dhd_agg_inflight_stats_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+	uint64 *inflight_histo = dhd->prot->agg_h2d_db_info.inflight_histo;
+	uint32 i;
+	uint64 total_inflight_histo = 0;
+
+	bcm_bprintf(strbuf, "inflight: \t count\n");
+	for (i = 0; i < DHD_NUM_INFLIGHT_HISTO_ROWS; i++) {
+		bcm_bprintf(strbuf, "%16u: \t %llu\n", 1U << i, inflight_histo[i]);
+		total_inflight_histo += inflight_histo[i];
+	}
+	bcm_bprintf(strbuf, "total inflight count: \t %llu\n", total_inflight_histo);
+}
+
+void dhd_agg_inflight_stats_update(dhd_pub_t *dhd, uint32 inflight)
+{
+	uint64 *bin = dhd->prot->agg_h2d_db_info.inflight_histo;
+	uint64 *p;
+	uint32 bin_power;
+	bin_power = next_larger_power2(inflight);
+
+	switch (bin_power) {
+		case 1: p = bin + 0; break;
+		case 2: p = bin + 1; break;
+		case 4: p = bin + 2; break;
+		case 8: p = bin + 3; break;
+		case 16: p = bin + 4; break;
+		case 32: p = bin + 5; break;
+		case 64: p = bin + 6; break;
+		case 128: p = bin + 7; break;
+		case 256: p = bin + 8; break;
+		case 512: p = bin + 9; break;
+		case 1024: p = bin + 10; break;
+		case 2048: p = bin + 11; break;
+		case 4096: p = bin + 12; break;
+		case 8192: p = bin + 13; break;
+		default : p = bin + 13; break;
+	}
+	ASSERT((p - bin) < DHD_NUM_INFLIGHT_HISTO_ROWS);
+	*p = *p + 1;
+	return;
+}
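+
+/*
+ * Editorial sketch (not part of the original patch): the switch above is a
+ * dependency-free way of turning the rounded-up power of two into a histogram
+ * bin index. Worked example: an in-flight count of 10 rounds up to 16 via
+ * next_larger_power2(), which selects bin index 4, i.e. the "16" row that
+ * dhd_agg_inflight_stats_dump() prints as 1U << 4. Counts beyond 8192
+ * saturate into the last bin, so the array never overruns
+ * DHD_NUM_INFLIGHT_HISTO_ROWS.
+ */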
+
+/*
+ * dhd_msgbuf_agg_h2d_db_timer_fn:
+ * Timer callback function for ringing the h2d DB.
+ * This runs in isr context (HRTIMER_MODE_REL),
+ * so do not hold any spin_lock_bh().
+ * Using HRTIMER_MODE_REL_SOFT causes TPUT regressions.
+ */
+enum hrtimer_restart
+dhd_msgbuf_agg_h2d_db_timer_fn(struct hrtimer *timer)
+{
+	agg_h2d_db_info_t *agg_db_info;
+	dhd_pub_t *dhd;
+	dhd_prot_t *prot;
+	uint32 db_index;
+	uint corerev;
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	agg_db_info = container_of(timer, agg_h2d_db_info_t, timer);
+	GCC_DIAGNOSTIC_POP();
+
+	dhd = agg_db_info->dhd;
+	prot = dhd->prot;
+
+	prot->agg_h2d_db_info.timer_db_cnt++;
+	if (IDMA_ACTIVE(dhd)) {
+		db_index = IDMA_IDX0;
+		if (dhd->bus->sih) {
+			corerev = dhd->bus->sih->buscorerev;
+			if (corerev >= 24) {
+				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
+			}
+		}
+		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+	} else {
+		prot->mb_ring_fn(dhd->bus, DHD_AGGR_H2D_DB_MAGIC);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+void
+dhd_msgbuf_agg_h2d_db_timer_start(dhd_prot_t *prot)
+{
+	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+
+	/* Queue the timer only when it is not in the queue */
+	if (!hrtimer_active(&agg_db_info->timer)) {
+		hrtimer_start(&agg_db_info->timer, ns_to_ktime(agg_h2d_db_timeout * NSEC_PER_USEC),
+			HRTIMER_MODE_REL);
+	}
+}
+
+static void
+dhd_msgbuf_agg_h2d_db_timer_init(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+
+	agg_db_info->dhd = dhd;
+	hrtimer_init(&agg_db_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	/* The timer function will run from ISR context, ensure no spin_lock_bh are used */
+	agg_db_info->timer.function = &dhd_msgbuf_agg_h2d_db_timer_fn;
+	agg_db_info->init = TRUE;
+	agg_db_info->timer_db_cnt = 0;
+	agg_db_info->direct_db_cnt = 0;
+	agg_db_info->inflight_histo = (uint64 *)MALLOCZ(dhd->osh, DHD_INFLIGHT_HISTO_SIZE);
+}
+
+static void
+dhd_msgbuf_agg_h2d_db_timer_reset(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+	if (agg_db_info->init) {
+		if (agg_db_info->inflight_histo) {
+			MFREE(dhd->osh, agg_db_info->inflight_histo, DHD_INFLIGHT_HISTO_SIZE);
+		}
+		hrtimer_try_to_cancel(&agg_db_info->timer);
+		agg_db_info->init = FALSE;
+	}
+}
+
+static void
+dhd_msgbuf_agg_h2d_db_timer_cancel(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+	hrtimer_try_to_cancel(&agg_db_info->timer);
+}
+#endif /* AGG_H2D_DB */
+
+void
+dhd_prot_clearcounts(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+#ifdef AGG_H2D_DB
+	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+	if (agg_db_info->inflight_histo) {
+		memset(agg_db_info->inflight_histo, 0, DHD_INFLIGHT_HISTO_SIZE);
+	}
+	agg_db_info->direct_db_cnt = 0;
+	agg_db_info->timer_db_cnt = 0;
+#endif /* AGG_H2D_DB */
+	prot->txcpl_db_cnt = 0;
+	prot->tx_h2d_db_cnt = 0;
+}
+
+/**
+ * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
+ * completed its initialization of the pcie_shared structure, we may now fetch
+ * the dongle advertised features and adjust the protocol layer accordingly.
+ *
+ * dhd_prot_init() may be invoked again after a dhd_prot_reset().
+ */
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+	sh_addr_t base_addr;
+	dhd_prot_t *prot = dhd->prot;
+	int ret = 0;
+	uint32 idmacontrol;
+	uint32 waitcount = 0;
+	uint16 max_eventbufpost = 0;
+
+	/**
+	 * A user defined value can be assigned to the global variable h2d_max_txpost via
+	 * 1. DHD IOVAR h2d_max_txpost, before firmware download
+	 * 2. module parameter h2d_max_txpost
+	 * prot->h2d_max_txpost is assigned DHD_H2DRING_TXPOST_MAX_ITEM
+	 * if the user has not set a value by one of the above methods.
+ */ + prot->h2d_max_txpost = (uint16)h2d_max_txpost; + DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost)); + +#if defined(DHD_HTPUT_TUNABLES) + prot->h2d_htput_max_txpost = (uint16)h2d_htput_max_txpost; + DHD_ERROR(("%s:%d: h2d_htput_max_txpost = %d\n", + __FUNCTION__, __LINE__, prot->h2d_htput_max_txpost)); +#endif /* DHD_HTPUT_TUNABLES */ + + /* Read max rx packets supported by dongle */ + dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); + if (prot->max_rxbufpost == 0) { + /* This would happen if the dongle firmware is not */ + /* using the latest shared structure template */ + prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; + } + DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); + + /* Initialize. bzero() would blow away the dma pointers. */ + max_eventbufpost = (uint16)dhdpcie_get_max_eventbufpost(dhd->bus); + prot->max_eventbufpost = (((max_eventbufpost + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST)) >= + H2DRING_CTRL_SUB_MAX_ITEM) ? DHD_FLOWRING_MAX_EVENTBUF_POST : max_eventbufpost; + prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; + prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST; +#ifdef BTLOG + prot->max_btlogbufpost = DHD_H2D_BTLOGRING_MAX_BUF_POST; +#endif /* BTLOG */ + prot->max_tsbufpost = DHD_MAX_TSBUF_POST; + + prot->cur_ioctlresp_bufs_posted = 0; + OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count); + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->rxbufpost = 0; + prot->tot_rxbufpost = 0; + prot->tot_rxcpl = 0; + prot->cur_event_bufs_posted = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->cur_ts_bufs_posted = 0; + prot->infobufpost = 0; +#ifdef BTLOG + prot->btlogbufpost = 0; +#endif /* BTLOG */ + + prot->dmaxfer.srcmem.va = NULL; + prot->dmaxfer.dstmem.va = NULL; + prot->dmaxfer.in_progress = FALSE; + +#ifdef DHD_HMAPTEST + prot->hmaptest.in_progress = FALSE; +#endif /* DHD_HMAPTEST */ + prot->metadata_dbg = FALSE; + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; + + /* To catch any rollover issues fast, starting with higher ioctl_trans_id */ + prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; + prot->ioctl_state = 0; + prot->ioctl_status = 0; + prot->ioctl_resplen = 0; + prot->ioctl_received = IOCTL_WAIT; + + /* Initialize Common MsgBuf Rings */ + + prot->device_ipc_version = dhd->bus->api.fw_rev; + prot->host_ipc_version = PCIE_SHARED_VERSION; + prot->no_tx_resource = FALSE; + + /* Init the host API version */ + dhd_set_host_cap(dhd); + + /* alloc and configure scb host address for dongle */ + if ((ret = dhd_alloc_host_scbs(dhd))) { + return ret; + } + + /* Register the interrupt function upfront */ + /* remove corerev checks in data path */ + /* do this after host/fw negotiation for DAR */ + prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); + prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus); + + prot->tx_h2d_db_cnt = 0; +#ifdef AGG_H2D_DB + dhd_msgbuf_agg_h2d_db_timer_init(dhd); +#endif /* AGG_H2D_DB */ + + dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? 
+		TRUE : FALSE;
+
+	/* If supported by the host, indicate the memory block
+	 * for completion writes / submission reads to shared space
+	 */
+	if (dhd->dma_d2h_ring_upd_support) {
+		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			D2H_DMA_INDX_WR_BUF, 0);
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_DMA_INDX_RD_BUF, 0);
+	}
+
+	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_DMA_INDX_WR_BUF, 0);
+		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			D2H_DMA_INDX_RD_BUF, 0);
+	}
+
+	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
+	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
+	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
+
+	/* Make it compatible with pre-rev7 firmware */
+	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
+		prot->d2hring_tx_cpln.item_len =
+			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
+		prot->d2hring_rx_cpln.item_len =
+			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
+	}
+	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
+	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
+
+	dhd_prot_d2h_sync_init(dhd);
+
+	dhd_prot_h2d_sync_init(dhd);
+
+#ifdef PCIE_INB_DW
+	/* Set the initial DS state */
+	if (INBAND_DW_ENAB(dhd->bus)) {
+		dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
+			DW_DEVICE_DS_ACTIVE);
+	}
+#endif /* PCIE_INB_DW */
+
+	/* init the scratch buffer */
+	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
+	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+		D2H_DMA_SCRATCH_BUF, 0);
+	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
+		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
+#ifdef DHD_DMA_INDICES_SEQNUM
+	prot->host_seqnum = D2H_EPOCH_INIT_VAL % D2H_EPOCH_MODULO;
+#endif /* DHD_DMA_INDICES_SEQNUM */
+	/* Signal to the dongle that common ring init is complete */
+	if (dhd->hostrdy_after_init)
+		dhd_bus_hostready(dhd->bus);
+
+	/*
+	 * If the DMA-able buffers for flowrings need to come from a specific
+	 * contiguous memory region, then set up prot->flowrings_dma_buf here.
+	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
+	 * this contiguous memory region, for each of the flowrings.
+	 */
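+	/*
+	 * Editorial sketch (not part of the original patch): carving one
+	 * contiguous DMA region into per-flowring buffers amounts to handing out
+	 * fixed-size slices of a parent dhd_dma_buf_t. Assuming 'parent' holds
+	 * room for 'num' rings of 'ring_sz' bytes each (all names illustrative,
+	 * the address macros are the OSL physical-address helpers used elsewhere
+	 * in this file):
+	 *
+	 *	for (i = 0; i < num; i++) {
+	 *		ring[i].dma_buf.va  = (uint8 *)parent->va + i * ring_sz;
+	 *		PHYSADDRLOSET(ring[i].dma_buf.pa,
+	 *			PHYSADDRLO(parent->pa) + i * ring_sz);
+	 *		ring[i].dma_buf.len = ring_sz;
+	 *	}
+	 */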
+
+	/* Pre-allocate pool of msgbuf_ring for flowrings */
+	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
+		return BCME_ERROR;
+	}
+
+	dhd->ring_attached = TRUE;
+
+	/* If IFRM is enabled, wait for FW to setup the DMA channel */
+	if (IFRM_ENAB(dhd)) {
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_IFRM_INDX_WR_BUF, 0);
+	}
+
+	/* If IDMA is enabled and inited, wait for FW to setup the IDMA descriptors.
+	 * Waiting just before configuring doorbell
+	 */
+#ifdef BCMQT
+#define IDMA_ENABLE_WAIT 100
+#else
+#define IDMA_ENABLE_WAIT 10
+#endif
+	if (IDMA_ACTIVE(dhd)) {
+		/* wait for idma_en bit in IDMAcontrol register to be set */
+		/* Loop till idma_en is not set */
+		uint buscorerev = dhd->bus->sih->buscorerev;
+		idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+			IDMAControl(buscorerev), 0, 0);
+		while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
+			(waitcount++ < IDMA_ENABLE_WAIT)) {
+
+			DHD_ERROR(("iDMA not enabled yet, waiting 1 ms c=%d IDMAControl = %08x\n",
+				waitcount, idmacontrol));
+			OSL_DELAY(1000); /* 1ms, as it is one-time only */
+			idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+				IDMAControl(buscorerev), 0, 0);
+		}
+
+		if (waitcount < IDMA_ENABLE_WAIT) {
+			DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
+		} else {
+			DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
+				waitcount, idmacontrol));
+			return BCME_ERROR;
+		}
+		// add delay to fix bring up issue
+		OSL_SLEEP(1);
+	}
+
+	/* Host should configure soft doorbells if needed ... here */
+
+	/* Post to dongle host configured soft doorbells */
+	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
+
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+	prot->no_retry = FALSE;
+	prot->no_aggr = FALSE;
+	prot->fixed_rate = FALSE;
+
+	/*
+	 * Note that any communication with the Dongle should be added
+	 * below this point. Any other host data structure initialization that
+	 * needs to be done before the DPC starts executing should be done
+	 * before this point.
+	 * Because once we start sending H2D requests to the Dongle, the Dongle
+	 * responds immediately. So the DPC context to handle this
+	 * D2H response could preempt the context in which dhd_prot_init is running.
+	 * We want to ensure that all the host part of dhd_prot_init is
+	 * done before that.
+	 */
+
+	/* See if info rings could be created, info rings should be created
+	 * only if dongle does not support EDL
+	 */
+#ifdef EWP_EDL
+	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+#else
+	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+#endif /* EWP_EDL */
+	{
+		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
+			/* For now log and proceed, further clean up action may be
+			 * necessary when we have more clarity.
+			 */
+			DHD_ERROR(("%s Info rings couldn't be created: Err Code %d\n",
+				__FUNCTION__, ret));
+		}
+	}
+
+#ifdef EWP_EDL
+	/* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
+	if (dhd->dongle_edl_support) {
+		if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
+			DHD_ERROR(("%s EDL rings couldn't be created: Err Code %d\n",
+				__FUNCTION__, ret));
+		}
+	}
+#endif /* EWP_EDL */
+
+#ifdef BTLOG
+	/* create BT log rings */
+	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->bt_logging) {
+		if ((ret = dhd_prot_init_btlog_rings(dhd)) != BCME_OK) {
+			/* For now log and proceed, further clean up action may be
+			 * necessary when we have more clarity.
+			 */
+			DHD_ERROR(("%s BT log rings couldn't be created: Err Code %d\n",
+				__FUNCTION__, ret));
+		}
+	}
+#endif /* BTLOG */
+
+#ifdef DHD_HP2P
+	/* create HPP txcmpl/rxcmpl rings */
+	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
+		if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
+			/* For now log and proceed, further clean up action may be
+			 * necessary when we have more clarity.
+			 */
+			DHD_ERROR(("%s HP2P rings couldn't be created: Err Code %d\n",
+				__FUNCTION__, ret));
+		}
+	}
+#endif /* DHD_HP2P */
+
+#ifdef DHD_LB_RXP
+	/* default rx flow ctrl thresholds. Can be changed at run time through sysfs */
+	dhd->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STOP_THR);
+	dhd->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STRT_THR);
+	atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
+#endif /* DHD_LB_RXP */
+	return BCME_OK;
+} /* dhd_prot_init */
+
+/**
+ * dhd_prot_detach - PCIE FD protocol layer destructor.
+ * Unlinks and frees allocated protocol memory (including dhd_prot)
+ */
+void dhd_prot_detach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Stop the protocol module */
+	if (prot) {
+		/* For non-android platforms, devreset will not be called,
+		 * so call prot_reset here. It is harmless if called twice.
+		 */
+		dhd_prot_reset(dhd);
+
+		/* free up all DMA-able buffers allocated during prot attach/init */
+
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
+#ifdef DHD_HMAPTEST
+		dhd_dma_buf_free(dhd, &prot->hmaptest.mem);
+#endif /* DHD_HMAPTEST */
+		dhd_dma_buf_free(dhd, &prot->retbuf);
+		dhd_dma_buf_free(dhd, &prot->ioctbuf);
+		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
+		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
+		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
+		dhd_dma_buf_free(dhd, &prot->host_scb_buf);
+#ifdef SNAPSHOT_UPLOAD
+		dhd_dma_buf_free(dhd, &prot->snapshot_upload_buf);
+#endif /* SNAPSHOT_UPLOAD */
+
+		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+
+		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
+
+		/* Common MsgBuf Rings */
+		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
+		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
+
+		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
+		dhd_prot_flowrings_pool_detach(dhd);
+
+		/* detach info rings */
+		dhd_prot_detach_info_rings(dhd);
+
+#ifdef BTLOG
+		/* detach BT log rings */
+		dhd_prot_detach_btlog_rings(dhd);
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+		dhd_prot_detach_edl_rings(dhd);
+#endif
+#ifdef DHD_HP2P
+		/* detach HPP rings */
+		dhd_prot_detach_hp2p_rings(dhd);
+#endif /* DHD_HP2P */
+
+		/* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the
+		 * pktid_map_handle_ioctl handler and PKT memory is allocated using
+		 * alloc_ioctl_return_buffer(). Otherwise they will be part of the
+		 * pktid_ctrl_map handler and PKT memory is allocated using
+		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTGET.
+		 * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be
+		 * used, which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is
+		 * defined) OR PKTFREE. Else, if IOCTLRESP_USE_CONSTMEM is defined,
+		 * IOCTL PKTs will be freed using DHD_NATIVE_TO_PKTID_FINI_IOCTL,
+		 * which calls free_ioctl_return_buffer.
+		 */
+		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
+		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
+		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
+#ifdef IOCTLRESP_USE_CONSTMEM
+		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
+#endif
+#ifdef DHD_MAP_PKTID_LOGGING
+		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
+		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
+#endif /* DHD_MAP_PKTID_LOGGING */
+#ifdef DHD_DMA_INDICES_SEQNUM
+		if (prot->h2d_dma_indx_rd_copy_buf) {
+			MFREE(dhd->osh, prot->h2d_dma_indx_rd_copy_buf,
+				prot->h2d_dma_indx_rd_copy_bufsz);
+		}
+		if (prot->d2h_dma_indx_wr_copy_buf) {
+			MFREE(dhd->osh, prot->d2h_dma_indx_wr_copy_buf,
+				prot->d2h_dma_indx_wr_copy_bufsz);
+		}
+#endif /* DHD_DMA_INDICES_SEQNUM */
+		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
+
+		dhd->prot = NULL;
+	}
+} /* dhd_prot_detach */
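+
+/*
+ * Editorial sketch (not part of the original patch): the entry points above
+ * and below compose into two distinct lifecycles. A full unload tears
+ * everything down, while a dongle soft reboot only rewinds state:
+ *
+ *	full unload:  dhd_prot_attach() -> dhd_prot_init() -> dhd_prot_detach()
+ *	soft reboot:  dhd_prot_attach() -> dhd_prot_init() -> dhd_prot_reset()
+ *	                 -> dhd_prot_init() -> ... -> dhd_prot_detach()
+ *
+ * dhd_prot_reset() deliberately frees nothing, so a subsequent
+ * dhd_prot_init() can reuse every DMA-able buffer that attach allocated.
+ */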
+
+/**
+ * dhd_prot_reset - Reset the protocol layer without freeing any objects.
+ * This may be invoked to soft reboot the dongle, without having to
+ * detach and attach the entire protocol layer.
+ *
+ * After dhd_prot_reset(), dhd_prot_init() may be invoked
+ * without going through a dhd_prot_attach() phase.
+ */
+void
+dhd_prot_reset(dhd_pub_t *dhd)
+{
+	struct dhd_prot *prot = dhd->prot;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	if (prot == NULL) {
+		return;
+	}
+
+	dhd->ring_attached = FALSE;
+
+	dhd_prot_flowrings_pool_reset(dhd);
+
+	/* Reset Common MsgBuf Rings */
+	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
+	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
+	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
+	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
+	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
+
+	/* Reset info rings */
+	if (prot->h2dring_info_subn) {
+		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
+	}
+
+	if (prot->d2hring_info_cpln) {
+		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
+	}
+
+#ifdef EWP_EDL
+	if (prot->d2hring_edl) {
+		dhd_prot_ring_reset(dhd, prot->d2hring_edl);
+	}
+#endif /* EWP_EDL */
+
+	/* Reset all DMA-able buffers allocated during prot attach */
+	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
+#ifdef DHD_HMAPTEST
+	dhd_dma_buf_reset(dhd, &prot->hmaptest.mem);
+#endif /* DHD_HMAPTEST */
+	dhd_dma_buf_reset(dhd, &prot->retbuf);
+	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
+	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
+	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
+	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
+	dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
+#ifdef SNAPSHOT_UPLOAD
+	dhd_dma_buf_reset(dhd, &prot->snapshot_upload_buf);
+#endif /* SNAPSHOT_UPLOAD */
+
+	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
+
+	/* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
+	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
+	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
+	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
+
+#ifdef DHD_DMA_INDICES_SEQNUM
+	if (prot->d2h_dma_indx_wr_copy_buf) {
+		dhd_local_buf_reset(prot->h2d_dma_indx_rd_copy_buf,
+			prot->h2d_dma_indx_rd_copy_bufsz);
+		dhd_local_buf_reset(prot->d2h_dma_indx_wr_copy_buf,
+			prot->d2h_dma_indx_wr_copy_bufsz);
+	}
+#endif /* DHD_DMA_INDICES_SEQNUM */
+
+	/* XXX: dmaxfer src and dst?
*/ + + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->cur_ioctlresp_bufs_posted = 0; + + OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count); + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + /* To catch any rollover issues fast, starting with higher ioctl_trans_id */ + prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; + prot->txcpl_db_cnt = 0; + + /* dhd_flow_rings_init is located at dhd_bus_start, + * so when stopping bus, flowrings shall be deleted + */ + if (dhd->flow_rings_inited) { + dhd_flow_rings_deinit(dhd); + } + +#ifdef BTLOG + /* Reset BTlog rings */ + if (prot->h2dring_btlog_subn) { + dhd_prot_ring_reset(dhd, prot->h2dring_btlog_subn); + } + + if (prot->d2hring_btlog_cpln) { + dhd_prot_ring_reset(dhd, prot->d2hring_btlog_cpln); + } +#endif /* BTLOG */ +#ifdef DHD_HP2P + if (prot->d2hring_hp2p_txcpl) { + dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl); + } + if (prot->d2hring_hp2p_rxcpl) { + dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl); + } +#endif /* DHD_HP2P */ + + /* Reset PKTID map */ + DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map); + DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map); + DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map); +#ifdef IOCTLRESP_USE_CONSTMEM + DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl); +#endif /* IOCTLRESP_USE_CONSTMEM */ +#ifdef DMAMAP_STATS + dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0; + dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0; +#ifndef IOCTLRESP_USE_CONSTMEM + dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0; +#endif /* IOCTLRESP_USE_CONSTMEM */ + dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0; + dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0; + dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0; +#endif /* DMAMAP_STATS */ + +#ifdef AGG_H2D_DB + dhd_msgbuf_agg_h2d_db_timer_reset(dhd); +#endif /* AGG_H2D_DB */ + +} /* dhd_prot_reset */ + +#if defined(DHD_LB_RXP) +#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp) +#else /* !DHD_LB_RXP */ +#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0) +#endif /* !DHD_LB_RXP */ + +#if defined(DHD_LB) +/* DHD load balancing: deferral of work to another online CPU */ +/* DHD_LB_RXP dispatchers, in dhd_linux.c */ +extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp); +extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx); +extern unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp); + +#if defined(DHD_LB_RXP) +/** + * dhd_lb_dispatch_rx_process - load balance by dispatch Rx processing work + * to other CPU cores + */ +static INLINE void +dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp) +{ + dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */ +} +#endif /* DHD_LB_RXP */ +#endif /* DHD_LB */ + +void +dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) +{ + dhd_prot_t *prot = dhd->prot; + prot->rx_dataoffset = rx_offset; +} + +static int +dhd_check_create_info_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + uint16 ringid; + +#ifdef BTLOG + if (dhd->submit_count_WAR) { + ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS; + } else +#endif /* BTLOG */ + { + /* dongle may increase max_submission_rings so keep + * ringid at end of dynamic rings + */ + ringid = dhd->bus->max_tx_flowrings + + 
+			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+			BCMPCIE_H2D_COMMON_MSGRINGS;
+	}
+
+	if (prot->d2hring_info_cpln) {
+		/* for d2hring re-entry case, clear inited flag */
+		prot->d2hring_info_cpln->inited = FALSE;
+	}
+
+	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+	}
+
+	if (prot->h2dring_info_subn == NULL) {
+		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+		if (prot->h2dring_info_subn == NULL) {
+			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
+				__FUNCTION__));
+			return BCME_NOMEM;
+		}
+
+		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
+		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
+			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
+			ringid);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
+				__FUNCTION__));
+			goto err;
+		}
+	}
+
+	if (prot->d2hring_info_cpln == NULL) {
+		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+		if (prot->d2hring_info_cpln == NULL) {
+			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
+				__FUNCTION__));
+			return BCME_NOMEM;
+		}
+
+		/* create the debug info completion ring next to debug info submit ring
+		 * ringid = id next to debug info submit ring
+		 */
+		ringid = ringid + 1;
+
+		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
+		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
+			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
+			ringid);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
+				__FUNCTION__));
+			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
+			goto err;
+		}
+	}
+
+	return ret;
+err:
+	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
+
+	if (prot->d2hring_info_cpln) {
+		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
+	}
+	return ret;
+} /* dhd_check_create_info_rings */
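+
+/*
+ * Editorial sketch (not part of the original patch): the ringid expression
+ * above simplifies to
+ *
+ *	ringid = max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS;
+ *
+ * i.e. the debug submit ring is placed just past every submission ring the
+ * dongle advertised. Worked example with assumed values: if
+ * max_submission_rings = 42 and BCMPCIE_H2D_COMMON_MSGRINGS = 2 (control
+ * submit + rxpost), the info submit ring gets id 44, and the completion
+ * ring created next takes id 45 (ringid + 1, see below).
+ */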
\n", + __FUNCTION__)); + return ret; + } + + if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) { + DHD_INFO(("Info completion ring was created!\n")); + return ret; + } + + DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln, + BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID); + if (ret != BCME_OK) + return ret; + + prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_info_subn->current_phase = 0; + prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + + DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx)); + prot->h2dring_info_subn->n_completion_ids = 1; + prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx; + + ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn, + BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID); + + /* Note that there is no way to delete d2h or h2d ring deletion incase either fails, + * so can not cleanup if one ring was created while the other failed + */ + return ret; +} /* dhd_prot_init_info_rings */ + +static void +dhd_prot_detach_info_rings(dhd_pub_t *dhd) +{ + if (dhd->prot->h2dring_info_subn) { + dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn); + MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t)); + } + if (dhd->prot->d2hring_info_cpln) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln); + MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t)); + } +} + +#ifdef DHD_HP2P +static int +dhd_check_create_hp2p_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + uint16 ringid; + + /* Last 2 dynamic ring indices are used by hp2p rings */ + ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2; + + if (prot->d2hring_hp2p_txcpl == NULL) { + prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_hp2p_txcpl == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl", + dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n", + __FUNCTION__)); + goto err2; + } + } else { + /* for re-entry case, clear inited flag */ + prot->d2hring_hp2p_txcpl->inited = FALSE; + } + if (prot->d2hring_hp2p_rxcpl == NULL) { + prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_hp2p_rxcpl == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + /* create the hp2p rx completion ring next to hp2p tx compl ring + * ringid = id next to hp2p tx compl ring + */ + ringid = ringid + 1; + + DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl", + dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n", + __FUNCTION__)); + goto err1; + } + } else { + /* for re-entry case, clear inited flag */ + 
prot->d2hring_hp2p_rxcpl->inited = FALSE; + } + + if (prot->d2hring_hp2p_rxcpl != NULL && + prot->d2hring_hp2p_txcpl != NULL) { + /* dhd_prot_init rentry after a dhd_prot_reset */ + ret = BCME_OK; + } + + return ret; +err1: + MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t)); + prot->d2hring_hp2p_rxcpl = NULL; + +err2: + MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t)); + prot->d2hring_hp2p_txcpl = NULL; + return ret; +} /* dhd_check_create_hp2p_rings */ + +int +dhd_prot_init_hp2p_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_OK; + + dhd->hp2p_ring_more = TRUE; + /* default multiflow not allowed */ + dhd->hp2p_mf_enable = FALSE; + + if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) { + DHD_ERROR(("%s: hp2p rings aren't created! \n", + __FUNCTION__)); + return ret; + } + + if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) { + DHD_INFO(("hp2p tx completion ring was created!\n")); + return ret; + } + + DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n", + prot->d2hring_hp2p_txcpl->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl, + BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID); + if (ret != BCME_OK) + return ret; + + prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + + if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) { + DHD_INFO(("hp2p rx completion ring was created!\n")); + return ret; + } + + DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n", + prot->d2hring_hp2p_rxcpl->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl, + BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID); + if (ret != BCME_OK) + return ret; + + prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + + /* Note that there is no way to delete d2h or h2d ring deletion incase either fails, + * so can not cleanup if one ring was created while the other failed + */ + return BCME_OK; +} /* dhd_prot_init_hp2p_rings */ + +static void +dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd) +{ + if (dhd->prot->d2hring_hp2p_txcpl) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl); + MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t)); + dhd->prot->d2hring_hp2p_txcpl = NULL; + } + if (dhd->prot->d2hring_hp2p_rxcpl) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl); + MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t)); + dhd->prot->d2hring_hp2p_rxcpl = NULL; + } +} +#endif /* DHD_HP2P */ + +#ifdef BTLOG +static int +dhd_check_create_btlog_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + uint16 ringid; + + if (dhd->submit_count_WAR) { + ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS + 2; + } else { + /* ringid is one less than ringids assign by dhd_check_create_info_rings */ + ringid = dhd->bus->max_tx_flowrings + + (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) + + BCMPCIE_H2D_COMMON_MSGRINGS - 1; + } + + if (prot->d2hring_btlog_cpln) { + /* for re-entry case, clear inited flag */ + prot->d2hring_btlog_cpln->inited = FALSE; + } + + if (prot->h2dring_btlog_subn && prot->d2hring_btlog_cpln) { + return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ + } + + if (prot->h2dring_btlog_subn == NULL) { + prot->h2dring_btlog_subn 
= MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->h2dring_btlog_subn == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for h2dring_btlog_subn\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->h2dring_btlog_subn, "h2dbtlog", + H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n", + __FUNCTION__)); + goto err; + } + } + + if (prot->d2hring_btlog_cpln == NULL) { + prot->d2hring_btlog_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_btlog_cpln == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for h2dring_btlog_subn\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + if (dhd->submit_count_WAR) { + ringid = ringid + 1; + } else { + /* advance ringid past BTLOG submit ring and INFO submit and cmplt rings */ + ringid = ringid + 3; + } + + DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_btlog_cpln, "d2hbtlog", + D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n", + __FUNCTION__)); + dhd_prot_ring_detach(dhd, prot->h2dring_btlog_subn); + goto err; + } + } + + return ret; +err: + MFREE(prot->osh, prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_btlog_cpln) { + MFREE(prot->osh, prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t)); + } + return ret; +} /* dhd_check_create_btlog_rings */ + +int +dhd_prot_init_btlog_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_OK; + + if ((ret = dhd_check_create_btlog_rings(dhd)) != BCME_OK) { + DHD_ERROR(("%s: btlog rings aren't created! 
\n", + __FUNCTION__)); + return ret; + } + + if ((prot->d2hring_btlog_cpln->inited) || (prot->d2hring_btlog_cpln->create_pending)) { + DHD_INFO(("Info completion ring was created!\n")); + return ret; + } + + DHD_ERROR(("trying to send create d2h btlog ring: id %d\n", prot->d2hring_btlog_cpln->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_btlog_cpln, + BCMPCIE_D2H_RING_TYPE_BTLOG_CPL, DHD_D2H_BTLOGRING_REQ_PKTID); + if (ret != BCME_OK) + return ret; + + prot->h2dring_btlog_subn->seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_btlog_subn->current_phase = 0; + prot->d2hring_btlog_cpln->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_btlog_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + + DHD_ERROR(("trying to send create h2d btlog ring id %d\n", prot->h2dring_btlog_subn->idx)); + prot->h2dring_btlog_subn->n_completion_ids = 1; + prot->h2dring_btlog_subn->compeltion_ring_ids[0] = prot->d2hring_btlog_cpln->idx; + + ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_btlog_subn, + BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT, DHD_H2D_BTLOGRING_REQ_PKTID); + + /* Note that there is no way to delete d2h or h2d ring deletion incase either fails, + * so can not cleanup if one ring was created while the other failed + */ + return ret; +} /* dhd_prot_init_btlog_rings */ + +static void +dhd_prot_detach_btlog_rings(dhd_pub_t *dhd) +{ + if (dhd->prot->h2dring_btlog_subn) { + dhd_prot_ring_detach(dhd, dhd->prot->h2dring_btlog_subn); + MFREE(dhd->prot->osh, dhd->prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t)); + } + if (dhd->prot->d2hring_btlog_cpln) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_btlog_cpln); + MFREE(dhd->prot->osh, dhd->prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t)); + } +} +#endif /* BTLOG */ + +#ifdef EWP_EDL +static int +dhd_check_create_edl_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + uint16 ringid; + +#ifdef BTLOG + if (dhd->submit_count_WAR) { + ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS; + } else +#endif /* BTLOG */ + { + /* dongle may increase max_submission_rings so keep + * ringid at end of dynamic rings (re-use info ring cpl ring id) + */ + ringid = dhd->bus->max_tx_flowrings + + (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) + + BCMPCIE_H2D_COMMON_MSGRINGS + 1; + } + + if (prot->d2hring_edl) { + prot->d2hring_edl->inited = FALSE; + return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ + } + + if (prot->d2hring_edl == NULL) { + prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_edl == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__, + ringid)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl", + D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n", + __FUNCTION__)); + goto err; + } + } + + return ret; +err: + MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t)); + prot->d2hring_edl = NULL; + + return ret; +} /* dhd_check_create_btlog_rings */ + +int +dhd_prot_init_edl_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + + if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) { + DHD_ERROR(("%s: EDL rings aren't created! 
\n", + __FUNCTION__)); + return ret; + } + + if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) { + DHD_INFO(("EDL completion ring was created!\n")); + return ret; + } + + DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl, + BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID); + if (ret != BCME_OK) + return ret; + + prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + + return BCME_OK; +} /* dhd_prot_init_btlog_rings */ + +static void +dhd_prot_detach_edl_rings(dhd_pub_t *dhd) +{ + if (dhd->prot->d2hring_edl) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl); + MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t)); + dhd->prot->d2hring_edl = NULL; + } +} +#endif /* EWP_EDL */ + +/** + * Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +int dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + uint len = 0; + wlc_rev_info_t revinfo; + char buf[128]; + dhd_prot_t *prot = dhd->prot; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + + /* Post ts buffer after shim layer is attached */ + ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd); + + /* query for 'wlc_ver' to get version info from firmware */ + /* memsetting to zero */ + bzero(buf, sizeof(buf)); + len = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf)); + if (len == 0) { + DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len)); + ret = BCME_ERROR; + goto done; + } + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0); + if (ret < 0) { + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + } else { + dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major; + dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor; + } + + DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor)); +#ifndef OEM_ANDROID + /* Get the device MAC address */ + bzero(buf, sizeof(buf)); + strlcpy(buf, "cur_etheraddr", sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0); + if (ret < 0) { + DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__)); + goto done; + } + memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); + if (dhd_msg_level & DHD_INFO_VAL) { + bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN); + } +#endif /* OEM_ANDROID */ + +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(dhd); +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) { + DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__)); + goto done; + } + DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__, + revinfo.deviceid, revinfo.vendorid, revinfo.chipnum)); + + /* Get the RxBuf post size */ + /* Use default value in case of failure */ + prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + memset(buf, 0, sizeof(buf)); + len = bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf)); + if (len == 0) { + DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len)); + } else { + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0); + if (ret < 0) { + 
DHD_ERROR(("%s: GET RxBuf post FAILED, use default %d\n", + __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ)); + } else { + if (memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), + buf, sizeof(uint16)) != BCME_OK) { + DHD_ERROR(("%s: rxbufpost_sz memcpy failed\n", __FUNCTION__)); + } + + if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) { + DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n", + __FUNCTION__, prot->rxbufpost_sz, + DHD_FLOWRING_RX_BUFPOST_PKTSZ)); + prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + DHD_ERROR(("%s: RxBuf Post : %d\n", + __FUNCTION__, prot->rxbufpost_sz)); + } + } + } + + /* Post buffers for packet reception */ + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + + DHD_SSSR_DUMP_INIT(dhd); + + dhd_process_cid_mac(dhd, TRUE); + ret = dhd_preinit_ioctls(dhd); + dhd_process_cid_mac(dhd, FALSE); +#if defined(DHD_SDTC_ETB_DUMP) + dhd_sdtc_etb_init(dhd); +#endif /* DHD_SDTC_ETB_DUMP */ +#if defined(DHD_H2D_LOG_TIME_SYNC) +#ifdef DHD_HP2P + if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) +#else + if (FW_SUPPORTED(dhd, h2dlogts)) +#endif // endif + { +#ifdef DHD_HP2P + if (dhd->hp2p_enable) { + dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40; + } else { + dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH; + } +#else + dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH; +#endif /* DHD_HP2P */ + dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US(); + /* This is during initialization. */ + dhd_h2d_log_time_sync(dhd); + } else { + dhd->dhd_rte_time_sync_ms = 0; + } +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +#ifdef HOST_SFH_LLC + if (FW_SUPPORTED(dhd, host_sfhllc)) { + dhd->host_sfhllc_supported = TRUE; + } else { + dhd->host_sfhllc_supported = FALSE; + } +#endif /* HOST_SFH_LLC */ + + /* Always assumes wl for now */ + dhd->iswl = TRUE; +done: + return ret; +} /* dhd_sync_with_dongle */ + +#define DHD_DBG_SHOW_METADATA 0 + +#if DHD_DBG_SHOW_METADATA +static void +BCMFASTPATH(dhd_prot_print_metadata)(dhd_pub_t *dhd, void *ptr, int len) +{ + uint8 tlv_t; + uint8 tlv_l; + uint8 *tlv_v = (uint8 *)ptr; + + if (len <= BCMPCIE_D2H_METADATA_HDRLEN) + return; + + len -= BCMPCIE_D2H_METADATA_HDRLEN; + tlv_v += BCMPCIE_D2H_METADATA_HDRLEN; + + while (len > TLV_HDR_LEN) { + tlv_t = tlv_v[TLV_TAG_OFF]; + tlv_l = tlv_v[TLV_LEN_OFF]; + + len -= TLV_HDR_LEN; + tlv_v += TLV_HDR_LEN; + if (len < tlv_l) + break; + if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER)) + break; + + switch (tlv_t) { + case WLFC_CTL_TYPE_TXSTATUS: { + uint32 txs; + memcpy(&txs, tlv_v, sizeof(uint32)); + if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) { + printf("METADATA TX_STATUS: %08x\n", txs); + } else { + wl_txstatus_additional_info_t tx_add_info; + memcpy(&tx_add_info, tlv_v + sizeof(uint32), + sizeof(wl_txstatus_additional_info_t)); + printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]" + " rate = %08x tries = %d - %d\n", txs, + tx_add_info.seq, tx_add_info.entry_ts, + tx_add_info.enq_ts, tx_add_info.last_ts, + tx_add_info.rspec, tx_add_info.rts_cnt, + tx_add_info.tx_cnt); + } + } break; + + case WLFC_CTL_TYPE_RSSI: { + if (tlv_l == 1) + printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v); + else + printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n", + (*(tlv_v + 3) << 8) | *(tlv_v + 2), + (int8)(*tlv_v), *(tlv_v + 1)); + } break; + + case WLFC_CTL_TYPE_FIFO_CREDITBACK: + bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_TX_ENTRY_STAMP: + 
bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_RX_STAMP: { + struct { + uint32 rspec; + uint32 bus_time; + uint32 wlan_time; + } rx_tmstamp; + memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp)); + printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n", + rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec); + } break; + + case WLFC_CTL_TYPE_TRANS_ID: + bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_COMP_TXSTATUS: + bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l); + break; + + default: + bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l); + break; + } + + len -= tlv_l; + tlv_v += tlv_l; + } +} +#endif /* DHD_DBG_SHOW_METADATA */ + +static INLINE void +BCMFASTPATH(dhd_prot_packet_free)(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) +{ + if (pkt) { + if (pkttype == PKTTYPE_IOCTL_RX || + pkttype == PKTTYPE_EVENT_RX || + pkttype == PKTTYPE_INFO_RX || + pkttype == PKTTYPE_TSBUF_RX) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, pkt, send); +#else + PKTFREE(dhd->osh, pkt, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + PKTFREE(dhd->osh, pkt, send); + } + } +} + +/** + * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle + * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK + * to ensure thread safety, so no need to hold any locks for this function + */ +static INLINE void * +BCMFASTPATH(dhd_prot_packet_get)(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid) +{ + void *PKTBUF; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + +#ifdef DHD_PCIE_PKTID + if (free_pktid) { + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, + pktid, pa, len, dmah, secdma, pkttype); + } else { + PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map, + pktid, pa, len, dmah, secdma, pkttype); + } +#else + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, + len, dmah, secdma, pkttype); +#endif /* DHD_PCIE_PKTID */ + if (PKTBUF) { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); +#ifdef DMAMAP_STATS + switch (pkttype) { +#ifndef IOCTLRESP_USE_CONSTMEM + case PKTTYPE_IOCTL_RX: + dhd->dma_stats.ioctl_rx--; + dhd->dma_stats.ioctl_rx_sz -= len; + break; +#endif /* IOCTLRESP_USE_CONSTMEM */ + case PKTTYPE_EVENT_RX: + dhd->dma_stats.event_rx--; + dhd->dma_stats.event_rx_sz -= len; + break; + case PKTTYPE_INFO_RX: + dhd->dma_stats.info_rx--; + dhd->dma_stats.info_rx_sz -= len; + break; + case PKTTYPE_TSBUF_RX: + dhd->dma_stats.tsbuf_rx--; + dhd->dma_stats.tsbuf_rx_sz -= len; + break; + } +#endif /* DMAMAP_STATS */ + } + + return PKTBUF; +} + +#ifdef IOCTLRESP_USE_CONSTMEM +static INLINE void +BCMFASTPATH(dhd_prot_ioctl_ret_buffer_get)(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf) +{ + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, + retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX); + + return; +} +#endif + +#ifdef PCIE_INB_DW +static int +dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus) +{ + unsigned long flags = 0; + + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + bus->host_active_cnt++; + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + bus->host_active_cnt--; + dhd_bus_inb_ack_pending_ds_req(bus); + 
DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + return BCME_ERROR; + } + } + + return BCME_OK; +} + +static void +dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus) +{ + unsigned long flags = 0; + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + bus->host_active_cnt--; + dhd_bus_inb_ack_pending_ds_req(bus); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } +} +#endif /* PCIE_INB_DW */ + +static void +BCMFASTPATH(dhd_msgbuf_rxbuf_post)(dhd_pub_t *dhd, bool use_rsv_pktid) +{ + dhd_prot_t *prot = dhd->prot; + int16 fillbufs; + int retcount = 0; + + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + while (fillbufs >= RX_BUF_BURST) { + /* Post in a burst of 32 buffers at a time */ + fillbufs = MIN(fillbufs, RX_BUF_BURST); + + /* Post buffers */ + retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid); + + if (retcount > 0) { + prot->rxbufpost += (uint16)retcount; + /* how many more to post */ + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + } else { + /* Make sure we don't run loop any further */ + fillbufs = 0; + } + } +} + +/** Post 'count' no of rx buffers to dongle */ +static int +BCMFASTPATH(dhd_prot_rxbuf_post)(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) +{ + void *p, **pktbuf; + uint8 *rxbuf_post_tmp; + host_rxbuf_post_t *rxbuf_post; + void *msg_start; + dmaaddr_t pa, *pktbuf_pa; + uint32 *pktlen; + uint16 i = 0, alloced = 0; + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; + void *lcl_buf; + uint16 lcl_buf_size; +#ifdef BCM_ROUTER_DHD + uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ + BCMEXTRAHDROOM; +#else + uint16 pktsz = prot->rxbufpost_sz; +#endif /* BCM_ROUTER_DHD */ + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + /* allocate a local buffer to store pkt buffer va, pa and length */ + lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) * + RX_BUF_BURST; + lcl_buf = MALLOC(dhd->osh, lcl_buf_size); + if (!lcl_buf) { + DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__)); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return 0; + } + pktbuf = lcl_buf; + pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST); + pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST); + + for (i = 0; i < count; i++) { + if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { + DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__)); + dhd->rx_pktgetfail++; + break; + } + +#ifdef BCM_ROUTER_DHD + /* Reserve extra headroom for router builds */ + PKTPULL(dhd->osh, p, BCMEXTRAHDROOM); +#endif /* BCM_ROUTER_DHD */ + pktlen[i] = PKTLEN(dhd->osh, p); + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0); + + if (PHYSADDRISZERO(pa)) { + PKTFREE(dhd->osh, p, FALSE); + DHD_ERROR(("Invalid phyaddr 0\n")); + ASSERT(0); + break; + } +#ifdef DMAMAP_STATS + dhd->dma_stats.rxdata++; + dhd->dma_stats.rxdata_sz += pktlen[i]; +#endif /* DMAMAP_STATS */ + + PKTPULL(dhd->osh, p, prot->rx_metadata_offset); + pktlen[i] = PKTLEN(dhd->osh, p); + pktbuf[i] = p; + pktbuf_pa[i] = pa; + } + + /* only post what we have */ + count = i; + + /* grab the ring lock to allocate pktid and post on ring */ + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) + dhd_prot_alloc_ring_space(dhd, ring, count, 
&alloced, TRUE); + if (msg_start == NULL) { + DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); + DHD_RING_UNLOCK(ring->ring_lock, flags); + goto cleanup; + } + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 0); + + rxbuf_post_tmp = (uint8*)msg_start; + + for (i = 0; i < alloced; i++) { + rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; + p = pktbuf[i]; + pa = pktbuf_pa[i]; + + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa, + pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + break; + } +#endif /* DHD_PCIE_PKTID */ + +#ifdef DHD_HMAPTEST + if (dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_ACTIVE) { + /* scratchbuf area */ + dhd->prot->hmap_rx_buf_va = (char *)dhd->prot->hmaptest.mem.va + + dhd->prot->hmaptest.offset; + + dhd->prot->hmap_rx_buf_len = pktlen[i] + prot->rx_metadata_offset; + if ((dhd->prot->hmap_rx_buf_va + dhd->prot->hmap_rx_buf_len) > + ((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) { + DHD_ERROR(("hmaptest: ERROR Rxpost outside HMAPTEST buffer\n")); + DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n")); + dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE; + dhd->prot->hmaptest.in_progress = FALSE; + } else { + pa = DMA_MAP(dhd->osh, dhd->prot->hmap_rx_buf_va, + dhd->prot->hmap_rx_buf_len, DMA_RX, p, 0); + + dhd->prot->hmap_rx_buf_pa = pa; + dhd->prot->hmaptest_rx_pktid = pktid; + dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_POSTED; + DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf pktid=0x%08x\n", + pktid)); + DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf va=0x%p pa.lo=0x%08x\n", + dhd->prot->hmap_rx_buf_va, (uint32)PHYSADDRLO(pa))); + DHD_ERROR(("hmaptest: d11write rxpost orig pktdata va=0x%p pa.lo=0x%08x\n", + PKTDATA(dhd->osh, p), (uint32)PHYSADDRLO(pktbuf_pa[i]))); + } + } +#endif /* DHD_HMAPTEST */ + dhd->prot->tot_rxbufpost++; + /* Common msg header */ + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + rxbuf_post->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]); + rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->data_buf_addr.low_addr = + htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset); + + if (prot->rx_metadata_offset) { + rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; + rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + } else { + rxbuf_post->metadata_buf_len = 0; + rxbuf_post->metadata_buf_addr.high_addr = 0; + rxbuf_post->metadata_buf_addr.low_addr = 0; + } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + + /* Move rxbuf_post_tmp to next item */ + rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len; +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, p); +#endif + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) + ring->wr = ring->max_items - (alloced - i); + else + ring->wr -= (alloced - i); + + if (ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? 
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + + alloced = i; + } + + /* update ring's WR index and ring doorbell to dongle */ + if (alloced > 0) { + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + } + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +cleanup: + for (i = alloced; i < count; i++) { + p = pktbuf[i]; + pa = pktbuf_pa[i]; + + DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL); + PKTFREE(dhd->osh, p, FALSE); + } + + MFREE(dhd->osh, lcl_buf, lcl_buf_size); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + + return alloced; +} /* dhd_prot_rxbufpost */ + +#if !defined(BCM_ROUTER_DHD) +static int +dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ; + uint32 pktlen; + info_buf_post_msg_t *infobuf_post; + uint8 *infobuf_post_tmp; + void *p; + void* msg_start; + uint8 i = 0; + dmaaddr_t pa; + int16 count = 0; + + if (ring == NULL) + return 0; + + if (ring->inited != TRUE) + return 0; + if (ring == dhd->prot->h2dring_info_subn) { + if (prot->max_infobufpost == 0) + return 0; + + count = prot->max_infobufpost - prot->infobufpost; + } +#ifdef BTLOG + else if (ring == dhd->prot->h2dring_btlog_subn) { + if (prot->max_btlogbufpost == 0) + return 0; + + pktsz = DHD_BTLOG_RX_BUFPOST_PKTSZ; + count = prot->max_btlogbufpost - prot->btlogbufpost; + } +#endif /* BTLOG */ + else { + DHD_ERROR(("Unknown ring\n")); + return 0; + } + + if (count <= 0) { + DHD_INFO(("%s: Cannot post more than max info resp buffers\n", + __FUNCTION__)); + return 0; + } + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + /* grab the ring lock to allocate pktid and post on ring */ + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE); + + if (msg_start == NULL) { + DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); + DHD_RING_UNLOCK(ring->ring_lock, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return -1; + } + + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 0); + + infobuf_post_tmp = (uint8*) msg_start; + + /* loop through each allocated message in the host ring */ + for (i = 0; i < alloced; i++) { + infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp; + /* Create a rx buffer */ +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__)); + dhd->rx_pktgetfail++; + break; + } + pktlen = PKTLEN(dhd->osh, p); + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); + if (PHYSADDRISZERO(pa)) { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, p, FALSE); +#else + PKTFREE(dhd->osh, p, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + DHD_ERROR(("Invalid phyaddr 0\n")); + ASSERT(0); + break; + } +#ifdef DMAMAP_STATS + dhd->dma_stats.info_rx++; + dhd->dma_stats.info_rx_sz += pktlen; +#endif /* DMAMAP_STATS */ + pktlen = PKTLEN(dhd->osh, p); + + /* Common msg header */ + infobuf_post->cmn_hdr.msg_type 
= MSG_TYPE_INFO_BUF_POST; + infobuf_post->cmn_hdr.if_id = 0; + infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + infobuf_post->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa, + pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX); + +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0); + +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, p, FALSE); +#else + PKTFREE(dhd->osh, p, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__)); + break; + } +#endif /* DHD_PCIE_PKTID */ + + infobuf_post->host_buf_len = htol16((uint16)pktlen); + infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_MSGBUF_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n", + infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr, + infobuf_post->host_buf_addr.high_addr)); + + infobuf_post->cmn_hdr.request_id = htol32(pktid); + /* Move rxbuf_post_tmp to next item */ + infobuf_post_tmp = infobuf_post_tmp + ring->item_len; +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, p); +#endif + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) + ring->wr = ring->max_items - (alloced - i); + else + ring->wr -= (alloced - i); + + alloced = i; + if (alloced && ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? + 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + + /* Update the write pointer in TCM & ring bell */ + if (alloced > 0) { + if (ring == dhd->prot->h2dring_info_subn) { + prot->infobufpost += alloced; + } +#ifdef BTLOG + if (ring == dhd->prot->h2dring_btlog_subn) { + prot->btlogbufpost += alloced; + } +#endif /* BTLOG */ + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + } + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return alloced; +} /* dhd_prot_infobufpost */ +#endif /* !BCM_ROUTER_DHD */ + +#ifdef IOCTLRESP_USE_CONSTMEM +static int +alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + int err; + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + + if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) { + DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err)); + ASSERT(0); + return BCME_NOMEM; + } + + return BCME_OK; +} + +static void +free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + /* retbuf (declared on stack) not fully populated ... */ + if (retbuf->va) { + uint32 dma_pad; + dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? 
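+		/* re-derive len and _alloced here because the caller's stack copy
+		 * of the dma buf never carried the original allocation size */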
DHD_DMA_PAD : 0; + retbuf->len = IOCT_RETBUF_SIZE; + retbuf->_alloced = retbuf->len + dma_pad; + } + + dhd_dma_buf_free(dhd, retbuf); + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static int +dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type) +{ + void *p; + uint16 pktsz; + ioctl_resp_evt_buf_post_msg_t *rxbuf_post; + dmaaddr_t pa; + uint32 pktlen; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + unsigned long flags; + dhd_dma_buf_t retbuf; + void *dmah = NULL; + uint32 pktid; + void *map_handle; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + bool non_ioctl_resp_buf = 0; + dhd_pkttype_t buf_type; + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return -1; + } + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST) + buf_type = PKTTYPE_IOCTL_RX; + else if (msg_type == MSG_TYPE_EVENT_BUF_POST) + buf_type = PKTTYPE_EVENT_RX; + else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST) + buf_type = PKTTYPE_TSBUF_RX; + else { + DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type)); + /* XXX: may be add an assert */ + return -1; + } +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) { + return BCME_ERROR; + } +#endif /* PCIE_INB_DW */ + + if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)) + non_ioctl_resp_buf = TRUE; + else + non_ioctl_resp_buf = FALSE; + + if (non_ioctl_resp_buf) { + /* Allocate packet for not ioctl resp buffer post */ + pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + /* Allocate packet for ctrl/ioctl buffer post */ + pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!non_ioctl_resp_buf) { + if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) { + DHD_ERROR(("Could not allocate IOCTL response buffer\n")); + goto fail; + } + ASSERT(retbuf.len == IOCT_RETBUF_SIZE); + p = retbuf.va; + pktlen = retbuf.len; + pa = retbuf.pa; + dmah = retbuf.dmah; + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", + __FUNCTION__, __LINE__, non_ioctl_resp_buf ? 
+ "EVENT" : "IOCTL RESP")); + dhd->rx_pktgetfail++; + goto fail; + } + + pktlen = PKTLEN(dhd->osh, p); + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); + + if (PHYSADDRISZERO(pa)) { + DHD_ERROR(("Invalid physaddr 0\n")); + ASSERT(0); + goto free_pkt_return; + } + +#ifdef DMAMAP_STATS + switch (buf_type) { +#ifndef IOCTLRESP_USE_CONSTMEM + case PKTTYPE_IOCTL_RX: + dhd->dma_stats.ioctl_rx++; + dhd->dma_stats.ioctl_rx_sz += pktlen; + break; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + case PKTTYPE_EVENT_RX: + dhd->dma_stats.event_rx++; + dhd->dma_stats.event_rx_sz += pktlen; + break; + case PKTTYPE_TSBUF_RX: + dhd->dma_stats.tsbuf_rx++; + dhd->dma_stats.tsbuf_rx_sz += pktlen; + break; + default: + break; + } +#endif /* DMAMAP_STATS */ + + } + + /* grab the ring lock to allocate pktid and post on ring */ + DHD_RING_LOCK(ring->ring_lock, flags); + + rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (rxbuf_post == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n", + __FUNCTION__, __LINE__)); + +#ifdef IOCTLRESP_USE_CONSTMEM + if (non_ioctl_resp_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + goto free_pkt_return; + } + + /* CMN msg header */ + rxbuf_post->cmn_hdr.msg_type = msg_type; + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!non_ioctl_resp_buf) { + map_handle = dhd->prot->pktid_map_handle_ioctl; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah, + ring->dma_buf.secdma, buf_type); + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + map_handle = dhd->prot->pktid_ctrl_map; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, + p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, + buf_type); + } + + if (pktid == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + if (ring->wr == 0) { + ring->current_phase = ring->current_phase ? 0 : + BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + DHD_RING_UNLOCK(ring->ring_lock, flags); + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__)); + goto free_pkt_return; + } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + rxbuf_post->cmn_hdr.flags = ring->current_phase; + +#if defined(DHD_PCIE_PKTID) + if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + if (ring->wr == 0) { + ring->current_phase = ring->current_phase ? 
0 : + BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + DHD_RING_UNLOCK(ring->ring_lock, flags); +#ifdef IOCTLRESP_USE_CONSTMEM + if (non_ioctl_resp_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + goto free_pkt_return; + } +#endif /* DHD_PCIE_PKTID */ + +#ifndef IOCTLRESP_USE_CONSTMEM + rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); +#else + rxbuf_post->host_buf_len = htol16((uint16)pktlen); +#endif /* IOCTLRESP_USE_CONSTMEM */ + rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); +#ifdef DHD_LBUF_AUDIT + if (non_ioctl_resp_buf) + PKTAUDIT(dhd->osh, p); +#endif + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return 1; + +free_pkt_return: + if (!non_ioctl_resp_buf) { +#ifdef IOCTLRESP_USE_CONSTMEM + free_ioctl_return_buffer(dhd, &retbuf); +#else + dhd_prot_packet_free(dhd, p, buf_type, FALSE); +#endif /* IOCTLRESP_USE_CONSTMEM */ + } else { + dhd_prot_packet_free(dhd, p, buf_type, FALSE); + } + +fail: +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return -1; +} /* dhd_prot_rxbufpost_ctrl */ + +static uint16 +dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post) +{ + uint32 i = 0; + int32 ret_val; + + DHD_MSGBUF_INFO(("max to post %d, event %d \n", max_to_post, msg_type)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return 0; + } + + while (i < max_to_post) { + ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type); + if (ret_val < 0) + break; + i++; + } + DHD_MSGBUF_INFO(("posted %d buffers of type %d\n", i, msg_type)); + return (uint16)i; +} + +static void +dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + DHD_MSGBUF_INFO(("ioctl resp buf post\n")); + max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n", + __FUNCTION__)); + return; + } + prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post); +} + +static void +dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; + if (max_to_post <= 0) { + DHD_ERROR(("%s: Cannot post more than max event buffers\n", + __FUNCTION__)); + return; + } + prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + MSG_TYPE_EVENT_BUF_POST, max_to_post); +} + +static int +dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd) +{ +#ifdef DHD_TIMESYNC + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + if (prot->active_ipc_version < 7) { + DHD_ERROR(("no ts buffers to device ipc rev is %d, needs to be atleast 7\n", + prot->active_ipc_version)); + return 0; + } + + max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max ts buffers\n", + __FUNCTION__)); + return 0; + } + + prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post); +#endif /* DHD_TIMESYNC */ + return 0; +} + +bool +BCMFASTPATH(dhd_prot_process_msgbuf_infocpl)(dhd_pub_t 
*dhd, uint bound) +{ + dhd_prot_t *prot = dhd->prot; + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = prot->d2hring_info_cpln; + unsigned long flags; + + if (ring == NULL) + return FALSE; + if (ring->inited != TRUE) + return FALSE; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + if (dhd->smmu_fault_occurred) { + more = FALSE; + break; + } + + DHD_RING_LOCK(ring->ring_lock, flags); + /* Get the message from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + DHD_RING_UNLOCK(ring->ring_lock, flags); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n", + __FUNCTION__, msg_len)); + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check RX bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +#ifdef BTLOG +bool +BCMFASTPATH(dhd_prot_process_msgbuf_btlogcpl)(dhd_pub_t *dhd, uint bound) +{ + dhd_prot_t *prot = dhd->prot; + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = prot->d2hring_btlog_cpln; + + if (ring == NULL) + return FALSE; + if (ring->inited != TRUE) + return FALSE; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd_query_bus_erros(dhd)) { + more = FALSE; + break; + } + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + if (dhd->smmu_fault_occurred) { + more = FALSE; + break; + } + + /* Get the message from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n", + __FUNCTION__, msg_len)); + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check RX bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} +#endif /* BTLOG */ + +#ifdef EWP_EDL +bool +dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = prot->d2hring_edl; + unsigned long flags = 0; + uint32 items = 0; + uint16 rd = 0; + uint16 depth = 0; + + if (ring == NULL) + return FALSE; + if (ring->inited != TRUE) + return FALSE; + if (ring->item_len == 0) { + DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n", + __FUNCTION__, ring->idx, ring->item_len)); + return FALSE; + } + + if (dhd_query_bus_erros(dhd)) { + return FALSE; + } + + if (dhd->hang_was_sent) { + return FALSE; + } + + /* in this DPC context just check if wr index has moved + * and schedule deferred context to actually process the + * work items. 
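+	 * Keeping this DPC path cheap matters: the heavy EDL decode is
+	 * done later by dhd_prot_process_edl_complete() from the logtrace
+	 * work queue / kthread context.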
+ */ + + /* update the write index */ + DHD_RING_LOCK(ring->ring_lock, flags); + if (dhd->dma_d2h_ring_upd_support) { + /* DMAing write/read indices supported */ + ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx); + } + rd = ring->rd; + DHD_RING_UNLOCK(ring->ring_lock, flags); + + depth = ring->max_items; + /* check for avail space, in number of ring items */ + items = READ_AVAIL_SPACE(ring->wr, rd, depth); + if (items == 0) { + /* no work items in edl ring */ + return FALSE; + } + if (items > ring->max_items) { + DHD_ERROR(("\r\n======================= \r\n")); + DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", + __FUNCTION__, ring, ring->name, ring->max_items, items)); + DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", + ring->wr, ring->rd, depth)); + DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n", + dhd->busstate, dhd->bus->wait_for_d3_ack)); + DHD_ERROR(("\r\n======================= \r\n")); +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (ring->wr >= ring->max_items) { + dhd->bus->read_shm_fail = TRUE; + } +#else +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR; + dhd_bus_mem_dump(dhd); + + } +#endif /* DHD_FW_COREDUMP */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + dhd_schedule_reset(dhd); + + return FALSE; + } + + if (items > D2HRING_EDL_WATERMARK) { + DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;" + " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items, + ring->rd, ring->wr, depth)); + } + + dhd_schedule_logtrace(dhd->info); + + return FALSE; +} + +/* + * This is called either from work queue context of 'event_log_dispatcher_work' or + * from the kthread context of dhd_logtrace_thread + */ +int +dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data) +{ + dhd_prot_t *prot = NULL; + msgbuf_ring_t *ring = NULL; + int err = 0; + unsigned long flags = 0; + cmn_msg_hdr_t *msg = NULL; + uint8 *msg_addr = NULL; + uint32 max_items_to_process = 0, n = 0; + uint32 num_items = 0, new_items = 0; + uint16 depth = 0; + volatile uint16 wr = 0; + + if (!dhd || !dhd->prot) + return 0; + + prot = dhd->prot; + ring = prot->d2hring_edl; + + if (!ring || !evt_decode_data) { + return 0; + } + + if (dhd->hang_was_sent) { + return FALSE; + } + + DHD_RING_LOCK(ring->ring_lock, flags); + ring->curr_rd = ring->rd; + wr = ring->wr; + depth = ring->max_items; + /* check for avail space, in number of ring items + * Note, that this will only give the # of items + * from rd to wr if wr>=rd, or from rd to ring end + * if wr < rd. So in the latter case strictly speaking + * not all the items are read. But this is OK, because + * these will be processed in the next doorbell as rd + * would have wrapped around. 
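+	 * (e.g. with depth 256, rd 250 and wr 10, only the six items up
+	 * to the ring end are counted on this pass; the ten items at the
+	 * ring start are picked up on the next doorbell.)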
Processing in the next + * doorbell is acceptable since EDL only contains debug data + */ + num_items = READ_AVAIL_SPACE(wr, ring->rd, depth); + + if (num_items == 0) { + /* no work items in edl ring */ + DHD_RING_UNLOCK(ring->ring_lock, flags); + return 0; + } + + DHD_INFO(("%s: EDL work items [%u] available \n", + __FUNCTION__, num_items)); + + /* if space is available, calculate address to be read */ + msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len); + + max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + n = max_items_to_process; + while (n > 0) { + msg = (cmn_msg_hdr_t *)msg_addr; + /* wait for DMA of work item to complete */ + if ((err = dhd->prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) { + DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n", + __FUNCTION__, err)); + } + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. + */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + + if (err != BCME_OK) { + return 0; + } + + /* process the edl work item, i.e, the event log */ + err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data); + + /* Dummy sleep so that scheduler kicks in after processing any logprints */ + OSL_SLEEP(0); + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr + ring->item_len); + + msg_addr += ring->item_len; + --n; + } + + DHD_RING_LOCK(ring->ring_lock, flags); + /* update host ring read pointer */ + if ((ring->rd + max_items_to_process) >= ring->max_items) + ring->rd = 0; + else + ring->rd += max_items_to_process; + DHD_RING_UNLOCK(ring->ring_lock, flags); + + /* Now after processing max_items_to_process update dongle rd index. + * The TCM rd index is updated only if bus is not + * in D3. Else, the rd index is updated from resume + * context in - 'dhdpcie_bus_suspend' + */ + DHD_GENERAL_LOCK(dhd, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) { + DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_EDL_RING_TCM_RD_UPDATE(dhd); + } + + /* if num_items > bound, then anyway we will reschedule and + * this function runs again, so that if in between the DPC has + * updated the wr index, then the updated wr is read. But if + * num_items <= bound, and if DPC executes and updates the wr index + * when the above while loop is running, then the updated 'wr' index + * needs to be re-read from here, If we don't do so, then till + * the next time this function is scheduled + * the event logs will not be processed. + */ + if (num_items <= DHD_EVENT_LOGTRACE_BOUND) { + /* read the updated wr index if reqd. 
and update num_items */
+		DHD_RING_LOCK(ring->ring_lock, flags);
+		if (wr != (volatile uint16)ring->wr) {
+			wr = (volatile uint16)ring->wr;
+			new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
+			DHD_INFO(("%s: new items [%u] avail in edl\n",
+				__FUNCTION__, new_items));
+			num_items += new_items;
+		}
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+	}
+
+	/* if # of items processed is less than num_items, need to re-schedule
+	 * the deferred ctx
+	 */
+	if (max_items_to_process < num_items) {
+		DHD_INFO(("%s: EDL bound hit / new items found, "
+			"items processed=%u; remaining=%u, "
+			"resched deferred ctx...\n",
+			__FUNCTION__, max_items_to_process,
+			num_items - max_items_to_process));
+		return (num_items - max_items_to_process);
+	}
+
+	return 0;
+
+}
+
+void
+dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = NULL;
+	unsigned long flags = 0;
+	msgbuf_ring_t *ring = NULL;
+
+	if (!dhd)
+		return;
+
+	prot = dhd->prot;
+	if (!prot || !prot->d2hring_edl)
+		return;
+
+	ring = prot->d2hring_edl;
+	DHD_RING_LOCK(ring->ring_lock, flags);
+	dhd_prot_upd_read_idx(dhd, ring);
+	DHD_RING_UNLOCK(ring->ring_lock, flags);
+	if (dhd->dma_h2d_ring_upd_support &&
+		!IDMA_ACTIVE(dhd)) {
+		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
+	}
+}
+#endif /* EWP_EDL */
+
+static void
+dhd_prot_rx_frame(dhd_pub_t *dhd, void *pkt, int ifidx, uint pkt_count)
+{
+
+#ifdef DHD_LB_RXP
+	if (dhd_read_lb_rxp(dhd) == 1) {
+		dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
+		return;
+	}
+#endif /* DHD_LB_RXP */
+	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, pkt_count);
+}
+
+#ifdef DHD_LB_RXP
+static int dhd_prot_lb_rxp_flow_ctrl(dhd_pub_t *dhd)
+{
+	if ((dhd->lb_rxp_stop_thr == 0) || (dhd->lb_rxp_strt_thr == 0)) {
+		/* flow control is disabled when either the stop or start threshold is zero */
+		return FALSE;
+	}
+
+	if ((dhd_lb_rxp_process_qlen(dhd) >= dhd->lb_rxp_stop_thr) &&
+		(!atomic_read(&dhd->lb_rxp_flow_ctrl))) {
+		atomic_set(&dhd->lb_rxp_flow_ctrl, TRUE);
+#ifdef DHD_LB_STATS
+		dhd->lb_rxp_stop_thr_hitcnt++;
+#endif /* DHD_LB_STATS */
+		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_stop_thr %d\n",
+			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_stop_thr));
+	} else if ((dhd_lb_rxp_process_qlen(dhd) <= dhd->lb_rxp_strt_thr) &&
+		(atomic_read(&dhd->lb_rxp_flow_ctrl))) {
+		atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
+#ifdef DHD_LB_STATS
+		dhd->lb_rxp_strt_thr_hitcnt++;
+#endif /* DHD_LB_STATS */
+		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_strt_thr %d\n",
+			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_strt_thr));
+	}
+
+	return atomic_read(&dhd->lb_rxp_flow_ctrl);
+}
+#endif /* DHD_LB_RXP */
+
+/** called when DHD needs to check for 'receive complete' messages from the dongle */
+bool
+BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
+{
+	bool more = FALSE;
+	uint n = 0;
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring;
+	uint16 item_len;
+	host_rxbuf_cmpl_t *msg = NULL;
+	uint8 *msg_addr;
+	uint32 msg_len;
+	uint16 pkt_cnt, pkt_cnt_newidx;
+	unsigned long flags;
+	dmaaddr_t pa;
+	uint32 len;
+	void *dmah;
+	void *secdma;
+	int ifidx = 0, if_newidx = 0;
+	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
+	uint32 pktid;
+	int i;
+	uint8 sync;
+
+#ifdef DHD_LB_RXP
+	/* must be the first check in this function */
+	if (dhd_prot_lb_rxp_flow_ctrl(dhd)) {
+		/* DHD is holding a lot of RX packets.
+		 * Just give the network stack a chance to consume RX packets.
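+		 * The flow control is hysteretic: enqueueing pauses once the
+		 * pending queue reaches lb_rxp_stop_thr and resumes only after
+		 * it drains back down to lb_rxp_strt_thr.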
+ */ + return FALSE; + } +#endif /* DHD_LB_RXP */ +#ifdef DHD_PCIE_RUNTIMEPM + /* Set rx_pending_due_to_rpm if device is not in resume state */ + if (dhdpcie_runtime_bus_wake(dhd, FALSE, dhd_prot_process_msgbuf_rxcpl)) { + dhd->rx_pending_due_to_rpm = TRUE; + return more; + } + dhd->rx_pending_due_to_rpm = FALSE; +#endif /* DHD_PCIE_RUNTIMEPM */ + +#ifdef DHD_HP2P + if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl) + ring = prot->d2hring_hp2p_rxcpl; + else +#endif /* DHD_HP2P */ + ring = &prot->d2hring_rx_cpln; + item_len = ring->item_len; + while (1) { + if (dhd_is_device_removed(dhd)) + break; + + if (dhd_query_bus_erros(dhd)) + break; + + if (dhd->hang_was_sent) + break; + + if (dhd->smmu_fault_occurred) { + break; + } + + pkt_cnt = 0; + pktqhead = pkt_newidx = NULL; + pkt_cnt_newidx = 0; + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + break; + } + + while (msg_len > 0) { + msg = (host_rxbuf_cmpl_t *)msg_addr; + + /* Wait until DMA completes, then fetch msg_type */ + sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len); + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. + */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + + if (!sync) { + msg_len -= item_len; + msg_addr += item_len; + continue; + } + + pktid = ltoh32(msg->cmn_hdr.request_id); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid, + DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING */ + + pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa, + len, dmah, secdma, PKTTYPE_DATA_RX); + /* Sanity check of shinfo nrfrags */ + if (!pkt || (dhd_check_shinfo_nrfrags(dhd, pkt, &pa, pktid) != BCME_OK)) { + msg_len -= item_len; + msg_addr += item_len; + continue; + } + dhd->prot->tot_rxcpl++; + + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + +#ifdef DMAMAP_STATS + dhd->dma_stats.rxdata--; + dhd->dma_stats.rxdata_sz -= len; +#endif /* DMAMAP_STATS */ +#ifdef DHD_HMAPTEST + if ((dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_POSTED) && + (pktid == dhd->prot->hmaptest_rx_pktid)) { + + uchar *ptr; + ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset); + DMA_UNMAP(dhd->osh, dhd->prot->hmap_rx_buf_pa, + (uint)dhd->prot->hmap_rx_buf_len, DMA_RX, 0, dmah); + DHD_ERROR(("hmaptest: d11write rxcpl rcvd sc rxbuf pktid=0x%08x\n", + pktid)); + DHD_ERROR(("hmaptest: d11write rxcpl r0_st=0x%08x r1_stat=0x%08x\n", + msg->rx_status_0, msg->rx_status_1)); + DHD_ERROR(("hmaptest: d11write rxcpl rxbuf va=0x%p pa=0x%08x\n", + dhd->prot->hmap_rx_buf_va, + (uint32)PHYSADDRLO(dhd->prot->hmap_rx_buf_pa))); + DHD_ERROR(("hmaptest: d11write rxcpl pktdata va=0x%p pa=0x%08x\n", + PKTDATA(dhd->osh, pkt), (uint32)PHYSADDRLO(pa))); + memcpy(ptr, dhd->prot->hmap_rx_buf_va, dhd->prot->hmap_rx_buf_len); + dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE; + dhd->prot->hmap_rx_buf_va = NULL; + dhd->prot->hmap_rx_buf_len = 0; + PHYSADDRHISET(dhd->prot->hmap_rx_buf_pa, 0); + PHYSADDRLOSET(dhd->prot->hmap_rx_buf_pa, 0); + prot->hmaptest.in_progress = FALSE; + } +#endif /* DHD_HMAPTEST */ + 
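+			/* work item consumed: log it, strip any dongle-supplied data
+			 * offset, then chain the packet per interface index so batches
+			 * can be handed up to the network layer together */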
DHD_MSGBUF_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, " + "pktdata %p, metalen %d\n", + ltoh32(msg->cmn_hdr.request_id), + ltoh16(msg->data_offset), + ltoh16(msg->data_len), msg->cmn_hdr.if_id, + msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), + ltoh16(msg->metadata_len))); + + pkt_cnt++; + msg_len -= item_len; + msg_addr += item_len; + +#if !defined(BCM_ROUTER_DHD) +#if DHD_DBG_SHOW_METADATA + if (prot->metadata_dbg && prot->rx_metadata_offset && + msg->metadata_len) { + uchar *ptr; + ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset); + /* header followed by data */ + bcm_print_bytes("rxmetadata", ptr, msg->metadata_len); + dhd_prot_print_metadata(dhd, ptr, msg->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ +#endif /* !BCM_ROUTER_DHD */ + + /* data_offset from buf start */ + if (ltoh16(msg->data_offset)) { + /* data offset given from dongle after split rx */ + PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset)); + } + else if (prot->rx_dataoffset) { + /* DMA RX offset updated through shared area */ + PKTPULL(dhd->osh, pkt, prot->rx_dataoffset); + } + /* Actual length of the packet */ + PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len)); +#ifdef DHD_PKTTS + if (dhd_get_pktts_enab(dhd) == TRUE) { + uint fwr1 = 0, fwr2 = 0; + + /* firmware mark rx_pktts.tref with 0xFFFFFFFF for errors */ + if (ltoh32(msg->rx_pktts.tref) != 0xFFFFFFFF) { + fwr1 = (uint)htonl(ltoh32(msg->rx_pktts.tref)); + fwr2 = (uint)htonl(ltoh32(msg->rx_pktts.tref) + + ltoh16(msg->rx_pktts.d_t2)); + + /* check for overflow */ + if (ntohl(fwr2) > ntohl(fwr1)) { + /* send rx timestamp to netlnik socket */ + dhd_msgbuf_send_msg_rx_ts(dhd, pkt, fwr1, fwr2); + } + } + } +#endif /* DHD_PKTTS */ + +#if defined(WL_MONITOR) + if (dhd_monitor_enabled(dhd, ifidx)) { + if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) { + dhd_rx_mon_pkt(dhd, msg, pkt, ifidx); + continue; + } else { + DHD_ERROR(("Received non 802.11 packet, " + "when monitor mode is enabled\n")); + } + } +#endif /* WL_MONITOR */ + + if (!pktqhead) { + pktqhead = prevpkt = pkt; + ifidx = msg->cmn_hdr.if_id; + } else { + if (ifidx != msg->cmn_hdr.if_id) { + pkt_newidx = pkt; + if_newidx = msg->cmn_hdr.if_id; + pkt_cnt--; + pkt_cnt_newidx = 1; + break; + } else { + PKTSETNEXT(dhd->osh, prevpkt, pkt); + prevpkt = pkt; + } + } + +#ifdef DHD_HP2P + if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) { +#ifdef DHD_HP2P_DEBUG + bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t)); +#endif /* DHD_HP2P_DEBUG */ + dhd_update_hp2p_rxstats(dhd, msg); + } +#endif /* DHD_HP2P */ + +#ifdef DHD_TIMESYNC + if (dhd->prot->rx_ts_log_enabled) { + dhd_pkt_parse_t parse; + ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts; + + memset(&parse, 0, sizeof(dhd_pkt_parse_t)); + dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse); + + if (parse.proto == IP_PROT_ICMP) + dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, + ts->low, ts->high, &parse); + } +#endif /* DHD_TIMESYNC */ + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, pkt); +#endif + } + + /* roll back read pointer for unprocessed message */ + if (msg_len > 0) { + if (ring->rd < msg_len / item_len) + ring->rd = ring->max_items - msg_len / item_len; + else + ring->rd -= msg_len / item_len; + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + pkt = pktqhead; + for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) { + nextpkt = PKTNEXT(dhd->osh, pkt); + PKTSETNEXT(dhd->osh, pkt, NULL); +#ifdef DHD_RX_CHAINING + dhd_rxchain_frame(dhd, pkt, ifidx); +#else 
+		dhd_prot_rx_frame(dhd, pkt, ifidx, 1);
+#endif /* DHD_RX_CHAINING */
+	}
+
+	if (pkt_newidx) {
+#ifdef DHD_RX_CHAINING
+		dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
+#else
+		dhd_prot_rx_frame(dhd, pkt_newidx, if_newidx, 1);
+#endif /* DHD_RX_CHAINING */
+	}
+
+	pkt_cnt += pkt_cnt_newidx;
+
+	/* Post another set of rxbufs to the device */
+	dhd_prot_return_rxbuf(dhd, ring, 0, pkt_cnt);
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_commit(dhd);
+#endif
+
+	/* After batch processing, check RX bound */
+	n += pkt_cnt;
+	if (n >= bound) {
+		more = TRUE;
+		break;
+	}
+	}
+
+	/* Call lb_dispatch only if packets are queued */
+	if (n &&
+#ifdef WL_MONITOR
+	!(dhd_monitor_enabled(dhd, ifidx)) &&
+#endif /* WL_MONITOR */
+	TRUE) {
+		DHD_LB_DISPATCH_RX_PROCESS(dhd);
+	}
+
+	return more;
+
+}
+
+/**
+ * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
+ */
+void
+dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
+{
+	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
+
+	if (ring == NULL) {
+		DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
+		return;
+	}
+	/* Update read pointer */
+	if (dhd->dma_d2h_ring_upd_support) {
+		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+	}
+
+	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
+		ring->idx, flowid, ring->wr, ring->rd));
+
+	/* Need more logic here, but for now use it directly */
+	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
+}
+
+/** called when DHD needs to check for 'transmit complete' messages from the dongle */
+bool
+BCMFASTPATH(dhd_prot_process_msgbuf_txcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
+{
+	bool more = TRUE;
+	uint n = 0;
+	msgbuf_ring_t *ring;
+	unsigned long flags;
+
+#ifdef DHD_HP2P
+	if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
+		ring = dhd->prot->d2hring_hp2p_txcpl;
+	else
+#endif /* DHD_HP2P */
+		ring = &dhd->prot->d2hring_tx_cpln;
+
+	/* Process all the messages - DTOH direction */
+	while (!dhd_is_device_removed(dhd)) {
+		uint8 *msg_addr;
+		uint32 msg_len;
+
+		if (dhd_query_bus_erros(dhd)) {
+			more = FALSE;
+			break;
+		}
+
+		if (dhd->hang_was_sent) {
+			more = FALSE;
+			break;
+		}
+
+		if (dhd->smmu_fault_occurred) {
+			more = FALSE;
+			break;
+		}
+
+		DHD_RING_LOCK(ring->ring_lock, flags);
+		/* Get the address of the next message to be read from ring */
+		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+		if (msg_addr == NULL) {
+			more = FALSE;
+			break;
+		}
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(msg_addr);
+
+		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+				__FUNCTION__, ring->name, msg_addr, msg_len));
+		}
+
+		/* Write to dngl rd ptr */
+		dhd_prot_upd_read_idx(dhd, ring);
+
+		/* After batch processing, check bound */
+		n += msg_len / ring->item_len;
+		if (n >= bound) {
+			break;
+		}
+	}
+
+	if (n) {
+		/* For the IDMA and HWA cases, the doorbell is sent along with the read index update.
+		 * For the DMA-indices case, ring the doorbell once n items are read, to sync with the dongle.
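+		 * Batching like this trades a little completion latency for far
+		 * fewer doorbell register writes on the tx-completion hot path.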
+ */ + if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) { + dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring)); + dhd->prot->txcpl_db_cnt++; + } + } + return more; +} + +int +BCMFASTPATH(dhd_prot_process_trapbuf)(dhd_pub_t *dhd) +{ + uint32 data; + dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf; + + /* Interrupts can come in before this struct + * has been initialized. + */ + if (trap_addr->va == NULL) { + DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__)); + return 0; + } + + OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32)); + data = *(uint32 *)(trap_addr->va); + + if (data & D2H_DEV_FWHALT) { + if (dhd->db7_trap.fw_db7w_trap_inprogress) { + DHD_ERROR(("DB7 FW responded 0x%04x\n", data)); + } else { + DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data)); + } + + if (data & D2H_DEV_EXT_TRAP_DATA) + { + if (dhd->extended_trap_data) { + OSL_CACHE_INV((void *)trap_addr->va, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } + if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) { + DHD_ERROR(("Extended trap data available\n")); + } + } +#ifdef BT_OVER_PCIE + if (data & D2H_DEV_TRAP_DUE_TO_BT) { + DHD_ERROR(("WLAN Firmware trapped due to BT\n")); + dhd->dongle_trap_due_to_bt = TRUE; + } +#endif /* BT_OVER_PCIE */ + return data; + } + return 0; +} + +/** called when DHD needs to check for 'ioctl complete' messages from the dongle */ +int +BCMFASTPATH(dhd_prot_process_ctrlbuf)(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln; + unsigned long flags; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd_query_bus_erros(dhd)) { + break; + } + + if (dhd->hang_was_sent) { + break; + } + + if (dhd->smmu_fault_occurred) { + break; + } + + DHD_RING_LOCK(ring->ring_lock, flags); + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + DHD_RING_UNLOCK(ring->ring_lock, flags); + + if (msg_addr == NULL) { + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + } + + return 0; +} + +/** + * Consume messages out of the D2H ring. Ensure that the message's DMA to host + * memory has completed, before invoking the message handler via a table lookup + * of the cmn_msg_hdr::msg_type. + */ +static int +BCMFASTPATH(dhd_prot_process_msgtype)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len) +{ + uint32 buf_len = len; + uint16 item_len; + uint8 msg_type; + cmn_msg_hdr_t *msg = NULL; + int ret = BCME_OK; + + ASSERT(ring); + item_len = ring->item_len; + if (item_len == 0) { + DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n", + __FUNCTION__, ring->idx, item_len, buf_len)); + return BCME_ERROR; + } + + while (buf_len > 0) { + if (dhd->hang_was_sent) { + ret = BCME_ERROR; + goto done; + } + + if (dhd->smmu_fault_occurred) { + ret = BCME_ERROR; + goto done; + } + + msg = (cmn_msg_hdr_t *)buf; + + /* Wait until DMA completes, then fetch msg_type */ + msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); + + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. 
This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. + */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(buf + item_len); + + DHD_MSGBUF_INFO(("msg_type %d item_len %d buf_len %d\n", + msg_type, item_len, buf_len)); + + if (msg_type == MSG_TYPE_LOOPBACK) { + bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len); + DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len)); + } + + ASSERT(msg_type < DHD_PROT_FUNCS); + if (msg_type >= DHD_PROT_FUNCS) { + DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n", + __FUNCTION__, msg_type, item_len, buf_len)); + ret = BCME_ERROR; + goto done; + } + +#if !defined(BCM_ROUTER_DHD) + if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) { + if (ring == dhd->prot->d2hring_info_cpln) { + if (!dhd->prot->infobufpost) { + DHD_ERROR(("infobuf posted are zero," + "but there is a completion\n")); + goto done; + } + dhd->prot->infobufpost--; + dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn); + dhd_prot_process_infobuf_complete(dhd, buf); + } +#ifdef BTLOG + else if (ring == dhd->prot->d2hring_btlog_cpln) { + info_buf_resp_t *resp = (info_buf_resp_t *)buf; + + if (!dhd->prot->btlogbufpost) { + DHD_ERROR(("btlogbuf posted are zero," + "but there is a completion\n")); + goto done; + } + + dhd->prot->btlogbufpost--; + if (resp->compl_hdr.status != BCMPCIE_PKT_FLUSH) { + dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn); + } + dhd_prot_process_btlog_complete(dhd, buf); + } +#endif /* BTLOG */ + } else +#endif /* !defined(BCM_ROUTER_DHD) */ + if (table_lookup[msg_type]) { + table_lookup[msg_type](dhd, buf); + } + + if (buf_len < item_len) { + ret = BCME_ERROR; + goto done; + } + buf_len = buf_len - item_len; + buf = buf + item_len; + } + +done: + +#ifdef DHD_RX_CHAINING + dhd_rxchain_commit(dhd); +#endif + + return ret; +} /* dhd_prot_process_msgtype */ + +static void +dhd_prot_noop(dhd_pub_t *dhd, void *msg) +{ + return; +} + +/** called on MSG_TYPE_RING_STATUS message received from dongle */ +static void +dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg; + uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id); + uint16 status = ltoh16(ring_status->compl_hdr.status); + uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id); + + DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n", + request_id, status, ring_id, ltoh16(ring_status->write_idx))); + + if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) + return; + if (status == BCMPCIE_BAD_PHASE) { + /* bad phase report from */ + /* XXX: if the request is ioctl request finish the ioctl, rather than timing out */ + DHD_ERROR(("Bad phase\n")); + } + if (status != BCMPCIE_BADOPTION) + return; + + if (request_id == DHD_H2D_DBGRING_REQ_PKTID) { + /* XXX: see if the debug ring create is pending */ + if (dhd->prot->h2dring_info_subn != NULL) { + if (dhd->prot->h2dring_info_subn->create_pending == TRUE) { + DHD_ERROR(("H2D ring create failed for info ring\n")); + dhd->prot->h2dring_info_subn->create_pending = FALSE; + } + else + DHD_ERROR(("ring create ID for a ring, create not pending\n")); + } else { + DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__)); + } + } + else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) { + /* XXX: see if the debug ring create is 
pending */
+		if (dhd->prot->d2hring_info_cpln != NULL) {
+			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
+				DHD_ERROR(("D2H ring create failed for info ring\n"));
+				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+			}
+			else
+				DHD_ERROR(("ring create ID for info ring, create not pending\n"));
+		} else {
+			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
+		}
+	}
+#ifdef BTLOG
+	else if (request_id == DHD_H2D_BTLOGRING_REQ_PKTID) {
+		/* XXX: see if the debug ring create is pending */
+		if (dhd->prot->h2dring_btlog_subn != NULL) {
+			if (dhd->prot->h2dring_btlog_subn->create_pending == TRUE) {
+				DHD_ERROR(("H2D ring create failed for btlog ring\n"));
+				dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
+			}
+			else
+				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
+		} else {
+			DHD_ERROR(("%s btlog submit ring doesn't exist\n", __FUNCTION__));
+		}
+	}
+	else if (request_id == DHD_D2H_BTLOGRING_REQ_PKTID) {
+		/* XXX: see if the debug ring create is pending */
+		if (dhd->prot->d2hring_btlog_cpln != NULL) {
+			if (dhd->prot->d2hring_btlog_cpln->create_pending == TRUE) {
+				DHD_ERROR(("D2H ring create failed for btlog ring\n"));
+				dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
+			}
+			else
+				DHD_ERROR(("ring create ID for btlog ring, create not pending\n"));
+		} else {
+			DHD_ERROR(("%s btlog cpl ring doesn't exist\n", __FUNCTION__));
+		}
+	}
+#endif /* BTLOG */
+#ifdef DHD_HP2P
+	else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
+		/* XXX: see if the HPP txcmpl ring create is pending */
+		if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
+			if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
+				DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
+				dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+			}
+			else
+				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
+		} else {
+			DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
+		}
+	}
+	else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
+		/* XXX: see if the hp2p rxcmpl ring create is pending */
+		if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
+			if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
+				DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
+				dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+			}
+			else
+				DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
+		} else {
+			DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
+		}
+	}
+#endif /* DHD_HP2P */
+	else {
+		DHD_ERROR(("don't know how to pair with original request\n"));
+	}
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
+static void
+dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
+{
+	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
+	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
+		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
+		gen_status->compl_hdr.flow_ring_id));
+
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+/**
+ * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
+ * dongle received the ioctl message in dongle memory.
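+ *
+ * Review note (sketch, not part of this patch's original text): the ioctl
+ * path is a two-phase handshake. Both pending bits are set when the request
+ * is submitted; the ACK and the completion each clear one of them:
+ *
+ *     // on submit:
+ *     prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
+ *     // on MSG_TYPE_IOCTLPTR_REQ_ACK (this handler):
+ *     prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
+ *     // on MSG_TYPE_IOCTL_CMPLT (dhd_prot_ioctcmplt_process):
+ *     prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
+ *
+ * Any other observed ordering is logged as a protocol violation.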
+ */
+static void
+dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
+{
+	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
+	unsigned long flags;
+#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
+	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
+#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
+
+#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
+	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
+	if (pktid != DHD_IOCTL_REQ_PKTID) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#else
+		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
+			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+	}
+#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
+
+	dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
+		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
+		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
+	} else {
+		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
+			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+		prhex("dhd_prot_ioctack_process:",
+			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+	}
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
+		ioct_ack->compl_hdr.flow_ring_id));
+	if (ioct_ack->compl_hdr.status != 0) {
+		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
+		/* FIXME: should we fail the pending IOCTL completion wait process... */
+	}
+#ifdef REPORT_FATAL_TIMEOUTS
+	else {
+		dhd_stop_bus_timer(dhd);
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+}
+
+/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
+static void
+dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint32 pkt_id, xt_id;
+	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
+	void *pkt;
+	unsigned long flags;
+	dhd_dma_buf_t retbuf;
+#ifdef REPORT_FATAL_TIMEOUTS
+	uint16 dhd_xt_id;
+#endif
+
+	/* Check for the ioctl timeout induce flag, which is set by firing
+	 * a dhd iovar to induce an IOCTL timeout. If the flag is set,
+	 * return from here, which results in an IOCTL timeout.
+ */ + if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) { + DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__)); + return; + } + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD) +#ifndef IOCTLRESP_USE_CONSTMEM + DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#else + DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#endif /* !IOCTLRESP_USE_CONSTMEM */ +#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */ + + DHD_GENERAL_LOCK(dhd, flags); + if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) || + !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + + dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS(); + + /* Clear Response pending bit */ + prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING; + DHD_GENERAL_UNLOCK(dhd, flags); + +#ifndef IOCTLRESP_USE_CONSTMEM + pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE); +#else + dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf); + pkt = retbuf.va; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (!pkt) { + DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + return; + } + + prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); + prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); + xt_id = ltoh16(ioct_resp->trans_id); + + if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) { + DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n", + __FUNCTION__, xt_id, prot->ioctl_trans_id, + prot->curr_ioctl_cmd, ioct_resp->cmd)); +#ifdef REPORT_FATAL_TIMEOUTS + dhd_stop_cmd_timer(dhd); +#endif /* REPORT_FATAL_TIMEOUTS */ + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR); + dhd_prot_debug_info_print(dhd); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + dhd_schedule_reset(dhd); + goto exit; + } +#ifdef REPORT_FATAL_TIMEOUTS + dhd_xt_id = dhd_get_request_id(dhd); + if (xt_id == dhd_xt_id) { + dhd_stop_cmd_timer(dhd); + } else { + DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d", + __FUNCTION__, xt_id, dhd_xt_id)); + } +#endif /* REPORT_FATAL_TIMEOUTS */ + DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n", + pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); + + if (prot->ioctl_resplen > 0) { +#ifndef IOCTLRESP_USE_CONSTMEM + bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen); +#else + bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + } + + /* wake up any dhd_os_ioctl_resp_wait() */ + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS); + +exit: +#ifndef IOCTLRESP_USE_CONSTMEM + dhd_prot_packet_free(dhd, pkt, + PKTTYPE_IOCTL_RX, FALSE); +#else + free_ioctl_return_buffer(dhd, &retbuf); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + + /* Post another ioctl buf to the device */ + if 
(prot->cur_ioctlresp_bufs_posted > 0) {
+		prot->cur_ioctlresp_bufs_posted--;
+	}
+
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+}
+
+int
+dhd_prot_check_tx_resource(dhd_pub_t *dhd)
+{
+	return dhd->prot->no_tx_resource;
+}
+
+#ifdef DHD_PKTTS
+/**
+ * dhd_msgbuf_get_ip_info - this api finds the following (ipv4 and ipv6 are supported)
+ * 1. pointer to the data portion of the pkt
+ * 2. five tuple checksum of the pkt
+ *	= {src_ip, dst_ip, src_port, dst_port, proto}
+ * 3. ip_prec
+ *
+ * @dhdp: pointer to dhd_pub object
+ * @pkt: packet pointer
+ * @ptr: returns pointer to the data portion of the pkt
+ * @chksum: returns five tuple checksum of the pkt
+ * @prec: returns ip precedence
+ * @tcp_seqno: returns tcp sequence number
+ * @tcp_ackno: returns tcp ack number
+ *
+ * returns packet length remaining after the tcp/udp header, or BCME_ERROR.
+ */
+static int
+dhd_msgbuf_get_ip_info(dhd_pub_t *dhdp, void *pkt, void **ptr, uint32 *chksum,
+	uint32 *prec, uint32 *tcp_seqno, uint32 *tcp_ackno)
+{
+	char *pdata;
+	uint plen;
+	uint32 type, len;
+	uint32 checksum = 0;
+	uint8 dscp_prio = 0;
+	struct bcmtcp_hdr *tcp = NULL;
+
+	pdata = PKTDATA(dhdp->osh, pkt);
+	plen = PKTLEN(dhdp->osh, pkt);
+
+	/* Ethernet header */
+	if (plen < ETHER_HDR_LEN) {
+		return BCME_ERROR;
+	}
+	type = ntoh16(((struct ether_header *)pdata)->ether_type);
+	pdata += ETHER_HDR_LEN;
+	plen -= ETHER_HDR_LEN;
+
+	if ((type == ETHER_TYPE_IP) ||
+		(type == ETHER_TYPE_IPV6)) {
+		dscp_prio = (IP_TOS46(pdata) >> IPV4_TOS_PREC_SHIFT);
+	}
+
+	/* IP header (v4 or v6) */
+	if (type == ETHER_TYPE_IP) {
+		struct ipv4_hdr *iph = (struct ipv4_hdr *)pdata;
+		if (plen <= sizeof(*iph)) {
+			return BCME_ERROR;
+		}
+
+		len = IPV4_HLEN(iph);
+		if (plen <= len || IP_VER(iph) != IP_VER_4 || len < IPV4_MIN_HEADER_LEN) {
+			return BCME_ERROR;
+		}
+
+		type = IPV4_PROT(iph);
+		pdata += len;
+		plen -= len;
+
+		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->src_ip,
+			sizeof(iph->src_ip) / sizeof(uint32));
+		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->dst_ip,
+			sizeof(iph->dst_ip) / sizeof(uint32));
+	} else if (type == ETHER_TYPE_IPV6) {
+		struct ipv6_hdr *ip6h = (struct ipv6_hdr *)pdata;
+
+		if (plen <= IPV6_MIN_HLEN || IP_VER(ip6h) != IP_VER_6) {
+			return BCME_ERROR;
+		}
+
+		type = IPV6_PROT(ip6h);
+		pdata += IPV6_MIN_HLEN;
+		plen -= IPV6_MIN_HLEN;
+		if (IPV6_EXTHDR(type)) {
+			uint8 proto = 0;
+			int32 exth_len = ipv6_exthdr_len(pdata, &proto);
+			if (exth_len < 0 || ((plen -= exth_len) <= 0)) {
+				return BCME_ERROR;
+			}
+			type = proto;
+			pdata += exth_len;
+			plen -= exth_len;
+		}
+
+		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->saddr,
+			sizeof(ip6h->saddr) / sizeof(uint32));
+		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->daddr,
+			sizeof(ip6h->saddr) / sizeof(uint32));
+	}
+
+	/* return error if not TCP or UDP */
+	if ((type != IP_PROT_UDP) && (type != IP_PROT_TCP)) {
+		return BCME_ERROR;
+	}
+
+	/* src_port and dst_port (together 32bit) */
+	checksum ^= bcm_compute_xor32((volatile uint32 *)pdata, 1);
+	checksum ^= bcm_compute_xor32((volatile uint32 *)&type, 1);
+
+	if (type == IP_PROT_TCP) {
+		tcp = (struct bcmtcp_hdr *)pdata;
+		len = TCP_HDRLEN(pdata[TCP_HLEN_OFFSET]) << 2;
+	} else { /* IP_PROT_UDP */
+		len = sizeof(struct bcmudp_hdr);
+	}
+
+	/* length check */
+	if (plen < len) {
+		return BCME_ERROR;
+	}
+
+	pdata += len;
+	plen -= len;
+
+	/* update data[0] */
+	*ptr = (void *)pdata;
+
+	/* update fivetuple checksum */
+	*chksum = checksum;
+
+	/* update ip prec */
+	*prec = dscp_prio;
+
+	/* update tcp sequence number */
+	if (tcp != NULL) {
+		*tcp_seqno = tcp->seq_num;
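+		/* Review note (sketch, not part of this patch's original text):
+		 * together with the IP-address and port folding above, the flow
+		 * key consumed by dhd_match_pktts_flow() reduces to an
+		 * order-independent XOR of 32-bit words over
+		 * {src_ip, dst_ip, ports, proto}, conceptually:
+		 *
+		 *     uint32 key = 0;
+		 *     key ^= bcm_compute_xor32(src_ip_words, n_src_words);
+		 *     key ^= bcm_compute_xor32(dst_ip_words, n_dst_words);
+		 *     key ^= bcm_compute_xor32(ports_word, 1);  // src+dst port
+		 *     key ^= bcm_compute_xor32(proto_word, 1);
+		 *
+		 * where the *_words names are placeholders for the header fields
+		 * parsed above, not identifiers from this driver.
+		 */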
*tcp_ackno = tcp->ack_num;
+	}
+
+	return plen;
+}
+
+/**
+ * dhd_msgbuf_send_msg_tx_ts - send pktts tx timestamp to netlink socket
+ *
+ * @dhdp: pointer to dhd_pub object
+ * @pkt: packet pointer
+ * @fwts: firmware timestamp {fwt1..fwt4}
+ * @version: pktlat version supported in firmware
+ */
+static void
+dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhdp, void *pkt, void *fw_ts, uint16 version)
+{
+	bcm_to_info_tx_ts_t to_tx_info;
+	void *ptr = NULL;
+	int dlen = 0;
+	uint32 checksum = 0;
+	uint32 prec = 0;
+	pktts_flow_t *flow = NULL;
+	uint32 flow_pkt_offset = 0;
+	uint32 num_config = 0;
+	uint32 tcp_seqno = 0;
+	uint32 tcp_ackno = 0;
+
+	dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
+
+	flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
+	if (flow) {
+		/* there is valid config for this chksum */
+		flow_pkt_offset = flow->pkt_offset;
+	} else if (num_config) {
+		/* there is valid config + no matching config for this chksum */
+		return;
+	} else {
+		/* there is no valid config. pass all to netlink */
+	}
+
+	memset(&to_tx_info, 0, sizeof(to_tx_info));
+	to_tx_info.hdr.type = BCM_TS_TX;
+	to_tx_info.hdr.flowid = checksum;
+	to_tx_info.hdr.prec = prec;
+
+	/* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
+	if (!flow && tcp_seqno) {
+		uint32 *xbytes = (uint32 *)to_tx_info.hdr.xbytes;
+
+		(void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
+			((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
+		(void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
+			((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
+	} else if ((dlen > flow_pkt_offset) &&
+		((dlen - flow_pkt_offset) >= sizeof(to_tx_info.hdr.xbytes))) {
+		(void)memcpy_s(to_tx_info.hdr.xbytes, sizeof(to_tx_info.hdr.xbytes),
+			((uint8 *)ptr + flow_pkt_offset), sizeof(to_tx_info.hdr.xbytes));
+	}
+
+	to_tx_info.dhdt0 = DHD_PKT_GET_QTIME(pkt);
+	to_tx_info.dhdt5 = OSL_SYSUPTIME_US();
+
+	if (version == METADATA_VER_1) {
+		struct pktts_fwtx_v1 *fwts = (struct pktts_fwtx_v1 *)fw_ts;
+
+		to_tx_info.hdr.magic = BCM_TS_MAGIC;
+
+		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
+		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
+		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
+		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
+
+		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, OFFSETOF(bcm_to_info_tx_ts_t, ucts));
+	} else if (version == METADATA_VER_2) {
+		struct pktts_fwtx_v2 *fwts = (struct pktts_fwtx_v2 *)fw_ts;
+
+		to_tx_info.hdr.magic = BCM_TS_MAGIC_V2;
+
+		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
+		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
+		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
+		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
+
+		to_tx_info.ucts[0] = ntohl(fwts->ut[0]);
+		to_tx_info.ucts[1] = ntohl(fwts->ut[1]);
+		to_tx_info.ucts[2] = ntohl(fwts->ut[2]);
+		to_tx_info.ucts[3] = ntohl(fwts->ut[3]);
+		to_tx_info.ucts[4] = ntohl(fwts->ut[4]);
+
+		to_tx_info.uccnt[0] = ntohl(fwts->uc[0]);
+		to_tx_info.uccnt[1] = ntohl(fwts->uc[1]);
+		to_tx_info.uccnt[2] = ntohl(fwts->uc[2]);
+		to_tx_info.uccnt[3] = ntohl(fwts->uc[3]);
+		to_tx_info.uccnt[4] = ntohl(fwts->uc[4]);
+		to_tx_info.uccnt[5] = ntohl(fwts->uc[5]);
+		to_tx_info.uccnt[6] = ntohl(fwts->uc[6]);
+		to_tx_info.uccnt[7] = ntohl(fwts->uc[7]);
+
+		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, sizeof(to_tx_info));
+	}
+	return;
+}
+
+/**
+ * dhd_msgbuf_send_msg_rx_ts - send pktts rx timestamp to netlink socket
+ *
+ * @dhdp: pointer to dhd_pub object
+ * @pkt: packet pointer
+ * @fwr1: firmware timestamp at probe point 1
+ * @fwr2: firmware timestamp at probe point 2
+ */
+static
void +dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhdp, void *pkt, uint fwr1, uint fwr2) +{ + bcm_to_info_rx_ts_t to_rx_info; + void *ptr = NULL; + int dlen = 0; + uint32 checksum = 0; + uint32 prec = 0; + pktts_flow_t *flow = NULL; + uint32 flow_pkt_offset = 0; + uint32 num_config = 0; + uint32 tcp_seqno = 0; + uint32 tcp_ackno = 0; + + dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno); + + flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config); + if (flow) { + /* there is valid config for this chksum */ + flow_pkt_offset = flow->pkt_offset; + } else if (num_config) { + /* there is valid config + no matching config for this chksum */ + return; + } else { + /* there is no valid config. pass all to netlink */ + } + + memset(&to_rx_info, 0, sizeof(to_rx_info)); + to_rx_info.hdr.magic = BCM_TS_MAGIC; + to_rx_info.hdr.type = BCM_TS_RX; + to_rx_info.hdr.flowid = checksum; + to_rx_info.hdr.prec = prec; + + /* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */ + if (!flow && tcp_seqno) { + uint32 *xbytes = (uint32 *)to_rx_info.hdr.xbytes; + + (void)memcpy_s(&xbytes[0], sizeof(xbytes[0]), + ((uint8 *)&tcp_seqno), sizeof(tcp_seqno)); + (void)memcpy_s(&xbytes[1], sizeof(xbytes[1]), + ((uint8 *)&tcp_ackno), sizeof(tcp_ackno)); + } else if ((dlen > flow_pkt_offset) && + ((dlen - flow_pkt_offset) >= sizeof(to_rx_info.hdr.xbytes))) { + (void)memcpy_s(to_rx_info.hdr.xbytes, sizeof(to_rx_info.hdr.xbytes), + ((uint8 *)ptr + flow_pkt_offset), sizeof(to_rx_info.hdr.xbytes)); + } + + to_rx_info.dhdr3 = OSL_SYSUPTIME_US(); + + to_rx_info.fwts[0] = ntohl(fwr1); + to_rx_info.fwts[1] = ntohl(fwr2); + + dhd_send_msg_to_ts(NULL, (void *)&to_rx_info, sizeof(to_rx_info)); + return; +} +#endif /* DHD_PKTTS */ + +/** called on MSG_TYPE_TX_STATUS message received from dongle */ +static void +BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + host_txbuf_cmpl_t * txstatus; + unsigned long flags; + uint32 pktid; + void *pkt; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + bool pkt_fate; + msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; +#if defined(TX_STATUS_LATENCY_STATS) + flow_info_t *flow_info; + uint64 tx_status_latency; +#endif /* TX_STATUS_LATENCY_STATS */ +#ifdef AGG_H2D_DB + msgbuf_ring_t *flow_ring; +#endif /* AGG_H2D_DB */ +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) + dhd_awdl_stats_t *awdl_stats; + if_flow_lkup_t *if_flow_lkup; + unsigned long awdl_stats_lock_flags; + uint8 ifindex; + uint8 role; +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ + flow_ring_node_t *flow_ring_node; + uint16 flowid; +#ifdef DHD_PKTTS + struct metadata_txcmpl_v1 meta_ts_v1; + struct metadata_txcmpl_v2 meta_ts_v2; + dhd_dma_buf_t meta_data_buf; + uint64 addr = 0; + + BCM_REFERENCE(meta_ts_v1); + BCM_REFERENCE(meta_ts_v2); + BCM_REFERENCE(meta_data_buf); + BCM_REFERENCE(addr); + + if ((dhd->memdump_type == DUMP_TYPE_PKTID_AUDIT_FAILURE) || + (dhd->memdump_type == DUMP_TYPE_PKTID_INVALID)) { + DHD_ERROR_RLMT(("%s: return as invalid pktid detected\n", __FUNCTION__)); + return; + } + + memset(&meta_ts_v1, 0, sizeof(meta_ts_v1)); + memset(&meta_ts_v2, 0, sizeof(meta_ts_v2)); + memset(&meta_data_buf, 0, sizeof(meta_data_buf)); +#endif /* DHD_PKTTS */ + txstatus = (host_txbuf_cmpl_t *)msg; + + flowid = txstatus->compl_hdr.flow_ring_id; + flow_ring_node = DHD_FLOW_RING(dhd, flowid); +#ifdef AGG_H2D_DB + flow_ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + OSL_ATOMIC_DEC(dhd->osh, &flow_ring->inflight); 
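+	/* Review note (sketch, not part of this patch's original text):
+	 * request_id carries a 32-bit "pktid" rather than a host pointer.
+	 * At post time the native packet and its DMA mapping are stashed
+	 * under a fresh id via DHD_NATIVE_TO_PKTID_SAVE(), and this
+	 * completion path later exchanges the id back with
+	 * DHD_PKTID_TO_NATIVE(), which also returns pa/len/dmah for the
+	 * DMA_UNMAP() below.  A minimal sketch of the idea:
+	 *
+	 *     pkts[id] = pkt;                  // save at TX post
+	 *     pkt = pkts[id]; pkts[id] = 0;    // exchange at TX status
+	 *
+	 * keeping the dongle unaware of host addresses and making stale or
+	 * duplicate completions detectable.
+	 */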
+#endif /* AGG_H2D_DB */ + + BCM_REFERENCE(flow_ring_node); + +#ifdef DEVICE_TX_STUCK_DETECT + /** + * Since we got a completion message on this flowid, + * update tx_cmpl time stamp + */ + flow_ring_node->tx_cmpl = OSL_SYSUPTIME(); + /* update host copy of rd pointer */ +#ifdef DHD_HP2P + if (dhd->prot->d2hring_hp2p_txcpl && + flow_ring_node->flow_info.tid == HP2P_PRIO) { + ring = dhd->prot->d2hring_hp2p_txcpl; + } +#endif /* DHD_HP2P */ + ring->curr_rd++; + if (ring->curr_rd >= ring->max_items) { + ring->curr_rd = 0; + } +#endif /* DEVICE_TX_STUCK_DETECT */ + + /* locks required to protect circular buffer accesses */ + DHD_RING_LOCK(ring->ring_lock, flags); + pktid = ltoh32(txstatus->cmn_hdr.request_id); + + if (dhd->pcie_txs_metadata_enable > 1) { + /* Return metadata format (little endian): + * |<--- txstatus --->|<- metadatalen ->| + * |____|____|________|________|________| + * | | | | |> total delay from fetch to report (8-bit 1 = 4ms) + * | | | |> ucode delay from enqueue to completion (8-bit 1 = 4ms) + * | | |> 8-bit reserved (pre-filled with original TX status by caller) + * | |> delay time first fetch to the last fetch (4-bit 1 = 32ms) + * |> fetch count (4-bit) + */ + printf("TX status[%d] = %04x-%04x -> status = %d (%d/%dms + %d/%dms)\n", pktid, + ltoh16(txstatus->tx_status_ext), ltoh16(txstatus->tx_status), + (txstatus->tx_status & WLFC_CTL_PKTFLAG_MASK), + ((txstatus->tx_status >> 12) & 0xf), + ((txstatus->tx_status >> 8) & 0xf) * 32, + ((txstatus->tx_status_ext & 0xff) * 4), + ((txstatus->tx_status_ext >> 8) & 0xff) * 4); + } + pkt_fate = TRUE; + +#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD) + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid, + DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */ + + DHD_MSGBUF_INFO(("txstatus for pktid 0x%04x\n", pktid)); + if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) { + DHD_ERROR(("Extra packets are freed\n")); + } + ASSERT(pktid != 0); + +#ifdef DHD_HMAPTEST + + if ((dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_POSTED) && + (pktid == dhd->prot->hmaptest_tx_pktid)) { + DHD_ERROR(("hmaptest: d11read txcpl received sc txbuf pktid=0x%08x\n", pktid)); + DHD_ERROR(("hmaptest: d11read txcpl txstatus=0x%08x\n", txstatus->tx_status)); + DHD_ERROR(("hmaptest: d11read txcpl sc txbuf va=0x%p pa=0x%08x\n", + dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(dhd->prot->hmap_tx_buf_pa))); + dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE; + dhd->prot->hmap_tx_buf_va = NULL; + dhd->prot->hmap_tx_buf_len = 0; + PHYSADDRHISET(dhd->prot->hmap_tx_buf_pa, 0); + PHYSADDRLOSET(dhd->prot->hmap_tx_buf_pa, 0); + prot->hmaptest.in_progress = FALSE; + } + /* original skb is kept as it is because its going to be freed later in this path */ +#endif /* DHD_HMAPTEST */ + +#ifdef DHD_PKTTS + if (dhd_get_pktts_enab(dhd) && + dhd->pkt_metadata_buflen) { + /* Handle the Metadata first */ + meta_data_buf.va = DHD_PKTID_RETREIVE_METADATA(dhd, dhd->prot->pktid_tx_map, + meta_data_buf.pa, meta_data_buf._alloced, meta_data_buf.dmah, pktid); + if (meta_data_buf.va) { + if (dhd->pkt_metadata_version == METADATA_VER_1) { + memcpy(&meta_ts_v1, meta_data_buf.va, sizeof(meta_ts_v1)); + } else if (dhd->pkt_metadata_version == METADATA_VER_2) { + memcpy(&meta_ts_v2, meta_data_buf.va, sizeof(meta_ts_v2)); + } + memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa)); + DHD_TRACE(("%s(): pktid %d retrieved mdata buffer %p " + "pa: %llx dmah: %p\r\n", __FUNCTION__, + pktid, 
meta_data_buf.va, addr,
+				meta_data_buf.dmah));
+		}
+	}
+#endif /* DHD_PKTTS */
+
+	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+	if (!pkt) {
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef DHD_PKTTS
+		/*
+		 * Call the free function after the Ring Lock is released.
+		 * This is because pcie_free_consistent is not supposed to be
+		 * called with Interrupts Disabled
+		 */
+		if (meta_data_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
+				meta_data_buf.pa, meta_data_buf.dmah);
+		}
+#endif /* DHD_PKTTS */
+		DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
+		prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+			dhd_bus_mem_dump(dhd);
+		}
+#else
+		ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+		return;
+	}
+
+	if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
+		DHD_ERROR_RLMT(("%s: start tx queue as min pktids are available\n",
+			__FUNCTION__));
+		prot->pktid_txq_stop_cnt--;
+		dhd->prot->no_tx_resource = FALSE;
+		dhd_bus_start_queue(dhd->bus);
+	}
+
+	DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+
+#ifdef TX_STATUS_LATENCY_STATS
+	/* update the tx status latency for flowid */
+	flow_info = &flow_ring_node->flow_info;
+	tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+	if (dhd->pkt_latency > 0 &&
+		tx_status_latency > (dhd->pkt_latency)) {
+		DHD_ERROR(("Latency: %llu > %u aw_cnt: %u \n",
+			tx_status_latency, dhd->pkt_latency,
+			dhd->awdl_aw_counter));
+	}
+#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
+	flow_info->cum_tx_status_latency += tx_status_latency;
+	flow_info->num_tx_status++;
+#endif /* TX_STATUS_LATENCY_STATS */
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+	/* update the tx status latency when this AWDL slot is active */
+	if_flow_lkup = (if_flow_lkup_t *)dhd->if_flow_lkup;
+	ifindex = flow_ring_node->flow_info.ifindex;
+	role = if_flow_lkup[ifindex].role;
+	if (role == WLC_E_IF_ROLE_AWDL) {
+		awdl_stats = &dhd->awdl_stats[dhd->awdl_tx_status_slot];
+		DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
+		awdl_stats->cum_tx_status_latency += tx_status_latency;
+		awdl_stats->num_tx_status++;
+		DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
+	}
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+#ifdef HOST_SFH_LLC
+	if (dhd->host_sfhllc_supported) {
+		struct ether_header eth;
+		if (!memcpy_s(&eth, sizeof(eth),
+			PKTDATA(dhd->osh, pkt), sizeof(eth))) {
+			if (dhd_8023_llc_to_ether_hdr(dhd->osh,
+				&eth, pkt) != BCME_OK) {
+				DHD_ERROR_RLMT(("%s: host sfh llc"
+					" conversion to ether failed\n",
+					__FUNCTION__));
+			}
+		}
+	}
+#endif /* HOST_SFH_LLC */
+
+#ifdef DMAMAP_STATS
+	dhd->dma_stats.txdata--;
+	dhd->dma_stats.txdata_sz -= len;
+#endif /* DMAMAP_STATS */
+	pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
+		ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
+#ifdef DHD_PKT_LOGGING
+	if (dhd->d11_tx_status) {
+		uint16 status = ltoh16(txstatus->compl_hdr.status) &
+			WLFC_CTL_PKTFLAG_MASK;
+		dhd_handle_pktdata(dhd, ltoh32(txstatus->cmn_hdr.if_id),
+			pkt, (uint8 *)PKTDATA(dhd->osh, pkt), pktid, len,
+			&status, NULL, TRUE, FALSE, TRUE);
+	}
+#endif /* DHD_PKT_LOGGING */
+#if defined(BCMPCIE) && (defined(LINUX) || defined(OEM_ANDROID) || defined(DHD_EFI))
+	dhd_txcomplete(dhd,
pkt, pkt_fate);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+	dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#endif /* BCMPCIE && (defined(LINUX) || defined(OEM_ANDROID)) */
+
+#ifdef DHD_PKTTS
+	if (dhd_get_pktts_enab(dhd) == TRUE) {
+		if (dhd->pkt_metadata_buflen) {
+			/* firmware marks tx_pktts.tref with 0xFFFFFFFF for errors */
+			if ((dhd->pkt_metadata_version == METADATA_VER_1) &&
+				(ltoh32(meta_ts_v1.tref) != 0xFFFFFFFF)) {
+				struct pktts_fwtx_v1 fwts;
+				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v1.tref));
+				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
+					ltoh16(meta_ts_v1.d_t2));
+				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
+					ltoh16(meta_ts_v1.d_t3));
+				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
+					ltoh16(meta_ts_v1.d_t4));
+				/* check for overflow */
+				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
+					/* send tx timestamp to netlink socket */
+					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
+						dhd->pkt_metadata_version);
+				}
+			} else if ((dhd->pkt_metadata_version == METADATA_VER_2) &&
+				(ltoh32(meta_ts_v2.tref) != 0xFFFFFFFF)) {
+				struct pktts_fwtx_v2 fwts;
+				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref));
+				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+					ltoh16(meta_ts_v2.d_t2));
+				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+					ltoh16(meta_ts_v2.d_t3));
+				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+					ltoh16(meta_ts_v2.d_t4));
+
+				fwts.ut[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+					ltoh16(meta_ts_v2.u_t1));
+				fwts.ut[1] = (uint32)htonl(ltoh16(meta_ts_v2.u_t2));
+				fwts.ut[2] = (uint32)htonl(ltoh16(meta_ts_v2.u_t3));
+				fwts.ut[3] = (uint32)htonl(ltoh16(meta_ts_v2.u_t4));
+				fwts.ut[4] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+					ltoh16(meta_ts_v2.u_t5));
+
+				fwts.uc[0] = (uint32)htonl(ltoh32(meta_ts_v2.u_c1));
+				fwts.uc[1] = (uint32)htonl(ltoh32(meta_ts_v2.u_c2));
+				fwts.uc[2] = (uint32)htonl(ltoh32(meta_ts_v2.u_c3));
+				fwts.uc[3] = (uint32)htonl(ltoh32(meta_ts_v2.u_c4));
+				fwts.uc[4] = (uint32)htonl(ltoh32(meta_ts_v2.u_c5));
+				fwts.uc[5] = (uint32)htonl(ltoh32(meta_ts_v2.u_c6));
+				fwts.uc[6] = (uint32)htonl(ltoh32(meta_ts_v2.u_c7));
+				fwts.uc[7] = (uint32)htonl(ltoh32(meta_ts_v2.u_c8));
+
+				DHD_INFO(("uct1:%x uct2:%x uct3:%x uct4:%x uct5:%x\n",
+					ntohl(fwts.ut[0]), ntohl(fwts.ut[1]), ntohl(fwts.ut[2]),
+					ntohl(fwts.ut[3]), ntohl(fwts.ut[4])));
+				DHD_INFO(("ucc1:%x ucc2:%x ucc3:%x ucc4:%x"
+					" ucc5:%x ucc6:%x ucc7:%x ucc8:%x\n",
+					ntohl(fwts.uc[0]), ntohl(fwts.uc[1]), ntohl(fwts.uc[2]),
+					ntohl(fwts.uc[3]), ntohl(fwts.uc[4]), ntohl(fwts.uc[5]),
+					ntohl(fwts.uc[6]), ntohl(fwts.uc[7])));
+				/* check for overflow */
+				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
+					/* send tx timestamp to netlink socket */
+					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
+						dhd->pkt_metadata_version);
+				}
+			}
+		} else {
+			/* firmware marks tx_pktts.tref with 0xFFFFFFFF for errors */
+			if (ltoh32(txstatus->tx_pktts.tref) != 0xFFFFFFFF) {
+				struct pktts_fwtx_v1 fwts;
+
+				fwts.ts[0] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref));
+				fwts.ts[1] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
+					ltoh16(txstatus->tx_pktts.d_t2));
+				fwts.ts[2] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
+					ltoh16(txstatus->tx_pktts.d_t3));
+				fwts.ts[3] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
+					ltoh16(txstatus->tx_pktts.d_t4));
+
+				/* check for overflow */
+				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
+					/* send tx timestamp to netlink socket */
+					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts, METADATA_VER_1);
+				}
+			}
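+			/* Review note (sketch, not part of this patch's original
+			 * text): the firmware reports one absolute 32-bit
+			 * reference time plus small 16-bit deltas, and the host
+			 * expands them into absolute network-order timestamps,
+			 * i.e. for each probe point i:
+			 *
+			 *     ts[i] = htonl(tref + d_ti);  // ts[0] is tref itself
+			 *
+			 * The "overflow" guard above simply drops samples whose
+			 * expanded last timestamp does not sort after the first.
+			 */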
+ } + } +#endif /* DHD_PKTTS */ + +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->tx_metadata_offset && txstatus->metadata_len) { + uchar *ptr; + /* The Ethernet header of TX frame was copied and removed. + * Here, move the data pointer forward by Ethernet header size. + */ + PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); + bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); + dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + +#ifdef DHD_HP2P + if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) { +#ifdef DHD_HP2P_DEBUG + bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t)); +#endif /* DHD_HP2P_DEBUG */ + dhd_update_hp2p_txstats(dhd, txstatus); + } +#endif /* DHD_HP2P */ + +#ifdef DHD_TIMESYNC + if (dhd->prot->tx_ts_log_enabled) { + dhd_pkt_parse_t parse; + ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts); + + memset(&parse, 0, sizeof(parse)); + dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse); + + if (parse.proto == IP_PROT_ICMP) + dhd_timesync_log_tx_timestamp(dhd->ts, + txstatus->compl_hdr.flow_ring_id, + txstatus->cmn_hdr.if_id, + ts->low, ts->high, &parse); + } +#endif /* DHD_TIMESYNC */ + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, pkt); +#endif + DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, + txstatus->tx_status); + DHD_RING_UNLOCK(ring->ring_lock, flags); +#ifdef DHD_PKTTS + if (meta_data_buf.va) { + DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced, + meta_data_buf.pa, meta_data_buf.dmah); + } +#endif /* DHD_PKTTS */ +#ifdef DHD_MEM_STATS + DHD_MEM_STATS_LOCK(dhd->mem_stats_lock, flags); + DHD_MSGBUF_INFO(("%s txpath_mem: %llu PKTLEN: %d\n", + __FUNCTION__, dhd->txpath_mem, PKTLEN(dhd->osh, pkt))); + dhd->txpath_mem -= PKTLEN(dhd->osh, pkt); + DHD_MEM_STATS_UNLOCK(dhd->mem_stats_lock, flags); +#endif /* DHD_MEM_STATS */ + PKTFREE(dhd->osh, pkt, TRUE); + + return; +} /* dhd_prot_txstatus_process */ + +/* FIXME: assuming that it is getting inline data related to the event data */ +/** called on MSG_TYPE_WL_EVENT message received from dongle */ +static void +dhd_prot_event_process(dhd_pub_t *dhd, void *msg) +{ + wlevent_req_msg_t *evnt; + uint32 bufid; + uint16 buflen; + int ifidx = 0; + void* pkt; + dhd_prot_t *prot = dhd->prot; + + /* Event complete header */ + evnt = (wlevent_req_msg_t *)msg; + bufid = ltoh32(evnt->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD) + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */ + + buflen = ltoh16(evnt->event_data_len); + + ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); + /* FIXME: check the event status */ + + /* Post another rxbuf to the device */ + if (prot->cur_event_bufs_posted) + prot->cur_event_bufs_posted--; + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE); + + if (!pkt) { + DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid)); + return; + } + +#if !defined(BCM_ROUTER_DHD) + /* FIXME: make sure the length is more than dataoffset */ + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); +#endif /* !BCM_ROUTER_DHD */ + + PKTSETLEN(dhd->osh, pkt, buflen); +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, 
pkt); +#endif + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +} + +#if !defined(BCM_ROUTER_DHD) +/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */ +static void +BCMFASTPATH(dhd_prot_process_infobuf_complete)(dhd_pub_t *dhd, void* buf) +{ + info_buf_resp_t *resp; + uint32 pktid; + uint16 buflen; + void * pkt; + + resp = (info_buf_resp_t *)buf; + pktid = ltoh32(resp->cmn_hdr.request_id); + buflen = ltoh16(resp->info_data_len); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_MSGBUF_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n", + pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum), + dhd->prot->rx_dataoffset)); + + if (dhd->debug_buf_dest_support) { + if (resp->dest < DEBUG_BUF_DEST_MAX) { + dhd->debug_buf_dest_stat[resp->dest]++; + } + } + + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE); + if (!pkt) + return; + +#if !defined(BCM_ROUTER_DHD) + /* FIXME: make sure the length is more than dataoffset */ + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); +#endif /* !BCM_ROUTER_DHD */ + + PKTSETLEN(dhd->osh, pkt, buflen); +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, pkt); +#endif + /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a + * special ifidx of -1. This is just internal to dhd to get the data to + * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process). + */ + dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1); +} +#endif /* !BCM_ROUTER_DHD */ + +/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */ +static void +BCMFASTPATH(dhd_prot_process_snapshot_complete)(dhd_pub_t *dhd, void *buf) +{ +#ifdef SNAPSHOT_UPLOAD + dhd_prot_t *prot = dhd->prot; + snapshot_resp_t *resp; + uint16 status; + + resp = (snapshot_resp_t *)buf; + + /* check completion status */ + status = resp->compl_hdr.status; + if (status != BCMPCIE_SUCCESS) { + DHD_ERROR(("%s: failed: %s (%d)\n", + __FUNCTION__, + status == BCMPCIE_BT_DMA_ERR ? "DMA_ERR" : + status == BCMPCIE_BT_DMA_DESCR_FETCH_ERR ? + "DMA_DESCR_ERR" : + status == BCMPCIE_SNAPSHOT_ERR ? "SNAPSHOT_ERR" : + status == BCMPCIE_NOT_READY ? "NOT_READY" : + status == BCMPCIE_INVALID_DATA ? "INVALID_DATA" : + status == BCMPCIE_NO_RESPONSE ? "NO_RESPONSE" : + status == BCMPCIE_NO_CLOCK ? 
"NO_CLOCK" : + "", status)); + } + + /* length may be truncated if error occurred */ + prot->snapshot_upload_len = ltoh32(resp->resp_len); + prot->snapshot_type = resp->type; + prot->snapshot_cmpl_pending = FALSE; + + DHD_INFO(("%s id 0x%04x, phase 0x%02x, resp_len %d, type %d\n", + __FUNCTION__, ltoh32(resp->cmn_hdr.request_id), + resp->cmn_hdr.flags, + prot->snapshot_upload_len, prot->snapshot_type)); +#endif /* SNAPSHOT_UPLOAD */ +} + +#ifdef BTLOG +/** called on MSG_TYPE_BT_LOG_CMPLT message received from dongle */ +static void +BCMFASTPATH(dhd_prot_process_btlog_complete)(dhd_pub_t *dhd, void* buf) +{ + info_buf_resp_t *resp; + uint32 pktid; + uint16 buflen; + void * pkt; + + resp = (info_buf_resp_t *)buf; + pktid = ltoh32(resp->cmn_hdr.request_id); + buflen = ltoh16(resp->info_data_len); + + /* check completion status */ + if (resp->compl_hdr.status != BCMPCIE_SUCCESS) { + DHD_ERROR(("%s: failed completion status %d\n", + __FUNCTION__, resp->compl_hdr.status)); + return; + } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n", + pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum), + dhd->prot->rx_dataoffset)); + + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE); + + if (!pkt) + return; + +#if !defined(BCM_ROUTER_DHD) + /* FIXME: make sure the length is more than dataoffset */ + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); +#endif /* !BCM_ROUTER_DHD */ + + PKTSETLEN(dhd->osh, pkt, buflen); + PKTSETNEXT(dhd->osh, pkt, NULL); + + dhd_bus_rx_bt_log(dhd->bus, pkt); +} +#endif /* BTLOG */ + +/** Stop protocol: sync w/dongle state. */ +void dhd_prot_stop(dhd_pub_t *dhd) +{ + ASSERT(dhd); + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#if defined(NDIS) + if (dhd->prot) { + DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_ctrl_map); + DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_rx_map); + DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_tx_map); +#if defined(IOCTLRESP_USE_CONSTMEM) + DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, dhd->prot->pktid_map_handle_ioctl); +#endif /* DHD_PCIE_PKTID */ + } +#endif /* NDIS */ +} + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +void +BCMFASTPATH(dhd_prot_hdrpush)(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ + return; +} + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + return 0; +} + +#define PKTBUF pktbuf + +/** + * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in + * the corresponding flow ring. 
+ */ +int +BCMFASTPATH(dhd_prot_txdata)(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + host_txbuf_post_t *txdesc = NULL; + dmaaddr_t pa, meta_pa; + uint8 *pktdata; + uint32 pktlen; + uint32 pktid; + uint8 prio; + uint16 flowid = 0; + uint16 alloced = 0; + uint16 headroom; + msgbuf_ring_t *ring; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; +#if defined(BCMINTERNAL) && defined(LINUX) + void *pkt_to_free = NULL; +#endif /* BCMINTERNAL && LINUX */ +#ifdef DHD_PKTTS + dhd_dma_buf_t meta_data_buf; + uint16 meta_data_buf_len = dhd->pkt_metadata_buflen; + uint64 addr = 0; +#endif /* DHD_PKTTS */ + void *big_pktbuf = NULL; + uint8 dhd_udr = FALSE; + bool host_sfh_llc_reqd = dhd->host_sfhllc_supported; + bool llc_inserted = FALSE; + + BCM_REFERENCE(llc_inserted); +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) { + DHD_ERROR(("failed to increment hostactive_devwake\n")); + return BCME_ERROR; + } +#endif /* PCIE_INB_DW */ + + if (dhd->flow_ring_table == NULL) { + DHD_ERROR(("dhd flow_ring_table is NULL\n")); + goto fail; + } + +#ifdef DHD_PCIE_PKTID + if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) { + if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) { + DHD_ERROR(("%s: stop tx queue as pktid_depleted_cnt maxed\n", + __FUNCTION__)); + prot->pktid_txq_stop_cnt++; + dhd_bus_stop_queue(dhd->bus); + dhd->prot->no_tx_resource = TRUE; + } + dhd->prot->pktid_depleted_cnt++; + goto fail; + } else { + dhd->prot->pktid_depleted_cnt = 0; + } +#endif /* DHD_PCIE_PKTID */ + + if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT) { + if ((big_pktbuf = PKTGET(dhd->osh, DHD_FLOWRING_TX_BIG_PKT_SIZE, TRUE)) == NULL) { + DHD_ERROR(("%s:%d: PKTGET for txbuf failed\n", __FUNCTION__, __LINE__)); + goto fail; + } + + memset(PKTDATA(dhd->osh, big_pktbuf), 0xff, DHD_FLOWRING_TX_BIG_PKT_SIZE); + DHD_ERROR(("PKTBUF len = %d big_pktbuf len = %d\n", PKTLEN(dhd->osh, PKTBUF), + PKTLEN(dhd->osh, big_pktbuf))); + if (memcpy_s(PKTDATA(dhd->osh, big_pktbuf), DHD_FLOWRING_TX_BIG_PKT_SIZE, + PKTDATA(dhd->osh, PKTBUF), PKTLEN(dhd->osh, PKTBUF)) != BCME_OK) { + DHD_ERROR(("%s:%d: memcpy_s big_pktbuf failed\n", __FUNCTION__, __LINE__)); + ASSERT(0); + } + } + + flowid = DHD_PKT_GET_FLOWID(PKTBUF); + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + /* + * XXX: + * JIRA SW4349-436: + * Copying the TX Buffer to an SKB that lives in the DMA Zone + * is done here. Previously this was done from dhd_stat_xmit + * On conditions where the Host is pumping heavy traffic to + * the dongle, we see that the Queue that is backing up the + * flow rings is getting full and holds the precious memory + * from DMA Zone, leading the host to run out of memory in DMA + * Zone. So after this change the back up queue would continue to + * hold the pointers from Network Stack, just before putting + * the PHY ADDR in the flow rings, we'll do the copy. + */ +#if defined(BCMINTERNAL) && defined(LINUX) + if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) { + struct sk_buff *skb; + /* + * We are about to add the Ethernet header and send out, + * copy the skb here. 
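+		 *
+		 * Review note (not part of this patch's original text): this is
+		 * a bounce-copy pattern.  skb_copy(PKTBUF, GFP_DMA) allocates
+		 * the clone from the DMA zone; the original skb is kept until a
+		 * pktid and ring slot have been secured, so a failure can still
+		 * return BCME_NORESOURCE and let the caller requeue the
+		 * untouched original.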
+ */ + skb = skb_copy(PKTBUF, GFP_DMA); + if (skb == NULL) { + /* + * Memory allocation failed, the old packet can + * live in the queue, return BCME_NORESOURCE so + * the caller re-queues this packet + */ + DHD_ERROR(("%s: skb_copy(DMA) failed\n", __FUNCTION__)); + goto fail; + } + + /* + * Now we have copied the SKB to GFP_DMA memory, make the + * rest of the code operate on this new SKB. Hold on to + * the original SKB. If we don't get the pkt id or flow ring + * space we'll free the Zone memory and return "no resource" + * so the caller would re-queue the original SKB. + */ + pkt_to_free = PKTBUF; + PKTBUF = skb; + } +#endif /* BCMINTERNAL && LINUX */ + + if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT && big_pktbuf) { + PKTFREE(dhd->osh, PKTBUF, TRUE); + PKTBUF = big_pktbuf; + } + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Create a unique 32-bit packet id */ + pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map, + PKTBUF, PKTTYPE_DATA_TX); +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__)); + /* + * If we return error here, the caller would queue the packet + * again. So we'll just free the skb allocated in DMA Zone. + * Since we have not freed the original SKB yet the caller would + * requeue the same. + */ + goto err_no_res_pktfree; + } +#endif /* DHD_PCIE_PKTID */ + + /* Reserve space in the circular buffer */ + txdesc = (host_txbuf_post_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (txdesc == NULL) { + DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", + __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count))); + goto err_free_pktid; + } + txdesc->flags = 0; + + /* Extract the data pointer and length information */ + pktdata = PKTDATA(dhd->osh, PKTBUF); + pktlen = PKTLEN(dhd->osh, PKTBUF); + + /* TODO: XXX: re-look into dropped packets */ + DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid); + + dhd_handle_pktdata(dhd, ifidx, PKTBUF, pktdata, pktid, + pktlen, NULL, &dhd_udr, TRUE, FALSE, TRUE); + +#if defined(BCMINTERNAL) && defined(LINUX) + /* + * We have got all the resources, pktid and ring space + * so we can safely free the original SKB here. + */ + if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) + PKTCFREE(dhd->osh, pkt_to_free, FALSE); +#endif /* BCMINTERNAL && LINUX */ + + /* Ethernet header - contains ethertype field + * Copy before we cache flush packet using DMA_MAP + */ + bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); + +#ifdef DHD_AWDL + /* the awdl ifidx will always have a non-zero value + * if the awdl iface is created. This is because the + * primary iface (usually eth1) will always have ifidx of 0. + * Hence we can check for non-zero value of awdl ifidx to + * see if awdl iface is created or not + */ + if (dhd->awdl_llc_enabled && + dhd->awdl_ifidx && ifidx == dhd->awdl_ifidx) { + if (host_sfh_llc_reqd) { + /* if FW supports host sfh llc insertion + * then BOTH sfh and llc needs to be inserted + * in which case the host LLC only path + * in FW will not be exercised - which is the + * objective of this feature. 
Hence in such a
+			 * case disable awdl llc insertion
+			 */
+			DHD_ERROR_RLMT(("%s: FW supports host sfh + llc, this"
+				" is incompatible with awdl llc insertion;"
+				" disable host sfh llc support in FW and try\n",
+				__FUNCTION__));
+		} else {
+			if (dhd_ether_to_awdl_llc_hdr(dhd, (struct ether_header *)pktdata,
+				PKTBUF) == BCME_OK) {
+				llc_inserted = TRUE;
+				/* in work item change ether type to len by
+				 * re-copying the ether header
+				 */
+				memcpy_s(txdesc->txhdr, ETHER_HDR_LEN, PKTDATA(dhd->osh, PKTBUF),
+					ETHER_HDR_LEN);
+			} else {
+				goto err_rollback_idx;
+			}
+		}
+	}
+#endif /* DHD_AWDL */
+
+#ifdef HOST_SFH_LLC
+	if (host_sfh_llc_reqd) {
+		if (dhd_ether_to_8023_hdr(dhd->osh, (struct ether_header *)pktdata,
+			PKTBUF) == BCME_OK) {
+			/* adjust the data pointer and length information */
+			pktdata = PKTDATA(dhd->osh, PKTBUF);
+			pktlen = PKTLEN(dhd->osh, PKTBUF);
+			txdesc->flags |= BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC;
+		} else {
+			goto err_rollback_idx;
+		}
+	} else
+#endif /* HOST_SFH_LLC */
+	{
+		/* Extract the ethernet header and adjust the data pointer and length */
+		pktlen = PKTLEN(dhd->osh, PKTBUF) - ETHER_HDR_LEN;
+		pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+	}
+
+	/* Map the data pointer to a DMA-able address */
+	pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+
+	if (PHYSADDRISZERO(pa)) {
+		DHD_ERROR(("%s: Something really bad, unless 0 is "
+			"a valid phyaddr for pa\n", __FUNCTION__));
+		ASSERT(0);
+		/* XXX if ASSERT() doesn't work, as on the Android platform,
+		 * try to requeue the packet to the backup queue.
+		 */
+		goto err_rollback_idx;
+	}
+
+#ifdef DMAMAP_STATS
+	dhd->dma_stats.txdata++;
+	dhd->dma_stats.txdata_sz += pktlen;
+#endif /* DMAMAP_STATS */
+	/* No need to lock. Save the rest of the packet's metadata */
+	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
+		pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
+
+#ifdef TXP_FLUSH_NITEMS
+	if (ring->pend_items_count == 0)
+		ring->start_addr = (void *)txdesc;
+	ring->pend_items_count++;
+#endif
+#ifdef DHD_HMAPTEST
+	if (dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_ACTIVE) {
+		/* scratch area */
+		dhd->prot->hmap_tx_buf_va = (char *)dhd->prot->hmaptest.mem.va +
+			dhd->prot->hmaptest.offset;
+		/* replace pa with our pa for txbuf post only */
+		dhd->prot->hmap_tx_buf_len = pktlen;
+		if ((dhd->prot->hmap_tx_buf_va + dhd->prot->hmap_tx_buf_len) >
+			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
+			DHD_ERROR(("hmaptest: ERROR Txpost outside HMAPTEST buffer\n"));
+			DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
+			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
+			dhd->prot->hmaptest.in_progress = FALSE;
+		} else {
+			/* copy pktdata to our va */
+			memcpy(dhd->prot->hmap_tx_buf_va, PKTDATA(dhd->osh, PKTBUF), pktlen);
+			pa = DMA_MAP(dhd->osh, dhd->prot->hmap_tx_buf_va,
+				dhd->prot->hmap_tx_buf_len, DMA_TX, PKTBUF, 0);
+
+			dhd->prot->hmap_tx_buf_pa = pa;
+			/* store pktid for later mapping in txcpl */
+			dhd->prot->hmaptest_tx_pktid = pktid;
+			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_POSTED;
+			DHD_ERROR(("hmaptest: d11read txpost scratch txbuf pktid=0x%08x\n", pktid));
+			DHD_ERROR(("hmaptest: d11read txpost txbuf va=0x%p pa.lo=0x%08x len=%d\n",
+				dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(pa), pktlen));
+		}
+	}
+#endif /* DHD_HMAPTEST */
+
+#ifdef DHD_PKTTS
+	memset(&meta_data_buf, 0, sizeof(meta_data_buf));
+	if (dhd_get_pktts_enab(dhd) &&
+		dhd->pkt_metadata_buflen) {
+		/* Allocate memory for Meta data */
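+		/* Review note (sketch, not part of this patch's original text):
+		 * each TX post may carry a small DMA-coherent metadata buffer
+		 * that the firmware fills with timestamps.  Its lifecycle,
+		 * using only calls that appear in this file, is:
+		 *
+		 *     va = DMA_ALLOC_CONSISTENT(...);               // here, at post
+		 *     DHD_PKTID_SAVE_METADATA(..., va, pa, .., pktid);
+		 *     ...
+		 *     va = DHD_PKTID_RETREIVE_METADATA(..., pktid); // at completion
+		 *     DMA_FREE_CONSISTENT(...);                     // after ring unlock
+		 *
+		 * Note that DHD_PKTID_RETREIVE_METADATA is the identifier's
+		 * actual spelling in this driver.
+		 */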
meta_data_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, meta_data_buf_len, + DMA_ALIGN_LEN, &meta_data_buf._alloced, + &meta_data_buf.pa, &meta_data_buf.dmah); + + if (meta_data_buf.va == NULL) { + DHD_ERROR_RLMT(("%s: dhd_dma_buf_alloc failed \r\n", __FUNCTION__)); + DHD_ERROR_RLMT((" ... Proceeding without metadata buffer \r\n")); + } else { + DHD_PKTID_SAVE_METADATA(dhd, dhd->prot->pktid_tx_map, + (void *)meta_data_buf.va, + meta_data_buf.pa, + (uint16)meta_data_buf._alloced, + meta_data_buf.dmah, + pktid); + } + memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa)); + DHD_TRACE(("Meta data Buffer VA: %p PA: %llx dmah: %p\r\n", + meta_data_buf.va, addr, meta_data_buf.dmah)); + + txdesc->metadata_buf_addr.low = addr & (0xFFFFFFFF); + txdesc->metadata_buf_addr.high = (addr >> 32) & (0xFFFFFFFF); + txdesc->metadata_buf_len = meta_data_buf_len; + } +#endif /* DHD_PKTTS */ + + /* Form the Tx descriptor message buffer */ + + /* Common message hdr */ + txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; + txdesc->cmn_hdr.if_id = ifidx; + txdesc->cmn_hdr.flags = ring->current_phase; + + txdesc->flags |= BCMPCIE_PKT_FLAGS_FRAME_802_3; + prio = (uint8)PKTPRIO(PKTBUF); + +#ifdef EXT_STA + txdesc->flags &= ~BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK << + BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT; + txdesc->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(PKTBUF)) & + BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK) + << BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT; +#endif + + txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT; + txdesc->seg_cnt = 1; + + txdesc->data_len = htol16((uint16) pktlen); + txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + + if (!host_sfh_llc_reqd) + { + /* Move data pointer to keep ether header in local PKTBUF for later reference */ + PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN); + } + + txdesc->ext_flags = 0; + +#ifdef DHD_TIMESYNC + txdesc->rate = 0; + + if (!llc_inserted && dhd->prot->tx_ts_log_enabled) { + dhd_pkt_parse_t parse; + + dhd_parse_proto(PKTDATA(dhd->osh, PKTBUF), &parse); + + if (parse.proto == IP_PROT_ICMP) { + if (dhd->prot->no_retry) + txdesc->ext_flags = BCMPCIE_PKT_FLAGS_FRAME_NORETRY; + if (dhd->prot->no_aggr) + txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_NOAGGR; + if (dhd->prot->fixed_rate) + txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR; + } + } +#endif /* DHD_TIMESYNC */ + +#ifdef DHD_SBN + if (dhd_udr) { + txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR; + } +#endif /* DHD_SBN */ + +#ifdef DHD_TX_PROFILE + if (!llc_inserted && + dhd->tx_profile_enab && dhd->num_profiles > 0) + { + uint8 offset; + + for (offset = 0; offset < dhd->num_profiles; offset++) { + if (dhd_protocol_matches_profile((uint8 *)PKTDATA(dhd->osh, PKTBUF), + PKTLEN(dhd->osh, PKTBUF), &(dhd->protocol_filters[offset]), + host_sfh_llc_reqd)) { + /* mask so other reserved bits are not modified. 
*/ + txdesc->rate |= + (((uint8)dhd->protocol_filters[offset].profile_index) & + BCMPCIE_TXPOST_RATE_PROFILE_IDX_MASK); + + /* so we can use the rate field for our purposes */ + txdesc->rate |= BCMPCIE_TXPOST_RATE_EXT_USAGE; + + break; + } + } + } +#endif /* defined(DHD_TX_PROFILE) */ + + /* Handle Tx metadata */ + headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); + if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) + DHD_ERROR(("No headroom for Metadata tx %d %d\n", + prot->tx_metadata_offset, headroom)); + + if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { + DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset)); + + /* Adjust the data pointer to account for meta data in DMA_MAP */ + PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset); + + meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), + prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); + + if (PHYSADDRISZERO(meta_pa)) { + /* Unmap the data pointer to a DMA-able address */ + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL); +#ifdef TXP_FLUSH_NITEMS + /* update pend_items_count */ + ring->pend_items_count--; +#endif /* TXP_FLUSH_NITEMS */ + + DHD_ERROR(("%s: Something really bad, unless 0 is " + "a valid phyaddr for meta_pa\n", __FUNCTION__)); + ASSERT(0); + /* XXX if ASSERT() doesn't work like as Android platform, + * try to requeue the packet to the backup queue. + */ + goto err_rollback_idx; + } + + /* Adjust the data pointer back to original value */ + PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset); + + txdesc->metadata_buf_len = prot->tx_metadata_offset; + txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa)); + txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa)); + } else { +#ifdef DHD_HP2P + if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) { + dhd_update_hp2p_txdesc(dhd, txdesc); + } else +#endif /* DHD_HP2P */ +#ifdef DHD_PKTTS + if (!dhd_get_pktts_enab(dhd) || !dhd->pkt_metadata_buflen) { +#else + if (1) { +#endif /* DHD_PKTTS */ + txdesc->metadata_buf_len = htol16(0); + txdesc->metadata_buf_addr.high_addr = 0; + txdesc->metadata_buf_addr.low_addr = 0; + } + } + +#ifdef AGG_H2D_DB + OSL_ATOMIC_INC(dhd->osh, &ring->inflight); +#endif /* AGG_H2D_DB */ + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + txdesc->cmn_hdr.request_id = htol32(pktid); + + DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len, + txdesc->cmn_hdr.request_id)); + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, PKTBUF); +#endif + + /* Update the write pointer in TCM & ring bell */ +#if defined(TXP_FLUSH_NITEMS) +#if defined(DHD_HP2P) + if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) { + dhd_calc_hp2p_burst(dhd, ring, flowid); + } else +#endif /* HP2P */ + { + if ((ring->pend_items_count == prot->txp_threshold) || + ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) { +#ifdef AGG_H2D_DB + if (agg_h2d_db_enab) { + dhd_prot_txdata_aggr_db_write_flush(dhd, flowid); + if ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring)) { + dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, TRUE); + } + } else +#endif /* AGG_H2D_DB */ + { + dhd_prot_txdata_write_flush(dhd, flowid); + } + + } + } +#else + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, txdesc, 1); +#endif /* TXP_FLUSH_NITEMS */ + +#ifdef TX_STATUS_LATENCY_STATS + /* set the time when pkt is queued to flowring */ + 
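+	/* Review note (not part of this patch's original text): with
+	 * TXP_FLUSH_NITEMS the driver batches descriptor writes and rings the
+	 * doorbell once per burst instead of once per packet, conceptually:
+	 *
+	 *     if (++ring->pend_items_count == prot->txp_threshold ||
+	 *         txdesc == DHD_RING_END_VA(ring))
+	 *         dhd_prot_txdata_write_flush(dhd, flowid);  // WR index + doorbell
+	 *
+	 * which trades a little completion latency for far fewer MMIO doorbell
+	 * writes under load.
+	 */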
DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US()); +#elif defined(DHD_PKTTS) + if (dhd_get_pktts_enab(dhd) == TRUE) { + /* set the time when pkt is queued to flowring */ + DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US()); + } +#endif /* TX_STATUS_LATENCY_STATS */ + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count); + + /* + * Take a wake lock, do not sleep if we have atleast one packet + * to finish. + */ + DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif +#ifdef TX_STATUS_LATENCY_STATS + flow_ring_node->flow_info.num_tx_pkts++; +#endif /* TX_STATUS_LATENCY_STATS */ + return BCME_OK; + +err_rollback_idx: + /* roll back write pointer for unprocessed message */ + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + if (ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? + 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + +err_free_pktid: +#if defined(DHD_PCIE_PKTID) + { + void *dmah; + void *secdma; + /* Free up the PKTID. physaddr and pktlen will be garbage. */ + DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, + pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); + } + +err_no_res_pktfree: +#endif /* DHD_PCIE_PKTID */ + +#if defined(BCMINTERNAL) && defined(LINUX) + if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) + PKTCFREE(dhd->osh, PKTBUF, FALSE); +#endif /* BCMINTERNAL && LINUX */ + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +fail: +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_NORESOURCE; +} /* dhd_prot_txdata */ + +#ifdef AGG_H2D_DB +static void +dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid) +{ + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + msgbuf_ring_t *ring; + + if (dhd->flow_ring_table == NULL) { + return; + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + if (ring->pend_items_count) { + dhd_prot_agg_db_ring_write(dhd, ring, ring->start_addr, + ring->pend_items_count); + ring->pend_items_count = 0; + ring->start_addr = NULL; + } + +} +#endif /* AGG_H2D_DB */ + +/* called with a ring_lock */ +/** optimization to write "n" tx items at a time to ring */ +void +BCMFASTPATH(dhd_prot_txdata_write_flush)(dhd_pub_t *dhd, uint16 flowid) +{ +#ifdef TXP_FLUSH_NITEMS + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + msgbuf_ring_t *ring; + + if (dhd->flow_ring_table == NULL) { + return; + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + if (ring->pend_items_count) { + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ring->start_addr, + ring->pend_items_count); + ring->pend_items_count = 0; + ring->start_addr = NULL; + dhd->prot->tx_h2d_db_cnt++; + } +#endif /* TXP_FLUSH_NITEMS */ +} + +#undef PKTBUF /* Only defined in the above routine */ + +int +BCMFASTPATH(dhd_prot_hdrpull)(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) +{ + return 0; +} + +/** post a set of receive buffers to the dongle */ +static void +BCMFASTPATH(dhd_prot_return_rxbuf)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid, + uint32 
+	uint32 rxcnt)
+/* XXX function name could be more descriptive, eg dhd_prot_post_rxbufs */
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->rxbufpost >= rxcnt) {
+		prot->rxbufpost -= (uint16)rxcnt;
+	} else {
+		/* XXX: I have seen this assert hitting.
+		 * Will be removed once root-caused.
+		 */
+		/* ASSERT(0); */
+		prot->rxbufpost = 0;
+	}
+
+	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+	} else if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
+		/* Ring DoorBell after processing the rx packets,
+		 * so that dongle will sync the DMA indices.
+		 */
+		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
+	}
+
+	return;
+}
+
+#ifdef DHD_HMAPTEST
+
+static void
+dhd_msgbuf_hmaptest_cmplt(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint64 end_usec;
+	char *readbuf;
+	uint32 len = dhd->prot->hmaptest.len;
+	uint32 i;
+
+	end_usec = OSL_SYSUPTIME_US();
+	end_usec -= prot->hmaptest.start_usec;
+	DHD_ERROR(("hmaptest cmplt: %d bytes in %llu usec, %u kBps\n",
+		len, end_usec, (len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
+
+	prot->hmaptest.in_progress = FALSE;
+	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
+		DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
+	} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
+		DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
+	} else {
+		return;
+	}
+	readbuf = (char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.offset;
+	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
+		dhd->prot->hmaptest.mem.len);
+	if (prot->hmaptest.is_write) {
+		DHD_ERROR(("hmaptest cmplt: FW has written at 0x%p\n", readbuf));
+		DHD_ERROR(("hmaptest cmplt: pattern = \n"));
+		len = ALIGN_SIZE(len, (sizeof(int32)));
+		for (i = 0; i < len; i += (sizeof(int32))) {
+			DHD_ERROR(("0x%08x\n", *(int *)(readbuf + i)));
+		}
+		DHD_ERROR(("\n\n"));
+	}
+
+}
+/* Program the HMAPTEST window and window-config registers.
+ * Reference HMAP implementation for OSes that can easily leverage it:
+ * this function can be used as a reference for programming and enabling
+ * the HMAP windows, at init time or from the hmap iovar.
+ */
+static void
+dhdmsgbuf_set_hmaptest_windows(dhd_pub_t *dhd)
+{
+	uint32 nwindows = 0;
+	uint32 scratch_len;
+	uint64 scratch_lin, w1_start;
+	dmaaddr_t scratch_pa;
+	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
+	dhd_prot_t *prot = dhd->prot;
+	uint corerev = dhd->bus->sih->buscorerev;
+
+	scratch_pa = prot->hmaptest.mem.pa;
+	scratch_len = prot->hmaptest.mem.len;
+	scratch_lin = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
+		| (((uint64)PHYSADDRHI(scratch_pa) & 0xffffffff) << 32);
+	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
+	/* Windows are 4 KB aligned and window lengths are 512 byte aligned:
+	 * a window start ends with 0x1000 and a window length ends with 0xe00.
+	 * Make the sandbox buffer 4 KB aligned, and its size 4 KB aligned, for the hmap test.
+	 * window0 = 0 - sandbox_start
+	 * window1 = sandbox_end + 1 - 0xffffffff
+	 * window2 = 0x100000000 - 0x1fffffe00
+	 * window 3 is programmed only for valid test cases
+	 * window3 = sandbox_start - sandbox_end
+	 */
+	w1_start = scratch_lin + scratch_len;
+	DHD_ERROR(("hmaptest: window 0 offset lower=0x%p upper=0x%p length=0x%p\n",
+		&(hmapwindows[0].baseaddr_lo), &(hmapwindows[0].baseaddr_hi),
+		&(hmapwindows[0].windowlength)));
+	DHD_ERROR(("hmaptest: window 1 offset lower=0x%p upper=0x%p length=0x%p\n",
+		&(hmapwindows[1].baseaddr_lo), &(hmapwindows[1].baseaddr_hi),
+		&(hmapwindows[1].windowlength)));
+	DHD_ERROR(("hmaptest: window 2 offset lower=0x%p upper=0x%p length=0x%p\n",
+		&(hmapwindows[2].baseaddr_lo), &(hmapwindows[2].baseaddr_hi),
+		&(hmapwindows[2].windowlength)));
+	DHD_ERROR(("hmaptest: window 3 offset lower=0x%p upper=0x%p length=0x%p\n",
+		&(hmapwindows[3].baseaddr_lo), &(hmapwindows[3].baseaddr_hi),
+		&(hmapwindows[3].windowlength)));
+	DHD_ERROR(("hmaptest: w0 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
+		0, 0, (uint64) scratch_lin));
+	DHD_ERROR(("hmaptest: w1 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
+		(uint32)(w1_start & 0xffffffff),
+		(uint32)((w1_start >> 32) & 0xffffffff),
+		(uint64)(0x100000000 - w1_start)));
+	DHD_ERROR(("hmaptest: w2 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
+		0, 1, (uint64)0xfffffe00));
+	/* setting window0 */
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[0].baseaddr_lo)), ~0, 0x0);
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[0].baseaddr_hi)), ~0, 0x0);
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[0].windowlength)), ~0,
+		(uint64)scratch_lin);
+	/* setting window1 */
+	w1_start = scratch_lin + scratch_len;
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[1].baseaddr_lo)), ~0,
+		(uint32)(w1_start & 0xffffffff));
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[1].baseaddr_hi)), ~0,
+		(uint32)((w1_start >> 32) & 0xffffffff));
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[1].windowlength)), ~0,
+		(0x100000000 - w1_start));
+	/* setting window2 */
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[2].baseaddr_lo)), ~0, 0x0);
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[2].baseaddr_hi)), ~0, 0x1);
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uintptr_t)(&(hmapwindows[2].windowlength)), ~0, 0xfffffe00);
+	nwindows = 3;
+	/* program only windows 0-2 with section1 + section2 */
+	/* setting window config */
+	/* set bits 8:15 in windowconfig to enable n windows in order */
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, (nwindows << 8));
+}
+
+/* Stop HMAPTEST. This function does not check corerev;
+ * the caller has to ensure the corerev check.
+ */
+int
+dhdmsgbuf_hmaptest_stop(dhd_pub_t *dhd)
+{
+	uint32 window_config, nwindows, i;
+	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
+	uint corerev = dhd->bus->sih->buscorerev;
+
+	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
+	dhd->prot->hmaptest.in_progress = FALSE;
+
+	/* Reference for HMAP Implementation:
+	 * disable the HMAP windows here. As the windows were programmed
+	 * in the bus:hmap set call, they are disabled in hmaptest_stop.
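+	 * For example, if bus:hmap programmed 3 windows, window_config reads
+	 * back with nwindows == 3 in bits 8:15, and the loop below zeroes
+	 * windows 0..2 before clearing the window config register itself.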
+	 */
+	DHD_ERROR(("hmap: disable hmap windows\n"));
+	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
+	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
+	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, 0);
+	/* clear all windows */
+	for (i = 0; i < nwindows; i++) {
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+			(uintptr_t)(&(hmapwindows[i].baseaddr_lo)), ~0, 0);
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+			(uintptr_t)(&(hmapwindows[i].baseaddr_hi)), ~0, 0);
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+			(uintptr_t)(&(hmapwindows[i].windowlength)), ~0, 0);
+	}
+
+	return BCME_OK;
+}
+
+/* HMAP iovar intercept process */
+int
+dhdmsgbuf_hmap(dhd_pub_t *dhd, pcie_hmap_t *hmap_params, bool set)
+{
+	uint32 scratch_len;
+	uint64 scratch_lin, w1_start;
+	dmaaddr_t scratch_pa;
+	uint32 addr_lo, addr_hi, window_length, window_config, nwindows, i;
+	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
+
+	dhd_prot_t *prot = dhd->prot;
+	dhd_bus_t *bus = dhd->bus;
+	uint corerev = bus->sih->buscorerev;
+	scratch_pa = prot->hmaptest.mem.pa;
+	scratch_len = prot->hmaptest.mem.len;
+	scratch_lin = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
+		| (((uint64)PHYSADDRHI(scratch_pa) & 0xffffffff) << 32);
+	w1_start = scratch_lin + scratch_len;
+	DHD_ERROR(("HMAP: pcicorerev = %d\n", corerev));
+
+	if (corerev < 24) {
+		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
+		return BCME_UNSUPPORTED;
+	}
+	if (set) {
+		if (hmap_params->enable) {
+			dhdmsgbuf_set_hmaptest_windows(dhd);
+		} else {
+			dhdmsgbuf_hmaptest_stop(dhd); /* stop will clear all programmed windows */
+		}
+	}
+
+	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
+		dhd->prot->hmaptest.mem.len);
+
+	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
+	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
+	prot->hmap_enabled = nwindows ? TRUE : FALSE;
+
+	/* getting window config */
+	/* bits 8:15 of windowconfig hold the count of windows enabled in order */
+	DHD_ERROR(("hmap: hmap status = %s\n", (prot->hmap_enabled ?
"Enabled" : "Disabled"))); + DHD_ERROR(("hmap: window config = 0x%08x\n", window_config)); + DHD_ERROR(("hmap: Windows\n")); + + hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev)); + /* getting windows */ + if (nwindows > 8) + return BCME_ERROR; + for (i = 0; i < nwindows; i++) { + addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + (uintptr_t)(&(hmapwindows[i].baseaddr_lo)), 0, 0); + addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + (uintptr_t)(&(hmapwindows[i].baseaddr_hi)), 0, 0); + window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + (uintptr_t)(&(hmapwindows[i].windowlength)), 0, 0); + + DHD_ERROR(("hmap: window %d address lower=0x%08x upper=0x%08x length=0x%08x\n", + i, addr_lo, addr_hi, window_length)); + } + addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + (uint)(PCI_HMAP_VIOLATION_ADDR_U(corerev)), 0, 0); + addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + (uint)(PCI_HMAP_VIOLATION_ADDR_L(corerev)), 0, 0); + window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + (uint)(PCI_HMAP_VIOLATION_INFO(corerev)), 0, 0); + DHD_ERROR(("hmap: violation regs\n")); + DHD_ERROR(("hmap: violationaddr_hi =0x%08x\n", addr_hi)); + DHD_ERROR(("hmap: violationaddr_lo =0x%08x\n", addr_lo)); + DHD_ERROR(("hmap: violation_info =0x%08x\n", window_length)); + DHD_ERROR(("hmap: Buffer allocated for HMAPTEST Start=0x%0llx len =0x%08x End =0x%0llx\n", + (uint64) scratch_lin, scratch_len, (uint64) w1_start)); + + return BCME_OK; +} + +/* hmaptest iovar process + * This iovar triggers HMAPTEST with given params + * on chips that have HMAP + * DHD programs hmap window registers with host addresses here. + */ +int +dhdmsgbuf_hmaptest(dhd_pub_t *dhd, pcie_hmaptest_t *hmaptest_params) +{ + + dhd_prot_t *prot = dhd->prot; + int ret = BCME_OK; + uint32 offset = 0; + uint64 scratch_lin; + dhd_bus_t *bus = dhd->bus; + uint corerev = bus->sih->buscorerev; + + if (prot->hmaptest.in_progress) { + DHD_ERROR(("HMAPTEST already running. 
Try again.\n"));
+		return BCME_BUSY;
+	}
+
+	if (corerev < 24) {
+		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* set in_progress only after the corerev check, so an unsupported
+	 * chip does not leave the flag stuck at TRUE
+	 */
+	prot->hmaptest.in_progress = TRUE;
+
+	prot->hmaptest.accesstype = hmaptest_params->accesstype;
+	prot->hmaptest.is_write = hmaptest_params->is_write;
+	prot->hmaptest.len = hmaptest_params->xfer_len;
+	prot->hmaptest.offset = hmaptest_params->host_offset;
+	offset = prot->hmaptest.offset;
+
+	DHD_ERROR(("hmaptest: is_write =%d accesstype=%d offset =%d len=%d value=0x%08x\n",
+		prot->hmaptest.is_write, prot->hmaptest.accesstype,
+		offset, prot->hmaptest.len, hmaptest_params->value));
+
+	DHD_ERROR(("hmaptest dma_lo=0x%08x hi=0x%08x pa\n",
+		(uint32)PHYSADDRLO(prot->hmaptest.mem.pa),
+		(uint32)PHYSADDRHI(prot->hmaptest.mem.pa)));
+
+	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
+		if (prot->hmaptest.is_write) {
+			/* if d11 is writing then post rxbuf from scratch area */
+			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_ACTIVE;
+		} else {
+			/* if d11 is reading then post txbuf from scratch area */
+			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_ACTIVE;
+		}
+
+	} else {
+		uint32 pattern = 0xdeadbeef;
+		uint32 i;
+		uint32 maxbuflen = MIN(prot->hmaptest.len, (PKTBUFSZ));
+		char *fillbuf = (char *)dhd->prot->hmaptest.mem.va +
+			offset;
+		if ((fillbuf + maxbuflen) >
+			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
+			DHD_ERROR(("hmaptest: M2m/ARM ERROR offset + len outside buffer\n"));
+			dhd->prot->hmaptest.in_progress = FALSE;
+			return BCME_BADARG;
+		}
+
+		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
+			DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
+		} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
+			DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
+		} else {
+			prot->hmaptest.in_progress = FALSE;
+			DHD_ERROR(("hmaptest: accesstype error\n"));
+			return BCME_BADARG;
+		}
+
+		/* fill a pattern at offset */
+		maxbuflen = ALIGN_SIZE(maxbuflen, (sizeof(uint32)));
+		memset(fillbuf, 0, maxbuflen);
+		DHD_ERROR(("hmaptest: dhd write pattern at addr=0x%p\n",
+			fillbuf));
+		DHD_ERROR(("pattern = %08x, %u times",
+			pattern, (uint32)(maxbuflen / sizeof(uint32))));
+		for (i = 0; i < maxbuflen; i += sizeof(uint32)) {
+			*(uint32 *)(fillbuf + i) = pattern;
+		}
+		OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
+			dhd->prot->hmaptest.mem.len);
+		DHD_ERROR(("\n\n"));
+
+	}
+
+	/*
+	 * Do not calculate address from scratch buffer + offset,
+	 * if user supplied absolute address
+	 */
+	if (hmaptest_params->host_addr_lo || hmaptest_params->host_addr_hi) {
+		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
+			DHD_ERROR(("hmaptest: accesstype D11 does not support absolute addr\n"));
+			prot->hmaptest.in_progress = FALSE;
+			return BCME_UNSUPPORTED;
+		}
+	} else {
+		scratch_lin = (uint64)(PHYSADDRLO(prot->hmaptest.mem.pa) & 0xffffffff)
+			| (((uint64)PHYSADDRHI(prot->hmaptest.mem.pa) & 0xffffffff) << 32);
+		scratch_lin += offset;
+		hmaptest_params->host_addr_lo = htol32((uint32)(scratch_lin & 0xffffffff));
+		hmaptest_params->host_addr_hi = htol32((uint32)((scratch_lin >> 32) & 0xffffffff));
+	}
+
+	DHD_INFO(("HMAPTEST Started...\n"));
+	prot->hmaptest.start_usec = OSL_SYSUPTIME_US();
+	return ret;
+
+}
+
+#endif /* DHD_HMAPTEST */
+
+/* called before an ioctl is sent to the dongle */
+static void
+dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int slen = 0;
+
+	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
+		pcie_bus_tput_params_t *tput_params;
+
+		slen = strlen("pcie_bus_tput") + 1;
strlen("pcie_bus_tput") + 1; + tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen); + bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr, + sizeof(tput_params->host_buf_addr)); + tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN; + } + +#ifdef DHD_HMAPTEST + if (buf != NULL && !strcmp(buf, "bus:hmap")) { + pcie_hmap_t *hmap_params; + slen = strlen("bus:hmap") + 1; + hmap_params = (pcie_hmap_t*)((char *)buf + slen); + dhdmsgbuf_hmap(dhd, hmap_params, (ioc->cmd == WLC_SET_VAR)); + } + + if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) { + pcie_hmaptest_t *hmaptest_params; + + slen = strlen("bus:hmaptest") + 1; + hmaptest_params = (pcie_hmaptest_t*)((char *)buf + slen); + dhdmsgbuf_hmaptest(dhd, hmaptest_params); + } +#endif /* DHD_HMAPTEST */ +} + +/* called after an ioctl returns from dongle */ +static void +dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf, + int ifidx, int ret, int len) +{ + +#ifdef DHD_HMAPTEST + if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) { + dhd_msgbuf_hmaptest_cmplt(dhd); + } +#endif /* DHD_HMAPTEST */ + + if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) { + int slen; + /* Intercept the wme_dp ioctl here */ + if (!strcmp(buf, "wme_dp")) { + int val = 0; + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + +#ifdef DHD_AWDL + /* Intercept the awdl_peer_op ioctl here */ + if (!strcmp(buf, "awdl_peer_op")) { + slen = strlen("awdl_peer_op") + 1; + dhd_awdl_peer_op(dhd, (uint8)ifidx, ((char *)buf + slen), len - slen); + } + /* Intercept the awdl ioctl here, delete flow rings if awdl is + * disabled + */ + if (!strcmp(buf, "awdl")) { + int val = 0; + slen = strlen("awdl") + 1; + if (len >= (int)(slen + sizeof(int))) { + bcopy(((char *)buf + slen), &val, sizeof(int)); + val = ltoh32(val); + if (val == TRUE) { + /** + * Though we are updating the link status when we recieve + * WLC_E_LINK from dongle, it is not gaurenteed always. + * So intercepting the awdl command fired from app to + * update the status. + */ + dhd_update_interface_link_status(dhd, (uint8)ifidx, TRUE); +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) + /* reset AWDL stats data structures when AWDL is enabled */ + dhd_clear_awdl_stats(dhd); +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ + } else if (val == FALSE) { + dhd_update_interface_link_status(dhd, (uint8)ifidx, FALSE); + dhd_del_all_sta(dhd, (uint8)ifidx); + dhd_awdl_peer_op(dhd, (uint8)ifidx, NULL, 0); + + } + } + + } + + /* store the awdl min extension count and presence mode values + * set by the user, same will be inserted in the LLC header for + * each tx packet on the awdl iface + */ + slen = strlen("awdl_extcounts"); + if (!strncmp(buf, "awdl_extcounts", slen)) { + awdl_extcount_t *extcnt = NULL; + slen = slen + 1; + if ((len - slen) >= sizeof(*extcnt)) { + extcnt = (awdl_extcount_t *)((char *)buf + slen); + dhd->awdl_minext = extcnt->minExt; + } + } + + slen = strlen("awdl_presencemode"); + if (!strncmp(buf, "awdl_presencemode", slen)) { + slen = slen + 1; + if ((len - slen) >= sizeof(uint8)) { + dhd->awdl_presmode = *((uint8 *)((char *)buf + slen)); + } + } +#endif /* DHD_AWDL */ + } + +} + +#ifdef DHD_PM_CONTROL_FROM_FILE +extern bool g_pm_control; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. 
*/ +int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + int ret = -1; + uint8 action; + + if (dhd->bus->is_linkdown) { + DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__)); + goto done; + } + + if (dhd_query_bus_erros(dhd)) { + DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__)); + goto done; + } + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -" + " bus state: %d, sent hang: %d\n", __FUNCTION__, + dhd->busstate, dhd->hang_was_sent)); + goto done; + } + + if (dhd->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef DHD_PCIE_REG_ACCESS +#ifdef BOARD_HIKEY +#ifndef PCIE_LNK_SPEED_GEN1 +#define PCIE_LNK_SPEED_GEN1 0x1 +#endif + /* BUG_ON if link speed is GEN1 in Hikey for 4389B0 */ + if (dhd->bus->sih->buscorerev == 72) { + if (dhd_get_pcie_linkspeed(dhd) == PCIE_LNK_SPEED_GEN1) { + DHD_ERROR(("%s: ******* Link Speed is GEN1 *********\n", __FUNCTION__)); + BUG_ON(1); + } + } +#endif /* BOARD_HIKEY */ +#endif /* DHD_PCIE_REG_ACCESS */ + + if (ioc->cmd == WLC_SET_PM) { +#ifdef DHD_PM_CONTROL_FROM_FILE + if (g_pm_control == TRUE) { + DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n", + __FUNCTION__, buf ? *(char *)buf : 0)); + goto done; + } +#endif /* DHD_PM_CONTROL_FROM_FILE */ +#ifdef DHD_PM_OVERRIDE + { + extern bool g_pm_override; + if (g_pm_override == TRUE) { + DHD_ERROR(("%s: PM override SET PM ignored!(Requested:%d)\n", + __FUNCTION__, buf ? *(char *)buf : 0)); + goto done; + } + } +#endif /* DHD_PM_OVERRIDE */ + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0)); + } + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + action = ioc->set; + + dhd_prot_wlioctl_intercept(dhd, ioc, buf); + +#if defined(EXT_STA) + wl_dbglog_ioctl_add(ioc, len, NULL); +#endif + if (action & WL_IOCTL_ACTION_SET) { + ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + } else { + ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) + ioc->used = ret; + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) { + ret = 0; + } else { +#ifndef DETAIL_DEBUG_LOG_FOR_IOCTL + DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret)); +#endif /* !DETAIL_DEBUG_LOG_FOR_IOCTL */ + dhd->dongle_error = ret; + } + + dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len); + +done: + return ret; + +} /* dhd_prot_ioctl */ + +/** test / loopback */ + +/* + * XXX: This will fail with new PCIe Split header Full Dongle using fixed + * sized messages in control submission ring. We seem to be sending the lpbk + * data via the control message, wherein the lpbk data may be larger than 1 + * control message that is being committed. 
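+ * Note that dhdmsgbuf_lpbk_req() below clamps msglen with LIMIT_TO_MAX()
+ * yet allocates only a single ring item, so a request longer than one
+ * item would run past its slot (see the XXX at the pattern fill).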
+ */ +int +dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + + ioct_reqst_hdr_t *ioct_rqst; + + uint16 hdrlen = sizeof(ioct_reqst_hdr_t); + uint16 msglen = len + hdrlen; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN); + msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE); + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_RING_LOCK(ring->ring_lock, flags); + + ioct_rqst = (ioct_reqst_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (ioct_rqst == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return 0; + } + + { + uint8 *ptr; + uint16 i; + + ptr = (uint8 *)ioct_rqst; /* XXX: failure!!! */ + for (i = 0; i < msglen; i++) { + ptr[i] = i % 256; + } + } + + /* Common msg buf hdr */ + ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; + ioct_rqst->msg.if_id = 0; + ioct_rqst->msg.flags = ring->current_phase; + + bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + + return 0; +} + +/** test / loopback */ +void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer) +{ + if (dmaxfer == NULL) + return; + + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + dhd_dma_buf_free(dhd, &dmaxfer->dstmem); +} + +/** test / loopback */ +int +dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp) +{ + dhd_prot_t *prot = dhdp->prot; + dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer; + dmaxref_mem_map_t *dmap = NULL; + + dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t)); + if (!dmap) { + DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__)); + goto mem_alloc_fail; + } + dmap->srcmem = &(dmaxfer->srcmem); + dmap->dstmem = &(dmaxfer->dstmem); + + DMAXFER_FREE(dhdp, dmap); + return BCME_OK; + +mem_alloc_fail: + if (dmap) { + MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t)); + } + return BCME_NOMEM; +} /* dhd_prepare_schedule_dmaxfer_free */ + +/** test / loopback */ +void +dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) +{ + + dhd_dma_buf_free(dhdp, dmmap->srcmem); + dhd_dma_buf_free(dhdp, dmmap->dstmem); + + MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t)); + + dhdp->bus->dmaxfer_complete = TRUE; + dhd_os_dmaxfer_wake(dhdp); +} /* dmaxfer_free_prev_dmaaddr */ + +/** test / loopback */ +int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, + uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer) +{ + uint i = 0, j = 0; + if (!dmaxfer) + return BCME_ERROR; + + /* First free up existing buffers */ + dmaxfer_free_dmaaddr(dhd, dmaxfer); + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) { + return BCME_NOMEM; + } + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) { + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + return BCME_NOMEM; + } + + dmaxfer->len = len; + + /* Populate source with a pattern like below + * 0x00000000 + * 0x01010101 + * 0x02020202 + * 0x03030303 + * 0x04040404 + * 0x05050505 + * ... 
+	 * 0xFFFFFFFF
+	 */
+	while (i < dmaxfer->len) {
+		((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
+		i++;
+		if (i % 4 == 0) {
+			j++;
+		}
+	}
+
+	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
+
+	dmaxfer->srcdelay = srcdelay;
+	dmaxfer->destdelay = destdelay;
+
+	return BCME_OK;
+} /* dmaxfer_prepare_dmaaddr */
+
+static void
+dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint64 end_usec;
+	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
+	int buf_free_scheduled;
+	int err = 0;
+
+	BCM_REFERENCE(cmplt);
+	end_usec = OSL_SYSUPTIME_US();
+
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+	/* restore interrupt poll period to the previous existing value */
+	dhd_os_set_intr_poll_period(dhd->bus, dhd->cur_intr_poll_period);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+
+	DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
+	prot->dmaxfer.status = cmplt->compl_hdr.status;
+	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+	if (prot->dmaxfer.d11_lpbk != M2M_WRITE_TO_RAM &&
+		prot->dmaxfer.d11_lpbk != M2M_READ_FROM_RAM &&
+		prot->dmaxfer.d11_lpbk != D11_WRITE_TO_RAM &&
+		prot->dmaxfer.d11_lpbk != D11_READ_FROM_RAM) {
+		err = memcmp(prot->dmaxfer.srcmem.va,
+			prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+	}
+	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
+		if (err ||
+			cmplt->compl_hdr.status != BCME_OK) {
+			DHD_ERROR(("DMA loopback failed\n"));
+			/* It is observed that sometimes the completion
+			 * header status is set to OK, but the memcmp fails;
+			 * hence always explicitly set the dmaxfer status
+			 * to error when this happens.
+			 */
+			prot->dmaxfer.status = BCME_ERROR;
+			prhex("XFER SRC: ",
+				prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+			prhex("XFER DST: ",
+				prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+		}
+		else {
+			switch (prot->dmaxfer.d11_lpbk) {
+			case M2M_DMA_LPBK: {
+				DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
+			} break;
+			case D11_LPBK: {
+				DHD_ERROR(("DMA successful with d11 loopback\n"));
+			} break;
+			case BMC_LPBK: {
+				DHD_ERROR(("DMA successful with bmc loopback\n"));
+			} break;
+			case M2M_NON_DMA_LPBK: {
+				DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
+			} break;
+			case D11_HOST_MEM_LPBK: {
+				DHD_ERROR(("DMA successful d11 host mem loopback\n"));
+			} break;
+			case BMC_HOST_MEM_LPBK: {
+				DHD_ERROR(("DMA successful bmc host mem loopback\n"));
+			} break;
+			case M2M_WRITE_TO_RAM: {
+				DHD_ERROR(("DMA successful pcie m2m write to ram\n"));
+			} break;
+			case M2M_READ_FROM_RAM: {
+				DHD_ERROR(("DMA successful pcie m2m read from ram\n"));
+				prhex("XFER DST: ",
+					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+			} break;
+			case D11_WRITE_TO_RAM: {
+				DHD_ERROR(("DMA successful D11 write to ram\n"));
+			} break;
+			case D11_READ_FROM_RAM: {
+				DHD_ERROR(("DMA successful D11 read from ram\n"));
+				prhex("XFER DST: ",
+					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+			} break;
+			default: {
+				DHD_ERROR(("Invalid loopback option\n"));
+			} break;
+			}
+
+			if (DHD_LPBKDTDUMP_ON()) {
+				/* debug info print of the Tx and Rx buffers */
+				dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
+					prot->dmaxfer.len, DHD_INFO_VAL);
+				dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
+					prot->dmaxfer.len, DHD_INFO_VAL);
+			}
+		}
+	}
+
+	buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
+	end_usec -= prot->dmaxfer.start_usec;
+	if (end_usec) {
+		prot->dmaxfer.time_taken = end_usec;
+		DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
+			prot->dmaxfer.len, (unsigned
long)end_usec, + (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec))); + } + dhd->prot->dmaxfer.in_progress = FALSE; + + if (buf_free_scheduled != BCME_OK) { + dhd->bus->dmaxfer_complete = TRUE; + dhd_os_dmaxfer_wake(dhd); + } +} + +/** Test functionality. + * Transfers bytes from host to dongle and to host again using DMA + * This function is not reentrant, as prot->dmaxfer.in_progress is not protected + * by a spinlock. + */ +int +dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, + uint d11_lpbk, uint core_num, uint32 mem_addr) +{ + unsigned long flags; + int ret = BCME_OK; + dhd_prot_t *prot = dhd->prot; + pcie_dma_xfer_params_t *dmap; + uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT); + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + /* XXX: prot->dmaxfer.in_progress is not protected by lock */ + if (prot->dmaxfer.in_progress) { + DHD_ERROR(("DMA is in progress...\n")); + return BCME_ERROR; + } + + if (d11_lpbk >= MAX_LPBK) { + DHD_ERROR(("loopback mode should be either" + " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n")); + return BCME_ERROR; + } + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) { + return BCME_ERROR; + } +#endif /* PCIE_INB_DW */ + + prot->dmaxfer.in_progress = TRUE; + if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay, + &prot->dmaxfer)) != BCME_OK) { + prot->dmaxfer.in_progress = FALSE; +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return ret; + } + DHD_RING_LOCK(ring->ring_lock, flags); + dmap = (pcie_dma_xfer_params_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (dmap == NULL) { + dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); + prot->dmaxfer.in_progress = FALSE; + DHD_RING_UNLOCK(ring->ring_lock, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER; + dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID); + dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + dmap->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + + dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa)); + dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa)); + dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa)); + dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa)); + dmap->xfer_len = htol32(prot->dmaxfer.len); + dmap->srcdelay = htol32(prot->dmaxfer.srcdelay); + dmap->destdelay = htol32(prot->dmaxfer.destdelay); + prot->dmaxfer.d11_lpbk = d11_lpbk; + if (d11_lpbk == M2M_WRITE_TO_RAM) { + dmap->host_ouput_buf_addr.high = 0x0; + dmap->host_ouput_buf_addr.low = mem_addr; + } else if (d11_lpbk == M2M_READ_FROM_RAM) { + dmap->host_input_buf_addr.high = 0x0; + dmap->host_input_buf_addr.low = mem_addr; + } else if (d11_lpbk == D11_WRITE_TO_RAM) { + dmap->host_ouput_buf_addr.high = 0x0; + dmap->host_ouput_buf_addr.low = mem_addr; + } else if (d11_lpbk == D11_READ_FROM_RAM) { + dmap->host_input_buf_addr.high = 0x0; + dmap->host_input_buf_addr.low = mem_addr; + } + dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK) + << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) | + ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK) + << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT)); + prot->dmaxfer.start_usec = OSL_SYSUPTIME_US(); + + /* update ring's WR index and ring doorbell to 
dongle */
+	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
+
+	DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+	DHD_ERROR(("DMA loopback Started... on core[%d]\n", core_num));
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return BCME_OK;
+} /* dhdmsgbuf_dmaxfer_req */
+
+int
+dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->dmaxfer.in_progress)
+		result->status = DMA_XFER_IN_PROGRESS;
+	else if (prot->dmaxfer.status == 0)
+		result->status = DMA_XFER_SUCCESS;
+	else
+		result->status = DMA_XFER_FAILED;
+
+	result->type = prot->dmaxfer.d11_lpbk;
+	result->error_code = prot->dmaxfer.status;
+	result->num_bytes = prot->dmaxfer.len;
+	result->time_taken = prot->dmaxfer.time_taken;
+	if (prot->dmaxfer.time_taken) {
+		/* throughput in kBps */
+		result->tput =
+			(prot->dmaxfer.len * (1000 * 1000 / 1024)) /
+			(uint32)prot->dmaxfer.time_taken;
+	}
+
+	return BCME_OK;
+}
+
+/** Called in the process of submitting an ioctl to the dongle */
+static int
+dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	int ret = 0;
+	uint copylen = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhd->bus->is_linkdown) {
+		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!len || !*(uint8 *)buf) {
+			DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
+			ret = BCME_BADARG;
+			goto done;
+		}
+
+		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+		copylen = MIN(len, BCME_STRLEN);
+
+		if ((len >= strlen("bcmerrorstr")) &&
+			(!strcmp((char *)buf, "bcmerrorstr"))) {
+			strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
+			goto done;
+		} else if ((len >= strlen("bcmerror")) &&
+			!strcmp((char *)buf, "bcmerror")) {
+			*(uint32 *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+#ifdef REPORT_FATAL_TIMEOUTS
+	/*
+	 * These timers "should" be started before sending the H2D interrupt.
+	 * Think of the scenario where the H2D interrupt is fired and the dongle
+	 * responds back immediately. From the DPC we would stop the cmd and bus
+	 * timers. But the process context could have switched out, leading to
+	 * a situation where the timers are not started yet, but are actually stopped.
+	 *
+	 * Disable preemption from the time we start the timer until we are done
+	 * with sending H2D interrupts.
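+	 * In other words, the stretch from dhd_start_cmd_timer() through the
+	 * doorbell ring in dhd_fillup_ioct_reqst() stays atomic on this CPU.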
+	 */
+	OSL_DISABLE_PREEMPTION(dhd->osh);
+	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+	dhd_start_cmd_timer(dhd);
+	dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/* If for some reason we fail to ring the doorbell, stop the timers */
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		dhd_stop_cmd_timer(dhd);
+		dhd_stop_bus_timer(dhd);
+		OSL_ENABLE_PREEMPTION(dhd->osh);
+		goto done;
+	}
+	OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		goto done;
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	/* wait for IOCTL completion message from dongle and get first fragment */
+	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+done:
+	return ret;
+}
+
+void
+dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
+{
+	uint32 intstatus;
+	dhd_prot_t *prot = dhd->prot;
+	dhd->rxcnt_timeout++;
+	dhd->rx_ctlerrs++;
+	DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
+		"trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
+		dhd->is_sched_error ? " due to scheduling problem" : "",
+		dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
+		prot->ioctl_state, dhd->busstate, prot->ioctl_received));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+	/* XXX DHD triggers a Kernel panic if the resumed-on-timeout occurs
+	 * due to tasklet or workqueue scheduling problems in the Linux Kernel.
+	 * Customer informs that it is hard to find any clue from the
+	 * host memory dump, since the important tasklet or workqueue information
+	 * has already disappeared due to the latency while printing out the
+	 * timestamp logs for debugging the scan timeout issue.
+	 * For this reason, customer requests us to trigger a Kernel Panic rather
+	 * than taking a SOCRAM dump.
+	 */
+	if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
+		/* change g_assert_type to trigger Kernel panic */
+		g_assert_type = 2;
+		/* use ASSERT() to trigger panic */
+		ASSERT(0);
+	}
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+
+	if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
+		prot->curr_ioctl_cmd == WLC_GET_VAR) {
+		char iovbuf[32];
+		int dump_size = 128;
+		uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
+		memset(iovbuf, 0, sizeof(iovbuf));
+		strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
+		iovbuf[sizeof(iovbuf) - 1] = '\0';
+		DHD_ERROR(("Current IOVAR (%s): %s\n",
+			prot->curr_ioctl_cmd == WLC_SET_VAR ?
+			"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
+		DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
+		prhex("ioctl_buf", (const u8 *) ioctl_buf, dump_size);
+		DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
+	}
+
+	/* Check the PCIe link status by reading intstatus register */
+	intstatus = si_corereg(dhd->bus->sih,
+		dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+	if (intstatus == (uint32)-1) {
+		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+		dhd->bus->is_linkdown = TRUE;
+	}
+
+	dhd_bus_dump_console_buffer(dhd->bus);
+	dhd_prot_debug_info_print(dhd);
+}
+
+/**
+ * Waits for the IOCTL completion message from the dongle, and copies it into
+ * the caller-provided parameter 'buf'.
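+ * On success the response is truncated to at most 'len' bytes, copied out
+ * of prot->retbuf, and the dongle's ioctl status is returned to the caller.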
+ */
+static int
+dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int timeleft;
+	unsigned long flags;
+	int ret = 0;
+	static uint cnt = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhd_query_bus_erros(dhd)) {
+		ret = -EIO;
+		goto out;
+	}
+#ifdef GDB_PROXY
+	/* Loop while timeout is caused by firmware stop in GDB */
+	{
+		uint32 prev_stop_count;
+		do {
+			prev_stop_count = dhd->gdb_proxy_stop_count;
+			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+		} while ((timeleft == 0) && ((dhd->gdb_proxy_stop_count != prev_stop_count) ||
+			(dhd->gdb_proxy_stop_count & GDB_PROXY_STOP_MASK)));
+	}
+#else
+	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+#endif /* GDB_PROXY */
+
+#ifdef DHD_RECOVER_TIMEOUT
+	if (prot->ioctl_received == 0) {
+		uint32 intstatus = si_corereg(dhd->bus->sih,
+			dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+		int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
+		if ((intstatus) && (intstatus != (uint32)-1) &&
+			(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
+			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
+				" host_irq_disabled=%d\n",
+				__FUNCTION__, intstatus, host_irq_disabled));
+			dhd_pcie_intr_count_dump(dhd);
+			dhd_print_tasklet_status(dhd);
+			dhd_prot_process_ctrlbuf(dhd);
+			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+			/* Clear Interrupts */
+			dhdpcie_bus_clear_intstatus(dhd->bus);
+		}
+	}
+#endif /* DHD_RECOVER_TIMEOUT */
+
+	if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+		cnt++;
+		if (cnt <= dhd->conf->ctrl_resched) {
+			uint buscorerev = dhd->bus->sih->buscorerev;
+			uint32 intstatus = 0, intmask = 0;
+			intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
+			intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
+			if (intstatus) {
+				DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
+					__FUNCTION__, cnt, intstatus, intmask));
+				dhd->bus->intstatus = intstatus;
+				dhd->bus->ipend = TRUE;
+				dhd->bus->dpc_sched = TRUE;
+				dhd_sched_dpc(dhd);
+				timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
+			}
+		}
+	} else {
+		cnt = 0;
+	}
+
+	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+		if (dhd->check_trap_rot) {
+			/* check dongle trap first */
+			DHD_ERROR(("Check dongle trap in the case of iovar timeout\n"));
+			dhd_bus_checkdied(dhd->bus, NULL, 0);
+
+			if (dhd->dongle_trap_occured) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+				dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+				ret = -EREMOTEIO;
+				goto out;
+			}
+		}
+		/* check if resumed on time out related to scheduling issue */
+		dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
+
+		dhd->iovar_timeout_occured = TRUE;
+		dhd_msgbuf_iovar_timeout_dump(dhd);
+
+#ifdef DHD_FW_COREDUMP
+		/* Collect socram dump */
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
+			dhd_bus_mem_dump(dhd);
+		}
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef DHD_EFI
+		/*
+		 * for ioctl timeout, recovery is triggered only for EFI case, because
+		 * in linux, dhd daemon will itself trap the FW,
+		 * so if recovery is triggered
+		 * then there is a race between FLR and daemon initiated trap
+		 */
+		dhd_schedule_reset(dhd);
+#endif /* DHD_EFI */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
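+		/* presumably a hint to the MSM linkdown-recovery path to skip
+		 * restoring PCIe config space; the exact handling of
+		 * no_cfg_restore lives in the platform code
+		 */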
+		dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		ret = -ETIMEDOUT;
+		goto out;
+	} else {
+		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
+			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
+				__FUNCTION__, prot->ioctl_received));
+			ret = -EINVAL;
+			goto out;
+		}
+		dhd->rxcnt_timeout = 0;
+		dhd->rx_ctlpkts++;
+		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
+			__FUNCTION__, prot->ioctl_resplen));
+	}
+
+	if (dhd->prot->ioctl_resplen > len)
+		dhd->prot->ioctl_resplen = (uint16)len;
+	if (buf)
+		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
+
+	ret = (int)(dhd->prot->ioctl_status);
+
+out:
+	DHD_GENERAL_LOCK(dhd, flags);
+	dhd->prot->ioctl_state = 0;
+	dhd->prot->ioctl_resplen = 0;
+	dhd->prot->ioctl_received = IOCTL_WAIT;
+	dhd->prot->curr_ioctl_cmd = 0;
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return ret;
+} /* dhd_msgbuf_wait_ioctl_cmplt */
+
+static int
+dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+
+	if (dhd->bus->is_linkdown) {
+		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/*
+	 * These timers "should" be started before sending the H2D interrupt.
+	 * Think of the scenario where the H2D interrupt is fired and the dongle
+	 * responds back immediately. From the DPC we would stop the cmd and bus
+	 * timers. But the process context could have switched out, leading to
+	 * a situation where the timers are not started yet, but are actually stopped.
+	 *
+	 * Disable preemption from the time we start the timer until we are done
+	 * with sending H2D interrupts.
+	 */
+	OSL_DISABLE_PREEMPTION(dhd->osh);
+	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+	dhd_start_cmd_timer(dhd);
+	dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	/* Fill up msgbuf for ioctl req */
+	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/* If for some reason we fail to ring the doorbell, stop the timers */
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		dhd_stop_cmd_timer(dhd);
+		dhd_stop_bus_timer(dhd);
+		OSL_ENABLE_PREEMPTION(dhd->osh);
+		goto done;
+	}
+
+	OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		goto done;
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+done:
+	return ret;
+}
+
+/** Called by upper DHD layer. Handles a protocol control response asynchronously. */
+int dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+	return 0;
+}
+
+/** Called by upper DHD layer.
Check for and handle local prot-specific iovar commands */ +int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +#ifdef DHD_DUMP_PCIE_RINGS +int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf, + unsigned long *file_posn, bool file_write) +{ + dhd_prot_t *prot; + msgbuf_ring_t *ring; + int ret = 0; + uint16 h2d_flowrings_total; + uint16 flowid; + + if (!(dhd) || !(dhd->prot)) { + goto exit; + } + prot = dhd->prot; + + /* Below is the same ring dump sequence followed in parser as well. */ + ring = &prot->h2dring_ctrl_subn; + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + + ring = &prot->h2dring_rxp_subn; + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + + ring = &prot->d2hring_ctrl_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + + ring = &prot->d2hring_tx_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + + ring = &prot->d2hring_rx_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) { + goto exit; + } + } + +#ifdef EWP_EDL + if (dhd->dongle_edl_support) { + ring = prot->d2hring_edl; + if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + } + else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support) +#else + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) +#endif /* EWP_EDL */ + { + ring = prot->h2dring_info_subn; + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + + ring = prot->d2hring_info_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + } + +exit : + return ret; +} + +/* Write to file */ +static +int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, + const void *user_buf, unsigned long *file_posn) +{ + int ret = 0; + + if (ring == NULL) { + DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n", + __FUNCTION__)); + return BCME_ERROR; + } + if (file) { + ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va), + ((unsigned long)(ring->max_items) * (ring->item_len))); + if (ret < 0) { + DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); + ret = BCME_ERROR; + } + } else if (user_buf) { + ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf, + ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn); + } + return ret; +} + +#ifdef EWP_EDL +/* Write to file */ +static +int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf, + unsigned long *file_posn) +{ + int ret = 0, nitems = 0; + char *buf = NULL, *ptr = NULL; + uint8 *msg_addr = NULL; + uint16 rd = 0; + + if (ring == NULL) { + DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto done; + } + + buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE)); + if (buf == NULL) { + DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__)); + ret = BCME_ERROR; + goto done; + } + ptr = buf; + + for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) { + msg_addr = (uint8 *)ring->dma_buf.va + 
(rd * ring->item_len); + memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE); + ptr += D2HRING_EDL_HDR_SIZE; + } + if (file) { + ret = dhd_os_write_file_posn(file, file_posn, buf, + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM)); + if (ret < 0) { + DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); + goto done; + } + } + else { + ret = dhd_export_debug_data(buf, NULL, user_buf, + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn); + } + +done: + if (buf) { + MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE)); + } + return ret; +} +#endif /* EWP_EDL */ +#endif /* DHD_DUMP_PCIE_RINGS */ + +/** Add prot dump output to a buffer */ +void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ +#if defined(BCM_ROUTER_DHD) + bcm_bprintf(b, "DHD Router: 1GMAC HotBRC forwarding mode\n"); +#endif /* BCM_ROUTER_DHD */ + + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) + bcm_bprintf(b, "\nd2h_sync: SEQNUM:"); + else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) + bcm_bprintf(b, "\nd2h_sync: XORCSUM:"); + else + bcm_bprintf(b, "\nd2h_sync: NONE:"); + bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n", + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); + + bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n", + dhd->dma_h2d_ring_upd_support, + dhd->dma_d2h_ring_upd_support, + dhd->prot->rw_index_sz); + bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", + h2d_max_txpost, dhd->prot->h2d_max_txpost); +#if defined(DHD_HTPUT_TUNABLES) + bcm_bprintf(b, "h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n", + h2d_htput_max_txpost, dhd->prot->h2d_htput_max_txpost); +#endif /* DHD_HTPUT_TUNABLES */ + bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt); + bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt); + bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt); + bcm_bprintf(b, "txcpl_db_cnt: %d\n", dhd->prot->txcpl_db_cnt); +#ifdef DHD_DMA_INDICES_SEQNUM + bcm_bprintf(b, "host_seqnum %u dngl_seqnum %u\n", dhd_prot_read_seqnum(dhd, TRUE), + dhd_prot_read_seqnum(dhd, FALSE)); +#endif /* DHD_DMA_INDICES_SEQNUM */ + bcm_bprintf(b, "tx_h2d_db_cnt:%llu\n", dhd->prot->tx_h2d_db_cnt); +#ifdef AGG_H2D_DB + bcm_bprintf(b, "agg_h2d_db_enab:%d agg_h2d_db_timeout:%d agg_h2d_db_inflight_thresh:%d\n", + agg_h2d_db_enab, agg_h2d_db_timeout, agg_h2d_db_inflight_thresh); + bcm_bprintf(b, "agg_h2d_db: timer_db_cnt:%d direct_db_cnt:%d\n", + dhd->prot->agg_h2d_db_info.timer_db_cnt, dhd->prot->agg_h2d_db_info.direct_db_cnt); + dhd_agg_inflight_stats_dump(dhd, b); +#endif /* AGG_H2D_DB */ +} + +/* Update local copy of dongle statistics */ +void dhd_prot_dstats(dhd_pub_t *dhd) +{ + return; +} + +/** Called by upper DHD layer */ +int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count) +{ + return 0; +} + +/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. 
*/
+int
+dhd_post_dummy_msg(dhd_pub_t *dhd)
+{
+	unsigned long flags;
+	hostevent_hdr_t *hevent = NULL;
+	uint16 alloced = 0;
+
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+	DHD_RING_LOCK(ring->ring_lock, flags);
+
+	hevent = (hostevent_hdr_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (hevent == NULL) {
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return -1;
+	}
+
+	/* CMN msg header */
+	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
+	hevent->msg.if_id = 0;
+	hevent->msg.flags = ring->current_phase;
+
+	/* Event payload */
+	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
+
+	/* Since we are filling the data directly into the bufptr obtained
+	 * from the msgbuf, we can directly call the write_complete
+	 */
+	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
+
+	DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return 0;
+}
+
+/**
+ * If exactly_nitems is true, this function will allocate space for nitems or fail
+ * If exactly_nitems is false, this function will allocate space for nitems or less
+ */
+static void *
+BCMFASTPATH(dhd_prot_alloc_ring_space)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	uint16 nitems, uint16 * alloced, bool exactly_nitems)
+{
+	void * ret_buf;
+
+	if (nitems == 0) {
+		DHD_ERROR(("%s: nitems is 0 - ring(%s)\n", __FUNCTION__, ring->name));
+		return NULL;
+	}
+
+	/* Alloc space for nitems in the ring */
+	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
+
+	if (ret_buf == NULL) {
+		/* if alloc failed, invalidate cached read ptr */
+		if (dhd->dma_d2h_ring_upd_support) {
+			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+		} else {
+			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+			/* Check if ring->rd is valid */
+			if (ring->rd >= ring->max_items) {
+				DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
+				dhd->bus->read_shm_fail = TRUE;
+				return NULL;
+			}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		}
+
+		/* Try allocating once more */
+		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
+
+		if (ret_buf == NULL) {
+			DHD_INFO(("%s: Ring space not available \n", ring->name));
+			return NULL;
+		}
+	}
+
+	if (ret_buf == HOST_RING_BASE(ring)) {
+		DHD_MSGBUF_INFO(("%s: setting the phase now\n", ring->name));
+		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+	}
+
+	/* Return alloced space */
+	return ret_buf;
+}
+
+/**
+ * Non inline ioct request.
+ * Form an ioctl request first, as per the ioctptr_reqst_hdr_t header in the circular buffer.
+ * Form a separate request buffer where a 4 byte cmn header is added in the front;
+ * buf contents from the parent function are copied to the remaining section of this buffer.
+ */
+static int
+dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	ioctl_req_msg_t *ioct_rqst;
+	void * ioct_buf;	/* For ioctl payload */
+	uint16 rqstlen, resplen;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+#ifdef DBG_DW_CHK_PCIE_READ_LATENCY
+	ulong addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+	ktime_t begin_time, end_time;
+	s64 diff_ns;
+#endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
+
+	if (dhd_query_bus_erros(dhd)) {
+		return -EIO;
+	}
+
+	rqstlen = len;
+	resplen = len;
+
+	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
+	/* 8K allocation of dongle buffer fails */
+	/* dhd doesn't give separate input & output buf lens */
+	/* so making the assumption that input length can never be more than 2k */
+	rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+
+#ifdef DBG_DW_CHK_PCIE_READ_LATENCY
+	preempt_disable();
+	begin_time = ktime_get();
+	R_REG(dhd->osh, (volatile uint16 *)(dhd->bus->tcm + addr));
+	end_time = ktime_get();
+	preempt_enable();
+	diff_ns = ktime_to_ns(ktime_sub(end_time, begin_time));
+	/* Check if the delta is greater than 1 msec */
+	if (diff_ns > (1 * NSEC_PER_MSEC)) {
+		DHD_ERROR(("%s: found latency over 1ms (%lld ns), ds state=%d\n", __func__,
+			diff_ns, dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus)));
+	}
+#endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
+#endif /* PCIE_INB_DW */
+
+	DHD_RING_LOCK(ring->ring_lock, flags);
+
+	if (prot->ioctl_state) {
+		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_BUSY;
+	} else {
+		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
+	}
+
+	/* Request for cbuf space */
+	ioct_rqst = (ioctl_req_msg_t*)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+	if (ioct_rqst == NULL) {
+		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+		prot->ioctl_state = 0;
+		prot->curr_ioctl_cmd = 0;
+		prot->ioctl_received = IOCTL_WAIT;
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return -1;
+	}
+
+	/* Common msg buf hdr */
+	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
+	ioct_rqst->cmn_hdr.flags = ring->current_phase;
+	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
+	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	ioct_rqst->cmd = htol32(cmd);
+	prot->curr_ioctl_cmd = cmd;
+	ioct_rqst->output_buf_len = htol16(resplen);
+	prot->ioctl_trans_id++;
+	ioct_rqst->trans_id = prot->ioctl_trans_id;
+
+	/* populate ioctl buffer info */
+	ioct_rqst->input_buf_len = htol16(rqstlen);
+	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
+	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
+	/* copy ioct payload */
+	ioct_buf = (void *) prot->ioctbuf.va;
+
+	prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
+
+	if (buf)
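+		/* the caller's buffer becomes the ioctl payload; the cache flush
+		 * just below publishes it for the dongle's DMA read from
+		 * host_input_buf_addr
+		 */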
memcpy(ioct_buf, buf, len); + + OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len); + + if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) + DHD_ERROR(("host ioct address unaligned !!!!! \n")); + + DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n", + ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len, + ioct_rqst->trans_id)); + +#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) + dhd_prot_ioctl_trace(dhd, ioct_rqst, buf, len); +#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */ + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + + return 0; +} /* dhd_fillup_ioct_reqst */ + +/** + * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a + * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring + * information is posted to the dongle. + * + * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for + * each flowring in pool of flowrings. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. + */ +static int +dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, + uint16 max_items, uint16 item_len, uint16 ringid) +{ + int dma_buf_alloced = BCME_NOMEM; + uint32 dma_buf_len; + dhd_prot_t *prot = dhd->prot; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + dhd_dma_buf_t *dma_buf = NULL; + + ASSERT(ring); + ASSERT(name); + ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF)); + + /* Init name */ + strlcpy((char *)ring->name, name, sizeof(ring->name)); + + ring->idx = ringid; + +#if defined(DHD_HTPUT_TUNABLES) + /* Use HTPUT max items */ + if (DHD_IS_FLOWRING(ringid, max_flowrings) && + DHD_IS_FLOWID_HTPUT(dhd, DHD_RINGID_TO_FLOWID(ringid))) { + max_items = prot->h2d_htput_max_txpost; + } +#endif /* DHD_HTPUT_TUNABLES */ + + dma_buf_len = max_items * item_len; + + ring->max_items = max_items; + ring->item_len = item_len; + + /* A contiguous space may be reserved for all flowrings */ + if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) { + /* Carve out from the contiguous DMA-able flowring buffer */ + uint16 flowid; + uint32 base_offset; + dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf; + + dma_buf = &ring->dma_buf; + + flowid = DHD_RINGID_TO_FLOWID(ringid); + base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len; + + ASSERT(base_offset + dma_buf_len <= rsv_buf->len); + + dma_buf->len = dma_buf_len; + dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset); + PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa)); + PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset); + + /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */ + ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa)); + + dma_buf->dmah = rsv_buf->dmah; + dma_buf->secdma = rsv_buf->secdma; + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + } else { +#ifdef EWP_EDL + if (ring == dhd->prot->d2hring_edl) { + /* For EDL ring, memory is alloced during attach, + * so just need to copy the dma_buf to the ring's dma_buf + */ + memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf)); + dma_buf = &ring->dma_buf; + if (dma_buf->va == NULL) { + return BCME_NOMEM; + } + } else +#endif /* EWP_EDL */ + { + /* Allocate a dhd_dma_buf */ + dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, 
dma_buf_len); + if (dma_buf_alloced != BCME_OK) { + return BCME_NOMEM; + } + } + } + + /* CAUTION: Save ring::base_addr in little endian format! */ + dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa); + + ring->ring_lock = osl_spin_lock_init(dhd->osh); + + DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d " + "ring start %p buf phys addr %x:%x \n", + ring->name, ring->max_items, ring->item_len, + dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr))); + + return BCME_OK; +} /* dhd_prot_ring_attach */ + +/** + * dhd_prot_ring_init - Post the common ring information to dongle. + * + * Used only for common rings. + * + * The flowrings information is passed via the create flowring control message + * (tx_flowring_create_request_t) sent over the H2D control submission common + * ring. + */ +static void +dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + + /* CAUTION: ring::base_addr already in Little Endian */ + dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, + sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items, + sizeof(uint16), RING_MAX_ITEMS, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len, + sizeof(uint16), RING_ITEM_LEN, ring->idx); + + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + + /* ring inited */ + ring->inited = TRUE; + +} /* dhd_prot_ring_init */ + +/** + * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush + * Reset WR and RD indices to 0. + */ +static void +dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + DHD_TRACE(("%s\n", __FUNCTION__)); + + dhd_dma_buf_reset(dhd, &ring->dma_buf); + + ring->rd = ring->wr = 0; + ring->curr_rd = 0; + ring->inited = FALSE; + ring->create_pending = FALSE; +} + +/** + * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects + * hanging off the msgbuf_ring. + */ +static void +dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + dhd_prot_t *prot = dhd->prot; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + ASSERT(ring); + + ring->inited = FALSE; + /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */ + + /* If the DMA-able buffer was carved out of a pre-reserved contiguous + * memory, then simply stop using it. + */ + if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) { + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t)); + } else { +#ifdef EWP_EDL + if (ring == dhd->prot->d2hring_edl) { + /* For EDL ring, do not free ring mem here, + * it is done in dhd_detach + */ + memset(&ring->dma_buf, 0, sizeof(ring->dma_buf)); + } else +#endif /* EWP_EDL */ + { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } + } + + osl_spin_lock_deinit(dhd->osh, ring->ring_lock); + +} /* dhd_prot_ring_detach */ + +/* Fetch number of H2D flowrings given the total number of h2d rings */ +uint16 +dhd_get_max_flow_rings(dhd_pub_t *dhd) +{ + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) + return dhd->bus->max_tx_flowrings; + else + return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS); +} + +/** + * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t. + * + * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings. 
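+ *
+ * Sizing sketch (illustrative numbers, not taken from this driver): if the
+ * dongle advertises max_tx_flowrings = 40 with fw_rev < PCIE_SHARED_VERSION_6,
+ * dhd_get_max_flow_rings() above returns 40 - BCMPCIE_H2D_COMMON_MSGRINGS
+ * (commonly 2) flowrings, and the pool is then MALLOCZ'd as that count
+ * multiplied by sizeof(msgbuf_ring_t).
+ *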
+ * Dongle includes common rings when it advertises the number of H2D rings.
+ * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
+ * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
+ *
+ * dhd_prot_ring_attach is invoked to perform the actual initialization and
+ * attaching of the DMA-able buffer.
+ *
+ * Later, dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated
+ * and initialized msgbuf_ring_t object.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
+{
+	uint16 flowid;
+	msgbuf_ring_t *ring;
+	uint16 h2d_flowrings_total; /* exclude H2D common rings */
+	dhd_prot_t *prot = dhd->prot;
+	char ring_name[RING_NAME_MAX_LENGTH];
+
+	if (prot->h2d_flowrings_pool != NULL)
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+
+	ASSERT(prot->h2d_rings_total == 0);
+
+	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
+	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
+
+	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
+		DHD_ERROR(("%s: h2d_rings_total advertised as %u\n",
+			__FUNCTION__, prot->h2d_rings_total));
+		return BCME_ERROR;
+	}
+
+	/* Subtract number of H2D common rings to determine number of flowrings */
+	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+
+	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
+
+	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
+	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
+		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
+			__FUNCTION__, h2d_flowrings_total));
+		goto fail;
+	}
+
+	/* Setup & attach a DMA-able buffer to each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
+		/* For the HTPUT case, max_items will be changed inside dhd_prot_ring_attach */
+		if (dhd_prot_ring_attach(dhd, ring, ring_name,
+			prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
+			DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
+			goto attach_fail;
+		}
+	}
+
+	return BCME_OK;
+
+attach_fail:
+	/* XXX: On a per project basis, one may decide whether to continue with
+	 * "fewer" flowrings, and what value of fewer suffices.
+	 */
+	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
+
+fail:
+	prot->h2d_rings_total = 0;
+	return BCME_NOMEM;
+
+} /* dhd_prot_flowrings_pool_attach */
+
+/**
+ * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
+ * Invokes dhd_prot_ring_reset to perform the actual reset.
+ *
+ * The DMA-able buffer is not freed during reset, and neither is the flowring
+ * pool freed.
+ *
+ * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
+ * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
+ * from a previous flowring pool instantiation will be reused.
+ *
+ * This avoids a fragmented DMA-able memory condition when multiple
+ * dhd_prot_reset calls are used to reboot the dongle without a full
+ * detach/attach cycle.
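+ *
+ * Hypothetical call order, for illustration only (both functions exist in
+ * this driver):
+ *
+ *   dhd_prot_reset(dhd)  ->  invokes dhd_prot_flowrings_pool_reset()
+ *   dhd_prot_init(dhd)   ->  reuses the still-allocated pool and DMA buffers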
+ */ +static void +dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd) +{ + uint16 flowid, h2d_flowrings_total; + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); + /* Reset each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { + dhd_prot_ring_reset(dhd, ring); + ring->inited = FALSE; + } + + /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */ +} + +/** + * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with + * DMA-able buffers for flowrings. + * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any + * de-initialization of each msgbuf_ring_t. + */ +static void +dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) +{ + int flowid; + msgbuf_ring_t *ring; + uint16 h2d_flowrings_total; /* exclude H2D common rings */ + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); + /* Detach the DMA-able buffer for each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { + dhd_prot_ring_detach(dhd, ring); + } + + MFREE(prot->osh, prot->h2d_flowrings_pool, + (h2d_flowrings_total * sizeof(msgbuf_ring_t))); + + prot->h2d_rings_total = 0; + +} /* dhd_prot_flowrings_pool_detach */ + +/** + * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized + * msgbuf_ring from the flowring pool, and assign it. + * + * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common + * ring information to the dongle, a flowring's information is passed via a + * flowring create control message. + * + * Only the ring state (WR, RD) index are initialized. + */ +static msgbuf_ring_t * +dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + /* ASSERT flow_ring->inited == FALSE */ + + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + ring->inited = TRUE; + /** + * Every time a flowring starts dynamically, initialize current_phase with 0 + * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT + */ + ring->current_phase = 0; + return ring; +} + +/** + * dhd_prot_flowrings_pool_release - release a previously fetched flowring's + * msgbuf_ring back to the flow_ring pool. 
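+ *
+ * Fetch and release are paired per flowid; a minimal (hypothetical) usage,
+ * mirroring what dhd_prot_flow_ring_create does:
+ *
+ *   msgbuf_ring_t *ring = dhd_prot_flowrings_pool_fetch(dhd, flowid);
+ *   flow_ring_node->prot_info = (void *)ring;
+ *   ... flowring in use ...
+ *   dhd_prot_flowrings_pool_release(dhd, flowid, flow_ring_node->prot_info);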
+ */ +void +dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + ASSERT(ring == (msgbuf_ring_t*)flow_ring); + /* ASSERT flow_ring->inited == TRUE */ + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + + ring->wr = 0; + ring->rd = 0; + ring->inited = FALSE; + + ring->curr_rd = 0; +} + +#ifdef AGG_H2D_DB +void +dhd_prot_schedule_aggregate_h2d_db(dhd_pub_t *dhd, uint16 flowid) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring; + uint16 inflight; + bool db_req = FALSE; + bool flush; + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + flush = !!ring->pend_items_count; + dhd_prot_txdata_aggr_db_write_flush(dhd, flowid); + + inflight = OSL_ATOMIC_READ(dhd->osh, &ring->inflight); + if (flush && inflight) { + if (inflight <= agg_h2d_db_inflight_thresh) { + db_req = TRUE; + } + dhd_agg_inflights_stats_update(dhd, inflight); + dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, db_req); + } +} +#endif /* AGG_H2D_DB */ + +/* Assumes only one index is updated at a time */ +/* FIXME Need to fix it */ +/* If exactly_nitems is true, this function will allocate space for nitems or fail */ +/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */ +/* If exactly_nitems is false, this function will allocate space for nitems or less */ +static void * +BCMFASTPATH(dhd_prot_get_ring_space)(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, + bool exactly_nitems) +{ + void *ret_ptr = NULL; + uint16 ring_avail_cnt; + + ASSERT(nitems <= ring->max_items); + + ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items); + + if ((ring_avail_cnt == 0) || + (exactly_nitems && (ring_avail_cnt < nitems) && + ((ring->max_items - ring->wr) >= nitems))) { + DHD_MSGBUF_INFO(("Space not available: ring %s items %d write %d read %d\n", + ring->name, nitems, ring->wr, ring->rd)); + return NULL; + } + *alloced = MIN(nitems, ring_avail_cnt); + + /* Return next available space */ + ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len); + + /* Update write index */ + if ((ring->wr + *alloced) == ring->max_items) + ring->wr = 0; + else if ((ring->wr + *alloced) < ring->max_items) + ring->wr += *alloced; + else { + /* Should never hit this */ + ASSERT(0); + return NULL; + } + + return ret_ptr; +} /* dhd_prot_get_ring_space */ + +#ifdef AGG_H2D_DB + +static void +dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + unsigned long flags_bus; + +#ifdef DHD_FAKE_TX_STATUS + /* if fake tx status is enabled, we should not update + * dongle side rd/wr index for the tx flowring + * and also should not ring the doorbell + */ + if (DHD_IS_FLOWRING(ring->idx, max_flowrings)) { + return; + } +#endif /* DHD_FAKE_TX_STATUS */ + + DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus); + + /* cache flush */ + OSL_CACHE_FLUSH(p, ring->item_len * nitems); + + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_DMA_INDX_WR_UPD, ring->idx); + } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_IFRM_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + 
sizeof(uint16), RING_WR_UPD, ring->idx); + } + + DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus); +} + +static void +dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db) +{ + dhd_prot_t *prot = dhd->prot; + flow_ring_table_t *flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + uint32 db_index; + uint corerev; + + if (ring_db == TRUE) { + dhd_msgbuf_agg_h2d_db_timer_cancel(dhd); + prot->agg_h2d_db_info.direct_db_cnt++; + /* raise h2d interrupt */ + if (IDMA_ACTIVE(dhd) || (IFRM_ACTIVE(dhd))) { + db_index = IDMA_IDX0; + /* this api is called in wl down path..in that case sih is freed already */ + if (dhd->bus->sih) { + corerev = dhd->bus->sih->buscorerev; + /* We need to explictly configure the type of DMA for + * core rev >= 24 + */ + if (corerev >= 24) { + db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT); + } + } + prot->mb_2_ring_fn(dhd->bus, db_index, TRUE); + } else { + prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring)); + } + } else { + dhd_msgbuf_agg_h2d_db_timer_start(prot); + } +} + +#endif /* AGG_H2D_DB */ + +/** + * dhd_prot_ring_write_complete - Host updates the new WR index on producing + * new messages in a H2D ring. The messages are flushed from cache prior to + * posting the new WR index. The new WR index will be updated in the DMA index + * array or directly in the dongle's ring state memory. + * A PCIE doorbell will be generated to wake up the dongle. + * This is a non-atomic function, make sure the callers + * always hold appropriate locks. + */ +static void +BCMFASTPATH(__dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + dhd_prot_t *prot = dhd->prot; + uint32 db_index; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + uint corerev; + + /* cache flush */ + OSL_CACHE_FLUSH(p, ring->item_len * nitems); + + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_DMA_INDX_WR_UPD, ring->idx); + } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_IFRM_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + } + + /* raise h2d interrupt */ + if (IDMA_ACTIVE(dhd) || + (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) { + db_index = IDMA_IDX0; + /* this api is called in wl down path..in that case sih is freed already */ + if (dhd->bus->sih) { + corerev = dhd->bus->sih->buscorerev; + /* We need to explictly configure the type of DMA for core rev >= 24 */ + if (corerev >= 24) { + db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT); + } + } + prot->mb_2_ring_fn(dhd->bus, db_index, TRUE); + } else { + prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring)); + } +} + +static void +BCMFASTPATH(dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + unsigned long flags_bus; + DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus); + __dhd_prot_ring_write_complete(dhd, ring, p, nitems); + DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus); +} + +static void +BCMFASTPATH(dhd_prot_ring_doorbell)(dhd_pub_t *dhd, uint32 value) +{ + unsigned long flags_bus; + DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus); + dhd->prot->mb_ring_fn(dhd->bus, value); + 
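+	/* 'value' is a caller-encoded doorbell payload, e.g. the
+	 * DHD_DMA_INDX_SEQ_H2D_DB_MAGIC value rung by dhd_prot_save_dmaidx()
+	 */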
DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus); +} + +/** + * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg, + * which will hold DHD_BUS_LP_STATE_LOCK to update WR pointer, Ring DB and also update + * bus_low_power_state to indicate D3_INFORM sent in the same BUS_LP_STATE_LOCK. + */ +static void +BCMFASTPATH(dhd_prot_ring_write_complete_mbdata)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p, + uint16 nitems, uint32 mb_data) +{ + unsigned long flags_bus; + + DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus); + + __dhd_prot_ring_write_complete(dhd, ring, p, nitems); + + /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */ + if (mb_data == H2D_HOST_D3_INFORM) { + __DHD_SET_BUS_LPS_D3_INFORMED(dhd->bus); + } + + DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus); +} + +/** + * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages + * from a D2H ring. The new RD index will be updated in the DMA Index array or + * directly in dongle's ring state memory. + */ +static void +dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) +{ + dhd_prot_t *prot = dhd->prot; + uint32 db_index; + uint corerev; + + /* update read index */ + /* If dma'ing h2d indices supported + * update the r -indices in the + * host memory o/w in TCM + */ + if (IDMA_ACTIVE(dhd)) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); + db_index = IDMA_IDX1; + if (dhd->bus->sih) { + corerev = dhd->bus->sih->buscorerev; + /* We need to explictly configure the type of DMA for core rev >= 24 */ + if (corerev >= 24) { + db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT); + } + } + prot->mb_2_ring_fn(dhd->bus, db_index, FALSE); + } else if (dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + } +} + +static int +dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, + uint16 ring_type, uint32 req_id) +{ + unsigned long flags; + d2h_ring_create_req_t *d2h_ring; + uint16 alloced = 0; + int ret = BCME_OK; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; + msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn; + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__)); + + if (ring_to_create == NULL) { + DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + goto err; + } + + /* Request for ring buffer space */ + d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd, + ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + + if (d2h_ring == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n", + __FUNCTION__)); + ret = BCME_NOMEM; + goto err; + } + ring_to_create->create_req_id = (uint16)req_id; + ring_to_create->create_pending = TRUE; + + /* Common msg buf hdr */ + d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE; + d2h_ring->msg.if_id = 0; + d2h_ring->msg.flags = ctrl_ring->current_phase; + d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id); + d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings)); + DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, 
d2h_ring->ring_id, + ring_to_create->idx, max_h2d_rings)); + + d2h_ring->ring_type = ring_type; + d2h_ring->max_items = htol16(ring_to_create->max_items); + d2h_ring->len_item = htol16(ring_to_create->item_len); + d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; + d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; + + d2h_ring->flags = 0; + d2h_ring->msg.epoch = + ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + +#ifdef EWP_EDL + if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) { + DHD_ERROR(("%s: sending d2h EDL ring create: " + "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n", + __FUNCTION__, ltoh16(d2h_ring->max_items), + ltoh16(d2h_ring->len_item), + ltoh16(d2h_ring->ring_id), + d2h_ring->ring_ptr.low_addr, + d2h_ring->ring_ptr.high_addr)); + } +#endif /* EWP_EDL */ + + /* Update the flow_ring's WRITE index */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + + return ret; +err: + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return ret; +} + +static int +dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id) +{ + unsigned long flags; + h2d_ring_create_req_t *h2d_ring; + uint16 alloced = 0; + uint8 i = 0; + int ret = BCME_OK; + msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn; + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__)); + + if (ring_to_create == NULL) { + DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + goto err; + } + + /* Request for ring buffer space */ + h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd, + ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + + if (h2d_ring == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n", + __FUNCTION__)); + ret = BCME_NOMEM; + goto err; + } + ring_to_create->create_req_id = (uint16)id; + ring_to_create->create_pending = TRUE; + + /* Common msg buf hdr */ + h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE; + h2d_ring->msg.if_id = 0; + h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id); + h2d_ring->msg.flags = ctrl_ring->current_phase; + h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx)); + h2d_ring->ring_type = ring_type; + h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM); + h2d_ring->n_completion_ids = ring_to_create->n_completion_ids; + h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE); + h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; + h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; + + for (i = 0; i < ring_to_create->n_completion_ids; i++) { + h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]); + } + + h2d_ring->flags = 0; + h2d_ring->msg.epoch = + ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update the flow_ring's WRITE index */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef 
PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return ret; +err: + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return ret; +} + +/** + * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array. + * Dongle will DMA the entire array (if DMA_INDX feature is enabled). + * See dhd_prot_dma_indx_init() + */ +void +dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); + break; + + case H2D_IFRM_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va); + offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + /* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */ + *(uint16*)ptr = htol16(new_index); + + OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, new_index, type, ringid, ptr, offset)); + +} /* dhd_prot_dma_indx_set */ + +/** + * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index + * array. + * Dongle DMAes an entire array to host memory (if the feature is enabled). + * See dhd_prot_dma_indx_init() + */ +static uint16 +dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 data; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case H2D_DMA_INDX_RD_UPD: +#ifdef DHD_DMA_INDICES_SEQNUM + if (prot->h2d_dma_indx_rd_copy_buf) { + ptr = (uint8 *)(prot->h2d_dma_indx_rd_copy_buf); + } else +#endif /* DHD_DMA_INDICES_SEQNUM */ + { + ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va); + } + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_WR_UPD: +#ifdef DHD_DMA_INDICES_SEQNUM + if (prot->d2h_dma_indx_wr_copy_buf) { + ptr = (uint8 *)(prot->d2h_dma_indx_wr_copy_buf); + } else +#endif /* DHD_DMA_INDICES_SEQNUM */ + { + ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va); + } + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return 0; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + OSL_CACHE_INV((void *)ptr, prot->rw_index_sz); + + /* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */ + data = LTOH16(*((uint16*)ptr)); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, data, type, ringid, ptr, offset)); + + return (data); + +} /* dhd_prot_dma_indx_get */ + +#ifdef DHD_DMA_INDICES_SEQNUM +void +dhd_prot_write_host_seqnum(dhd_pub_t *dhd, uint32 seq_num) 
+{
+	uint8 *ptr;
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Update host sequence number in first four bytes of scratchbuf */
+	ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
+	*(uint32*)ptr = htol32(seq_num);
+	OSL_CACHE_FLUSH((void *)ptr, prot->d2h_dma_scratch_buf.len);
+
+	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, seq_num, ptr));
+
+} /* dhd_prot_write_host_seqnum */
+
+uint32
+dhd_prot_read_seqnum(dhd_pub_t *dhd, bool host)
+{
+	uint8 *ptr;
+	dhd_prot_t *prot = dhd->prot;
+	uint32 data;
+
+	/* Invalidate the scratch buffer before reading either sequence number */
+	OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, prot->d2h_dma_scratch_buf.len);
+
+	/* First four bytes of scratchbuf contain the host sequence number.
+	 * Next four bytes of scratchbuf contain the dongle sequence number.
+	 */
+	if (host) {
+		ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
+		data = LTOH32(*((uint32*)ptr));
+	} else {
+		ptr = ((uint8 *)(prot->d2h_dma_scratch_buf.va) + sizeof(uint32));
+		data = LTOH32(*((uint32*)ptr));
+	}
+	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, data, ptr));
+	return data;
+} /* dhd_prot_read_seqnum */
+
+void
+dhd_prot_save_dmaidx(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint32 dngl_seqnum;
+
+	dngl_seqnum = dhd_prot_read_seqnum(dhd, FALSE);
+
+	DHD_TRACE(("%s: host_seqnum %u dngl_seqnum %u\n", __FUNCTION__,
+		prot->host_seqnum, dngl_seqnum));
+	if (prot->d2h_dma_indx_wr_copy_buf && prot->h2d_dma_indx_rd_copy_buf) {
+		if (prot->host_seqnum == dngl_seqnum) {
+			memcpy_s(prot->d2h_dma_indx_wr_copy_buf, prot->d2h_dma_indx_wr_copy_bufsz,
+				prot->d2h_dma_indx_wr_buf.va, prot->d2h_dma_indx_wr_copy_bufsz);
+			memcpy_s(prot->h2d_dma_indx_rd_copy_buf, prot->h2d_dma_indx_rd_copy_bufsz,
+				prot->h2d_dma_indx_rd_buf.va, prot->h2d_dma_indx_rd_copy_bufsz);
+			dhd_prot_write_host_seqnum(dhd, prot->host_seqnum);
+			/* Ring DoorBell */
+			dhd_prot_ring_doorbell(dhd, DHD_DMA_INDX_SEQ_H2D_DB_MAGIC);
+			prot->host_seqnum++;
+			prot->host_seqnum %= D2H_EPOCH_MODULO;
+		}
+	}
+}
+
+int
+dhd_prot_dma_indx_copybuf_init(dhd_pub_t *dhd, uint32 buf_sz, uint8 type)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	switch (type) {
+		case D2H_DMA_INDX_WR_BUF:
+			prot->d2h_dma_indx_wr_copy_buf = MALLOCZ(dhd->osh, buf_sz);
+			if (prot->d2h_dma_indx_wr_copy_buf == NULL) {
+				DHD_ERROR(("%s: MALLOC failed for size %d\n",
+					__FUNCTION__, buf_sz));
+				goto ret_no_mem;
+			}
+			prot->d2h_dma_indx_wr_copy_bufsz = buf_sz;
+			break;
+
+		case H2D_DMA_INDX_RD_BUF:
+			prot->h2d_dma_indx_rd_copy_buf = MALLOCZ(dhd->osh, buf_sz);
+			if (prot->h2d_dma_indx_rd_copy_buf == NULL) {
+				DHD_ERROR(("%s: MALLOC failed for size %d\n",
+					__FUNCTION__, buf_sz));
+				goto ret_no_mem;
+			}
+			prot->h2d_dma_indx_rd_copy_bufsz = buf_sz;
+			break;
+
+		default:
+			break;
+	}
+	return BCME_OK;
+ret_no_mem:
+	return BCME_NOMEM;
+
+}
+#endif /* DHD_DMA_INDICES_SEQNUM */
+
+/**
+ * An array of DMA read/write indices, containing information about host rings, can be maintained
+ * either in host memory or in device memory, depending on preprocessor options. Depending on
+ * these options, this function is called during driver initialization. It reserves and
+ * initializes blocks of DMA'able host memory containing an array of DMA read or DMA write
+ * indices. The physical addresses of these host memory blocks are communicated to the dongle
+ * later on. By reading this host memory, the dongle learns about the state of the host rings.
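+ *
+ * Layout sketch (illustrative, assuming a 2-byte rw_index_sz):
+ *
+ *   h2d_dma_indx_wr_buf.va: [ring0 WR][ring1 WR][ring2 WR]...
+ *   d2h_dma_indx_rd_buf.va: [ring0 RD][ring1 RD][ring2 RD]...
+ *
+ * dhd_prot_dma_indx_set() above writes one entry at (offset * rw_index_sz)
+ * and cache-flushes it; dhd_prot_dma_indx_get() invalidates before reading.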
+ */
+
+static INLINE int
+dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+	dhd_dma_buf_t *dma_buf, uint32 bufsz)
+{
+	int rc;
+
+	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
+		return BCME_OK;
+
+	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
+
+	return rc;
+}
+
+int
+dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
+{
+	uint32 bufsz;
+	dhd_prot_t *prot = dhd->prot;
+	dhd_dma_buf_t *dma_buf;
+
+	if (prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+
+	/* Dongle advertises 2B or 4B RW index size */
+	ASSERT(rw_index_sz != 0);
+	prot->rw_index_sz = rw_index_sz;
+
+	bufsz = rw_index_sz * length;
+
+	switch (type) {
+		case H2D_DMA_INDX_WR_BUF:
+			dma_buf = &prot->h2d_dma_indx_wr_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case H2D_DMA_INDX_RD_BUF:
+			dma_buf = &prot->h2d_dma_indx_rd_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case D2H_DMA_INDX_WR_BUF:
+			dma_buf = &prot->d2h_dma_indx_wr_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case D2H_DMA_INDX_RD_BUF:
+			dma_buf = &prot->d2h_dma_indx_rd_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case H2D_IFRM_INDX_WR_BUF:
+			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		default:
+			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
+			return BCME_BADOPTION;
+	}
+
+	return BCME_OK;
+
+ret_no_mem:
+	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
+		__FUNCTION__, type, bufsz));
+	return BCME_NOMEM;
+
+} /* dhd_prot_dma_indx_init */
+
+/**
+ * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
+ * from, or NULL if there are no more messages to read.
+ */
+static uint8*
+dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
+{
+	uint16 wr;
+	uint16 rd;
+	uint16 depth;
+	uint16 items;
+	void  *read_addr = NULL; /* address of next msg to be read in ring */
+	uint16 d2h_wr = 0;
+
+	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
+		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
+		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
+
+	/* Remember the read index in a variable. This is because ring->rd
+	 * gets updated at the end of this function, so the exact read index
+	 * from which the message was read would otherwise not be available
+	 * for printing.
+	 */
+	ring->curr_rd = ring->rd;
+
+	/* update write pointer */
+	if (dhd->dma_d2h_ring_upd_support) {
+		/* DMAing write/read indices supported */
+		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+		ring->wr = d2h_wr;
+	} else {
+		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
+	}
+
+	wr = ring->wr;
+	rd = ring->rd;
+	depth = ring->max_items;
+
+	/* check for avail space, in number of ring items */
+	items = READ_AVAIL_SPACE(wr, rd, depth);
+	if (items == 0)
+		return NULL;
+
+	/*
+	 * Note that there are builds where Assert translates to just printk, so
+	 * even if we had hit this condition we would never halt, and
+	 * dhd_prot_process_msgtype can get into a big loop if this happens.
+	 */
+	if (items > ring->max_items) {
+		DHD_ERROR(("\r\n======================= \r\n"));
+		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
+			__FUNCTION__, ring, ring->name, ring->max_items, items));
+		DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
+		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
+			dhd->busstate, dhd->bus->wait_for_d3_ack));
+		DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+		if (wr >= ring->max_items) {
+			dhd->bus->read_shm_fail = TRUE;
+		}
+#else
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
+			dhd_bus_mem_dump(dhd);
+
+		}
+#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+		*available_len = 0;
+		dhd_schedule_reset(dhd);
+
+		return NULL;
+	}
+
+	/* if space is available, calculate address to be read */
+	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
+
+	/* update read pointer */
+	if ((ring->rd + items) >= ring->max_items)
+		ring->rd = 0;
+	else
+		ring->rd += items;
+
+	ASSERT(ring->rd < ring->max_items);
+
+	/* convert items to bytes : available_len must be 32bits */
+	*available_len = (uint32)(items * ring->item_len);
+
+	/* XXX Double cache invalidate for ARM with L2 cache/prefetch */
+	OSL_CACHE_INV(read_addr, *available_len);
+
+	/* return read address */
+	return read_addr;
+
+} /* dhd_prot_get_read_addr */
+
+/**
+ * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
+ * make sure the callers always hold appropriate locks.
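+ *
+ * Minimal (hypothetical) use, with locking handled as the caller requires:
+ *
+ *   dhd_prot_h2d_mbdata_send_ctrlmsg(dhd, H2D_HOST_D3_INFORM);
+ *
+ * H2D_HOST_D3_INFORM is special-cased downstream so that the bus is marked
+ * D3-informed in the same lock section that updates WR and rings the doorbell.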
+ */ +int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data) +{ + h2d_mailbox_data_t *h2d_mb_data; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn; + unsigned long flags; + int num_post = 1; + int i; + + DHD_MSGBUF_INFO(("%s Sending H2D MB data Req data 0x%04x\n", + __FUNCTION__, mb_data)); + if (!ctrl_ring->inited) { + DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + +#ifdef PCIE_INB_DW + if ((INBAND_DW_ENAB(dhd->bus)) && + (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) == + DW_DEVICE_DS_DEV_SLEEP)) { + if (mb_data == H2D_HOST_CONS_INT) { + /* One additional device_wake post needed */ + num_post = 2; + } + } +#endif /* PCIE_INB_DW */ + + for (i = 0; i < num_post; i ++) { + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + /* Request for ring buffer space */ + h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd, + ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + + if (h2d_mb_data == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n", + __FUNCTION__)); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + return BCME_NOMEM; + } + + memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t)); + /* Common msg buf hdr */ + h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA; + h2d_mb_data->msg.flags = ctrl_ring->current_phase; + + h2d_mb_data->msg.epoch = + ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update flow create message */ + h2d_mb_data->mail_box_data = htol32(mb_data); +#ifdef PCIE_INB_DW + /* post device_wake first */ + if ((num_post == 2) && (i == 0)) { + h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE); + } else +#endif /* PCIE_INB_DW */ + { + h2d_mb_data->mail_box_data = htol32(mb_data); + } + + DHD_MSGBUF_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data)); + + /* upd wrt ptr and raise interrupt */ + dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + /* Add a delay if device_wake is posted */ + if ((num_post == 2) && (i == 0)) { + OSL_DELAY(1000); + } +#endif /* PCIE_INB_DW */ + } + return 0; +} + +/** Creates a flow ring and informs dongle of this event */ +int +dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_create_request_t *flow_create_rqst; + msgbuf_ring_t *flow_ring; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); + if (flow_ring == NULL) { + DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_NOMEM; + } + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + /* Request for ctrl_ring buffer space */ + flow_create_rqst = (tx_flowring_create_request_t *) + dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); + + if (flow_create_rqst == NULL) { + dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); + DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", + __FUNCTION__, 
flow_ring_node->flowid)); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_NOMEM; + } + + flow_ring_node->prot_info = (void *)flow_ring; + + /* Common msg buf hdr */ + flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; + flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_create_rqst->msg.request_id = htol32(0); /* TBD */ + flow_create_rqst->msg.flags = ctrl_ring->current_phase; + + flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update flow create message */ + flow_create_rqst->tid = flow_ring_node->flow_info.tid; + flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); + memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); + /* CAUTION: ring::base_addr already in Little Endian */ + flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr; + flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; + flow_create_rqst->max_items = htol16(flow_ring->max_items); + flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); + flow_create_rqst->if_flags = 0; + +#ifdef DHD_HP2P + /* Create HPP flow ring if HP2P is enabled and TID=7 and AWDL interface */ + /* and traffic is not multicast */ + /* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */ + if (dhd->hp2p_capable && dhd->hp2p_ring_more && + flow_ring_node->flow_info.tid == HP2P_PRIO && + (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) && + !ETHER_ISMULTI(flow_create_rqst->da)) { + flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P; + flow_ring_node->hp2p_ring = TRUE; + /* Allow multiple HP2P Flow if mf override is enabled */ + if (!dhd->hp2p_mf_enable) { + dhd->hp2p_ring_more = FALSE; + } + + DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n", + __FUNCTION__, flow_ring_node->flow_info.tid, + flow_ring_node->flowid)); + } +#endif /* DHD_HP2P */ + + /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core + * currently it is not used for priority. 
so uses solely for ifrm mask + */ + if (IFRM_ACTIVE(dhd)) + flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0); + + DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG + " prio %d ifindex %d items %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex, flow_ring->max_items)); + + /* Update the flow_ring's WRITE index */ + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_IFRM_INDX_WR_UPD, flow_ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), + sizeof(uint16), RING_WR_UPD, flow_ring->idx); + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_OK; +} /* dhd_prot_flow_ring_create */ + +/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */ +static void +dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg; + + DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__, + ltoh16(flow_create_resp->cmplt.status), + ltoh16(flow_create_resp->cmplt.flow_ring_id))); + + dhd_bus_flow_ring_create_response(dhd->bus, + ltoh16(flow_create_resp->cmplt.flow_ring_id), + ltoh16(flow_create_resp->cmplt.status)); +} + +#if !defined(BCM_ROUTER_DHD) +static void +dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf) +{ + h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf; + DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__, + ltoh16(resp->cmplt.status), + ltoh16(resp->cmplt.ring_id), + ltoh32(resp->cmn_hdr.request_id))); + if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) && + (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) { + DHD_ERROR(("invalid request ID with h2d ring create complete\n")); + return; + } + if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) && + !dhd->prot->h2dring_info_subn->create_pending) { + DHD_ERROR(("info ring create status for not pending submit ring\n")); + } +#ifdef BTLOG + if (dhd->prot->h2dring_btlog_subn && + dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) && + !dhd->prot->h2dring_btlog_subn->create_pending) { + DHD_ERROR(("btlog ring create status for not pending submit ring\n")); + } +#endif /* BTLOG */ + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("info/btlog ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) { + dhd->prot->h2dring_info_subn->create_pending = FALSE; + dhd->prot->h2dring_info_subn->inited = TRUE; + DHD_ERROR(("info buffer post after ring create\n")); + dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn); + } +#ifdef BTLOG + if (dhd->prot->h2dring_btlog_subn && + dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) { + dhd->prot->h2dring_btlog_subn->create_pending = FALSE; + 
dhd->prot->h2dring_btlog_subn->inited = TRUE; + DHD_ERROR(("btlog buffer post after ring create\n")); + dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn); + } +#endif /* BTLOG */ +} +#endif /* !BCM_ROUTER_DHD */ + +static void +dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf) +{ + d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf; + DHD_ERROR(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__, + ltoh16(resp->cmplt.status), + ltoh16(resp->cmplt.ring_id), + ltoh32(resp->cmn_hdr.request_id))); + if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) && + (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) && +#ifdef DHD_HP2P + (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) && + (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) && +#endif /* DHD_HP2P */ + TRUE) { + DHD_ERROR(("invalid request ID with d2h ring create complete\n")); + return; + } + if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) { +#ifdef EWP_EDL + if (!dhd->dongle_edl_support) +#endif + { + + if (!dhd->prot->d2hring_info_cpln->create_pending) { + DHD_ERROR(("info ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("info cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_info_cpln->create_pending = FALSE; + dhd->prot->d2hring_info_cpln->inited = TRUE; + } +#ifdef EWP_EDL + else { + if (!dhd->prot->d2hring_edl->create_pending) { + DHD_ERROR(("edl ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("edl cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_edl->create_pending = FALSE; + dhd->prot->d2hring_edl->inited = TRUE; + } +#endif /* EWP_EDL */ + } + +#ifdef BTLOG + if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_BTLOGRING_REQ_PKTID) { + if (!dhd->prot->d2hring_btlog_cpln->create_pending) { + DHD_ERROR(("btlog ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("btlog cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_btlog_cpln->create_pending = FALSE; + dhd->prot->d2hring_btlog_cpln->inited = TRUE; + } +#endif /* BTLOG */ +#ifdef DHD_HP2P + if (dhd->prot->d2hring_hp2p_txcpl && + ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) { + if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) { + DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("HPP tx cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE; + dhd->prot->d2hring_hp2p_txcpl->inited = TRUE; + } + if (dhd->prot->d2hring_hp2p_rxcpl && + ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) { + if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) { + DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("HPP rx cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE; + dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE; + } 
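+	/* Each branch above follows the same pattern: match the request_id,
+	 * require create_pending, check cmplt.status, then mark the ring inited.
+	 */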
+#endif /* DHD_HP2P */
+}
+
+static void
+dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
+{
+	d2h_mailbox_data_t *d2h_data;
+
+	d2h_data = (d2h_mailbox_data_t *)buf;
+	DHD_MSGBUF_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
+		d2h_data->d2h_mailbox_data));
+	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
+}
+
+static void
+dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
+{
+#ifdef DHD_TIMESYNC
+	host_timestamp_msg_cpl_t *host_ts_cpl;
+	uint32 pktid;
+	dhd_prot_t *prot = dhd->prot;
+
+	host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
+	DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
+		host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
+
+	pktid = ltoh32(host_ts_cpl->msg.request_id);
+	if (prot->hostts_req_buf_inuse == FALSE) {
+		DHD_ERROR(("No Pending Host TS req, but completion\n"));
+		return;
+	}
+	prot->hostts_req_buf_inuse = FALSE;
+	if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
+		DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
+			pktid, DHD_H2D_HOSTTS_REQ_PKTID));
+		return;
+	}
+	dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
+		host_ts_cpl->cmplt.status);
+#else /* DHD_TIMESYNC */
+	DHD_ERROR(("Timesync feature not compiled in but GOT HOST_TS_COMPLETE\n"));
+#endif /* DHD_TIMESYNC */
+
+}
+
+/** called on e.g. flow ring delete */
+void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
+{
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	dhd_prot_ring_detach(dhd, flow_ring);
+	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
+}
+
+void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, bool h2d,
+	struct bcmstrbuf *strbuf, const char * fmt)
+{
+	const char *default_fmt =
+		"TRD:%d HLRD:%d HDRD:%d TWR:%d HLWR:%d HDWR:%d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
+		"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	uint16 rd, wr, drd = 0, dwr = 0;
+	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
+
+	if (fmt == NULL) {
+		fmt = default_fmt;
+	}
+
+	if (dhd->bus->is_linkdown) {
+		DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
+		return;
+	}
+
+	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+	if (dhd->dma_d2h_ring_upd_support) {
+		if (h2d) {
+			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, flow_ring->idx);
+			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+		} else {
+			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
+			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
+		}
+	}
+	bcm_bprintf(strbuf, fmt, rd, flow_ring->rd, drd, wr, flow_ring->wr, dwr,
+		flow_ring->dma_buf.va,
+		ltoh32(flow_ring->base_addr.high_addr),
+		ltoh32(flow_ring->base_addr.low_addr),
+		flow_ring->item_len, flow_ring->max_items,
+		dma_buf_len);
+}
+
+void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
+		dhd->prot->device_ipc_version,
+		dhd->prot->host_ipc_version,
+		dhd->prot->active_ipc_version);
+
+	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
+		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
+	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
+		dhd->prot->max_infobufpost,
dhd->prot->infobufpost); +#ifdef BTLOG + bcm_bprintf(strbuf, "max BTLOG bufs to post: %d, \t posted %d \n", + dhd->prot->max_btlogbufpost, dhd->prot->btlogbufpost); +#endif /* BTLOG */ + bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n", + dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted); + bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n", + dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted); + bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n", + dhd->prot->max_rxbufpost, dhd->prot->rxbufpost); + + bcm_bprintf(strbuf, "Total RX bufs posted: %d, \t RX cpl got %d \n", + dhd->prot->tot_rxbufpost, dhd->prot->tot_rxcpl); + + bcm_bprintf(strbuf, "Total TX packets: %lu, \t TX cpl got %lu \n", + dhd->actual_tx_pkts, dhd->tot_txcpl); + + bcm_bprintf(strbuf, + "%14s %18s %18s %17s %17s %14s %14s %10s\n", + "Type", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)", + "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE"); + bcm_bprintf(strbuf, "%14s", "H2DCtrlPost"); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, TRUE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, FALSE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "H2DRxPost"); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, TRUE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HRxCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, FALSE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HTxCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, FALSE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) { + bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub"); + dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, TRUE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl"); + dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, FALSE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + } + if (dhd->prot->d2hring_edl != NULL) { + bcm_bprintf(strbuf, "%14s", "D2HRingEDL"); + dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, FALSE, strbuf, + " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n"); + } + + bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n", + OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count), + DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map), + DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map), + DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)); + +#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) + dhd_prot_ioctl_dump(dhd->prot, strbuf); +#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */ +#ifdef DHD_MMIO_TRACE + dhd_dump_bus_mmio_trace(dhd->bus, strbuf); +#endif /* DHD_MMIO_TRACE */ + dhd_dump_bus_ds_trace(dhd->bus, strbuf); +#ifdef DHD_FLOW_RING_STATUS_TRACE + dhd_dump_bus_flow_ring_status_isr_trace(dhd->bus, strbuf); + dhd_dump_bus_flow_ring_status_dpc_trace(dhd->bus, strbuf); +#endif /* DHD_FLOW_RING_STATUS_TRACE */ +} + +int +dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_delete_request_t *flow_delete_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; 
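+	/* like all control messages, the delete request is submitted over the
+	 * H2D control submission ring
+	 */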
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Request for ring buffer space */ + flow_delete_rqst = (tx_flowring_delete_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (flow_delete_rqst == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__)); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; + flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_delete_rqst->msg.request_id = htol32(0); /* TBD */ + flow_delete_rqst->msg.flags = ring->current_phase; + + flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update Delete info */ + flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_delete_rqst->reason = htol16(BCME_OK); + + DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer %pM" + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_OK; +} + +static void +BCMFASTPATH(dhd_prot_flow_ring_fastdelete)(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx) +{ + flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid); + msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + host_txbuf_cmpl_t txstatus; + host_txbuf_post_t *txdesc; + uint16 wr_idx; + + DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n", + __FUNCTION__, flowid, rd_idx, ring->wr)); + + memset(&txstatus, 0, sizeof(txstatus)); + txstatus.compl_hdr.flow_ring_id = flowid; + txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex; + wr_idx = ring->wr; + + while (wr_idx != rd_idx) { + if (wr_idx) + wr_idx--; + else + wr_idx = ring->max_items - 1; + txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) + + (wr_idx * ring->item_len)); + txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id; + dhd_prot_txstatus_process(dhd, &txstatus); + } +} + +static void +dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg; + + DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__, + flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id)); + + if (dhd->fast_delete_ring_support) { + dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id, + flow_delete_resp->read_idx); + } + dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, + flow_delete_resp->cmplt.status); +} + +static void +dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg) +{ +#ifdef IDLE_TX_FLOW_MGMT + tx_idle_flowring_resume_response_t *flow_resume_resp = + (tx_idle_flowring_resume_response_t *)msg; + + DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__, + flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id)); + + 
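/* Hand the resume status to the bus layer, which drives the flow ring state machine */ + 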
dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
+		flow_resume_resp->cmplt.status);
+#endif /* IDLE_TX_FLOW_MGMT */
+}
+
+static void
+dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
+{
+#ifdef IDLE_TX_FLOW_MGMT
+	int16 status;
+	tx_idle_flowring_suspend_response_t *flow_suspend_resp =
+		(tx_idle_flowring_suspend_response_t *)msg;
+	status = flow_suspend_resp->cmplt.status;
+
+	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
+		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
+		status));
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Error in Suspending Flow rings!!"
+			" Dongle will still be polling idle rings!! Status = %d\n",
+			__FUNCTION__, status));
+	}
+#endif /* IDLE_TX_FLOW_MGMT */
+}
+
+int
+dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_flush_request_t *flow_flush_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_RING_LOCK(ring->ring_lock, flags);
+
+	/* Request for ring buffer space */
+	flow_flush_rqst = (tx_flowring_flush_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+	if (flow_flush_rqst == NULL) {
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
+	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
+	flow_flush_rqst->msg.flags = ring->current_phase;
+	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	flow_flush_rqst->reason = htol16(BCME_OK);
+
+	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
+
+	DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+	return BCME_OK;
+} /* dhd_prot_flow_ring_flush */
+
+static void
+dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
+{
+	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
+
+	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
+		flow_flush_resp->cmplt.status));
+
+	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
+		flow_flush_resp->cmplt.status);
+}
+
+/**
+ * Request dongle to configure soft doorbells for D2H rings. Host-populated soft
+ * doorbell information is transferred to dongle via the d2h ring config control
+ * message.
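+ * One ring_config_req_t work item is written per D2H common completion ring.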
+ */
+void
+dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
+{
+#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
+	uint16 ring_idx;
+	uint8 *msg_next;
+	void *msg_start;
+	uint16 alloced = 0;
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	ring_config_req_t *ring_config_req;
+	bcmpcie_soft_doorbell_t *soft_doorbell;
+	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return;
+#endif /* PCIE_INB_DW */
+	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
+	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
+
+	if (msg_start == NULL) {
+		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
+			__FUNCTION__, d2h_rings));
+		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return;
+	}
+
+	msg_next = (uint8*)msg_start;
+
+	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
+
+		/* position the ring_config_req into the ctrl subm ring */
+		ring_config_req = (ring_config_req_t *)msg_next;
+
+		/* Common msg header */
+		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
+		ring_config_req->msg.if_id = 0;
+		ring_config_req->msg.flags = 0;
+
+		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+		ctrl_ring->seqnum++;
+
+		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
+
+		/* Ring Config subtype and d2h ring_id */
+		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
+		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
+
+		/* Host soft doorbell configuration */
+		soft_doorbell = &prot->soft_doorbell[ring_idx];
+
+		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
+		ring_config_req->soft_doorbell.haddr.high =
+			htol32(soft_doorbell->haddr.high);
+		ring_config_req->soft_doorbell.haddr.low =
+			htol32(soft_doorbell->haddr.low);
+		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
+		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
+
+		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
+			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
+			ring_config_req->soft_doorbell.haddr.low,
+			ring_config_req->soft_doorbell.value));
+
+		msg_next = msg_next + ctrl_ring->item_len;
+	}
+
+	/* update control subn ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
+
+	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
+}
+
+static void
+dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
+{
+	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
+		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
+		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
+}
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+void
+copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
+{
+	uint32 *ext_data = dhd->extended_trap_data;
+	hnd_ext_trap_hdr_t *hdr;
+	const bcm_tlv_t *tlv;
+
+	if (ext_data == NULL) {
+		return;
+	}
+	/* First word is original trap_data */
+	ext_data++;
+
+	/* Followed by the extended trap data header */
+	hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+	tlv = bcm_parse_tlvs(hdr->data,
hdr->len, TAG_TRAP_SIGNATURE); + if (tlv) { + memcpy(tr, &tlv->data, sizeof(struct _trap_struct)); + } +} +#define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)} + +typedef struct { + char name[HANG_INFO_TRAP_T_NAME_MAX]; + uint32 offset; +} hang_info_trap_t; + +#ifdef DHD_EWPR_VER2 +static hang_info_trap_t hang_info_trap_tbl[] = { + {"reason", 0}, + {"ver", VENDOR_SEND_HANG_EXT_INFO_VER}, + {"stype", 0}, + TRAP_T_NAME_OFFSET(type), + TRAP_T_NAME_OFFSET(epc), + {"resrvd", 0}, + {"resrvd", 0}, + {"resrvd", 0}, + {"resrvd", 0}, + {"", 0} +}; +#else +static hang_info_trap_t hang_info_trap_tbl[] = { + {"reason", 0}, + {"ver", VENDOR_SEND_HANG_EXT_INFO_VER}, + {"stype", 0}, + TRAP_T_NAME_OFFSET(type), + TRAP_T_NAME_OFFSET(epc), + TRAP_T_NAME_OFFSET(cpsr), + TRAP_T_NAME_OFFSET(spsr), + TRAP_T_NAME_OFFSET(r0), + TRAP_T_NAME_OFFSET(r1), + TRAP_T_NAME_OFFSET(r2), + TRAP_T_NAME_OFFSET(r3), + TRAP_T_NAME_OFFSET(r4), + TRAP_T_NAME_OFFSET(r5), + TRAP_T_NAME_OFFSET(r6), + TRAP_T_NAME_OFFSET(r7), + TRAP_T_NAME_OFFSET(r8), + TRAP_T_NAME_OFFSET(r9), + TRAP_T_NAME_OFFSET(r10), + TRAP_T_NAME_OFFSET(r11), + TRAP_T_NAME_OFFSET(r12), + TRAP_T_NAME_OFFSET(r13), + TRAP_T_NAME_OFFSET(r14), + TRAP_T_NAME_OFFSET(pc), + {"", 0} +}; +#endif /* DHD_EWPR_VER2 */ + +#define TAG_TRAP_IS_STATE(tag) \ + ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \ + (tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \ + (tag == TAG_TRAP_CODE)) + +static void +copy_hang_info_head(char *dest, trap_t *src, int len, int field_name, + int *bytes_written, int *cnt, char *cookie) +{ + uint8 *ptr; + int remain_len; + int i; + + ptr = (uint8 *)src; + + memset(dest, 0, len); + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + + /* hang reason, hang info ver */ + for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX); + i++, (*cnt)++) { + if (field_name) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c", + hang_info_trap_tbl[i].name, HANG_KEY_DEL); + } + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c", + hang_info_trap_tbl[i].offset, HANG_KEY_DEL); + + } + + if (*cnt < HANG_FIELD_CNT_MAX) { + if (field_name) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c", + "cookie", HANG_KEY_DEL); + } + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c", + cookie, HANG_KEY_DEL); + (*cnt)++; + } + + if (*cnt < HANG_FIELD_CNT_MAX) { + if (field_name) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c", + hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name, + HANG_KEY_DEL); + } + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c", + hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset, + HANG_KEY_DEL); + (*cnt)++; + } + + if (*cnt < HANG_FIELD_CNT_MAX) { + if (field_name) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c", + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name, + HANG_KEY_DEL); + } + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += 
scnprintf(&dest[*bytes_written], remain_len, "%08x%c", + *(uint32 *) + (ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset), + HANG_KEY_DEL); + (*cnt)++; + } +#ifdef DHD_EWPR_VER2 + /* put 0 for HG03 ~ HG06 (reserved for future use) */ + for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX); + i++, (*cnt)++) { + if (field_name) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c", + hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name, + HANG_KEY_DEL); + } + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c", + hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset, + HANG_KEY_DEL); + } +#endif /* DHD_EWPR_VER2 */ +} +#ifndef DHD_EWPR_VER2 +static void +copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name, + int *bytes_written, int *cnt, char *cookie) +{ + uint8 *ptr; + int remain_len; + int i; + + ptr = (uint8 *)src; + + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + + for (i = HANG_INFO_TRAP_T_OFFSET_IDX; + (hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX); + i++, (*cnt)++) { + if (field_name) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:", + HANG_RAW_DEL, hang_info_trap_tbl[i].name); + } + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x", + HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset)); + } +} + +static void +copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt) +{ + int remain_len; + int i = 0; + const uint32 *stack; + uint32 *ext_data = dhd->extended_trap_data; + hnd_ext_trap_hdr_t *hdr; + const bcm_tlv_t *tlv; + int remain_stack_cnt = 0; + uint32 dummy_data = 0; + int bigdata_key_stack_cnt = 0; + + if (ext_data == NULL) { + return; + } + /* First word is original trap_data */ + ext_data++; + + /* Followed by the extended trap data header */ + hdr = (hnd_ext_trap_hdr_t *)ext_data; + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK); + + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + if (tlv) { + stack = (const uint32 *)tlv->data; + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, + "%08x", *(uint32 *)(stack++)); + (*cnt)++; + if (*cnt >= HANG_FIELD_CNT_MAX) { + return; + } + for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + /* Raw data for bigdata use '_' and Key data for bigdata use space */ + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, + "%c%08x", + i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? 
HANG_KEY_DEL : HANG_RAW_DEL,
+				*(uint32 *)(stack++));
+
+			(*cnt)++;
+			if ((*cnt >= HANG_FIELD_CNT_MAX) ||
+				(i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
+				return;
+			}
+		}
+	}
+
+	remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
+
+	for (i = 0; i < remain_stack_cnt; i++) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+			HANG_RAW_DEL, dummy_data);
+		(*cnt)++;
+		if (*cnt >= HANG_FIELD_CNT_MAX) {
+			return;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+
+}
+
+static void
+copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+	int remain_len;
+	int i;
+	const uint32 *data;
+	uint32 *ext_data = dhd->extended_trap_data;
+	hnd_ext_trap_hdr_t *hdr;
+	const bcm_tlv_t *tlv;
+	int remain_trap_data = 0;
+	uint8 buf_u8[sizeof(uint32)] = { 0, };
+	const uint8 *p_u8;
+
+	if (ext_data == NULL) {
+		return;
+	}
+	/* First word is original trap_data */
+	ext_data++;
+
+	/* Followed by the extended trap data header */
+	hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+	if (tlv) {
+		/* header length includes the tlv header */
+		remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
+	}
+
+	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+	if (tlv) {
+		/* header length includes the tlv header */
+		remain_trap_data -= (tlv->len + sizeof(uint16));
+	}
+
+	data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
+
+	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+	for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
+		i++, (*cnt)++) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+			HANG_RAW_DEL, *(uint32 *)(data++));
+		GCC_DIAGNOSTIC_POP();
+	}
+
+	if (*cnt >= HANG_FIELD_CNT_MAX) {
+		return;
+	}
+
+	remain_trap_data -= (sizeof(uint32) * i);
+
+	if (remain_trap_data > sizeof(buf_u8)) {
+		DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
+		remain_trap_data = sizeof(buf_u8);
+	}
+
+	if (remain_trap_data) {
+		p_u8 = (const uint8 *)data;
+		for (i = 0; i < remain_trap_data; i++) {
+			buf_u8[i] = *(const uint8 *)(p_u8++);
+		}
+
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+			HANG_RAW_DEL, ltoh32_ua(buf_u8));
+		(*cnt)++;
+	}
+}
+#endif /* DHD_EWPR_VER2 */
+
+static void
+get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
+{
+	uint32 i;
+	uint32 *ext_data = dhd->extended_trap_data;
+	hnd_ext_trap_hdr_t *hdr;
+	const bcm_tlv_t *tlv;
+
+	/* First word is original trap_data */
+	ext_data++;
+
+	/* Followed by the extended trap data header */
+	hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+	/* Report the first trap tag that is not a state dump as the trap subtype */
+	for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
+		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
+		if (tlv) {
+			if (!TAG_TRAP_IS_STATE(i)) {
+				*subtype = i;
+				return;
+			}
+		}
+	}
+}
+#ifdef DHD_EWPR_VER2
+static void
+copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+	int remain_len;
+	uint32 *ext_data = dhd->extended_trap_data;
+	hnd_ext_trap_hdr_t *hdr;
+	char *base64_out = NULL;
+	int base64_cnt;
+	int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
+
+	if (ext_data == NULL) {
+		return;
+	}
+	/* First word is original trap_data */
+	ext_data++;
+
+	/* Followed by the extended trap data
header */ + hdr = (hnd_ext_trap_hdr_t *)ext_data; + + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + + if (remain_len <= 0) { + DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__)); + return; + } + + if (remain_len < max_base64_len) { + DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__, + remain_len)); + max_base64_len = remain_len; + } + + base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE); + if (base64_out == NULL) { + DHD_ERROR(("%s: MALLOC failed for size %d\n", + __FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE)); + return; + } + + if (hdr->len > 0) { + base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len); + if (base64_cnt == 0) { + DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__)); + } + } + + *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s", + base64_out); + (*cnt)++; + MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE); +} +#endif /* DHD_EWPR_VER2 */ + +void +copy_hang_info_trap(dhd_pub_t *dhd) +{ + trap_t tr; + int bytes_written; + int trap_subtype = 0; + + if (!dhd || !dhd->hang_info) { + DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__, + dhd, (dhd ? dhd->hang_info : NULL))); + return; + } + + if (!dhd->dongle_trap_occured) { + DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__)); + return; + } + + memset(&tr, 0x00, sizeof(struct _trap_struct)); + + copy_ext_trap_sig(dhd, &tr); + get_hang_info_trap_subtype(dhd, &trap_subtype); + + hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP; + hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype; + + bytes_written = 0; + dhd->hang_info_cnt = 0; + get_debug_dump_time(dhd->debug_dump_time_hang_str); + copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str); + + copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE, + &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str); + + DHD_INFO(("hang info head cnt: %d len: %d data: %s\n", + dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info)); + + clear_debug_dump_time(dhd->debug_dump_time_hang_str); + +#ifdef DHD_EWPR_VER2 + /* stack info & trap info are included in etd data */ + + /* extended trap data dump */ + if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) { + copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt); + DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n", + dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info)); + } +#else + if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) { + copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt); + DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n", + dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info)); + } + + if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) { + copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE, + &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str); + DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n", + dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info)); + } + + if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) { + copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt); + DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n", + dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info)); + } +#endif /* DHD_EWPR_VER2 */ +} + +void +copy_hang_info_linkdown(dhd_pub_t *dhd) +{ + int bytes_written = 0; + int remain_len; 
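+	/* The link-down hang info string carries: hang reason code, EWP version,
+	 * a time stamp cookie, and finally a dump of the PCIe RC registers.
+	 */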
+
+	if (!dhd || !dhd->hang_info) {
+		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
+			dhd, (dhd ? dhd->hang_info : NULL)));
+		return;
+	}
+
+	if (!dhd->bus->is_linkdown) {
+		DHD_ERROR(("%s: link down has not happened\n", __FUNCTION__));
+		return;
+	}
+
+	dhd->hang_info_cnt = 0;
+
+	get_debug_dump_time(dhd->debug_dump_time_hang_str);
+	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
+
+	/* hang reason code (0x8808) */
+	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
+			HANG_REASON_PCIE_LINK_DOWN_EP_DETECT, HANG_KEY_DEL);
+		dhd->hang_info_cnt++;
+	}
+
+	/* EWP version */
+	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
+			VENDOR_SEND_HANG_EXT_INFO_VER, HANG_KEY_DEL);
+		dhd->hang_info_cnt++;
+	}
+
+	/* cookie - dump time stamp */
+	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%s%c",
+			dhd->debug_dump_time_hang_str, HANG_KEY_DEL);
+		dhd->hang_info_cnt++;
+	}
+
+	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+
+	/* dump PCIE RC registers */
+	dhd_dump_pcie_rc_regs_for_linkdown(dhd, &bytes_written);
+
+	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
+		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+int
+dhd_prot_debug_info_print(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring;
+	uint16 rd, wr, drd, dwr;
+	uint32 dma_buf_len;
+	uint64 current_time;
+	ulong ring_tcm_rd_addr; /* dongle address */
+	ulong ring_tcm_wr_addr; /* dongle address */
+
+	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
+	DHD_ERROR(("DHD: %s\n", dhd_version));
+	DHD_ERROR(("Firmware: %s\n", fw_version));
+
+#ifdef DHD_FW_COREDUMP
+	DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
+	DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
+#endif /* DHD_FW_COREDUMP */
+
+	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
+	DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
+		prot->device_ipc_version,
+		prot->host_ipc_version,
+		prot->active_ipc_version));
+	DHD_ERROR(("d2h_intr_method -> %s d2h_intr_control -> %s\n",
+		dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX",
+		dhd->bus->d2h_intr_control ?
"HOST_IRQ" : "D2H_INTMASK")); + DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n", + prot->max_tsbufpost, prot->cur_ts_bufs_posted)); + DHD_ERROR(("max INFO bufs to post: %d, posted %d\n", + prot->max_infobufpost, prot->infobufpost)); + DHD_ERROR(("max event bufs to post: %d, posted %d\n", + prot->max_eventbufpost, prot->cur_event_bufs_posted)); + DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n", + prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted)); + DHD_ERROR(("max RX bufs to post: %d, posted %d\n", + prot->max_rxbufpost, prot->rxbufpost)); + DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", + h2d_max_txpost, prot->h2d_max_txpost)); +#if defined(DHD_HTPUT_TUNABLES) + DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", + h2d_htput_max_txpost, prot->h2d_htput_max_txpost)); +#endif /* DHD_HTPUT_TUNABLES */ + + current_time = OSL_LOCALTIME_NS(); + DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time))); + DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT + " ioctl_ack_time="SEC_USEC_FMT + " ioctl_cmplt_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(prot->ioctl_fillup_time), + GET_SEC_USEC(prot->ioctl_ack_time), + GET_SEC_USEC(prot->ioctl_cmplt_time))); + + /* Check PCIe INT registers */ + if (!dhd_pcie_dump_int_regs(dhd)) { + DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); + dhd->bus->is_linkdown = TRUE; + } + + DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); + + ring = &prot->h2dring_ctrl_subn; + dma_buf_len = ring->max_items * ring->item_len; + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len)); + DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx); + DHD_ERROR(("CtrlPost: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + + ring = &prot->d2hring_ctrl_cpln; + dma_buf_len = ring->max_items * ring->item_len; + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len)); + DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + DHD_ERROR(("CtrlCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("CtrlCpl: From Shared Mem: RD and 
WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + + ring = prot->h2dring_info_subn; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); + DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx); + DHD_ERROR(("InfoSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + } + ring = prot->d2hring_info_cpln; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); + DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + DHD_ERROR(("InfoCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO)); + } +#ifdef EWP_EDL + ring = prot->d2hring_edl; + if (ring) { + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); + DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, 
ring->idx); + DHD_ERROR(("EdlRing: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("EdlRing: Expected seq num: %d \r\n", + ring->seqnum % D2H_EPOCH_MODULO)); + } +#endif /* EWP_EDL */ + + ring = &prot->d2hring_tx_cpln; + if (ring) { + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); + DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + DHD_ERROR(("TxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO)); + } + + ring = &prot->d2hring_rx_cpln; + if (ring) { + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); + DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + DHD_ERROR(("RxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO)); + } + + ring = &prot->h2dring_rxp_subn; + if (ring) { + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("RxSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); + DHD_ERROR(("RxSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + 
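/* RD and WR indices are also mirrored in host DMA memory when DMA index updates are enabled */ + 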
if (dhd->dma_d2h_ring_upd_support) { + drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx); + DHD_ERROR(("RxSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr)); + } + if (dhd->bus->is_linkdown) { + DHD_ERROR(("RxSub: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("RxSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("RxSub: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO)); + } + + DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n", + __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted)); +#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS + DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n", + __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings)); +#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */ + + DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt)); + DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt)); + DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt)); + dhd_pcie_debug_info_dump(dhd); +#ifdef DHD_LB_STATS + DHD_ERROR(("\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n", + dhd->lb_rxp_stop_thr_hitcnt, dhd->lb_rxp_strt_thr_hitcnt)); + DHD_ERROR(("\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n", + dhd->lb_rxp_napi_sched_cnt, dhd->lb_rxp_napi_complete_cnt)); +#endif /* DHD_LB_STATS */ +#ifdef DHD_TIMESYNC + dhd_timesync_debug_info_print(dhd); +#endif /* DHD_TIMESYNC */ + return 0; +} + +int +dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + uint32 *ptr; + uint32 value; + + if (dhd->prot->d2h_dma_indx_wr_buf.va) { + uint32 i; + uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus); + + OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va, + dhd->prot->d2h_dma_indx_wr_buf.len); + + ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va); + + bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues); + + bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%4p\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); + + ptr++; + bcm_bprintf(b, "RPTR block Flow rings , 0x%4p\n", ptr); + for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { + value = ltoh32(*ptr); + bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); + ptr++; + } + } + + if (dhd->prot->h2d_dma_indx_rd_buf.va) { + OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va, + dhd->prot->h2d_dma_indx_rd_buf.len); + + ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va); + + bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%4p\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); + } + + return 0; +} + +uint32 +dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val) +{ + dhd_prot_t *prot = dhd->prot; +#if DHD_DBG_SHOW_METADATA + prot->metadata_dbg = val; +#endif + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadata_dbg_get(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + return (uint32)prot->metadata_dbg; +} + 
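+/** Set the rx/tx metadata offset; returns the value now in effect */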
+uint32 +dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) +{ +#if !(defined(BCM_ROUTER_DHD)) + dhd_prot_t *prot = dhd->prot; + if (rx) + prot->rx_metadata_offset = (uint16)val; + else + prot->tx_metadata_offset = (uint16)val; +#endif /* ! BCM_ROUTER_DHD */ + return dhd_prot_metadatalen_get(dhd, rx); +} + +uint32 +dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + return prot->rx_metadata_offset; + else + return prot->tx_metadata_offset; +} + +/** optimization to write "n" tx items at a time to ring */ +uint32 +dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + dhd_prot_t *prot = dhd->prot; + if (set) + prot->txp_threshold = (uint16)val; + val = prot->txp_threshold; + return val; +} + +#ifdef DHD_RX_CHAINING + +static INLINE void +BCMFASTPATH(dhd_rxchain_reset)(rxchain_info_t *rxchain) +{ + rxchain->pkt_count = 0; +} + +static void +BCMFASTPATH(dhd_rxchain_frame)(dhd_pub_t *dhd, void *pkt, uint ifidx) +{ + uint8 *eh; + uint8 prio; + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + ASSERT(!PKTISCHAINED(pkt)); + ASSERT(PKTCLINK(pkt) == NULL); + ASSERT(PKTCGETATTR(pkt) == 0); + + eh = PKTDATA(dhd->osh, pkt); + prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; + + if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, + rxchain->h_da, rxchain->h_prio))) { + /* Different flow - First release the existing chain */ + dhd_rxchain_commit(dhd); + } + + /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ + /* so that the chain can be handed off to CTF bridge as is. */ + if (rxchain->pkt_count == 0) { + /* First packet in chain */ + rxchain->pkthead = rxchain->pkttail = pkt; + + /* Keep a copy of ptr to ether_da, ether_sa and prio */ + rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; + rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; + rxchain->h_prio = prio; + rxchain->ifidx = ifidx; + rxchain->pkt_count++; + } else { + /* Same flow - keep chaining */ + PKTSETCLINK(rxchain->pkttail, pkt); + rxchain->pkttail = pkt; + rxchain->pkt_count++; + } + + if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) && + ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || + (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { + PKTSETCHAINED(dhd->osh, pkt); + PKTCINCRCNT(rxchain->pkthead); + PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); + } else { + dhd_rxchain_commit(dhd); + return; + } + + /* If we have hit the max chain length, dispatch the chain and reset */ + if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { + dhd_rxchain_commit(dhd); + } +} + +static void +BCMFASTPATH(dhd_rxchain_commit)(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + if (rxchain->pkt_count == 0) + return; + + /* Release the packets to dhd_linux */ + dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); + + /* Reset the chain */ + dhd_rxchain_reset(rxchain); +} + +#endif /* DHD_RX_CHAINING */ + +#ifdef IDLE_TX_FLOW_MGMT +int +dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_idle_flowring_resume_request_t *flow_resume_rqst; + msgbuf_ring_t *flow_ring; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = 
dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
+	if (flow_ring == NULL) {
+		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
+			__FUNCTION__, flow_ring_node->flowid));
+		return BCME_NOMEM;
+	}
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+	/* Request for ctrl_ring buffer space */
+	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
+
+	if (flow_resume_rqst == NULL) {
+		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
+		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
+			__FUNCTION__, flow_ring_node->flowid));
+		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	flow_ring_node->prot_info = (void *)flow_ring;
+
+	/* Common msg buf hdr */
+	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
+	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
+
+	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+	ctrl_ring->seqnum++;
+
+	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
+		__FUNCTION__, flow_ring_node->flowid));
+
+	/* Update the flow_ring's WRITE index */
+	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+			H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
+		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+			H2D_IFRM_INDX_WR_UPD,
+			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
+	} else {
+		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
+			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
+	}
+
+	/* update control subn ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
+
+	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+	return BCME_OK;
+} /* dhd_prot_flow_ring_resume */
+
+int
+dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
+{
+	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 index;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_RING_LOCK(ring->ring_lock, flags);
+
+	/* Request for ring buffer space */
+	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (flow_suspend_rqst == NULL) {
+		DHD_RING_UNLOCK(ring->ring_lock, flags);
+		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
+	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
+	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
+
+	flow_suspend_rqst->msg.epoch = ring->seqnum %
H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update flow id info */ + for (index = 0; index < count; index++) + { + flow_suspend_rqst->ring_id[index] = ringid[index]; + } + flow_suspend_rqst->num = count; + + DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + + return BCME_OK; +} +#endif /* IDLE_TX_FLOW_MGMT */ + +#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) +static void +dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len) +{ + struct dhd_prot *prot = dhd->prot; + uint32 cnt = prot->ioctl_trace_count % MAX_IOCTL_TRACE_SIZE; + + prot->ioctl_trace[cnt].cmd = ioct_rqst->cmd; + prot->ioctl_trace[cnt].transid = ioct_rqst->trans_id; + if ((ioct_rqst->cmd == 262 || ioct_rqst->cmd == 263) && buf) + memcpy(prot->ioctl_trace[cnt].ioctl_buf, buf, + len > MAX_IOCTL_BUF_SIZE ? MAX_IOCTL_BUF_SIZE : len); + else + memset(prot->ioctl_trace[cnt].ioctl_buf, 0, MAX_IOCTL_BUF_SIZE); + prot->ioctl_trace[cnt].timestamp = OSL_SYSUPTIME_US(); + prot->ioctl_trace_count ++; +} + +static void +dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf) +{ + int dumpsz; + int i; + + dumpsz = prot->ioctl_trace_count < MAX_IOCTL_TRACE_SIZE ? + prot->ioctl_trace_count : MAX_IOCTL_TRACE_SIZE; + if (dumpsz == 0) { + bcm_bprintf(strbuf, "\nEmpty IOCTL TRACE\n"); + return; + } + bcm_bprintf(strbuf, "----------- IOCTL TRACE --------------\n"); + bcm_bprintf(strbuf, "Timestamp us\t\tCMD\tTransID\tIOVAR\n"); + for (i = 0; i < dumpsz; i ++) { + bcm_bprintf(strbuf, "%llu\t%d\t%d\t%s\n", + prot->ioctl_trace[i].timestamp, + prot->ioctl_trace[i].cmd, + prot->ioctl_trace[i].transid, + prot->ioctl_trace[i].ioctl_buf); + } +} +#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */ + +static void dump_psmwd_v1(const bcm_tlv_t *tlv, struct bcmstrbuf *b) +{ + const hnd_ext_trap_psmwd_v1_t* psmwd = NULL; + uint32 i; + psmwd = (const hnd_ext_trap_psmwd_v1_t *)tlv; + for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1; i++) { + bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]); + } + bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8); + bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406); + bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408); + bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a); + bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c); + bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424); + bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426); + bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456); + bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480); + bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500); + bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e); + bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e); + bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566); + bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690); + bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692); + bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694); + bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0); + bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490); + bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838); + bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0); + bcm_bprintf(b, " 
shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt); + bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt); + bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt); + +} + +static void dump_psmwd_v2(const bcm_tlv_t *tlv, struct bcmstrbuf *b) +{ + const hnd_ext_trap_psmwd_t* psmwd = NULL; + uint32 i; + psmwd = (const hnd_ext_trap_psmwd_t *)tlv; + for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2; i++) { + bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]); + } + + bcm_bprintf(b, " psm_brwk0: 0x%x\n", psmwd->i16_0x4b8); + bcm_bprintf(b, " psm_brwk1: 0x%x\n", psmwd->i16_0x4ba); + bcm_bprintf(b, " psm_brwk2: 0x%x\n", psmwd->i16_0x4bc); + bcm_bprintf(b, " psm_brwk3: 0x%x\n", psmwd->i16_0x4be); + bcm_bprintf(b, " PSM BRC_1: 0x%x\n", psmwd->i16_0x4da); + bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8); + bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406); + bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408); + bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a); + bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c); + bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424); + bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426); + bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456); + bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480); + bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500); + bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e); + bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e); + bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566); + bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690); + bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692); + bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694); + bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0); + bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490); + bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838); + bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0); + bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt); + bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt); + bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt); +} + +static const char* etd_trap_name(hnd_ext_tag_trap_t tag) +{ + switch (tag) { + case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE"; + case TAG_TRAP_STACK: return "TAG_TRAP_STACK"; + case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY"; + case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP"; + case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD"; + case TAG_TRAP_PHY: return "TAG_TRAP_PHY"; + case TAG_TRAP_BUS: return "TAG_TRAP_BUS"; + case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP"; + case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE"; + case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q"; + case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE"; + case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE"; + case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP"; + case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH"; + case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA"; + case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA"; + case TAG_TRAP_CODE: return "TAG_TRAP_CODE"; + case TAG_TRAP_MEM_BIT_FLIP: return "TAG_TRAP_MEM_BIT_FLIP"; + case TAG_TRAP_LAST: + default: + return "Unknown"; + } + return "Unknown"; +} + +int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw) +{ + uint32 i; + uint32 *ext_data; + hnd_ext_trap_hdr_t *hdr; + const bcm_tlv_t *tlv; + const trap_t *tr; + const uint32 
*stack; + const hnd_ext_trap_bp_err_t *bpe; + uint32 raw_len; + + ext_data = dhdp->extended_trap_data; + + /* return if there is no extended trap data */ + if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) { + bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data); + return BCME_OK; + } + + bcm_bprintf(b, "Extended trap data\n"); + + /* First word is original trap_data */ + bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data); + ext_data++; + + /* Followed by the extended trap data header */ + hdr = (hnd_ext_trap_hdr_t *)ext_data; + bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len); + + /* Dump a list of all tags found before parsing data */ + bcm_bprintf(b, "\nTags Found:\n"); + for (i = 0; i < TAG_TRAP_LAST; i++) { + tlv = bcm_parse_tlvs(hdr->data, hdr->len, i); + if (tlv) + bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len); + } + + /* XXX debug dump */ + if (raw) { + raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0); + for (i = 0; i < raw_len; i++) + { + bcm_bprintf(b, "0x%08x ", ext_data[i]); + if (i % 4 == 3) + bcm_bprintf(b, "\n"); + } + return BCME_OK; + } + + /* Extract the various supported TLVs from the extended trap data */ + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE); + if (tlv) { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len); + bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE); + if (tlv) { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len); + tr = (const trap_t *)tlv->data; + + bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n", + tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr); + bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n", + tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6); + bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n", + tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK); + if (tlv) { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len); + stack = (const uint32 *)tlv->data; + for (i = 0; i < (uint32)(tlv->len / 4); i++) + { + bcm_bprintf(b, " 0x%08x\n", *stack); + stack++; + } + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE); + if (tlv) { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len); + bpe = (const hnd_ext_trap_bp_err_t *)tlv->data; + bcm_bprintf(b, " error: %x\n", bpe->error); + bcm_bprintf(b, " coreid: %x\n", bpe->coreid); + bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr); + bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl); + bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus); + bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl); + bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus); + bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl); + bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone); + bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus); + bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo); + bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi); + bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid); + bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser); + bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY); + if (tlv) { + const hnd_ext_trap_heap_err_t* hme; + + bcm_bprintf(b, "\n%s len: %d\n", 
etd_trap_name(TAG_TRAP_MEMORY), tlv->len); + hme = (const hnd_ext_trap_heap_err_t *)tlv->data; + bcm_bprintf(b, " arena total: %d\n", hme->arena_total); + bcm_bprintf(b, " heap free: %d\n", hme->heap_free); + bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse); + bcm_bprintf(b, " mf count: %d\n", hme->mf_count); + bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm); + + bcm_bprintf(b, " Histogram:\n"); + for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) { + if (hme->heap_histogm[i] == 0xfffe) + bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]); + else if (hme->heap_histogm[i] == 0xffff) + bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]); + else + bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2, + hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2) + * hme->heap_histogm[i + 1]); + } + + bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2); + for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) { + bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2); + } + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q); + if (tlv) { + const hnd_ext_trap_pcie_mem_err_t* pqme; + + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len); + pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data; + bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len); + bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE); + if (tlv) { + const hnd_ext_trap_wlc_mem_err_t* wsme; + + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len); + wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data; + bcm_bprintf(b, " instance: %d\n", wsme->instance); + bcm_bprintf(b, " associated: %d\n", wsme->associated); + bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt); + bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt); + bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]); + bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]); + bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]); + bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]); + + if (tlv->len >= (sizeof(*wsme) * 2)) { + wsme++; + bcm_bprintf(b, "\n instance: %d\n", wsme->instance); + bcm_bprintf(b, " associated: %d\n", wsme->associated); + bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt); + bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt); + bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]); + bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]); + bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]); + bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]); + } + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY); + if (tlv) { + const hnd_ext_trap_phydbg_t* phydbg; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len); + phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data; + bcm_bprintf(b, " err: 0x%x\n", phydbg->err); + bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus); + bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0); + bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1); + bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode); + bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0); + bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1); + bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl); + bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1); 
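+ /* remaining PHY state: OCL control, Tx error counters, Tx control/SIG words, packet lengths and GPIO debug registers */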
+ bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1); + bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError); + bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError); + bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError); + bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0); + bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1); + bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2); + bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0); + bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1); + bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10); + bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11); + bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20); + bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21); + bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength); + bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr); + bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl); + bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel); + bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug); + for (i = 0; i < 3; i++) + bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD); + if (tlv) { + const hnd_ext_trap_psmwd_t* psmwd; + + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len); + psmwd = (const hnd_ext_trap_psmwd_t *)tlv->data; + bcm_bprintf(b, " version: 0x%x\n", psmwd->version); + bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol); + bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand); + bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus); + bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug); + bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st); + if (psmwd->version == 1) { + dump_psmwd_v1(tlv, b); + } + if (psmwd->version == 2) { + dump_psmwd_v2(tlv, b); + } + } +/* PHY TxErr MacDump */ + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHYTXERR_THRESH); + if (tlv) { + const hnd_ext_trap_macphytxerr_t* phytxerr = NULL; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHYTXERR_THRESH), tlv->len); + phytxerr = (const hnd_ext_trap_macphytxerr_t *)tlv->data; + bcm_bprintf(b, " version: 0x%x\n", phytxerr->version); + bcm_bprintf(b, " trap_reason: %d\n", phytxerr->trap_reason); + bcm_bprintf(b, " Tsf_rx_ts_0x63E: 0x%x\n", phytxerr->i16_0x63E); + bcm_bprintf(b, " Tsf_tx_ts_0x640: 0x%x\n", phytxerr->i16_0x640); + bcm_bprintf(b, " tsf_tmr_rx_end_ts_0x642: 0x%x\n", phytxerr->i16_0x642); + bcm_bprintf(b, " TDC_FrmLen0_0x846: 0x%x\n", phytxerr->i16_0x846); + bcm_bprintf(b, " TDC_FrmLen1_0x848: 0x%x\n", phytxerr->i16_0x848); + bcm_bprintf(b, " TDC_Txtime_0x84a: 0x%x\n", phytxerr->i16_0x84a); + bcm_bprintf(b, " TXE_BytCntInTxFrmLo_0xa5a: 0x%x\n", phytxerr->i16_0xa5a); + bcm_bprintf(b, " TXE_BytCntInTxFrmHi_0xa5c: 0x%x\n", phytxerr->i16_0xa5c); + bcm_bprintf(b, " TDC_VhtPsduLen0_0x856: 0x%x\n", phytxerr->i16_0x856); + bcm_bprintf(b, " TDC_VhtPsduLen1_0x858: 0x%x\n", phytxerr->i16_0x858); + bcm_bprintf(b, " PSM_BRC: 0x%x\n", phytxerr->i16_0x490); + bcm_bprintf(b, " PSM_BRC_1: 0x%x\n", phytxerr->i16_0x4d8); + bcm_bprintf(b, " shm_txerr_reason: 0x%x\n", phytxerr->shm_txerr_reason); + bcm_bprintf(b, " shm_pctl0: 0x%x\n", phytxerr->shm_pctl0); + bcm_bprintf(b, " shm_pctl1: 0x%x\n", phytxerr->shm_pctl1); + bcm_bprintf(b, " shm_pctl2: 0x%x\n", phytxerr->shm_pctl2); + bcm_bprintf(b, " shm_lsig0: 0x%x\n", phytxerr->shm_lsig0); + bcm_bprintf(b, " 
shm_lsig1: 0x%x\n", phytxerr->shm_lsig1); + bcm_bprintf(b, " shm_plcp0: 0x%x\n", phytxerr->shm_plcp0); + bcm_bprintf(b, " shm_plcp1: 0x%x\n", phytxerr->shm_plcp1); + bcm_bprintf(b, " shm_plcp2: 0x%x\n", phytxerr->shm_plcp2); + bcm_bprintf(b, " shm_vht_sigb0: 0x%x\n", phytxerr->shm_vht_sigb0); + bcm_bprintf(b, " shm_vht_sigb1: 0x%x\n", phytxerr->shm_vht_sigb1); + bcm_bprintf(b, " shm_tx_tst: 0x%x\n", phytxerr->shm_tx_tst); + bcm_bprintf(b, " shm_txerr_tm: 0x%x\n", phytxerr->shm_txerr_tm); + bcm_bprintf(b, " shm_curchannel: 0x%x\n", phytxerr->shm_curchannel); + bcm_bprintf(b, " shm_blk_crx_rxtsf_pos: 0x%x\n", phytxerr->shm_crx_rxtsf_pos); + bcm_bprintf(b, " shm_lasttx_tsf: 0x%x\n", phytxerr->shm_lasttx_tsf); + bcm_bprintf(b, " shm_s_rxtsftmrval: 0x%x\n", phytxerr->shm_s_rxtsftmrval); + bcm_bprintf(b, " Phy_0x29: 0x%x\n", phytxerr->i16_0x29); + bcm_bprintf(b, " Phy_0x2a: 0x%x\n", phytxerr->i16_0x2a); + } + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP); + if (tlv) { + const hnd_ext_trap_macsusp_t* macsusp; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len); + macsusp = (const hnd_ext_trap_macsusp_t *)tlv->data; + bcm_bprintf(b, " version: %d\n", macsusp->version); + bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason); + bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol); + bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand); + bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus); + for (i = 0; i < 4; i++) + bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]); + for (i = 0; i < 8; i++) + bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]); + bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a); + bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c); + bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490); + bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e); + bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e); + bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566); + bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690); + bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692); + bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694); + bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0); + bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838); + bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880); + bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt); + bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE); + if (tlv) { + const hnd_ext_trap_macenab_t* macwake; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len); + macwake = (const hnd_ext_trap_macenab_t *)tlv->data; + bcm_bprintf(b, " version: 0x%x\n", macwake->version); + bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason); + bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol); + bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand); + bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus); + for (i = 0; i < 8; i++) + bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]); + bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st); + bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl); + bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8); + bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480); + bcm_bprintf(b, " PSM BRC: 0x%x\n", 
macwake->i16_0x490); + bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600); + bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690); + bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692); + bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0); + bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6); + bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8); + bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa); + bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS); + if (tlv) { + const bcm_dngl_pcie_hc_t* hc; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len); + hc = (const bcm_dngl_pcie_hc_t *)tlv->data; + bcm_bprintf(b, " version: 0x%x\n", hc->version); + bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved); + bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type); + bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag); + bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg); + for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++) + bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP); + if (tlv) { + const pcie_hmapviolation_t* hmap; + hmap = (const pcie_hmapviolation_t *)tlv->data; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len); + bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo); + bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi); + bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEM_BIT_FLIP); + if (tlv) { + const hnd_ext_trap_fb_mem_err_t* fbit; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEM_BIT_FLIP), tlv->len); + fbit = (const hnd_ext_trap_fb_mem_err_t *)tlv->data; + bcm_bprintf(b, " version: %d\n", fbit->version); + bcm_bprintf(b, " flip_bit_err_time: %d\n", fbit->flip_bit_err_time); + } + + return BCME_OK; +} + +#ifdef BCMPCIE +int +dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len, + uint16 seqnum, uint16 xt_id) +{ + dhd_prot_t *prot = dhdp->prot; + host_timestamp_msg_t *ts_req; + unsigned long flags; + uint16 alloced = 0; + uchar *ts_tlv_buf; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + if ((tlvs == NULL) || (tlv_len == 0)) { + DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n", + __FUNCTION__, tlvs, tlv_len)); + return -1; + } + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + /* if Host TS req already pending go away */ + if (prot->hostts_req_buf_inuse == TRUE) { + DHD_ERROR(("one host TS request already pending at device\n")); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + return -1; + } + + /* Request for cbuf space */ + ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE); + if (ts_req == NULL) { + DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n")); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + return -1; + } + + /* Common msg buf hdr */ + ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP; + ts_req->msg.if_id = 0; 
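+ /* phase bit and epoch (seqnum % H2D_EPOCH_MODULO) let the dongle validate work items on this ring */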
+ ts_req->msg.flags = ctrl_ring->current_phase; + ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID; + + ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + ts_req->xt_id = xt_id; + ts_req->seqnum = seqnum; + /* populate TS req buffer info */ + ts_req->input_data_len = htol16(tlv_len); + ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa)); + ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa)); + /* copy ioctl payload */ + ts_tlv_buf = (void *) prot->hostts_req_buf.va; + prot->hostts_req_buf_inuse = TRUE; + memcpy(ts_tlv_buf, tlvs, tlv_len); + + OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len); + + if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) { + DHD_ERROR(("host TS req buffer address unaligned !!!!! \n")); + } + + DHD_CTL(("submitted Host TS request request_id %d, data_len %d, xt_id %d, seq %d\n", + ts_req->msg.request_id, ts_req->input_data_len, + ts_req->xt_id, ts_req->seqnum)); + + /* upd wrt ptr and raise interrupt */ + dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + return 0; +} /* dhd_prot_send_host_timestamp */ + +bool +dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->tx_ts_log_enabled = enable; + + return dhd->prot->tx_ts_log_enabled; +} + +bool +dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->rx_ts_log_enabled = enable; + + return dhd->prot->rx_ts_log_enabled; +} + +bool +dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->no_retry = enable; + + return dhd->prot->no_retry; +} + +bool +dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->no_aggr = enable; + + return dhd->prot->no_aggr; +} + +bool +dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->fixed_rate = enable; + + return dhd->prot->fixed_rate; +} +#endif /* BCMPCIE */ + +void +dhd_prot_dma_indx_free(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + + dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); +} + +void +dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd) +{ + if (dhd->prot->max_tsbufpost > 0) + dhd_msgbuf_rxbuf_post_ts_bufs(dhd); +} + +static void +BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf) +{ +#ifdef DHD_TIMESYNC + fw_timestamp_event_msg_t *resp; + uint32 pktid; + uint16 buflen, seqnum; + void * pkt; + + resp = (fw_timestamp_event_msg_t *)buf; + pktid = ltoh32(resp->msg.request_id); + buflen = ltoh16(resp->buf_len); + seqnum = ltoh16(resp->seqnum); + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_DUPLICATE_FREE); +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n", + pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum))); + + if (!dhd->prot->cur_ts_bufs_posted) { + DHD_ERROR(("tsbuf posted are zero, but there is a completion\n")); + return; + } + + dhd->prot->cur_ts_bufs_posted--; + + if (!dhd_timesync_delay_post_bufs(dhd)) { + if (dhd->prot->max_tsbufpost > 0) { + dhd_msgbuf_rxbuf_post_ts_bufs(dhd); + } + } + + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE); + + if (!pkt) { + DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid)); 
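+ /* pktid lookup failed; nothing to hand to the timesync layer, so drop this completion */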
+ return; + } + + PKTSETLEN(dhd->osh, pkt, buflen); + dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, pkt, TRUE); +#else + PKTFREE(dhd->osh, pkt, TRUE); +#endif /* DHD_USE_STATIC_CTRLBUF */ +#else /* DHD_TIMESYNC */ + DHD_ERROR(("Timesync feature not compiled in but GOT FW TS message\n")); +#endif /* DHD_TIMESYNC */ + +} + +uint16 +dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp) +{ + return dhdp->prot->ioctl_trans_id; +} + +#ifdef SNAPSHOT_UPLOAD +/* send request to take snapshot */ +int +dhd_prot_send_snapshot_request(dhd_pub_t *dhdp, uint8 snapshot_type, uint8 snapshot_param) +{ + dhd_prot_t *prot = dhdp->prot; + dhd_dma_buf_t *dma_buf = &prot->snapshot_upload_buf; + snapshot_upload_request_msg_t *snap_req; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + /* Request for cbuf space */ + snap_req = (snapshot_upload_request_msg_t *)dhd_prot_alloc_ring_space(dhdp, + ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + if (snap_req == NULL) { + DHD_ERROR(("couldn't allocate space on msgring to send snapshot request\n")); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + return BCME_ERROR; + } + + /* Common msg buf hdr */ + snap_req->cmn_hdr.msg_type = MSG_TYPE_SNAPSHOT_UPLOAD; + snap_req->cmn_hdr.if_id = 0; + snap_req->cmn_hdr.flags = ctrl_ring->current_phase; + snap_req->cmn_hdr.request_id = DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID; + snap_req->cmn_hdr.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* snapshot request msg */ + snap_req->snapshot_buf_len = htol32(dma_buf->len); + snap_req->snapshot_type = snapshot_type; + snap_req->snapshot_param = snapshot_param; + snap_req->host_buf_addr.high = htol32(PHYSADDRHI(dma_buf->pa)); + snap_req->host_buf_addr.low = htol32(PHYSADDRLO(dma_buf->pa)); + + if (ISALIGNED(dma_buf->va, DMA_ALIGN_LEN) == FALSE) { + DHD_ERROR(("snapshot req buffer address unaligned !!!!! 
\n")); + } + + /* clear previous snapshot upload */ + memset(dma_buf->va, 0, dma_buf->len); + prot->snapshot_upload_len = 0; + prot->snapshot_type = snapshot_type; + prot->snapshot_cmpl_pending = TRUE; + + DHD_CTL(("submitted snapshot request request_id %d, buf_len %d, type %d, param %d\n", + snap_req->cmn_hdr.request_id, snap_req->snapshot_buf_len, + snap_req->snapshot_type, snap_req->snapshot_param)); + + /* upd wrt ptr and raise interrupt */ + dhd_prot_ring_write_complete(dhdp, ctrl_ring, snap_req, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + + return BCME_OK; +} /* dhd_prot_send_snapshot_request */ + +/* get uploaded snapshot */ +int +dhd_prot_get_snapshot(dhd_pub_t *dhdp, uint8 snapshot_type, uint32 offset, + uint32 dst_buf_size, uint8 *dst_buf, uint32 *dst_size, bool *is_more) +{ + dhd_prot_t *prot = dhdp->prot; + uint8 *buf = prot->snapshot_upload_buf.va; + uint8 *buf_end = buf + prot->snapshot_upload_len; + uint32 copy_size; + + /* snapshot type must match */ + if (prot->snapshot_type != snapshot_type) { + return BCME_DATA_NOTFOUND; + } + + /* snapshot not completed */ + if (prot->snapshot_cmpl_pending) { + return BCME_NOTREADY; + } + + /* offset within the buffer */ + if (buf + offset >= buf_end) { + return BCME_BADARG; + } + + /* copy dst buf size or remaining size */ + copy_size = MIN(dst_buf_size, buf_end - (buf + offset)); + memcpy(dst_buf, buf + offset, copy_size); + + /* return size and is_more */ + *dst_size = copy_size; + *is_more = (offset + copy_size < prot->snapshot_upload_len) ? + TRUE : FALSE; + return BCME_OK; +} /* dhd_prot_get_snapshot */ + +#endif /* SNAPSHOT_UPLOAD */ + +int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len) +{ + if (!dhd->hscb_enable) { + if (len) { + /* prevent "Operation not supported" dhd message */ + *len = 0; + return BCME_OK; + } + return BCME_UNSUPPORTED; + } + + if (va) { + *va = dhd->prot->host_scb_buf.va; + } + if (len) { + *len = dhd->prot->host_scb_buf.len; + } + + return BCME_OK; +} + +#ifdef DHD_BUS_MEM_ACCESS +int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff) +{ + if (!dhd->hscb_enable) { + return BCME_UNSUPPORTED; + } + + if (dhd->prot->host_scb_buf.va == NULL || + ((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) { + return BCME_BADADDR; + } + + memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length); + + return BCME_OK; +} +#endif /* DHD_BUS_MEM_ACCESS */ + +#ifdef DHD_HP2P +uint32 +dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + if (set) + dhd->pkt_thresh = (uint16)val; + + val = dhd->pkt_thresh; + + return val; +} + +uint32 +dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + if (set) + dhd->time_thresh = (uint16)val; + + val = dhd->time_thresh; + + return val; +} + +uint32 +dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val) +{ + if (set) + dhd->pkt_expiry = (uint16)val; + + val = dhd->pkt_expiry; + + return val; +} + +uint8 +dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable) +{ + uint8 ret = 0; + if (set) { + dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE; + dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE; + + if (enable) { + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP); + } else { + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP); + } + } + ret = dhd->hp2p_infra_enable ? 0x1:0x0; + ret <<= 4; + ret |= dhd->hp2p_enable ? 
0x1:0x0; + + return ret; +} + +static void +dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus) +{ + ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts; + hp2p_info_t *hp2p_info; + uint32 dur1; + + hp2p_info = &dhd->hp2p_info[0]; + dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100; + + if (dur1 > (MAX_RX_HIST_BIN - 1)) { + dur1 = MAX_RX_HIST_BIN - 1; + DHD_INFO(("%s: 0x%x 0x%x\n", + __FUNCTION__, ts->low, ts->high)); + } + + hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++; + return; +} + +static void +dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus) +{ + ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts; + uint16 flowid = txstatus->compl_hdr.flow_ring_id; + uint32 hp2p_flowid, dur1, dur2; + hp2p_info_t *hp2p_info; + + hp2p_flowid = dhd->bus->max_submission_rings - + dhd->bus->max_cmn_rings - flowid + 1; + hp2p_info = &dhd->hp2p_info[hp2p_flowid]; + + dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000; + if (dur1 > (MAX_TX_HIST_BIN - 1)) { + dur1 = MAX_TX_HIST_BIN - 1; + DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high)); + } + hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++; + + dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000; + if (dur2 > (MAX_TX_HIST_BIN - 1)) { + dur2 = MAX_TX_HIST_BIN - 1; + DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high)); + } + + hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++; + return; +} + +enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer) +{ + hp2p_info_t *hp2p_info; + unsigned long flags; + dhd_pub_t *dhdp; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + hp2p_info = container_of(timer, hp2p_info_t, timer); + GCC_DIAGNOSTIC_POP(); + + dhdp = hp2p_info->dhd_pub; + if (!dhdp) { + goto done; + } + + DHD_INFO(("%s: pend_item = %d flowid = %d\n", + __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count, + hp2p_info->flowid)); + + flags = dhd_os_hp2plock(dhdp); + + dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid); + hp2p_info->hrtimer_init = FALSE; + hp2p_info->num_timer_limit++; + + dhd_os_hp2punlock(dhdp, flags); +done: + return HRTIMER_NORESTART; +} + +static void +dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid) +{ + hp2p_info_t *hp2p_info; + uint16 hp2p_flowid; + + hp2p_flowid = dhd->bus->max_submission_rings - + dhd->bus->max_cmn_rings - flowid + 1; + hp2p_info = &dhd->hp2p_info[hp2p_flowid]; + + if (ring->pend_items_count == dhd->pkt_thresh) { + dhd_prot_txdata_write_flush(dhd, flowid); + + hp2p_info->hrtimer_init = FALSE; + hp2p_info->ring = NULL; + hp2p_info->num_pkt_limit++; + hrtimer_cancel(&hp2p_info->timer); + + DHD_INFO(("%s: cancel hrtimer for flowid = %d \n" + "hp2p_flowid = %d pkt_thresh = %d\n", + __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh)); + } else { + if (hp2p_info->hrtimer_init == FALSE) { + hp2p_info->hrtimer_init = TRUE; + hp2p_info->flowid = flowid; + hp2p_info->dhd_pub = dhd; + hp2p_info->ring = ring; + hp2p_info->num_timer_start++; + + hrtimer_start(&hp2p_info->timer, + ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL); + + DHD_INFO(("%s: start hrtimer for flowid = %d hp2p_flowid = %d\n", + __FUNCTION__, flowid, hp2p_flowid)); + } + } + return; +} + +static void +dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc) +{ + uint64 ts; + + ts = local_clock(); + do_div(ts, 1000); + + txdesc->metadata_buf_len = 0; + txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF); + txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF); + txdesc->exp_time = 
dhd->pkt_expiry; + + DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n", + __FUNCTION__, txdesc->metadata_buf_addr.high_addr, + txdesc->metadata_buf_addr.low_addr, + txdesc->exp_time)); + + return; +} +#endif /* DHD_HP2P */ + +#ifdef DHD_MAP_LOGGING +void +dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp) +{ + dhd_prot_debug_info_print(dhdp); + OSL_DMA_MAP_DUMP(dhdp->osh); +#ifdef DHD_MAP_PKTID_LOGGING + dhd_pktid_logging_dump(dhdp); +#endif /* DHD_MAP_PKTID_LOGGING */ +#ifdef DHD_FW_COREDUMP + dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT; +#ifdef DNGL_AXI_ERROR_LOGGING + dhdp->memdump_enabled = DUMP_MEMFILE; + dhd_bus_get_mem_dump(dhdp); +#else + dhdp->memdump_enabled = DUMP_MEMONLY; + dhd_bus_mem_dump(dhdp); +#endif /* DNGL_AXI_ERROR_LOGGING */ +#endif /* DHD_FW_COREDUMP */ +} +#endif /* DHD_MAP_LOGGING */ + +#ifdef DHD_FLOW_RING_STATUS_TRACE +void +dhd_dump_bus_flow_ring_status_trace( + dhd_bus_t *bus, struct bcmstrbuf *strbuf, dhd_frs_trace_t *frs_trace, int dumpsz, char *str) +{ + int i; + dhd_prot_t *prot = bus->dhd->prot; + uint32 isr_cnt = bus->frs_isr_count % FRS_TRACE_SIZE; + uint32 dpc_cnt = bus->frs_dpc_count % FRS_TRACE_SIZE; + + bcm_bprintf(strbuf, "---- %s ------ isr_cnt: %d dpc_cnt %d\n", + str, isr_cnt, dpc_cnt); + bcm_bprintf(strbuf, "%s\t%s\t%s\t%s\t%s\t%s\t", + "Timestamp ns", "H2DCtrlPost", "D2HCtrlCpl", + "H2DRxPost", "D2HRxCpl", "D2HTxCpl"); + if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) { + bcm_bprintf(strbuf, "%s\t%s\t", "H2DRingInfoPost", "D2HRingInfoCpl"); + } + if (prot->d2hring_edl != NULL) { + bcm_bprintf(strbuf, "%s", "D2HRingEDL"); + } + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < dumpsz; i ++) { + bcm_bprintf(strbuf, "%llu\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t", + frs_trace[i].timestamp, + frs_trace[i].h2d_ctrl_post_drd, + frs_trace[i].h2d_ctrl_post_dwr, + frs_trace[i].d2h_ctrl_cpln_drd, + frs_trace[i].d2h_ctrl_cpln_dwr, + frs_trace[i].h2d_rx_post_drd, + frs_trace[i].h2d_rx_post_dwr, + frs_trace[i].d2h_rx_cpln_drd, + frs_trace[i].d2h_rx_cpln_dwr, + frs_trace[i].d2h_tx_cpln_drd, + frs_trace[i].d2h_tx_cpln_dwr); + if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) { + bcm_bprintf(strbuf, "%6u-%u\t%6u-%u\t", + frs_trace[i].h2d_info_post_drd, + frs_trace[i].h2d_info_post_dwr, + frs_trace[i].d2h_info_cpln_drd, + frs_trace[i].d2h_info_cpln_dwr); + } + if (prot->d2hring_edl != NULL) { + bcm_bprintf(strbuf, "%6u-%u", + frs_trace[i].d2h_ring_edl_drd, + frs_trace[i].d2h_ring_edl_dwr); + + } + bcm_bprintf(strbuf, "\n"); + } + bcm_bprintf(strbuf, "--------------------------\n"); +} + +void +dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + int dumpsz; + + dumpsz = bus->frs_isr_count < FRS_TRACE_SIZE ? + bus->frs_isr_count : FRS_TRACE_SIZE; + if (dumpsz == 0) { + bcm_bprintf(strbuf, "\nEMPTY ISR FLOW RING TRACE\n"); + return; + } + dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_isr_trace, + dumpsz, "ISR FLOW RING TRACE DRD-DWR"); +} + +void +dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + int dumpsz; + + dumpsz = bus->frs_dpc_count < FRS_TRACE_SIZE ? 
+ bus->frs_dpc_count : FRS_TRACE_SIZE; + if (dumpsz == 0) { + bcm_bprintf(strbuf, "\nEMPTY DPC FLOW RING TRACE\n"); + return; + } + dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_dpc_trace, + dumpsz, "DPC FLOW RING TRACE DRD-DWR"); +} +static void +dhd_bus_flow_ring_status_trace(dhd_pub_t *dhd, dhd_frs_trace_t *frs_trace) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring; + + ring = &prot->h2dring_ctrl_subn; + frs_trace->h2d_ctrl_post_drd = + dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + frs_trace->h2d_ctrl_post_dwr = + dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx); + + ring = &prot->d2hring_ctrl_cpln; + frs_trace->d2h_ctrl_cpln_drd = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + frs_trace->d2h_ctrl_cpln_dwr = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + + ring = &prot->h2dring_rxp_subn; + frs_trace->h2d_rx_post_drd = + dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + frs_trace->h2d_rx_post_dwr = + dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx); + + ring = &prot->d2hring_rx_cpln; + frs_trace->d2h_rx_cpln_drd = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + frs_trace->d2h_rx_cpln_dwr = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + + ring = &prot->d2hring_tx_cpln; + frs_trace->d2h_tx_cpln_drd = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + frs_trace->d2h_tx_cpln_dwr = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + + if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) { + ring = prot->h2dring_info_subn; + frs_trace->h2d_info_post_drd = + dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + frs_trace->h2d_info_post_dwr = + dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx); + + ring = prot->d2hring_info_cpln; + frs_trace->d2h_info_cpln_drd = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + frs_trace->d2h_info_cpln_dwr = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + } + if (prot->d2hring_edl != NULL) { + ring = prot->d2hring_edl; + frs_trace->d2h_ring_edl_drd = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx); + frs_trace->d2h_ring_edl_dwr = + dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + } + +} + +void +dhd_bus_flow_ring_status_isr_trace(dhd_pub_t *dhd) +{ + uint32 cnt = dhd->bus->frs_isr_count % FRS_TRACE_SIZE; + dhd_frs_trace_t *frs_isr_trace = &dhd->bus->frs_isr_trace[cnt]; + uint64 time_ns_prev = frs_isr_trace->timestamp; + uint64 time_ns_now = OSL_LOCALTIME_NS(); + + if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */ + return; + } + + dhd_bus_flow_ring_status_trace(dhd, frs_isr_trace); + + frs_isr_trace->timestamp = OSL_LOCALTIME_NS(); + dhd->bus->frs_isr_count ++; +} + +void +dhd_bus_flow_ring_status_dpc_trace(dhd_pub_t *dhd) +{ + uint32 cnt = dhd->bus->frs_dpc_count % FRS_TRACE_SIZE; + dhd_frs_trace_t *frs_dpc_trace = &dhd->bus->frs_dpc_trace[cnt]; + uint64 time_ns_prev = frs_dpc_trace->timestamp; + uint64 time_ns_now = OSL_LOCALTIME_NS(); + + if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */ + return; + } + + dhd_bus_flow_ring_status_trace(dhd, frs_dpc_trace); + + frs_dpc_trace->timestamp = OSL_LOCALTIME_NS(); + dhd->bus->frs_dpc_count ++; +} +#endif /* DHD_FLOW_RING_STATUS_TRACE */ diff --git a/bcmdhd.101.10.361.x/dhd_pcie.c b/bcmdhd.101.10.361.x/dhd_pcie.c new file mode 100755 index 0000000..f69951d --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_pcie.c 
@@ -0,0 +1,17674 @@ +/* + * DHD Bus Module for PCIE + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +/** XXX Twiki: [PCIeFullDongleArchitecture] */ + +/* include files */ +#include +#include +#include +#include +#include /* need to still support chips no longer in trunk firmware */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(LINUX) || defined(linux) +#include +#endif /* LINUX || linux */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ +#include +#include + +#ifdef DHD_TIMESYNC +#include +#endif /* DHD_TIMESYNC */ + +#ifdef BCM_ROUTER_DHD +#include +#define STR_END "END\0\0" +#define BOARDREV_PROMOTABLE_STR "0xff" +#endif +#if defined(BCMEMBEDIMAGE) +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#include +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if defined(DEBUGGER) || defined (DHD_DSCOPE) +#include +#endif /* DEBUGGER || DHD_DSCOPE */ + +#if defined(FW_SIGNATURE) +#include +#include +#endif /* FW_SIGNATURE */ + +#ifdef DNGL_AXI_ERROR_LOGGING +#include +#include +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) +#include +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ + +#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#ifdef LINUX +#define MAX_WKLK_IDLE_CHECK 3 /* times dhd_wake_lock checked before deciding not to suspend */ +#endif /* LINUX */ + +#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512 +#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512 +#define MAX_HP2P_CMPL_RINGS 2u + +/* XXX defines for 4378 */ +#define ARMCR4REG_CORECAP (0x4/sizeof(uint32)) +#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32)) +#define ACC_MPU_SHIFT 25 +#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT) + +/* XXX Offset for 4375 work around register */ +#define REG_WORK_AROUND (0x1e4/sizeof(uint32)) + +/* XXX defines for 43602a0 workaround JIRA CRWLARMCR4-53 */ +#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32)) +#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) +/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */ + +/* CTO Prevention Recovery */ +#define CTO_TO_CLEAR_WAIT_MS 50 +#define CTO_TO_CLEAR_WAIT_MAX_CNT 200 + +/* FLR setting */ +#define PCIE_FLR_CAPAB_BIT 28 +#define PCIE_FUNCTION_LEVEL_RESET_BIT 15 + +#ifdef BCMQT_HW +extern int 
qt_flr_reset; +/* FLR takes longer on QT Z boards so increasing the delay by 30% */ +#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u +#define DHD_SSRESET_STATUS_RETRY_DELAY 55u +#else +#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */ +#define DHD_SSRESET_STATUS_RETRY_DELAY 40u +#endif /* BCMQT_HW */ +/* + * Increase SSReset de-assert time to 8ms, + * since re-scan takes longer on 4378B0. + */ +#define DHD_SSRESET_STATUS_RETRIES 200u + +/* Fetch address of a member in the pciedev_shared structure in dongle memory */ +#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \ + (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member) + +/* Fetch address of a member in rings_info_ptr structure in dongle memory */ +#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \ + (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member) + +/* Fetch address of a member in the ring_mem structure in dongle memory */ +#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \ + (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member) + +#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE +#define DHD_PCIE_INFO DHD_TRACE +#else +#define DHD_PCIE_INFO DHD_INFO +#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */ + +#if defined(SUPPORT_MULTIPLE_BOARD_REV) + extern unsigned int system_rev; +#endif /* SUPPORT_MULTIPLE_BOARD_REV */ + +#ifdef EWP_EDL +extern int host_edl_support; +#endif + +#ifdef BCMQT_HW +extern int qt_dngl_timeout; +#endif /* BCMQT_HW */ + +/* This can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */ +uint dma_ring_indices = 0; +/* This can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */ +bool h2d_phase = 0; +/* This can be overridden by the module parameter (force_trap_bad_h2d_phase) + * defined in dhd_linux.c + */ +bool force_trap_bad_h2d_phase = 0; + +int dhd_dongle_ramsize; +struct dhd_bus *g_dhd_bus = NULL; +#ifdef DNGL_AXI_ERROR_LOGGING +static void dhd_log_dump_axi_error(uint8 *axi_err); +#endif /* DNGL_AXI_ERROR_LOGGING */ + +static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); +static int dhdpcie_bus_readconsole(dhd_bus_t *bus); +#if defined(DHD_FW_COREDUMP) +static int dhdpcie_mem_dump(dhd_bus_t *bus); +static int dhdpcie_get_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ + +static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size); +static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, + const char *name, void *params, + uint plen, void *arg, uint len, int val_size); +static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval); +static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, + uint32 len, uint32 srcdelay, uint32 destdelay, + uint32 d11_lpbk, uint32 core_num, uint32 wait, + uint32 mem_addr); +#ifdef BCMINTERNAL +static int dhdpcie_bus_set_tx_lpback(struct dhd_bus *bus, bool enable); +static int dhdpcie_bus_get_tx_lpback(struct dhd_bus *bus); +static uint64 serialized_backplane_access_64(dhd_bus_t* bus, uint addr, uint size, uint64* val, + bool read); +#endif /* BCMINTERNAL */ +static uint serialized_backplane_access(dhd_bus_t* bus, uint addr, uint size, uint* val, bool read); +static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter); +static int _dhdpcie_download_firmware(struct dhd_bus *bus); +static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh); + +#if defined(FW_SIGNATURE) +static int dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write); +static int 
dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus); +static int dhdpcie_bus_write_fws_status(dhd_bus_t *bus); +static int dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus); +static int dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path); +static int dhdpcie_download_rtlv_end(dhd_bus_t *bus); +static int dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr, + uint32 download_size, const char *signature_fname, + const char *bloader_fname, uint32 bloader_download_addr); +#endif /* FW_SIGNATURE */ + +static int dhdpcie_bus_write_vars(dhd_bus_t *bus); +static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus); +static bool dhdpci_bus_read_frames(dhd_bus_t *bus); +static int dhdpcie_readshared(dhd_bus_t *bus); +static void dhdpcie_init_shared_addr(dhd_bus_t *bus); +static bool dhdpcie_dongle_attach(dhd_bus_t *bus); +static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size); +static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, + bool dongle_isolation, bool reset_flag); +static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh); +static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len); +static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr); +static void dhd_init_bar1_switch_lock(dhd_bus_t *bus); +static void dhd_deinit_bar1_switch_lock(dhd_bus_t *bus); +static void dhd_init_pwr_req_lock(dhd_bus_t *bus); +static void dhd_deinit_pwr_req_lock(dhd_bus_t *bus); +static void dhd_init_bus_lp_state_lock(dhd_bus_t *bus); +static void dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus); +static void dhd_init_backplane_access_lock(dhd_bus_t *bus); +static void dhd_deinit_backplane_access_lock(dhd_bus_t *bus); +static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); +static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); +static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); +static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset); +#ifdef DHD_SUPPORT_64BIT +static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used)); +static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used)); +#endif /* DHD_SUPPORT_64BIT */ +static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data); +static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size); +static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); +static void dhdpcie_fw_trap(dhd_bus_t *bus); +static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info); +static void dhdpcie_handle_mb_data(dhd_bus_t *bus); +extern void dhd_dpc_enable(dhd_pub_t *dhdp); +#ifdef PCIE_INB_DW +static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, + bool d2h, enum dhd_bus_ds_state inbstate); +#else +static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h); +#endif /* PCIE_INB_DW */ +#ifdef DHD_MMIO_TRACE +static void dhd_bus_mmio_trace(dhd_bus_t *bus, uint32 addr, uint32 value, bool set); +#endif /* defined(DHD_MMIO_TRACE) */ +#if defined(LINUX) || defined(linux) +extern void dhd_dpc_kill(dhd_pub_t *dhdp); +#endif /* LINUX || linux */ + +#ifdef IDLE_TX_FLOW_MGMT +static void dhd_bus_check_idle_scan(dhd_bus_t *bus); +static void dhd_bus_idle_scan(dhd_bus_t *bus); +#endif /* IDLE_TX_FLOW_MGMT */ + +#ifdef BCMEMBEDIMAGE +static int 
dhdpcie_download_code_array(dhd_bus_t *bus); +#endif /* BCMEMBEDIMAGE */ +#ifdef BCM_ROUTER_DHD +extern char * nvram_get(const char *name); +#endif +#if defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) +static void select_fd_image( + struct dhd_bus *bus, unsigned char **p_dlarray, + char **p_dlimagename, char **p_dlimagever, + char **p_dlimagedate, int *image_size); +#endif /* defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) */ + +#ifdef BCM_ROUTER_DHD +int dbushost_initvars_flash(si_t *sih, osl_t *osh, char **base, uint len); +#endif + +#ifdef EXYNOS_PCIE_DEBUG +extern void exynos_pcie_register_dump(int ch_num); +#endif /* EXYNOS_PCIE_DEBUG */ + +#if defined(DHD_H2D_LOG_TIME_SYNC) +static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus); +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +#define PCI_VENDOR_ID_BROADCOM 0x14e4 + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define MAX_D3_ACK_TIMEOUT 100 +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef BCMQT +#define DHD_DEFAULT_DOORBELL_TIMEOUT 40 /* ms */ +#else +#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ +#endif +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) +static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT; +#endif /* PCIE_OOB || PCIE_INB_DW */ + +static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); +static int dhdpcie_cto_error_recovery(struct dhd_bus *bus); + +static int dhdpcie_init_d11status(struct dhd_bus *bus); + +static int dhdpcie_wrt_rnd(struct dhd_bus *bus); + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE +#include +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +#ifdef DHD_HP2P +extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer); +static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val); +#endif +#if defined(linux) || defined(LINUX) +#ifdef DHD_FW_MEM_CORRUPTION +#define NUM_PATTERNS 2 +#else +#define NUM_PATTERNS 6 +#endif /* DHD_FW_MEM_CORRUPTION */ +static bool dhd_bus_tcm_test(struct dhd_bus *bus); +#endif /* LINUX || linux */ + +#if defined(FW_SIGNATURE) +static int dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf); +#endif +static void dhdpcie_pme_stat_clear(dhd_bus_t *bus); + +/* IOVar table */ +enum { + IOV_INTR = 1, +#ifdef DHD_BUS_MEM_ACCESS + IOV_MEMBYTES, +#endif /* DHD_BUS_MEM_ACCESS */ + IOV_MEMSIZE, + IOV_SET_DOWNLOAD_STATE, + IOV_SET_DOWNLOAD_INFO, + IOV_DEVRESET, + IOV_VARS, + IOV_MSI_SIM, + IOV_PCIE_LPBK, + IOV_CC_NVMSHADOW, + IOV_RAMSIZE, + IOV_RAMSTART, + IOV_SLEEP_ALLOWED, +#ifdef BCMINTERNAL + IOV_PCIE_TX_LPBK, +#endif /* BCMINTERNAL */ + IOV_PCIE_DMAXFER, + IOV_PCIE_SUSPEND, +#ifdef DHD_PCIE_REG_ACCESS + IOV_PCIEREG, + IOV_PCIECFGREG, + IOV_PCIECOREREG, + IOV_PCIESERDESREG, + IOV_PCIEASPM, + IOV_BAR0_SECWIN_REG, + IOV_SBREG, +#endif /* DHD_PCIE_REG_ACCESS */ + IOV_DONGLEISOLATION, + IOV_LTRSLEEPON_UNLOOAD, + IOV_METADATA_DBG, + IOV_RX_METADATALEN, + IOV_TX_METADATALEN, + IOV_TXP_THRESHOLD, + IOV_BUZZZ_DUMP, + IOV_DUMP_RINGUPD_BLOCK, + IOV_DMA_RINGINDICES, + IOV_FORCE_FW_TRAP, + IOV_DB1_FOR_MB, + IOV_FLOW_PRIO_MAP, +#ifdef DHD_PCIE_RUNTIMEPM + IOV_IDLETIME, +#endif /* DHD_PCIE_RUNTIMEPM */ + IOV_RXBOUND, + IOV_TXBOUND, + IOV_HANGREPORT, + IOV_H2D_MAILBOXDATA, + IOV_INFORINGS, + IOV_H2D_PHASE, + IOV_H2D_ENABLE_TRAP_BADPHASE, + IOV_H2D_TXPOST_MAX_ITEM, +#if defined(DHD_HTPUT_TUNABLES) + IOV_H2D_HTPUT_TXPOST_MAX_ITEM, +#endif /* DHD_HTPUT_TUNABLES */ + IOV_TRAPDATA, + IOV_TRAPDATA_RAW, + IOV_CTO_PREVENTION, +#ifdef PCIE_OOB + IOV_OOB_BT_REG_ON, + IOV_OOB_ENABLE, +#endif /* PCIE_OOB */ +#ifdef 
DEVICE_TX_STUCK_DETECT + IOV_DEVICE_TX_STUCK_DETECT, +#endif /* DEVICE_TX_STUCK_DETECT */ + IOV_PCIE_WD_RESET, + IOV_DUMP_DONGLE, +#ifdef DHD_EFI + IOV_WIFI_PROPERTIES, + IOV_CONTROL_SIGNAL, + IOV_OTP_DUMP, +#ifdef BT_OVER_PCIE + IOV_BTOP_TEST, +#endif +#endif /* DHD_EFI */ + IOV_IDMA_ENABLE, + IOV_IFRM_ENABLE, + IOV_CLEAR_RING, + IOV_DAR_ENABLE, + IOV_DHD_CAPS, /**< returns string with dhd capabilities */ +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + IOV_GDB_SERVER, /**< starts gdb server on given interface */ +#endif /* DEBUGGER || DHD_DSCOPE */ +#if defined(GDB_PROXY) + IOV_GDB_PROXY_PROBE, /**< gdb proxy support presence check */ + IOV_GDB_PROXY_STOP_COUNT, /**< gdb proxy firmware stop count */ +#endif /* GDB_PROXY */ + IOV_INB_DW_ENABLE, +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + IOV_DEEP_SLEEP, +#endif /* PCIE_OOB || PCIE_INB_DW */ + IOV_CTO_THRESHOLD, +#ifdef D2H_MINIDUMP + IOV_MINIDUMP_OVERRIDE, +#endif /* D2H_MINIDUMP */ +#ifdef BCMINTERNAL + IOV_DMA_CHAN, + IOV_HYBRIDFW, +#endif /* BCMINTERNAL */ + IOV_HSCBSIZE, /* get HSCB buffer size */ +#ifdef DHD_BUS_MEM_ACCESS + IOV_HSCBBYTES, /* copy HSCB buffer */ +#endif +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE + IOV_FWTRACE, /* Enable/disable firmware tracing */ +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + IOV_HP2P_ENABLE, + IOV_HP2P_PKT_THRESHOLD, + IOV_HP2P_TIME_THRESHOLD, + IOV_HP2P_PKT_EXPIRY, + IOV_HP2P_TXCPL_MAXITEMS, + IOV_HP2P_RXCPL_MAXITEMS, + IOV_EXTDTXS_IN_TXCPL, + IOV_HOSTRDY_AFTER_INIT, +#ifdef BCMINTERNAL + IOV_SBREG_64, +#endif /* BCMINTERNAL */ + IOV_HP2P_MF_ENABLE, + IOV_PCIE_LAST /**< unused IOVAR */ +}; + +const bcm_iovar_t dhdpcie_iovars[] = { + {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 }, +#ifdef DHD_BUS_MEM_ACCESS + {"membytes", IOV_MEMBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int) }, +#endif /* DHD_BUS_MEM_ACCESS */ + {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 }, + {"dwnldinfo", IOV_SET_DOWNLOAD_INFO, 0, 0, IOVT_BUFFER, + sizeof(fw_download_info_t) }, + {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 }, + {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 }, + {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 }, + {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 }, +#ifdef BCMINTERNAL + {"msi_sim", IOV_MSI_SIM, 0, 0, IOVT_BOOL, 0 }, +#endif /* BCMINTERNAL */ + {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 }, + {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_PCIE_REG_ACCESS + {"pciereg", IOV_PCIEREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pciecfgreg", IOV_PCIECFGREG, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pciecorereg", IOV_PCIECOREREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pcieserdesreg", IOV_PCIESERDESREG, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) }, + {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, 0, IOVT_BUFFER, sizeof(uint8) }, +#endif /* DHD_PCIE_REG_ACCESS */ +#ifdef BCMINTERNAL + {"pcie_tx_lpbk", IOV_PCIE_TX_LPBK, 0, 0, IOVT_UINT32, 0 }, +#endif /* BCMINTERNAL */ + {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)}, + {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 }, +#ifdef PCIE_OOB + {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, 0, IOVT_UINT32, 0 }, + {"oob_enable", IOV_OOB_ENABLE, 0, 0, IOVT_UINT32, 0 }, +#endif /* PCIE_OOB */ + {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, 
0, IOVT_UINT32, 0 }, + {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 }, + {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 }, + {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0}, + {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 }, + {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, + {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, + {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 }, + {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 }, + {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_PCIE_RUNTIMEPM + {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 }, +#endif /* DHD_PCIE_RUNTIMEPM */ + {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 }, + {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_PCIE_REG_ACCESS + {"aspm", IOV_PCIEASPM, 0, 0, IOVT_INT32, 0 }, +#endif /* DHD_PCIE_REG_ACCESS */ + {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 }, + {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 }, + {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 }, + {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 }, + {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0, + IOVT_UINT32, 0 }, + {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 }, +#if defined(DHD_HTPUT_TUNABLES) + {"h2d_htput_max_txpost", IOV_H2D_HTPUT_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 }, +#endif /* DHD_HTPUT_TUNABLES */ + {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 }, + {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 }, + {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 }, + {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 }, +#ifdef DEVICE_TX_STUCK_DETECT + {"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 }, +#endif /* DEVICE_TX_STUCK_DETECT */ + {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER, + MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))}, + {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_EFI + {"properties", IOV_WIFI_PROPERTIES, 0, 0, IOVT_BUFFER, 0}, + {"otp_dump", IOV_OTP_DUMP, 0, 0, IOVT_BUFFER, 0}, + {"control_signal", IOV_CONTROL_SIGNAL, 0, 0, IOVT_UINT32, 0}, +#ifdef BT_OVER_PCIE + {"btop_test", IOV_BTOP_TEST, 0, 0, IOVT_UINT32, 0}, +#endif +#endif /* DHD_EFI */ + {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"cap", IOV_DHD_CAPS, 0, 0, IOVT_BUFFER, 0}, +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 }, +#endif /* DEBUGGER || DHD_DSCOPE */ +#if defined(GDB_PROXY) + {"gdb_proxy_probe", IOV_GDB_PROXY_PROBE, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"gdb_proxy_stop_count", IOV_GDB_PROXY_STOP_COUNT, 0, 0, IOVT_UINT32, 0 }, +#endif /* GDB_PROXY */ + {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 }, +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + {"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32, 0}, +#endif /* PCIE_OOB || PCIE_INB_DW */ + {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, +#ifdef D2H_MINIDUMP + {"minidump_override", IOV_MINIDUMP_OVERRIDE, 0, 0, IOVT_UINT32, 0 }, +#endif /* D2H_MINIDUMP */ +#ifdef BCMINTERNAL + {"dma_chan_db0", IOV_DMA_CHAN, 0, 0, IOVT_UINT32, 0 }, + {"hybridfw", IOV_HYBRIDFW, 0, 0, IOVT_BUFFER, 0 }, +#endif /* BCMINTERNAL */ + {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 }, +#ifdef 
DHD_BUS_MEM_ACCESS + {"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) }, +#endif + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE + {"fwtrace", IOV_FWTRACE, 0, 0, IOVT_UINT32, 0 }, +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +#ifdef DHD_HP2P + {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 }, +#endif /* DHD_HP2P */ + {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 }, + {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 }, +#ifdef BCMINTERNAL + {"sbreg_64", IOV_SBREG_64, 0, 0, IOVT_BUFFER, sizeof(uint8) }, +#endif /* BCMINTERNAL */ + {"hp2p_mf_enable", IOV_HP2P_MF_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {NULL, 0, 0, 0, 0, 0 } +}; + +#ifdef BCMINTERNAL +#define MSI_SIM_BUFSIZE 64 +#define PCIE_CFG_MSICAP_OFFSET 0x58 +#define PCIE_CFG_MSIADDR_LOW_OFFSET 0x5C +#define PCIE_CFG_MSIDATA_OFFSET 0x64 +#define PCIE_CFG_MSI_GENDATA 0x5678 +#define PCIE_CFG_MSICAP_ENABLE_MSI 0x816805 +#define PCIE_CFG_MSICAP_DISABLE_MSI 0x806805 +#endif + +#ifdef BCMQT_HW +#define MAX_READ_TIMEOUT 100 * 1000 /* 100 ms in dongle time */ +#elif defined(NDIS) +#define MAX_READ_TIMEOUT 5 * 1000 * 1000 +#else +#define MAX_READ_TIMEOUT 2 * 1000 * 1000 +#endif + +#ifndef DHD_RXBOUND +#define DHD_RXBOUND 64 +#endif +#ifndef DHD_TXBOUND +#define DHD_TXBOUND 64 +#endif + +#define DHD_INFORING_BOUND 32 +#define DHD_BTLOGRING_BOUND 32 + +uint dhd_rxbound = DHD_RXBOUND; +uint dhd_txbound = DHD_TXBOUND; + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) +/** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */ +static struct dhd_gdb_bus_ops_s bus_ops = { + .read_u16 = dhdpcie_bus_rtcm16, + .read_u32 = dhdpcie_bus_rtcm32, + .write_u32 = dhdpcie_bus_wtcm32, +}; +#endif /* DEBUGGER || DHD_DSCOPE */ + +bool +dhd_bus_get_flr_force_fail(struct dhd_bus *bus) +{ + return bus->flr_force_fail; +} + +/** + * Register/Unregister functions are called by the main DHD entry point (e.g. module insertion) to + * link with the bus driver, in order to look for or await the device. + */ +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return dhdpcie_bus_register(); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhdpcie_bus_unregister(); + return; +} + +/** returns a host virtual address */ +uint32 * +dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size) +{ + REG_UNMAP(addr); + return; +} + +/** + * return H2D doorbell register address + * use DAR registers instead of enum register for corerev >= 23 (4347B0) + */ +static INLINE uint +dhd_bus_db0_addr_get(struct dhd_bus *bus) +{ + uint addr = PCIH2D_MailBox; + uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev); + +#ifdef BCMINTERNAL + if (bus->dma_chan == 1) { + addr = PCIH2D_MailBox_1; + dar_addr = DAR_PCIH2D_DB1_0(bus->sih->buscorerev); + } else if (bus->dma_chan == 2) { + addr = PCIH2D_MailBox_2; + dar_addr = DAR_PCIH2D_DB2_0(bus->sih->buscorerev); + } +#endif /* BCMINTERNAL */ + + return ((DAR_ACTIVE(bus->dhd)) ? 
+static INLINE uint +dhd_bus_db0_addr_2_get(struct dhd_bus *bus) +{ + return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2); +} + +static INLINE uint +dhd_bus_db1_addr_get(struct dhd_bus *bus) +{ + return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1); +} + +static INLINE uint +dhd_bus_db1_addr_3_get(struct dhd_bus *bus) +{ + return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB3_1(bus->sih->buscorerev) : PCIH2D_DB1_3); +} + +static void +dhd_init_pwr_req_lock(dhd_bus_t *bus) +{ + if (!bus->pwr_req_lock) { + bus->pwr_req_lock = osl_spin_lock_init(bus->osh); + } +} + +static void +dhd_deinit_pwr_req_lock(dhd_bus_t *bus) +{ + if (bus->pwr_req_lock) { + osl_spin_lock_deinit(bus->osh, bus->pwr_req_lock); + bus->pwr_req_lock = NULL; + } +} + +#ifdef PCIE_INB_DW +void +dhdpcie_set_dongle_deepsleep(dhd_bus_t *bus, bool val) +{ + ulong flags_ds; + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_DONGLE_DS_LOCK(bus->dongle_ds_lock, flags_ds); + bus->dongle_in_deepsleep = val; + DHD_BUS_DONGLE_DS_UNLOCK(bus->dongle_ds_lock, flags_ds); + } +} +void +dhd_init_dongle_ds_lock(dhd_bus_t *bus) +{ + if (!bus->dongle_ds_lock) { + bus->dongle_ds_lock = osl_spin_lock_init(bus->osh); + } +} +void +dhd_deinit_dongle_ds_lock(dhd_bus_t *bus) +{ + if (bus->dongle_ds_lock) { + osl_spin_lock_deinit(bus->osh, bus->dongle_ds_lock); + bus->dongle_ds_lock = NULL; + } +} +#endif /* PCIE_INB_DW */ + +/* + * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request + */ +static INLINE void +dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable) +{ + if (enable) { + si_corereg(bus->sih, bus->sih->buscoreidx, offset, + SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, + SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, offset, + SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0); + } +} + +static INLINE void +_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus) +{ + uint mask; + + /* + * If multiple de-asserts are pending, just decrement the ref count and return. + * Clear the power request only when a single request is pending, so the + * initial request is not removed unexpectedly. + */ + if (bus->pwr_req_ref > 1) { + bus->pwr_req_ref--; + return; + } + + ASSERT(bus->pwr_req_ref == 1); + + if (MULTIBP_ENAB(bus->sih)) { + /* Common BP controlled by HW so only need to toggle WL/ARM backplane */ + mask = SRPWR_DMN1_ARMBPSD_MASK; + } else { + mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK; + } + + si_srpwr_request(bus->sih, mask, 0); + bus->pwr_req_ref = 0; +} + +static INLINE void +dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus) +{ + unsigned long flags = 0; + + DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags); + _dhd_bus_pcie_pwr_req_clear_cmn(bus); + DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags); +} + +static INLINE void +dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus) +{ + _dhd_bus_pcie_pwr_req_clear_cmn(bus); +} + +static INLINE void +_dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus) +{ + uint mask, val; + + /* If multiple request entries, increment reference and return */ + if (bus->pwr_req_ref > 0) { + bus->pwr_req_ref++; + return; + } + + ASSERT(bus->pwr_req_ref == 0); + + if (MULTIBP_ENAB(bus->sih)) { + /* Common BP controlled by HW so only need to toggle WL/ARM backplane */ + mask = SRPWR_DMN1_ARMBPSD_MASK; + val = SRPWR_DMN1_ARMBPSD_MASK; + } else { + mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK; + val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK; + } + 
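+ /* + * Editorial sketch (not part of the original change): requests are reference + * counted, so every path that raises a request must clear it exactly once: + * + * dhd_bus_pcie_pwr_req(bus); // ref 0 -> 1, asserts SRPWR below + * ... access the ARM/WL backplane ... + * dhd_bus_pcie_pwr_req_clear(bus); // ref 1 -> 0, de-asserts SRPWR + * + * Nested callers only move the refcount; the hardware request below is + * issued on the 0 -> 1 transition only. + */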
si_srpwr_request(bus->sih, mask, val); + + bus->pwr_req_ref = 1; +} + +static INLINE void +dhd_bus_pcie_pwr_req(struct dhd_bus *bus) +{ + unsigned long flags = 0; + + DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags); + _dhd_bus_pcie_pwr_req_cmn(bus); + DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags); +} + +static INLINE void +_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus) +{ + uint mask, val; + + mask = SRPWR_DMN_ALL_MASK(bus->sih); + val = SRPWR_DMN_ALL_MASK(bus->sih); + + si_srpwr_request(bus->sih, mask, val); +} + +void +dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus) +{ + unsigned long flags = 0; + + /* + * A few corerevs need the power domain to be active for FLR. + * Return if the pwr req is not applicable for the corerev + */ + if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) { + return; + } + + DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags); + _dhd_bus_pcie_pwr_req_pd0123_cmn(bus); + DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags); +} + +static INLINE void +_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus) +{ + uint mask; + + mask = SRPWR_DMN_ALL_MASK(bus->sih); + + si_srpwr_request(bus->sih, mask, 0); +} + +void +dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus) +{ + unsigned long flags = 0; + + /* return if the pwr clear is not applicable for the corerev */ + if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) { + return; + } + DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags); + _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus); + DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags); +} + +static INLINE void +dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus) +{ + _dhd_bus_pcie_pwr_req_cmn(bus); +} + +bool +dhdpcie_chip_support_msi(dhd_bus_t *bus) +{ + /* XXX For chips with buscorerev <= 14, intstatus + * is not getting cleared by the firmware. + * Either the host can read and clear intstatus for these, + * or MSI can be left disabled entirely. + * Here option 2, not enabling MSI, is chosen. + * Also for hw4 chips, msi is not enabled. + */ + DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n", + __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih))); + if (bus->sih->buscorerev <= 14 || + si_chipid(bus->sih) == BCM4389_CHIP_ID || + si_chipid(bus->sih) == BCM4385_CHIP_ID || + si_chipid(bus->sih) == BCM4375_CHIP_ID || + si_chipid(bus->sih) == BCM4376_CHIP_ID || + si_chipid(bus->sih) == BCM4362_CHIP_ID || + si_chipid(bus->sih) == BCM43751_CHIP_ID || + si_chipid(bus->sih) == BCM43752_CHIP_ID || + si_chipid(bus->sih) == BCM4361_CHIP_ID || + si_chipid(bus->sih) == BCM4359_CHIP_ID) { + return FALSE; + } else { + return TRUE; + } +} + +/** + * Called once for each hardware (dongle) instance that this DHD manages. + * + * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096 + * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The + * precondition is that the PCIEBAR0Window register 'points' at the PCIe core. + * + * 'tcm' is the *host* virtual address at which tcm is mapped. + */
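+/* + * Hypothetical caller sketch (illustrative only; the real probe glue lives in + * dhd_pcie_linux.c and may differ): attach is paired with dhdpcie_bus_release() + * on unload, and the error convention below follows the code underneath: + * + * dhd_bus_t *bus = NULL; + * if (dhdpcie_bus_attach(osh, &bus, regs, tcm, pdev, adapter) != BCME_OK) { + * // non-EFI: 'bus' was already freed internally on failure + * // EFI: 'bus' is still returned and is freed during unload + * return BCME_ERROR; + * } + */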
+int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, + volatile char *regs, volatile char *tcm, void *pci_dev, wifi_adapter_info_t *adapter) +{ + dhd_bus_t *bus = NULL; + int ret = BCME_OK; + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + + do { + if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + ret = BCME_NORESOURCE; + break; + } + bus->bus = adapter->bus_type; + bus->bus_num = adapter->bus_num; + bus->slot_num = adapter->slot_num; + + bus->regs = regs; + bus->tcm = tcm; + bus->osh = osh; +#ifndef NDIS + /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */ + bus->dev = (struct pci_dev *)pci_dev; +#endif +#ifdef DHD_EFI + bus->pcie_dev = pci_dev; +#endif + + dll_init(&bus->flowring_active_list); +#ifdef IDLE_TX_FLOW_MGMT + bus->active_list_last_process_ts = OSL_SYSUPTIME(); +#endif /* IDLE_TX_FLOW_MGMT */ + +#ifdef DEVICE_TX_STUCK_DETECT + /* Enable the Device stuck detection feature by default */ + bus->dev_tx_stuck_monitor = TRUE; + bus->device_tx_stuck_check = OSL_SYSUPTIME(); +#endif /* DEVICE_TX_STUCK_DETECT */ + + /* Attach pcie shared structure */ + if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) { + DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__)); + ret = BCME_NORESOURCE; + break; + } + + /* dhd_common_init(osh); */ + + if (dhdpcie_dongle_attach(bus)) { + DHD_ERROR(("%s: dhdpcie_dongle_attach failed\n", __FUNCTION__)); + ret = BCME_NOTREADY; + break; + } + + /* software resources */ + if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + ret = BCME_NORESOURCE; + break; + } +#if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME) + dhd_conf_get_otp(bus->dhd, bus->sih); +#endif + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->dhd->hostrdy_after_init = TRUE; + bus->db1_for_mb = TRUE; + bus->dhd->hang_report = TRUE; + bus->use_mailbox = FALSE; + bus->use_d0_inform = FALSE; + bus->intr_enabled = FALSE; + bus->flr_force_fail = FALSE; + /* update the dma indices if set through module parameter. */ + if (dma_ring_indices != 0) { + dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices); + } + /* update h2d phase support if set through module parameter */ + bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE; + /* update force trap on bad phase if set through module parameter */ + bus->dhd->force_dongletrap_on_bad_h2d_phase = + force_trap_bad_h2d_phase ? TRUE : FALSE; +#ifdef BTLOG + bus->dhd->bt_logging_enabled = TRUE; +#endif +#ifdef IDLE_TX_FLOW_MGMT + bus->enable_idle_flowring_mgmt = FALSE; +#endif /* IDLE_TX_FLOW_MGMT */ + bus->irq_registered = FALSE; + +#ifdef DHD_MSI_SUPPORT + bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ? 
+ PCIE_MSI : PCIE_INTX; + if (bus->dhd->conf->d2h_intr_method >= 0) + bus->d2h_intr_method = bus->dhd->conf->d2h_intr_method; +#else + bus->d2h_intr_method = PCIE_INTX; +#endif /* DHD_MSI_SUPPORT */ + + /* For MSI, use host irq based control and for INTX use D2H INTMASK based control */ + if (bus->d2h_intr_method == PCIE_MSI) { + bus->d2h_intr_control = PCIE_HOST_IRQ_CTRL; + } else { + bus->d2h_intr_control = PCIE_D2H_INTMASK_CTRL; + } + +#ifdef DHD_HP2P + bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING; + bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING; +#endif /* DHD_HP2P */ + + DHD_TRACE(("%s: EXIT SUCCESS\n", + __FUNCTION__)); + g_dhd_bus = bus; + *bus_ptr = bus; + return ret; + } while (0); + + DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__)); +#ifdef DHD_EFI + /* for EFI even if there is an error, load still succeeds + * so 'bus' should not be freed here, it is freed during unload + */ + if (bus) { + *bus_ptr = bus; + } +#else + if (bus && bus->pcie_sh) { + MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + } + + if (bus) { + MFREE(osh, bus, sizeof(dhd_bus_t)); + } +#endif /* DHD_EFI */ + + return ret; +} + +bool +dhd_bus_skip_clm(dhd_pub_t *dhdp) +{ + switch (dhd_bus_chip_id(dhdp)) { + case BCM4369_CHIP_ID: + return TRUE; + default: + return FALSE; + } +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chiprev; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_sih(struct dhd_bus *bus) +{ + return (void *)bus->sih; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +/** Get Chip ID version */ +uint dhd_bus_chip_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chip; +} + +/** Get Chip Rev ID version */ +uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chiprev; +} + +/** Get Chip Pkg ID version */ +uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chippkg; +} + +int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num) +{ + *bus_type = bus->bus; + *bus_num = bus->bus_num; + *slot_num = bus->slot_num; + return 0; +} + +/** Conduct Loopback test */ +int +dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) +{ + dma_xfer_info_t dmaxfer_lpbk; + int ret = BCME_OK; + +#define PCIE_DMAXFER_LPBK_LENGTH 4096 + memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t)); + dmaxfer_lpbk.version = DHD_DMAXFER_VERSION; + dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t); + dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH; + dmaxfer_lpbk.type = type; + dmaxfer_lpbk.should_wait = TRUE; + + ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0, + (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET); + if (ret < 0) { + DHD_ERROR(("failed to start PCIe Loopback Test!!! " + "Type:%d Reason:%d\n", type, ret)); + return ret; + } + + if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) { + DHD_ERROR(("failed to check PCIe Loopback Test!!! 
" + "Type:%d Status:%d Error code:%d\n", type, + dmaxfer_lpbk.status, dmaxfer_lpbk.error_code)); + ret = BCME_ERROR; + } else { + DHD_ERROR(("successful to check PCIe Loopback Test" + " Type:%d\n", type)); + } +#undef PCIE_DMAXFER_LPBK_LENGTH + + return ret; +} + +/* Check if there is DPC scheduling errors */ +bool +dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bool sched_err; + + if (bus->dpc_entry_time < bus->isr_exit_time) { + /* Kernel doesn't schedule the DPC after processing PCIe IRQ */ + sched_err = TRUE; + } else if (bus->dpc_entry_time < bus->resched_dpc_time) { + /* Kernel doesn't schedule the DPC after DHD tries to reschedule + * the DPC due to pending work items to be processed. + */ + sched_err = TRUE; + } else { + sched_err = FALSE; + } + + if (sched_err) { + /* print out minimum timestamp info */ + DHD_ERROR(("isr_entry_time="SEC_USEC_FMT + " isr_exit_time="SEC_USEC_FMT + " dpc_entry_time="SEC_USEC_FMT + "\ndpc_exit_time="SEC_USEC_FMT + " isr_sched_dpc_time="SEC_USEC_FMT + " resched_dpc_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->isr_entry_time), + GET_SEC_USEC(bus->isr_exit_time), + GET_SEC_USEC(bus->dpc_entry_time), + GET_SEC_USEC(bus->dpc_exit_time), + GET_SEC_USEC(bus->isr_sched_dpc_time), + GET_SEC_USEC(bus->resched_dpc_time))); + } + + return sched_err; +} + +/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */ +uint32 +dhdpcie_bus_intstatus(dhd_bus_t *bus) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + + if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) { +#ifdef DHD_EFI + DHD_INFO(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__)); +#else + DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__)); +#endif /* !DHD_EFI */ + return intstatus; + } + /* XXX: check for PCIE Gen2 also */ + if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || + (bus->sih->buscorerev == 2)) { + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); + intstatus &= I_MB; + } else { + /* this is a PCIE core register..not a config register... */ + intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0); + +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, bus->pcie_mailbox_int, intstatus, FALSE); +#endif /* defined(DHD_MMIO_TRACE) */ + + /* this is a PCIE core register..not a config register... */ + intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0); + /* Is device removed. 
intstatus and intmask read back as 0xffffffff */ + if (intstatus == (uint32)-1 || intmask == (uint32)-1) { + DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__)); + DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n", + __FUNCTION__, intstatus, intmask)); + bus->is_linkdown = TRUE; + dhd_pcie_debug_info_dump(bus->dhd); +#ifdef CUSTOMER_HW4_DEBUG +#if defined(OEM_ANDROID) +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT; +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT + copy_hang_info_linkdown(bus->dhd); +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */ + dhd_os_send_hang_message(bus->dhd); +#endif /* OEM_ANDROID */ +#endif /* CUSTOMER_HW4_DEBUG */ + return intstatus; + } + +#ifndef DHD_READ_INTSTATUS_IN_DPC + intstatus &= intmask; +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, intmask, FALSE); +#endif /* defined(DHD_MMIO_TRACE) */ + + /* XXX: define the mask in a .h file */ + /* + * The fourth argument to si_corereg is the "mask" of register fields to update + * and the fifth is the "value" to update them with. If we are interested in only + * a few fields of the "mask" bit map, we should not write back what we read, + * because by doing so we might clear/ack interrupts that are not handled yet. + */ +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, bus->pcie_mailbox_int, intstatus, TRUE); +#endif /* defined(DHD_MMIO_TRACE) */ + si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask, + intstatus); + + intstatus &= bus->def_intmask; + } + + return intstatus; +} + +void +dhdpcie_cto_recovery_handler(dhd_pub_t *dhd) +{ + dhd_bus_t *bus = dhd->bus; + int ret; + + /* Disable PCIe Runtime PM to avoid D3_ACK timeout. + */ + DHD_DISABLE_RUNTIME_PM(dhd); + + /* Sleep for 1 second so that any AXI timeout, + * if running on the ALP clock, will also be captured + */ + OSL_SLEEP(1000); + + /* Reset the backplane and clear CTO, + * after which access through PCIe is recovered. + */
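+ /* + * Editorial summary of the recovery sequence (descriptive only): runtime PM + * is already disabled and 1s has elapsed above; next the CTO is cleared and + * the backplane reset, the backplane is given ~10ms to settle, protocol and + * console state are dumped (plus a CTO-tagged memdump when enabled), and + * finally the link is marked down and a HANG event is sent so the stack can + * recover the interface. + */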
+ ret = dhdpcie_cto_error_recovery(bus); + if (!ret) { + /* Waiting for backplane reset */ + OSL_SLEEP(10); + /* Dump debug info */ + dhd_prot_debug_info_print(bus->dhd); + /* Dump console buffer */ + dhd_bus_dump_console_buffer(bus); +#if defined(DHD_FW_COREDUMP) + /* save core dump or write to a file */ + if (!bus->is_linkdown && bus->dhd->memdump_enabled) { +#ifdef DHD_SSSR_DUMP + DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__)); + bus->dhd->collect_sssr = TRUE; +#endif /* DHD_SSSR_DUMP */ + bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + } +#ifdef OEM_ANDROID +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + bus->is_linkdown = TRUE; + bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT; + /* Send HANG event */ + dhd_os_send_hang_message(bus->dhd); +#endif /* OEM_ANDROID */ +} + +void +dhd_bus_dump_imp_cfg_registers(struct dhd_bus *bus) +{ + uint32 status_cmd = dhd_pcie_config_read(bus, PCIECFGREG_STATUS_CMD, sizeof(uint32)); + uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32)); + uint32 base_addr0 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR0, sizeof(uint32)); + uint32 base_addr1 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR1, sizeof(uint32)); + uint32 linkctl = dhd_pcie_config_read(bus, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32)); + uint32 l1ssctrl = + dhd_pcie_config_read(bus, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32)); + uint32 devctl = dhd_pcie_config_read(bus, PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32)); + uint32 devctl2 = dhd_pcie_config_read(bus, PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32)); + + DHD_ERROR(("status_cmd(0x%x)=0x%x, pmcsr(0x%x)=0x%x " + "base_addr0(0x%x)=0x%x base_addr1(0x%x)=0x%x " + "linkctl(0x%x)=0x%x l1ssctrl(0x%x)=0x%x " + "devctl(0x%x)=0x%x devctl2(0x%x)=0x%x \n", + PCIECFGREG_STATUS_CMD, status_cmd, + PCIE_CFG_PMCSR, pmcsr, + PCIECFGREG_BASEADDR0, base_addr0, + PCIECFGREG_BASEADDR1, base_addr1, + PCIECFGREG_LINK_STATUS_CTRL, linkctl, + PCIECFGREG_PML1_SUB_CTRL1, l1ssctrl, + PCIECFGREG_DEV_STATUS_CTRL, devctl, + PCIECFGGEN_DEV_STATUS_CTRL2, devctl2)); +} + +/** + * Name: dhdpcie_bus_isr + * Parameters: + * 1: IN int irq -- interrupt vector + * 2: IN void *arg -- handle to private data structure + * Return value: + * Status (TRUE or FALSE) + * + * Description: + * The interrupt service routine checks the status register, + * disables the interrupt, and queues the DPC if mailbox interrupts are raised. + */
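+/* + * Editorial sketch (assumed OS glue, not part of this change): the TRUE/FALSE + * return tells the OS layer whether the interrupt was ours. A Linux wrapper + * would typically translate it along these lines: + * + * static irqreturn_t dhdpcie_isr_wrapper(int irq, void *arg) + * { + * return dhdpcie_bus_isr((dhd_bus_t *)arg) ? IRQ_HANDLED : IRQ_NONE; + * } + */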
+int32 +dhdpcie_bus_isr(dhd_bus_t *bus) +{ + uint32 intstatus = 0; + + do { + DHD_INTR(("%s: Enter\n", __FUNCTION__)); + /* verify argument */ + if (!bus) { + DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__)); + break; + } + + if (bus->dhd->dongle_reset) { + DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__)); + break; + } + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__)); + break; + } + + /* avoid processing of interrupts until msgbuf prot is inited */ + if (!bus->intr_enabled) { + DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__)); + break; + } + + if (PCIECTO_ENAB(bus)) { + /* read pci_intstatus */ + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4); + + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s : Invalid intstatus for cto recovery\n", + __FUNCTION__)); + bus->is_linkdown = 1; + dhdpcie_disable_irq_nosync(bus); + dhd_prot_debug_info_print(bus->dhd); + break; + } + + if (intstatus & PCI_CTO_INT_MASK) { + DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE " + "intstat=0x%x enab=%d\n", __FUNCTION__, + intstatus, bus->cto_enable)); + bus->cto_triggered = 1; + dhd_bus_dump_imp_cfg_registers(bus); + /* + * DAR still accessible + */ + dhd_bus_dump_dar_registers(bus); + + /* Disable further PCIe interrupts */ +#ifndef NDIS + dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */ +#endif + /* Stop Tx flow */ + dhd_bus_stop_queue(bus); + + /* Schedule CTO recovery */ + dhd_schedule_cto_recovery(bus->dhd); + + return TRUE; + } + } + + if (bus->d2h_intr_method == PCIE_MSI && + !dhd_conf_legacy_msi_chip(bus->dhd)) { + /* For MSI, as intstatus is cleared by firmware, no need to read */ + goto skip_intstatus_read; + } + +#ifndef DHD_READ_INTSTATUS_IN_DPC + intstatus = dhdpcie_bus_intstatus(bus); + + /* Check if the interrupt is ours or not */ + if (intstatus == 0) { + bus->non_ours_irq_count++; + bus->last_non_ours_irq_time = OSL_LOCALTIME_NS(); + break; + } + + /* save the intstatus */ + /* read interrupt status register!! Status bits will be cleared in DPC !! */ + bus->intstatus = intstatus; + + /* return error for 0xFFFFFFFF */ + if (intstatus == (uint32)-1) { + DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n", + __FUNCTION__, intstatus)); + bus->is_linkdown = 1; + dhdpcie_disable_irq_nosync(bus); + break; + } + +skip_intstatus_read: + /* Overall operation: + * - Mask further interrupts + * - Read/ack intstatus + * - Take action based on bits and state + * - Reenable interrupts (as per state) + */ + + /* Count the interrupt call */ + bus->intrcount++; +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + + bus->ipend = TRUE; + + bus->isr_intr_disable_count++; + + if (bus->d2h_intr_control == PCIE_D2H_INTMASK_CTRL) { + dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */ + } else { + /* For Linux, MacOS etc. (other than NDIS), instead of disabling the + * dongle interrupt by clearing the IntMask, directly disable the + * interrupt on the host side, so that the host will not receive + * any interrupts at all, even if the dongle raises interrupts + */ + dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! 
*/ + } + + bus->intdis = TRUE; +#ifdef DHD_FLOW_RING_STATUS_TRACE + if (bus->dhd->dma_h2d_ring_upd_support && bus->dhd->dma_d2h_ring_upd_support && + (bus->dhd->ring_attached == TRUE)) { + dhd_bus_flow_ring_status_isr_trace(bus->dhd); + } +#endif /* DHD_FLOW_RING_STATUS_TRACE */ +#if defined(PCIE_ISR_THREAD) + + DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + while (dhd_bus_dpc(bus)); + DHD_OS_WAKE_UNLOCK(bus->dhd); +#else + bus->dpc_sched = TRUE; + bus->isr_sched_dpc_time = OSL_LOCALTIME_NS(); +#ifndef NDIS + dhd_sched_dpc(bus->dhd); /* queue DPC now!! */ +#endif /* !NDIS */ +#endif /* defined(PCIE_ISR_THREAD) */ + + DHD_INTR(("%s: Exit Success DPC Queued\n", __FUNCTION__)); + return TRUE; + + } while (0); + + DHD_INTR(("%s: Exit Failure\n", __FUNCTION__)); + return FALSE; +} + +int +dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state) +{ + uint32 cur_state = 0; + uint32 pm_csr = 0; + osl_t *osh = bus->osh; + + pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); + cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK; + + if (cur_state == state) { + DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state)); + return BCME_OK; + } + + if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT) + return BCME_ERROR; + + /* Validate the state transition + * if already in a lower power state, return error + */ + if (state != PCIECFGREG_PM_CSR_STATE_D0 && + cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD && + cur_state > state) { + DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__)); + return BCME_ERROR; + } + + pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK; + pm_csr |= state; + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr); + + /* need to wait for the specified mandatory pcie power transition delay time */ + if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT || + cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT) + OSL_DELAY(DHDPCIE_PM_D3_DELAY); + else if (state == PCIECFGREG_PM_CSR_STATE_D2 || + cur_state == PCIECFGREG_PM_CSR_STATE_D2) + OSL_DELAY(DHDPCIE_PM_D2_DELAY); + + /* read back the power state and verify */ + pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); + cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK; + if (cur_state != state) { + DHD_ERROR(("%s: power transition failed ! 
Current state is %u \n", + __FUNCTION__, cur_state)); + return BCME_ERROR; + } else { + DHD_ERROR(("%s: power transition to %u success \n", + __FUNCTION__, cur_state)); + } + + return BCME_OK; +} + +int +dhdpcie_config_check(dhd_bus_t *bus) +{ + uint32 i, val; + int ret = BCME_ERROR; + + for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) { + val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32)); + if ((val & 0xFFFF) == VENDOR_BROADCOM) { + ret = BCME_OK; + break; + } + OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000); + } + + return ret; +} + +int +dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr) +{ + uint32 i; + osl_t *osh = bus->osh; + + if (BCME_OK != dhdpcie_config_check(bus)) { + return BCME_ERROR; + } + + for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) { + OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]); + } + OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]); + + if (restore_pmcsr) + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, + sizeof(uint32), bus->saved_config.pmcsr); + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32), + bus->saved_config.msi_addr0); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, + sizeof(uint32), bus->saved_config.msi_addr1); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA, + sizeof(uint32), bus->saved_config.msi_data); + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL, + sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2, + sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL, + sizeof(uint32), bus->saved_config.exp_link_ctrl_stat); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2, + sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2); + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, + sizeof(uint32), bus->saved_config.l1pm0); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, + sizeof(uint32), bus->saved_config.l1pm1); + + OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32), + bus->saved_config.bar0_win); + dhdpcie_setbar1win(bus, bus->saved_config.bar1_win); + + return BCME_OK; +} + +int +dhdpcie_config_save(dhd_bus_t *bus) +{ + uint32 i; + osl_t *osh = bus->osh; + + if (BCME_OK != dhdpcie_config_check(bus)) { + return BCME_ERROR; + } + + for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) { + bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32)); + } + + bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); + + bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP, + sizeof(uint32)); + bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, + sizeof(uint32)); + bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, + sizeof(uint32)); + bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA, + sizeof(uint32)); + + bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh, + PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32)); + bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh, + PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32)); + bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh, + PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32)); + bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh, + PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32)); + + 
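+ /* + * Editorial note: this snapshot is the counterpart of dhdpcie_config_restore(). + * It is taken while the device is known to respond (dhdpcie_config_check() + * passed above), so the header, MSI, device/link control and L1SS registers + * can be replayed after a link down or D3cold, where the endpoint loses its + * configuration space. + */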
bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, + sizeof(uint32)); + bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, + sizeof(uint32)); + + bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN, + sizeof(uint32)); + bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN, + sizeof(uint32)); + + return BCME_OK; +} + +#ifdef CONFIG_ARCH_EXYNOS +dhd_pub_t *link_recovery = NULL; +#endif /* CONFIG_ARCH_EXYNOS */ + +static void +dhdpcie_bus_intr_init(dhd_bus_t *bus) +{ + uint buscorerev = bus->sih->buscorerev; + bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev); + bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev); + bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev); + bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev); + if (buscorerev < 64) { + bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1; + } +} + +static void +dhdpcie_cc_watchdog_reset(dhd_bus_t *bus) +{ + uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN : + (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN); + pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en); +} + +void +dhdpcie_dongle_reset(dhd_bus_t *bus) +{ + + /* if the pcie link is down, watchdog reset + * should not be done, as it may hang + */ + if (bus->is_linkdown) { + return; + } + + /* Currently BP reset using CFG reg is done only for android platforms */ +#ifdef DHD_USE_BP_RESET_SPROM + /* This is for architectures that do NOT control subsystem reset */ + (void)dhd_bus_cfg_sprom_ctrl_bp_reset(bus); + return; +#elif defined(DHD_USE_BP_RESET_SS_CTRL) + /* This is for architectures that support Subsystem Control */ + (void)dhd_bus_cfg_ss_ctrl_bp_reset(bus); + return; +#else + +#ifdef BCMQT_HW + /* FLR takes a long time on QT and is only required when testing with a + * BT-included database. Fall back to watchdog reset by default and only + * perform FLR if enabled through the module parameter + */ + if (qt_flr_reset && (dhd_bus_perform_flr(bus, FALSE) != BCME_UNSUPPORTED)) { + return; + } +#else + /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */ + if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) +#endif + { + /* Legacy chipcommon watchdog reset */ + dhdpcie_cc_watchdog_reset(bus); + } + return; +#endif /* DHD_USE_BP_RESET */ +} + +#ifdef BCMQT_HW +/* Calculate the dongle/host clock ratio for QT so that waiting periods in the host driver can + * be scaled properly. The dongle uses the ALP clock by default, which can't be read directly, + * and the ILP and ALP clocks are scaled disproportionally in QT, so DHD must know the preset + * crystal frequency for the ALP clock in order to calculate the scale ratio. The logic below + * takes 3 sources of xtal frequency in the following priority: + * 1 module parameter + * 2 nvram "xtalfreq" line (not available for the first dongle reset) + * 3 Hard coded 37.4MHz + * If the QT simulation of a chip uses a xtal frequency other than 37.4MHz, it is strongly + * recommended to extend the hard-coded value on a per-chip basis or override it with the module + * parameter. 
+ */ +#define XTAL_FREQ_37M4 37400000u +void dhdpcie_htclkratio_cal(dhd_bus_t *bus) +{ + uint cur_coreidx, pmu_idx; + uint32 ilp_start, ilp_tick, xtal_ratio; + int xtalfreq = 0; + + /* If a larger than 1 htclkratio is set through module parameter, use it directly */ + if (htclkratio > 1) { + goto exit; + } + + /* Store current core id */ + cur_coreidx = si_coreidx(bus->sih); + if (!si_setcore(bus->sih, PMU_CORE_ID, 0)) { + htclkratio = 2000; + goto exit; + } + + pmu_idx = si_coreidx(bus->sih); + + /* Count ILP ticks in 1 second of host domain clock */ + ilp_start = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmutimer), 0, 0); + osl_sleep(1000); + ilp_tick = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmutimer), 0, 0); + /* -1 to compensate for the incomplete cycle at the beginning */ + ilp_tick -= ilp_start - 1; + + /* Get xtal vs ILP ratio from XtalFreqRatio(0x66c) */ + xtal_ratio = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmu_xtalfreq), 0, 0); + xtal_ratio = (xtal_ratio & PMU_XTALFREQ_REG_ILPCTR_MASK) / 4; + + /* Go back to original core */ + si_setcoreidx(bus->sih, cur_coreidx); + + /* Use module parameter if one is provided. Otherwise use default 37.4MHz */ + if (dngl_xtalfreq) { + xtalfreq = dngl_xtalfreq; + } else { + xtalfreq = XTAL_FREQ_37M4; + } + + /* htclkratio = xtalfreq / QT_XTAL_FREQ + * = xtalfreq / (ilp_tick * xtal_ratio) + */ + htclkratio = xtalfreq / (ilp_tick * xtal_ratio); + bus->xtalfreq = xtalfreq; + bus->ilp_tick = ilp_tick; + bus->xtal_ratio = xtal_ratio; + +exit: + DHD_ERROR(("Dongle/Host clock ratio %u with %dHz xtal frequency\n", htclkratio, xtalfreq)); +} + +/* Re-calculate htclkratio if nvram provides a different xtalfreq */ +void dhdpcie_htclkratio_recal(dhd_bus_t *bus, char *nvram, uint nvram_sz) +{ + char *freq_c = NULL; + uint len, p; + int xtalfreq = 0; + + /* Do not re-calculate if xtalfreq is overridden by module parameter */ + if (dngl_xtalfreq) + return; + + /* look for "xtalfreq=xxxx" line in nvram */ + len = strlen("xtalfreq"); + for (p = 0; p < (nvram_sz - len) && nvram[p]; ) { + if ((bcmp(&nvram[p], "xtalfreq", len) == 0) && (nvram[p + len] == '=')) { + freq_c = &nvram[p + len + 1u]; + break; + } + /* jump to next line */ + while (nvram[p++]); + } + + if (freq_c) { + xtalfreq = bcm_strtoul(freq_c, NULL, 0); + if (xtalfreq > (INT_MAX / 1000u)) { + DHD_ERROR(("xtalfreq %d in nvram is too big\n", xtalfreq)); + xtalfreq = 0; + } + xtalfreq *= 1000; + } + + /* Skip recalculation if: + * nvram doesn't provide "xtalfreq", or + * the first calculation was skipped because of a module-parameter override, or + * xtalfreq in nvram is the same as the one used in the first calculation + */ + if (xtalfreq == 0 || bus->xtalfreq == 0 || xtalfreq == bus->xtalfreq) { + return; + } + + /* Print an error message here. Even if the ratio is corrected with the nvram setting, a + * dongle reset has already been performed before DHD had access to NVRAM. An insufficient + * waiting period for reset might cause unexpected behavior. + */
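+ /* + * Worked example (illustrative numbers only): with the default 37.4MHz xtal, + * suppose the 1s sample in dhdpcie_htclkratio_cal() counted ilp_tick = 20 and + * the dongle reported xtal_ratio = 1141 (an ILP of ~32.768kHz). The simulated + * ALP clock then runs at ilp_tick * xtal_ratio = 22820Hz, giving + * htclkratio = 37400000 / 22820 = 1638, i.e. dongle time runs ~1638x slower + * than host time and every wait must be scaled up by that factor. + */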
+ DHD_ERROR(("Re-calculating htclkratio because nvram xtalfreq %dHz is different from %dHz\n", + xtalfreq, bus->xtalfreq)); + + htclkratio = xtalfreq / (bus->ilp_tick * bus->xtal_ratio); + bus->xtalfreq = xtalfreq; + + DHD_ERROR(("Corrected Dongle/Host clock ratio %u with %dHz xtal frequency\n", + htclkratio, xtalfreq)); +} +#endif /* BCMQT_HW */ + +static bool +is_bmpu_supported(dhd_bus_t *bus) +{ + if (BCM4378_CHIP(bus->sih->chip) || + BCM4376_CHIP(bus->sih->chip) || + BCM4387_CHIP(bus->sih->chip) || + BCM4385_CHIP(bus->sih->chip)) { + return TRUE; + } + return FALSE; +} + +#define CHIP_COMMON_SCR_DHD_TO_BL_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_DHD_TO_BL) +#define CHIP_COMMON_SCR_BL_TO_DHD_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_BL_TO_DHD) +void +dhdpcie_bus_mpu_disable(dhd_bus_t *bus) +{ + volatile uint32 *cr4_regs; + uint val = 0; + + if (is_bmpu_supported(bus) == FALSE) { + return; + } + + /* reset the dhd_to_bl and bl_to_dhd regs to their default values */ + (void)serialized_backplane_access(bus, CHIP_COMMON_SCR_DHD_TO_BL_ADDR(bus->sih), + sizeof(val), &val, FALSE); + (void)serialized_backplane_access(bus, CHIP_COMMON_SCR_BL_TO_DHD_ADDR(bus->sih), + sizeof(val), &val, FALSE); + + cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + if (cr4_regs == NULL) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + return; + } + if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) { + /* bus mpu is supported */ + W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0); + } +} + +static bool +dhdpcie_dongle_attach(dhd_bus_t *bus) +{ + osl_t *osh = bus->osh; + volatile void *regsva = (volatile void*)bus->regs; + uint16 devid; + uint32 val; + sbpcieregs_t *sbpcieregs; + bool dongle_reset_needed; + uint16 chipid; + + BCM_REFERENCE(chipid); + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + + /* Configure CTO Prevention functionality */ +#if defined(BCMFPGA_HW) || defined(BCMQT_HW) + DHD_ERROR(("Disable CTO\n")); + bus->cto_enable = FALSE; +#else +#if defined(BCMPCIE_CTO_PREVENTION) + chipid = dhd_get_chipid(bus); + + if (BCM4349_CHIP(chipid) || BCM4350_CHIP(chipid) || BCM4345_CHIP(chipid)) { + DHD_ERROR(("Disable CTO\n")); + bus->cto_enable = FALSE; + } else { + DHD_ERROR(("Enable CTO\n")); + bus->cto_enable = TRUE; + } +#else + DHD_ERROR(("Disable CTO\n")); + bus->cto_enable = FALSE; +#endif /* BCMPCIE_CTO_PREVENTION */ +#endif /* BCMFPGA_HW || BCMQT_HW */ + + if (PCIECTO_ENAB(bus)) { + dhdpcie_cto_init(bus, TRUE); + } + +#ifdef CONFIG_ARCH_EXYNOS + link_recovery = bus->dhd; +#endif /* CONFIG_ARCH_EXYNOS */ + + dhd_init_pwr_req_lock(bus); + dhd_init_bus_lp_state_lock(bus); + dhd_init_backplane_access_lock(bus); + + bus->alp_only = TRUE; + bus->sih = NULL; + + /* Check PCIe bus status by reading the configuration space */ + val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32)); + if ((val & 0xFFFF) != VENDOR_BROADCOM) { + DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__)); + goto fail; + } + devid = (val >> 16) & 0xFFFF; + bus->cl_devid = devid; + + /* Set bar0 window to si_enum_base */ + dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid)); + + /* + * Check the PCI_SPROM_CONTROL register to prevent invalid address access + * due to the address-space switch from PCI_BUS to SI_BUS. 
+ */ + val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32)); + if (val == 0xffffffff) { + DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__)); + goto fail; + } + +#if defined(DHD_EFI) || defined(NDIS) + /* Save good copy of PCIe config space */ + if (BCME_OK != dhdpcie_config_save(bus)) { + DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__)); + goto fail; + } +#endif /* DHD_EFI */ + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + + if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) { + /* + * HW JIRA - CRWLPCIEGEN2-672 + * Producer Index Feature which is used by F1 gets reset on F0 FLR + * fixed in REV68 + */ + if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { + dhdpcie_ssreset_dis_enum_rst(bus); + } + + /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset + * dhdpcie_bus_release_dongle() --> si_detach() + * dhdpcie_dongle_attach() --> si_attach() + */ + bus->pwr_req_ref = 0; + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_nolock(bus); + } + + /* Get info on the ARM and SOCRAM cores... */ + /* Should really be qualified by device id */ + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + bus->coreid = si_coreid(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + /* CA7 requires coherent bits on */ + if (bus->coreid == ARMCA7_CORE_ID) { + val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, + (val | PCIE_BARCOHERENTACCEN_MASK)); + } + + /* EFI requirement - stop driver load if FW is already running + * need to do this here before pcie_watchdog_reset, because + * pcie_watchdog_reset will put the ARM back into halt state + */ + if (!dhdpcie_is_arm_halted(bus)) { + DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n", + __FUNCTION__)); + goto fail; + } + + BCM_REFERENCE(dongle_reset_needed); + + /* For inbuilt drivers pcie clk req will be done by RC, + * so do not do clkreq from dhd + */ +#if defined(linux) || defined(LINUX) + if (dhd_download_fw_on_driverload) +#endif /* linux || LINUX */ + { + /* Enable CLKREQ# */ + dhdpcie_clkreq(bus->osh, 1, 1); + } + + /* Calculate htclkratio only for QT, for FPGA it is fixed at 30 */ +#ifdef BCMQT_HW + dhdpcie_htclkratio_cal(bus); +#endif /* BCMQT_HW */ + + /* + * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset + * without checking dongle_isolation flag, but if it is called via some other path + * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should + * be called. + */ + if (bus->dhd == NULL) { + /* dhd_attach not yet happened, do dongle reset */ +#ifdef DHD_SKIP_DONGLE_RESET_IN_ATTACH + dongle_reset_needed = FALSE; +#else + dongle_reset_needed = TRUE; +#endif /* DHD_SKIP_DONGLE_RESET_IN_ATTACH */ + } else { + /* Based on the dongle_isolation flag, reset dongle */ + dongle_reset_needed = !(bus->dhd->dongle_isolation); + } + + /* Fix for FLR reset specific to 4397a0. 
Write a value 0x1E in PMU CC reg18 */ + if (BCM4397_CHIP(dhd_get_chipid(bus)) && (bus->sih->chiprev == 0)) { + uint origidx = 0; + + origidx = si_coreidx(bus->sih); + pmu_corereg(bus->sih, SI_CC_IDX, chipcontrol_addr, ~0, PMU_CHIPCTL18); + pmu_corereg(bus->sih, SI_CC_IDX, chipcontrol_data, + (PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN | PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK), + (PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN | + ((PMU_CC18_WL_P_CHAN_TIMER_SEL_8ms << PMU_CC18_WL_P_CHAN_TIMER_SEL_OFF) & + PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK))); + si_setcore(bus->sih, origidx, 0); + } + + /* + * Issue dongle to reset all the cores on the chip - similar to rmmod dhd + * This is required to avoid spurious interrupts to the Host and bring back + * dongle to a sane state (on host soft-reboot / watchdog-reboot). + */ + if (dongle_reset_needed) { + dhdpcie_dongle_reset(bus); + } + + /* need to set the force_bt_quiesce flag here + * before calling dhdpcie_dongle_flr_or_pwr_toggle + */ + bus->force_bt_quiesce = TRUE; + /* + * For buscorerev = 66 and after, F0 FLR should be done independent from F1. + * So don't need BT quiesce. + */ + if (bus->sih->buscorerev >= 66) { + bus->force_bt_quiesce = FALSE; + } + + dhdpcie_dongle_flr_or_pwr_toggle(bus); + + dhdpcie_bus_mpu_disable(bus); + + si_setcore(bus->sih, PCIE2_CORE_ID, 0); + sbpcieregs = (sbpcieregs_t*)(bus->regs); + + /* WAR where the BAR1 window may not be sized properly */ + W_REG(osh, &sbpcieregs->configaddr, 0x4e0); + val = R_REG(osh, &sbpcieregs->configdata); + W_REG(osh, &sbpcieregs->configdata, val); + + /* if chip uses sysmem instead of tcm, typically ARM CA chips */ + if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4385_CHIP_ID: + bus->dongle_ram_base = CA7_4385_RAM_BASE; + break; + case BCM4388_CHIP_ID: + case BCM4389_CHIP_ID: + bus->dongle_ram_base = CA7_4389_RAM_BASE; + break; +#ifdef UNRELEASEDCHIP + case BCM4397_CHIP_ID: + bus->dongle_ram_base = CA7_4389_RAM_BASE; + break; +#endif + default: + /* also populate base address */ + bus->dongle_ram_base = 0x200000; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + break; + } + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + } else { + /* cr4 has a different way to find the RAM size from TCM's */ + if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { + DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4339_CHIP_ID: + case BCM4335_CHIP_ID: + bus->dongle_ram_base = CR4_4335_RAM_BASE; + break; + case BCM4358_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43567_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM4350_CHIP_ID: + case BCM43570_CHIP_ID: + bus->dongle_ram_base = CR4_4350_RAM_BASE; + break; + case BCM4360_CHIP_ID: + bus->dongle_ram_base = CR4_4360_RAM_BASE; + break; + + case BCM4364_CHIP_ID: + bus->dongle_ram_base = CR4_4364_RAM_BASE; + break; + + CASE_BCM4345_CHIP: + bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */ + ? 
CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; + break; + CASE_BCM43602_CHIP: + bus->dongle_ram_base = CR4_43602_RAM_BASE; + break; + case BCM4349_CHIP_GRPID: + /* RAM base changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? + CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9); + break; + case BCM4347_CHIP_ID: + case BCM4357_CHIP_ID: + case BCM4361_CHIP_ID: + bus->dongle_ram_base = CR4_4347_RAM_BASE; + break; + case BCM43751_CHIP_ID: + bus->dongle_ram_base = CR4_43751_RAM_BASE; + break; + case BCM43752_CHIP_ID: + bus->dongle_ram_base = CR4_43752_RAM_BASE; + break; + case BCM4376_CHIP_GRPID: + bus->dongle_ram_base = CR4_4376_RAM_BASE; + break; + case BCM4378_CHIP_GRPID: + bus->dongle_ram_base = CR4_4378_RAM_BASE; + break; + case BCM4362_CHIP_ID: + bus->dongle_ram_base = CR4_4362_RAM_BASE; + break; + case BCM4375_CHIP_ID: + case BCM4369_CHIP_ID: + bus->dongle_ram_base = CR4_4369_RAM_BASE; + break; + case BCM4377_CHIP_ID: + bus->dongle_ram_base = CR4_4377_RAM_BASE; + break; + case BCM4387_CHIP_GRPID: + bus->dongle_ram_base = CR4_4387_RAM_BASE; + break; + case BCM4385_CHIP_ID: + bus->dongle_ram_base = CR4_4385_RAM_BASE; + break; + default: + bus->dongle_ram_base = 0; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + } + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_ramsize) { + dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_ramsize); + } + + if (bus->ramsize > DONGLE_TCM_MAP_SIZE) { + DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n", + __FUNCTION__, bus->ramsize, bus->ramsize)); + goto fail; + } + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", + bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); + + dhdpcie_bar1_window_switch_enab(bus); + + /* Init bar1_switch_lock only after bar1_switch_enab is inited */ + dhd_init_bar1_switch_lock(bus); + + bus->srmemsize = si_socram_srmem_size(bus->sih); + + dhdpcie_bus_intr_init(bus); + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + if ((bus->poll = (bool)dhd_poll)) + bus->pollrate = 1; +#ifdef DHD_DISABLE_ASPM + dhd_bus_aspm_enable_rc_ep(bus, FALSE); +#endif /* DHD_DISABLE_ASPM */ +#ifdef PCIE_OOB + dhdpcie_oob_init(bus); +#endif /* PCIE_OOB */ +#ifdef PCIE_INB_DW + bus->inb_enabled = TRUE; +#endif /* PCIE_INB_DW */ +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + bus->ds_enabled = TRUE; + bus->deep_sleep = TRUE; +#endif + + bus->idma_enabled = TRUE; + bus->ifrm_enabled = TRUE; +#ifdef BCMINTERNAL + bus->dma_chan = 0; +#endif /* BCMINTERNAL */ + + dhdpcie_pme_stat_clear(bus); + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear_nolock(bus); + + /* + * One time clearing of Common Power Domain since HW default is set + * Needs to be after FLR because FLR resets PCIe enum back to HW defaults + * for 4378B0 (rev 68). 
+ * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672 + */ + si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0); + + /* + * WAR to fix ARM cold boot; + * Assert WL domain in DAR helps but not enum + */ + if (bus->sih->buscorerev >= 68) { + dhd_bus_pcie_pwr_req_wl_domain(bus, + DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE); + } + } + + DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__)); + + return 0; + +fail: +/* for EFI even if there is an error, load still succeeds +* so si_detach should not be called here, it is called during unload +*/ +#ifndef DHD_EFI + /* + * As request irq is done later, till then CTO will not be detected, + * so unconditionally dump cfg and DAR registers. + */ + dhd_bus_dump_imp_cfg_registers(bus); + /* Check if CTO has happened */ + if (PCIECTO_ENAB(bus)) { + /* read pci_intstatus */ + uint32 pci_intstatus = + dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4); + if (pci_intstatus == (uint32)-1) { + DHD_ERROR(("%s : Invalid pci_intstatus(0x%x)\n", + __FUNCTION__, pci_intstatus)); + } else if (pci_intstatus & PCI_CTO_INT_MASK) { + DHD_ERROR(("%s: ##### CTO REPORTED BY DONGLE " + "intstat=0x%x enab=%d\n", __FUNCTION__, + pci_intstatus, bus->cto_enable)); + } + } + dhd_deinit_pwr_req_lock(bus); + dhd_deinit_bus_lp_state_lock(bus); + dhd_deinit_backplane_access_lock(bus); + + if (bus->sih != NULL) { + /* Dump DAR registers only if si_attach has succeeded */ + dhd_bus_dump_dar_registers(bus); + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear_nolock(bus); + } + + si_detach(bus->sih); + bus->sih = NULL; + } + +#endif /* DHD_EFI */ + DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__)); + return -1; +} + +int +dhpcie_bus_unmask_interrupt(dhd_bus_t *bus) +{ + dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB); + return 0; +} +int +dhpcie_bus_mask_interrupt(dhd_bus_t *bus) +{ + dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0); + return 0; +} + +/* Non atomic function, caller should hold appropriate lock */ +void +dhdpcie_bus_intr_enable(dhd_bus_t *bus) +{ + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + if (bus) { + if (bus->sih && !bus->is_linkdown) { + /* Skip after receiving D3 ACK */ + if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { + return; + } + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_unmask_interrupt(bus); + } else { +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, + bus->def_intmask, TRUE); +#endif /* defined(DHD_MMIO_TRACE) */ + si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, + bus->def_intmask, bus->def_intmask); + } + } + +#if defined(NDIS) + dhd_msix_message_set(bus->dhd, 0, 0, TRUE); +#endif + } + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +/* Non atomic function, caller should hold appropriate lock */ +void +dhdpcie_bus_intr_disable(dhd_bus_t *bus) +{ + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + if (bus && bus->sih && !bus->is_linkdown) { + /* Skip after receiving D3 ACK */ + if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) { + return; + } + + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_mask_interrupt(bus); + } else { +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, 0, TRUE); +#endif /* defined(DHD_MMIO_TRACE) */ + si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, + bus->def_intmask, 0); + } + } +#if defined(NDIS) + /* + * dhdpcie_bus_intr_disable may get called from + * dhdpcie_dongle_attach -> 
dhdpcie_dongle_reset + * with dhd = NULL during attach time. So check for bus->dhd NULL before + * calling dhd_msix_message_set + */ + if (bus && bus->dhd) { + dhd_msix_message_set(bus->dhd, 0, 0, FALSE); + } +#endif + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +/* + * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress + * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts + * to gracefully exit. All the bus usage contexts, before marking busstate as busy, will check + * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so, + * they will exit from there itself without marking dhd_bus_busy_state as BUSY. + */ +void +dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup); +#endif /* DHD_PCIE_RUNTIMEPM */ + + dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms; + if (dhdp->dhd_watchdog_ms_backup) { + DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n", + __FUNCTION__)); + dhd_os_wd_timer(dhdp, 0); + } + if (dhdp->busstate != DHD_BUS_DOWN) { +#ifdef DHD_DONGLE_TRAP_IN_DETACH + /* + * For x86 platforms, rmmod/insmod is failing because some power + * resources are not held high. + * Hence induce a DB7 trap during detach; in the FW trap handler all + * power resources are held high. + */ + if (!dhd_query_bus_erros(dhdp) && dhdp->db7_trap.fw_db7w_trap) { + dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE; + dhdpcie_fw_trap(dhdp->bus); + OSL_DELAY(100 * 1000); // wait 100 msec + dhdp->db7_trap.fw_db7w_trap_inprogress = FALSE; + } else { + DHD_ERROR(("%s: DB7 Not sent!!!\n", + __FUNCTION__)); + } +#endif /* DHD_DONGLE_TRAP_IN_DETACH */ + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_GENERAL_UNLOCK(dhdp, flags); + } + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); +#ifdef LINUX + if ((timeleft == 0) || (timeleft == 1)) +#else + if (timeleft == 0) +#endif + { + /* XXX This condition ideally should not occur, this means some + * bus usage context is not clearing the respective usage bit, print + * dhd_bus_busy_state and crash the host for further debugging. + */
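+ /* + * Editorial sketch (helper names assumed from the rest of this driver, + * shown for illustration): each bus user brackets its work with the busy + * bitmap, e.g. + * + * DHD_BUS_BUSY_SET_IN_TX(dhdp); + * ... tx work ... + * DHD_BUS_BUSY_CLEAR_IN_TX(dhdp); + * dhd_os_busbusy_wake(dhdp); + * + * dhd_os_busbusy_wait_negation() returns a non-zero time left only once + * the bitmap reaches zero, so reaching this branch means some context + * leaked its busy bit. + */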
+ DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_REMOVE; + DHD_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhdpcie_bus_remove_prep(dhd_bus_t *bus) +{ + unsigned long flags; + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef PCIE_INB_DW + /* De-Initialize the lock to serialize Device Wake Inband activities */ + if (bus->inb_lock) { + osl_spin_lock_deinit(bus->dhd->osh, bus->inb_lock); + bus->inb_lock = NULL; + } +#endif + + dhd_os_sdlock(bus->dhd); + + if (bus->sih && !bus->dhd->dongle_isolation) { + + dhd_bus_pcie_pwr_req_reload_war(bus); + + /* Skip below WARs for Android as insmod fails after rmmod in Brix Android */ +#if !defined(OEM_ANDROID) + /* HW4347-909, Set PCIE TRefUp time to 100us for 4347/4377 */ + if ((bus->sih->buscorerev == 19) || (bus->sih->buscorerev == 23)) { + pcie_set_trefup_time_100us(bus->sih); + } + + /* disable fast lpo from 4347/4377 */ + /* For 4378/4387/4389, do not disable fast lpo because we always enable fast lpo. + * it causes insmod/rmmod reload failure. + */ + if ((PMUREV(bus->sih->pmurev) > 31) && + !(PCIE_FASTLPO_ENABLED(bus->sih->buscorerev))) { + si_pmu_fast_lpo_disable(bus->sih); + } +#endif /* !OEM_ANDROID */ + + /* if the pcie link is down, watchdog reset + * should not be done, as it may hang + */ + + if (!bus->is_linkdown) { + /* For Non-EFI modular builds, do dongle reset during rmmod */ +#ifndef DHD_EFI + /* For EFI-DHD this compile flag will be defined. + * In EFI, depending on bt over pcie mode + * we either power toggle or do F0 FLR + * from dhdpcie_bus_release dongle. 
So no need to + * do dongle reset from here + */ + dhdpcie_dongle_reset(bus); +#endif /* !DHD_EFI */ + } + + bus->dhd->is_pcie_watchdog_reset = TRUE; + } + + dhd_os_sdunlock(bus->dhd); + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +void +dhd_init_bus_lp_state_lock(dhd_bus_t *bus) +{ + if (!bus->bus_lp_state_lock) { + bus->bus_lp_state_lock = osl_spin_lock_init(bus->osh); + } +} + +void +dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus) +{ + if (bus->bus_lp_state_lock) { + osl_spin_lock_deinit(bus->osh, bus->bus_lp_state_lock); + bus->bus_lp_state_lock = NULL; + } +} + +void +dhd_init_backplane_access_lock(dhd_bus_t *bus) +{ + if (!bus->backplane_access_lock) { + bus->backplane_access_lock = osl_spin_lock_init(bus->osh); + } +} + +void +dhd_deinit_backplane_access_lock(dhd_bus_t *bus) +{ + if (bus->backplane_access_lock) { + osl_spin_lock_deinit(bus->osh, bus->backplane_access_lock); + bus->backplane_access_lock = NULL; + } +} + +/** Detach and free everything */ +void +dhdpcie_bus_release(dhd_bus_t *bus) +{ + bool dongle_isolation = FALSE; +#ifdef BCMQT + uint buscorerev = 0; +#endif /* BCMQT */ + osl_t *osh = NULL; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + + osh = bus->osh; + ASSERT(osh); + + if (bus->dhd) { +#if defined(DEBUGGER) || defined (DHD_DSCOPE) + debugger_close(); +#endif /* DEBUGGER || DHD_DSCOPE */ + dhdpcie_advertise_bus_remove(bus->dhd); + dongle_isolation = bus->dhd->dongle_isolation; + bus->dhd->is_pcie_watchdog_reset = FALSE; + dhdpcie_bus_remove_prep(bus); + + if (bus->intr) { + dhdpcie_bus_intr_disable(bus); + dhdpcie_free_irq(bus); + } + dhd_deinit_bus_lp_state_lock(bus); + dhd_deinit_bar1_switch_lock(bus); + dhd_deinit_backplane_access_lock(bus); + dhd_deinit_pwr_req_lock(bus); +#ifdef PCIE_INB_DW + dhd_deinit_dongle_ds_lock(bus); +#endif /* PCIE_INB_DW */ +#ifdef BCMQT + if (IDMA_ACTIVE(bus->dhd)) { + /** + * On FPGA, during the exit path, force the "IDMA Control Register" + * back to its default value 0x0. Otherwise host/dongle sync for + * IDMA fails during the next IDMA initialization (without a system + * reboot) + */ + buscorerev = bus->sih->buscorerev; + si_corereg(bus->sih, bus->sih->buscoreidx, + IDMAControl(buscorerev), ~0, 0); + } +#endif /* BCMQT */ + /** + * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to + * access Dongle registers. + * dhd_detach will communicate with the dongle to delete flowrings etc. + * So dhdpcie_bus_release_dongle should be called only after dhd_detach. + */ + dhd_detach(bus->dhd); + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_free(bus->dhd); + bus->dhd = NULL; + } +#ifdef DHD_EFI + else { + if (bus->intr) { + dhdpcie_bus_intr_disable(bus); + dhdpcie_free_irq(bus); + } + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + } +#endif /* DHD_EFI */ + /* unmap the regs and tcm here!! 
*/ + if (bus->regs) { + dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE); + bus->regs = NULL; + } + if (bus->tcm) { + dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE); + bus->tcm = NULL; + } + + dhdpcie_bus_release_malloc(bus, osh); + /* Detach pcie shared structure */ + if (bus->pcie_sh) { + MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + } + + if (bus->console.buf != NULL) { + MFREE(osh, bus->console.buf, bus->console.bufsize); + } + +#ifdef BCMINTERNAL + if (bus->msi_sim) { + DMA_UNMAP(osh, bus->msi_sim_phys, MSI_SIM_BUFSIZE, DMA_RX, 0, 0); + MFREE(osh, bus->msi_sim_addr, MSI_SIM_BUFSIZE); + } + + /* free host fw buffer if there is any */ + if (bus->hostfw_buf.va) { + DMA_FREE_CONSISTENT(osh, bus->hostfw_buf.va, bus->hostfw_buf._alloced, + bus->hostfw_buf.pa, bus->hostfw_buf.dmah); + memset(&bus->hostfw_buf, 0, sizeof(bus->hostfw_buf)); + } +#endif /* BCMINTERNAL */ + + /* Finally free bus info */ + MFREE(osh, bus, sizeof(dhd_bus_t)); + + g_dhd_bus = NULL; + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); +} /* dhdpcie_bus_release */ + +void +dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) { + goto fail; + } + + if (bus->is_linkdown) { + DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__)); + goto fail; + } + + if (bus->sih) { +#ifdef BCMINTERNAL + if (bus->msi_sim) { + /* disable MSI */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configaddr), ~0, PCIE_CFG_MSICAP_OFFSET); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), ~0, + PCIE_CFG_MSICAP_DISABLE_MSI); + } +#endif /* BCMINTERNAL */ + + /* + * Perform dongle reset only if dongle isolation is not enabled. + * In android platforms, dongle isolation will be enabled and + * quiescing dongle will be done using DB7 trap. 
+ */ + if (!dongle_isolation && + bus->dhd && !bus->dhd->is_pcie_watchdog_reset) { + dhdpcie_dongle_reset(bus); + } + + /* Only for EFI this will be effective */ + dhdpcie_dongle_flr_or_pwr_toggle(bus); + + if (bus->ltrsleep_on_unload) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); + } + + if (bus->sih->buscorerev == 13) + pcie_serdes_iddqdisable(bus->osh, bus->sih, + (sbpcieregs_t *) bus->regs); + + /* For inbuilt drivers pcie clk req will be done by RC, + * so do not do clkreq from dhd + */ +#if defined(linux) || defined(LINUX) + if (dhd_download_fw_on_driverload) +#endif /* linux || LINUX */ + { + /* Disable CLKREQ# */ + dhdpcie_clkreq(bus->osh, 1, 0); + } + } +fail: + /* Resources should be freed */ + if (bus->sih) { + si_detach(bus->sih); + bus->sih = NULL; + } + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +uint32 +dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size) +{ + uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size); + return data; +} + +/** 32 bit config write */ +void +dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data); +} + +void +dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data); +} + +void +dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_MEMSIZE; + /* Restrict the memsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d max accepted %d\n", + mem_size, min_size, (int32)bus->orig_ramsize)); + if ((mem_size > min_size) && + (mem_size < (int32)bus->orig_ramsize)) { + bus->ramsize = mem_size; + } else { + DHD_ERROR(("%s: Invalid mem_size %d\n", __FUNCTION__, mem_size)); + } +} + +void +dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); + return; + +} + +/** Stop bus module: clear pending frames, disable data flow */ +void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus->dhd) + return; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__)); + goto done; + } + + DHD_STOP_RPM_TIMER(bus->dhd); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_set(&bus->dhd->block_bus, TRUE); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + dhdpcie_bus_intr_disable(bus); + + if (!bus->is_linkdown) { + uint32 status; + status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); + } + +#if defined(linux) || defined(LINUX) + if (!dhd_download_fw_on_driverload) { + dhd_dpc_kill(bus->dhd); + } +#endif /* linux || LINUX */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_disable(dhd_bus_to_dev(bus)); + pm_runtime_set_suspended(dhd_bus_to_dev(bus)); + pm_runtime_enable(dhd_bus_to_dev(bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + /* Clear rx control and wake any waiters */ + /* XXX More important in 
disconnect, but no context? */ + dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT); + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP); + +done: + return; +} + +#ifdef DEVICE_TX_STUCK_DETECT +void +dhd_bus_send_msg_to_daemon(int reason) +{ + bcm_to_info_t to_info; + + to_info.magic = BCM_TO_MAGIC; + to_info.reason = reason; + + dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t)); + return; +} + +#define DHD_MEMORY_SET_PATTERN 0xAA + +/** + * Scan the flow rings in the active list to check whether any are stuck and notify the application. + * The conditions for warn/stuck detection are: + * 1. Flow ring is active + * 2. There are packets to be consumed by the consumer (wr != rd) + * If 1 and 2 are true, then + * 3. Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION + * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION + */ +static void +dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus) +{ + uint32 tx_cmpl; + unsigned long list_lock_flags; + unsigned long ring_lock_flags; + dll_t *item, *prev; + flow_ring_node_t *flow_ring_node; + if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)bus->dhd->if_flow_lkup; + uint8 ifindex; +#ifndef FW_HAS_AGING_LOGIC_ALL_IF + /** + * Since the aging logic is implemented only for INFRA in FW, + * DHD should monitor only INFRA for stuck detection. + */ + uint8 role; +#endif /* FW_HAS_AGING_LOGIC_ALL_IF */ + bool ring_empty; + bool active; + uint8 status; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags); + + for (item = dll_tail_p(&bus->flowring_active_list); + !dll_end(&bus->flowring_active_list, item); item = prev) { + + prev = dll_prev_p(item); + + flow_ring_node = dhd_constlist_to_flowring(item); + ifindex = flow_ring_node->flow_info.ifindex; +#ifndef FW_HAS_AGING_LOGIC_ALL_IF + role = if_flow_lkup[ifindex].role; + if (role != WLC_E_IF_ROLE_STA) { + continue; + } +#endif /* FW_HAS_AGING_LOGIC_ALL_IF */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags); + tx_cmpl = flow_ring_node->tx_cmpl; + active = flow_ring_node->active; + status = flow_ring_node->status; + ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info); + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags); + /* + * No need to monitor the flow ring if: + * 1. the flow ring is empty + * 2. LINK is down + * 3. the flow ring is not in FLOW_RING_STATUS_OPEN state + */ + if ((ring_empty) || !(if_flow_lkup[ifindex].status) || + (status != FLOW_RING_STATUS_OPEN)) { + /* reset counters, etc. */ + flow_ring_node->stuck_count = 0; + flow_ring_node->tx_cmpl_prev = tx_cmpl; + continue; + } + /** + * DEVICE_TX_STUCK_WARN_DURATION, DEVICE_TX_STUCK_DURATION are integer + * representations of time, used to decide if a flow is in warn state or stuck. + * + * flow_ring_node->stuck_count is an integer counter representing how long + * tx_cmpl is not received though there are pending packets in the ring + * to be consumed by the dongle for that particular flow. + * + * This method of determining elapsed time is helpful in sleep/wake scenarios. + * If the host sleeps and wakes up, that sleep time is not counted towards the + * stuck duration.
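 + * + * Illustration (hypothetical values; the real thresholds are the macros + * referenced above): with a scan interval of 30 seconds and + * DEVICE_TX_STUCK_WARN_DURATION == 3, the warning fires after three scans, + * i.e. roughly 90 seconds without a tx completion on that flow.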
+ */ + if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) { + + flow_ring_node->stuck_count++; + + DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n", + __func__, flow_ring_node->flowid, tx_cmpl, + flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count)); + dhd_prot_dump_ring_ptrs(flow_ring_node->prot_info); + + switch (flow_ring_node->stuck_count) { + case DEVICE_TX_STUCK_WARN_DURATION: + /** + * Notify Device Tx Stuck Notification App about the + * device Tx stuck warning for this flowid. + * App will collect the logs required. + */ + DHD_ERROR(("stuck warning for flowid: %d sent to app\n", + flow_ring_node->flowid)); + dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING); + break; + case DEVICE_TX_STUCK_DURATION: + /** + * Notify Device Tx Stuck Notification App about the + * device Tx stuck info for this flowid. + * App will collect the logs required. + */ + DHD_ERROR(("stuck information for flowid: %d sent to app\n", + flow_ring_node->flowid)); + dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK); + break; + default: + break; + } + } else { + flow_ring_node->tx_cmpl_prev = tx_cmpl; + flow_ring_node->stuck_count = 0; + } + } + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags); +} +/** + * Schedules dhd_bus_device_tx_stuck_scan every DEVICE_TX_STUCK_CKECK_TIMEOUT + * to determine whether any flowid is stuck. + */ +static void +dhd_bus_device_stuck_scan(dhd_bus_t *bus) +{ + uint32 time_stamp; /* in millisec */ + uint32 diff; + + /* No need to run the algorithm if the dongle has trapped */ + if (bus->dhd->dongle_trap_occured) { + return; + } + time_stamp = OSL_SYSUPTIME(); + diff = time_stamp - bus->device_tx_stuck_check; + if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) { + dhd_bus_device_tx_stuck_scan(bus); + bus->device_tx_stuck_check = OSL_SYSUPTIME(); + } + return; +} +#endif /* DEVICE_TX_STUCK_DETECT */ + +/** + * Watchdog timer function.
+ * @param dhd Represents a specific hardware (dongle) instance that this DHD manages + */ +bool dhd_bus_watchdog(dhd_pub_t *dhd) +{ + unsigned long flags; + dhd_bus_t *bus = dhd->bus; + + if (dhd_query_bus_erros(bus->dhd)) { + return FALSE; + } + + DHD_GENERAL_LOCK(dhd, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) || + DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) { + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; + } + DHD_BUS_BUSY_SET_IN_WD(dhd); + DHD_GENERAL_UNLOCK(dhd, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + +#ifdef BCMINTERNAL + if ((bus->msi_sim) && (++bus->polltick >= bus->pollrate)) { + uint32 val; + bus->polltick = 0; + val = *(uint32 *)bus->msi_sim_addr; + *(uint32 *)bus->msi_sim_addr = 0; + if (val) { + DHD_INFO(("calling dhdpcie_bus_isr 0x%04x\n", val)); + dhdpcie_bus_isr(bus); + } + } +#endif /* BCMINTERNAL */ + +#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC) + dhd_intr_poll_pkt_thresholds(dhd); +#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */ + + /* Poll for console output periodically */ + if (dhd->busstate == DHD_BUS_DATA && + dhd->dhd_console_ms != 0 && + DHD_CHK_BUS_NOT_IN_LPS(bus)) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd->dhd_console_ms) { + bus->console.count -= dhd->dhd_console_ms; + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + /* Make sure backplane clock is on */ + if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) { + if (dhdpcie_bus_readconsole(bus) < 0) { + DHD_ERROR(("%s: disable dconpoll\n", __FUNCTION__)); + dhd->dhd_console_ms = 0; /* On error, stop trying */ + } + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + } + } + +#ifdef DHD_READ_INTSTATUS_IN_DPC + if (bus->poll) { + bus->ipend = TRUE; + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); /* queue DPC now!! */ + } +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + +#ifdef DEVICE_TX_STUCK_DETECT + if (dhd->bus->dev_tx_stuck_monitor == TRUE) { + dhd_bus_device_stuck_scan(bus); + } +#endif /* DEVICE_TX_STUCK_DETECT */ + + DHD_GENERAL_LOCK(dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_WD(dhd); + dhd_os_busbusy_wake(dhd); + DHD_GENERAL_UNLOCK(dhd, flags); +#if !defined(DHD_PCIE_RUNTIMEPM) && (defined(PCIE_OOB) || defined(PCIE_INB_DW)) + dhd->bus->inb_dw_deassert_cnt += dhd_watchdog_ms; + if (dhd->bus->inb_dw_deassert_cnt >= + DHD_INB_DW_DEASSERT_MS) { + dhd->bus->inb_dw_deassert_cnt = 0; + /* Inband device wake is deasserted from DPC context after DS_Exit is received, + * but if at all there is no d2h interrupt received, dpc will not be scheduled + * and inband DW is not deasserted, hence DW is deasserted from watchdog thread + * for every 250ms. 
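 + * + * (A hedged illustration: the counter above accumulates dhd_watchdog_ms per + * watchdog tick, so with a hypothetical 10 ms tick it would take ~25 ticks + * to reach a 250 ms DHD_INB_DW_DEASSERT_MS.)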
+ */ + dhd_bus_dw_deassert(dhd); + } +#endif /* !DHD_PCIE_RUNTIMEPM && PCIE_OOB || PCIE_INB_DW */ + return TRUE; +} /* dhd_bus_watchdog */ + +#if defined(SUPPORT_MULTIPLE_REVISION) +static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + uint32 chiprev; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[20] = "_4358"; +#else + char chipver_tag[10] = {0, }; +#endif /* SUPPORT_MULTIPLE_CHIPS */ + + chiprev = dhd_bus_chiprev(bus); + if (chiprev == 0) { + DHD_ERROR(("----- CHIP 4358 A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else if (chiprev == 1) { + DHD_ERROR(("----- CHIP 4358 A1 -----\n")); +#if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) + strcat(chipver_tag, "_a1"); +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */ + } else if (chiprev == 3) { + DHD_ERROR(("----- CHIP 4358 A3 -----\n")); +#if defined(SUPPORT_MULTIPLE_CHIPS) + strcat(chipver_tag, "_a3"); +#endif /* SUPPORT_MULTIPLE_CHIPS */ + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev)); + } + + strcat(fw_path, chipver_tag); + +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) + if (chiprev == 1 || chiprev == 3) { + int ret = dhd_check_module_b85a(); + if ((chiprev == 1) && (ret < 0)) { + memset(chipver_tag, 0x00, sizeof(chipver_tag)); + strcat(chipver_tag, "_b85"); + strcat(chipver_tag, "_a1"); + } + } + + DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag)); +#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */ + +#if defined(SUPPORT_MULTIPLE_BOARD_REV) + if (system_rev >= 10) { + DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev)); + strcat(chipver_tag, "_r10"); + } +#endif /* SUPPORT_MULTIPLE_BOARD_REV */ + strcat(nv_path, chipver_tag); + + return 0; +} + +static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + uint32 chip_ver; + char chipver_tag[10] = {0, }; +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \ + defined(SUPPORT_BCM4359_MIXED_MODULES) + int module_type = -1; +#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ + + chip_ver = bus->sih->chiprev; + if (chip_ver == 4) { + DHD_ERROR(("----- CHIP 4359 B0 -----\n")); + strncat(chipver_tag, "_b0", strlen("_b0")); + } else if (chip_ver == 5) { + DHD_ERROR(("----- CHIP 4359 B1 -----\n")); + strncat(chipver_tag, "_b1", strlen("_b1")); + } else if (chip_ver == 9) { + DHD_ERROR(("----- CHIP 4359 C0 -----\n")); + strncat(chipver_tag, "_c0", strlen("_c0")); + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver)); + return -1; + } + +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \ + defined(SUPPORT_BCM4359_MIXED_MODULES) + module_type = dhd_check_module_b90(); + + switch (module_type) { + case BCM4359_MODULE_TYPE_B90B: + strcat(fw_path, chipver_tag); + break; + case BCM4359_MODULE_TYPE_B90S: + default: + /* + * .cid.info file not exist case, + * loading B90S FW force for initial MFG boot up. 
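 + * (That is: when no .cid.info file exists, the B90S firmware is force-loaded + * for the initial manufacturing boot.)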
+ */ + if (chip_ver == 5) { + strncat(fw_path, "_b90s", strlen("_b90s")); + } + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + break; + } +#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); +#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ + + return 0; +} + +#define NVRAM_FEM_MURATA "_murata" +static int +concate_revision_from_cisinfo(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + int ret = BCME_OK; +#if defined(SUPPORT_MIXED_MODULES) +#if defined(USE_CID_CHECK) + char module_type[MAX_VNAME_LEN]; + naming_info_t *info = NULL; + bool is_murata_fem = FALSE; + + memset(module_type, 0, sizeof(module_type)); + + if (dhd_check_module_bcm(module_type, + MODULE_NAME_INDEX_MAX, &is_murata_fem) == BCME_OK) { + info = dhd_find_naming_info(bus->dhd, module_type); + } else { + /* in case the .cid.info file does not exist */ + info = dhd_find_naming_info_by_chip_rev(bus->dhd, &is_murata_fem); + } + +#ifdef BCM4361_CHIP + if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) { + is_murata_fem = FALSE; + } +#endif /* BCM4361_CHIP */ + + if (info) { +#ifdef BCM4361_CHIP + if (is_murata_fem) { + strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA)); + } +#endif /* BCM4361_CHIP */ + strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext)); + strncat(fw_path, info->fw_ext, strlen(info->fw_ext)); + } else { + DHD_ERROR(("%s: failed to find extension for nvram and firmware\n", __FUNCTION__)); + ret = BCME_ERROR; + } +#endif /* USE_CID_CHECK */ +#ifdef USE_DIRECT_VID_TAG + int revid = bus->sih->chiprev; + unsigned char chipstr[MAX_VID_LEN]; + + memset(chipstr, 0, sizeof(chipstr)); + snprintf(chipstr, sizeof(chipstr), "_4389"); + + /* write chipstr/vid into nvram tag */ + ret = concate_nvram_by_vid(bus, nv_path, chipstr); + /* write chiprev into FW tag */ + if (ret == BCME_OK) { + if (revid == 3) { + strncat(fw_path, A0_REV, strlen(fw_path)); + DHD_ERROR(("%s: fw_path : %s\n", __FUNCTION__, fw_path)); + } else if (revid == 1) { + strncat(fw_path, B0_REV, strlen(fw_path)); + DHD_ERROR(("%s: fw_path : %s\n", __FUNCTION__, fw_path)); + } else { + DHD_ERROR(("%s: INVALID CHIPREV %d\n", __FUNCTION__, revid)); + } + } +#endif /* USE_DIRECT_VID_TAG */ +#else /* SUPPORT_MIXED_MODULES */ + char chipver_tag[10] = {0, }; + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); +#endif /* SUPPORT_MIXED_MODULES */ + + return ret; +} + +int +concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + int res = 0; + + if (!bus || !bus->sih) { + DHD_ERROR(("%s: Bus is invalid\n", __FUNCTION__)); + return -1; + } + + if (!fw_path || !nv_path) { + DHD_ERROR(("fw_path or nv_path is null.\n")); + return res; + } + + switch (si_chipid(bus->sih)) { + + case BCM43569_CHIP_ID: + case BCM4358_CHIP_ID: + res = concate_revision_bcm4358(bus, fw_path, nv_path); + break; + case BCM4355_CHIP_ID: + case BCM4359_CHIP_ID: + res = concate_revision_bcm4359(bus, fw_path, nv_path); + break; + case BCM4361_CHIP_ID: + case BCM4347_CHIP_ID: + case BCM4375_CHIP_ID: + case BCM4389_CHIP_ID: + res = concate_revision_from_cisinfo(bus, fw_path, nv_path); + break; + default: + DHD_ERROR(("REVISION SPECIFIC feature is not required\n")); + return res; + } + + return res; +} +#endif /* SUPPORT_MULTIPLE_REVISION */ + +uint16 +dhd_get_chipid(struct dhd_bus *bus) +{ + if (bus && bus->sih) { + return (uint16)si_chipid(bus->sih); + } else if (bus && bus->regs) { + chipcregs_t *cc =
(chipcregs_t *)bus->regs; + uint w, chipid; + + /* Set bar0 window to si_enum_base */ + dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(0)); + + w = R_REG(bus->osh, &cc->chipid); + chipid = w & CID_ID_MASK; + + return (uint16)chipid; + } else { + return 0; + } +} + +/** + * Loads firmware given by caller-supplied path and nvram image into the PCIe dongle. + * + * BCM_REQUEST_FW specific: + * Given the chip type, determines the file paths to be used within /lib/firmware/brcm/ containing + * the firmware and nvm for that chip. If the download fails, retries the download with a different nvm file. + * + * BCMEMBEDIMAGE specific: + * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in a header + * file will be used instead. + * + * @return BCME_OK on success + */ +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; + +#if defined(SUPPORT_MULTIPLE_REVISION) + if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) { + DHD_ERROR(("%s: failed to concatenate revision\n", + __FUNCTION__)); + /* Proceed if SUPPORT_MULTIPLE_CHIPS is enabled */ +#ifndef SUPPORT_MULTIPLE_CHIPS + return BCME_BADARG; +#endif /* !SUPPORT_MULTIPLE_CHIPS */ + } +#endif /* SUPPORT_MULTIPLE_REVISION */ + +#if defined(DHD_BLOB_EXISTENCE_CHECK) + dhd_set_blob_support(bus->dhd, bus->fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + + DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); +#if defined(LINUX) || defined(linux) + dhdpcie_dump_resource(bus); +#endif /* LINUX || linux */ + + ret = dhdpcie_download_firmware(bus, osh); + + return ret; +} + +void +dhd_set_bus_params(struct dhd_bus *bus) +{ + if (bus->dhd->conf->dhd_poll >= 0) { + bus->poll = bus->dhd->conf->dhd_poll; + if (!bus->pollrate) + bus->pollrate = 1; + printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll); + } + if (bus->dhd->conf->d2h_intr_control >= 0) + bus->d2h_intr_control = bus->dhd->conf->d2h_intr_control; + printf("d2h_intr_method -> %s(%d); d2h_intr_control -> %s(%d)\n", + bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX", bus->d2h_intr_method, + bus->d2h_intr_control ? "HOST_IRQ" : "D2H_INTMASK", bus->d2h_intr_control); +} + +/** + * Loads firmware given by 'bus->fw_path' into the PCIe dongle. + * + * BCM_REQUEST_FW specific: + * Given the chip type, determines the file paths to be used within /lib/firmware/brcm/ containing + * the firmware and nvm for that chip. If the download fails, retries the download with a different nvm file. + * + * BCMEMBEDIMAGE specific: + * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in a header + * file will be used instead.
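 + * + * As a concrete illustration of the BCM_REQUEST_FW case handled below: for a + * BCM43570 rev 2, the composed paths are /lib/firmware/brcm/bcm43570a2-firmware.bin + * and /lib/firmware/brcm/bcm43570a2.nvm.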
+ * + * @return BCME_OK on success + */ +static int +dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) +{ + int ret = 0; +#if defined(BCM_REQUEST_FW) + uint chipid = bus->sih->chip; + uint revid = bus->sih->chiprev; + char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */ + char nv_path[64]; /* path to nvram vars file */ + bus->fw_path = fw_path; + bus->nv_path = nv_path; + switch (chipid) { + case BCM43570_CHIP_ID: + bcmstrncat(fw_path, "43570", 5); + switch (revid) { + case 0: + bcmstrncat(fw_path, "a0", 2); + break; + case 2: + bcmstrncat(fw_path, "a2", 2); + break; + default: + DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__, + revid)); + break; + } + break; + default: + DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__, + chipid)); + return 0; + } + /* load board specific nvram file */ + snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path); + /* load firmware */ + snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path); +#endif /* BCM_REQUEST_FW */ + + DHD_OS_WAKE_LOCK(bus->dhd); + + dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path); + dhd_set_bus_params(bus); + + ret = _dhdpcie_download_firmware(bus); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} /* dhdpcie_download_firmware */ + +#ifdef BCMINTERNAL +#define PCIE_HYBRIDFW_MAGICNUM 0x434F464Cu +#define PCIE_HYBRIDFW_HOSTOFFSET_MASK 0xFFC00000u +#define PCIE_HYBRIDFW_TYPE_DNGL 0u +#define PCIE_HYBRIDFW_TYPE_HOST 1u +#define PCIE_HYBRIDFW_TYPE_DNGLTBL 2u +#define PCIE_HYBRIDFW_TYPE_HOSTTBL 3u +#define SBtoPCIETranslation2 0xF0 +#define SBtoPCIETranslation2Upper 0xF4 +#define SBtoPCIETranslation3 0xF8 +#define SBtoPCIETranslation3Upper 0xFC +#define SBtoPCIETranslation0 0x100 +#define SBtoPCIETranslation1 0x104 +#define SBtoPCIETranslation0Upper 0x10C +#define SBtoPCIETranslation1Upper 0x110 + +/* Get length of each portion of hybrid fw binary from the header */ +static int +dhdpcie_hybridfw_get_next_block(char * fptr, int *fsize, uint32 *type, uint32 *len) +{ + struct portion_hdr { + uint32 type; + uint32 len; + } hdr; + int ret; + + /* read and verify header */ + if (*fsize <= sizeof(hdr)) { + return BCME_BADLEN; + } + + ret = dhd_os_get_image_block((char *)&hdr, sizeof(hdr), fptr); + if (ret <= 0) { + return BCME_ERROR; + } + + *fsize -= sizeof(hdr); + *type = ltoh32(hdr.type); + *len = ltoh32(hdr.len); + + if ((*len > (uint32)*fsize) || ((int)*len < 0)) { + return BCME_BADLEN; + } + + DHD_INFO(("%s Found section %d with length %d\n", __FUNCTION__, hdr.type, hdr.len)); + + return BCME_OK; +} + +/* Replace host offload functions' pointers */ +static int +dhdpcie_hybridfw_ptrrpl(char *fw, uint fw_sz, uint32 *jmptbl, uint jmptbl_sz, + dmaaddr_t hbuf_pa, uint32 hbuf_len) +{ + uint32 *p_ptr; + uint32 host_addr; + int ret = BCME_OK; + + if (jmptbl_sz % 4) { + DHD_ERROR(("%s table size %u not 4 bytes aligned\n", __FUNCTION__, jmptbl_sz)); + return BCME_ERROR; + } + + host_addr = PCIEDEV_ARM_ADDR(PHYSADDRLO(hbuf_pa), PCIEDEV_TRANS_WIN_HOSTMEM); + for (; jmptbl_sz > 0; jmptbl_sz -= 4, jmptbl++) { + if (*jmptbl >= fw_sz) { + DHD_ERROR(("%s offset %u >= fw size %u\n", __FUNCTION__, *jmptbl, fw_sz)); + ret = BCME_ERROR; + break; + } + p_ptr = (uint32 *)(fw + *jmptbl); + *p_ptr &= ~(uint32)PCIE_HYBRIDFW_HOSTOFFSET_MASK; + if (*p_ptr > hbuf_len) { + DHD_ERROR(("%s function offset %x >= host buffer len %x\n", + __FUNCTION__, *p_ptr, hbuf_len)); + ret = BCME_ERROR; + break; + } + *p_ptr += host_addr; + } + + return ret; +} + +/* configure back plane to pcie translation window 
*/ +static void +dhdpcie_sbtopcie_translation_config(struct dhd_bus *bus, int bp_window, dmaaddr_t addr) +{ + uint32 trans_reg_offset, trans_u_reg_offset; + + switch (bp_window) { + case PCIEDEV_TRANS_WIN_0: + trans_reg_offset = SBtoPCIETranslation0; + trans_u_reg_offset = SBtoPCIETranslation0Upper; + break; + + case PCIEDEV_TRANS_WIN_1: + trans_reg_offset = SBtoPCIETranslation1; + trans_u_reg_offset = SBtoPCIETranslation1Upper; + break; + + case PCIEDEV_TRANS_WIN_2: + trans_reg_offset = SBtoPCIETranslation2; + trans_u_reg_offset = SBtoPCIETranslation2Upper; + break; + + case PCIEDEV_TRANS_WIN_3: + trans_reg_offset = SBtoPCIETranslation3; + trans_u_reg_offset = SBtoPCIETranslation3Upper; + break; + + default: + DHD_ERROR(("%s Invalid bp translation window %d\n", + __FUNCTION__, bp_window)); + return; + } + + si_corereg(bus->sih, bus->sih->buscoreidx, trans_reg_offset, ~0, + ((PHYSADDRLO(addr) & PCIEDEV_HOSTADDR_MAP_WIN_MASK) | 0xC)); + si_corereg(bus->sih, bus->sih->buscoreidx, trans_u_reg_offset, ~0, PHYSADDRHI(addr)); +} + +/** + * hybrid firmware download handler + * + * Parse, prepare and download a hybrid firmware + * - Identify a hybrid firmware + * - Place the host offload portion in an allocated DMA consistent buffer + * - Modifying the host portion function pointers according to info table + */ +static int +dhdpcie_hybridfw_download(struct dhd_bus *bus, char *fp) +{ + uint32 magic_num; + int ret = BCME_OK; + dhd_dma_buf_t *hstfw = &bus->hostfw_buf; + char *dnglfw = NULL, *dngltbl = NULL, *hsttbl = NULL; + int dnglfw_sz = 0, dngltbl_sz = 0, hsttbl_sz = 0; + int fsize; + int offset = 0; + uint32 type = 0, len = 0; + void * ptr = NULL; + + fsize = dhd_os_get_image_size(fp); + + /* Verify magic number */ + if (fsize < sizeof(magic_num)) { + return BCME_UNSUPPORTED; + } + ret = dhd_os_get_image_block((char *)&magic_num, sizeof(magic_num), fp); + if (ret <= 0) { + return BCME_ERROR; + } + magic_num = ltoh32(magic_num); + if (magic_num != PCIE_HYBRIDFW_MAGICNUM) { + return BCME_UNSUPPORTED; + } + fsize -= sizeof(magic_num); + + do { + ret = dhdpcie_hybridfw_get_next_block(fp, &fsize, &type, &len); + if (ret != BCME_OK) { + break; + } + + if (len == 0) { + continue; + } + + if ((ptr = MALLOC(bus->dhd->osh, len)) == NULL) { + ret = BCME_NOMEM; + break; + } + + len = dhd_os_get_image_block(ptr, len, fp); + if (len <= 0) { + MFREE(bus->dhd->osh, ptr, len); + ret = BCME_ERROR; + break; + } + fsize -= len; + + switch (type) { + case PCIE_HYBRIDFW_TYPE_DNGL: + /* cannot have more than one RAM image blocks */ + if (dnglfw_sz) { + MFREE(bus->dhd->osh, ptr, len); + ret = BCME_ERROR; + break; + } + + /* RAM portion of the FW image */ + dnglfw = ptr; + dnglfw_sz = len; + + if ((uint32)len > bus->ramsize) { + ret = BCME_BADLEN; + break; + } + break; + + case PCIE_HYBRIDFW_TYPE_HOST: + /* Host portion of FW image + * Check if a -hostmem- fw has already been loaded, if yes and + * the buffer can accommodate the new firmware host portion, + * reuse the allocated buffer + * For Insufficient size buffer or freshly loaded dhd, allocate + * a coherent buffer + */ + if (hstfw->va) { + if (hstfw->len >= len) { + hstfw->len = len; + } else { + DMA_FREE_CONSISTENT(bus->dhd->osh, hstfw->va, + hstfw->_alloced, hstfw->pa, hstfw->dmah); + memset(hstfw, 0, sizeof(*hstfw)); + } + } + + if (hstfw->va == NULL) { + hstfw->len = len; + hstfw->va = DMA_ALLOC_CONSISTENT(bus->dhd->osh, hstfw->len, + 4, &hstfw->_alloced, &hstfw->pa, &hstfw->dmah); + if (hstfw->va == NULL) { + MFREE(bus->dhd->osh, ptr, len); + ret = 
BCME_NOMEM; + break; + } + } + + ret = memcpy_s(hstfw->va, hstfw->len, ptr, len); + MFREE(bus->dhd->osh, ptr, len); + break; + + case PCIE_HYBRIDFW_TYPE_DNGLTBL: + /* cannot have more than one ram image relocation information */ + if (dngltbl) { + MFREE(bus->dhd->osh, ptr, len); + ret = BCME_ERROR; + break; + } + + /* RAM image relocation information */ + dngltbl = ptr; + dngltbl_sz = len; + + /* RAM image should be included before RAM reloc info */ + if ((dnglfw == NULL) || (hstfw->va == NULL)) { + ret = BCME_ERROR; + break; + } + /* Store the fw assumed host memory base */ + bus->hostfw_base = *(uint32 *)(dnglfw + *(uint32 *)dngltbl); + bus->hostfw_base &= PCIE_HYBRIDFW_HOSTOFFSET_MASK; + + DHD_INFO(("%s FW assumed host base address is %08x\n", + __FUNCTION__, bus->hostfw_base)); + + ret = dhdpcie_hybridfw_ptrrpl(dnglfw, dnglfw_sz, + (uint32 *)dngltbl, dngltbl_sz, hstfw->pa, hstfw->len); + break; + + case PCIE_HYBRIDFW_TYPE_HOSTTBL: + /* cannot have more than one host image relocation info */ + if (hsttbl) { + MFREE(bus->dhd->osh, ptr, len); + ret = BCME_ERROR; + break; + } + /* Host image relocation information */ + hsttbl = ptr; + hsttbl_sz = len; + + /* Host image should be included before host reloc info */ + if (hstfw->va == NULL) { + ret = BCME_ERROR; + break; + } + ret = dhdpcie_hybridfw_ptrrpl(hstfw->va, hstfw->len, + (uint32 *)hsttbl, hsttbl_sz, hstfw->pa, hstfw->len); + break; + + default: + ret = BCME_ERROR; + break; + } + + } while (!ret && (fsize > 0)); + + if (ret != BCME_OK) { + DHD_ERROR(("%s: err:%d, fsize:%d, t:%d, l:%d\n", + __FUNCTION__, ret, fsize, type, len)); + goto exit; + } + + if ((uint32*)dnglfw == NULL) { + DHD_ERROR(("%s: Dongle image should be present in combo file\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + + if (hstfw->va) { + OSL_CACHE_FLUSH((void *)hstfw->va, hstfw->len); + } + + /* for CR4/CA7 store the reset instruction to be written in 0 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + bus->resetinstr = *(((uint32*)dnglfw)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + + ret = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)dnglfw, dnglfw_sz); + if (ret) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, ret, dnglfw_sz, offset)); + goto exit; + } + + /* Configure the SB-to-PCIe translation register to allow ARM to access the host offload area */ + bus->bp_base = PCIEDEV_ARM_ADDR(PHYSADDRLO(hstfw->pa), PCIEDEV_TRANS_WIN_HOSTMEM); + dhdpcie_sbtopcie_translation_config(bus, PCIEDEV_TRANS_WIN_HOSTMEM, hstfw->pa); + + /* Check if the buffer is crossing 32MB Window */ + if (((bus->bp_base + hstfw->len) & PCIEDEV_ARM_ADDR_SPACE) < + (bus->bp_base & PCIEDEV_ARM_ADDR_SPACE)) { + DHD_ERROR(("Host memory crossing 32MB window."
+ " Entire hostmem block should be within continuous 32MB block")); + ret = BCME_ERROR; + goto exit; + } + + DHD_ERROR(("%s %d bytes host offload firmware placed at pa %08x %08x\n", + __FUNCTION__, hstfw->len, + (uint)PHYSADDRHI(hstfw->pa), (uint)PHYSADDRLO(hstfw->pa))); + +exit: + if (dnglfw) { + MFREE(bus->dhd->osh, dnglfw, dnglfw_sz); + } + + if (dngltbl) { + MFREE(bus->dhd->osh, dngltbl, dngltbl_sz); + } + + if (hsttbl) { + MFREE(bus->dhd->osh, hsttbl, hsttbl_sz); + } + + if (ret && hstfw->va) { + DMA_FREE_CONSISTENT(bus->dhd->osh, hstfw->va, hstfw->_alloced, + hstfw->pa, hstfw->dmah); + memset(hstfw, 0, sizeof(*hstfw)); + } + + return ret; +} +#endif /* BCMINTERNAL */ + +/** + * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD + * is updated with the event logging partitions within that file as well. + * + * @param pfw_path Path to .bin or .bea file + */ +static int +dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = BCME_ERROR; + int offset = 0; + int len = 0; + bool store_reset; + char *imgbuf = NULL; /**< XXX a file pointer, contradicting its name and type */ + uint8 *memblock = NULL, *memptr = NULL; +#ifdef CHECK_DOWNLOAD_FW + uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct +#endif + int offset_end = bus->ramsize; + uint32 file_size = 0, read_len = 0; + +#if defined(CACHE_FW_IMAGES) + int buf_offset, total_len, residual_len; + char * dnld_buf; +#endif /* CACHE_FW_IMAGE */ + +#if defined(linux) || defined(LINUX) +#if defined(DHD_FW_MEM_CORRUPTION) + if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { + dhd_tcm_test_enable = TRUE; + } else { + dhd_tcm_test_enable = FALSE; + } +#endif /* DHD_FW_MEM_CORRUPTION */ + DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable)); + /* TCM check */ + if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) { + DHD_ERROR(("dhd_bus_tcm_test failed\n")); + bcmerror = BCME_ERROR; + goto err; + } +#endif /* LINUX || linux */ +#ifndef DHD_EFI + DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); +#endif /* DHD_EFI */ + + /* Should succeed in opening image if it is actually given through registry + * entry or in module param. + */ + imgbuf = dhd_os_open_image1(bus->dhd, pfw_path); + if (imgbuf == NULL) { + printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path); + goto err; + } + + file_size = dhd_os_get_image_size(imgbuf); + if (!file_size) { + DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__)); + goto err; + } + +#ifdef BCMINTERNAL + /* dhdpcie_hybridfw_download return BCME_UNSUPPORTED if the binary + * doesn't have a recognizable format. Continue to previous routine + * in such case. Return and propagate the result for BCME_OK or + * other errors + */ + bcmerror = dhdpcie_hybridfw_download(bus, imgbuf); + if (bcmerror != BCME_UNSUPPORTED) { + goto err; + } + + /* Close and re-open the image file to reset the file pointer. + * Needed because dhdpcie_hybridfw_download() already read 4 bytes from the file. 
+ */ + dhd_os_close_image1(bus->dhd, imgbuf); + imgbuf = dhd_os_open_image1(bus->dhd, pfw_path); + if (imgbuf == NULL) { + goto err; + } +#endif /* BCMINTERNAL */ + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + bcmerror = BCME_NOMEM; + goto err; + } +#ifdef CHECK_DOWNLOAD_FW + if (bus->dhd->conf->fwchk) { + memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memptr_tmp == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + } +#endif + if ((uint32)(uintptr)memblock % DHD_SDALIGN) { + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + } + + /* check if CR4/CA7 */ + store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)); +#if defined(CACHE_FW_IMAGES) + total_len = bus->ramsize; + dhd_os_close_image(imgbuf); + imgbuf = NULL; + buf_offset = 0; + bcmerror = dhd_get_download_buffer(bus->dhd, pfw_path, FW, &dnld_buf, &total_len); + if (bcmerror != BCME_OK) { + DHD_ERROR(("%s: dhd_get_download_buffer failed (%d)\n", __FUNCTION__, bcmerror)); + goto err; + } + residual_len = total_len; +#endif /* CACHE_FW_IMAGE */ + /* Download image with MEMBLOCK size */ +#if defined(CACHE_FW_IMAGES) + while (residual_len) +#else + /* Download image with MEMBLOCK size */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) +#endif /* CACHE_FW_IMAGE */ + { +#if defined(CACHE_FW_IMAGES) + len = MIN(residual_len, MEMBLOCK); + memcpy(memptr, dnld_buf + buf_offset, len); + residual_len -= len; + buf_offset += len; +#else + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } +#endif /* CACHE_FW_IMAGE */ + + read_len += len; + if (read_len > file_size) { + DHD_ERROR(("%s: WARNING! 
reading beyond EOF, len=%d; read_len=%u;" + " file_size=%u truncating len to %d \n", __FUNCTION__, + len, read_len, file_size, (len - (read_len - file_size)))); + len -= (read_len - file_size); + } + + /* if address is 0, store the reset instruction to be written in 0 */ + if (store_reset) { + ASSERT(offset == 0); + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + offset_end += offset; + store_reset = FALSE; + } + + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + +#ifdef CHECK_DOWNLOAD_FW + if (bus->dhd->conf->fwchk) { + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + if (memcmp(memptr_tmp, memptr, len)) { + DHD_ERROR(("%s: Downloaded image is corrupted at 0x%08x\n", __FUNCTION__, offset)); + bcmerror = BCME_ERROR; + goto err; + } else + DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); + } +#endif + offset += MEMBLOCK; + + if (offset >= offset_end) { + DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n", + __FUNCTION__, offset, offset_end)); + bcmerror = BCME_ERROR; + goto err; + } + + if (read_len >= file_size) { + break; + } + } +err: + if (memblock) { + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); +#ifdef CHECK_DOWNLOAD_FW + if (memptr_tmp) + MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); +#endif + } + + if (imgbuf) { + dhd_os_close_image1(bus->dhd, imgbuf); + } + + return bcmerror; +} /* dhdpcie_download_code_file */ + +#ifdef CUSTOMER_HW4_DEBUG +#define MIN_NVRAMVARS_SIZE 128 +#endif /* CUSTOMER_HW4_DEBUG */ + +static int +dhdpcie_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = BCME_ERROR; + uint len; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + bool nvram_uefi_exists = FALSE; + bool local_alloc = FALSE; + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* First try UEFI */ + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len); + + /* If UEFI empty, then read from file system */ + if ((len <= 0) || (memblock == NULL)) { + + if (nvram_file_exists) { + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len); + if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) { + goto err; + } + } +#ifdef BCM_ROUTER_DHD + else if (bus->nvram_params_len) { + memblock = MALLOCZ(bus->dhd->osh, MAX_NVRAMBUF_SIZE); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + local_alloc = TRUE; + /* nvram is string with null terminated. 
cannot use strlen */ + len = bus->nvram_params_len; + ASSERT(len <= MAX_NVRAMBUF_SIZE); + memcpy(memblock, bus->nvram_params, len); + } +#endif /* BCM_ROUTER_DHD */ + else { + /* For SROM OTP no external file or UEFI required */ + bcmerror = BCME_OK; + } + } else { + nvram_uefi_exists = TRUE; + } + + DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len)); + + if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) { + bufp = (char *) memblock; + +#ifdef DHD_EFI + dhd_insert_random_mac_addr(bus->dhd, bufp, &len); + +#endif /* DHD_EFI */ + +#ifdef CACHE_FW_IMAGES + if (bus->processed_nvram_params_len) { + len = bus->processed_nvram_params_len; + } + + if (!bus->processed_nvram_params_len) { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + bus->processed_nvram_params_len = len; + } + } else +#else + { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + } + } +#endif /* CACHE_FW_IMAGES */ + + DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len)); +#ifdef CUSTOMER_HW4_DEBUG + if (len < MIN_NVRAMVARS_SIZE) { + DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto err; + } +#endif /* CUSTOMER_HW4_DEBUG */ + + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } + +err: + if (memblock) { + if (local_alloc) { + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + } else { + dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE); + } + } + + return bcmerror; +} + +#if defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) + +#ifdef DLIMAGE_43602a1 +#define CHIPID_43602 BCM43602_CHIP_ID +#define CHIPID_43462 BCM43462_CHIP_ID +#define CHIPID_43522 BCM43522_CHIP_ID +#define CHIP_43602_CHIPREV_A0 0 +#define CHIP_43602_CHIPREV_A1 1 +#define CHIP_43602_PKG_OPT 1 +#endif + +#define CHIPID_NONE -1 + +struct fd_chip_image +{ + unsigned char *dlarray; + int dlimagesize; + char *dlimagename; + char *dlimagever; + char *dliamgedate; +} static chip_dl_image_array[] __initdata = +{ +#ifdef DLIMAGE_43602a1 + {dlarray_43602a1, sizeof(dlarray_43602a1), dlimagename_43602a1, + dlimagever_43602a1, dlimagedate_43602a1}, +#endif + /* {image attributes for other chips, only if image is compiled} */ +}; + +enum chip_image_rev +{ +#ifdef DLIMAGE_43602a1 + CHIP_43602_A1_CHIP_IMAGE, +#endif + /* index in the above array */ +}; + +struct chip_image_map +{ + uint32 chipid; + uint32 chiprev; + uint32 chippkg; + uint32 image_idx; +} static chip_image_index_map_table [] __initdata = +{ +#ifdef DLIMAGE_43602a1 + {CHIPID_43602, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE}, + {CHIPID_43462, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE}, + {CHIPID_43522, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE}, +#endif + /* {for a given chipid, chiprev, chippkg, what is the index (the above enum) */ + {CHIPID_NONE, 0, 0, 0} /* CHIPID_NONE is -1, used to mark end of list */ +}; + +static void __init select_fd_image( + struct dhd_bus *bus, unsigned char **p_dlarray, + char **p_dlimagename, char **p_dlimagever, + char **p_dlimagedate, int *image_size) { + + uint32 chipid, chiprev, chippkg_opt; + int image_index; + struct chip_image_map *p_image_index; + + chipid = 0; + image_index = -1; 
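+ /* Walk the {chipid, chiprev, chippkg} -> image-index table; the CHIPID_NONE + * sentinel terminates the walk, and image_index stays -1 when no embedded + * image matches this dongle. + */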
+ p_image_index = &chip_image_index_map_table[0]; + while (chipid != CHIPID_NONE) { + chipid = p_image_index->chipid; + chiprev = p_image_index->chiprev; + chippkg_opt = p_image_index->chippkg; + + if ((chipid == bus->sih->chip) && (chiprev == bus->sih->chiprev) && + (chippkg_opt == bus->sih->chippkg)) { + image_index = p_image_index->image_idx; + break; + } + p_image_index++; + } + + if (image_index != -1) { + *p_dlarray = chip_dl_image_array[image_index].dlarray; + *p_dlimagename = chip_dl_image_array[image_index].dlimagename; + *p_dlimagever = chip_dl_image_array[image_index].dlimagever; + *p_dlimagedate = chip_dl_image_array[image_index].dliamgedate; + *image_size = chip_dl_image_array[image_index].dlimagesize; + } else { + *p_dlarray = 0; + DHD_ERROR(("####################################################################\n" + "# %s: Dongle image not available for chipid = 0x%x" + " chiprev = %d chippkg = %d\n" + "####################################################################\n", + __FUNCTION__, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg)); + } +} +#endif /* defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) */ + +#ifdef BCMEMBEDIMAGE +int +dhdpcie_download_code_array(struct dhd_bus *bus) +{ + int bcmerror = -1; + int offset = 0; + unsigned char *p_dlarray = NULL; + unsigned int dlarray_size = 0; + unsigned int downloded_len, remaining_len, len; + char *p_dlimagename, *p_dlimagever, *p_dlimagedate; + uint8 *memblock = NULL, *memptr; + + downloded_len = 0; + remaining_len = 0; + len = 0; + +#ifdef DHD_EFI + p_dlarray = rtecdc_fw_arr; + dlarray_size = sizeof(rtecdc_fw_arr); +#else +#ifndef BCM_ROUTER_DHD + p_dlarray = dlarray; + dlarray_size = sizeof(dlarray); + p_dlimagename = dlimagename; + p_dlimagever = dlimagever; + p_dlimagedate = dlimagedate; +#else + select_fd_image(bus, &p_dlarray, &p_dlimagename, &p_dlimagever, + &p_dlimagedate, &dlarray_size); +#endif /* endif for BCM_ROUTER_DHD */ +#endif /* DHD_EFI */ + +#ifndef DHD_EFI + if ((p_dlarray == 0) || (dlarray_size == 0) ||(dlarray_size > bus->ramsize) || + (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0)) + goto err; +#endif /* DHD_EFI */ + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + while (downloded_len < dlarray_size) { + remaining_len = dlarray_size - downloded_len; + if (remaining_len >= MEMBLOCK) + len = MEMBLOCK; + else + len = remaining_len; + + memcpy(memptr, (p_dlarray + downloded_len), len); + /* check if CR4/CA7 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + if (offset == 0) { + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + downloded_len += len; + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + offset += MEMBLOCK; + } + +#ifdef DHD_DEBUG + /* Upload and compare the downloaded code */ + { + unsigned char *ularray = NULL; + unsigned int uploded_len; + uploded_len = 0; + bcmerror = -1; + ularray = MALLOC(bus->dhd->osh, dlarray_size); 
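+ /* Read the image back from dongle memory and memcmp() it against the + * source array below; the 0xaa pre-fill makes any short read stand out. + */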
+ if (ularray == NULL) + goto upload_err; + /* Upload image to verify downloaded contents. */ + offset = bus->dongle_ram_base; + memset(ularray, 0xaa, dlarray_size); + while (uploded_len < dlarray_size) { + remaining_len = dlarray_size - uploded_len; + if (remaining_len >= MEMBLOCK) + len = MEMBLOCK; + else + len = remaining_len; + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, + (uint8 *)(ularray + uploded_len), len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto upload_err; + } + + uploded_len += len; + offset += MEMBLOCK; + } +#ifdef DHD_EFI + if (memcmp(p_dlarray, ularray, dlarray_size)) { + DHD_ERROR(("%s: Downloaded image is corrupted ! \n", __FUNCTION__)); + goto upload_err; + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded .\n", __FUNCTION__)); +#else + if (memcmp(p_dlarray, ularray, dlarray_size)) { + DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n", + __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate)); + goto upload_err; + + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n", + __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate)); +#endif /* DHD_EFI */ + +upload_err: + if (ularray) + MFREE(bus->dhd->osh, ularray, dlarray_size); + } +#endif /* DHD_DEBUG */ +err: + + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + + return bcmerror; +} /* dhdpcie_download_code_array */ +#endif /* BCMEMBEDIMAGE */ + +#ifdef BCM_ROUTER_DHD +static int +_dhdpcie_get_nvram_params(struct dhd_bus *bus) +{ + int nvram_len = MAX_NVRAMBUF_SIZE; + int tmp_nvram_len, boardrev_str_len; + char *boardrev_str; + char *boardtype_str; + char *ptr; + + bus->nvram_params = MALLOC(bus->dhd->osh, nvram_len); + if (!bus->nvram_params) { + DHD_ERROR(("%s: fail to get nvram buffer to download.\n", __FUNCTION__)); + return -1; + } + + bus->nvram_params[0] = 0; + ptr = bus->nvram_params; + /* + * For full dongle router platforms, we would have two dhd instances running, + * serving two radios, one for 5G and another for 2G. But, both dongle instances + * would come up as wl0, as one is not aware of the other. In order to avoid + * this situation, we pass the dhd instance number through nvram parameter + * wlunit=0 and wlunit=1 to the dongle and make sure the two dongle instances + * come up as wl0 and wl1. + */ + + tmp_nvram_len = strlen("wlunit=xx\n\n") + 1; + tmp_nvram_len = + snprintf(ptr, tmp_nvram_len, "wlunit=%d", dhd_get_instance(bus->dhd)); + ptr += (tmp_nvram_len + 1); /* leave NULL */ + tmp_nvram_len++; + + if ((boardrev_str = si_getdevpathvar(bus->sih, "boardrev")) == NULL) + boardrev_str = nvram_get("boardrev"); + + boardrev_str_len = strlen("boardrev=0xXXXX") + 1; + boardrev_str_len = snprintf(ptr, boardrev_str_len, "boardrev=%s", + boardrev_str? 
boardrev_str : BOARDREV_PROMOTABLE_STR); + ptr += (boardrev_str_len + 1); /* leave NULL */ + tmp_nvram_len += (boardrev_str_len + 1); + + /* If per device boardtype is not available, use global boardtype */ + if ((boardtype_str = si_getdevpathvar(bus->sih, "boardtype")) == NULL) { + if ((boardtype_str = nvram_get("boardtype")) != NULL) { + int boardtype_str_len = 0; + + boardtype_str_len = strlen("boardtype=0xXXXX") + 1; + boardtype_str_len = snprintf(ptr, boardtype_str_len, + "boardtype=%s", boardtype_str); + ptr += (boardtype_str_len + 1); /* leave NULL */ + tmp_nvram_len += (boardtype_str_len + 1); + } + } + + if (dbushost_initvars_flash(bus->sih, + bus->osh, &ptr, + (nvram_len - tmp_nvram_len)) != 0) { + DHD_ERROR(("%s: fail to read nvram from flash.\n", __FUNCTION__)); + } + + tmp_nvram_len = (int)(ptr - bus->nvram_params); + + bcopy(STR_END, ptr, sizeof(STR_END)); + tmp_nvram_len += sizeof(STR_END); + bus->nvram_params_len = tmp_nvram_len; + return 0; +} + +static void +_dhdpcie_free_nvram_params(struct dhd_bus *bus) +{ + if (bus->nvram_params) { + MFREE(bus->dhd->osh, bus->nvram_params, MAX_NVRAMBUF_SIZE); + } +} + +/** Handler to send a signal to the dhdmonitor process to notify of firmware traps */ +void +dhdpcie_handle_dongle_trap(struct dhd_bus *bus) +{ + char *failed_if; + + /* Call the bus module watchdog */ + dhd_bus_watchdog(bus->dhd); + + /* Get the failed interface name to be later used by + * dhd_monitor to capture the required logs + */ + failed_if = dhd_ifname(bus->dhd, 0); + dhd_schedule_trap_log_dump(bus->dhd, (uint8 *)failed_if, strlen(failed_if)); +} + +#endif /* BCM_ROUTER_DHD */ + +/** + * Downloads firmware file given by 'bus->fw_path' into PCIe dongle + * + * BCMEMBEDIMAGE specific: + * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header + * file will be used instead. 
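 + * + * (Download order in this routine: hold the ARM core in reset, write the RAM + * image, write the NVRAM vars, then release the ARM core from reset.)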
+ * + */ +static int +_dhdpcie_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Return immediately if there is no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + DHD_ERROR(("%s: no firmware file\n", __FUNCTION__)); + return 0; +#endif + } +#ifdef BCM_ROUTER_DHD + if (_dhdpcie_get_nvram_params(bus) < 0) { + DHD_ERROR(("%s: failed to get nvram from system.\n", __FUNCTION__)); + return 0; + } +#endif + /* Keep arm in reset */ + if (dhdpcie_bus_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdpcie_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__, + __LINE__)); +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + goto err; +#endif + } else { + embed = FALSE; + dlok = TRUE; + } + } + +#ifdef BCMEMBEDIMAGE + if (embed) { + if (dhdpcie_download_code_array(bus)) { + DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); + goto err; + } else { + dlok = TRUE; + } + } +#else + BCM_REFERENCE(embed); +#endif + if (!dlok) { + DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__)); + goto err; + } + + /* EXAMPLE: nvram_array */ + /* If a valid nvram_array is specified as above, it can be passed down to the dongle */ + /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */ + + /* External nvram takes precedence if specified */ + if (dhdpcie_download_nvram(bus)) { + DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__)); + goto err; + } + + /* Take arm out of reset */ + if (dhdpcie_bus_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: +#ifdef BCM_ROUTER_DHD + _dhdpcie_free_nvram_params(bus); +#endif /* BCM_ROUTER_DHD */ + return bcmerror; +} /* _dhdpcie_download_firmware */ + +static int +dhdpcie_bus_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + uint readlen = 0; + uint i = 0; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return -1; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + DHD_PCIE_INFO(("conlog: bufsize=0x%x\n", c->bufsize)); + } + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + DHD_PCIE_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf, + idx, c->last)); + + /* Read the console buffer data to a local buffer; + * optimize by reading only the portion of the buffer needed, but + * it is important to handle wrap-around.
Read ptr is 'c->last', + * write ptr is 'idx' + */ + addr = ltoh32(c->log.buf); + + /* wrap around case - write ptr < read ptr */ + if (idx < c->last) { + /* from read ptr to end of buffer */ + readlen = c->bufsize - c->last; + if ((rv = dhdpcie_bus_membytes(bus, FALSE, + addr + c->last, c->buf, readlen)) < 0) { + DHD_ERROR(("conlog: read error[1] ! \n")); + return rv; + } + /* from beginning of buffer to write ptr */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, + addr, c->buf + readlen, + idx)) < 0) { + DHD_ERROR(("conlog: read error[2] ! \n")); + return rv; + } + readlen += idx; + } else { + /* non-wraparound case, write ptr > read ptr */ + readlen = (uint)idx - c->last; + if ((rv = dhdpcie_bus_membytes(bus, FALSE, + addr + c->last, c->buf, readlen)) < 0) { + DHD_ERROR(("conlog: read error[3] ! \n")); + return rv; + } + } + /* update read ptr */ + c->last = idx; + + /* now output the read data from the local buffer to the host console */ + while (i < readlen) { + for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) { + ch = c->buf[i]; + ++i; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); + } + } + + return BCME_OK; + +} /* dhdpcie_bus_readconsole */ + +void +dhd_bus_dump_console_buffer(dhd_bus_t *bus) +{ + uint32 n, i; + uint32 addr; + char *console_buffer = NULL; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + int rv; + + DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__)); + return; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { + goto exit; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) { + goto exit; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) { + goto exit; + } + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { + goto exit; + } + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) { + goto exit; + } + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. 
The macro
+			 * will truncate a lot of the printfs
+			 */
+
+			printf("CONSOLE: %s\n", line);
+		}
+	}
+
+exit:
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+	return;
+}
+
+static void
+dhdpcie_schedule_log_dump(dhd_bus_t *bus)
+{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
+	log_dump_type_t *flush_type;
+
+	/* flush_type is freed in do_dhd_log_dump() */
+	flush_type = MALLOCZ(bus->dhd->osh, sizeof(log_dump_type_t));
+	if (flush_type) {
+		*flush_type = DLD_BUF_TYPE_ALL;
+		dhd_schedule_log_dump(bus->dhd, flush_type);
+	} else {
+		DHD_ERROR(("%s: failed to allocate flush_type\n", __FUNCTION__));
+	}
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
+}
+
+/**
+ * Checks whether the dongle has trapped or asserted; if so, reads the trap/assert
+ * records from dongle memory, logs them, and schedules dump collection.
+ *
+ * @return BCME_OK on success
+ */
+static int
+dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
+	struct bcmstrbuf strbuf;
+	unsigned long flags;
+	bool dongle_trap_occured = FALSE;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON()) {
+		return 0;
+	}
+
+	if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout, so "data" is NULL.
+		 * Allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed\n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done2;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed\n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done2;
+	}
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
+		goto done1;
+	}
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+		local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
+
+	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (bus->pcie_sh->assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done1; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (bus->pcie_sh->assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done1; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line); + } + + if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) { + trap_t *tr = &bus->dhd->last_trap_info; + dongle_trap_occured = TRUE; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) { + bus->dhd->dongle_trap_occured = TRUE; + goto done1; + } + dhd_bus_dump_trap_info(bus, &strbuf); + } + } + + if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) { + printf("%s: %s\n", __FUNCTION__, strbuf.origbuf); +#ifdef REPORT_FATAL_TIMEOUTS + /** + * stop the timers as FW trapped + */ + if (dhd_stop_scan_timer(bus->dhd, FALSE, 0)) { + DHD_ERROR(("dhd_stop_scan_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_bus_timer(bus->dhd)) { + DHD_ERROR(("dhd_stop_bus_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_cmd_timer(bus->dhd)) { + DHD_ERROR(("dhd_stop_cmd_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_join_timer(bus->dhd)) { + DHD_ERROR(("dhd_stop_join_timer failed\n")); + ASSERT(0); + } +#endif /* REPORT_FATAL_TIMEOUTS */ + + /* wake up IOCTL wait event */ + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); + + dhd_bus_dump_console_buffer(bus); + dhd_prot_debug_info_print(bus->dhd); + +#if defined(DHD_FW_COREDUMP) + /* save core dump or write to a file */ + if (bus->dhd->memdump_enabled) { +#ifdef DHD_SSSR_DUMP + DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__)); + bus->dhd->collect_sssr = TRUE; +#endif /* DHD_SSSR_DUMP */ +#ifdef DHD_SDTC_ETB_DUMP + DHD_ERROR(("%s : Set collect_sdtc as TRUE\n", __FUNCTION__)); + bus->dhd->collect_sdtc = TRUE; +#endif /* DHD_SDTC_ETB_DUMP */ + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + + /* set the trap occured flag only after all the memdump, + * logdump and sssr dump collection has been scheduled + */ + if (dongle_trap_occured) { + bus->dhd->dongle_trap_occured = TRUE; + if (bus->dhd->check_trap_rot && + bus->dhd->ext_trap_data_supported && + bus->pcie_sh->flags2 & PCIE_SHARED2_ETD_ADDR_SUPPORT) { + uint32 trap_data = *(uint32 *)bus->dhd->extended_trap_data; + DHD_ERROR(("%s : etd data : %x\n", __FUNCTION__, trap_data)); + if (!(trap_data & D2H_DEV_EXT_TRAP_DATA)) { + uint32 *ext_data = bus->dhd->extended_trap_data; + /* Skip the first word which is trap_data */ + ext_data++; + DHD_ERROR(("Dongle trap but no etd\n")); + if (dhdpcie_bus_membytes(bus, FALSE, + local_pciedev_shared->etd_addr, + (uint8 *)ext_data, + BCMPCIE_EXT_TRAP_DATA_MAXLEN - + sizeof(trap_data)) < 0) { + DHD_ERROR(("Error to read etd from dongle\n")); + } + } else { + DHD_ERROR(("Dongle trap with etd\n")); + } + } + + } + +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT + 
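		/* snapshot the trap details into the hang-event payload before the reset is scheduled below */
+		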
copy_hang_info_trap(bus->dhd); +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */ + + dhd_schedule_reset(bus->dhd); + +#ifdef NDIS + /* ASSERT only if hang detection/recovery is disabled. If enabled then let + * windows HDR mechansim trigger FW download via surprise removal + */ + dhd_bus_check_died(bus); +#endif + + } + +done1: + if (bcmerror) { + /* dhdpcie_checkdied is invoked only when dongle has trapped + * or after PCIe link down..etc. so set dongle_trap_occured so that + * log_dump logic can rely on only one flag dongle_trap_occured. + */ + bus->dhd->dongle_trap_occured = TRUE; + dhdpcie_schedule_log_dump(bus); + } + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); +done2: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + + return bcmerror; +} /* dhdpcie_checkdied */ + +/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */ +void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf) +{ + int ret = 0; + int size; /* Full mem size */ + int start; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *databuf = buf; + + if (bus == NULL) { + return; + } + + start = bus->dongle_ram_base; + read_size = 4; + /* check for dead bus */ + { + uint test_word = 0; + ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size); + /* if read error or bus timeout */ + if (ret || (test_word == 0xFFFFFFFF)) { + return; + } + } + + /* Get full mem size */ + size = bus->ramsize; + /* Read mem content */ + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) { + return; + } + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + bus->dhd->soc_ram = buf; + bus->dhd->soc_ram_length = bus->ramsize; + return; +} + +#if defined(DHD_FW_COREDUMP) +static int +dhdpcie_get_mem_dump(dhd_bus_t *bus) +{ + int ret = BCME_OK; + int size = 0; + int start = 0; + int read_size = 0; /* Read size of each iteration */ + uint8 *p_buf = NULL, *databuf = NULL; +#ifdef BOARD_HIKEY + unsigned long flags_bus; +#endif /* BOARD_HIKEY */ + + if (!bus) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!bus->dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + size = bus->ramsize; /* Full mem size */ + start = bus->dongle_ram_base; /* Start address */ + + /* Get full mem size */ + p_buf = dhd_get_fwdump_buf(bus->dhd, size); + if (!p_buf) { + DHD_ERROR(("%s: Out of memory (%d bytes)\n", + __FUNCTION__, size)); + return BCME_ERROR; + } + + /* Read mem content */ + DHD_TRACE_HW4(("Dump dongle memory\n")); + databuf = p_buf; + + while (size > 0) { + read_size = MIN(MEMBLOCK, size); +#ifdef BOARD_HIKEY + /* Hold BUS_LP_STATE_LOCK to avoid simultaneous bus access */ + DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); +#endif /* BOARD_HIKEY */ + ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size); +#ifdef BOARD_HIKEY + DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); +#endif /* BOARD_HIKEY */ + if (ret) { + DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); +#ifdef DHD_DEBUG_UART + bus->dhd->memdump_success = FALSE; +#endif /* DHD_DEBUG_UART */ + break; + } + DHD_TRACE((".")); + + /* Decrement size and 
increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + return ret; +} + +static int +dhdpcie_mem_dump(dhd_bus_t *bus) +{ + dhd_pub_t *dhdp; + int ret; + uint32 dhd_console_ms_prev = 0; + + dhdp = bus->dhd; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhd_console_ms_prev = dhdp->dhd_console_ms; + if (dhd_console_ms_prev) { + DHD_ERROR(("%s: Disabling console msgs(0x%d) before mem dump to local buf\n", + __FUNCTION__, dhd_console_ms_prev)); + dhdp->dhd_console_ms = 0; + } +#ifdef EXYNOS_PCIE_DEBUG + exynos_pcie_register_dump(1); +#endif /* EXYNOS_PCIE_DEBUG */ + +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__)); + /* panic only for DUMP_MEMFILE_BUGON */ + ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON); + ret = BCME_ERROR; + goto exit; + } +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + + /* Induce DB7 trap for below non-trap cases */ + switch (dhdp->memdump_type) { + case DUMP_TYPE_RESUMED_ON_TIMEOUT: + /* intentional fall through */ + case DUMP_TYPE_D3_ACK_TIMEOUT: + /* intentional fall through */ + case DUMP_TYPE_PKTID_AUDIT_FAILURE: + /* intentional fall through */ + case DUMP_TYPE_PKTID_INVALID: + /* intentional fall through */ + case DUMP_TYPE_SCAN_TIMEOUT: + /* intentional fall through */ + case DUMP_TYPE_SCAN_BUSY: + /* intentional fall through */ + case DUMP_TYPE_BY_LIVELOCK: + /* intentional fall through */ + case DUMP_TYPE_IFACE_OP_FAILURE: + /* intentional fall through */ + case DUMP_TYPE_PKTID_POOL_DEPLETED: + /* intentional fall through */ + case DUMP_TYPE_ESCAN_SYNCID_MISMATCH: + /* intentional fall through */ + case DUMP_TYPE_INVALID_SHINFO_NRFRAGS: + if (dhdp->db7_trap.fw_db7w_trap) { + /* Set fw_db7w_trap_inprogress here and clear from DPC */ + dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE; + dhdpcie_fw_trap(dhdp->bus); + OSL_DELAY(100 * 1000); // wait 100 msec + } else { + DHD_ERROR(("%s: DB7 Not supported!!!\n", + __FUNCTION__)); + } + break; + default: + break; + } + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0) + return BCME_ERROR; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + ret = dhdpcie_get_mem_dump(bus); + if (ret) { + DHD_ERROR(("%s: failed to get mem dump, err=%d\n", + __FUNCTION__, ret)); + goto exit; + } +#ifdef DHD_DEBUG_UART + bus->dhd->memdump_success = TRUE; +#endif /* DHD_DEBUG_UART */ + +#ifdef BCMINTERNAL + /* TODO: for host offload firmware, need to modify the stack and pc/lr to point it back to + * the original offset so gdb can match with symbol files + */ +#endif + + dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); + /* buf, actually soc_ram free handled in dhd_{free,clear} */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +exit: + if (dhd_console_ms_prev) { + DHD_ERROR(("%s: enable console msgs(0x%d) after collecting memdump to local buf\n", + __FUNCTION__, dhd_console_ms_prev)); + dhdp->dhd_console_ms = dhd_console_ms_prev; + } + return ret; +} + +int +dhd_bus_get_mem_dump(dhd_pub_t *dhdp) +{ + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + return dhdpcie_get_mem_dump(dhdp->bus); +} + 
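+/*
+ * Illustrative sketch (hypothetical, not part of this patch): the dump
+ * helpers above all share one pattern -- walk dongle RAM from
+ * dongle_ram_base in MEMBLOCK-sized chunks so no single backplane
+ * transaction gets too large, and abort on the first bus error.
+ * 'chunked_ram_read' and its 'read_fn' callback are stand-ins for
+ * dhdpcie_get_mem_dump()/dhdpcie_bus_membytes().
+ */
+#if 0	/* illustrative sketch, never compiled */
+static int
+chunked_ram_read(uint32 base, uint32 total, uint8 *out,
+	int (*read_fn)(uint32 addr, uint8 *buf, uint32 len))
+{
+	uint32 addr = base;
+
+	while (total > 0) {
+		uint32 chunk = MIN(MEMBLOCK, total);	/* cap each transaction */
+		int err = read_fn(addr, out, chunk);
+		if (err)
+			return err;	/* first failure aborts the dump */
+		addr += chunk;
+		out += chunk;
+		total -= chunk;
+	}
+	return BCME_OK;
+}
+#endif /* illustrative sketch */
+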
+int +dhd_bus_mem_dump(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + int ret = BCME_ERROR; + + if (dhdp->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s bus is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Try to resume if already suspended or suspend in progress */ +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* Skip if still in suspended or suspend in progress */ + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + return BCME_ERROR; + } + + DHD_OS_WAKE_LOCK(dhdp); + ret = dhdpcie_mem_dump(bus); + DHD_OS_WAKE_UNLOCK(dhdp); + return ret; +} +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t *bus) +{ +#if defined(DHD_FW_COREDUMP) + DHD_OS_WAKE_LOCK(bus->dhd); + dhd_bus_mem_dump(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + return 0; +#else + return -1; +#endif +} + +/** + * Transfers bytes from host to dongle using pio mode. + * Parameter 'address' is a backplane address. + */ +static int +dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size) +{ + uint dsize; + int detect_endian_flag = 0x01; + bool little_endian; + + if (write && bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + /* Detect endianness. */ + little_endian = *(char *)&detect_endian_flag; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + + /* Determine initial transfer parameters */ +#ifdef DHD_SUPPORT_64BIT + dsize = sizeof(uint64); +#else /* !DHD_SUPPORT_64BIT */ + dsize = sizeof(uint32); +#endif /* DHD_SUPPORT_64BIT */ + + /* Do the transfer(s) */ + DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n", + __FUNCTION__, (write ? "write" : "read"), size, address)); + if (write) { + while (size) { +#ifdef DHD_SUPPORT_64BIT + if (size >= sizeof(uint64) && little_endian && !(address % 8)) { + dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data)); + } +#else /* !DHD_SUPPORT_64BIT */ + if (size >= sizeof(uint32) && little_endian && !(address % 4)) { + dhdpcie_bus_wtcm32(bus, address, *((uint32*)data)); + } +#endif /* DHD_SUPPORT_64BIT */ + else { + dsize = sizeof(uint8); + dhdpcie_bus_wtcm8(bus, address, *data); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + } + } + } else { + while (size) { +#ifdef DHD_SUPPORT_64BIT + if (size >= sizeof(uint64) && little_endian && !(address % 8)) + { + *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address); + } +#else /* !DHD_SUPPORT_64BIT */ + if (size >= sizeof(uint32) && little_endian && !(address % 4)) + { + *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address); + } +#endif /* DHD_SUPPORT_64BIT */ + else { + dsize = sizeof(uint8); + *data = dhdpcie_bus_rtcm8(bus, address); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize) > 0) { + data += dsize; + address += dsize; + } + } + } + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + return BCME_OK; +} /* dhdpcie_bus_membytes */ + +extern bool agg_h2d_db_enab; +/** + * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue + * to the (non flow controlled) flow ring. 
+ */ +int +BCMFASTPATH(dhd_bus_schedule_queue)(struct dhd_bus *bus, uint16 flow_id, bool txs) +/** XXX function name could be more descriptive, eg use 'tx' and 'flow ring' in name */ +{ + flow_ring_node_t *flow_ring_node; + int ret = BCME_OK; +#ifdef DHD_LOSSLESS_ROAMING + dhd_pub_t *dhdp = bus->dhd; +#endif + + DHD_PCIE_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id)); + + /* ASSERT on flow_id */ + if (flow_id >= bus->max_submission_rings) { + DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, + flow_id, bus->max_submission_rings)); + return 0; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id); + + if (flow_ring_node->prot_info == NULL) { + DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__)); + return BCME_NOTREADY; + } + +#ifdef DHD_LOSSLESS_ROAMING + if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) { + DHD_ERROR_RLMT(("%s: roam in progress, tid %d is not in precedence map 0x%x." + " block scheduling\n", + __FUNCTION__, flow_ring_node->flow_info.tid, dhdp->dequeue_prec_map)); + return BCME_OK; + } +#endif /* DHD_LOSSLESS_ROAMING */ + + { + unsigned long flags; + void *txp = NULL; + flow_queue_t *queue; +#ifdef TPUT_MONITOR + int pktlen; +#endif + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + return BCME_NOTREADY; + } + + while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + if (bus->dhd->conf->orphan_move <= 1) + PKTORPHAN(txp, bus->dhd->conf->tsq); + + /* + * Modifying the packet length caused P2P cert failures. + * Specifically on test cases where a packet of size 52 bytes + * was injected, the sniffer capture showed 62 bytes because of + * which the cert tests failed. So making the below change + * only Router specific. 
+ */
+#if defined(BCM_ROUTER_DHD)
+			if (PKTLEN(bus->dhd->osh, txp) < (ETHER_MIN_LEN - ETHER_CRC_LEN)) {
+				PKTSETLEN(bus->dhd->osh, txp, (ETHER_MIN_LEN - ETHER_CRC_LEN));
+			}
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef DHDTCPACK_SUPPRESS
+			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
+				if (ret != BCME_OK) {
+					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
+						__FUNCTION__));
+				}
+			}
+#endif /* DHDTCPACK_SUPPRESS */
+			/* Attempt to transfer packet over flow ring */
+			/* XXX: ifidx is wrong */
+#ifdef TPUT_MONITOR
+			pktlen = PKTLEN(OSH_NULL, txp);
+			if ((bus->dhd->conf->data_drop_mode == TXPKT_DROP) && (pktlen > 500))
+				ret = BCME_OK;
+			else
+#endif
+			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
+			if (ret != BCME_OK) { /* may not have resources in flow ring */
+				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
+#ifdef AGG_H2D_DB
+				if (agg_h2d_db_enab) {
+					dhd_prot_schedule_aggregate_h2d_db(bus->dhd, flow_id);
+				} else
+#endif /* AGG_H2D_DB */
+				{
+					dhd_prot_txdata_write_flush(bus->dhd, flow_id);
+				}
+				/* reinsert at head */
+				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
+				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+				/* If we are able to requeue the packet, return success */
+				return BCME_OK;
+			}
+
+#ifdef DHD_MEM_STATS
+			DHD_MEM_STATS_LOCK(bus->dhd->mem_stats_lock, flags);
+			bus->dhd->txpath_mem += PKTLEN(bus->dhd->osh, txp);
+			DHD_PCIE_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
+				__FUNCTION__, bus->dhd->txpath_mem, PKTLEN(bus->dhd->osh, txp)));
+			DHD_MEM_STATS_UNLOCK(bus->dhd->mem_stats_lock, flags);
+#endif /* DHD_MEM_STATS */
+		}
+
+#ifdef DHD_HP2P
+		if (!flow_ring_node->hp2p_ring)
+#endif /* DHD_HP2P */
+		{
+#ifdef AGG_H2D_DB
+			if (agg_h2d_db_enab) {
+				dhd_prot_schedule_aggregate_h2d_db(bus->dhd, flow_id);
+			} else
+#endif /* AGG_H2D_DB */
+			{
+				dhd_prot_txdata_write_flush(bus->dhd, flow_id);
+			}
+		}
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+	}
+
+	return ret;
+} /* dhd_bus_schedule_queue */
+
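+/*
+ * Illustrative sketch (hypothetical, not part of this patch):
+ * dhd_bus_schedule_queue() above drains the flow queue with a
+ * dequeue/attempt/reinsert-at-head loop, so a packet that does not fit
+ * in the ring keeps its place at the front of the queue. Reduced to its
+ * essentials, with 'try_post' standing in for dhd_prot_txdata():
+ */
+#if 0	/* illustrative sketch, never compiled */
+	while ((txp = dhd_flow_queue_dequeue(dhd, queue)) != NULL) {
+		if (try_post(dhd, txp) != BCME_OK) {
+			/* ring full: put the packet back at the head and stop */
+			dhd_flow_queue_reinsert(dhd, queue, txp);
+			break;
+		}
+	}
+#endif /* illustrative sketch */
+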
+/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
+int
+BCMFASTPATH(dhd_bus_txdata)(struct dhd_bus *bus, void *txp, uint8 ifidx)
+{
+	uint16 flowid;
+#ifdef IDLE_TX_FLOW_MGMT
+	uint8 node_status;
+#endif /* IDLE_TX_FLOW_MGMT */
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+	int ret = BCME_OK;
+	void *txp_pend = NULL;
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+	void *ntxp = NULL;
+	uint8 prio = PKTPRIO(txp);
+#endif
+
+	if (!bus->dhd->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
+		goto toss;
+	}
+
+	flowid = DHD_PKT_GET_FLOWID(txp);
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+
+	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
+		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	if ((flowid > bus->dhd->max_tx_flowid) ||
+#ifdef IDLE_TX_FLOW_MGMT
+		(!flow_ring_node->active))
+#else
+		(!flow_ring_node->active) ||
+		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
+		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
+#endif /* IDLE_TX_FLOW_MGMT */
+	{
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
+			__FUNCTION__, flowid, flow_ring_node->status,
+			flow_ring_node->active));
+		ret = BCME_ERROR;
+		goto toss;
+	}
+
+#ifdef IDLE_TX_FLOW_MGMT
+	node_status = flow_ring_node->status;
+
+	/* handle the different status states here!! */
+	switch (node_status)
+	{
+		case FLOW_RING_STATUS_OPEN:
+
+			if (bus->enable_idle_flowring_mgmt) {
+				/* Move the node to the head of active list */
+				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
+			}
+			break;
+
+		case FLOW_RING_STATUS_SUSPENDED:
+			DHD_INFO(("Need to initiate TX flow resume\n"));
+			/* Issue resume_ring request */
+			dhd_bus_flow_ring_resume_request(bus,
+				flow_ring_node);
+			break;
+
+		case FLOW_RING_STATUS_CREATE_PENDING:
+		case FLOW_RING_STATUS_RESUME_PENDING:
+			/* Don't do anything here!! */
+			DHD_INFO(("Waiting for flow create/resume! status is %u\n",
+				node_status));
+			break;
+
+		case FLOW_RING_STATUS_DELETE_PENDING:
+		default:
+			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
+				flowid, node_status));
+			/* error here!!
*/ + ret = BCME_ERROR; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + goto toss; + } + /* Now queue the packet */ +#endif /* IDLE_TX_FLOW_MGMT */ + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) + FOREACH_CHAINED_PKT(txp, ntxp) { + /* Tag the packet with flowid - Remember, only the head packet */ + /* of the chain has been tagged with the FlowID in dhd_sendpkt */ + /* Also set the priority */ + DHD_PKT_SET_FLOWID(txp, flowid); + PKTSETPRIO(txp, prio); + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) { + txp_pend = txp; + PKTSETCLINK((txp), ntxp); + break; + } + } +#else /* !(defined(BCM_ROUTER_DHD) && defined(HNDCTF)) */ + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) + txp_pend = txp; +#endif /* defined(BCM_ROUTER_DHD) && defined(HNDCTF */ + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status) { + DHD_PCIE_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + if (txp_pend) { + txp = txp_pend; + goto toss; + } + return BCME_OK; + } + ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + /* If we have anything pending, try to push into q */ + if (txp_pend) { + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + +#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF)) + FOREACH_CHAINED_PKT(txp_pend, ntxp) { + /* Tag the packet with flowid and set packet priority */ + DHD_PKT_SET_FLOWID(txp_pend, flowid); + PKTSETPRIO(txp_pend, prio); + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) + != BCME_OK) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + PKTSETCLINK((txp_pend), ntxp); + txp = txp_pend; + goto toss; + } + } +#else /* !(defined(BCM_ROUTER_DHD) && defined(HNDCTF)) */ + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + txp = txp_pend; + goto toss; + } +#endif /* defined(BCM_ROUTER_DHD) && defined(HNDCTF) */ + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + + return ret; + +toss: + DHD_PCIE_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); +#ifdef DHD_EFI + /* for EFI, pass the 'send' flag as false, to avoid enqueuing the failed tx pkt + * into the Tx done queue + */ + PKTCFREE(bus->dhd->osh, txp, FALSE); +#else + PKTCFREE(bus->dhd->osh, txp, TRUE); +#endif /* DHD_EFI */ + return ret; +} /* dhd_bus_txdata */ + +void +dhd_bus_stop_queue(struct dhd_bus *bus) +{ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); +} + +void +dhd_bus_start_queue(struct dhd_bus *bus) +{ + /* + * Tx queue has been stopped due to resource shortage (or) + * bus is not in a state to turn on. + * + * Note that we try to re-start network interface only + * when we have enough resources, one has to first change the + * flag indicating we have all the resources. 
+ */
+	if (dhd_prot_check_tx_resource(bus->dhd)) {
+		DHD_ERROR(("%s: Interface NOT started, previously stopped "
+			"due to resource shortage\n", __FUNCTION__));
+		return;
+	}
+	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+}
+
+/* Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhd->bus;
+	uint32 addr, val;
+	int rv;
+#ifdef PCIE_INB_DW
+	unsigned long flags = 0;
+#endif /* PCIE_INB_DW */
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		return BCME_NOTREADY;
+	}
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+	/* handle difference in definition of hnd_log_t in certain branches */
+	if (dhd->wlc_ver_major < 14) {
+		addr -= (uint32)sizeof(uint32);
+	}
+	val = htol32(0);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+	/* handle difference in definition of hnd_log_t in certain branches */
+	if (dhd->wlc_ver_major < 14) {
+		addr -= sizeof(uint32);
+	}
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+#ifdef PCIE_INB_DW
+	/* Use a lock to ensure this tx DEVICE_WAKE + tx H2D_HOST_CONS_INT sequence is
+	 * mutually exclusive with the rx D2H_DEV_DS_ENTER_REQ + tx H2D_HOST_DS_ACK sequence.
+	 */
+	DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+#endif /* PCIE_INB_DW */
+
+	/* generate an interrupt to dongle to indicate that it needs to process cons command */
+	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
+
+#ifdef PCIE_INB_DW
+	DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+#endif /* PCIE_INB_DW */
+done:
+	return rv;
+} /* dhd_bus_console_in */
+
+/**
+ * Called on frame reception; the frame was received from the dongle on interface 'ifidx' and is
+ * contained in 'pkt'. Processes the rx frame and forwards it up to netif.
+ */
+void
+BCMFASTPATH(dhd_bus_rx_frame)(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
+{
+	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
+}
+
+/* Acquire/Release bar1_switch_lock only if the chip supports bar1 switching */
+#define DHD_BUS_BAR1_SWITCH_LOCK(bus, flags) \
+	((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_LOCK((bus)->bar1_switch_lock, flags) : \
+	BCM_REFERENCE(flags)
+
+#define DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags) \
+	((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_UNLOCK((bus)->bar1_switch_lock, flags) : \
+	BCM_REFERENCE(flags)
+
+/* Init/Deinit bar1_switch_lock only if the chip supports bar1 switching */
+static void
+dhd_init_bar1_switch_lock(dhd_bus_t *bus)
+{
+	if (bus->bar1_switch_enab && !bus->bar1_switch_lock) {
+		bus->bar1_switch_lock = osl_spin_lock_init(bus->osh);
+	}
+}
+
+static void
+dhd_deinit_bar1_switch_lock(dhd_bus_t *bus)
+{
+	if (bus->bar1_switch_enab && bus->bar1_switch_lock) {
+		osl_spin_lock_deinit(bus->osh, bus->bar1_switch_lock);
+		bus->bar1_switch_lock = NULL;
+	}
+}
+
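+/*
+ * Illustrative sketch (hypothetical, not part of this patch): the BAR1
+ * window selection below is plain power-of-two alignment. The worked
+ * example from the comment that follows, expressed as code:
+ */
+#if 0	/* illustrative sketch, never compiled */
+	uint32 bar1_size = 0x400000;
+	uint32 addr = 0x938fff;
+	uint32 bpwin = (uint32)(addr & ~(bar1_size - 1));	/* 0x800000: window base */
+	uint32 off = addr - bpwin;	/* 0x138fff: offset within the mapped window */
+#endif /* illustrative sketch */
+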
+/*
+ * The bpwindow for any address is the lower bound of the multiples of bar1_size.
+ * For example, if addr=0x938fff and bar1_size is 0x400000, the address
+ * falls in the window 0x800000-0xbfffff, so the bpwindow to select is
+ * 0x800000. To achieve this, mask off the low (bar1_size - 1) bits of
+ * the given addr.
+ */
+#define DHD_BUS_BAR1_BPWIN(addr, bar1_size) \
+	(uint32)((addr) & ~((bar1_size) - 1))
+
+/**
+ * dhdpcie_bar1_window_switch_enab
+ *
+ * Check if the chip requires BAR1 window switching based on
+ * dongle_ram_base, ramsize and the mapped bar1_size, and set
+ * bus->bar1_switch_enab accordingly
+ * @bus: dhd bus context
+ *
+ */
+void
+dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus)
+{
+	uint32 ramstart = bus->dongle_ram_base;
+	uint32 ramend = bus->dongle_ram_base + bus->ramsize - 1;
+	uint32 bpwinstart = DHD_BUS_BAR1_BPWIN(ramstart, bus->bar1_size);
+	uint32 bpwinend = DHD_BUS_BAR1_BPWIN(ramend, bus->bar1_size);
+
+	bus->bar1_switch_enab = FALSE;
+
+	/*
+	 * Window switching is needed to access the complete BAR1
+	 * if bpwinstart and bpwinend differ
+	 */
+	if (bpwinstart != bpwinend) {
+		bus->bar1_switch_enab = TRUE;
+	}
+
+	DHD_ERROR(("%s: bar1_switch_enab=%d ramstart=0x%x ramend=0x%x bar1_size=0x%x\n",
+		__FUNCTION__, bus->bar1_switch_enab, ramstart, ramend, bus->bar1_size));
+}
+
+/**
+ * dhdpcie_setbar1win
+ *
+ * OS-independent function for setting the BAR1 backplane window and
+ * tracking the current window position.
+ *
+ * @bus: dhd bus context
+ * @addr: new backplane window address for BAR1
+ */
+static void
+dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, addr);
+	bus->curr_bar1_win = addr;
+}
+
+/**
+ * dhdpcie_bus_chkandshift_bpoffset
+ *
+ * Check whether the provided address is within the current BAR1 window;
+ * if not, shift the window
+ *
+ * @bus: dhd bus context
+ * @offset: backplane address that the caller wants to access
+ *
+ * Return: new offset for access
+ */
+static ulong
+dhdpcie_bus_chkandshift_bpoffset(dhd_bus_t *bus, ulong offset)
+{
+
+	uint32 bpwin;
+#ifdef DHD_EFI
+	/* TODO: bar1_size is hardcoded for EFI. Below logic should be
+	 * revisited.
Also EFI platform should find bar1_size from + * EFI Kernel APIs + */ + if (!bus->bar1_switch_enab) { + return offset; + } +#endif /* DHD_EFI */ + /* Determine BAR1 backplane window using window size + * Window address mask should be ~(size - 1) + */ + bpwin = DHD_BUS_BAR1_BPWIN(offset, bus->bar1_size); + + if (bpwin != bus->curr_bar1_win) { + DHD_PCIE_INFO(("%s: move BAR1 window curr_bar1_win=0x%x bpwin=0x%x offset=0x%lx\n", + __FUNCTION__, bus->curr_bar1_win, bpwin, offset)); + /* Move BAR1 window */ + dhdpcie_setbar1win(bus, bpwin); + } + + return offset - bpwin; +} + +/** 'offset' is a backplane address */ +void +dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) +{ + ulong flags = 0; + + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if defined(linux) || defined(LINUX) + W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data); +#else + *(volatile uint8 *)(bus->tcm + offset) = (uint8)data; +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); +} + +void +dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) +{ + ulong flags = 0; + + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if defined(linux) || defined(LINUX) + W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data); +#else + *(volatile uint16 *)(bus->tcm + offset) = (uint16)data; +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); +} + +void +dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) +{ + ulong flags = 0; + + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if defined(linux) || defined(LINUX) + W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data); +#else + *(volatile uint32 *)(bus->tcm + offset) = (uint32)data; +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); +} + +#ifdef DHD_SUPPORT_64BIT +void +dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) +{ + ulong flags = 0; + + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if defined(linux) || defined(LINUX) + W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data); +#else + *(volatile uint64 *)(bus->tcm + offset) = (uint64)data; +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); +} +#endif /* DHD_SUPPORT_64BIT */ + +uint8 +dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) +{ + volatile uint8 data; + ulong flags = 0; + + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + data = (uint8)-1; + return data; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if defined(linux) || defined(LINUX) + data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset)); +#else + data = *(volatile uint8 *)(bus->tcm + offset); +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); + return data; +} + +uint16 +dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) +{ + volatile uint16 data; + ulong flags = 0; + + 
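	/* As in the other rtcm/wtcm helpers: return all-ones when the PCIe link is
+	 * down, otherwise shift the BAR1 window before touching TCM.
+	 */
+	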
if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + data = (uint16)-1; + return data; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if (defined(linux) || defined (LINUX)) + data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset)); +#else + data = *(volatile uint16 *)(bus->tcm + offset); +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); + return data; +} + +uint32 +dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) +{ + volatile uint32 data; + ulong flags = 0; + + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + data = (uint32)-1; + return data; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if (defined(linux) || defined (LINUX)) + data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset)); +#else + data = *(volatile uint32 *)(bus->tcm + offset); +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); + return data; +} + +#ifdef DHD_SUPPORT_64BIT +uint64 +dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) +{ + volatile uint64 data; + ulong flags = 0; + + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + data = (uint64)-1; + return data; + } + + DHD_BUS_BAR1_SWITCH_LOCK(bus, flags); + + offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset); + +#if (defined(linux) || defined (LINUX)) + data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset)); +#else + data = *(volatile uint64 *)(bus->tcm + offset); +#endif /* linux || LINUX */ + + DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags); + return data; +} +#endif /* DHD_SUPPORT_64BIT */ + +void +dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(dhd_bus_t *bus, void *data, uint8 type, + uint16 ringid, bool read, bool req_pwr) +{ + ulong addr; + + if (type == RING_WR_UPD) { + addr = bus->ring_sh[ringid].ring_state_w; + } else if (type == RING_RD_UPD) { + addr = bus->ring_sh[ringid].ring_state_r; + } else { + DHD_ERROR(("%s: invalid type:%d\n", __FUNCTION__, type)); + return; + } + + if (req_pwr && MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + if (read) { + /* Read */ + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + } else { + /* Write */ + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + } + + if (req_pwr && MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +void +dhdpcie_update_ring_ptrs_in_tcm(dhd_bus_t *bus, void *data, uint8 type, uint16 ringid, + bool read) +{ +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + ulong flags_ds; + DHD_BUS_DONGLE_DS_LOCK(bus->dongle_ds_lock, flags_ds); + dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(bus, data, type, ringid, read, + bus->dongle_in_deepsleep); + DHD_BUS_DONGLE_DS_UNLOCK(bus->dongle_ds_lock, flags_ds); + } else +#endif /* PCIE_INB_DW */ + { + /* Request power explicitly */ + dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(bus, data, type, ringid, read, TRUE); + } +} + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid) +{ + uint64 long_data; + ulong addr; /* dongle address */ + + DHD_PCIE_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + /* + * Use explicit tcm ring ptr update functions when DMA indices are not enabled to + * 
as backplane power request calls are causing TPUT drops + */ + if (!(bus->dhd->dma_d2h_ring_upd_support || bus->dhd->dma_h2d_ring_upd_support)) { + if ((type == RING_WR_UPD) || (type == RING_RD_UPD)) { + dhdpcie_update_ring_ptrs_in_tcm(bus, data, type, ringid, FALSE); + return; + } + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + switch (type) { + case RING_WR_UPD : + addr = bus->ring_sh[ringid].ring_state_w; + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_RD_UPD : + addr = bus->ring_sh[ringid].ring_state_r; + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case D2H_DMA_SCRATCH_BUF: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer); + long_data = HTOL64(*(uint64 *)data); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case D2H_DMA_SCRATCH_BUF_LEN : + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len); + dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case H2D_DMA_INDX_WR_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case H2D_DMA_INDX_RD_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case D2H_DMA_INDX_WR_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case D2H_DMA_INDX_RD_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case H2D_IFRM_INDX_WR_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case RING_ITEM_LEN : + addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items); + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_MAX_ITEMS : + addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item); + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_BUF_ADDR : + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case D2H_MB_DATA: + addr = bus->d2h_mb_data_ptr_addr; + dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + break; + + case H2D_MB_DATA: + addr = bus->h2d_mb_data_ptr_addr; + dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + break; + + case HOST_API_VERSION: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, 
host_cap); + dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + break; + + case DNGL_TO_HOST_TRAP_ADDR: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len); + DHD_PCIE_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data))); + break; + +#ifdef D2H_MINIDUMP + case DNGL_TO_HOST_TRAP_ADDR_LEN: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, device_trap_debug_buffer_len); + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; +#endif /* D2H_MINIDUMP */ + + case HOST_SCB_ADDR: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr); +#ifdef DHD_SUPPORT_64BIT + dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data)); +#else /* !DHD_SUPPORT_64BIT */ + dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data)); +#endif /* DHD_SUPPORT_64BIT */ + DHD_PCIE_INFO(("Wrote host_scb_addr:0x%x\n", + (uint32) HTOL32(*(uint32 *)data))); + break; + + default: + break; + } + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} /* dhd_bus_cmn_writeshared */ + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) +{ + ulong addr; /* dongle address */ + + /* + * Use explicit tcm ring ptr update functions when DMA indices are not enabled to + * as backplane power request calls are causing TPUT drops + */ + if (!(bus->dhd->dma_d2h_ring_upd_support || bus->dhd->dma_h2d_ring_upd_support)) { + if ((type == RING_WR_UPD) || (type == RING_RD_UPD)) { + dhdpcie_update_ring_ptrs_in_tcm(bus, data, type, ringid, TRUE); + return; + } + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + switch (type) { + case RING_WR_UPD : + addr = bus->ring_sh[ringid].ring_state_w; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case RING_RD_UPD : + addr = bus->ring_sh[ringid].ring_state_r; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case TOTAL_LFRAG_PACKET_CNT : + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt); + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case H2D_MB_DATA: + addr = bus->h2d_mb_data_ptr_addr; + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); + break; + + case D2H_MB_DATA: + addr = bus->d2h_mb_data_ptr_addr; + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); + break; + + case MAX_HOST_RXBUFS : + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs); + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case HOST_SCB_ADDR: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size); + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); + break; + + default : + break; + } + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus) +{ + return ((pciedev_shared_t*)bus->pcie_sh)->flags; +} + +void dhd_prot_clearcounts(dhd_pub_t *dhd); + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ + dhd_prot_clearcounts(dhdp); +} + +/** + * @param params input buffer, NULL for 'set' operation. + * @param plen length of 'params' buffer, 0 for 'set' operation. 
+ * @param arg output buffer + */ +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, uint plen, void *arg, uint len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = BCME_UNSUPPORTED; + uint val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + if (!name) + return BCME_BADARG; + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + if (!(set || (arg && len))) + return BCME_BADARG; + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + if (!(!set || (!params && !plen))) + return BCME_BADARG; + + DHD_PCIE_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) { + goto exit; + } + + if (MULTIBP_ENAB(bus->sih)) { + if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) { + DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__)); + } else { + dhd_bus_pcie_pwr_req(bus); + } + } + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + /* In DEVRESET_QUIESCE/DEVRESET_ON, + * this includes dongle re-attach which initialize pwr_req_ref count to 0 and + * causes pwr_req_ref count miss-match in pwr req clear function and hang. + * In this case, bypass pwr req clear. 
+ */ + if (bcmerror == BCME_DNGL_DEVRESET) { + bcmerror = BCME_OK; + } else { + if (MULTIBP_ENAB(bus->sih)) { + if (vi != NULL) { + if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) { + DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__)); + } else { + dhd_bus_pcie_pwr_req_clear(bus); + } + } + } + } + return bcmerror; +} /* dhd_bus_iovar_op */ + +#ifdef BCM_BUZZZ +#include + +int +dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log, + const int num_counters) +{ + int bytes = 0; + uint32 ctr; + uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX]; + uint32 delta[BCM_BUZZZ_COUNTERS_MAX]; + + /* Compute elapsed counter values per counter event type */ + for (ctr = 0U; ctr < num_counters; ctr++) { + prev[ctr] = core[ctr]; + curr[ctr] = *log++; + core[ctr] = curr[ctr]; /* saved for next log */ + + if (curr[ctr] < prev[ctr]) + delta[ctr] = curr[ctr] + (~0U - prev[ctr]); + else + delta[ctr] = (curr[ctr] - prev[ctr]); + + bytes += sprintf(p + bytes, "%12u ", delta[ctr]); + } + + return bytes; +} + +typedef union cm3_cnts { /* export this in bcm_buzzz.h */ + uint32 u32; + uint8 u8[4]; + struct { + uint8 cpicnt; + uint8 exccnt; + uint8 sleepcnt; + uint8 lsucnt; + }; +} cm3_cnts_t; + +int +dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log) +{ + int bytes = 0; + + uint32 cyccnt, instrcnt; + cm3_cnts_t cm3_cnts; + uint8 foldcnt; + + { /* 32bit cyccnt */ + uint32 curr, prev, delta; + prev = core[0]; curr = *log++; core[0] = curr; + if (curr < prev) + delta = curr + (~0U - prev); + else + delta = (curr - prev); + + bytes += sprintf(p + bytes, "%12u ", delta); + cyccnt = delta; + } + + { /* Extract the 4 cnts: cpi, exc, sleep and lsu */ + int i; + uint8 max8 = ~0; + cm3_cnts_t curr, prev, delta; + prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32; + for (i = 0; i < 4; i++) { + if (curr.u8[i] < prev.u8[i]) + delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]); + else + delta.u8[i] = (curr.u8[i] - prev.u8[i]); + bytes += sprintf(p + bytes, "%4u ", delta.u8[i]); + } + cm3_cnts.u32 = delta.u32; + } + + { /* Extract the foldcnt from arg0 */ + uint8 curr, prev, delta, max8 = ~0; + bcm_buzzz_arg0_t arg0; arg0.u32 = *log; + prev = core[2]; curr = arg0.klog.cnt; core[2] = curr; + if (curr < prev) + delta = curr + (max8 - prev); + else + delta = (curr - prev); + bytes += sprintf(p + bytes, "%4u ", delta); + foldcnt = delta; + } + + instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2] + + cm3_cnts.u8[3]) + foldcnt; + if (instrcnt > 0xFFFFFF00) + bytes += sprintf(p + bytes, "[%10s] ", "~"); + else + bytes += sprintf(p + bytes, "[%10u] ", instrcnt); + return bytes; +} + +int +dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz) +{ + int bytes = 0; + bcm_buzzz_arg0_t arg0; + static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS; + + if (buzzz->counters == 6) { + bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log); + log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */ + } else { + bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters); + log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */ + } + + /* Dump the logged arguments using the registered formats */ + arg0.u32 = *log++; + + switch (arg0.klog.args) { + case 0: + bytes += sprintf(p + bytes, fmt[arg0.klog.id]); + break; + case 1: + { + uint32 arg1 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1); + break; + } + case 2: + { + uint32 arg1, arg2; + arg1 = *log++; arg2 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2); + break; + } + case 3: + { + uint32 arg1, arg2, arg3; + 
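			/* three logged 32-bit arguments follow the key word */
+			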
arg1 = *log++; arg2 = *log++; arg3 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3); + break; + } + case 4: + { + uint32 arg1, arg2, arg3, arg4; + arg1 = *log++; arg2 = *log++; + arg3 = *log++; arg4 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4); + break; + } + default: + printf("%s: Maximum one argument supported\n", __FUNCTION__); + break; + } + + bytes += sprintf(p + bytes, "\n"); + + return bytes; +} + +void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p) +{ + int i; + uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX]; + void * log; + + for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) { + core[i] = 0; + } + + log_sz = buzzz_p->log_sz; + + part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz; + + if (buzzz_p->wrap == TRUE) { + part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz; + total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz; + } else { + part2 = 0U; + total = buzzz_p->count; + } + + if (total == 0U) { + printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total); + return; + } else { + printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__, + total, part2, part1); + } + + if (part2) { /* with wrap */ + log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log)); + while (part2--) { /* from cur to end : part2 */ + p[0] = '\0'; + dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); + printf("%s", p); + log = (void*)((size_t)log + buzzz_p->log_sz); + } + } + + log = (void*)buffer_p; + while (part1--) { + p[0] = '\0'; + dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); + printf("%s", p); + log = (void*)((size_t)log + buzzz_p->log_sz); + } + + printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__); +} + +int dhd_buzzz_dump_dngl(dhd_bus_t *bus) +{ + bcm_buzzz_t * buzzz_p = NULL; + void * buffer_p = NULL; + char * page_p = NULL; + pciedev_shared_t *sh; + int ret = 0; + + if (bus->dhd->busstate != DHD_BUS_DATA) { + return BCME_UNSUPPORTED; + } + if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) { + printf("%s: Page memory allocation failure\n", __FUNCTION__); + goto done; + } + if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) { + printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__); + goto done; + } + + ret = dhdpcie_readshared(bus); + if (ret < 0) { + DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__)); + goto done; + } + + sh = bus->pcie_sh; + + DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr)); + + if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */ + + dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr, + (uint8 *)buzzz_p, sizeof(bcm_buzzz_t)); + + printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> " + "count<%u> status<%u> wrap<%u>\n" + "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n", + (int)sh->buzz_dbg_ptr, + (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end, + buzzz_p->count, buzzz_p->status, buzzz_p->wrap, + buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group, + buzzz_p->buffer_sz, buzzz_p->log_sz); + + if (buzzz_p->count == 0) { + printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__); + goto done; + } + + /* Allocate memory for trace buffer and format strings */ + buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz); + if (buffer_p == NULL) { + printf("%s: Buffer memory allocation failure\n", __FUNCTION__); + goto done; + } + + /* Fetch the trace. 
format strings are exported via bcm_buzzz.h */ + dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */ + (uint8 *)buffer_p, buzzz_p->buffer_sz); + + /* Process and display the trace using formatted output */ + + { + int ctr; + for (ctr = 0; ctr < buzzz_p->counters; ctr++) { + printf(" ", buzzz_p->eventid[ctr]); + } + printf("\n"); + } + + dhd_buzzz_dump(buzzz_p, buffer_p, page_p); + + printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__); + + MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL; + } + +done: + + if (page_p) MFREE(bus->dhd->osh, page_p, 4096); + if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t)); + if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); + + return BCME_OK; +} +#endif /* BCM_BUZZZ */ + +#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \ + ((sih)->buscoretype == PCIE2_CORE_ID)) +#ifdef DHD_PCIE_REG_ACCESS +static bool +pcie2_mdiosetblock(dhd_bus_t *bus, uint blk) +{ + uint mdiodata, mdioctrl, i = 0; + uint pcie_serdes_spinwait = 200; + + mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF); + mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE; + + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata); + + OSL_DELAY(10); + /* retry till the transaction is complete */ + while (i < pcie_serdes_spinwait) { + uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, + 0, 0); + if (!(mdioctrl_read & MDIODATA2_DONE)) { + break; + } + OSL_DELAY(1000); + i++; + } + + if (i >= pcie_serdes_spinwait) { + DHD_ERROR(("pcie_mdiosetblock: timed out\n")); + return FALSE; + } + + return TRUE; +} +#endif /* DHD_PCIE_REG_ACCESS */ + +static void +dhdpcie_enum_reg_init(dhd_bus_t *bus) +{ + /* initialize Function control register (clear bit 4) to HW init value */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0, + PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE); + + /* clear IntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0); + /* clear IntStatus */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0, + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0)); + + /* clear MSIVector */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0); + /* clear MSIIntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0); + /* clear MSIIntStatus */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0, + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0)); + + /* clear PowerIntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0); + /* clear PowerIntStatus */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0, + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0)); + + /* clear MailboxIntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0); + /* clear MailboxInt */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0, + si_corereg(bus->sih, 
bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0)); +} + +int +dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail) +{ + uint flr_capab; + uint val; + int retry = 0; + + DHD_ERROR(("******** Perform FLR ********\n")); + + /* Kernel Panic for 4378Ax during traptest/devreset4 reload case: + * For 4378Ax, enum registers will not be reset with FLR (producer index WAR). + * So, the MailboxIntMask is left as 0xffff during fw boot-up, + * and the fw trap handling during fw boot causes Kernel Panic. + * Jira: SWWLAN-212578: [4378A0 PCIe DVT] : + * Kernel Panic seen in F0 FLR with BT Idle/Traffic/DMA + */ + if (bus->sih && PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { + if (bus->pcie_mailbox_mask != 0) { + dhdpcie_bus_intr_disable(bus); + } + /* initialize F0 enum registers before FLR for rev66/67 */ + dhdpcie_enum_reg_init(bus); + } + + /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */ + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val)); + flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT); + DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n", + PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab)); + if (!flr_capab) { + DHD_ERROR(("Chip does not support FLR\n")); + return BCME_UNSUPPORTED; + } + +#if defined(NDIS) && defined(BT_OVER_PCIE) + dhd_bwm_bt_quiesce(bus); +#endif + + /* Save pcie config space */ + DHD_INFO(("Save Pcie Config Space\n")); + DHD_PCIE_CONFIG_SAVE(bus); + + /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */ + DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", + PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); + + /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */ +#ifdef BCMSLTGT + DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY * htclkratio)); +#else + DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY)); +#endif /* BCMSLTGT */ + + CAN_SLEEP() ? 
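/* Editor's note: CAN_SLEEP() picks between a scheduling sleep and a busy
 * wait so this path is safe from both process and atomic context. A sketch
 * of the same pattern using standard Linux kernel calls (msleep/mdelay are
 * the stock kernel APIs, not DHD's OSL wrappers):
 *
 *   static void reset_delay_ms(unsigned int ms)
 *   {
 *       if (!in_atomic() && !irqs_disabled())
 *           msleep(ms);   // process context: let the scheduler run others
 *       else
 *           mdelay(ms);   // atomic context: spin, sleeping is illegal here
 *   }
 */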
OSL_SLEEP(DHD_FUNCTION_LEVEL_RESET_DELAY) : + OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * USEC_PER_MSEC); + + if (force_fail) { + DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n", + PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, + val)); + val = val | (1 << PCIE_SSRESET_DISABLE_BIT); + DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, + val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val); + + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, + val)); + } + + /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */ + DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", + PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); + + /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */ + DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) " + "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + do { + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", + PCIE_CFG_SUBSYSTEM_CONTROL, val)); + val = val & (1 << PCIE_SSRESET_STATUS_BIT); + OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY); + } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES)); + + if (val) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", + PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT)); + /* User has to fire the IOVAR again, if force_fail is needed */ + if (force_fail) { + bus->flr_force_fail = FALSE; + DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__)); + } + return BCME_DONGLE_DOWN; + } + + /* Restore pcie config space */ + DHD_INFO(("Restore Pcie Config Space\n")); + DHD_PCIE_CONFIG_RESTORE(bus); + +#if defined(NDIS) && defined(BT_OVER_PCIE) + dhd_bwm_bt_resume(bus); +#endif + + DHD_ERROR(("******** FLR Succeeded ********\n")); + + return BCME_OK; +} + +#define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */ + +#define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */ +#define DHD_BP_RESET_STATUS_RETRIES 50u + +#define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT 10 +#define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT 12 + +int +dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus) +{ + uint val; + int retry = 0; + int ret = BCME_OK; + bool reset_stat_bit; + + DHD_ERROR(("******** Perform BP reset ********\n")); + + /* Disable ASPM */ + DHD_ERROR(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", + PCIECFGREG_LINK_STATUS_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + val = val & (~PCIE_ASPM_ENAB); + DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, 
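/* Editor's note: every ASPM disable/enable in this file is the same
 * read-modify-write of the PCIe Link Control register in config space. A
 * minimal sketch of the equivalent operation with the stock Linux PCI API,
 * assuming a struct pci_dev *pdev for this device:
 *
 *   u16 lnkctl;
 *   pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
 *   lnkctl &= ~PCI_EXP_LNKCTL_ASPMC;   // clear L0s/L1 enable bits (1:0)
 *   pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
 */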
sizeof(val), val); + + /* wait for delay usec */ + DHD_ERROR(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY)); + OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY); + + /* Set bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */ + DHD_ERROR(("Set PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)" + " of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n", + PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val)); + val = val | (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT); + DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val); + + /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is set */ + DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of " + "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is set\n", + PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + do { + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", + PCIE_CFG_SUBSYSTEM_CONTROL, val)); + reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT); + OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); + } while (!reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); + + if (!reset_stat_bit) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not set\n", + PCIE_CFG_SUBSYSTEM_CONTROL, + PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT)); + ret = BCME_ERROR; + goto aspm_enab; + } + + /* Clear bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */ + DHD_ERROR(("Clear PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)" + " of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n", + PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val)); + val = val & ~(1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT); + DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val); + + /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */ + DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of " + "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is cleared\n", + PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + do { + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", + PCIE_CFG_SUBSYSTEM_CONTROL, val)); + reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT); + OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); + } while (reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); + + if (reset_stat_bit) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", + PCIE_CFG_SUBSYSTEM_CONTROL, + PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT)); + ret = BCME_ERROR; + } + +aspm_enab: + /* Enable ASPM */ + DHD_ERROR(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", + PCIECFGREG_LINK_STATUS_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + val = val | (PCIE_ASPM_L1_ENAB); + DHD_ERROR(("write_config: 
reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val); + + if (ret) { + DHD_ERROR(("******** BP reset Failed ********\n")); + } else { + DHD_ERROR(("******** BP reset Succeeded ********\n")); + } + + return ret; +} + +#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10 +#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21 + +int +dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus) +{ + uint val; + int retry = 0; + uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev); + int ret = BCME_OK; + bool cond; + + DHD_ERROR(("******** Perform BP reset ********\n")); + + /* Disable ASPM */ + DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", + PCIECFGREG_LINK_STATUS_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + val = val & (~PCIE_ASPM_ENAB); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val); + + /* wait for delay usec */ + DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY)); + OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY); + + /* Set bit 10 of PCIECFGREG_SPROM_CTRL */ + DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n", + PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); + val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val); + + /* Wait till the backplane reset is ASSERTED, i.e. + * bit 10 of PCIECFGREG_SPROM_CTRL is cleared. 
+ * Only after this is polling bit 21 of DAR reg 0xAE0 valid; + * otherwise the DAR register will return the previous, stale value + */ + DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of " + "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n", + PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL)); + do { + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); + cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT); + OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); + } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); + + if (cond) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", + PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT)); + ret = BCME_ERROR; + goto aspm_enab; + } + + /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */ + DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of " + "dar_clk_ctrl_status_reg(0x%x) is cleared\n", + PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg)); + do { + val = si_corereg(bus->sih, bus->sih->buscoreidx, + dar_clk_ctrl_status_reg, 0, 0); + DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n", + dar_clk_ctrl_status_reg, val)); + cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT); + OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); + } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); + + if (cond) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", + dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT)); + ret = BCME_ERROR; + } + +aspm_enab: + /* Enable ASPM */ + DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", + PCIECFGREG_LINK_STATUS_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + val = val | (PCIE_ASPM_L1_ENAB); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val); + + if (ret) { + DHD_ERROR(("******** BP reset Failed ********\n")); + } else { + DHD_ERROR(("******** BP reset Succeeded ********\n")); + } + + return ret; +} + +#if defined(LINUX) || defined(linux) +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + dhd_bus_t *bus = dhdp->bus; + int bcmerror = 0; + unsigned long flags; + int retry = POWERUP_MAX_RETRY; + + if (flag == TRUE) { /* Turn off WLAN */ + /* Removing Power */ + DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); + + /* wait for other contexts to finish -- if required a call + * to OSL_DELAY for 1s can be added to give other contexts + * a chance to finish + */ + dhdpcie_advertise_bus_cleanup(bus->dhd); + + if (bus->dhd->busstate != DHD_BUS_DOWN) { +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_set(&bus->dhd->block_bus, TRUE); + dhd_flush_rx_tx_wq(bus->dhd); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef BCMPCIE_OOB_HOST_WAKE + /* Clean up any pending host wake IRQ */ + dhd_bus_oob_intr_set(bus->dhd, FALSE); + dhd_bus_oob_intr_unregister(bus->dhd); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + dhd_os_wd_timer(dhdp, 0); + dhd_bus_stop(bus, TRUE); + if (bus->intr) { + dhdpcie_bus_intr_disable(bus); + dhdpcie_free_irq(bus); + } + dhd_deinit_bus_lp_state_lock(bus); + dhd_deinit_bar1_switch_lock(bus); + dhd_deinit_backplane_access_lock(bus); + dhd_deinit_pwr_req_lock(bus); +#ifdef PCIE_INB_DW + dhd_deinit_dongle_ds_lock(bus); +#endif /* PCIE_INB_DW */ + dhd_bus_release_dongle(bus); + dhdpcie_bus_free_resource(bus); + bcmerror = dhdpcie_bus_disable_device(bus); + if 
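/* Editor's note: the power-off path above follows a strict teardown order:
 * quiesce traffic, disable and free the IRQ, release the dongle, then clear
 * Bus Master Enable via dhdpcie_bus_disable_device() before any DMA buffers
 * are unmapped (see the System MMU comment just below). The generic shape,
 * sketched with standard Linux calls and a hypothetical driver struct:
 *
 *   stop_traffic(drv);            // no new rx/tx work is queued
 *   free_irq(pdev->irq, drv);     // ISR can no longer run
 *   pci_clear_master(pdev);       // device can no longer issue DMA
 *   unmap_and_free_dma(drv);      // only now is unmapping safe
 */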
(bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", + __FUNCTION__, bcmerror)); +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_set(&bus->dhd->block_bus, FALSE); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + } + /* Clean up protocol data after the Bus Master Enable bit is cleared, + * so that the host can safely unmap DMA and remove the allocated buffers + * from the PKTID MAP. On Application Processors with a System MMU, an + * attempt by a device behind the MMU to access DMA-unmapped memory + * triggers a Kernel panic, and the dongle may still access such memory + * after dhd_prot_reset() has been called. + * For this reason, the dhd_prot_reset() and dhd_clear() functions + * should be located after the dhdpcie_bus_disable_device(). + */ + dhd_prot_reset(dhdp); + /* XXX Reset dhd_pub_t instance to initial status + * for built-in type driver + */ + dhd_clear(dhdp); + + bcmerror = dhdpcie_bus_stop_host_dev(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_stop host_dev failed: %d\n", + __FUNCTION__, bcmerror)); +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_set(&bus->dhd->block_bus, FALSE); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + goto done; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_set(&bus->dhd->block_bus, FALSE); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + } else { + if (bus->intr) { + dhdpcie_free_irq(bus); + } +#ifdef BCMPCIE_OOB_HOST_WAKE + /* Clean up any pending host wake IRQ */ + dhd_bus_oob_intr_set(bus->dhd, FALSE); + dhd_bus_oob_intr_unregister(bus->dhd); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + dhd_dpc_kill(bus->dhd); + if (!bus->no_bus_init) { + dhd_bus_release_dongle(bus); + dhdpcie_bus_free_resource(bus); + bcmerror = dhdpcie_bus_disable_device(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", + __FUNCTION__, bcmerror)); + } + + /* Clean up protocol data after the Bus Master Enable bit is + * cleared, so that the host can safely unmap DMA and remove + * the allocated buffers from the PKTID MAP. On Application + * Processors with a System MMU, an attempt by a device behind + * the MMU to access DMA-unmapped memory triggers a Kernel + * panic, and the dongle may still access such memory after + * dhd_prot_reset() has been called. + * For this reason, the dhd_prot_reset() and dhd_clear() functions + * should be located after the dhdpcie_bus_disable_device(). 
+ */ + dhd_prot_reset(dhdp); + /* XXX Reset dhd_pub_t instance to initial status + * for built-in type driver + */ + dhd_clear(dhdp); + } else { + bus->no_bus_init = FALSE; + } + + bcmerror = dhdpcie_bus_stop_host_dev(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_stop_host_dev failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + } + + bus->dhd->dongle_reset = TRUE; + DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__)); + + } else { /* Turn on WLAN */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + /* Powering On */ + DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__)); + /* PCIe RC Turn on */ + do { + bcmerror = dhdpcie_bus_start_host_dev(bus); + if (!bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_start_host_dev OK\n", + __FUNCTION__)); + break; + } else { + OSL_SLEEP(10); + } + } while (retry--); + + if (bcmerror) { + DHD_ERROR(("%s: host pcie clock enable failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON) + dhd_bus_aspm_enable_rc_ep(bus, FALSE); +#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */ + bus->is_linkdown = 0; + bus->cto_triggered = 0; +#ifdef SUPPORT_LINKDOWN_RECOVERY + bus->read_shm_fail = FALSE; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + bcmerror = dhdpcie_bus_enable_device(bus); + if (bcmerror) { + DHD_ERROR(("%s: host configuration restore failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhdpcie_bus_alloc_resource(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#if defined(DHD_HP2P) && defined(OEM_ANDROID) + bus->dhd->hp2p_enable = TRUE; +#endif + +#ifdef FORCE_DONGLE_RESET_IN_DEVRESET_ON + /* + * This will be enabled from phone platforms to + * reset dongle during Wifi ON + */ + dhdpcie_dongle_reset(bus); +#endif /* FORCE_DONGLE_RESET_IN_DEVRESET_ON */ + + bcmerror = dhdpcie_bus_dongle_attach(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhd_bus_request_irq(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bus->dhd->dongle_reset = FALSE; + +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) + dhd_irq_set_affinity(bus->dhd, cpumask_of(1)); +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ + + bcmerror = dhd_bus_start(dhdp); + if (bcmerror) { + DHD_ERROR(("%s: dhd_bus_start: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + /* Renabling watchdog which is disabled in dhdpcie_advertise_bus_cleanup */ + if (bus->dhd->dhd_watchdog_ms_backup) { + DHD_ERROR(("%s: Enabling wdtick after dhd init\n", + __FUNCTION__)); + dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup); + } + DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: what should we do here\n", __FUNCTION__)); + goto done; + } + } + +done: + return bcmerror; +} +#else +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + dhd_bus_t *bus = dhdp->bus; + int bcmerror = 0; + unsigned long flags; + + if (flag == TRUE) { + /* Removing Power */ + if (!dhdp->dongle_reset) { + DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); + dhdpcie_advertise_bus_cleanup(bus->dhd); + dhd_os_sdlock(dhdp); + dhd_os_wd_timer(dhdp, 0); + dhd_bus_stop(bus, FALSE); + dhd_prot_reset(dhdp); + + dhdpcie_bus_release_dongle(bus, bus->dhd->osh, + bus->dhd->dongle_isolation, TRUE); + bus->dhd->dongle_reset = TRUE; + + dhd_os_sdunlock(dhdp); + + DHD_GENERAL_LOCK(bus->dhd, 
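/* Editor's note: bus-state transitions are always published under the
 * general lock, as here, so readers in other contexts never observe a
 * half-updated state. The recurring pattern, sketched with a plain
 * spinlock (which is, to the editor's understanding, what DHD_GENERAL_LOCK
 * amounts to on Linux):
 *
 *   spin_lock_irqsave(&dhd->general_lock, flags);
 *   dhd->busstate = DHD_BUS_DOWN;   // single atomic publication point
 *   spin_unlock_irqrestore(&dhd->general_lock, flags);
 */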
flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: Dongle is already in RESET!\n", __FUNCTION__)); + bcmerror = BCME_DONGLE_DOWN; + } + } else { + /* Powering On */ + DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) { + dhd_os_sdlock(dhdp); /* Turn on WLAN */ + + if (dhdpcie_dongle_attach(bus)) { + DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__)); + dhd_os_sdunlock(dhdp); + return BCME_DONGLE_DOWN; + } + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + DHD_INFO(("%s: About to download firmware\n", __FUNCTION__)); + if (dhd_bus_download_firmware(bus, bus->dhd->osh, + bus->fw_path, bus->nv_path) == 0) { + + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { + bus->dhd->dongle_reset = FALSE; + + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: dhd_bus_init FAILed\n", __FUNCTION__)); + dhd_bus_stop(bus, FALSE); + } + } else { + DHD_ERROR(("%s: dhd_bus_download_firmware FAILed\n", __FUNCTION__)); + bcmerror = BCME_DONGLE_DOWN; + } + + dhd_os_sdunlock(dhdp); + } else { + bcmerror = BCME_DONGLE_DOWN; + DHD_ERROR(("%s called when dongle is not in reset\n", __FUNCTION__)); + } + } + return bcmerror; +} +#endif /* LINUX || linux */ + +#ifdef DHD_PCIE_REG_ACCESS +static int +pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val, + bool slave_bypass) +{ + uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl; + uint32 reg32; + + pcie2_mdiosetblock(bus, physmedia); + + /* enable mdio access to SERDES */ + mdio_ctrl = MDIOCTL2_DIVISOR_VAL; + mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF); + + if (slave_bypass) + mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS; + + if (!write) + mdio_ctrl |= MDIOCTL2_READ; + + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl); + + if (write) { + reg32 = PCIE2_MDIO_WR_DATA; + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, + *val | MDIODATA2_DONE); + } else + reg32 = PCIE2_MDIO_RD_DATA; + + /* retry till the transaction is complete */ + while (i < pcie_serdes_spinwait) { + uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0); + if (!(done_val & MDIODATA2_DONE)) { + if (!write) { + *val = si_corereg(bus->sih, bus->sih->buscoreidx, + PCIE2_MDIO_RD_DATA, 0, 0); + *val = *val & MDIODATA2_MASK; + } + return 0; + } + OSL_DELAY(1000); + i++; + } + return -1; +} +#endif /* DHD_PCIE_REG_ACCESS */ + +#ifdef BCMINTERNAL +static uint64 +serialized_backplane_access_64(dhd_bus_t *bus, uint addr, uint size, uint64 *val, bool read) +{ + uint64 ret; + unsigned long flags; + + DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags); + ret = si_backplane_access_64(bus->sih, addr, size, val, read); + DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags); + return ret; +} +#endif /* BCMINTERNAL */ + +static int +dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd) +{ + int h2d_support, d2h_support; + + d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0; + h2d_support = dhd->dma_h2d_ring_upd_support ? 
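/* Editor's note: the ring-indices iovar packs two independent capabilities
 * into one small integer: bit 0 = D2H ring updates via DMA, bit 1 = H2D.
 * So the values accepted by dhdpcie_set_dma_ring_indices() below decode as:
 *
 *   0 - neither direction   1 - D2H only
 *   2 - H2D only            3 - both directions
 *
 *   // decode sketch:
 *   bool d2h = (val & 1) != 0;
 *   bool h2d = (val & 2) != 0;
 */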
1 : 0; + return (d2h_support | (h2d_support << 1)); + +} +int +dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val) +{ + int bcmerror = 0; + /* Can change it only during initialization/FW download */ + if (dhd->busstate == DHD_BUS_DOWN) { + if ((int_val > 3) || (int_val < 0)) { + DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n")); + bcmerror = BCME_BADARG; + } else { + dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE; + dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE; + dhd->dma_ring_upd_overwrite = TRUE; + } + } else { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + } + + return bcmerror; + +} + +/* si_backplane_access() manages a shared resource - the BAR0 mapping - hence its + * calls shall be serialized. This wrapper function provides such serialization + * and shall be used everywhere instead of a direct call of si_backplane_access(). + * + * The Linux DHD driver calls si_backplane_access() from three contexts: tasklet + * (that may call dhdpcie_sssr_dump() from dhdpcie_sssr_dump()), iovar + * ("sbreg", "membytes", etc.) and procfs (used by GDB proxy). To avoid race + * conditions, calls of si_backplane_access() shall be serialized. Presence of + * a tasklet context implies that serialization shall be based on a spinlock. Hence + * the Linux implementation of dhd_pcie_backplane_access_[un]lock() is + * spinlock-based. + * + * Other platforms may add their own implementations of + * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not + * needed the implementation might be empty) + */ +static uint +serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read) +{ + uint ret; + unsigned long flags; + DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags); + ret = si_backplane_access(bus->sih, addr, size, val, read); + DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags); + return ret; +} + +#ifndef DHD_CAP_PLATFORM +#define DHD_CAP_PLATFORM "x86 " +#endif + +#ifndef DHD_CAP_CUSTOMER +#define DHD_CAP_CUSTOMER "brcm " +#endif + +void +BCMRAMFN(dhd_cap_bcmstrbuf)(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + bcm_bprintf(b, DHD_CAP_PLATFORM); + bcm_bprintf(b, DHD_CAP_CUSTOMER); +#ifdef PCIE_FULL_DONGLE + bcm_bprintf(b, "pcie "); +#endif /* PCIE_FULL_DONGLE */ + /* regaccess and memaccess will be present only for internal reference builds @brcm */ +#ifdef DHD_NO_MOG + bcm_bprintf(b, "internal "); +#else + bcm_bprintf(b, "external "); +#endif /* DHD_NO_MOG */ +#ifdef WLAN_ACCEL_BOOT + bcm_bprintf(b, "wlan-accel "); +#endif /* WLAN_ACCEL_BOOT */ +#ifdef ENABLE_DHD_GRO + bcm_bprintf(b, "gro "); +#endif /* ENABLE_DHD_GRO */ +#ifdef WBRC + bcm_bprintf(b, "wbrc "); +#endif /* WBRC */ +#ifdef WL_CFG80211 + bcm_bprintf(b, "cfg80211 "); +#endif /* WL_CFG80211 */ +#ifdef DHD_FILE_DUMP_EVENT + bcm_bprintf(b, "haldump "); +#endif /* DHD_FILE_DUMP_EVENT */ +#ifdef DHD_LB_RXP + bcm_bprintf(b, "lbrxp "); +#endif /* DHD_LB_RXP */ +#ifdef DHD_LB_TXP +#ifdef DHD_LB_TXP_DEFAULT_ENAB + bcm_bprintf(b, "lbtxp "); +#endif /* DHD_LB_TXP_DEFAULT_ENAB */ +#endif /* DHD_LB_TXP */ +#ifdef DHD_HTPUT_TUNABLES + bcm_bprintf(b, "htput "); +#endif /* DHD_HTPUT_TUNABLES */ +} + +/** Return dhd capability string */ +static char* +dhd_cap(dhd_pub_t *dhd, char *buf, uint bufsize) +{ + struct bcmstrbuf b; + + bcm_binit(&b, buf, bufsize); + + dhd_cap_bcmstrbuf(dhd, &b); + + /* this is either full or overflow. 
return error */ + if (b.size <= 1) + return NULL; + + return (buf); +} + +/** + * IOVAR handler of the DHD bus layer (in this case, the PCIe bus). + * + * @param actionid e.g. IOV_SVAL(IOV_PCIEREG) + * @param params input buffer + * @param plen length in [bytes] of input buffer 'params' + * @param arg output buffer + * @param len length in [bytes] of output buffer 'arg' + */ +static int +dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, uint plen, void *arg, uint len, int val_size) +{ + int bcmerror = 0; +#ifdef BCMINTERNAL + uint64 uint64_val = 0; +#endif /* BCMINTERNAL */ + int32 int_val = 0; + int32 int_val2 = 0; + int32 int_val3 = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + if (plen >= sizeof(int_val) * 2) + bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2)); + + if (plen >= sizeof(int_val) * 3) + bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + /* Check if dongle is in reset. If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + switch (actionid) { + +#ifdef BCMINTERNAL + case IOV_SVAL(IOV_MSI_SIM): + /* allocate memory for MSI (Message Signaled Interrupt) window */ + int_val = !!int_val; + DHD_INFO(("int_val is %d\n", int_val)); + if (bus->msi_sim != int_val) { + if (int_val) { + /* bus->msi_addr */ + bus->msi_sim_addr = + MALLOC(bus->dhd->osh, MSI_SIM_BUFSIZE); + if (bus->msi_sim_addr) { + *bus->msi_sim_addr = 0; + bus->msi_sim_phys = DMA_MAP(bus->dhd->osh, + bus->msi_sim_addr, MSI_SIM_BUFSIZE, DMA_RX, 0, 0); + /* program the MSI addr */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configaddr), ~0, PCIE_CFG_MSIDATA_OFFSET); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configdata), ~0, PCIE_CFG_MSI_GENDATA); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configaddr), ~0, PCIE_CFG_MSIADDR_LOW_OFFSET); + ASSERT(PHYSADDRHI(bus->msi_sim_phys) == 0); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configdata), ~0, (uint32)PHYSADDRLO(bus->msi_sim_phys)); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configaddr), ~0, PCIE_CFG_MSICAP_OFFSET); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configdata), ~0, PCIE_CFG_MSICAP_ENABLE_MSI); + /* poll the MSI addr window */ + bus->pollrate = 10; + } + DHD_INFO(("msi_sim_addr is %p\n", bus->msi_sim_addr)); + } else { + /* bus->msi_addr */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configaddr), ~0, + PCIE_CFG_MSICAP_OFFSET); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configdata), ~0, + PCIE_CFG_MSICAP_DISABLE_MSI); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, + configaddr), ~0, + PCIE_CFG_MSIADDR_LOW_OFFSET); + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), ~0, 0); + + DMA_UNMAP(bus->dhd->osh, bus->msi_sim_phys, + MSI_SIM_BUFSIZE, DMA_RX, 0, 0); + 
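/* Editor's note: the simulated-MSI teardown here mirrors its setup in
 * reverse order - the DMA mapping is destroyed before the backing memory is
 * freed. The pairing rule, sketched with the standard Linux streaming DMA
 * API (assuming a struct device *dev and a buffer va of size sz):
 *
 *   dma_addr_t pa = dma_map_single(dev, va, sz, DMA_FROM_DEVICE);
 *   // ... device writes MSI data into the window ...
 *   dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);  // unmap first
 *   kfree(va);                                       // then free
 */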
MFREE(bus->dhd->osh, + bus->msi_sim_addr, MSI_SIM_BUFSIZE); + } + bus->msi_sim = (bool)int_val; + } + break; + case IOV_GVAL(IOV_MSI_SIM): + bcopy(&bus->msi_sim, arg, val_size); + break; +#endif /* BCMINTERNAL */ + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdpcie_downloadvars(bus, arg, len); + break; +#ifdef DHD_PCIE_REG_ACCESS + case IOV_SVAL(IOV_PCIEREG): + /* XXX: validate int_val ??? */ + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + int_val); + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, + int_val2); + break; + + case IOV_GVAL(IOV_PCIEREG): + /* XXX: validate int_val ??? */ + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + int_val); + int_val = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIECOREREG): + /* XXX: validate int_val ??? */ + si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2); + break; + case IOV_GVAL(IOV_BAR0_SECWIN_REG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK) + { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_BAR0_SECWIN_REG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + if (serialized_backplane_access(bus, addr, size, + (uint *)(&sdreg.value), FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + break; + } + + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset | SI_ENUM_BASE(bus->sih); + size = sdreg.func; + + if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK) + { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } + bcopy(&int_val, arg, size); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset | SI_ENUM_BASE(bus->sih); + size = sdreg.func; + if (serialized_backplane_access(bus, addr, size, + (uint *)(&sdreg.value), FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + break; + } + + case IOV_GVAL(IOV_PCIESERDESREG): + { + uint val; + if (!PCIE_GEN2(bus->sih)) { + DHD_ERROR(("supported only in pcie gen2\n")); + bcmerror = BCME_ERROR; + break; + } + + if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) { + bcopy(&val, arg, sizeof(int32)); + } else { + DHD_ERROR(("pcie2_mdioop failed.\n")); + bcmerror = BCME_ERROR; + } + break; + } + + case IOV_SVAL(IOV_PCIESERDESREG): + if (!PCIE_GEN2(bus->sih)) { + DHD_ERROR(("supported only in pcie gen2\n")); + bcmerror = BCME_ERROR; + break; + } + if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) { + DHD_ERROR(("pcie2_mdioop failed.\n")); + bcmerror = BCME_ERROR; + } + break; + case IOV_GVAL(IOV_PCIECOREREG): + /* XXX: validate int_val ??? */ + int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIECFGREG): + /* XXX: validate int_val ??? 
*/ + OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2); + break; + + case IOV_GVAL(IOV_PCIECFGREG): + /* XXX: validate int_val ??? */ + int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4); + bcopy(&int_val, arg, sizeof(int_val)); + break; +#endif /* DHD_PCIE_REG_ACCESS */ + case IOV_SVAL(IOV_PCIE_LPBK): + bcmerror = dhdpcie_bus_lpback_req(bus, int_val); + break; + + case IOV_SVAL(IOV_PCIE_DMAXFER): { + dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg; + uint32 mem_addr; + + if (!dmaxfer) + return BCME_BADARG; + if (dmaxfer->version != DHD_DMAXFER_VERSION) + return BCME_VERSION; + if (dmaxfer->length != sizeof(dma_xfer_info_t)) { + return BCME_BADLEN; + } + + mem_addr = (uint32)dmaxfer->tput; + dmaxfer->tput = 0; + bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes, + dmaxfer->src_delay, dmaxfer->dest_delay, + dmaxfer->type, dmaxfer->core_num, + dmaxfer->should_wait, mem_addr); + + if (dmaxfer->should_wait && bcmerror >= 0) { + bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer); + } + break; + } + + case IOV_GVAL(IOV_PCIE_DMAXFER): { + dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params; + if (!dmaxfer) + return BCME_BADARG; + if (dmaxfer->version != DHD_DMAXFER_VERSION) + return BCME_VERSION; + if (dmaxfer->length != sizeof(dma_xfer_info_t)) { + return BCME_BADLEN; + } + bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer); + break; + } + +#ifdef BCMINTERNAL + case IOV_GVAL(IOV_PCIE_TX_LPBK): + int_val = dhdpcie_bus_get_tx_lpback(bus); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_PCIE_TX_LPBK): + bcmerror = dhdpcie_bus_set_tx_lpback(bus, bool_val); + break; +#endif /* BCMINTERNAL */ + +#ifdef PCIE_OOB + case IOV_GVAL(IOV_OOB_BT_REG_ON): + int_val = dhd_oob_get_bt_reg_on(bus); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_OOB_BT_REG_ON): + dhd_oob_set_bt_reg_on(bus, (uint8)int_val); + break; + case IOV_GVAL(IOV_OOB_ENABLE): + int_val = bus->oob_enabled; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_OOB_ENABLE): + bus->oob_enabled = (bool)int_val; + break; +#endif /* PCIE_OOB */ +#ifdef PCIE_INB_DW + case IOV_GVAL(IOV_INB_DW_ENABLE): + int_val = bus->inb_enabled; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_INB_DW_ENABLE): + bus->inb_enabled = (bool)int_val; + break; +#endif /* PCIE_INB_DW */ +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + case IOV_GVAL(IOV_DEEP_SLEEP): + int_val = bus->ds_enabled; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DEEP_SLEEP): + if (int_val == 1) { + bus->deep_sleep = TRUE; + if (!bus->ds_enabled) { + bus->ds_enabled = TRUE; + /* Deassert */ + if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) { +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + int timeleft; + timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL); + if (timeleft == 0) { + DHD_ERROR(("DS-ENTER timeout\n")); + bus->ds_enabled = FALSE; + break; + } + } +#endif /* PCIE_INB_DW */ + } + else { + DHD_ERROR(("%s: Enable Deep Sleep failed !\n", + __FUNCTION__)); + bus->ds_enabled = FALSE; + } + } else { + DHD_ERROR(("%s: Deep Sleep already enabled !\n", __FUNCTION__)); + } + } + else if (int_val == 0) { + bus->deep_sleep = FALSE; + if (bus->ds_enabled) { +#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC) + bus->dhd->cur_intr_poll_period = dhd_os_get_intr_poll_period(); + /* for accurately measuring ds-exit latency + * set interrupt poll period to a lesser value + */ + dhd_os_set_intr_poll_period(bus, INTR_POLL_PERIOD_CRITICAL); +#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */ + 
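/* Editor's note: to time the DS-EXIT handshake accurately, the code saves
 * the current interrupt poll period, tightens it around the wake request,
 * and restores it afterwards. The save/override/restore shape, sketched
 * with hypothetical accessors:
 *
 *   unsigned int saved = get_poll_period(bus);
 *   set_poll_period(bus, POLL_PERIOD_FAST);   // sample wake quickly
 *   wake_device_and_measure(bus);
 *   set_poll_period(bus, saved);              // restore previous period
 */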
bus->calc_ds_exit_latency = TRUE; + /* Assert */ + if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK) { + bus->ds_enabled = FALSE; + if (INBAND_DW_ENAB(bus)) { + if (bus->ds_exit_latency != 0) { + DHD_ERROR(("DS-EXIT latency = %llu us\n", + bus->ds_exit_latency)); + } else { + DHD_ERROR(("Failed to measure DS-EXIT" + " latency!(Possibly a non" + " waitable context)\n")); + } + } + } else { + DHD_ERROR(("%s: Disable Deep Sleep failed !\n", + __FUNCTION__)); + } + bus->calc_ds_exit_latency = FALSE; +#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC) + /* restore interrupt poll period to the previous existing value */ + dhd_os_set_intr_poll_period(bus, bus->dhd->cur_intr_poll_period); +#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */ + } else { + DHD_ERROR(("%s: Deep Sleep already disabled !\n", __FUNCTION__)); + } + } + else + DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__)); + + break; +#endif /* PCIE_OOB || PCIE_INB_DW */ +#ifdef DEVICE_TX_STUCK_DETECT + case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT): + int_val = bus->dev_tx_stuck_monitor; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT): + bus->dev_tx_stuck_monitor = (bool)int_val; + break; +#endif /* DEVICE_TX_STUCK_DETECT */ + case IOV_GVAL(IOV_PCIE_SUSPEND): + int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PCIE_SUSPEND): + if (bool_val) { /* Suspend */ + int ret; + unsigned long flags; + + /* + * If some other context is busy, wait until they are done, + * before starting suspend + */ + ret = dhd_os_busbusy_wait_condition(bus->dhd, + &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR); + if (ret == 0) { + DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); + return BCME_BUSY; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + dhdpcie_bus_suspend(bus, TRUE, TRUE); +#else + dhdpcie_bus_suspend(bus, TRUE); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } else { /* Resume */ + unsigned long flags; + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhdpcie_bus_suspend(bus, FALSE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } + break; + + case IOV_GVAL(IOV_MEMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; +#ifdef DHD_BUS_MEM_ACCESS + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; /* absolute backplane address */ + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? 
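/* Editor's note: the membytes iovar carries a tiny header in its input
 * buffer: word 0 is the absolute backplane address, word 1 the transfer
 * size, and for a set the payload follows immediately. The check above
 * simply verifies that the declared size fits the bytes actually supplied:
 *
 *   addr  = ((uint32 *)params)[0];                 // backplane address
 *   size  = ((uint32 *)params)[1];                 // requested byte count
 *   avail = set ? plen - 2 * sizeof(int) : len;    // payload or out buffer
 *   if (avail < size) return BCME_BADARG;          // would overrun
 */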
"set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__, + (set ? "write" : "read"), size, address, dsize)); + + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0) || + si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + if (set && address == bus->dongle_ram_base) { + bus->resetinstr = *(((uint32*)params) + 2); + } + } + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size); + + break; + } +#endif /* DHD_BUS_MEM_ACCESS */ + + /* Debug related. Dumps core registers or one of the dongle memory */ + case IOV_GVAL(IOV_DUMP_DONGLE): + { + dump_dongle_in_t ddi = *(dump_dongle_in_t*)params; + dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg; + uint32 *p = ddo->val; + const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */ + + if (plen < sizeof(ddi) || len < sizeof(ddo)) { + bcmerror = BCME_BADARG; + break; + } + + switch (ddi.type) { + case DUMP_DONGLE_COREREG: + ddo->n_bytes = 0; + + if (si_setcoreidx(bus->sih, ddi.index) == NULL) { + break; // beyond last core: core enumeration ended + } + + ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0); + ddo->address += ddi.offset; // BP address at which this dump starts + + ddo->id = si_coreid(bus->sih); + ddo->rev = si_corerev(bus->sih); + + while (ddi.offset < max_offset && + sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) { + *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0); + ddi.offset += sizeof(uint32); + ddo->n_bytes += sizeof(uint32); + } + break; + default: + // TODO: implement d11 SHM/TPL dumping + bcmerror = BCME_BADARG; + break; + } + break; + } + + /* Debug related. 
Returns a string with dongle capabilities */ + case IOV_GVAL(IOV_DHD_CAPS): + { + if (dhd_cap(bus->dhd, (char*)arg, len) == NULL) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + } + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + case IOV_SVAL(IOV_GDB_SERVER): + /* debugger_*() functions may sleep, so cannot hold spinlock */ + if (int_val > 0) { + debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih)); + } else { + debugger_close(); + } + break; +#endif /* DEBUGGER || DHD_DSCOPE */ +#if defined(GDB_PROXY) + case IOV_GVAL(IOV_GDB_PROXY_PROBE): + { + dhd_gdb_proxy_probe_data_t ret; + ret.data_len = (uint32)sizeof(ret); + ret.magic = DHD_IOCTL_MAGIC; + ret.flags = 0; + if (bus->gdb_proxy_access_enabled) { + ret.flags |= DHD_GDB_PROXY_PROBE_ACCESS_ENABLED; + if (bus->dhd->busstate < DHD_BUS_LOAD) { + ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING; + } else { + ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING; + } + } + if (bus->gdb_proxy_bootloader_mode) { + ret.flags |= DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE; + } + ret.last_id = bus->gdb_proxy_last_id; + if (bus->hostfw_buf.va) { + ret.flags |= DHD_GDB_PROXY_PROBE_HOSTMEM_CODE; + ret.hostmem_code_win_base = + (uint32)PCIEDEV_ARM_ADDR(PHYSADDRLO(bus->hostfw_buf.pa), + PCIEDEV_TRANS_WIN_HOSTMEM); + ret.hostmem_code_win_length = bus->hostfw_buf.len; + } + if (plen && int_val) { + bus->gdb_proxy_last_id = (uint32)int_val; + } + if (len >= sizeof(ret)) { + bcopy(&ret, arg, sizeof(ret)); + bus->dhd->gdb_proxy_active = TRUE; + } else { + bcmerror = BCME_BADARG; + } + break; + } + case IOV_GVAL(IOV_GDB_PROXY_STOP_COUNT): + int_val = (int32)bus->dhd->gdb_proxy_stop_count; + bcopy(&int_val, arg, sizeof(int_val)); + break; + case IOV_SVAL(IOV_GDB_PROXY_STOP_COUNT): + bus->dhd->gdb_proxy_stop_count = (uint32)int_val; + break; +#endif /* GDB_PROXY */ + +#ifdef BCM_BUZZZ + /* Dump dongle side buzzz trace to console */ + case IOV_GVAL(IOV_BUZZZ_DUMP): + bcmerror = dhd_buzzz_dump_dngl(bus); + break; +#endif /* BCM_BUZZZ */ + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + bcmerror = dhdpcie_bus_download_state(bus, bool_val); + break; + +#if defined(FW_SIGNATURE) + case IOV_SVAL(IOV_SET_DOWNLOAD_INFO): + { + fw_download_info_t *info = (fw_download_info_t*)params; + DHD_INFO(("dwnldinfo: sig=%s fw=%x,%u bl=%s,0x%x\n", + info->fw_signature_fname, + info->fw_start_addr, info->fw_size, + info->bootloader_fname, info->bootloader_start_addr)); + bcmerror = dhdpcie_bus_save_download_info(bus, + info->fw_start_addr, info->fw_size, info->fw_signature_fname, + info->bootloader_fname, info->bootloader_start_addr); + break; + } +#endif /* FW_SIGNATURE */ + + case IOV_GVAL(IOV_RAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RAMSIZE): + bus->ramsize = int_val; + bus->orig_ramsize = int_val; + break; + + case IOV_GVAL(IOV_RAMSTART): + int_val = (int32)bus->dongle_ram_base; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_CC_NVMSHADOW): + { + struct bcmstrbuf dump_b; + + bcm_binit(&dump_b, arg, len); + bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b); + break; + } + + case IOV_GVAL(IOV_SLEEP_ALLOWED): + bool_val = bus->sleep_allowed; + bcopy(&bool_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SLEEP_ALLOWED): + bus->sleep_allowed = bool_val; + break; + + case IOV_GVAL(IOV_DONGLEISOLATION): + int_val = bus->dhd->dongle_isolation; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DONGLEISOLATION): + bus->dhd->dongle_isolation = bool_val; + break; + + case 
IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD): + int_val = bus->ltrsleep_on_unload; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD): + bus->ltrsleep_on_unload = bool_val; + break; + + case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK): + { + struct bcmstrbuf dump_b; + bcm_binit(&dump_b, arg, len); + bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b); + break; + } + case IOV_GVAL(IOV_DMA_RINGINDICES): + { + int_val = dhdpcie_get_dma_ring_indices(bus->dhd); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_DMA_RINGINDICES): + bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val); + break; + + case IOV_GVAL(IOV_METADATA_DBG): + int_val = dhd_prot_metadata_dbg_get(bus->dhd); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_METADATA_DBG): + dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0)); + break; + + case IOV_GVAL(IOV_RX_METADATALEN): + int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RX_METADATALEN): +#if !(defined(BCM_ROUTER_DHD)) + if (int_val > 64) { + bcmerror = BCME_BUFTOOLONG; + break; + } + dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE); +#else + bcmerror = BCME_UNSUPPORTED; +#endif /* BCM_ROUTER_DHD */ + break; + + case IOV_SVAL(IOV_TXP_THRESHOLD): + dhd_prot_txp_threshold(bus->dhd, TRUE, int_val); + break; + + case IOV_GVAL(IOV_TXP_THRESHOLD): + int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DB1_FOR_MB): + if (int_val) + bus->db1_for_mb = TRUE; + else + bus->db1_for_mb = FALSE; + break; + + case IOV_GVAL(IOV_DB1_FOR_MB): + if (bus->db1_for_mb) + int_val = 1; + else + int_val = 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_TX_METADATALEN): + int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TX_METADATALEN): +#if !(defined(BCM_ROUTER_DHD)) + if (int_val > 64) { + bcmerror = BCME_BUFTOOLONG; + break; + } + dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE); +#else + bcmerror = BCME_UNSUPPORTED; +#endif /* BCM_ROUTER_DHD */ + break; + + case IOV_SVAL(IOV_DEVRESET): + { + devreset_info_t *devreset = (devreset_info_t *)arg; + + if (!devreset) { + return BCME_BADARG; + } + + if (devreset->length == sizeof(devreset_info_t)) { + if (devreset->version != DHD_DEVRESET_VERSION) { + return BCME_VERSION; + } + int_val = devreset->mode; + } + + switch (int_val) { + case DHD_BUS_DEVRESET_ON: + bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val); + break; + case DHD_BUS_DEVRESET_OFF: + bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val); + break; +#if !defined(NDIS) + case DHD_BUS_DEVRESET_FLR: + bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail); + break; + case DHD_BUS_DEVRESET_FLR_FORCE_FAIL: + bus->flr_force_fail = TRUE; + break; +#ifdef BT_OVER_PCIE + case DHD_BUS_DEVRESET_QUIESCE: + if (bus->dhd->busstate == DHD_BUS_DATA) { + if (bus->dhd->db7_trap.fw_db7w_trap) { + unsigned long flags = 0; + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->db7_trap.fw_db7w_trap_inprogress = TRUE; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + dhdpcie_fw_trap(bus); + OSL_DELAY(100 * 1000); // wait 100 msec + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->db7_trap.fw_db7w_trap_inprogress = FALSE; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } else { + DHD_TRACE(("%s: DB7 Not supported!!!\n", + __FUNCTION__)); + } + + devreset->status = + dhd_bus_perform_flr_with_quiesce(bus->dhd, bus, + FALSE); + + if (devreset->status 
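/* Editor's note: the quiesce-style devreset above first asks the firmware
 * to trap itself via doorbell 7, waits ~100 ms for the trap dump, and only
 * then runs FLR with quiesce. The handshake, sketched with a hypothetical
 * set_db7_trap_in_progress() helper standing in for the locked flag update:
 *
 *   set_db7_trap_in_progress(dhd, TRUE);
 *   dhdpcie_fw_trap(bus);          // ring DB7: firmware self-traps
 *   OSL_DELAY(100 * 1000);         // give firmware time to dump state
 *   set_db7_trap_in_progress(dhd, FALSE);
 *   status = dhd_bus_perform_flr_with_quiesce(dhd, bus, FALSE);
 */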
== BCME_DNGL_DEVRESET) { + devreset->status = BCME_OK; + } + bcmerror = BCME_DNGL_DEVRESET; + } else { + DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__)); + bcmerror = BCME_NOTUP; + } + break; +#endif /* BT_OVER_PCIE */ +#endif /* !defined(NDIS) */ + default: + DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__)); + break; + } + break; + } + case IOV_SVAL(IOV_FORCE_FW_TRAP): + if (bus->dhd->busstate == DHD_BUS_DATA) + dhdpcie_fw_trap(bus); + else { + DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__)); + bcmerror = BCME_NOTUP; + } + break; + case IOV_GVAL(IOV_FLOW_PRIO_MAP): + int_val = bus->dhd->flow_prio_map_type; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FLOW_PRIO_MAP): + int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val); + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_PCIE_RUNTIMEPM + case IOV_GVAL(IOV_IDLETIME): + if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) { + int_val = bus->idletime; + } else { + int_val = 0; + } + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if (int_val < 0) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + if (bus->idletime) { + DHD_ENABLE_RUNTIME_PM(bus->dhd); + } else { + DHD_DISABLE_RUNTIME_PM(bus->dhd); + } + } + break; +#endif /* DHD_PCIE_RUNTIMEPM */ + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_SVAL(IOV_H2D_MAILBOXDATA): + dhdpcie_send_mb_data(bus, (uint)int_val); + break; + + case IOV_SVAL(IOV_INFORINGS): + dhd_prot_init_info_rings(bus->dhd); + break; + + case IOV_SVAL(IOV_H2D_PHASE): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + if (int_val) + bus->dhd->h2d_phase_supported = TRUE; + else + bus->dhd->h2d_phase_supported = FALSE; + break; + + case IOV_GVAL(IOV_H2D_PHASE): + int_val = (int32) bus->dhd->h2d_phase_supported; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + if (int_val) + bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE; + else + bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE; + break; + + case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE): + int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val); + break; + + case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM): + int_val = dhd_prot_get_h2d_max_txpost(bus->dhd); + bcopy(&int_val, arg, val_size); + break; + +#if defined(DHD_HTPUT_TUNABLES) + case IOV_SVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + dhd_prot_set_h2d_htput_max_txpost(bus->dhd, (uint16)int_val); + break; + + case IOV_GVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM): + int_val = dhd_prot_get_h2d_htput_max_txpost(bus->dhd); + bcopy(&int_val, arg, val_size); + break; +#endif /* 
DHD_HTPUT_TUNABLES */ + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_TRAPDATA): + { + struct bcmstrbuf dump_b; + bcm_binit(&dump_b, arg, len); + bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE); + break; + } + + case IOV_GVAL(IOV_TRAPDATA_RAW): + { + struct bcmstrbuf dump_b; + bcm_binit(&dump_b, arg, len); + bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE); + break; + } + +#ifdef DHD_PCIE_REG_ACCESS + case IOV_GVAL(IOV_PCIEASPM): { + uint8 clkreq = 0; + uint32 aspm = 0; + + /* this command is to hide the details, but match the lcreg + #define PCIE_CLKREQ_ENAB 0x100 + #define PCIE_ASPM_L1_ENAB 2 + #define PCIE_ASPM_L0s_ENAB 1 + */ + + clkreq = dhdpcie_clkreq(bus->dhd->osh, 0, 0); + aspm = dhdpcie_lcreg(bus->dhd->osh, 0, 0); + + int_val = ((clkreq & 0x1) << 8) | (aspm & PCIE_ASPM_ENAB); + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_PCIEASPM): { + uint32 tmp; + + tmp = dhdpcie_lcreg(bus->dhd->osh, 0, 0); + dhdpcie_lcreg(bus->dhd->osh, PCIE_ASPM_ENAB, + (tmp & ~PCIE_ASPM_ENAB) | (int_val & PCIE_ASPM_ENAB)); + + dhdpcie_clkreq(bus->dhd->osh, 1, ((int_val & 0x100) >> 8)); + break; + } +#endif /* DHD_PCIE_REG_ACCESS */ + case IOV_SVAL(IOV_HANGREPORT): + bus->dhd->hang_report = bool_val; + DHD_ERROR(("%s: Set hang_report as %d\n", + __FUNCTION__, bus->dhd->hang_report)); + break; + + case IOV_GVAL(IOV_HANGREPORT): + int_val = (int32)bus->dhd->hang_report; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CTO_PREVENTION): + bcmerror = dhdpcie_cto_init(bus, bool_val); + break; + + case IOV_GVAL(IOV_CTO_PREVENTION): + if (bus->sih->buscorerev < 19) { + bcmerror = BCME_UNSUPPORTED; + break; + } + int_val = (int32)bus->cto_enable; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CTO_THRESHOLD): + { + if (bus->sih->buscorerev < 19) { + bcmerror = BCME_UNSUPPORTED; + break; + } + bus->cto_threshold = (uint32)int_val; + } + break; + + case IOV_GVAL(IOV_CTO_THRESHOLD): + if (bus->sih->buscorerev < 19) { + bcmerror = BCME_UNSUPPORTED; + break; + } + if (bus->cto_threshold) { + int_val = (int32)bus->cto_threshold; + } else { + int_val = pcie_cto_to_thresh_default(bus->sih->buscorerev); + } + + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PCIE_WD_RESET): + if (bool_val) { + /* Legacy chipcommon watchdog reset */ + dhdpcie_cc_watchdog_reset(bus); + } + break; + +#ifdef DHD_EFI + case IOV_SVAL(IOV_CONTROL_SIGNAL): + { + bcmerror = dhd_control_signal(bus, arg, len, TRUE); + break; + } + + case IOV_GVAL(IOV_CONTROL_SIGNAL): + { + bcmerror = dhd_control_signal(bus, params, plen, FALSE); + break; + } + case IOV_GVAL(IOV_WIFI_PROPERTIES): + bcmerror = dhd_wifi_properties(bus, params, plen); + break; + case IOV_GVAL(IOV_OTP_DUMP): + bcmerror = dhd_otp_dump(bus, params, plen); + break; +#if defined(BT_OVER_PCIE) && defined(BTOP_TEST) + case IOV_SVAL(IOV_BTOP_TEST): + bcmerror = dhd_btop_test(bus, arg, len); + break; +#endif /* BT_OVER_PCIE && BTOP_TEST */ +#endif /* DHD_EFI */ + case IOV_GVAL(IOV_IDMA_ENABLE): + int_val = bus->idma_enabled; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_IDMA_ENABLE): + bus->idma_enabled = (bool)int_val; + break; + case IOV_GVAL(IOV_IFRM_ENABLE): + int_val = bus->ifrm_enabled; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_IFRM_ENABLE): + bus->ifrm_enabled = (bool)int_val; + break; +#ifdef BCMINTERNAL 
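/* Editor's note: several of the iovars above share one guard: they only
 * take effect while the bus is still down, because the firmware samples
 * these settings once during download/init. The recurring shape, with
 * init_time_flag standing in for the specific field:
 *
 *   if (bus->dhd->busstate != DHD_BUS_DOWN)
 *       return BCME_NOTDOWN;   // too late, firmware already configured
 *   bus->dhd->init_time_flag = (int_val != 0);   // takes effect next init
 */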
+ case IOV_GVAL(IOV_DMA_CHAN):
+ int_val = bus->dma_chan;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_DMA_CHAN):
+ {
+ bus->dma_chan = (bool)int_val;
+ bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+ dhd_bus_db0_addr_get(bus));
+ break;
+ }
+ case IOV_SVAL(IOV_HYBRIDFW):
+ {
+ char *fp;
+ fp = dhd_os_open_image1(bus->dhd, params);
+ if (fp == NULL) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcmerror = dhdpcie_hybridfw_download(bus, fp);
+ dhd_os_close_image1(bus->dhd, fp);
+ break;
+ }
+#endif /* BCMINTERNAL */
+ case IOV_GVAL(IOV_CLEAR_RING):
+ bcopy(&int_val, arg, val_size);
+ dhd_flow_rings_flush(bus->dhd, 0);
+ break;
+ case IOV_GVAL(IOV_DAR_ENABLE):
+ int_val = bus->dar_enabled;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_DAR_ENABLE):
+ bus->dar_enabled = (bool)int_val;
+ break;
+ case IOV_GVAL(IOV_HSCBSIZE):
+ bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
+ break;
+#ifdef DHD_BUS_MEM_ACCESS
+ case IOV_GVAL(IOV_HSCBBYTES):
+ bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
+ break;
+#endif
+#ifdef D2H_MINIDUMP
+ case IOV_GVAL(IOV_MINIDUMP_OVERRIDE):
+ int_val = bus->d2h_minidump_override;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_MINIDUMP_OVERRIDE):
+ /* Can change it only before FW download */
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ break;
+ }
+ bus->d2h_minidump_override = (bool)int_val;
+ break;
+#endif /* D2H_MINIDUMP */
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ case IOV_SVAL(IOV_FWTRACE):
+ {
+ DHD_INFO(("%s: set firmware tracing enable/disable %d\n",
+ __FUNCTION__, int_val));
+
+ bcmerror = handle_set_fwtrace(bus->dhd, (uint32) int_val);
+ break;
+ }
+
+ case IOV_GVAL(IOV_FWTRACE):
+ {
+ uint32 val = 0, temp_val = 0;
+ uint16 of_counter, trace_val = 0;
+ int ret;
+
+ ret = dhd_iovar(bus->dhd, 0, "dngl:fwtrace",
+ NULL, 0, (char *) &val, sizeof(val), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: dhd_iovar failed for fwtrace, "
+ "ret=%d\n", __FUNCTION__, ret));
+ bcmerror = BCME_ERROR;
+ } else {
+ of_counter = get_fw_trace_overflow_counter(bus->dhd);
+ DHD_INFO(("overflow counter = %d \n", of_counter));
+ trace_val = val & 0xFFFF;
+ temp_val = (((uint32) temp_val | (uint32) of_counter) << 16u) | trace_val;
+ bcopy(&temp_val, arg, sizeof(temp_val));
+ }
+ break;
+ }
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_HP2P
+ case IOV_SVAL(IOV_HP2P_ENABLE):
+ dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_ENABLE):
+ int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
+ dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
+ int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
+ dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
+ int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
+ dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
+ int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_MF_ENABLE):
+ bus->dhd->hp2p_mf_enable = int_val ? TRUE : FALSE;
+ break;
+
+ case IOV_GVAL(IOV_HP2P_MF_ENABLE):
+ int_val = bus->dhd->hp2p_mf_enable ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* DHD_HP2P */
+ case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->extdtxs_in_txcpl = TRUE;
+ else
+ bus->dhd->extdtxs_in_txcpl = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
+ int_val = bus->dhd->extdtxs_in_txcpl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->hostrdy_after_init = TRUE;
+ else
+ bus->dhd->hostrdy_after_init = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
+ int_val = bus->dhd->hostrdy_after_init;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef BCMINTERNAL
+ case IOV_GVAL(IOV_SBREG_64):
+ {
+ sdreg_64_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
+ size = sdreg.func;
+
+ if (serialized_backplane_access_64(bus, addr, size,
+ &uint64_val, TRUE) != BCME_OK)
+ {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&uint64_val, arg, size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG_64):
+ {
+ sdreg_64_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
+ size = sdreg.func;
+
+ if (serialized_backplane_access_64(bus, addr, size,
+ (uint64 *)(&sdreg.value), FALSE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ }
+#endif /* BCMINTERNAL */
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ return bcmerror;
+} /* dhdpcie_bus_doiovar */
+
+/** Transfers bytes from host to dongle using pio mode */
+static int
+dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
+ return 0;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+ return 0;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+ DHD_ERROR(("%s: not in a ready state for LPBK\n", __FUNCTION__));
+ return 0;
+ }
+ dhdmsgbuf_lpbk_req(bus->dhd, len);
+ return 0;
+}
+
+void
+dhd_bus_dump_dar_registers(struct dhd_bus *bus)
+{
+ uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
+ dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
+ uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
+ dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: link is down\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->sih == NULL) {
+ DHD_ERROR(("%s: si_attach has not happened, cannot dump DAR registers\n",
registers\n", + __FUNCTION__)); + return; + } + + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + + dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev); + dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev); + dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev); + dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev); + dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev); + dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev); + + if (bus->sih->buscorerev < 24) { + DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n", + __FUNCTION__, bus->sih->buscorerev)); + return; + } + + dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0); + dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0); + dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0); + dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0); + dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0); + dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0); + + DHD_RPM(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) " + "dar_intstat(0x%x:0x%x)\n", + __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val, + dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val)); + + DHD_RPM(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) " + "dar_pcie_mbint(0x%x:0x%x)\n", + __FUNCTION__, dar_errlog_reg, dar_errlog_val, + dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val)); +} + +/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */ +void +dhd_bus_hostready(struct dhd_bus *bus) +{ + if (!bus->dhd->d2h_hostrdy_supported) { + return; + } + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_ERROR_MEM(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__, + dhd_pcie_config_read(bus, PCI_CFG_CMD, sizeof(uint32)))); + + dhd_bus_dump_dar_registers(bus); + +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, dhd_bus_db1_addr_get(bus), 0x1, TRUE); +#endif /* defined(DHD_MMIO_TRACE) */ + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678); + bus->hostready_count ++; + DHD_ERROR_MEM(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count)); +} + +/* Clear INTSTATUS */ +void +dhdpcie_bus_clear_intstatus(struct dhd_bus *bus) +{ + uint32 intstatus = 0; + /* Skip after recieving D3 ACK */ + if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) { + return; + } + /* XXX: check for PCIE Gen2 also */ + if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || + (bus->sih->buscorerev == 2)) { + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); + } else { + /* this is a PCIE core register..not a config register... 
*/ + intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0); + si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask, + intstatus); + } +} + +int +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint) +#else +dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +{ + int timeleft; + int rc = 0; + unsigned long flags; +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + int d3_read_retry = 0; + uint32 d2h_mb_data = 0; + uint32 zero = 0; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + if (bus->dhd == NULL) { + DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd_query_bus_erros(bus->dhd)) { + return BCME_ERROR; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) { + DHD_ERROR(("%s: not in a readystate\n", __FUNCTION__)); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_ERROR; + } + DHD_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->dongle_reset) { + DHD_ERROR(("Dongle is in reset state.\n")); + return -EIO; + } + + /* Check whether we are already in the requested state. + * state=TRUE means Suspend + * state=FALSE meanse Resume + */ + if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("Bus is already in SUSPEND state.\n")); + return BCME_OK; + } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) { + DHD_ERROR(("Bus is already in RESUME state.\n")); + return BCME_OK; + } + + if (state) { +#ifdef OEM_ANDROID + int idle_retry = 0; + int active; +#endif /* OEM_ANDROID */ + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down, state=%d\n", + __FUNCTION__, state)); + return BCME_ERROR; + } + + /* Suspend */ + DHD_RPM(("%s: Entering suspend state\n", __FUNCTION__)); + + bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms; + if (bus->dhd->dhd_watchdog_ms_backup) { + DHD_ERROR(("%s: Disabling wdtick before going to suspend\n", + __FUNCTION__)); + dhd_os_wd_timer(bus->dhd, 0); + } + + DHD_GENERAL_LOCK(bus->dhd, flags); +#if defined(LINUX) || defined(linux) + if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) { + DHD_ERROR(("Tx Request is not ended\n")); + bus->dhd->busstate = DHD_BUS_DATA; + DHD_GENERAL_UNLOCK(bus->dhd, flags); +#ifndef DHD_EFI + return -EBUSY; +#else + return BCME_ERROR; +#endif + } +#endif /* LINUX || linux */ + + bus->last_suspend_start_time = OSL_LOCALTIME_NS(); + + /* stop all interface network queue. 
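+ * so that no new tx packets are queued to the flow rings while the
+ * D3 handshake with the dongle is in flight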
*/ + dhd_bus_stop_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (byint) { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + /* Clear wait_for_d3_ack before sending D3_INFORM */ + bus->wait_for_d3_ack = 0; + dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); + + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + } else { + /* Clear wait_for_d3_ack before sending D3_INFORM */ + bus->wait_for_d3_ack = 0; + dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT); + while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) { + dhdpcie_handle_mb_data(bus); + usleep_range(1000, 1500); + d3_read_retry++; + } + } +#else + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); +#ifdef DHD_TIMESYNC + /* disable time sync mechanism, if configed */ + dhd_timesync_control(bus->dhd, TRUE); +#endif /* DHD_TIMESYNC */ + +#ifdef PCIE_INB_DW + /* As D3_INFORM will be sent after De-assert, + * skip sending DS-ACK for DS-REQ. + */ + bus->skip_ds_ack = TRUE; +#endif /* PCIE_INB_DW */ + +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + dhd_bus_set_device_wake(bus, TRUE); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ +#ifdef PCIE_OOB + bus->oob_presuspend = TRUE; +#endif +#ifdef PCIE_INB_DW + /* De-assert at this point for In-band device_wake */ + if (INBAND_DW_ENAB(bus)) { +#ifdef DHD_EFI + /* during pcie suspend, irrespective of 'deep_sleep' enabled + * or disabled, always de-assert DW. If 'deep_sleep' was disabled + * by user iovar, then upon resuming, DW is again asserted in the + * 'dhd_bus_handle_mb_data' path. + */ + dhd_bus_inb_set_device_wake(bus, FALSE); +#else + dhd_bus_set_device_wake(bus, FALSE); +#endif /* DHD_EFI */ + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } +#endif /* PCIE_INB_DW */ + /* Clear wait_for_d3_ack before sending D3_INFORM */ + bus->wait_for_d3_ack = 0; + /* + * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state + * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata + * inside atomic context, so that no more DBs will be + * rung after sending D3_INFORM + */ +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } else +#endif /* PCIE_INB_DW */ + { + dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); + } + + /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */ + + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); + +#ifdef DHD_RECOVER_TIMEOUT + /* XXX: WAR for missing D3 ACK MB interrupt */ + if (bus->wait_for_d3_ack == 0) { + /* If wait_for_d3_ack was not updated because D2H MB was not received */ + uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, + bus->pcie_mailbox_int, 0, 0); + int host_irq_disabled = dhdpcie_irq_disabled(bus); + if ((intstatus) && (intstatus != (uint32)-1) && + (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) { + DHD_ERROR(("%s: D3 ACK trying again intstatus=%x" + " host_irq_disabled=%d\n", + __FUNCTION__, intstatus, host_irq_disabled)); + dhd_pcie_intr_count_dump(bus->dhd); + dhd_print_tasklet_status(bus->dhd); + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && + !bus->use_mailbox) { + dhd_prot_process_ctrlbuf(bus->dhd); + } else { + dhdpcie_handle_mb_data(bus); + } + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); + /* Clear Interrupts */ + 
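+ /* clears any D2H mailbox intstatus bits left set by the manual mailbox
+ * processing above, so they do not re-fire the ISR once the host IRQ
+ * is re-enabled */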
dhdpcie_bus_clear_intstatus(bus);
+ }
+ } /* bus->wait_for_d3_ack was 0 */
+#endif /* DHD_RECOVER_TIMEOUT */
+
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef OEM_ANDROID
+ /* To allow threads that got pre-empted to complete.
+ */
+ while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
+ (idle_retry < MAX_WKLK_IDLE_CHECK)) {
+ OSL_SLEEP(1);
+ idle_retry++;
+ }
+#endif /* OEM_ANDROID */
+
+ if (bus->wait_for_d3_ack) {
+ DHD_RPM(("%s: Got D3 Ack \n", __FUNCTION__));
+ /* Got D3 Ack. Suspend the bus */
+#ifdef OEM_ANDROID
+ if (active) {
+ DHD_ERROR(("%s(): Suspend failed because of wakelock, "
+ "restoring Dongle to D0\n", __FUNCTION__));
+
+ if (bus->dhd->dhd_watchdog_ms_backup) {
+ DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
+ __FUNCTION__));
+ dhd_os_wd_timer(bus->dhd,
+ bus->dhd->dhd_watchdog_ms_backup);
+ }
+
+ /*
+ * Dongle still thinks that it has to be in D3 state until
+ * it gets a D0 Inform, but we are backing off from suspend.
+ * Ensure that Dongle is brought back to D0.
+ *
+ * Bringing back Dongle from D3 Ack state to D0 state is a
+ * 2 step process. Dongle would want to know that D0 Inform
+ * would be sent as a MB interrupt to bring it out of D3 Ack
+ * state to D0 state. So we have to send both these messages.
+ */
+
+ /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
+ bus->wait_for_d3_ack = 0;
+
+ DHD_SET_BUS_NOT_IN_LPS(bus);
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ /* Since suspend has failed because a wakelock is held,
+ * update the DS state to DW_DEVICE_HOST_WAKE_WAIT.
+ * So that host sends the DS-ACK for DS-REQ.
+ */
+ DHD_ERROR(("Suspend failed due to wakelock being held, "
+ "set inband dw state to DW_DEVICE_HOST_WAKE_WAIT\n"));
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_HOST_WAKE_WAIT);
+ dhd_bus_ds_trace(bus, 0, TRUE,
+ dhdpcie_bus_get_pcie_inband_dw_state(bus));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+ bus->skip_ds_ack = FALSE;
+#endif /* PCIE_INB_DW */
+ /* For Linux, Macos etc (other than NDIS) enable back the dongle
+ * interrupts using intmask and host interrupts
+ * which were disabled in the dhdpcie_bus_isr()->
+ * dhd_bus_handle_d3_ack().
+ */
+ /* Enable back interrupt using Intmask!! */
+ dhdpcie_bus_intr_enable(bus);
+#ifndef NDIS /* !NDIS */
+ /* Defer enabling host irq after RPM suspend failure */
+ if (!DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd)) {
+ /* Enable back interrupt from Host side!! */
+ if (dhdpcie_irq_disabled(bus)) {
+ dhdpcie_enable_irq(bus);
+ bus->resume_intr_enable_count++;
+ }
+ }
+#else
+ /* Enable back the intmask which was cleared in DPC
+ * after getting D3_ACK.
+ */
+ bus->resume_intr_enable_count++;
+
+#endif /* !NDIS */
+ if (bus->use_d0_inform) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus,
+ (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ }
+ /* ring doorbell 1 (hostready) */
+ dhd_bus_hostready(bus);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ rc = BCME_ERROR;
+ } else {
+ /* Actual Suspend after no wakelock */
+#endif /* OEM_ANDROID */
+ /* At this time bus->bus_low_power_state will be
+ * made to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
+ * in dhd_bus_handle_d3_ack()
+ */
+#ifdef PCIE_OOB
+ bus->oob_presuspend = FALSE;
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+#endif
+#ifdef PCIE_OOB
+ bus->oob_presuspend = TRUE;
+#endif
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_HOST_SLEEP_WAIT) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_HOST_SLEEP);
+#ifdef PCIE_INB_DW
+ dhd_bus_ds_trace(bus, 0, TRUE,
+ dhdpcie_bus_get_pcie_inband_dw_state(bus));
+#else
+ dhd_bus_ds_trace(bus, 0, TRUE);
+#endif /* PCIE_INB_DW */
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+#endif /* PCIE_INB_DW */
+ if (bus->use_d0_inform &&
+ (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ }
+#ifdef PCIE_OOB
+ dhd_bus_set_device_wake(bus, FALSE);
+#endif /* PCIE_OOB */
+
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
+ DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
+ } else {
+ dhdpcie_oob_intr_set(bus, TRUE);
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ /* The Host cannot process interrupts now so disable the same.
+ * No need to disable the dongle INTR using intmask, as we are
+ * already calling disabling INTRs from DPC context after
+ * getting D3_ACK in dhd_bus_handle_d3_ack.
+ * Code may not look symmetric between Suspend and
+ * Resume paths but this is done to close down the timing window
+ * between DPC and suspend context and bus->bus_low_power_state
+ * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
+ */
+ bus->dhd->d3ackcnt_timeout = 0;
+ bus->dhd->busstate = DHD_BUS_SUSPEND;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#if defined(LINUX) || defined(linux)
+ dhdpcie_dump_resource(bus);
+#endif /* LINUX || linux */
+ rc = dhdpcie_pci_suspend_resume(bus, state);
+ if (!rc) {
+ bus->last_suspend_end_time = OSL_LOCALTIME_NS();
+ }
+#ifdef OEM_ANDROID
+ }
+#endif /* OEM_ANDROID */
+ } else if (timeleft == 0) { /* D3 ACK Timeout */
+#ifdef DHD_FW_COREDUMP
+ uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
+#endif /* DHD_FW_COREDUMP */
+
+ /* check if the D3 ACK timeout due to scheduling issue */
+ bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
+ dhd_bus_query_dpc_sched_errors(bus->dhd);
+ bus->dhd->d3ack_timeout_occured = TRUE;
+ /* If the D3 Ack has timed out */
+ bus->dhd->d3ackcnt_timeout++;
+ DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
+ __FUNCTION__, bus->dhd->is_sched_error ?
+ " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+ /* XXX DHD triggers Kernel panic if the resume on timeout occurs
+ * due to tasklet or workqueue scheduling problems in the Linux Kernel.
+ * Customer informs that it is hard to find any clue from the
+ * host memory dump since the important tasklet or workqueue information
+ * has already disappeared due to the latency while printing out the
+ * timestamp logs for debugging scan timeout issue.
+ * For this reason, customer requests us to trigger Kernel Panic rather
+ * than taking a SOCRAM dump.
+ */
+ if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+ DHD_SET_BUS_NOT_IN_LPS(bus);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ /* XXX : avoid multiple socram dump from dongle trap and
+ * invalid PCIe bus access due to PCIe link down
+ */
+ if (bus->dhd->check_trap_rot) {
+ DHD_ERROR(("Check dongle trap in the case of d3 ack timeout\n"));
+ dhdpcie_checkdied(bus, NULL, 0);
+ }
+ if (bus->dhd->dongle_trap_occured) {
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
+#endif /* OEM_ANDROID */
+ } else if (!bus->is_linkdown &&
+ !bus->cto_triggered) {
+ uint32 intstatus = 0;
+
+ /* Check if PCIe bus status is valid */
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
+ bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ /* Invalidate PCIe bus status */
+ bus->is_linkdown = 1;
+ }
+
+ dhd_bus_dump_console_buffer(bus);
+ dhd_prot_debug_info_print(bus->dhd);
+#ifdef DHD_FW_COREDUMP
+ if (cur_memdump_mode) {
+ /* write core dump to file */
+ bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef NDIS
+ /* ASSERT only if hang detection/recovery is disabled.
+ * If enabled then let
+ * windows HDR mechanism trigger FW download via surprise removal
+ */
+ dhd_bus_check_died(bus);
+#endif
+#ifdef OEM_ANDROID
+ DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
+ __FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
+#endif /* OEM_ANDROID */
+ }
+#if defined(DHD_ERPOM) || (defined(DHD_EFI) && defined(BT_OVER_PCIE))
+ dhd_schedule_reset(bus->dhd);
+#endif /* DHD_ERPOM || DHD_EFI */
+ rc = -ETIMEDOUT;
+ }
+#ifdef PCIE_OOB
+ bus->oob_presuspend = FALSE;
+#endif /* PCIE_OOB */
+ } else {
+ /* Resume */
+ DHD_RPM(("%s: Entering resume state\n", __FUNCTION__));
+ bus->last_resume_start_time = OSL_LOCALTIME_NS();
+
+ /**
+ * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
+ * si_backplane_access (function to read/write backplane)
+ * updates the window (PCIE2_BAR0_CORE2_WIN) only if
+ * the window being accessed is different from the window
+ * being pointed to by second_bar0win.
+ * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
+ * invalidating second_bar0win after resume updates
+ * PCIE2_BAR0_CORE2_WIN with the right window.
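+ * (a later si_backplane_access() call then re-programs the window
+ * register before use)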
+ */
+ si_invalidate_second_bar0win(bus->sih);
+#if defined(linux) && defined(OEM_ANDROID)
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#endif /* linux && OEM_ANDROID */
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
+#ifdef PCIE_INB_DW
+ dhd_bus_ds_trace(bus, 0, TRUE,
+ dhdpcie_bus_get_pcie_inband_dw_state(bus));
+#else
+ dhd_bus_ds_trace(bus, 0, TRUE);
+#endif /* PCIE_INB_DW */
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+ bus->skip_ds_ack = FALSE;
+#endif /* PCIE_INB_DW */
+ rc = dhdpcie_pci_suspend_resume(bus, state);
+#if defined(LINUX) || defined(linux)
+ dhdpcie_dump_resource(bus);
+#endif /* LINUX || linux */
+
+ /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
+ DHD_SET_BUS_NOT_IN_LPS(bus);
+
+ if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
+ if (bus->use_d0_inform) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ }
+ /* ring doorbell 1 (hostready) */
+ dhd_bus_hostready(bus);
+ }
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DATA;
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
+ bus->bus_wake = 1;
+ OSL_SMP_WMB();
+ wake_up(&bus->rpm_queue);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef PCIE_OOB
+ /*
+ * Assert & Deassert the Device Wake. The following is the explanation for doing so.
+ * 0) At this point,
+ * Host is in suspend state, Link is in L2/L3, Dongle is in D3 Cold
+ * Device Wake is enabled.
+ * 1) When the Host comes out of Suspend, it first sends PERST# in the Link.
+ * Looking at this the Dongle moves from D3 Cold to NO DS State
+ * 2) Now The Host OS calls the "resume" function of DHD. From here the DHD first
+ * Asserts the Device Wake.
+ * From the defn, when the Device Wake is asserted, The dongle FW will ensure
+ * that the Dongle is out of deep sleep IF the device is already in deep sleep.
+ * But note that now the Dongle is NOT in Deep sleep and is actually in
+ * NO DS state. So just driving the Device Wake high does not trigger any state
+ * transitions. The Host should actually "Toggle" the Device Wake to ensure
+ * that Dongle synchronizes with the Host and starts the State Transition to D0.
+ * 3) Note that the above explanation is applicable only when the Host comes out of
+ * suspend and the Dongle comes out of D3 Cold
+ */
+ /* This logic is not required when hostready is enabled */
+
+ if (!bus->dhd->d2h_hostrdy_supported) {
+ dhd_bus_set_device_wake(bus, TRUE);
+ OSL_DELAY(1000);
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+
+#endif /* PCIE_OOB */
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+
+ /* For Linux, Macos etc (other than NDIS) enable back the dongle interrupts
+ * using intmask and host interrupts
+ * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
+ */
+ dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!!
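+ * the host-side IRQ itself is re-enabled separately below, once any
+ * in-progress RPM resume has completed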
*/ +#ifndef NDIS /* !NDIS */ + /* Defer enabling host interrupt until RPM resume done */ + if (!DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) { + if (dhdpcie_irq_disabled(bus)) { + dhdpcie_enable_irq(bus); + bus->resume_intr_enable_count++; + } + } +#else + /* TODO: for NDIS also we need to use enable_irq in future */ + bus->resume_intr_enable_count++; +#endif /* !NDIS */ + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef DHD_TIMESYNC + /* enable time sync mechanism, if configed */ + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhd_timesync_control(bus->dhd, FALSE); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); +#endif /* DHD_TIMESYNC */ + + if (bus->dhd->dhd_watchdog_ms_backup) { + DHD_ERROR(("%s: Enabling wdtick after resume\n", + __FUNCTION__)); + dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup); + } + + bus->last_resume_end_time = OSL_LOCALTIME_NS(); + + /* Update TCM rd index for EDL ring */ + DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd); + + } + return rc; +} + +#define BUS_SUSPEND TRUE +#define BUS_RESUME FALSE +int dhd_bus_suspend(dhd_pub_t *dhd) +{ + int ret; +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + /* TODO: Check whether the arguments are correct */ + ret = dhdpcie_bus_suspend(dhd->bus, TRUE, BUS_SUSPEND); +#else + ret = dhdpcie_bus_suspend(dhd->bus, BUS_SUSPEND); +#endif + return ret; +} + +int dhd_bus_resume(dhd_pub_t *dhd, int stage) +{ + int ret; + BCM_REFERENCE(stage); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + /* TODO: Check whether the arguments are correct */ + ret = dhdpcie_bus_suspend(dhd->bus, FALSE, BUS_RESUME); +#else + ret = dhdpcie_bus_suspend(dhd->bus, BUS_RESUME); +#endif + return ret; +} + +uint32 +dhdpcie_force_alp(struct dhd_bus *bus, bool enable) +{ + ASSERT(bus && bus->sih); + if (enable) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0); + } + return 0; +} + +/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */ +uint32 +dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time) +{ + uint reg_val; + + ASSERT(bus && bus->sih); + + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + 0x1004); + reg_val = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16); + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, + reg_val); + + return 0; +} + +static uint32 +dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk) +{ + uint16 chipid = si_chipid(bus->sih); + /* + * XXX: WAR for CRWLDOT11M-3011 + * program the DMA descriptor Buffer length as the expected frame length + * + 8 bytes extra for corerev 82 when buffer length % 128 is equal to 4 + */ + if ((chipid == BCM4375_CHIP_ID || + chipid == BCM4362_CHIP_ID || + chipid == BCM4377_CHIP_ID || + chipid == BCM43751_CHIP_ID || + chipid == BCM43752_CHIP_ID) && + (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) { + len += 8; + } + DHD_ERROR(("%s: len %d\n", __FUNCTION__, len)); + return len; +} + +/** Transfers bytes from host to dongle and to host again using DMA */ +static int +dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, + uint32 len, uint32 srcdelay, uint32 destdelay, + uint32 d11_lpbk, uint32 core_num, uint32 wait, + uint32 mem_addr) +{ + int ret = 0; + + if (bus->dhd == NULL) { + DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); + return 
BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+ DHD_ERROR(("%s: not in a ready state for LPBK\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (len < 5 || len > 4194296) {
+ DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ bus->dhd->cur_intr_poll_period = dhd_os_get_intr_poll_period();
+ /* before running loopback test, set interrupt poll period to a lesser value */
+ dhd_os_set_intr_poll_period(bus, INTR_POLL_PERIOD_CRITICAL);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+
+ len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
+
+ bus->dmaxfer_complete = FALSE;
+ ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
+ d11_lpbk, core_num, mem_addr);
+ if (ret != BCME_OK || !wait) {
+ DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
+ ret, wait));
+ } else {
+ ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
+ if (ret < 0)
+ ret = BCME_NOTREADY;
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ /* restore interrupt poll period to the previous existing value */
+ dhd_os_set_intr_poll_period(bus, bus->dhd->cur_intr_poll_period);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+ }
+
+ return ret;
+
+}
+
+#ifdef BCMINTERNAL
+static int
+dhdpcie_bus_set_tx_lpback(struct dhd_bus *bus, bool enable)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+ DHD_ERROR(("not in a ready state for LPBK\n"));
+ return BCME_ERROR;
+ }
+ bus->dhd->loopback = enable;
+ return BCME_OK;
+}
+
+static int
+dhdpcie_bus_get_tx_lpback(struct dhd_bus *bus)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return BCME_ERROR;
+ }
+ return bus->dhd->loopback ? 1 : 0;
+}
+#endif /* BCMINTERNAL */
+
+bool
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
+{
+ return MULTIBP_CAP(bus->sih);
+}
+
+#define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
+#define PCIE_REV_FOR_4378B0 68
+
+static int
+dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
+{
+ int bcmerror = 0;
+ volatile uint32 *cr4_regs;
+ bool do_flr;
+ bool do_wr_flops = TRUE;
+
+ if (!bus->sih) {
+ DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
+ (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
+
+ /*
+ * Jira SWWLAN-214966: 4378B0 BToverPCIe: fails to download firmware
+ * with "insmod dhd.ko firmware_path=rtecdc.bin nvram_path=nvram.txt" format
+ * CTO is seen during autoload case.
+ * Need to assert PD1 power req during ARM out of reset.
+ * And doing FLR after this would conflict as FLR resets PCIe enum space.
+ */
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
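+ * (In the code below: enter halts the ARM via SICF_CPUHALT and resets
+ * SOCRAM/sysmem so the image can be written; exit writes the NVRAM vars
+ * and rTLVs, plants the reset vector at address 0 when write-flops are
+ * needed, then si_core_reset() lets the ARM boot from RAM.)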
+ */
+ if (enter) {
+#ifndef BCMQT /* for performance reasons, skip the FLR for QT */
+#ifdef BT_OVER_PCIE
+ if (dhd_bus_is_multibp_capable(bus) && do_flr &&
+ dhd_fw_download_status(bus->dhd) != FW_DOWNLOAD_IN_PROGRESS) {
+ /* for multi-backplane architecture, issue an FLR to reset the WLAN cores */
+ const int pwr_req_ref = bus->pwr_req_ref;
+ if (pwr_req_ref > 0) {
+ (void)dhd_bus_perform_flr_with_quiesce(bus->dhd, bus, FALSE);
+
+ /*
+ * If power has been requested prior to calling FLR, but
+ * the FLR code cleared the power request, we need to
+ * request again to get back to the status of where we were
+ * prior, otherwise there'll be a mismatch in reqs/clears
+ */
+ if (bus->pwr_req_ref < pwr_req_ref) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ } else {
+ (void)dhd_bus_perform_flr_with_quiesce(bus->dhd, bus, FALSE);
+ }
+ }
+#endif /* BT_OVER_PCIE */
+#endif /* !BCMQT */
+
+ /* Make sure BAR1 maps to backplane address 0 */
+ dhdpcie_setbar1win(bus, 0x00000000);
+ bus->alp_only = TRUE;
+#ifdef GDB_PROXY
+ bus->gdb_proxy_access_enabled = TRUE;
+ bus->gdb_proxy_bootloader_mode = FALSE;
+#endif /* GDB_PROXY */
+
+ /* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
+ cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+
+ if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ /* Halt ARM & remove reset */
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+ if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ si_core_reset(bus->sih, 0, 0);
+ /* reset last 4 bytes of RAM address. to be used for shared area */
+ dhdpcie_init_shared_addr(bus);
+ } else if (cr4_regs == NULL) { /* no CR4 present on chip */
+ si_core_disable(bus->sih, 0);
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
+ (uint8*)&zeros, 4) < 0) {
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ }
+ } else {
+ /* For CR4,
+ * Halt ARM
+ * Remove ARM reset
+ * Read RAM base address [0x18_0000]
+ * [next] Download firmware
+ * [done at else] Populate the reset vector
+ * [done at else] Remove ARM halt
+ */
+ /* Halt ARM & remove reset */
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+ if (BCM43602_CHIP(bus->sih->chip)) {
+ /* XXX CRWLARMCR4-53 43602a0 HW bug when banks are powered down */
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+ }
+ /* reset last 4 bytes of RAM address. to be used for shared area */
+ dhdpcie_init_shared_addr(bus);
+ }
+ } else {
+ if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ /* write vars */
+ if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+ /* write random numbers to sysmem for the purpose of
+ * randomizing heap address space.
+ */ + if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) { + DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n", + __FUNCTION__)); + goto fail; + } +#ifdef BCMINTERNAL + if (bus->hostfw_buf.va) { + /* Share the location of the host memory + * location where pageable FW binary is located. + */ + host_page_location_info_t host_location; + host_location.tlv_signature = + htol32(BCM_HOST_PAGE_LOCATION_SIGNATURE); + host_location.tlv_size = htol32(sizeof(host_location)) - + sizeof(host_location.tlv_size) - + sizeof(host_location.tlv_signature); + host_location.binary_size = htol32(bus->hostfw_buf.len); + host_location.addr_hi = PHYSADDRHI(bus->hostfw_buf.pa); + host_location.addr_lo = PHYSADDRLO(bus->hostfw_buf.pa); + bus->next_tlv -= sizeof(host_location); + dhdpcie_bus_membytes(bus, TRUE, bus->next_tlv, + (uint8*)&host_location, sizeof(host_location)); + DHD_INFO(("%s:Host page location info:" + " %08x-%08x Len:%x!\n", + __FUNCTION__, host_location.addr_hi, + host_location.addr_lo, host_location.binary_size)); + } +#ifdef DHD_FWTRACE + { + /* + * Send host trace buffer at time of firmware download + * to enable collecting full init time firmware trace + */ + host_fwtrace_buf_location_info_t host_info; + + if (fwtrace_init(bus->dhd) == BCME_OK) { + fwtrace_get_haddr(bus->dhd, &host_info.host_buf_info); + + host_info.tlv_size = sizeof(host_info.host_buf_info); + host_info.tlv_signature = + htol32(BCM_HOST_FWTRACE_BUF_LOCATION_SIGNATURE); + + bus->ramtop_addr -= sizeof(host_info); + + dhdpcie_bus_membytes(bus, TRUE, bus->ramtop_addr, + (uint8*)&host_info, sizeof(host_info)); + + bus->next_tlv = sizeof(host_info); + } + } +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +#if defined(FW_SIGNATURE) + if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops)) + != BCME_OK) { + goto fail; + } +#endif /* FW_SIGNATURE */ + + if (do_wr_flops) { + uint32 resetinstr_data; + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* + * read address 0 with reset instruction, + * to validate that is not secured + */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, + (uint8 *)&resetinstr_data, sizeof(resetinstr_data)); + + if (resetinstr_data == 0xFFFFFFFF) { + DHD_ERROR(("%s: **** FLOPS Vector is secured, " + "Signature file is missing! ***\n", __FUNCTION__)); + bcmerror = BCME_NO_SIG_FILE; + goto fail; + } + + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + /* now remove reset and halt and continue to run CA7 */ + } + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* Enable remap before ARM reset but after vars. 
+ * No backplane access in remap mode + */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* XXX Change standby configuration here if necessary */ + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + if (BCM43602_CHIP(bus->sih->chip)) { + /* Firmware crashes on SOCSRAM access when core is in reset */ + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + } + + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + /* write a random number rTLV to TCM for the purpose of + * randomizing heap address space. + */ + if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) { + DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n", + __FUNCTION__)); + goto fail; + } + +#if defined(FW_SIGNATURE) + if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops)) + != BCME_OK) { + goto fail; + } +#endif /* FW_SIGNATURE */ + if (do_wr_flops) { + uint32 resetinstr_data; + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* + * read address 0 with reset instruction, + * to validate that is not secured + */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, + (uint8 *)&resetinstr_data, sizeof(resetinstr_data)); + + if (resetinstr_data == 0xFFFFFFFF) { + DHD_ERROR(("%s: **** FLOPS Vector is secured, " + "Signature file is missing! ***\n", __FUNCTION__)); + bcmerror = BCME_NO_SIG_FILE; + goto fail; + } + + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_ERROR; + goto fail; + } + } + /* now remove reset and halt and continue to run CR4 */ + } + } + + bus->arm_oor_time = OSL_LOCALTIME_NS(); + si_core_reset(bus->sih, 0, 0); + + /* Allow HT Clock now that the ARM is running. */ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; +#ifdef DHD_EFI + /* + * dhdpcie_init_phase2() sets the fw_download_status as FW_DOWNLOAD_IN_PROGRESS + * during the first default attempt to load FW either from OTP or WIRELESS folder. + * + * After the first successful download of the FW(either from OTP or WIRELESS folder + * or by dhd download command) set the fw_download_status as FW_DOWNLOAD_DONE. + * + * We need to maintain these states to perform FLR in dhdpcie_bus_download_state() + * only after first successful download. 
+ */ + bus->dhd->fw_download_status = FW_DOWNLOAD_DONE; +#endif /* DHD_EFI */ + } + +fail: + /* Always return to PCIE core */ + si_setcore(bus->sih, PCIE2_CORE_ID, 0); + + if (MULTIBP_ENAB(bus->sih) && !do_flr) { + dhd_bus_pcie_pwr_req_clear(bus); + } + + return bcmerror; +} /* dhdpcie_bus_download_state */ + +#if defined(FW_SIGNATURE) + +static int +dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write) +{ + int bcmerror = BCME_OK; + + DHD_INFO(("FWSIG: bl=%s,%x fw=%x,%u sig=%s,%x,%u" + " stat=%x,%u ram=%x,%x\n", + bus->bootloader_filename, bus->bootloader_addr, + bus->fw_download_addr, bus->fw_download_len, + bus->fwsig_filename, bus->fwsig_download_addr, + bus->fwsig_download_len, + bus->fwstat_download_addr, bus->fwstat_download_len, + bus->dongle_ram_base, bus->ramtop_addr)); + + if (bus->fwsig_filename[0] == 0) { + DHD_INFO(("%s: missing signature file\n", __FUNCTION__)); + goto exit; + } + + /* Write RAM Bootloader to TCM if requested */ + if ((bcmerror = dhdpcie_bus_download_ram_bootloader(bus)) + != BCME_OK) { + DHD_ERROR(("%s: could not write RAM BL to TCM, err %d\n", + __FUNCTION__, bcmerror)); + goto exit; + } + + /* Write FW signature rTLV to TCM */ + if ((bcmerror = dhdpcie_bus_write_fwsig(bus, bus->fwsig_filename, + NULL))) { + DHD_ERROR(("%s: could not write FWsig to TCM, err %d\n", + __FUNCTION__, bcmerror)); + goto exit; + } + + /* Write FW signature verification status rTLV to TCM */ + if ((bcmerror = dhdpcie_bus_write_fws_status(bus)) != BCME_OK) { + DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n", + __FUNCTION__, bcmerror)); + goto exit; + } + + /* Write FW memory map rTLV to TCM */ + if ((bcmerror = dhdpcie_bus_write_fws_mem_info(bus)) != BCME_OK) { + DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n", + __FUNCTION__, bcmerror)); + goto exit; + } + + /* Write a end-of-TLVs marker to TCM */ + if ((bcmerror = dhdpcie_download_rtlv_end(bus)) != BCME_OK) { + DHD_ERROR(("%s: could not write rTLV-end marker to TCM, err %d\n", + __FUNCTION__, bcmerror)); + goto exit; + } + + /* In case of BL RAM, do write flops */ + if (bus->bootloader_filename[0] != 0) { + *do_write = TRUE; + } else { + *do_write = FALSE; + } + +exit: + return bcmerror; +} + +/* Download a reversed-TLV to the top of dongle RAM without overlapping any existing rTLVs */ +static int +dhdpcie_download_rtlv(dhd_bus_t *bus, dngl_rtlv_type_t type, dngl_rtlv_len_t len, uint8 *value) +{ + int bcmerror = BCME_OK; +#ifdef DHD_DEBUG + uint8 *readback_buf = NULL; + uint32 readback_val = 0; +#endif /* DHD_DEBUG */ + uint32 dest_addr = 0; /* dongle RAM dest address */ + uint32 dest_size = 0; /* dongle RAM dest size */ + uint32 dest_raw_size = 0; /* dest size with added checksum */ + + /* Calculate the destination dongle RAM address and size */ + dest_size = ROUNDUP(len, 4); + dest_addr = bus->ramtop_addr - sizeof(dngl_rtlv_type_t) - sizeof(dngl_rtlv_len_t) + - dest_size; + bus->ramtop_addr = dest_addr; + + /* Create the rTLV size field. This consists of 2 16-bit fields: + * The lower 16 bits is the size. The higher 16 bits is a checksum + * consisting of the size with all bits reversed. 
+ * +-------------+-------------+ + * | checksum | size | + * +-------------+-------------+ + * High 16 bits Low 16 bits + */ + dest_raw_size = (~dest_size << 16) | (dest_size & 0x0000FFFF); + + /* Write the value block */ + if (dest_size > 0) { + bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr, value, dest_size); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes to 0x%08x\n", + __FUNCTION__, bcmerror, dest_size, dest_addr)); + goto exit; + } + } + + /* Write the length word */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr + dest_size, + (uint8*)&dest_raw_size, sizeof(dngl_rtlv_len_t)); + + /* Write the type word */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, + dest_addr + dest_size + sizeof(dngl_rtlv_len_t), + (uint8*)&type, sizeof(dngl_rtlv_type_t)); + +#ifdef DHD_DEBUG + /* Read back and compare the downloaded data */ + if (dest_size > 0) { + readback_buf = (uint8*)MALLOC(bus->dhd->osh, dest_size); + if (!readback_buf) { + bcmerror = BCME_NOMEM; + goto exit; + } + memset(readback_buf, 0xaa, dest_size); + bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr, readback_buf, dest_size); + if (bcmerror) { + DHD_ERROR(("%s: readback error %d, %d bytes from 0x%08x\n", + __FUNCTION__, bcmerror, dest_size, dest_addr)); + goto exit; + } + if (memcmp(value, readback_buf, dest_size) != 0) { + DHD_ERROR(("%s: Downloaded data mismatch.\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto exit; + } else { + DHD_ERROR(("Download and compare of TLV 0x%x succeeded" + " (size %u, addr %x).\n", type, dest_size, dest_addr)); + } + } + + /* Read back and compare the downloaded len field */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr + dest_size, + (uint8*)&readback_val, sizeof(dngl_rtlv_len_t)); + if (!bcmerror) { + if (readback_val != dest_raw_size) { + bcmerror = BCME_BADLEN; + } + } + if (bcmerror) { + DHD_ERROR(("%s: Downloaded len error %d\n", __FUNCTION__, bcmerror)); + goto exit; + } + + /* Read back and compare the downloaded type field */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, + dest_addr + dest_size + sizeof(dngl_rtlv_len_t), + (uint8*)&readback_val, sizeof(dngl_rtlv_type_t)); + if (!bcmerror) { + if (readback_val != type) { + bcmerror = BCME_BADOPTION; + } + } + if (bcmerror) { + DHD_ERROR(("%s: Downloaded type error %d\n", __FUNCTION__, bcmerror)); + goto exit; + } +#endif /* DHD_DEBUG */ + + bus->ramtop_addr = dest_addr; + +exit: +#ifdef DHD_DEBUG + if (readback_buf) { + MFREE(bus->dhd->osh, readback_buf, dest_size); + } +#endif /* DHD_DEBUG */ + + return bcmerror; +} /* dhdpcie_download_rtlv */ + +/* Download a reversed-TLV END marker to the top of dongle RAM */ +static int +dhdpcie_download_rtlv_end(dhd_bus_t *bus) +{ + return dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_END_MARKER, 0, NULL); +} + +/* Write the FW signature verification status to dongle memory */ +static int +dhdpcie_bus_write_fws_status(dhd_bus_t *bus) +{ + bcm_fwsign_verif_status_t vstatus; + int ret; + + bzero(&vstatus, sizeof(vstatus)); + + ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_STATUS, sizeof(vstatus), + (uint8*)&vstatus); + bus->fwstat_download_addr = bus->ramtop_addr; + bus->fwstat_download_len = sizeof(vstatus); + + return ret; +} /* dhdpcie_bus_write_fws_status */ + +/* Write the FW signature verification memory map to dongle memory */ +static int +dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus) +{ + bcm_fwsign_mem_info_t memmap; + int ret; + + bzero(&memmap, sizeof(memmap)); + memmap.firmware.start = bus->fw_download_addr; + memmap.firmware.end = 
memmap.firmware.start + bus->fw_download_len;
+ memmap.heap.start = ROUNDUP(memmap.firmware.end + BL_HEAP_START_GAP_SIZE, 4);
+ memmap.heap.end = memmap.heap.start + BL_HEAP_SIZE;
+ memmap.signature.start = bus->fwsig_download_addr;
+ memmap.signature.end = memmap.signature.start + bus->fwsig_download_len;
+ memmap.vstatus.start = bus->fwstat_download_addr;
+ memmap.vstatus.end = memmap.vstatus.start + bus->fwstat_download_len;
+ DHD_INFO(("%s: mem_info: fw=%x-%x heap=%x-%x sig=%x-%x vst=%x-%x res=%x\n",
+ __FUNCTION__,
+ memmap.firmware.start, memmap.firmware.end,
+ memmap.heap.start, memmap.heap.end,
+ memmap.signature.start, memmap.signature.end,
+ memmap.vstatus.start, memmap.vstatus.end,
+ memmap.reset_vec.start));
+
+ ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_MEM_MAP, sizeof(memmap),
+ (uint8*)&memmap);
+ bus->fw_memmap_download_addr = bus->ramtop_addr;
+ bus->fw_memmap_download_len = sizeof(memmap);
+
+ return ret;
+} /* dhdpcie_bus_write_fws_mem_info */
+
+/* Download a bootloader image to dongle RAM */
+static int
+dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus)
+{
+ int ret = BCME_OK;
+ uint32 dongle_ram_base_save;
+
+ DHD_INFO(("download_bloader: %s,0x%x. ramtop=0x%x\n",
+ bus->bootloader_filename, bus->bootloader_addr, bus->ramtop_addr));
+ if (bus->bootloader_filename[0] == '\0') {
+ return ret;
+ }
+
+ /* Save ram base */
+ dongle_ram_base_save = bus->dongle_ram_base;
+
+ /* Set ram base to bootloader download start address */
+ bus->dongle_ram_base = bus->bootloader_addr;
+
+ /* Download the bootloader image to TCM */
+ ret = dhdpcie_download_code_file(bus, bus->bootloader_filename);
+
+ /* Restore ram base */
+ bus->dongle_ram_base = dongle_ram_base_save;
+
+ return ret;
+} /* dhdpcie_bus_download_ram_bootloader */
+
+/* Save the FW download address and size */
+static int
+dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr,
+ uint32 download_size, const char *signature_fname,
+ const char *bloader_fname, uint32 bloader_download_addr)
+{
+ bus->fw_download_len = download_size;
+ bus->fw_download_addr = download_addr;
+ strlcpy(bus->fwsig_filename, signature_fname, sizeof(bus->fwsig_filename));
+ strlcpy(bus->bootloader_filename, bloader_fname, sizeof(bus->bootloader_filename));
+ bus->bootloader_addr = bloader_download_addr;
+#ifdef GDB_PROXY
+ /* GDB proxy bootloader mode - if a signature file is specified (i.e.
+ * a bootloader is used), but no bootloader file is specified (i.e. the
+ * ROM bootloader is used).
+ * Bootloader mode is significant only for preattachment debugging
+ * of chips, in which the debug cell can't be initialized before ARM CPU
+ * start
+ */
+ bus->gdb_proxy_bootloader_mode =
+ (bus->fwsig_filename[0] != 0) && (bus->bootloader_filename[0] == 0);
+#endif /* GDB_PROXY */
+ return BCME_OK;
+} /* dhdpcie_bus_save_download_info */
+
+/* Read a small binary file and write it to the specified socram dest address */
+static int
+dhdpcie_download_sig_file(dhd_bus_t *bus, char *path, uint32 type)
+{
+ int bcmerror = BCME_OK;
+ void *filep = NULL;
+ uint8 *srcbuf = NULL;
+ int srcsize = 0;
+ int len;
+ uint32 dest_size = 0; /* dongle RAM dest size */
+
+ if (path == NULL || path[0] == '\0') {
+ DHD_ERROR(("%s: no file\n", __FUNCTION__));
+ bcmerror = BCME_NOTFOUND;
+ goto exit;
+ }
+
+ /* Open file, get size */
+ filep = dhd_os_open_image1(bus->dhd, path);
+ if (filep == NULL) {
+ DHD_ERROR(("%s: error opening file %s\n", __FUNCTION__, path));
+ bcmerror = BCME_NOTFOUND;
+ goto exit;
+ }
+ srcsize = dhd_os_get_image_size(filep);
+ if (srcsize <= 0 || srcsize > MEMBLOCK) {
+ DHD_ERROR(("%s: invalid fwsig size %u\n", __FUNCTION__, srcsize));
+ bcmerror = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ dest_size = ROUNDUP(srcsize, 4);
+
+ /* Allocate src buffer, read in the entire file */
+ srcbuf = (uint8 *)MALLOCZ(bus->dhd->osh, dest_size);
+ if (!srcbuf) {
+ bcmerror = BCME_NOMEM;
+ goto exit;
+ }
+ len = dhd_os_get_image_block(srcbuf, srcsize, filep);
+ if (len != srcsize) {
+ DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+ bcmerror = BCME_BADLEN;
+ goto exit;
+ }
+
+ /* Write the src buffer as a rTLV to the dongle */
+ bcmerror = dhdpcie_download_rtlv(bus, type, dest_size, srcbuf);
+
+ bus->fwsig_download_addr = bus->ramtop_addr;
+ bus->fwsig_download_len = dest_size;
+
+exit:
+ if (filep) {
+ dhd_os_close_image1(bus->dhd, filep);
+ }
+ if (srcbuf) {
+ MFREE(bus->dhd->osh, srcbuf, dest_size);
+ }
+
+ return bcmerror;
+} /* dhdpcie_download_sig_file */
+
+static int
+dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path)
+{
+ int bcmerror = BCME_OK;
+
+ /* Download the FW signature file to the chip */
+ bcmerror = dhdpcie_download_sig_file(bus, fwsig_path, DNGL_RTLV_TYPE_FW_SIGNATURE);
+ if (bcmerror) {
+ goto exit;
+ }
+
+exit:
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d\n", __FUNCTION__, bcmerror));
+ }
+ return bcmerror;
+} /* dhdpcie_bus_write_fwsig */
+
+/* Dump secure firmware status.
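+ * Reads the signature verification status and bootloader memory map back
+ * from dongle TCM and formats them into the caller's bcmstrbuf.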
*/ +static int +dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + bcm_fwsign_verif_status_t status; + bcm_fwsign_mem_info_t meminfo; + int err = BCME_OK; + + bzero(&status, sizeof(status)); + if (bus->fwstat_download_addr != 0) { + err = dhdpcie_bus_membytes(bus, FALSE, bus->fwstat_download_addr, + (uint8 *)&status, sizeof(status)); + if (err != BCME_OK) { + DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n", + __FUNCTION__, err, sizeof(status), bus->fwstat_download_addr)); + return (err); + } + } + + bzero(&meminfo, sizeof(meminfo)); + if (bus->fw_memmap_download_addr != 0) { + err = dhdpcie_bus_membytes(bus, FALSE, bus->fw_memmap_download_addr, + (uint8 *)&meminfo, sizeof(meminfo)); + if (err != BCME_OK) { + DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n", + __FUNCTION__, err, sizeof(meminfo), bus->fw_memmap_download_addr)); + return (err); + } + } + + bcm_bprintf(strbuf, "Firmware signing\nSignature: (%08x) len (%d)\n", + bus->fwsig_download_addr, bus->fwsig_download_len); + + bcm_bprintf(strbuf, + "Verification status: (%08x)\n" + "\tstatus: %d\n" + "\tstate: %u\n" + "\talloc_bytes: %u\n" + "\tmax_alloc_bytes: %u\n" + "\ttotal_alloc_bytes: %u\n" + "\ttotal_freed_bytes: %u\n" + "\tnum_allocs: %u\n" + "\tmax_allocs: %u\n" + "\tmax_alloc_size: %u\n" + "\talloc_failures: %u\n", + bus->fwstat_download_addr, + status.status, + status.state, + status.alloc_bytes, + status.max_alloc_bytes, + status.total_alloc_bytes, + status.total_freed_bytes, + status.num_allocs, + status.max_allocs, + status.max_alloc_size, + status.alloc_failures); + + bcm_bprintf(strbuf, + "Memory info: (%08x)\n" + "\tfw %08x-%08x\n\theap %08x-%08x\n\tsig %08x-%08x\n\tvst %08x-%08x\n", + bus->fw_memmap_download_addr, + meminfo.firmware.start, meminfo.firmware.end, + meminfo.heap.start, meminfo.heap.end, + meminfo.signature.start, meminfo.signature.end, + meminfo.vstatus.start, meminfo.vstatus.end); + + return (err); +} +#endif /* FW_SIGNATURE */ + +/* Write nvram data to the top of dongle RAM, ending with a size in # of 32-bit words */ +static int +dhdpcie_bus_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize, phys_size; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + varaddr += bus->dongle_ram_base; + bus->ramtop_addr = varaddr; + + if (bus->vars) { + + /* XXX In case the controller has trouble with odd bytes... */ + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + /* Write the vars list */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize); + + /* Implement read back and verify later */ +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) { + MFREE(bus->dhd->osh, vbuffer, varsize); + return BCME_NOMEM; + } + + /* Upload image to verify downloaded contents. 
*/ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + prhex("nvram file", vbuffer, varsize); + prhex("downloaded nvram", nvram_ularray, varsize); + MFREE(bus->dhd->osh, nvram_ularray, varsize); + MFREE(bus->dhd->osh, vbuffer, varsize); + return BCME_ERROR; + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__, + phys_size, bus->ramsize)); + DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__, + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + bus->nvram_csm = varsizew; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + bus->nvram_csm = varsizew; + varsizew = htol32(varsizew); + } + + DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} /* dhdpcie_bus_write_vars */ + +int +dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; +#ifdef KEEP_JP_REGREV + /* XXX Needed by customer's request */ + char *tmpbuf; + uint tmpidx; +#endif /* KEEP_JP_REGREV */ +#ifdef GDB_PROXY + const char nodeadman_record[] = "deadman_to=0"; +#endif /* GDB_PROXY */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); +#ifdef GDB_PROXY + if (bus->dhd->gdb_proxy_nodeadman) { + len += sizeof(nodeadman_record); + } +#endif /* GDB_PROXY */ + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? 
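+    /* Illustrative sketch (not driver code): the length token written by
+     * dhdpcie_bus_write_vars() above packs the NVRAM size in 32-bit words
+     * into the low 16 bits and its bitwise complement into the high 16
+     * bits, giving the dongle a cheap integrity check on the last word of
+     * RAM. A minimal standalone encode/verify pair, with hypothetical
+     * helper names:
+     *
+     *   static uint32 nvram_len_token(uint32 varsize_bytes)
+     *   {
+     *       uint32 words = varsize_bytes / 4;
+     *       return (~words << 16) | (words & 0x0000FFFF);
+     *   }
+     *
+     *   static bool nvram_len_token_ok(uint32 token)
+     *   {
+     *       uint16 words = (uint16)(token & 0xFFFF);
+     *       uint16 csum = (uint16)(token >> 16);
+     *       return (uint16)(~words) == csum;  // complement must match
+     *   }
+     */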
len : 0;
+    if (bus->vars == NULL) {
+        bcmerror = BCME_NOMEM;
+        goto err;
+    }
+
+    /* Copy the passed variables, which should include the terminating double-null */
+    bcopy(arg, bus->vars, bus->varsz);
+#ifdef GDB_PROXY
+    if (bus->dhd->gdb_proxy_nodeadman &&
+        !replace_nvram_variable(bus->vars, bus->varsz, nodeadman_record, NULL))
+    {
+        bcmerror = BCME_NOMEM;
+        goto err;
+    }
+#endif /* GDB_PROXY */
+
+    /* Re-calculate htclkratio only for QT; for FPGA it is fixed at 30 */
+#ifdef BCMQT_HW
+    dhdpcie_htclkratio_recal(bus, bus->vars, bus->varsz);
+#endif
+
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+    /* XXX Change the default country code only for MFG firmware */
+    if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
+        char *sp = NULL;
+        char *ep = NULL;
+        int i;
+        char tag[2][8] = {"ccode=", "regrev="};
+
+        /* Find ccode and regrev info */
+        for (i = 0; i < 2; i++) {
+            sp = strnstr(bus->vars, tag[i], bus->varsz);
+            if (!sp) {
+                DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
+                    __FUNCTION__, bus->nv_path));
+                bcmerror = BCME_ERROR;
+                goto err;
+            }
+            sp = strchr(sp, '=');
+            ep = strchr(sp, '\0');
+            /* We assume that the string lengths of both the ccode and
+             * regrev values do not exceed WLC_CNTRY_BUF_SZ
+             */
+            if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
+                sp++;
+                while (*sp != '\0') {
+                    DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
+                        __FUNCTION__, tag[i], *sp));
+                    *sp++ = '0';
+                }
+            } else {
+                DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
+                    __FUNCTION__, tag[i]));
+                bcmerror = BCME_ERROR;
+                goto err;
+            }
+        }
+    }
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+
+#ifdef KEEP_JP_REGREV
+    /* XXX Needed by customer's request */
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+    if (dhd_bus_get_fw_mode(bus->dhd) != DHD_FLAG_MFG_MODE)
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+    {
+        char *pos = NULL;
+        tmpbuf = MALLOCZ(bus->dhd->osh, bus->varsz + 1);
+        if (tmpbuf == NULL) {
+            goto err;
+        }
+        memcpy(tmpbuf, bus->vars, bus->varsz);
+        for (tmpidx = 0; tmpidx < bus->varsz; tmpidx++) {
+            if (tmpbuf[tmpidx] == 0) {
+                tmpbuf[tmpidx] = '\n';
+            }
+        }
+        bus->dhd->vars_ccode[0] = 0;
+        bus->dhd->vars_regrev = 0;
+        if ((pos = strstr(tmpbuf, "ccode"))) {
+            sscanf(pos, "ccode=%3s\n", bus->dhd->vars_ccode);
+        }
+        if ((pos = strstr(tmpbuf, "regrev"))) {
+            sscanf(pos, "regrev=%u\n", &(bus->dhd->vars_regrev));
+        }
+        MFREE(bus->dhd->osh, tmpbuf, bus->varsz + 1);
+    }
+#endif /* KEEP_JP_REGREV */
+
+err:
+    return bcmerror;
+}
+
+/* Loop through the capability list and see if the PCIe capability exists */
+uint8
+dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
+{
+    uint8 cap_id;
+    uint8 cap_ptr = 0;
+    uint8 byte_val;
+
+    /* check for Header type 0 */
+    byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
+    if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
+        DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
+        goto end;
+    }
+
+    /* check if the capability pointer field exists */
+    byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
+    if (!(byte_val & PCI_CAPPTR_PRESENT)) {
+        DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
+        goto end;
+    }
+
+    cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+    /* check if the capability pointer is 0x00 */
+    if (cap_ptr == 0x00) {
+        DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
+        goto end;
+    }
+
+    /* loop through the capability list and see if the PCIe capability exists */
+
+    cap_id = read_pci_cfg_byte(cap_ptr);
+
+    while (cap_id != req_cap_id) {
+        cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
+        if (cap_ptr == 0x00) break;
+        cap_id = read_pci_cfg_byte(cap_ptr);
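+    /* Illustrative sketch (not driver code): the loop above is the
+     * standard PCI capability-list walk for a type-0 header. Start at the
+     * capability pointer (config offset 0x34), read the capability ID at
+     * each node and the next pointer at node+1, and stop on a match or on
+     * a NULL (0x00) link. Self-contained version against an abstract
+     * 8-bit config-space accessor (hypothetical names):
+     *
+     *   static uint8 find_cap(uint8 (*cfg_rd8)(uint8 off), uint8 want_id)
+     *   {
+     *       uint8 ptr = cfg_rd8(0x34);  // PCI_CFG_CAPPTR
+     *       while (ptr != 0) {
+     *           if (cfg_rd8(ptr) == want_id)
+     *               return ptr;
+     *           ptr = cfg_rd8(ptr + 1);  // next-capability link
+     *       }
+     *       return 0;  // not found
+     *   }
+     */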
+ } + +end: + return cap_ptr; +} + +void +dhdpcie_pme_active(osl_t *osh, bool enable) +{ + uint8 cap_ptr; + uint32 pme_csr; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return; + } + + pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32)); + DHD_RPM(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr)); + + pme_csr |= PME_CSR_PME_STAT; + if (enable) { + pme_csr |= PME_CSR_PME_EN; + } else { + pme_csr &= ~PME_CSR_PME_EN; + } + + OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr); +} + +bool +dhdpcie_pme_cap(osl_t *osh) +{ + uint8 cap_ptr; + uint32 pme_cap; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return FALSE; + } + + pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32)); + + DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap)); + + return ((pme_cap & PME_CAP_PM_STATES) != 0); +} + +static void +dhdpcie_pme_stat_clear(dhd_bus_t *bus) +{ + uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32)); + + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(uint32), pmcsr | PCIE_PMCSR_PMESTAT); +} + +uint32 +dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val) +{ + + uint8 pcie_cap; + uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ + uint32 reg_val; + + pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); + + if (!pcie_cap) { + DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); + return 0; + } + + lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; + + /* set operation */ + if (mask) { + /* read */ + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + + /* modify */ + reg_val &= ~mask; + reg_val |= (mask & val); + + /* write */ + OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); + } + return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); +} + +#if defined(NDIS) +/* set min res mask to highest value, preventing sleep */ +void +dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask) +{ + si_pmu_set_min_res_mask(bus->sih, bus->osh, min_res_mask); +} +#endif /* defined(NDIS) */ + +uint8 +dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val) +{ + uint8 pcie_cap; + uint32 reg_val; + uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ + + pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); + + if (!pcie_cap) { + DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); + return 0; + } + + lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; + + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + /* set operation */ + if (mask) { + if (val) + reg_val |= PCIE_CLKREQ_ENAB; + else + reg_val &= ~PCIE_CLKREQ_ENAB; + OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + } + if (reg_val & PCIE_CLKREQ_ENAB) + return 1; + else + return 0; +} + +void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + dhd_bus_t *bus; + uint64 current_time = OSL_LOCALTIME_NS(); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + bus = dhd->bus; + if (!bus) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n"); + 
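+    /* Illustrative sketch (not driver code): dhdpcie_pme_active() above
+     * ORs PME_CSR_PME_STAT into every write because, per the PCI power
+     * management spec, PME_Status is write-1-to-clear; writing it back
+     * acknowledges a stale wake event in the same dword that updates
+     * PME_En. A minimal model of that read-modify-write, assuming the
+     * spec bit positions (PME_En bit 8, PME_Status bit 15):
+     *
+     *   static uint32 pme_csr_next(uint32 csr, bool enable)
+     *   {
+     *       csr |= (1u << 15);  // W1C: ack any pending PME
+     *       if (enable)
+     *           csr |= (1u << 8);
+     *       else
+     *           csr &= ~(1u << 8);
+     *       return csr;
+     *   }
+     */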
bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" + "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" + "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n", + bus->resume_intr_enable_count, bus->dpc_intr_enable_count, + bus->isr_intr_disable_count, bus->suspend_intr_disable_count, + bus->dpc_return_busdown_count, bus->non_ours_irq_count); +#ifdef BCMPCIE_OOB_HOST_WAKE + bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu" + " oob_intr_disable_count=%lu\noob_irq_num=%d" + " last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT + " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT + " oob_irq_enabled=%d oob_gpio_level=%d\n", + bus->oob_intr_count, bus->oob_intr_enable_count, + bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus), + GET_SEC_USEC(bus->last_oob_irq_isr_time), + GET_SEC_USEC(bus->last_oob_irq_thr_time), + GET_SEC_USEC(bus->last_oob_irq_enable_time), + GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus), + dhdpcie_get_oob_irq_level()); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT + " isr_exit_time="SEC_USEC_FMT"\n" + "isr_sched_dpc_time="SEC_USEC_FMT" rpm_sched_dpc_time="SEC_USEC_FMT"\n" + " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n" + "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT + " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT + " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT + "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n" + "last_d3_inform_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time), + GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->isr_sched_dpc_time), + GET_SEC_USEC(bus->rpm_sched_dpc_time), + GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time), + GET_SEC_USEC(bus->last_process_ctrlbuf_time), + GET_SEC_USEC(bus->last_process_flowring_time), + GET_SEC_USEC(bus->last_process_txcpl_time), + GET_SEC_USEC(bus->last_process_rxcpl_time), + GET_SEC_USEC(bus->last_process_infocpl_time), + GET_SEC_USEC(bus->last_process_edl_time), + GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time), + GET_SEC_USEC(bus->last_d3_inform_time)); + + bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time=" + SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time=" + SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time), + GET_SEC_USEC(bus->last_suspend_end_time), + GET_SEC_USEC(bus->last_resume_start_time), + GET_SEC_USEC(bus->last_resume_end_time)); + +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) + bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT + " logtrace_thread_sem_down_time="SEC_USEC_FMT + "\nlogtrace_thread_flush_time="SEC_USEC_FMT + " logtrace_thread_unexpected_break_time="SEC_USEC_FMT + "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)); +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + 
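+    /* Illustrative sketch (not driver code): the si_corereg() calls below
+     * pass mask=0, val=0, which in this driver's accessor convention makes
+     * the call a pure read of the core register; a nonzero mask turns it
+     * into a read-modify-write. A condensed model of that convention:
+     *
+     *   static uint32 reg_access(volatile uint32 *reg, uint32 mask, uint32 val)
+     *   {
+     *       if (mask)
+     *           *reg = (*reg & ~mask) | (val & mask);
+     *       return *reg;  // mask == 0: plain read
+     *   }
+     */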
uint32 d2h_db0 = 0; + uint32 d2h_mb_data = 0; + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_int, 0, 0); +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(dhd->bus, dhd->bus->pcie_mailbox_int, intstatus, FALSE); +#endif /* defined(DHD_MMIO_TRACE) */ + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_mask, 0, 0); +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(dhd->bus, dhd->bus->pcie_mailbox_mask, intmask, FALSE); +#endif /* defined(DHD_MMIO_TRACE) */ + d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", + intstatus, intmask, d2h_db0); + bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", + d2h_mb_data, dhd->bus->def_intmask); +} +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) +void +dhd_bus_dump_awdl_stats(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + int i = 0; + dhd_awdl_stats_t *awdl_stats; + + bcm_bprintf(strbuf, "---------- AWDL STATISTICS ---------\n"); + bcm_bprintf(strbuf, "%s %10s %12s %16s %12s %16s %8s %8s %8s\n", + "Slot", "AvgSlotTUs", "AvgSlotTUsFW", "NumSlots", + "AvgTxCmpL_Us", "NumTxStatus", "Acked", "tossed", "noack"); + for (i = 0; i < AWDL_NUM_SLOTS; i++) { + awdl_stats = &dhdp->awdl_stats[i]; + bcm_bprintf(strbuf, "%4d %10llu %12llu %16llu %12llu %16llu ", + i, + awdl_stats->num_slots ? + DIV_U64_BY_U64(awdl_stats->cum_slot_time, + awdl_stats->num_slots) : 0, + awdl_stats->num_slots ? + DIV_U64_BY_U64(awdl_stats->fw_cum_slot_time, + awdl_stats->num_slots) : 0, + awdl_stats->num_slots, + awdl_stats->num_tx_status ? + DIV_U64_BY_U64(awdl_stats->cum_tx_status_latency, + awdl_stats->num_tx_status) : 0, + awdl_stats->num_tx_status); +#ifdef BCMDBG + if (!dhdp->d2h_sync_mode) { + bcm_bprintf(strbuf, "%8d %8d %8d\n", + awdl_stats->tx_status[WLFC_CTL_PKTFLAG_DISCARD], + awdl_stats->tx_status[WLFC_CTL_PKTFLAG_TOSSED_BYWLC], + awdl_stats->tx_status[WLFC_CTL_PKTFLAG_DISCARD_NOACK]); + } else { + bcm_bprintf(strbuf, + "%8s %8s %8s\n", "NA", "NA", "NA"); + } +#else + bcm_bprintf(strbuf, + "%8s %8s %8s\n", "NA", "NA", "NA"); +#endif + } +} +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ +/** Add bus dump output to a buffer */ +void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + uint16 flowid; + int ix = 0; + flow_ring_node_t *flow_ring_node; + flow_info_t *flow_info; +#ifdef BCMDBG + flow_info_t *local_flow_info; +#endif /* BCMDBG */ +#ifdef TX_STATUS_LATENCY_STATS + uint8 ifindex; + if_flow_lkup_t *if_flow_lkup; + dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS]; +#endif /* TX_STATUS_LATENCY_STATS */ + +#if defined(FW_SIGNATURE) + /* Dump secure firmware status. 
*/ + if (dhdp->busstate <= DHD_BUS_LOAD) { + dhd_bus_dump_fws(dhdp->bus, strbuf); + } +#endif + + if (dhdp->busstate != DHD_BUS_DATA) + return; + +#ifdef TX_STATUS_LATENCY_STATS + memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency)); +#endif /* TX_STATUS_LATENCY_STATS */ +#ifdef DHD_WAKE_STATUS + bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n", + bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake, + dhdp->bus->wake_counts.rcwake); +#ifdef DHD_WAKE_RX_STATUS + bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n", + dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast, + dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp); + bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n", + dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6, + dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other); + bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n", + dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na, + dhdp->bus->wake_counts.rx_icmpv6_ns); +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + for (flowid = 0; flowid < WLC_E_LAST; flowid++) + if (dhdp->bus->wake_counts.rc_event[flowid] != 0) + bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid), + dhdp->bus->wake_counts.rc_event[flowid]); + bcm_bprintf(strbuf, "\n"); +#endif /* DHD_WAKE_EVENT_STATUS */ +#endif /* DHD_WAKE_STATUS */ + + dhd_prot_print_info(dhdp, strbuf); + dhd_dump_intr_registers(dhdp, strbuf); + dhd_dump_intr_counters(dhdp, strbuf); + bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", + dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); + bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); +#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS + bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n", + dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings); +#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */ +#if defined(DHD_HTPUT_TUNABLES) + bcm_bprintf(strbuf, "htput_flow_ring_start:%d total_htput:%d client_htput=%d\n", + dhdp->htput_flow_ring_start, HTPUT_TOTAL_FLOW_RINGS, dhdp->htput_client_flow_rings); +#endif /* DHD_HTPUT_TUNABLES */ + bcm_bprintf(strbuf, + "%4s %4s %2s %4s %17s %4s %4s %6s %10s %17s %17s %17s %17s %14s %14s %10s ", + "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen", + " Overflows", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)", + "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE"); + +#ifdef TX_STATUS_LATENCY_STATS + /* Average Tx status/Completion Latency in micro secs */ + bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us"); +#endif /* TX_STATUS_LATENCY_STATS */ + + bcm_bprintf(strbuf, "\n"); + + for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) { + unsigned long flags; + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + continue; + } + + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, + "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++, + flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, + MAC2STRDBG(flow_info->da), + DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)), + 
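+    /* Illustrative sketch (not driver code): the loop above takes each
+     * ring's lock, skips rings that are not FLOW_RING_STATUS_OPEN and,
+     * crucially, releases the lock on the skip path before continue.
+     * The same discipline in a self-contained form (pthread used purely
+     * for illustration):
+     *
+     *   struct ring { pthread_mutex_t lock; int open; unsigned long qlen; };
+     *
+     *   static unsigned long sum_open_qlens(struct ring *r, int n)
+     *   {
+     *       unsigned long total = 0;
+     *       int i;
+     *       for (i = 0; i < n; i++) {
+     *           pthread_mutex_lock(&r[i].lock);
+     *           if (!r[i].open) {
+     *               pthread_mutex_unlock(&r[i].lock);
+     *               continue;  // never leak the lock when skipping
+     *           }
+     *           total += r[i].qlen;
+     *           pthread_mutex_unlock(&r[i].lock);
+     *       }
+     *       return total;
+     *   }
+     */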
DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); + dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, TRUE, strbuf, + "%5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d"); + +#ifdef TX_STATUS_LATENCY_STATS + bcm_bprintf(strbuf, "%16llu %16llu ", + flow_info->num_tx_pkts, + flow_info->num_tx_status ? + DIV_U64_BY_U64(flow_info->cum_tx_status_latency, + flow_info->num_tx_status) : 0); + ifindex = flow_info->ifindex; + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex < DHD_MAX_IFS) { + if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status; + if_tx_status_latency[ifindex].cum_tx_status_latency += + flow_info->cum_tx_status_latency; + } else { + DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n", + __FUNCTION__, ifindex, flowid)); + } +#endif /* TX_STATUS_LATENCY_STATS */ + bcm_bprintf(strbuf, "\n"); + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + +#ifdef BCMDBG + if (!dhdp->d2h_sync_mode) { + ix = 0; + bcm_bprintf(strbuf, "\n%4s %4s %2s %10s %7s %6s %5s %5s %10s %7s %7s %7s \n", + "Num:", "Flow", "If", " ACKED", "D11SPRS", "WLSPRS", "TSDWL", + "NOACK", "SPRS_ACKED", "EXPIRED", "DROPPED", "FWFREED"); + for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) { + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + if (!flow_ring_node->active) + continue; + + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, "%4d %4d %2d ", + ix++, flow_ring_node->flowid, flow_info->ifindex); + local_flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, "%10d %7d %6d %5d %5d %10d %7d %7d %7d\n", + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DISCARD], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_D11SUPPRESS], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_WLSUPPRESS], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_TOSSED_BYWLC], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DISCARD_NOACK], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_SUPPRESS_ACKED], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_EXPIRED], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DROPPED], + local_flow_info->tx_status[WLFC_CTL_PKTFLAG_MKTFREE]); + } + } +#endif /* BCMDBG */ + +#ifdef TX_STATUS_LATENCY_STATS + bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus"); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + for (ix = 0; ix < DHD_MAX_IFS; ix++) { + if (!if_flow_lkup[ix].status) { + continue; + } + bcm_bprintf(strbuf, "%2d %16llu %16llu\n", + ix, + if_tx_status_latency[ix].num_tx_status ? 
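+    /* Illustrative sketch (not driver code): every average printed in this
+     * dump is computed as cumulative/count with an explicit zero guard,
+     * and the division goes through DIV_U64_BY_U64(), presumably so that
+     * 64-bit division also works on 32-bit kernels. The recurring idiom
+     * as a hypothetical helper:
+     *
+     *   static uint64 avg_u64(uint64 cum, uint64 cnt)
+     *   {
+     *       return cnt ? DIV_U64_BY_U64(cum, cnt) : 0;  // 0 when no samples
+     *   }
+     */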
+ DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency, + if_tx_status_latency[ix].num_tx_status): 0, + if_tx_status_latency[ix].num_tx_status); + } +#endif /* TX_STATUS_LATENCY_STATS */ + +#ifdef DHD_HP2P + if (dhdp->hp2p_capable) { + bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1"); + + for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) { + hp2p_info_t *hp2p_info; + int bin; + + hp2p_info = &dhdp->hp2p_info[flowid]; + if (hp2p_info->num_timer_start == 0) + continue; + + bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid); + bcm_bprintf(strbuf, "\n%s", "Bin"); + + for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) { + bcm_bprintf(strbuf, "\n%2d %20llu %16llu", bin, + hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]); + } + + bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0"); + bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid); + bcm_bprintf(strbuf, "\n%s", "Bin"); + + for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) { + bcm_bprintf(strbuf, "\n%d %20llu", bin, + hp2p_info->rx_t0[bin]); + } + + bcm_bprintf(strbuf, "\n%s %16s %16s", + "Packet limit", "Timer limit", "Timer start"); + bcm_bprintf(strbuf, "\n%llu %24llu %16llu", hp2p_info->num_pkt_limit, + hp2p_info->num_timer_limit, hp2p_info->num_timer_start); + } + + bcm_bprintf(strbuf, "\n"); + } +#endif /* DHD_HP2P */ + +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) + dhd_bus_dump_awdl_stats(dhdp, strbuf); + dhd_clear_awdl_stats(dhdp); +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ + bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); + bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); + bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); + if (dhdp->d2h_hostrdy_supported) { + bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count); + } +#ifdef PCIE_INB_DW + /* Inband device wake counters */ + if (INBAND_DW_ENAB(dhdp->bus)) { + bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n", + dhdp->bus->inband_dw_assert_cnt); + bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n", + dhdp->bus->inband_dw_deassert_cnt); + bcm_bprintf(strbuf, "Inband DS-EXIT count: %d\n", + dhdp->bus->inband_ds_exit_host_cnt); + bcm_bprintf(strbuf, "Inband DS-EXIT count: %d\n", + dhdp->bus->inband_ds_exit_device_cnt); + bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n", + dhdp->bus->inband_ds_exit_to_cnt); + bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n", + dhdp->bus->inband_host_sleep_exit_to_cnt); + } +#endif /* PCIE_INB_DW */ + bcm_bprintf(strbuf, "d2h_intr_method -> %s d2h_intr_control -> %s\n", + dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX", + dhdp->bus->d2h_intr_control ? 
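+    /* Illustrative sketch (not driver code): the DB7 statistics printed
+     * just below are maintained elsewhere in this file by timestamping the
+     * doorbell-7 trap request when it is sent and again when the trap
+     * arrives, keeping the maximum round-trip and its two endpoints.
+     * Condensed form of that max tracking:
+     *
+     *   static void db7_update_max(uint64 send_ns, uint64 trap_ns,
+     *       uint64 *max_dur, uint64 *max_send, uint64 *max_trap)
+     *   {
+     *       uint64 dur = trap_ns - send_ns;
+     *       if (dur > *max_dur) {
+     *           *max_dur = dur;      // new worst case
+     *           *max_send = send_ns; // remember both endpoints
+     *           *max_trap = trap_ns;
+     *       }
+     *   }
+     */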
"HOST_IRQ" : "D2H_INTMASK"); + + bcm_bprintf(strbuf, "\n\nDB7 stats - db7_send_cnt: %d, db7_trap_cnt: %d, " + "max duration: %lld (%lld - %lld), db7_timing_error_cnt: %d\n", + dhdp->db7_trap.debug_db7_send_cnt, + dhdp->db7_trap.debug_db7_trap_cnt, + dhdp->db7_trap.debug_max_db7_dur, + dhdp->db7_trap.debug_max_db7_trap_time, + dhdp->db7_trap.debug_max_db7_send_time, + dhdp->db7_trap.debug_db7_timing_error_cnt); +} + +#ifdef DNGL_AXI_ERROR_LOGGING +bool +dhd_axi_sig_match(dhd_pub_t *dhdp) +{ + uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr); + + if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) { + DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__)); + return FALSE; + } + + DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n", + __FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base, + dhdp->bus->dongle_ram_base + dhdp->bus->ramsize)); + if (axi_tcm_addr >= dhdp->bus->dongle_ram_base && + axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) { + uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr + + OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature))); + if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) { + return TRUE; + } else { + DHD_ERROR(("%s: No AXI signature: 0x%x\n", + __FUNCTION__, axi_signature)); + return FALSE; + } + } else { + DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__)); + return FALSE; + } +} + +void +dhd_axi_error(dhd_pub_t *dhdp) +{ + dhd_axi_error_dump_t *axi_err_dump; + uint8 *axi_err_buf = NULL; + uint8 *p_axi_err = NULL; + uint32 axi_logbuf_addr; + uint32 axi_tcm_addr; + int err, size; + + /* XXX: On the Dongle side, if an invalid Host Address is generated for a transaction + * it results in SMMU Fault. Now the Host won't respond for the invalid transaction. + * On the Dongle side, after 50msec this results in AXI Slave Error. + * Hence introduce a delay higher than 50msec to ensure AXI Slave error happens and + * the Dongle collects the required information. 
+ */ + OSL_DELAY(75000); + + axi_logbuf_addr = dhdp->axierror_logbuf_addr; + if (!axi_logbuf_addr) { + DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__)); + goto sched_axi; + } + + axi_err_dump = dhdp->axi_err_dump; + if (!axi_err_dump) { + goto sched_axi; + } + + if (!dhd_axi_sig_match(dhdp)) { + goto sched_axi; + } + + /* Reading AXI error data for SMMU fault */ + DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__)); + axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr); + size = sizeof(hnd_ext_trap_axi_error_v1_t); + axi_err_buf = MALLOCZ(dhdp->osh, size); + if (axi_err_buf == NULL) { + DHD_ERROR(("%s: out of memory !\n", __FUNCTION__)); + goto sched_axi; + } + + p_axi_err = axi_err_buf; + err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size); + if (err) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, err, size, axi_tcm_addr)); + goto sched_axi; + } + + /* Dump data to Dmesg */ + dhd_log_dump_axi_error(axi_err_buf); + err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size); + if (err) { + DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n", + __FUNCTION__, err)); + } + +sched_axi: + if (axi_err_buf) { + MFREE(dhdp->osh, axi_err_buf, size); + } + dhd_schedule_axi_error_dump(dhdp, NULL); +} + +static void +dhd_log_dump_axi_error(uint8 *axi_err) +{ + dma_dentry_v1_t dma_dentry; + dma_fifo_v1_t dma_fifo; + int i = 0, j = 0; + + if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) { + hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err; + DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature)); + DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version)); + DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length)); + DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n", + __FUNCTION__, axi_err_v1->dma_fifo_valid_count)); + DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_status)); + DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_core)); + DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_hi)); + DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_lo)); + DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_id)); + + for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) { + dma_fifo = axi_err_v1->dma_fifo[i]; + DHD_ERROR(("%s: valid:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.valid)); + DHD_ERROR(("%s: direction:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.direction)); + DHD_ERROR(("%s: index:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.index)); + DHD_ERROR(("%s: dpa:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.dpa)); + DHD_ERROR(("%s: desc_lo:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.desc_lo)); + DHD_ERROR(("%s: desc_hi:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.desc_hi)); + DHD_ERROR(("%s: din:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.din)); + DHD_ERROR(("%s: dout:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.dout)); + for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) { + dma_dentry = axi_err_v1->dma_fifo[i].dentry[j]; + DHD_ERROR(("%s: ctrl1:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.ctrl1)); + DHD_ERROR(("%s: ctrl2:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.ctrl2)); + DHD_ERROR(("%s: addrlo:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.addrlo)); + DHD_ERROR(("%s: addrhi:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.addrhi)); + } + } + } + else { + 
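+    /* Illustrative sketch (not driver code): the parser above trusts the
+     * TCM record only after two gates: dhd_axi_sig_match() checks that the
+     * logged pointer lands inside dongle RAM and that the record carries
+     * the expected signature, and this function checks the version byte
+     * before interpreting the layout. The generic shape of such a header
+     * gate, with hypothetical types:
+     *
+     *   struct trap_hdr { uint8 version; uint32 signature; };
+     *
+     *   static bool trap_hdr_ok(const struct trap_hdr *h, uint32 sig, uint8 ver)
+     *   {
+     *       return h->version == ver && h->signature == sig;
+     *   }
+     */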
DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err))); + } +} +#endif /* DNGL_AXI_ERROR_LOGGING */ + +/** + * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their + * flow queue to their flow ring. + */ +static void +dhd_update_txflowrings(dhd_pub_t *dhd) +{ + unsigned long flags; + dll_t *item, *next; + flow_ring_node_t *flow_ring_node; + struct dhd_bus *bus = dhd->bus; + int count = 0; + + if (dhd_query_bus_erros(dhd)) { + return; + } + + /* Hold flowring_list_lock to ensure no race condition while accessing the List */ + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + for (item = dll_head_p(&bus->flowring_active_list); + (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item)); + item = next, count++) { + if (dhd->hang_was_sent) { + break; + } + + if (count > bus->max_tx_flowrings) { + DHD_ERROR(("%s : overflow max flowrings\n", __FUNCTION__)); +#ifdef OEM_ANDROID + dhd->hang_reason = HANG_REASON_UNKNOWN; + dhd_os_send_hang_message(dhd); +#endif /* OEM_ANDROID */ + break; + } + + next = dll_next_p(item); + flow_ring_node = dhd_constlist_to_flowring(item); + + /* Ensure that flow_ring_node in the list is Not Null */ + ASSERT(flow_ring_node != NULL); + + /* Ensure that the flowring node has valid contents */ + ASSERT(flow_ring_node->prot_info != NULL); + + dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info); + } + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); +} + +/** Mailbox ringbell Function */ +static void +dhd_bus_gen_devmb_intr(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__)); + return; + } + if (bus->db1_for_mb) { + /* this is a pcie core register, not the config register */ + /* XXX: make sure we are on PCIE */ + DHD_PCIE_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__)); + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, dhd_bus_db1_addr_get(bus), 0x2, TRUE); +#endif /* defined(DHD_MMIO_TRACE) */ + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), + ~0, 0x12345678); + } else { + DHD_PCIE_INFO(("%s: writing a mail box interrupt to the device," + " through config space\n", __FUNCTION__)); + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + /* XXX CRWLPCIEGEN2-182 requires double write */ + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + } +} + +/* Upon receiving a mailbox interrupt, + * if H2D_FW_TRAP bit is set in mailbox location + * device traps + */ +static void +dhdpcie_fw_trap(dhd_bus_t *bus) +{ + DHD_ERROR(("%s: send trap!!!\n", __FUNCTION__)); + if (bus->dhd->db7_trap.fw_db7w_trap) { + uint32 addr = dhd_bus_db1_addr_3_get(bus); + bus->dhd->db7_trap.debug_db7_send_time = OSL_LOCALTIME_NS(); + bus->dhd->db7_trap.debug_db7_send_cnt++; + si_corereg(bus->sih, bus->sih->buscoreidx, addr, ~0, + bus->dhd->db7_trap.db7_magic_number); + return; + } + + /* Send the mailbox data and generate mailbox intr. */ + dhdpcie_send_mb_data(bus, H2D_FW_TRAP); + /* For FWs that cannot interprete H2D_FW_TRAP */ + (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0); +} + +#ifdef PCIE_INB_DW + +void +dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus) +{ + /* The DHD_BUS_INB_DW_LOCK must be held before + * calling this function !! 
+     */
+    if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+        DW_DEVICE_DS_DEV_SLEEP_PEND) &&
+        (bus->host_active_cnt == 0)) {
+        dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
+        dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+    }
+}
+
+int
+dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
+{
+    int timeleft;
+    unsigned long flags;
+    int ret;
+
+    if (!INBAND_DW_ENAB(bus)) {
+        return BCME_ERROR;
+    }
+    if (val) {
+        DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+
+        /*
+         * Reset the Door Bell Timeout value, so that the Watchdog
+         * doesn't try to deassert Device Wake while we are still in
+         * the process of asserting it.
+         */
+        dhd_bus_doorbell_timeout_reset(bus);
+
+        if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+            DW_DEVICE_DS_DEV_SLEEP) {
+            /* Clear wait_for_ds_exit */
+            bus->wait_for_ds_exit = 0;
+            if (bus->calc_ds_exit_latency) {
+                bus->ds_exit_latency = 0;
+                bus->ds_exit_ts2 = 0;
+                bus->ds_exit_ts1 = OSL_SYSUPTIME_US();
+            }
+            ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
+            if (ret != BCME_OK) {
+                DHD_ERROR(("Failed: assert Inband device_wake\n"));
+                DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+                ret = BCME_ERROR;
+                goto exit;
+            }
+            dhdpcie_bus_set_pcie_inband_dw_state(bus,
+                DW_DEVICE_DS_DISABLED_WAIT);
+            bus->inband_dw_assert_cnt++;
+        } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+            DW_DEVICE_DS_DISABLED_WAIT) {
+            DHD_ERROR(("Inband device wake is already asserted, "
+                "waiting for DS-Exit\n"));
+        }
+        else {
+            DHD_PCIE_INFO(("Not in DS SLEEP state \n"));
+            DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+            ret = BCME_OK;
+            goto exit;
+        }
+
+        /*
+         * Since we are going to wait/sleep, release the lock.
+         * The Device Wake sanity is still valid, because
+         * a) If there is another context that comes in and tries
+         * to assert DS again and it gets the lock, since
+         * ds_state would now be != DW_DEVICE_DS_DEV_SLEEP, that
+         * context would return saying Not in DS Sleep.
+         * b) If there is another context that comes in and tries
+         * to de-assert DS and gets the lock,
+         * since the ds_state is != DW_DEVICE_DS_DEV_WAKE
+         * that context would return too. This cannot happen
+         * since the watchdog is the only context that can
+         * De-Assert Device Wake, and as the first step of
+         * Asserting the Device Wake we have pushed out the
+         * Door Bell Timeout.
+ * + */ + + if (!CAN_SLEEP()) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_WAKE); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + /* Called from context that cannot sleep */ + OSL_DELAY(1000); + } else { + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + /* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */ + timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit); + if (!bus->wait_for_ds_exit || timeleft == 0) { + DHD_ERROR(("dhd_bus_inb_set_device_wake:DS-EXIT timeout, " + "wait_for_ds_exit : %d\n", bus->wait_for_ds_exit)); + bus->inband_ds_exit_to_cnt++; + bus->ds_exit_timeout = 0; +#ifdef DHD_FW_COREDUMP + if (bus->dhd->memdump_enabled) { + /* collect core dump */ + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + bus->dhd->memdump_type = + DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE; + dhd_bus_mem_dump(bus->dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + ret = BCME_ERROR; + goto exit; + } + } + ret = BCME_OK; + } else { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DEV_WAKE)) { + ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT); + if (ret != BCME_OK) { + DHD_ERROR(("Failed: deassert Inband device_wake\n")); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + goto exit; + } + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_ACTIVE); + bus->inband_dw_deassert_cnt++; + } else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DEV_SLEEP_PEND) && + (bus->host_active_cnt == 0)) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + } + + ret = BCME_OK; + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } + +exit: + return ret; +} +#endif /* PCIE_INB_DW */ +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) +void +dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus) +{ + if (dhd_doorbell_timeout) { +#ifdef DHD_PCIE_RUNTIMEPM + if (dhd_runtimepm_ms) { + dhd_timeout_start(&bus->doorbell_timer, + (dhd_doorbell_timeout * 1000) / dhd_runtimepm_ms); + } +#else +#ifdef BCMQT + uint wd_scale = 1; +#else + uint wd_scale = dhd_watchdog_ms; +#endif + if (dhd_watchdog_ms) { + dhd_timeout_start(&bus->doorbell_timer, + (dhd_doorbell_timeout * 1000) / wd_scale); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + } + else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) { + dhd_bus_set_device_wake(bus, FALSE); + } +} + +int +dhd_bus_set_device_wake(struct dhd_bus *bus, bool val) +{ + if (bus->ds_enabled && bus->dhd->ring_attached) { +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + return dhd_bus_inb_set_device_wake(bus, val); + } +#endif /* PCIE_INB_DW */ +#ifdef PCIE_OOB + if (OOB_DW_ENAB(bus)) { + return dhd_os_oob_set_device_wake(bus, val); + } +#endif /* PCIE_OOB */ + } + return BCME_OK; +} + +void +dhd_bus_dw_deassert(dhd_pub_t *dhd) +{ + dhd_bus_t *bus = dhd->bus; + unsigned long flags; + + if (dhd_query_bus_erros(bus->dhd)) { + return; + } + + /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */ + if (dhd_doorbell_timeout != 0 && bus->dhd->busstate == DHD_BUS_DATA && + dhd_timeout_expired(&bus->doorbell_timer) && + !dhd_query_bus_erros(bus->dhd)) { + DHD_GENERAL_LOCK(dhd, flags); + if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && + !DHD_CHECK_CFG_IN_PROGRESS(dhd)) { + DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhd); + DHD_GENERAL_UNLOCK(dhd, flags); + dhd_bus_set_device_wake(bus, FALSE); + DHD_GENERAL_LOCK(dhd, flags); + 
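+    /* Illustrative sketch (not driver code): the device-wake paths above
+     * move through a small state machine keyed by the DW_DEVICE_* values:
+     * roughly DS_ACTIVE -> DS_DEV_SLEEP_PEND -> DS_DEV_SLEEP on the sleep
+     * side, and DS_DEV_SLEEP -> DS_DISABLED_WAIT -> DS_DEV_WAKE once the
+     * host asserts device_wake and the dongle answers with DS-EXIT. A
+     * reduced model of the host-assert transition (error and timeout legs
+     * omitted; names only mirror the driver's):
+     *
+     *   enum ds { ACTIVE, SLEEP_PEND, SLEEP, DISABLED_WAIT, DEV_WAKE };
+     *
+     *   static enum ds ds_on_host_assert(enum ds s, bool *send_assert_mb)
+     *   {
+     *       *send_assert_mb = (s == SLEEP);  // only wake a sleeping device
+     *       return (s == SLEEP) ? DISABLED_WAIT : s;
+     *   }
+     */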
DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DHD_GENERAL_UNLOCK(dhd, flags); + } + } + +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + if (bus->ds_exit_timeout) { + bus->ds_exit_timeout --; + if (bus->ds_exit_timeout == 1) { + DHD_ERROR(("DS-EXIT TIMEOUT\n")); + bus->ds_exit_timeout = 0; + bus->inband_ds_exit_to_cnt++; + } + } + if (bus->host_sleep_exit_timeout) { + bus->host_sleep_exit_timeout --; + if (bus->host_sleep_exit_timeout == 1) { + DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n")); + bus->host_sleep_exit_timeout = 0; + bus->inband_host_sleep_exit_to_cnt++; + } + } + } +#endif /* PCIE_INB_DW */ +} +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ + +/** mailbox doorbell ring function */ +void +dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) +{ + /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */ + if (__DHD_CHK_BUS_IN_LPS(bus)) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, + PCIE_INTB, PCIE_INTB); + } else { + /* this is a pcie core register, not the config regsiter */ + /* XXX: makesure we are on PCIE */ + DHD_PCIE_INFO(("%s: writing a door bell to the device\n", __FUNCTION__)); + if (IDMA_ACTIVE(bus->dhd)) { + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus), + ~0, value); + } else { + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, + dhd_bus_db0_addr_get(bus), ~0, 0x12345678); + } + } +} + +/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */ +void +dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake) +{ + /* this is a pcie core register, not the config regsiter */ + /* XXX: makesure we are on PCIE */ + /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */ + if (__DHD_CHK_BUS_IN_LPS(bus)) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_PCIE_INFO(("writing a door bell 2 to the device\n")); + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus), + ~0, value); +} + +void +dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) +{ + /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */ + if (__DHD_CHK_BUS_IN_LPS(bus)) { + DHD_RPM(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + if (OOB_DW_ENAB(bus)) { + dhd_bus_set_device_wake(bus, TRUE); + } + dhd_bus_doorbell_timeout_reset(bus); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, dhd_bus_db0_addr_get(bus), value, + ((value >> 24u) == 0xFF) ? 
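+    /* Illustrative sketch (not driver code): every ring variant in this
+     * file applies the same guard order before touching MMIO: bail out if
+     * the bus has already entered low-power state (D3_INFORM sent), bail
+     * out on PCIe link down, optionally raise a DAR power request, then
+     * write the doorbell register. Condensed:
+     *
+     *   static int ring_db(volatile uint32 *db, bool in_lps, bool linkdown,
+     *       uint32 val)
+     *   {
+     *       if (in_lps || linkdown)
+     *           return -1;  // device may be unreachable; skip the MMIO write
+     *       *db = val;
+     *       return 0;
+     *   }
+     */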
TRUE : FALSE); +#endif /* defined(DHD_MMIO_TRACE) */ + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + +#ifdef DHD_DB0TS + if (bus->dhd->db0ts_capable) { + uint64 ts; + + ts = local_clock(); + do_div(ts, 1000); + + value = htol32(ts & 0xFFFFFFFF); + DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value)); + } +#endif /* DHD_DB0TS */ + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); +} + +void +dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake) +{ + /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */ + if (__DHD_CHK_BUS_IN_LPS(bus)) { + DHD_RPM(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + if (devwake) { + if (OOB_DW_ENAB(bus)) { + dhd_bus_set_device_wake(bus, TRUE); + } + } + dhd_bus_doorbell_timeout_reset(bus); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ + +#ifdef DHD_MMIO_TRACE + dhd_bus_mmio_trace(bus, dhd_bus_db0_addr_2_get(bus), value, TRUE); +#endif /* defined(DHD_MMIO_TRACE) */ + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value); +} + +static void +dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value) +{ + uint32 w; + /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */ + if (__DHD_CHK_BUS_IN_LPS(bus)) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB; + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w); +} + +dhd_mb_ring_t +dhd_bus_get_mbintr_fn(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + bus->pcie_mailbox_int); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhd_bus_ringbell_oldpcie; + } + } else { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + dhd_bus_db0_addr_get(bus)); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhdpcie_bus_ringbell_fast; + } + } + return dhd_bus_ringbell; +} + +dhd_mb_ring_2_t +dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus) +{ + bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + dhd_bus_db0_addr_2_get(bus)); + if (bus->pcie_mb_intr_2_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhdpcie_bus_ringbell_2_fast; + } + return dhd_bus_ringbell_2; +} + +bool +BCMFASTPATH(dhd_bus_dpc)(struct dhd_bus *bus) +{ + bool resched = FALSE; /* Flag indicating resched wanted */ + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bus->dpc_entry_time = OSL_LOCALTIME_NS(); + + if (dhd_query_bus_erros(bus->dhd)) { + return 0; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS + * to avoid IOCTL Resumed On timeout when ioctl is waiting for response + * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS + * and if we return from here, then IOCTL response will never be 
handled + */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + bus->dpc_return_busdown_count++; + return 0; + } +#ifdef DHD_PCIE_RUNTIMEPM + bus->idlecount = 0; +#endif /* DHD_PCIE_RUNTIMEPM */ + DHD_BUS_BUSY_SET_IN_DPC(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef DHD_READ_INTSTATUS_IN_DPC + if (bus->ipend) { + bus->ipend = FALSE; + bus->intstatus = dhdpcie_bus_intstatus(bus); + /* Check if the interrupt is ours or not */ + if (bus->intstatus == 0) { + goto INTR_ON; + } + bus->intrcount++; + } +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + + /* Do not process dpc after receiving D3_ACK */ + if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) { + DHD_ERROR(("%s: D3 Ack Recieved, skip dpc\n", __FUNCTION__)); + goto exit; + } + + resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus); + if (!resched) { + bus->intstatus = 0; +#ifdef DHD_READ_INTSTATUS_IN_DPC +INTR_ON: +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + if (bus->d2h_intr_control == PCIE_D2H_INTMASK_CTRL) { + dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */ + bus->dpc_intr_enable_count++; + } else { + /* For Linux, Macos etc (otherthan NDIS) enable back the host interrupts + * which has been disabled in the dhdpcie_bus_isr() + */ + if ((dhdpcie_irq_disabled(bus)) && (!dhd_query_bus_erros(bus->dhd))) { + dhdpcie_enable_irq(bus); /* Enable back interrupt!! */ + bus->dpc_intr_enable_count++; + } + } + bus->dpc_exit_time = OSL_LOCALTIME_NS(); + } else { + bus->resched_dpc_time = OSL_LOCALTIME_NS(); + } + + bus->dpc_sched = resched; +#ifdef DHD_FLOW_RING_STATUS_TRACE + if (bus->dhd->dma_h2d_ring_upd_support && bus->dhd->dma_d2h_ring_upd_support && + (bus->dhd->ring_attached == TRUE)) { + dhd_bus_flow_ring_status_dpc_trace(bus->dhd); + } +#endif /* DHD_FLOW_RING_STATUS_TRACE */ + +exit: + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return resched; + +} + +int +dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) +{ + uint32 cur_h2d_mb_data = 0; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } + + DHD_PCIE_INFO(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data)); + +#ifdef PCIE_INB_DW + if (h2d_mb_data == H2D_HOST_DS_ACK) { + dhdpcie_set_dongle_deepsleep(bus, TRUE); + } + dhd_bus_ds_trace(bus, h2d_mb_data, FALSE, dhdpcie_bus_get_pcie_inband_dw_state(bus)); +#else + dhd_bus_ds_trace(bus, h2d_mb_data, FALSE); +#endif /* PCIE_INB_DW */ + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) { + DHD_PCIE_INFO(("API rev is 6, sending mb data as H2D Ctrl message" + " to dongle, 0x%04x\n", h2d_mb_data)); + /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */ +#ifdef PCIE_OOB + bus->oob_enabled = FALSE; +#endif /* PCIE_OOB */ + /* XXX: check the error return value here... 
*/
+        if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
+            DHD_ERROR(("failure sending the H2D Mailbox message "
+                "to firmware\n"));
+            goto fail;
+        }
+#ifdef PCIE_OOB
+        bus->oob_enabled = TRUE;
+#endif /* PCIE_OOB */
+        goto done;
+    }
+
+    dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
+
+    if (cur_h2d_mb_data != 0) {
+        uint32 i = 0;
+        DHD_PCIE_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n",
+            __FUNCTION__, cur_h2d_mb_data));
+        /* XXX: start a zero length timer to keep checking this to be zero */
+        while ((i++ < 100) && cur_h2d_mb_data) {
+            OSL_DELAY(10);
+            dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
+        }
+        if (i >= 100) {
+            DHD_ERROR(("%s : waited 1ms for the dngl "
+                "to ack the previous mb transaction\n", __FUNCTION__));
+            DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
+                __FUNCTION__, cur_h2d_mb_data));
+        }
+    }
+
+    dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
+    dhd_bus_gen_devmb_intr(bus);
+
+done:
+    if (h2d_mb_data == H2D_HOST_D3_INFORM) {
+        DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+        bus->last_d3_inform_time = OSL_LOCALTIME_NS();
+        bus->d3_inform_cnt++;
+    }
+    if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
+        DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
+        bus->d0_inform_in_use_cnt++;
+    }
+    if (h2d_mb_data == H2D_HOST_D0_INFORM) {
+        DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
+        bus->d0_inform_cnt++;
+    }
+    return BCME_OK;
+fail:
+    return BCME_ERROR;
+}
+
+static void
+dhd_bus_handle_d3_ack(dhd_bus_t *bus)
+{
+    bus->suspend_intr_disable_count++;
+    /* Disable dongle Interrupts Immediately after D3 */
+
+    /* For Linux, MacOS etc. (other than NDIS), along with disabling
+     * the dongle interrupt by clearing the IntMask, directly disable
+     * the interrupt from the host side as well. Also clear the intstatus
+     * if it is set, to avoid unnecessary interrupts after D3 ACK.
+     */
+    dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
+    dhdpcie_bus_clear_intstatus(bus);
+#ifndef NDIS /* !NDIS */
+    dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
+#endif /* !NDIS */
+
+    DHD_SET_BUS_LPS_D3_ACKED(bus);
+    DHD_RPM(("%s: D3_ACK Received\n", __FUNCTION__));
+
+    if (bus->dhd->dhd_induce_error == DHD_INDUCE_D3_ACK_TIMEOUT) {
+        /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
+        DHD_ERROR(("%s: Due to d3ack induce error forcefully set "
+            "bus_low_power_state to DHD_BUS_D3_INFORM_SENT\n", __FUNCTION__));
+        DHD_SET_BUS_LPS_D3_INFORMED(bus);
+    }
+    /* Check for the D3 ACK induce flag, which is set by firing a dhd iovar to induce a
+     * D3 Ack timeout. If the flag is set, D3 wake is skipped, which results in a D3 Ack timeout.
+ */ + if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) { + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); + } else { + DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__)); + } +} + +void +dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data) +{ +#ifdef PCIE_INB_DW + unsigned long flags = 0; +#endif /* PCIE_INB_DW */ + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + DHD_PCIE_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data)); +#ifdef PCIE_INB_DW + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + dhd_bus_ds_trace(bus, d2h_mb_data, TRUE, dhdpcie_bus_get_pcie_inband_dw_state(bus)); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); +#else + dhd_bus_ds_trace(bus, d2h_mb_data, TRUE); +#endif /* PCIE_INB_DW */ + + if (d2h_mb_data & D2H_DEV_FWHALT) { + if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) { + DHD_ERROR(("FW trap has happened, dongle_trap_data 0x%8x\n", + bus->dhd->dongle_trap_data)); + } + + if (bus->dhd->dongle_trap_data & D2H_DEV_TRAP_HOSTDB) { + uint64 db7_dur; + + bus->dhd->db7_trap.debug_db7_trap_time = OSL_LOCALTIME_NS(); + bus->dhd->db7_trap.debug_db7_trap_cnt++; + db7_dur = bus->dhd->db7_trap.debug_db7_trap_time - + bus->dhd->db7_trap.debug_db7_send_time; + if (db7_dur > bus->dhd->db7_trap.debug_max_db7_dur) { + bus->dhd->db7_trap.debug_max_db7_send_time = + bus->dhd->db7_trap.debug_db7_send_time; + bus->dhd->db7_trap.debug_max_db7_trap_time = + bus->dhd->db7_trap.debug_db7_trap_time; + } + bus->dhd->db7_trap.debug_max_db7_dur = + MAX(bus->dhd->db7_trap.debug_max_db7_dur, db7_dur); + if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) { + bus->dhd->db7_trap.debug_db7_timing_error_cnt++; + } + } else { + dhdpcie_checkdied(bus, NULL, 0); +#ifdef BCM_ROUTER_DHD + dhdpcie_handle_dongle_trap(bus); +#endif +#ifdef OEM_ANDROID +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO); +#endif /* OEM_ANDROID */ + } + if (bus->dhd->db7_trap.fw_db7w_trap_inprogress) { + bus->dhd->db7_trap.fw_db7w_trap_inprogress = FALSE; + bus->dhd->dongle_trap_occured = TRUE; + } + goto exit; + } + if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { + bool ds_acked = FALSE; + BCM_REFERENCE(ds_acked); + if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) { + DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n")); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + goto exit; + } + /* what should we do */ + DHD_PCIE_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n")); +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + /* As per inband state machine, host should not send DS-ACK + * during suspend or suspend in progress, instead D3 inform will be sent. + */ + if (!bus->skip_ds_ack) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) + == DW_DEVICE_DS_ACTIVE) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_SLEEP_PEND); + if (bus->host_active_cnt == 0) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_SLEEP); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + ds_acked = TRUE; + DHD_PCIE_INFO(("D2H_MB_DATA: sent DEEP SLEEP" + "ACK to DNGL\n")); + } else { + DHD_PCIE_INFO(("%s: Host is active, " + "skip sending DS-ACK. " + "host_active_cnt is %d\n", + __FUNCTION__, bus->host_active_cnt)); + } + } + /* Currently DW_DEVICE_HOST_SLEEP_WAIT is set only + * under dhd_bus_suspend() function. 
+ */ + else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) + == DW_DEVICE_HOST_SLEEP_WAIT) { + DHD_ERROR(("%s: DS-ACK not sent due to suspend " + "in progress\n", __FUNCTION__)); + } else { + DHD_ERROR_RLMT(("%s: Failed to send DS-ACK, DS state is %d", + __FUNCTION__, + dhdpcie_bus_get_pcie_inband_dw_state(bus))); + } + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + dhd_os_ds_enter_wake(bus->dhd); + } else { + DHD_PCIE_INFO(("%s: Skip DS-ACK due to " + "suspend in progress\n", __FUNCTION__)); + } +#ifdef DHD_EFI + if (ds_acked && !bus->ds_enabled) { + /* if 'deep_sleep' is disabled, then need to again assert DW + * from here once we we have acked the DS_ENTER_REQ, so that + * dongle stays awake and honours the user iovar request. + * Note, that this code will be hit only for the pcie_suspend/resume + * case with 'deep_sleep' disabled, and will not get executed in + * the normal path either when 'deep_sleep' is enabled (default) + * or when 'deep_sleep' is disabled, because if 'deep_sleep' is + * disabled, then by definition, dongle will not send DS_ENTER_REQ + * except in the case of D0 -> D3 -> D0 transitions, which is what + * is being handled here. + */ + dhd_bus_inb_set_device_wake(bus, TRUE); + } +#endif /* DHD_EFI */ + } else +#endif /* PCIE_INB_DW */ + { + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + DHD_PCIE_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n")); + } + } + if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + if (bus->calc_ds_exit_latency) { + bus->ds_exit_ts2 = OSL_SYSUPTIME_US(); + if (bus->ds_exit_ts2 > bus->ds_exit_ts1 && + bus->ds_exit_ts1 != 0) + bus->ds_exit_latency = bus->ds_exit_ts2 - bus->ds_exit_ts1; + else + bus->ds_exit_latency = 0; + } + } +#endif /* PCIE_INB_DW */ + /* what should we do */ + DHD_PCIE_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n")); +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DISABLED_WAIT) { + /* wake up only if some one is waiting in + * DW_DEVICE_DS_DISABLED_WAIT state + * in this case the waiter will change the state + * to DW_DEVICE_DS_DEV_WAKE + */ + bus->inband_ds_exit_host_cnt++; + /* To synchronize with the previous memory operations call wmb() */ + OSL_SMP_WMB(); + bus->wait_for_ds_exit = 1; + /* Call another wmb() to make sure before waking up the + * other event value gets updated. + */ + OSL_SMP_WMB(); + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_WAKE); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + dhd_os_ds_exit_wake(bus->dhd); + } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DEV_SLEEP) { + DHD_PCIE_INFO(("recvd unsolicited DS-EXIT" + " from dongle in DEV_SLEEP\n")); + /* + * unsolicited state change to DW_DEVICE_DS_DEV_WAKE if + * D2H_DEV_DS_EXIT_NOTE received in DW_DEVICE_DS_DEV_SLEEP state. + * This is need when dongle is woken by external events like + * WOW, ping ..etc + */ + bus->inband_ds_exit_device_cnt++; + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_WAKE); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } else { + DHD_PCIE_INFO(("D2H_MB_DATA: not in" + " DS_DISABLED_WAIT/DS_DEV_SLEEP\n")); + bus->inband_ds_exit_host_cnt++; + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } + /* + * bus->deep_sleep is TRUE by default. deep_sleep is set to FALSE when the + * dhd iovar deep_sleep is fired with value 0(user request to not enter + * deep sleep). 
So donot attempt to go to deep sleep when user has + * explicitly asked not to go to deep sleep. bus->deep_sleep is set to + * TRUE when the dhd iovar deep_sleep is fired with value 1. + */ + if (bus->deep_sleep) { + dhd_bus_set_device_wake(bus, FALSE); + dhdpcie_set_dongle_deepsleep(bus, FALSE); + } + } +#endif /* PCIE_INB_DW */ + } + if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) { + /* what should we do */ + DHD_PCIE_INFO(("D2H_MB_DATA: D0 ACK\n")); +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_HOST_WAKE_WAIT) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE); + } + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } +#endif /* PCIE_INB_DW */ + } + if (d2h_mb_data & D2H_DEV_D3_ACK) { + /* what should we do */ + DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n")); + if (!bus->wait_for_d3_ack) { +#if defined(DHD_HANG_SEND_UP_TEST) + if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) { + DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n")); + } else { + dhd_bus_handle_d3_ack(bus); + } +#else /* DHD_HANG_SEND_UP_TEST */ + dhd_bus_handle_d3_ack(bus); +#endif /* DHD_HANG_SEND_UP_TEST */ + } + } + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +static void +dhdpcie_handle_mb_data(dhd_bus_t *bus) +{ + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return; + } + + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) { + DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n", + __FUNCTION__, d2h_mb_data)); + goto exit; + } + + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); + + DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data)); + if (d2h_mb_data & D2H_DEV_FWHALT) { + DHD_ERROR(("FW trap has happened\n")); + dhdpcie_checkdied(bus, NULL, 0); + /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */ +#ifdef BCM_ROUTER_DHD + dhdpcie_handle_dongle_trap(bus); +#endif + goto exit; + } + if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { + /* what should we do */ + DHD_PCIE_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__)); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + DHD_PCIE_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__)); + } + if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { + /* what should we do */ + DHD_PCIE_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__)); + } + if (d2h_mb_data & D2H_DEV_D3_ACK) { + /* what should we do */ + DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__)); + if (!bus->wait_for_d3_ack) { +#if defined(DHD_HANG_SEND_UP_TEST) + if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) { + DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n")); + } else { + dhd_bus_handle_d3_ack(bus); + } +#else /* DHD_HANG_SEND_UP_TEST */ + dhd_bus_handle_d3_ack(bus); +#endif /* DHD_HANG_SEND_UP_TEST */ + } + } + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +static void +dhdpcie_read_handle_mb_data(dhd_bus_t *bus) +{ + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (!d2h_mb_data) { + goto exit; + } + + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); + + dhd_bus_handle_mb_data(bus, d2h_mb_data); + +exit: + 
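+	/*
+	 * All three mailbox handlers above share the same read-ack-dispatch
+	 * shape: read D2H_MB_DATA from shared memory, write zero back so the
+	 * dongle can post the next word, then branch on the bits (FWHALT
+	 * first, since a trap preempts everything else). A minimal sketch,
+	 * not driver code:
+	 *
+	 *	static uint32 shared_d2h_mb;	// stands in for the TCM word
+	 *
+	 *	static uint32 read_and_ack_d2h_mb(void)
+	 *	{
+	 *		uint32 v = shared_d2h_mb;	// dhd_bus_cmn_readshared()
+	 *
+	 *		if (v != 0)
+	 *			shared_d2h_mb = 0;	// the ack: write zero back
+	 *		return v;	// caller dispatches on the D2H_DEV_* bits
+	 *	}
+	 */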
if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
+	}
+}
+
+#define DHD_SCHED_RETRY_DPC_DELAY_MS 100u
+
+static void
+dhd_bus_handle_intx_ahead_dma_indices(dhd_bus_t *bus)
+{
+	if (bus->d2h_intr_method == PCIE_MSI) {
+		DHD_PCIE_INFO(("%s: not required for msi\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dhd->dma_d2h_ring_upd_support == FALSE) {
+		DHD_PCIE_INFO(("%s: not required for non-dma-indices\n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd_query_bus_erros(bus->dhd)) {
+		return;
+	}
+
+#ifndef NDIS
+	/*
+	 * Skip the delayed dpc if the tasklet was scheduled from non-ISR context.
+	 * From ISR context we disable the IRQ and enable it back at the end of the dpc,
+	 * hence if the IRQ is not disabled we can consider the tasklet as scheduled
+	 * from non-ISR context.
+	 */
+	if (dhdpcie_irq_disabled(bus) == FALSE) {
+		DHD_PCIE_INFO(("%s: skip delayed dpc as tasklet is scheduled from non isr\n",
+			__FUNCTION__));
+		return;
+	}
+#endif /* NDIS */
+
+	if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
+		DHD_PCIE_INFO(("%s: skip delayed dpc as d3 ack is received\n", __FUNCTION__));
+		return;
+	}
+
+	dhd_schedule_delayed_dpc_on_dpc_cpu(bus->dhd, DHD_SCHED_RETRY_DPC_DELAY_MS);
+	return;
+}
+
+static bool
+dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
+{
+	bool resched = FALSE;
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		/* Msg stream interrupt */
+		if (intstatus & I_BIT1) {
+			resched = dhdpci_bus_read_frames(bus);
+		} else if (intstatus & I_BIT0) {
+			/* do nothing for Now */
+		}
+	} else {
+		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
+			bus->api.handle_mb_data(bus);
+
+		/* The fact that we are here implies that dhdpcie_bus_intstatus( )
+		 * returned a non-zero status after applying the current mask.
+		 * No further check required; in fact, bus->intstatus can be eliminated.
+		 * Both bus->intstatus and bus->intdis are shared between the isr and the dpc.
+		 */
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+		if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
+			resched = dhdpci_bus_read_frames(bus);
+			pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+			pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
+		}
+#else
+		resched = dhdpci_bus_read_frames(bus);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+	}
+
+	dhd_bus_handle_intx_ahead_dma_indices(bus);
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
+	}
+	return resched;
+}
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void
+dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
+{
+	unsigned long time_elapsed;
+
+	/* Poll for the timeout value periodically */
+	if ((bus->dhd->busstate == DHD_BUS_DATA) &&
+		(bus->dhd->dhd_rte_time_sync_ms != 0) &&
+		DHD_CHK_BUS_NOT_IN_LPS(bus)) {
+		/*
+		 * XXX OSL_SYSUPTIME_US() overflow should not happen.
+		 * As it is an unsigned 64-bit value, it needs
+		 * 213503982334 days to overflow.
+		 */
+		time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
+		/* Comparison time is in milliseconds */
+		if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
+			/*
+			 * It's fine if it has crossed the timeout value; no need to
+			 * adjust the elapsed time.
+			 */
+			bus->dhd_rte_time_sync_count += time_elapsed;
+
+			/* Schedule deferred work. The work function will send the IOVAR. */
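+
+			/*
+			 * Sketch of the pacing pattern above (illustrative, not
+			 * driver code): fire at most once per period, and advance
+			 * the stored baseline by the full elapsed time so a late
+			 * poll does not trigger a burst of catch-up fires:
+			 *
+			 *	static bool time_sync_due(uint64 now_us,
+			 *		uint64 *baseline_us, uint32 period_ms)
+			 *	{
+			 *		uint64 elapsed = now_us - *baseline_us;
+			 *
+			 *		if ((elapsed / 1000) < period_ms)
+			 *			return FALSE;
+			 *		*baseline_us += elapsed;	// skip, don't accumulate lag
+			 *		return TRUE;
+			 *	}
+			 */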
+			dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
+		}
+	}
+}
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
+static bool
+dhdpci_bus_read_frames(dhd_bus_t *bus)
+{
+	bool more = FALSE;
+
+	/* First check if there is a FW trap */
+	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
+		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
+#ifdef DNGL_AXI_ERROR_LOGGING
+		if (bus->dhd->axi_error) {
+			DHD_ERROR(("AXI Error happened\n"));
+			return FALSE;
+		}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
+		return FALSE;
+	}
+
+	if (dhd_query_bus_erros(bus->dhd)) {
+		DHD_ERROR(("%s: detected bus errors. Hence do not process msg rings\n",
+			__FUNCTION__));
+		return FALSE;
+	}
+#ifdef DHD_DMA_INDICES_SEQNUM
+	dhd_prot_save_dmaidx(bus->dhd);
+#endif /* DHD_DMA_INDICES_SEQNUM */
+	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
+	dhd_prot_process_ctrlbuf(bus->dhd);
+	bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
+
+	/* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
+	if (DHD_CHK_BUS_IN_LPS(bus)) {
+		DHD_RPM(("%s: Bus is in power save state (%d). "
+			"Skip processing rest of ring buffers.\n",
+			__FUNCTION__, bus->bus_low_power_state));
+		return FALSE;
+	}
+
+	/* update the flow ring cpls */
+	dhd_update_txflowrings(bus->dhd);
+	bus->last_process_flowring_time = OSL_LOCALTIME_NS();
+
+	/* With heavy TX traffic, we could get a lot of TxStatus,
+	 * so add a bound
+	 */
+#ifdef DHD_HP2P
+	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
+	bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
+
+	/* With heavy RX traffic, this routine could potentially spend some time
+	 * processing RX frames without an RX bound
+	 */
+#ifdef DHD_HP2P
+	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
+	bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
+
+	/* Process info ring completion messages */
+#ifdef EWP_EDL
+	if (!bus->dhd->dongle_edl_support)
+#endif
+	{
+		more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
+		bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
+	}
+#ifdef EWP_EDL
+	else {
+		more |= dhd_prot_process_msgbuf_edl(bus->dhd);
+		bus->last_process_edl_time = OSL_LOCALTIME_NS();
+	}
+#endif /* EWP_EDL */
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+	/* Handle the firmware trace data in the logtrace kernel thread */
+	dhd_event_logtrace_enqueue_fwtrace(bus->dhd);
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef BTLOG
+	/* Process bt log ring completion messages */
+	more |= dhd_prot_process_msgbuf_btlogcpl(bus->dhd, DHD_BTLOGRING_BOUND);
+#endif /* BTLOG */
+
+#ifdef IDLE_TX_FLOW_MGMT
+	if (bus->enable_idle_flowring_mgmt) {
+		/* Look for idle flow rings */
+		dhd_bus_check_idle_scan(bus);
+	}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (bus->dhd->hang_was_sent) {
+		more = FALSE;
+	}
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	/* XXX : It seems that a linkdown can occur without notification;
+	 *       in case reading shared memory failed, a recovery hang is needed
+	 */
+	if (bus->read_shm_fail) {
+		/* Read interrupt state once again to confirm linkdown */
+		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
+			bus->pcie_mailbox_int, 0, 0);
+		if (intstatus !=
(uint32)-1) { + DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__)); +#ifdef DHD_FW_COREDUMP + if (bus->dhd->memdump_enabled) { + DHD_OS_WAKE_LOCK(bus->dhd); + bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL; + dhd_bus_mem_dump(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + } +#endif /* DHD_FW_COREDUMP */ + } else { + DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__)); +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + bus->is_linkdown = 1; + } + + /* XXX The dhd_prot_debug_info_print() function *has* to be + * invoked only if the bus->is_linkdown is updated so that + * host doesn't need to read any pcie registers if + * PCIe link is down. + */ + dhd_prot_debug_info_print(bus->dhd); + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT; +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT + copy_hang_info_linkdown(bus->dhd); +#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */ + dhd_os_send_hang_message(bus->dhd); + more = FALSE; + } +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#if defined(DHD_H2D_LOG_TIME_SYNC) + dhdpci_bus_rte_log_time_sync_poll(bus); +#endif /* DHD_H2D_LOG_TIME_SYNC */ + return more; +} + +bool +dhdpcie_tcm_valid(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv; + uint32 shaddr = 0; + pciedev_shared_t sh; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n", + __FUNCTION__, addr)); + return FALSE; + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); + return FALSE; + } + + /* Compare any field in pciedev_shared_t */ + if (sh.console_addr != bus->pcie_sh->console_addr) { + DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n")); + return FALSE; + } + + return TRUE; +} + +static void +dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version) +{ + snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)", + firmware_api_version, host_api_version); + return; +} + +static bool +dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version) +{ + bool retcode = FALSE; + + DHD_INFO(("firmware api revision %d, host api revision %d\n", + firmware_api_version, host_api_version)); + + switch (firmware_api_version) { + case PCIE_SHARED_VERSION_7: + case PCIE_SHARED_VERSION_6: + case PCIE_SHARED_VERSION_5: + retcode = TRUE; + break; + default: + if (firmware_api_version <= host_api_version) + retcode = TRUE; + } + return retcode; +} + +static int +dhdpcie_readshared(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv, dma_indx_wr_buf, dma_indx_rd_buf; + uint32 shaddr = 0; + pciedev_shared_t *sh = bus->pcie_sh; + dhd_timeout_t tmo; + bool idma_en = FALSE; +#if defined(PCIE_INB_DW) + bool d2h_inband_dw = FALSE; +#endif /* defined(PCIE_INB_DW) */ +#if defined(PCIE_OOB) + bool d2h_no_oob_dw = FALSE; +#endif /* defined(PCIE_INB_DW) */ + uint32 timeout = MAX_READ_TIMEOUT; + uint32 elapsed; +#ifndef CUSTOMER_HW4_DEBUG + uint32 intstatus; +#endif /* OEM_ANDROID */ + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + +#ifdef BCMSLTGT 
+#ifdef BCMQT_HW + if (qt_dngl_timeout) { + timeout = qt_dngl_timeout * 1000; + } +#endif /* BCMQT_HW */ + DHD_ERROR(("%s: host timeout in QT/FPGA mode %ld ms\n", + __FUNCTION__, (timeout * htclkratio) / USEC_PER_MSEC)); +#endif /* BCMSLTGT */ + + /* start a timer for 5 seconds */ + dhd_timeout_start(&tmo, timeout); + + while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) { + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE + /* + * FW might fill all trace buffers even before full DHD/FW initialization. + * poll for trace buffers to avoid circular buffer overflow. + */ + process_fw_trace_data(bus->dhd); +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + } + + if (addr == (uint32)-1) { + DHD_ERROR(("%s: ##### pciedev shared address is 0xffffffff ####\n", __FUNCTION__)); +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + +#ifdef CUSTOMER_HW4_DEBUG + bus->is_linkdown = 1; + DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); +#else + dhd_bus_dump_imp_cfg_registers(bus); + dhd_bus_dump_dar_registers(bus); + /* Check the PCIe link status by reading intstatus register */ + intstatus = si_corereg(bus->sih, + bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0); + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); + bus->is_linkdown = TRUE; + } else { +#if defined(DHD_FW_COREDUMP) + /* save core dump or write to a file */ + if (bus->dhd->memdump_enabled) { + /* since dhdpcie_readshared() is invoked only during init or trap */ + bus->dhd->memdump_type = bus->dhd->dongle_trap_data ? + DUMP_TYPE_DONGLE_TRAP : DUMP_TYPE_DONGLE_INIT_FAILURE; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + } +#endif /* CUSTOMER_HW4_DEBUG */ + return BCME_ERROR; + } + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + elapsed = tmo.elapsed; +#ifdef BCMSLTGT + elapsed *= htclkratio; +#endif /* BCMSLTGT */ + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n", + __FUNCTION__, addr)); + DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed)); +#ifdef DEBUG_DNGL_INIT_FAIL + if (addr != (uint32)-1) { /* skip further PCIE reads if read this addr */ +#ifdef CUSTOMER_HW4_DEBUG + bus->dhd->memdump_enabled = DUMP_MEMONLY; +#endif /* CUSTOMER_HW4_DEBUG */ + if (bus->dhd->memdump_enabled) { + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE; + dhdpcie_mem_dump(bus); + } + } +#endif /* DEBUG_DNGL_INIT_FAIL */ +#if defined(NDIS) + /* This is a very common code path to catch f/w init failures. + Capture a socram dump. 
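+	 *
+	 * For reference, the validity predicate used on the handshake word (a
+	 * condensed sketch, not driver code): zero or the NVRAM checksum mean
+	 * "not written yet", 0xffffffff usually means the PCIe link is down,
+	 * and anything outside dongle RAM is garbage:
+	 *
+	 *	static bool shared_addr_valid(uint32 addr, uint32 ram_base,
+	 *		uint32 ram_size, uint32 nvram_csum)
+	 *	{
+	 *		uint32 last_word = ram_base + ram_size - 4;
+	 *
+	 *		if (addr == 0 || addr == (uint32)-1)
+	 *			return FALSE;	// unwritten / link down
+	 *		if (addr == nvram_csum)
+	 *			return FALSE;	// still the NVRAM checksum
+	 *		return (addr >= ram_base) && (addr <= last_word);
+	 *	}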
+ */ + ASSERT(0); +#endif /* defined(NDIS) */ + return BCME_ERROR; + } else { + bus->rd_shared_pass_time = OSL_LOCALTIME_NS(); + elapsed = tmo.elapsed; +#ifdef BCMSLTGT + elapsed *= htclkratio; +#endif /* BCMSLTGT */ + bus->shared_addr = (ulong)addr; + DHD_ERROR(("### Total time ARM OOR to Readshared pass took %llu usec ###\n", + DIV_U64_BY_U32((bus->rd_shared_pass_time - bus->arm_oor_time), + NSEC_PER_USEC))); + DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec " + "before dongle is ready\n", __FUNCTION__, addr, elapsed)); + } + +#ifdef DHD_EFI + bus->dhd->pcie_readshared_done = 1; +#endif + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv)); + return rv; + } + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + sh->dma_rxoffset = ltoh32(sh->dma_rxoffset); + sh->rings_info_ptr = ltoh32(sh->rings_info_ptr); + sh->flags2 = ltoh32(sh->flags2); + + /* load bus console address */ + bus->console_addr = sh->console_addr; + + /* Read the dma rx offset */ + bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset; + dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset); + + DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset)); + + bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK; + if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION))) + { + DHD_ERROR(("%s: pcie_shared version %d in dhd " + "is older than pciedev_shared version %d in dongle\n", + __FUNCTION__, PCIE_SHARED_VERSION, + bus->api.fw_rev)); + return BCME_ERROR; + } + dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION); + + bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? + sizeof(uint16) : sizeof(uint32); + DHD_INFO(("%s: Dongle advertizes %d size indices\n", + __FUNCTION__, bus->rw_index_sz)); + +#ifdef IDLE_TX_FLOW_MGMT + if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) { + DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n", + __FUNCTION__)); + bus->enable_idle_flowring_mgmt = TRUE; + } +#endif /* IDLE_TX_FLOW_MGMT */ + +#ifdef PCIE_OOB + bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE; + d2h_no_oob_dw = bus->dhd->d2h_no_oob_dw; +#endif /* PCIE_OOB */ + +#ifdef PCIE_INB_DW + bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE; + d2h_inband_dw = bus->dhd->d2h_inband_dw; +#endif /* PCIE_INB_DW */ + +#if defined(PCIE_INB_DW) + DHD_ERROR(("FW supports Inband dw ? %s\n", + d2h_inband_dw ? "Y":"N")); +#endif /* defined(PCIE_INB_DW) */ + +#if defined(PCIE_OOB) + DHD_ERROR(("FW supports oob dw ? %s\n", + d2h_no_oob_dw ? "N":"Y")); +#endif /* defined(PCIE_OOB) */ + + if (IDMA_CAPABLE(bus)) { + if (bus->sih->buscorerev == 23) { +#ifdef PCIE_INB_DW + if (bus->dhd->d2h_inband_dw) + { + idma_en = TRUE; + } +#endif /* PCIE_INB_DW */ + } else { + idma_en = TRUE; + } + } + + if (idma_en) { + bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE; + bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? 
TRUE : FALSE; + } + + bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; + + bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE; + + /* Does the FW support DMA'ing r/w indices */ + if (sh->flags & PCIE_SHARED_DMA_INDEX) { + if (!bus->dhd->dma_ring_upd_overwrite) { +#if defined(BCM_ROUTER_DHD) + /* Router platform does not use IOV_DMA_RINGINDICES */ + if (sh->flags & PCIE_SHARED_2BYTE_INDICES) +#endif /* BCM_ROUTER_DHD */ + { + if (!IFRM_ENAB(bus->dhd)) { + bus->dhd->dma_h2d_ring_upd_support = TRUE; + } + bus->dhd->dma_d2h_ring_upd_support = TRUE; + } + } + + if (bus->dhd->dma_d2h_ring_upd_support && bus->dhd->d2h_sync_mode) { + DHD_ERROR(("%s: ERROR COMBO: sync (0x%x) enabled for DMA indices\n", + __FUNCTION__, bus->dhd->d2h_sync_mode)); + } + + DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n", + __FUNCTION__, + (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0), + (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0))); + } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) { + DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n", + __FUNCTION__)); + return BCME_UNSUPPORTED; + } else { + bus->dhd->dma_h2d_ring_upd_support = FALSE; + bus->dhd->dma_d2h_ring_upd_support = FALSE; + } + + /* Does the firmware support fast delete ring? */ + if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) { + DHD_INFO(("%s: Firmware supports fast delete ring\n", + __FUNCTION__)); + bus->dhd->fast_delete_ring_support = TRUE; + } else { + DHD_INFO(("%s: Firmware does not support fast delete ring\n", + __FUNCTION__)); + bus->dhd->fast_delete_ring_support = FALSE; + } + + /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */ + { + ring_info_t ring_info; + + /* boundary check */ + if ((sh->rings_info_ptr < bus->dongle_ram_base) || (sh->rings_info_ptr > shaddr)) { + DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n", + __FUNCTION__, sh->rings_info_ptr)); + return BCME_ERROR; + } + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr, + (uint8 *)&ring_info, sizeof(ring_info_t))) < 0) + return rv; + + bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr); + bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr); + + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { + bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); + bus->max_submission_rings = ltoh16(ring_info.max_submission_queues); + bus->max_completion_rings = ltoh16(ring_info.max_completion_rings); + bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings; + bus->api.handle_mb_data = dhdpcie_read_handle_mb_data; + bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX; + } + else { + bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); + bus->max_submission_rings = bus->max_tx_flowrings; + bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS; + bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS; + bus->api.handle_mb_data = dhdpcie_handle_mb_data; + bus->use_mailbox = TRUE; + } + if (bus->max_completion_rings == 0) { + DHD_ERROR(("dongle completion rings are invalid %d\n", + bus->max_completion_rings)); + return BCME_ERROR; + } + if (bus->max_submission_rings == 0) { + DHD_ERROR(("dongle submission rings are invalid %d\n", + bus->max_submission_rings)); + return BCME_ERROR; + } + if (bus->max_tx_flowrings == 0) { + DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings)); + return BCME_ERROR; + } + + /* If both FW and Host support DMA'ing indices, allocate memory and notify FW 
+ * The max_sub_queues is read from FW initialized ring_info + */ + if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_WR_BUF, bus->max_submission_rings); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_RD_BUF, bus->max_completion_rings); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices" + "Host will use w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_h2d_ring_upd_support = FALSE; + bus->dhd->idma_enable = FALSE; + } + } + + if (bus->dhd->dma_d2h_ring_upd_support) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_WR_BUF, bus->max_completion_rings); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_RD_BUF, bus->max_submission_rings); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices" + "Host will use w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_d2h_ring_upd_support = FALSE; + } + } +#ifdef DHD_DMA_INDICES_SEQNUM + if (bus->dhd->dma_d2h_ring_upd_support) { + uint32 bufsz = bus->rw_index_sz * bus->max_completion_rings; + if (dhd_prot_dma_indx_copybuf_init(bus->dhd, bufsz, D2H_DMA_INDX_WR_BUF) + != BCME_OK) { + return BCME_NOMEM; + } + bufsz = bus->rw_index_sz * bus->max_submission_rings; + if (dhd_prot_dma_indx_copybuf_init(bus->dhd, bufsz, H2D_DMA_INDX_RD_BUF) + != BCME_OK) { + return BCME_NOMEM; + } + } +#endif /* DHD_DMA_INDICES_SEQNUM */ + if (IFRM_ENAB(bus->dhd)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings); + + if (dma_indx_wr_buf != BCME_OK) { + DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n", + __FUNCTION__)); + bus->dhd->ifrm_enable = FALSE; + } + } + + /* read ringmem and ringstate ptrs from shared area and store in host variables */ + dhd_fillup_ring_sharedptr_info(bus, &ring_info); + if (dhd_msg_level & DHD_INFO_VAL) { + bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); + } + DHD_INFO(("%s: ring_info\n", __FUNCTION__)); + + DHD_ERROR(("%s: max H2D queues %d\n", + __FUNCTION__, ltoh16(ring_info.max_tx_flowrings))); + + DHD_INFO(("mail box address\n")); + DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->h2d_mb_data_ptr_addr)); + DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->d2h_mb_data_ptr_addr)); + } + + DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", + __FUNCTION__, bus->dhd->d2h_sync_mode)); + + bus->dhd->d2h_hostrdy_supported = + ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT); + + bus->dhd->ext_trap_data_supported = + ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA); + + if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0) + bus->dhd->pcie_txs_metadata_enable = 0; + + if (sh->flags2 & PCIE_SHARED2_TRAP_ON_HOST_DB7) { + memset(&bus->dhd->db7_trap, 0, sizeof(bus->dhd->db7_trap)); + bus->dhd->db7_trap.fw_db7w_trap = 1; + /* add an option to let the user select ?? */ + bus->dhd->db7_trap.db7_magic_number = PCIE_DB7_MAGIC_NUMBER_DPC_TRAP; + } + +#ifdef BTLOG + bus->dhd->bt_logging = (sh->flags2 & PCIE_SHARED2_BT_LOGGING) ? TRUE : FALSE; + /* XXX: WAR is needed for dongle with BTLOG to be backwards compatible with existing DHD. 
+	 * The issue is that existing DHD does not compute the INFO cmpl ringid correctly once
+	 * a BTLOG dongle increases max_submission_rings, resulting in overwriting a ring in the
+	 * dongle. When the dongle enables submit_count_WAR, it implies that the submit ring count
+	 * has been incremented in the dongle but will not be reflected in max_submission_rings.
+	 */
+	bus->dhd->submit_count_WAR = (sh->flags2 & PCIE_SHARED2_SUBMIT_COUNT_WAR) ? TRUE : FALSE;
+	DHD_ERROR(("FW supports BT logging ? %s \n", bus->dhd->bt_logging ? "Y" : "N"));
+#endif /* BTLOG */
+
+#ifdef SNAPSHOT_UPLOAD
+	bus->dhd->snapshot_upload = (sh->flags2 & PCIE_SHARED2_SNAPSHOT_UPLOAD) ? TRUE : FALSE;
+	DHD_ERROR(("FW supports snapshot upload ? %s \n", bus->dhd->snapshot_upload ? "Y" : "N"));
+#endif /* SNAPSHOT_UPLOAD */
+
+#ifdef D2H_MINIDUMP
+	bus->d2h_minidump = (sh->flags2 & PCIE_SHARED2_FW_SMALL_MEMDUMP) ? TRUE : FALSE;
+	DHD_ERROR(("FW supports minidump ? %s \n", bus->d2h_minidump ? "Y" : "N"));
+	if (bus->d2h_minidump_override) {
+		bus->d2h_minidump = FALSE;
+	}
+	DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
+		bus->d2h_minidump, bus->d2h_minidump_override));
+#endif /* D2H_MINIDUMP */
+
+	bus->dhd->hscb_enable =
+		(sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
+
+#ifdef EWP_EDL
+	if (host_edl_support) {
+		bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
+		DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
+	}
+#endif /* EWP_EDL */
+
+	bus->dhd->debug_buf_dest_support =
+		(sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
+	DHD_ERROR(("FW supports debug buf dest ? %s \n",
+		bus->dhd->debug_buf_dest_support ? "Y" : "N"));
+
+#ifdef DHD_HP2P
+	if (bus->dhd->hp2p_enable) {
+		bus->dhd->hp2p_ts_capable =
+			(sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
+		bus->dhd->hp2p_capable =
+			(sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
+		bus->dhd->hp2p_capable |= bus->dhd->hp2p_ts_capable;
+
+		DHD_ERROR(("FW supports HP2P ? %s\n",
+			bus->dhd->hp2p_capable ?
"Y" : "N")); + + if (bus->dhd->hp2p_capable) { + bus->dhd->pkt_thresh = HP2P_PKT_THRESH; + bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY; + bus->dhd->time_thresh = HP2P_TIME_THRESH; + for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) { + hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr]; + hp2p_info->hrtimer_init = FALSE; + hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hp2p_info->timer.function = &dhd_hp2p_write; + } + } + } +#endif /* DHD_HP2P */ + +#ifdef DHD_DB0TS + bus->dhd->db0ts_capable = + (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0; +#endif /* DHD_DB0TS */ + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + + /* + * WAR to fix ARM cold boot; + * De-assert WL domain in DAR + */ + if (bus->sih->buscorerev >= 68) { + dhd_bus_pcie_pwr_req_wl_domain(bus, + DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE); + } + } + return BCME_OK; +} /* dhdpcie_readshared */ + +/** Read ring mem and ring state ptr info from shared memory area in device memory */ +static void +dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) +{ + uint16 i = 0; + uint16 j = 0; + uint32 tcm_memloc; + uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr; + uint16 max_tx_flowrings = bus->max_tx_flowrings; + + /* Ring mem ptr info */ + /* Alloated in the order + H2D_MSGRING_CONTROL_SUBMIT 0 + H2D_MSGRING_RXPOST_SUBMIT 1 + D2H_MSGRING_CONTROL_COMPLETE 2 + D2H_MSGRING_TX_COMPLETE 3 + D2H_MSGRING_RX_COMPLETE 4 + */ + + { + /* ringmemptr holds start of the mem block address space */ + tcm_memloc = ltoh32(ring_info->ringmem_ptr); + + /* Find out ringmem ptr for each ring common ring */ + for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) { + bus->ring_sh[i].ring_mem_addr = tcm_memloc; + /* Update mem block */ + tcm_memloc = tcm_memloc + sizeof(ring_mem_t); + DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__, + i, bus->ring_sh[i].ring_mem_addr)); + } + } + + /* Ring state mem ptr info */ + { + d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr); + d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr); + h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr); + h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr); + + /* Store h2d common ring write/read pointers */ + for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store d2h common ring write/read pointers */ + for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) { + bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + + /* update mem block */ + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store txflow ring write/read pointers */ + if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { + max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS; + } else { + /* Account for Debug info h2d ring located after the last tx flow ring */ + max_tx_flowrings = max_tx_flowrings + 1; + } + for (j = 0; j < max_tx_flowrings; i++, j++) + { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + 
bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n", + __FUNCTION__, i, + bus->ring_sh[i].ring_state_w, + bus->ring_sh[i].ring_state_r)); + } +#ifdef DHD_HP2P + /* store wr/rd pointers for debug info completion or EDL ring and hp2p rings */ + for (j = 0; j <= MAX_HP2P_CMPL_RINGS; i++, j++) { + bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, + bus->ring_sh[i].ring_state_r)); + } +#else + /* store wr/rd pointers for debug info completion or EDL ring */ + bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, + bus->ring_sh[i].ring_state_r)); +#endif /* DHD_HP2P */ + } +} /* dhd_fillup_ring_sharedptr_info */ + +/** + * Initialize bus module: prepare for communication with the dongle. Called after downloading + * firmware into the dongle. + */ +int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + dhd_bus_pcie_pwr_req_clear_reload_war(bus); + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + /* Configure AER registers to log the TLP header */ + dhd_bus_aer_config(bus); + + /* Make sure we're talking to the core. */ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + /* before opening up bus for data transfer, check if shared are is intact */ + ret = dhdpcie_readshared(bus); + if (ret < 0) { + DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__)); + goto exit; + } + + /* Make sure we're talking to the core. 
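+	 *
+	 * (Aside, sketching the index layout dhdpcie_readshared() and
+	 * dhd_fillup_ring_sharedptr_info() established above -- illustrative
+	 * only: each ring's write/read index lives in flat arrays in dongle
+	 * memory, rw_index_sz bytes per ring, so slot addresses are just
+	 * base + slot * stride:
+	 *
+	 *	static uint32 ring_idx_addr(uint32 base, uint16 slot, uint16 stride)
+	 *	{
+	 *		return base + (uint32)slot * stride;	// stride = rw_index_sz (2 or 4)
+	 *	}
+	 *
+	 * Common rings, flow rings, and the debug/EDL ring take consecutive
+	 * slots, which is exactly the pointer arithmetic in the loops above.)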
*/ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + DHD_SET_BUS_NOT_IN_LPS(bus); + dhdp->dhd_bus_busy_state = 0; + + /* D11 status via PCIe completion header */ + if ((ret = dhdpcie_init_d11status(bus)) < 0) { + goto exit; + } + +#if defined(OEM_ANDROID) || defined(LINUX) + if (!dhd_download_fw_on_driverload) + dhd_dpc_enable(bus->dhd); +#endif /* OEM_ANDROID || LINUX */ + /* Enable the interrupt after device is up */ + dhdpcie_bus_intr_enable(bus); + + DHD_ERROR(("%s: Enabling bus->intr_enabled\n", __FUNCTION__)); + bus->intr_enabled = TRUE; + + /* XXX These need to change w/API updates */ + /* bcmsdh_intr_unmask(bus->sdh); */ +#ifdef DHD_PCIE_RUNTIMEPM + bus->idlecount = 0; + bus->idletime = (int32)MAX_IDLE_COUNT; + init_waitqueue_head(&bus->rpm_queue); + mutex_init(&bus->pm_lock); +#else + bus->idletime = 0; +#endif /* DHD_PCIE_RUNTIMEPM */ +#ifdef PCIE_INB_DW + bus->skip_ds_ack = FALSE; + /* Initialize the lock to serialize Device Wake Inband activities */ + if (!bus->inb_lock) { + bus->inb_lock = osl_spin_lock_init(bus->dhd->osh); + } +#endif + + /* XXX Temp errnum workaround: return ok, caller checks bus state */ + + /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */ + if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { + bus->use_d0_inform = TRUE; + } else { + bus->use_d0_inform = FALSE; + } + + bus->hostready_count = 0; + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + return ret; +} + +static void +dhdpcie_init_shared_addr(dhd_bus_t *bus) +{ + uint32 addr = 0; + uint32 val = 0; + addr = bus->dongle_ram_base + bus->ramsize - 4; +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); +} + +bool +dhdpcie_chipmatch(uint16 vendor, uint16 device) +{ + if (vendor != PCI_VENDOR_ID_BROADCOM) { +#ifndef DHD_EFI + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, + vendor, device)); +#endif /* DHD_EFI */ + return (-ENODEV); + } + + switch (device) { + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + case BCM43455_CHIP_ID: + case BCM43457_CHIP_ID: + case BCM43458_CHIP_ID: + case BCM4350_D11AC_ID: + case BCM4350_D11AC2G_ID: + case BCM4350_D11AC5G_ID: + case BCM4350_CHIP_ID: + case BCM4354_D11AC_ID: + case BCM4354_D11AC2G_ID: + case BCM4354_D11AC5G_ID: + case BCM4354_CHIP_ID: + case BCM4356_D11AC_ID: + case BCM4356_D11AC2G_ID: + case BCM4356_D11AC5G_ID: + case BCM4356_CHIP_ID: + case BCM4371_D11AC_ID: + case BCM4371_D11AC2G_ID: + case BCM4371_D11AC5G_ID: + case BCM4371_CHIP_ID: + case BCM4345_D11AC_ID: + case BCM4345_D11AC2G_ID: + case BCM4345_D11AC5G_ID: + case BCM43452_D11AC_ID: + case BCM43452_D11AC2G_ID: + case BCM43452_D11AC5G_ID: + case BCM4335_D11AC_ID: + case BCM4335_D11AC2G_ID: + case BCM4335_D11AC5G_ID: + case BCM4335_CHIP_ID: + case BCM43602_D11AC_ID: + case BCM43602_D11AC2G_ID: + case BCM43602_D11AC5G_ID: + case BCM43602_CHIP_ID: + case BCM43569_D11AC_ID: + case BCM43569_D11AC2G_ID: + case BCM43569_D11AC5G_ID: + case BCM43569_CHIP_ID: + /* XXX: For 4358, BCM4358_CHIP_ID is not checked intentionally as + * this is not a real chip id, but propagated from the OTP. 
+ */ + case BCM4358_D11AC_ID: + case BCM4358_D11AC2G_ID: + case BCM4358_D11AC5G_ID: + case BCM4349_D11AC_ID: + case BCM4349_D11AC2G_ID: + case BCM4349_D11AC5G_ID: + case BCM4355_D11AC_ID: + case BCM4355_D11AC2G_ID: + case BCM4355_D11AC5G_ID: + case BCM4355_CHIP_ID: + /* XXX: BCM4359_CHIP_ID is not checked intentionally as this is + * not a real chip id, but propogated from the OTP. + */ + case BCM4359_D11AC_ID: + case BCM4359_D11AC2G_ID: + case BCM4359_D11AC5G_ID: + case BCM43596_D11AC_ID: + case BCM43596_D11AC2G_ID: + case BCM43596_D11AC5G_ID: + case BCM43597_D11AC_ID: + case BCM43597_D11AC2G_ID: + case BCM43597_D11AC5G_ID: + case BCM4364_D11AC_ID: + case BCM4364_D11AC2G_ID: + case BCM4364_D11AC5G_ID: + case BCM4364_CHIP_ID: + case BCM4361_D11AC_ID: + case BCM4361_D11AC2G_ID: + case BCM4361_D11AC5G_ID: + case BCM4361_CHIP_ID: + case BCM4347_D11AC_ID: + case BCM4347_D11AC2G_ID: + case BCM4347_D11AC5G_ID: + case BCM4347_CHIP_ID: + case BCM4369_D11AX_ID: + case BCM4369_D11AX2G_ID: + case BCM4369_D11AX5G_ID: + case BCM4369_CHIP_ID: + case BCM4376_D11AX_ID: + case BCM4376_D11AX2G_ID: + case BCM4376_D11AX5G_ID: + case BCM4376_CHIP_ID: + case BCM4377_M_D11AX_ID: + case BCM4377_D11AX_ID: + case BCM4377_D11AX2G_ID: + case BCM4377_D11AX5G_ID: + case BCM4377_CHIP_ID: + case BCM4378_D11AX_ID: + case BCM4378_D11AX2G_ID: + case BCM4378_D11AX5G_ID: + case BCM4378_CHIP_ID: + case BCM4387_D11AX_ID: + case BCM4387_CHIP_ID: + case BCM4362_D11AX_ID: + case BCM4362_D11AX2G_ID: + case BCM4362_D11AX5G_ID: + case BCM4362_CHIP_ID: + case BCM4375_D11AX_ID: + case BCM4375_D11AX2G_ID: + case BCM4375_D11AX5G_ID: + case BCM4375_CHIP_ID: + case BCM43751_D11AX_ID: + case BCM43751_D11AX2G_ID: + case BCM43751_D11AX5G_ID: + case BCM43751_CHIP_ID: + case BCM43752_D11AX_ID: + case BCM43752_D11AX2G_ID: + case BCM43752_D11AX5G_ID: + case BCM43752_CHIP_ID: + case BCM4388_CHIP_ID: + case BCM4388_D11AX_ID: + case BCM4389_CHIP_ID: + case BCM4389_D11AX_ID: + case BCM4385_D11AX_ID: + case BCM4385_CHIP_ID: + +#ifdef UNRELEASEDCHIP + case BCM4397_CHIP_ID: + case BCM4397_D11AX_ID: +#endif + return 0; + default: +#ifndef DHD_EFI + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", + __FUNCTION__, vendor, device)); +#endif + return (-ENODEV); + } +} /* dhdpcie_chipmatch */ + +/** + * Name: dhdpcie_cc_nvmshadow + * + * Description: + * A shadow of OTP/SPROM exists in ChipCommon Region + * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF). + * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size + * can also be read from ChipCommon Registers. + */ +/* XXX So far tested with 4345 and 4350 (Hence the checks in the function.) 
*/
+static int
+dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
+{
+	uint16 dump_offset = 0;
+	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
+
+	/* Table for 65nm OTP Size (in bits) */
+	int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
+
+	volatile uint16 *nvm_shadow;
+
+	uint cur_coreid;
+	uint chipc_corerev;
+	chipcregs_t *chipcregs;
+
+	/* Save the current core */
+	cur_coreid = si_coreid(bus->sih);
+	/* Switch to ChipC */
+	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+	ASSERT(chipcregs != NULL);
+
+	chipc_corerev = si_corerev(bus->sih);
+
+	/* Check ChipcommonCore Rev */
+	if (chipc_corerev < 44) {
+		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check ChipID */
+	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
+		((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
+		((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
+		DHD_ERROR(("%s: the cc_nvmdump cmd is supported for Olympic chips "
+			"4350/4345/4355/4364 only\n", __FUNCTION__));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
+	if (chipcregs->sromcontrol & SRC_PRESENT) {
+		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits (0x2) */
+		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
+					>> SRC_SIZE_SHIFT))) * 1024;
+		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
+	}
+
+	/* XXX Check if OTP exists. 2 possible approaches:
+	 * 1) Check if OtpPresent in SpromCtrl (0x190 in ChipCommon Regs) is set OR
+	 * 2) Check if OtpSize > 0
+	 */
+	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
+		bcm_bprintf(b, "\nOTP Present");
+
+		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
+			== OTPL_WRAP_TYPE_40NM) {
+			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
+			/* Chipcommon rev51 is a variation on rev45 and does not support
+			 * the latest OTP configuration.
+			 */
+			if (chipc_corerev != 51 && chipc_corerev >= 49) {
+				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
+					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			} else {
+				otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
+					>> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			}
+		} else {
+			/* This part is untested since newer chips have 40nm OTP */
+			/* Chipcommon rev51 is a variation on rev45 and does not support
+			 * the latest OTP configuration.
+			 */
+			if (chipc_corerev != 51 && chipc_corerev >= 49) {
+				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
+						>> OTPL_ROW_SIZE_SHIFT];
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			} else {
+				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
+						>> CC_CAP_OTPSIZE_SHIFT];
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
+					__FUNCTION__));
+			}
+		}
+	}
+
+	/* Chipcommon rev51 is a variation on rev45 and does not support
+	 * the latest OTP configuration.
+	 */
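+
+	/*
+	 * Decode summary (an illustrative sketch, not driver code): the SPROM
+	 * size field scales by 4x per step, and 65nm OTP sizes come from the
+	 * fixed otp_size_65nm[] table above:
+	 *
+	 *	// SRC_SIZE field: 0 -> 1 Kbit, 1 -> 4 Kbit, 2 -> 16 Kbit
+	 *	static uint32 sprom_size_bits(uint32 src_size_field)
+	 *	{
+	 *		return (1u << (2 * src_size_field)) * 1024u;
+	 *	}
+	 */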
+	if (chipc_corerev != 51 && chipc_corerev >= 49) {
+		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
+			DHD_ERROR(("%s: SPROM and OTP could not be found "
+				"sromcontrol = %x, otplayout = %x \n",
+				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
+			return BCME_NOTFOUND;
+		}
+	} else {
+		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
+			DHD_ERROR(("%s: SPROM and OTP could not be found "
+				"sromcontrol = %x, capabilities = %x \n",
+				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
+			return BCME_NOTFOUND;
+		}
+	}
+
+	/* Check the strapping option in SpromCtrl: Set = OTP, otherwise SPROM */
+	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
+		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
+
+		bcm_bprintf(b, "OTP Strap selected.\n"
+			"\nOTP Shadow in ChipCommon:\n");
+
+		dump_size = otp_size / 16; /* 16bit words */
+
+	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
+		(chipcregs->sromcontrol & SRC_PRESENT)) {
+
+		bcm_bprintf(b, "SPROM Strap selected\n"
+			"\nSPROM Shadow in ChipCommon:\n");
+
+		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
+		/* dump_size in 16bit words */
+		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
+	} else {
+		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
+			__FUNCTION__));
+		return BCME_NOTFOUND;
+	}
+
+	if (bus->regs == NULL) {
+		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
+		return BCME_NOTREADY;
+	} else {
+		bcm_bprintf(b, "\n Offset:");
+
+		/* Chipcommon rev51 is a variation on rev45 and does not support
+		 * the latest OTP configuration.
+		 */
+		if (chipc_corerev != 51 && chipc_corerev >= 49) {
+			/* ChipCommon can read only 8 kbits; for ccrev >= 49 the OTP size
+			 * is around 12 kbits, so use the GCI core
+			 */
+			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
+		} else {
+			/* Point to the SPROM/OTP shadow in ChipCommon */
+			nvm_shadow = chipcregs->sromotp;
+		}
+
+		if (nvm_shadow == NULL) {
+			DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
+			return BCME_NOTFOUND;
+		}
+
+		/*
+		 * Read 16 bits / iteration.
+		 * dump_size & dump_offset in 16-bit words
+		 */
+		while (dump_offset < dump_size) {
+			if (dump_offset % 2 == 0)
+				/* Print the offset in the shadow space in Bytes */
+				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
+
+			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
+			dump_offset += 0x1;
+		}
+	}
+
+	/* Switch back to the original core */
+	si_setcore(bus->sih, cur_coreid, 0);
+
+	return BCME_OK;
+} /* dhdpcie_cc_nvmshadow */
+
+/** Flow rings are dynamically created and destroyed */
+void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
+	unsigned long flags;
+
+	queue = &flow_ring_node->queue;
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
+	 * when a new packet comes in from the network stack.
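+	 *
+	 * The cleanup that follows is the generic drain-under-lock pattern
+	 * (sketched here for illustration only): with the flowring lock held,
+	 * dequeue and free every pending packet, then reset the queue before
+	 * returning the ring to the pool:
+	 *
+	 *	// with DHD_FLOWRING_LOCK held:
+	 *	while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL)
+	 *		PKTFREE(dhdp->osh, pkt, TRUE);	// drop, don't transmit
+	 *	// queue now empty; reinit thresholds, mark the ring CLOSED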
+ */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef DHD_HP2P + if (flow_ring_node->hp2p_ring) { + if (!bus->dhd->hp2p_ring_more) { + bus->dhd->hp2p_ring_more = TRUE; + } + flow_ring_node->hp2p_ring = FALSE; + } +#endif /* DHD_HP2P */ + + /* clean up BUS level info */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + /* Reinitialise flowring's queue */ + dhd_flow_queue_reinit(bus->dhd, queue, bus->dhd->conf->flow_ring_queue_threshold); + flow_ring_node->status = FLOW_RING_STATUS_CLOSED; + flow_ring_node->active = FALSE; + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Hold flowring_list_lock to ensure no race condition while accessing the List */ + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_delete(&flow_ring_node->list); + dll_init(&flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + /* Release the flowring object back into the pool */ + dhd_prot_flowrings_pool_release(bus->dhd, + flow_ring_node->flowid, flow_ring_node->prot_info); + + /* Free the flowid back to the flowid allocator */ + dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, + flow_ring_node->flowid); +} + +/** + * Allocate a Flow ring buffer, + * Init Ring buffer, send Msg to device about flow ring creation +*/ +int +dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg) +{ + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; + + DHD_PCIE_INFO(("%s :Flow create\n", __FUNCTION__)); + + /* Send Msg to device about flow ring creation */ + if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK) + return BCME_NOMEM; + + return BCME_OK; +} + +/** Handle response from dongle on a 'flow ring create' request */ +void +dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) +{ + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_PCIE_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid)); + + /* Boundary check of the flowid */ + if (flowid > bus->dhd->max_tx_flowid) { + DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__, + flowid, bus->dhd->max_tx_flowid)); + return; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + if (!flow_ring_node) { + DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); + return; + } + + ASSERT(flow_ring_node->flowid == flowid); + if (flow_ring_node->flowid != flowid) { + DHD_ERROR(("%s: flowid %d is different from the flowid " + "of the flow_ring_node %d\n", __FUNCTION__, flowid, + flow_ring_node->flowid)); + return; + } + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow create Response failure error status = %d \n", + __FUNCTION__, status)); + /* Call Flow clean up */ + dhd_bus_clean_flow_ring(bus, flow_ring_node); + return; + } + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Now add the Flow ring node into the active list + * Note that this code to add the newly created node to the active + * list was living in dhd_flowid_lookup. But note that after + * adding the node to the active list the contents of node is being + * filled in dhd_prot_flow_ring_create. 
+ * If there is a D2H interrupt after the node gets added to the + * active list and before the node gets populated with values + * from the Bottom half dhd_update_txflowrings would be called. + * which will then try to walk through the active flow ring list, + * pickup the nodes and operate on them. Now note that since + * the function dhd_prot_flow_ring_create is not finished yet + * the contents of flow_ring_node can still be NULL leading to + * crashes. Hence the flow_ring_node should be added to the + * active list only after its truely created, which is after + * receiving the create response message from the Host. + */ + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + return; +} + +int +dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg) +{ + void * pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_PCIE_INFO(("%s :Flow Delete\n", __FUNCTION__)); + + flow_ring_node = (flow_ring_node_t *)arg; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid)); + return BCME_ERROR; + } + flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING; + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Send Msg to device about flow ring deletion */ + dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +void +dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status) +{ + flow_ring_node_t *flow_ring_node; + + DHD_PCIE_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid)); + + /* Boundary check of the flowid */ + if (flowid > bus->dhd->max_tx_flowid) { + DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__, + flowid, bus->dhd->max_tx_flowid)); + return; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + if (!flow_ring_node) { + DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); + return; + } + + ASSERT(flow_ring_node->flowid == flowid); + if (flow_ring_node->flowid != flowid) { + DHD_ERROR(("%s: flowid %d is different from the flowid " + "of the flow_ring_node %d\n", __FUNCTION__, flowid, + flow_ring_node->flowid)); + return; + } + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow Delete Response failure error status = %d \n", + __FUNCTION__, status)); + return; + } + /* Call Flow clean up */ + dhd_bus_clean_flow_ring(bus, flow_ring_node); + + return; + +} + +int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) +{ + void *pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_PCIE_INFO(("%s :Flow Flush\n", __FUNCTION__)); + + flow_ring_node = (flow_ring_node_t *)arg; + + 
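+	/*
+	 * Request/response status handshake used by the delete and flush paths
+	 * above and below (an illustrative sketch, not driver code): the
+	 * request side moves the ring from OPEN to a *_PENDING state under the
+	 * flowring lock and refuses a second request while one is outstanding;
+	 * the response handler then moves it on to CLOSED (delete) or back to
+	 * OPEN (flush):
+	 *
+	 *	// with DHD_FLOWRING_LOCK held:
+	 *	if (node->status == FLOW_RING_STATUS_DELETE_PENDING)
+	 *		return BCME_ERROR;	// already pending; caller logs
+	 *	node->status = FLOW_RING_STATUS_DELETE_PENDING;
+	 *	// ...drain the queue, unlock, then send the request message
+	 */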
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN + * once flow ring flush response is received for this flowring node. + */ + flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Send Msg to device about flow ring flush */ + dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +void +dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) +{ + flow_ring_node_t *flow_ring_node; + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow flush Response failure error status = %d \n", + __FUNCTION__, status)); + return; + } + + /* Boundary check of the flowid */ + if (flowid > bus->dhd->max_tx_flowid) { + DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__, + flowid, bus->dhd->max_tx_flowid)); + return; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + if (!flow_ring_node) { + DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); + return; + } + + ASSERT(flow_ring_node->flowid == flowid); + if (flow_ring_node->flowid != flowid) { + DHD_ERROR(("%s: flowid %d is different from the flowid " + "of the flow_ring_node %d\n", __FUNCTION__, flowid, + flow_ring_node->flowid)); + return; + } + + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + return; +} + +uint32 +dhd_bus_max_h2d_queues(struct dhd_bus *bus) +{ + return bus->max_submission_rings; +} + +/* To be symmetric with SDIO */ +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + return; +} + +void +dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) +{ + dhdp->bus->is_linkdown = val; +} + +int +dhd_bus_get_linkdown(dhd_pub_t *dhdp) +{ + return dhdp->bus->is_linkdown; +} + +int +dhd_bus_get_cto(dhd_pub_t *dhdp) +{ + return dhdp->bus->cto_triggered; +} + +#ifdef IDLE_TX_FLOW_MGMT +/* resume request */ +int +dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg) +{ + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; + + DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid)); + + flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING; + + /* Send Msg to device about flow ring resume */ + dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +/* add the node back to active flowring */ +void +dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status) +{ + + flow_ring_node_t *flow_ring_node; + + DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Error Status = %d \n", + __FUNCTION__, status)); + return; + } + + DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n", + __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len)); + + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + + dhd_bus_schedule_queue(bus, flowid, FALSE); + return; +} + +/* scan the flow rings in active list for idle time out */ +void 
+dhd_bus_check_idle_scan(dhd_bus_t *bus) +{ + uint64 time_stamp; /* in millisec */ + uint64 diff; + + time_stamp = OSL_SYSUPTIME(); + diff = time_stamp - bus->active_list_last_process_ts; + + if (diff > IDLE_FLOW_LIST_TIMEOUT) { + dhd_bus_idle_scan(bus); + bus->active_list_last_process_ts = OSL_SYSUPTIME(); + } + + return; +} + +/* scan the nodes in active list till it finds a non idle node */ +void +dhd_bus_idle_scan(dhd_bus_t *bus) +{ + dll_t *item, *prev; + flow_ring_node_t *flow_ring_node; + uint64 time_stamp, diff; + unsigned long flags; + uint16 ringid[MAX_SUSPEND_REQ]; + uint16 count = 0; + + time_stamp = OSL_SYSUPTIME(); + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + for (item = dll_tail_p(&bus->flowring_active_list); + !dll_end(&bus->flowring_active_list, item); item = prev) { + prev = dll_prev_p(item); + + flow_ring_node = dhd_constlist_to_flowring(item); + + if (flow_ring_node->flowid == (bus->max_submission_rings - 1)) + continue; + + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + /* Takes care of deleting zombie rings */ + /* delete from the active list */ + DHD_INFO(("deleting flow id %u from active list\n", + flow_ring_node->flowid)); + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + continue; + } + + diff = time_stamp - flow_ring_node->last_active_ts; + + if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) { + DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid)); + /* delete from the active list */ + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED; + ringid[count] = flow_ring_node->flowid; + count++; + if (count == MAX_SUSPEND_REQ) { + /* create a batch message now!! */ + dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); + count = 0; + } + + } else { + + /* No more scanning, break from here! 
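+ * The active list is kept most-recently-active first (nodes are
+ * prepended on activity) and this scan walks from the tail, i.e.
+ * oldest first. So once one node is found within the idle timeout,
+ * every node closer to the head is necessarily newer and the scan
+ * can stop.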
*/ + break; + } + } + + if (count) { + dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); + } + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} + +void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + dll_t* list; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + /* check if the node is already at head, otherwise delete it and prepend */ + list = dll_head_p(&bus->flowring_active_list); + if (&flow_ring_node->list != list) { + dll_delete(&flow_ring_node->list); + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + } + + /* update flow ring timestamp */ + flow_ring_node->last_active_ts = OSL_SYSUPTIME(); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} + +void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + /* update flow ring timestamp */ + flow_ring_node->last_active_ts = OSL_SYSUPTIME(); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} +void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + dll_delete(&flow_ring_node->list); +} + +void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} +#endif /* IDLE_TX_FLOW_MGMT */ + +#if defined(LINUX) || defined(linux) +int +dhdpcie_bus_start_host_dev(struct dhd_bus *bus) +{ + return dhdpcie_start_host_dev(bus); +} + +int +dhdpcie_bus_stop_host_dev(struct dhd_bus *bus) +{ + return dhdpcie_stop_host_dev(bus); +} + +int +dhdpcie_bus_disable_device(struct dhd_bus *bus) +{ + return dhdpcie_disable_device(bus); +} + +int +dhdpcie_bus_enable_device(struct dhd_bus *bus) +{ + return dhdpcie_enable_device(bus); +} + +int +dhdpcie_bus_alloc_resource(struct dhd_bus *bus) +{ + return dhdpcie_alloc_resource(bus); +} + +void +dhdpcie_bus_free_resource(struct dhd_bus *bus) +{ + dhdpcie_free_resource(bus); +} + +int +dhd_bus_request_irq(struct dhd_bus *bus) +{ + return dhdpcie_bus_request_irq(bus); +} + +bool +dhdpcie_bus_dongle_attach(struct dhd_bus *bus) +{ + return dhdpcie_dongle_attach(bus); +} + +int +dhd_bus_release_dongle(struct dhd_bus *bus) +{ + bool dongle_isolation; + osl_t *osh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + osh = bus->osh; + ASSERT(osh); + + if (bus->dhd) { +#if defined(DEBUGGER) || defined (DHD_DSCOPE) + debugger_close(); +#endif /* DEBUGGER || DHD_DSCOPE */ + + dongle_isolation = bus->dhd->dongle_isolation; + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + } + } + + return 0; +} +#endif /* LINUX || linux */ + +int +dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable) +{ + if (enable) { + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, + PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR); + } else { + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0); + } + return 0; +} + +int +dhdpcie_cto_init(struct dhd_bus *bus, bool enable) +{ + volatile void *regsva = (volatile void *)bus->regs; + uint32 val; + uint16 chipid = dhd_get_chipid(bus); + uint32 ctoctrl; + + bus->cto_enable = 
enable; + + dhdpcie_cto_cfg_init(bus, enable); + + if (enable) { + if (bus->cto_threshold == 0) { + if ((chipid == BCM4387_CHIP_ID) || + (chipid == BCM4388_CHIP_ID) || + (chipid == BCM4389_CHIP_ID)) { + bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT_REV69; + } else { + bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT; + } + } + val = ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) & + PCIE_CTO_TO_THRESHHOLD_MASK) | + ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) & + PCIE_CTO_CLKCHKCNT_MASK) | + PCIE_CTO_ENAB_MASK; + + pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, val); + } else { + pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0); + } + + ctoctrl = pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), 0, 0); + + DHD_ERROR(("%s: ctoctrl(0x%x) enable/disable %d for chipid(0x%x)\n", + __FUNCTION__, ctoctrl, bus->cto_enable, chipid)); + + return 0; +} + +static int +dhdpcie_cto_error_recovery(struct dhd_bus *bus) +{ + uint32 pci_intmask, err_status; + uint8 i = 0; + uint32 val; + + pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4); + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK); + + DHD_OS_WAKE_LOCK(bus->dhd); + + DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref)); + + /* + * DAR still accessible + */ + dhd_bus_dump_dar_registers(bus); + + /* reset backplane */ + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST); + + /* clear timeout error */ + while (1) { + err_status = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_ERRLOG(bus->sih->buscorerev), + 0, 0); + if (err_status & PCIE_CTO_ERR_MASK) { + si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_ERRLOG(bus->sih->buscorerev), + ~0, PCIE_CTO_ERR_MASK); + } else { + break; + } + OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000); + i++; + if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) { + DHD_ERROR(("cto recovery fail\n")); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return BCME_ERROR; + } + } + + /* clear interrupt status */ + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK); + + /* Halt ARM & remove reset */ + /* TBD : we can add ARM Halt here in case */ + + /* reset SPROM_CFG_TO_SB_RST */ + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + + DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n", + PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val)); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST); + + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n", + PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val)); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + + return BCME_OK; +} + +void +dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus) +{ + uint32 val; + + val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, + val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT)); +} + +#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING) +/* + * XXX: WAR: Update dongle that driver supports sending of d11 + * tx_status through unused status field of PCIe completion header + * if dongle also supports the same WAR. 
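+ * The handshake is two-way: the dongle advertises support by setting
+ * PCIE_SHARED2_D2H_D11_TX_STATUS in the shared flags2 word, and the
+ * host acknowledges by writing PCIE_SHARED2_H2D_D11_TX_STATUS back
+ * into pciedev_shared_t.flags2 in dongle memory (see
+ * dhdpcie_init_d11status() below).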
+ */
+static int
+dhdpcie_init_d11status(struct dhd_bus *bus)
+{
+ uint32 addr;
+ uint32 flags2;
+ int ret = 0;
+
+ if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
+ flags2 = bus->pcie_sh->flags2;
+ addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
+ flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
+ ret = dhdpcie_bus_membytes(bus, TRUE, addr,
+ (uint8 *)&flags2, sizeof(flags2));
+ if (ret < 0) {
+ DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
+ __FUNCTION__));
+ return ret;
+ }
+ bus->pcie_sh->flags2 = flags2;
+ bus->dhd->d11_tx_status = TRUE;
+ }
+ return ret;
+}
+
+#else
+static int
+dhdpcie_init_d11status(struct dhd_bus *bus)
+{
+ return 0;
+}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+
+int
+dhdpcie_get_max_eventbufpost(struct dhd_bus *bus)
+{
+ int evt_buf_pool = EVENT_BUF_POOL_LOW;
+ /* The pool size is a 2-bit field; decode it by value rather than by
+ * testing individual bits, otherwise the 0x3 (HIGHEST) encoding would
+ * be misread as 0x1 (MEDIUM) and could never be reached.
+ */
+ uint32 pool_max = (bus->pcie_sh->flags2 >>
+ PCIE_SHARED_EVENT_BUF_POOL_MAX_POS) & 0x3;
+
+ if (pool_max == 0x1) {
+ evt_buf_pool = EVENT_BUF_POOL_MEDIUM;
+ } else if (pool_max == 0x2) {
+ evt_buf_pool = EVENT_BUF_POOL_HIGH;
+ } else if (pool_max == 0x3) {
+ evt_buf_pool = EVENT_BUF_POOL_HIGHEST;
+ }
+ return evt_buf_pool;
+}
+
+int
+dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+ int err = 0;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ err = dhdpcie_oob_intr_register(dhdp->bus);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ return err;
+}
+
+void
+dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_oob_intr_unregister(dhdp->bus);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+}
+
+void
+dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_oob_intr_set(dhdp->bus, enable);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+}
+
+int
+dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp)
+{
+ int irq_num = 0;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ irq_num = dhdpcie_get_oob_irq_num(dhdp->bus);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ return irq_num;
+}
+
+#ifdef BCMDBG
+void
+dhd_bus_flow_ring_cnt_update(dhd_bus_t *bus, uint16 flowid, uint32 txstatus)
+{
+ flow_ring_node_t *flow_ring_node;
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ dhd_awdl_stats_t *awdl_stats;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+ uint8 ifindex;
+ uint8 role;
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+ /* If we have d2h sync enabled due to marker overloading, we cannot update this. */
+ if (bus->dhd->d2h_sync_mode)
+ return;
+ if (txstatus >= DHD_MAX_TX_STATUS_MSGS) {
+ /*
+ * XXX: changed DHD_ERROR to DHD_INFO
+ * There is a flood of these messages with P2P FW;
+ * it is being root-caused.
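+ * Values at or above DHD_MAX_TX_STATUS_MSGS are only logged; the
+ * per-flowring tx_status histogram below is not updated for them.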
+ */ + DHD_INFO(("%s Unknown txtstatus = %d \n", + __FUNCTION__, txstatus)); + return; + } + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + flow_ring_node->flow_info.tx_status[txstatus]++; +#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) + if_flow_lkup = (if_flow_lkup_t *)bus->dhd->if_flow_lkup; + ifindex = flow_ring_node->flow_info.ifindex; + role = if_flow_lkup[ifindex].role; + if (role == WLC_E_IF_ROLE_AWDL) { + DHD_AWDL_STATS_LOCK(bus->dhd->awdl_stats_lock, flags); + awdl_stats = &bus->dhd->awdl_stats[bus->dhd->awdl_tx_status_slot]; + awdl_stats->tx_status[txstatus]++; + DHD_AWDL_STATS_UNLOCK(bus->dhd->awdl_stats_lock, flags); + } +#endif /* DHD_AWDL && AWDL_SLOT_STATS */ + return; +} +#endif /* BCMDBG */ + +bool +dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus) +{ + return bus->dhd->d2h_hostrdy_supported; +} + +void +dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr) +{ + dhd_bus_t *bus = pub->bus; + uint32 coreoffset = index << 12; + uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset; + uint32 value; + + while (first_addr <= last_addr) { + core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr; + if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + } + DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value)); + first_addr = first_addr + 4; + } +} + +bool +dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + else if (bus->idma_enabled) { + return bus->dhd->idma_enable; + } else { + return FALSE; + } +} + +bool +dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + else if (bus->ifrm_enabled) { + return bus->dhd->ifrm_enable; + } else { + return FALSE; + } +} + +bool +dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) { + return FALSE; + } else if (bus->dar_enabled) { + return bus->dhd->dar_enable; + } else { + return FALSE; + } +} + +#ifdef DHD_HP2P +bool +dhdpcie_bus_get_hp2p_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) { + return FALSE; + } else if (bus->dhd->hp2p_enable) { + return bus->dhd->hp2p_capable; + } else { + return FALSE; + } +} +#endif /* DHD_HP2P */ + +#ifdef PCIE_OOB +bool +dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + if (bus->oob_enabled) { + return !bus->dhd->d2h_no_oob_dw; + } else { + return FALSE; + } +} +#endif /* PCIE_OOB */ + +void +dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option) +{ + DHD_ERROR(("ENABLING DW:%d\n", dw_option)); + bus->dw_option = dw_option; +} + +#ifdef PCIE_INB_DW +bool +dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + if (bus->inb_enabled) { + return bus->dhd->d2h_inband_dw; + } else { + return FALSE; + } +} + +void +dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state) +{ + if (!INBAND_DW_ENAB(bus)) + return; + + DHD_PCIE_INFO(("%s:%d\n", __FUNCTION__, state)); + bus->dhd->ds_state = state; + if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) { + bus->ds_exit_timeout = 100; + } + if (state == DW_DEVICE_HOST_WAKE_WAIT) { + bus->host_sleep_exit_timeout = 100; + } + if (state == DW_DEVICE_DS_DEV_WAKE) { + bus->ds_exit_timeout = 0; + } + if (state == DW_DEVICE_DS_ACTIVE) { + bus->host_sleep_exit_timeout = 0; + } +} + +enum dhd_bus_ds_state +dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus) +{ + if 
(!INBAND_DW_ENAB(bus)) + return DW_DEVICE_DS_INVALID; + return bus->dhd->ds_state; +} +#endif /* PCIE_INB_DW */ + +#ifdef DHD_MMIO_TRACE +static void +dhd_bus_mmio_trace(dhd_bus_t *bus, uint32 addr, uint32 value, bool set) +{ + uint32 cnt = bus->mmio_trace_count % MAX_MMIO_TRACE_SIZE; + uint64 ts_cur = OSL_LOCALTIME_NS(); + uint32 tmp_cnt; + + tmp_cnt = (bus->mmio_trace_count) ? ((bus->mmio_trace_count - 1) + % MAX_MMIO_TRACE_SIZE) : cnt; + + if (((DIV_U64_BY_U64(ts_cur, NSEC_PER_USEC) - + DIV_U64_BY_U64(bus->mmio_trace[tmp_cnt].timestamp, NSEC_PER_USEC)) + > MIN_MMIO_TRACE_TIME) || (bus->mmio_trace[tmp_cnt].value != + (value & DHD_RING_IDX))) { + bus->mmio_trace_count++; + } else { + cnt = tmp_cnt; + } + bus->mmio_trace[cnt].timestamp = ts_cur; + bus->mmio_trace[cnt].addr = addr; + bus->mmio_trace[cnt].set = set; + bus->mmio_trace[cnt].value = value; +} + +void +dhd_dump_bus_mmio_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + int dumpsz; + int i; + + dumpsz = bus->mmio_trace_count < MAX_MMIO_TRACE_SIZE ? + bus->mmio_trace_count : MAX_MMIO_TRACE_SIZE; + if (dumpsz == 0) { + bcm_bprintf(strbuf, "\nEmpty MMIO TRACE\n"); + return; + } + bcm_bprintf(strbuf, "---- MMIO TRACE ------\n"); + bcm_bprintf(strbuf, "Decoding value field, Ex: 0xFF2C00E4, 0xFF->WR/0XDD->RD " + "0x2C->Ringid 0x00E4->RD/WR Value\n"); + bcm_bprintf(strbuf, "Timestamp ns\t\tAddr\t\tW/R\tValue\n"); + for (i = 0; i < dumpsz; i ++) { + bcm_bprintf(strbuf, SEC_USEC_FMT"\t0x%08x\t%s\t0x%08x\n", + GET_SEC_USEC(bus->mmio_trace[i].timestamp), + bus->mmio_trace[i].addr, + bus->mmio_trace[i].set ? "W" : "R", + bus->mmio_trace[i].value); + } +} +#endif /* defined(DHD_MMIO_TRACE) */ + +static void +#ifdef PCIE_INB_DW +dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h, enum dhd_bus_ds_state inbstate) +#else +dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h) +#endif /* PCIE_INB_DW */ +{ + uint32 cnt = bus->ds_trace_count % MAX_DS_TRACE_SIZE; + + bus->ds_trace[cnt].timestamp = OSL_LOCALTIME_NS(); + bus->ds_trace[cnt].d2h = d2h; + bus->ds_trace[cnt].dsval = dsval; +#ifdef PCIE_INB_DW + bus->ds_trace[cnt].inbstate = inbstate; +#endif /* PCIE_INB_DW */ + bus->ds_trace_count ++; +} + +#ifdef PCIE_INB_DW +const char * +dhd_convert_dsval(uint32 val, bool d2h) +{ + if (d2h) { + switch (val) { + case D2H_DEV_D3_ACK: + return "D2H_DEV_D3_ACK"; + case D2H_DEV_DS_ENTER_REQ: + return "D2H_DEV_DS_ENTER_REQ"; + case D2H_DEV_DS_EXIT_NOTE: + return "D2H_DEV_DS_EXIT_NOTE"; + case D2H_DEV_FWHALT: + return "D2H_DEV_FWHALT"; + case D2HMB_DS_HOST_SLEEP_EXIT_ACK: + return "D2HMB_DS_HOST_SLEEP_EXIT_ACK"; + default: + return "INVALID"; + } + } else { + switch (val) { + case H2DMB_DS_DEVICE_WAKE_DEASSERT: + return "H2DMB_DS_DEVICE_WAKE_DEASSERT"; + case H2DMB_DS_DEVICE_WAKE_ASSERT: + return "H2DMB_DS_DEVICE_WAKE_ASSERT"; + case H2D_HOST_D3_INFORM: + return "H2D_HOST_D3_INFORM"; + case H2D_HOST_DS_ACK: + return "H2D_HOST_DS_ACK"; + case H2D_HOST_DS_NAK: + return "H2D_HOST_DS_NAK"; + case H2D_HOST_CONS_INT: + return "H2D_HOST_CONS_INT"; + case H2D_FW_TRAP: + return "H2D_FW_TRAP"; + default: + return "INVALID"; + } + } +} + +const char * +dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate) +{ + switch (inbstate) { + case DW_DEVICE_DS_DEV_SLEEP: + return "DW_DEVICE_DS_DEV_SLEEP"; + break; + case DW_DEVICE_DS_DISABLED_WAIT: + return "DW_DEVICE_DS_DISABLED_WAIT"; + break; + case DW_DEVICE_DS_DEV_WAKE: + return "DW_DEVICE_DS_DEV_WAKE"; + break; + case DW_DEVICE_DS_ACTIVE: + return "DW_DEVICE_DS_ACTIVE"; + break; + case 
DW_DEVICE_HOST_SLEEP_WAIT: + return "DW_DEVICE_HOST_SLEEP_WAIT"; + break; + case DW_DEVICE_HOST_SLEEP: + return "DW_DEVICE_HOST_SLEEP"; + break; + case DW_DEVICE_HOST_WAKE_WAIT: + return "DW_DEVICE_HOST_WAKE_WAIT"; + break; + case DW_DEVICE_DS_D3_INFORM_WAIT: + return "DW_DEVICE_DS_D3_INFORM_WAIT"; + break; + default: + return "INVALID"; + } +} +#endif /* PCIE_INB_DW */ + +void +dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + int dumpsz; + int i; + + dumpsz = bus->ds_trace_count < MAX_DS_TRACE_SIZE ? + bus->ds_trace_count : MAX_DS_TRACE_SIZE; + if (dumpsz == 0) { + bcm_bprintf(strbuf, "\nEmpty DS TRACE\n"); + return; + } + bcm_bprintf(strbuf, "---- DS TRACE ------\n"); +#ifdef PCIE_INB_DW + bcm_bprintf(strbuf, "%s\t\t%s\t%-30s\t\t%s\n", + "Timestamp us", "Dir", "Value", "Inband-State"); + for (i = 0; i < dumpsz; i ++) { + bcm_bprintf(strbuf, "%llu\t%s\t%-30s\t\t%s\n", + bus->ds_trace[i].timestamp, + bus->ds_trace[i].d2h ? "D2H":"H2D", + dhd_convert_dsval(bus->ds_trace[i].dsval, bus->ds_trace[i].d2h), + dhd_convert_inb_state_names(bus->ds_trace[i].inbstate)); + } +#else + bcm_bprintf(strbuf, "Timestamp us\t\tDir\tValue\n"); + for (i = 0; i < dumpsz; i ++) { + bcm_bprintf(strbuf, "%llu\t%s\t%d\n", + bus->ds_trace[i].timestamp, + bus->ds_trace[i].d2h ? "D2H":"H2D", + bus->ds_trace[i].dsval); + } +#endif /* PCIE_INB_DW */ + bcm_bprintf(strbuf, "--------------------------\n"); +} + +void +dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + trap_t *tr = &bus->dhd->last_trap_info; + bcm_bprintf(strbuf, + "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + " lp 0x%x, rpc 0x%x" + "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, " + "r10 0x%x, r11 0x%x, r12 0x%x\n\n", + ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr), + ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc), + ltoh32(bus->pcie_sh->trap_addr), + ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), + ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7), + ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10), + ltoh32(tr->r11), ltoh32(tr->r12)); +} + +int +dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read) +{ + int bcmerror = 0; + struct dhd_bus *bus = dhdp->bus; + + if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + + return bcmerror; +} + +int +dhd_get_idletime(dhd_pub_t *dhd) +{ + return dhd->bus->idletime; +} + +bool +dhd_get_rpm_state(dhd_pub_t *dhd) +{ + return dhd->bus->rpm_enabled; +} + +void +dhd_set_rpm_state(dhd_pub_t *dhd, bool state) +{ + DHD_RPM(("%s: %d\n", __FUNCTION__, state)); + dhd->bus->rpm_enabled = state; +} + +static INLINE void +dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read) +{ + OSL_DELAY(1); + if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) { + DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr)); + } else { + DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read)); + } + return; +} + +#ifdef DHD_SSSR_DUMP +static int +dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, + uint addr_reg, uint data_reg) +{ + uint addr; + uint val = 0; + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + if (!buf) { + DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!fifo_size) { + DHD_ERROR(("%s: fifo_size is 0\n", 
__FUNCTION__)); + return BCME_ERROR; + } + + /* Set the base address offset to 0 */ + addr = addr_reg; + val = 0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + addr = data_reg; + /* Read 4 bytes at once and loop for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++) { + if (serialized_backplane_access(dhd->bus, addr, + sizeof(uint), &val, TRUE) != BCME_OK) { + DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__)); + return BCME_ERROR; + } + buf[i] = val; + OSL_DELAY(1); + } + return BCME_OK; +} + +static int +dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, + uint addr_reg) +{ + uint addr; + uint val = 0; + int i; + si_t *sih = dhd->bus->sih; + bool vasip_enab, dig_mem_check; + uint32 ioctrl_addr = 0; + + DHD_ERROR(("%s addr_reg=0x%x size=0x%x\n", __FUNCTION__, addr_reg, fifo_size)); + + if (!buf) { + DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!fifo_size) { + DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__)); + return BCME_ERROR; + } + + vasip_enab = FALSE; + dig_mem_check = FALSE; + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + if ((dhd->sssr_reg_info->rev2.length > OFFSETOF(sssr_reg_info_v2_t, + dig_mem_info)) && dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) { + dig_mem_check = TRUE; + } + break; + case SSSR_REG_INFO_VER_1 : + if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) { + vasip_enab = TRUE; + } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t, + dig_mem_info)) && dhd->sssr_reg_info->rev1. + dig_mem_info.dig_sr_size) { + dig_mem_check = TRUE; + } + ioctrl_addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl; + break; + case SSSR_REG_INFO_VER_0 : + if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) { + vasip_enab = TRUE; + } + ioctrl_addr = dhd->sssr_reg_info->rev0.vasip_regs.wrapper_regs.ioctrl; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + if (addr_reg) { + DHD_ERROR(("dig_mem_check=%d vasip_enab=%d\n", dig_mem_check, vasip_enab)); + if (!vasip_enab && dig_mem_check) { + int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, + fifo_size); + if (err != BCME_OK) { + DHD_ERROR(("%s: Error reading dig dump from dongle !\n", + __FUNCTION__)); + } + } else { + /* Check if vasip clk is disabled, if yes enable it */ + addr = ioctrl_addr; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!val) { + val = 1; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + + addr = addr_reg; + /* Read 4 bytes at once and loop for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++, addr += 4) { + if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), + &val, TRUE) != BCME_OK) { + DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__, + addr)); + return BCME_ERROR; + } + buf[i] = val; + OSL_DELAY(1); + } + } + } else { + uint cur_coreid; + uint chipc_corerev; + chipcregs_t *chipcregs; + + /* Save the current core */ + cur_coreid = si_coreid(sih); + + /* Switch to ChipC */ + chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if (!chipcregs) { + DHD_ERROR(("%s: si_setcore returns NULL for core id %u \n", + __FUNCTION__, CC_CORE_ID)); + return BCME_ERROR; + } + + chipc_corerev = si_corerev(sih); + + if ((chipc_corerev == 64) || (chipc_corerev == 65)) { + W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0); + + /* Read 4 bytes at once and loop 
for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++) { + buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data); + OSL_DELAY(1); + } + } + + /* Switch back to the original core */ + si_setcore(sih, cur_coreid, 0); + } + + return BCME_OK; +} + +#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS) +void +dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, + uint8 *ext_trap_data, void *event_decode_data) +{ + hnd_ext_trap_hdr_t *hdr = NULL; + bcm_tlv_t *tlv; + eventlog_trapdata_info_t *etd_evtlog = NULL; + eventlog_trap_buf_info_t *evtlog_buf_arr = NULL; + uint arr_size = 0; + int i = 0; + int err = 0; + uint32 seqnum = 0; + + if (!ext_trap_data || !event_decode_data || !dhd) + return; + + if (!dhd->concise_dbg_buf) + return; + + /* First word is original trap_data, skip */ + ext_trap_data += sizeof(uint32); + + hdr = (hnd_ext_trap_hdr_t *)ext_trap_data; + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA); + if (tlv) { + uint32 baseaddr = 0; + uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4; + + etd_evtlog = (eventlog_trapdata_info_t *)tlv->data; + DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; " + "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__, + (etd_evtlog->num_elements), + ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr))); + if (!etd_evtlog->num_elements || + etd_evtlog->num_elements > MAX_EVENTLOG_BUFFERS) { + DHD_ERROR(("%s: ETD has bad 'num_elements' !\n", __FUNCTION__)); + return; + } + if (!etd_evtlog->log_arr_addr) { + DHD_ERROR(("%s: ETD has bad 'log_arr_addr' !\n", __FUNCTION__)); + return; + } + + arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements); + evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size); + if (!evtlog_buf_arr) { + DHD_ERROR(("%s: out of memory !\n", __FUNCTION__)); + return; + } + + /* boundary check */ + baseaddr = etd_evtlog->log_arr_addr; + if ((baseaddr < dhd->bus->dongle_ram_base) || + ((baseaddr + arr_size) > endaddr)) { + DHD_ERROR(("%s: Error reading invalid address\n", + __FUNCTION__)); + goto err; + } + + /* read the eventlog_trap_buf_info_t array from dongle memory */ + err = dhdpcie_bus_membytes(dhd->bus, FALSE, + (ulong)(etd_evtlog->log_arr_addr), + (uint8 *)evtlog_buf_arr, arr_size); + if (err != BCME_OK) { + DHD_ERROR(("%s: Error reading event log array from dongle !\n", + __FUNCTION__)); + goto err; + } + /* ntoh is required only for seq_num, because in the original + * case of event logs from info ring, it is sent from dongle in that way + * so for ETD also dongle follows same convention + */ + seqnum = ntoh32(etd_evtlog->seq_num); + memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN); + for (i = 0; i < (etd_evtlog->num_elements); ++i) { + /* boundary check */ + baseaddr = evtlog_buf_arr[i].buf_addr; + if ((baseaddr < dhd->bus->dongle_ram_base) || + ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) { + DHD_ERROR(("%s: Error reading invalid address\n", + __FUNCTION__)); + goto err; + } + /* read each individual event log buf from dongle memory */ + err = dhdpcie_bus_membytes(dhd->bus, FALSE, + ((ulong)evtlog_buf_arr[i].buf_addr), + dhd->concise_dbg_buf, (evtlog_buf_arr[i].len)); + if (err != BCME_OK) { + DHD_ERROR(("%s: Error reading event log buffer from dongle !\n", + __FUNCTION__)); + goto err; + } + dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf, + event_decode_data, (evtlog_buf_arr[i].len), + FALSE, hton32(seqnum)); + ++seqnum; + } +err: + MFREE(dhd->osh, evtlog_buf_arr, arr_size); + } else { + DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__)); + } +} +#endif 
/* BCMPCIE && EWP_ETD_PRSRV_LOGS */
+
+static uint32
+dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
+{
+ uint addr;
+ uint val = 0;
+ uint powerctrl_mask;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structure v0 and v1 shares most except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev2.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev1.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* conditionally clear bits [11:8] of PowerCtrl */
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
+
+ if (!(val & powerctrl_mask)) {
+ dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
+ }
+ return BCME_OK;
+}
+
+static uint32
+dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
+{
+ uint addr;
+ uint val = 0, reg_val = 0;
+ uint powerctrl_mask;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structure v0 and v1 shares most except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev2.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev1.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* conditionally clear bits [11:8] of PowerCtrl */
+ dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
+ if (reg_val & powerctrl_mask) {
+ val = 0;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+ }
+ return reg_val;
+}
+
+static int
+dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
+{
+ uint addr;
+ uint val;
+ uint32 cc_intmask, pmuintmask0, pmuintmask1, resreqtimer, macresreqtimer,
+ macresreqtimer1, vasip_sr_size = 0;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structure v0 and v1 shares most except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ cc_intmask = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.intmask;
+ pmuintmask0 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask0;
+ pmuintmask1 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask1;
+ resreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.resreqtimer;
+ macresreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.macresreqtimer;
+ macresreqtimer1 = dhd->sssr_reg_info->rev2.
+ pmu_regs.base_regs.macresreqtimer1; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + cc_intmask = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.intmask; + pmuintmask0 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask0; + pmuintmask1 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask1; + resreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.resreqtimer; + macresreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.macresreqtimer; + macresreqtimer1 = dhd->sssr_reg_info->rev1. + pmu_regs.base_regs.macresreqtimer1; + vasip_sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + /* clear chipcommon intmask */ + val = 0x0; + dhd_sbreg_op(dhd, cc_intmask, &val, FALSE); + + /* clear PMUIntMask0 */ + val = 0x0; + dhd_sbreg_op(dhd, pmuintmask0, &val, FALSE); + + /* clear PMUIntMask1 */ + val = 0x0; + dhd_sbreg_op(dhd, pmuintmask1, &val, FALSE); + + /* clear res_req_timer */ + val = 0x0; + dhd_sbreg_op(dhd, resreqtimer, &val, FALSE); + + /* clear macresreqtimer */ + val = 0x0; + dhd_sbreg_op(dhd, macresreqtimer, &val, FALSE); + + /* clear macresreqtimer1 */ + val = 0x0; + dhd_sbreg_op(dhd, macresreqtimer1, &val, FALSE); + + /* clear VasipClkEn */ + if (vasip_sr_size) { + addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + + return BCME_OK; +} + +static void +dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd) +{ +#define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1) +#define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4) + uint trap_data_mask[MAX_NUM_D11CORES] = + {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK}; + int i; + /* Apply only for 4375 chip */ + if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) { + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i] && + (dhd->dongle_trap_data & trap_data_mask[i])) { + dhd->sssr_d11_outofreset[i] = TRUE; + } else { + dhd->sssr_d11_outofreset[i] = FALSE; + } + DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with " + "trap_data:0x%x-0x%x\n", + __FUNCTION__, i, dhd->sssr_d11_outofreset[i], + dhd->dongle_trap_data, trap_data_mask[i])); + } + } +} + +static int +dhdpcie_d11_check_outofreset(dhd_pub_t *dhd) +{ + int i; + uint addr = 0; + uint val = 0; + uint8 num_d11cores; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + num_d11cores = dhd_d11_slices_num_get(dhd); + + for (i = 0; i < num_d11cores; i++) { + /* Check if bit 0 of resetctrl is cleared */ + /* SSSR register information structure v0 and + * v1 shares most except dig_mem + */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + addr = dhd->sssr_reg_info->rev2. + mac_regs[i].wrapper_regs.resetctrl; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + addr = dhd->sssr_reg_info->rev1. 
+ mac_regs[i].wrapper_regs.resetctrl; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + if (!addr) { + DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n", + __FUNCTION__, i)); + continue; + } + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & 1)) { + dhd->sssr_d11_outofreset[i] = TRUE; + } else { + dhd->sssr_d11_outofreset[i] = FALSE; + } + DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n", + __FUNCTION__, i, dhd->sssr_d11_outofreset[i])); + } + /* XXX Temporary WAR for 4375 to handle AXI errors on bad core + * to not collect SSSR dump for the core whose bit is not set in trap_data. + * It will be reverted once AXI errors are fixed + */ + dhdpcie_update_d11_status_from_trapdata(dhd); + + return BCME_OK; +} + +static int +dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd) +{ + int i; + uint val = 0; + uint8 num_d11cores; + uint32 clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + num_d11cores = dhd_d11_slices_num_get(dhd); + + for (i = 0; i < num_d11cores; i++) { + if (dhd->sssr_d11_outofreset[i]) { + /* clear request clk only if itopoobb/extrsrcreqs is non zero */ + /* SSSR register information structure v0 and + * v1 shares most except dig_mem + */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + clockrequeststatus = dhd->sssr_reg_info->rev2. + mac_regs[i].wrapper_regs.extrsrcreq; + clockcontrolstatus = dhd->sssr_reg_info->rev2. + mac_regs[i].base_regs.clockcontrolstatus; + clockcontrolstatus_val = dhd->sssr_reg_info->rev2. + mac_regs[i].base_regs.clockcontrolstatus_val; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + clockrequeststatus = dhd->sssr_reg_info->rev1. + mac_regs[i].wrapper_regs.itopoobb; + clockcontrolstatus = dhd->sssr_reg_info->rev1. + mac_regs[i].base_regs.clockcontrolstatus; + clockcontrolstatus_val = dhd->sssr_reg_info->rev1. + mac_regs[i].base_regs.clockcontrolstatus_val; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE); + if (val != 0) { + /* clear clockcontrolstatus */ + dhd_sbreg_op(dhd, clockcontrolstatus, + &clockcontrolstatus_val, FALSE); + } + } + } + return BCME_OK; +} + +static int +dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd) +{ + uint val = 0; + uint cfgval = 0; + uint32 resetctrl, clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + resetctrl = dhd->sssr_reg_info->rev2. + arm_regs.wrapper_regs.resetctrl; + clockrequeststatus = dhd->sssr_reg_info->rev2. + arm_regs.wrapper_regs.extrsrcreq; + clockcontrolstatus = dhd->sssr_reg_info->rev2. + arm_regs.base_regs.clockcontrolstatus; + clockcontrolstatus_val = dhd->sssr_reg_info->rev2. + arm_regs.base_regs.clockcontrolstatus_val; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + resetctrl = dhd->sssr_reg_info->rev1. + arm_regs.wrapper_regs.resetctrl; + clockrequeststatus = dhd->sssr_reg_info->rev1. + arm_regs.wrapper_regs.itopoobb; + clockcontrolstatus = dhd->sssr_reg_info->rev1. + arm_regs.base_regs.clockcontrolstatus; + clockcontrolstatus_val = dhd->sssr_reg_info->rev1. 
+ arm_regs.base_regs.clockcontrolstatus_val; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + /* Check if bit 0 of resetctrl is cleared */ + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + if (!(val & 1)) { + /* clear request clk only if itopoobb/extrsrcreqs is non zero */ + dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE); + if (val != 0) { + /* clear clockcontrolstatus */ + dhd_sbreg_op(dhd, clockcontrolstatus, &clockcontrolstatus_val, FALSE); + } + + if (MULTIBP_ENAB(dhd->bus->sih)) { + /* Clear coherent bits for CA7 because CPU is halted */ + if (dhd->bus->coreid == ARMCA7_CORE_ID) { + cfgval = dhdpcie_bus_cfg_read_dword(dhd->bus, + PCIE_CFG_SUBSYSTEM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, + (cfgval & ~PCIE_BARCOHERENTACCEN_MASK)); + } + + /* Just halt ARM but do not reset the core */ + resetctrl &= ~(SI_CORE_SIZE - 1); + resetctrl += OFFSETOF(aidmp_t, ioctrl); + + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + val |= SICF_CPUHALT; + dhd_sbreg_op(dhd, resetctrl, &val, FALSE); + } + } + + return BCME_OK; +} + +static int +dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd) +{ + uint val = 0; + uint32 resetctrl; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + resetctrl = dhd->sssr_reg_info->rev2. + arm_regs.wrapper_regs.resetctrl; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + resetctrl = dhd->sssr_reg_info->rev1. + arm_regs.wrapper_regs.resetctrl; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + /* Check if bit 0 of resetctrl is cleared */ + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + if (!(val & 1)) { + if (MULTIBP_ENAB(dhd->bus->sih) && (dhd->bus->coreid != ARMCA7_CORE_ID)) { + /* Take ARM out of halt but do not reset core */ + resetctrl &= ~(SI_CORE_SIZE - 1); + resetctrl += OFFSETOF(aidmp_t, ioctrl); + + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + val &= ~SICF_CPUHALT; + dhd_sbreg_op(dhd, resetctrl, &val, FALSE); + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + } + } + + return BCME_OK; +} + +static int +dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd) +{ + uint val = 0; + uint32 clockrequeststatus, clockcontrolstatus_addr, clockcontrolstatus_val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + clockrequeststatus = dhd->sssr_reg_info->rev2. + pcie_regs.wrapper_regs.extrsrcreq; + clockcontrolstatus_addr = dhd->sssr_reg_info->rev2. + pcie_regs.base_regs.clockcontrolstatus; + clockcontrolstatus_val = dhd->sssr_reg_info->rev2. + pcie_regs.base_regs.clockcontrolstatus_val; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + clockrequeststatus = dhd->sssr_reg_info->rev1. + pcie_regs.wrapper_regs.itopoobb; + clockcontrolstatus_addr = dhd->sssr_reg_info->rev1. + pcie_regs.base_regs.clockcontrolstatus; + clockcontrolstatus_val = dhd->sssr_reg_info->rev1. 
+ pcie_regs.base_regs.clockcontrolstatus_val; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + /* clear request clk only if itopoobb/extrsrcreqs is non zero */ + dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE); + if (val) { + /* clear clockcontrolstatus */ + dhd_sbreg_op(dhd, clockcontrolstatus_addr, &clockcontrolstatus_val, FALSE); + } + return BCME_OK; +} + +static int +dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* SSSR register information structure v0 and v1 shares most except dig_mem */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + addr = dhd->sssr_reg_info->rev2.pcie_regs.base_regs.ltrstate; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + addr = dhd->sssr_reg_info->rev1.pcie_regs.base_regs.ltrstate; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + + val = LTR_ACTIVE; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = LTR_SLEEP; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + return BCME_OK; +} + +static int +dhdpcie_clear_clk_req(dhd_pub_t *dhd) +{ + DHD_ERROR(("%s\n", __FUNCTION__)); + + dhdpcie_arm_clear_clk_req(dhd); + + dhdpcie_d11_clear_clk_req(dhd); + + dhdpcie_pcie_clear_clk_req(dhd); + + return BCME_OK; +} + +static int +dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd) +{ + int i; + uint val = 0; + uint8 num_d11cores; + uint32 resetctrl_addr, ioctrl_addr, ioctrl_resetseq_val0, ioctrl_resetseq_val1, + ioctrl_resetseq_val2, ioctrl_resetseq_val3, ioctrl_resetseq_val4; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + num_d11cores = dhd_d11_slices_num_get(dhd); + + for (i = 0; i < num_d11cores; i++) { + if (dhd->sssr_d11_outofreset[i]) { + /* SSSR register information structure v0 and v1 shares + * most except dig_mem + */ + switch (dhd->sssr_reg_info->rev2.version) { + case SSSR_REG_INFO_VER_3 : + /* intentional fall through */ + case SSSR_REG_INFO_VER_2 : + resetctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i]. + wrapper_regs.resetctrl; + ioctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i]. + wrapper_regs.ioctrl; + ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev2. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0]; + ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev2. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1]; + ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev2. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2]; + ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev2. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3]; + ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev2. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4]; + break; + case SSSR_REG_INFO_VER_1 : + case SSSR_REG_INFO_VER_0 : + resetctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i]. + wrapper_regs.resetctrl; + ioctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i]. + wrapper_regs.ioctrl; + ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev1. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0]; + ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev1. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1]; + ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev1. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2]; + ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev1. + mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3]; + ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev1. 
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4]; + break; + default : + DHD_ERROR(("invalid sssr_reg_ver")); + return BCME_UNSUPPORTED; + } + /* disable core by setting bit 0 */ + val = 1; + dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE); + OSL_DELAY(6000); + + dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val0, FALSE); + + dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val1, FALSE); + + /* enable core by clearing bit 0 */ + val = 0; + dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE); + + dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val2, FALSE); + + dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val3, FALSE); + + dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val4, FALSE); + } + } + return BCME_OK; +} + +#ifdef DHD_SSSR_DUMP_BEFORE_SR +static int +dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd) +{ + int i; + uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr; + uint8 num_d11cores; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + num_d11cores = dhd_d11_slices_num_get(dhd); + + for (i = 0; i < num_d11cores; i++) { + if (dhd->sssr_d11_outofreset[i]) { + sr_size = dhd_sssr_mac_buf_size(dhd, i); + xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i); + xmtdata = dhd_sssr_mac_xmtdata(dhd, i); + dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i], + sr_size, xmtaddress, xmtdata); + } + } + + dig_buf_size = dhd_sssr_dig_buf_size(dhd); + dig_buf_addr = dhd_sssr_dig_buf_addr(dhd); + if (dig_buf_size) { + dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before, + dig_buf_size, dig_buf_addr); + } + + return BCME_OK; +} +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + +static int +dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd) +{ + int i; + uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr; + uint8 num_d11cores; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + num_d11cores = dhd_d11_slices_num_get(dhd); + + for (i = 0; i < num_d11cores; i++) { + if (dhd->sssr_d11_outofreset[i]) { + sr_size = dhd_sssr_mac_buf_size(dhd, i); + xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i); + xmtdata = dhd_sssr_mac_xmtdata(dhd, i); + dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i], + sr_size, xmtaddress, xmtdata); + } + } + + dig_buf_size = dhd_sssr_dig_buf_size(dhd); + dig_buf_addr = dhd_sssr_dig_buf_addr(dhd); + + if (dig_buf_size) { + dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, dig_buf_size, dig_buf_addr); + } + + return BCME_OK; +} + +int +dhdpcie_sssr_dump(dhd_pub_t *dhd) +{ + uint32 powerctrl_val; + + if (!dhd->sssr_inited) { + DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) " + "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(chipcregs_t, powerctl), 0, 0), + si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), + PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), + PMU_REG(dhd->bus->sih, res_state, 0, 0))); + + dhdpcie_d11_check_outofreset(dhd); + +#ifdef DHD_SSSR_DUMP_BEFORE_SR + DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } +#endif /* DHD_SSSR_DUMP_BEFORE_SR */ + + dhdpcie_clear_intmask_and_timer(dhd); + dhdpcie_clear_clk_req(dhd); + powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd); + dhdpcie_pcie_send_ltrsleep(dhd); + + if 
(MULTIBP_ENAB(dhd->bus->sih)) { + dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE); + } + + /* Wait for some time before Restore */ + OSL_DELAY(6000); + + DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) " + "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(chipcregs_t, powerctl), 0, 0), + si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), + PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), + PMU_REG(dhd->bus->sih, res_state, 0, 0))); + + if (MULTIBP_ENAB(dhd->bus->sih)) { + dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE); + /* Add delay for WL domain to power up */ + OSL_DELAY(15000); + + DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) " + "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(chipcregs_t, powerctl), 0, 0), + si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), + PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), + PMU_REG(dhd->bus->sih, res_state, 0, 0))); + } + + dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val); + dhdpcie_arm_resume_clk_req(dhd); + dhdpcie_bring_d11_outofreset(dhd); + + DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } + dhd->sssr_dump_collected = TRUE; + dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR); + + return BCME_OK; +} + +#define PCIE_CFG_DSTATE_MASK 0x11u + +static int +dhdpcie_fis_trigger(dhd_pub_t *dhd) +{ + uint32 fis_ctrl_status; + uint32 cfg_status_cmd; + uint32 cfg_pmcsr; + + if (!dhd->sssr_inited) { + DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } + +#ifdef DHD_PCIE_RUNTIMEPM + /* Bring back to D0 */ + dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0)); + /* Stop RPM timer so that even INB DW DEASSERT should not happen */ + DHD_STOP_RPM_TIMER(dhd); +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* Set fis_triggered flag to ignore link down callback from RC */ + dhd->fis_triggered = TRUE; + + /* Set FIS PwrswForceOnAll */ + PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_FIS_FORCEON_ALL_MASK, PMU_FIS_FORCEON_ALL_MASK); + + fis_ctrl_status = PMU_REG(dhd->bus->sih, fis_ctrl_status, 0, 0); + + DHD_ERROR(("%s: fis_ctrl_status=0x%x\n", __FUNCTION__, fis_ctrl_status)); + + cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32)); + cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32)); + DHD_ERROR(("before save: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n", + PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr)); + + DHD_PCIE_CONFIG_SAVE(dhd->bus); + + /* Trigger FIS */ + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK); + OSL_DELAY(100 * 1000); + +#ifdef OEM_ANDROID + /* + * For android built-in platforms need to perform REG ON/OFF + * to restore pcie link. + * dhd_download_fw_on_driverload will be FALSE for built-in. 
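+ * The sequence below stops the host device, toggles the WLAN power
+ * rail via dhd_wifi_platform_set_power(), restarts the host device
+ * and then restores the saved PCIe config space.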
+ */ + if (!dhd_download_fw_on_driverload) { + DHD_ERROR(("%s: Toggle REG_ON and restore config space\n", __FUNCTION__)); + dhdpcie_bus_stop_host_dev(dhd->bus); + dhd_wifi_platform_set_power(dhd, FALSE); + dhd_wifi_platform_set_power(dhd, TRUE); + dhdpcie_bus_start_host_dev(dhd->bus); + /* Restore inited pcie cfg from pci_load_saved_state */ + dhdpcie_bus_enable_device(dhd->bus); + } +#endif /* OEM_ANDROID */ + + cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32)); + cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32)); + DHD_ERROR(("after regon-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n", + PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr)); + + /* To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore */ + DHD_PCIE_CONFIG_RESTORE(dhd->bus); + + cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32)); + cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32)); + DHD_ERROR(("after normal-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n", + PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr)); + + /* + * To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore + * in both MSM and LSI RCs + */ + if ((cfg_pmcsr & PCIE_CFG_DSTATE_MASK) != 0) { + int ret = dhdpcie_set_master_and_d0_pwrstate(dhd->bus); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Setting D0 failed, ABORT FIS collection\n", __FUNCTION__)); + return ret; + } + cfg_status_cmd = + dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32)); + cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32)); + DHD_ERROR(("after force-d0: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n", + PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr)); + } + + /* Clear fis_triggered as REG OFF/ON recovered link */ + dhd->fis_triggered = FALSE; + + return BCME_OK; +} + +int +dhd_bus_fis_trigger(dhd_pub_t *dhd) +{ + return dhdpcie_fis_trigger(dhd); +} + +static int +dhdpcie_reset_hwa(dhd_pub_t *dhd) +{ + int ret; + sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info; + sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3; + + /* HWA wrapper registers */ + uint32 ioctrl, resetctrl; + /* HWA base registers */ + uint32 clkenable, clkgatingenable, clkext, clkctlstatus; + uint32 hwa_resetseq_val[SSSR_HWA_RESET_SEQ_STEPS]; + int i = 0; + + if (sssr_reg_info->version < SSSR_REG_INFO_VER_3) { + DHD_ERROR(("%s: not supported for version:%d\n", + __FUNCTION__, sssr_reg_info->version)); + return BCME_UNSUPPORTED; + } + + if (sssr_reg_info->hwa_regs.base_regs.clkenable == 0) { + DHD_ERROR(("%s: hwa regs are not set\n", __FUNCTION__)); + return BCME_UNSUPPORTED; + } + + DHD_ERROR(("%s: version:%d\n", __FUNCTION__, sssr_reg_info->version)); + + ioctrl = sssr_reg_info->hwa_regs.wrapper_regs.ioctrl; + resetctrl = sssr_reg_info->hwa_regs.wrapper_regs.resetctrl; + + clkenable = sssr_reg_info->hwa_regs.base_regs.clkenable; + clkgatingenable = sssr_reg_info->hwa_regs.base_regs.clkgatingenable; + clkext = sssr_reg_info->hwa_regs.base_regs.clkext; + clkctlstatus = sssr_reg_info->hwa_regs.base_regs.clkctlstatus; + + ret = memcpy_s(hwa_resetseq_val, sizeof(hwa_resetseq_val), + sssr_reg_info->hwa_regs.hwa_resetseq_val, + sizeof(sssr_reg_info->hwa_regs.hwa_resetseq_val)); + if (ret) { + DHD_ERROR(("%s: hwa_resetseq_val memcpy_s failed: %d\n", + __FUNCTION__, ret)); + return ret; + } + + 
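+ /* Apply the reset sequence captured in hwa_resetseq_val above: the
+ * wrapper writes (ioctrl, resetctrl twice, ioctrl) reset the HWA core,
+ * then the four base-register writes (clkenable, clkgatingenable,
+ * clkext, clkctlstatus) bring its clocks back up.
+ */
+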
dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE); + dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE); + dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE); + dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE); + + dhd_sbreg_op(dhd, clkenable, &hwa_resetseq_val[i++], FALSE); + dhd_sbreg_op(dhd, clkgatingenable, &hwa_resetseq_val[i++], FALSE); + dhd_sbreg_op(dhd, clkext, &hwa_resetseq_val[i++], FALSE); + dhd_sbreg_op(dhd, clkctlstatus, &hwa_resetseq_val[i++], FALSE); + + return BCME_OK; +} + +static int +dhdpcie_fis_dump(dhd_pub_t *dhd) +{ + int i; + uint8 num_d11cores; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + if (!dhd->sssr_inited) { + DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* bring up all pmu resources */ + PMU_REG(dhd->bus->sih, min_res_mask, ~0, + PMU_REG(dhd->bus->sih, max_res_mask, 0, 0)); + OSL_DELAY(10 * 1000); + + num_d11cores = dhd_d11_slices_num_get(dhd); + + for (i = 0; i < num_d11cores; i++) { + dhd->sssr_d11_outofreset[i] = TRUE; + } + + dhdpcie_bring_d11_outofreset(dhd); + OSL_DELAY(6000); + + /* clear FIS Done */ + PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK); + + if (dhdpcie_reset_hwa(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_reset_hwa failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_d11_check_outofreset(dhd); + + DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } + dhd->sssr_dump_collected = TRUE; + dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS); + + return BCME_OK; +} + +int +dhd_bus_fis_dump(dhd_pub_t *dhd) +{ + return dhdpcie_fis_dump(dhd); +} +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_SDTC_ETB_DUMP +int +dhd_bus_get_etb_info(dhd_pub_t *dhd, uint32 etbinfo_addr, etb_info_t *etb_info) +{ + + int ret = 0; + + if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, etbinfo_addr, + (unsigned char *)etb_info, sizeof(*etb_info)))) { + DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret)); + return BCME_ERROR; + } + + return BCME_OK; +} + +int +dhd_bus_get_sdtc_etb(dhd_pub_t *dhd, uint8 *sdtc_etb_mempool, uint addr, uint read_bytes) +{ + int ret = 0; + + if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, addr, + (unsigned char *)sdtc_etb_mempool, read_bytes))) { + DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret)); + return BCME_ERROR; + } + return BCME_OK; +} +#endif /* DHD_SDTC_ETB_DUMP */ + +#ifdef BTLOG +void +BCMFASTPATH(dhd_bus_rx_bt_log)(struct dhd_bus *bus, void* pkt) +{ + dhd_rx_bt_log(bus->dhd, pkt); +} +#endif /* BTLOG */ + +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_bus_get_wakecount(dhd_pub_t *dhd) +{ + return &dhd->bus->wake_counts; +} +int +dhd_bus_get_bus_wake(dhd_pub_t *dhd) +{ + return bcmpcie_set_get_wake(dhd->bus, 0); +} +#endif /* DHD_WAKE_STATUS */ + +/* Writes random number(s) to the TCM. FW upon initialization reads this register + * to fetch the random number, and uses it to randomize heap address space layout. 
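+ * The metadata (signature and count) is written just below the NVRAM image
+ * near the top of RAM, with the random bytes directly beneath it;
+ * bus->ramtop_addr and bus->next_tlv are left pointing at the lowest address used.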
+ */ +static int +dhdpcie_wrt_rnd(struct dhd_bus *bus) +{ + bcm_rand_metadata_t rnd_data; + uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES]; + uint32 count = BCM_ENTROPY_HOST_NBYTES; + int ret = 0; + uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) - + ((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data)); + + memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES); + rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE); + rnd_data.count = htol32(count); + /* write the metadata about random number */ + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data)); + /* scale back by number of random number counts */ + addr -= count; + + bus->ramtop_addr = addr; + +#ifdef DHD_RND_DEBUG + bus->dhd->rnd_buf = NULL; + /* get random contents from file */ + ret = dhd_get_rnd_info(bus->dhd); + if (bus->dhd->rnd_buf) { + /* write file contents to TCM */ + DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__)); + dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len); + + /* Dump random content to out file */ + dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len); + + /* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */ + MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len); + bus->dhd->rnd_buf = NULL; + return BCME_OK; + } +#endif /* DHD_RND_DEBUG */ + + /* Now write the random number(s) */ + ret = dhd_get_random_bytes(rand_buf, count); + if (ret != BCME_OK) { + return ret; + } + dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count); + +#ifdef DHD_RND_DEBUG + /* Dump random content to out file */ + dhd_dump_rnd_info(bus->dhd, rand_buf, count); +#endif /* DHD_RND_DEBUG */ + + bus->next_tlv = addr; + + return BCME_OK; +} + +#ifdef D2H_MINIDUMP +bool +dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp) +{ + return dhdp->bus->d2h_minidump; +} +#endif /* D2H_MINIDUMP */ + +void +dhd_pcie_intr_count_dump(dhd_pub_t *dhd) +{ + struct dhd_bus *bus = dhd->bus; + uint64 current_time; + + DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n")); + DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n", + bus->resume_intr_enable_count, bus->dpc_intr_enable_count)); + DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n", + bus->isr_intr_disable_count, bus->suspend_intr_disable_count)); +#ifdef BCMPCIE_OOB_HOST_WAKE + DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n", + bus->oob_intr_count, bus->oob_intr_enable_count, + bus->oob_intr_disable_count)); + DHD_ERROR(("oob_irq_num=%d last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT"\n", + dhdpcie_get_oob_irq_num(bus), + GET_SEC_USEC(bus->last_oob_irq_isr_time), + GET_SEC_USEC(bus->last_oob_irq_thr_time))); + DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT + " last_oob_irq_disable_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_oob_irq_enable_time), + GET_SEC_USEC(bus->last_oob_irq_disable_time))); + DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n", + dhdpcie_get_oob_irq_status(bus), + dhdpcie_get_oob_irq_level())); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n", + bus->dpc_return_busdown_count, bus->non_ours_irq_count)); + + current_time = OSL_LOCALTIME_NS(); + DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(current_time))); + DHD_ERROR(("isr_entry_time="SEC_USEC_FMT + " isr_exit_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->isr_entry_time), + GET_SEC_USEC(bus->isr_exit_time))); + 
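+	/* DPC scheduling and ring-processing timestamps */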
DHD_ERROR(("isr_sched_dpc_time="SEC_USEC_FMT + " rpm_sched_dpc_time="SEC_USEC_FMT + " last_non_ours_irq_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->isr_sched_dpc_time), + GET_SEC_USEC(bus->rpm_sched_dpc_time), + GET_SEC_USEC(bus->last_non_ours_irq_time))); + DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT + " last_process_ctrlbuf_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->dpc_entry_time), + GET_SEC_USEC(bus->last_process_ctrlbuf_time))); + DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT + " last_process_txcpl_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_process_flowring_time), + GET_SEC_USEC(bus->last_process_txcpl_time))); + DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT + " last_process_infocpl_time="SEC_USEC_FMT + " last_process_edl_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_process_rxcpl_time), + GET_SEC_USEC(bus->last_process_infocpl_time), + GET_SEC_USEC(bus->last_process_edl_time))); + DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT + " resched_dpc_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->dpc_exit_time), + GET_SEC_USEC(bus->resched_dpc_time))); + DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_d3_inform_time))); + + DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT + " last_suspend_end_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_suspend_start_time), + GET_SEC_USEC(bus->last_suspend_end_time))); + DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT + " last_resume_end_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_resume_start_time), + GET_SEC_USEC(bus->last_resume_end_time))); + +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) + DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT + " logtrace_thread_sem_down_time="SEC_USEC_FMT + "\nlogtrace_thread_flush_time="SEC_USEC_FMT + " logtrace_thread_unexpected_break_time="SEC_USEC_FMT + "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time))); +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +void +dhd_bus_intr_count_dump(dhd_pub_t *dhd) +{ + dhd_pcie_intr_count_dump(dhd); +} + +int +dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd) +{ + uint32 save_idx, val; + si_t *sih = dhd->bus->sih; + uint32 oob_base, oob_base1; + uint32 wrapper_dump_list[] = { + AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74, + AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74, + AI_RESETSTATUS, AI_RESETCTRL, + AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD, + AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT + }; + uint32 i; + hndoobr_reg_t *reg; + cr4regs_t *cr4regs; + ca7regs_t *ca7regs; + + save_idx = si_coreidx(sih); + + DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__)); + + if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) { + for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) { + val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0); + DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val)); + } + } + + if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) { + DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__)); + for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) { + val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0); + DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val)); + } + DHD_ERROR(("%s: ARM CR4 core Reg\n", 
__FUNCTION__));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
+		DHD_ERROR(("reg:0x%x val:0x%x\n",
+			(uint)OFFSETOF(cr4regs_t, corecapabilities), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
+		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
+	}
+	/* XXX: Currently dumping CA7 registers causing CTO, temporarily disabling it */
+	BCM_REFERENCE(ca7regs);
+#ifdef NOT_YET
+	if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
+		DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
+		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
+		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
+		DHD_ERROR(("reg:0x%x val:0x%x\n",
+			(uint)OFFSETOF(ca7regs_t, corecapabilities), val));
+		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
+		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
+		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
+		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
+		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
+	}
+#endif /* NOT_YET */
+
+	DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
+
+	oob_base = si_oobr_baseaddr(sih, FALSE);
+	oob_base1 = si_oobr_baseaddr(sih, TRUE);
+	if (oob_base) {
+		dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
+		dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
+		dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
+		dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
+	} else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
+		val = R_REG(dhd->osh, &reg->intstatus[0]);
+		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+		val = R_REG(dhd->osh, &reg->intstatus[1]);
+		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+		val = R_REG(dhd->osh, &reg->intstatus[2]);
+		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+		val = R_REG(dhd->osh, &reg->intstatus[3]);
+		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+	}
+
+	if (oob_base1) {
+		DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
+
+		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
+		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
+		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
+		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
+	}
+
+	si_setcoreidx(dhd->bus->sih, save_idx);
+
+	return 0;
+}
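+
+/* Dump the REG_WORK_AROUND register of the chipcommon, ARM CR4 and PCIe
+ * cores, along with the PMU min_res_mask.
+ */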
+static void
+dhdpcie_hw_war_regdump(dhd_bus_t *bus)
+{
+	uint32 save_idx, val;
+	volatile uint32 *reg;
+
+	save_idx = si_coreidx(bus->sih);
+	if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
+		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+		DHD_ERROR(("CC HW_WAR :0x%x\n", val));
+	}
+
+	if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
+		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+		DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
+	}
+
+	if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
+		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+		DHD_ERROR(("PCIE HW_WAR :0x%x\n", val));
+	}
+	si_setcoreidx(bus->sih, save_idx);
+
+	val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
+	DHD_ERROR(("MINRESMASK :0x%x\n", val));
+}
+
+int
+dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
+{
+	if (dhd->bus->is_linkdown) {
+		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
+			"due to PCIe link down ------- \r\n"));
+		return 0;
+	}
+
+	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
+
+	//HostToDev
+	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
+	DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
+	DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
+
+	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
+	DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
+	DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
+
+	//DevToHost
+	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
+	DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
+	DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
+
+	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
+	DHD_ERROR((" : RcvAddrLow=0x%08x 
RcvAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0))); + DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0))); + + return 0; +} + +bool +dhd_pcie_dump_int_regs(dhd_pub_t *dhd) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 d2h_db0 = 0; + uint32 d2h_mb_data = 0; + + DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_int, 0, 0); + if (intstatus == (uint32)-1) { + DHD_ERROR(("intstatus=0x%x \n", intstatus)); + return FALSE; + } + + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_mask, 0, 0); + if (intmask == (uint32) -1) { + DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask)); + return FALSE; + } + + d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCID2H_MailBox, 0, 0); + if (d2h_db0 == (uint32)-1) { + DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", + intstatus, intmask, d2h_db0)); + return FALSE; + } + + DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", + intstatus, intmask, d2h_db0)); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, + dhd->bus->def_intmask)); + + return TRUE; +} + +void +dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd) +{ + DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n")); + DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n", + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); +#ifdef EXTENDED_PCIE_DEBUG_DUMP + DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n", + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0), + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0), + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0), + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0))); +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ +} + +#ifdef WL_CFGVENDOR_SEND_HANG_EVENT +#define MAX_RC_REG_INFO_VAL 8 +#define PCIE_EXTCAP_ERR_HD_SZ 4 +void +dhd_dump_pcie_rc_regs_for_linkdown(dhd_pub_t *dhd, int *bytes_written) +{ + int i; + int remain_len; + + /* dump link control & status */ + if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c", + dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP, + PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL); + dhd->hang_info_cnt++; + } + + /* dump device control & status */ + if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c", + dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP, + PCIE_CAP_DEVCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL); + dhd->hang_info_cnt++; + } + + /* dump uncorrectable error */ + if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) { + remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written; + *bytes_written += 
scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+			dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+			PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0), HANG_KEY_DEL);
+		dhd->hang_info_cnt++;
+	}
+
+	/* dump correctable error */
+	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+			dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+			/* XXX: use definition in linux/pcie_regs.h */
+			PCI_ERR_COR_STATUS, TRUE, FALSE, 0), HANG_KEY_DEL);
+		dhd->hang_info_cnt++;
+	}
+
+	/* HG05/06 reserved */
+	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+			0, HANG_KEY_DEL);
+		dhd->hang_info_cnt++;
+	}
+
+	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+			0, HANG_KEY_DEL);
+		dhd->hang_info_cnt++;
+	}
+
+	/* dump error header log in RAW */
+	for (i = 0; i < PCIE_EXTCAP_ERR_HD_SZ; i++) {
+		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len,
+			"%c%08x", HANG_RAW_DEL, dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+			PCIE_EXTCAP_ERR_HEADER_LOG_0 + i * PCIE_EXTCAP_ERR_HD_SZ,
+			TRUE, FALSE, 0));
+	}
+	dhd->hang_info_cnt++;
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+int
+dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
+{
+	int host_irq_disabled;
+
+	DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
+	host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
+	DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
+	dhd_print_tasklet_status(dhd);
+	dhd_pcie_intr_count_dump(dhd);
+
+#if defined(LINUX) || defined(linux)
+	DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
+	dhdpcie_dump_resource(dhd->bus);
+#endif /* LINUX || linux */
+
+	dhd_pcie_dump_rc_conf_space_cap(dhd);
+
+	DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
+		dhd_debug_get_rc_linkcap(dhd->bus)));
+#ifdef CUSTOMER_HW4_DEBUG
+	if (dhd->bus->is_linkdown) {
+		DHD_ERROR(("Skip dumping the PCIe Config and Core registers. 
" + "link may be DOWN\n")); + return 0; + } +#endif /* CUSTOMER_HW4_DEBUG */ + DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n")); + /* XXX: hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */ + dhd_bus_dump_imp_cfg_registers(dhd->bus); +#ifdef EXTENDED_PCIE_DEBUG_DUMP + DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n", + dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); + DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x " + "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1, + dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG1, sizeof(uint32)), + PCI_TLP_HDR_LOG2, + dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG2, sizeof(uint32)), + PCI_TLP_HDR_LOG3, + dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG3, sizeof(uint32)), + PCI_TLP_HDR_LOG4, + dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG4, sizeof(uint32)))); + if (dhd->bus->sih->buscorerev >= 24) { + DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x " + "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL, + dhd_pcie_config_read(dhd->bus, PCIECFGREG_DEV_STATUS_CTRL, + sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL, + dhd_pcie_config_read(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL, + sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2, + dhd_pcie_config_read(dhd->bus, PCIECFGREG_PML1_SUB_CTRL2, + sizeof(uint32)))); + dhd_bus_dump_dar_registers(dhd->bus); + } +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n")); + return 0; + } + + if (MULTIBP_ENAB(dhd->bus->sih)) { + dhd_bus_pcie_pwr_req(dhd->bus); + } + + DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n")); + /* XXX: hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/ + * CurrentPcieGen2ProgramGuide/pcie_ep.htm + */ + + DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x " + "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0), + PCIECFGREG_PHY_DBG_CLKREQ1, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1), + PCIECFGREG_PHY_DBG_CLKREQ2, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2), + PCIECFGREG_PHY_DBG_CLKREQ3, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3))); + +#ifdef EXTENDED_PCIE_DEBUG_DUMP + if (dhd->bus->sih->buscorerev >= 24) { + + DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x " + "ltssm_hist_2(0x%x)=0x%x " + "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0), + PCIECFGREG_PHY_LTSSM_HIST_1, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1), + PCIECFGREG_PHY_LTSSM_HIST_2, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2), + PCIECFGREG_PHY_LTSSM_HIST_3, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3))); + + DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n", + PCIECFGREG_TREFUP, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP), + PCIECFGREG_TREFUP_EXT, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT))); + DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x " + "Function_Intstatus(0x%x)=0x%x " + "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x " + "Power_Intmask(0x%x)=0x%x\n", + PCIE_CORE_REG_ERRLOG, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIE_CORE_REG_ERRLOG, 0, 0), + PCIE_CORE_REG_ERR_ADDR, + si_corereg(dhd->bus->sih, 
dhd->bus->sih->buscoreidx, + PCIE_CORE_REG_ERR_ADDR, 0, 0), + PCIFunctionIntstatus(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0), + PCIFunctionIntmask(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0), + PCIPowerIntstatus(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0), + PCIPowerIntmask(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0))); + DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x " + "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n", + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0))); + DHD_ERROR(("err_code(0x%x)=0x%x\n", + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0))); + + dhd_pcie_dump_wrapper_regs(dhd); + dhdpcie_hw_war_regdump(dhd->bus); + } +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ + + dhd_pcie_dma_info_dump(dhd); + + if (MULTIBP_ENAB(dhd->bus->sih)) { + dhd_bus_pcie_pwr_req_clear(dhd->bus); + } + + return 0; +} + +bool +dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus) +{ + return bus->force_bt_quiesce; +} +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE +uint32 dhd_bus_get_bp_base(dhd_pub_t *dhdp) +{ + return (dhdp->bus->bp_base); +} +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ +#ifdef DHD_HP2P +uint16 +dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx) +{ + if (tx) + return bus->hp2p_txcpl_max_items; + else + return bus->hp2p_rxcpl_max_items; +} + +static uint16 +dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val) +{ + if (tx) + bus->hp2p_txcpl_max_items = val; + else + bus->hp2p_rxcpl_max_items = val; + return val; +} +#endif /* DHD_HP2P */ + +uint8 +dhd_d11_slices_num_get(dhd_pub_t *dhdp) +{ + return si_scan_core_present(dhdp->bus->sih) ? 
+		MAX_NUM_D11_CORES_WITH_SCAN : MAX_NUM_D11CORES;
+}
+
+#if defined(linux) || defined(LINUX)
+static bool
+dhd_bus_tcm_test(struct dhd_bus *bus)
+{
+	int ret = 0;
+	int size; /* Full mem size */
+	int start; /* Start address */
+	int read_size = 0; /* Read size of each iteration */
+	int num = 0;
+	uint8 *read_buf, *write_buf;
+	uint8 init_val[NUM_PATTERNS] = {
+		0xFFu, /* 11111111 */
+		0x00u, /* 00000000 */
+#if !defined(DHD_FW_MEM_CORRUPTION)
+		0x77u, /* 01110111 */
+		0x22u, /* 00100010 */
+		0x27u, /* 00100111 */
+		0x72u, /* 01110010 */
+#endif /* !DHD_FW_MEM_CORRUPTION */
+	};
+
+	if (!bus) {
+		DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+	if (!read_buf) {
+		DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+	if (!write_buf) {
+		MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+		DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
+	DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
+
+	while (num < NUM_PATTERNS) {
+		start = bus->dongle_ram_base;
+		/* Get full mem size */
+		size = bus->ramsize;
+
+		memset(write_buf, init_val[num], MEMBLOCK);
+		while (size > 0) {
+			read_size = MIN(MEMBLOCK, size);
+			memset(read_buf, 0, read_size);
+
+			/* Write */
+			if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
+				DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
+				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+				return FALSE;
+			}
+
+			/* Read */
+			if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
+				DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
+				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+				return FALSE;
+			}
+
+			/* Compare */
+			if (memcmp(read_buf, write_buf, read_size)) {
+				DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
+					__FUNCTION__, start, num));
+				prhex("Readbuf", read_buf, read_size);
+				prhex("Writebuf", write_buf, read_size);
+				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+				return FALSE;
+			}
+
+			/* Decrement size and increment start address */
+			size -= read_size;
+			start += read_size;
+		}
+		num++;
+	}
+
+	MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+	MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+
+	DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
+	return TRUE;
+}
+#endif /* LINUX || linux */
+
+#define PCI_CFG_LINK_SPEED_SHIFT	16
+int
+dhd_get_pcie_linkspeed(dhd_pub_t *dhd)
+{
+	uint32 pcie_lnkst;
+	uint32 pcie_lnkspeed;
+	pcie_lnkst = OSL_PCI_READ_CONFIG(dhd->osh, PCIECFGREG_LINK_STATUS_CTRL,
+		sizeof(pcie_lnkst));
+
+	pcie_lnkspeed = (pcie_lnkst >> PCI_CFG_LINK_SPEED_SHIFT) & PCI_LINK_SPEED_MASK;
+	DHD_INFO(("%s: Link speed: %d\n", __FUNCTION__, pcie_lnkspeed));
+	return pcie_lnkspeed;
+}
+
+int
+dhd_bus_checkdied(struct dhd_bus *bus, char *data, uint size)
+{
+	return dhdpcie_checkdied(bus, data, size);
+}
+
+/* Common backplane can be hung by putting the APB2 bridge in reset */
+void
+dhdpcie_induce_cbp_hang(dhd_pub_t *dhd)
+{
+	uint32 addr, val;
+	uint32 apb2_wrapper_reg = 0x18106000;
+	uint32 apb2_reset_ctrl_offset = 0x800;
+	addr = apb2_wrapper_reg + apb2_reset_ctrl_offset;
+	val = 1;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+}
diff --git a/bcmdhd.101.10.361.x/dhd_pcie.h b/bcmdhd.101.10.361.x/dhd_pcie.h
new file mode 100755
index 0000000..e18bc2b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pcie.h
@@ -0,0 +1,1048 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef dhd_pcie_h
+#define dhd_pcie_h
+
+#include
+#include
+#include
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#ifdef CONFIG_PCI_MSM
+#include
+#else
+#include
+#endif /* CONFIG_PCI_MSM */
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_EXYNOS
+#ifndef SUPPORT_EXYNOS7420
+#include
+extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
+extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
+#endif /* !SUPPORT_EXYNOS7420 */
+#endif /* CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+#include
+#include
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+/* defines */
+#define PCIE_SHARED_VERSION		PCIE_SHARED_VERSION_7
+
+#define PCMSGBUF_HDRLEN			0
+#define DONGLE_REG_MAP_SIZE		(32 * 1024)
+#define DONGLE_TCM_MAP_SIZE		(4096 * 1024)
+#define DONGLE_MIN_MEMSIZE		(128 * 1024)
+#ifdef DHD_DEBUG
+#define DHD_PCIE_SUCCESS 0
+#define DHD_PCIE_FAILURE 1
+#endif /* DHD_DEBUG */
+#define REMAP_ENAB(bus)			((bus)->remap)
+#define REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#define struct_pcie_notify		struct msm_pcie_notify
+#define struct_pcie_register_event	struct msm_pcie_register_event
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_EXYNOS
+#ifndef SUPPORT_EXYNOS7420
+#define struct_pcie_notify		struct exynos_pcie_notify
+#define struct_pcie_register_event	struct exynos_pcie_register_event
+#endif /* !SUPPORT_EXYNOS7420 */
+#endif /* CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#define MAX_DHD_TX_FLOWS	320
+
+/* user defined data structures */
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192u
+#define CONSOLE_BUFFER_MAX	(8 * 1024)
+
+#ifdef IDLE_TX_FLOW_MGMT
+#define IDLE_FLOW_LIST_TIMEOUT 5000
+#define IDLE_FLOW_RING_TIMEOUT 5000
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+#define DEVICE_TX_STUCK_CKECK_TIMEOUT	1000 /* 1 sec */
+#define DEVICE_TX_STUCK_TIMEOUT		10000 /* 10 secs */
+#define DEVICE_TX_STUCK_WARN_DURATION	(DEVICE_TX_STUCK_TIMEOUT / DEVICE_TX_STUCK_CKECK_TIMEOUT)
+#define DEVICE_TX_STUCK_DURATION	(DEVICE_TX_STUCK_WARN_DURATION * 2)
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+/* implicit DMA for h2d wr and d2h rd indices from Host memory to TCM */
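+/* IDMA_ENAB: feature requested via config; IDMA_ACTIVE: requested and inited
+ * (idma_inited); IDMA_CAPABLE: this PCIe core revision supports iDMA.
+ */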
+#define IDMA_ENAB(dhd)		((dhd) && (dhd)->idma_enable)
+#define IDMA_ACTIVE(dhd)	((dhd) && ((dhd)->idma_enable) && ((dhd)->idma_inited))
+
+#define IDMA_CAPABLE(bus)	(((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23))
+
+/* IFRM (Implicit Flow Ring Manager) enable and inited */
+#define IFRM_ENAB(dhd)		((dhd) && (dhd)->ifrm_enable)
+#define IFRM_ACTIVE(dhd)	((dhd) && ((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))
+
+/* DAR registers used for h2d doorbell */
+#define DAR_ENAB(dhd)		((dhd) && (dhd)->dar_enable)
+#define DAR_ACTIVE(dhd)		((dhd) && ((dhd)->dar_enable) && ((dhd)->dar_inited))
+
+/* DAR WAR for revs < 64 */
+#define DAR_PWRREQ(bus)		(((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd))
+
+/* PCIE CTO Prevention and Recovery */
+#define PCIECTO_ENAB(bus)	((bus)->cto_enable)
+
+/* Implicit DMA index usage :
+ * Index 0 for h2d write index transfer
+ * Index 1 for d2h read index transfer
+ */
+#define IDMA_IDX0	0
+#define IDMA_IDX1	1
+#define IDMA_IDX2	2
+#define IDMA_IDX3	3
+#define DMA_TYPE_SHIFT	4
+#define DMA_TYPE_IDMA	1
+
+#define DHDPCIE_CONFIG_HDR_SIZE 16
+#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
+#define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20
+#define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */
+#define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */
+#define DHDPCIE_PM_D2_DELAY 200 /* 200us */
+
+typedef struct dhd_console {
+	uint		count;		/* Poll interval msec counter */
+	uint		log_addr;	/* Log struct address (fixed) */
+	hnd_log_t	log;		/* Log struct (host copy) */
+	uint		bufsize;	/* Size of log buffer */
+	uint8		*buf;		/* Log buffer (host copy) */
+	uint		last;		/* Last buffer read index */
+} dhd_console_t;
+
+typedef struct ring_sh_info {
+	uint32 ring_mem_addr;
+	uint32 ring_state_w;
+	uint32 ring_state_r;
+	pcie_hwa_db_index_t ring_hwa_db_idx;	/* HWA DB index value per ring */
+} ring_sh_info_t;
+#define MAX_DS_TRACE_SIZE	50
+#ifdef DHD_MMIO_TRACE
+#define MAX_MMIO_TRACE_SIZE	256
+/* Minimum of 250us should be elapsed to add new entry */
+#define MIN_MMIO_TRACE_TIME	250
+#define DHD_RING_IDX	0x00FF0000
+typedef struct _dhd_mmio_trace_t {
+	uint64	timestamp;
+	uint32	addr;
+	uint32	value;
+	bool	set;
+} dhd_mmio_trace_t;
+#endif /* defined(DHD_MMIO_TRACE) */
+typedef struct _dhd_ds_trace_t {
+	uint64	timestamp;
+	bool	d2h;
+	uint32	dsval;
+#ifdef PCIE_INB_DW
+	enum dhd_bus_ds_state inbstate;
+#endif /* PCIE_INB_DW */
+} dhd_ds_trace_t;
+
+#define DEVICE_WAKE_NONE	0
+#define DEVICE_WAKE_OOB		1
+#define DEVICE_WAKE_INB		2
+
+#define INBAND_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_INB)
+#define OOB_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_OOB)
+#define NO_DW_ENAB(bus)			((bus)->dw_option == DEVICE_WAKE_NONE)
+
+#define PCIE_PWR_REQ_RELOAD_WAR_ENAB(buscorerev) \
+	((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \
+	(buscorerev == 70) || (buscorerev == 72))
+
+#define PCIE_FASTLPO_ENABLED(buscorerev) \
+	((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \
+	(buscorerev == 70) || (buscorerev == 72))
+
+/*
+ * HW JIRA - CRWLPCIEGEN2-672
+ * Producer Index Feature which is used by F1 gets reset on F0 FLR
+ * fixed in REV68
+ */
+#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
+	((buscorerev == 66) || (buscorerev == 67))
+
+struct dhd_bus;
+
+struct dhd_pcie_rev {
+	uint8	fw_rev;
+	void (*handle_mb_data)(struct dhd_bus *);
+};
+
+typedef struct dhdpcie_config_save
+{
+	uint32 header[DHDPCIE_CONFIG_HDR_SIZE];
+	/* pmcsr save */
+	uint32 pmcsr;
+	/* express save */
+	uint32 exp_dev_ctrl_stat;
+	uint32 exp_link_ctrl_stat;
+	uint32 exp_dev_ctrl_stat2;
+	uint32
exp_link_ctrl_stat2; + /* msi save */ + uint32 msi_cap; + uint32 msi_addr0; + uint32 msi_addr1; + uint32 msi_data; + /* l1pm save */ + uint32 l1pm0; + uint32 l1pm1; + /* ltr save */ + uint32 ltr; + /* aer save */ + uint32 aer_caps_ctrl; /* 0x18 */ + uint32 aer_severity; /* 0x0C */ + uint32 aer_umask; /* 0x08 */ + uint32 aer_cmask; /* 0x14 */ + uint32 aer_root_cmd; /* 0x2c */ + /* BAR0 and BAR1 windows */ + uint32 bar0_win; + uint32 bar1_win; +} dhdpcie_config_save_t; + +/* The level of bus communication with the dongle */ +enum dhd_bus_low_power_state { + DHD_BUS_NO_LOW_POWER_STATE, /* Not in low power state */ + DHD_BUS_D3_INFORM_SENT, /* D3 INFORM sent */ + DHD_BUS_D3_ACK_RECIEVED, /* D3 ACK recieved */ +}; + +#ifdef DHD_FLOW_RING_STATUS_TRACE +#define FRS_TRACE_SIZE 32 /* frs - flow_ring_status */ +typedef struct _dhd_flow_ring_status_trace_t { + uint64 timestamp; + uint16 h2d_ctrl_post_drd; + uint16 h2d_ctrl_post_dwr; + uint16 d2h_ctrl_cpln_drd; + uint16 d2h_ctrl_cpln_dwr; + uint16 h2d_rx_post_drd; + uint16 h2d_rx_post_dwr; + uint16 d2h_rx_cpln_drd; + uint16 d2h_rx_cpln_dwr; + uint16 d2h_tx_cpln_drd; + uint16 d2h_tx_cpln_dwr; + uint16 h2d_info_post_drd; + uint16 h2d_info_post_dwr; + uint16 d2h_info_cpln_drd; + uint16 d2h_info_cpln_dwr; + uint16 d2h_ring_edl_drd; + uint16 d2h_ring_edl_dwr; +} dhd_frs_trace_t; +#endif /* DHD_FLOW_RING_STATUS_TRACE */ + +/** Instantiated once for each hardware (dongle) instance that this DHD manages */ +typedef struct dhd_bus { + dhd_pub_t *dhd; /**< pointer to per hardware (dongle) unique instance */ +#if !defined(NDIS) + struct pci_dev *rc_dev; /* pci RC device handle */ + struct pci_dev *dev; /* pci device handle */ +#endif /* !defined(NDIS) */ +#ifdef DHD_EFI + void *pcie_dev; +#endif + dll_t flowring_active_list; /* constructed list of tx flowring queues */ +#ifdef IDLE_TX_FLOW_MGMT + uint64 active_list_last_process_ts; + /* stores the timestamp of active list processing */ +#endif /* IDLE_TX_FLOW_MGMT */ + +#ifdef DEVICE_TX_STUCK_DETECT + /* Flag to enable/disable device tx stuck monitor by DHD IOVAR dev_tx_stuck_monitor */ + uint32 dev_tx_stuck_monitor; + /* Stores the timestamp (msec) of the last device Tx stuck check */ + uint32 device_tx_stuck_check; +#endif /* DEVICE_TX_STUCK_DETECT */ + + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + sbpcieregs_t *reg; /* Registers for PCIE core */ + + uint armrev; /* CPU core revision */ + uint coreid; /* CPU core id */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 bus_num; /* bus number */ + uint32 slot_num; /* slot ID */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ +#ifdef CACHE_FW_IMAGES + int processed_nvram_params_len; /* Modified len of NVRAM info */ +#endif + +#ifdef BCM_ROUTER_DHD + char *nvram_params; /* user specified nvram params. 
*/ + int nvram_params_len; +#endif /* BCM_ROUTER_DHD */ + + struct pktq txq; /* Queue length used for flow-control */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ + + bool alp_only; /* Don't use HT clock (ALP only) */ + + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + uint32 resetinstr; + uint32 dongle_ram_base; + uint32 next_tlv; /* Holds location of next available TLV */ + ulong shared_addr; + pciedev_shared_t *pcie_sh; + uint32 dma_rxoffset; + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + uint32 bar1_size; /* pci device memory size */ + uint32 curr_bar1_win; /* current PCIEBar1Window setting */ + osl_t *osh; + uint32 nvram_csm; /* Nvram checksum */ +#ifdef BCMINTERNAL + bool msi_sim; + uchar *msi_sim_addr; + dmaaddr_t msi_sim_phys; + dhd_dma_buf_t hostfw_buf; /* Host offload firmware buffer */ + uint32 hostfw_base; /* FW assumed base of host offload mem */ + uint32 bp_base; /* adjusted bp base of host offload mem */ +#endif /* BCMINTERNAL */ + uint16 pollrate; + uint16 polltick; + + volatile uint32 *pcie_mb_intr_addr; + volatile uint32 *pcie_mb_intr_2_addr; + void *pcie_mb_intr_osh; + bool sleep_allowed; + + wake_counts_t wake_counts; + + /* version 3 shared struct related info start */ + ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS]; + + uint8 h2d_ring_count; + uint8 d2h_ring_count; + uint32 ringmem_ptr; + uint32 ring_state_ptr; + + uint32 d2h_dma_scratch_buffer_mem_addr; + + uint32 h2d_mb_data_ptr_addr; + uint32 d2h_mb_data_ptr_addr; + /* version 3 shared struct related info end */ + + uint32 def_intmask; + uint32 d2h_mb_mask; + uint32 pcie_mailbox_mask; + uint32 pcie_mailbox_int; + bool ltrsleep_on_unload; + uint wait_for_d3_ack; + uint16 max_tx_flowrings; + uint16 max_submission_rings; + uint16 max_completion_rings; + uint16 max_cmn_rings; + uint32 rw_index_sz; + uint32 hwa_db_index_sz; + bool db1_for_mb; + + dhd_timeout_t doorbell_timer; + bool device_wake_state; +#ifdef PCIE_OOB + bool oob_enabled; +#endif /* PCIE_OOB */ + bool irq_registered; + bool d2h_intr_method; + bool d2h_intr_control; +#ifdef SUPPORT_LINKDOWN_RECOVERY +#if defined(CONFIG_ARCH_MSM) || (defined(CONFIG_ARCH_EXYNOS) && \ + !defined(SUPPORT_EXYNOS7420)) +#ifdef CONFIG_ARCH_MSM + uint8 no_cfg_restore; +#endif /* CONFIG_ARCH_MSM */ + struct_pcie_register_event pcie_event; +#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS && !SUPPORT_EXYNOS7420 */ + bool read_shm_fail; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + int32 idletime; /* Control for activity timeout */ + bool rpm_enabled; +#ifdef DHD_PCIE_RUNTIMEPM + int32 idlecount; /* Activity timeout counter */ + int32 bus_wake; /* For wake up the bus */ + bool runtime_resume_done; /* For check runtime suspend end */ + struct mutex pm_lock; /* Synchronize for system PM & runtime PM */ + wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */ +#endif /* DHD_PCIE_RUNTIMEPM */ + uint32 d3_inform_cnt; + uint32 d0_inform_cnt; + uint32 d0_inform_in_use_cnt; + uint8 force_suspend; + uint8 is_linkdown; + uint8 no_bus_init; +#ifdef IDLE_TX_FLOW_MGMT + bool 
enable_idle_flowring_mgmt; +#endif /* IDLE_TX_FLOW_MGMT */ + struct dhd_pcie_rev api; + bool use_mailbox; + bool use_d0_inform; + void *bus_lp_state_lock; + void *pwr_req_lock; + bool dongle_in_deepsleep; + void *dongle_ds_lock; + bool bar1_switch_enab; + void *bar1_switch_lock; + void *backplane_access_lock; + enum dhd_bus_low_power_state bus_low_power_state; +#ifdef DHD_FLOW_RING_STATUS_TRACE + dhd_frs_trace_t frs_isr_trace[FRS_TRACE_SIZE]; /* frs - flow_ring_status */ + dhd_frs_trace_t frs_dpc_trace[FRS_TRACE_SIZE]; /* frs - flow_ring_status */ + uint32 frs_isr_count; + uint32 frs_dpc_count; +#endif /* DHD_FLOW_RING_STATUS_TRACE */ +#ifdef DHD_MMIO_TRACE + dhd_mmio_trace_t mmio_trace[MAX_MMIO_TRACE_SIZE]; + uint32 mmio_trace_count; +#endif /* defined(DHD_MMIO_TRACE) */ + dhd_ds_trace_t ds_trace[MAX_DS_TRACE_SIZE]; + uint32 ds_trace_count; + uint32 hostready_count; /* Number of hostready issued */ +#if defined(PCIE_OOB) || defined (BCMPCIE_OOB_HOST_WAKE) + bool oob_presuspend; +#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */ + dhdpcie_config_save_t saved_config; + ulong resume_intr_enable_count; + ulong dpc_intr_enable_count; + ulong isr_intr_disable_count; + ulong suspend_intr_disable_count; + ulong dpc_return_busdown_count; + ulong non_ours_irq_count; +#ifdef BCMPCIE_OOB_HOST_WAKE + ulong oob_intr_count; + ulong oob_intr_enable_count; + ulong oob_intr_disable_count; + uint64 last_oob_irq_isr_time; + uint64 last_oob_irq_thr_time; + uint64 last_oob_irq_enable_time; + uint64 last_oob_irq_disable_time; +#endif /* BCMPCIE_OOB_HOST_WAKE */ + uint64 isr_entry_time; + uint64 isr_exit_time; + uint64 isr_sched_dpc_time; + uint64 rpm_sched_dpc_time; + uint64 dpc_entry_time; + uint64 dpc_exit_time; + uint64 resched_dpc_time; + uint64 last_d3_inform_time; + uint64 last_process_ctrlbuf_time; + uint64 last_process_flowring_time; + uint64 last_process_txcpl_time; + uint64 last_process_rxcpl_time; + uint64 last_process_infocpl_time; + uint64 last_process_edl_time; + uint64 last_suspend_start_time; + uint64 last_suspend_end_time; + uint64 last_resume_start_time; + uint64 last_resume_end_time; + uint64 last_non_ours_irq_time; + bool hwa_enabled; + bool idma_enabled; + bool ifrm_enabled; + bool dar_enabled; + uint32 dmaxfer_complete; + uint8 dw_option; +#ifdef PCIE_INB_DW + bool inb_enabled; + uint32 ds_exit_timeout; + uint32 host_sleep_exit_timeout; + uint wait_for_ds_exit; + uint32 inband_dw_assert_cnt; /* # of inband device_wake assert */ + uint32 inband_dw_deassert_cnt; /* # of inband device_wake deassert */ + uint32 inband_ds_exit_host_cnt; /* # of DS-EXIT , host initiated */ + uint32 inband_ds_exit_device_cnt; /* # of DS-EXIT , device initiated */ + uint32 inband_ds_exit_to_cnt; /* # of DS-EXIT timeout */ + uint32 inband_host_sleep_exit_to_cnt; /* # of Host_Sleep exit timeout */ + void *inb_lock; /* Lock to serialize in band device wake activity */ + /* # of contexts in the host which currently want a FW transaction */ + uint32 host_active_cnt; + bool skip_ds_ack; /* Skip DS-ACK during suspend in progress */ +#endif /* PCIE_INB_DW */ +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + bool ds_enabled; +#endif +#ifdef DHD_PCIE_RUNTIMEPM + bool chk_pm; /* To avoid counting of wake up from Runtime PM */ +#endif /* DHD_PCIE_RUNTIMEPM */ +#if defined(PCIE_INB_DW) + bool calc_ds_exit_latency; + bool deep_sleep; /* Indicates deep_sleep set or unset by the DHD IOVAR deep_sleep */ + uint64 ds_exit_latency; + uint64 ds_exit_ts1; + uint64 ds_exit_ts2; +#endif /* PCIE_INB_DW */ + bool _dar_war; +#ifdef GDB_PROXY + 
/* True if firmware loaded and backplane accessible */ + bool gdb_proxy_access_enabled; + /* ID set by last "gdb_proxy_probe" iovar */ + uint32 gdb_proxy_last_id; + /* True if firmware was started in bootloader mode */ + bool gdb_proxy_bootloader_mode; +#endif /* GDB_PROXY */ + uint8 dma_chan; + + bool cto_enable; /* enable PCIE CTO Prevention and recovery */ + uint32 cto_threshold; /* PCIE CTO timeout threshold */ + bool cto_triggered; /* CTO is triggered */ + bool intr_enabled; /* ready to receive interrupts from dongle */ + int pwr_req_ref; + bool flr_force_fail; /* user intends to simulate flr force fail */ + + /* Information used to compose the memory map and to write the memory map, + * FW, and FW signature to dongle RAM. + * This information is used by the bootloader. + */ + uint32 ramtop_addr; /* Dongle address of unused space at top of RAM */ + uint32 fw_download_addr; /* Dongle address of FW download */ + uint32 fw_download_len; /* Length in bytes of FW download */ + uint32 fwsig_download_addr; /* Dongle address of FW signature download */ + uint32 fwsig_download_len; /* Length in bytes of FW signature download */ + uint32 fwstat_download_addr; /* Dongle address of FWS status download */ + uint32 fwstat_download_len; /* Length in bytes of FWS status download */ + uint32 fw_memmap_download_addr; /* Dongle address of FWS memory-info download */ + uint32 fw_memmap_download_len; /* Length in bytes of FWS memory-info download */ + + char fwsig_filename[DHD_FILENAME_MAX]; /* Name of FW signature file */ + char bootloader_filename[DHD_FILENAME_MAX]; /* Name of bootloader image file */ + uint32 bootloader_addr; /* Dongle address of bootloader download */ + bool force_bt_quiesce; /* send bt_quiesce command to BT driver. */ + bool rc_ep_aspm_cap; /* RC and EP ASPM capable */ + bool rc_ep_l1ss_cap; /* EC and EP L1SS capable */ +#if defined(DHD_H2D_LOG_TIME_SYNC) + ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */ +#endif /* DHD_H2D_LOG_TIME_SYNC */ +#ifdef D2H_MINIDUMP + bool d2h_minidump; /* This flag will be set if Host and FW handshake to collect minidump */ + bool d2h_minidump_override; /* Force disable minidump through dhd IOVAR */ +#endif /* D2H_MINIDUMP */ +#ifdef BCMSLTGT + int xtalfreq; /* Xtal frequency used for htclkratio calculation */ + uint32 ilp_tick; /* ILP ticks per second read from pmutimer */ + uint32 xtal_ratio; /* xtal ticks per 4 ILP ticks read from pmu_xtalfreq */ +#endif /* BCMSLTGT */ +#ifdef BT_OVER_PCIE + /* whether the chip is in BT over PCIE mode or not */ + bool btop_mode; +#endif /* BT_OVER_PCIE */ + uint16 hp2p_txcpl_max_items; + uint16 hp2p_rxcpl_max_items; + /* PCIE coherent status */ + uint32 coherent_state; + uint32 inb_dw_deassert_cnt; + uint64 arm_oor_time; + uint64 rd_shared_pass_time; + uint32 hwa_mem_base; + uint32 hwa_mem_size; +} dhd_bus_t; + +#ifdef DHD_MSI_SUPPORT +extern uint enable_msi; +#endif /* DHD_MSI_SUPPORT */ + +enum { + PCIE_INTX = 0, + PCIE_MSI = 1 +}; + +enum { + PCIE_D2H_INTMASK_CTRL = 0, + PCIE_HOST_IRQ_CTRL = 1 +}; + +static INLINE bool +__dhd_check_bus_in_lps(dhd_bus_t *bus) +{ + bool ret = (bus->bus_low_power_state == DHD_BUS_D3_INFORM_SENT) || + (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED); + return ret; +} + +static INLINE bool +dhd_check_bus_in_lps(dhd_bus_t *bus) +{ + unsigned long flags_bus; + bool ret; + DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); + ret = __dhd_check_bus_in_lps(bus); + DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); + return ret; +} + +static INLINE bool 
+__dhd_check_bus_lps_d3_acked(dhd_bus_t *bus) +{ + bool ret = (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED); + return ret; +} + +static INLINE bool +dhd_check_bus_lps_d3_acked(dhd_bus_t *bus) +{ + unsigned long flags_bus; + bool ret; + DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); + ret = __dhd_check_bus_lps_d3_acked(bus); + DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); + return ret; +} + +static INLINE void +__dhd_set_bus_not_in_lps(dhd_bus_t *bus) +{ + bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; + return; +} + +static INLINE void +dhd_set_bus_not_in_lps(dhd_bus_t *bus) +{ + unsigned long flags_bus; + DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); + __dhd_set_bus_not_in_lps(bus); + DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); + return; +} + +static INLINE void +__dhd_set_bus_lps_d3_informed(dhd_bus_t *bus) +{ + bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT; + return; +} + +static INLINE void +dhd_set_bus_lps_d3_informed(dhd_bus_t *bus) +{ + unsigned long flags_bus; + DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); + __dhd_set_bus_lps_d3_informed(bus); + DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); + return; +} + +static INLINE void +__dhd_set_bus_lps_d3_acked(dhd_bus_t *bus) +{ + bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED; + return; +} + +static INLINE void +dhd_set_bus_lps_d3_acked(dhd_bus_t *bus) +{ + unsigned long flags_bus; + DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); + __dhd_set_bus_lps_d3_acked(bus); + DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); + return; +} + +/* check routines */ +#define DHD_CHK_BUS_IN_LPS(bus) dhd_check_bus_in_lps(bus) +#define __DHD_CHK_BUS_IN_LPS(bus) __dhd_check_bus_in_lps(bus) + +#define DHD_CHK_BUS_NOT_IN_LPS(bus) !(DHD_CHK_BUS_IN_LPS(bus)) +#define __DHD_CHK_BUS_NOT_IN_LPS(bus) !(__DHD_CHK_BUS_IN_LPS(bus)) + +#define DHD_CHK_BUS_LPS_D3_INFORMED(bus) DHD_CHK_BUS_IN_LPS(bus) +#define __DHD_CHK_BUS_LPS_D3_INFORMED(bus) __DHD_CHK_BUS_IN_LPS(bus) + +#define DHD_CHK_BUS_LPS_D3_ACKED(bus) dhd_check_bus_lps_d3_acked(bus) +#define __DHD_CHK_BUS_LPS_D3_ACKED(bus) __dhd_check_bus_lps_d3_acked(bus) + +/* set routines */ +#define DHD_SET_BUS_NOT_IN_LPS(bus) dhd_set_bus_not_in_lps(bus) +#define __DHD_SET_BUS_NOT_IN_LPS(bus) __dhd_set_bus_not_in_lps(bus) + +#define DHD_SET_BUS_LPS_D3_INFORMED(bus) dhd_set_bus_lps_d3_informed(bus) +#define __DHD_SET_BUS_LPS_D3_INFORMED(bus) __dhd_set_bus_lps_d3_informed(bus) + +#define DHD_SET_BUS_LPS_D3_ACKED(bus) dhd_set_bus_lps_d3_acked(bus) +#define __DHD_SET_BUS_LPS_D3_ACKED(bus) __dhd_set_bus_lps_d3_acked(bus) + +/* function declarations */ + +extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size); +extern int dhdpcie_bus_register(void); +extern void dhdpcie_bus_unregister(void); +extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device); + +extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, + volatile char *regs, volatile char *tcm, void *pci_dev, wifi_adapter_info_t *adapter); +extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size); +extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); +extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus); +extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus); +extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus); +extern void dhdpcie_bus_release(struct dhd_bus *bus); +extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); +extern void 
dhdpcie_free_irq(dhd_bus_t *bus); +extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); +extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake); +extern void dhdpcie_dongle_reset(dhd_bus_t *bus); +extern int dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus); +extern int dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus); +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint); +#else +extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); +extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable); +extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time); +extern bool dhdpcie_tcm_valid(dhd_bus_t *bus); +extern void dhdpcie_pme_active(osl_t *osh, bool enable); +extern bool dhdpcie_pme_cap(osl_t *osh); +extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val); +extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask); +extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val); +extern int dhdpcie_disable_irq(dhd_bus_t *bus); +extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus); +extern int dhdpcie_enable_irq(dhd_bus_t *bus); + +extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus); + +#if defined(linux) || defined(LINUX) +extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset); +extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, + bool is_write, uint32 writeval); +extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, + bool is_write, uint32 writeval); +extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus); +#else +static INLINE uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset) { return 0;} +static INLINE uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, + bool is_write, uint32 writeval) { return -1;} +static INLINE uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, + bool is_write, uint32 writeval) { return -1;} +static INLINE uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus) { return -1;} +#endif +#if defined(linux) || defined(LINUX) +extern int dhdpcie_start_host_dev(dhd_bus_t *bus); +extern int dhdpcie_stop_host_dev(dhd_bus_t *bus); +extern int dhdpcie_disable_device(dhd_bus_t *bus); +extern int dhdpcie_alloc_resource(dhd_bus_t *bus); +extern void dhdpcie_free_resource(dhd_bus_t *bus); +extern void dhdpcie_dump_resource(dhd_bus_t *bus); +extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); +void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr); +void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); +uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset); +void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); +uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset); +void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); +uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset); +#ifdef DHD_SUPPORT_64BIT +void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data); +uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset); +#endif +#endif /* LINUX || linux */ + +#if defined(linux) || defined(LINUX) || defined(DHD_EFI) +extern int dhdpcie_enable_device(dhd_bus_t *bus); +#endif + +#ifdef BCMPCIE_OOB_HOST_WAKE +extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); +extern void 
dhdpcie_oob_intr_unregister(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); +extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus); +extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus); +extern int dhdpcie_get_oob_irq_level(void); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef PCIE_OOB +extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val); +extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus); +extern void dhdpcie_oob_init(dhd_bus_t *bus); +extern int dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val); +extern void dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val); +#endif /* PCIE_OOB */ +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) +extern void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ + +#if defined(linux) || defined(LINUX) +/* XXX: SWWLAN-82173 Making PCIe RC D3cold by force during system PM + * exynos_pcie_pm_suspend : RC goes to suspend status & assert PERST + * exynos_pcie_pm_resume : de-assert PERST & RC goes to resume status + */ +#if defined(CONFIG_ARCH_EXYNOS) +#define EXYNOS_PCIE_VENDOR_ID 0x144d +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420) +#define EXYNOS_PCIE_DEVICE_ID 0xa575 +#define EXYNOS_PCIE_CH_NUM 1 +#elif defined(CONFIG_SOC_EXYNOS8890) +#define EXYNOS_PCIE_DEVICE_ID 0xa544 +#define EXYNOS_PCIE_CH_NUM 0 +#elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \ + defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830) || \ + defined(CONFIG_SOC_EXYNOS2100) || defined(CONFIG_SOC_EXYNOS1000) || \ + defined(CONFIG_SOC_GS101) +#define EXYNOS_PCIE_DEVICE_ID 0xecec +#define EXYNOS_PCIE_CH_NUM 0 +#else +#error "Not supported platform" +#endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */ +extern void exynos_pcie_pm_suspend(int ch_num); +extern void exynos_pcie_pm_resume(int ch_num); +#endif /* CONFIG_ARCH_EXYNOS */ + +#if defined(CONFIG_ARCH_MSM) +#define MSM_PCIE_VENDOR_ID 0x17cb +#if defined(CONFIG_ARCH_APQ8084) +#define MSM_PCIE_DEVICE_ID 0x0101 +#elif defined(CONFIG_ARCH_MSM8994) +#define MSM_PCIE_DEVICE_ID 0x0300 +#elif defined(CONFIG_ARCH_MSM8996) +#define MSM_PCIE_DEVICE_ID 0x0104 +#elif defined(CONFIG_ARCH_MSM8998) +#define MSM_PCIE_DEVICE_ID 0x0105 +#elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \ + defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA) +#define MSM_PCIE_DEVICE_ID 0x0106 +#else +#error "Not supported platform" +#endif +#endif /* CONFIG_ARCH_MSM */ + +#if defined(CONFIG_X86) +#define X86_PCIE_VENDOR_ID 0x8086 +#define X86_PCIE_DEVICE_ID 0x9c1a +#endif /* CONFIG_X86 */ + +#if defined(CONFIG_ARCH_TEGRA) +#define TEGRA_PCIE_VENDOR_ID 0x14e4 +#define TEGRA_PCIE_DEVICE_ID 0x4347 +#endif /* CONFIG_ARCH_TEGRA */ + +#if defined(BOARD_HIKEY) +#define HIKEY_PCIE_VENDOR_ID 0x19e5 +#define HIKEY_PCIE_DEVICE_ID 0x3660 +#endif /* BOARD_HIKEY */ + +#define DUMMY_PCIE_VENDOR_ID 0xffff +#define DUMMY_PCIE_DEVICE_ID 0xffff + +#if defined(CONFIG_ARCH_EXYNOS) +#define PCIE_RC_VENDOR_ID EXYNOS_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID EXYNOS_PCIE_DEVICE_ID +#elif defined(CONFIG_ARCH_MSM) +#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID +#elif defined(CONFIG_X86) +#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID +#elif defined(CONFIG_ARCH_TEGRA) +#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID +#elif defined(BOARD_HIKEY) 
+#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID +#else +/* Use dummy vendor and device IDs */ +#define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID +#endif /* CONFIG_ARCH_EXYNOS */ +#endif /* linux || LINUX */ + +#define DHD_REGULAR_RING 0 +#define DHD_HP2P_RING 1 + +#ifdef CONFIG_ARCH_TEGRA +extern int tegra_pcie_pm_suspend(void); +extern int tegra_pcie_pm_resume(void); +#endif /* CONFIG_ARCH_TEGRA */ + +extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus); +#ifdef IDLE_TX_FLOW_MGMT +extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg); +extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status); +extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg); +extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +#endif /* IDLE_TX_FLOW_MGMT */ + +extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data); + +#ifdef DHD_WAKE_STATUS +int bcmpcie_get_total_wake(struct dhd_bus *bus); +int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag); +#endif /* DHD_WAKE_STATUS */ +#ifdef DHD_MMIO_TRACE +extern void dhd_dump_bus_mmio_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf); +#endif /* defined(DHD_MMIO_TRACE) */ +extern void dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf); +extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus); +extern void dhd_bus_hostready(struct dhd_bus *bus); +#ifdef PCIE_OOB +extern bool dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus); +#endif /* PCIE_OOB */ +#ifdef PCIE_INB_DW +extern bool dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus); +extern void dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, + enum dhd_bus_ds_state state); +extern enum dhd_bus_ds_state dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus); +extern const char * dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate); +extern const char * dhd_convert_dsval(uint32 val, bool d2h); +extern int dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val); +extern void dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus); +#endif /* PCIE_INB_DW */ +extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option); +#if defined(LINUX) || defined(linux) +extern int dhdpcie_irq_disabled(struct dhd_bus *bus); +extern int dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus *bus); +#else +static INLINE bool dhdpcie_irq_disabled(struct dhd_bus *bus) { return BCME_ERROR;} +static INLINE int dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus *bus) +{ return BCME_ERROR;} +#endif /* defined(LINUX) || defined(linux) */ + +#ifdef DHD_EFI +extern bool dhdpcie_is_arm_halted(struct dhd_bus *bus); +extern int dhd_os_wifi_platform_set_power(uint32 value); +extern void dhdpcie_dongle_pwr_toggle(dhd_bus_t *bus); +void dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus); +int dhd_control_signal(dhd_bus_t *bus, char *arg, int len, int set); +extern int dhd_wifi_properties(struct dhd_bus *bus, char *arg, int len); +extern int dhd_otp_dump(dhd_bus_t 
*bus, char *arg, int len); +extern int dhdpcie_deinit_phase1(dhd_bus_t *bus); +int dhdpcie_disable_intr_poll(dhd_bus_t *bus); +int dhdpcie_enable_intr_poll(dhd_bus_t *bus); +#ifdef BT_OVER_PCIE +int dhd_btop_test(dhd_bus_t *bus, char *arg, int len); +#endif /* BT_OVER_PCIE */ +#else +static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;} +static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; } +static INLINE void +dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus) +{ return; } +#endif /* DHD_EFI */ + +int dhdpcie_config_check(dhd_bus_t *bus); +int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr); +int dhdpcie_config_save(dhd_bus_t *bus); +int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state); + +extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus); +extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus); +extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus); +extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus); +extern bool dhdpcie_bus_get_hp2p_supported(dhd_bus_t *bus); + +static INLINE uint32 +dhd_pcie_config_read(dhd_bus_t *bus, uint offset, uint size) +{ + /* For 4375 or prior chips to 4375 */ + if (bus->sih && bus->sih->buscorerev <= 64) { + OSL_DELAY(100); + } + return OSL_PCI_READ_CONFIG(bus->osh, offset, size); +} + +static INLINE uint32 +dhd_pcie_corereg_read(si_t *sih, uint val) +{ + /* For 4375 or prior chips to 4375 */ + if (sih->buscorerev <= 64) { + OSL_DELAY(100); + } + si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val); + return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0); +} + +extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path, + char *clm_path, char *txcap_path); + +extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd); +extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd); +extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus); +#ifdef DHD_HP2P +extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx); +#endif + +#if defined(DHD_EFI) +extern wifi_properties_t *dhd_get_props(dhd_bus_t *bus); +#endif + +#if defined(DHD_EFI) || defined(NDIS) +extern int dhd_get_platform(dhd_pub_t* dhd, char *progname); +extern bool dhdpcie_is_chip_supported(uint32 chipid, int *idx); +extern bool dhdpcie_is_sflash_chip(uint32 chipid); +#endif + +extern int dhd_get_pcie_linkspeed(dhd_pub_t *dhd); +extern void dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus); + +#ifdef PCIE_INB_DW +extern void dhdpcie_set_dongle_deepsleep(dhd_bus_t *bus, bool val); +extern void dhd_init_dongle_ds_lock(dhd_bus_t *bus); +extern void dhd_deinit_dongle_ds_lock(dhd_bus_t *bus); +#endif /* PCIE_INB_DW */ + +#endif /* dhd_pcie_h */ diff --git a/bcmdhd.101.10.361.x/dhd_pcie_linux.c b/bcmdhd.101.10.361.x/dhd_pcie_linux.c new file mode 100755 index 0000000..2a18c7c --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_pcie_linux.c @@ -0,0 +1,3379 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +/* include files */ +#include +#include +#include +#include /* need to still support chips no longer in trunk firmware */ +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef OEM_ANDROID +#ifdef CONFIG_ARCH_MSM +#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996) +#include +#else +#include +#endif /* CONFIG_PCI_MSM */ +#endif /* CONFIG_ARCH_MSM */ +#endif /* OEM_ANDROID */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#include +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \ + defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \ + defined(CONFIG_SOC_EXYNOS1000) || defined(CONFIG_SOC_GS101) +#include +#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 || + * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 || + * CONFIG_SOC_EXYNOS1000 || CONFIG_SOC_GS101 + */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#ifndef AUTO_SUSPEND_TIMEOUT +#define AUTO_SUSPEND_TIMEOUT 1000 +#endif /* AUTO_SUSPEND_TIMEOUT */ +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef DHD_PCIE_RUNTIMEPM +#define RPM_WAKE_UP_TIMEOUT 10000 /* ms */ +#endif /* DHD_PCIE_RUNTIMEPM */ + +#include +#ifdef USE_SMMU_ARCH_MSM +#include +#include +#include +#include +#endif /* USE_SMMU_ARCH_MSM */ +#include + +#ifdef PCIE_OOB +#include "ftdi_sio_external.h" +#endif /* PCIE_OOB */ + +#define PCI_CFG_RETRY 10 /* PR15065: retry count for pci cfg accesses */ +#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ +#define BCM_MEM_FILENAME_LEN 24 /* Mem. 
filename length */ + +#ifdef PCIE_OOB +#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */ +#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */ +#define BIT_WL_REG_ON 6 +#define BIT_BT_REG_ON 7 + +int gpio_handle_val = 0; +unsigned char gpio_port = 0; +unsigned char gpio_direction = 0; +#define OOB_PORT "ttyUSB0" +#endif /* PCIE_OOB */ + +#ifndef BCMPCI_DEV_ID +#define BCMPCI_DEV_ID PCI_ANY_ID +#endif + +#ifdef FORCE_TPOWERON +extern uint32 tpoweron_scale; +#endif /* FORCE_TPOWERON */ +/* user defined data structures */ + +typedef bool (*dhdpcie_cb_fn_t)(void *); + +typedef struct dhdpcie_info +{ + dhd_bus_t *bus; + osl_t *osh; + struct pci_dev *dev; /* pci device handle */ + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + uint32 bar1_size; /* pci device memory size */ + struct pcos_info *pcos_info; + uint16 last_intrstatus; /* to cache intrstatus */ + int irq; + char pciname[32]; + struct pci_saved_state* default_state; + struct pci_saved_state* state; +#ifdef BCMPCIE_OOB_HOST_WAKE + void *os_cxt; /* Pointer to per-OS private data */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_WAKE_STATUS + spinlock_t pkt_wake_lock; + unsigned int total_wake_count; + int pkt_wake; + int wake_irq; +#endif /* DHD_WAKE_STATUS */ +#ifdef USE_SMMU_ARCH_MSM + void *smmu_cxt; +#endif /* USE_SMMU_ARCH_MSM */ +} dhdpcie_info_t; + +struct pcos_info { + dhdpcie_info_t *pc; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; + timer_list_compat_t tuning_timer; + int tuning_timer_exp; + atomic_t timer_enab; + struct tasklet_struct tuning_tasklet; +}; + +#ifdef BCMPCIE_OOB_HOST_WAKE +typedef struct dhdpcie_os_info { + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + void *dev; /* handle to the underlying device */ +} dhdpcie_os_info_t; +static irqreturn_t wlan_oob_irq(int irq, void *data); +#ifdef CUSTOMER_HW2 +extern struct brcm_pcie_wake brcm_pcie_wake; +#endif /* CUSTOMER_HW2 */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef USE_SMMU_ARCH_MSM +typedef struct dhdpcie_smmu_info { + struct dma_iommu_mapping *smmu_mapping; + dma_addr_t smmu_iova_start; + size_t smmu_iova_len; +} dhdpcie_smmu_info_t; +#endif /* USE_SMMU_ARCH_MSM */ + +/* function declarations */ +static int __devinit +dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit +dhdpcie_pci_remove(struct pci_dev *pdev); +static int dhdpcie_init(struct pci_dev *pdev); +static irqreturn_t dhdpcie_isr(int irq, void *arg); +/* OS Routine functions for PCI suspend/resume */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint); +#else +static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +static int dhdpcie_resume_host_dev(dhd_bus_t *bus); +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); +static int dhdpcie_resume_dev(struct pci_dev *dev); +static int dhdpcie_suspend_dev(struct pci_dev *dev); +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev); +static int dhdpcie_pm_prepare(struct device *dev); +static int dhdpcie_pm_resume(struct device *dev); +static void dhdpcie_pm_complete(struct device *dev); +#else +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_pm_system_suspend_noirq(struct 
device * dev); +static int dhdpcie_pm_system_resume_noirq(struct device * dev); +#else +static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); +static int dhdpcie_pci_resume(struct pci_dev *dev); +#if defined(BT_OVER_PCIE) +static int dhdpcie_pci_resume_early(struct pci_dev *dev); +#endif /* BT_OVER_PCIE */ +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#endif /* DHD_PCIE_RUNTIMEPM */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_pm_runtime_suspend(struct device * dev); +static int dhdpcie_pm_runtime_resume(struct device * dev); +static int dhdpcie_pm_system_suspend_noirq(struct device * dev); +static int dhdpcie_pm_system_resume_noirq(struct device * dev); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef SUPPORT_EXYNOS7420 +void exynos_pcie_pm_suspend(int ch_num) {} +void exynos_pcie_pm_resume(int ch_num) {} +#endif /* SUPPORT_EXYNOS7420 */ + +static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state); + +uint32 +dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write, + uint32 writeval); + +static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { + { vendor: VENDOR_BROADCOM, + device: BCMPCI_DEV_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + class: PCI_CLASS_NETWORK_OTHER << 8, + class_mask: 0xffff00, + driver_data: 0, + }, + { 0, 0, 0, 0, 0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); + +/* Power Management Hooks */ +#ifdef DHD_PCIE_RUNTIMEPM +static const struct dev_pm_ops dhd_pcie_pm_ops = { + .prepare = dhdpcie_pm_prepare, + .suspend = dhdpcie_pm_suspend, + .resume = dhdpcie_pm_resume, + .complete = dhdpcie_pm_complete, +}; +#endif /* DHD_PCIE_RUNTIMEPM */ +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static const struct dev_pm_ops dhdpcie_pm_ops = { + SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL) + .suspend_noirq = dhdpcie_pm_system_suspend_noirq, + .resume_noirq = dhdpcie_pm_system_resume_noirq +}; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +static struct pci_driver dhdpcie_driver = { + node: {&dhdpcie_driver.node, &dhdpcie_driver.node}, + name: "pcieh", + id_table: dhdpcie_pci_devid, + probe: dhdpcie_pci_probe, + remove: dhdpcie_pci_remove, +#if defined (DHD_PCIE_RUNTIMEPM) || defined (DHD_PCIE_NATIVE_RUNTIMEPM) + .driver.pm = &dhd_pcie_pm_ops, +#else + suspend: dhdpcie_pci_suspend, + resume: dhdpcie_pci_resume, +#if defined(BT_OVER_PCIE) + resume_early: dhdpcie_pci_resume_early, +#endif /* BT_OVER_PCIE */ +#endif /* DHD_PCIE_RUNTIMEPM || DHD_PCIE_NATIVE_RUNTIMEPM */ +}; + +int dhdpcie_init_succeeded = FALSE; + +#ifdef USE_SMMU_ARCH_MSM +static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt) +{ + struct dma_iommu_mapping *mapping; + struct device_node *root_node = NULL; + dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt; + int smmu_iova_address[2]; + char *wlan_node = "android,bcmdhd_wlan"; + char *wlan_smmu_node = "wlan-smmu-iova-address"; + int atomic_ctx = 1; + int s1_bypass = 1; + int ret = 0; + + DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__)); + + root_node = of_find_compatible_node(NULL, NULL, wlan_node); + if (!root_node) { + WARN(1, "failed to get device node of BRCM WLAN\n"); + return -ENODEV; + } + + if (of_property_read_u32_array(root_node, wlan_smmu_node, + smmu_iova_address, 2) == 0) { + DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n", + __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1])); + smmu_info->smmu_iova_start = smmu_iova_address[0]; + smmu_info->smmu_iova_len = smmu_iova_address[1]; + 
} else {
+ printf("%s : can't get smmu iova address property\n",
+ __FUNCTION__);
+ return -ENODEV;
+ }
+
+ if (smmu_info->smmu_iova_len <= 0) {
+ DHD_ERROR(("%s: Invalid smmu iova len %d\n",
+ __FUNCTION__, (int)smmu_info->smmu_iova_len));
+ return -EINVAL;
+ }
+
+ DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
+ if (IS_ERR(mapping)) {
+ /* fetch the error code before logging it */
+ ret = PTR_ERR(mapping);
+ DHD_ERROR(("%s: create mapping failed, err = %d\n",
+ __FUNCTION__, ret));
+ goto map_fail;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_ATOMIC, &atomic_ctx);
+ if (ret) {
+ DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
+ __FUNCTION__, ret));
+ goto set_attr_fail;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
+ __FUNCTION__, ret));
+ goto set_attr_fail;
+ }
+
+ ret = arm_iommu_attach_device(&pdev->dev, mapping);
+ if (ret) {
+ DHD_ERROR(("%s: attach device failed, err = %d\n",
+ __FUNCTION__, ret));
+ goto attach_fail;
+ }
+
+ smmu_info->smmu_mapping = mapping;
+
+ return ret;
+
+attach_fail:
+set_attr_fail:
+ arm_iommu_release_mapping(mapping);
+map_fail:
+ return ret;
+}
+
+static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
+{
+ dhdpcie_smmu_info_t *smmu_info;
+
+ if (!smmu_cxt) {
+ return;
+ }
+
+ smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
+ if (smmu_info->smmu_mapping) {
+ arm_iommu_detach_device(&pdev->dev);
+ arm_iommu_release_mapping(smmu_info->smmu_mapping);
+ smmu_info->smmu_mapping = NULL;
+ }
+}
+#endif /* USE_SMMU_ARCH_MSM */
+
+#ifdef FORCE_TPOWERON
+static void
+dhd_bus_get_tpoweron(dhd_bus_t *bus)
+{
+
+ uint32 tpoweron_rc;
+ uint32 tpoweron_ep;
+
+ tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
+ tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
+ DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n",
+ __FUNCTION__, tpoweron_rc, tpoweron_ep));
+}
+
+static void
+dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
+{
+
+ dhd_bus_get_tpoweron(bus);
+ /* Set the tpoweron */
+ DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
+ dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
+ dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
+
+ dhd_bus_get_tpoweron(bus);
+
+}
+
+static bool
+dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
+{
+ /*
+ * On Fire's reference platform, coming out of L1.2,
+ * there is a constant delay of 45us between CLKREQ# and stable REFCLK.
+ * Due to this delay, with tPowerOn < 50
+ * there is a chance of the refclk sense triggering on noise.
+ *
+ * Whichever chip needs a forced tPowerOn of 50us should be listed below.
+ */
+ if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif /* FORCE_TPOWERON */
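For reference, the L1SS Control 2 value that dhd_bus_set_tpoweron() writes above packs T_Power_On as a scale/value pair. A minimal editorial sketch (not part of the patch) of the 50us encoding mentioned in the comment, assuming the standard PCIe L1 PM Substates field layout (bits [1:0] scale: 0 = 2us, 1 = 10us, 2 = 100us; bits [7:3] value); the helper name is illustrative only:

static uint16
example_encode_tpoweron_50us(void)
{
	uint16 scale = 1;	/* units of 10us */
	uint16 value = 5;	/* 5 * 10us = 50us */

	/* T_Power_On value in bits [7:3], scale in bits [1:0] -> 0x29 */
	return (uint16)((value << 3) | scale);
}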
+
+#ifdef BT_OVER_PCIE
+int dhd_bus_pwr_off(dhd_pub_t *dhdp, int reason)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+
+int dhd_bus_pwr_on(dhd_pub_t *dhdp, int reason)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+
+int dhd_bus_pwr_toggle(dhd_pub_t *dhdp, int reason)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+
+bool dhdpcie_is_btop_chip(struct dhd_bus *bus)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return FALSE;
+}
+
+int dhdpcie_redownload_fw(dhd_pub_t *dhdp)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+#endif /* BT_OVER_PCIE */
+
+static bool
+dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
+{
+ uint32 linkctrl_before;
+ uint32 linkctrl_after = 0;
+ uint8 linkctrl_asm;
+ char *device;
+
+ device = (dev == bus->dev) ? "EP" : "RC";
+
+ linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK);
+
+ if (enable) {
+ if (linkctrl_asm == PCIE_ASPM_L1_ENAB) {
+ DHD_ERROR(("%s: %s already enabled linkctrl: 0x%x\n",
+ __FUNCTION__, device, linkctrl_before));
+ return FALSE;
+ }
+ /* Enable only L1 ASPM (bit 1) */
+ dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
+ TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB));
+ } else {
+ if (linkctrl_asm == 0) {
+ DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n",
+ __FUNCTION__, device, linkctrl_before));
+ return FALSE;
+ }
+ /* Disable complete ASPM (bit 1 and bit 0) */
+ dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
+ TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB)));
+ }
+
+ linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
+ __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
+ linkctrl_before, linkctrl_after));
+
+ return TRUE;
+}
+
+static bool
+dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
+{
+ uint32 rc_aspm_cap;
+ uint32 ep_aspm_cap;
+
+ /* RC ASPM capability */
+ rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ if (rc_aspm_cap == BCME_ERROR) {
+ DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ /* EP ASPM capability */
+ ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ if (ep_aspm_cap == BCME_ERROR) {
+ DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+bool
+dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
+{
+ bool ret;
+
+ if (!bus->rc_ep_aspm_cap) {
+ DHD_ERROR(("%s: NOT ASPM CAPABLE rc_ep_aspm_cap: %d\n",
+ __FUNCTION__, bus->rc_ep_aspm_cap));
+ return FALSE;
+ }
+
+ if (enable) {
+ /* Enable only L1 ASPM first RC then EP */
+ ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
+ ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
+ } else {
+ /* Disable complete ASPM first EP then RC */
+ ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
+ ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
+ }
+
+ return ret;
+}
"ENABLE " : "DISABLE"), + l1ssctrl_before, l1ssctrl_after)); + +} + +static bool +dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus) +{ + uint32 rc_l1ss_cap; + uint32 ep_l1ss_cap; + + /* RC Extendend Capacility */ + rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); + if (rc_l1ss_cap == BCME_ERROR) { + DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__)); + return FALSE; + } + + /* EP Extendend Capacility */ + ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); + if (ep_l1ss_cap == BCME_ERROR) { + DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__)); + return FALSE; + } + + return TRUE; +} + +void +dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable) +{ + bool ret; + + if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) { + DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n", + __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap)); + return; + } + + /* Disable ASPM of RC and EP */ + ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE); + + if (enable) { + /* Enable RC then EP */ + dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable); + dhd_bus_l1ss_enable_dev(bus, bus->dev, enable); + } else { + /* Disable EP then RC */ + dhd_bus_l1ss_enable_dev(bus, bus->dev, enable); + dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable); + } + + /* Enable ASPM of RC and EP only if this API disabled */ + if (ret == TRUE) { + dhd_bus_aspm_enable_rc_ep(bus, TRUE); + } +} + +void +dhd_bus_aer_config(dhd_bus_t *bus) +{ + uint32 val; + + DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__)); + val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0); + if (val != (uint32)-1) { + val &= ~CORR_ERR_AE; + dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val); + } else { + DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n", + __FUNCTION__, val)); + } + + DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__)); + val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0); + if (val != (uint32)-1) { + val &= ~CORR_ERR_AE; + dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val); + } else { + DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n", + __FUNCTION__, val)); + } +} + +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev) +{ + int ret = 0; + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + int msglevel = dhd_msg_level; + + printf("%s: Enter\n", __FUNCTION__); + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus not IDLE!! 
dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return -EBUSY; + } + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhd_msg_level |= DHD_RPM_VAL; + if (bus->dhd->up) + ret = dhdpcie_set_suspend_resume(bus, TRUE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + dhd_msg_level = msglevel; + printf("%s: Exit ret=%d\n", __FUNCTION__, ret); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; + +} + +static int dhdpcie_pm_prepare(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (!pch || !pch->bus) { + return 0; + } + + bus = pch->bus; + DHD_DISABLE_RUNTIME_PM(bus->dhd); + bus->chk_pm = TRUE; + + return 0; +} + +static int dhdpcie_pm_resume(struct device *dev) +{ + int ret = 0; + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + int msglevel = dhd_msg_level; + + printf("%s: Enter\n", __FUNCTION__); + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhd_msg_level |= DHD_RPM_VAL; + if (bus->dhd->up) + ret = dhdpcie_set_suspend_resume(bus, FALSE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + dhd_msg_level = msglevel; + printf("%s: Exit ret=%d\n", __FUNCTION__, ret); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; +} + +static void dhdpcie_pm_complete(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (!pch || !pch->bus) { + return; + } + + bus = pch->bus; + DHD_ENABLE_RUNTIME_PM(bus->dhd); + bus->chk_pm = FALSE; + + return; +} +#else +static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) +{ + int ret = 0; + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + uint32 i = 0; + + printf("%s: Enter\n", __FUNCTION__); + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + BCM_REFERENCE(state); + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + OSL_DELAY(1000); + /* retry till the transaction is complete */ + while (i < 100) { + OSL_DELAY(1000); + i++; + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n", + __FUNCTION__, i)); + break; + } + if (i != 100) { + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } + } + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_GENERAL_UNLOCK(bus->dhd, flags); + DHD_ERROR(("%s: Bus not IDLE!! 
Failed after %d ms, "
+ "dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
+ return -EBUSY;
+ }
+ }
+ DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_CFG80211_SUSPEND_RESUME
+ dhd_cfg80211_suspend(bus->dhd);
+#endif /* DHD_CFG80211_SUSPEND_RESUME */
+
+ if (!bus->dhd->dongle_reset)
+ ret = dhdpcie_set_suspend_resume(bus, TRUE);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return ret;
+}
+
+#if defined(BT_OVER_PCIE)
+static int dhdpcie_pci_resume_early(struct pci_dev *pdev)
+{
+ int ret = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ uint32 pmcsr;
+
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9))
+ /* On fc30 (linux ver 5.0.9),
+ * PMEStat of PMCSR(cfg reg) is cleared before this callback by kernel.
+ * So, we use SwPme of FunctionControl(enum reg) instead of PMEStat without kernel change.
+ */
+ if (bus->sih->buscorerev >= 64) {
+ uint32 ftnctrl;
+ volatile void *regsva = (volatile void *)bus->regs;
+
+ ftnctrl = pcie_corereg(bus->osh, regsva,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.control), 0, 0);
+ pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));
+
+ DHD_ERROR(("%s(): pmcsr is 0x%x, ftnctrl is 0x%8x \r\n",
+ __FUNCTION__, pmcsr, ftnctrl));
+ if (ftnctrl & PCIE_FTN_SWPME_MASK) {
+ DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
+ }
+ } else
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9)) */
+ {
+ pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));
+
+ DHD_ERROR(("%s(): pmcsr is 0x%x \r\n", __FUNCTION__, pmcsr));
+ if (pmcsr & PCIE_PMCSR_PMESTAT) {
+ DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
+ }
+ }
+
+ /*
+ * TODO: Add code to take advantage of what is read from pmcsr
+ */
+
+ return ret;
+}
+#endif /* BT_OVER_PCIE */
+
+static int dhdpcie_pci_resume(struct pci_dev *pdev)
+{
+ int ret = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ unsigned long flags;
+
+ printf("%s: Enter\n", __FUNCTION__);
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ if (!bus->dhd->dongle_reset)
+ ret = dhdpcie_set_suspend_resume(bus, FALSE);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_CFG80211_SUSPEND_RESUME
+ dhd_cfg80211_resume(bus->dhd);
+#endif /* DHD_CFG80211_SUSPEND_RESUME */
+ return ret;
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+static int
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
+#else
+dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+{
+ int ret = 0;
+
+ ASSERT(bus && !bus->dhd->dongle_reset);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* if wakelock is held during suspend, return failed */
+ if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
+ return -EBUSY;
+ }
+ mutex_lock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ DHD_RPM(("%s: Enter state=%d\n", __FUNCTION__, state));
+
+ /* When firmware is not loaded do 
the PCI bus */ + /* suspend/resume only */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + ret = dhdpcie_pci_suspend_resume(bus, state); +#ifdef DHD_PCIE_RUNTIMEPM + mutex_unlock(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ + return ret; + } +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + ret = dhdpcie_bus_suspend(bus, state, byint); +#else + ret = dhdpcie_bus_suspend(bus, state); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT) + if (ret == BCME_OK) { + /* + * net.ipv4.tcp_limit_output_bytes is used for all ipv4 sockets + * so, returning back to original value when there is no traffic(suspend) + */ + if (state == TRUE) { + dhd_ctrl_tcp_limit_output_bytes(0); + } else { + dhd_ctrl_tcp_limit_output_bytes(1); + } + } +#endif /* LINUX_VERSION_CODE > 4.19.0 && DHD_TCP_LIMIT_OUTPUT */ + DHD_RPM(("%s: Exit ret=%d\n", __FUNCTION__, ret)); + +#ifdef DHD_PCIE_RUNTIMEPM + mutex_unlock(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ + + return ret; +} + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_pm_runtime_suspend(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + int ret = 0; + + if (!pch) + return -EBUSY; + + bus = pch->bus; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + if (atomic_read(&bus->dhd->block_bus)) + return -EHOSTDOWN; + + dhd_netif_stop_queue(bus); + atomic_set(&bus->dhd->block_bus, TRUE); + + if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) { + pm_runtime_mark_last_busy(dev); + ret = -EAGAIN; + } + + atomic_set(&bus->dhd->block_bus, FALSE); + dhd_bus_start_queue(bus); + + return ret; +} + +static int dhdpcie_pm_runtime_resume(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = pch->bus; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + if (atomic_read(&bus->dhd->block_bus)) + return -EHOSTDOWN; + + if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE)) + return -EAGAIN; + + return 0; +} + +static int dhdpcie_pm_system_suspend_noirq(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + int ret; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + if (!pch) + return -EBUSY; + + bus = pch->bus; + + if (atomic_read(&bus->dhd->block_bus)) + return -EHOSTDOWN; + + dhd_netif_stop_queue(bus); + atomic_set(&bus->dhd->block_bus, TRUE); + + ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE); + + if (ret) { + dhd_bus_start_queue(bus); + atomic_set(&bus->dhd->block_bus, FALSE); + } + + return ret; +} + +static int dhdpcie_pm_system_resume_noirq(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + int ret; + + if (!pch) + return -EBUSY; + + bus = pch->bus; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE); + + atomic_set(&bus->dhd->block_bus, FALSE); + dhd_bus_start_queue(bus); + pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); + + return ret; +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + +static void +dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state) +{ + DHD_RPM(("%s: BaseAddress0(0x%x)=0x%x, " + "BaseAddress1(0x%x)=0x%x 
PCIE_CFG_PMCSR(0x%x)=0x%x "
+ "PCI_BAR1_WIN(0x%x)=(0x%x)\n",
+ suspend_state,
+ PCIECFGREG_BASEADDR0,
+ dhd_pcie_config_read(bus,
+ PCIECFGREG_BASEADDR0, sizeof(uint32)),
+ PCIECFGREG_BASEADDR1,
+ dhd_pcie_config_read(bus,
+ PCIECFGREG_BASEADDR1, sizeof(uint32)),
+ PCIE_CFG_PMCSR,
+ dhd_pcie_config_read(bus,
+ PCIE_CFG_PMCSR, sizeof(uint32)),
+ PCI_BAR1_WIN,
+ dhd_pcie_config_read(bus,
+ PCI_BAR1_WIN, sizeof(uint32))));
+}
+
+static int dhdpcie_suspend_dev(struct pci_dev *dev)
+{
+ int ret;
+ dhdpcie_info_t *pch = pci_get_drvdata(dev);
+ dhd_bus_t *bus = pch->bus;
+
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ DHD_RPM(("%s: Enter\n", __FUNCTION__));
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000)
+ DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
+ * CONFIG_SOC_EXYNOS1000
+ */
+#if defined(CONFIG_SOC_GS101)
+ DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, 1);
+#endif /* CONFIG_SOC_GS101 */
+
+ dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ dhd_dpc_tasklet_kill(bus->dhd);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ pci_save_state(dev);
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ pch->state = pci_store_saved_state(dev);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ pci_enable_wake(dev, PCI_D0, TRUE);
+ if (pci_is_enabled(dev))
+ pci_disable_device(dev);
+
+ ret = pci_set_power_state(dev, PCI_D3hot);
+ if (ret) {
+ DHD_ERROR(("%s: pci_set_power_state error %d\n",
+ __FUNCTION__, ret));
+ }
+#ifdef OEM_ANDROID
+// dev->state_saved = FALSE;
+#endif /* OEM_ANDROID */
+ dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
+ return ret;
+}
+
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ return pch->total_wake_count;
+}
+
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+ unsigned long flags;
+ int ret;
+
+ DHD_PKT_WAKE_LOCK(&pch->pkt_wake_lock, flags);
+
+ ret = pch->pkt_wake;
+ pch->total_wake_count += flag;
+ pch->pkt_wake = flag;
+
+ DHD_PKT_WAKE_UNLOCK(&pch->pkt_wake_lock, flags);
+ return ret;
+}
+#endif /* DHD_WAKE_STATUS */
+
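bcmpcie_set_get_wake() above returns the previous pkt_wake flag while installing the new one, so a caller can read-and-clear in a single locked step. A small editorial sketch of a hypothetical consumer (the function name is illustrative, not from the patch):

static void
example_report_packet_wake(struct dhd_bus *bus)
{
	/* pass 0 so the pending wake is consumed exactly once */
	if (bcmpcie_set_get_wake(bus, 0)) {
		DHD_ERROR(("host wake by WLAN packet, total wakes %d\n",
			bcmpcie_get_total_wake(bus)));
	}
}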
+static int dhdpcie_resume_dev(struct pci_dev *dev)
+{
+ int err = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(dev);
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ pci_load_and_free_saved_state(dev, &pch->state);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ DHD_RPM(("%s: Enter\n", __FUNCTION__));
+#ifdef OEM_ANDROID
+// dev->state_saved = TRUE;
+#endif /* OEM_ANDROID */
+ pci_restore_state(dev);
+
+ /* Restore current bar1 window */
+ OSL_PCI_WRITE_CONFIG(pch->bus->osh, PCI_BAR1_WIN, 4, pch->bus->curr_bar1_win);
+
+#ifdef FORCE_TPOWERON
+ if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
+ dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
+ }
+#endif /* FORCE_TPOWERON */
+ err = pci_enable_device(dev);
+ if (err) {
+ printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
+ goto out;
+ }
+ pci_set_master(dev);
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err) {
+ printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
+ goto out;
+ }
+ BCM_REFERENCE(pch);
+ dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000)
+ DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
+ * CONFIG_SOC_EXYNOS1000
+ */
+#if defined(CONFIG_SOC_GS101)
+ DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, 1);
+#endif /* CONFIG_SOC_GS101 */
+
+out:
+ return err;
+}
+
+static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+
+ bcmerror = dhdpcie_start_host_dev(bus);
+ if (bcmerror < 0) {
+ DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
+ __FUNCTION__, bcmerror));
+ bus->is_linkdown = 1;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ }
+
+ return bcmerror;
+}
+
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+#ifdef CONFIG_ARCH_EXYNOS
+ /*
+ * XXX : SWWLAN-82173, SWWLAN-82183 WAR for SS PCIe RC
+ * SS PCIe RC/EP is 1 to 1 mapping using different channel
+ * RC0 - LTE, RC1 - WiFi RC0-1 is working independently
+ */
+
+ if (bus->rc_dev) {
+ pci_save_state(bus->rc_dev);
+ } else {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ }
+#endif /* CONFIG_ARCH_EXYNOS */
+ bcmerror = dhdpcie_stop_host_dev(bus);
+ return bcmerror;
+}
+
+int
+dhdpcie_set_master_and_d0_pwrstate(dhd_bus_t *bus)
+{
+ int err;
+ pci_set_master(bus->dev);
+ err = pci_set_power_state(bus->dev, PCI_D0);
+ if (err) {
+ DHD_ERROR(("%s: pci_set_power_state error %d \n", __FUNCTION__, err));
+ }
+ return err;
+}
+
+uint32
+dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
+{
+ uint val = -1; /* Initialise to 0xffffffff */
+ if (bus->rc_dev) {
+ pci_read_config_dword(bus->rc_dev, offset, &val);
+ OSL_DELAY(100);
+ } else {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ }
+ DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
+ return (val);
+}
+
+/*
+ * Reads/writes the value of a capability register
+ * from the given CAP_ID section of the PCI Root Port
+ *
+ * Arguments
+ * @bus current dhd_bus_t pointer
+ * @cap Capability or Extended Capability ID to get
+ * @offset offset of Register to Read
+ * @is_ext TRUE if @cap is given for Extended Capability
+ * @is_write is set to TRUE to indicate write
+ * @writeval value to write
+ *
+ * Return Value
+ * Returns 0xffffffff on error
+ * on write success returns BCME_OK (0)
+ * on Read Success returns the value of register requested
+ * Note: caller should ensure valid capability ID and Ext. Capability ID.
+ */
+
+uint32
+dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ int cap_ptr = 0;
+ uint32 ret = -1;
+ uint32 readval;
+
+ if (!(pdev)) {
+ DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ /* Find Capability offset */
+ if (is_ext) {
+ /* removing max EXT_CAP_ID check as
+ * the linux kernel definition's max value is not updated yet as per spec
+ */
+ cap_ptr = pci_find_ext_capability(pdev, cap);
+
+ } else {
+ /* removing max PCI_CAP_ID_MAX check as
+ * previous kernel versions don't have this definition
+ */
+ cap_ptr = pci_find_capability(pdev, cap);
+ }
+
+ /* Return if capability with given ID not found */
+ if (cap_ptr == 0) {
+ DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
+ __FUNCTION__, cap));
+ return BCME_ERROR;
+ }
+
+ if (is_write) {
+ pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
+ ret = BCME_OK;
+
+ } else {
+
+ pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
+ ret = readval;
+ }
+
+ return ret;
+}
+
+uint32
+dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ if (!(bus->rc_dev)) {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
+}
+
+uint32
+dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ if (!(bus->dev)) {
+ DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
+}
+
+/* API wrapper to read Root Port link capability
+ * Returns 2 = GEN2, 1 = GEN1, BCME_ERROR when linkcap is not found
+ */
+
+uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
+{
+ uint32 linkcap = -1;
+ linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
+ PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
+ linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
+ return linkcap;
+}
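As a usage illustration of the capability accessors above (editorial sketch, values illustrative): a read of a plain, non-extended capability passes is_ext = FALSE, and a write passes the new value in writeval. Clearing the ASPM control bits in the endpoint's Link Control register could look like this, using only constants already referenced in this file:

static void
example_clear_ep_aspm(dhd_bus_t *bus)
{
	uint32 linkctrl;

	linkctrl = dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP,
		PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0);
	if (linkctrl == (uint32)BCME_ERROR) {
		return;	/* capability not present */
	}
	/* write back with both ASPM control bits cleared */
	dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
		FALSE, TRUE, (linkctrl & ~PCIE_ASPM_ENAB));
}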
+
+static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
+{
+ if (bus->coreid == ARMCA7_CORE_ID) {
+ if (state) {
+ /* Sleep */
+ bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus,
+ PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK;
+ } else {
+ uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL,
+ 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val);
+ }
+ }
+}
+
+int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
+{
+ int rc;
+
+ struct pci_dev *dev = bus->dev;
+
+ if (state) {
+ dhdpcie_config_save_restore_coherent(bus, state);
+#if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
+ dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
+ rc = dhdpcie_suspend_dev(dev);
+ if (!rc) {
+ dhdpcie_suspend_host_dev(bus);
+ }
+ } else {
+ rc = dhdpcie_resume_host_dev(bus);
+ if (!rc) {
+ rc = dhdpcie_resume_dev(dev);
+ if (PCIECTO_ENAB(bus)) {
+ /* reinit CTO configuration
+ * because cfg space got reset at D3 (PERST)
+ */
+ dhdpcie_cto_cfg_init(bus, TRUE);
+ }
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ dhdpcie_ssreset_dis_enum_rst(bus);
+ }
+#if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
+ dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
+ }
+ dhdpcie_config_save_restore_coherent(bus, state);
+#if defined(OEM_ANDROID)
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (bus->is_linkdown ||
+ bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL)
+#else /* DHD_HANG_SEND_UP_TEST */
+ if (bus->is_linkdown)
+#endif /* DHD_HANG_SEND_UP_TEST */
+ {
+ bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
+ dhd_os_send_hang_message(bus->dhd);
+ }
+#endif /* OEM_ANDROID */
+ }
+ return rc;
+}
+
+static int dhdpcie_device_scan(struct device *dev, void *data)
+{
+ struct pci_dev *pcidev;
+ int *cnt = data;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ pcidev = container_of(dev, struct pci_dev, dev);
+ GCC_DIAGNOSTIC_POP();
+
+ if (pcidev->vendor != 0x14e4)
+ return 0;
+
+ DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
+ *cnt += 1;
+ if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
+ DHD_ERROR(("Broadcom PCI device 0x%04x is already bound to driver %s\n",
+ pcidev->device, pcidev->driver->name));
+
+ return 0;
+}
+
+int
+dhdpcie_bus_register(void)
+{
+ int error = 0;
+
+ if (!(error = pci_register_driver(&dhdpcie_driver))) {
+ bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
+ if (!error) {
+ DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
+#ifdef DHD_PRELOAD
+ return 0;
+#endif
+ } else if (!dhdpcie_init_succeeded) {
+ DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
+ } else {
+ return 0;
+ }
+
+ pci_unregister_driver(&dhdpcie_driver);
+ error = BCME_ERROR;
+ }
+
+ return error;
+}
+
+void
+dhdpcie_bus_unregister(void)
+{
+ pci_unregister_driver(&dhdpcie_driver);
+}
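dhdpcie_bus_register()/dhdpcie_bus_unregister() above are the PCI entry points the common DHD module glue drives at load and unload time. A minimal editorial sketch of that wiring, under the assumption of a standalone module (the init/exit names are hypothetical):

static int __init example_dhdpcie_init(void)
{
	/* registers dhdpcie_driver and scans for a matching Broadcom device */
	return dhdpcie_bus_register();
}

static void __exit example_dhdpcie_exit(void)
{
	dhdpcie_bus_unregister();
}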
+	*/
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
+	/* disable async suspend */
+	device_disable_async_suspend(&pdev->dev);
+#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
+
+	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+exit:
+	DHD_MUTEX_UNLOCK();
+	return err;
+}
+
+int
+dhdpcie_detach(dhdpcie_info_t *pch)
+{
+	if (pch) {
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload) {
+			pci_load_and_free_saved_state(pch->dev, &pch->default_state);
+		}
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+		MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
+	}
+	return 0;
+}
+
+void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev)
+{
+	osl_t *osh = NULL;
+	dhdpcie_info_t *pch = NULL;
+	dhd_bus_t *bus = NULL;
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	DHD_MUTEX_LOCK();
+
+	pch = pci_get_drvdata(pdev);
+	bus = pch->bus;
+	osh = pch->osh;
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+	if (bus) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		msm_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_EXYNOS
+		exynos_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+		bus->rc_dev = NULL;
+
+		dhdpcie_bus_release(bus);
+	}
+
+	/*
+	 * For a module-type driver, the configuration space needs to be backed
+	 * up again before rmmod: the originally backed-up configuration space
+	 * won't be restored while state_saved = false, so back it up here once
+	 * more, which sets state_saved = true.
+	 */
+	pci_save_state(pdev);
+
+	if (pci_is_enabled(pdev))
+		pci_disable_device(pdev);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	/* pcie os info detach */
+	MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef USE_SMMU_ARCH_MSM
+	/* smmu info detach */
+	dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
+	MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
+#endif /* USE_SMMU_ARCH_MSM */
+	/* pcie info detach */
+	dhdpcie_detach(pch);
+	/* osl detach */
+	osl_detach(osh);
+
+#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
+	defined(CONFIG_ARCH_APQ8084)
+	brcm_pcie_wake.wake_irq = NULL;
+	brcm_pcie_wake.data = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_MUTEX_UNLOCK();
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+
+	return;
+}
+
+/* Enable Linux MSI */
+int
+dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+	return pci_enable_msi_range(pdev, min_vecs, max_vecs);
+#else
+	return pci_enable_msi_block(pdev, max_vecs);
+#endif
+}
+
+/* Disable Linux MSI */
+void
+dhdpcie_disable_msi(struct pci_dev *pdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	pci_free_irq_vectors(pdev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+	pci_disable_msi(pdev);
+#else
+	pci_disable_msi(pdev);
+#endif
+	return;
+}
+
+/* Request Linux irq */
+int
+dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
+{
+	dhd_bus_t *bus =
dhdpcie_info->bus;
+	struct pci_dev *pdev = dhdpcie_info->bus->dev;
+	int host_irq_disabled;
+
+	if (!bus->irq_registered) {
+		snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+			"dhdpcie:%s", pci_name(pdev));
+
+		if (bus->d2h_intr_method == PCIE_MSI) {
+			if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
+				DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
+				dhdpcie_disable_msi(pdev);
+				bus->d2h_intr_method = PCIE_INTX;
+			}
+		}
+
+		if (bus->d2h_intr_method == PCIE_MSI)
+			printf("%s: MSI enabled\n", __FUNCTION__);
+		else
+			printf("%s: INTx enabled\n", __FUNCTION__);
+
+		if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+			dhdpcie_info->pciname, bus) < 0) {
+			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+			if (bus->d2h_intr_method == PCIE_MSI) {
+				dhdpcie_disable_msi(pdev);
+			}
+			return -1;
+		}
+		else {
+			bus->irq_registered = TRUE;
+		}
+	} else {
+		DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
+	}
+
+	host_irq_disabled = dhdpcie_irq_disabled(bus);
+	if (host_irq_disabled) {
+		DHD_ERROR(("%s: PCIe IRQ was disabled (%d), enabling it again\n",
+			__FUNCTION__, host_irq_disabled));
+		dhdpcie_enable_irq(bus);
+	}
+
+	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
+	return 0; /* SUCCESS */
+}
+
+/**
+ * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
+ */
+int
+dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
+{
+	struct pci_dev *pdev = bus->dev;
+
+	if (!pdev) {
+		DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	*irq = pdev->irq;
+
+	return 0; /* SUCCESS */
+}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRINTF_RESOURCE	"0x%016llx"
+#else
+#define PRINTF_RESOURCE	"0x%08x"
+#endif
+
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+extern struct pci_saved_state *bcm_pcie_default_state;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#endif /* EXYNOS_MODULE_PATCH */
+
+/*
+
+Name:  dhdpcie_get_resource
+
+Parameters:
+
+1: dhdpcie_info_t *dhdpcie_info -- structure holding the pci device pointer
+   and receiving the mapped PCI resources
+
+Return value:
+
+int - status (0 on success, -1 on failure)
+
+Description:
+Access PCI configuration space, retrieve the PCI-allocated resources,
+and update them in the resource structure.
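+
+Example (a minimal sketch, not part of the driver; it assumes the caller has
+filled in dhdpcie_info->dev, as dhdpcie_init() does, and checks the return
+code):
+
+	dhdpcie_info->dev = pdev;
+	if (dhdpcie_get_resource(dhdpcie_info) == 0) {
+		// dhdpcie_info->regs now maps BAR0 (chip registers)
+		// dhdpcie_info->tcm now maps BAR1 (dongle TCM, bar1_size bytes)
+	}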
+
+ */
+int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
+{
+	phys_addr_t bar0_addr, bar1_addr;
+	ulong bar1_size;
+	struct pci_dev *pdev = NULL;
+	pdev = dhdpcie_info->dev;
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	if (bcm_pcie_default_state) {
+		pci_load_saved_state(pdev, bcm_pcie_default_state);
+		pci_restore_state(pdev);
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#endif /* EXYNOS_MODULE_PATCH */
+
+	/*
+	 * For a built-in type driver, the configuration backup can't be
+	 * restored, because state_saved is false at first load time.
+	 * For a module type driver, the BAR0/BAR1 addresses can't be remapped
+	 * at second load without restoring the configuration backup; the
+	 * backup remains in pci_dev since DHD didn't remove it from the bus.
+	 * pci_restore_state() restores the proper BAR0/BAR1 addresses.
+	 */
+	pci_restore_state(pdev);
+
+	do {
+		if (pci_enable_device(pdev)) {
+			printf("%s: Cannot enable PCI device\n", __FUNCTION__);
+			break;
+		}
+		pci_set_master(pdev);
+		bar0_addr = pci_resource_start(pdev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(pdev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(pdev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 not enabled for this device: size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			goto err;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		dhdpcie_info->bar1_size =
+			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
+
+		if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() failed\n", __FUNCTION__));
+			break;
+		}
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (bcm_pcie_default_state == NULL) {
+			pci_save_state(pdev);
+			bcm_pcie_default_state = pci_store_saved_state(pdev);
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#endif /* EXYNOS_MODULE_PATCH */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		/* Back up the PCIe configuration so that the Wi-Fi on/off
+		 * process can use it in case of the built-in driver
+		 */
+		pci_save_state(pdev);
+		dhdpcie_info->default_state = pci_store_saved_state(pdev);
+
+		if (dhdpcie_info->default_state == NULL) {
+			DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+				__FUNCTION__));
+			REG_UNMAP(dhdpcie_info->regs);
+			REG_UNMAP(dhdpcie_info->tcm);
+			pci_disable_device(pdev);
+			break;
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+		DHD_TRACE(("%s: Phys addr: reg space = %p, base addr 0x"PRINTF_RESOURCE"\n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s: Phys addr: tcm_space = %p, base addr 0x"PRINTF_RESOURCE"\n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0; /* SUCCESS */
+	} while (0);
+err:
+	return -1; /* FAILURE */
+}
+
+int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
+{
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		/* define it here only!!
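+		 * The do { ... } while (0) wrapper used here (and throughout this
+		 * file) exists only so that error paths can bail out early with a
+		 * plain break instead of a goto.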
+		 */
+		if (dhdpcie_get_resource(dhdpcie_info)) {
+			DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
+			break;
+		}
+		DHD_TRACE(("%s: Exit - SUCCESS \n",
+			__FUNCTION__));
+
+		return 0; /* SUCCESS */
+
+	} while (0);
+
+	DHD_TRACE(("%s: Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* FAILURE */
+
+}
+
+void dhdpcie_dump_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *pch;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	/* BAR0 */
+	DHD_RPM(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+		__FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
+		DONGLE_REG_MAP_SIZE));
+
+	/* BAR1 */
+	DHD_RPM(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+		__FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
+		pch->bar1_size));
+}
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
+void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
+{
+	struct pci_dev *pdev = (struct pci_dev *)noti->user;
+	dhdpcie_info_t *pch = NULL;
+
+	if (pdev) {
+		pch = pci_get_drvdata(pdev);
+		if (pch) {
+			dhd_bus_t *bus = pch->bus;
+			if (bus) {
+				dhd_pub_t *dhd = bus->dhd;
+				if (dhd) {
+#ifdef CONFIG_ARCH_MSM
+					DHD_ERROR(("%s: Set no_cfg_restore flag\n",
+						__FUNCTION__));
+					bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#ifdef DHD_SSSR_DUMP
+					if (dhd->fis_triggered) {
+						DHD_ERROR(("%s: PCIe linkdown due to FIS, Ignore\n",
+							__FUNCTION__));
+					} else
+#endif /* DHD_SSSR_DUMP */
+					{
+						DHD_ERROR(("%s: Event HANG send up "
+							"due to PCIe linkdown\n",
+							__FUNCTION__));
+						bus->is_linkdown = 1;
+						dhd->hang_reason =
+							HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
+						dhd_os_send_hang_message(dhd);
+					}
+				}
+			}
+		}
+	}
+
+}
+#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+int dhdpcie_init(struct pci_dev *pdev)
+{
+
+	osl_t *osh = NULL;
+	dhd_bus_t *bus = NULL;
+	dhdpcie_info_t *dhdpcie_info = NULL;
+	wifi_adapter_info_t *adapter = NULL;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef USE_SMMU_ARCH_MSM
+	dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
+#endif /* USE_SMMU_ARCH_MSM */
+	int ret = 0;
+
+	do {
+		/* osl attach */
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* initialize static buffer */
+		adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
+			PCI_SLOT(pdev->devfn));
+		if (adapter != NULL) {
+			DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
+			adapter->bus_type = PCI_BUS;
+			adapter->bus_num = pdev->bus->number;
+			adapter->slot_num = PCI_SLOT(pdev->devfn);
+			adapter->pci_dev = pdev;
+		} else
+			DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+		osl_static_mem_init(osh, adapter);
+
+		/* allocate the Linux-specific PCIe structure here */
+		if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
+		dhdpcie_info->osh = osh;
+		dhdpcie_info->dev = pdev;
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+		/* allocate the OS-specific structure */
+		dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
+		if (dhdpcie_osinfo ==
NULL) { + DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n", + __FUNCTION__)); + break; + } + bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); + dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo; + + /* Initialize host wake IRQ */ + spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock); + /* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */ + dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter, + &dhdpcie_osinfo->oob_irq_flags); + if (dhdpcie_osinfo->oob_irq_num < 0) { + DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__)); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef USE_SMMU_ARCH_MSM + /* allocate private structure for using SMMU */ + dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t)); + if (dhdpcie_smmu_info == NULL) { + DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n", + __FUNCTION__)); + break; + } + bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t)); + dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info; + + /* Initialize smmu structure */ + if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) { + DHD_ERROR(("%s: Failed to initialize SMMU\n", + __FUNCTION__)); + break; + } +#endif /* USE_SMMU_ARCH_MSM */ + +#ifdef DHD_WAKE_STATUS + /* Initialize pkt_wake_lock */ + spin_lock_init(&dhdpcie_info->pkt_wake_lock); +#endif /* DHD_WAKE_STATUS */ + + /* Find the PCI resources, verify the */ + /* vendor and device ID, map BAR regions and irq, update in structures */ + if (dhdpcie_scan_resource(dhdpcie_info)) { + DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__)); + + break; + } + + /* Bus initialization */ + ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev, adapter); + if (ret != BCME_OK) { + DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__)); + break; + } + + dhdpcie_info->bus = bus; + bus->bar1_size = dhdpcie_info->bar1_size; + bus->is_linkdown = 0; + bus->no_bus_init = FALSE; + bus->cto_triggered = 0; + + bus->rc_dev = NULL; + + /* Get RC Device Handle */ + if (bus->dev->bus) { + /* self member of structure pci_bus is bridge device as seen by parent */ + bus->rc_dev = bus->dev->bus->self; + if (bus->rc_dev) + DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__, + bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev)); + else + DHD_ERROR(("%s: bus->dev->bus->self is NULL\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__)); + } + + /* if rc_dev is still NULL, try to get from vendor/device IDs */ + if (bus->rc_dev == NULL) { + bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL); + DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__, + PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev)); + } + + bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus); + bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus); + DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n", + __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap)); + +#ifdef FORCE_TPOWERON + if (dhdpcie_chip_req_forced_tpoweron(bus)) { + dhd_bus_set_tpoweron(bus, tpoweron_scale); + } +#endif /* FORCE_TPOWERON */ + +#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \ + defined(CONFIG_ARCH_APQ8084) + brcm_pcie_wake.wake_irq = wlan_oob_irq; + brcm_pcie_wake.data = bus; +#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */ + +#ifdef DONGLE_ENABLE_ISOLATION + bus->dhd->dongle_isolation = TRUE; +#endif /* DONGLE_ENABLE_ISOLATION */ +#ifdef 
SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN; + bus->pcie_event.user = pdev; + bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK; + bus->pcie_event.callback = dhdpcie_linkdown_cb; + bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY; + msm_pcie_register_event(&bus->pcie_event); + bus->no_cfg_restore = FALSE; +#endif /* CONFIG_ARCH_MSM */ +#ifdef CONFIG_ARCH_EXYNOS + bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN; + bus->pcie_event.user = pdev; + bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK; + bus->pcie_event.callback = dhdpcie_linkdown_cb; + exynos_pcie_register_event(&bus->pcie_event); +#endif /* CONFIG_ARCH_EXYNOS */ + bus->read_shm_fail = FALSE; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + + if (bus->intr) { + /* Register interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); + bus->intr_enabled = FALSE; + dhdpcie_bus_intr_disable(bus); + + if (dhdpcie_request_irq(dhdpcie_info)) { + DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); + break; + } + } else { + bus->pollrate = 1; + DHD_INFO(("%s: PCIe interrupt function is NOT registered " + "due to polling mode\n", __FUNCTION__)); + } + +#if defined(BCM_REQUEST_FW) + if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) { + DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__)); + } + bus->nv_path = NULL; + bus->fw_path = NULL; +#endif /* BCM_REQUEST_FW */ + + /* set private data for pci_dev */ + pci_set_drvdata(pdev, dhdpcie_info); + + /* Ensure BAR1 switch feature enable if needed before FW download */ + dhdpcie_bar1_window_switch_enab(bus); + +#if defined(BCMDHD_MODULAR) && defined(INSMOD_FW_LOAD) + if (1) +#else + if (dhd_download_fw_on_driverload) +#endif + { + if (dhd_bus_start(bus->dhd)) { + DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__)); + if (!allow_delay_fwdl) + break; + } + } else { + /* Set ramdom MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } + + /* Attach to the OS network interface */ + DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__)); + if (dhd_attach_net(bus->dhd, TRUE)) { + DHD_ERROR(("%s(): ERROR.. 
dhd_register_if() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_init_succeeded = TRUE;
+#ifdef CONFIG_ARCH_MSM
+		sec_pcie_set_use_ep_loaded(bus->rc_dev);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+		pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
+		pm_runtime_use_autosuspend(&pdev->dev);
+		atomic_set(&bus->dhd->block_bus, FALSE);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(MULTIPLE_SUPPLICANT)
+		wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif /* MULTIPLE_SUPPLICANT */
+
+		DHD_TRACE(("%s: Exit - SUCCESS \n", __FUNCTION__));
+		return 0;  /* return SUCCESS */
+
+	} while (0);
+	/* unwind the initialization in reverse order in case of error */
+
+	if (bus)
+		dhdpcie_bus_release(bus);
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	if (dhdpcie_osinfo) {
+		MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+	}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef USE_SMMU_ARCH_MSM
+	if (dhdpcie_smmu_info) {
+		MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
+		dhdpcie_info->smmu_cxt = NULL;
+	}
+#endif /* USE_SMMU_ARCH_MSM */
+
+	if (dhdpcie_info)
+		dhdpcie_detach(dhdpcie_info);
+	pci_disable_device(pdev);
+	if (osh)
+		osl_detach(osh);
+	if (adapter != NULL) {
+		adapter->bus_type = -1;
+		adapter->bus_num = -1;
+		adapter->slot_num = -1;
+	}
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s: Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* return FAILURE */
+}
+
+/* Free Linux irq */
+void
+dhdpcie_free_irq(dhd_bus_t *bus)
+{
+	struct pci_dev *pdev = NULL;
+
+	DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
+	if (bus) {
+		pdev = bus->dev;
+		if (bus->irq_registered) {
+#if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
+			/* clean up the affinity_hint before
+			 * the unregistration of PCIe irq
+			 */
+			(void)irq_set_affinity_hint(pdev->irq, NULL);
+#endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
+			free_irq(pdev->irq, bus);
+			bus->irq_registered = FALSE;
+			if (bus->d2h_intr_method == PCIE_MSI) {
+				dhdpcie_disable_msi(pdev);
+			}
+		} else {
+			DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
+		}
+	}
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+}
+
+/*
+
+Name:  dhdpcie_isr
+
+Parameters:
+
+1: IN int irq -- interrupt vector
+2: IN void *arg -- handle to private data structure
+
+Return value:
+
+irqreturn_t - IRQ_HANDLED
+
+Description:
+The interrupt service routine checks the status register, disables the
+interrupt, and queues the DPC if mailbox interrupts are raised.
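+
+Note: the IRQ line is requested with IRQF_SHARED (see dhdpcie_request_irq()),
+so a stricter handler could report a missed match back to the IRQ core, e.g.
+
+	return dhdpcie_bus_isr(bus) ? IRQ_HANDLED : IRQ_NONE;
+
+The implementation below instead always returns IRQ_HANDLED and only logs
+when dhdpcie_bus_isr() reports that the interrupt was not ours.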
+*/ + +irqreturn_t +dhdpcie_isr(int irq, void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bus->isr_entry_time = OSL_LOCALTIME_NS(); + if (!dhdpcie_bus_isr(bus)) { + DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__)); + } + bus->isr_exit_time = OSL_LOCALTIME_NS(); + return IRQ_HANDLED; +} + +int +dhdpcie_disable_irq_nosync(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + disable_irq_nosync(dev->irq); + return BCME_OK; +} + +int +dhdpcie_disable_irq(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + disable_irq(dev->irq); + return BCME_OK; +} + +int +dhdpcie_enable_irq(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + enable_irq(dev->irq); + return BCME_OK; +} + +int +dhdpcie_irq_disabled(dhd_bus_t *bus) +{ + struct irq_desc *desc = irq_to_desc(bus->dev->irq); + /* depth will be zero, if enabled */ + return desc->depth; +} + +#if defined(CONFIG_ARCH_EXYNOS) +int pcie_ch_num = EXYNOS_PCIE_CH_NUM; +#endif /* CONFIG_ARCH_EXYNOS */ + +int +dhdpcie_start_host_dev(dhd_bus_t *bus) +{ + int ret = 0; +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + int options = 0; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#ifdef CONFIG_ARCH_EXYNOS + exynos_pcie_pm_resume(pcie_ch_num); +#endif /* CONFIG_ARCH_EXYNOS */ +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->no_cfg_restore) { + options = MSM_PCIE_CONFIG_NO_CFG_RESTORE; + } + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, + bus->dev, NULL, options); + if (bus->no_cfg_restore && !ret) { + msm_pcie_recover_config(bus->dev); + bus->no_cfg_restore = 0; + } +#else + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, + bus->dev, NULL, 0); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#endif /* CONFIG_ARCH_MSM */ +#ifdef CONFIG_ARCH_TEGRA + ret = tegra_pcie_pm_resume(); +#endif /* CONFIG_ARCH_TEGRA */ + + if (ret) { + DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__)); + goto done; + } + +done: + DHD_TRACE(("%s Exit:\n", __FUNCTION__)); + return ret; +} + +int +dhdpcie_stop_host_dev(dhd_bus_t *bus) +{ + int ret = 0; +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + int options = 0; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#endif /* CONFIG_ARCH_MSM */ + + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#ifdef CONFIG_ARCH_EXYNOS + exynos_pcie_pm_suspend(pcie_ch_num); +#endif /* CONFIG_ARCH_EXYNOS */ +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->no_cfg_restore) { + options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN; + } + + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, + bus->dev, NULL, options); +#else + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, + bus->dev, NULL, 0); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#endif /* CONFIG_ARCH_MSM */ +#ifdef CONFIG_ARCH_TEGRA + ret = 
tegra_pcie_pm_suspend(); +#endif /* CONFIG_ARCH_TEGRA */ + if (ret) { + DHD_ERROR(("Failed to stop PCIe link\n")); + goto done; + } +done: + DHD_TRACE(("%s Exit:\n", __FUNCTION__)); + return ret; +} + +int +dhdpcie_disable_device(dhd_bus_t *bus) +{ + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + + if (pci_is_enabled(bus->dev)) + pci_disable_device(bus->dev); + + return 0; +} + +int +dhdpcie_enable_device(dhd_bus_t *bus) +{ + int ret = BCME_ERROR; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890) + /* Updated with pci_load_and_free_saved_state to compatible + * with Kernel version 3.14.0 to 3.18.41. + */ + pci_load_and_free_saved_state(bus->dev, &pch->default_state); + pch->default_state = pci_store_saved_state(bus->dev); +#else + pci_load_saved_state(bus->dev, pch->default_state); +#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */ + + /* Check if Device ID is valid */ + if (bus->dev->state_saved) { + uint32 vid, saved_vid; + pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid); + saved_vid = bus->dev->saved_config_space[PCI_CFG_VID]; + if (vid != saved_vid) { + DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) " + "Skip the bus init\n", __FUNCTION__, vid, saved_vid)); + bus->no_bus_init = TRUE; + /* Check if the PCIe link is down */ + if (vid == (uint32)-1) { + bus->is_linkdown = 1; +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = TRUE; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + } + return BCME_ERROR; + } + } + + pci_restore_state(bus->dev); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */ + + ret = pci_enable_device(bus->dev); + if (ret) { + pci_disable_device(bus->dev); + } else { + pci_set_master(bus->dev); + } + + return ret; +} + +int +dhdpcie_alloc_resource(dhd_bus_t *bus) +{ + dhdpcie_info_t *dhdpcie_info; + phys_addr_t bar0_addr, bar1_addr; + ulong bar1_size; + + do { + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + break; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + break; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + break; + } + + bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */ + bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */ + + /* read Bar-1 mapped memory range */ + bar1_size = pci_resource_len(bus->dev, 2); + + if ((bar1_size == 0) || (bar1_addr == 0)) { + printf("%s: BAR1 Not enabled for this device size(%ld)," + " addr(0x"PRINTF_RESOURCE")\n", + __FUNCTION__, bar1_size, bar1_addr); + break; + } + + dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); + if (!dhdpcie_info->regs) { + DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); + break; + } + + bus->regs = dhdpcie_info->regs; + dhdpcie_info->bar1_size 
= + (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE; + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size); + if (!dhdpcie_info->tcm) { + DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); + REG_UNMAP(dhdpcie_info->regs); + bus->regs = NULL; + break; + } + + bus->tcm = dhdpcie_info->tcm; + bus->bar1_size = dhdpcie_info->bar1_size; + + DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->regs, bar0_addr)); + DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); + + return 0; + } while (0); + + return BCME_ERROR; +} + +void +dhdpcie_free_resource(dhd_bus_t *bus) +{ + dhdpcie_info_t *dhdpcie_info; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + return; + } + + if (bus->regs) { + REG_UNMAP(dhdpcie_info->regs); + bus->regs = NULL; + } + + if (bus->tcm) { + REG_UNMAP(dhdpcie_info->tcm); + bus->tcm = NULL; + } +} + +int +dhdpcie_bus_request_irq(struct dhd_bus *bus) +{ + dhdpcie_info_t *dhdpcie_info; + int ret = 0; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (bus->intr) { + /* Register interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); + bus->intr_enabled = FALSE; + dhdpcie_bus_intr_disable(bus); + ret = dhdpcie_request_irq(dhdpcie_info); + if (ret) { + DHD_ERROR(("%s: request_irq() failed, ret=%d\n", + __FUNCTION__, ret)); + return ret; + } + } + + return ret; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +#ifdef CONFIG_BCMDHD_GET_OOB_STATE +extern int dhd_get_wlan_oob_gpio(void); +#endif /* CONFIG_BCMDHD_GET_OOB_STATE */ + +int dhdpcie_get_oob_irq_level(void) +{ + int gpio_level; + +#ifdef CONFIG_BCMDHD_GET_OOB_STATE + gpio_level = dhd_get_wlan_oob_gpio(); +#else + gpio_level = BCME_UNSUPPORTED; +#endif /* CONFIG_BCMDHD_GET_OOB_STATE */ + return gpio_level; +} + +int dhdpcie_get_oob_irq_status(struct dhd_bus *bus) +{ + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return 0; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return 0; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return 0; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + + return dhdpcie_osinfo ? 
dhdpcie_osinfo->oob_irq_enabled : 0; +} + +int dhdpcie_get_oob_irq_num(struct dhd_bus *bus) +{ + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return 0; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return 0; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return 0; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + + return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0; +} + +void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable) +{ + unsigned long flags; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + DHD_OOB_IRQ_LOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags); + if ((dhdpcie_osinfo->oob_irq_enabled != enable) && + (dhdpcie_osinfo->oob_irq_num > 0)) { + if (enable) { + enable_irq(dhdpcie_osinfo->oob_irq_num); + bus->oob_intr_enable_count++; + bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS(); + } else { + disable_irq_nosync(dhdpcie_osinfo->oob_irq_num); + bus->oob_intr_disable_count++; + bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS(); + } + dhdpcie_osinfo->oob_irq_enabled = enable; + } + DHD_OOB_IRQ_UNLOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags); +} + +#if defined(DHD_USE_SPIN_LOCK_BH) && !defined(DHD_USE_PCIE_OOB_THREADED_IRQ) +#error "Cannot enable DHD_USE_SPIN_LOCK_BH without enabling DHD_USE_PCIE_OOB_THREADED_IRQ" +#endif /* DHD_USE_SPIN_LOCK_BH && !DHD_USE_PCIE_OOB_THREADED_IRQ */ + +#ifdef DHD_USE_PCIE_OOB_THREADED_IRQ +static irqreturn_t wlan_oob_irq_isr(int irq, void *data) +{ + dhd_bus_t *bus = (dhd_bus_t *)data; + DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__)); + bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS(); + return IRQ_WAKE_THREAD; +} +#endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */ + +static irqreturn_t wlan_oob_irq(int irq, void *data) +{ + dhd_bus_t *bus; + bus = (dhd_bus_t *)data; + dhdpcie_oob_intr_set(bus, FALSE); +#ifdef DHD_USE_PCIE_OOB_THREADED_IRQ + DHD_TRACE(("%s: IRQ Thread\n", __FUNCTION__)); + bus->last_oob_irq_thr_time = OSL_LOCALTIME_NS(); +#else + DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__)); + bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS(); +#endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */ + + if (bus->dhd->up == 0) { + DHD_ERROR(("%s: ########### IRQ during dhd pub up is 0 ############\n", + __FUNCTION__)); + } + + bus->oob_intr_count++; +#ifdef DHD_WAKE_STATUS +#ifdef DHD_PCIE_RUNTIMEPM + /* This condition is for avoiding counting of wake up from Runtime PM */ + if (bus->chk_pm) +#endif /* DHD_PCIE_RUNTIMPM */ + { + bcmpcie_set_get_wake(bus, 1); + } +#endif /* DHD_WAKE_STATUS */ +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq); +#endif /* DHD_PCIE_RUNTIMPM */ +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + dhd_bus_wakeup_work(bus->dhd); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + /* Hold wakelock if bus_low_power_state is + * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED + */ + if (bus->dhd->up && DHD_CHK_BUS_IN_LPS(bus)) { + DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT); + } + return IRQ_HANDLED; +} + +int dhdpcie_oob_intr_register(dhd_bus_t 
*bus) +{ + int err = 0; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + if (dhdpcie_osinfo->oob_irq_registered) { + DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__)); + return -EBUSY; + } + + if (dhdpcie_osinfo->oob_irq_num > 0) { + printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__, + (int)dhdpcie_osinfo->oob_irq_num, + (int)dhdpcie_osinfo->oob_irq_flags); +#ifdef DHD_USE_PCIE_OOB_THREADED_IRQ + err = request_threaded_irq(dhdpcie_osinfo->oob_irq_num, + wlan_oob_irq_isr, wlan_oob_irq, + dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake", + bus); +#else + err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq, + dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake", + bus); +#endif /* DHD_USE_THREADED_IRQ_PCIE_OOB */ + if (err) { + DHD_ERROR(("%s: request_irq failed with %d\n", + __FUNCTION__, err)); + return err; + } +#if defined(DISABLE_WOWLAN) + printf("%s: disable_irq_wake\n", __FUNCTION__); + dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; +#else + printf("%s: enable_irq_wake\n", __FUNCTION__); + err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = TRUE; + } else + printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err); +#endif + dhdpcie_osinfo->oob_irq_enabled = TRUE; + } + + dhdpcie_osinfo->oob_irq_registered = TRUE; + + return 0; +} + +void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) +{ + int err = 0; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + if (!dhdpcie_osinfo->oob_irq_registered) { + DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (dhdpcie_osinfo->oob_irq_num > 0) { + if (dhdpcie_osinfo->oob_irq_wake_enabled) { + err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; + } + } + if (dhdpcie_osinfo->oob_irq_enabled) { + disable_irq(dhdpcie_osinfo->oob_irq_num); + dhdpcie_osinfo->oob_irq_enabled = FALSE; + } + free_irq(dhdpcie_osinfo->oob_irq_num, bus); + } + dhdpcie_osinfo->oob_irq_registered = FALSE; +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef PCIE_OOB +void dhdpcie_oob_init(dhd_bus_t *bus) +{ + /* XXX this should be passed in as a command line parameter */ + gpio_handle_val = get_handle(OOB_PORT); + if (gpio_handle_val < 0) + { + DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__)); + ASSERT(FALSE); + } + + gpio_direction = 0; + ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG); + + /* Note BT core is also enabled here */ + gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + gpio_write_port(gpio_handle_val, gpio_port); + + gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + 
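/* Direction bits now select WL_REG_ON, BT_REG_ON and DEVICE_WAKE as
+	 * outputs; the bitbang-mode write below latches this direction mask
+	 * into the FTDI GPIO expander.
+	 */
+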
ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
+
+	bus->oob_enabled = TRUE;
+	bus->oob_presuspend = FALSE;
+
+	/* drive the Device_Wake GPIO low on startup */
+	bus->device_wake_state = TRUE;
+	dhd_bus_set_device_wake(bus, FALSE);
+	dhd_bus_doorbell_timeout_reset(bus);
+
+}
+
+void
+dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
+{
+	DHD_INFO(("Set BT_REG_ON to %d\n", val));
+	if (val)
+	{
+		gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
+		gpio_write_port(gpio_handle_val, gpio_port);
+	} else {
+		gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
+		gpio_write_port(gpio_handle_val, gpio_port);
+	}
+}
+
+int
+dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
+{
+	int ret;
+	uint8 val;
+	ret = gpio_read_port(gpio_handle_val, &val);
+
+	if (ret < 0) {
+		/* XXX handle error properly */
+		DHD_ERROR(("gpio_read_port returns %d\n", ret));
+		return ret;
+	}
+
+	if (val & (1 << BIT_BT_REG_ON))
+	{
+		ret = 1;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int
+dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	if (bus->device_wake_state != val)
+	{
+		DHD_INFO(("Set Device_Wake to %d\n", val));
+
+		if (bus->oob_enabled && !bus->oob_presuspend)
+		{
+			if (val)
+			{
+				gpio_port = gpio_port | (1 << DEVICE_WAKE);
+				gpio_write_port_non_block(gpio_handle_val, gpio_port);
+			} else {
+				gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
+				gpio_write_port_non_block(gpio_handle_val, gpio_port);
+			}
+		}
+
+		bus->device_wake_state = val;
+	}
+	return BCME_OK;
+}
+
+INLINE void
+dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	/* TODO: The inband implementation of Device_Wake is currently not
+	 * supported, so this function is left empty; it can be used later
+	 * to support the same.
+	 */
+}
+#endif /* PCIE_OOB */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+bool dhd_runtimepm_state(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus;
+	unsigned long flags;
+	bus = dhd->bus;
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	bus->idlecount++;
+
+	DHD_TRACE(("%s : Enter \n", __FUNCTION__));
+
+	if (dhd_query_bus_erros(dhd)) {
+		/* Because of bus_error/dongle_trap etc., the driver must not
+		 * enter suspend; return FALSE
+		 */
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return FALSE;
+	}
+
+	if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+		bus->idlecount = 0;
+		if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) &&
+			!DHD_CHECK_CFG_IN_PROGRESS(dhd) && !dhd_os_check_wakelock_all(bus->dhd)) {
+			DHD_RPM(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n",
+				__FUNCTION__, bus->idletime, dhd_runtimepm_ms));
+			bus->bus_wake = 0;
+			DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
+			bus->runtime_resume_done = FALSE;
+			/* stop all interface network queues.
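+			 * Queues are stopped while DHD_GENERAL_LOCK is still held so
+			 * that no new TX can race with the suspend attempt; they are
+			 * restarted on the failure path below and again after resume.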
*/ + dhd_bus_stop_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + /* RPM suspend is failed, return FALSE then re-trying */ + if (dhdpcie_set_suspend_resume(bus, TRUE)) { + DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__)); + DHD_GENERAL_LOCK(dhd, flags); + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd); + dhd_os_busbusy_wake(bus->dhd); + bus->runtime_resume_done = TRUE; + /* It can make stuck NET TX Queue without below */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + if (bus->dhd->rx_pending_due_to_rpm) { + /* Reschedule tasklet to process Rx frames */ + DHD_ERROR(("%s: Schedule DPC to process pending" + " Rx packets\n", __FUNCTION__)); + /* irq will be enabled at the end of dpc */ + dhd_schedule_delayed_dpc_on_dpc_cpu(bus->dhd, 0); + } else { + /* enabling host irq deferred from system suspend */ + if (dhdpcie_irq_disabled(bus)) { + dhdpcie_enable_irq(bus); + /* increasing intrrupt count when it enabled */ + bus->resume_intr_enable_count++; + } + } + smp_wmb(); + wake_up(&bus->rpm_queue); + return FALSE; + } + + DHD_GENERAL_LOCK(dhd, flags); + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd); + DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd); + /* For making sure NET TX Queue active */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + + wait_event(bus->rpm_queue, bus->bus_wake); + + DHD_GENERAL_LOCK(dhd, flags); + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd); + DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd); + DHD_GENERAL_UNLOCK(dhd, flags); + + dhdpcie_set_suspend_resume(bus, FALSE); + + DHD_GENERAL_LOCK(dhd, flags); + DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd); + dhd_os_busbusy_wake(bus->dhd); + /* Inform the wake up context that Resume is over */ + bus->runtime_resume_done = TRUE; + /* For making sure NET TX Queue active */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (bus->dhd->rx_pending_due_to_rpm) { + /* Reschedule tasklet to process Rx frames */ + DHD_ERROR(("%s: Schedule DPC to process pending Rx packets\n", + __FUNCTION__)); + bus->rpm_sched_dpc_time = OSL_LOCALTIME_NS(); + dhd_sched_dpc(bus->dhd); + } + + /* enabling host irq deferred from system suspend */ + if (dhdpcie_irq_disabled(bus)) { + dhdpcie_enable_irq(bus); + /* increasing intrrupt count when it enabled */ + bus->resume_intr_enable_count++; + } + + smp_wmb(); + wake_up(&bus->rpm_queue); + DHD_RPM(("%s : runtime resume ended \n", __FUNCTION__)); + return TRUE; + } else { + DHD_GENERAL_UNLOCK(dhd, flags); + /* Since one of the contexts are busy (TX, IOVAR or RX) + * we should not suspend + */ + DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, dhd->dhd_bus_busy_state)); + return FALSE; + } + } + + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; +} /* dhd_runtimepm_state */ + +/* + * dhd_runtime_bus_wake + * TRUE - related with runtime pm context + * FALSE - It isn't invloved in runtime pm context + */ +bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr) +{ + unsigned long flags; + bus->idlecount = 0; + DHD_TRACE(("%s : enter\n", __FUNCTION__)); + if (bus->dhd->up == FALSE) { + DHD_INFO(("%s : dhd is not up\n", __FUNCTION__)); + return FALSE; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) { + /* Wake up RPM state thread if it is suspend in progress or suspended */ + if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) || + DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) { + bus->bus_wake = 1; + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (dhd_msg_level & DHD_RPM_VAL) + 
DHD_ERROR_RLMT(("%s: Runtime Resume is called in %pf\n", __FUNCTION__, func_addr)); + smp_wmb(); + wake_up(&bus->rpm_queue); + /* No need to wake up the RPM state thread */ + } else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) { + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } + + /* If wait is TRUE, function with wait = TRUE will be wait in here */ + if (wait) { + if (!wait_event_timeout(bus->rpm_queue, bus->runtime_resume_done, + msecs_to_jiffies(RPM_WAKE_UP_TIMEOUT))) { + DHD_ERROR(("%s: RPM_WAKE_UP_TIMEOUT error\n", __FUNCTION__)); + return FALSE; + } + } else { + DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__)); + } + /* If it is called from RPM context, it returns TRUE */ + return TRUE; + } + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return FALSE; +} + +bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr) +{ + dhd_bus_t *bus = dhdp->bus; + return dhd_runtime_bus_wake(bus, wait, func_addr); +} + +void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bus->idletime = 0; +} + +bool dhdpcie_is_resume_done(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->runtime_resume_done; +} +#endif /* DHD_PCIE_RUNTIMEPM */ + +struct device * dhd_bus_to_dev(dhd_bus_t *bus) +{ + struct pci_dev *pdev; + pdev = bus->dev; + + if (pdev) + return &pdev->dev; + else + return NULL; +} + +#ifdef DHD_FW_COREDUMP +int +dhd_dongle_mem_dump(void) +{ + if (!g_dhd_bus) { + DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__)); + return -ENODEV; + } + + dhd_bus_dump_console_buffer(g_dhd_bus); + dhd_prot_debug_info_print(g_dhd_bus->dhd); + + g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON; + g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS; + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + + dhd_bus_mem_dump(g_dhd_bus->dhd); + return 0; +} +EXPORT_SYMBOL(dhd_dongle_mem_dump); +#endif /* DHD_FW_COREDUMP */ + +#ifdef CONFIG_ARCH_MSM +void +dhd_bus_inform_ep_loaded_to_rc(dhd_pub_t *dhdp, bool up) +{ + sec_pcie_set_ep_driver_loaded(dhdp->bus->rc_dev, up); +} +#endif /* CONFIG_ARCH_MSM */ + +bool +dhd_bus_check_driver_up(void) +{ + dhd_bus_t *bus; + dhd_pub_t *dhdp; + bool isup = FALSE; + + bus = (dhd_bus_t *)g_dhd_bus; + if (!bus) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return isup; + } + + dhdp = bus->dhd; + if (dhdp) { + isup = dhdp->up; + } + + return isup; +} +EXPORT_SYMBOL(dhd_bus_check_driver_up); diff --git a/bcmdhd.101.10.361.x/dhd_pktlog.c b/bcmdhd.101.10.361.x/dhd_pktlog.c new file mode 100755 index 0000000..0d57344 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_pktlog.c @@ -0,0 +1,1684 @@ +/* + * DHD debugability packet logging support + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef DHD_COMPACT_PKT_LOG +#include +#include +#include +#include +#include +#include +#include <802.11.h> +#include +#include +#include +#include +#include +#include +#endif /* DHD_COMPACT_PKT_LOG */ + +#ifdef DHD_PKT_LOGGING +#ifndef strtoul +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#endif /* strtoul */ +extern int wl_pattern_atoh(char *src, char *dst); +extern int pattern_atoh_len(char *src, char *dst, int len); +extern wifi_tx_packet_fate __dhd_dbg_map_tx_status_to_pkt_fate(uint16 status); + +#ifdef DHD_COMPACT_PKT_LOG +#define CPKT_LOG_BITS_PER_BYTE 8 + +#define CPKT_LOG_BIT_LEN_TYPE 4 + +#define CPKT_LOG_BIT_OFFSET_TS 0 +#define CPKT_LOG_BIT_OFFSET_DIR 5 +#define CPKT_LOG_BIT_OFFSET_TYPE 6 +#define CPKT_LOG_BIT_OFFSET_SUBTYPE 10 +#define CPKT_LOG_BIT_OFFSET_PKT_FATE 18 + +#define CPKT_LOG_BIT_MASK_TS 0x1f +#define CPKT_LOG_BIT_MASK_DIR 0x01 +#define CPKT_LOG_BIT_MASK_TYPE 0x0f +#define CPKT_LOG_BIT_MASK_SUBTYPE 0xff +#define CPKT_LOG_BIT_MASK_PKT_FATE 0x0f + +#define CPKT_LOG_DNS_PORT_CLIENT 53 +#define CPKT_LOG_MDNS_PORT_CLIENT 5353 + +#define CPKT_LOG_TYPE_DNS 0x0 +#define CPKT_LOG_TYPE_ARP 0x1 +#define CPKT_LOG_TYPE_ICMP_REQ 0x2 +#define CPKT_LOG_TYPE_ICMP_RES 0x3 +#define CPKT_LOG_TYPE_ICMP_UNREACHABLE 0x4 +#define CPKT_LOG_TYPE_DHCP 0x5 +#define CPKT_LOG_TYPE_802_1X 0x6 +#define CPKT_LOG_TYPE_ICMPv6 0x7 +#define CPKT_LOG_TYPE_OTHERS 0xf + +#define CPKT_LOG_802_1X_SUBTYPE_IDENTITY 0x0 +#define CPKT_LOG_802_1X_SUBTYPE_TLS 0x1 +#define CPKT_LOG_802_1X_SUBTYPE_TTLS 0x2 +#define CPKT_LOG_802_1X_SUBTYPE_PEAP 0x3 +#define CPKT_LOG_802_1X_SUBTYPE_FAST 0x4 +#define CPKT_LOG_802_1X_SUBTYPE_LEAP 0x5 +#define CPKT_LOG_802_1X_SUBTYPE_PWD 0x6 +#define CPKT_LOG_802_1X_SUBTYPE_SIM 0x7 +#define CPKT_LOG_802_1X_SUBTYPE_AKA 0x8 +#define CPKT_LOG_802_1X_SUBTYPE_AKAP 0x9 +#define CPKT_LOG_802_1X_SUBTYPE_SUCCESS 0xA +#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M1 0xB +#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M2 0xC +#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M3 0xD +#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M4 0xE +#define CPKT_LOG_802_1X_SUBTYPE_OTHERS 0xF + +#define CPKT_LOG_DHCP_MAGIC_COOKIE_LEN 4 + +#define CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE 3 +#define CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE_IPV4_OFFSET 4 + +typedef struct dhd_cpkt_log_ts_node { + struct rb_node rb; + + uint64 ts_diff; /* key, usec */ + int idx; +} dhd_cpkt_log_ts_node_t; + +/* Compact Packet Log Timestamp values, unit: uSec */ +const uint64 dhd_cpkt_log_tt_idx[] = { + 10000, 50000, 100000, 150000, 300000, 500000, 750000, 1000000, 3000000, 5000000, 7500000, + 10000000, 12500000, 15000000, 17500000, 20000000, 22500000, 25000000, 27500000, 30000000, + 32500000, 35000000, 37500000, 40000000, 50000000, 75000000, 150000000, 300000000, 400000000, + 500000000, 600000000 +}; +#define CPKT_LOG_TT_IDX_ARR_SZ ARRAYSIZE(dhd_cpkt_log_tt_idx) + +static int dhd_cpkt_log_init_tt(dhd_pub_t *dhdp); +static void dhd_cpkt_log_deinit_tt(dhd_pub_t *dhdp); +#endif /* DHD_COMPACT_PKT_LOG */ + +int +dhd_os_attach_pktlog(dhd_pub_t *dhdp) +{ + dhd_pktlog_t *pktlog; + + if (!dhdp) { + DHD_ERROR(("%s(): dhdp is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + pktlog = (dhd_pktlog_t *)MALLOCZ(dhdp->osh, sizeof(dhd_pktlog_t)); + if (unlikely(!pktlog)) { + 
DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_pktlog_t\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdp->pktlog = pktlog; + pktlog->dhdp = dhdp; + + OSL_ATOMIC_INIT(dhdp->osh, &pktlog->pktlog_status); + + /* pktlog ring */ + dhdp->pktlog->pktlog_ring = dhd_pktlog_ring_init(dhdp, MIN_PKTLOG_LEN); + dhdp->pktlog->pktlog_filter = dhd_pktlog_filter_init(MAX_DHD_PKTLOG_FILTER_LEN); +#ifdef DHD_COMPACT_PKT_LOG + dhd_cpkt_log_init_tt(dhdp); +#endif + + DHD_ERROR(("%s(): dhd_os_attach_pktlog attach\n", __FUNCTION__)); + + return BCME_OK; +} + +int +dhd_os_detach_pktlog(dhd_pub_t *dhdp) +{ + if (!dhdp || !dhdp->pktlog) { + DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n", + __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL))); + return -EINVAL; + } + + dhd_pktlog_ring_deinit(dhdp, dhdp->pktlog->pktlog_ring); + dhd_pktlog_filter_deinit(dhdp->pktlog->pktlog_filter); +#ifdef DHD_COMPACT_PKT_LOG + dhd_cpkt_log_deinit_tt(dhdp); +#endif /* DHD_COMPACT_PKT_LOG */ + + DHD_ERROR(("%s(): dhd_os_attach_pktlog detach\n", __FUNCTION__)); + + MFREE(dhdp->osh, dhdp->pktlog, sizeof(dhd_pktlog_t)); + + return BCME_OK; +} + +dhd_pktlog_ring_t* +dhd_pktlog_ring_init(dhd_pub_t *dhdp, int size) +{ + dhd_pktlog_ring_t *ring; + int i = 0; + + if (!dhdp) { + DHD_ERROR(("%s(): dhdp is NULL\n", __FUNCTION__)); + return NULL; + } + + ring = (dhd_pktlog_ring_t *)MALLOCZ(dhdp->osh, sizeof(dhd_pktlog_ring_t)); + if (unlikely(!ring)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_pktlog_ring_t\n", __FUNCTION__)); + goto fail; + } + + dll_init(&ring->ring_info_head); + dll_init(&ring->ring_info_free); + + ring->ring_info_mem = (dhd_pktlog_ring_info_t *)MALLOCZ(dhdp->osh, + sizeof(dhd_pktlog_ring_info_t) * size); + if (unlikely(!ring->ring_info_mem)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_pktlog_ring_info_t\n", __FUNCTION__)); + goto fail; + } + + /* initialize free ring_info linked list */ + for (i = 0; i < size; i++) { + dll_append(&ring->ring_info_free, (dll_t *)&ring->ring_info_mem[i].p_info); + } + + OSL_ATOMIC_SET(dhdp->osh, &ring->start, TRUE); + ring->pktlog_minmize = FALSE; + ring->pktlog_len = size; + ring->pktcount = 0; + ring->dhdp = dhdp; + ring->pktlog_ring_lock = osl_spin_lock_init(dhdp->osh); + + DHD_ERROR(("%s(): pktlog ring init success\n", __FUNCTION__)); + + return ring; +fail: + if (ring) { + MFREE(dhdp->osh, ring, sizeof(dhd_pktlog_ring_t)); + } + + return NULL; +} + +/* Maximum wait counts */ +#define DHD_PKTLOG_WAIT_MAXCOUNT 1000 +int +dhd_pktlog_ring_deinit(dhd_pub_t *dhdp, dhd_pktlog_ring_t *ring) +{ + int ret = BCME_OK; + dhd_pktlog_ring_info_t *ring_info; + dll_t *item, *next_p; + int waitcounts = 0; + + if (!ring) { + DHD_ERROR(("%s(): ring is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + if (!ring->dhdp) { + DHD_ERROR(("%s(): dhdp is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + /* stop pkt log */ + OSL_ATOMIC_SET(dhdp->osh, &ring->start, FALSE); + + /* waiting TX/RX/TXS context is done, max timeout 1 second */ + while ((waitcounts++ < DHD_PKTLOG_WAIT_MAXCOUNT)) { + if (!OSL_ATOMIC_READ(dhdp->osh, &dhdp->pktlog->pktlog_status)) + break; + OSL_SLEEP(1); + } + + if (waitcounts >= DHD_PKTLOG_WAIT_MAXCOUNT) { + DHD_ERROR(("%s(): pktlog wait timeout pktlog_status : 0x%x \n", + __FUNCTION__, + OSL_ATOMIC_READ(dhdp->osh, &dhdp->pktlog->pktlog_status))); + ASSERT(0); + return -EINVAL; + } + + /* free ring_info->info.pkt */ + for (item = dll_head_p(&ring->ring_info_head); !dll_end(&ring->ring_info_head, item); + item = next_p) { + next_p = 
dll_next_p(item); + + ring_info = (dhd_pktlog_ring_info_t *)item; + + if (ring_info->info.pkt) { + PKTFREE(ring->dhdp->osh, ring_info->info.pkt, TRUE); + DHD_PKT_LOG(("%s(): pkt free pos %p\n", + __FUNCTION__, ring_info->info.pkt)); + } + } + + if (ring->ring_info_mem) { + MFREE(ring->dhdp->osh, ring->ring_info_mem, + sizeof(dhd_pktlog_ring_info_t) * ring->pktlog_len); + } + + if (ring->pktlog_ring_lock) { + osl_spin_lock_deinit(ring->dhdp->osh, ring->pktlog_ring_lock); + } + + MFREE(dhdp->osh, ring, sizeof(dhd_pktlog_ring_t)); + + DHD_ERROR(("%s(): pktlog ring deinit\n", __FUNCTION__)); + + return ret; +} + +/* + * dhd_pktlog_ring_add_pkts : add filtered packets into pktlog ring + * pktid : incase of rx, pktid is not used (pass DHD_INVALID_PKID) + * direction : 1 - TX / 0 - RX / 2 - RX Wakeup Packet + */ +int +dhd_pktlog_ring_add_pkts(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid, uint32 direction) +{ + dhd_pktlog_ring_info_t *pkts; + dhd_pktlog_ring_t *pktlog_ring; + dhd_pktlog_filter_t *pktlog_filter; + u64 ts_nsec; + uint32 pktlog_case = 0; + unsigned long rem_nsec; + unsigned long flags = 0; + + /* + * dhdp, dhdp->pktlog, dhd->pktlog_ring, pktlog_ring->start + * are validated from the DHD_PKTLOG_TX macro + */ + + pktlog_ring = dhdp->pktlog->pktlog_ring; + pktlog_filter = dhdp->pktlog->pktlog_filter; + + if (direction == PKT_TX) { + pktlog_case = PKTLOG_TXPKT_CASE; + } else if ((direction == PKT_RX) || (direction == PKT_WAKERX)) { + pktlog_case = PKTLOG_RXPKT_CASE; + } + + if ((direction != PKT_WAKERX) && + dhd_pktlog_filter_matched(pktlog_filter, pktdata, pktlog_case) + == FALSE) { + return BCME_OK; + } + + if (direction == PKT_TX && pktid == DHD_INVALID_PKTID) { + DHD_ERROR(("%s : Invalid PKTID \n", __FUNCTION__)); + return BCME_ERROR; + } + + /* get free ring_info and insert to ring_info_head */ + DHD_PKT_LOG_LOCK(pktlog_ring->pktlog_ring_lock, flags); + /* if free_list is empty, use the oldest ring_info */ + if (dll_empty(&pktlog_ring->ring_info_free)) { + pkts = (dhd_pktlog_ring_info_t *)dll_head_p(&pktlog_ring->ring_info_head); + dll_delete((dll_t *)pkts); + /* free the oldest packet */ + PKTFREE(pktlog_ring->dhdp->osh, pkts->info.pkt, TRUE); + pktlog_ring->pktcount--; + } else { + pkts = (dhd_pktlog_ring_info_t *)dll_tail_p(&pktlog_ring->ring_info_free); + dll_delete((dll_t *)pkts); + } + + /* Update packet information */ + ts_nsec = local_clock(); + rem_nsec = do_div(ts_nsec, NSEC_PER_SEC); + + pkts->info.pkt = PKTDUP(dhdp->osh, pkt); + pkts->info.pkt_len = PKTLEN(dhdp->osh, pkt); + pkts->info.driver_ts_sec = (uint32)ts_nsec; + pkts->info.driver_ts_usec = (uint32)(rem_nsec/NSEC_PER_USEC); + pkts->info.firmware_ts = 0U; + pkts->info.payload_type = FRAME_TYPE_ETHERNET_II; + pkts->info.direction = direction; + + if (direction == PKT_TX) { + pkts->info.pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid); + pkts->tx_fate = TX_PKT_FATE_DRV_QUEUED; + } else if (direction == PKT_RX) { + pkts->info.pkt_hash = 0U; + pkts->rx_fate = RX_PKT_FATE_SUCCESS; + } else if (direction == PKT_WAKERX) { + pkts->info.pkt_hash = 0U; + pkts->rx_fate = RX_PKT_FATE_WAKE_PKT; + } + + DHD_PKT_LOG(("%s(): pkt hash %d\n", __FUNCTION__, pkts->info.pkt_hash)); + DHD_PKT_LOG(("%s(): sec %d usec %d\n", __FUNCTION__, + pkts->info.driver_ts_sec, pkts->info.driver_ts_usec)); + + /* insert tx_pkts to the pktlog_ring->ring_info_head */ + dll_append(&pktlog_ring->ring_info_head, (dll_t *)pkts); + pktlog_ring->pktcount++; + DHD_PKT_LOG_UNLOCK(pktlog_ring->pktlog_ring_lock, flags); + return BCME_OK; +} + +int 
+dhd_pktlog_ring_tx_status(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid, + uint16 status) +{ + dhd_pktlog_ring_info_t *tx_pkt; + wifi_tx_packet_fate pkt_fate; + uint32 pkt_hash, temp_hash; + dhd_pktlog_ring_t *pktlog_ring; + dhd_pktlog_filter_t *pktlog_filter; + dll_t *item_p, *next_p; + unsigned long flags = 0; + +#ifdef BDC + struct bdc_header *h; + BCM_REFERENCE(h); +#endif /* BDC */ + /* + * dhdp, dhdp->pktlog, dhd->pktlog_ring, pktlog_ring->start + * are validated from the DHD_PKTLOG_TXS macro + */ + + pktlog_ring = dhdp->pktlog->pktlog_ring; + pktlog_filter = dhdp->pktlog->pktlog_filter; + + if (dhd_pktlog_filter_matched(pktlog_filter, pktdata, + PKTLOG_TXSTATUS_CASE) == FALSE) { + return BCME_OK; + } + + pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid); + pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status); + + /* find the sent tx packet and adding pkt_fate info */ + DHD_PKT_LOG_LOCK(pktlog_ring->pktlog_ring_lock, flags); + /* Inverse traverse from the last packets */ + for (item_p = dll_tail_p(&pktlog_ring->ring_info_head); + !dll_end(&pktlog_ring->ring_info_head, item_p); + item_p = next_p) + { + if (dll_empty(item_p)) { + break; + } + next_p = dll_prev_p(item_p); + tx_pkt = (dhd_pktlog_ring_info_t *)item_p; + temp_hash = tx_pkt->info.pkt_hash; + if (temp_hash == pkt_hash) { + tx_pkt->tx_fate = pkt_fate; +#ifdef BDC + h = (struct bdc_header *)PKTDATA(dhdp->osh, tx_pkt->info.pkt); + PKTPULL(dhdp->osh, tx_pkt->info.pkt, BDC_HEADER_LEN); + PKTPULL(dhdp->osh, tx_pkt->info.pkt, (h->dataOffset << DHD_WORD_TO_LEN_SHIFT)); +#endif /* BDC */ + DHD_PKT_LOG(("%s(): Found pkt hash in prev pos\n", __FUNCTION__)); + break; + } + } + DHD_PKT_LOG_UNLOCK(pktlog_ring->pktlog_ring_lock, flags); + return BCME_OK; +} + +dhd_pktlog_filter_t* +dhd_pktlog_filter_init(int size) +{ + int i; + gfp_t kflags; + uint32 alloc_len; + dhd_pktlog_filter_t *filter; + dhd_pktlog_filter_info_t *filter_info = NULL; + + kflags = in_atomic() ? 
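+/*
+ * The GFP flag is chosen at run time here because kzalloc() with
+ * GFP_KERNEL may sleep, which is not allowed in atomic context (IRQ
+ * handlers, spinlocks held); GFP_ATOMIC allocates without sleeping at
+ * the cost of a higher failure rate. The common shape of the idiom --
+ * noting that in_atomic() is only a heuristic, so callers that know
+ * their own context should pass the flag explicitly:
+ *
+ *     gfp_t flags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ *     p = kzalloc(len, flags);
+ *     if (!p)
+ *         return NULL;
+ */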
GFP_ATOMIC : GFP_KERNEL; + + /* allocate and initialize pktmon filter */ + alloc_len = sizeof(dhd_pktlog_filter_t); + filter = (dhd_pktlog_filter_t *)kzalloc(alloc_len, kflags); + if (unlikely(!filter)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_pktlog_filter_t\n", __FUNCTION__)); + goto fail; + } + + alloc_len = (sizeof(dhd_pktlog_filter_info_t) * size); + filter_info = (dhd_pktlog_filter_info_t *)kzalloc(alloc_len, kflags); + if (unlikely(!filter_info)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_pktlog_filter_info_t\n", __FUNCTION__)); + goto fail; + } + + filter->info = filter_info; + filter->list_cnt = 0; + + for (i = 0; i < MAX_DHD_PKTLOG_FILTER_LEN; i++) { + filter->info[i].id = 0; + } + + filter->enable = PKTLOG_TXPKT_CASE | PKTLOG_TXSTATUS_CASE | PKTLOG_RXPKT_CASE; + + DHD_ERROR(("%s(): pktlog filter init success\n", __FUNCTION__)); + + return filter; +fail: + if (filter) { + kfree(filter); + } + + return NULL; +} + +int +dhd_pktlog_filter_deinit(dhd_pktlog_filter_t *filter) +{ + int ret = BCME_OK; + + if (!filter) { + DHD_ERROR(("%s(): filter is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + if (filter->info) { + kfree(filter->info); + } + kfree(filter); + + DHD_ERROR(("%s(): pktlog filter deinit\n", __FUNCTION__)); + + return ret; +} + +bool +dhd_pktlog_filter_existed(dhd_pktlog_filter_t *filter, char *arg, uint32 *id) +{ + char filter_pattern[MAX_FILTER_PATTERN_LEN]; + char *p; + int i, j; + int nchar; + int len; + + if (!filter || !arg) { + DHD_ERROR(("%s(): filter=%p arg=%p\n", __FUNCTION__, filter, arg)); + return TRUE; + } + + for (i = 0; i < filter->list_cnt; i++) { + p = filter_pattern; + len = sizeof(filter_pattern); + + nchar = snprintf(p, len, "%d ", filter->info[i].offset); + p += nchar; + len -= nchar; + + nchar = snprintf(p, len, "0x"); + p += nchar; + len -= nchar; + + for (j = 0; j < filter->info[i].size_bytes; j++) { + nchar = snprintf(p, len, "%02x", filter->info[i].mask[j]); + p += nchar; + len -= nchar; + } + + nchar = snprintf(p, len, " 0x"); + p += nchar; + len -= nchar; + + for (j = 0; j < filter->info[i].size_bytes; j++) { + nchar = snprintf(p, len, "%02x", filter->info[i].pattern[j]); + p += nchar; + len -= nchar; + } + + if (strlen(arg) < strlen(filter_pattern)) { + continue; + } + + DHD_PKT_LOG(("%s(): Pattern %s\n", __FUNCTION__, filter_pattern)); + + if (strncmp(filter_pattern, arg, strlen(filter_pattern)) == 0) { + *id = filter->info[i].id; + DHD_ERROR(("%s(): This pattern already exists\n", __FUNCTION__)); + DHD_ERROR(("%s(): arg %s\n", __FUNCTION__, arg)); + return TRUE; + } + } + + return FALSE; +} + +int +dhd_pktlog_filter_add(dhd_pktlog_filter_t *filter, char *arg) +{ + int32 mask_size, pattern_size; + char *offset, *bitmask, *pattern; + uint32 id = 0; + + if (!filter || !arg) { + DHD_ERROR(("%s(): pktlog_filter =%p arg =%p\n", __FUNCTION__, filter, arg)); + return BCME_ERROR; + } + + DHD_PKT_LOG(("%s(): arg %s\n", __FUNCTION__, arg)); + + if (dhd_pktlog_filter_existed(filter, arg, &id) == TRUE) { + DHD_PKT_LOG(("%s(): This pattern id %d already exists\n", __FUNCTION__, id)); + return BCME_OK; + } + + if (filter->list_cnt >= MAX_DHD_PKTLOG_FILTER_LEN) { + DHD_ERROR(("%s(): pktlog filter full\n", __FUNCTION__)); + return BCME_ERROR; + } + + if ((offset = bcmstrtok(&arg, " ", 0)) == NULL) { + DHD_ERROR(("%s(): offset not found\n", __FUNCTION__)); + return BCME_ERROR; + } + + if ((bitmask = bcmstrtok(&arg, " ", 0)) == NULL) { + DHD_ERROR(("%s(): bitmask not found\n", __FUNCTION__)); + return BCME_ERROR; + } + + if
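+/*
+ * dhd_pktlog_filter_existed() above canonicalizes each stored rule back
+ * into its text form "<offset> 0x<mask> 0x<pattern>" and compares that
+ * prefix against the new argument with strncmp(). It relies on the usual
+ * snprintf() cursor idiom: advance the write pointer by the return value
+ * and shrink the remaining length. A minimal sketch, with rule_offset,
+ * size_bytes and mask standing in for the rule's fields (a hardened
+ * variant would also stop once the remaining length reaches zero, since
+ * snprintf() returns the would-be length on truncation):
+ *
+ *     char buf[MAX_FILTER_PATTERN_LEN], *p = buf;
+ *     int left = sizeof(buf), n, j;
+ *
+ *     n = snprintf(p, left, "%d 0x", rule_offset);
+ *     p += n; left -= n;
+ *     for (j = 0; j < size_bytes && left > 0; j++) {
+ *         n = snprintf(p, left, "%02x", mask[j]);
+ *         p += n; left -= n;
+ *     }
+ */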
((pattern = bcmstrtok(&arg, " ", 0)) == NULL) { + DHD_ERROR(("%s(): pattern not found\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* parse filter bitmask */ + mask_size = pattern_atoh_len(bitmask, + (char *) &filter->info[filter->list_cnt].mask[0], + MAX_MASK_PATTERN_FILTER_LEN); + if (mask_size == -1) { + DHD_ERROR(("Rejecting: %s\n", bitmask)); + return BCME_ERROR; + } + + /* parse filter pattern */ + pattern_size = pattern_atoh_len(pattern, + (char *) &filter->info[filter->list_cnt].pattern[0], + MAX_MASK_PATTERN_FILTER_LEN); + if (pattern_size == -1) { + DHD_ERROR(("Rejecting: %s\n", pattern)); + return BCME_ERROR; + } + + prhex("mask", (char *)&filter->info[filter->list_cnt].mask[0], + mask_size); + prhex("pattern", (char *)&filter->info[filter->list_cnt].pattern[0], + pattern_size); + + if (mask_size != pattern_size) { + DHD_ERROR(("%s(): Mask and pattern not the same size\n", __FUNCTION__)); + return BCME_ERROR; + } + + filter->info[filter->list_cnt].offset = strtoul(offset, NULL, 0); + filter->info[filter->list_cnt].size_bytes = mask_size; + filter->info[filter->list_cnt].id = filter->list_cnt + 1; + filter->info[filter->list_cnt].enable = TRUE; + + filter->list_cnt++; + + return BCME_OK; +} + +int +dhd_pktlog_filter_del(dhd_pktlog_filter_t *filter, char *arg) +{ + uint32 id = 0; + + if (!filter || !arg) { + DHD_ERROR(("%s(): pktlog_filter =%p arg =%p\n", __FUNCTION__, filter, arg)); + return BCME_ERROR; + } + + DHD_PKT_LOG(("%s(): arg %s\n", __FUNCTION__, arg)); + + if (dhd_pktlog_filter_existed(filter, arg, &id) != TRUE) { + DHD_PKT_LOG(("%s(): This pattern id %d does not exist\n", __FUNCTION__, id)); + return BCME_OK; + } + + dhd_pktlog_filter_pull_forward(filter, id, filter->list_cnt); + + filter->list_cnt--; + + return BCME_OK; +} + +int +dhd_pktlog_filter_enable(dhd_pktlog_filter_t *filter, uint32 pktmon_case, uint32 enable) +{ + if (!filter) { + DHD_ERROR(("%s(): filter is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + DHD_PKT_LOG(("%s(): pktlog_case %d enable %d\n", __FUNCTION__, pktmon_case, enable)); + + if (enable) { + filter->enable |= pktmon_case; + } else { + filter->enable &= ~pktmon_case; + } + + return BCME_OK; +} + +int +dhd_pktlog_filter_pattern_enable(dhd_pktlog_filter_t *filter, char *arg, uint32 enable) +{ + uint32 id = 0; + + if (!filter || !arg) { + DHD_ERROR(("%s(): pktlog_filter =%p arg =%p\n", __FUNCTION__, filter, arg)); + return BCME_ERROR; + } + + if (dhd_pktlog_filter_existed(filter, arg, &id) == TRUE) { + if (id > 0) { + filter->info[id-1].enable = enable; + DHD_ERROR(("%s(): This pattern id %d is %s\n", + __FUNCTION__, id, (enable ? "enabled" : "disabled"))); + } + } else { + DHD_ERROR(("%s(): This pattern does not exist\n", __FUNCTION__)); + DHD_ERROR(("%s(): arg %s\n", __FUNCTION__, arg)); + } + + return BCME_OK; +} + +int +dhd_pktlog_filter_info(dhd_pktlog_filter_t *filter) +{ + char filter_pattern[MAX_FILTER_PATTERN_LEN]; + char *p; + int i, j; + int nchar; + int len; + + if (!filter) { + DHD_ERROR(("%s(): pktlog_filter is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + DHD_ERROR(("---- PKTLOG FILTER INFO ----\n\n")); + + DHD_ERROR(("Filter list cnt %d Filter is %s\n", + filter->list_cnt, (filter->enable ?
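+/*
+ * Taken together, dhd_pktlog_filter_add() expects its argument as three
+ * space-separated tokens, "<byte offset> <hex bitmask> <hex pattern>",
+ * with bitmask and pattern of equal byte length. As a hypothetical
+ * illustration (this exact string does not appear in the patch), a rule
+ * that matches EAPOL frames by the Ethernet type 0x888e at byte offset
+ * 12 of an Ethernet II frame would read:
+ *
+ *     char rule[] = "12 0xffff 0x888e";
+ *     if (dhd_pktlog_filter_add(filter, rule) != BCME_OK)
+ *         DHD_ERROR(("pktlog filter add failed\n"));
+ */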
"enabled" : "disabled"))); + + for (i = 0; i < filter->list_cnt; i++) { + p = filter_pattern; + len = sizeof(filter_pattern); + + nchar = snprintf(p, len, "%d ", filter->info[i].offset); + p += nchar; + len -= nchar; + + nchar = snprintf(p, len, "0x"); + p += nchar; + len -= nchar; + + for (j = 0; j < filter->info[i].size_bytes; j++) { + nchar = snprintf(p, len, "%02x", filter->info[i].mask[j]); + p += nchar; + len -= nchar; + } + + nchar = snprintf(p, len, " 0x"); + p += nchar; + len -= nchar; + + for (j = 0; j < filter->info[i].size_bytes; j++) { + nchar = snprintf(p, len, "%02x", filter->info[i].pattern[j]); + p += nchar; + len -= nchar; + } + + DHD_ERROR(("ID:%d is %s\n", + filter->info[i].id, (filter->info[i].enable ? "enabled" : "disabled"))); + DHD_ERROR(("Pattern %s\n", filter_pattern)); + } + + DHD_ERROR(("---- PKTLOG FILTER END ----\n")); + + return BCME_OK; +} +bool +dhd_pktlog_filter_matched(dhd_pktlog_filter_t *filter, char *data, uint32 pktlog_case) +{ + uint16 szbts; /* pattern size */ + uint16 offset; /* pattern offset */ + int i, j; + uint8 *mask = NULL; /* bitmask */ + uint8 *pattern = NULL; + uint8 *pkt_offset = NULL; /* packet offset */ + bool matched; + + if (!filter || !data) { + DHD_PKT_LOG(("%s(): filter=%p data=%p\n", + __FUNCTION__, filter, data)); + return TRUE; + } + + if (!(pktlog_case & filter->enable)) { + DHD_PKT_LOG(("%s(): pktlog_case %d return TRUE filter is disabled\n", + __FUNCTION__, pktlog_case)); + return TRUE; + } + + for (i = 0; i < filter->list_cnt; i++) { + if (&filter->info[i] && filter->info[i].id && filter->info[i].enable) { + szbts = filter->info[i].size_bytes; + offset = filter->info[i].offset; + mask = &filter->info[i].mask[0]; + pkt_offset = &data[offset]; + pattern = &filter->info[i].pattern[0]; + + matched = TRUE; + for (j = 0; j < szbts; j++) { + if ((mask[j] & pkt_offset[j]) != pattern[j]) { + matched = FALSE; + break; + } + } + + if (matched) { + DHD_PKT_LOG(("%s(): pktlog_filter return TRUE id %d\n", + __FUNCTION__, filter->info[i].id)); + return TRUE; + } + } else { + DHD_PKT_LOG(("%s(): filter ino is null %p\n", + __FUNCTION__, &filter->info[i])); + } + } + + return FALSE; +} + +/* Ethernet Type MAC Header 12 bytes + Frame payload 10 bytes */ +#define PKTLOG_MINIMIZE_REPORT_LEN 22 + +static char pktlog_minmize_mask_table[] = { + 0xff, 0x00, 0x00, 0x00, 0xff, 0x0f, /* Ethernet Type MAC Header - Destination MAC Address */ + 0xff, 0x00, 0x00, 0x00, 0xff, 0x0f, /* Ethernet Type MAC Header - Source MAC Address */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Ethernet Type MAC Header - Ether Type - 2 bytes */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Frame payload */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* UDP port number offset - bytes as 0xff */ + 0xff, 0xff, +}; + +static inline void +dhd_pktlog_minimize_report(char *pkt, uint32 frame_len, + void *file, const void *user_buf, void *pos) +{ + int i; + int ret = 0; + int table_len; + int report_len; + char *p_table; + char *mem_buf = NULL; + + table_len = sizeof(pktlog_minmize_mask_table); + report_len = table_len; + p_table = &pktlog_minmize_mask_table[0]; + + if (frame_len < PKTLOG_MINIMIZE_REPORT_LEN) { + DHD_ERROR(("%s : frame_len is samller than min\n", __FUNCTION__)); + return; + } + + mem_buf = vmalloc(frame_len); + if (!mem_buf) { + DHD_ERROR(("%s : failed to alloc membuf\n", __FUNCTION__)); + return; + } + + bzero(mem_buf, frame_len); + + if (frame_len < table_len) { + report_len = PKTLOG_MINIMIZE_REPORT_LEN; + } + + for (i = 0; i < report_len; 
i++) { + mem_buf[i] = pkt[i] & p_table[i]; + } + + ret = dhd_export_debug_data(mem_buf, + file, user_buf, frame_len, pos); + if (ret < 0) { + DHD_ERROR(("%s : Write minimize report\n", __FUNCTION__)); + } + vfree(mem_buf); +} + +dhd_pktlog_ring_t* +dhd_pktlog_ring_change_size(dhd_pktlog_ring_t *ringbuf, int size) +{ + uint32 alloc_len; + uint32 pktlog_minmize; + dhd_pktlog_ring_t *pktlog_ring = NULL; + dhd_pub_t *dhdp; + + if (!ringbuf) { + DHD_ERROR(("%s(): ringbuf is NULL\n", __FUNCTION__)); + return NULL; + } + + alloc_len = size; + if (alloc_len < MIN_PKTLOG_LEN) { + alloc_len = MIN_PKTLOG_LEN; + } + if (alloc_len > MAX_PKTLOG_LEN) { + alloc_len = MAX_PKTLOG_LEN; + } + DHD_ERROR(("ring size requested: %d alloc: %d\n", size, alloc_len)); + + /* backup variable */ + pktlog_minmize = ringbuf->pktlog_minmize; + dhdp = ringbuf->dhdp; + + /* free ring_info */ + dhd_pktlog_ring_deinit(dhdp, ringbuf); + + /* alloc ring_info */ + pktlog_ring = dhd_pktlog_ring_init(dhdp, alloc_len); + + /* restore variable */ + if (pktlog_ring) { + OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, TRUE); + pktlog_ring->pktlog_minmize = pktlog_minmize; + } + + return pktlog_ring; +} + +void +dhd_pktlog_filter_pull_forward(dhd_pktlog_filter_t *filter, uint32 del_filter_id, uint32 list_cnt) +{ + int ret = 0; + int pos = 0; + int move_list_cnt = 0; + int move_bytes = 0; + + if ((del_filter_id > list_cnt) || + (list_cnt > MAX_DHD_PKTLOG_FILTER_LEN)) { + DHD_ERROR(("Wrong id %d cnt %d tried to remove\n", del_filter_id, list_cnt)); + return; + } + + move_list_cnt = list_cnt - del_filter_id; + + pos = del_filter_id -1; + move_bytes = sizeof(dhd_pktlog_filter_info_t) * move_list_cnt; + if (move_list_cnt) { + ret = memmove_s(&filter->info[pos], move_bytes + sizeof(dhd_pktlog_filter_info_t), + &filter->info[pos+1], move_bytes); + if (ret) { + DHD_ERROR(("filter moving failed\n")); + return; + } + for (; pos < list_cnt -1; pos++) { + filter->info[pos].id -= 1; + } + } + bzero(&filter->info[list_cnt-1], sizeof(dhd_pktlog_filter_info_t)); +} + +void dhd_pktlog_get_filename(dhd_pub_t *dhdp, char *dump_path, int len) +{ + /* Init file name */ + bzero(dump_path, len); + clear_debug_dump_time(dhdp->debug_dump_time_pktlog_str); + get_debug_dump_time(dhdp->debug_dump_time_pktlog_str); + + if (dhdp->memdump_type == DUMP_TYPE_BY_SYSDUMP) { + if (dhdp->debug_dump_subcmd == CMD_UNWANTED) { + snprintf(dump_path, len, "%s", + DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE + DHD_DUMP_SUBSTR_UNWANTED); + } else if (dhdp->debug_dump_subcmd == CMD_DISCONNECTED) { + snprintf(dump_path, len, "%s", + DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE + DHD_DUMP_SUBSTR_DISCONNECTED); + } else { + snprintf(dump_path, len, "%s", + DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE); + } + } else { + if (dhdp->pktlog_debug) { + snprintf(dump_path, len, "%s", + DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DEBUG_DUMP_TYPE); + } else { + snprintf(dump_path, len, "%s", + DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE); + } + + } + + snprintf(dump_path, len, "%s_%s.pcap", dump_path, + dhdp->debug_dump_time_pktlog_str); + DHD_ERROR(("%s: pktlog path = %s%s\n", __FUNCTION__, dump_path, FILE_NAME_HAL_TAG)); + clear_debug_dump_time(dhdp->debug_dump_time_pktlog_str); +} + +uint32 +dhd_pktlog_get_item_length(dhd_pktlog_ring_info_t *report_ptr) +{ + uint32 len = 0; + char buf[DHD_PKTLOG_FATE_INFO_STR_LEN]; + int bytes_user_data = 0; + uint32 write_frame_len; + uint32 frame_len; + + len += (uint32)sizeof(report_ptr->info.driver_ts_sec); + len += (uint32)sizeof(report_ptr->info.driver_ts_usec); + + if 
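+/*
+ * dhd_pktlog_get_item_length() mirrors the libpcap per-record layout:
+ * two 32-bit timestamp words, then incl_len and orig_len (written with
+ * the same value by this driver), then the captured bytes, which are
+ * extended here with a textual packet-fate trailer. The standard record
+ * header being sized is, in effect:
+ *
+ *     struct pcap_rec_hdr {
+ *         uint32 ts_sec;      (timestamp, seconds)
+ *         uint32 ts_usec;     (timestamp, microseconds)
+ *         uint32 incl_len;    (bytes saved in the file)
+ *         uint32 orig_len;    (original frame length)
+ *     };
+ *
+ * hence the accounting: four fixed uint32 fields plus the clamped frame
+ * length plus the fate-string length.
+ */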
(report_ptr->info.payload_type == FRAME_TYPE_ETHERNET_II) { + frame_len = (uint32)min(report_ptr->info.pkt_len, (size_t)MAX_FRAME_LEN_ETHERNET); + } else { + frame_len = (uint32)min(report_ptr->info.pkt_len, (size_t)MAX_FRAME_LEN_80211_MGMT); + } + + bytes_user_data = sprintf(buf, "%s:%s:%02d\n", DHD_PKTLOG_FATE_INFO_FORMAT, + (report_ptr->tx_fate ? "Failure" : "Succeed"), report_ptr->tx_fate); + write_frame_len = frame_len + bytes_user_data; + + /* pcap pkt head has incl_len and orig_len */ + len += (uint32)sizeof(write_frame_len); + len += (uint32)sizeof(write_frame_len); + len += frame_len; + len += bytes_user_data; + + return len; +} + +uint32 +dhd_pktlog_get_dump_length(dhd_pub_t *dhdp) +{ + dhd_pktlog_ring_info_t *report_ptr; + dhd_pktlog_ring_t *pktlog_ring; + uint32 len; + dll_t *item_p, *next_p; + + if (!dhdp || !dhdp->pktlog) { + DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n", + __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL))); + return -EINVAL; + } + + if (!dhdp->pktlog->pktlog_ring) { + DHD_PKT_LOG(("%s(): pktlog_ring =%p\n", + __FUNCTION__, dhdp->pktlog->pktlog_ring)); + return -EINVAL; + } + + pktlog_ring = dhdp->pktlog->pktlog_ring; + OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, FALSE); + + len = sizeof(dhd_pktlog_pcap_hdr_t); + + for (item_p = dll_head_p(&pktlog_ring->ring_info_head); + !dll_end(&pktlog_ring->ring_info_head, item_p); + item_p = next_p) { + next_p = dll_next_p(item_p); + report_ptr = (dhd_pktlog_ring_info_t *)item_p; + len += dhd_pktlog_get_item_length(report_ptr); + } + OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, TRUE); + DHD_PKT_LOG(("calculated pkt log dump len:%d\n", len)); + + return len; +} + +int +dhd_pktlog_dump_write(dhd_pub_t *dhdp, void *file, const void *user_buf, uint32 size) +{ + dhd_pktlog_ring_info_t *report_ptr; + dhd_pktlog_ring_t *pktlog_ring; + char buf[DHD_PKTLOG_FATE_INFO_STR_LEN]; + dhd_pktlog_pcap_hdr_t pcap_h; + uint32 write_frame_len; + uint32 frame_len; + ulong len; + int bytes_user_data = 0; + loff_t pos = 0; + int ret = BCME_OK; + dll_t *item_p, *next_p; + + if (!dhdp || !dhdp->pktlog) { + DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n", + __FUNCTION__, dhdp, (dhdp ?
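+/*
+ * The dump stream opens with a standard pcap global header, filled in a
+ * few lines below: magic 0xa1b2c3d4 written in native byte order (readers
+ * detect endianness from the magic), format version 2.4, a 256 KB snap
+ * length, and link type 147, which falls in the DLT_USER0..DLT_USER15
+ * range libpcap reserves for private formats -- fitting, since every
+ * record carries the extra fate trailer. A reader-side sanity check
+ * (illustrative only) could be:
+ *
+ *     dhd_pktlog_pcap_hdr_t h;
+ *     (read sizeof(h) bytes from the start of the dump)
+ *     if (h.magic_number != 0xa1b2c3d4 &&
+ *         h.magic_number != 0xd4c3b2a1)
+ *         return BCME_ERROR;    (not pcap in either byte order)
+ */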
dhdp->pktlog : NULL))); + return -EINVAL; + } + + if (!dhdp->pktlog->pktlog_ring) { + DHD_PKT_LOG(("%s(): pktlog_ring =%p\n", + __FUNCTION__, dhdp->pktlog->pktlog_ring)); + return -EINVAL; + } + + if (file && !user_buf && (size == 0)) { + DHD_ERROR(("Local file pktlog dump requested\n")); + } else if (!file && user_buf && (size > 0)) { + DHD_ERROR(("HAL file pktlog dump %d bytes requested\n", size)); + } else { + DHD_ERROR(("Wrong type pktlog dump requested\n")); + return -EINVAL; + } + + pktlog_ring = dhdp->pktlog->pktlog_ring; + OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, FALSE); + + pcap_h.magic_number = PKTLOG_PCAP_MAGIC_NUM; + pcap_h.version_major = PKTLOG_PCAP_MAJOR_VER; + pcap_h.version_minor = PKTLOG_PCAP_MINOR_VER; + pcap_h.thiszone = 0x0; + pcap_h.sigfigs = 0x0; + pcap_h.snaplen = PKTLOG_PCAP_SNAP_LEN; + pcap_h.network = PKTLOG_PCAP_NETWORK_TYPE; + + ret = dhd_export_debug_data((char *)&pcap_h, file, user_buf, sizeof(pcap_h), &pos); + len = sizeof(pcap_h); + + for (item_p = dll_head_p(&pktlog_ring->ring_info_head); + !dll_end(&pktlog_ring->ring_info_head, item_p); + item_p = next_p) { + + next_p = dll_next_p(item_p); + report_ptr = (dhd_pktlog_ring_info_t *)item_p; + + if ((file == NULL) && + (len + dhd_pktlog_get_item_length(report_ptr) > size)) { + DHD_ERROR(("overflowed pkt logs are dropped\n")); + break; + } + + ret = dhd_export_debug_data((char*)&report_ptr->info.driver_ts_sec, file, + user_buf, sizeof(report_ptr->info.driver_ts_sec), &pos); + len += sizeof(report_ptr->info.driver_ts_sec); + + ret = dhd_export_debug_data((char*)&report_ptr->info.driver_ts_usec, file, + user_buf, sizeof(report_ptr->info.driver_ts_usec), &pos); + len += sizeof(report_ptr->info.driver_ts_usec); + + if (report_ptr->info.payload_type == FRAME_TYPE_ETHERNET_II) { + frame_len = (uint32)min(report_ptr->info.pkt_len, + (size_t)MAX_FRAME_LEN_ETHERNET); + + } else { + frame_len = (uint32)min(report_ptr->info.pkt_len, + (size_t)MAX_FRAME_LEN_80211_MGMT); + } + + bytes_user_data = sprintf(buf, "%s:%s:%02d\n", DHD_PKTLOG_FATE_INFO_FORMAT, + (report_ptr->tx_fate ? 
"Failure" : "Succeed"), report_ptr->tx_fate); + write_frame_len = frame_len + bytes_user_data; + + /* pcap pkt head has incl_len and orig_len */ + ret = dhd_export_debug_data((char*)&write_frame_len, file, user_buf, + sizeof(write_frame_len), &pos); + len += sizeof(write_frame_len); + + ret = dhd_export_debug_data((char*)&write_frame_len, file, user_buf, + sizeof(write_frame_len), &pos); + len += sizeof(write_frame_len); + + if (pktlog_ring->pktlog_minmize) { + dhd_pktlog_minimize_report(PKTDATA(pktlog_ring->dhdp->osh, + report_ptr->info.pkt), frame_len, file, user_buf, &pos); + } else { + ret = dhd_export_debug_data(PKTDATA(pktlog_ring->dhdp->osh, + report_ptr->info.pkt), file, user_buf, frame_len, &pos); + } + len += frame_len; + + ret = dhd_export_debug_data(buf, file, user_buf, bytes_user_data, &pos); + len += bytes_user_data; + } + OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, TRUE); + + return ret; +} + +int +dhd_pktlog_dump_write_memory(dhd_pub_t *dhdp, const void *user_buf, uint32 size) +{ + int ret = dhd_pktlog_dump_write(dhdp, NULL, user_buf, size); + if (ret < 0) { + DHD_ERROR(("dhd_pktlog_dump_write_memory error\n")); + } + return ret; +} + +int +dhd_pktlog_dump_write_file(dhd_pub_t *dhdp) +{ + struct file *w_pcap_fp = NULL; + uint32 file_mode; + mm_segment_t old_fs; + char pktlogdump_path[128]; + int ret = BCME_OK; + + dhd_pktlog_get_filename(dhdp, pktlogdump_path, 128); + old_fs = get_fs(); + set_fs(KERNEL_DS); + file_mode = O_CREAT | O_WRONLY; + + w_pcap_fp = filp_open(pktlogdump_path, file_mode, 0664); + if (IS_ERR(w_pcap_fp)) { + DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n", + __FUNCTION__, pktlogdump_path, PTR_ERR(w_pcap_fp))); + ret = BCME_ERROR; + goto fail; + } + + dhd_pktlog_dump_write(dhdp, w_pcap_fp, NULL, 0); + if (ret < 0) { + DHD_ERROR(("dhd_pktlog_dump_write error\n")); + goto fail; + } + + /* Sync file from filesystem to physical media */ + ret = vfs_fsync(w_pcap_fp, 0); + if (ret < 0) { + DHD_ERROR(("%s(): sync pcap file error, err = %d\n", __FUNCTION__, ret)); + goto fail; + } +fail: + if (!IS_ERR(w_pcap_fp)) { + filp_close(w_pcap_fp, NULL); + } + + set_fs(old_fs); + +#ifdef DHD_DUMP_MNGR + if (ret >= 0) { + dhd_dump_file_manage_enqueue(dhdp, pktlogdump_path, DHD_PKTLOG_DUMP_TYPE); + } +#endif /* DHD_DUMP_MNGR */ + return ret; +} + +#ifdef DHD_COMPACT_PKT_LOG +static uint64 +dhd_cpkt_log_calc_time_diff(dhd_pktlog_ring_info_t *pkt_info, uint64 curr_ts_nsec) +{ + uint64 pkt_ts_nsec = pkt_info->info.driver_ts_sec * NSEC_PER_SEC + + pkt_info->info.driver_ts_usec * NSEC_PER_USEC; + + return (curr_ts_nsec - pkt_ts_nsec) / NSEC_PER_USEC; +} + +static int +dhd_cpkt_log_get_ts_idx(dhd_pktlog_t *pktlog, dhd_pktlog_ring_info_t *pkt_info, u64 curr_ts_nsec) +{ + struct rb_node *n = pktlog->cpkt_log_tt_rbt.rb_node; + dhd_cpkt_log_ts_node_t *node = NULL; + + uint64 ts_diff = dhd_cpkt_log_calc_time_diff(pkt_info, curr_ts_nsec); + + if (ts_diff > dhd_cpkt_log_tt_idx[CPKT_LOG_TT_IDX_ARR_SZ - 1]) + return CPKT_LOG_TT_IDX_ARR_SZ; + + while (n) { + node = rb_entry(n, dhd_cpkt_log_ts_node_t, rb); + + if (ts_diff < node->ts_diff) + n = n->rb_left; + else if (ts_diff > node->ts_diff) + n = n->rb_right; + else + break; + } + + if (node != NULL) { + if (node->idx && ts_diff < node->ts_diff) + return node->idx - 1; + return node->idx; + } + + return BCME_NOTFOUND; +} + +static int +dhd_cpkt_log_get_direction(dhd_pktlog_ring_info_t *pkt_info) +{ + return pkt_info->info.direction == PKTLOG_TXPKT_CASE ? 
PKT_TX : PKT_RX; +} + +static int +dhd_cpkt_log_get_802_1x_subtype(eapol_header_t *eapol) +{ + int subtype; + eap_header_t *eap; + eapol_wpa_key_header_t *ek; + + uint16 key_info; + int pair, ack, mic, kerr, req, sec, install; + + subtype = CPKT_LOG_802_1X_SUBTYPE_OTHERS; + if (eapol->type != EAPOL_KEY) { + eap = (eap_header_t *)eapol->body; + + switch (eap->type) { + case EAP_IDENTITY: + subtype = CPKT_LOG_802_1X_SUBTYPE_IDENTITY; + break; + case REALM_EAP_TLS: + subtype = CPKT_LOG_802_1X_SUBTYPE_TLS; + break; + case REALM_EAP_TTLS: + subtype = CPKT_LOG_802_1X_SUBTYPE_TTLS; + break; + case REALM_EAP_FAST: + subtype = CPKT_LOG_802_1X_SUBTYPE_FAST; + break; + case REALM_EAP_LEAP: + subtype = CPKT_LOG_802_1X_SUBTYPE_LEAP; + break; + case REALM_EAP_PSK: + subtype = CPKT_LOG_802_1X_SUBTYPE_PWD; + break; + case REALM_EAP_SIM: + subtype = CPKT_LOG_802_1X_SUBTYPE_SIM; + break; + case REALM_EAP_AKA: + subtype = CPKT_LOG_802_1X_SUBTYPE_AKA; + break; + case REALM_EAP_AKAP: + subtype = CPKT_LOG_802_1X_SUBTYPE_AKAP; + break; + default: + break; + } + if (eap->code == EAP_SUCCESS) + subtype = CPKT_LOG_802_1X_SUBTYPE_SUCCESS; + } else { + /* in case of 4 way handshake */ + ek = (eapol_wpa_key_header_t *)(eapol->body); + + if (ek->type == EAPOL_WPA2_KEY || ek->type == EAPOL_WPA_KEY) { + key_info = ntoh16_ua(&ek->key_info); + + pair = 0 != (key_info & WPA_KEY_PAIRWISE); + ack = 0 != (key_info & WPA_KEY_ACK); + mic = 0 != (key_info & WPA_KEY_MIC); + kerr = 0 != (key_info & WPA_KEY_ERROR); + req = 0 != (key_info & WPA_KEY_REQ); + sec = 0 != (key_info & WPA_KEY_SECURE); + install = 0 != (key_info & WPA_KEY_INSTALL); + + if (!sec && !mic && ack && !install && pair && !kerr && !req) + subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M1; + else if (pair && !install && !ack && mic && !sec && !kerr && !req) + subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M2; + else if (pair && ack && mic && sec && !kerr && !req) + subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M3; + else if (pair && !install && !ack && mic && sec && !req && !kerr) + subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M4; + } + } + + return subtype; +} + +static int +dhd_cpkt_log_get_pkt_info(dhd_pktlog_t *pktlog, dhd_pktlog_ring_info_t *pkt_info) +{ + int type; + int subtype = 0; + + uint8 prot; + uint16 src_port, dst_port; + int len, offset; + + uint8 *pdata; + uint8 *pkt_data; + + uint16 eth_type; + struct bcmarp *arp; + struct bcmicmp_hdr *icmp; + struct ipv4_hdr *ipv4; + struct ether_header *eth_hdr; + bcm_tlv_t *dhcp_opt; + + struct ipv6_hdr *ipv6; + struct icmp6_hdr *icmpv6_hdr; + + pkt_data = (uint8 *)PKTDATA(pktlog->dhdp->osh, pkt_info->info.pkt); + + eth_hdr = (struct ether_header *)pkt_data; + eth_type = ntoh16(eth_hdr->ether_type); + + type = CPKT_LOG_TYPE_OTHERS; + switch (eth_type) { + case ETHER_TYPE_IP: + if (get_pkt_ip_type(pktlog->dhdp->osh, pkt_info->info.pkt, + &pdata, &len, &prot) != 0) { + DHD_PKT_LOG(("%s: fail to get pkt ip type\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (prot == IP_PROT_ICMP) { + icmp = (struct bcmicmp_hdr *)(pdata); + if (!(icmp->type == ICMP_TYPE_ECHO_REQUEST || + icmp->type == ICMP_TYPE_ECHO_REPLY || + icmp->type == CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE)) { + return BCME_ERROR; + } + + if (icmp->type == ICMP_TYPE_ECHO_REQUEST) { + type = CPKT_LOG_TYPE_ICMP_REQ; + /* Subtype = Last 8 bits of identifier */ + subtype = ntoh16_ua(pdata + sizeof(*icmp)) & 0xFF; + } else if (icmp->type == ICMP_TYPE_ECHO_REPLY) { + type = CPKT_LOG_TYPE_ICMP_RES; + /* Subtype = Last 8 bits of identifier */ + subtype = ntoh16_ua(pdata + sizeof(*icmp)) & 0xFF; 
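+/*
+ * For reference, the 4-way handshake classification above follows the
+ * standard EAPOL-Key key_info encodings; the key-error and request bits
+ * must be clear in every case:
+ *
+ *               pairwise  ack  mic  secure
+ *     M1 (AP)       1      1    0     0
+ *     M2 (STA)      1      0    1     0
+ *     M3 (AP)       1      1    1     1
+ *     M4 (STA)      1      0    1     1
+ *
+ * M1, M2 and M4 additionally require install == 0; the install bit,
+ * which is set in M3, is not tested by the M3 arm above.
+ */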
+ } else if (icmp->type == CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE) { + type = CPKT_LOG_TYPE_ICMP_UNREACHABLE; + /* Subtype = Last 8 bits of identifier */ + ipv4 = (struct ipv4_hdr *)(pdata + sizeof(*icmp) + + CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE_IPV4_OFFSET); + subtype = ipv4->id & 0xFF; + } + + DHD_PKT_LOG(("%s: type = ICMP(%d), subtype = %x \n", + __FUNCTION__, type, subtype)); + } else if (prot == IP_PROT_UDP) { + if (len < UDP_HDR_LEN) + return BCME_ERROR; + + src_port = ntoh16_ua(pdata); + dst_port = ntoh16_ua(pdata + UDP_DEST_PORT_OFFSET); + + if (src_port == DHCP_PORT_SERVER || src_port == DHCP_PORT_CLIENT) { + type = CPKT_LOG_TYPE_DHCP; + /* Subtype = DHCP message type */ + offset = DHCP_OPT_OFFSET + CPKT_LOG_DHCP_MAGIC_COOKIE_LEN; + if ((UDP_HDR_LEN + offset) >= len) + return BCME_ERROR; + len -= (UDP_HDR_LEN - offset); + + dhcp_opt = bcm_parse_tlvs(pdata + UDP_HDR_LEN + offset, + len, DHCP_OPT_MSGTYPE); + if (dhcp_opt == NULL) + return BCME_NOTFOUND; + subtype = dhcp_opt->data[0]; + + DHD_PKT_LOG(("%s: type = DHCP(%d), subtype = %x \n", + __FUNCTION__, type, subtype)); + } else if (src_port == CPKT_LOG_DNS_PORT_CLIENT || + dst_port == CPKT_LOG_DNS_PORT_CLIENT || + dst_port == CPKT_LOG_MDNS_PORT_CLIENT) { + type = CPKT_LOG_TYPE_DNS; + /* Subtype = Last 8 bits of DNS Transaction ID */ + subtype = ntoh16_ua(pdata + UDP_HDR_LEN) & 0xFF; + + DHD_PKT_LOG(("%s: type = DNS(%d), subtype = %x \n", + __FUNCTION__, type, subtype)); + } else { + DHD_PKT_LOG(("%s: unsupported ports num (src:%d, dst:%d)\n", + __FUNCTION__, src_port, dst_port)); + } + } else { + DHD_PKT_LOG(("%s: prot = %x\n", __FUNCTION__, prot)); + } + + break; + case ETHER_TYPE_ARP: + type = CPKT_LOG_TYPE_ARP; + /* Subtype = Last 8 bits of target IP address */ + arp = (struct bcmarp *)(pkt_data + ETHER_HDR_LEN); + subtype = arp->dst_ip[IPV4_ADDR_LEN - 1]; + + DHD_PKT_LOG(("%s: type = ARP(%d), subtype = %x\n", + __FUNCTION__, type, subtype)); + + break; + case ETHER_TYPE_802_1X: + type = CPKT_LOG_TYPE_802_1X; + /* EAPOL for 802.3/Ethernet */ + subtype = dhd_cpkt_log_get_802_1x_subtype((eapol_header_t *)pkt_data); + + DHD_PKT_LOG(("%s: type = 802.1x(%d), subtype = %x\n", + __FUNCTION__, type, subtype)); + + break; + case ETHER_TYPE_IPV6: + ipv6 = (struct ipv6_hdr *)(pkt_data + ETHER_HDR_LEN); + if (ipv6->nexthdr == ICMPV6_HEADER_TYPE) { + type = CPKT_LOG_TYPE_ICMPv6; + icmpv6_hdr = + (struct icmp6_hdr *)(pkt_data + ETHER_HDR_LEN + sizeof(*ipv6)); + subtype = icmpv6_hdr->icmp6_type; + + DHD_PKT_LOG(("%s: type = ICMPv6(%x), subtype = %x\n", + __FUNCTION__, type, subtype)); + } else { + DHD_ERROR(("%s: unsupported ipv6 next header\n", __FUNCTION__)); + } + + break; + default: + DHD_ERROR(("%s: Invalid eth type (%x)\n", __FUNCTION__, eth_hdr->ether_type)); + break; + } + + return (subtype << CPKT_LOG_BIT_LEN_TYPE) | type; +} + +static int +dhd_cpkt_log_get_pkt_fate(dhd_pktlog_ring_info_t *pktlog_info) +{ + return pktlog_info->fate; +} + +/* + * dhd_cpkt_log_build: prepare 22 bits of data as compact packet log format to report to big data + * + * pkt_info: one packet data from packet log + * curr_ts_nsec: current time (nano seconds) + * cpkt: pointer for output(22 bits compact packet log) + * + */ +static int +dhd_cpkt_log_build(dhd_pktlog_t *pktlog, dhd_pktlog_ring_info_t *pkt_info, + u64 curr_ts_nsec, int *cpkt) +{ + int ret; + int mask; + int temp = 0; + + /* Timestamp index */ + ret = dhd_cpkt_log_get_ts_idx(pktlog, pkt_info, curr_ts_nsec); + if (ret < 0) { + DHD_ERROR(("%s: Invalid cpktlog ts, err = %d\n", __FUNCTION__, ret)); + 
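+/*
+ * Each compact record packs four fields -- timestamp bucket, direction,
+ * type plus subtype, and packet fate -- into one CPKT_LOG_BIT_SIZE
+ * (22-bit) word via the mask-and-shift steps that follow; the widths
+ * and positions come from the CPKT_LOG_BIT_MASK and CPKT_LOG_BIT_OFFSET
+ * constants defined in the headers. Generic form of the idiom:
+ *
+ *     word  |= (value & mask) << offset;    (pack, as done below)
+ *     value  = (word >> offset) & mask;     (unpack on the consumer side)
+ */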
return ret; + } + mask = CPKT_LOG_BIT_MASK_TS; + temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_TS); + + /* Direction: Tx/Rx */ + ret = dhd_cpkt_log_get_direction(pkt_info); + mask = CPKT_LOG_BIT_MASK_DIR; + temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_DIR); + + /* Info = Packet Type & Packet Subtype */ + ret = dhd_cpkt_log_get_pkt_info(pktlog, pkt_info); + if (ret < 0) { + DHD_ERROR(("%s: Invalid cpktlog info, err = %d\n", __FUNCTION__, ret)); + return ret; + } + mask = CPKT_LOG_BIT_MASK_SUBTYPE << CPKT_LOG_BIT_LEN_TYPE | CPKT_LOG_BIT_MASK_TYPE; + temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_TYPE); + + /* Packet Fate */ + ret = dhd_cpkt_log_get_pkt_fate(pkt_info); + mask = CPKT_LOG_BIT_MASK_PKT_FATE; + temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_PKT_FATE); + + *cpkt = temp; + + return BCME_OK; +} + +int +dhd_cpkt_log_proc(dhd_pub_t *dhdp, char *buf, int buf_len, int bit_offset, int req_pkt_num) +{ + int ret; + int cpkt; + int offset = bit_offset; + dll_t *item_p, *prev_p; + + uint8 pkt_cnt; + u64 curr_ts_nsec; + + dhd_pktlog_t *pktlog; + dhd_pktlog_ring_t *pktlog_rbuf; + + if (!dhdp || !dhdp->pktlog) { + DHD_ERROR(("%s: dhdp or pktlog is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!dhdp->pktlog->pktlog_ring) { + DHD_ERROR(("%s: pktlog_ring is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + DHD_PKT_LOG(("%s: start cpkt log\n", __FUNCTION__)); + + pktlog = dhdp->pktlog; + pktlog_rbuf = pktlog->pktlog_ring; + + req_pkt_num = req_pkt_num > CPKT_LOG_MAX_NUM ? + CPKT_LOG_MAX_NUM : req_pkt_num; + + pkt_cnt = 0; + curr_ts_nsec = local_clock(); + for (item_p = dll_tail_p(&pktlog_rbuf->ring_info_head); + !dll_end(&pktlog_rbuf->ring_info_head, item_p); + item_p = prev_p) { + prev_p = dll_prev_p(item_p); + if (prev_p == NULL) + break; + + ret = dhd_cpkt_log_build(pktlog, (dhd_pktlog_ring_info_t *)item_p, + curr_ts_nsec, &cpkt); + if (ret < 0) + continue; + + offset = dhd_bit_pack(buf, buf_len, offset, cpkt, CPKT_LOG_BIT_SIZE); + + pkt_cnt++; + if (pkt_cnt >= req_pkt_num) + break; + } + + return offset; +} + +static void +dhd_cpkt_log_insert_ts(dhd_cpkt_log_ts_node_t *node, struct rb_root *root) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + u64 ts_diff = node->ts_diff; + + while (*new) { + parent = *new; + if (ts_diff < rb_entry(parent, dhd_cpkt_log_ts_node_t, rb)->ts_diff) + new = &parent->rb_left; + else + new = &parent->rb_right; + } + + rb_link_node(&node->rb, parent, new); + rb_insert_color(&node->rb, root); +} + +static void +dhd_cpkt_log_deinit_tt(dhd_pub_t *dhdp) +{ + struct rb_node *n; + dhd_pktlog_t *pktlog = dhdp->pktlog; + + dhd_cpkt_log_ts_node_t *node; + + while ((n = rb_first(&pktlog->cpkt_log_tt_rbt))) { + node = rb_entry(n, dhd_cpkt_log_ts_node_t, rb); + rb_erase(&node->rb, &pktlog->cpkt_log_tt_rbt); + MFREE(dhdp->osh, node, sizeof(*node)); + } +} + +static int +dhd_cpkt_log_init_tt(dhd_pub_t *dhdp) +{ + int i; + int ret = BCME_OK; + + dhd_pktlog_t *pktlog = dhdp->pktlog; + + dhd_cpkt_log_ts_node_t *node; + + for (i = 0; i < ARRAYSIZE(dhd_cpkt_log_tt_idx); i++) { + node = (dhd_cpkt_log_ts_node_t *)MALLOCZ(dhdp->osh, sizeof(*node)); + if (!node) { + ret = BCME_NOMEM; + goto exit; + } + node->ts_diff = dhd_cpkt_log_tt_idx[i]; + node->idx = i; + + dhd_cpkt_log_insert_ts(node, &pktlog->cpkt_log_tt_rbt); + } + + return BCME_OK; +exit: + dhd_cpkt_log_deinit_tt(dhdp); + + return ret; +} +#endif /* DHD_COMPACT_PKT_LOG */ +#endif /* DHD_PKT_LOGGING */ diff --git a/bcmdhd.101.10.361.x/dhd_pktlog.h b/bcmdhd.101.10.361.x/dhd_pktlog.h new file mode 100755 index 
0000000..bfa38f5 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_pktlog.h @@ -0,0 +1,311 @@ +/* + * DHD debugability packet logging header file + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#ifndef __DHD_PKTLOG_H_ +#define __DHD_PKTLOG_H_ + +#include +#include +#include +#ifdef DHD_COMPACT_PKT_LOG +#include +#endif /* DHD_COMPACT_PKT_LOG */ + +#ifdef DHD_PKT_LOGGING +#define DHD_PKT_LOG(args) DHD_INFO(args) +#define DEFAULT_MULTIPLE_PKTLOG_BUF 1 +#ifndef CUSTOM_MULTIPLE_PKTLOG_BUF +#define CUSTOM_MULTIPLE_PKTLOG_BUF DEFAULT_MULTIPLE_PKTLOG_BUF +#endif /* CUSTOM_MULTIPLE_PKTLOG_BUF */ +#define MIN_PKTLOG_LEN (32 * 10 * 2 * CUSTOM_MULTIPLE_PKTLOG_BUF) +#define MAX_PKTLOG_LEN (32 * 10 * 2 * 10) +#define MAX_DHD_PKTLOG_FILTER_LEN 14 +#define MAX_MASK_PATTERN_FILTER_LEN 64 +#define PKTLOG_TXPKT_CASE 0x0001 +#define PKTLOG_TXSTATUS_CASE 0x0002 +#define PKTLOG_RXPKT_CASE 0x0004 +/* MAX_FILTER_PATTERN_LEN is buf len to print bitmask/pattern with string */ +#define MAX_FILTER_PATTERN_LEN \ + ((MAX_MASK_PATTERN_FILTER_LEN * HD_BYTE_SIZE) + HD_PREFIX_SIZE + 1) * 2 +#define PKTLOG_DUMP_BUF_SIZE (64 * 1024) + +typedef struct dhd_dbg_pktlog_info { + frame_type payload_type; + size_t pkt_len; + uint32 driver_ts_sec; + uint32 driver_ts_usec; + uint32 firmware_ts; + uint32 pkt_hash; + bool direction; + void *pkt; +} dhd_dbg_pktlog_info_t; + +typedef struct dhd_pktlog_ring_info +{ + dll_t p_info; /* list pointer */ + union { + wifi_tx_packet_fate tx_fate; + wifi_rx_packet_fate rx_fate; + uint32 fate; + }; + dhd_dbg_pktlog_info_t info; +} dhd_pktlog_ring_info_t; + +typedef struct dhd_pktlog_ring +{ + dll_t ring_info_head; /* ring_info list */ + dll_t ring_info_free; /* ring_info free list */ + osl_atomic_t start; + uint32 pktlog_minmize; + uint32 pktlog_len; /* size of pkts */ + uint32 pktcount; + spinlock_t *pktlog_ring_lock; + dhd_pub_t *dhdp; + dhd_pktlog_ring_info_t *ring_info_mem; /* ring_info mem pointer */ +} dhd_pktlog_ring_t; + +typedef struct dhd_pktlog_filter_info +{ + uint32 id; + uint32 offset; + uint32 size_bytes; /* Size of pattern. 
*/ + uint32 enable; + uint8 mask[MAX_MASK_PATTERN_FILTER_LEN]; + uint8 pattern[MAX_MASK_PATTERN_FILTER_LEN]; +} dhd_pktlog_filter_info_t; + +typedef struct dhd_pktlog_filter +{ + dhd_pktlog_filter_info_t *info; + uint32 list_cnt; + uint32 enable; +} dhd_pktlog_filter_t; + +typedef struct dhd_pktlog +{ + struct dhd_pktlog_ring *pktlog_ring; + struct dhd_pktlog_filter *pktlog_filter; + osl_atomic_t pktlog_status; + dhd_pub_t *dhdp; +#ifdef DHD_COMPACT_PKT_LOG + struct rb_root cpkt_log_tt_rbt; +#endif /* DHD_COMPACT_PKT_LOG */ +} dhd_pktlog_t; + +typedef struct dhd_pktlog_pcap_hdr +{ + uint32 magic_number; + uint16 version_major; + uint16 version_minor; + uint16 thiszone; + uint32 sigfigs; + uint32 snaplen; + uint32 network; +} dhd_pktlog_pcap_hdr_t; + +#define PKTLOG_PCAP_MAGIC_NUM 0xa1b2c3d4 +#define PKTLOG_PCAP_MAJOR_VER 0x02 +#define PKTLOG_PCAP_MINOR_VER 0x04 +#define PKTLOG_PCAP_SNAP_LEN 0x40000 +#define PKTLOG_PCAP_NETWORK_TYPE 147 + +extern int dhd_os_attach_pktlog(dhd_pub_t *dhdp); +extern int dhd_os_detach_pktlog(dhd_pub_t *dhdp); +extern dhd_pktlog_ring_t* dhd_pktlog_ring_init(dhd_pub_t *dhdp, int size); +extern int dhd_pktlog_ring_deinit(dhd_pub_t *dhdp, dhd_pktlog_ring_t *ring); +extern int dhd_pktlog_ring_set_nextpos(dhd_pktlog_ring_t *ringbuf); +extern int dhd_pktlog_ring_get_nextbuf(dhd_pktlog_ring_t *ringbuf, void **data); +extern int dhd_pktlog_ring_set_prevpos(dhd_pktlog_ring_t *ringbuf); +extern int dhd_pktlog_ring_get_prevbuf(dhd_pktlog_ring_t *ringbuf, void **data); +extern int dhd_pktlog_ring_get_writebuf(dhd_pktlog_ring_t *ringbuf, void **data); +extern int dhd_pktlog_ring_add_pkts(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid, + uint32 direction); +extern int dhd_pktlog_ring_tx_status(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid, + uint16 status); +extern dhd_pktlog_ring_t* dhd_pktlog_ring_change_size(dhd_pktlog_ring_t *ringbuf, int size); +extern void dhd_pktlog_filter_pull_forward(dhd_pktlog_filter_t *filter, + uint32 del_filter_id, uint32 list_cnt); + +#define PKT_RX 0 +#define PKT_TX 1 +#define PKT_WAKERX 2 +#define DHD_INVALID_PKTID (0U) +#define PKTLOG_TRANS_TX 0x01 +#define PKTLOG_TRANS_RX 0x02 +#define PKTLOG_TRANS_TXS 0x04 + +#define PKTLOG_SET_IN_TX(dhdp) \ +{ \ + do { \ + OSL_ATOMIC_OR((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, PKTLOG_TRANS_TX); \ + } while (0); \ +} + +#define PKTLOG_SET_IN_RX(dhdp) \ +{ \ + do { \ + OSL_ATOMIC_OR((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, PKTLOG_TRANS_RX); \ + } while (0); \ +} + +#define PKTLOG_SET_IN_TXS(dhdp) \ +{ \ + do { \ + OSL_ATOMIC_OR((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, PKTLOG_TRANS_TXS); \ + } while (0); \ +} + +#define PKTLOG_CLEAR_IN_TX(dhdp) \ +{ \ + do { \ + OSL_ATOMIC_AND((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, ~PKTLOG_TRANS_TX); \ + } while (0); \ +} + +#define PKTLOG_CLEAR_IN_RX(dhdp) \ +{ \ + do { \ + OSL_ATOMIC_AND((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, ~PKTLOG_TRANS_RX); \ + } while (0); \ +} + +#define PKTLOG_CLEAR_IN_TXS(dhdp) \ +{ \ + do { \ + OSL_ATOMIC_AND((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, ~PKTLOG_TRANS_TXS); \ + } while (0); \ +} + +#define DHD_PKTLOG_TX(dhdp, pkt, pktdata, pktid) \ +{ \ + do { \ + if ((dhdp) && (dhdp)->pktlog && (pkt)) { \ + PKTLOG_SET_IN_TX(dhdp); \ + if ((dhdp)->pktlog->pktlog_ring && \ + OSL_ATOMIC_READ((dhdp)->osh, \ + (&(dhdp)->pktlog->pktlog_ring->start))) { \ + dhd_pktlog_ring_add_pkts(dhdp, pkt, pktdata, pktid, PKT_TX); \ + } \ + PKTLOG_CLEAR_IN_TX(dhdp); \ + } \ + } while (0); \ +} + +#define DHD_PKTLOG_TXS(dhdp, 
pkt, pktdata, pktid, status) \ +{ \ + do { \ + if ((dhdp) && (dhdp)->pktlog && (pkt)) { \ + PKTLOG_SET_IN_TXS(dhdp); \ + if ((dhdp)->pktlog->pktlog_ring && \ + OSL_ATOMIC_READ((dhdp)->osh, \ + (&(dhdp)->pktlog->pktlog_ring->start))) { \ + dhd_pktlog_ring_tx_status(dhdp, pkt, pktdata, pktid, status); \ + } \ + PKTLOG_CLEAR_IN_TXS(dhdp); \ + } \ + } while (0); \ +} + +#define DHD_PKTLOG_RX(dhdp, pkt, pktdata) \ +{ \ + do { \ + if ((dhdp) && (dhdp)->pktlog && (pkt)) { \ + PKTLOG_SET_IN_RX(dhdp); \ + if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \ + if ((dhdp)->pktlog->pktlog_ring && \ + OSL_ATOMIC_READ((dhdp)->osh, \ + (&(dhdp)->pktlog->pktlog_ring->start))) { \ + dhd_pktlog_ring_add_pkts(dhdp, pkt, pktdata, \ + DHD_INVALID_PKTID, PKT_RX); \ + } \ + } \ + PKTLOG_CLEAR_IN_RX(dhdp); \ + } \ + } while (0); \ +} + +#define DHD_PKTLOG_WAKERX(dhdp, pkt, pktdata) \ +{ \ + do { \ + if ((dhdp) && (dhdp)->pktlog && (pkt)) { \ + PKTLOG_SET_IN_RX(dhdp); \ + if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \ + if ((dhdp)->pktlog->pktlog_ring && \ + OSL_ATOMIC_READ((dhdp)->osh, \ + (&(dhdp)->pktlog->pktlog_ring->start))) { \ + dhd_pktlog_ring_add_pkts(dhdp, pkt, pktdata, \ + DHD_INVALID_PKTID, PKT_WAKERX); \ + } \ + } \ + PKTLOG_CLEAR_IN_RX(dhdp); \ + } \ + } while (0); \ +} + +extern dhd_pktlog_filter_t* dhd_pktlog_filter_init(int size); +extern int dhd_pktlog_filter_deinit(dhd_pktlog_filter_t *filter); +extern int dhd_pktlog_filter_add(dhd_pktlog_filter_t *filter, char *arg); +extern int dhd_pktlog_filter_del(dhd_pktlog_filter_t *filter, char *arg); +extern int dhd_pktlog_filter_enable(dhd_pktlog_filter_t *filter, uint32 pktlog_case, uint32 enable); +extern int dhd_pktlog_filter_pattern_enable(dhd_pktlog_filter_t *filter, char *arg, uint32 enable); +extern int dhd_pktlog_filter_info(dhd_pktlog_filter_t *filter); +extern bool dhd_pktlog_filter_matched(dhd_pktlog_filter_t *filter, char *data, uint32 pktlog_case); +extern bool dhd_pktlog_filter_existed(dhd_pktlog_filter_t *filter, char *arg, uint32 *id); + +#define DHD_PKTLOG_FILTER_ADD(pattern, filter_pattern, dhdp) \ +{ \ + do { \ + if ((strlen(pattern) + 1) < sizeof(filter_pattern)) { \ + strncpy(filter_pattern, pattern, sizeof(filter_pattern)); \ + dhd_pktlog_filter_add(dhdp->pktlog->pktlog_filter, filter_pattern); \ + } \ + } while (0); \ +} + +#define DHD_PKTLOG_DUMP_PATH DHD_COMMON_DUMP_PATH +extern int dhd_pktlog_debug_dump(dhd_pub_t *dhdp); +extern void dhd_pktlog_dump(void *handle, void *event_info, u8 event); +extern void dhd_schedule_pktlog_dump(dhd_pub_t *dhdp); +extern int dhd_pktlog_dump_write_memory(dhd_pub_t *dhdp, const void *user_buf, uint32 size); +extern int dhd_pktlog_dump_write_file(dhd_pub_t *dhdp); + +#define DHD_PKTLOG_FATE_INFO_STR_LEN 256 +#define DHD_PKTLOG_FATE_INFO_FORMAT "BRCM_Packet_Fate" +#define DHD_PKTLOG_DUMP_TYPE "pktlog_dump" +#define DHD_PKTLOG_DEBUG_DUMP_TYPE "pktlog_debug_dump" + +extern void dhd_pktlog_get_filename(dhd_pub_t *dhdp, char *dump_path, int len); +extern uint32 dhd_pktlog_get_item_length(dhd_pktlog_ring_info_t *report_ptr); +extern uint32 dhd_pktlog_get_dump_length(dhd_pub_t *dhdp); +extern uint32 __dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid); + +#ifdef DHD_COMPACT_PKT_LOG +#define CPKT_LOG_BIT_SIZE 22 +#define CPKT_LOG_MAX_NUM 80 +extern int dhd_cpkt_log_proc(dhd_pub_t *dhdp, char *buf, int buf_len, + int bit_offset, int req_pkt_num); +#endif /* DHD_COMPACT_PKT_LOG */ +#endif /* DHD_PKT_LOGGING */ +#endif /* __DHD_PKTLOG_H_ */ diff --git a/bcmdhd.101.10.361.x/dhd_plat.h 
b/bcmdhd.101.10.361.x/dhd_plat.h new file mode 100755 index 0000000..8c07b5b --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_plat.h @@ -0,0 +1,58 @@ +/* + * DHD Linux platform header file + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#ifndef __DHD_PLAT_H__ +#define __DHD_PLAT_H__ + +#include + +#if !defined(CONFIG_WIFI_CONTROL_FUNC) +#define WLAN_PLAT_NODFS_FLAG 0x01 +#define WLAN_PLAT_AP_FLAG 0x02 +struct wifi_platform_data { + int (*set_power)(int val, wifi_adapter_info_t *adapter); + int (*set_reset)(int val); + int (*set_carddetect)(int val); +#ifdef DHD_COREDUMP + int (*set_coredump)(const char *buf, int buf_len, const char *info); +#endif /* DHD_COREDUMP */ +#ifdef BCMDHD_MDRIVER + void *(*mem_prealloc)(uint bus_type, int index, int section, unsigned long size); +#else + void *(*mem_prealloc)(int section, unsigned long size); +#endif + int (*get_mac_addr)(unsigned char *buf, int ifidx); +#ifdef BCMSDIO + int (*get_wake_irq)(void); +#endif +#ifdef CUSTOM_FORCE_NODFS_FLAG + void *(*get_country_code)(char *ccode, u32 flags); +#else /* defined (CUSTOM_FORCE_NODFS_FLAG) */ + void *(*get_country_code)(char *ccode); +#endif +}; +#endif /* CONFIG_WIFI_CONTROL_FUNC */ + +#endif /* __DHD_PLAT_H__ */ diff --git a/bcmdhd.101.10.361.x/dhd_pno.c b/bcmdhd.101.10.361.x/dhd_pno.c new file mode 100755 index 0000000..e002405 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_pno.c @@ -0,0 +1,4871 @@ +/* + * Broadcom Dongle Host Driver (DHD) + * Prefered Network Offload and Wi-Fi Location Service(WLS) code. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#if defined (GSCAN_SUPPORT) && !defined(PNO_SUPPORT) +#error "GSCAN needs PNO to be enabled!" 
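+/*
+ * Failing inconsistent feature selections at compile time, as the
+ * #error above does, beats shipping a driver that builds yet silently
+ * cannot deliver GSCAN results. The general shape of such a dependency
+ * guard:
+ *
+ *     #if defined(FEATURE_X) && !defined(FEATURE_Y)
+ *     #error "FEATURE_X requires FEATURE_Y"
+ *     #endif
+ */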
+#endif + +#ifdef PNO_SUPPORT +#include +#include + +#include +#include + +#include + +#ifdef OEM_ANDROID +#include +#include +#include +#include +#include +#endif + +#include +#include + +#include +#include +#include +#include +#ifdef GSCAN_SUPPORT +#include +#endif /* GSCAN_SUPPORT */ +#ifdef WL_CFG80211 +#include +#endif /* WL_CFG80211 */ + +#ifdef __BIG_ENDIAN +#include +#define htod32(i) (bcmswap32(i)) +#define htod16(i) (bcmswap16(i)) +#define dtoh32(i) (bcmswap32(i)) +#define dtoh16(i) (bcmswap16(i)) +#define htodchanspec(i) htod16(i) +#define dtohchanspec(i) dtoh16(i) +#else +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) +#endif /* IL_BIGENDINA */ + +#ifdef OEM_ANDROID +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) +#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state) + +#define PNO_BESTNET_LEN WLC_IOCTL_MEDLEN + +#define PNO_ON 1 +#define PNO_OFF 0 +#define CHANNEL_2G_MIN 1 +#define CHANNEL_2G_MAX 14 +#define CHANNEL_5G_MIN 34 +#define CHANNEL_5G_MAX 165 +#define IS_2G_CHANNEL(ch) ((ch >= CHANNEL_2G_MIN) && \ + (ch <= CHANNEL_2G_MAX)) +#define IS_5G_CHANNEL(ch) ((ch >= CHANNEL_5G_MIN) && \ + (ch <= CHANNEL_5G_MAX)) +#define MAX_NODE_CNT 5 +#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE) +#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \ + - (uint32)(timestamp2/1000))) +#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1) \ + - (uint32)(timestamp2))) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) + +#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====") +#define TIME_MIN_DIFF 5 + +#define EVENT_DATABUF_MAXLEN (512 - sizeof(bcm_event_t)) +#define EVENT_MAX_NETCNT_V1 \ + ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v1_t)) \ + / sizeof(wl_pfn_net_info_v1_t) + 1) +#define EVENT_MAX_NETCNT_V2 \ + ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v2_t)) \ + / sizeof(wl_pfn_net_info_v2_t) + 1) +#define EVENT_MAX_NETCNT_V3 \ + ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v3_t)) \ + / sizeof(wl_pfn_net_info_v3_t) + 1) + +#ifdef GSCAN_SUPPORT +static int _dhd_pno_flush_ssid(dhd_pub_t *dhd); +static wl_pfn_gscan_ch_bucket_cfg_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state, + uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw); +#endif /* GSCAN_SUPPORT */ + +static int dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat, + int pno_freq_expo_max, uint16 *channel_list, int nchan); + +static inline bool +is_dfs(dhd_pub_t *dhd, uint16 channel) +{ + u32 ch; + s32 err; + u8 buf[32]; + + ch = wl_ch_host_to_driver(channel); + err = dhd_iovar(dhd, 0, "per_chan_info", (char *)&ch, + sizeof(u32), buf, sizeof(buf), FALSE); + if (unlikely(err)) { + DHD_ERROR(("get per chan info failed:%d\n", err)); + return FALSE; + } + /* Check the channel flags returned by fw */ + if (*((u32 *)buf) & WL_CHAN_PASSIVE) { + return TRUE; + } + return FALSE; +} + +int +dhd_pno_clean(dhd_pub_t *dhd) +{ + int pfn = 0; + int err; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + /* 
Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn(error : %d)\n", + __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = DHD_PNO_DISABLED; + err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n", + __FUNCTION__, err)); + } +exit: + return err; +} + +bool +dhd_is_pno_supported(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return FALSE; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + return WLS_SUPPORTED(_pno_state); +} + +bool +dhd_is_legacy_pno_enabled(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return FALSE; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + return ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) != 0); +} + +#ifdef GSCAN_SUPPORT +static uint64 +convert_fw_rel_time_to_systime(struct osl_timespec *ts, uint32 fw_ts_ms) +{ + return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000)); +} + +static void +dhd_pno_idx_to_ssid(struct dhd_pno_gscan_params *gscan_params, + dhd_epno_results_t *res, uint32 idx) +{ + dhd_pno_ssid_t *iter, *next; + int i; + + /* If idx doesn't make sense */ + if (idx >= gscan_params->epno_cfg.num_epno_ssid) { + DHD_ERROR(("No match, idx %d num_ssid %d\n", idx, + gscan_params->epno_cfg.num_epno_ssid)); + goto exit; + } + + if (gscan_params->epno_cfg.num_epno_ssid > 0) { + i = 0; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(iter, next, + &gscan_params->epno_cfg.epno_ssid_list, list) { + GCC_DIAGNOSTIC_POP(); + if (i++ == idx) { + memcpy(res->ssid, iter->SSID, iter->SSID_len); + res->ssid_len = iter->SSID_len; + return; + } + } + } +exit: + /* If we are here then there was no match */ + res->ssid[0] = '\0'; + res->ssid_len = 0; + return; +} + +/* Translate HAL flag bitmask to BRCM FW flag bitmask */ +void +dhd_pno_translate_epno_fw_flags(uint32 *flags) +{ + uint32 in_flags, fw_flags = 0; + in_flags = *flags; + + if (in_flags & DHD_EPNO_A_BAND_TRIG) { + fw_flags |= WL_PFN_SSID_A_BAND_TRIG; + } + + if (in_flags & DHD_EPNO_BG_BAND_TRIG) { + fw_flags |= WL_PFN_SSID_BG_BAND_TRIG; + } + + if (!(in_flags & DHD_EPNO_STRICT_MATCH) && + !(in_flags & DHD_EPNO_HIDDEN_SSID)) { + fw_flags |= WL_PFN_SSID_IMPRECISE_MATCH; + } + + if (in_flags & DHD_EPNO_SAME_NETWORK) { + fw_flags |= WL_PFN_SSID_SAME_NETWORK; + } + + /* Add any hard coded flags needed */ + fw_flags |= WL_PFN_SUPPRESS_AGING_MASK; + *flags = fw_flags; + + return; +} + +/* Translate HAL auth bitmask to BRCM FW bitmask */ +void +dhd_pno_set_epno_auth_flag(uint32 *wpa_auth) +{ + switch (*wpa_auth) { + case DHD_PNO_AUTH_CODE_OPEN: + *wpa_auth = WPA_AUTH_DISABLED; + break; + case DHD_PNO_AUTH_CODE_PSK: + *wpa_auth = (WPA_AUTH_PSK | WPA2_AUTH_PSK); + break; + case DHD_PNO_AUTH_CODE_EAPOL: + *wpa_auth = ~WPA_AUTH_NONE; + break; + default: + DHD_ERROR(("%s: Unknown auth %d", __FUNCTION__, *wpa_auth)); + *wpa_auth = WPA_AUTH_PFN_ANY; + break; + } + return; +} + +/* Cleanup all results */ +static void +dhd_gscan_clear_all_batch_results(dhd_pub_t *dhd) +{ + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter; + + _pno_state = PNO_GET_PNOSTATE(dhd); + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + iter = 
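+/*
+ * Earlier in this block, convert_fw_rel_time_to_systime() anchors
+ * firmware-relative ages to the host clock: firmware stamps each result
+ * as "N ms before this event", so the absolute time is the host
+ * timestamp captured at event arrival minus that age. The arithmetic,
+ * all in microseconds:
+ *
+ *     uint64 host_us = TIMESPEC_TO_US(*ts);      (host time at event)
+ *     uint64 age_us  = (uint64)fw_ts_ms * 1000;
+ *     uint64 abs_us  = host_us - age_us;         (absolute host time)
+ */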
gscan_params->gscan_batch_cache;
+	/* Mark everything as consumed */
+	while (iter) {
+		iter->tot_consumed = iter->tot_count;
+		iter = iter->next;
+	}
+	dhd_gscan_batch_cache_cleanup(dhd);
+	return;
+}
+
+static int
+_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_flush_ssid(dhd_pub_t *dhd)
+{
+	int err;
+	wl_pfn_t pfn_elem;
+	memset(&pfn_elem, 0, sizeof(wl_pfn_t));
+	pfn_elem.flags = htod32(WL_PFN_FLUSH_ALL_SSIDS);
+
+	err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_elem, sizeof(wl_pfn_t), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+	}
+	return err;
+}
+
+static bool
+is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params)
+{
+	smp_rmb();
+	return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE);
+}
+#endif /* GSCAN_SUPPORT */
+
+static int
+_dhd_pno_suspend(dhd_pub_t *dhd)
+{
+	int err;
+	int suspend = 1;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to suspend pfn (error: %d)\n", __FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = DHD_PNO_SUSPEND;
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (enable & 0xfffe) {
+		DHD_ERROR(("%s invalid value\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+	if (!dhd_support_sta_mode(dhd)) {
+		DHD_ERROR(("PNO is not allowed for non-STA mode"));
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (enable) {
+		if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+			dhd_is_associated(dhd, 0, NULL)) {
+			DHD_ERROR(("%s Legacy PNO mode cannot be enabled "
+				"in assoc mode, ignore it\n", __FUNCTION__));
+			err = BCME_BADOPTION;
+			goto exit;
+		}
+	}
+	/* Enable/Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = (enable)?
+		DHD_PNO_ENABLED : DHD_PNO_DISABLED;
+	if (!enable)
+		_pno_state->pno_mode = DHD_PNO_NONE_MODE;
+
+	DHD_PNO(("%s set pno as %s\n",
+		__FUNCTION__, enable ? "Enable" : "Disable"));
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	wl_pfn_param_t pfn_param;
+	dhd_pno_params_t *_params;
+	dhd_pno_status_info_t *_pno_state;
+	bool combined_scan = FALSE;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	memset(&pfn_param, 0, sizeof(pfn_param));
+
+	/* set pfn parameters */
+	pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) |
+		(ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT));
+	if (mode == DHD_PNO_LEGACY_MODE) {
+		/* check and set extra pno params */
+		if ((pno_params->params_legacy.pno_repeat != 0) ||
+			(pno_params->params_legacy.pno_freq_expo_max != 0)) {
+			pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+			pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat);
+			pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max);
+		}
+		/* set up pno scan fr */
+		if (pno_params->params_legacy.scan_fr != 0)
+			pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr);
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			DHD_PNO(("will enable combined scan with BATCHING SCAN MODE\n"));
+			mode |= DHD_PNO_BATCH_MODE;
+			combined_scan = TRUE;
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n"));
+			mode |= DHD_PNO_HOTLIST_MODE;
+			combined_scan = TRUE;
+		}
+#ifdef GSCAN_SUPPORT
+		else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+			DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n"));
+			mode |= DHD_PNO_GSCAN_MODE;
+		}
+#endif /* GSCAN_SUPPORT */
+	}
+	if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* Scan frequency of 30 sec */
+		pfn_param.scan_freq = htod32(30);
+		/* slow adapt scan is off by default */
+		pfn_param.slow_freq = htod32(0);
+		/* RSSI margin of 30 dBm */
+		pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+		/* Network timeout 60 sec */
+		pfn_param.lost_network_timeout = htod32(60);
+		/* best n = 2 by default */
+		pfn_param.bestn = DEFAULT_BESTN;
+		/* mscan m=0 by default, so not record best networks by default */
+		pfn_param.mscan = DEFAULT_MSCAN;
+		/* default repeat = 10 */
+		pfn_param.repeat = DEFAULT_REPEAT;
+		/* by default, maximum scan interval = 2^2
+		 * scan_freq when adaptive scan is turned on
+		 */
+		pfn_param.exp = DEFAULT_EXP;
+		if (mode == DHD_PNO_BATCH_MODE) {
+			/* In case of BATCH SCAN */
+			if (pno_params->params_batch.bestn)
+				pfn_param.bestn = pno_params->params_batch.bestn;
+			if (pno_params->params_batch.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr);
+			if (pno_params->params_batch.mscan)
+				pfn_param.mscan = pno_params->params_batch.mscan;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		} else if (mode == DHD_PNO_HOTLIST_MODE) {
+			/* In case of HOTLIST SCAN */
+			if (pno_params->params_hotlist.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr);
+			pfn_param.bestn = 0;
+			pfn_param.repeat = 0;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		if (combined_scan) {
+			/* Disable Adaptive Scan */
+			pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT));
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+			pfn_param.repeat = 0;
+			pfn_param.exp = 0;
+			if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+				/* In case of Legacy PNO + BATCH SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+				if (_params->params_batch.bestn)
+					pfn_param.bestn = _params->params_batch.bestn;
+				if (_params->params_batch.scan_fr)
+					pfn_param.scan_freq = htod32(_params->params_batch.scan_fr);
+				if (_params->params_batch.mscan)
+					pfn_param.mscan = _params->params_batch.mscan;
+			} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+				/* In case of Legacy PNO + HOTLIST SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+				if (_params->params_hotlist.scan_fr)
+					pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr);
+				pfn_param.bestn = 0;
+				pfn_param.repeat = 0;
+			}
+		}
+	}
+#ifdef GSCAN_SUPPORT
+	if (mode & DHD_PNO_GSCAN_MODE) {
+		uint32 lost_network_timeout;
+
+		pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr);
+		if (pno_params->params_gscan.mscan) {
+			pfn_param.bestn = pno_params->params_gscan.bestn;
+			pfn_param.mscan = pno_params->params_gscan.mscan;
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		/* RSSI margin of 30 dBm */
+		pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+		pfn_param.repeat = 0;
+		pfn_param.exp = 0;
+		pfn_param.slow_freq = 0;
+		pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			dhd_pno_params_t *params;
+
+			params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+
+			pfn_param.scan_freq = gcd(pno_params->params_gscan.scan_fr,
+				params->params_legacy.scan_fr);
+
+			if ((params->params_legacy.pno_repeat != 0) ||
+				(params->params_legacy.pno_freq_expo_max != 0)) {
+				pfn_param.repeat = (uchar) (params->params_legacy.pno_repeat);
+				pfn_param.exp = (uchar) (params->params_legacy.pno_freq_expo_max);
+			}
+		}
+
+		lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq *
+			pfn_param.scan_freq *
+			pno_params->params_gscan.lost_ap_window);
+		if (lost_network_timeout) {
+			pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout,
+				GSCAN_MIN_BSSID_TIMEOUT));
+		} else {
+			pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT);
+		}
+	} else
+#endif /* GSCAN_SUPPORT */
+	{
+		if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
+			pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
+			DHD_ERROR(("%s pno freq %d sec is not valid (expected %d..%d)\n",
+				__FUNCTION__, dtoh32(pfn_param.scan_freq),
+				PNO_SCAN_MIN_FW_SEC, PNO_SCAN_MAX_FW_SEC));
+			err = BCME_BADARG;
+			goto exit;
+		}
+	}
+#if (!defined(WL_USE_RANDOMIZED_SCAN))
+	err = dhd_set_rand_mac_oui(dhd);
+	/* Ignore if chip doesn't support the feature */
+	if (err < 0 && err != BCME_UNSUPPORTED) {
+		DHD_ERROR(("%s : failed to set random mac for PNO scan, %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+#endif /* !defined(WL_USE_RANDOMIZED_SCAN) */
+#ifdef GSCAN_SUPPORT
+	if (mode == DHD_PNO_BATCH_MODE ||
+		((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan))
+#else
+	if (mode == DHD_PNO_BATCH_MODE)
+#endif /* GSCAN_SUPPORT */
+	{
+		int _tmp = pfn_param.bestn;
+		/* set bestn to calculate the max mscan which firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), NULL, 0, TRUE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		/* get max mscan which the firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", NULL, 0, (char *)&_tmp, sizeof(_tmp), FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		pfn_param.mscan = MIN(pfn_param.mscan, _tmp);
+		DHD_PNO((" returned mscan : %d, set bestn :
%d mscan %d\n", _tmp, pfn_param.bestn, + pfn_param.mscan)); + } + err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err)); + goto exit; + } + /* need to return mscan if this is for batch scan instead of err */ + err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err; +exit: + return err; +} + +static int +_dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head* ssid_list, int nssid) +{ + int err = BCME_OK; + int i = 0, mem_needed; + wl_pfn_t *pfn_elem_buf; + struct dhd_pno_ssid *iter, *next; + + NULL_CHECK(dhd, "dhd is NULL", err); + if (!nssid) { + NULL_CHECK(ssid_list, "ssid list is NULL", err); + return BCME_ERROR; + } + mem_needed = (sizeof(wl_pfn_t) * nssid); + pfn_elem_buf = (wl_pfn_t *) MALLOCZ(dhd->osh, mem_needed); + if (!pfn_elem_buf) { + DHD_ERROR(("%s: Can't malloc %d bytes!\n", __FUNCTION__, mem_needed)); + return BCME_NOMEM; + } + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(iter, next, ssid_list, list) { + GCC_DIAGNOSTIC_POP(); + pfn_elem_buf[i].infra = htod32(1); + pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM); + pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth); + pfn_elem_buf[i].flags = htod32(iter->flags); + if (iter->hidden) + pfn_elem_buf[i].flags |= htod32(ENABLE << WL_PFN_HIDDEN_BIT); + /* If a single RSSI threshold is defined, use that */ +#ifdef PNO_MIN_RSSI_TRIGGER + pfn_elem_buf[i].flags |= ((PNO_MIN_RSSI_TRIGGER & 0xFF) << WL_PFN_RSSI_SHIFT); +#else + pfn_elem_buf[i].flags |= ((iter->rssi_thresh & 0xFF) << WL_PFN_RSSI_SHIFT); +#endif /* PNO_MIN_RSSI_TRIGGER */ + memcpy((char *)pfn_elem_buf[i].ssid.SSID, iter->SSID, + iter->SSID_len); + pfn_elem_buf[i].ssid.SSID_len = iter->SSID_len; + DHD_PNO(("%s size = %d hidden = %d flags = %x rssi_thresh %d\n", + iter->SSID, iter->SSID_len, iter->hidden, + iter->flags, iter->rssi_thresh)); + if (++i >= nssid) { + /* shouldn't happen */ + break; + } + } + + err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); + } + MFREE(dhd->osh, pfn_elem_buf, mem_needed); + return err; +} + +/* qsort compare function */ +static int +_dhd_pno_cmpfunc(const void *a, const void *b) +{ + return (*(const uint16*)a - *(const uint16*)b); +} + +static int +_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan, + uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2) +{ + int err = BCME_OK; + int i = 0, j = 0, k = 0; + uint16 tmp; + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + NULL_CHECK(nchan, "nchan is NULL", err); + NULL_CHECK(chan_list1, "chan_list1 is NULL", err); + NULL_CHECK(chan_list2, "chan_list2 is NULL", err); + /* chan_list1 and chan_list2 should be sorted at first */ + while (i < nchan1 && j < nchan2) { + tmp = chan_list1[i] < chan_list2[j]? 
+ chan_list1[i++] : chan_list2[j++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + } + + while (i < nchan1) { + tmp = chan_list1[i++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + d_chan_list[k++] = tmp; + } + + while (j < nchan2) { + tmp = chan_list2[j++]; + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + + } + *nchan = k; + return err; +} + +static int +_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list, + int *nchan, uint8 band, bool skip_dfs) +{ + int err = BCME_OK; + int i, j; + uint32 chan_buf[WL_NUMCHANNELS + 1]; + wl_uint32_list_t *list; + NULL_CHECK(dhd, "dhd is NULL", err); + if (*nchan) { + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + } + memset(&chan_buf, 0, sizeof(chan_buf)); + list = (wl_uint32_list_t *) (void *)chan_buf; + list->count = htod32(WL_NUMCHANNELS); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0); + if (err < 0) { + DHD_ERROR(("failed to get channel list (err: %d)\n", err)); + return err; + } + for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) { + if (IS_2G_CHANNEL(dtoh32(list->element[i]))) { + if (!(band & WLC_BAND_2G)) { + /* Skip, if not 2g */ + continue; + } + /* fall through to include the channel */ + } else if (IS_5G_CHANNEL(dtoh32(list->element[i]))) { + bool dfs_channel = is_dfs(dhd, dtoh32(list->element[i])); + if ((skip_dfs && dfs_channel) || + (!(band & WLC_BAND_5G) && !dfs_channel)) { + /* Skip the channel if: + * the DFS bit is NOT set & the channel is a dfs channel + * the band 5G is not set & the channel is a non DFS 5G channel + */ + continue; + } + /* fall through to include the channel */ + } else { + /* Not in range. Bad channel */ + DHD_ERROR(("Not in range. 
bad channel\n"));
+			*nchan = 0;
+			return BCME_BADCHAN;
+		}
+
+		/* Include the channel */
+		d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
+	}
+	*nchan = j;
+	return err;
+}
+
+static int
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch,
+	char *buf, int nbufsize)
+{
+	int err = BCME_OK;
+	int bytes_written = 0, nreadsize = 0;
+	int t_delta = 0;
+	int nleftsize = nbufsize;
+	uint8 cnt = 0;
+	char *bp = buf;
+	char eabuf[ETHER_ADDR_STR_LEN];
+#ifdef PNO_DEBUG
+	char *_base_bp;
+	char msg[150];
+#endif
+	dhd_pno_bestnet_entry_t *iter, *next;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	NULL_CHECK(params_batch, "params_batch is NULL", err);
+	if (nbufsize > 0)
+		NULL_CHECK(buf, "buf is NULL", err);
+	/* initialize the buffer */
+	memset(buf, 0, nbufsize);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	/* # of scans */
+	if (!params_batch->get_batch.batch_started) {
+		bp += nreadsize = snprintf(bp, nleftsize, "scancount=%d\n",
+			params_batch->get_batch.expired_tot_scan_cnt);
+		nleftsize -= nreadsize;
+		params_batch->get_batch.batch_started = TRUE;
+	}
+	DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
+	/* estimate in advance at which scan result this report will end */
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	list_for_each_entry_safe(siter, snext,
+		&params_batch->get_batch.expired_scan_results_list, list) {
+		GCC_DIAGNOSTIC_POP();
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			/* if left_size is less than bestheader total size, stop this */
+			if (nleftsize <=
+				(phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD))
+				goto exit;
+			/* increase scan count */
+			cnt++;
+			/* # best of each scan */
+			DHD_PNO(("<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt));
+			/* attribute of the scan */
+			if (phead->reason & PNO_STATUS_ABORT_MASK) {
+				bp += nreadsize = snprintf(bp, nleftsize, "trunc\n");
+				nleftsize -= nreadsize;
+			}
+			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list, list) {
+				GCC_DIAGNOSTIC_POP();
+				t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+				_base_bp = bp;
+				memset(msg, 0, sizeof(msg));
+#endif
+				/* BSSID info */
+				bp += nreadsize = snprintf(bp, nleftsize, "bssid=%s\n",
+					bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+				nleftsize -= nreadsize;
+				/* SSID */
+				bp += nreadsize = snprintf(bp, nleftsize, "ssid=%s\n", iter->SSID);
+				nleftsize -= nreadsize;
+				/* channel */
+				bp += nreadsize = snprintf(bp, nleftsize, "freq=%d\n",
+					wl_channel_to_frequency(wf_chspec_ctlchan(iter->channel),
+					CHSPEC_BAND(iter->channel)));
+				nleftsize -= nreadsize;
+				/* RSSI */
+				bp += nreadsize = snprintf(bp, nleftsize, "level=%d\n", iter->RSSI);
+				nleftsize -= nreadsize;
+				/* add the time consumed in Driver to the timestamp of firmware */
+				iter->timestamp += t_delta;
+				bp += nreadsize = snprintf(bp, nleftsize,
+					"age=%d\n", iter->timestamp);
+				nleftsize -= nreadsize;
+				/* RTT0 */
+				bp += nreadsize = snprintf(bp, nleftsize, "dist=%d\n",
+					(iter->rtt0 == 0)? -1 : iter->rtt0);
+				nleftsize -= nreadsize;
+				/* RTT1 */
+				bp += nreadsize = snprintf(bp, nleftsize, "distSd=%d\n",
+					(iter->rtt0 == 0)? -1 : iter->rtt1);
+				nleftsize -= nreadsize;
+				bp += nreadsize = snprintf(bp, nleftsize, "%s", AP_END_MARKER);
+				nleftsize -= nreadsize;
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+				memcpy(msg, _base_bp, bp - _base_bp);
+				DHD_PNO(("Entry : \n%s", msg));
+#endif
+			}
+			bp += nreadsize = snprintf(bp, nleftsize, "%s", SCAN_END_MARKER);
+			DHD_PNO(("%s", SCAN_END_MARKER));
+			nleftsize -= nreadsize;
+			pprev = phead;
+			/* reset the header */
+			siter->bestnetheader = phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+			siter->cnt_header--;
+		}
+		if (phead == NULL) {
+			/* all entries of this scan were stored, so it is OK to delete it */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+exit:
+	if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+		DHD_ERROR(("Buffer size is too small to save all batch entries,"
+			" cnt : %d (remained_scan_cnt): %d\n",
+			cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+	}
+	params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+	/* set FALSE only if the linked list is empty after returning the data */
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+		GCC_DIAGNOSTIC_POP();
+		params_batch->get_batch.batch_started = FALSE;
+		bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER);
+		DHD_PNO(("%s", RESULTS_END_MARKER));
+		DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+	}
+	/* return used memory in buffer */
+	bytes_written = (int32)(bp - buf);
+	return bytes_written;
+}
+
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+	int err = BCME_OK;
+	int removed_scan_cnt = 0;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	dhd_pno_bestnet_entry_t *iter, *next;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(head, "head is NULL", err);
+	NULL_CHECK(head->next, "head->next is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	list_for_each_entry_safe(siter, snext,
+		head, list) {
+		if (only_last) {
+			/* in case that we need to delete only last one */
+			if (!list_is_last(&siter->list, head)) {
+				/* skip if the one is not last */
+				continue;
+			}
+		}
+		/* delete all data belonging to the node if it is the last one */
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			removed_scan_cnt++;
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list, list) {
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+			}
+			pprev = phead;
+			phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+		}
+		if (phead == NULL) {
+			/* it is ok to delete top node */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	return removed_scan_cnt;
+}
+
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_cfg_t pfncfg_param;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nchan) {
+		if (nchan > WL_NUMCHANNELS) {
+			return BCME_RANGE;
+		}
+		DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan));
+		(void)memset_s(&pfncfg_param, sizeof(wl_pfn_cfg_t), 0, sizeof(wl_pfn_cfg_t));
+		pfncfg_param.channel_num = htod32(0);
+
+		for (i = 0; i < nchan; i++) {
+			if (dhd->wlc_ver_major >= DHD_PNO_CHSPEC_SUPPORT_VER) {
+				pfncfg_param.channel_list[i] = CH20MHZ_CHSPEC(channel_list[i]);
+			} else {
+				pfncfg_param.channel_list[i] = channel_list[i];
+			}
+		}
+	}
+
+	/* Setup default values */
+	pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+	pfncfg_param.channel_num = htod32(nchan);
+	err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), NULL, 0,
+		TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	switch (mode) {
+	case DHD_PNO_LEGACY_MODE: {
+		struct dhd_pno_ssid *iter, *next;
+		if (params->params_legacy.nssid > 0) {
+			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+			list_for_each_entry_safe(iter, next,
+				&params->params_legacy.ssid_list, list) {
+				GCC_DIAGNOSTIC_POP();
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
+			}
+		}
+
+		params->params_legacy.nssid = 0;
+		params->params_legacy.scan_fr = 0;
+		params->params_legacy.pno_freq_expo_max = 0;
+		params->params_legacy.pno_repeat = 0;
+		params->params_legacy.nchan = 0;
+		memset(params->params_legacy.chan_list, 0,
+			sizeof(params->params_legacy.chan_list));
+		break;
+	}
+	case DHD_PNO_BATCH_MODE: {
+		params->params_batch.scan_fr = 0;
+		params->params_batch.mscan = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.rtt = 0;
+		params->params_batch.bestn = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_batch.chan_list, 0,
+			sizeof(params->params_batch.chan_list));
+		params->params_batch.get_batch.batch_started = FALSE;
+		params->params_batch.get_batch.buf = NULL;
+		params->params_batch.get_batch.bufsize = 0;
+		params->params_batch.get_batch.reason = 0;
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.scan_results_list, FALSE);
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.expired_scan_results_list, FALSE);
+		params->params_batch.get_batch.tot_scan_cnt = 0;
+		params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+		params->params_batch.get_batch.top_node_cnt = 0;
+		INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+		INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+		break;
+	}
+	case DHD_PNO_HOTLIST_MODE: {
+		struct dhd_pno_bssid *iter, *next;
+		if (params->params_hotlist.nbssid > 0) {
+			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+			list_for_each_entry_safe(iter, next,
+				&params->params_hotlist.bssid_list, list) {
+				GCC_DIAGNOSTIC_POP();
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
+			}
+		}
+		params->params_hotlist.scan_fr = 0;
+		params->params_hotlist.nbssid = 0;
+		params->params_hotlist.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_hotlist.chan_list, 0,
+			sizeof(params->params_hotlist.chan_list));
+		break;
+	}
+	default:
+		DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode));
+		break;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+	return err;
+}
+
+static int
+_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nbssid) {
+		NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid,
+		sizeof(wl_pfn_bssid_t) * nbssid, NULL, 0, TRUE);
+	if (err < 0) {
DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +int +dhd_pno_stop_for_ssid(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0, cnt = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params = NULL; + wl_pfn_bssid_t *p_pfn_bssid = NULL, *tmp_bssid; + + NULL_CHECK(dhd, "dev is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) { + DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + /* If pno mode is PNO_LEGACY_MODE clear the pno values and unset the DHD_PNO_LEGACY_MODE */ + _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &_params->params_gscan; + if (gscan_params->mscan) { + /* retrieve the batching data from firmware into host */ + err = dhd_wait_batch_results_complete(dhd); + if (err != BCME_OK) + goto exit; + } + /* save current pno_mode before calling dhd_pno_clean */ + mutex_lock(&_pno_state->pno_mutex); + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); + goto exit; + } + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); + /* Restart gscan */ + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + /* restart Batch mode if the batch mode is on */ + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + err = BCME_ERROR; + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* restart HOTLIST SCAN */ + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = MALLOCZ(dhd->osh, sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + /* convert dhd_pno_bssid to wl_pfn_bssid */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + cnt = 0; + tmp_bssid = p_pfn_bssid; + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + GCC_DIAGNOSTIC_POP(); 
+ memcpy(&tmp_bssid->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + tmp_bssid->flags = iter->flags; + if (cnt < _params->params_hotlist.nbssid) { + tmp_bssid++; + cnt++; + } else { + DHD_ERROR(("%s: Allocated insufficient memory\n", + __FUNCTION__)); + break; + } + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + if (p_pfn_bssid) { + MFREE(dhd->osh, p_pfn_bssid, sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid); + } + return err; +} + +int +dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + return (_dhd_pno_enable(dhd, enable)); +} + +static int +dhd_pno_add_to_ssid_list(dhd_pub_t *dhd, struct list_head *ptr, wlc_ssid_ext_t *ssid_list, + int nssid, int *num_ssid_added) +{ + int ret = BCME_OK; + int i; + struct dhd_pno_ssid *_pno_ssid; + + for (i = 0; i < nssid; i++) { + if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s : Invalid SSID length %d\n", + __FUNCTION__, ssid_list[i].SSID_len)); + ret = BCME_ERROR; + goto exit; + } + /* Check for broadcast ssid */ + if (!ssid_list[i].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is illegal for PNO setting\n", i)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid = (struct dhd_pno_ssid *)MALLOCZ(dhd->osh, + sizeof(struct dhd_pno_ssid)); + if (_pno_ssid == NULL) { + DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid->SSID_len = ssid_list[i].SSID_len; + _pno_ssid->hidden = ssid_list[i].hidden; + _pno_ssid->rssi_thresh = ssid_list[i].rssi_thresh; + _pno_ssid->flags = ssid_list[i].flags; + _pno_ssid->wpa_auth = WPA_AUTH_PFN_ANY; + + memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len); + list_add_tail(&_pno_ssid->list, ptr); + } + +exit: + *num_ssid_added = i; + return ret; +} + +int +dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_legacy_params *params_legacy; + int err = BCME_OK; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("%s: PNO Not enabled/Not ready\n", __FUNCTION__)); + return BCME_NOTREADY; + } + + if (!dhd_support_sta_mode(dhd)) { + return BCME_BADOPTION; + } + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + params_legacy = &(_params->params_legacy); + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + + if (err < 0) { + DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", + __FUNCTION__, err)); + return err; + } + + INIT_LIST_HEAD(¶ms_legacy->ssid_list); + + if (dhd_pno_add_to_ssid_list(dhd, ¶ms_legacy->ssid_list, ssid_list, + nssid, ¶ms_legacy->nssid) < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + return BCME_ERROR; + } + + DHD_PNO(("%s enter : nssid %d, scan_fr :%d, pno_repeat :%d," + "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, + params_legacy->nssid, scan_fr, pno_repeat, pno_freq_expo_max, nchan)); + + return dhd_pno_set_legacy_pno(dhd, scan_fr, 
pno_repeat, + pno_freq_expo_max, channel_list, nchan); + +} + +static int +dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat, + int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + uint16 _chan_list[WL_NUMCHANNELS]; + int32 tot_nchan = 0; + int err = BCME_OK; + int i, nssid; + int mode = 0; + struct list_head *ssid_list; + + _pno_state = PNO_GET_PNOSTATE(dhd); + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + /* If GSCAN is also ON will handle this down below */ +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE && + !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#else + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) +#endif /* GSCAN_SUPPORT */ + { + DHD_ERROR(("%s : Legacy PNO mode was already started, " + "will disable previous one to start new one\n", __FUNCTION__)); + err = dhd_pno_stop_for_ssid(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n", + __FUNCTION__, err)); + return err; + } + } + _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE; + (void)memset_s(_chan_list, sizeof(_chan_list), + 0, sizeof(_chan_list)); + tot_nchan = MIN(nchan, WL_NUMCHANNELS); + if (tot_nchan > 0 && channel_list) { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i]; + } +#ifdef GSCAN_SUPPORT + else { + /* FW scan module will include all valid channels when chan count + * is set to 0 + */ + tot_nchan = 0; + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + DHD_PNO(("BATCH SCAN is on progress in firmware\n")); + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* use superset of channel list between two mode */ + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params2->params_batch.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_batch.chan_list[0], + _params2->params_batch.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and batch\n", + __FUNCTION__)); + goto exit; + } + } else { + DHD_PNO(("superset channel will use" + " all channels in firmware\n")); + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_hotlist.chan_list[0], + _params2->params_hotlist.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and hotlist\n", + __FUNCTION__)); + goto exit; + } + } + } + } + _params->params_legacy.scan_fr = scan_fr; + _params->params_legacy.pno_repeat = pno_repeat; + _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max; + _params->params_legacy.nchan = tot_nchan; + ssid_list = &_params->params_legacy.ssid_list; + nssid = _params->params_legacy.nssid; + +#ifdef GSCAN_SUPPORT + /* 
dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */ + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + /* ePNO and Legacy PNO do not co-exist */ + if (gscan_params->epno_cfg.num_epno_ssid) { + DHD_PNO(("ePNO and Legacy PNO do not co-exist\n")); + err = BCME_EPERM; + goto exit; + } + DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n")); + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) { + DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid)); + goto exit; + } + + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + if (err < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + } + } + return err; +} + +int +dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) +{ + int err = BCME_OK; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0, tot_nchan = 0; + int mode = 0, mscan = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(batch_params, "batch_params is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + _pno_state->pno_mode |= DHD_PNO_BATCH_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } else { + /* batch mode is already started */ + return -EBUSY; + } + _params->params_batch.scan_fr = batch_params->scan_fr; + _params->params_batch.bestn = batch_params->bestn; + _params->params_batch.mscan = (batch_params->mscan)? 
+		batch_params->mscan : DEFAULT_BATCH_MSCAN;
+	_params->params_batch.nchan = batch_params->nchan;
+	memcpy(_params->params_batch.chan_list, batch_params->chan_list,
+		sizeof(_params->params_batch.chan_list));
+
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan;
+	if (batch_params->band == WLC_BAND_2G ||
+#ifdef WL_6G_BAND
+		batch_params->band == WLC_BAND_6G ||
+#endif /* WL_6G_BAND */
+		batch_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+			&_params->params_batch.chan_list[batch_params->nchan],
+			&rem_nchan, batch_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, batch_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_chan has valid channel count */
+		_params->params_batch.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_batch.chan_list, _params->params_batch.nchan,
+			sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+{
+	int i;
+	DHD_PNO(("Channel list : "));
+	for (i = 0; i < _params->params_batch.nchan; i++) {
+		DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+	}
+	DHD_PNO(("\n"));
+}
+#endif
+	if (_params->params_batch.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
+		tot_nchan = _params->params_batch.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_PNO(("PNO SSID is in progress in firmware\n"));
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* use the superset of the channel lists of the two modes */
+		_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+		if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) {
+			err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+				&_params2->params_legacy.chan_list[0],
+				_params2->params_legacy.nchan,
+				&_params->params_batch.chan_list[0], _params->params_batch.nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and batch\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		} else {
+			DHD_PNO(("superset channel list will use all channels in firmware\n"));
+		}
+		if ((err = _dhd_pno_add_ssid(dhd, &_params2->params_legacy.ssid_list,
+			_params2->params_legacy.nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	} else {
+		/* we need to return mscan */
+		mscan = err;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	else {
+		/* return #max scan firmware can do */
+		err = mscan;
+	}
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+
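+/*
+ * Push the ePNO SSID matching parameters to firmware through the
+ * "pfn_ssid_cfg" iovar. Note that clearing (set == FALSE) does not zero
+ * the fields one by one; it only raises WL_PFN_SSID_CFG_CLEAR and lets
+ * firmware drop its stored configuration.
+ */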
+static int
+dhd_set_epno_params(dhd_pub_t *dhd, wl_ssid_ext_params_t *params, bool set)
+{
+	wl_pfn_ssid_cfg_t cfg;
+	int err;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	memset(&cfg, 0, sizeof(wl_pfn_ssid_cfg_t));
+	cfg.version = WL_PFN_SSID_CFG_VERSION;
+
+	/* If asked to clear params (set == FALSE) just set the CLEAR bit */
+	if (!set)
+		cfg.flags |= WL_PFN_SSID_CFG_CLEAR;
+	else if (params)
+		memcpy(&cfg.params, params, sizeof(wl_ssid_ext_params_t));
+	err = dhd_iovar(dhd, 0, "pfn_ssid_cfg", (char *)&cfg,
+		sizeof(wl_pfn_ssid_cfg_t), NULL, 0, TRUE);
+	if (err != BCME_OK) {
+		DHD_ERROR(("%s : Failed to execute pfn_ssid_cfg %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+
+int
+dhd_pno_flush_fw_epno(dhd_pub_t *dhd)
+{
+	int err;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+
+	err = dhd_set_epno_params(dhd, NULL, FALSE);
+	if (err < 0) {
+		DHD_ERROR(("failed to set ePNO params %d\n", err));
+		return err;
+	}
+	err = _dhd_pno_flush_ssid(dhd);
+	return err;
+}
+
+int
+dhd_pno_set_epno(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+
+	struct dhd_pno_gscan_params *gscan_params;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+
+	if (gscan_params->epno_cfg.num_epno_ssid) {
+		DHD_PNO(("num_epno_ssid %d\n", gscan_params->epno_cfg.num_epno_ssid));
+		if ((err = _dhd_pno_add_ssid(dhd, &gscan_params->epno_cfg.epno_ssid_list,
+			gscan_params->epno_cfg.num_epno_ssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) to firmware\n", err));
+			return err;
+		}
+		err = dhd_set_epno_params(dhd, &gscan_params->epno_cfg.params, TRUE);
+		if (err < 0) {
+			DHD_ERROR(("failed to set ePNO params %d\n", err));
+		}
+	}
+	return err;
+}
+
+static void
+dhd_pno_reset_cfg_gscan(dhd_pub_t *dhd, dhd_pno_params_t *_params,
+	dhd_pno_status_info_t *_pno_state, uint8 flags)
+{
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (flags & GSCAN_FLUSH_SCAN_CFG) {
+		_params->params_gscan.bestn = 0;
+		_params->params_gscan.mscan = 0;
+		_params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+		_params->params_gscan.scan_fr = 0;
+		_params->params_gscan.send_all_results_flag = 0;
+		memset(_params->params_gscan.channel_bucket, 0,
+			_params->params_gscan.nchannel_buckets *
+			sizeof(struct dhd_pno_gscan_channel_bucket));
+		_params->params_gscan.nchannel_buckets = 0;
+		DHD_PNO(("Flush Scan config\n"));
+	}
+	if (flags & GSCAN_FLUSH_HOTLIST_CFG) {
+		struct dhd_pno_bssid *iter, *next;
+		if (_params->params_gscan.nbssid_hotlist > 0) {
+			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+			list_for_each_entry_safe(iter, next,
+				&_params->params_gscan.hotlist_bssid_list, list) {
+				GCC_DIAGNOSTIC_POP();
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
+			}
+		}
+		_params->params_gscan.nbssid_hotlist = 0;
+		DHD_PNO(("Flush Hotlist Config\n"));
+	}
+	if (flags & GSCAN_FLUSH_EPNO_CFG) {
+		dhd_pno_ssid_t *iter, *next;
+		dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg;
+
+		if (epno_cfg->num_epno_ssid > 0) {
+			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+			list_for_each_entry_safe(iter, next,
+				&epno_cfg->epno_ssid_list, list) {
+				GCC_DIAGNOSTIC_POP();
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, sizeof(dhd_pno_ssid_t));
+			}
+			epno_cfg->num_epno_ssid = 0;
+		}
+		memset(&epno_cfg->params, 0, sizeof(wl_ssid_ext_params_t));
+		DHD_PNO(("Flushed ePNO Config\n"));
+	}
+
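+	/*
+	 * flags is a bitmask, so callers can flush several configs in one
+	 * call by OR-ing GSCAN_FLUSH_SCAN_CFG, GSCAN_FLUSH_HOTLIST_CFG and
+	 * GSCAN_FLUSH_EPNO_CFG (e.g. GSCAN_FLUSH_ALL_CFG used by
+	 * dhd_pno_initiate_gscan_request()).
+	 */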
+ return; +} + +int +dhd_pno_lock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_lock(&_pno_state->pno_mutex); + return err; +} + +void +dhd_pno_unlock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_unlock(&_pno_state->pno_mutex); + return; +} + +int +dhd_wait_batch_results_complete(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + /* Has the workqueue finished its job already?? */ + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) { + DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__)); + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */ + gscan_results_cache_t *iter; + uint16 num_results = 0; + + mutex_lock(&_pno_state->pno_mutex); + iter = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + iter = iter->next; + } + mutex_unlock(&_pno_state->pno_mutex); + + /* All results consumed/No results cached?? + * Get fresh results from FW + */ + if ((_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) && !num_results) { + DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__)); + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } + } + DHD_PNO(("%s: Wait complete\n", __FUNCTION__)); + return err; +} + +int +dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush) +{ + int err = BCME_OK; + dhd_pno_params_t *_params; + int i; + dhd_pno_status_info_t *_pno_state; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + + switch (type) { + case DHD_PNO_BATCH_SCAN_CFG_ID: + { + gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf; + _params->params_gscan.bestn = ptr->bestn; + _params->params_gscan.mscan = ptr->mscan; + _params->params_gscan.buffer_threshold = ptr->buffer_threshold; + } + break; + case DHD_PNO_GEOFENCE_SCAN_CFG_ID: + { + gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf; + struct dhd_pno_bssid *_pno_bssid; + struct bssid_t *bssid_ptr; + int8 flags; + + if (flush) { + dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, + GSCAN_FLUSH_HOTLIST_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_hotlist) { + INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list); + } + + if ((_params->params_gscan.nbssid_hotlist + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of hotlist APs programmed %d\n", + (_params->params_gscan.nbssid_hotlist + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + + for 
(i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) { + _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh, + sizeof(struct dhd_pno_bssid)); + if (!_pno_bssid) { + DHD_ERROR(("_pno_bssid is NULL, cannot kalloc %zd bytes", + sizeof(struct dhd_pno_bssid))); + err = BCME_NOMEM; + goto exit; + } + memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN); + + flags = (int8) bssid_ptr->rssi_reporting_threshold; + _pno_bssid->flags = flags << WL_PFN_RSSI_SHIFT; + list_add_tail(&_pno_bssid->list, + &_params->params_gscan.hotlist_bssid_list); + } + + _params->params_gscan.nbssid_hotlist += ptr->nbssid; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + } + break; + case DHD_PNO_SCAN_CFG_ID: + { + int k; + uint16 band; + gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) { + _params->params_gscan.nchannel_buckets = ptr->nchannel_buckets; + + memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + ch_bucket = _params->params_gscan.channel_bucket; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + band = ch_bucket[i].band; + for (k = 0; k < ptr->channel_bucket[i].num_channels; k++) { + ch_bucket[i].chan_list[k] = + wf_mhz2channel(ptr->channel_bucket[i].chan_list[k], + 0); + } + ch_bucket[i].band = 0; + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... + */ + if (band & GSCAN_BG_BAND_MASK) { + ch_bucket[i].band |= WLC_BAND_2G; + } + if (band & GSCAN_A_BAND_MASK) { + ch_bucket[i].band |= WLC_BAND_6G | WLC_BAND_5G; + } + if (band & GSCAN_DFS_MASK) { + ch_bucket[i].band |= GSCAN_DFS_MASK; + } + DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band, + ch_bucket[i].report_flag)); + } + + for (i = 0; i < ptr->nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple/ptr->scan_fr; + ch_bucket[i].bucket_max_multiple = + ch_bucket[i].bucket_max_multiple/ptr->scan_fr; + DHD_PNO(("mult %d max_mult %d\n", + ch_bucket[i].bucket_freq_multiple, + ch_bucket[i].bucket_max_multiple)); + } + _params->params_gscan.scan_fr = ptr->scan_fr; + + DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets, + _params->params_gscan.scan_fr)); + } else { + err = BCME_BADARG; + } + } + break; + case DHD_PNO_EPNO_CFG_ID: + if (flush) { + dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, + GSCAN_FLUSH_EPNO_CFG); + } + break; + case DHD_PNO_EPNO_PARAMS_ID: + if (flush) { + memset(&_params->params_gscan.epno_cfg.params, 0, + sizeof(wl_ssid_ext_params_t)); + } + if (buf) { + memcpy(&_params->params_gscan.epno_cfg.params, buf, + sizeof(wl_ssid_ext_params_t)); + } + break; + default: + err = BCME_BADARG; + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } +exit: + mutex_unlock(&_pno_state->pno_mutex); + return err; + +} + +static bool +validate_gscan_params(struct dhd_pno_gscan_params *gscan_params) +{ + unsigned int i, k; + + if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) { + DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is empty\n", + __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets)); + return false; + } + + for (i = 0; i < gscan_params->nchannel_buckets; i++) { + if (!gscan_params->channel_bucket[i].band) { + for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) { + if 
(gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) {
+					DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__,
+						gscan_params->channel_bucket[i].chan_list[k]));
+					return false;
+				}
+			}
+		}
+	}
+
+	return true;
+}
+
+static int
+dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
+{
+	int err = BCME_OK;
+	int mode, i = 0;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int tot_nchan = 0;
+	int num_buckets_to_fw, tot_num_buckets, gscan_param_size;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket = NULL;
+	wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	dhd_pno_params_t *_params;
+	bool fw_flushed = FALSE;
+
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(gscan_params, "gscan_params is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!validate_gscan_params(gscan_params)) {
+		DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state,
+		_chan_list, &tot_num_buckets, &num_buckets_to_fw))) {
+		goto exit;
+	}
+
+	mutex_lock(&_pno_state->pno_mutex);
+	/* Clear any pre-existing results in our cache
+	 * not consumed by framework
+	 */
+	dhd_gscan_clear_all_batch_results(dhd);
+	if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) {
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			mutex_unlock(&_pno_state->pno_mutex);
+			goto exit;
+		}
+		fw_flushed = TRUE;
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+	}
+	_pno_state->pno_mode |= DHD_PNO_GSCAN_MODE;
+	mutex_unlock(&_pno_state->pno_mutex);
+
+	if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+		!gscan_params->epno_cfg.num_epno_ssid) {
+		struct dhd_pno_legacy_params *params_legacy;
+		params_legacy =
+			&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+
+		if ((err = _dhd_pno_add_ssid(dhd, &params_legacy->ssid_list,
+			params_legacy->nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) {
+		DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+		goto exit;
+	}
+
+	gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) +
+		(num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t);
+	pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOCZ(dhd->osh, gscan_param_size);
+
+	if (!pfn_gscan_cfg_t) {
+		DHD_ERROR(("%s: failed to malloc memory of size %d\n",
+			__FUNCTION__, gscan_param_size));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+
+	pfn_gscan_cfg_t->version = WL_GSCAN_CFG_VERSION;
+	if (gscan_params->mscan)
+		pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold;
+	else
+		pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+	pfn_gscan_cfg_t->flags =
+		(gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK);
+	pfn_gscan_cfg_t->flags |= GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK;
+	pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw;
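+	/*
+	 * Copy each host-side bucket into the trailing array of
+	 * wl_pfn_gscan_cfg_t. gscan_param_size above is computed with
+	 * (num_buckets_to_fw - 1) extra entries because the struct already
+	 * embeds one channel bucket.
+	 */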
+	pfn_gscan_cfg_t->retry_threshold = GSCAN_RETRY_THRESHOLD;
+
+	for (i = 0; i < num_buckets_to_fw; i++) {
+		pfn_gscan_cfg_t->channel_bucket[i].bucket_end_index =
+			ch_bucket[i].bucket_end_index;
+		pfn_gscan_cfg_t->channel_bucket[i].bucket_freq_multiple =
+			ch_bucket[i].bucket_freq_multiple;
+		pfn_gscan_cfg_t->channel_bucket[i].max_freq_multiple =
+			ch_bucket[i].max_freq_multiple;
+		pfn_gscan_cfg_t->channel_bucket[i].repeat =
+			ch_bucket[i].repeat;
+		pfn_gscan_cfg_t->channel_bucket[i].flag =
+			ch_bucket[i].flag;
+	}
+
+	tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1;
+	DHD_PNO(("Total channel num %d total ch_buckets %d ch_buckets_to_fw %d\n", tot_nchan,
+		tot_num_buckets, num_buckets_to_fw));
+
+	if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+
+	if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	/* Reprogram ePNO cfg from dhd cache if FW has been flushed */
+	if (fw_flushed) {
+		dhd_pno_set_epno(dhd);
+	}
+
+	if (gscan_params->nbssid_hotlist) {
+		struct dhd_pno_bssid *iter, *next;
+		wl_pfn_bssid_t *ptr;
+		p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh,
+			sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist);
+		if (p_pfn_bssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+				" (count: %d)\n",
+				__FUNCTION__, gscan_params->nbssid_hotlist));
+			err = BCME_NOMEM;
+			_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+			goto exit;
+		}
+		ptr = p_pfn_bssid;
+		/* convert dhd_pno_bssid to wl_pfn_bssid */
+		DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
+		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+		list_for_each_entry_safe(iter, next,
+			&gscan_params->hotlist_bssid_list, list) {
+			char buffer_hotlist[64];
+			GCC_DIAGNOSTIC_POP();
+			memcpy(&ptr->macaddr,
+				&iter->macaddr, ETHER_ADDR_LEN);
+			BCM_REFERENCE(buffer_hotlist);
+			DHD_PNO(("%s\n", bcm_ether_ntoa(&ptr->macaddr, buffer_hotlist)));
+			ptr->flags = iter->flags;
+			ptr++;
+		}
+
+		err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+
+	if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) {
+		DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err));
+	}
+
+exit:
+	/* clear mode in case of error */
+	if (err < 0) {
+		int ret = dhd_pno_clean(dhd);
+
+		if (ret < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, ret));
+		} else {
+			_pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE;
+		}
+	}
+	MFREE(dhd->osh, p_pfn_bssid,
+		sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist);
+	if (pfn_gscan_cfg_t) {
+		MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size);
+	}
+	if (ch_bucket) {
+		MFREE(dhd->osh, ch_bucket,
+			(tot_num_buckets * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+	}
+	return err;
+
+}
+
+static wl_pfn_gscan_ch_bucket_cfg_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd,
+	dhd_pno_status_info_t *_pno_state,
+	uint16 *chan_list,
+	uint32 *num_buckets,
+	uint32 *num_buckets_to_fw)
+{
+	int i, num_channels, err, nchan = WL_NUMCHANNELS, ch_cnt;
+	uint16 *ptr = chan_list, max;
+	wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket;
+	dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	bool is_pno_legacy_running;
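+	/*
+	 * Helper overview: every bucket's channels are flattened into the
+	 * caller-provided chan_list and each wl_pfn_gscan_ch_bucket_cfg_t
+	 * records the end index of its slice plus its scan-frequency
+	 * multiple. When legacy PNO runs without ePNO, one extra bucket is
+	 * appended for the legacy channel set.
+	 */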
dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket; + + /* ePNO and Legacy PNO do not co-exist */ + is_pno_legacy_running = ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + !_params->params_gscan.epno_cfg.num_epno_ssid); + + if (is_pno_legacy_running) + *num_buckets = _params->params_gscan.nchannel_buckets + 1; + else + *num_buckets = _params->params_gscan.nchannel_buckets; + + *num_buckets_to_fw = 0; + + ch_bucket = (wl_pfn_gscan_ch_bucket_cfg_t *) MALLOC(dhd->osh, + ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); + + if (!ch_bucket) { + DHD_ERROR(("%s: failed to malloc memory of size %zd\n", + __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + max = gscan_buckets[0].bucket_freq_multiple; + num_channels = 0; + /* nchan is the remaining space left in chan_list buffer + * So any overflow list of channels is ignored + */ + for (i = 0; i < _params->params_gscan.nchannel_buckets && nchan; i++) { + if (!gscan_buckets[i].band) { + ch_cnt = MIN(gscan_buckets[i].num_channels, (uint8)nchan); + num_channels += ch_cnt; + memcpy(ptr, gscan_buckets[i].chan_list, + ch_cnt * sizeof(uint16)); + ptr = ptr + ch_cnt; + } else { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, ptr, + &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK), + !(gscan_buckets[i].band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, gscan_buckets[i].band)); + MFREE(dhd->osh, ch_bucket, + ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + num_channels += nchan; + ptr = ptr + nchan; + } + + ch_bucket[i].bucket_end_index = num_channels - 1; + ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple; + ch_bucket[i].repeat = gscan_buckets[i].repeat; + ch_bucket[i].max_freq_multiple = gscan_buckets[i].bucket_max_multiple; + ch_bucket[i].flag = gscan_buckets[i].report_flag; + /* HAL and FW interpretations are opposite for this bit */ + ch_bucket[i].flag ^= DHD_PNO_REPORT_NO_BATCH; + if (max < gscan_buckets[i].bucket_freq_multiple) + max = gscan_buckets[i].bucket_freq_multiple; + nchan = WL_NUMCHANNELS - num_channels; + *num_buckets_to_fw = *num_buckets_to_fw + 1; + DHD_PNO(("end_idx %d freq_mult - %d\n", + ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple)); + } + + _params->params_gscan.max_ch_bucket_freq = max; + /* Legacy PNO maybe running, which means we need to create a legacy PNO bucket + * Get GCF of Legacy PNO and Gscan scanfreq + */ + if (is_pno_legacy_running) { + dhd_pno_params_t *_params1 = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + uint16 *legacy_chan_list = _params1->params_legacy.chan_list; + uint16 common_freq; + uint32 legacy_bucket_idx = _params->params_gscan.nchannel_buckets; + /* If no space is left then only gscan buckets will be sent to FW */ + if (nchan) { + common_freq = gcd(_params->params_gscan.scan_fr, + _params1->params_legacy.scan_fr); + max = gscan_buckets[0].bucket_freq_multiple; + /* GSCAN buckets */ + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr; + ch_bucket[i].bucket_freq_multiple /= common_freq; + if (max < gscan_buckets[i].bucket_freq_multiple) + max = gscan_buckets[i].bucket_freq_multiple; + } + /* Legacy PNO bucket */ + 
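+			/* The gcd() rebase above makes common_freq the base scan
+			 * period shared by both modes: e.g. gscan scan_fr = 20s with
+			 * bucket multiple 3 (60s) and legacy scan_fr = 45s give
+			 * common_freq = gcd(20, 45) = 5, so the gscan bucket becomes
+			 * 3 * 20 / 5 = 12 base periods (60s) and the legacy bucket
+			 * set up next becomes 45 / 5 = 9 base periods (45s).
+			 */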
ch_bucket[legacy_bucket_idx].bucket_freq_multiple = + _params1->params_legacy.scan_fr; + ch_bucket[legacy_bucket_idx].bucket_freq_multiple /= + common_freq; + _params->params_gscan.max_ch_bucket_freq = MAX(max, + ch_bucket[legacy_bucket_idx].bucket_freq_multiple); + ch_bucket[legacy_bucket_idx].flag = CH_BUCKET_REPORT_REGULAR; + /* Now add channels to the legacy scan bucket */ + for (i = 0; i < _params1->params_legacy.nchan && nchan; i++, nchan--) { + ptr[i] = legacy_chan_list[i]; + num_channels++; + } + ch_bucket[legacy_bucket_idx].bucket_end_index = num_channels - 1; + *num_buckets_to_fw = *num_buckets_to_fw + 1; + DHD_PNO(("end_idx %d freq_mult - %d\n", + ch_bucket[legacy_bucket_idx].bucket_end_index, + ch_bucket[legacy_bucket_idx].bucket_freq_multiple)); + } + } + return ch_bucket; +} + +static int +dhd_pno_stop_for_gscan(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode; + dhd_pno_status_info_t *_pno_state; + + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit; + } + if (_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan.mscan) { + /* retrieve the batching data from firmware into host */ + err = dhd_wait_batch_results_complete(dhd); + if (err != BCME_OK) + goto exit; + } + mutex_lock(&_pno_state->pno_mutex); + mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); + return err; + } + _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); + + /* Reprogram Legacy PNO if it was running */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *params_legacy; + uint16 chan_list[WL_NUMCHANNELS]; + + params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + + DHD_PNO(("Restarting Legacy PNO SSID scan...\n")); + memcpy(chan_list, params_legacy->chan_list, + (params_legacy->nchan * sizeof(uint16))); + err = dhd_pno_set_legacy_pno(dhd, params_legacy->scan_fr, + params_legacy->pno_repeat, params_legacy->pno_freq_expo_max, + chan_list, params_legacy->nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + } + +exit: + return err; +} + +int +dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush)); + + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + if (run) { + err = dhd_pno_set_for_gscan(dhd, gscan_params); + } else { + if (flush) { + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(dhd, params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } + /* Need to stop all gscan */ + err = 
dhd_pno_stop_for_gscan(dhd); + } + + return err; +} + +int +dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + uint8 old_flag; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + mutex_lock(&_pno_state->pno_mutex); + + old_flag = gscan_params->send_all_results_flag; + gscan_params->send_all_results_flag = (uint8) real_time_flag; + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if (old_flag != gscan_params->send_all_results_flag) { + wl_pfn_gscan_cfg_t gscan_cfg; + + gscan_cfg.version = WL_GSCAN_CFG_VERSION; + gscan_cfg.flags = (gscan_params->send_all_results_flag & + GSCAN_SEND_ALL_RESULTS_MASK); + gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK; + + if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg, + sizeof(wl_pfn_gscan_cfg_t))) < 0) { + DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + } else { + DHD_PNO(("No change in flag - %d\n", old_flag)); + } + } else { + DHD_PNO(("Gscan not started\n")); + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + return err; +} + +/* Cleanup any consumed results + * Return TRUE if all results consumed else FALSE + */ +int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd) +{ + int ret = 0; + dhd_pno_params_t *params; + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter, *tmp; + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + iter = gscan_params->gscan_batch_cache; + + while (iter) { + if (iter->tot_consumed == iter->tot_count) { + tmp = iter->next; + MFREE(dhd->osh, iter, + ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + iter = tmp; + } else + break; + } + gscan_params->gscan_batch_cache = iter; + ret = (iter == NULL); + return ret; +} + +static int +_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 timestamp = 0, ts = 0, i, j, timediff; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + wl_pfn_lnet_info_v1_t *plnetinfo; + wl_pfn_lnet_info_v2_t *plnetinfo_v2; + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_lscanresults_v1_t *plbestnet_v1 = NULL; + wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL; + gscan_results_cache_t *iter, *tail; + wifi_gscan_result_t *result; + uint8 *nAPs_per_scan = NULL; + uint8 num_scans_in_cur_iter; + uint16 count; + uint16 fwcount; + uint16 fwstatus = PFN_INCOMPLETE; + struct osl_timespec tm_spec; + + /* Static asserts in _dhd_pno_get_for_batch() below guarantee the v1 and v2 + * net_info and subnet_info structures are compatible in size and SSID offset, + * allowing v1 to be safely used in the code below except for lscanresults + * fields themselves (status, count, offset to netinfo). 
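+	 * The retrieval loop below therefore only branches on the lscanresults
+	 * version to read status/count and to type the netinfo records; the
+	 * per-record handling is otherwise identical for v1 and v2.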
+ */ + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s: GSCAN is not enabled\n", __FUNCTION__)); + goto exit; + } + gscan_params = ¶ms->params_gscan; + nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan); + + if (!nAPs_per_scan) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__, + gscan_params->mscan)); + err = BCME_NOMEM; + goto exit; + } + + plbestnet_v1 = (wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + if (!plbestnet_v1) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__, + (int)PNO_BESTNET_LEN)); + err = BCME_NOMEM; + goto exit; + } + plbestnet_v2 = (wl_pfn_lscanresults_v2_t *)plbestnet_v1; + + mutex_lock(&_pno_state->pno_mutex); + + dhd_gscan_clear_all_batch_results(dhd); + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit_mutex_unlock; + } + + timediff = gscan_params->scan_fr * 1000; + timediff = timediff >> 1; + + /* Ok, now lets start getting results from the FW */ + tail = gscan_params->gscan_batch_cache; + do { + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN, + FALSE); + if (err < 0) { + DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + osl_get_monotonic_boottime(&tm_spec); + + if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) { + fwstatus = plbestnet_v1->status; + fwcount = plbestnet_v1->count; + plnetinfo = &plbestnet_v1->netinfo[0]; + + DHD_PNO(("ver %d, status : %d, count %d\n", + plbestnet_v1->version, fwstatus, fwcount)); + + if (fwcount == 0) { + DHD_PNO(("No more batch results\n")); + goto exit_mutex_unlock; + } + if (fwcount > BESTN_MAX) { + DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n", + __FUNCTION__, fwcount, (int)BESTN_MAX)); + /* Process only BESTN_MAX number of results per batch */ + fwcount = BESTN_MAX; + } + num_scans_in_cur_iter = 0; + + timestamp = plnetinfo->timestamp; + /* find out how many scans' results did we get in + * this batch of FW results + */ + for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo++) { + /* Unlikely to happen, but just in case the results from + * FW doesnt make sense..... Assume its part of one single scan + */ + if (num_scans_in_cur_iter >= gscan_params->mscan) { + num_scans_in_cur_iter = 0; + count = fwcount; + break; + } + if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + count = 0; + num_scans_in_cur_iter++; + } + timestamp = plnetinfo->timestamp; + } + if (num_scans_in_cur_iter < gscan_params->mscan) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + } + + DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); + /* reset plnetinfo to the first item for the next loop */ + plnetinfo -= i; + + for (i = 0; i < num_scans_in_cur_iter; i++) { + iter = (gscan_results_cache_t *) + MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) * + sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + if (!iter) { + DHD_ERROR(("%s :Out of memory!! 
Cant malloc %d bytes\n", + __FUNCTION__, gscan_params->mscan)); + err = BCME_NOMEM; + goto exit_mutex_unlock; + } + /* Need this check because the new set of results from FW + * maybe a continuation of previous sets' scan results + */ + if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) { + iter->scan_id = ++gscan_params->scan_id; + } else { + iter->scan_id = gscan_params->scan_id; + } + DHD_PNO(("scan_id %d tot_count %d \n", + gscan_params->scan_id, nAPs_per_scan[i])); + iter->tot_count = nAPs_per_scan[i]; + iter->tot_consumed = 0; + iter->flag = 0; + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + DHD_PNO(("This scan is aborted\n")); + iter->flag = (ENABLE << PNO_STATUS_ABORT); + } else if (gscan_params->reason) { + iter->flag = (ENABLE << gscan_params->reason); + } + + if (!tail) { + gscan_params->gscan_batch_cache = iter; + } else { + tail->next = iter; + } + tail = iter; + iter->next = NULL; + for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) { + result = &iter->results[j]; + + result->channel = wl_channel_to_frequency( + wf_chspec_ctlchan(plnetinfo->pfnsubnet.channel), + CHSPEC_BAND(plnetinfo->pfnsubnet.channel)); + result->rssi = (int32) plnetinfo->RSSI; + result->beacon_period = 0; + result->capability = 0; + result->rtt = (uint64) plnetinfo->rtt0; + result->rtt_sd = (uint64) plnetinfo->rtt1; + result->ts = convert_fw_rel_time_to_systime(&tm_spec, + plnetinfo->timestamp); + ts = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d\n", + __FUNCTION__, + plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + (void)memcpy_s(result->ssid, DOT11_MAX_SSID_LEN, + plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + (void)memcpy_s(&result->macaddr, ETHER_ADDR_LEN, + &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + + DHD_PNO(("\tSSID : ")); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(result->macaddr.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", + plnetinfo->rtt0, plnetinfo->rtt1)); + + } + } + + } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) { + fwstatus = plbestnet_v2->status; + fwcount = plbestnet_v2->count; + plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0]; + + DHD_PNO(("ver %d, status : %d, count %d\n", + plbestnet_v2->version, fwstatus, fwcount)); + + if (fwcount == 0) { + DHD_PNO(("No more batch results\n")); + goto exit_mutex_unlock; + } + if (fwcount > BESTN_MAX) { + DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n", + __FUNCTION__, fwcount, (int)BESTN_MAX)); + /* Process only BESTN_MAX number of results per batch */ + fwcount = BESTN_MAX; + } + num_scans_in_cur_iter = 0; + + timestamp = plnetinfo_v2->timestamp; + /* find out how many scans' results did we get + * in this batch of FW results + */ + for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo_v2++) { + /* Unlikely to happen, but just in case the results from + * FW doesnt make sense..... 
Assume its part of one single scan + */ + if (num_scans_in_cur_iter >= gscan_params->mscan) { + num_scans_in_cur_iter = 0; + count = fwcount; + break; + } + if (TIME_DIFF_MS(timestamp, plnetinfo_v2->timestamp) > timediff) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + count = 0; + num_scans_in_cur_iter++; + } + timestamp = plnetinfo_v2->timestamp; + } + if (num_scans_in_cur_iter < gscan_params->mscan) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + } + + DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); + /* reset plnetinfo to the first item for the next loop */ + plnetinfo_v2 -= i; + + for (i = 0; i < num_scans_in_cur_iter; i++) { + iter = (gscan_results_cache_t *) + MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) * + sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + if (!iter) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", + __FUNCTION__, gscan_params->mscan)); + err = BCME_NOMEM; + goto exit_mutex_unlock; + } + /* Need this check because the new set of results from FW + * maybe a continuation of previous sets' scan results + */ + if (TIME_DIFF_MS(ts, plnetinfo_v2->timestamp) > timediff) { + iter->scan_id = ++gscan_params->scan_id; + } else { + iter->scan_id = gscan_params->scan_id; + } + DHD_PNO(("scan_id %d tot_count %d ch_bucket %x\n", + gscan_params->scan_id, nAPs_per_scan[i], + plbestnet_v2->scan_ch_buckets[i])); + iter->tot_count = nAPs_per_scan[i]; + iter->scan_ch_bucket = plbestnet_v2->scan_ch_buckets[i]; + iter->tot_consumed = 0; + iter->flag = 0; + if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) { + DHD_PNO(("This scan is aborted\n")); + iter->flag = (ENABLE << PNO_STATUS_ABORT); + } else if (gscan_params->reason) { + iter->flag = (ENABLE << gscan_params->reason); + } + + if (!tail) { + gscan_params->gscan_batch_cache = iter; + } else { + tail->next = iter; + } + tail = iter; + iter->next = NULL; + for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo_v2++) { + result = &iter->results[j]; + + result->channel = + wl_channel_to_frequency( + wf_chspec_ctlchan(plnetinfo_v2->pfnsubnet.channel), + CHSPEC_BAND(plnetinfo_v2->pfnsubnet.channel)); + result->rssi = (int32) plnetinfo_v2->RSSI; + /* Info not available & not expected */ + result->beacon_period = 0; + result->capability = 0; + result->rtt = (uint64) plnetinfo_v2->rtt0; + result->rtt_sd = (uint64) plnetinfo_v2->rtt1; + result->ts = convert_fw_rel_time_to_systime(&tm_spec, + plnetinfo_v2->timestamp); + ts = plnetinfo_v2->timestamp; + if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d\n", + __FUNCTION__, + plnetinfo_v2->pfnsubnet.SSID_len)); + plnetinfo_v2->pfnsubnet.SSID_len = + DOT11_MAX_SSID_LEN; + } + (void)memcpy_s(result->ssid, DOT11_MAX_SSID_LEN, + plnetinfo_v2->pfnsubnet.u.SSID, + plnetinfo_v2->pfnsubnet.SSID_len); + result->ssid[plnetinfo_v2->pfnsubnet.SSID_len] = '\0'; + (void)memcpy_s(&result->macaddr, ETHER_ADDR_LEN, + &plnetinfo_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN); + + DHD_PNO(("\tSSID : ")); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(result->macaddr.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo_v2->pfnsubnet.channel, + plnetinfo_v2->RSSI, plnetinfo_v2->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", + plnetinfo_v2->rtt0, plnetinfo_v2->rtt1)); + + } + } + + } else { + err = BCME_VERSION; + DHD_ERROR(("bestnet fw version %d not supported\n", + plbestnet_v1->version)); + goto exit_mutex_unlock; + } + } while (fwstatus == PFN_INCOMPLETE); 
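+	/* pfnlbest is re-issued until firmware reports something other than
+	 * PFN_INCOMPLETE, i.e. until its batch buffer is drained. Within each
+	 * response, records are grouped into scans by the timediff heuristic
+	 * above: consecutive records more than scan_fr * 1000 / 2 ms apart are
+	 * treated as belonging to different scans.
+	 */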
+ +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE; + smp_wmb(); + wake_up_interruptible(&_pno_state->batch_get_wait); + if (nAPs_per_scan) { + MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan * sizeof(uint8)); + } + if (plbestnet_v1) { + MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN); + } + DHD_PNO(("Batch retrieval done!\n")); + return err; +} +#endif /* GSCAN_SUPPORT */ + +#if defined (GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +static void * +dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) +{ + gscan_results_cache_t *iter, *results; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + uint16 num_scan_ids = 0, num_results = 0; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + iter = results = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + num_scan_ids++; + iter = iter->next; + } + + *len = ((num_results << 16) | (num_scan_ids)); + return results; +} + +void * +dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + void *ret = NULL; + dhd_pno_gscan_capabilities_t *ptr; + dhd_pno_ssid_t *ssid_elem; + dhd_pno_params_t *_params; + dhd_epno_ssid_cfg_t *epno_cfg; + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__)); + return NULL; + } + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + if (!len) { + DHD_ERROR(("%s: len is NULL\n", __FUNCTION__)); + return NULL; + } + + switch (type) { + case DHD_PNO_GET_CAPABILITIES: + ptr = (dhd_pno_gscan_capabilities_t *) + MALLOCZ(dhd->osh, sizeof(dhd_pno_gscan_capabilities_t)); + if (!ptr) + break; + /* Hardcoding these values for now, need to get + * these values from FW, will change in a later check-in + */ + ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE; + ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; + ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN; + ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX; + ptr->max_scan_reporting_threshold = 100; + ptr->max_hotlist_bssids = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_hotlist_ssids = 0; + ptr->max_significant_wifi_change_aps = 0; + ptr->max_bssid_history_entries = 0; + ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM; + ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID; + ptr->max_white_list_ssid = MAX_WHITELIST_SSID; + ret = (void *)ptr; + *len = sizeof(dhd_pno_gscan_capabilities_t); + break; + + case DHD_PNO_GET_BATCH_RESULTS: + ret = dhd_get_gscan_batch_results(dhd, len); + break; + case DHD_PNO_GET_CHANNEL_LIST: + if (info) { + uint16 ch_list[WL_NUMCHANNELS]; + uint32 *p, mem_needed, i; + int32 err, nchan = WL_NUMCHANNELS; + uint32 *gscan_band = (uint32 *) info; + uint8 band = 0; + + /* No band specified?, nothing to do */ + if ((*gscan_band & GSCAN_BAND_MASK) == 0) { + DHD_PNO(("No band specified\n")); + *len = 0; + break; + } + + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... 
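+			 * GSCAN_BG_BAND_MASK maps to WLC_BAND_2G, and
+			 * GSCAN_A_BAND_MASK maps to WLC_BAND_5G (plus WLC_BAND_6G
+			 * when WL_6G_BAND is defined).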
+ */ + if (*gscan_band & GSCAN_BG_BAND_MASK) { + band |= WLC_BAND_2G; + } + if (*gscan_band & GSCAN_A_BAND_MASK) { + band |= +#ifdef WL_6G_BAND + WLC_BAND_6G | +#endif /* WL_6G_BAND */ + WLC_BAND_5G; + } + + err = _dhd_pno_get_channels(dhd, ch_list, &nchan, + (band & GSCAN_ABG_BAND_MASK), + !(*gscan_band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list\n", + __FUNCTION__)); + *len = 0; + } else { + mem_needed = sizeof(uint32) * nchan; + p = (uint32 *)MALLOC(dhd->osh, mem_needed); + if (!p) { + DHD_ERROR(("%s: Unable to malloc %d bytes\n", + __FUNCTION__, mem_needed)); + break; + } + for (i = 0; i < nchan; i++) { + p[i] = wl_channel_to_frequency( + (ch_list[i]), + CHSPEC_BAND(ch_list[i])); + } + ret = p; + *len = mem_needed; + } + } else { + *len = 0; + DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__)); + } + break; + case DHD_PNO_GET_NEW_EPNO_SSID_ELEM: + epno_cfg = &_params->params_gscan.epno_cfg; + if (epno_cfg->num_epno_ssid >= + MAX_EPNO_SSID_NUM) { + DHD_ERROR(("Excessive number of ePNO SSIDs programmed %d\n", + epno_cfg->num_epno_ssid)); + return NULL; + } + if (!epno_cfg->num_epno_ssid) { + INIT_LIST_HEAD(&epno_cfg->epno_ssid_list); + } + ssid_elem = MALLOCZ(dhd->osh, sizeof(dhd_pno_ssid_t)); + if (!ssid_elem) { + DHD_ERROR(("EPNO ssid: cannot alloc %zd bytes", + sizeof(dhd_pno_ssid_t))); + return NULL; + } + epno_cfg->num_epno_ssid++; + list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list); + ret = ssid_elem; + break; + default: + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } + + return ret; + +} +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +static int +_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + int i, j; + uint32 timestamp = 0; + dhd_pno_params_t *_params = NULL; + dhd_pno_status_info_t *_pno_state = NULL; + wl_pfn_lscanresults_v1_t *plbestnet_v1 = NULL; + wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL; + wl_pfn_lnet_info_v1_t *plnetinfo; + wl_pfn_lnet_info_v2_t *plnetinfo_v2; + dhd_pno_bestnet_entry_t *pbestnet_entry; + dhd_pno_best_header_t *pbestnetheader = NULL; + dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext; + bool allocate_header = FALSE; + uint16 fwstatus = PFN_INCOMPLETE; + uint16 fwcount; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + /* The static asserts below guarantee the v1 and v2 net_info and subnet_info + * structures are compatible in size and SSID offset, allowing v1 to be safely + * used in the code below except for lscanresults fields themselves + * (status, count, offset to netinfo). 
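+	 * Note the v2 SSID is still read through the versioned union (u.SSID);
+	 * the OFFSETOF() check below pins it to the same offset as the v1 SSID
+	 * field, which is what makes the shared handling safe.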
+ */
+	STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
+	STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
+	STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
+	ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
+		OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit_no_unlock;
+	}
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit_no_unlock;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+		goto exit_no_unlock;
+	}
+	mutex_lock(&_pno_state->pno_mutex);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (buf && bufsize) {
+		if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
+			/* need to check whether we have cached data or not */
+			DHD_PNO(("%s: have cached batching data in Driver\n",
+				__FUNCTION__));
+			/* convert to results format */
+			goto convert_format;
+		} else {
+			/* this is the first try to get batching results */
+			if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+				/* move the scan_results_list to expired_scan_results_list */
+				GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+				list_for_each_entry_safe(siter, snext,
+					&_params->params_batch.get_batch.scan_results_list, list) {
+					GCC_DIAGNOSTIC_POP();
+					list_move_tail(&siter->list,
+						&_params->params_batch.get_batch.expired_scan_results_list);
+				}
+				_params->params_batch.get_batch.top_node_cnt = 0;
+				_params->params_batch.get_batch.expired_tot_scan_cnt =
+					_params->params_batch.get_batch.tot_scan_cnt;
+				_params->params_batch.get_batch.tot_scan_cnt = 0;
+				goto convert_format;
+			}
+		}
+	}
+	/* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */
+	pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE);
+	if (pscan_results == NULL) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n"));
+		goto exit;
+	}
+	pscan_results->bestnetheader = NULL;
+	pscan_results->cnt_header = 0;
+	/* add the element into list unless total node cnt is less than MAX_NODE_CNT */
+	if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) {
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+		_params->params_batch.get_batch.top_node_cnt++;
+	} else {
+		int _removed_scan_cnt;
+		/* remove oldest one and add new one */
+		DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__));
+		_removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd,
+			&_params->params_batch.get_batch.scan_results_list, TRUE);
+		_params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt;
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+
+	}
+
+	plbestnet_v1 = (wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+	if (!plbestnet_v1) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("%s: failed to allocate buffer for bestnet", __FUNCTION__));
+		goto exit;
+	}
+
+	plbestnet_v2 = (wl_pfn_lscanresults_v2_t*)plbestnet_v1;
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	do {
+		err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN,
+			FALSE);
+		if (err < 0) {
+			if (err == BCME_EPERM) {
+				DHD_ERROR(("we cannot get the batching data "
+					"during scanning in firmware, try again\n"));
+				msleep(500);
+				continue;
+			} else {
+				DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+
+		if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) {
+			fwstatus = plbestnet_v1->status;
+			fwcount = plbestnet_v1->count;
+			plnetinfo = &plbestnet_v1->netinfo[0];
+			if (fwcount == 0) {
+				DHD_PNO(("No more batch results\n"));
+				goto exit;
+			}
+			if (fwcount > BESTN_MAX) {
+				DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
+					__FUNCTION__, fwcount, (int)BESTN_MAX));
+				/* Process only BESTN_MAX number of results per batch */
+				fwcount = BESTN_MAX;
+			}
+			for (i = 0; i < fwcount; i++) {
+				pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+					MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+				if (pbestnet_entry == NULL) {
+					err = BCME_NOMEM;
+					DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+					goto exit;
+				}
+				memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+				/* record the current time */
+				pbestnet_entry->recorded_time = jiffies;
+				/* create a header for the first entry */
+				allocate_header = (i == 0)? TRUE : FALSE;
+				/* check whether a new generation has started */
+				if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
+					> TIME_MIN_DIFF))
+					allocate_header = TRUE;
+				timestamp = plnetinfo->timestamp;
+				if (allocate_header) {
+					pbestnetheader = (dhd_pno_best_header_t *)
+						MALLOC(dhd->osh, BEST_HEADER_SIZE);
+					if (pbestnetheader == NULL) {
+						err = BCME_NOMEM;
+						if (pbestnet_entry)
+							MFREE(dhd->osh, pbestnet_entry,
+								BESTNET_ENTRY_SIZE);
+						DHD_ERROR(("failed to allocate"
+							" dhd_pno_best_header\n"));
+						goto exit;
+					}
+					/* increase total cnt of bestnet header */
+					pscan_results->cnt_header++;
+					memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+					/* record the reason for calling dhd_pno_get_for_batch
+					 * (set after the memset so it is not wiped out)
+					 */
+					if (reason)
+						pbestnetheader->reason = (ENABLE << reason);
+					/* initialize the head of linked list */
+					INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+					/* link the pbestnet header into the existing list */
+					if (pscan_results->bestnetheader == NULL)
+						/* In case of header */
+						pscan_results->bestnetheader = pbestnetheader;
+					else {
+						dhd_pno_best_header_t *head =
+							pscan_results->bestnetheader;
+						pscan_results->bestnetheader = pbestnetheader;
+						pbestnetheader->next = head;
+					}
+				}
+				pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
+				pbestnet_entry->RSSI = plnetinfo->RSSI;
+				if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+					/* if the RSSI is a positive value, we assume that
+					 * this scan was aborted by another scan
+					 */
+					DHD_PNO(("This scan is aborted\n"));
+					pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+				}
+				pbestnet_entry->rtt0 = plnetinfo->rtt0;
+				pbestnet_entry->rtt1 = plnetinfo->rtt1;
+				pbestnet_entry->timestamp = plnetinfo->timestamp;
+				if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+					DHD_ERROR(("%s: Invalid SSID length"
+						" %d: trimming it to max\n",
+						__FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+					plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+				}
+				pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
+				memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID,
+					pbestnet_entry->SSID_len);
+				memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID,
+					ETHER_ADDR_LEN);
+				/* add the element into list */
+				list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+				/* increase best entry count */
+				pbestnetheader->tot_cnt++;
+				pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+				DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
DHD_PNO(("\tSSID : ")); + for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++) + DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j])); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(plnetinfo->pfnsubnet.BSSID.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, + plnetinfo->rtt1)); + plnetinfo++; + } + } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) { + fwstatus = plbestnet_v2->status; + fwcount = plbestnet_v2->count; + plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0]; + if (fwcount == 0) { + DHD_PNO(("No more batch results\n")); + goto exit; + } + if (fwcount > BESTN_MAX) { + DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n", + __FUNCTION__, fwcount, (int)BESTN_MAX)); + /* Process only BESTN_MAX number of results per batch */ + fwcount = BESTN_MAX; + } + DHD_PNO(("ver %d, status : %d, count %d\n", + plbestnet_v2->version, fwstatus, fwcount)); + + for (i = 0; i < fwcount; i++) { + pbestnet_entry = (dhd_pno_bestnet_entry_t *) + MALLOC(dhd->osh, BESTNET_ENTRY_SIZE); + if (pbestnet_entry == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE); + /* record the current time */ + pbestnet_entry->recorded_time = jiffies; + /* create header for the first entry */ + allocate_header = (i == 0)? TRUE : FALSE; + /* check whether the new generation is started or not */ + if (timestamp && (TIME_DIFF(timestamp, plnetinfo_v2->timestamp) + > TIME_MIN_DIFF)) + allocate_header = TRUE; + timestamp = plnetinfo_v2->timestamp; + if (allocate_header) { + pbestnetheader = (dhd_pno_best_header_t *) + MALLOC(dhd->osh, BEST_HEADER_SIZE); + if (pbestnetheader == NULL) { + err = BCME_NOMEM; + if (pbestnet_entry) + MFREE(dhd->osh, pbestnet_entry, + BESTNET_ENTRY_SIZE); + DHD_ERROR(("failed to allocate" + " dhd_pno_bestnet_entry\n")); + goto exit; + } + /* increase total cnt of bestnet header */ + pscan_results->cnt_header++; + /* need to record the reason to call dhd_pno_get_for_bach */ + if (reason) + pbestnetheader->reason = (ENABLE << reason); + memset(pbestnetheader, 0, BEST_HEADER_SIZE); + /* initialize the head of linked list */ + INIT_LIST_HEAD(&(pbestnetheader->entry_list)); + /* link the pbestnet heaer into existed list */ + if (pscan_results->bestnetheader == NULL) + /* In case of header */ + pscan_results->bestnetheader = pbestnetheader; + else { + dhd_pno_best_header_t *head = + pscan_results->bestnetheader; + pscan_results->bestnetheader = pbestnetheader; + pbestnetheader->next = head; + } + } + /* fills the best network info */ + pbestnet_entry->channel = plnetinfo_v2->pfnsubnet.channel; + pbestnet_entry->RSSI = plnetinfo_v2->RSSI; + if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) { + /* if RSSI is positive value, we assume that + * this scan is aborted by other scan + */ + DHD_PNO(("This scan is aborted\n")); + pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT); + } + pbestnet_entry->rtt0 = plnetinfo_v2->rtt0; + pbestnet_entry->rtt1 = plnetinfo_v2->rtt1; + pbestnet_entry->timestamp = plnetinfo_v2->timestamp; + if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length" + " %d: trimming it to max\n", + __FUNCTION__, plnetinfo_v2->pfnsubnet.SSID_len)); + plnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + pbestnet_entry->SSID_len = 
plnetinfo_v2->pfnsubnet.SSID_len; + memcpy(pbestnet_entry->SSID, plnetinfo_v2->pfnsubnet.u.SSID, + pbestnet_entry->SSID_len); + memcpy(&pbestnet_entry->BSSID, &plnetinfo_v2->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + /* add the element into list */ + list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list); + /* increase best entry count */ + pbestnetheader->tot_cnt++; + pbestnetheader->tot_size += BESTNET_ENTRY_SIZE; + DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); + DHD_PNO(("\tSSID : ")); + for (j = 0; j < plnetinfo_v2->pfnsubnet.SSID_len; j++) + DHD_PNO(("%c", plnetinfo_v2->pfnsubnet.u.SSID[j])); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(plnetinfo_v2->pfnsubnet.BSSID.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo_v2->pfnsubnet.channel, + plnetinfo_v2->RSSI, plnetinfo_v2->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo_v2->rtt0, + plnetinfo_v2->rtt1)); + plnetinfo_v2++; + } + } else { + err = BCME_VERSION; + DHD_ERROR(("bestnet fw version %d not supported\n", + plbestnet_v1->version)); + goto exit; + } + } while (fwstatus != PFN_COMPLETE); + + if (pscan_results->cnt_header == 0) { + /* In case that we didn't get any data from the firmware + * Remove the current scan_result list from get_bach.scan_results_list. + */ + DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n")); + list_del(&pscan_results->list); + MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE); + _params->params_batch.get_batch.top_node_cnt--; + } else { + /* increase total scan count using current scan count */ + _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; + } + + if (buf && bufsize) { + /* This is a first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + GCC_DIAGNOSTIC_POP(); + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } + /* reset gloval values after moving to expired list */ + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + } +convert_format: + err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize); + if (err < 0) { + DHD_ERROR(("failed to convert the data into upper layer format\n")); + goto exit; + } + } +exit: + if (plbestnet_v1) + MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN); + if (_params) { + _params->params_batch.get_batch.buf = NULL; + _params->params_batch.get_batch.bufsize = 0; + _params->params_batch.get_batch.bytes_written = err; + } + mutex_unlock(&_pno_state->pno_mutex); +exit_no_unlock: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) + if (waitqueue_active(&_pno_state->get_batch_done)) { + _pno_state->batch_recvd = TRUE; + wake_up(&_pno_state->get_batch_done); + } +#else + if (waitqueue_active(&_pno_state->get_batch_done.wait)) + complete(&_pno_state->get_batch_done); +#endif + return err; +} + +static void +_dhd_pno_get_batch_handler(struct work_struct *work) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pub_t *dhd; + struct dhd_pno_batch_params *params_batch; + DHD_PNO(("%s enter\n", __FUNCTION__)); + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + _pno_state = container_of(work, struct 
dhd_pno_status_info, work); + GCC_DIAGNOSTIC_POP(); + + dhd = _pno_state->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef GSCAN_SUPPORT + _dhd_pno_get_gscan_batch_from_fw(dhd); +#endif /* GSCAN_SUPPORT */ + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + + _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf, + params_batch->get_batch.bufsize, params_batch->get_batch.reason); + } +} + +int +dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + char *pbuf = buf; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_batch_params *params_batch; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + gscan_params->reason = reason; + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(gscan_params), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } else +#endif + { + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + memset(pbuf, 0, bufsize); + pbuf += snprintf(pbuf, bufsize, "scancount=%d\n", 0); + snprintf(pbuf, bufsize, "%s", RESULTS_END_MARKER); + err = strlen(buf); + goto exit; + } + params_batch->get_batch.buf = buf; + params_batch->get_batch.bufsize = bufsize; + params_batch->get_batch.reason = reason; + params_batch->get_batch.bytes_written = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) + _pno_state->batch_recvd = FALSE; +#endif + schedule_work(&_pno_state->work); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) + wait_event(_pno_state->get_batch_done, _pno_state->batch_recvd); +#else + wait_for_completion(&_pno_state->get_batch_done); +#endif + } + +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#endif + err = params_batch->get_batch.bytes_written; +exit: + return err; +} + +int +dhd_pno_stop_for_batch(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode = 0; + int i = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("Gscan is ongoing, nothing to stop here\n")); + return err; + } +#endif + + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", 
__FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) { + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + _pno_state->pno_mode = mode; + /* restart Legacy PNO if the Legacy PNO is on */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr, + _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, + _params_legacy->chan_list, _params_legacy->nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh, + sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + i = 0; + /* convert dhd_pno_bssid to wl_pfn_bssid */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + GCC_DIAGNOSTIC_POP(); + memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid[i].flags = iter->flags; + i++; + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + MFREE(dhd->osh, p_pfn_bssid, + sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid); + return err; +} + +int +dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) +{ + int err = BCME_OK; + int i; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0; + int tot_nchan = 0; + int mode = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + struct dhd_pno_bssid *_pno_bssid; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(hotlist_params, "hotlist_params is NULL", err); + NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + _pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, 
DHD_PNO_HOTLIST_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } + _params->params_batch.nchan = hotlist_params->nchan; + _params->params_batch.scan_fr = hotlist_params->scan_fr; + if (hotlist_params->nchan) + memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list, + sizeof(_params->params_hotlist.chan_list)); + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan; + if (hotlist_params->band == WLC_BAND_2G || +#ifdef WL_6G_BAND + hotlist_params->band == WLC_BAND_6G || +#endif /* WL_6G_BAND */ + hotlist_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_hotlist.chan_list[hotlist_params->nchan], + &rem_nchan, hotlist_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, hotlist_params->band)); + goto exit; + } + /* now we need to update nchan because rem_chan has valid channel count */ + _params->params_hotlist.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan, + sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + int i; + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_batch.nchan; i++) { + DHD_PNO(("%d ", _params->params_batch.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif + if (_params->params_hotlist.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, _params->params_hotlist.chan_list, + sizeof(_chan_list)); + tot_nchan = _params->params_hotlist.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID is on progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset for channelist between two mode */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && + _params->params_hotlist.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_hotlist.chan_list[0], + _params->params_hotlist.nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + "between legacy and hotlist\n", + __FUNCTION__)); + goto exit; + } + } + + } + + INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list)); + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) { + DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + for (i = 0; i < hotlist_params->nbssid; i++) { + _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh, + sizeof(struct dhd_pno_bssid)); + NULL_CHECK(_pno_bssid, "_pfn_bssid is NULL", err); + 
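+		/* Mirror each firmware-format wl_pfn_bssid_t into a driver-side
+		 * dhd_pno_bssid node so the hotlist can be rebuilt later, e.g.
+		 * dhd_pno_stop_for_batch() converts this list back into a
+		 * wl_pfn_bssid_t array to reprogram firmware.
+		 */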
memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN); + _pno_bssid->flags = p_pfn_bssid[i].flags; + list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list); + } + _params->params_hotlist.nbssid = hotlist_params->nbssid; + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + return err; +} + +int +dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + DHD_ERROR(("%s : Hotlist MODE is not enabled\n", + __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previos pno mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + /* restart Legacy PNO Scan */ + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr, + _params_legacy->pno_repeat, _params_legacy->pno_freq_expo_max, + _params_legacy->chan_list, _params_legacy->nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* restart Batching Scan */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + return err; +} + +#ifdef GSCAN_SUPPORT +int +dhd_retreive_batch_scan_results(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_batch_params *params_batch; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) { + DHD_PNO(("Retreive batch results\n")); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + 
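+		/* Mark the retrieval in progress before scheduling the work item;
+		 * the smp_wmb() below orders the flag update against the worker,
+		 * and dhd_pno_get_for_batch() waits on batch_get_wait until
+		 * _dhd_pno_get_gscan_batch_from_fw() flips the flag back to
+		 * GSCAN_BATCH_RETRIEVAL_COMPLETE and wakes it.
+		 */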
_params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS; + smp_wmb(); + schedule_work(&_pno_state->work); + } else { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval" + "already in progress, will skip\n", __FUNCTION__)); + err = BCME_ERROR; + } + + return err; +} + +void +dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type) +{ + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + gscan_results_cache_t *iter, *tmp; + + if (!_pno_state) { + return; + } + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (type == HOTLIST_FOUND) { + iter = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = NULL; + } else { + iter = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = NULL; + } + + while (iter) { + tmp = iter->next; + MFREE(dhd->osh, iter, + ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + iter = tmp; + } + + return; +} + +void * +dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len, int *size) +{ + wl_bss_info_t *bi = NULL; + wl_gscan_result_t *gscan_result; + wifi_gscan_full_result_t *result = NULL; + u32 bi_length = 0; + uint8 channel; + uint32 mem_needed; + struct osl_timespec ts; + u32 bi_ie_length = 0; + u32 bi_ie_offset = 0; + + *size = 0; + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + gscan_result = (wl_gscan_result_t *)data; + GCC_DIAGNOSTIC_POP(); + if (!gscan_result) { + DHD_ERROR(("Invalid gscan result (NULL pointer)\n")); + goto exit; + } + + if ((len < sizeof(*gscan_result)) || + (len < dtoh32(gscan_result->buflen)) || + (dtoh32(gscan_result->buflen) > + (sizeof(*gscan_result) + WL_SCAN_IE_LEN_MAX))) { + DHD_ERROR(("%s: invalid gscan buflen:%u\n", __FUNCTION__, + dtoh32(gscan_result->buflen))); + goto exit; + } + + bi = &gscan_result->bss_info[0].info; + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(gscan_result->buflen) - + WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) { + DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + bi_ie_offset = dtoh32(bi->ie_offset); + bi_ie_length = dtoh32(bi->ie_length); + if ((bi_ie_offset + bi_ie_length) > bi_length) { + DHD_ERROR(("%s: Invalid ie_length:%u or ie_offset:%u\n", + __FUNCTION__, bi_ie_length, bi_ie_offset)); + goto exit; + } + if (bi->SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length:%u\n", __FUNCTION__, bi->SSID_len)); + goto exit; + } + + mem_needed = OFFSETOF(wifi_gscan_full_result_t, ie_data) + bi->ie_length; + result = (wifi_gscan_full_result_t *)MALLOC(dhd->osh, mem_needed); + if (!result) { + DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n", + __FUNCTION__, mem_needed)); + goto exit; + } + + result->scan_ch_bucket = gscan_result->scan_ch_bucket; + memcpy(result->fixed.ssid, bi->SSID, bi->SSID_len); + result->fixed.ssid[bi->SSID_len] = '\0'; + channel = wf_chspec_ctlchspec(bi->chanspec); + result->fixed.channel = wl_channel_to_frequency(channel, CHSPEC_BAND(channel)); + result->fixed.rssi = (int32) bi->RSSI; + result->fixed.rtt = 0; + result->fixed.rtt_sd = 0; + osl_get_monotonic_boottime(&ts); + result->fixed.ts = (uint64) TIMESPEC_TO_US(ts); + result->fixed.beacon_period = dtoh16(bi->beacon_period); + result->fixed.capability = dtoh16(bi->capability); + result->ie_length = bi_ie_length; + memcpy(&result->fixed.macaddr, &bi->BSSID, ETHER_ADDR_LEN); + memcpy(result->ie_data, ((uint8 *)bi + bi_ie_offset), 
bi_ie_length); + *size = mem_needed; +exit: + return result; +} + +static void * +dhd_pno_update_pfn_v3_results(dhd_pub_t *dhd, wl_pfn_scanresults_v3_t *pfn_result, + uint32 *mem_needed, struct dhd_pno_gscan_params *gscan_params, uint32 event) +{ + uint32 i; + uint8 ssid[DOT11_MAX_SSID_LEN + 1]; + struct ether_addr *bssid; + wl_pfn_net_info_v3_t *net_info = NULL; + dhd_epno_results_t *results = NULL; + + if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V3)) { + DHD_ERROR(("%s event %d: wrong pfn v3 results count %d\n", + __FUNCTION__, event, pfn_result->count)); + return NULL; + } + + *mem_needed = sizeof(dhd_epno_results_t) * pfn_result->count; + results = (dhd_epno_results_t *)MALLOC(dhd->osh, (*mem_needed)); + if (!results) { + DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__, + *mem_needed)); + return NULL; + } + for (i = 0; i < pfn_result->count; i++) { + net_info = &pfn_result->netinfo[i]; + results[i].rssi = net_info->RSSI; + results[i].channel = wl_channel_to_frequency( + CHSPEC_CHANNEL(net_info->pfnsubnet.chanspec), + CHSPEC_BAND(net_info->pfnsubnet.chanspec)); + results[i].flags = (event == WLC_E_PFN_NET_FOUND) ? + WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST; + results[i].ssid_len = min(net_info->pfnsubnet.SSID_len, + (uint8)DOT11_MAX_SSID_LEN); + bssid = &results[i].bssid; + (void)memcpy_s(bssid, ETHER_ADDR_LEN, + &net_info->pfnsubnet.BSSID, ETHER_ADDR_LEN); + if (!net_info->pfnsubnet.SSID_len) { + dhd_pno_idx_to_ssid(gscan_params, &results[i], + net_info->pfnsubnet.u.index); + } else { + (void)memcpy_s(results[i].ssid, DOT11_MAX_SSID_LEN, + net_info->pfnsubnet.u.SSID, results[i].ssid_len); + } + (void)memcpy_s(ssid, DOT11_MAX_SSID_LEN, results[i].ssid, results[i].ssid_len); + ssid[results[i].ssid_len] = '\0'; + DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n", + ssid, MAC2STRDBG(bssid->octet), results[i].channel, + results[i].rssi, results[i].flags)); + } + + return results; +} + +void * +dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, uint32 event, int *size) +{ + dhd_epno_results_t *results = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + uint32 count, mem_needed = 0, i; + uint8 ssid[DOT11_MAX_SSID_LEN + 1]; + struct ether_addr *bssid; + + *size = 0; + if (!_pno_state) + return NULL; + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) { + wl_pfn_scanresults_v1_t *pfn_result = (wl_pfn_scanresults_v1_t *)data; + wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data; + wl_pfn_scanresults_v3_t *pfn_result_v3 = (wl_pfn_scanresults_v3_t *)data; + wl_pfn_net_info_v1_t *net; + wl_pfn_net_info_v2_t *net_v2; + + if (pfn_result->version == PFN_SCANRESULT_VERSION_V1) { + if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V1)) { + DHD_ERROR(("%s event %d: wrong pfn v1 results count %d\n", + __FUNCTION__, event, pfn_result->count)); + return NULL; + } + count = pfn_result->count; + mem_needed = sizeof(dhd_epno_results_t) * count; + results = (dhd_epno_results_t *)MALLOC(dhd->osh, mem_needed); + if (!results) { + DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__, + mem_needed)); + return NULL; + } + for (i = 0; i < count; i++) { + net = &pfn_result->netinfo[i]; + results[i].rssi = net->RSSI; + results[i].channel = wf_channel2mhz(net->pfnsubnet.channel, + (net->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + results[i].flags = (event == WLC_E_PFN_NET_FOUND) ? + WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST; + results[i].ssid_len = min(net->pfnsubnet.SSID_len, + (uint8)DOT11_MAX_SSID_LEN); + bssid = &results[i].bssid; + (void)memcpy_s(bssid, ETHER_ADDR_LEN, + &net->pfnsubnet.BSSID, ETHER_ADDR_LEN); + if (!net->pfnsubnet.SSID_len) { + DHD_ERROR(("%s: Gscan results indexing is not" + " supported in version 1 \n", __FUNCTION__)); + MFREE(dhd->osh, results, mem_needed); + return NULL; + } else { + (void)memcpy_s(results[i].ssid, DOT11_MAX_SSID_LEN, + net->pfnsubnet.SSID, results[i].ssid_len); + } + (void)memcpy_s(ssid, DOT11_MAX_SSID_LEN, + results[i].ssid, results[i].ssid_len); + ssid[results[i].ssid_len] = '\0'; + DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n", + ssid, MAC2STRDBG(bssid->octet), results[i].channel, + results[i].rssi, results[i].flags)); + } + } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) { + if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V2)) { + DHD_ERROR(("%s event %d: wrong pfn v2 results count %d\n", + __FUNCTION__, event, pfn_result->count)); + return NULL; + } + count = pfn_result_v2->count; + mem_needed = sizeof(dhd_epno_results_t) * count; + results = (dhd_epno_results_t *)MALLOC(dhd->osh, mem_needed); + if (!results) { + DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__, + mem_needed)); + return NULL; + } + for (i = 0; i < count; i++) { + net_v2 = &pfn_result_v2->netinfo[i]; + results[i].rssi = net_v2->RSSI; + results[i].channel = wf_channel2mhz(net_v2->pfnsubnet.channel, + (net_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + results[i].flags = (event == WLC_E_PFN_NET_FOUND) ? 
+ WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST; + results[i].ssid_len = min(net_v2->pfnsubnet.SSID_len, + (uint8)DOT11_MAX_SSID_LEN); + bssid = &results[i].bssid; + (void)memcpy_s(bssid, ETHER_ADDR_LEN, + &net_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN); + if (!net_v2->pfnsubnet.SSID_len) { + dhd_pno_idx_to_ssid(gscan_params, &results[i], + net_v2->pfnsubnet.u.index); + } else { + (void)memcpy_s(results[i].ssid, DOT11_MAX_SSID_LEN, + net_v2->pfnsubnet.u.SSID, results[i].ssid_len); + } + (void)memcpy_s(ssid, DOT11_MAX_SSID_LEN, + results[i].ssid, results[i].ssid_len); + ssid[results[i].ssid_len] = '\0'; + DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n", + ssid, MAC2STRDBG(bssid->octet), results[i].channel, + results[i].rssi, results[i].flags)); + } + } else if (pfn_result_v3->version == PFN_SCANRESULT_VERSION_V3) { + results = dhd_pno_update_pfn_v3_results(dhd, pfn_result_v3, &mem_needed, + gscan_params, event); + if (results == NULL) { + return results; + } + } else { + DHD_ERROR(("%s event %d: Incorrect version %d , not supported\n", + __FUNCTION__, event, pfn_result->version)); + return NULL; + } + } + *size = mem_needed; + return results; +} + +static void * +dhd_pno_update_hotlist_v3_results(dhd_pub_t *dhd, wl_pfn_scanresults_v3_t *pfn_result, + int *send_evt_bytes, hotlist_type_t type, u32 *buf_len) +{ + u32 malloc_size = 0, i; + struct osl_timespec tm_spec; + struct dhd_pno_gscan_params *gscan_params; + gscan_results_cache_t *gscan_hotlist_cache; + wifi_gscan_result_t *hotlist_found_array; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + wl_pfn_net_info_v3_t *pnetinfo = (wl_pfn_net_info_v3_t*)&pfn_result->netinfo[0]; + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (!pfn_result->count || (pfn_result->count > EVENT_MAX_NETCNT_V3)) { + DHD_ERROR(("%s: wrong v3 fwcount:%d\n", __FUNCTION__, pfn_result->count)); + *send_evt_bytes = 0; + return NULL; + } + + osl_get_monotonic_boottime(&tm_spec); + malloc_size = sizeof(gscan_results_cache_t) + + ((pfn_result->count - 1) * sizeof(wifi_gscan_result_t)); + gscan_hotlist_cache = + (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size); + if (!gscan_hotlist_cache) { + DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size)); + *send_evt_bytes = 0; + return NULL; + } + *buf_len = malloc_size; + if (type == HOTLIST_FOUND) { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = gscan_hotlist_cache; + DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, pfn_result->count)); + } else { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = gscan_hotlist_cache; + DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, pfn_result->count)); + } + + gscan_hotlist_cache->tot_count = pfn_result->count; + gscan_hotlist_cache->tot_consumed = 0; + gscan_hotlist_cache->scan_ch_bucket = pfn_result->scan_ch_bucket; + + for (i = 0; i < pfn_result->count; i++, pnetinfo++) { + hotlist_found_array = &gscan_hotlist_cache->results[i]; + (void)memset_s(hotlist_found_array, sizeof(wifi_gscan_result_t), + 0, sizeof(wifi_gscan_result_t)); + hotlist_found_array->channel = wl_channel_to_frequency( + CHSPEC_CHANNEL(pnetinfo->pfnsubnet.chanspec), + CHSPEC_BAND(pnetinfo->pfnsubnet.chanspec)); + hotlist_found_array->rssi = (int32) pnetinfo->RSSI; + + hotlist_found_array->ts = + convert_fw_rel_time_to_systime(&tm_spec, + (pnetinfo->timestamp * 1000)); + if (pnetinfo->pfnsubnet.SSID_len > 
DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+				pnetinfo->pfnsubnet.SSID_len));
+			pnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+		}
+		(void)memcpy_s(hotlist_found_array->ssid, DOT11_MAX_SSID_LEN,
+			pnetinfo->pfnsubnet.u.SSID, pnetinfo->pfnsubnet.SSID_len);
+		hotlist_found_array->ssid[pnetinfo->pfnsubnet.SSID_len] = '\0';
+
+		(void)memcpy_s(&hotlist_found_array->macaddr, ETHER_ADDR_LEN,
+			&pnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+		DHD_PNO(("\t%s "MACDBG" rssi %d\n",
+			hotlist_found_array->ssid,
+			MAC2STRDBG(hotlist_found_array->macaddr.octet),
+			hotlist_found_array->rssi));
+	}
+
+	return gscan_hotlist_cache;
+}
+
+void *
+dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+	int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
+{
+	void *ptr = NULL;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	struct dhd_pno_gscan_params *gscan_params;
+	wl_pfn_scanresults_v1_t *results_v1 = (wl_pfn_scanresults_v1_t *)event_data;
+	wl_pfn_scanresults_v2_t *results_v2 = (wl_pfn_scanresults_v2_t *)event_data;
+	wl_pfn_scanresults_v3_t *results_v3 = (wl_pfn_scanresults_v3_t *)event_data;
+	wifi_gscan_result_t *hotlist_found_array;
+	wl_pfn_net_info_v1_t *pnetinfo;
+	wl_pfn_net_info_v2_t *pnetinfo_v2;
+	gscan_results_cache_t *gscan_hotlist_cache;
+	u32 malloc_size = 0, i, total = 0;
+	struct osl_timespec tm_spec;
+	uint16 fwstatus;
+	uint16 fwcount;
+
+	/* Static asserts in _dhd_pno_get_for_batch() above guarantee the v1 and v2
+	 * net_info and subnet_info structures are compatible in size and SSID offset,
+	 * allowing v1 to be safely used in the code below except for the scanresults
+	 * fields themselves (status, count, offset to netinfo).
+	 */
+
+	*buf_len = 0;
+	if (results_v1->version == PFN_SCANRESULTS_VERSION_V1) {
+		fwstatus = results_v1->status;
+		fwcount = results_v1->count;
+		pnetinfo = &results_v1->netinfo[0];
+
+		gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+		if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V1)) {
+			DHD_ERROR(("%s: wrong v1 fwcount:%d\n", __FUNCTION__, fwcount));
+			*send_evt_bytes = 0;
+			return ptr;
+		}
+
+		osl_get_monotonic_boottime(&tm_spec);
+		malloc_size = sizeof(gscan_results_cache_t) +
+			((fwcount - 1) * sizeof(wifi_gscan_result_t));
+		gscan_hotlist_cache = (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size);
+		if (!gscan_hotlist_cache) {
+			DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
+			*send_evt_bytes = 0;
+			return ptr;
+		}
+
+		*buf_len = malloc_size;
+		if (type == HOTLIST_FOUND) {
+			gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+			gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+			DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount));
+		} else {
+			gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+			gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+			DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount));
+		}
+
+		gscan_hotlist_cache->tot_count = fwcount;
+		gscan_hotlist_cache->tot_consumed = 0;
+
+		for (i = 0; i < fwcount; i++, pnetinfo++) {
+			hotlist_found_array = &gscan_hotlist_cache->results[i];
+			memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t));
+			hotlist_found_array->channel = wf_channel2mhz(pnetinfo->pfnsubnet.channel,
+				(pnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + hotlist_found_array->rssi = (int32) pnetinfo->RSSI; + + hotlist_found_array->ts = + convert_fw_rel_time_to_systime(&tm_spec, + (pnetinfo->timestamp * 1000)); + if (pnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", + pnetinfo->pfnsubnet.SSID_len)); + pnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + (void)memcpy_s(hotlist_found_array->ssid, DOT11_MAX_SSID_LEN, + pnetinfo->pfnsubnet.SSID, pnetinfo->pfnsubnet.SSID_len); + hotlist_found_array->ssid[pnetinfo->pfnsubnet.SSID_len] = '\0'; + + (void)memcpy_s(&hotlist_found_array->macaddr, ETHER_ADDR_LEN, + &pnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + DHD_PNO(("\t%s "MACDBG" rssi %d\n", + hotlist_found_array->ssid, + MAC2STRDBG(hotlist_found_array->macaddr.octet), + hotlist_found_array->rssi)); + } + } else if (results_v2->version == PFN_SCANRESULTS_VERSION_V2) { + fwstatus = results_v2->status; + fwcount = results_v2->count; + pnetinfo_v2 = (wl_pfn_net_info_v2_t*)&results_v2->netinfo[0]; + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V2)) { + DHD_ERROR(("%s: wrong v2 fwcount:%d\n", __FUNCTION__, fwcount)); + *send_evt_bytes = 0; + return ptr; + } + + osl_get_monotonic_boottime(&tm_spec); + malloc_size = sizeof(gscan_results_cache_t) + + ((fwcount - 1) * sizeof(wifi_gscan_result_t)); + gscan_hotlist_cache = + (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size); + if (!gscan_hotlist_cache) { + DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size)); + *send_evt_bytes = 0; + return ptr; + } + *buf_len = malloc_size; + if (type == HOTLIST_FOUND) { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = gscan_hotlist_cache; + DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount)); + } else { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = gscan_hotlist_cache; + DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount)); + } + + gscan_hotlist_cache->tot_count = fwcount; + gscan_hotlist_cache->tot_consumed = 0; + gscan_hotlist_cache->scan_ch_bucket = results_v2->scan_ch_bucket; + + for (i = 0; i < fwcount; i++, pnetinfo_v2++) { + hotlist_found_array = &gscan_hotlist_cache->results[i]; + memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t)); + hotlist_found_array->channel = + wf_channel2mhz(pnetinfo_v2->pfnsubnet.channel, + (pnetinfo_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + hotlist_found_array->rssi = (int32) pnetinfo_v2->RSSI; + + hotlist_found_array->ts = + convert_fw_rel_time_to_systime(&tm_spec, + (pnetinfo_v2->timestamp * 1000)); + if (pnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", + pnetinfo_v2->pfnsubnet.SSID_len)); + pnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + (void)memcpy_s(hotlist_found_array->ssid, DOT11_MAX_SSID_LEN, + pnetinfo_v2->pfnsubnet.u.SSID, pnetinfo_v2->pfnsubnet.SSID_len); + hotlist_found_array->ssid[pnetinfo_v2->pfnsubnet.SSID_len] = '\0'; + + (void)memcpy_s(&hotlist_found_array->macaddr, ETHER_ADDR_LEN, + &pnetinfo_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN); + DHD_PNO(("\t%s "MACDBG" rssi %d\n", + hotlist_found_array->ssid, + MAC2STRDBG(hotlist_found_array->macaddr.octet), + hotlist_found_array->rssi)); + } + } else if (results_v3->version == PFN_SCANRESULTS_VERSION_V3) { + fwstatus = results_v3->status; + gscan_hotlist_cache = (gscan_results_cache_t *)dhd_pno_update_hotlist_v3_results( + dhd, results_v3, send_evt_bytes, type, buf_len); + } else { + DHD_ERROR(("%s: event version %d not supported\n", + __FUNCTION__, results_v1->version)); + *send_evt_bytes = 0; + return ptr; + } + if (fwstatus == PFN_COMPLETE) { + ptr = (void *) gscan_hotlist_cache; + while (gscan_hotlist_cache) { + total += gscan_hotlist_cache->tot_count; + gscan_hotlist_cache = gscan_hotlist_cache->next; + } + *send_evt_bytes = total * sizeof(wifi_gscan_result_t); + } + + return ptr; +} +#endif /* GSCAN_SUPPORT */ + +int +dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + uint event_type; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + event_type = ntoh32(event->event_type); + DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type)); + switch (event_type) { + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BSSID_NET_LOST: + /* how can we inform this to framework ? 
*/
+		/* TODO : need to implement event logic using generic netlink */
+		break;
+	case WLC_E_PFN_BEST_BATCHING:
+#ifndef GSCAN_SUPPORT
+	{
+		struct dhd_pno_batch_params *params_batch;
+		params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+		if (!waitqueue_active(&_pno_state->get_batch_done))
+#else
+		if (!waitqueue_active(&_pno_state->get_batch_done.wait))
+#endif
+		{
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
+			params_batch->get_batch.buf = NULL;
+			params_batch->get_batch.bufsize = 0;
+			params_batch->get_batch.reason = PNO_STATUS_EVENT;
+			schedule_work(&_pno_state->work);
+		} else
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING "
+				"will skip this event\n", __FUNCTION__));
+		break;
+	}
+#else
+		break;
+#endif /* !GSCAN_SUPPORT */
+	default:
+		DHD_ERROR(("unknown event : %d\n", event_type));
+	}
+exit:
+	return err;
+}
+
+int dhd_pno_init(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	char *buf = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	UNUSED_PARAMETER(_dhd_pno_suspend);
+	if (dhd->pno_state)
+		goto exit;
+	dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t));
+	NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err);
+	memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t));
+	/* need to check whether current firmware support batching and hotlist scan */
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_pno_state->wls_supported = TRUE;
+	_pno_state->dhd = dhd;
+	mutex_init(&_pno_state->pno_mutex);
+	INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+	init_waitqueue_head(&_pno_state->get_batch_done);
+#else
+	init_completion(&_pno_state->get_batch_done);
+#endif
+#ifdef GSCAN_SUPPORT
+	init_waitqueue_head(&_pno_state->batch_get_wait);
+#endif /* GSCAN_SUPPORT */
+	buf = MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+	if (!buf) {
+		DHD_ERROR(("%s: buf alloc err.\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+	err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, buf, WLC_IOCTL_SMLEN,
+		FALSE);
+	if (err == BCME_UNSUPPORTED) {
+		_pno_state->wls_supported = FALSE;
+		DHD_ERROR(("Android Location Service, UNSUPPORTED\n"));
+		DHD_INFO(("Current firmware doesn't support"
+			" Android Location Service\n"));
+	} else {
+		DHD_ERROR(("%s: Support Android Location Service\n",
+			__FUNCTION__));
+	}
+exit:
+	if (buf) {
+		MFREE(dhd->osh, buf, WLC_IOCTL_SMLEN);
+	}
+	return err;
+}
+
+int dhd_pno_deinit(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	NULL_CHECK(_pno_state, "pno_state is NULL", err);
+	/* may need to free legacy ssid_list */
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+		mutex_lock(&_pno_state->pno_mutex);
+		dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+		mutex_unlock(&_pno_state->pno_mutex);
+	}
+#endif /* GSCAN_SUPPORT */
+
+	if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+		/* clear resource if the BATCH MODE is on */
_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + } + cancel_work_sync(&_pno_state->work); + MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t)); + dhd->pno_state = NULL; + return err; +} +#endif /* OEM_ANDROID */ + +#ifndef OEM_ANDROID +#if defined(NDIS) +#define DHD_IOVAR_BUF_SIZE 128 +int +dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg) +{ + int ret = -1; + uint len = 0; + char iovbuf[2 * DHD_IOVAR_BUF_SIZE]; + + if (!dhd) + return ret; + memset(iovbuf, 0, sizeof(iovbuf)); + if ((len = + bcm_mkiovar("pfn_cfg", (char *)pcfg, + sizeof(wl_pfn_cfg_t), iovbuf, sizeof(iovbuf))) > 0) { + if ((ret = + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) + DHD_ERROR(("%s failed for error=%d\n", + __FUNCTION__, ret)); + else + DHD_ERROR(("%s set OK\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s iovar failed\n", __FUNCTION__)); + ret = -1; + } + + return ret; +} + +int +dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int ret = -1; + + if ((!dhd) || ((pfn_suspend != 0) && (pfn_suspend != 1))) { + DHD_ERROR(("%s error exit\n", __FUNCTION__)); + return ret; + } + + memset(iovbuf, 0, sizeof(iovbuf)); + /* suspend/resume PNO */ + if ((ret = bcm_mkiovar("pfn_suspend", (char *)&pfn_suspend, 4, iovbuf, + sizeof(iovbuf))) > 0) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret)); + else { + DHD_TRACE(("%s set pno to %s\n", __FUNCTION__, + (pfn_suspend? "suspend" : "resume"))); + dhd->pno_suspend = pfn_suspend; + } + } + else { + DHD_ERROR(("%s failed at mkiovar, err=%d\n", __FUNCTION__, ret)); + ret = -1; + } + + return ret; +} + +int +dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr, ushort slowscan_fr, + uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags) +{ + int err = -1; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int k, i; + wl_pfn_param_t pfn_param; + wl_pfn_t pfn_element; + uint len = 0; + + DHD_TRACE(("%s nssid=%d scan_fr=%d\n", __FUNCTION__, nssid, scan_fr)); + + if ((!dhd) || (!netinfo) || + (nssid > MAX_PFN_LIST_COUNT) || (nssid <= 0)) { + DHD_ERROR(("%s error exit\n", __FUNCTION__)); + return err; + } + + /* Check for broadcast ssid */ + for (k = 0; k < nssid; k++) { + if (!netinfo[k].ssid.SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k)); + return err; + } + } + + /* clean up everything */ + if (dhd_pno_clean(dhd) < 0) { + DHD_ERROR(("%s failed\n", __FUNCTION__)); + return err; + } + memset(&pfn_param, 0, sizeof(pfn_param)); + memset(&pfn_element, 0, sizeof(pfn_element)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = htod16(flags |(PFN_LIST_ORDER << SORT_CRITERIA_BIT)); + + /* set extra pno params */ + pfn_param.repeat = pno_repeat; + pfn_param.exp = pno_freq_expo_max; + pfn_param.slow_freq = slowscan_fr; + + /* set up pno scan fr */ + if (scan_fr > PNO_SCAN_MAX_FW_SEC) { + DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC)); + return err; + } + if (scan_fr < PNO_SCAN_MIN_FW_SEC) { + DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); + return err; + } + pfn_param.scan_freq = htod32(scan_fr); + if (slowscan_fr) + pfn_param.lost_network_timeout = -1; /* so no aging out */ + memset(iovbuf, 0, sizeof(iovbuf)); + len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf)); + if (!len) + return err; + + if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, 
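+		/* 'len' from bcm_mkiovar() above covers the iovar name plus the
+		 * wl_pfn_param_t payload
+		 */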
len, TRUE, 0) < 0)
+		return err;
+
+	/* set all pfn ssid */
+	for (i = 0; i < nssid; i++) {
+		pfn_element.infra = htod32(1);
+		pfn_element.auth = htod32(netinfo[i].auth);
+		pfn_element.wpa_auth = htod32(netinfo[i].wpa_auth);
+		pfn_element.wsec = htod32(netinfo[i].wsec);
+		pfn_element.flags = htod32(netinfo[i].flags);
+
+		memcpy((char *)pfn_element.ssid.SSID, netinfo[i].ssid.SSID,
+			netinfo[i].ssid.SSID_len);
+		pfn_element.ssid.SSID_len = netinfo[i].ssid.SSID_len;
+
+		if ((len =
+			bcm_mkiovar("pfn_add", (char *)&pfn_element,
+			sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+			if ((err =
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+				DHD_ERROR(("%s failed for i=%d error=%d\n",
+					__FUNCTION__, i, err));
+				return err;
+			} else {
+				DHD_ERROR(("%s set ssid %s\n",
+					__FUNCTION__, netinfo[i].ssid.SSID));
+			}
+		} else {
+			DHD_ERROR(("%s: mkiovar pfn_add failed\n", __FUNCTION__));
+		}
+
+		memset(&pfn_element, 0, sizeof(pfn_element));
+	}
+
+	return err;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int ret = -1;
+
+	if ((!dhd) || ((pfn_enabled != 0) && (pfn_enabled != 1))) {
+		DHD_ERROR(("%s error exit\n", __FUNCTION__));
+		return ret;
+	}
+
+#ifndef WL_SCHED_SCAN
+	memset(iovbuf, 0, sizeof(iovbuf));
+
+	if ((pfn_enabled) && (dhd_is_associated(dhd, 0, NULL) == TRUE)) {
+		DHD_ERROR(("%s: pno is NOT enabled: called in assoc mode, ignore\n",
+			__FUNCTION__));
+		return ret;
+	}
+#endif /* !WL_SCHED_SCAN */
+
+	/* make sure PNO is not suspended when it is going to be enabled */
+	if (pfn_enabled) {
+		int pfn_suspend = 0;
+		memset(iovbuf, 0, sizeof(iovbuf));
+		if ((ret = bcm_mkiovar("pfn_suspend", (char *)&pfn_suspend, 4, iovbuf,
+			sizeof(iovbuf))) > 0) {
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+				sizeof(iovbuf), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s: pfn_suspend failed, error=%d\n",
+					__FUNCTION__, ret));
+				return ret;
+			} else {
+				DHD_TRACE(("pno resumed\n"));
+			}
+		} else {
+			return -1;
+		}
+	}
+
+	/* Enable/disable PNO */
+	if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+			return ret;
+		} else {
+			dhd->pno_enable = pfn_enabled;
+			DHD_TRACE(("%s set pno as %s\n",
+				__FUNCTION__, dhd->pno_enable ?
+				"Enable" : "Disable"));
+		}
+	} else {
+		DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int pfn_enabled = 0;
+	int iov_len = 0;
+	int ret;
+
+	/* Disable pfn */
+	iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %d\n",
+			__FUNCTION__, (int)sizeof(iovbuf)));
+		return -1;
+	}
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) >= 0) {
+		/* clear pfn */
+		iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
+		if (iov_len) {
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+				iov_len, TRUE, 0)) < 0) {
+				DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+			}
+		} else {
+			ret = -1;
+			DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
+		}
+	} else {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
+#endif /* defined(NDIS) */
+#endif /* OEM_ANDROID */
+#endif /* PNO_SUPPORT */
diff --git a/bcmdhd.101.10.361.x/dhd_pno.h b/bcmdhd.101.10.361.x/dhd_pno.h
new file mode 100755
index 0000000..b2dd021
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pno.h
@@ -0,0 +1,586 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload code and Wi-Fi Location Service (WLS) code.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + */ + +#ifndef __DHD_PNO_H__ +#define __DHD_PNO_H__ + +#if defined(OEM_ANDROID) && defined(PNO_SUPPORT) +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION '1' +#define PNO_TLV_SUBTYPE_LEGACY_PNO '2' +#define PNO_TLV_RESERVED '0' + +#define PNO_BATCHING_SET "SET" +#define PNO_BATCHING_GET "GET" +#define PNO_BATCHING_STOP "STOP" + +#define PNO_PARAMS_DELIMETER " " +#define PNO_PARAM_CHANNEL_DELIMETER "," +#define PNO_PARAM_VALUE_DELLIMETER '=' +#define PNO_PARAM_SCANFREQ "SCANFREQ" +#define PNO_PARAM_BESTN "BESTN" +#define PNO_PARAM_MSCAN "MSCAN" +#define PNO_PARAM_CHANNEL "CHANNEL" +#define PNO_PARAM_RTT "RTT" + +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' + +#define MAXNUM_SSID_PER_ADD 16 +#define MAXNUM_PNO_PARAMS 2 +#define PNO_TLV_COMMON_LENGTH 1 +#define DEFAULT_BATCH_MSCAN 16 + +#define RESULTS_END_MARKER "----\n" +#define SCAN_END_MARKER "####\n" +#define AP_END_MARKER "====\n" +#define PNO_RSSI_MARGIN_DBM 30 + +#define CSCAN_COMMAND "CSCAN " +#define CSCAN_TLV_PREFIX 'S' +#define CSCAN_TLV_VERSION 1 +#define CSCAN_TLV_SUBVERSION 0 +#define CSCAN_TLV_TYPE_SSID_IE 'S' +#define CSCAN_TLV_TYPE_CHANNEL_IE 'C' +#define CSCAN_TLV_TYPE_NPROBE_IE 'N' +#define CSCAN_TLV_TYPE_ACTIVE_IE 'A' +#define CSCAN_TLV_TYPE_PASSIVE_IE 'P' +#define CSCAN_TLV_TYPE_HOME_IE 'H' +#define CSCAN_TLV_TYPE_STYPE_IE 'T' + +#define WL_SCAN_PARAMS_SSID_MAX 10 +#define GET_SSID "SSID=" +#define GET_CHANNEL "CH=" +#define GET_NPROBE "NPROBE=" +#define GET_ACTIVE_ASSOC_DWELL "ACTIVE=" +#define GET_PASSIVE_ASSOC_DWELL "PASSIVE=" +#define GET_HOME_DWELL "HOME=" +#define GET_SCAN_TYPE "TYPE=" + +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +#define GSCAN_MAX_CH_BUCKETS 8 +#define GSCAN_MAX_CHANNELS_IN_BUCKET 32 +#define GSCAN_MAX_AP_CACHE_PER_SCAN 32 +#define GSCAN_MAX_AP_CACHE 320 +#define GSCAN_BG_BAND_MASK (1 << 0) +#define GSCAN_A_BAND_MASK (1 << 1) +#define GSCAN_DFS_MASK (1 << 2) +#define GSCAN_ABG_BAND_MASK (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK) +#define GSCAN_BAND_MASK (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK) + +#define GSCAN_FLUSH_HOTLIST_CFG (1 << 0) +#define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1) +#define GSCAN_FLUSH_SCAN_CFG (1 << 2) +#define GSCAN_FLUSH_EPNO_CFG (1 << 3) +#define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \ + GSCAN_FLUSH_SIGNIFICANT_CFG | \ + GSCAN_FLUSH_HOTLIST_CFG | \ + GSCAN_FLUSH_EPNO_CFG) +#define DHD_EPNO_HIDDEN_SSID (1 << 0) +#define DHD_EPNO_A_BAND_TRIG (1 << 1) +#define DHD_EPNO_BG_BAND_TRIG (1 << 2) +#define DHD_EPNO_STRICT_MATCH (1 << 3) +#define DHD_EPNO_SAME_NETWORK (1 << 4) +#define DHD_PNO_USE_SSID (DHD_EPNO_HIDDEN_SSID | DHD_EPNO_STRICT_MATCH) + +/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */ +#define GSCAN_BATCH_RETRIEVAL_COMPLETE 0 +#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1 +#define GSCAN_BATCH_NO_THR_SET 101 +#define GSCAN_LOST_AP_WINDOW_DEFAULT 4 +#define GSCAN_MIN_BSSID_TIMEOUT 90 +#define GSCAN_BATCH_GET_MAX_WAIT 500 +#define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF +#define GSCAN_RETRY_THRESHOLD 3 + +#define MAX_EPNO_SSID_NUM 64 +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +enum scan_status { + /* SCAN ABORT by other scan */ + PNO_STATUS_ABORT, + /* RTT is presence or not */ + PNO_STATUS_RTT_PRESENCE, + /* Disable PNO by Driver */ + PNO_STATUS_DISABLE, + /* NORMAL BATCHING GET */ + PNO_STATUS_NORMAL, + /* WLC_E_PFN_BEST_BATCHING */ + PNO_STATUS_EVENT, + PNO_STATUS_MAX +}; +#define PNO_STATUS_ABORT_MASK 0x0001 +#define PNO_STATUS_RTT_MASK 
0x0002 +#define PNO_STATUS_DISABLE_MASK 0x0004 +#define PNO_STATUS_OOM_MASK 0x0010 + +enum index_mode { + INDEX_OF_LEGACY_PARAMS, + INDEX_OF_BATCH_PARAMS, + INDEX_OF_HOTLIST_PARAMS, + /* GSCAN includes hotlist scan and they do not run + * independent of each other + */ + INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS, + INDEX_MODE_MAX +}; +enum dhd_pno_status { + DHD_PNO_DISABLED, + DHD_PNO_ENABLED, + DHD_PNO_SUSPEND +}; +typedef struct cmd_tlv { + char prefix; + char version; + char subtype; + char reserved; +} cmd_tlv_t; +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +typedef enum { + HOTLIST_LOST, + HOTLIST_FOUND +} hotlist_type_t; + +typedef enum dhd_pno_gscan_cmd_cfg { + DHD_PNO_BATCH_SCAN_CFG_ID = 0, + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + DHD_PNO_SCAN_CFG_ID, + DHD_PNO_GET_CAPABILITIES, + DHD_PNO_GET_BATCH_RESULTS, + DHD_PNO_GET_CHANNEL_LIST, + DHD_PNO_GET_NEW_EPNO_SSID_ELEM, + DHD_PNO_EPNO_CFG_ID, + DHD_PNO_GET_AUTOJOIN_CAPABILITIES, + DHD_PNO_EPNO_PARAMS_ID +} dhd_pno_gscan_cmd_cfg_t; + +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)), + /* Wi-Fi Google Android SCAN Mode */ + DHD_PNO_GSCAN_MODE = (1 << (3)) +} dhd_pno_mode_t; +#else +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)) +} dhd_pno_mode_t; +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +typedef struct dhd_pno_ssid { + bool hidden; + int8 rssi_thresh; + uint8 dummy; + uint16 SSID_len; + uint32 flags; + int32 wpa_auth; + uchar SSID[DOT11_MAX_SSID_LEN]; + struct list_head list; +} dhd_pno_ssid_t; + +struct dhd_pno_bssid { + struct ether_addr macaddr; + /* Bit4: suppress_lost, Bit3: suppress_found */ + uint16 flags; + struct list_head list; +}; + +typedef struct dhd_pno_bestnet_entry { + struct ether_addr BSSID; + uint8 SSID_len; + uint8 SSID[DOT11_MAX_SSID_LEN]; + int8 RSSI; + uint8 channel; + uint32 timestamp; + uint16 rtt0; /* distance_cm based on RTT */ + uint16 rtt1; /* distance_cm based on sample standard deviation */ + unsigned long recorded_time; + struct list_head list; +} dhd_pno_bestnet_entry_t; +#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t)) + +typedef struct dhd_pno_bestnet_header { + struct dhd_pno_bestnet_header *next; + uint8 reason; + uint32 tot_cnt; + uint32 tot_size; + struct list_head entry_list; +} dhd_pno_best_header_t; +#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t)) + +typedef struct dhd_pno_scan_results { + dhd_pno_best_header_t *bestnetheader; + uint8 cnt_header; + struct list_head list; +} dhd_pno_scan_results_t; +#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t)) + +struct dhd_pno_get_batch_info { + /* info related to get batch */ + char *buf; + bool batch_started; + uint32 tot_scan_cnt; + uint32 expired_tot_scan_cnt; + uint32 top_node_cnt; + uint32 bufsize; + uint32 bytes_written; + int reason; + struct list_head scan_results_list; + struct list_head expired_scan_results_list; +}; +struct dhd_pno_legacy_params { + uint16 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + int pno_repeat; + int pno_freq_expo_max; + int nssid; + struct list_head ssid_list; +}; +struct 
dhd_pno_batch_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 band; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 rtt; + struct dhd_pno_get_batch_info get_batch; +}; +struct dhd_pno_hotlist_params { + uint8 band; + int32 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 nbssid; + struct list_head bssid_list; +}; + +#define DHD_PNO_CHSPEC_SUPPORT_VER 14 + +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +#define DHD_PNO_REPORT_NO_BATCH (1 << 2) + +typedef struct dhd_pno_gscan_channel_bucket { + uint16 bucket_freq_multiple; + /* band = 1 All bg band channels, + * band = 2 All a band channels, + * band = 0 chan_list channels + */ + uint16 band; + uint8 report_flag; + uint8 num_channels; + uint16 repeat; + uint16 bucket_max_multiple; + uint16 chan_list[GSCAN_MAX_CHANNELS_IN_BUCKET]; +} dhd_pno_gscan_channel_bucket_t; + +#define DHD_PNO_AUTH_CODE_OPEN 1 /* Open */ +#define DHD_PNO_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */ +#define DHD_PNO_AUTH_CODE_EAPOL 4 /* any EAPOL */ + +#define DHD_EPNO_DEFAULT_INDEX 0xFFFFFFFF + +typedef struct dhd_epno_params { + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + int8 rssi_thresh; + uint8 flags; + uint8 auth; + /* index required only for visble ssid */ + uint32 index; + struct list_head list; +} dhd_epno_params_t; + +typedef struct dhd_epno_results { + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + int8 rssi; + uint16 channel; + uint16 flags; + struct ether_addr bssid; +} dhd_epno_results_t; + +typedef struct dhd_pno_swc_evt_param { + uint16 results_rxed_so_far; + wl_pfn_significant_net_t *change_array; +} dhd_pno_swc_evt_param_t; + +typedef struct wifi_gscan_result { + uint64 ts; /* Time of discovery */ + char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */ + struct ether_addr macaddr; /* BSSID */ + uint32 channel; /* channel frequency in MHz */ + int32 rssi; /* in db */ + uint64 rtt; /* in nanoseconds */ + uint64 rtt_sd; /* standard deviation in rtt */ + uint16 beacon_period; /* units are Kusec */ + uint16 capability; /* Capability information */ + uint32 pad; +} wifi_gscan_result_t; + +typedef struct wifi_gscan_full_result { + wifi_gscan_result_t fixed; + uint32 scan_ch_bucket; + uint32 ie_length; /* byte length of Information Elements */ + char ie_data[1]; /* IE data to follow */ +} wifi_gscan_full_result_t; + +typedef struct gscan_results_cache { + struct gscan_results_cache *next; + uint8 scan_id; + uint8 flag; + uint8 tot_count; + uint8 tot_consumed; + uint32 scan_ch_bucket; + wifi_gscan_result_t results[1]; +} gscan_results_cache_t; + +typedef struct dhd_pno_gscan_capabilities { + int max_scan_cache_size; + int max_scan_buckets; + int max_ap_cache_per_scan; + int max_rssi_sample_size; + int max_scan_reporting_threshold; + int max_hotlist_bssids; + int max_hotlist_ssids; + int max_significant_wifi_change_aps; + int max_bssid_history_entries; + int max_epno_ssid_crc32; + int max_epno_hidden_ssid; + int max_white_list_ssid; +} dhd_pno_gscan_capabilities_t; + +typedef struct dhd_epno_ssid_cfg { + wl_ssid_ext_params_t params; + uint32 num_epno_ssid; + struct list_head epno_ssid_list; +} dhd_epno_ssid_cfg_t; + +struct dhd_pno_gscan_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; + uint8 swc_nbssid_threshold; + uint8 swc_rssi_window_size; + uint8 lost_ap_window; + uint8 nchannel_buckets; + uint8 reason; + uint8 get_batch_flag; + uint8 send_all_results_flag; + uint16 max_ch_bucket_freq; + gscan_results_cache_t *gscan_batch_cache; + 
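+	/* These result caches (batch above, hotlist found/lost below) are
+	 * singly-linked lists; each block is allocated as
+	 * sizeof(gscan_results_cache_t) plus (tot_count - 1) extra
+	 * wifi_gscan_result_t entries for the results[1] flexible array
+	 * (see the MALLOC/MFREE sizing in dhd_pno.c).
+	 */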
gscan_results_cache_t *gscan_hotlist_found; + gscan_results_cache_t*gscan_hotlist_lost; + uint16 nbssid_significant_change; + uint16 nbssid_hotlist; + struct dhd_pno_swc_evt_param param_significant; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; + struct list_head hotlist_bssid_list; + struct list_head significant_bssid_list; + dhd_epno_ssid_cfg_t epno_cfg; + uint32 scan_id; +}; + +typedef struct gscan_scan_params { + int32 scan_fr; + uint16 nchannel_buckets; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; +} gscan_scan_params_t; + +typedef struct gscan_batch_params { + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; +} gscan_batch_params_t; + +struct bssid_t { + struct ether_addr macaddr; + int16 rssi_reporting_threshold; /* 0 -> no reporting threshold */ +}; + +typedef struct gscan_hotlist_scan_params { + uint16 lost_ap_window; /* number of scans to declare LOST */ + uint16 nbssid; /* number of bssids */ + struct bssid_t bssid[1]; /* n bssids to follow */ +} gscan_hotlist_scan_params_t; + +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +typedef union dhd_pno_params { + struct dhd_pno_legacy_params params_legacy; + struct dhd_pno_batch_params params_batch; + struct dhd_pno_hotlist_params params_hotlist; +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) + struct dhd_pno_gscan_params params_gscan; +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +} dhd_pno_params_t; + +typedef struct dhd_pno_status_info { + dhd_pub_t *dhd; + struct work_struct work; + struct mutex pno_mutex; +#ifdef GSCAN_SUPPORT + wait_queue_head_t batch_get_wait; +#endif /* GSCAN_SUPPORT */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) + wait_queue_head_t get_batch_done; + bool batch_recvd; +#else + struct completion get_batch_done; +#endif + bool wls_supported; /* wifi location service supported or not */ + enum dhd_pno_status pno_status; + enum dhd_pno_mode pno_mode; + dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX]; + struct list_head head_list; +} dhd_pno_status_info_t; + +/* wrapper functions */ +extern int +dhd_dev_pno_enable(struct net_device *dev, int enable); + +extern int +dhd_dev_pno_stop_for_ssid(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int +dhd_dev_pno_set_for_batch(struct net_device *dev, + struct dhd_pno_batch_params *batch_params); + +extern int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize); + +extern int +dhd_dev_pno_stop_for_batch(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); +extern bool dhd_dev_is_legacy_pno_enabled(struct net_device *dev); +#if defined (GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +extern void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +#ifdef GSCAN_SUPPORT +extern int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush); +int dhd_dev_pno_lock_access_batch_results(struct net_device *dev); +void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev); +extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush); +extern int 
dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time);
+int dhd_retreive_batch_scan_results(dhd_pub_t *dhd);
+extern void * dhd_dev_hotlist_scan_event(struct net_device *dev,
+	const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len);
+void * dhd_dev_process_full_gscan_result(struct net_device *dev,
+	const void *data, uint32 len, int *send_evt_bytes);
+extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev);
+extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type);
+extern int dhd_dev_wait_batch_results_complete(struct net_device *dev);
+extern void * dhd_dev_process_epno_result(struct net_device *dev,
+	const void *data, uint32 event, int *send_evt_bytes);
+extern int dhd_dev_set_epno(struct net_device *dev);
+extern int dhd_dev_flush_fw_epno(struct net_device *dev);
+#endif /* GSCAN_SUPPORT */
+/* dhd pno functions */
+extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int enable);
+extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
+	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params);
+
+extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason);
+
+extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd);
+
+extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+
+extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd);
+
+extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+extern int dhd_pno_init(dhd_pub_t *dhd);
+extern int dhd_pno_deinit(dhd_pub_t *dhd);
+extern bool dhd_is_pno_supported(dhd_pub_t *dhd);
+extern bool dhd_is_legacy_pno_enabled(dhd_pub_t *dhd);
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info,
+	uint32 *len);
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef GSCAN_SUPPORT
+extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+	void *buf, bool flush);
+extern int dhd_pno_lock_batch_results(dhd_pub_t *dhd);
+extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd);
+extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush);
+extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag);
+extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf);
+extern int dhd_dev_retrieve_batch_scan(struct net_device *dev);
+extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+	int *send_evt_bytes, hotlist_type_t type, u32 *buf_len);
+extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
+	uint32 len, int *send_evt_bytes);
+extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd);
+extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type);
+extern int dhd_wait_batch_results_complete(dhd_pub_t *dhd);
+extern void * dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data,
+	uint32 event, int *size);
+extern void dhd_pno_translate_epno_fw_flags(uint32 *flags);
+extern int dhd_pno_set_epno(dhd_pub_t *dhd);
+extern int dhd_pno_flush_fw_epno(dhd_pub_t *dhd);
+extern void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth);
+#endif /* GSCAN_SUPPORT */
+#endif /* #if
defined(OEM_ANDROID) && defined(PNO_SUPPORT) */ + +#if defined(NDIS) +#if defined(PNO_SUPPORT) +extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg); +extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend); +extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr, + ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags); +extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled); +extern int dhd_pno_clean(dhd_pub_t *dhd); +#endif /* #if defined(PNO_SUPPORT) */ +#endif /* #if defined(NDIS) */ +#endif /* __DHD_PNO_H__ */ diff --git a/bcmdhd.101.10.361.x/dhd_proto.h b/bcmdhd.101.10.361.x/dhd_proto.h new file mode 100755 index 0000000..7f0b121 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_proto.h @@ -0,0 +1,302 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + * + * $Id$ + */ + +#ifndef _dhd_proto_h_ +#define _dhd_proto_h_ + +#include +#include +#ifdef BCMPCIE +#include +#endif + +#ifdef BCMINTERNAL +#ifdef DHD_FWTRACE +#include +#endif /* DHD_FWTRACE */ +#endif /* BCMINTERNAL */ + +#define DEFAULT_IOCTL_RESP_TIMEOUT (5 * 1000) /* 5 seconds */ +#ifndef IOCTL_RESP_TIMEOUT +#if defined(BCMQT_HW) +#define IOCTL_RESP_TIMEOUT (600 * 1000) /* 600 sec in real time */ +#elif defined(BCMFPGA_HW) +#define IOCTL_RESP_TIMEOUT (60 * 1000) /* 60 sec in real time */ +#else +/* In milli second default value for Production FW */ +#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT +#endif /* BCMQT */ +#endif /* IOCTL_RESP_TIMEOUT */ + +#if defined(BCMQT_HW) +#define IOCTL_DMAXFER_TIMEOUT (260 * 1000) /* 260 seconds second */ +#elif defined(BCMFPGA_HW) +#define IOCTL_DMAXFER_TIMEOUT (120 * 1000) /* 120 seconds */ +#else +/* In milli second default value for Production FW */ +#define IOCTL_DMAXFER_TIMEOUT (15 * 1000) /* 15 seconds for Production FW */ +#endif /* BCMQT */ + +#ifndef MFG_IOCTL_RESP_TIMEOUT +#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */ +#endif /* MFG_IOCTL_RESP_TIMEOUT */ + +#define DEFAULT_D3_ACK_RESP_TIMEOUT 2000 +#ifndef D3_ACK_RESP_TIMEOUT +#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT +#endif /* D3_ACK_RESP_TIMEOUT */ + +#define DEFAULT_DHD_BUS_BUSY_TIMEOUT (IOCTL_RESP_TIMEOUT + 1000) +#ifndef DHD_BUS_BUSY_TIMEOUT +#define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT +#endif /* DEFAULT_DHD_BUS_BUSY_TIMEOUT */ + +#define DS_EXIT_TIMEOUT 1000 /* In ms */ +#define DS_ENTER_TIMEOUT 1000 /* In ms */ + +#define IOCTL_DISABLE_TIMEOUT 0 + +/* + * Exported from the dhd protocol module (dhd_cdc, dhd_rndis) + */ + +/* Linkage, sets prot link and updates hdrlen in pub */ +extern int dhd_prot_attach(dhd_pub_t *dhdp); + +/* Initilizes the index block for dma'ing indices */ +extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz, + uint8 type, uint32 length); +#ifdef DHD_DMA_INDICES_SEQNUM +extern int dhd_prot_dma_indx_copybuf_init(dhd_pub_t *dhd, uint32 buf_sz, + uint8 type); +extern uint32 dhd_prot_read_seqnum(dhd_pub_t *dhd, bool host); +extern void dhd_prot_write_host_seqnum(dhd_pub_t *dhd, uint32 seq_num); +extern void dhd_prot_save_dmaidx(dhd_pub_t *dhd); +#endif /* DHD_DMA_INDICES_SEQNUM */ +/* Unlink, frees allocated protocol memory (including dhd_prot) */ +extern void dhd_prot_detach(dhd_pub_t *dhdp); + +/* Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +extern int dhd_sync_with_dongle(dhd_pub_t *dhdp); + +/* Protocol initialization needed for IOCTL/IOVAR path */ +extern int dhd_prot_init(dhd_pub_t *dhd); + +/* Stop protocol: sync w/dongle state. */ +extern void dhd_prot_stop(dhd_pub_t *dhdp); + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp); +extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp); + +/* Remove any protocol-specific data header. 
*/ +extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len); + +/* Use protocol to issue ioctl to dongle */ +extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len); + +/* Handles a protocol control response asynchronously */ +extern int dhd_prot_ctl_complete(dhd_pub_t *dhd); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add prot dump output to a buffer */ +extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Dump extended trap data */ +extern int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw); + +/* Update local copy of dongle statistics */ +extern void dhd_prot_dstats(dhd_pub_t *dhdp); + +extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen); + +extern int dhd_preinit_ioctls(dhd_pub_t *dhd); + +extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count); + +#ifdef BCMPCIE +extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype); +extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype); +extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound); +#ifdef BTLOG +extern bool dhd_prot_process_msgbuf_btlogcpl(dhd_pub_t *dhd, uint bound); +#endif /* BTLOG */ +extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd); +extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd); +extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd); +extern int dhd_post_dummy_msg(dhd_pub_t *dhd); +extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len); +extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset); +extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx); +extern void dhd_prot_schedule_aggregate_h2d_db(dhd_pub_t *dhd, uint16 flow_id); +extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, + uint len, uint srcdelay, uint destdelay, uint d11_lpbk, uint core_num, + uint32 mem_addr); +extern int dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result); + +extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma); +extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, + uint16 flowid, void *msgbuf_ring); +extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex); +extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b); +extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val); +extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd); +extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx); +extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx); +extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, bool h2d, + struct bcmstrbuf *strbuf, const char * fmt); +extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf); +extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info); +extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id); +extern uint32 
dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val); +extern void dhd_prot_reset(dhd_pub_t *dhd); +extern uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd); + +#ifdef IDLE_TX_FLOW_MGMT +extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count); +extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +#endif /* IDLE_TX_FLOW_MGMT */ +extern int dhd_prot_init_info_rings(dhd_pub_t *dhd); +#ifdef BTLOG +extern int dhd_prot_init_btlog_rings(dhd_pub_t *dhd); +#endif /* BTLOG */ +#ifdef DHD_HP2P +extern int dhd_prot_init_hp2p_rings(dhd_pub_t *dhd); +#endif /* DHD_HP2P */ +extern int dhd_prot_check_tx_resource(dhd_pub_t *dhd); +#endif /* BCMPCIE */ + +#ifdef DHD_LB +extern void dhd_lb_tx_compl_handler(unsigned long data); +extern void dhd_lb_rx_compl_handler(unsigned long data); +extern void dhd_lb_rx_process_handler(unsigned long data); +#endif /* DHD_LB */ +extern int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data); + +#ifdef BCMPCIE +extern int dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlv, uint16 tlv_len, + uint16 seq, uint16 xt_id); +extern bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set); +#else /* BCMPCIE */ +#define dhd_prot_send_host_timestamp(a, b, c, d, e) 0 +#define dhd_prot_data_path_tx_timestamp_logging(a, b, c) 0 +#define dhd_prot_data_path_rx_timestamp_logging(a, b, c) 0 +#endif /* BCMPCIE */ + +extern void dhd_prot_dma_indx_free(dhd_pub_t *dhd); + +#ifdef SNAPSHOT_UPLOAD +/* send request to take snapshot */ +int dhd_prot_send_snapshot_request(dhd_pub_t *dhdp, uint8 snapshot_type, uint8 snapshot_param); +/* get uploaded snapshot */ +int dhd_prot_get_snapshot(dhd_pub_t *dhdp, uint8 snapshot_type, uint32 offset, + uint32 dst_buf_size, uint8 *dst_buf, uint32 *dst_size, bool *is_more); +#endif /* SNAPSHOT_UPLOAD */ + +#ifdef EWP_EDL +int dhd_prot_init_edl_rings(dhd_pub_t *dhd); +bool dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd); +int dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data); +#endif /* EWP_EDL */ + +/* APIs for managing a DMA-able buffer */ +int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len); +void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +void dhd_local_buf_reset(char *buf, uint32 len); + +/******************************** + * For version-string expansion * + */ +#if defined(BDC) +#define DHD_PROTOCOL "bdc" +#elif defined(CDC) +#define DHD_PROTOCOL "cdc" +#else +#define DHD_PROTOCOL "unknown" +#endif /* proto */ + +int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len); +int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff); + +#ifdef BCMINTERNAL +typedef struct host_page_location_info { + uint32 addr_lo; + uint32 addr_hi; + uint32 binary_size; + uint32 tlv_size; + uint32 tlv_signature; +} host_page_location_info_t; +#define BCM_HOST_PAGE_LOCATION_SIGNATURE 0xFEED10C5u + +#ifdef DHD_FWTRACE +typedef struct host_fwtrace_buf_location_info { + fwtrace_hostaddr_info_t host_buf_info; + uint32 tlv_size; + uint32 tlv_signature; +} host_fwtrace_buf_location_info_t; +/* Host buffer info for pushing the trace info */ +#define 
BCM_HOST_FWTRACE_BUF_LOCATION_SIGNATURE 0xFEED10C6u
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_HP2P
+extern uint8 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable);
+extern uint32 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val);
+#endif
+
+#ifdef DHD_MAP_LOGGING
+extern void dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp);
+#endif /* DHD_MAP_LOGGING */
+
+extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
+extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
+
+#if defined(DHD_HTPUT_TUNABLES)
+extern uint16 dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t *dhd);
+extern void dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
+#endif /* DHD_HTPUT_TUNABLES */
+
+#endif /* _dhd_proto_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_qos_algo.h b/bcmdhd.101.10.361.x/dhd_qos_algo.h
new file mode 100755
index 0000000..368d120
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_qos_algo.h
@@ -0,0 +1,90 @@
+/*
+ * Header file for QOS Algorithm on DHD
+ *
+ * Provides type definitions and function prototypes for the QOS Algorithm
+ * Note that this algorithm is a platform-independent layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef _DHD_QOS_ALGO_H_
+#define _DHD_QOS_ALGO_H_
+
+#define LOWLAT_AVG_PKT_SIZE_LOW		50u
+#define LOWLAT_AVG_PKT_SIZE_HIGH	200u
+#define LOWLAT_NUM_PKTS_LOW		1u
+#define LOWLAT_NUM_PKTS_HIGH		8u
+#define LOWLAT_DETECT_CNT_INC_THRESH	10u
+#define LOWLAT_DETECT_CNT_DEC_THRESH	0u
+#define LOWLAT_DETECT_CNT_UPGRADE_THRESH	4u
+
+typedef struct qos_stat
+{
+	/* Statistics */
+	unsigned long tx_pkts_prev;
+	unsigned long tx_bytes_prev;
+	unsigned long tx_pkts;
+	unsigned long tx_bytes;
+
+	/* low latency flow detection algorithm counts */
+	unsigned char lowlat_detect_count;
+	bool lowlat_flow;
+} qos_stat_t;
+
+/* QoS algorithm parameters, controllable at runtime */
+typedef struct _qos_algo_params
+{
+	/* The avg Tx packet size in the sampling interval must be between
+	 * these two thresholds for QoS upgrade to take place.
+	 * default values = LOWLAT_AVG_PKT_SIZE_LOW, LOWLAT_AVG_PKT_SIZE_HIGH
+	 */
+	unsigned long avg_pkt_size_low_thresh;
+	unsigned long avg_pkt_size_high_thresh;
+	/* The number of Tx packets in the sampling interval must be
+	 * between these two thresholds for QoS upgrade to happen.
+	 * default values = LOWLAT_NUM_PKTS_LOW, LOWLAT_NUM_PKTS_HIGH
+	 */
+	unsigned long num_pkts_low_thresh;
+	unsigned long num_pkts_high_thresh;
+	/* If low latency traffic is detected, then the low latency count
+	 * is incremented till the first threshold is hit.
+	 * If traffic ceases to be low latency, then the count is
+	 * decremented till the second threshold is hit.
+	 * default values = LOWLAT_DETECT_CNT_INC_THRESH, LOWLAT_DETECT_CNT_DEC_THRESH
+	 */
+	unsigned char detect_cnt_inc_thresh;
+	unsigned char detect_cnt_dec_thresh;
+	/* If the low latency count crosses this threshold, the flow will be upgraded.
+	 * Default value = LOWLAT_DETECT_CNT_UPGRADE_THRESH
+	 */
+	unsigned char detect_cnt_upgrade_thresh;
+} qos_algo_params_t;
+
+#define QOS_PARAMS(x)	(&(x)->psk_qos->qos_params)
+
+/*
+ * Operates on a flow and returns 1 for upgrade and 0 for
+ * no upgrade
+ */
+int dhd_qos_algo(dhd_info_t *dhd, qos_stat_t *qos, qos_algo_params_t *qos_params);
+int qos_algo_params_init(qos_algo_params_t *qos_params);
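+
+/*
+ * Illustrative sketch only (not part of the original header): one plausible
+ * shape of the detection scheme the comments above describe, included here to
+ * make the threshold semantics concrete. The helper name and the exact
+ * comparison/saturation rules are assumptions; the shipped implementation is
+ * dhd_qos_algo(), likely in dhd_linux_sock_qos.c, and may differ in detail.
+ */
+static inline int
+qos_lowlat_detect_sketch(qos_stat_t *qos, const qos_algo_params_t *p)
+{
+	unsigned long pkts = qos->tx_pkts - qos->tx_pkts_prev;
+	unsigned long bytes = qos->tx_bytes - qos->tx_bytes_prev;
+	unsigned long avg_pkt_size = pkts ? (bytes / pkts) : 0;
+	/* a sample looks low-latency when both packet count and average
+	 * packet size fall inside the configured windows
+	 */
+	bool lowlat_sample = (pkts >= p->num_pkts_low_thresh) &&
+		(pkts <= p->num_pkts_high_thresh) &&
+		(avg_pkt_size >= p->avg_pkt_size_low_thresh) &&
+		(avg_pkt_size <= p->avg_pkt_size_high_thresh);
+
+	if (lowlat_sample) {
+		/* count up, saturating at the increment threshold */
+		if (qos->lowlat_detect_count < p->detect_cnt_inc_thresh) {
+			qos->lowlat_detect_count++;
+		}
+	} else if (qos->lowlat_detect_count > p->detect_cnt_dec_thresh) {
+		/* traffic no longer looks low-latency: decay the count */
+		qos->lowlat_detect_count--;
+	}
+
+	/* flag the flow for upgrade once the count crosses the threshold */
+	qos->lowlat_flow = (qos->lowlat_detect_count > p->detect_cnt_upgrade_thresh);
+
+	/* snapshot the counters for the next sampling interval */
+	qos->tx_pkts_prev = qos->tx_pkts;
+	qos->tx_bytes_prev = qos->tx_bytes;
+
+	return qos->lowlat_flow ? 1 : 0;
+}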
+#endif /* _DHD_QOS_ALGO_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_rtt.c b/bcmdhd.101.10.361.x/dhd_rtt.c
new file mode 100755
index 0000000..9f807e9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_rtt.c
@@ -0,0 +1,4855 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef WL_CFG80211
+#include
+#endif /* WL_CFG80211 */
+#ifdef WL_NAN
+#include
+#endif /* WL_NAN */
+
+static DEFINE_SPINLOCK(noti_list_lock);
+#define NULL_CHECK(p, s, err) \
+	do { \
+		if (!(p)) { \
+			printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+			err = BCME_ERROR; \
+			return err; \
+		} \
+	} while (0)
+#define DHD_RTT_CHK_SET_PARAM(param, param_cnt, targets, tlvid) \
+	do { \
+		if ((param_cnt) >= FTM_MAX_PARAMS) { \
+			DHD_RTT_ERR(("Param cnt exceeded for FTM cfg iovar\n")); \
+			err = BCME_ERROR; \
+			goto exit; \
+		} else { \
+			dhd_rtt_set_ftm_config_param((param), &(param_cnt), \
+				(targets), (tlvid)); \
+		} \
+	} while (0)
+
+#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+	(ts).tv_nsec / NSEC_PER_USEC)
+
+#undef DHD_RTT_MEM
+#undef DHD_RTT_ERR
+#define DHD_RTT_MEM DHD_LOG_MEM
+#define DHD_RTT_ERR DHD_ERROR
+
+#define FTM_IOC_BUFSZ 2048	/* ioctl bufsize for our module (> BCM_XTLV_HDR_SIZE) */
+#define FTM_AVAIL_MAX_SLOTS 32
+#define FTM_MAX_CONFIGS 10
+#define FTM_MAX_PARAMS 20
+#define FTM_DEFAULT_SESSION 1
+#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */
+#define FTM_INVALID -1
+#define FTM_DEFAULT_CNT_20M	24u
+#define FTM_DEFAULT_CNT_40M	16u
+#define FTM_DEFAULT_CNT_80M	11u
+/* To handle congestion env, set max dur/timeout */
+#define FTM_MAX_BURST_DUR_TMO_MS	128u
+
+/* convenience macros */
+#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10)
+#define FTM_MICRO2TU(_tu) ((uint64)(_tu) >> 10)
+#define FTM_TU2MILLI(_tu) ((uint32)FTM_TU2MICRO(_tu) / 1000)
+#define FTM_MICRO2MILLI(_x) ((uint32)(_x) / 1000)
+#define FTM_MICRO2SEC(_x) ((uint32)(_x) / 1000000)
+#define FTM_INTVL2NSEC(_intvl) ((uint32)ftm_intvl2nsec(_intvl))
+#define FTM_INTVL2USEC(_intvl) ((uint32)ftm_intvl2usec(_intvl))
+#define FTM_INTVL2MSEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000)
+#define FTM_INTVL2SEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000000)
+#define FTM_USECIN100MILLI(_usec) ((_usec) / 100000)
+
+/* Broadcom-specific setting to obtain more accurate data */
+#define ENABLE_VHT_ACK
+#define CH_MIN_5G_CHANNEL 34
+
+/* CUR ETH became obsolete with this major version onwards */
+#define RTT_IOV_CUR_ETH_OBSOLETE 12
+
+/*
+ * Parallel RTT Sessions are supported
+ * with this major and minor version onwards
+ */
+#define RTT_PARALLEL_SSNS_SUPPORTED_MAJ_VER	14
+#define RTT_PARALLEL_SSNS_SUPPORTED_MIN_VER	2
+
+/* PROXD TIMEOUT */
+#define DHD_RTT_TIMER_INTERVAL_MS	5000u
+#define DHD_NAN_RTT_TIMER_INTERVAL_MS	20000u
+
+#define DHD_NAN_RTT_MAX_SESSIONS	4u
+#define DHD_NAN_RTT_MAX_SESSIONS_LEGACY	1u
+
+struct rtt_noti_callback {
+	struct list_head list;
+	void *ctx;
+	dhd_rtt_compl_noti_fn noti_fn;
+};
+
+/* bitmask indicating which command groups a subcommand belongs to */
+typedef enum {
+	FTM_SUBCMD_FLAG_METHOD = 0x01,	/* FTM method command */
+	FTM_SUBCMD_FLAG_SESSION = 0x02,	/* FTM session command */
+	FTM_SUBCMD_FLAG_ALL = FTM_SUBCMD_FLAG_METHOD | FTM_SUBCMD_FLAG_SESSION
+} ftm_subcmd_flag_t;
+
+/* proxd ftm config-category definition */
+typedef enum {
+	FTM_CONFIG_CAT_GENERAL = 1,	/* general configuration */
+	FTM_CONFIG_CAT_OPTIONS = 2,	/* 'config options' */
+	FTM_CONFIG_CAT_AVAIL = 3,	/* 'config avail' */
+} ftm_config_category_t;
+
+typedef struct ftm_subcmd_info {
+	int16 version;		/* FTM version (optional) */
+	char *name;		/* cmd-name string as cmdline input */
+	wl_proxd_cmd_t cmdid;	/* cmd-id */
+	
bcm_xtlv_unpack_cbfn_t *handler; /* cmd response handler (optional) */ + ftm_subcmd_flag_t cmdflag; /* CMD flag (optional) */ +} ftm_subcmd_info_t; + +typedef struct ftm_config_options_info { + uint32 flags; /* wl_proxd_flags_t/wl_proxd_session_flags_t */ + bool enable; +} ftm_config_options_info_t; + +typedef struct ftm_config_param_info { + uint16 tlvid; /* mapping TLV id for the item */ + union { + uint32 chanspec; + struct ether_addr mac_addr; + wl_proxd_intvl_t data_intvl; + uint32 data32; + uint16 data16; + uint8 data8; + uint32 event_mask; + }; +} ftm_config_param_info_t; + +/* +* definition for id-string mapping. +* This is used to map an id (can be cmd-id, tlv-id, ....) to a text-string +* for debug-display or cmd-log-display +*/ +typedef struct ftm_strmap_entry { + int32 id; + char *text; +} ftm_strmap_entry_t; + +typedef struct ftm_status_map_host_entry { + wl_proxd_status_t proxd_status; + rtt_reason_t rtt_reason; +} ftm_status_map_host_entry_t; + +static uint16 +rtt_result_ver(uint16 tlvid, const uint8 *p_data); + +static int +dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data, + uint16 tlvid, uint16 len); + +static int +dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data, + uint16 tlvid, uint16 len); + +static wifi_rate_v1 +dhd_rtt_convert_rate_to_host(uint32 ratespec); + +#if defined(WL_CFG80211) && defined(RTT_DEBUG) +const char * +ftm_cmdid_to_str(uint16 cmdid); +#endif /* WL_CFG80211 && RTT_DEBUG */ + +#ifdef WL_CFG80211 +static int +dhd_rtt_start(dhd_pub_t *dhd); +static int dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status, + struct ether_addr *addr); +static void dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd); +static void dhd_rtt_timeout_work(struct work_struct *work); +static bool dhd_rtt_get_report_header(rtt_status_info_t *rtt_status, + rtt_results_header_t **rtt_results_header, struct ether_addr *addr); +#ifdef WL_NAN +static void dhd_rtt_trigger_pending_targets_on_session_end(dhd_pub_t *dhd); +#endif /* WL_NAN */ +#endif /* WL_CFG80211 */ +static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0}; + +/* ftm status mapping to host status */ +static const ftm_status_map_host_entry_t ftm_status_map_info[] = { + {WL_PROXD_E_INCOMPLETE, RTT_STATUS_FAILURE}, + {WL_PROXD_E_OVERRIDDEN, RTT_STATUS_FAILURE}, + {WL_PROXD_E_ASAP_FAILED, RTT_STATUS_FAILURE}, + {WL_PROXD_E_NOTSTARTED, RTT_STATUS_FAIL_NOT_SCHEDULED_YET}, + {WL_PROXD_E_INVALIDMEAS, RTT_STATUS_FAIL_INVALID_TS}, + {WL_PROXD_E_INCAPABLE, RTT_STATUS_FAIL_NO_CAPABILITY}, + {WL_PROXD_E_MISMATCH, RTT_STATUS_FAILURE}, + {WL_PROXD_E_DUP_SESSION, RTT_STATUS_FAILURE}, + {WL_PROXD_E_REMOTE_FAIL, RTT_STATUS_FAILURE}, + {WL_PROXD_E_REMOTE_INCAPABLE, RTT_STATUS_FAILURE}, + {WL_PROXD_E_SCHED_FAIL, RTT_STATUS_FAIL_SCHEDULE}, + {WL_PROXD_E_PROTO, RTT_STATUS_FAIL_PROTOCOL}, + {WL_PROXD_E_EXPIRED, RTT_STATUS_FAILURE}, + {WL_PROXD_E_TIMEOUT, RTT_STATUS_FAIL_TM_TIMEOUT}, + {WL_PROXD_E_NOACK, RTT_STATUS_FAIL_NO_RSP}, + {WL_PROXD_E_DEFERRED, RTT_STATUS_FAILURE}, + {WL_PROXD_E_INVALID_SID, RTT_STATUS_FAILURE}, + {WL_PROXD_E_REMOTE_CANCEL, RTT_STATUS_FAILURE}, + {WL_PROXD_E_CANCELED, RTT_STATUS_ABORTED}, + {WL_PROXD_E_INVALID_SESSION, RTT_STATUS_FAILURE}, + {WL_PROXD_E_BAD_STATE, RTT_STATUS_FAILURE}, + {WL_PROXD_E_ERROR, RTT_STATUS_FAILURE}, + {WL_PROXD_E_OK, RTT_STATUS_SUCCESS} +}; + +static const ftm_strmap_entry_t ftm_event_type_loginfo[] = { + /* wl_proxd_event_type_t, text-string */ + { WL_PROXD_EVENT_NONE, "none" }, + { 
WL_PROXD_EVENT_SESSION_CREATE, "session create" }, + { WL_PROXD_EVENT_SESSION_START, "session start" }, + { WL_PROXD_EVENT_FTM_REQ, "FTM req" }, + { WL_PROXD_EVENT_BURST_START, "burst start" }, + { WL_PROXD_EVENT_BURST_END, "burst end" }, + { WL_PROXD_EVENT_SESSION_END, "session end" }, + { WL_PROXD_EVENT_SESSION_RESTART, "session restart" }, + { WL_PROXD_EVENT_BURST_RESCHED, "burst rescheduled" }, + { WL_PROXD_EVENT_SESSION_DESTROY, "session destroy" }, + { WL_PROXD_EVENT_RANGE_REQ, "range request" }, + { WL_PROXD_EVENT_FTM_FRAME, "FTM frame" }, + { WL_PROXD_EVENT_DELAY, "delay" }, + { WL_PROXD_EVENT_VS_INITIATOR_RPT, "initiator-report " }, /* rx initiator-rpt */ + { WL_PROXD_EVENT_RANGING, "ranging " }, + { WL_PROXD_EVENT_COLLECT, "collect" }, + { WL_PROXD_EVENT_MF_STATS, "mf_stats" }, + { WL_PROXD_EVENT_START_WAIT, "start-wait"} +}; + +/* +* session-state --> text string mapping +*/ +static const ftm_strmap_entry_t ftm_session_state_value_loginfo[] = { + /* wl_proxd_session_state_t, text string */ + { WL_PROXD_SESSION_STATE_CREATED, "created" }, + { WL_PROXD_SESSION_STATE_CONFIGURED, "configured" }, + { WL_PROXD_SESSION_STATE_STARTED, "started" }, + { WL_PROXD_SESSION_STATE_DELAY, "delay" }, + { WL_PROXD_SESSION_STATE_USER_WAIT, "user-wait" }, + { WL_PROXD_SESSION_STATE_SCHED_WAIT, "sched-wait" }, + { WL_PROXD_SESSION_STATE_BURST, "burst" }, + { WL_PROXD_SESSION_STATE_STOPPING, "stopping" }, + { WL_PROXD_SESSION_STATE_ENDED, "ended" }, + { WL_PROXD_SESSION_STATE_DESTROYING, "destroying" }, + { WL_PROXD_SESSION_STATE_NONE, "none" } +}; + +/* +* status --> text string mapping +*/ +static const ftm_strmap_entry_t ftm_status_value_loginfo[] = { + /* wl_proxd_status_t, text-string */ + { WL_PROXD_E_OVERRIDDEN, "overridden" }, + { WL_PROXD_E_ASAP_FAILED, "ASAP failed" }, + { WL_PROXD_E_NOTSTARTED, "not started" }, + { WL_PROXD_E_INVALIDMEAS, "invalid measurement" }, + { WL_PROXD_E_INCAPABLE, "incapable" }, + { WL_PROXD_E_MISMATCH, "mismatch"}, + { WL_PROXD_E_DUP_SESSION, "dup session" }, + { WL_PROXD_E_REMOTE_FAIL, "remote fail" }, + { WL_PROXD_E_REMOTE_INCAPABLE, "remote incapable" }, + { WL_PROXD_E_SCHED_FAIL, "sched failure" }, + { WL_PROXD_E_PROTO, "protocol error" }, + { WL_PROXD_E_EXPIRED, "expired" }, + { WL_PROXD_E_TIMEOUT, "timeout" }, + { WL_PROXD_E_NOACK, "no ack" }, + { WL_PROXD_E_DEFERRED, "deferred" }, + { WL_PROXD_E_INVALID_SID, "invalid session id" }, + { WL_PROXD_E_REMOTE_CANCEL, "remote cancel" }, + { WL_PROXD_E_CANCELED, "canceled" }, + { WL_PROXD_E_INVALID_SESSION, "invalid session" }, + { WL_PROXD_E_BAD_STATE, "bad state" }, + { WL_PROXD_E_ERROR, "error" }, + { WL_PROXD_E_OK, "OK" } +}; + +/* +* time interval unit --> text string mapping +*/ +static const ftm_strmap_entry_t ftm_tmu_value_loginfo[] = { + /* wl_proxd_tmu_t, text-string */ + { WL_PROXD_TMU_TU, "TU" }, + { WL_PROXD_TMU_SEC, "sec" }, + { WL_PROXD_TMU_MILLI_SEC, "ms" }, + { WL_PROXD_TMU_MICRO_SEC, "us" }, + { WL_PROXD_TMU_NANO_SEC, "ns" }, + { WL_PROXD_TMU_PICO_SEC, "ps" } +}; + +struct ieee_80211_mcs_rate_info { + uint8 constellation_bits; + uint8 coding_q; + uint8 coding_d; +}; + +static const struct ieee_80211_mcs_rate_info wl_mcs_info[] = { + { 1, 1, 2 }, /* MCS 0: MOD: BPSK, CR 1/2 */ + { 2, 1, 2 }, /* MCS 1: MOD: QPSK, CR 1/2 */ + { 2, 3, 4 }, /* MCS 2: MOD: QPSK, CR 3/4 */ + { 4, 1, 2 }, /* MCS 3: MOD: 16QAM, CR 1/2 */ + { 4, 3, 4 }, /* MCS 4: MOD: 16QAM, CR 3/4 */ + { 6, 2, 3 }, /* MCS 5: MOD: 64QAM, CR 2/3 */ + { 6, 3, 4 }, /* MCS 6: MOD: 64QAM, CR 3/4 */ + { 6, 5, 6 }, /* MCS 7: MOD: 64QAM, CR 5/6 */ 
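+	/* Note: the MCS 8-9 (256-QAM) entries below apply to 11ac VHT rates
+	 * only; 11n HT uses per-stream MCS 0-7, with MCS 32 special-cased in
+	 * rate_mcs2rate() below. Worked example from this table: MCS 7, Nss 2,
+	 * 80 MHz, SGI gives 234 * 6 * 2 * (5/6) * 250 = 585000 Kbps, then
+	 * * 10/9 = 650000 Kbps.
+	 */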
+ { 8, 3, 4 }, /* MCS 8: MOD: 256QAM, CR 3/4 */ + { 8, 5, 6 } /* MCS 9: MOD: 256QAM, CR 5/6 */ +}; + +/** + * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi combination. + * 'mcs' : a *single* spatial stream MCS (11n or 11ac) + */ +uint +rate_mcs2rate(uint mcs, uint nss, uint bw, int sgi) +{ + const int ksps = 250; /* kilo symbols per sec, 4 us sym */ + const int Nsd_20MHz = 52; + const int Nsd_40MHz = 108; + const int Nsd_80MHz = 234; + const int Nsd_160MHz = 468; + uint rate; + + if (mcs == 32) { + /* just return fixed values for mcs32 instead of trying to parametrize */ + rate = (sgi == 0) ? 6000 : 6778; + } else if (mcs <= 9) { + /* This calculation works for 11n HT and 11ac VHT if the HT mcs values + * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8. + * That is, HT MCS 23 is a base MCS = 7, Nss = 3 + */ + + /* find the number of complex numbers per symbol */ + if (RSPEC_IS20MHZ(bw)) { + /* 4360 TODO: eliminate Phy const in rspec bw, then just compare + * as in 80 and 160 case below instead of RSPEC_IS20MHZ(bw) + */ + rate = Nsd_20MHz; + } else if (RSPEC_IS40MHZ(bw)) { + /* 4360 TODO: eliminate Phy const in rspec bw, then just compare + * as in 80 and 160 case below instead of RSPEC_IS40MHZ(bw) + */ + rate = Nsd_40MHz; + } else if (bw == WL_RSPEC_BW_80MHZ) { + rate = Nsd_80MHz; + } else if (bw == WL_RSPEC_BW_160MHZ) { + rate = Nsd_160MHz; + } else { + rate = 0; + } + + /* multiply by bits per number from the constellation in use */ + rate = rate * wl_mcs_info[mcs].constellation_bits; + + /* adjust for the number of spatial streams */ + rate = rate * nss; + + /* adjust for the coding rate given as a quotient and divisor */ + rate = (rate * wl_mcs_info[mcs].coding_q) / wl_mcs_info[mcs].coding_d; + + /* multiply by Kilo symbols per sec to get Kbps */ + rate = rate * ksps; + + /* adjust the symbols per sec for SGI + * symbol duration is 4 us without SGI, and 3.6 us with SGI, + * so ratio is 10 / 9 + */ + if (sgi) { + /* add 4 for rounding of division by 9 */ + rate = ((rate * 10) + 4) / 9; + } + } else { + rate = 0; + } + + return rate; +} /* wlc_rate_mcs2rate */ + +/** take a well formed ratespec_t arg and return phy rate in [Kbps] units */ +static uint32 +rate_rspec2rate(uint32 rspec) +{ + int rate = 0; + + if (RSPEC_ISLEGACY(rspec)) { + rate = 500 * (rspec & WL_RSPEC_RATE_MASK); + } else if (RSPEC_ISHT(rspec)) { + uint mcs = (rspec & WL_RSPEC_RATE_MASK); + + if (mcs == 32) { + rate = rate_mcs2rate(mcs, 1, WL_RSPEC_BW_40MHZ, RSPEC_ISSGI(rspec)); + } else { + uint nss = 1 + (mcs / 8); + mcs = mcs % 8; + rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec)); + } + } else if (RSPEC_ISVHT(rspec)) { + uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK); + uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT; + if (mcs > 9 || nss > 8) { + DHD_RTT(("%s: Invalid mcs %d or nss %d\n", __FUNCTION__, mcs, nss)); + goto exit; + } + + rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec)); + } else { + DHD_RTT(("%s: wrong rspec:%d\n", __FUNCTION__, rspec)); + } +exit: + return rate; +} + +char resp_buf[WLC_IOCTL_SMLEN]; + +static uint64 +ftm_intvl2nsec(const wl_proxd_intvl_t *intvl) +{ + uint64 ret; + ret = intvl->intvl; + switch (intvl->tmu) { + case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret) * 1000; break; + case WL_PROXD_TMU_SEC: ret *= 1000000000; break; + case WL_PROXD_TMU_MILLI_SEC: ret *= 1000000; break; + case WL_PROXD_TMU_MICRO_SEC: ret *= 1000; break; + case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000; break; 
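+	/* all conversions normalize to ns; e.g. a 10 TU interval is
+	 * FTM_TU2MICRO(10) * 1000 = (10 << 10) * 1000 = 10240000 ns
+	 * (1 TU = 1024 us)
+	 */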
+ case WL_PROXD_TMU_NANO_SEC: /* fall through */ + default: break; + } + return ret; +} +uint64 +ftm_intvl2usec(const wl_proxd_intvl_t *intvl) +{ + uint64 ret; + ret = intvl->intvl; + switch (intvl->tmu) { + case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret); break; + case WL_PROXD_TMU_SEC: ret *= 1000000; break; + case WL_PROXD_TMU_NANO_SEC: ret = intvl->intvl / 1000; break; + case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000000; break; + case WL_PROXD_TMU_MILLI_SEC: ret *= 1000; break; + case WL_PROXD_TMU_MICRO_SEC: /* fall through */ + default: break; + } + return ret; +} + +/* +* lookup 'id' (as a key) from a fw status to host map table +* if found, return the corresponding reason code +*/ + +static rtt_reason_t +ftm_get_statusmap_info(wl_proxd_status_t id, const ftm_status_map_host_entry_t *p_table, + uint32 num_entries) +{ + int i; + const ftm_status_map_host_entry_t *p_entry; + /* scan thru the table till end */ + p_entry = p_table; + for (i = 0; i < (int) num_entries; i++) + { + if (p_entry->proxd_status == id) { + return p_entry->rtt_reason; + } + p_entry++; /* next entry */ + } + return RTT_STATUS_FAILURE; /* not found */ +} +/* +* lookup 'id' (as a key) from a table +* if found, return the entry pointer, otherwise return NULL +*/ +static const ftm_strmap_entry_t* +ftm_get_strmap_info(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries) +{ + int i; + const ftm_strmap_entry_t *p_entry; + + /* scan thru the table till end */ + p_entry = p_table; + for (i = 0; i < (int) num_entries; i++) + { + if (p_entry->id == id) + return p_entry; + p_entry++; /* next entry */ + } + return NULL; /* not found */ +} + +/* +* map enum to a text-string for display, this function is called by the following: +* For debug/trace: +* ftm_[cmdid|tlvid]_to_str() +* For TLV-output log for 'get' commands +* ftm_[method|tmu|caps|status|state]_value_to_logstr() +* Input: +* pTable -- point to a 'enum to string' table. +*/ +static const char * +ftm_map_id_to_str(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries) +{ + const ftm_strmap_entry_t*p_entry = ftm_get_strmap_info(id, p_table, num_entries); + if (p_entry) + return (p_entry->text); + + return "invalid"; +} + +#if defined(WL_CFG80211) && defined(RTT_DEBUG) +/* define entry, e.g. 
{ WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */ +#define DEF_STRMAP_ENTRY(id) { (id), #id } + +/* ftm cmd-id mapping */ +static const ftm_strmap_entry_t ftm_cmdid_map[] = { + /* {wl_proxd_cmd_t(WL_PROXD_CMD_xxx), "WL_PROXD_CMD_xxx" }, */ + DEF_STRMAP_ENTRY(WL_PROXD_CMD_NONE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_VERSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_ENABLE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_DISABLE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_CONFIG), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_SESSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_BURST_REQUEST), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_SESSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_DELETE_SESSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RESULT), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_INFO), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_STATUS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_SESSIONS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_COUNTERS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_CLEAR_COUNTERS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_COLLECT), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_TUNE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_DUMP), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_RANGING), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_RANGING), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RANGING_INFO), +}; + +/* +* map a ftm cmd-id to a text-string for display +*/ +const char * +ftm_cmdid_to_str(uint16 cmdid) +{ + return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map)); +} +#endif /* WL_CFG80211 && RTT_DEBUG */ + +/* +* convert BCME_xxx error codes into related error strings +* note, bcmerrorstr() defined in bcmutils is for BCMDRIVER only, +* this duplicate copy is for WL access and may need to clean up later +*/ +static const char *ftm_bcmerrorstrtable[] = BCMERRSTRINGTABLE; +static const char * +ftm_status_value_to_logstr(wl_proxd_status_t status) +{ + static char ftm_msgbuf_status_undef[32]; + const ftm_strmap_entry_t *p_loginfo; + int bcmerror; + + /* check if within BCME_xxx error range */ + bcmerror = (int) status; + if (VALID_BCMERROR(bcmerror)) + return ftm_bcmerrorstrtable[-bcmerror]; + + /* otherwise, look for 'proxd ftm status' range */ + p_loginfo = ftm_get_strmap_info((int32) status, + &ftm_status_value_loginfo[0], ARRAYSIZE(ftm_status_value_loginfo)); + if (p_loginfo) + return p_loginfo->text; + + /* report for 'out of range' FTM-status error code */ + memset(ftm_msgbuf_status_undef, 0, sizeof(ftm_msgbuf_status_undef)); + snprintf(ftm_msgbuf_status_undef, sizeof(ftm_msgbuf_status_undef), + "Undefined status %d", status); + return &ftm_msgbuf_status_undef[0]; +} + +static const char * +ftm_tmu_value_to_logstr(wl_proxd_tmu_t tmu) +{ + return ftm_map_id_to_str((int32)tmu, + &ftm_tmu_value_loginfo[0], ARRAYSIZE(ftm_tmu_value_loginfo)); +} + +static const ftm_strmap_entry_t* +ftm_get_event_type_loginfo(wl_proxd_event_type_t event_type) +{ + /* look up 'event-type' from a predefined table */ + return ftm_get_strmap_info((int32) event_type, + ftm_event_type_loginfo, ARRAYSIZE(ftm_event_type_loginfo)); +} + +static const char * +ftm_session_state_value_to_logstr(wl_proxd_session_state_t state) +{ + return ftm_map_id_to_str((int32)state, &ftm_session_state_value_loginfo[0], + ARRAYSIZE(ftm_session_state_value_loginfo)); +} + +#ifdef WL_CFG80211 +/* +* send 'proxd' iovar for all ftm get-related commands +*/ +static int +rtt_do_get_ioctl(dhd_pub_t *dhd, wl_proxd_iov_t *p_proxd_iov, uint16 proxd_iovsize, + ftm_subcmd_info_t *p_subcmd_info) +{ + + wl_proxd_iov_t *p_iovresp = (wl_proxd_iov_t *)resp_buf; + int status; + int tlvs_len; + /* send getbuf proxd iovar */ + status = dhd_getiovar(dhd, 0, "proxd", 
(char *)p_proxd_iov,
+		proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN);
+	if (status != BCME_OK) {
+		DHD_RTT_ERR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
+			__FUNCTION__, p_subcmd_info->cmdid, status));
+		return status;
+	}
+	if (p_subcmd_info->cmdid == WL_PROXD_CMD_GET_VERSION) {
+		p_subcmd_info->version = ltoh16(p_iovresp->version);
+		DHD_RTT(("ftm version: 0x%x\n", ltoh16(p_iovresp->version)));
+		goto exit;
+	}
+
+	tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE;
+	if (tlvs_len < 0) {
+		DHD_RTT_ERR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
+			__FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE));
+		tlvs_len = 0;
+	}
+
+	if (tlvs_len > 0 && p_subcmd_info->handler) {
+		/* unpack TLVs and invoke the cbfn for processing */
+		status = bcm_unpack_xtlv_buf(p_proxd_iov, (uint8 *)p_iovresp->tlvs,
+			tlvs_len, BCM_XTLV_OPTION_ALIGN32, p_subcmd_info->handler);
+	}
+exit:
+	return status;
+}
+
+static wl_proxd_iov_t *
+rtt_alloc_getset_buf(dhd_pub_t *dhd, wl_proxd_method_t method, wl_proxd_session_id_t session_id,
+	wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize)
+{
+	uint16 proxd_iovsize;
+	wl_proxd_tlv_t *p_tlv;
+	wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL;
+
+	*p_out_bufsize = 0;	/* init */
+	/* calculate the whole buffer size, including one reserve-tlv entry in the header */
+	proxd_iovsize = sizeof(wl_proxd_iov_t) + tlvs_bufsize;
+
+	p_proxd_iov = (wl_proxd_iov_t *)MALLOCZ(dhd->osh, proxd_iovsize);
+	if (p_proxd_iov == NULL) {
+		DHD_RTT_ERR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
+		return NULL;
+	}
+
+	/* setup proxd-FTM-method iovar header */
+	p_proxd_iov->version = htol16(WL_PROXD_API_VERSION);
+	p_proxd_iov->len = htol16(proxd_iovsize); /* caller may adjust it based on #of TLVs */
+	p_proxd_iov->cmd = htol16(cmdid);
+	p_proxd_iov->method = htol16(method);
+	p_proxd_iov->sid = htol16(session_id);
+
+	/* initialize the reserved/dummy-TLV in iovar header */
+	p_tlv = p_proxd_iov->tlvs;
+	p_tlv->id = htol16(WL_PROXD_TLV_ID_NONE);
+	p_tlv->len = htol16(0);
+
+	*p_out_bufsize = proxd_iovsize;	/* for caller's reference */
+
+	return p_proxd_iov;
+}
+
+static int
+dhd_rtt_common_get_handler(dhd_pub_t *dhd, ftm_subcmd_info_t *p_subcmd_info,
+	wl_proxd_method_t method,
+	wl_proxd_session_id_t session_id)
+{
+	int status = BCME_OK;
+	uint16 proxd_iovsize = 0;
+	wl_proxd_iov_t *p_proxd_iov;
+#ifdef RTT_DEBUG
+	DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
+		__FUNCTION__, method, session_id, p_subcmd_info->cmdid,
+		ftm_cmdid_to_str(p_subcmd_info->cmdid)));
+#endif
+	/* alloc mem for ioctl header + reserved 0 bufsize for tlvs (initialize to zero) */
+	p_proxd_iov = rtt_alloc_getset_buf(dhd, method, session_id, p_subcmd_info->cmdid,
+		0, &proxd_iovsize);
+
+	if (p_proxd_iov == NULL)
+		return BCME_NOMEM;
+
+	status = rtt_do_get_ioctl(dhd, p_proxd_iov, proxd_iovsize, p_subcmd_info);
+
+	if (status != BCME_OK) {
+		DHD_RTT(("%s failed: status=%d\n", __FUNCTION__, status));
+	}
+	MFREE(dhd->osh, p_proxd_iov, proxd_iovsize);
+	return status;
+}
+
+/*
+* common handler for set-related proxd method commands which require no TLV as input
+* wl proxd ftm [session-id]
+* e.g.
+* wl proxd ftm enable -- to enable ftm
+* wl proxd ftm disable -- to disable ftm
+* wl proxd ftm start -- to start a specified session
+* wl proxd ftm stop -- to cancel a specified session;
+* state is maintained till the session is deleted.
+* wl proxd ftm delete -- to delete a specified session +* wl proxd ftm [] clear-counters -- to clear counters +* wl proxd ftm burst-request -- on initiator: to send burst request; +* on target: send FTM frame +* wl proxd ftm collect +* wl proxd ftm tune (TBD) +*/ +static int +dhd_rtt_common_set_handler(dhd_pub_t *dhd, const ftm_subcmd_info_t *p_subcmd_info, + wl_proxd_method_t method, wl_proxd_session_id_t session_id) +{ + uint16 proxd_iovsize; + wl_proxd_iov_t *p_proxd_iov; + int ret; + +#ifdef RTT_DEBUG + DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n", + __FUNCTION__, method, session_id, p_subcmd_info->cmdid, + ftm_cmdid_to_str(p_subcmd_info->cmdid))); +#endif + + /* allocate and initialize a temp buffer for 'set proxd' iovar */ + proxd_iovsize = 0; + p_proxd_iov = rtt_alloc_getset_buf(dhd, method, session_id, p_subcmd_info->cmdid, + 0, &proxd_iovsize); /* no TLV */ + if (p_proxd_iov == NULL) + return BCME_NOMEM; + + /* no TLV to pack, simply issue a set-proxd iovar */ + ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, proxd_iovsize, NULL, 0, TRUE); +#ifdef RTT_DEBUG + if (ret != BCME_OK) { + DHD_RTT(("error: IOVAR failed, status=%d\n", ret)); + } +#endif + /* clean up */ + MFREE(dhd->osh, p_proxd_iov, proxd_iovsize); + + return ret; +} +#endif /* WL_CFG80211 */ + +/* gets the length and returns the version + * of the wl_proxd_collect_event_t version + */ +static uint +rtt_collect_data_event_ver(uint16 len) +{ + if (len > sizeof(wl_proxd_collect_event_data_v3_t)) { + return WL_PROXD_COLLECT_EVENT_DATA_VERSION_MAX; + } else if (len == sizeof(wl_proxd_collect_event_data_v4_t)) { + return WL_PROXD_COLLECT_EVENT_DATA_VERSION_4; + } else if (len == sizeof(wl_proxd_collect_event_data_v3_t)) { + return WL_PROXD_COLLECT_EVENT_DATA_VERSION_3; + } else if (len == sizeof(wl_proxd_collect_event_data_v2_t)) { + return WL_PROXD_COLLECT_EVENT_DATA_VERSION_2; + } else { + return WL_PROXD_COLLECT_EVENT_DATA_VERSION_1; + } +} + +static void +rtt_collect_event_data_display(uint8 ver, void *ctx, const uint8 *p_data, uint16 len) +{ + int i; + wl_proxd_collect_event_data_v1_t *p_collect_data_v1 = NULL; + wl_proxd_collect_event_data_v2_t *p_collect_data_v2 = NULL; + wl_proxd_collect_event_data_v3_t *p_collect_data_v3 = NULL; + wl_proxd_collect_event_data_v4_t *p_collect_data_v4 = NULL; + + if (!ctx || !p_data) { + return; + } + + switch (ver) { + case WL_PROXD_COLLECT_EVENT_DATA_VERSION_1: + DHD_RTT(("\tVERSION_1\n")); + memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v1_t)); + p_collect_data_v1 = (wl_proxd_collect_event_data_v1_t *)ctx; + DHD_RTT(("\tH_RX\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v1->H_RX[i] = ltoh32_ua(&p_collect_data_v1->H_RX[i]); + DHD_RTT(("\t%u\n", p_collect_data_v1->H_RX[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tH_LB\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v1->H_LB[i] = ltoh32_ua(&p_collect_data_v1->H_LB[i]); + DHD_RTT(("\t%u\n", p_collect_data_v1->H_LB[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tri_rr\n")); + for (i = 0; i < FTM_TPK_RI_RR_LEN; i++) { + DHD_RTT(("\t%u\n", p_collect_data_v1->ri_rr[i])); + } + p_collect_data_v1->phy_err_mask = ltoh32_ua(&p_collect_data_v1->phy_err_mask); + DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v1->phy_err_mask)); + break; + case WL_PROXD_COLLECT_EVENT_DATA_VERSION_2: + memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v2_t)); + p_collect_data_v2 = (wl_proxd_collect_event_data_v2_t *)ctx; + DHD_RTT(("\tH_RX\n")); + for (i = 0; i < 
K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v2->H_RX[i] = ltoh32_ua(&p_collect_data_v2->H_RX[i]); + DHD_RTT(("\t%u\n", p_collect_data_v2->H_RX[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tH_LB\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v2->H_LB[i] = ltoh32_ua(&p_collect_data_v2->H_LB[i]); + DHD_RTT(("\t%u\n", p_collect_data_v2->H_LB[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tri_rr\n")); + for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) { + DHD_RTT(("\t%u\n", p_collect_data_v2->ri_rr[i])); + } + p_collect_data_v2->phy_err_mask = ltoh32_ua(&p_collect_data_v2->phy_err_mask); + DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v2->phy_err_mask)); + break; + case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3: + memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v3_t)); + p_collect_data_v3 = (wl_proxd_collect_event_data_v3_t *)ctx; + switch (p_collect_data_v3->version) { + case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3: + if (p_collect_data_v3->length != + (len - OFFSETOF(wl_proxd_collect_event_data_v3_t, H_LB))) { + DHD_RTT(("\tversion/length mismatch\n")); + break; + } + DHD_RTT(("\tH_RX\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v3->H_RX[i] = + ltoh32_ua(&p_collect_data_v3->H_RX[i]); + DHD_RTT(("\t%u\n", p_collect_data_v3->H_RX[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tH_LB\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v3->H_LB[i] = + ltoh32_ua(&p_collect_data_v3->H_LB[i]); + DHD_RTT(("\t%u\n", p_collect_data_v3->H_LB[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tri_rr\n")); + for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) { + DHD_RTT(("\t%u\n", p_collect_data_v3->ri_rr[i])); + } + p_collect_data_v3->phy_err_mask = + ltoh32_ua(&p_collect_data_v3->phy_err_mask); + DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v3->phy_err_mask)); + break; + /* future case */ + } + break; + case WL_PROXD_COLLECT_EVENT_DATA_VERSION_4: + memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v4_t)); + p_collect_data_v4 = (wl_proxd_collect_event_data_v4_t *)ctx; + switch (p_collect_data_v4->version) { + case WL_PROXD_COLLECT_EVENT_DATA_VERSION_4: + if (p_collect_data_v4->length != + (len - OFFSETOF(wl_proxd_collect_event_data_v4_t, H_LB))) { + DHD_RTT(("\tversion/length mismatch\n")); + break; + } + DHD_RTT(("\tH_RX\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v4->H_RX[i] = + ltoh32_ua(&p_collect_data_v4->H_RX[i]); + DHD_RTT(("\t%u\n", p_collect_data_v4->H_RX[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tH_LB\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data_v4->H_LB[i] = + ltoh32_ua(&p_collect_data_v4->H_LB[i]); + DHD_RTT(("\t%u\n", p_collect_data_v4->H_LB[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tri_rr\n")); + for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0_5G; i++) { + DHD_RTT(("\t%u\n", p_collect_data_v4->ri_rr[i])); + } + p_collect_data_v4->phy_err_mask = + ltoh32_ua(&p_collect_data_v4->phy_err_mask); + DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v4->phy_err_mask)); + break; + /* future case */ + } + break; + } +} + +static uint16 +rtt_result_ver(uint16 tlvid, const uint8 *p_data) +{ + uint16 ret = BCME_OK; + const wl_proxd_rtt_result_v2_t *r_v2 = NULL; + + switch (tlvid) { + case WL_PROXD_TLV_ID_RTT_RESULT: + BCM_REFERENCE(p_data); + ret = WL_PROXD_RTT_RESULT_VERSION_1; + break; + case WL_PROXD_TLV_ID_RTT_RESULT_V2: + if (p_data) { + r_v2 = (const wl_proxd_rtt_result_v2_t *)p_data; + if (r_v2->version == WL_PROXD_RTT_RESULT_VERSION_2) { + ret = 
WL_PROXD_RTT_RESULT_VERSION_2; + } + } + break; + default: + DHD_RTT_ERR(("%s: > Unsupported TLV ID %d\n", + __FUNCTION__, tlvid)); + break; + } + return ret; +} + +/* pretty hex print a contiguous buffer */ +static void +rtt_prhex(const char *msg, const uint8 *buf, uint nbytes) +{ + char line[128], *p; + int len = sizeof(line); + int nchar; + uint i; + + if (msg && (msg[0] != '\0')) + DHD_RTT(("%s:\n", msg)); + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + nchar = snprintf(p, len, " %04d: ", i); /* line prefix */ + p += nchar; + len -= nchar; + } + if (len > 0) { + nchar = snprintf(p, len, "%02x ", buf[i]); + p += nchar; + len -= nchar; + } + + if (i % 16 == 15) { + DHD_RTT(("%s\n", line)); /* flush line */ + p = line; + len = sizeof(line); + } + } + + /* flush last partial line */ + if (p != line) + DHD_RTT(("%s\n", line)); +} + +static int +rtt_unpack_xtlv_cbfn(void *ctx, const uint8 *p_data, uint16 tlvid, uint16 len) +{ + int ret = BCME_OK; + int i; + wl_proxd_ftm_session_status_t *p_data_info = NULL; + uint32 chan_data_entry = 0; + uint16 expected_rtt_result_ver = 0; + + BCM_REFERENCE(p_data_info); + + switch (tlvid) { + case WL_PROXD_TLV_ID_RTT_RESULT: + case WL_PROXD_TLV_ID_RTT_RESULT_V2: + DHD_RTT(("WL_PROXD_TLV_ID_RTT_RESULT\n")); + expected_rtt_result_ver = rtt_result_ver(tlvid, p_data); + switch (expected_rtt_result_ver) { + case WL_PROXD_RTT_RESULT_VERSION_1: + ret = dhd_rtt_convert_results_to_host_v1((rtt_result_t *)ctx, + p_data, tlvid, len); + break; + case WL_PROXD_RTT_RESULT_VERSION_2: + ret = dhd_rtt_convert_results_to_host_v2((rtt_result_t *)ctx, + p_data, tlvid, len); + break; + default: + DHD_RTT_ERR((" > Unsupported RTT_RESULT version\n")); + ret = BCME_UNSUPPORTED; + break; + } + break; + case WL_PROXD_TLV_ID_SESSION_STATUS: + DHD_RTT(("WL_PROXD_TLV_ID_SESSION_STATUS\n")); + memcpy(ctx, p_data, sizeof(wl_proxd_ftm_session_status_t)); + p_data_info = (wl_proxd_ftm_session_status_t *)ctx; + p_data_info->sid = ltoh16_ua(&p_data_info->sid); + p_data_info->state = ltoh16_ua(&p_data_info->state); + p_data_info->status = ltoh32_ua(&p_data_info->status); + p_data_info->burst_num = ltoh16_ua(&p_data_info->burst_num); + p_data_info->core_info = ltoh16_ua(&p_data_info->core_info); + DHD_RTT(("\tsid=%u, state=%d, status=%d, burst_num=%u\n", + p_data_info->sid, p_data_info->state, + p_data_info->status, p_data_info->burst_num)); + DHD_RTT(("\tnum_cores=%u, core=%u\n", (p_data_info->core_info & 0xFF), + (p_data_info->core_info >> 8u & 0xFF))); + + break; + case WL_PROXD_TLV_ID_COLLECT_DATA: + DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_DATA\n")); + /* we do not have handle to wl in the context of + * xtlv callback without changing the xtlv API. 
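+		 * Here the payload is simply byte-order fixed and pretty-printed
+		 * to the RTT debug log (see rtt_collect_event_data_display()).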
+ */ + rtt_collect_event_data_display( + rtt_collect_data_event_ver(len), + ctx, p_data, len); + break; + case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA: + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n")); + DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32)))); + for (i = 0; (uint16)i < (uint16)(len/sizeof(chan_data_entry)); i++) { + uint32 *p = (uint32*)p_data; + chan_data_entry = ltoh32_ua(p + i); + DHD_RTT(("\t%u\n", chan_data_entry)); + } + GCC_DIAGNOSTIC_POP(); + break; + case WL_PROXD_TLV_ID_MF_STATS_DATA: + DHD_RTT(("WL_PROXD_TLV_ID_MF_STATS_DATA\n")); + DHD_RTT(("\tmf stats len=%u\n", len)); + rtt_prhex("", p_data, len); + break; + default: + DHD_RTT_ERR(("> Unsupported TLV ID %d\n", tlvid)); + ret = BCME_ERROR; + break; + } + + return ret; +} + +#ifdef WL_CFG80211 +static int +rtt_handle_config_options(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv, + uint16 *p_buf_space_left, ftm_config_options_info_t *ftm_configs, int ftm_cfg_cnt) +{ + int ret = BCME_OK; + int cfg_idx = 0; + uint32 flags = WL_PROXD_FLAG_NONE; + uint32 flags_mask = WL_PROXD_FLAG_NONE; + uint32 new_mask; /* cmdline input */ + ftm_config_options_info_t *p_option_info; + uint16 type = (session_id == WL_PROXD_SESSION_ID_GLOBAL) ? + WL_PROXD_TLV_ID_FLAGS_MASK : WL_PROXD_TLV_ID_SESSION_FLAGS_MASK; + for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) { + p_option_info = (ftm_configs + cfg_idx); + if (p_option_info != NULL) { + new_mask = p_option_info->flags; + /* update flags mask */ + flags_mask |= new_mask; + if (p_option_info->enable) { + flags |= new_mask; /* set the bit on */ + } else { + flags &= ~new_mask; /* set the bit off */ + } + } + } + flags = htol32(flags); + flags_mask = htol32(flags_mask); + /* setup flags_mask TLV */ + ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left, + type, sizeof(uint32), (uint8 *)&flags_mask, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n", + __FUNCTION__, ret)); + goto exit; + } + + type = (session_id == WL_PROXD_SESSION_ID_GLOBAL)? 
+ WL_PROXD_TLV_ID_FLAGS : WL_PROXD_TLV_ID_SESSION_FLAGS; + /* setup flags TLV */ + ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left, + type, sizeof(uint32), (uint8 *)&flags, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { +#ifdef RTT_DEBUG + DHD_RTT(("%s: bcm_pack_xltv_entry() for flags failed, status=%d\n", + __FUNCTION__, ret)); +#endif + } +exit: + return ret; +} + +static int +rtt_handle_config_general(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv, + uint16 *p_buf_space_left, ftm_config_param_info_t *ftm_configs, int ftm_cfg_cnt) +{ + int ret = BCME_OK; + int cfg_idx = 0; + uint32 chanspec; + ftm_config_param_info_t *p_config_param_info; + void *p_src_data; + uint16 src_data_size; /* size of data pointed by p_src_data as 'source' */ + for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) { + p_config_param_info = (ftm_configs + cfg_idx); + if (p_config_param_info != NULL) { + switch (p_config_param_info->tlvid) { + case WL_PROXD_TLV_ID_BSS_INDEX: + case WL_PROXD_TLV_ID_FTM_RETRIES: + case WL_PROXD_TLV_ID_FTM_REQ_RETRIES: + p_src_data = &p_config_param_info->data8; + src_data_size = sizeof(uint8); + break; + case WL_PROXD_TLV_ID_BURST_NUM_FTM: /* uint16 */ + case WL_PROXD_TLV_ID_NUM_BURST: + case WL_PROXD_TLV_ID_RX_MAX_BURST: + p_src_data = &p_config_param_info->data16; + src_data_size = sizeof(uint16); + break; + case WL_PROXD_TLV_ID_TX_POWER: /* uint32 */ + case WL_PROXD_TLV_ID_RATESPEC: + case WL_PROXD_TLV_ID_EVENT_MASK: /* wl_proxd_event_mask_t/uint32 */ + case WL_PROXD_TLV_ID_DEBUG_MASK: + p_src_data = &p_config_param_info->data32; + src_data_size = sizeof(uint32); + break; + case WL_PROXD_TLV_ID_CHANSPEC: /* chanspec_t --> 32bit */ + chanspec = p_config_param_info->chanspec; + p_src_data = (void *) &chanspec; + src_data_size = sizeof(uint32); + break; + case WL_PROXD_TLV_ID_BSSID: /* mac address */ + case WL_PROXD_TLV_ID_PEER_MAC: + case WL_PROXD_TLV_ID_CUR_ETHER_ADDR: + p_src_data = &p_config_param_info->mac_addr; + src_data_size = sizeof(struct ether_addr); + break; + case WL_PROXD_TLV_ID_BURST_DURATION: /* wl_proxd_intvl_t */ + case WL_PROXD_TLV_ID_BURST_PERIOD: + case WL_PROXD_TLV_ID_BURST_FTM_SEP: + case WL_PROXD_TLV_ID_BURST_TIMEOUT: + case WL_PROXD_TLV_ID_INIT_DELAY: + p_src_data = &p_config_param_info->data_intvl; + src_data_size = sizeof(wl_proxd_intvl_t); + break; + default: + ret = BCME_BADARG; + break; + } + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s bad TLV ID : %d\n", + __FUNCTION__, p_config_param_info->tlvid)); + break; + } + + ret = bcm_pack_xtlv_entry((uint8 **) p_tlv, p_buf_space_left, + p_config_param_info->tlvid, src_data_size, (uint8 *)p_src_data, + BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s: bcm_pack_xltv_entry() failed," + " status=%d\n", __FUNCTION__, ret)); + break; + } + + } + } + return ret; +} + +static int +dhd_rtt_ftm_enable(dhd_pub_t *dhd, bool enable) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = (enable)? "enable" : "disable"; + subcmd_info.cmdid = (enable)? WL_PROXD_CMD_ENABLE: WL_PROXD_CMD_DISABLE; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL); +} + +static int +dhd_rtt_start_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, bool start) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = (start)? "start session" : "stop session"; + subcmd_info.cmdid = (start)? 
WL_PROXD_CMD_START_SESSION: WL_PROXD_CMD_STOP_SESSION; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, session_id); +} + +static int +dhd_rtt_delete_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = "delete session"; + subcmd_info.cmdid = WL_PROXD_CMD_DELETE_SESSION; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, session_id); +} +#ifdef WL_NAN +int +dhd_rtt_delete_nan_session(dhd_pub_t *dhd) +{ + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct wireless_dev *wdev = ndev_to_wdev(dev); + struct wiphy *wiphy = wdev->wiphy; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + wl_cfgnan_terminate_directed_rtt_sessions(dev, cfg); + return BCME_OK; +} +/* API to find out if the given Peer Mac from FTM events +* is nan-peer. Based on this we will handle the SESSION_END +* event. For nan-peer FTM_SESSION_END event is ignored and handled in +* nan-ranging-cancel or nan-ranging-end event. +*/ +static bool +dhd_rtt_is_nan_peer(dhd_pub_t *dhd, struct ether_addr *peer_mac) +{ + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct wireless_dev *wdev = ndev_to_wdev(dev); + struct wiphy *wiphy = wdev->wiphy; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + nan_ranging_inst_t *ranging_inst = NULL; + bool ret = FALSE; + + if ((wl_cfgnan_is_enabled(cfg) == FALSE) || ETHER_ISNULLADDR(peer_mac)) { + goto exit; + } + + ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_mac); + if (ranging_inst) { + DHD_RTT((" RTT peer is of type NAN\n")); + ret = TRUE; + goto exit; + } +exit: + return ret; +} + +bool +dhd_rtt_nan_is_directed_setup_in_prog(dhd_pub_t *dhd) +{ + bool setup_in_prog; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + setup_in_prog = rtt_status->directed_cfg.directed_setup_status.directed_na_setup_inprog; + + return setup_in_prog; +} + +bool +dhd_rtt_nan_is_directed_setup_in_prog_with_peer(dhd_pub_t *dhd, + struct ether_addr *peer) +{ + bool setup_in_prog = TRUE; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + nan_ranging_inst_t *ranging_inst = NULL; + + if (!dhd_rtt_nan_is_directed_setup_in_prog(dhd)) { + setup_in_prog = FALSE; + goto exit; + } + + ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer); + + if ((ranging_inst == NULL) || + (ranging_inst != rtt_status->directed_cfg.directed_setup_status.rng_inst)) { + setup_in_prog = FALSE; + } + +exit: + return setup_in_prog; +} + +void +dhd_rtt_nan_update_directed_setup_inprog(dhd_pub_t *dhd, + nan_ranging_inst_t *rng_inst, bool inprog) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + rtt_status->directed_cfg.directed_setup_status.directed_na_setup_inprog = inprog; + if (inprog) { + ASSERT(rng_inst); + rtt_status->directed_cfg.directed_setup_status.rng_inst = rng_inst; + } else { + rtt_status->directed_cfg.directed_setup_status.rng_inst = NULL; + } +} + +bool +dhd_rtt_nan_directed_sessions_allowed(dhd_pub_t *dhd) +{ + int max_sessions = 0; + bool allowed = TRUE; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + max_sessions = rtt_status->max_nan_rtt_sessions; + + if (dhd_rtt_nan_is_directed_setup_in_prog(dhd)) { + max_sessions--; + } + + if (rtt_status->directed_cfg.directed_sessions_cnt >= max_sessions) { + allowed = FALSE; + } + + return allowed; +} + +bool 
+dhd_rtt_nan_all_directed_sessions_triggered(dhd_pub_t *dhd)
+{
+	bool done;
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	if ((rtt_status->cur_idx + 1) >= rtt_status->rtt_config.rtt_target_cnt) {
+		done = TRUE;
+	} else {
+		done = FALSE;
+	}
+
+	return done;
+}
+
+void
+dhd_rtt_nan_update_directed_sessions_cnt(dhd_pub_t *dhd, bool incr)
+{
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	if (incr) {
+		rtt_status->directed_cfg.directed_sessions_cnt++;
+	} else {
+		rtt_status->directed_cfg.directed_sessions_cnt--;
+	}
+}
+
+static void
+dhd_rtt_event_trigger_failure(dhd_pub_t *dhd, rtt_target_info_t *rtt_target)
+{
+	wl_event_msg_t msg;
+
+	struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+	wl_proxd_event_t p_event;
+
+	bzero(&msg, sizeof(wl_event_msg_t));
+	bzero(&p_event, sizeof(p_event));
+
+	msg.event_type = hton32(WLC_E_PROXD);
+	msg.bsscfgidx = 0;
+	msg.datalen = hton32(sizeof(p_event));
+	msg.addr = rtt_target->addr;
+
+	p_event.version = htol16(WL_PROXD_API_VERSION);
+	p_event.type = htol16(WL_PROXD_EVENT_SESSION_END);
+	p_event.len = htol16(OFFSETOF(wl_proxd_event_t, tlvs));
+
+	wl_cfg80211_event(dev, &msg, &p_event);
+}
+
+static int
+dhd_rtt_nan_start_session(dhd_pub_t *dhd, rtt_target_info_t *rtt_target)
+{
+	s32 err = BCME_OK;
+	struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+	struct wireless_dev *wdev = ndev_to_wdev(dev);
+	struct wiphy *wiphy = wdev->wiphy;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	nan_ranging_inst_t *ranging_inst = NULL;
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	NAN_MUTEX_LOCK();
+
+	if (!rtt_status) {
+		err = BCME_NOTENABLED;
+		goto done;
+	}
+
+	if (!wl_cfgnan_is_enabled(cfg)) { /* If nan is not enabled report error */
+		err = BCME_NOTENABLED;
+		goto done;
+	}
+
+	/* Below scenarios should be avoided by callers/schedulers */
+	if (dhd_rtt_nan_is_directed_setup_in_prog(dhd)) {
+		DHD_RTT_ERR(("dhd_rtt_nan_start_session failed, setup already in prog\n"));
+		err = BCME_ERROR;
+		ASSERT(0);
+		goto done;
+	}
+
+	if (!dhd_rtt_nan_directed_sessions_allowed(dhd)) {
+		DHD_RTT_ERR(("dhd_rtt_nan_start_session failed, already max sessions running\n"));
+		err = BCME_ERROR;
+		ASSERT(0);
+		goto done;
+	}
+
+	ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
+		&rtt_target->addr, NAN_RANGING_ROLE_INITIATOR);
+	if (!ranging_inst) {
+		err = BCME_NORESOURCE;
+		goto done;
+	}
+
+	DHD_RTT(("Trigger nan based range request\n"));
+	err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
+		cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD, TRUE);
+	if (unlikely(err)) {
+		goto done;
+	}
+	ranging_inst->range_type = RTT_TYPE_NAN_DIRECTED;
+	ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+	dhd_rtt_nan_update_directed_setup_inprog(dhd, ranging_inst, TRUE);
+
+done:
+	if (err) {
+		DHD_RTT_ERR(("Failed to issue Nan Ranging Request err %d\n", err));
+		/* Fake session end event which will help in
+		 * scheduling the new target in the deferred context instead of here,
+		 * i.e. avoid scheduling work from itself
+		 */
+		dhd_rtt_event_trigger_failure(dhd, rtt_target);
+	}
+	NAN_MUTEX_UNLOCK();
+	return err;
+}
+#endif /* WL_NAN */
+
+static int
+dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id,
+	ftm_config_category_t catagory, void *ftm_configs, int ftm_cfg_cnt)
+{
+	ftm_subcmd_info_t subcmd_info;
+	wl_proxd_tlv_t *p_tlv;
+	/* alloc mem for ioctl header + reserved 0 bufsize for tlvs (initialize to zero) */
+	wl_proxd_iov_t *p_proxd_iov;
+	uint16 proxd_iovsize = 0;
+	uint16 bufsize;
+	uint16 buf_space_left;
+	
uint16 all_tlvsize; + int ret = BCME_OK; + + subcmd_info.name = "config"; + subcmd_info.cmdid = WL_PROXD_CMD_CONFIG; + + p_proxd_iov = rtt_alloc_getset_buf(dhd, WL_PROXD_METHOD_FTM, session_id, subcmd_info.cmdid, + FTM_IOC_BUFSZ, &proxd_iovsize); + + if (p_proxd_iov == NULL) { + DHD_RTT_ERR(("%s : failed to allocate the iovar (size :%d)\n", + __FUNCTION__, FTM_IOC_BUFSZ)); + return BCME_NOMEM; + } + /* setup TLVs */ + bufsize = proxd_iovsize - WL_PROXD_IOV_HDR_SIZE; /* adjust available size for TLVs */ + p_tlv = &p_proxd_iov->tlvs[0]; + /* TLV buffer starts with a full size, will decrement for each packed TLV */ + buf_space_left = bufsize; + if (catagory == FTM_CONFIG_CAT_OPTIONS) { + ret = rtt_handle_config_options(session_id, &p_tlv, &buf_space_left, + (ftm_config_options_info_t *)ftm_configs, ftm_cfg_cnt); + } else if (catagory == FTM_CONFIG_CAT_GENERAL) { + ret = rtt_handle_config_general(session_id, &p_tlv, &buf_space_left, + (ftm_config_param_info_t *)ftm_configs, ftm_cfg_cnt); + } + if (ret == BCME_OK) { + /* update the iov header, set len to include all TLVs + header */ + all_tlvsize = (bufsize - buf_space_left); + p_proxd_iov->len = htol16(all_tlvsize + WL_PROXD_IOV_HDR_SIZE); + ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, + all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s : failed to set config err %d\n", __FUNCTION__, ret)); + } + } + /* clean up */ + MFREE(dhd->osh, p_proxd_iov, proxd_iovsize); + return ret; +} + +static int +dhd_rtt_get_version(dhd_pub_t *dhd, int *out_version) +{ + int ret; + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = "ver"; + subcmd_info.cmdid = WL_PROXD_CMD_GET_VERSION; + subcmd_info.handler = NULL; + ret = dhd_rtt_common_get_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL); + *out_version = (ret == BCME_OK) ? 
subcmd_info.version : 0;
+	return ret;
+}
+#endif /* WL_CFG80211 */
+
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info channel)
+{
+	int bw;
+	chanspec_t chanspec = 0;
+	uint8 center_chan;
+	uint8 primary_chan;
+	/* set width to 20MHz for the 2.4GHz band */
+	if (channel.center_freq >= 2400 && channel.center_freq <= 2500) {
+		channel.width = WIFI_CHAN_WIDTH_20;
+	}
+	switch (channel.width) {
+	case WIFI_CHAN_WIDTH_20:
+		bw = WL_CHANSPEC_BW_20;
+		primary_chan = wf_mhz2channel(channel.center_freq, 0);
+		chanspec = wf_channel2chspec(primary_chan, bw);
+		break;
+	case WIFI_CHAN_WIDTH_40:
+		bw = WL_CHANSPEC_BW_40;
+		primary_chan = wf_mhz2channel(channel.center_freq, 0);
+		chanspec = wf_channel2chspec(primary_chan, bw);
+		break;
+	case WIFI_CHAN_WIDTH_80:
+		bw = WL_CHANSPEC_BW_80;
+		primary_chan = wf_mhz2channel(channel.center_freq, 0);
+		center_chan = wf_mhz2channel(channel.center_freq0, 0);
+		chanspec = wf_chspec_80(center_chan, primary_chan);
+		break;
+	default:
+		DHD_RTT_ERR(("doesn't support this bandwidth: %d", channel.width));
+		bw = -1;
+		break;
+	}
+	return chanspec;
+}
+
+int
+dhd_rtt_idx_to_burst_duration(uint idx)
+{
+	if (idx >= ARRAY_SIZE(burst_duration_idx)) {
+		return -1;
+	}
+	return burst_duration_idx[idx];
+}
+
+int8
+dhd_rtt_get_cur_target_idx(dhd_pub_t *dhd)
+{
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	return rtt_status->cur_idx;
+}
+
+int8
+dhd_rtt_set_next_target_idx(dhd_pub_t *dhd, int start_idx)
+{
+	int idx;
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	for (idx = start_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+		/* skip the disabled device */
+		if (rtt_status->rtt_config.target_info[idx].disable) {
+			continue;
+		} else {
+			/* set the idx to cur_idx */
+			rtt_status->cur_idx = idx;
+			break;
+		}
+	}
+
+	if (idx == rtt_status->rtt_config.rtt_target_cnt) {
+		/* All targets triggered */
+		rtt_status->cur_idx = rtt_status->rtt_config.rtt_target_cnt;
+	}
+
+	return (int8)rtt_status->cur_idx;
+}
+
+void
+dhd_rtt_set_target_list_mode(dhd_pub_t *dhd)
+{
+	int8 idx;
+	bool legacy = FALSE, nan = FALSE;
+	rtt_target_info_t *rtt_target;
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_INVALID;
+	for (idx = rtt_status->start_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+		rtt_target = &rtt_status->rtt_config.target_info[idx];
+		/* skip the disabled device */
+		if (rtt_target->disable) {
+			continue;
+		} else {
+			if (rtt_target->peer == RTT_PEER_NAN) {
+				nan = TRUE;
+			} else {
+				legacy = TRUE;
+			}
+		}
+	}
+
+	if ((nan == TRUE) && (legacy == TRUE)) {
+		rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_MIX;
+	} else if (nan == TRUE) {
+		rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_NAN;
+	} else if (legacy == TRUE) {
+		rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_LEGACY;
+	}
+}
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params)
+{
+	int err = BCME_OK;
+	rtt_status_info_t *rtt_status = NULL;
+	struct net_device *dev = NULL;
+
+	NULL_CHECK(params, "params is NULL", err);
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	dev = dhd_linux_get_primary_netdev(dhd);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	NULL_CHECK(dev, "dev is NULL", err);
+
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+
+	if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) {
+		DHD_RTT_ERR(("doesn't support RTT\n"));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+	/* check if work is being scheduled; cancel/sync if so.
+	/*
+	 * Check if work is being scheduled; cancel/sync if so.
+	 * A host request has higher priority, so we have to cancel any
+	 * geofence sessions in progress. For that we need to make sure the
+	 * work queue is idle and then cancel the geofence sessions.
+	 */
+	cancel_work_sync(&rtt_status->work);
+
+	mutex_lock(&rtt_status->rtt_mutex);
+
+	if (rtt_status->status != RTT_STOPPED) {
+		DHD_RTT_ERR(("rtt is already started, status : %d\n", rtt_status->status));
+		err = BCME_BUSY;
+		goto exit;
+	}
+	if (params->rtt_target_cnt > 0) {
+#ifdef WL_NAN
+		/* cancel ongoing geofence RTT for both initiators and responders */
+		wl_cfgnan_suspend_all_geofence_rng_sessions(dev,
+			RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG, 0);
+#endif /* WL_NAN */
+	} else {
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+	rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt;
+	memcpy(rtt_status->rtt_config.target_info,
+		params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt));
+	rtt_status->status = RTT_STARTED;
+	DHD_RTT_MEM(("dhd_rtt_set_cfg: RTT Started, target_cnt = %d\n", params->rtt_target_cnt));
+
+	/* This is the starting Directed RTT index */
+	rtt_status->start_idx = dhd_rtt_set_next_target_idx(dhd, 0);
+
+	dhd_rtt_set_target_list_mode(dhd);
+
+	if (rtt_status->cur_idx < rtt_status->rtt_config.rtt_target_cnt) {
+#ifdef WL_NAN
+		if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_NAN) {
+			/* reset directed cfg params */
+			dhd_rtt_nan_update_directed_setup_inprog(dhd, NULL, FALSE);
+			rtt_status->directed_cfg.directed_sessions_cnt = 0;
+
+			/*
+			 * Schedule the proxd timeout.
+			 * The proxd timeout for a NAN target list is scheduled
+			 * as a whole, not per target, unlike for a legacy
+			 * target list.
+			 */
+			schedule_delayed_work(&rtt_status->proxd_timeout,
+				msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+		}
+#endif /* WL_NAN */
+		/* schedule RTT */
+		dhd_rtt_schedule_rtt_work_thread(dhd, RTT_SCHED_HOST_TRIGGER);
+	}
+exit:
+	mutex_unlock(&rtt_status->rtt_mutex);
+	return err;
+}
+
+#ifdef WL_NAN
+void
+dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd)
+{
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+	if (!rtt_status) {
+		return;
+	}
+
+	GEOFENCE_RTT_LOCK(rtt_status);
+	memset_s(&rtt_status->geofence_cfg, sizeof(rtt_status->geofence_cfg),
+		0, sizeof(rtt_status->geofence_cfg));
+
+	/* initialize non-zero params of geofence cfg */
+	rtt_status->geofence_cfg.cur_target_idx = DHD_RTT_INVALID_TARGET_INDEX;
+	rtt_status->geofence_cfg.geofence_rtt_interval = DHD_RTT_RETRY_TIMER_INTERVAL_MS;
+	rtt_status->geofence_cfg.geofence_sessions_cnt = 0;
+
+	rtt_status->geofence_cfg.max_geofence_sessions =
+		dhd_rtt_get_max_nan_rtt_sessions_supported(dhd);
+	GEOFENCE_RTT_UNLOCK(rtt_status);
+	return;
+}
+
+#ifdef RTT_GEOFENCE_CONT
+void
+dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont)
+{
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+	if (!rtt_status) {
+		return;
+	}
+	GEOFENCE_RTT_LOCK(rtt_status);
+	*geofence_cont = rtt_status->geofence_cfg.geofence_cont;
+	GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+
+void
+dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont)
+{
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+	if (!rtt_status) {
+		return;
+	}
+	GEOFENCE_RTT_LOCK(rtt_status);
+	rtt_status->geofence_cfg.geofence_cont = geofence_cont;
+	DHD_RTT(("dhd_rtt_set_geofence_cont_ind, geofence_cont = %d\n",
+		rtt_status->geofence_cfg.geofence_cont));
+	GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+#endif /* RTT_GEOFENCE_CONT */
+
+#ifdef RTT_GEOFENCE_INTERVAL
+void
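+/*
+ * Illustrative note on the setter below: it overrides the geofence RTT
+ * interval that dhd_rtt_initialize_geofence_cfg() above seeds with
+ * DHD_RTT_RETRY_TIMER_INTERVAL_MS; the value is in milliseconds.
+ */
+#if 0	/* illustrative usage only, never compiled */
+	/* e.g. slow geofence ranging down to one attempt every 5 seconds */
+	dhd_rtt_set_geofence_rtt_interval(dhd, 5000);
+#endif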
+dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return; + } + GEOFENCE_RTT_LOCK(rtt_status); + rtt_status->geofence_cfg.geofence_rtt_interval = interval; + DHD_RTT(("dhd_rtt_set_geofence_rtt_interval: geofence interval = %d\n", + rtt_status->geofence_cfg.geofence_rtt_interval)); + GEOFENCE_RTT_UNLOCK(rtt_status); +} +#endif /* RTT_GEOFENCE_INTERVAL */ + +int +dhd_rtt_get_geofence_max_sessions(dhd_pub_t *dhd) +{ + int max_sessions; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg; + + max_sessions = geofence_cfg->max_geofence_sessions; + if (dhd_rtt_is_geofence_setup_inprog(dhd)) { + /* One slot busy with setup in prog */ + max_sessions -= 1; + } + + return max_sessions; +} + +/* + * Return True, if geofence + * session count maxed out + */ +bool +dhd_rtt_geofence_sessions_maxed_out(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg; + bool ret = TRUE; + + if (geofence_cfg->geofence_sessions_cnt < + dhd_rtt_get_geofence_max_sessions(dhd)) { + ret = FALSE; + } + + return ret; +} + +int +dhd_rtt_get_geofence_sessions_cnt(dhd_pub_t *dhd) +{ + + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg; + + return geofence_cfg->geofence_sessions_cnt; +} + +int +dhd_rtt_update_geofence_sessions_cnt(dhd_pub_t *dhd, bool incr, + struct ether_addr *peer_addr) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg; + int ret = BCME_OK; + + if (incr) { + //ASSERT(!dhd_rtt_geofence_sessions_maxed_out(dhd)); + if (dhd_rtt_geofence_sessions_maxed_out(dhd)) { + ret = BCME_ERROR; + goto exit; + } + geofence_cfg->geofence_sessions_cnt++; + } else { + if (peer_addr && dhd_rtt_is_geofence_setup_inprog_with_peer(dhd, + peer_addr)) { + /* Set geofence RTT in progress state to false */ + dhd_rtt_set_geofence_setup_status(dhd, FALSE, NULL); + } else { + //ASSERT(geofence_cfg->geofence_sessions_cnt > 0); + if (geofence_cfg->geofence_sessions_cnt <= 0) { + ret = BCME_ERROR; + goto exit; + } + /* Decrement session count */ + geofence_cfg->geofence_sessions_cnt--; + } + } + if (peer_addr) { + WL_INFORM_MEM(("session cnt update, upd = %d, cnt = %d, peer : "MACDBG", " + " ret = %d\n", incr, geofence_cfg->geofence_sessions_cnt, + MAC2STRDBG(peer_addr), ret)); + } else { + WL_INFORM_MEM(("session cnt update, upd = %d, cnt = %d, ret = %d\n", + incr, geofence_cfg->geofence_sessions_cnt, ret)); + } + +exit: + return ret; +} + +int8 +dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return 0; + } + return rtt_status->geofence_cfg.geofence_target_cnt; +} + +/* returns geofence RTT target list Head */ +rtt_geofence_target_info_t* +dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_target_info_t* head = NULL; + + if (!rtt_status) { + return NULL; + } + + if (rtt_status->geofence_cfg.geofence_target_cnt) { + head = &rtt_status->geofence_cfg.geofence_target_info[0]; + } + + return head; +} + +int8 +dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd) +{ + int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + if (!rtt_status) { + goto exit; + } + + target_cnt = 
rtt_status->geofence_cfg.geofence_target_cnt;
+	if (target_cnt == 0) {
+		goto exit;
+	}
+
+	cur_idx = rtt_status->geofence_cfg.cur_target_idx;
+	if (cur_idx >= target_cnt) {
+		WL_INFORM_MEM(("dhd_rtt_get_geofence_cur_target_idx: "
+			"cur_index exceeded (>=) target_cnt, cur_idx = %d, "
+			"target_cnt = %d\n", cur_idx, target_cnt));
+		ASSERT(cur_idx < target_cnt);
+		cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+	}
+
+exit:
+	return cur_idx;
+}
+
+void
+dhd_rtt_set_geofence_cur_target_idx(dhd_pub_t *dhd, int8 idx)
+{
+	int8 target_cnt = 0;
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+	ASSERT(idx < target_cnt);
+	rtt_status->geofence_cfg.cur_target_idx = idx;
+	return;
+}
+
+void
+dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd)
+{
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+	if (!rtt_status) {
+		return;
+	}
+
+	if (rtt_status->geofence_cfg.geofence_target_cnt == 0) {
+		/* Invalidate current idx if no targets */
+		rtt_status->geofence_cfg.cur_target_idx =
+			DHD_RTT_INVALID_TARGET_INDEX;
+		/* Cancel pending retry timer if any */
+		if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+			cancel_delayed_work(&rtt_status->rtt_retry_timer);
+		}
+		return;
+	}
+	rtt_status->geofence_cfg.cur_target_idx++;
+
+	if (rtt_status->geofence_cfg.cur_target_idx >=
+		rtt_status->geofence_cfg.geofence_target_cnt) {
+		/* Reset once all targets done */
+		rtt_status->geofence_cfg.cur_target_idx = 0;
+	}
+}
+
+/* returns the current geofence RTT target */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd)
+{
+	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+	rtt_geofence_target_info_t* cur_target = NULL;
+	int cur_idx = 0;
+
+	if (!rtt_status) {
+		return NULL;
+	}
+
+	cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+	if (cur_idx >= 0) {
+		cur_target = &rtt_status->geofence_cfg.geofence_target_info[cur_idx];
+	}
+
+	return cur_target;
+}
+
+/* returns the geofence target from the list for the given peer */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, int8 *index)
+{
+	int8 i;
+	rtt_status_info_t *rtt_status;
+	int target_cnt;
+	rtt_geofence_target_info_t *geofence_target_info, *tgt = NULL;
+
+	rtt_status = GET_RTTSTATE(dhd);
+
+	if (!rtt_status) {
+		return NULL;
+	}
+
+	target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+	geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+
+	/* Loop through to find target */
+	for (i = 0; i < target_cnt; i++) {
+		if (geofence_target_info[i].valid == FALSE) {
+			break;
+		}
+		if (!memcmp(peer_addr, &geofence_target_info[i].peer_addr,
+				ETHER_ADDR_LEN)) {
+			*index = i;
+			tgt = &geofence_target_info[i];
+		}
+	}
+	if (!tgt) {
+		DHD_RTT(("dhd_rtt_get_geofence_target: Target not found in list,"
+			" MAC ADDR: "MACDBG" \n", MAC2STRDBG(peer_addr)));
+	}
+	return tgt;
+}
+
+/* adds a geofence target to the target list */
+int
+dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target)
+{
+	int err = BCME_OK;
+	rtt_status_info_t *rtt_status;
+	rtt_geofence_target_info_t *geofence_target_info;
+	int8 geofence_target_cnt, index;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+	GEOFENCE_RTT_LOCK(rtt_status);
+
+	/* Get the geofence_target via peer addr; the index param is a dummy here */
+	geofence_target_info = dhd_rtt_get_geofence_target(dhd, &target->peer_addr, &index);
+	if (geofence_target_info) {
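+		/*
+		 * Illustrative note: an add request for a peer already in
+		 * the list is treated as success, so from the caller's side
+		 * dhd_rtt_add_geofence_target() is effectively idempotent:
+		 * adding the same peer twice returns BCME_OK both times and
+		 * leaves geofence_target_cnt unchanged the second time.
+		 */
+#if 0		/* illustrative caller-side sketch, never compiled */
+		/* tgt is a hypothetical, fully-filled target descriptor */
+		(void)dhd_rtt_add_geofence_target(dhd, &tgt);	/* cnt -> 1 */
+		(void)dhd_rtt_add_geofence_target(dhd, &tgt);	/* still 1 */
+#endif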
+		DHD_RTT(("Duplicate geofencing RTT add request dropped\n"));
+		err = BCME_OK;
+		goto exit;
+	}
+
+	geofence_target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+	if (geofence_target_cnt >= RTT_MAX_GEOFENCE_TARGET_CNT) {
+		DHD_RTT(("Queue full, Geofencing RTT add request dropped\n"));
+		err = BCME_NORESOURCE;
+		goto exit;
+	}
+
+	/* Add Geofence RTT request and increment target count */
+	geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+	/* src and dest buffer len same, pointers of same DS statically allocated */
+	(void)memcpy_s(&geofence_target_info[geofence_target_cnt],
+		sizeof(geofence_target_info[geofence_target_cnt]), target,
+		sizeof(*target));
+	geofence_target_info[geofence_target_cnt].valid = TRUE;
+	rtt_status->geofence_cfg.geofence_target_cnt++;
+	if (rtt_status->geofence_cfg.geofence_target_cnt == 1) {
+		/* Adding first target */
+		rtt_status->geofence_cfg.cur_target_idx = 0;
+	}
+
+	WL_INFORM_MEM(("dhd_rtt_add_geofence_target: " MACDBG
+		", cur_idx = %d, total cnt = %d\n", MAC2STRDBG(&target->peer_addr),
+		rtt_status->geofence_cfg.cur_target_idx,
+		rtt_status->geofence_cfg.geofence_target_cnt));
+
+exit:
+	GEOFENCE_RTT_UNLOCK(rtt_status);
+	return err;
+}
+
+/* removes a geofence target from the target list */
+int
+dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr)
+{
+	int err = BCME_OK;
+	rtt_status_info_t *rtt_status;
+	rtt_geofence_target_info_t *geofence_target_info;
+	int8 geofence_target_cnt, j, index = 0;
+	struct net_device *dev;
+	struct bcm_cfg80211 *cfg;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+	dev = dhd_linux_get_primary_netdev(dhd);
+	cfg = wl_get_cfg(dev);
+
+	GEOFENCE_RTT_LOCK(rtt_status);
+
+	geofence_target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
+	if (geofence_target_cnt == 0) {
+		DHD_RTT(("Queue Empty, Geofencing RTT remove request dropped\n"));
+		goto exit;
+	}
+
+	/* Get the geofence_target via peer addr */
+	geofence_target_info = dhd_rtt_get_geofence_target(dhd, peer_addr, &index);
+	if (geofence_target_info == NULL) {
+		DHD_RTT(("Geofencing RTT target not found, remove request dropped\n"));
+		err = BCME_NOTFOUND;
+		goto exit;
+	}
+
+	/* left-shift all the valid entries, as we don't keep holes in the list */
+	for (j = index; j < geofence_target_cnt; j++) {
+		/*
+		 * src and dest buffer len same, pointers of same DS
+		 * statically allocated
+		 */
+		if ((j + 1) < geofence_target_cnt) {
+			(void)memcpy_s(&geofence_target_info[j], sizeof(geofence_target_info[j]),
+				&geofence_target_info[j + 1], sizeof(geofence_target_info[j + 1]));
+		} else {
+			/* reset the last target info */
+			bzero(&geofence_target_info[j], sizeof(rtt_geofence_target_info_t));
+		}
+	}
+
+	rtt_status->geofence_cfg.geofence_target_cnt--;
+	if (rtt_status->geofence_cfg.geofence_target_cnt == 0) {
+		rtt_status->geofence_cfg.cur_target_idx =
+			DHD_RTT_INVALID_TARGET_INDEX;
+	} else {
+		if (rtt_status->geofence_cfg.geofence_target_cnt ==
+			rtt_status->geofence_cfg.cur_target_idx) {
+			/*
+			 * Wrap to the first (here, next) target again,
+			 * as the last target, which was also the current
+			 * target (idx), got removed
+			 */
+			rtt_status->geofence_cfg.cur_target_idx = 0;
+		}
+		wl_cfgnan_update_geofence_target_idx(cfg);
+	}
+
+	WL_INFORM_MEM(("dhd_rtt_remove_geofence_target: " MACDBG
+		", cur_idx = %d, target_cnt = %d\n", MAC2STRDBG(peer_addr),
+		rtt_status->geofence_cfg.cur_target_idx,
+		rtt_status->geofence_cfg.geofence_target_cnt));
+
+exit:
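+	/*
+	 * Illustrative worked example for the left-shift compaction above:
+	 * removing index 1 from [A, B, C, D] (cnt = 4) copies C into slot 1,
+	 * D into slot 2, zeroes slot 3, and decrements the count, leaving
+	 * [A, C, D, -] with cnt = 3 and no holes in the list.
+	 */
+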
GEOFENCE_RTT_UNLOCK(rtt_status); + return err; +} + +/* deletes/empty geofence target list */ +int +dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status; + + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + GEOFENCE_RTT_LOCK(rtt_status); + memset_s(&rtt_status->geofence_cfg, sizeof(rtt_geofence_cfg_t), + 0, sizeof(rtt_geofence_cfg_t)); + GEOFENCE_RTT_UNLOCK(rtt_status); + return err; +} + +rtt_geofence_setup_status_t* +dhd_rtt_get_geofence_setup_status(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg; + rtt_geofence_setup_status_t* rng_setup_status; + + rng_setup_status = &geofence_cfg->geofence_setup_status; + return rng_setup_status; +} + +bool +dhd_rtt_is_geofence_setup_inprog(dhd_pub_t *dhd) +{ + rtt_geofence_setup_status_t* rng_setup_status; + + rng_setup_status = dhd_rtt_get_geofence_setup_status(dhd); + + return rng_setup_status->geofence_setup_inprog; +} + +bool +dhd_rtt_is_geofence_setup_inprog_with_peer(dhd_pub_t *dhd, + struct ether_addr *peer_addr) +{ + rtt_geofence_setup_status_t* rng_setup_status; + struct nan_ranging_inst *rng_inst = NULL; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + bool ret = FALSE; + + rng_setup_status = dhd_rtt_get_geofence_setup_status(dhd); + if (rng_setup_status->geofence_setup_inprog == FALSE) { + goto exit; + } + rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr); + if (rng_inst && (rng_inst == rng_setup_status->rng_inst)) { + ret = TRUE; + } + +exit: + return ret; +} + +/* + * Call with inprog as true and corresponding + * rng_inst, to take setup lock, + * call with inprog as False and rng_inst as NULL + * to unlock setup lock + */ +void +dhd_rtt_set_geofence_setup_status(dhd_pub_t *dhd, bool inprog, + struct ether_addr *peer_addr) +{ + struct nan_ranging_inst *rng_inst = NULL; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + rtt_geofence_setup_status_t* rng_setup_status; + + rng_setup_status = dhd_rtt_get_geofence_setup_status(dhd); + rng_setup_status->geofence_setup_inprog = inprog; + if (inprog) { + ASSERT(peer_addr); + rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr); + ASSERT(rng_inst); + if (rng_inst) { + rng_setup_status->rng_inst = rng_inst; + } + } else { + rng_setup_status->rng_inst = NULL; + } +} + +int +dhd_rtt_sched_geofencing_target(dhd_pub_t *dhd) +{ + rtt_geofence_target_info_t *geofence_target_info; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + int ret = BCME_OK; + u8 rtt_invalid_reason = RTT_STATE_VALID; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg; + int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX; + + NAN_MUTEX_LOCK(); + + if (wl_cfgnan_is_enabled(cfg) == FALSE) { + ret = BCME_NOTENABLED; + goto done; + } + + DHD_RTT_ERR(("dhd_rtt_sched_geofencing_target: " + " sched_reason = %d, sessions cnt = %d, cur target idx = %d\n", + rtt_status->rtt_sched_reason, geofence_cfg->geofence_sessions_cnt, + rtt_status->geofence_cfg.cur_target_idx)); + + //ASSERT(!dhd_rtt_geofence_sessions_maxed_out(dhd)); + if (dhd_rtt_geofence_sessions_maxed_out(dhd)) { + ret = BCME_NORESOURCE; + goto done; + } + + target_cnt = dhd_rtt_get_geofence_target_cnt(dhd); + 
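+	/*
+	 * Illustrative capacity arithmetic for the checks above: with
+	 * max_geofence_sessions = 2, one session already up
+	 * (geofence_sessions_cnt = 1) and a setup in progress,
+	 * dhd_rtt_get_geofence_max_sessions() returns 2 - 1 = 1, so
+	 * dhd_rtt_geofence_sessions_maxed_out() sees 1 < 1 fail, reports
+	 * TRUE, and this scheduler bails out with BCME_NORESOURCE.
+	 */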
+	if (target_cnt == 0) {
+		DHD_RTT_MEM(("dhd_rtt_sched_geofencing_target: "
+			"No geofence targets to schedule\n"));
+		goto done;
+	}
+
+	cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+	if (cur_idx == DHD_RTT_INVALID_TARGET_INDEX) {
+		/*
+		 * This can be a valid scenario, as cur_idx might get
+		 * invalidated between the RTT thread being scheduled
+		 * and the thread actually executing
+		 */
+		DHD_RTT_MEM(("dhd_rtt_sched_geofencing_target: "
+			"cur idx is invalid, bail out\n"));
+		goto done;
+	}
+
+	/* Get current geofencing target */
+	geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
+	//ASSERT(geofence_target_info);
+
+	/* call cfg API for triggering geofencing RTT */
+	if (geofence_target_info) {
+		/* check for dp/others concurrency */
+		rtt_invalid_reason = dhd_rtt_invalid_states(dev,
+			&geofence_target_info->peer_addr);
+		if ((rtt_invalid_reason != RTT_STATE_VALID) ||
+			wl_cfgnan_check_role_concurrency(cfg,
+			&geofence_target_info->peer_addr)) {
+			/* TODO: see if we can move to the next target,
+			 * i.e., if the invalid state is due to a DP with the same peer
+			 */
+			ret = BCME_BUSY;
+			DHD_RTT_ERR(("DRV State is not valid for RTT, "
+				"invalid_state = %d\n", rtt_invalid_reason));
+			goto done;
+		}
+
+		ret = wl_cfgnan_trigger_geofencing_ranging(dev,
+			&geofence_target_info->peer_addr);
+		if (ret == BCME_OK) {
+			dhd_rtt_set_geofence_setup_status(dhd, TRUE,
+				&geofence_target_info->peer_addr);
+		}
+	} else {
+		DHD_RTT(("No RTT target to schedule\n"));
+		ret = BCME_NOTFOUND;
+	}
+
+done:
+	NAN_MUTEX_UNLOCK();
+	return ret;
+}
+#endif /* WL_NAN */
+
+#ifdef WL_CFG80211
+#ifdef WL_NAN
+static void
+dhd_rtt_retry(dhd_pub_t *dhd)
+{
+
+	/* Attempt RTT for the current geofence target */
+	wl_cfgnan_reset_geofence_ranging_for_cur_target(dhd,
+		RTT_SCHED_RTT_RETRY_GEOFENCE);
+
+}
+static void
+dhd_rtt_retry_work(struct work_struct *work)
+{
+	rtt_status_info_t *rtt_status = NULL;
+	dhd_pub_t *dhd = NULL;
+	struct net_device *dev = NULL;
+	struct bcm_cfg80211 *cfg = NULL;
+
+	if (!work) {
+		goto exit;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	rtt_status = container_of(work, rtt_status_info_t, rtt_retry_timer.work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	dhd = rtt_status->dhd;
+	if (dhd == NULL) {
+		DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+		goto exit;
+	}
+	dev = dhd_linux_get_primary_netdev(dhd);
+	cfg = wl_get_cfg(dev);
+
+	NAN_MUTEX_LOCK();
+	(void) dhd_rtt_retry(dhd);
+	NAN_MUTEX_UNLOCK();
+
+exit:
+	return;
+}
+#endif /* WL_NAN */
+
+/*
+ * Returns zero (0) for a valid RTT state,
+ * i.e. when RTT is applicable
+ */
+uint8
+dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr)
+{
+	uint8 invalid_reason = RTT_STATE_VALID;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+	UNUSED_PARAMETER(cfg);
+	UNUSED_PARAMETER(invalid_reason);
+
+	/* Make sure peer addr is not NULL in caller */
+	ASSERT(peer_addr);
+	/*
+	 * Keep adding prohibited drv states here.
+	 * Only generic conditions which block all RTTs,
+	 * like an NDP connection
+	 */
+
+#ifdef WL_NAN
+	if (wl_cfgnan_data_dp_exists_with_peer(cfg, peer_addr)) {
+		invalid_reason = RTT_STATE_INV_REASON_NDP_EXIST;
+		DHD_RTT(("NDP in progress/connected, RTT prohibited\n"));
+		goto exit;
+	}
+#endif /* WL_NAN */
+
+	/* Remove below #defines once more exit calls come */
+#ifdef WL_NAN
+exit:
+#endif /* WL_NAN */
+	return invalid_reason;
+}
+#endif /* WL_CFG80211 */
+
+void
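+/*
+ * Illustrative note on the helper below: it records why RTT work was
+ * scheduled (e.g. RTT_SCHED_HOST_TRIGGER from dhd_rtt_set_cfg() above),
+ * marks rtt_sched, and defers the actual ranging setup to the work item
+ * via schedule_work(), so callers return without blocking on firmware I/O.
+ */
+#if 0	/* illustrative usage only, never compiled */
+	/* a host request typically ends up here after dhd_rtt_set_cfg() */
+	dhd_rtt_schedule_rtt_work_thread(dhd, RTT_SCHED_HOST_TRIGGER);
+#endif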
+dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (rtt_status == NULL) { + ASSERT(0); + } else { + rtt_status->rtt_sched_reason = sched_reason; + rtt_status->rtt_sched = TRUE; + schedule_work(&rtt_status->work); + } + return; +} + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) +{ + int err = BCME_OK; +#ifdef WL_CFG80211 + int i = 0, j = 0; + rtt_status_info_t *rtt_status; + rtt_results_header_t *entry, *next; + rtt_result_t *rtt_result, *next2; + struct rtt_noti_callback *iter; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (rtt_status->status == RTT_STOPPED) { + DHD_RTT_ERR(("rtt is not started\n")); + return BCME_OK; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + mutex_lock(&rtt_status->rtt_mutex); + for (i = 0; i < mac_cnt; i++) { + for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) { + if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr, + ETHER_ADDR_LEN)) { + rtt_status->rtt_config.target_info[j].disable = TRUE; + } + } + } + if (rtt_status->all_cancel) { + /* cancel all of request */ + rtt_status->status = RTT_STOPPED; + DHD_RTT(("current RTT process is cancelled\n")); + /* remove the rtt results in cache */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate rtt_results_header list */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + MFREE(dhd->osh, rtt_result, + sizeof(rtt_result_t)); + } + MFREE(dhd->osh, entry, sizeof(rtt_results_header_t)); + } + GCC_DIAGNOSTIC_POP(); + } + /* send the rtt complete event to wake up the user process */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + GCC_DIAGNOSTIC_POP(); + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + /* reinitialize the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + rtt_status->rtt_config.rtt_target_cnt = 0; + memset(rtt_status->rtt_config.target_info, 0, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->cur_idx = 0; + /* Cancel pending proxd timeout work if any */ + if (delayed_work_pending(&rtt_status->proxd_timeout)) { + cancel_delayed_work(&rtt_status->proxd_timeout); + } + dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); +#ifdef WL_NAN + dhd_rtt_delete_nan_session(dhd); +#endif /* WL_NAN */ + dhd_rtt_ftm_enable(dhd, FALSE); + } + mutex_unlock(&rtt_status->rtt_mutex); +#endif /* WL_CFG80211 */ + return err; +} + +#ifdef WL_CFG80211 +static void +dhd_rtt_timeout(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status; +#ifndef DHD_DUMP_ON_RTT_TIMEOUT + rtt_target_info_t *rtt_target = NULL; + rtt_target_info_t *rtt_target_info = NULL; +#ifdef WL_NAN + int8 idx; + nan_ranging_inst_t *ranging_inst = NULL; + int ret = BCME_OK; + uint32 status; + struct net_device *ndev = dhd_linux_get_primary_netdev(dhd); + struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy); +#endif /* WL_NAN */ +#endif /* !DHD_DUMP_ON_RTT_TIMEOUT */ + + rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + DHD_RTT_ERR(("Proxd timer expired but no RTT status\n")); + goto exit; + } + + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT_ERR(("Proxd timer expired but 
no RTT Request\n")); + goto exit; + } + +#ifdef DHD_DUMP_ON_RTT_TIMEOUT + /* Dump, and Panic depending on memdump.info */ +#ifdef BCMDONGLEHOST + if (dhd_query_bus_erros(dhd)) { + goto exit; + } +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* Behave based on user memdump info */ + dhd->memdump_type = DUMP_TYPE_PROXD_TIMEOUT; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ +#endif /* BCMDONGLEHOST */ +#else /* DHD_DUMP_ON_RTT_TIMEOUT */ +#ifdef WL_NAN + if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_NAN) { + for (idx = rtt_status->start_idx; + idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + rtt_target = &rtt_status->rtt_config.target_info[idx]; + if ((!rtt_target->disable) && + (!dhd_rtt_get_report_header(rtt_status, + NULL, &rtt_target->addr))) { + if (wl_cfgnan_ranging_is_in_prog_for_peer(cfg, &rtt_target->addr)) { + ranging_inst = wl_cfgnan_check_for_ranging(cfg, + &rtt_target->addr); + ret = wl_cfgnan_cancel_ranging(ndev, cfg, + &ranging_inst->range_id, + NAN_RNG_TERM_FLAG_IMMEDIATE, &status); + if (unlikely(ret) || unlikely(status)) { + WL_ERR(("%s:nan range cancel failed ret = %d " + "status = %d\n", __FUNCTION__, + ret, status)); + } + } + dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr); + } + } + dhd_rtt_handle_rtt_session_end(dhd); + /* reset directed cfg params */ + rtt_status->directed_cfg.directed_setup_status.rng_inst = NULL; + rtt_status->directed_cfg.directed_setup_status.directed_na_setup_inprog = FALSE; + rtt_status->directed_cfg.directed_sessions_cnt = 0; + } else +#endif /* WL_NAN */ + { + /* Cancel RTT for target and proceed to next target */ + rtt_target_info = rtt_status->rtt_config.target_info; + if ((!rtt_target_info) || + (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt)) { + goto exit; + } + rtt_target = &rtt_target_info[rtt_status->cur_idx]; + WL_ERR(("Proxd timer expired for Target: "MACDBG" \n", + MAC2STRDBG(&rtt_target->addr))); + /* For Legacy RTT */ + dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr); + dhd_rtt_handle_rtt_session_end(dhd); + } +#endif /* DHD_DUMP_ON_RTT_TIMEOUT */ +exit: + return; +} + +static void +dhd_rtt_timeout_work(struct work_struct *work) +{ + rtt_status_info_t *rtt_status = NULL; + dhd_pub_t *dhd = NULL; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + rtt_status = container_of(work, rtt_status_info_t, proxd_timeout.work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + + mutex_lock(&rtt_status->rtt_mutex); + (void) dhd_rtt_timeout(dhd); + mutex_unlock(&rtt_status->rtt_mutex); +} + +static void +dhd_rtt_set_ftm_config_ratespec(ftm_config_param_info_t *ftm_params, + int *ftm_param_cnt, rtt_target_info_t *rtt_target) +{ + bool use_default = FALSE; + int nss; + int mcs; + uint32 rspec = 0; + + if (!(rtt_target->bw && rtt_target->preamble)) { + goto exit; + } + switch (rtt_target->preamble) { + case RTT_PREAMBLE_LEGACY: + rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */ + rspec |= WL_RATE_6M; + break; + case RTT_PREAMBLE_HT: + rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */ + mcs = 0; /* default MCS 0 */ + rspec |= mcs; + break; + case RTT_PREAMBLE_VHT: + rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ + mcs = 0; /* default MCS 0 */ + nss = 1; /* default Nss = 1 */ + 
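+		/*
+		 * Illustrative bit-packing note: an rspec packs the rate into
+		 * bit fields. With the defaults here (VHT, mcs = 0, nss = 1)
+		 * the statement below contributes
+		 * (1 << WL_RSPEC_VHT_NSS_SHIFT) | 0 on top of
+		 * WL_RSPEC_ENCODE_VHT, and the bandwidth switch further down
+		 * ORs in one of the WL_RSPEC_BW_* values, e.g.
+		 * WL_RSPEC_BW_80MHZ for RTT_BW_80.
+		 */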
rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; + break; + default: + DHD_RTT(("doesn't support this preamble : %d\n", + rtt_target->preamble)); + use_default = TRUE; + break; + } + switch (rtt_target->bw) { + case RTT_BW_20: + rspec |= WL_RSPEC_BW_20MHZ; + break; + case RTT_BW_40: + rspec |= WL_RSPEC_BW_40MHZ; + break; + case RTT_BW_80: + rspec |= WL_RSPEC_BW_80MHZ; + break; + default: + DHD_RTT(("doesn't support this BW : %d\n", + rtt_target->bw)); + use_default = TRUE; + break; + } + if (!use_default) { + ftm_params[*ftm_param_cnt].data32 = htol32(rspec); + ftm_params[*ftm_param_cnt].tlvid = + WL_PROXD_TLV_ID_RATESPEC; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t ratespec : %d\n", rspec)); + } + +exit: + return; + +} + +static void +dhd_rtt_set_ftm_config_param(ftm_config_param_info_t *ftm_params, + int *ftm_param_cnt, rtt_target_info_t *rtt_target, uint16 tlvid) +{ + char eabuf[ETHER_ADDR_STR_LEN]; + char chanbuf[CHANSPEC_STR_LEN]; + + switch (tlvid) { + case WL_PROXD_TLV_ID_CUR_ETHER_ADDR: + /* local mac address */ + if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) { + ftm_params[*ftm_param_cnt].mac_addr = rtt_target->local_addr; + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR; + *ftm_param_cnt = *ftm_param_cnt + 1; + bcm_ether_ntoa(&rtt_target->local_addr, eabuf); + DHD_RTT((">\t local %s\n", eabuf)); + } + break; + case WL_PROXD_TLV_ID_PEER_MAC: + /* target's mac address */ + if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) { + ftm_params[*ftm_param_cnt].mac_addr = rtt_target->addr; + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_PEER_MAC; + *ftm_param_cnt = *ftm_param_cnt + 1; + bcm_ether_ntoa(&rtt_target->addr, eabuf); + DHD_RTT((">\t target %s\n", eabuf)); + } + break; + case WL_PROXD_TLV_ID_CHANSPEC: + /* target's chanspec */ + if (rtt_target->chanspec) { + ftm_params[*ftm_param_cnt].chanspec = + htol32((uint32)rtt_target->chanspec); + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_CHANSPEC; + *ftm_param_cnt = *ftm_param_cnt + 1; + wf_chspec_ntoa(rtt_target->chanspec, chanbuf); + DHD_RTT((">\t chanspec : %s\n", chanbuf)); + } + break; + case WL_PROXD_TLV_ID_NUM_BURST: + /* num-burst */ + if (rtt_target->num_burst) { + ftm_params[*ftm_param_cnt].data16 = htol16(rtt_target->num_burst); + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_NUM_BURST; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst)); + } + break; + case WL_PROXD_TLV_ID_BURST_NUM_FTM: + /* number of frame per burst */ + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M; + if (CHSPEC_IS80(rtt_target->chanspec)) { + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M; + } else if (CHSPEC_IS40(rtt_target->chanspec)) { + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_40M; + } else if (CHSPEC_IS20(rtt_target->chanspec)) { + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_20M; + } + ftm_params[*ftm_param_cnt].data16 = + htol16(rtt_target->num_frames_per_burst); + ftm_params[*ftm_param_cnt].tlvid = + WL_PROXD_TLV_ID_BURST_NUM_FTM; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t number of frame per burst : %d\n", + rtt_target->num_frames_per_burst)); + break; + case WL_PROXD_TLV_ID_FTM_RETRIES: + /* FTM retry count */ + if (rtt_target->num_retries_per_ftm) { + ftm_params[*ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm; + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_FTM_RETRIES; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t retry count of FTM : %d\n", + rtt_target->num_retries_per_ftm)); + } 
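+		/*
+		 * Illustrative note: every case in this switch follows the
+		 * same packing pattern, sketched below with a hypothetical
+		 * value; the caller ends up with a dense ftm_params array it
+		 * hands to dhd_rtt_ftm_config() for TLV encoding.
+		 */
+#if 0		/* illustrative shape only, never compiled */
+		ftm_params[*ftm_param_cnt].data8 = value;	/* hypothetical */
+		ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_FTM_RETRIES;
+		*ftm_param_cnt = *ftm_param_cnt + 1;
+#endif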
+ break; + case WL_PROXD_TLV_ID_FTM_REQ_RETRIES: + /* FTM Request retry count */ + if (rtt_target->num_retries_per_ftmr) { + ftm_params[*ftm_param_cnt].data8 = rtt_target->num_retries_per_ftmr; + ftm_params[*ftm_param_cnt].tlvid = + WL_PROXD_TLV_ID_FTM_REQ_RETRIES; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t retry count of FTM Req : %d\n", + rtt_target->num_retries_per_ftmr)); + } + break; + case WL_PROXD_TLV_ID_BURST_PERIOD: + /* burst-period */ + if (rtt_target->burst_period) { + ftm_params[*ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_period); /* ms */ + ftm_params[*ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period)); + } + break; + case WL_PROXD_TLV_ID_BURST_DURATION: + /* burst-duration */ + rtt_target->burst_duration = FTM_MAX_BURST_DUR_TMO_MS; + if (rtt_target->burst_duration) { + ftm_params[*ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_duration); /* ms */ + ftm_params[*ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_BURST_DURATION; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t burst duration : %d ms\n", + rtt_target->burst_duration)); + } + break; + case WL_PROXD_TLV_ID_BURST_TIMEOUT: + /* burst-timeout */ + rtt_target->burst_timeout = FTM_MAX_BURST_DUR_TMO_MS; + if (rtt_target->burst_timeout) { + ftm_params[*ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_timeout); /* ms */ + ftm_params[*ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_BURST_TIMEOUT; + *ftm_param_cnt = *ftm_param_cnt + 1; + DHD_RTT((">\t burst timeout : %d ms\n", + rtt_target->burst_timeout)); + } + break; + case WL_PROXD_TLV_ID_RATESPEC: + dhd_rtt_set_ftm_config_ratespec(ftm_params, + ftm_param_cnt, rtt_target); + break; + case WL_PROXD_TLV_ID_EVENT_MASK: + { + /* set burst end and session end in ev mask by def */ + uint32 event_mask = ((1 << WL_PROXD_EVENT_BURST_END) | + (1 << WL_PROXD_EVENT_SESSION_END)); + /* only burst end for directed nan-rtt target */ + if (rtt_target && (rtt_target->peer == RTT_PEER_NAN)) { + event_mask = (1 << WL_PROXD_EVENT_BURST_END); + } + ftm_params[*ftm_param_cnt].event_mask = event_mask; + ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_EVENT_MASK; + *ftm_param_cnt = *ftm_param_cnt + 1; + } + break; + default: + DHD_RTT_ERR(("Invalid FTM Param Config, tlvid = %d\n", tlvid)); + break; + } + + return; +} + +static int +dhd_rtt_start(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int err_at = 0; + int ftm_cfg_cnt = 0; + int ftm_param_cnt = 0; + ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; + rtt_target_info_t *rtt_target; + rtt_status_info_t *rtt_status; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + u8 ioctl_buf[WLC_IOCTL_SMLEN]; + u8 rtt_invalid_reason = RTT_STATE_VALID; + int rtt_sched_type = RTT_TYPE_INVALID; + + NULL_CHECK(dhd, "dhd is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + DHD_RTT(("Enter %s\n", __FUNCTION__)); + + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT(("No Directed RTT target to process, check for geofence\n")); + goto geofence; + } + + if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) { + err = BCME_RANGE; + err_at = 1; + DHD_RTT(("%s : idx %d is out of 
range\n", __FUNCTION__, rtt_status->cur_idx));
+		if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+			DHD_RTT_ERR(("STA is set as Target/Responder\n"));
+			err = BCME_ERROR;
+		}
+		goto exit;
+	}
+
+	/* Get the target information */
+	rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+
+	if (ETHER_ISNULLADDR(rtt_target->addr.octet)) {
+		err = BCME_BADADDR;
+		err_at = 2;
+		DHD_RTT(("RTT Target addr is NULL\n"));
+		goto exit;
+	}
+
+	/* check for dp/others concurrency */
+	rtt_invalid_reason = dhd_rtt_invalid_states(dev, &rtt_target->addr);
+	if (rtt_invalid_reason != RTT_STATE_VALID) {
+		err = BCME_BUSY;
+		err_at = 3;
+		DHD_RTT(("DRV State is not valid for RTT\n"));
+		goto exit;
+	}
+
+	/* enable ftm */
+	err = dhd_rtt_ftm_enable(dhd, TRUE);
+	if (err) {
+		DHD_RTT_ERR(("failed to enable FTM (%d)\n", err));
+		err_at = 4;
+		goto exit;
+	}
+	rtt_status->status = RTT_ENABLED;
+
+#ifdef WL_NAN
+	if (rtt_target->peer == RTT_PEER_NAN) {
+		rtt_sched_type = RTT_TYPE_NAN_DIRECTED;
+		/* apply event mask */
+		DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+			rtt_target, WL_PROXD_TLV_ID_EVENT_MASK);
+		dhd_rtt_ftm_config(dhd, 0, FTM_CONFIG_CAT_GENERAL,
+			ftm_params, ftm_param_cnt);
+		/* Ignore the return value; failure is taken care of inside the API */
+		dhd_rtt_nan_start_session(dhd, rtt_target);
+		goto exit;
+	}
+#endif /* WL_NAN */
+
+	/* delete the default session */
+	err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+	if (err < 0 && err != BCME_NOTFOUND) {
+		DHD_RTT_ERR(("failed to delete session of FTM (%d)\n", err));
+		err_at = 5;
+		goto exit;
+	}
+
+	memset(ftm_configs, 0, sizeof(ftm_configs));
+	memset(ftm_params, 0, sizeof(ftm_params));
+
+	/* configure session 1 as the initiator */
+	if (ftm_cfg_cnt < FTM_MAX_CONFIGS) {
+		ftm_configs[ftm_cfg_cnt].enable = TRUE;
+		ftm_configs[ftm_cfg_cnt++].flags =
+			WL_PROXD_SESSION_FLAG_INITIATOR | WL_PROXD_SESSION_FLAG_RANDMAC;
+		dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
+			ftm_configs, ftm_cfg_cnt);
+	} else {
+		DHD_RTT_ERR(("Max FTM Config Options exceeded\n"));
+		err = BCME_ERROR;
+		err_at = 6;
+		goto exit;
+	}
+
+	memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
+	/* A random MAC is used for newer versions in place of cur_eth */
+	if (dhd->wlc_ver_major < RTT_IOV_CUR_ETH_OBSOLETE) {
+		err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0,
+			ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+		if (err) {
+			DHD_RTT_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
+			err_at = 7;
+			goto exit;
+		}
+		memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN);
+
+		/* local mac address */
+		DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+			rtt_target, WL_PROXD_TLV_ID_CUR_ETHER_ADDR);
+	}
+	/* target's mac address */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_PEER_MAC);
+
+	/* target's chanspec */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_CHANSPEC);
+
+	/* num-burst */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_NUM_BURST);
+
+	/* number of frames per burst */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_BURST_NUM_FTM);
+
+	/* FTM retry count */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_FTM_RETRIES);
+
+	/* FTM Request retry count */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_FTM_REQ_RETRIES);
+
+	/* burst-period */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_BURST_PERIOD);
+
+	/*
+	 * Set both duration and timeout to the MAX duration
+	 * to handle congested environments;
+	 * hence the user config is ignored.
+	 */
+	/* burst-duration */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_BURST_DURATION);
+
+	/* burst-timeout */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_BURST_TIMEOUT);
+
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_RATESPEC);
+
+	/* event_mask is applicable only for Legacy RTT.
+	 * For nan-rtt the config happens from firmware
+	 */
+	DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+		rtt_target, WL_PROXD_TLV_ID_EVENT_MASK);
+
+#if !defined(WL_USE_RANDOMIZED_SCAN)
+	/* legacy rtt randmac */
+	dhd_set_rand_mac_oui(dhd);
+#endif /* !defined(WL_USE_RANDOMIZED_SCAN) */
+	dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL,
+		ftm_params, ftm_param_cnt);
+
+	rtt_sched_type = RTT_TYPE_LEGACY;
+	err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE);
+	if (err) {
+		DHD_RTT_ERR(("failed to start session of FTM: error %d\n", err));
+		err_at = 8;
+	} else {
+		/* schedule proxd timeout */
+		schedule_delayed_work(&rtt_status->proxd_timeout,
+			msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+
+	}
+
+	goto exit;
+geofence:
+#ifdef WL_NAN
+	/* sched geofencing rtt */
+	rtt_sched_type = RTT_TYPE_NAN_GEOFENCE;
+	if ((err = dhd_rtt_sched_geofencing_target(dhd)) != BCME_OK) {
+		DHD_RTT_ERR(("geofencing sched failed, err = %d\n", err));
+		err_at = 9;
+	}
+#endif /* WL_NAN */
+
+exit:
+	if (err) {
+		/* RTT Failed */
+		DHD_RTT_ERR(("dhd_rtt_start: Failed & RTT_STOPPED, err = %d,"
+			" err_at = %d, rtt_sched_type = %d, rtt_invalid_reason = %d\n"
+			" sched_reason = %d",
+			err, err_at, rtt_sched_type, rtt_invalid_reason,
+			rtt_status->rtt_sched_reason));
+		rtt_status->status = RTT_STOPPED;
+		/* disable FTM */
+		dhd_rtt_ftm_enable(dhd, FALSE);
+	}
+	rtt_status->rtt_sched = FALSE;
+	return err;
+}
+#endif /* WL_CFG80211 */
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+	int err = BCME_OK;
+	struct rtt_noti_callback *cb = NULL, *iter;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+		GCC_DIAGNOSTIC_POP();
+		if (iter->noti_fn == noti_fn) {
+			goto exit;
+		}
+	}
+	cb = (struct rtt_noti_callback *)MALLOCZ(dhd->osh, sizeof(struct rtt_noti_callback));
+	if (!cb) {
+		err = -ENOMEM;
+		goto exit;
+	}
+	cb->noti_fn = noti_fn;
+	cb->ctx = ctx;
+	list_add(&cb->list, &rtt_status->noti_fn_list);
+exit:
+	spin_unlock_bh(&noti_list_lock);
+	return err;
+}
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn)
+{
+	int err = BCME_OK;
+	struct rtt_noti_callback *cb = NULL, *iter;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+		GCC_DIAGNOSTIC_POP();
+		if (iter->noti_fn == noti_fn) {
+			cb = iter;
+			list_del(&cb->list);
+			break;
+		}
+	}
+
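+	/*
+	 * Illustrative consumer flow for the notification registry above,
+	 * with a hypothetical handler my_rtt_done() matching the
+	 * dhd_rtt_compl_noti_fn shape used by iter->noti_fn(ctx, cache).
+	 */
+#if 0	/* illustrative only, never compiled */
+	static void my_rtt_done(void *ctx, struct list_head *cache)
+	{
+		/* walk the cached rtt_results_header_t entries per peer */
+	}
+
+	dhd_rtt_register_noti_callback(dhd, my_ctx, my_rtt_done); /* dup-safe */
+	/* ... ranging completes, my_rtt_done() is invoked ... */
+	dhd_rtt_unregister_noti_callback(dhd, my_rtt_done);
+#endif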
spin_unlock_bh(¬i_list_lock); + if (cb) { + MFREE(dhd->osh, cb, sizeof(struct rtt_noti_callback)); + } + return err; +} + +static wifi_rate_v1 +dhd_rtt_convert_rate_to_host(uint32 rspec) +{ + wifi_rate_v1 host_rate; + uint32 bandwidth; + memset(&host_rate, 0, sizeof(wifi_rate_v1)); + if (RSPEC_ISLEGACY(rspec)) { + host_rate.preamble = 0; + } else if (RSPEC_ISHT(rspec)) { + host_rate.preamble = 2; + host_rate.rateMcsIdx = rspec & WL_RSPEC_RATE_MASK; + } else if (RSPEC_ISVHT(rspec)) { + host_rate.preamble = 3; + host_rate.rateMcsIdx = rspec & WL_RSPEC_VHT_MCS_MASK; + host_rate.nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT; + } + + bandwidth = RSPEC_BW(rspec); + switch (bandwidth) { + case WL_RSPEC_BW_20MHZ: + host_rate.bw = RTT_RATE_20M; + break; + case WL_RSPEC_BW_40MHZ: + host_rate.bw = RTT_RATE_40M; + break; + case WL_RSPEC_BW_80MHZ: + host_rate.bw = RTT_RATE_80M; + break; + case WL_RSPEC_BW_160MHZ: + host_rate.bw = RTT_RATE_160M; + break; + default: + host_rate.bw = RTT_RATE_20M; + break; + } + + host_rate.bitrate = rate_rspec2rate(rspec) / 100; /* 100kbps */ + DHD_RTT(("bit rate : %d\n", host_rate.bitrate)); + return host_rate; +} + +#define FTM_FRAME_TYPES {"SETUP", "TRIGGER", "TIMESTAMP"} +static int +dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data, + uint16 tlvid, uint16 len) +{ + int i; + int err = BCME_OK; + char eabuf[ETHER_ADDR_STR_LEN]; + wl_proxd_result_flags_t flags; + wl_proxd_session_state_t session_state; + wl_proxd_status_t proxd_status; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct osl_timespec ts; +#endif /* LINUX_VER >= 2.6.39 */ + uint32 ratespec; + uint32 avg_dist; + const wl_proxd_rtt_result_v1_t *p_data_info = NULL; + const wl_proxd_rtt_sample_v1_t *p_sample_avg = NULL; + const wl_proxd_rtt_sample_v1_t *p_sample = NULL; + wl_proxd_intvl_t rtt; + wl_proxd_intvl_t p_time; + uint16 num_rtt = 0, snr = 0, bitflips = 0; + wl_proxd_phy_error_t tof_phy_error = 0; + wl_proxd_phy_error_t tof_phy_tgt_error = 0; + wl_proxd_snr_t tof_target_snr = 0; + wl_proxd_bitflips_t tof_target_bitflips = 0; + int16 rssi = 0; + int32 dist = 0; + uint8 num_ftm = 0; + char *ftm_frame_types[] = FTM_FRAME_TYPES; + rtt_report_t *rtt_report = &(rtt_result->report); + + BCM_REFERENCE(ftm_frame_types); + BCM_REFERENCE(dist); + BCM_REFERENCE(rssi); + BCM_REFERENCE(tof_target_bitflips); + BCM_REFERENCE(tof_target_snr); + BCM_REFERENCE(tof_phy_tgt_error); + BCM_REFERENCE(tof_phy_error); + BCM_REFERENCE(bitflips); + BCM_REFERENCE(snr); + BCM_REFERENCE(session_state); + BCM_REFERENCE(ftm_session_state_value_to_logstr); + + NULL_CHECK(rtt_report, "rtt_report is NULL", err); + NULL_CHECK(p_data, "p_data is NULL", err); + DHD_RTT(("%s enter\n", __FUNCTION__)); + p_data_info = (const wl_proxd_rtt_result_v1_t *) p_data; + /* unpack and format 'flags' for display */ + flags = ltoh16_ua(&p_data_info->flags); + + /* session state and status */ + session_state = ltoh16_ua(&p_data_info->state); + proxd_status = ltoh32_ua(&p_data_info->status); + bcm_ether_ntoa((&(p_data_info->peer)), eabuf); + DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", + eabuf, + session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, + ftm_status_value_to_logstr(proxd_status))); + + /* show avg_dist (1/256m units), burst_num */ + avg_dist = ltoh32_ua(&p_data_info->avg_dist); + if (avg_dist == 0xffffffff) { /* report 'failure' case */ + DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n", + 
ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt)); /* in a session */ + avg_dist = FTM_INVALID; + } + else { + DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n", + avg_dist >> 8, /* 1/256m units */ + ((avg_dist & 0xff) * 625) >> 4, + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt, + p_data_info->num_ftm)); /* in a session */ + } + /* show 'avg_rtt' sample */ + p_sample_avg = &p_data_info->avg_rtt; + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)); + DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d ratespec=0x%08x\n", + (int16) ltoh16_ua(&p_sample_avg->rssi), + ltoh32_ua(&p_sample_avg->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)), + ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10, + ltoh32_ua(&p_sample_avg->ratespec))); + + /* set peer address */ + rtt_report->addr = p_data_info->peer; + /* burst num */ + rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num); + /* success num */ + rtt_report->success_num = p_data_info->num_valid_rtt; + /* actual number of FTM supported by peer */ + rtt_report->num_per_burst_peer = p_data_info->num_ftm; + rtt_report->negotiated_burst_num = p_data_info->num_ftm; + /* status */ + rtt_report->status = ftm_get_statusmap_info(proxd_status, + &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info)); + + /* rssi (0.5db) */ + rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_data_info->avg_rtt.rssi)) * 2; + + /* rx rate */ + ratespec = ltoh32_ua(&p_data_info->avg_rtt.ratespec); + rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec); + /* tx rate */ + if (flags & WL_PROXD_RESULT_FLAG_VHTACK) { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010); + } else { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc); + } + /* rtt_sd */ + rtt.tmu = ltoh16_ua(&p_data_info->avg_rtt.rtt.tmu); + rtt.intvl = ltoh32_ua(&p_data_info->avg_rtt.rtt.intvl); + rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */ + rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */ + DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt)); + DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi)); + + /* average distance */ + if (avg_dist != FTM_INVALID) { + rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */ + rtt_report->distance += (avg_dist & 0xff) * 1000 / 256; + } else { + rtt_report->distance = FTM_INVALID; + } + /* time stamp */ + /* get the time elapsed from boot time */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + osl_get_monotonic_boottime(&ts); + rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); +#endif /* LINUX_VER >= 2.6.39 */ + + if (proxd_status == WL_PROXD_E_REMOTE_FAIL) { + /* retry time after failure */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */ + DHD_RTT((">\tretry_after: %d%s\n", + ltoh32_ua(&p_data_info->u.retry_after.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu)))); + } else { + /* burst duration */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */ + DHD_RTT((">\tburst_duration: %d%s\n", + ltoh32_ua(&p_data_info->u.burst_duration.intvl), + 
ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu)))); + DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration)); + } + + /* display detail if available */ + num_rtt = ltoh16_ua(&p_data_info->num_rtt); + if (num_rtt > 0) { + DHD_RTT((">\tnum rtt: %d samples\n", num_rtt)); + p_sample = &p_data_info->rtt[0]; + for (i = 0; i < num_rtt; i++) { + snr = 0; + bitflips = 0; + tof_phy_error = 0; + tof_phy_tgt_error = 0; + tof_target_snr = 0; + tof_target_bitflips = 0; + rssi = 0; + dist = 0; + num_ftm = p_data_info->num_ftm; + /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */ + if ((i % num_ftm) == 1) { + rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi); + snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr); + bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips); + tof_phy_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_phy_error); + tof_phy_tgt_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_tgt_phy_error); + tof_target_snr = + (wl_proxd_snr_t) + ltoh16_ua(&p_sample->tof_tgt_snr); + tof_target_bitflips = + (wl_proxd_bitflips_t) + ltoh16_ua(&p_sample->tof_tgt_bitflips); + dist = ltoh32_ua(&p_sample->distance); + } else { + rssi = -1; + snr = 0; + bitflips = 0; + dist = 0; + tof_target_bitflips = 0; + tof_target_snr = 0; + tof_phy_tgt_error = 0; + } + DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d" + " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x" + " target_bitflips=%d dist=%d rtt=%d%s status %s" + " Type %s coreid=%d\n", + i, p_sample->id, rssi, snr, + bitflips, tof_phy_error, tof_phy_tgt_error, + tof_target_snr, + tof_target_bitflips, dist, + ltoh32_ua(&p_sample->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), + ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)), + ftm_frame_types[i % num_ftm], p_sample->coreid)); + p_sample++; + } + } + return err; +} + +static int +dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data, + uint16 tlvid, uint16 len) +{ + int i; + int err = BCME_OK; + char eabuf[ETHER_ADDR_STR_LEN]; + wl_proxd_result_flags_t flags; + wl_proxd_session_state_t session_state; + wl_proxd_status_t proxd_status; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct osl_timespec ts; +#endif /* LINUX_VER >= 2.6.39 */ + uint32 ratespec; + uint32 avg_dist; + const wl_proxd_rtt_result_v2_t *p_data_info = NULL; + const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL; + const wl_proxd_rtt_sample_v2_t *p_sample = NULL; + uint16 num_rtt = 0; + wl_proxd_intvl_t rtt; + wl_proxd_intvl_t p_time; + uint16 snr = 0, bitflips = 0; + wl_proxd_phy_error_t tof_phy_error = 0; + wl_proxd_phy_error_t tof_phy_tgt_error = 0; + wl_proxd_snr_t tof_target_snr = 0; + wl_proxd_bitflips_t tof_target_bitflips = 0; + int16 rssi = 0; + int32 dist = 0; + uint32 chanspec = 0; + uint8 num_ftm = 0; + char *ftm_frame_types[] = FTM_FRAME_TYPES; + rtt_report_t *rtt_report = &(rtt_result->report); + + BCM_REFERENCE(ftm_frame_types); + BCM_REFERENCE(dist); + BCM_REFERENCE(rssi); + BCM_REFERENCE(tof_target_bitflips); + BCM_REFERENCE(tof_target_snr); + BCM_REFERENCE(tof_phy_tgt_error); + BCM_REFERENCE(tof_phy_error); + BCM_REFERENCE(bitflips); + BCM_REFERENCE(snr); + BCM_REFERENCE(chanspec); + BCM_REFERENCE(session_state); + BCM_REFERENCE(ftm_session_state_value_to_logstr); + + NULL_CHECK(rtt_report, "rtt_report is NULL", err); + NULL_CHECK(p_data, "p_data is NULL", err); + DHD_RTT(("%s enter\n", __FUNCTION__)); + p_data_info = (const wl_proxd_rtt_result_v2_t *) 
p_data; + /* unpack and format 'flags' for display */ + flags = ltoh16_ua(&p_data_info->flags); + /* session state and status */ + session_state = ltoh16_ua(&p_data_info->state); + proxd_status = ltoh32_ua(&p_data_info->status); + bcm_ether_ntoa((&(p_data_info->peer)), eabuf); + + if ((proxd_status != BCME_OK) || (p_data_info->num_meas == 0)) { + DHD_RTT_ERR((">\tTarget(%s) session state=%d(%s), status=%d(%s) " + "num_meas_ota %d num_valid_rtt %d result_flags %x\n", + eabuf, session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, ftm_status_value_to_logstr(proxd_status), + p_data_info->num_meas, p_data_info->num_valid_rtt, + p_data_info->flags)); + } else { + DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", + eabuf, session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, ftm_status_value_to_logstr(proxd_status))); + } + /* show avg_dist (1/256m units), burst_num */ + avg_dist = ltoh32_ua(&p_data_info->avg_dist); + if (avg_dist == 0xffffffff) { /* report 'failure' case */ + DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n", + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt)); /* in a session */ + avg_dist = FTM_INVALID; + } else { + DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d " + "num_meas_ota=%d, result_flags=%x\n", avg_dist >> 8, /* 1/256m units */ + ((avg_dist & 0xff) * 625) >> 4, + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt, + p_data_info->num_ftm, p_data_info->num_meas, + p_data_info->flags)); /* in a session */ + } + rtt_result->rtt_detail.num_ota_meas = p_data_info->num_meas; + rtt_result->rtt_detail.result_flags = p_data_info->flags; + /* show 'avg_rtt' sample */ + /* in v2, avg_rtt is the first element of the variable rtt[] */ + p_sample_avg = &p_data_info->rtt[0]; + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)); + DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d" + "ratespec=0x%08x chanspec=0x%08x\n", + (int16) ltoh16_ua(&p_sample_avg->rssi), + ltoh32_ua(&p_sample_avg->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)), + ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10, + ltoh32_ua(&p_sample_avg->ratespec), + ltoh32_ua(&p_sample_avg->chanspec))); + + /* set peer address */ + rtt_report->addr = p_data_info->peer; + + /* burst num */ + rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num); + + /* success num */ + rtt_report->success_num = p_data_info->num_valid_rtt; + + /* num-ftm configured */ + rtt_report->ftm_num = p_data_info->num_ftm; + + /* actual number of FTM supported by peer */ + rtt_report->num_per_burst_peer = p_data_info->num_ftm; + rtt_report->negotiated_burst_num = p_data_info->num_ftm; + + /* status */ + rtt_report->status = ftm_get_statusmap_info(proxd_status, + &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info)); + + /* Framework expects status as SUCCESS else all results will be + * set to zero even if we have partial valid result. 
+ * So setting status as SUCCESS if we have a valid_rtt + * On burst timeout we stop burst with "timeout" reason and + * on msch end we set status as "cancel" + */ + if ((proxd_status == WL_PROXD_E_TIMEOUT || + proxd_status == WL_PROXD_E_CANCELED) && + rtt_report->success_num) { + rtt_report->status = RTT_STATUS_SUCCESS; + } + + /* rssi (0.5db) */ + rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2; + + /* rx rate */ + ratespec = ltoh32_ua(&p_sample_avg->ratespec); + rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec); + + /* tx rate */ + if (flags & WL_PROXD_RESULT_FLAG_VHTACK) { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010); + } else { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc); + } + + /* rtt_sd */ + rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu); + rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl); + rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */ + rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */ + DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt)); + DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi)); + + /* average distance */ + if (avg_dist != FTM_INVALID) { + rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */ + rtt_report->distance += (avg_dist & 0xff) * 1000 / 256; + /* rtt_sd is in 0.1 ns. + * host needs distance_sd in milli mtrs + * (0.1 * rtt_sd/2 * 10^-9) * C * 1000 + */ + rtt_report->distance_sd = rtt_report->rtt_sd * 15; /* mm */ + } else { + rtt_report->distance = FTM_INVALID; + } + /* time stamp */ + /* get the time elapsed from boot time */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + osl_get_monotonic_boottime(&ts); + rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); +#endif /* LINUX_VER >= 2.6.39 */ + + if (proxd_status == WL_PROXD_E_REMOTE_FAIL) { + /* retry time after failure */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */ + DHD_RTT((">\tretry_after: %d%s\n", + ltoh32_ua(&p_data_info->u.retry_after.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu)))); + } else { + /* burst duration */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */ + DHD_RTT((">\tburst_duration: %d%s\n", + ltoh32_ua(&p_data_info->u.burst_duration.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu)))); + DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration)); + } + /* display detail if available */ + num_rtt = ltoh16_ua(&p_data_info->num_rtt); + if (num_rtt > 0) { + DHD_RTT((">\tnum rtt: %d samples\n", num_rtt)); + p_sample = &p_data_info->rtt[1]; + for (i = 0; i < num_rtt; i++) { + snr = 0; + bitflips = 0; + tof_phy_error = 0; + tof_phy_tgt_error = 0; + tof_target_snr = 0; + tof_target_bitflips = 0; + rssi = 0; + dist = 0; + num_ftm = p_data_info->num_ftm; + /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */ + if ((i % num_ftm) == 1) { + rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi); + snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr); + bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips); + tof_phy_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_phy_error); + tof_phy_tgt_error = + (wl_proxd_phy_error_t) + 
ltoh32_ua(&p_sample->tof_tgt_phy_error); + tof_target_snr = + (wl_proxd_snr_t) + ltoh16_ua(&p_sample->tof_tgt_snr); + tof_target_bitflips = + (wl_proxd_bitflips_t) + ltoh16_ua(&p_sample->tof_tgt_bitflips); + dist = ltoh32_ua(&p_sample->distance); + chanspec = ltoh32_ua(&p_sample->chanspec); + } else { + rssi = -1; + snr = 0; + bitflips = 0; + dist = 0; + tof_target_bitflips = 0; + tof_target_snr = 0; + tof_phy_tgt_error = 0; + } + DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d" + " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x" + " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s" + " coreid=%d chanspec=0x%08x\n", + i, p_sample->id, rssi, snr, + bitflips, tof_phy_error, tof_phy_tgt_error, + tof_target_snr, + tof_target_bitflips, dist, + ltoh32_ua(&p_sample->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), + ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)), + ftm_frame_types[i % num_ftm], p_sample->coreid, + chanspec)); + p_sample++; + } + } + return err; +} +#ifdef WL_CFG80211 +/* Common API for handling Session End. +* This API will flush out the results for a peer MAC. +* +* @For legacy FTM session, this API will be called +* when legacy FTM_SESSION_END event is received. +* @For legacy Nan-RTT , this API will be called when +* we are cancelling the nan-ranging session or on +* nan-ranging-end event. +*/ + +static bool +dhd_rtt_all_directed_targets_done(dhd_pub_t *dhd) +{ + int8 idx; + bool done = TRUE; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + for (idx = rtt_status->start_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + if (!rtt_status->rtt_config.target_info[idx].disable) { + if (!dhd_rtt_get_report_header(rtt_status, + NULL, &rtt_status->rtt_config.target_info[idx].addr)) { + done = FALSE; + break; + } + } + } + return done; +} + +static void +dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd) +{ + + struct rtt_noti_callback *iter; + rtt_results_header_t *entry, *next; + rtt_result_t *next2; + rtt_result_t *rtt_result; + bool all_targets_done = FALSE; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); +#ifdef WL_NAN + struct net_device *ndev = dhd_linux_get_primary_netdev(dhd); + struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy); +#endif /* WL_NAN */ + + /* check if all targets results received */ + all_targets_done = dhd_rtt_all_directed_targets_done(dhd); + if (all_targets_done) { + DHD_RTT_MEM(("RTT_STOPPED\n")); + rtt_status->status = RTT_STOPPED; + /* notify the completed information to others */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + /* remove the rtt results in cache */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate rtt_results_header list */ + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + MFREE(dhd->osh, rtt_result, + sizeof(rtt_result_t)); + } + MFREE(dhd->osh, entry, sizeof(rtt_results_header_t)); + } + } + GCC_DIAGNOSTIC_POP(); + /* reinitialize the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + rtt_status->rtt_config.rtt_target_cnt = 0; + memset_s(rtt_status->rtt_config.target_info, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), + 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + 
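/* All directed targets have reported by now (dhd_rtt_all_directed_targets_done() was TRUE), so finish the teardown: reset the current target index and cancel any pending proxd timeout before the next host request re-arms the state machine. */ +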
rtt_status->cur_idx = 0; + + /* Cancel pending proxd timeout work if any */ + if (delayed_work_pending(&rtt_status->proxd_timeout)) { + cancel_delayed_work(&rtt_status->proxd_timeout); + } +#ifdef WL_NAN + /* Reset for Geofence */ + wl_cfgnan_reset_geofence_ranging(cfg, NULL, + RTT_SCHED_RNG_RPT_DIRECTED, FALSE); +#endif /* WL_NAN */ + } else { + /* Targets still pending */ + if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_LEGACY) { + /* Pure legacy target list */ + + /* Cancel pending proxd timeout work if any */ + if (delayed_work_pending(&rtt_status->proxd_timeout)) { + cancel_delayed_work(&rtt_status->proxd_timeout); + } + + dhd_rtt_set_next_target_idx(dhd, (rtt_status->cur_idx + 1)); + if (rtt_status->cur_idx < rtt_status->rtt_config.rtt_target_cnt) { + /* restart to measure RTT from next device */ + DHD_INFO(("restart to measure rtt\n")); + rtt_status->rtt_sched = TRUE; + schedule_work(&rtt_status->work); + } + } +#ifdef WL_NAN + else if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_NAN) { + /* Pure NAN target list */ + dhd_rtt_trigger_pending_targets_on_session_end(dhd); + } +#endif /* WL_NAN */ + } +} +#endif /* WL_CFG80211 */ + +#ifdef WL_CFG80211 +static int +dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status, + struct ether_addr *addr) +{ + rtt_results_header_t *rtt_results_header = NULL; + rtt_target_info_t *rtt_target_info; + int ret = BCME_OK; + rtt_result_t *rtt_result; + + /* allocate new header for rtt_results */ + rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh, + sizeof(rtt_results_header_t)); + if (!rtt_results_header) { + ret = -ENOMEM; + goto exit; + } + rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + /* same src and dest len */ + (void)memcpy_s(&rtt_results_header->peer_mac, + ETHER_ADDR_LEN, addr, ETHER_ADDR_LEN); + list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + + /* allocate rtt_results for new results */ + rtt_result = (rtt_result_t *)MALLOCZ(rtt_status->dhd->osh, + sizeof(rtt_result_t)); + if (!rtt_result) { + ret = -ENOMEM; + /* Free rtt result header */ + MFREE(rtt_status->dhd->osh, rtt_results_header, sizeof(rtt_results_header_t)); + goto exit; + } + /* fill out the results from the configuration param */ + rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst; + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; + rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP; + /* same src and dest len */ + (void)memcpy_s(&rtt_result->report.addr, ETHER_ADDR_LEN, + &rtt_target_info->addr, ETHER_ADDR_LEN); + rtt_result->report.distance = FTM_INVALID; + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len; +exit: + return ret; +} + +static bool +dhd_rtt_get_report_header(rtt_status_info_t *rtt_status, + rtt_results_header_t **rtt_results_header, struct ether_addr *addr) +{ + rtt_results_header_t *entry; + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + /* find a rtt_report_header for this mac address */ + list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { + GCC_DIAGNOSTIC_POP(); + if (!memcmp(&entry->peer_mac, addr, ETHER_ADDR_LEN)) { + /* found a rtt_report_header for peer_mac in the list */ + if 
(rtt_results_header) { + *rtt_results_header = entry; + } + return TRUE; + } + } + return FALSE; +} + +#ifdef WL_NAN +int +dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, struct ether_addr *peer) +{ + bool is_new = TRUE; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + mutex_lock(&rtt_status->rtt_mutex); + is_new = !dhd_rtt_get_report_header(rtt_status, NULL, peer); + + if (is_new) { /* no FTM result..create failure result */ + dhd_rtt_create_failure_result(rtt_status, peer); + } + DHD_RTT_MEM(("RTT Session End for NAN peer "MACDBG"\n", MAC2STRDBG(peer))); + dhd_rtt_handle_rtt_session_end(dhd); + mutex_unlock(&rtt_status->rtt_mutex); + return BCME_OK; +} + +static bool +dhd_rtt_is_valid_measurement(rtt_result_t *rtt_result) +{ + bool ret = FALSE; + + if (rtt_result && (rtt_result->report.success_num != 0)) { + ret = TRUE; + } + return ret; +} + +static void +dhd_rtt_trigger_pending_targets_on_session_end(dhd_pub_t *dhd) +{ + if (!(dhd_rtt_nan_is_directed_setup_in_prog(dhd)) && + (!dhd_rtt_nan_all_directed_sessions_triggered(dhd)) && + (!dhd_rtt_nan_directed_sessions_allowed(dhd))) { + /* Trigger next target from here */ + dhd_rtt_set_next_target_idx(dhd, + (dhd_rtt_get_cur_target_idx(dhd) + 1)); + dhd_rtt_schedule_rtt_work_thread(dhd, RTT_SCHED_RNG_DIR_EXCESS_TARGET); + } +} +#endif /* WL_NAN */ +#endif /* WL_CFG80211 */ + +static int +dhd_rtt_parse_result_event(wl_proxd_event_t *proxd_ev_data, + int tlvs_len, rtt_result_t *rtt_result) +{ + int ret = BCME_OK; + + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) rtt_result, + (uint8 *)&proxd_ev_data->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); + goto exit; + } + /* fill out the results from the configuration param */ + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; + rtt_result->detail_len = sizeof(rtt_result->rtt_detail); + +exit: + return ret; + +} + +static int +dhd_rtt_handle_directed_rtt_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr, + wl_proxd_event_t *proxd_ev_data, int tlvs_len, rtt_result_t *rtt_result, bool is_nan) +{ + int ret = BCME_OK; + +#ifdef WL_CFG80211 + int err_at = 0; + rtt_status_info_t *rtt_status; + bool is_new = TRUE; + rtt_results_header_t *rtt_results_header = NULL; +#endif /* WL_CFG80211 */ + +#ifdef WL_CFG80211 + rtt_status = GET_RTTSTATE(dhd); + is_new = !dhd_rtt_get_report_header(rtt_status, + &rtt_results_header, peer_addr); + + if (tlvs_len > 0) { + if (is_new) { + /* allocate new header for rtt_results */ + rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh, + sizeof(rtt_results_header_t)); + if (!rtt_results_header) { + ret = BCME_NORESOURCE; + err_at = 1; + goto exit; + } + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + /* same src and header len */ + (void)memcpy_s(&rtt_results_header->peer_mac, ETHER_ADDR_LEN, + peer_addr, ETHER_ADDR_LEN); + list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + } +#endif /* WL_CFG80211 */ + + ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result); +#ifdef WL_CFG80211 + if (ret == BCME_OK) { + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len 
+ + rtt_result->detail_len; + } else { + err_at = 2; + goto exit; + } + } else { + ret = BCME_ERROR; + err_at = 4; + goto exit; + } + +exit: + if (ret != BCME_OK) { + DHD_RTT_ERR(("dhd_rtt_handle_directed_rtt_burst_end: failed, " + " ret = %d, err_at = %d\n", ret, err_at)); + if (rtt_results_header) { + list_del(&rtt_results_header->list); + MFREE(dhd->osh, rtt_results_header, + sizeof(rtt_results_header_t)); + } + } +#endif /* WL_CFG80211 */ + return ret; +} + +#ifdef WL_NAN +static void +dhd_rtt_nan_range_report(struct bcm_cfg80211 *cfg, + rtt_result_t *rtt_result, bool is_geofence) +{ + wl_nan_ev_rng_rpt_ind_t range_res; + int rtt_status; + + UNUSED_PARAMETER(range_res); + + if (!rtt_result) + return; + + rtt_status = rtt_result->report.status; + bzero(&range_res, sizeof(range_res)); + range_res.dist_mm = rtt_result->report.distance; + /* same src and header len, ignoring ret val here */ + (void)memcpy_s(&range_res.peer_m_addr, ETHER_ADDR_LEN, + &rtt_result->report.addr, ETHER_ADDR_LEN); + wl_cfgnan_process_range_report(cfg, &range_res, rtt_status); + + return; +} + +static int +dhd_rtt_handle_nan_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr, + wl_proxd_event_t *proxd_ev_data, int tlvs_len) +{ + struct net_device *ndev = NULL; + struct bcm_cfg80211 *cfg = NULL; + nan_ranging_inst_t *rng_inst = NULL; + rtt_status_info_t *rtt_status = NULL; + rtt_result_t *rtt_result = NULL; + bool geofence_rtt = FALSE; + int ret = BCME_OK; + rtt_result_t nan_rtt_res; + uint8 ftm_retry_cnt = 0; + int burst_status = -1; + + ndev = dhd_linux_get_primary_netdev(dhd); + cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", ret); + NAN_MUTEX_LOCK(); + mutex_lock(&rtt_status->rtt_mutex); + + if ((wl_cfgnan_is_enabled(cfg) == FALSE) || + ETHER_ISNULLADDR(peer_addr)) { + DHD_RTT_ERR(("Received Burst End with NULL ether addr, " + "or nan disable, nan_enable = %d\n", wl_cfgnan_is_enabled(cfg))); + ret = BCME_UNSUPPORTED; + goto exit; + } + + rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr); + if (rng_inst) { + geofence_rtt = (rng_inst->range_type + == RTT_TYPE_NAN_GEOFENCE); + } else { + DHD_RTT_ERR(("Received Burst End without Ranging Instance\n")); + ret = BCME_ERROR; + goto exit; + } + + if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER) { + ret = BCME_OK; + goto exit; + } + + bzero(&nan_rtt_res, sizeof(nan_rtt_res)); + ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, &nan_rtt_res); + if (ret != BCME_OK) { + DHD_RTT_ERR(("Failed to parse RTT result %d\n", ret)); + goto exit; + } + + burst_status = nan_rtt_res.report.status; + if (nan_rtt_res.rtt_detail.num_ota_meas <= 1) { + /* Wait for some time(CRBs) for ftm protocol to go through */ + if (rng_inst->ftm_ssn_retry_count < NAN_RTT_FTM_SSN_RETRIES) { + rng_inst->ftm_ssn_retry_count++; + ftm_retry_cnt = rng_inst->ftm_ssn_retry_count; + ret = BCME_ERROR; + goto exit; + } + /* retries over...report the result as is to host */ + } + + BCM_REFERENCE(dhd_rtt_is_valid_measurement); + + if (geofence_rtt) { + rtt_result = &nan_rtt_res; + } else { + if (RTT_IS_STOPPED(rtt_status)) { + /* Ignore the Proxd event */ + DHD_RTT((" event handler rtt is stopped \n")); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_RTT(("Device is target/Responder. Recv the event. 
\n")); + } else { + ret = BCME_UNSUPPORTED; + goto exit; + } + } + /* allocate rtt_results for new results */ + rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t)); + if (!rtt_result) { + ret = BCME_NORESOURCE; + goto exit; + } + ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, peer_addr, + proxd_ev_data, tlvs_len, rtt_result, TRUE); + if (ret != BCME_OK) { + goto exit; + } + + } + +exit: + mutex_unlock(&rtt_status->rtt_mutex); + if (ret == BCME_OK) { + /* Nothing to do for Responder */ + if (rng_inst->range_role == NAN_RANGING_ROLE_INITIATOR) { + dhd_rtt_nan_range_report(cfg, rtt_result, geofence_rtt); + } + } else { + DHD_RTT_ERR(("nan-rtt: Burst End handling failed err %d is_geofence %d " + "retry cnt %d burst status %d", ret, geofence_rtt, + ftm_retry_cnt, burst_status)); + if (rtt_result && !geofence_rtt) { + MFREE(dhd->osh, rtt_result, + sizeof(rtt_result_t)); + } + } + NAN_MUTEX_UNLOCK(); + return ret; +} +#endif /* WL_NAN */ + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int ret = BCME_OK; + int tlvs_len; + uint16 version; + wl_proxd_event_t *p_event; + wl_proxd_event_type_t event_type; + wl_proxd_ftm_session_status_t session_status; + const ftm_strmap_entry_t *p_loginfo; + rtt_result_t *rtt_result; +#ifdef WL_CFG80211 + rtt_status_info_t *rtt_status; + rtt_results_header_t *rtt_results_header = NULL; + bool is_new = TRUE; + rtt_target_info_t *target = NULL; +#endif /* WL_CFG80211 */ + + DHD_RTT(("Enter %s \n", __FUNCTION__)); + NULL_CHECK(dhd, "dhd is NULL", ret); + + if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) { + DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__, + ntoh32_ua((void *)&event->datalen))); + return -EINVAL; + } + event_type = ntoh32_ua((void *)&event->event_type); + if (event_type != WLC_E_PROXD) { + DHD_RTT_ERR((" failed event \n")); + return -EINVAL; + } + + if (!event_data) { + DHD_RTT_ERR(("%s: event_data:NULL\n", __FUNCTION__)); + return -EINVAL; + } + p_event = (wl_proxd_event_t *) event_data; + version = ltoh16(p_event->version); + if (version < WL_PROXD_API_VERSION) { + DHD_RTT_ERR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n", + version, WL_PROXD_API_VERSION)); + return ret; + } + + event_type = (wl_proxd_event_type_t) ltoh16(p_event->type); + + DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n", + p_event->type, ntoh16(p_event->type), ltoh16(p_event->type))); + p_loginfo = ftm_get_event_type_loginfo(event_type); + if (p_loginfo == NULL) { + DHD_RTT_ERR(("receive an invalid FTM event %d\n", event_type)); + ret = -EINVAL; + return ret; /* ignore this event */ + } + /* get TLVs len, skip over event header */ + if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) { + DHD_RTT_ERR(("invalid FTM event length:%d\n", ltoh16(p_event->len))); + ret = -EINVAL; + return ret; + } + tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs); + DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n", + p_loginfo->text, + version, + ltoh16(p_event->len), + ltoh16(p_event->method), + ltoh16(p_event->sid), + tlvs_len)); +#ifdef WL_CFG80211 +#ifdef WL_NAN + if ((event_type == WL_PROXD_EVENT_BURST_END) && + dhd_rtt_is_nan_peer(dhd, &event->addr)) { + DHD_RTT(("WL_PROXD_EVENT_BURST_END for NAN RTT\n")); + ret = dhd_rtt_handle_nan_burst_end(dhd, &event->addr, p_event, tlvs_len); + return ret; + } +#endif /* WL_NAN */ + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", ret); + 
mutex_lock(&rtt_status->rtt_mutex); + + if (RTT_IS_STOPPED(rtt_status)) { + /* Ignore the Proxd event */ + DHD_RTT((" event handler rtt is stopped \n")); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_RTT(("Device is target/Responder. Recv the event. \n")); + } else { + ret = BCME_NOTREADY; + goto exit; + } + } + + /* check current target_mac and event_mac are matching */ + target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + if (memcmp(&target->addr, &event->addr, ETHER_ADDR_LEN)) { + DHD_RTT(("Ignore Proxd event for the unexpected peer "MACDBG + " expected peer "MACDBG"\n", MAC2STRDBG(&event->addr), + MAC2STRDBG(&target->addr))); + goto exit; + } + +#endif /* WL_CFG80211 */ + +#ifdef WL_CFG80211 + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + is_new = !dhd_rtt_get_report_header(rtt_status, + &rtt_results_header, &event->addr); + GCC_DIAGNOSTIC_POP(); +#endif /* WL_CFG80211 */ + switch (event_type) { + case WL_PROXD_EVENT_SESSION_CREATE: + DHD_RTT(("WL_PROXD_EVENT_SESSION_CREATE\n")); + break; + case WL_PROXD_EVENT_SESSION_START: + DHD_RTT(("WL_PROXD_EVENT_SESSION_START\n")); + break; + case WL_PROXD_EVENT_BURST_START: + DHD_RTT(("WL_PROXD_EVENT_BURST_START\n")); + break; + case WL_PROXD_EVENT_BURST_END: + DHD_RTT(("WL_PROXD_EVENT_BURST_END for Legacy RTT\n")); + /* allocate rtt_results for new legacy rtt results */ + rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t)); + if (!rtt_result) { + ret = -ENOMEM; + goto exit; + } + ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, &event->addr, + p_event, tlvs_len, rtt_result, FALSE); + if (rtt_result && +#ifdef WL_CFG80211 + (ret != BCME_OK) && +#endif /* WL_CFG80211 */ + TRUE) { + /* + * Free rtt_result irrespectively, for non-cfg, + * as it would not be needed any further + */ + MFREE(dhd->osh, rtt_result, + sizeof(rtt_result_t)); + goto exit; + } + break; + case WL_PROXD_EVENT_SESSION_END: + DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n")); +#ifdef WL_CFG80211 + if (!RTT_IS_ENABLED(rtt_status)) { + DHD_RTT(("Ignore the session end evt\n")); + goto exit; + } +#endif /* WL_CFG80211 */ + if (tlvs_len > 0) { + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) &session_status, + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); + goto exit; + } + } +#ifdef WL_CFG80211 + /* In case of no result for the peer device, make fake result for error case */ + if (is_new) { + dhd_rtt_create_failure_result(rtt_status, &event->addr); + } + DHD_RTT_MEM(("RTT Session End for Legacy peer "MACDBG"\n", + MAC2STRDBG(&event->addr))); + dhd_rtt_handle_rtt_session_end(dhd); +#endif /* WL_CFG80211 */ + break; + case WL_PROXD_EVENT_SESSION_RESTART: + DHD_RTT(("WL_PROXD_EVENT_SESSION_RESTART\n")); + break; + case WL_PROXD_EVENT_BURST_RESCHED: + DHD_RTT(("WL_PROXD_EVENT_BURST_RESCHED\n")); + break; + case WL_PROXD_EVENT_SESSION_DESTROY: + DHD_RTT(("WL_PROXD_EVENT_SESSION_DESTROY\n")); + break; + case WL_PROXD_EVENT_FTM_FRAME: + DHD_RTT(("WL_PROXD_EVENT_FTM_FRAME\n")); + break; + case WL_PROXD_EVENT_DELAY: + DHD_RTT(("WL_PROXD_EVENT_DELAY\n")); + break; + case WL_PROXD_EVENT_VS_INITIATOR_RPT: + DHD_RTT(("WL_PROXD_EVENT_VS_INITIATOR_RPT\n ")); + break; + case WL_PROXD_EVENT_RANGING: + DHD_RTT(("WL_PROXD_EVENT_RANGING\n")); + break; + case WL_PROXD_EVENT_COLLECT: + DHD_RTT(("WL_PROXD_EVENT_COLLECT\n")); + if (tlvs_len > 0) { + void *buffer = 
NULL; + if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) { + ret = -ENOMEM; + goto exit; + } + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf(buffer, + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn); + MFREE(dhd->osh, buffer, tlvs_len); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n", + __FUNCTION__, event_type)); + goto exit; + } + } + break; + case WL_PROXD_EVENT_MF_STATS: + DHD_RTT(("WL_PROXD_EVENT_MF_STATS\n")); + if (tlvs_len > 0) { + void *buffer = NULL; + if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) { + ret = -ENOMEM; + goto exit; + } + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf(buffer, + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn); + MFREE(dhd->osh, buffer, tlvs_len); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n", + __FUNCTION__, event_type)); + goto exit; + } + } + break; + + default: + DHD_RTT_ERR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type)); + break; + } +exit: +#ifdef WL_CFG80211 + mutex_unlock(&rtt_status->rtt_mutex); +#endif /* WL_CFG80211 */ + + return ret; +} + +#ifdef WL_CFG80211 +static void +dhd_rtt_work(struct work_struct *work) +{ + rtt_status_info_t *rtt_status; + dhd_pub_t *dhd; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + rtt_status = container_of(work, rtt_status_info_t, work); + GCC_DIAGNOSTIC_POP(); + + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + (void) dhd_rtt_start(dhd); +} +#endif /* WL_CFG80211 */ + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa) +{ + rtt_status_info_t *rtt_status; + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + NULL_CHECK(capa, "capa is NULL", err); + bzero(capa, sizeof(rtt_capabilities_t)); + + /* set rtt capabilities */ + if (rtt_status->rtt_capa.proto & RTT_CAP_ONE_WAY) + capa->rtt_one_sided_supported = 1; + if (rtt_status->rtt_capa.proto & RTT_CAP_FTM_WAY) + capa->rtt_ftm_supported = 1; + + if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCI) + capa->lci_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCR) + capa->lcr_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_PREAMBLE) + capa->preamble_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_BW) + capa->bw_support = 1; + + /* bit mask */ + capa->preamble_support = rtt_status->rtt_capa.preamble; + capa->bw_support = rtt_status->rtt_capa.bw; + + return err; +} + +#ifdef WL_CFG80211 +int +dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info) +{ + u32 chanspec = 0; + int err = BCME_OK; + chanspec_t c = 0; + u32 channel; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + + if ((err = wldev_iovar_getint(dev, "chanspec", + (s32 *)&chanspec)) == BCME_OK) { + c = (chanspec_t)dtoh32(chanspec); + c = wl_chspec_driver_to_host(c); + channel = wf_chspec_ctlchan(c); + DHD_RTT((" control channel is %d \n", channel)); + if (CHSPEC_IS20(c)) { + channel_info->width = WIFI_CHAN_WIDTH_20; + DHD_RTT((" band is 20 \n")); + } else if (CHSPEC_IS40(c)) { + channel_info->width = WIFI_CHAN_WIDTH_40; + DHD_RTT(("band is 40 \n")); + } else { + channel_info->width = WIFI_CHAN_WIDTH_80; + DHD_RTT(("band is 80 \n")); + } + if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) && + (channel <= 
CH_MAX_2G_CHANNEL)) { + channel_info->center_freq = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + } else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) { + channel_info->center_freq = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); + } + if ((channel_info->width == WIFI_CHAN_WIDTH_80) || + (channel_info->width == WIFI_CHAN_WIDTH_40)) { + channel = CHSPEC_CHANNEL(c); + channel_info->center_freq0 = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); + } + } else { + DHD_RTT_ERR(("Failed to get the chanspec \n")); + } + return err; +} + +int +dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info) +{ + int err = BCME_OK; + char chanbuf[CHANSPEC_STR_LEN]; + int pm = PM_OFF; + int ftm_cfg_cnt = 0; + chanspec_t chanspec; + wifi_channel_info channel; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; + rtt_status_info_t *rtt_status; + + memset(&channel, 0, sizeof(channel)); + BCM_REFERENCE(chanbuf); + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT(("STA responder/Target. \n")); + } + DHD_RTT(("Enter %s \n", __FUNCTION__)); + if (!dhd_is_associated(dhd, 0, NULL)) { + if (channel_info) { + channel.width = channel_info->width; + channel.center_freq = channel_info->center_freq; + channel.center_freq0 = channel_info->center_freq; + } + else { + channel.width = WIFI_CHAN_WIDTH_80; + channel.center_freq = DEFAULT_FTM_FREQ; + channel.center_freq0 = DEFAULT_FTM_CNTR_FREQ0; + } + chanspec = dhd_rtt_convert_to_chspec(channel); + DHD_RTT(("chanspec/channel set as %s for rtt.\n", + wf_chspec_ntoa(chanspec, chanbuf))); + err = wldev_iovar_setint(dev, "chanspec", chanspec); + if (err) { + DHD_RTT_ERR(("Failed to set the chanspec \n")); + } + } + rtt_status->pm = PM_OFF; + err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); + DHD_RTT(("Current PM value read %d\n", rtt_status->pm)); + if (err) { + DHD_RTT_ERR(("Failed to get the PM value \n")); + } else { + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_RTT_ERR(("Failed to set the PM \n")); + rtt_status->pm_restore = FALSE; + } else { + rtt_status->pm_restore = TRUE; + } + } + if (!RTT_IS_ENABLED(rtt_status)) { + err = dhd_rtt_ftm_enable(dhd, TRUE); + if (err) { + DHD_RTT_ERR(("Failed to enable FTM (%d)\n", err)); + goto exit; + } + DHD_RTT(("FTM enabled \n")); + } + rtt_status->status = RTT_ENABLED; + DHD_RTT(("Responder enabled \n")); + memset(ftm_configs, 0, sizeof(ftm_configs)); + memset(ftm_params, 0, sizeof(ftm_params)); + ftm_configs[ftm_cfg_cnt].enable = TRUE; + ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_TARGET; + rtt_status->flags = WL_PROXD_SESSION_FLAG_TARGET; + DHD_RTT(("Set the device as responder \n")); + err = dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS, + ftm_configs, ftm_cfg_cnt); +exit: + if (err) { + rtt_status->status = RTT_STOPPED; + DHD_RTT_ERR(("rtt is stopped %s \n", __FUNCTION__)); + dhd_rtt_ftm_enable(dhd, FALSE); + DHD_RTT(("restoring the PM value \n")); + if (rtt_status->pm_restore) { + pm = PM_FAST; + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_RTT_ERR(("Failed to restore PM \n")); + } else { + rtt_status->pm_restore = FALSE; + } + } + } + return err; +} + +int +dhd_rtt_cancel_responder(dhd_pub_t 
*dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + int pm = 0; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + DHD_RTT(("Enter %s \n", __FUNCTION__)); + err = dhd_rtt_ftm_enable(dhd, FALSE); + if (err) { + DHD_RTT_ERR(("failed to disable FTM (%d)\n", err)); + } + rtt_status->status = RTT_STOPPED; + if (rtt_status->pm_restore) { + pm = PM_FAST; + DHD_RTT(("pm_restore=%d\n", rtt_status->pm_restore)); + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_RTT_ERR(("Failed to restore PM \n")); + } else { + rtt_status->pm_restore = FALSE; + } + } + return err; +} + +#ifdef WL_NAN +static bool +dhd_rtt_parallel_nan_rtt_sessions_supported(dhd_pub_t *dhd) +{ + bool supported = FALSE; + + if ((dhd->wlc_ver_major > RTT_PARALLEL_SSNS_SUPPORTED_MAJ_VER) || + ((dhd->wlc_ver_major == RTT_PARALLEL_SSNS_SUPPORTED_MAJ_VER) && + (dhd->wlc_ver_minor >= RTT_PARALLEL_SSNS_SUPPORTED_MIN_VER))) { + supported = TRUE; + } + + return supported; +} + +int +dhd_rtt_get_max_nan_rtt_sessions_supported(dhd_pub_t *dhd) +{ + int max_sessions = 0; + + /* Older fw branches do not support parallel rtt sessions */ + if (dhd_rtt_parallel_nan_rtt_sessions_supported(dhd)) { + max_sessions = DHD_NAN_RTT_MAX_SESSIONS; + } else { + max_sessions = DHD_NAN_RTT_MAX_SESSIONS_LEGACY; + } + + return max_sessions; +} +#endif /* WL_NAN */ +#endif /* WL_CFG80211 */ + +/* + * DHD Attach Context + */ +int +dhd_rtt_attach(dhd_pub_t *dhd) +{ + int err = BCME_OK; +#ifdef WL_CFG80211 + rtt_status_info_t *rtt_status = NULL; + + dhd->rtt_supported = FALSE; + if (dhd->rtt_state) { + return err; + } + + dhd->rtt_state = (rtt_status_info_t *)MALLOCZ(dhd->osh, + sizeof(rtt_status_info_t)); + if (dhd->rtt_state == NULL) { + err = BCME_NOMEM; + DHD_RTT_ERR(("%s : failed to create rtt_state\n", __FUNCTION__)); + return err; + } + bzero(dhd->rtt_state, sizeof(rtt_status_info_t)); + rtt_status = GET_RTTSTATE(dhd); + rtt_status->rtt_config.target_info = + (rtt_target_info_t *)MALLOCZ(dhd->osh, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + if (rtt_status->rtt_config.target_info == NULL) { + DHD_RTT_ERR(("%s failed to allocate the target info for %d\n", + __FUNCTION__, RTT_MAX_TARGET_CNT)); + err = BCME_NOMEM; + goto exit; + } + rtt_status->dhd = dhd; + mutex_init(&rtt_status->rtt_mutex); + mutex_init(&rtt_status->geofence_mutex); + INIT_LIST_HEAD(&rtt_status->noti_fn_list); + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + INIT_WORK(&rtt_status->work, dhd_rtt_work); + /* initialize proxd timer */ + INIT_DELAYED_WORK(&rtt_status->proxd_timeout, dhd_rtt_timeout_work); +#ifdef WL_NAN + /* initialize proxd retry timer */ + INIT_DELAYED_WORK(&rtt_status->rtt_retry_timer, dhd_rtt_retry_work); +#endif /* WL_NAN */ +exit: + if (err < 0) { + MFREE(dhd->osh, rtt_status->rtt_config.target_info, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t)); + } +#endif /* WL_CFG80211 */ + return err; + +} + +/* + * DHD Detach Context + */ +int +dhd_rtt_detach(dhd_pub_t *dhd) +{ + int err = BCME_OK; + +#ifdef WL_CFG80211 + rtt_status_info_t *rtt_status; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + err = dhd_rtt_deinit(dhd); + if (err != BCME_OK) { + DHD_RTT_ERR(("dhd_rtt_deinit failed while detaching" + " err = %d\n", err)); + goto exit; + } + +exit: +
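/* Detach context: release the target table and the rtt_state itself even when dhd_rtt_deinit() failed, as this is the last opportunity to return the memory. */ +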
MFREE(dhd->osh, rtt_status->rtt_config.target_info, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t)); + +#endif /* WL_CFG80211 */ + + return err; +} + +/* + * If Up context + */ +int +dhd_rtt_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; +#ifdef WL_CFG80211 + int ret; + int32 version; + rtt_status_info_t *rtt_status; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + DHD_RTT_MEM(("dhd_rtt_init ENTRY\n")); + + ret = dhd_rtt_get_version(dhd, &version); + if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) { + DHD_RTT_ERR(("%s : FTM is supported\n", __FUNCTION__)); + dhd->rtt_supported = TRUE; + /* TODO : need to find a way to check rtt capability */ + /* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */ + rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY; + + /* indicate to set tx rate */ + rtt_status->rtt_capa.feature |= RTT_FEATURE_LCI; + rtt_status->rtt_capa.feature |= RTT_FEATURE_LCR; + rtt_status->rtt_capa.feature |= RTT_FEATURE_PREAMBLE; + rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_VHT; + rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_HT; + + /* indicate to set bandwidth */ + rtt_status->rtt_capa.feature |= RTT_FEATURE_BW; + rtt_status->rtt_capa.bw |= RTT_BW_20; + rtt_status->rtt_capa.bw |= RTT_BW_40; + rtt_status->rtt_capa.bw |= RTT_BW_80; + } else { + if ((ret != BCME_OK) || (version == 0)) { + DHD_RTT_ERR(("%s : FTM is not supported\n", __FUNCTION__)); + } else { + DHD_RTT_ERR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n", + __FUNCTION__, WL_PROXD_API_VERSION, version)); + } + goto exit; + } + +#ifdef WL_NAN + rtt_status->max_nan_rtt_sessions = dhd_rtt_get_max_nan_rtt_sessions_supported(dhd); +#endif /* WL_NAN */ + /* cancel all RTT requests once we get the cancel request */ + rtt_status->all_cancel = TRUE; + +exit: + DHD_ERROR(("dhd_rtt_init EXIT, err = %d\n", err)); +#endif /* WL_CFG80211 */ + + return err; +} + +/* + * If Down context + */ +int +dhd_rtt_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; +#ifdef WL_CFG80211 + rtt_status_info_t *rtt_status; + rtt_results_header_t *rtt_header, *next; + rtt_result_t *rtt_result, *next2; + struct rtt_noti_callback *iter, *iter2; + rtt_target_info_t *rtt_target = NULL; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + DHD_RTT_MEM(("dhd_rtt_deinit: ENTER\n")); + +#ifdef WL_NAN + if (delayed_work_pending(&rtt_status->rtt_retry_timer)) { + cancel_delayed_work_sync(&rtt_status->rtt_retry_timer); + } +#endif /* WL_NAN */ + + if (work_pending(&rtt_status->work)) { + cancel_work_sync(&rtt_status->work); + rtt_status->rtt_sched = FALSE; + } + + if (delayed_work_pending(&rtt_status->proxd_timeout)) { + cancel_delayed_work_sync(&rtt_status->proxd_timeout); + } + + /* + * Cleanup attempt is required + * if a legacy RTT session is in progress + */ + if ((!RTT_IS_STOPPED(rtt_status)) && + rtt_status->rtt_config.rtt_target_cnt && + (rtt_status->cur_idx < rtt_status->rtt_config.rtt_target_cnt)) { + /* if dhd is started and there is a target cnt */ + rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + if (rtt_target->peer == RTT_PEER_AP) { + DHD_RTT_MEM(("dhd_rtt_deinit: Deleting Default FTM Session\n")); + dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + } + } + + rtt_status->status = RTT_STOPPED; + DHD_RTT(("rtt is stopped %s \n", __FUNCTION__)); + /* clear evt callback list */ +
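/* Nodes on noti_fn_list are owned by this module (registered via dhd_rtt_register_noti_callback()), so each one is unlinked and freed in place here. */ +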
GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + if (!list_empty(&rtt_status->noti_fn_list)) { + list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) { + list_del(&iter->list); + MFREE(dhd->osh, iter, sizeof(struct rtt_noti_callback)); + } + } + /* remove the rtt results */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + list_for_each_entry_safe(rtt_header, next, &rtt_status->rtt_results_cache, list) { + list_del(&rtt_header->list); + list_for_each_entry_safe(rtt_result, next2, + &rtt_header->result_list, list) { + list_del(&rtt_result->list); + MFREE(dhd->osh, rtt_result, sizeof(rtt_result_t)); + } + MFREE(dhd->osh, rtt_header, sizeof(rtt_results_header_t)); + } + } + GCC_DIAGNOSTIC_POP(); + DHD_RTT_MEM(("dhd_rtt_deinit: EXIT, err = %d\n", err)); +#endif /* WL_CFG80211 */ + return err; +} diff --git a/bcmdhd.101.10.361.x/dhd_rtt.h b/bcmdhd.101.10.361.x/dhd_rtt.h new file mode 100755 index 0000000..dd38122 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_rtt.h @@ -0,0 +1,555 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ +#ifndef __DHD_RTT_H__ +#define __DHD_RTT_H__ + +#include +#include "wifi_stats.h" + +#define RTT_MAX_TARGET_CNT 50 +#define RTT_MAX_FRAME_CNT 25 +#define RTT_MAX_RETRY_CNT 10 +#define DEFAULT_FTM_CNT 6 +#define DEFAULT_RETRY_CNT 6 +#define DEFAULT_FTM_FREQ 5180 +#define DEFAULT_FTM_CNTR_FREQ0 5210 +#define RTT_MAX_GEOFENCE_TARGET_CNT 8 + +#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count) + +#define TARGET_TYPE(target) (target->type) + +#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED) +#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED) + +#define GEOFENCE_RTT_LOCK(rtt_status) mutex_lock(&(rtt_status)->geofence_mutex) +#define GEOFENCE_RTT_UNLOCK(rtt_status) mutex_unlock(&(rtt_status)->geofence_mutex) + +#ifndef BIT +#define BIT(x) (1 << (x)) +#endif + +/* DSSS, CCK and 802.11n rates in [500kbps] units */ +#define WL_MAXRATE 108 /* in 500kbps units */ +#define WL_RATE_1M 2 /* in 500kbps units */ +#define WL_RATE_2M 4 /* in 500kbps units */ +#define WL_RATE_5M5 11 /* in 500kbps units */ +#define WL_RATE_11M 22 /* in 500kbps units */ +#define WL_RATE_6M 12 /* in 500kbps units */ +#define WL_RATE_9M 18 /* in 500kbps units */ +#define WL_RATE_12M 24 /* in 500kbps units */ +#define WL_RATE_18M 36 /* in 500kbps units */ +#define WL_RATE_24M 48 /* in 500kbps units */ +#define WL_RATE_36M 72 /* in 500kbps units */ +#define WL_RATE_48M 96 /* in 500kbps units */ +#define WL_RATE_54M 108 /* in 500kbps units */ +#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state) + +#ifdef WL_NAN +/* RTT Retry Timer Interval */ +/* Fix Me: Revert back once retry logic is back in place */ +#define DHD_RTT_RETRY_TIMER_INTERVAL_MS -1 +#endif /* WL_NAN */ + +#define DHD_RTT_INVALID_TARGET_INDEX -1 + +enum rtt_role { + RTT_INITIATOR = 0, + RTT_TARGET = 1 +}; +enum rtt_status { + RTT_STOPPED = 0, + RTT_STARTED = 1, + RTT_ENABLED = 2 +}; +typedef int64_t wifi_timestamp; /* In microseconds (us) */ +typedef int64_t wifi_timespan; +typedef int32 wifi_rssi_rtt; + +typedef enum { + RTT_INVALID, + RTT_ONE_WAY, + RTT_TWO_WAY, + RTT_AUTO +} rtt_type_t; + +/* RTT peer type */ +typedef enum { + RTT_PEER_AP = 0x1, + RTT_PEER_STA = 0x2, + RTT_PEER_P2P_GO = 0x3, + RTT_PEER_P2P_CLIENT = 0x4, + RTT_PEER_NAN = 0x5, + RTT_PEER_INVALID = 0x6 +} rtt_peer_type_t; + +/* Ranging status */ +typedef enum rtt_reason { + RTT_STATUS_SUCCESS = 0, + RTT_STATUS_FAILURE = 1, // general failure status + RTT_STATUS_FAIL_NO_RSP = 2, // target STA does not respond to request + RTT_STATUS_FAIL_REJECTED = 3, // request rejected. 
Applies to 2-sided RTT only + RTT_STATUS_FAIL_NOT_SCHEDULED_YET = 4, + RTT_STATUS_FAIL_TM_TIMEOUT = 5, // timing measurement times out + RTT_STATUS_FAIL_AP_ON_DIFF_CHANNEL = 6, // Target on different channel, cannot range + RTT_STATUS_FAIL_NO_CAPABILITY = 7, // ranging not supported + RTT_STATUS_ABORTED = 8, // request aborted for unknown reason + RTT_STATUS_FAIL_INVALID_TS = 9, // Invalid T1-T4 timestamp + RTT_STATUS_FAIL_PROTOCOL = 10, // 11mc protocol failed + RTT_STATUS_FAIL_SCHEDULE = 11, // request could not be scheduled + RTT_STATUS_FAIL_BUSY_TRY_LATER = 12, // responder cannot collaborate at time of request + RTT_STATUS_INVALID_REQ = 13, // bad request args + RTT_STATUS_NO_WIFI = 14, // WiFi not enabled Responder overrides param info + // cannot range with new params + RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15 +} rtt_reason_t; + +enum { + RTT_CAP_ONE_WAY = BIT(0), + /* IEEE802.11mc */ + RTT_CAP_FTM_WAY = BIT(1) +}; + +enum { + RTT_FEATURE_LCI = BIT(0), + RTT_FEATURE_LCR = BIT(1), + RTT_FEATURE_PREAMBLE = BIT(2), + RTT_FEATURE_BW = BIT(3) +}; + +enum { + RTT_PREAMBLE_LEGACY = BIT(0), + RTT_PREAMBLE_HT = BIT(1), + RTT_PREAMBLE_VHT = BIT(2) +}; + +enum { + RTT_BW_5 = BIT(0), + RTT_BW_10 = BIT(1), + RTT_BW_20 = BIT(2), + RTT_BW_40 = BIT(3), + RTT_BW_80 = BIT(4), + RTT_BW_160 = BIT(5) +}; + +enum rtt_rate_bw { + RTT_RATE_20M, + RTT_RATE_40M, + RTT_RATE_80M, + RTT_RATE_160M +}; + +typedef enum ranging_type { + RTT_TYPE_INVALID = 0, + RTT_TYPE_LEGACY = 1, + RTT_TYPE_NAN_DIRECTED = 2, + RTT_TYPE_NAN_GEOFENCE = 3 +} ranging_type_t; + +typedef enum ranging_target_list_mode { + RNG_TARGET_LIST_MODE_INVALID = 0, + RNG_TARGET_LIST_MODE_LEGACY = 1, + RNG_TARGET_LIST_MODE_NAN = 2, + RNG_TARGET_LIST_MODE_MIX = 3 +} ranging_target_list_mode_t; + +#define FTM_MAX_NUM_BURST_EXP 14 +#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY) +#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY) +#define HAS_RTT_CAP(cap) (HAS_ONEWAY_CAP(cap) || HAS_11MC_CAP(cap)) + +typedef struct rtt_target_info { + struct ether_addr addr; + struct ether_addr local_addr; + rtt_type_t type; /* rtt_type */ + rtt_peer_type_t peer; /* peer type */ + wifi_channel_info channel; /* channel information */ + chanspec_t chanspec; /* chanspec for channel */ + bool disable; /* disable for RTT measurement */ + /* + * Time interval between bursts (units: 100 ms). + * Applies to 1-sided and 2-sided RTT multi-burst requests. + * Range: 0-31, 0: no preference by initiator (2-sided RTT) + */ + uint32 burst_period; + /* + * Total number of RTT bursts to be executed. It will be + * specified in the same way as the parameter "Number of + * Burst Exponent" found in the FTM frame format. It + * applies to both: 1-sided RTT and 2-sided RTT. Valid + * values are 0 to 15 as defined in 802.11mc std. + * 0 means single shot + * The implication of this parameter on the maximum + * number of RTT results is the following: + * for 1-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst) + * for 2-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst - 1) + */ + uint16 num_burst; + /* + * num of frames per burst. + * Minimum value = 1, Maximum value = 31 + * For 2-sided this equals the number of FTM frames + * to be attempted in a single burst. 
This also + * equals the number of FTM frames that the + * initiator will request that the responder send + * in a single frame + */ + uint32 num_frames_per_burst; + /* + * num of frames in each RTT burst + * for single side, measurement result num = frame number + * for 2 side RTT, measurement result num = frame number - 1 + */ + uint32 num_retries_per_ftm; /* retry time for RTT measurment frame */ + /* following fields are only valid for 2 side RTT */ + uint32 num_retries_per_ftmr; + uint8 LCI_request; + uint8 LCR_request; + /* + * Applies to 1-sided and 2-sided RTT. Valid values will + * be 2-11 and 15 as specified by the 802.11mc std for + * the FTM parameter burst duration. In a multi-burst + * request, if responder overrides with larger value, + * the initiator will return failure. In a single-burst + * request if responder overrides with larger value, + * the initiator will sent TMR_STOP to terminate RTT + * at the end of the burst_duration it requested. + */ + uint32 burst_duration; + uint32 burst_timeout; + uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */ + uint8 bw; /* 5, 10, 20, 40, 80, 160 */ +} rtt_target_info_t; + +typedef struct rtt_goefence_target_info { + bool valid; + struct ether_addr peer_addr; +} rtt_geofence_target_info_t; + +typedef struct rtt_config_params { + int8 rtt_target_cnt; + uint8 target_list_mode; + rtt_target_info_t *target_info; +} rtt_config_params_t; + +typedef struct rtt_geofence_setup_status { + bool geofence_setup_inprog; /* Lock to serialize geofence setup */ + struct nan_ranging_inst *rng_inst; /* Locked for this ranging instance */ +} rtt_geofence_setup_status_t; + +typedef struct rtt_geofence_cfg { + int8 geofence_target_cnt; + int8 cur_target_idx; + rtt_geofence_target_info_t geofence_target_info[RTT_MAX_GEOFENCE_TARGET_CNT]; + int geofence_rtt_interval; + int max_geofence_sessions; /* Polled from FW via IOVAR Query */ + int geofence_sessions_cnt; /* No. of Geofence/Resp Sessions running currently */ + rtt_geofence_setup_status_t geofence_setup_status; +#ifdef RTT_GEOFENCE_CONT + bool geofence_cont; +#endif /* RTT_GEOFENCE_CONT */ +} rtt_geofence_cfg_t; + +typedef struct rtt_directed_setup_status { + bool directed_na_setup_inprog; /* Lock to serialize directed setup */ + struct nan_ranging_inst *rng_inst; /* Locked for this ranging instance */ +} rtt_directed_setup_status_t; + +typedef struct rtt_directed_cfg { + int directed_sessions_cnt; /* No. 
of Geofence/Resp Sessions running currently */ + rtt_directed_setup_status_t directed_setup_status; +} rtt_directed_cfg_t; + +/* + * Keep Adding more reasons + * going forward if needed + */ +enum rtt_schedule_reason { + RTT_SCHED_HOST_TRIGGER = 1, /* On host command for directed RTT */ + RTT_SCHED_SUB_MATCH = 2, /* on Sub Match for svc with range req */ + RTT_SCHED_DIR_TRIGGER_FAIL = 3, /* On failure of Directed RTT Trigger */ + RTT_SCHED_DP_END = 4, /* ON NDP End event from fw */ + RTT_SCHED_DP_REJECTED = 5, /* On receiving reject dp event from fw */ + RTT_SCHED_RNG_RPT_DIRECTED = 6, /* On Ranging report for directed RTT */ + RTT_SCHED_RNG_TERM = 7, /* On Range Term Indicator */ + RTT_SHCED_HOST_DIRECTED_TERM = 8, /* On host terminating directed RTT sessions */ + RTT_SCHED_RNG_RPT_GEOFENCE = 9, /* On Ranging report for geofence RTT */ + RTT_SCHED_RTT_RETRY_GEOFENCE = 10, /* On Geofence Retry */ + RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE = 11, /* On Rng Term, while pending role change */ + RTT_SCHED_RNG_TERM_SUB_SVC_CANCEL = 12, /* Due rng canc attempt, on sub cancel */ + RTT_SCHED_RNG_TERM_SUB_SVC_UPD = 13, /* Due rng canc attempt, on sub update */ + RTT_SCHED_RNG_TERM_PUB_RNG_CLEAR = 14, /* Due rng canc attempt, on pub upd/timeout */ + RTT_SCHED_RNG_RESP_IND = 15, /* Due to rng resp ind */ + RTT_SCHED_RNG_DIR_EXCESS_TARGET = 16 /* On ssn end, if excess dir tgt pending */ +}; + +/* + * Keep Adding more invalid RTT states + * going forward if needed + */ +enum rtt_invalid_state { + RTT_STATE_VALID = 0, /* RTT state is valid */ + RTT_STATE_INV_REASON_NDP_EXIST = 1 /* RTT state invalid as ndp exists */ +}; + +typedef struct rtt_status_info { + dhd_pub_t *dhd; + int8 status; /* current status for the current entry */ + int8 txchain; /* current device tx chain */ + int pm; /* to save current value of pm */ + int8 pm_restore; /* flag to restore the old value of pm */ + int8 cur_idx; /* current entry to do RTT */ + int8 start_idx; /* start index for RTT */ + bool all_cancel; /* cancel all requests once we get the cancel request */ + uint32 flags; /* indicate whether device is configured as initiator or target */ + struct capability { + int32 proto :8; + int32 feature :8; + int32 preamble :8; + int32 bw :8; + } rtt_capa; /* rtt capability */ + struct mutex rtt_mutex; + struct mutex geofence_mutex; + rtt_config_params_t rtt_config; + rtt_geofence_cfg_t geofence_cfg; + rtt_directed_cfg_t directed_cfg; + struct work_struct work; + struct list_head noti_fn_list; + struct list_head rtt_results_cache; /* store results for RTT */ + int rtt_sched_reason; /* rtt_schedule_reason: what scheduled RTT */ + struct delayed_work proxd_timeout; /* Proxd Timeout work */ + struct delayed_work rtt_retry_timer; /* Timer for retry RTT after all targets done */ + bool rtt_sched; /* To serialize rtt thread */ + int max_nan_rtt_sessions; /* To be Polled from FW via IOVAR Query */ +} rtt_status_info_t; + +typedef struct rtt_report { + struct ether_addr addr; + unsigned int burst_num; /* # of burst inside a multi-burst request */ + unsigned int ftm_num; /* total RTT measurement frames attempted */ + unsigned int success_num; /* total successful RTT measurement frames */ + uint8 num_per_burst_peer; /* max number of FTM frames per burst the peer supports */ + rtt_reason_t status; /* ranging status */ + /* in s, 11mc only, only for RTT_REASON_FAIL_BUSY_TRY_LATER, 1-31s */ + uint8 retry_after_duration; + rtt_type_t type; /* rtt type */ + wifi_rssi_rtt rssi; /* average rssi in 0.5 dB steps e.g.
143 implies -71.5 dB */ + wifi_rssi_rtt rssi_spread; /* rssi spread in 0.5 dB steps e.g. 5 implies 2.5 dB spread */ + /* + * 1-sided RTT: TX rate of RTT frame. + * 2-sided RTT: TX rate of initiator's Ack in response to FTM frame. + */ + wifi_rate_v1 tx_rate; + /* + * 1-sided RTT: TX rate of Ack from other side. + * 2-sided RTT: TX rate of FTM frame coming from responder. + */ + wifi_rate_v1 rx_rate; + wifi_timespan rtt; /* round trip time in picoseconds (dhd_rtt.c converts nano -> pico) */ + wifi_timespan rtt_sd; /* rtt standard deviation in 0.1 nanoseconds */ + wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */ + int distance; /* distance in mm (optional) */ + int distance_sd; /* standard deviation in mm (optional) */ + int distance_spread; /* difference between max and min distance recorded (optional) */ + wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */ + int burst_duration; /* in ms, how long FW takes to finish one burst measurement */ + int negotiated_burst_num; /* Number of bursts allowed by the responder */ + bcm_tlv_t *LCI; /* LCI Report */ + bcm_tlv_t *LCR; /* Location Civic Report */ +} rtt_report_t; +#define RTT_REPORT_SIZE (sizeof(rtt_report_t)) + +/* rtt_results_header to maintain rtt result list per mac address */ +typedef struct rtt_results_header { + struct ether_addr peer_mac; + uint32 result_cnt; + uint32 result_tot_len; /* sum of report_len of rtt_result */ + struct list_head list; + struct list_head result_list; +} rtt_results_header_t; +struct rtt_result_detail { + uint8 num_ota_meas; + uint32 result_flags; +}; +/* rtt_result to link all of rtt_report */ +typedef struct rtt_result { + struct list_head list; + struct rtt_report report; + int32 report_len; /* total length of rtt_report */ + struct rtt_result_detail rtt_detail; + int32 detail_len; +} rtt_result_t; + +/* RTT Capabilities */ +typedef struct rtt_capabilities { + uint8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */ + uint8 rtt_ftm_supported; /* if ftm rtt data collection is supported */ + uint8 lci_support; /* location configuration information */ + uint8 lcr_support; /* Civic Location */ + uint8 preamble_support; /* bit mask indicating which preambles are supported */ + uint8 bw_support; /* bit mask indicating which BWs are supported */ +} rtt_capabilities_t; + +/* RTT responder information */ +typedef struct wifi_rtt_responder { + wifi_channel_info channel; /* channel of responder */ + uint8 preamble; /* preamble supported by responder */ +} wifi_rtt_responder_t; + +typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data); +/* Linux wrapper to call common dhd_rtt_set_cfg */ +int dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf); + +int dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt); + +int dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, + dhd_rtt_compl_noti_fn noti_fn); + +int dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn); + +int dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa); + +int dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info); + +int dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info); + +int dhd_dev_rtt_cancel_responder(struct net_device *dev); +/* export to upper layer */ +chanspec_t dhd_rtt_convert_to_chspec(wifi_channel_info channel); + +int dhd_rtt_idx_to_burst_duration(uint idx); + +int dhd_rtt_set_cfg(dhd_pub_t *dhd,
rtt_config_params_t *params); + +#ifdef WL_NAN +void dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd); +#ifdef RTT_GEOFENCE_CONT +void dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont); + +void dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont); +#endif /* RTT_GEOFENCE_CONT */ + +#ifdef RTT_GEOFENCE_INTERVAL +void dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval); +#endif /* RTT_GEOFENCE_INTERVAL */ + +int dhd_rtt_get_geofence_max_sessions(dhd_pub_t *dhd); + +bool dhd_rtt_geofence_sessions_maxed_out(dhd_pub_t *dhd); + +int dhd_rtt_get_geofence_sessions_cnt(dhd_pub_t *dhd); + +int dhd_rtt_update_geofence_sessions_cnt(dhd_pub_t *dhd, bool incr, + struct ether_addr *peer_addr); + +int8 dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd); + +rtt_geofence_target_info_t* dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd); + +rtt_geofence_target_info_t* dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd); + +rtt_geofence_target_info_t* +dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, + int8 *index); + +int dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target); + +int dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr); + +int dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd); + +int dhd_rtt_delete_nan_session(dhd_pub_t *dhd); + +bool dhd_rtt_nan_is_directed_setup_in_prog(dhd_pub_t *dhd); + +bool dhd_rtt_nan_is_directed_setup_in_prog_with_peer(dhd_pub_t *dhd, + struct ether_addr *peer); + +void dhd_rtt_nan_update_directed_setup_inprog(dhd_pub_t *dhd, + struct nan_ranging_inst *rng_inst, bool inprog); + +bool dhd_rtt_nan_directed_sessions_allowed(dhd_pub_t *dhd); + +bool dhd_rtt_nan_all_directed_sessions_triggered(dhd_pub_t *dhd); + +void dhd_rtt_nan_update_directed_sessions_cnt(dhd_pub_t *dhd, bool incr); +#endif /* WL_NAN */ + +uint8 dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr); + +int8 dhd_rtt_get_cur_target_idx(dhd_pub_t *dhd); + +int8 dhd_rtt_set_next_target_idx(dhd_pub_t *dhd, int start_idx); + +void dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason); + +int dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt); + +int dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn); + +int dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn); + +int dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); + +int dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa); + +int dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info); + +int dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info); + +int dhd_rtt_cancel_responder(dhd_pub_t *dhd); + +int dhd_rtt_attach(dhd_pub_t *dhd); + +int dhd_rtt_detach(dhd_pub_t *dhd); + +int dhd_rtt_init(dhd_pub_t *dhd); + +int dhd_rtt_deinit(dhd_pub_t *dhd); + +#ifdef WL_CFG80211 +#ifdef WL_NAN +int dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, + struct ether_addr *peer); + +void dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd); + +int8 dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd); + +void dhd_rtt_set_geofence_cur_target_idx(dhd_pub_t *dhd, int8 idx); + +rtt_geofence_setup_status_t* dhd_rtt_get_geofence_setup_status(dhd_pub_t *dhd); + +bool dhd_rtt_is_geofence_setup_inprog(dhd_pub_t *dhd); + +bool dhd_rtt_is_geofence_setup_inprog_with_peer(dhd_pub_t *dhd, + struct ether_addr *peer_addr); + +void dhd_rtt_set_geofence_setup_status(dhd_pub_t 
*dhd, bool inprog, + struct ether_addr *peer_addr); + +int dhd_rtt_get_max_nan_rtt_sessions_supported(dhd_pub_t *dhd); +#endif /* WL_NAN */ +#endif /* WL_CFG80211 */ + +#endif /* __DHD_RTT_H__ */ diff --git a/bcmdhd.101.10.361.x/dhd_sdio.c b/bcmdhd.101.10.361.x/dhd_sdio.c new file mode 100755 index 0000000..6fdb0e9 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_sdio.c @@ -0,0 +1,11777 @@ +/* + * DHD Bus Module for SDIO + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +#include +#include +#include + +#ifdef BCMEMBEDIMAGE +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#include +#include +#include +#include +#include /* need to still support chips no longer in trunk firmware */ + +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_SPROM) +#include +#endif /* defined(DHD_SPROM) */ + +#include +#ifdef BCMSPI +#include +#endif /* BCMSPI */ +#include +#include +#include +#include + +#include +#include <802.1d.h> +#include <802.11.h> + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DHD_PKTDUMP_TOFW +#include +#endif +#include +#include + +#ifdef PROP_TXSTATUS +#include +#endif +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef BT_OVER_SDIO +#include +#endif /* BT_OVER_SDIO */ + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) +#include +#endif /* DEBUGGER || DHD_DSCOPE */ + +bool dhd_mp_halting(dhd_pub_t *dhdp); +extern void bcmsdh_waitfor_iodrain(void *sdh); +extern void bcmsdh_reject_ioreqs(void *sdh, bool reject); +extern bool bcmsdh_fatal_error(void *sdh); +static int dhdsdio_suspend(void *context); +static int dhdsdio_resume(void *context); + +#ifndef DHDSDIO_MEM_DUMP_FNAME +#define DHDSDIO_MEM_DUMP_FNAME "mem_dump" +#endif + +#define QLEN (1024) /* bulk rx and tx queue lengths */ +#define FCHI (QLEN - 10) +#define FCLOW (FCHI / 2) +#define PRIOMASK 7 /* XXX FixMe: should come from elsewhere... + * MAXPRIO? PKTQ_MAX_PREC? WLC? Other? 
+ */
+
+#define TXRETRIES	2	/* # of retries for tx frames */
+#define READ_FRM_CNT_RETRIES	3
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND	50	/* Default for max rx frames in one scheduling */
+#endif
+
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND	20	/* Default for max tx frames in one scheduling */
+#endif
+
+#define DHD_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048	/* Block size used for downloading of dongle image */
+#define MAX_MEMBLOCK	(32 * 1024)	/* Maximum block size used for downloading of dongle image */
+
+#define MAX_DATA_BUF	(64 * 1024)	/* Must be large enough to hold biggest possible glom */
+#define MAX_MEM_BUF	4096
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD	32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN	(SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_HDRLEN_TXGLOM	(SDPCM_HDRLEN + SDPCM_HWEXT_LEN)
+#define MAX_TX_PKTCHAIN_CNT	SDPCM_MAXGLOM_SIZE
+
+#ifdef SDTEST
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ	32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ	2048	/* XXX Should be based on PKTGET limits? */
+
+/* Maximum milliseconds to wait for F2 to come up */
+#ifdef BCMQT
+#define DHD_WAIT_F2RDY	30000
+#else
+#define DHD_WAIT_F2RDY	3000
+#endif /* BCMQT */
+
+/* Maximum usec to wait for HTAVAIL to come up */
+#ifdef BCMQT
+#define DHD_WAIT_HTAVAIL	10000000
+#else
+#define DHD_WAIT_HTAVAIL	10000
+#endif /* BCMQT */
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#ifdef NO_EXT32K
+#define PMU_MAX_TRANSITION_DLY	(1000000*5)
+#else
+#define PMU_MAX_TRANSITION_DLY	1000000
+#endif
+#endif // endif
+
+/* Hooks for limiting the custom threshold on tx frames serviced during rx processing */
+#define DEFAULT_TXINRX_THRES	0
+#ifndef CUSTOM_TXINRX_THRES
+#define CUSTOM_TXINRX_THRES	DEFAULT_TXINRX_THRES
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+/* XXX #define F2ASYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED | SDIO_REQ_ASYNC) */
+
+/* Packet free applicable unconditionally for sdio and sdspi.  Conditional if
+ * bufpool was present for gspi bus.
+ */ +#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \ + PKTFREE(bus->dhd->osh, pkt, FALSE); +DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); + +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW +extern unsigned int system_hw_rev; +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */ + +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX 8192 +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; + +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) +#define KSO_ENAB(bus) ((bus)->kso) +#define SR_ENAB(bus) ((bus)->_srenab) +#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto)) + +#define MIN_RSRC_SR 0x3 +#define CORE_CAPEXT_ADDR_OFFSET (0x64c) +#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1) +#define RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define RCTL_LOGIC_DISABLE_MASK (1 << 27) + +#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup) +#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */ +#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */ +#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */ +#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0) +#define CC_CHIPCTRL3_SR_ENG_ENABLE (1 << 2) +#define OVERFLOW_BLKSZ512_WM 96 +#define OVERFLOW_BLKSZ512_MES 80 + +#define CC_PMUCC3 (0x3) + +#ifdef DHD_UCODE_DOWNLOAD +/* Ucode host download related macros */ +#define UCODE_DOWNLOAD_REQUEST 0xCAFECAFE +#define UCODE_DOWNLOAD_COMPLETE 0xABCDABCD +#endif /* DHD_UCODE_DOWNLOAD */ + +#if defined(BT_OVER_SDIO) +#define BTMEM_OFFSET 0x19000000 +/* BIT0 => WLAN Power UP and BIT1=> WLAN Wake */ +#define BT2WLAN_PWRUP_WAKE 0x03 +#define BT2WLAN_PWRUP_ADDR 0x640894 /* This address is specific to 43012B0 */ + +#define BTFW_MAX_STR_LEN 600 +#define BTFW_DOWNLOAD_BLK_SIZE (BTFW_MAX_STR_LEN/2 + 8) + +#define BTFW_ADDR_MODE_UNKNOWN 0 +#define BTFW_ADDR_MODE_EXTENDED 1 +#define BTFW_ADDR_MODE_SEGMENT 2 +#define BTFW_ADDR_MODE_LINEAR32 3 + +#define BTFW_HEX_LINE_TYPE_DATA 0 +#define BTFW_HEX_LINE_TYPE_END_OF_DATA 1 +#define BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS 2 +#define BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS 4 +#define BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS 5 + +#endif /* defined (BT_OVER_SDIO) */ + +/* Private data for SDIO bus interaction */ +typedef struct dhd_bus { + dhd_pub_t *dhd; + + bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */ + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + + sdpcmd_regs_t *regs; /* Registers for SDIO core */ + uint sdpcmrev; /* SDIO core revision */ + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 bus_num; /* bus number */ + uint32 slot_num; /* slot ID */ + uint32 hostintmask; /* Copy of Host Interrupt Mask */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control 
*/ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ + + uint blocksize; /* Block size of SDIO transfers */ + uint roundup; /* Max roundup limit */ + + struct pktq txq; /* Queue length used for flow-control */ + uint8 flowcontrol; /* per prio flow control bitmask */ + uint8 tx_seq; /* Transmit sequence number (next) */ + uint8 tx_max; /* Maximum transmit sequence allowed */ + +#ifdef DYNAMIC_MAX_HDR_READ + uint8 *hdrbufp; +#else + uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN]; +#endif + uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ + uint16 nextlen; /* Next Read Len from last header */ + uint8 rx_seq; /* Receive sequence number (expected) */ + bool rxskip; /* Skip receive (awaiting NAK ACK) */ + + void *glomd; /* Packet containing glomming descriptor */ + void *glom; /* Packet chain for glommed superframe */ + uint glomerr; /* Glom packet read errors */ + + uint8 *rxbuf; /* Buffer for receiving control packets */ + uint rxblen; /* Allocated length of rxbuf */ + uint8 *rxctl; /* Aligned pointer into rxbuf */ + uint8 *databuf; /* Buffer for receiving big glom packet */ + uint8 *dataptr; /* Aligned pointer into databuf */ + uint rxlen; /* Length of valid data in buffer */ + + uint8 sdpcm_ver; /* Bus protocol reported by dongle */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + uint spurious; /* Count of spurious interrupts */ + uint pollrate; /* Ticks between device polls */ + uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ + + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ + + uint regfails; /* Count of R_REG/W_REG failures */ + + uint clkstate; /* State of sd and backplane clock(s) */ + bool activity; /* Activity flag for clock down */ + int32 idletime; /* Control for activity timeout */ + int32 idlecount; /* Activity timeout counter */ + int32 idleclock; /* How to set bus driver when idle */ + int32 sd_divisor; /* Speed control to bus driver */ + int32 sd_mode; /* Mode control to bus driver */ + int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */ + bool use_rxchain; /* If dhd should use PKT chains */ + bool sleeping; /* Is SDIO bus sleeping? 
 */
+#if defined(LINUX) && defined(SUPPORT_P2P_GO_PS)
+	wait_queue_head_t bus_sleep;
+#endif /* LINUX && SUPPORT_P2P_GO_PS */
+	bool ctrl_wait;
+	wait_queue_head_t ctrl_tx_wait;
+	uint rxflow_mode;	/* Rx flow control mode */
+	bool rxflow;		/* Is rx flow control on */
+	uint prev_rxlim_hit;	/* Is prev rx limit exceeded (per dpc schedule) */
+#ifdef BCMINTERNAL
+	bool _nopmu;		/* No PMU (FPGA), don't try it */
+	bool clockpoll;		/* Force clock polling (no chipactive interrupt) */
+#endif
+	bool alp_only;		/* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+	bool usebufpool;
+	int32 txinrx_thres;	/* num of in-queued pkts */
+	int32 dotxinrx;		/* tx first in dhdsdio_readframes */
+#ifdef BCMSDIO_RXLIM_POST
+	bool rxlim_en;
+	uint32 rxlim_addr;
+#endif /* BCMSDIO_RXLIM_POST */
+#ifdef SDTEST
+	/* external loopback */
+	bool ext_loop;
+	uint8 loopid;
+
+	/* pktgen configuration */
+	uint pktgen_freq;	/* Ticks between bursts */
+	uint pktgen_count;	/* Packets to send each burst */
+	uint pktgen_print;	/* Bursts between count displays */
+	uint pktgen_total;	/* Stop after this many */
+	uint pktgen_minlen;	/* Minimum packet data len */
+	uint pktgen_maxlen;	/* Maximum packet data len */
+	uint pktgen_mode;	/* Configured mode: tx, rx, or echo */
+	uint pktgen_stop;	/* Number of tx failures causing stop */
+
+	/* active pktgen fields */
+	uint pktgen_tick;	/* Tick counter for bursts */
+	uint pktgen_ptick;	/* Burst counter for printing */
+	uint pktgen_sent;	/* Number of test packets generated */
+	uint pktgen_rcvd;	/* Number of test packets received */
+	uint pktgen_prev_time;	/* Time at which previous stats were printed */
+	uint pktgen_prev_sent;	/* Number of test packets generated when
+				 * previous stats were printed
+				 */
+	uint pktgen_prev_rcvd;	/* Number of test packets received when
+				 * previous stats were printed
+				 */
+	uint pktgen_fail;	/* Number of failed send attempts */
+	uint16 pktgen_len;	/* Length of next packet to send */
+#define PKTGEN_RCV_IDLE		(0)
+#define PKTGEN_RCV_ONGOING	(1)
+	uint16 pktgen_rcv_state;	/* receive state */
+	uint pktgen_rcvd_rcvsession;	/* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+	/* Some additional counters */
+	uint tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint fcqueued;		/* Tx packets that got queued */
+	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
+	uint rx_toolong;	/* Received frames too long to process */
+	uint rxc_errors;	/* SDIO errors when reading control frames */
+	uint rx_hdrfail;	/* SDIO errors on header reads */
+	uint rx_badhdr;		/* Bad received headers (roosync?) */
+	uint rx_badseq;		/* Mismatched rx sequence number */
+	uint fc_rcvd;		/* Number of flow-control events received */
+	uint fc_xoff;		/* Number which turned on flow-control */
+	uint fc_xon;		/* Number which turned off flow-control */
+	uint rxglomfail;	/* Failed deglom attempts */
+	uint rxglomframes;	/* Number of glom frames (superframes) */
+	uint rxglompkts;	/* Number of packets from glom frames */
+	uint f2rxhdrs;		/* Number of header reads */
+	uint f2rxdata;		/* Number of frame data reads */
+	uint f2txdata;		/* Number of f2 frame writes */
+	uint f1regdata;		/* Number of f1 register accesses */
+	wake_counts_t wake_counts;	/* Wake up counter */
+#ifdef BCMSPI
+	bool dwordmode;
+#endif /* BCMSPI */
+#ifdef DHDENABLE_TAILPAD
+	uint tx_tailpad_chain;	/* Number of tail paddings by chaining pad_pkt */
+	uint tx_tailpad_pktget;	/* Number of tail paddings by new PKTGET */
+#endif /* DHDENABLE_TAILPAD */
+#ifdef BCMINTERNAL
+	uint tx_deferred;	/* Tx calls queued while event pending */
+#endif
+	uint8 *ctrl_frame_buf;
+	uint32 ctrl_frame_len;
+	bool ctrl_frame_stat;
+#ifndef BCMSPI
+	uint32 rxint_mode;	/* rx interrupt mode */
+#endif /* !BCMSPI */
+	bool remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+				 * Available with socram rev 16
+				 * Remap region not DMA-able
+				 */
+	bool kso;
+	bool _slpauto;
+	bool _oobwakeup;
+	bool _srenab;
+	bool readframes;
+	bool reqbussleep;
+	uint32 resetinstr;
+	uint32 dongle_ram_base;
+
+	void *glom_pkt_arr[SDPCM_MAXGLOM_SIZE];	/* Array of pkts for glomming */
+	uint32 txglom_cnt;	/* Number of pkts in the glom array */
+	uint32 txglom_total_len;	/* Total length of pkts in glom array */
+	bool txglom_enable;	/* Flag to indicate whether tx glom is enabled/disabled */
+	uint32 txglomsize;	/* Glom size limitation */
+#ifdef DHDENABLE_TAILPAD
+	void *pad_pkt;
+#endif /* DHDENABLE_TAILPAD */
+	uint32 dongle_trap_addr;	/* device trap addr location in device memory */
+#if defined(BT_OVER_SDIO)
+	char *btfw_path;	/* module_param: path to BT firmware image */
+	uint32 bt_use_count;	/* Counter that tracks whether BT is using the bus */
+#endif /* defined (BT_OVER_SDIO) */
+	uint txglomframes;	/* Number of tx glom frames (superframes) */
+	uint txglompkts;	/* Number of packets from tx glom frames */
+#ifdef PKT_STATICS
+	struct pkt_statics tx_statics;
+#endif
+	uint8 *membuf;		/* Buffer for dhdsdio_membytes */
+#ifdef CONSOLE_DPC
+	char cons_cmd[16];
+#endif
+} dhd_bus_t;
+
+/*
+ * Whenever the DHD_IDLE_IMMEDIATE condition is handled, we now also have to
+ * check whether BT is active. Instead of adding #ifdef code in all those
+ * places, a single macro check is added as part of the if condition that
+ * checks for DHD_IDLE_IMMEDIATE. In non-BT-over-SDIO builds, this macro
+ * always returns TRUE. In builds where BT_OVER_SDIO is enabled, it expands
+ * to a condition check that tests whether bt_use_count is zero. So the macro
+ * equates to 1 if bt_use_count is 0, indicating that there are no active
+ * users, and to 0 if bt_use_count is non-zero, thereby preventing the caller
+ * from executing the sleep calls.
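+ */
+
+/* Illustrative sketch (not part of the original source): how this check is
+ * meant to gate an immediate idle-sleep decision. Compiled out; for reading only.
+ */
+#if 0
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && NO_OTHER_ACTIVE_BUS_USER(bus)) {
+		/* Nothing pending on WLAN, and BT (if it shares the bus) is idle too */
+		dhdsdio_bussleep(bus, TRUE);
+	}
+#endif
+
+/* In non-BT builds the check below reduces to the constant 1.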
+ */ +#ifdef BT_OVER_SDIO +#define NO_OTHER_ACTIVE_BUS_USER(bus) (bus->bt_use_count == 0) +#else +#define NO_OTHER_ACTIVE_BUS_USER(bus) (1) +#endif /* BT_OVER_SDIO */ + +/* clkstate */ +#define CLK_NONE 0 +#define CLK_SDONLY 1 +#define CLK_PENDING 2 /* Not used yet */ +#define CLK_AVAIL 3 + +#ifdef BCMINTERNAL +#define DHD_NOPMU(dhd) ((dhd)->_nopmu) +#else +#define DHD_NOPMU(dhd) (FALSE) +#endif + +#if defined(BCMSDIOH_STD) +#define BLK_64_MAXTXGLOM 20 +#endif /* BCMSDIOH_STD */ + +#ifdef DHD_DEBUG +static int qcount[NUMPRIO]; +static int tx_packets[NUMPRIO]; +#endif /* DHD_DEBUG */ + +/* Deferred transmit */ +const uint dhd_deferred_tx = 1; + +extern uint dhd_watchdog_ms; +#ifndef NDIS +extern uint sd_f1_blocksize; +#endif /* !NDIS */ + +#ifdef BCMSPI_ANDROID +extern uint *dhd_spi_lockcount; +#endif /* BCMSPI_ANDROID */ + +extern void dhd_os_wd_timer(void *bus, uint wdtick); +int dhd_enableOOB(dhd_pub_t *dhd, bool sleep); + +#ifdef DHD_PM_CONTROL_FROM_FILE +extern bool g_pm_control; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +/* Tx/Rx bounds */ +uint dhd_txbound; +uint dhd_rxbound; +uint dhd_txminmax = DHD_TXMINMAX; + +/* override the RAM size if possible */ +#define DONGLE_MIN_RAMSIZE (128 *1024) +int dhd_dongle_ramsize; + +uint dhd_doflow = TRUE; +uint dhd_dpcpoll = FALSE; + +#ifdef linux +module_param(dhd_doflow, uint, 0644); +module_param(dhd_dpcpoll, uint, 0644); +#endif + +static bool dhd_alignctl; + +static bool sd1idle; + +static bool retrydata; +#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata) + +#ifndef BCMINTERNAL +#ifdef BCMSPI +/* At a watermark around 8 the spid hits underflow error. */ +static uint watermark = 32; +static uint mesbusyctrl = 0; +#else +static uint watermark = 8; +static uint mesbusyctrl = 0; +#endif /* BCMSPI */ +#ifdef DYNAMIC_MAX_HDR_READ +uint firstread = DHD_FIRSTREAD; +#else +static const uint firstread = DHD_FIRSTREAD; +#endif +#else /* BCMINTERNAL */ +/* PR47410: low watermark to avoid F2 hang after SD clock stops */ +/* PR48178: read to (not through) bus burst to avoid F2 underflow */ +#ifdef BCMSPI +static uint watermark = 32; +static uint mesbusyctrl = 0; +#else +static uint watermark = 8; +static uint mesbusyctrl = 0; +#endif /* BCMSPI */ +static uint firstread = DHD_FIRSTREAD; +/* Additional knobs in case we need them */ +static bool tstoph = FALSE; +static bool checkfifo = FALSE; +uint dhd_anychip = 0; +#endif /* BCMINTERNAL */ + +/* Retry count for register access failures */ +static const uint retry_limit = 2; + +/* Force even SD lengths (some host controllers mess up on odd bytes) */ +static bool forcealign; + +#if defined(DEBUGGER) +static uint32 dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr); +static void dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val); + +/** the debugger layer will call back into this (bus) layer to read/write dongle memory */ +static struct dhd_dbg_bus_ops_s bus_ops = { + .read_u16 = NULL, + .read_u32 = dhd_sdio_reg_read, + .write_u32 = dhd_sdio_reg_write, +}; +#endif /* DEBUGGER */ + +#define ALIGNMENT 4 + +#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN) +extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable); +#endif + +#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) +#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD +#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */ +#define PKTALIGN(osh, p, len, align) \ + do { \ + uintptr datalign; \ + datalign = (uintptr)PKTDATA((osh), (p)); \ + datalign = ROUNDUP(datalign, (align)) 
- datalign; \
+		ASSERT(datalign < (align)); \
+		ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
+		if (datalign) \
+			PKTPULL((osh), (p), (uint)datalign); \
+		PKTSETLEN((osh), (p), (len)); \
+	} while (0)
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+#if defined(BCMSDIOH_TXGLOM_EXT)
+bool
+dhdsdio_is_dataok(dhd_bus_t *bus)
+{
+	return (((uint8)(bus->tx_max - bus->tx_seq) - bus->dhd->conf->tx_max_offset > 1) && \
+		(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0));
+}
+
+uint8
+dhdsdio_get_databufcnt(dhd_bus_t *bus)
+{
+	return ((uint8)(bus->tx_max - bus->tx_seq) - 1 - bus->dhd->conf->tx_max_offset);
+}
+#endif
+
+/* To check if there's window offered */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+#define DATAOK(bus) dhdsdio_is_dataok(bus)
+#else
+#define DATAOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+#endif
+
+/* To check if there's window offered for ctrl frame */
+#define TXCTLOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Number of pkts available in dongle for data RX */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+#define DATABUFCNT(bus) dhdsdio_get_databufcnt(bus)
+#else
+#define DATABUFCNT(bus) \
+	((uint8)(bus->tx_max - bus->tx_seq) - 1)
+#endif
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+/* XXX Need to replace these with something better. */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		regvar = R_REG(bus->dhd->osh, regaddr); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED " #regvar " READ, LINE %d\n", \
+				__FUNCTION__, __LINE__)); \
+			regvar = 0; \
+		} \
+	} \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		W_REG(bus->dhd->osh, regaddr, regval); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) \
+			DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+				__FUNCTION__, __LINE__)); \
+	} \
+} while (0)
+
+#define BUS_WAKE(bus) \
+	do { \
+		(bus)->idlecount = 0; \
+		if ((bus)->sleeping) \
+			dhdsdio_bussleep((bus), FALSE); \
+	} while (0)
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in the dongle to transmit to the host.
+ *
+ * Mode 0: Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1: (sdiod core rev >= 4)
+ *         Device sets a new bit in the intstatus whenever there is a packet
+ *         available in the FIFO. Host can't clear this specific status bit
+ *         until all the packets are read from the FIFO. No need to ack the
+ *         dongle intstatus.
+ * Mode 2: (sdiod core rev >= 4)
+ *         Device sets a bit in the intstatus, and host acks this by writing
+ *         one to this bit. Dongle won't generate any more packet interrupts
+ *         until host reads all the packets from the dongle and reads a zero
+ *         to confirm that there are no more packets. No need to disable host
+ *         ints. Need to ack the intstatus.
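+ */
+
+/* Illustrative sketch (not part of the original source): the mode-dependent
+ * rx-available test that FRAME_AVAIL_MASK/PKT_AVAILABLE below implement for
+ * SDIO. Compiled out; for reading only.
+ */
+#if 0
+static bool
+pkt_avail_example(dhd_bus_t *bus, uint32 intstatus)
+{
+	/* Mode 0 keys off the host-mailbox frame indication; modes 1/2 key
+	 * off the I_XMTDATA_AVAIL intstatus bit instead.
+	 */
+	uint32 mask = (bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ?
+		I_HMB_FRAME_IND : I_XMTDATA_AVAIL;
+	return (intstatus & mask) != 0;
+}
+#endif
+
+/* The defines below give the three modes their names.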
+ */ + +#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */ +#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */ +#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */ + +#ifdef BCMSPI + +#define FRAME_AVAIL_MASK(bus) I_HMB_FRAME_IND + +#define DHD_BUS SPI_BUS + +/* check packet-available-interrupt in piggybacked dstatus */ +#define PKT_AVAILABLE(bus, intstatus) (bcmsdh_get_dstatus(bus->sdh) & STATUS_F2_PKT_AVAILABLE) + +#define HOSTINTMASK (I_HMB_FC_CHANGE | I_HMB_HOST_INT) + +#define GSPI_PR55150_BAILOUT \ +do { \ + uint32 dstatussw = bcmsdh_get_dstatus((void *)bus->sdh); \ + uint32 dstatushw = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL); \ + uint32 intstatuserr = 0; \ + uint retries = 0; \ + \ + R_SDREG(intstatuserr, &bus->regs->intstatus, retries); \ + printf("dstatussw = 0x%x, dstatushw = 0x%x, intstatus = 0x%x\n", \ + dstatussw, dstatushw, intstatuserr); \ + \ + bus->nextlen = 0; \ + *finished = TRUE; \ +} while (0) + +#else /* BCMSDIO */ + +#define FRAME_AVAIL_MASK(bus) \ + ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL) + +#define DHD_BUS SDIO_BUS + +#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus))) + +#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) + +#define GSPI_PR55150_BAILOUT + +#endif /* BCMSPI */ + +#ifdef SDTEST +static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq); +static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count); +#endif + +static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG +static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror); +#endif /* DHD_DEBUG */ + +#if defined(DHD_FW_COREDUMP) +static int dhdsdio_mem_dump(dhd_bus_t *bus); +static int dhdsdio_get_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ +static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap); +static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); + +static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_disconnect(void *ptr); +static bool dhdsdio_chipmatch(uint16 chipid); +static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh, + void * regsva, uint16 devid); +static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh); +static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh); +static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, + bool reset_flag); + +static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size); +static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); +static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry); +static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt); +static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq, + int prev_chain_total_len, bool last_chained_pkt, + int *pad_pkt_len, void **new_pkt +#if defined(BCMSDIOH_TXGLOM_EXT) + , int first_frame +#endif +); +static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt); + +static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_firmware(dhd_bus_t *bus); + +#ifdef DHD_UCODE_DOWNLOAD +static int 
dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path); +#endif /* DHD_UCODE_DOWNLOAD */ +static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path); +static int dhdsdio_download_nvram(dhd_bus_t *bus); +#ifdef BCMEMBEDIMAGE +static int dhdsdio_download_code_array(dhd_bus_t *bus); +#endif +static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep); +static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok); +static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus); +static bool dhdsdio_dpc(dhd_bus_t *bus); +static int dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len); +static int dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode); +static int dhdsdio_sdclk(dhd_bus_t *bus, bool on); +static void dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp); +static void dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp); + +#if defined(BT_OVER_SDIO) +static int extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value); +static int read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, + uint16 * hi_addr, uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes); +static int dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_btfw(struct dhd_bus *bus); +#endif /* defined (BT_OVER_SDIO) */ + +#ifdef DHD_WAKE_STATUS +int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh); +int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag); +#endif /* DHD_WAKE_STATUS */ + +/* + * PR 114233: [4335] Sdio 3.0 overflow due to spur mode PLL change + */ +static void +dhdsdio_tune_fifoparam(struct dhd_bus *bus) +{ + int err; + uint8 devctl, wm, mes; + + if (bus->sih->buscorerev >= 15) { + /* See .ppt in PR for these recommended values */ + if (bus->blocksize == 512) { + wm = OVERFLOW_BLKSZ512_WM; + mes = OVERFLOW_BLKSZ512_MES; + } else { + mes = bus->blocksize/4; + wm = bus->blocksize/4; + } + + /* XXX: Need to set watermark since SBSDIO_WATERMARK could be overwritten + based on watermark value in other place. Refer to SWDHD-17. 
+ */
+		watermark = wm;
+		mesbusyctrl = mes;
+	} else {
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than minimum required rev\n",
+			bus->sih->buscorerev));
+		return;
+	}
+
+	/* Update watermark */
+	if (wm > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+
+		devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+	}
+
+	/* Update MES */
+	if (mes > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			(mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+	}
+
+	DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
+}
+
+static void
+dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size = DONGLE_MIN_RAMSIZE;
+	/* Restrict the ramsize to the user-specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_ramsize, min_size));
+	if ((dhd_dongle_ramsize > min_size) &&
+		(dhd_dongle_ramsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_ramsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+		(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+			(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+			(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
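+
+/* Worked example (illustrative, not part of the original source): for a
+ * window base of 0x18000000 the three writes above program, with the
+ * standard sbsdio masks applied,
+ *   SBADDRLOW  = (0x18000000 >>  8) & SBSDIO_SBADDRLOW_MASK  = 0x00
+ *   SBADDRMID  = (0x18000000 >> 16) & SBSDIO_SBADDRMID_MASK  = 0x00
+ *   SBADDRHIGH = (0x18000000 >> 24) & SBSDIO_SBADDRHIGH_MASK = 0x18
+ * i.e. the window registers select which 32 KB chunk of the silicon
+ * backplane address space function-1 accesses land in.
+ */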
+
+#ifdef BCMSPI
+static void
+dhdsdio_wkwlan(dhd_bus_t *bus, bool on)
+{
+	int err;
+	uint32 regdata;
+	bcmsdh_info_t *sdh = bus->sdh;
+
+	/* XXX: sdiod cores have SPI as a block, PCMCIA doesn't have the gspi core */
+	/* XXX: maybe we don't even need this check at all */
+	if (bus->sih->buscoretype == SDIOD_CORE_ID) {
+		/* wake up wlan function: WAKE_UP goes as ht_avail_request and alp_avail_request */
+		regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
+		DHD_INFO(("F0 REG0 rd = 0x%x\n", regdata));
+
+		if (on == TRUE)
+			regdata |= WAKE_UP;
+		else
+			regdata &= ~WAKE_UP;
+
+		bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
+	}
+}
+#endif /* BCMSPI */
+
+#ifdef USE_OOB_GPIO1
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+	uint32 val, addr, data;
+
+	bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+	addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	/* Set device for gpio1 wakeup */
+	bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+	val = bcmsdh_reg_read(bus->sdh, data, 4);
+	val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+	bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+	bus->_oobwakeup = TRUE;
+
+	return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+#ifndef BCMSPI
+/*
+ * Query if FW is in SR mode
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+	bool cap = FALSE;
+	uint32 core_capext, addr, data;
+
+	if (bus->sih->chip == BCM43430_CHIP_ID ||
+		bus->sih->chip == BCM43018_CHIP_ID) {
+		/* check if fw initialized sr engine */
+		addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, sr_control1);
+		if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0)
+			cap = TRUE;
+
+		return cap;
+	}
+	if (
+#ifdef UNRELEASEDCHIP
+		(bus->sih->chip == BCM4347_CHIP_ID) ||
+		(bus->sih->chip == BCM4357_CHIP_ID) ||
+		(bus->sih->chip == BCM4361_CHIP_ID) ||
+#endif
+		0) {
+		core_capext = FALSE;
+	} else if ((bus->sih->chip == BCM4330_CHIP_ID) ||
+		(bus->sih->chip == BCM43362_CHIP_ID) ||
+		(BCM4347_CHIP(bus->sih->chip))) {
+		core_capext = FALSE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		BCM4345_CHIP(bus->sih->chip) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM43569_CHIP_ID) ||
+		(bus->sih->chip == BCM4371_CHIP_ID) ||
+		(BCM4349_CHIP(bus->sih->chip)) ||
+		(bus->sih->chip == BCM4350_CHIP_ID) ||
+		(bus->sih->chip == BCM4362_CHIP_ID) ||
+		(bus->sih->chip == BCM43012_CHIP_ID) ||
+		(bus->sih->chip == BCM43013_CHIP_ID) ||
+		(bus->sih->chip == BCM43014_CHIP_ID) ||
+		(bus->sih->chip == BCM43751_CHIP_ID) ||
+		(bus->sih->chip == BCM43752_CHIP_ID)) {
+		core_capext = TRUE;
+	} else {
+		/* XXX: For AOB, CORE_CAPEXT_ADDR is moved to the PMU core */
+		core_capext = bcmsdh_reg_read(bus->sdh,
+			si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, core_cap_ext)),
+			4);
+
+		core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
+	}
+	if (!(core_capext))
+		return FALSE;
+
+	if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		BCM4345_CHIP(bus->sih->chip) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM43569_CHIP_ID) ||
+		(bus->sih->chip == BCM4371_CHIP_ID) ||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		uint32 enabval = 0;
+		addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
+		data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+		/* XXX: assuming the dongle doesn't change chipcontrol_addr, because
+		 * if that happens, the chipcontrol_data read will be wrong. So we need
+		 * to make sure the dongle and host will not access chipcontrol_addr
+		 * simultaneously at this point.
+		 */
+		bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
+		enabval = bcmsdh_reg_read(bus->sdh, data, 4);
+
+		if ((bus->sih->chip == BCM4350_CHIP_ID) ||
+			BCM4345_CHIP(bus->sih->chip) ||
+			(bus->sih->chip == BCM4354_CHIP_ID) ||
+			(bus->sih->chip == BCM4358_CHIP_ID) ||
+			(bus->sih->chip == BCM43569_CHIP_ID) ||
+			(bus->sih->chip == BCM4371_CHIP_ID))
+			enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
+
+		/* XXX: not checking the CC_PMUCC3_SRCC_SR_ENG_ENAB bit [val 4], but
+		 * checking the whole register to be non-zero, so that the sleep
+		 * sequence can also be checked without enabling SR.
+		 */
+		if (enabval)
+			cap = TRUE;
+	} else {
+		data = bcmsdh_reg_read(bus->sdh,
+			si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, retention_ctl)),
+			4);
+		if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0)
+			cap = TRUE;
+	}
+
+	return cap;
+}
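+
+/* Illustrative sketch (not part of the original source): the indirect
+ * addr/data pattern used above -- select a chipcontrol register by writing
+ * its index to chipcontrol_addr, then read the value back through
+ * chipcontrol_data. Compiled out; for reading only.
+ */
+#if 0
+static uint32
+chipcontrol_read_example(dhd_bus_t *bus, uint32 regsel)
+{
+	uint32 addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	uint32 data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, regsel);
+	return bcmsdh_reg_read(bus->sdh, data, 4);
+}
+#endif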
+
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	if (bus->sih->chip == BCM43012_CHIP_ID ||
+		bus->sih->chip == BCM43013_CHIP_ID ||
+		bus->sih->chip == BCM43014_CHIP_ID) {
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+		val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+			1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT, &err);
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	} else {
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+		val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+			1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	}
+
+#ifdef USE_CMD14
+	/* Add CMD14 Support */
+	dhdsdio_devcap_set(bus,
+		(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+#endif /* USE_CMD14 */
+
+	if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43018_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM4339_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM4362_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43012_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43013_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43014_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43751_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43752_CHIP_ID)
+		dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC);
+
+	if (bus->sih->chip == BCM43012_CHIP_ID ||
+		bus->sih->chip == BCM43013_CHIP_ID ||
+		bus->sih->chip == BCM43014_CHIP_ID) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ, &err);
+	} else {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+	}
+	bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+	bus->_srenab = TRUE;
+
+	return 0;
+}
+#endif /* !BCMSPI */
+
+/*
+ * FIX: Be sure the KSO bit is enabled.
+ * Currently it defaults to 0, which should be 1.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	/* set flag */
+	bus->kso = TRUE;
+
+	/*
+	 * Enable KeepSdioOn (KSO) bit for normal operation
+	 * Default is 0 (4334A0) so set it. Fixed in B0.
+	 */
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+		if (err)
+			DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+	}
+
+	return 0;
+}
+
+#define KSO_DBG(x)
+/* XXX A KSO set typically takes time depending on the resource-up state & the
+ * number of resources which were down. The max value is PMU_MAX_TRANSITION_DLY
+ * usec. Currently the KSO attempt loop is such that it waits
+ * (KSO_WAIT_US [50 usec] + 2 SDIO operations) * MAX_KSO_ATTEMPTS,
+ * so a maximum wait of PMU_MAX_TRANSITION_DLY is used to calculate
+ * MAX_KSO_ATTEMPTS.
+ */
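+
+/* Worked numbers (illustrative, not part of the original source): with
+ * KSO_WAIT_US = 50 and PMU_MAX_TRANSITION_DLY = 1000000 usec (see above),
+ * DEFAULT_MAX_KSO_ATTEMPTS = 1000000 / 50 = 20000 attempts; with NO_EXT32K,
+ * PMU_MAX_TRANSITION_DLY = 5000000 usec and the budget becomes 100000 attempts.
+ */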
+#define KSO_WAIT_US 50
+#define KSO_WAIT_MS 1
+#define KSO_SLEEP_RETRY_COUNT 20
+#define KSO_WAKE_RETRY_COUNT 100
+#define ERROR_BCME_NODEVICE_MAX 1
+
+#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#ifndef CUSTOM_MAX_KSO_ATTEMPTS
+#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS
+#endif
+
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+	uint8 wr_val = 0, rd_val, cmp_val, bmask;
+	int err = 0;
+	int try_cnt = 0, try_max = CUSTOM_MAX_KSO_ATTEMPTS;
+	struct dhd_conf *conf = bus->dhd->conf;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
+	wifi_adapter_info_t *adapter = NULL;
+	uint32 bus_type = -1, bus_num = -1, slot_num = -1;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) */
+
+	KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
+	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+	sdio_retune_crc_disable(adapter->sdio_func);
+	if (on)
+		sdio_retune_hold_now(adapter->sdio_func);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) */
+
+	wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+
+	/* XXX 1st KSO write goes to AOS wake up core if device is asleep */
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+
+	/* In case of the 43012 chip, the chip could go down immediately after the
+	 * KSO bit is cleared, so further reads of the KSO register could fail.
+	 * Therefore bail out immediately after clearing the KSO bit, to avoid
+	 * polling it.
+	 */
+	if ((!on) && (bus->sih->chip == BCM43012_CHIP_ID ||
+		bus->sih->chip == BCM43013_CHIP_ID ||
+		bus->sih->chip == BCM43014_CHIP_ID)) {
+		goto exit;
+	}
+
+	if (on) {
+		/* XXX
+		 * device WAKEUP through KSO:
+		 * write bit 0 & read back until
+		 * both bits 0 (kso bit) & 1 (dev on status) are set
+		 */
+		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
+		bmask = cmp_val;
+
+#if defined(NDIS)
+		/* XXX The Windows host controller hangs if the chip still sleeps before
+		 * a read. So during a wake we write 0x1 for 5 msec to guarantee that the
+		 * chip is awake.
+		 */
+		for (int i = 0; i < KSO_WAKE_RETRY_COUNT; i++) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR,
+				wr_val, &err);
+			OSL_DELAY(KSO_WAIT_US);
+		}
+
+		rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+		if (((rd_val & bmask) != cmp_val) || err) {
+			/* Sdio Bus Failure - Bus hang */
+			DHD_ERROR(("%s> op:%s, ERROR: SDIO Bus Hang, rd_val:%x, ERR:%x \n",
+				__FUNCTION__, "KSO_SET", rd_val, err));
+		}
+#else
+		OSL_SLEEP(3);
+#endif /* defined(NDIS) */
+
+	} else {
+		/* Put device to sleep, turn off KSO */
+		cmp_val = 0;
+		/* XXX only check for bit0; bit1 (devon status) may not get cleared right away */
+		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
+	}
+#if !defined(NDIS)
+	/* XXX We can't use polling in Windows since the Windows host controller
+	 * hangs if the chip is asleep during a read or write.
+	 */
+
+	if (conf->kso_try_max)
+		try_max = conf->kso_try_max;
+	do {
+		/*
+		 * XXX reliable KSO bit set/clr:
+		 * the sdiod sleep write access appears to be synced to the PMU 32 kHz clk;
+		 * just one write attempt may fail (same with read?)
+ * in any case, read it back until it matches written value + */ + rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err); + if (((rd_val & bmask) == cmp_val) && !err) + break; + + KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err)); + + if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) { + OSL_SLEEP(KSO_WAIT_MS); + } else + OSL_DELAY(KSO_WAIT_US); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + } while (try_cnt++ < try_max); + +#ifdef KSO_DEBUG + if (try_cnt > 0 && try_cnt <= 10) + conf->kso_try_array[0] += 1; + else if (try_cnt <= 50) + conf->kso_try_array[1] += 1; + else if (try_cnt <= 100) + conf->kso_try_array[2] += 1; + else if (try_cnt <= 200) + conf->kso_try_array[3] += 1; + else if (try_cnt <= 500) + conf->kso_try_array[4] += 1; + else if (try_cnt <= 1000) + conf->kso_try_array[5] += 1; + else if (try_cnt <= 2000) + conf->kso_try_array[6] += 1; + else if (try_cnt <= 5000) + conf->kso_try_array[7] += 1; + else if (try_cnt <= 10000) + conf->kso_try_array[8] += 1; + else + conf->kso_try_array[9] += 1; +#endif + if (try_cnt > 2) + KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); + + if (try_cnt > try_max) { + DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); +#ifdef KSO_DEBUG + { + int i; + printk(KERN_CONT DHD_LOG_PREFIXS); + for (i=0; i<10; i++) { + printk(KERN_CONT "[%d]: %d, ", i, conf->kso_try_array[i]); + } + printk("\n"); + } +#endif + } +#endif /* !defined(NDIS) */ + +exit: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)) + if (on) + sdio_retune_release(adapter->sdio_func); + sdio_retune_crc_enable(adapter->sdio_func); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) */ + + return err; +} + +static int +dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on) +{ + int err = 0; + + if (on == FALSE) { + + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__, + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); + dhdsdio_clk_kso_enab(bus, FALSE); + } else { + DHD_ERROR(("%s: KSO enable\n", __FUNCTION__)); + + /* Make sure we have SD bus access */ + if (bus->clkstate == CLK_NONE) { + DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__)); + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + + dhdsdio_clk_kso_enab(bus, TRUE); + + DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__, + dhdsdio_sleepcsr_get(bus))); + } + + bus->kso = on; + BCM_REFERENCE(err); + + return 0; +} + +static uint8 +dhdsdio_sleepcsr_get(dhd_bus_t *bus) +{ + int err = 0; + uint8 val = 0; + + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err); + /* XXX: Propagate error */ + if (err) + DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err)); + + return val; +} + +uint8 +dhdsdio_devcap_get(dhd_bus_t *bus) +{ + return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL); +} + +static int +dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap) +{ + int err = 0; + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err); + if (err) + DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err)); + + return 0; +} + +static int +dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) +{ + int err = 0, retry; + uint8 val; + + retry = 0; + if (on == TRUE) { + /* Enter Sleep */ + + /* Be sure we request clk before going to sleep + * so we can wake-up with clk request 
already set
+	 * else device can go back to sleep immediately
+	 */
+		if (!SLPAUTO_ENAB(bus))
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		else {
+			/* XXX: Check if Host cleared clock request
+			 * XXX: With CMD14, Host does not need to explicitly toggle clock requests
+			 * XXX: Just keep clock request active and use CMD14 to enter/exit sleep
+			 */
+			val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if ((val & SBSDIO_CSR_MASK) == 0) {
+				DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
+					__FUNCTION__, val));
+
+				/* Reset clock request */
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					SBSDIO_ALP_AVAIL_REQ, &err);
+				DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+					bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+			}
+		}
+
+		DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+		if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) {
+			if (sd1idle) {
+				/* Change to SD1 mode */
+				dhdsdio_set_sdmode(bus, 1);
+			}
+		}
+
+		err = dhdsdio_clk_kso_enab(bus, FALSE);
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+#if !defined(NDIS)
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */
+#endif /* !defined(NDIS) */
+		}
+#endif /* USE_CMD14 */
+
+		if ((SLPAUTO_ENAB(bus)) && (bus->idleclock != DHD_IDLE_ACTIVE)) {
+			DHD_TRACE(("%s: Turnoff SD clk\n", __FUNCTION__));
+			/* Now remove the SD clock */
+			err = dhdsdio_sdclk(bus, FALSE);
+		}
+	} else {
+		/* Exit Sleep */
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, FALSE);
+		if (SLPAUTO_ENAB(bus) && (err != 0)) {
+			/* XXX: CMD14 exit sleep is failing somehow
+			 * XXX: Is Host out of sync with device?
+			 * XXX: Try toggling the reverse
+			 */
+			OSL_DELAY(10000);
+			DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+			/* Toggle sleep to resync with host and device */
+			err = bcmsdh_sleep(bus->sdh, TRUE);
+			OSL_DELAY(10000);
+			err = bcmsdh_sleep(bus->sdh, FALSE);
+
+			/* XXX: Ugly hack for host-device out-of-sync while testing
+			 * XXX: Need to root-cause
+			 */
+			if (err) {
+				/* XXX: Host and device out-of-sync */
+				OSL_DELAY(10000);
+				DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+				/* Toggle sleep to resync with host and device */
+				err = bcmsdh_sleep(bus->sdh, TRUE);
+				OSL_DELAY(10000);
+				err = bcmsdh_sleep(bus->sdh, FALSE);
+				if (err) {
+					/* XXX: Give up and assume it has exited sleep
+					 * XXX: Device probably dead at this point
+					 * XXX: So far only happens with SR
+					 */
+					DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+					DHD_ERROR(("%s: FATAL: Device not responding!\n",
+						__FUNCTION__));
+					err = 0;
+				}
+			}
+		}
+#else
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+#if !defined(NDIS)
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */
+#endif /* !defined(NDIS) */
+		}
+		/* PR 101351: sdiod_aos sleep followed by immediate wakeup
+		 * before sdiod_aos takes over has a problem.
+		 * Set KSO after ExitSleep.
+ */ + do { + err = dhdsdio_clk_kso_enab(bus, TRUE); + if (err) + OSL_SLEEP(10); + } while ((err != 0) && (++retry < 3)); + + if (err != 0) { + DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry)); +#ifndef BT_OVER_SDIO + err = 0; /* continue anyway */ +#endif /* BT_OVER_SDIO */ + } + + if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) { + dhdsdio_set_sdmode(bus, bus->sd_mode); + } +#endif /* !USE_CMD14 */ + + if (err == 0) { + uint8 csr; + + /* Wait for device ready during transition to wake-up */ + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = dhdsdio_sleepcsr_get(bus)) & + SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) != + (SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000)); + + DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr)); + + if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) { + DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + + /* PR 101351: sdiod_aos sleep followed by immediate wakeup + * before sdiod_aos takes over has a problem. + */ + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) != + (SBSDIO_HT_AVAIL)), (DHD_WAIT_HTAVAIL)); + + DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr)); + if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) { + DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + } + } + + /* Update if successful */ + if (err == 0) + bus->kso = on ? FALSE : TRUE; + else { + DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n", + __FUNCTION__, bus->kso, on, err)); + if (!on && retry > 2) + bus->kso = FALSE; + } + + return err; +} + +/* Turn backplane clock on or off */ +static int +dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) +{ +#define HT_AVAIL_ERROR_MAX 10 + static int ht_avail_error = 0; + int err; + uint8 clkctl, clkreq, devctl; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + clkctl = 0; + sdh = bus->sdh; + +#ifdef BCMINTERNAL + if (DHD_NOPMU(bus)) { + /* There is no PMU present, so just fake the clock state... */ + bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY); + return BCME_OK; + } + + if (bus->clockpoll) + pendok = FALSE; +#endif /* BCMINTERNAL */ + + if (!KSO_ENAB(bus)) + return BCME_OK; + + if (SLPAUTO_ENAB(bus)) { + bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY); + return BCME_OK; + } + + if (on) { + /* Request HT Avail */ + clkreq = bus->alp_only ? 
SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; + +#ifdef BCMSPI + dhdsdio_wkwlan(bus, TRUE); +#endif /* BCMSPI */ + + /* XXX Should be able to early-exit if pendok && PENDING */ + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + if (err) { + ht_avail_error++; + if (ht_avail_error < HT_AVAIL_ERROR_MAX) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + } + +#ifdef OEM_ANDROID + else if (ht_avail_error == HT_AVAIL_ERROR_MAX) { + bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR; + dhd_os_send_hang_message(bus->dhd); + } +#endif /* OEM_ANDROID */ + return BCME_ERROR; + } else { + ht_avail_error = 0; + } + + /* Check current status */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + +#if !defined(OOB_INTR_ONLY) + /* Go to pending and await interrupt if appropriate */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) { + /* Allow only clock-available interrupt */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: Devctl access error setting CA: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + devctl |= SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + DHD_INFO(("CLKCTL: set PENDING\n")); + bus->clkstate = CLK_PENDING; + return BCME_OK; + } else +#endif /* !defined (OOB_INTR_ONLY) */ + { + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + } +#ifndef BCMSDIOLITE + /* Otherwise, wait here (polling) for HT Avail */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)), + !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY); + } + if (err) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n", + __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl)); + return BCME_ERROR; + } +#endif /* BCMSDIOLITE */ + /* Mark clock available */ + bus->clkstate = CLK_AVAIL; + DHD_INFO(("CLKCTL: turned ON\n")); + +#if defined(DHD_DEBUG) + if (bus->alp_only == TRUE) { +#if !defined(BCMLXSDMMC) + /* XXX For the SDMMC Driver stack, if DHD was unloaded, + * the chip is not completely reset, so in this case, + * the PMU may already be programmed to allow HT clock. 
+ */ + if (!SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__)); + } +#endif /* !defined(BCMLXSDMMC) */ + } else { + if (SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__)); + } + } +#endif /* defined (DHD_DEBUG) */ + + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* DHD_USE_IDLECOUNT */ + } else { + clkreq = 0; + + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + bus->clkstate = CLK_SDONLY; + if (!SR_ENAB(bus)) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + DHD_INFO(("CLKCTL: turned OFF\n")); + if (err) { + DHD_ERROR(("%s: Failed access turning clock off: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } +#ifdef BCMSPI + dhdsdio_wkwlan(bus, FALSE); +#endif /* BCMSPI */ + } + return BCME_OK; +} + +/* Change SD1/SD4 bus mode */ +static int +dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode) +{ + int err; + + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &sd_mode, sizeof(sd_mode), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_mode: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + return BCME_OK; +} + +/* Change idle/active SD state */ +static int +dhdsdio_sdclk(dhd_bus_t *bus, bool on) +{ +#ifndef BCMSPI + int err; + int32 iovalue; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (on) { + if (bus->idleclock == DHD_IDLE_STOP) { + /* Turn on clock and restore mode */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error enabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Restore clock speed */ + iovalue = bus->sd_divisor; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error restoring sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_SDONLY; + } else { + /* Stop or slow the SD clock itself */ + if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) { + DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n", + __FUNCTION__, bus->sd_divisor, bus->sd_mode)); + return BCME_ERROR; + } + if (bus->idleclock == DHD_IDLE_STOP) { + iovalue = 0; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error disabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Set divisor to idle value */ + iovalue = bus->idleclock; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_NONE; + } +#endif /* BCMSPI */ + + return BCME_OK; +} + +/* Transition SD and backplane clock readiness */ +static int +dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) +{ + int ret = BCME_OK; +#ifdef DHD_DEBUG + uint oldstate = bus->clkstate; +#endif /* DHD_DEBUG */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Early exit if we're already there */ + if (bus->clkstate == target) { + if (target == CLK_AVAIL) { + dhd_os_wd_timer(bus->dhd, 
dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		return ret;
+	}
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			dhdsdio_sdclk(bus, TRUE);
+		/* Now request HT Avail on the backplane */
+		ret = dhdsdio_htclk(bus, TRUE, pendok);
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		break;
+
+	case CLK_SDONLY:
+
+#ifdef BT_OVER_SDIO
+		/*
+		 * If the request is to switch off the backplane clock,
+		 * confirm that BT is inactive before doing so.
+		 * If this call came from a non-watchdog context, the watchdog
+		 * will anyway switch the clock off again once nothing is left
+		 * to be done & BT has finished using the bus.
+		 */
+		if (bus->bt_use_count != 0) {
+			DHD_INFO(("%s(): Req CLK_SDONLY, BT is active %d not switching off \r\n",
+				__FUNCTION__, bus->bt_use_count));
+			ret = BCME_OK;
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			break;
+		}
+
+		DHD_INFO(("%s(): Req CLK_SDONLY, BT is NOT active, switching off \r\n",
+			__FUNCTION__));
+#endif /* BT_OVER_SDIO */
+
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			ret = dhdsdio_sdclk(bus, TRUE);
+		else if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		else
+			DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+				bus->clkstate, target));
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		}
+		break;
+
+	case CLK_NONE:
+
+#ifdef BT_OVER_SDIO
+		/*
+		 * If the request is to switch off the backplane clock,
+		 * confirm that BT is inactive before doing so.
+		 * If this call came from a non-watchdog context, the watchdog
+		 * will anyway switch the clock off again once nothing is left
+		 * to be done & BT has finished using the bus.
+		 */
+		if (bus->bt_use_count != 0) {
+			DHD_INFO(("%s(): Request CLK_NONE BT is active %d not switching off \r\n",
+				__FUNCTION__, bus->bt_use_count));
+			ret = BCME_OK;
+			break;
+		}
+
+		DHD_INFO(("%s(): Request CLK_NONE BT is NOT active switching off \r\n",
+			__FUNCTION__));
+#endif /* BT_OVER_SDIO */
+
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		/* Now remove the SD clock */
+		ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+		if (bus->dhd->dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+		if (bus->poll == 0)
+			dhd_os_wd_timer(bus->dhd, 0);
+		break;
+	}
+#ifdef DHD_DEBUG
+	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+	return ret;
+}
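+
+/* Illustrative call pattern (not part of the original source): the state
+ * machine above only ever steps CLK_NONE <-> CLK_SDONLY <-> CLK_AVAIL. An
+ * active path wakes the bus and requests the backplane clock before touching
+ * registers; the idle path steps back down. Compiled out; for reading only.
+ */
+#if 0
+	BUS_WAKE(bus);
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);	/* SD clock + HT backplane clock */
+	/* ... register/data access ... */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);	/* drop the HT request, keep SD clock */
+	dhdsdio_clkctl(bus, CLK_NONE, FALSE);	/* idle: stop the SD clock too */
+#endif
+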
*/ + if (sleep) { + /* Don't sleep if something is pending */ +#ifdef DHD_USE_IDLECOUNT + if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq) || + bus->readframes || bus->ctrl_frame_stat) +#else + if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq)) +#endif /* DHD_USE_IDLECOUNT */ + return BCME_BUSY; + +#ifdef BT_OVER_SDIO + /* + * The following is the assumption based on which the hook is placed. + * From WLAN driver, either from the active contexts OR from the Watchdog contexts + * we will be attempting to Go to Sleep. AT that moment if we see that BT is still + * actively using the bus, we will return BCME_BUSY from here, but the bus->sleeping + * state would not have changed. So the caller can then schedule the Watchdog again + * which will come and attempt to sleep at a later point. + * + * In case if BT is the only one and is the last user, we don't switch off the clock + * immediately, we allow the WLAN to decide when to sleep i.e from the watchdog. + * Now if the watchdog becomes active and attempts to switch off the clock and if + * another WLAN context is active they are any way serialized with sdlock. + */ + if (bus->bt_use_count != 0) { + DHD_INFO(("%s(): Cannot sleep BT is active \r\n", __FUNCTION__)); + return BCME_BUSY; + } +#endif /* !BT_OVER_SDIO */ + + /* XXX Is it an error to sleep when not in data state? */ + + if (!SLPAUTO_ENAB(bus)) { + /* Disable SDIO interrupts (no longer interested) */ + bcmsdh_intr_disable(bus->sdh); + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_FORCE_HW_CLKREQ_OFF, NULL); + + /* Isolate the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, + SBSDIO_DEVCTL_PADS_ISO, NULL); + } else { +#ifdef FORCE_SWOOB_ENABLE + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); +#endif + /* Leave interrupts enabled since device can exit sleep and + * interrupt host + */ + err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */); + } + + /* Change state */ + bus->sleeping = TRUE; +#if defined(BCMSDIOH_STD) + sd3_tuning_disable = TRUE; + err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0, + &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE); +#endif /* BCMSDIOH_STD */ +#if defined(LINUX) && defined(SUPPORT_P2P_GO_PS) + wake_up(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + /* XXX Should be able to turn off clock and power */ + /* XXX Make sure GPIO interrupt input is enabled */ + } else { + /* Waking up: bus power up is ok, set local state */ + + if (!SLPAUTO_ENAB(bus)) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err); + + /* Force pad isolation off if possible (in case power never toggled) */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL); + + /* XXX Make sure GPIO interrupt input is disabled */ + /* XXX Should be able to turn on power and clock */ + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, 
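+			/* handshake sketch: clear tosbmailboxdata, then ring
+			 * SMB_DEV_INT below so the dongle stops expecting OOB
+			 * wake signalling
+			 */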
retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n")); + + /* Make sure we have SD bus access */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + /* Enable interrupts again */ + if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) { + bus->intdis = FALSE; + bcmsdh_intr_enable(bus->sdh); + } + } else { + err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */); +#ifdef FORCE_SWOOB_ENABLE + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); +#endif +#ifdef BT_OVER_SDIO + if (err < 0) { + struct net_device *net = NULL; + dhd_pub_t *dhd = bus->dhd; + net = dhd_idx2net(dhd, 0); + if (net != NULL) { + DHD_ERROR(("<< WIFI HANG by KSO Enabled failure\n")); + dhd_os_sdunlock(dhd); + net_os_send_hang_message(net); + dhd_os_sdlock(dhd); + } else { + DHD_ERROR(("<< WIFI HANG Fail because net is NULL\n")); + } + } +#endif /* BT_OVER_SDIO */ + } + + if (err == 0) { + /* Change state */ + bus->sleeping = FALSE; +#if defined(BCMSDIOH_STD) + sd3_tuning_disable = FALSE; + err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0, + &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE); +#endif /* BCMSDIOH_STD */ + } + } + + return err; +} + +#ifdef BT_OVER_SDIO +/* + * Call this function to Get the Clock running. + * Assumes that the caller holds the sdlock. + * bus - Pointer to the dhd_bus handle + * can_wait - TRUE if the caller can wait until the clock becomes ready + * FALSE if the caller cannot wait + */ +int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait) +{ + int ret = BCME_ERROR; + + BCM_REFERENCE(owner); + + bus->bt_use_count++; + + /* + * We can call BUS_WAKE, clkctl multiple times, both of the items + * have states and if its already ON, no new configuration is done + */ + + /* Wake up the Dongle FW from SR */ + BUS_WAKE(bus); + + /* + * Make sure back plane ht clk is on + * CLK_AVAIL - Turn On both SD & HT clock + */ + ret = dhdsdio_clkctl(bus, CLK_AVAIL, can_wait); + + DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__, + bus->bt_use_count)); + return ret; +} + +/* + * Call this function to relinquish the Clock. + * Assumes that the caller holds the sdlock. + * bus - Pointer to the dhd_bus handle + * can_wait - TRUE if the caller can wait until the clock becomes ready + * FALSE if the caller cannot wait + */ +int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait) +{ + int ret = BCME_ERROR; + + BCM_REFERENCE(owner); + BCM_REFERENCE(can_wait); + + if (bus->bt_use_count == 0) { + DHD_ERROR(("%s(): Clocks are already turned off \r\n", + __FUNCTION__)); + return ret; + } + + bus->bt_use_count--; + + /* + * When the SDIO Bus is shared between BT & WLAN, we turn Off the clock + * once the last user has relinqushed the same. But there are two schemes + * in that too. We consider WLAN as the bus master (even if its not + * active). Even when the WLAN is OFF the DHD Watchdog is active. + * So this Bus Watchdog is the context whill put the Bus to sleep. 
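+	 * Typical pairing (illustrative): BT calls __dhdsdio_clk_enable()
+	 * before touching the bus and __dhdsdio_clk_disable() when done;
+	 * only once bt_use_count falls back to 0 may the watchdog idle the
+	 * clock from its own context.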
+ * Refer dhd_bus_watchdog function + */ + + ret = BCME_OK; + DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__, + bus->bt_use_count)); + return ret; +} + +void dhdsdio_reset_bt_use_count(struct dhd_bus *bus) +{ + /* reset bt use count */ + bus->bt_use_count = 0; +} +#endif /* BT_OVER_SDIO */ + +#ifdef USE_DYNAMIC_F2_BLKSIZE +int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size) +{ + int func_blk_size = function_num; + int bcmerr = 0; + int result; + + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size, + sizeof(int), &result, sizeof(int), IOV_GET); + + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num)); + return BCME_ERROR; + } + + if (result != block_size) { + DHD_TRACE_HW4(("%s: F%d Block size set from %d to %d\n", + __FUNCTION__, function_num, result, block_size)); + func_blk_size = function_num << 16 | block_size; + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL, + 0, &func_blk_size, sizeof(int32), IOV_SET); + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__)); + return BCME_ERROR; + } + } + + return BCME_OK; +} +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) +void +dhd_enable_oob_intr(struct dhd_bus *bus, bool enable) +{ +#if defined(BCMSPI_ANDROID) + bcmsdh_intr_enable(bus->sdh); +#elif defined(HW_OOB) || defined(FORCE_WOWLAN) + bcmsdh_enable_hw_oob_intr(bus->sdh, enable); +#else + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (enable == TRUE) { + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + } else { + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + } + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); +#endif /* !defined(HW_OOB) */ +} +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + +int +dhd_bus_txdata(struct dhd_bus *bus, void *pkt) +{ + int ret = BCME_ERROR; + osl_t *osh; + uint datalen, prec; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + osh = bus->dhd->osh; + datalen = PKTLEN(osh, pkt); + +#ifdef SDTEST + /* Push the test header if doing loopback */ + if (bus->ext_loop) { + uint8* data; + PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN); + data = PKTDATA(osh, pkt); + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->loopid++; + *data++ = (datalen >> 0); + *data++ = (datalen >> 8); + datalen += SDPCM_TEST_HDRLEN; + } +#else /* SDTEST */ + BCM_REFERENCE(datalen); +#endif /* SDTEST */ + + prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK)); + + /* move from dhdsdio_sendfromq(), try to orphan skb early */ + if (bus->dhd->conf->orphan_move == 1) + PKTORPHAN(pkt, bus->dhd->conf->tsq); + + /* Check for existing queue, current flow-control, pending event, or pending clock */ + if (dhd_deferred_tx || bus->fcstate || pktq_n_pkts_tot(&bus->txq) || bus->dpc_sched || + (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) || + (bus->clkstate != CLK_AVAIL)) { + bool deq_ret; + int pkq_len = 0; + + DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, + pktq_n_pkts_tot(&bus->txq))); +#ifdef BCMINTERNAL + if (!bus->fcstate) + bus->tx_deferred++; +#endif /* BCMINTERNAL */ + bus->fcqueued++; + + /* Priority based enq */ + 
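+		/* dhd_prec_enq() may evict a lower-precedence frame when txq is
+		 * full; a FALSE return below means the packet could not be
+		 * queued at prec = PRIO2PREC(PKTPRIO(pkt))
+		 */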
dhd_os_sdlock_txq(bus->dhd); + deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec); + dhd_os_sdunlock_txq(bus->dhd); + + if (!deq_ret) { +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0) +#endif /* PROP_TXSTATUS */ + { +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + dhd_txcomplete(bus->dhd, pkt, FALSE); + PKTFREE(osh, pkt, TRUE); /* XXX update counter */ + } + ret = BCME_NORESOURCE; + } else + ret = BCME_OK; + + /* XXX Possible race since check and action are not locked? */ + if (dhd_doflow) { + dhd_os_sdlock_txq(bus->dhd); + pkq_len = pktq_n_pkts_tot(&bus->txq); + dhd_os_sdunlock_txq(bus->dhd); + } + if (dhd_doflow && pkq_len >= FCHI) { + bool wlfc_enabled = FALSE; +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) != + WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled && dhd_doflow) { + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); + } + } + +#ifdef DHD_DEBUG + dhd_os_sdlock_txq(bus->dhd); + if (pktqprec_n_pkts(&bus->txq, prec) > qcount[prec]) + qcount[prec] = pktqprec_n_pkts(&bus->txq, prec); + dhd_os_sdunlock_txq(bus->dhd); +#endif + + /* Schedule DPC if needed to send queued packet(s) */ + /* XXX Also here, since other deferral conditions may no longer hold? */ + if (dhd_deferred_tx && !bus->dpc_sched) { + if (bus->dhd->conf->deferred_tx_len) { + if(dhd_os_wd_timer_enabled(bus->dhd) == FALSE) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + if(pktq_n_pkts_tot(&bus->txq) >= bus->dhd->conf->deferred_tx_len && + dhd_os_wd_timer_enabled(bus->dhd) == FALSE) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } else { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + } else { + int chan = SDPCM_DATA_CHANNEL; + +#ifdef SDTEST + chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL); +#endif + /* Lock: we're about to use shared data/code (and SDIO) */ + dhd_os_sdlock(bus->dhd); + + /* Otherwise, send it now */ + BUS_WAKE(bus); + /* Make sure back plane ht clk is on, no pending allowed */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + + ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE); + + if (ret != BCME_OK) + bus->dhd->tx_errors++; + else + bus->dhd->dstats.tx_bytes += datalen; + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + } + + return ret; +} + +/* align packet data pointer and packet length to n-byte boundary, process packet headers, + * a new packet may be allocated if there is not enough head and/or tail from for padding. 
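+ * (e.g. with DHD_SDALIGN = 32, a data pointer whose low bits are 0x14
+ * needs head_padding = 20; if the pkt headroom is smaller than that, a
+ * fresh, already-aligned packet is allocated instead)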
+ * the caller is responsible for updating the glom size in the head packet (when glom is + * used) + * + * pad_pkt_len: returns the length of extra padding needed from the padding packet, this parameter + * is taken in tx glom mode only + * + * new_pkt: out, pointer of the new packet allocated due to insufficient head room for alignment + * padding, NULL if not needed, the caller is responsible for freeing the new packet + * + * return: positive value - length of the packet, including head and tail padding + * negative value - errors + */ +static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq, + int prev_chain_total_len, bool last_chained_pkt, + int *pad_pkt_len, void **new_pkt +#if defined(BCMSDIOH_TXGLOM_EXT) + , int first_frame +#endif +) +{ + osl_t *osh; + uint8 *frame; + int pkt_len; + int modulo; + int head_padding; + int tail_padding = 0; + uint32 swheader; + uint32 swhdr_offset; + bool alloc_new_pkt = FALSE; + uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; +#ifdef PKT_STATICS + uint16 len; +#endif + + *new_pkt = NULL; + osh = bus->dhd->osh; + +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + + /* Add space for the SDPCM hardware/software headers */ + PKTPUSH(osh, pkt, sdpcm_hdrlen); + ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2)); + + frame = (uint8*)PKTDATA(osh, pkt); + pkt_len = (uint16)PKTLEN(osh, pkt); + +#ifdef PKT_STATICS + len = (uint16)PKTLEN(osh, pkt); + switch(chan) { + case SDPCM_CONTROL_CHANNEL: + bus->tx_statics.ctrl_count++; + bus->tx_statics.ctrl_size += len; + break; + case SDPCM_DATA_CHANNEL: + bus->tx_statics.data_count++; + bus->tx_statics.data_size += len; + break; + case SDPCM_GLOM_CHANNEL: + bus->tx_statics.glom_count++; + bus->tx_statics.glom_size += len; + break; + case SDPCM_EVENT_CHANNEL: + bus->tx_statics.event_count++; + bus->tx_statics.event_size += len; + break; + case SDPCM_TEST_CHANNEL: + bus->tx_statics.test_count++; + bus->tx_statics.test_size += len; + break; + + default: + break; + } +#endif /* PKT_STATICS */ +#ifdef DHD_DEBUG + if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) + tx_packets[PKTPRIO(pkt)]++; +#endif /* DHD_DEBUG */ + + /* align the data pointer, allocate a new packet if there is not enough space (new + * packet data pointer will be aligned thus no padding will be needed) + */ + head_padding = (uintptr)frame % DHD_SDALIGN; + if (PKTHEADROOM(osh, pkt) < head_padding) { + head_padding = 0; + alloc_new_pkt = TRUE; + } else { + uint cur_chain_total_len; + int chain_tail_padding = 0; + + /* All packets need to be aligned by DHD_SDALIGN */ + modulo = (pkt_len + head_padding) % DHD_SDALIGN; + tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0; + + /* Total pkt chain length needs to be aligned by block size, + * unless it is a single pkt chain with total length less than one block size, + * which we prefer sending by byte mode. + * + * Do the chain alignment here if + * 1. This is the last pkt of the chain of multiple pkts or a single pkt. + * 2-1. This chain is of multiple pkts, or + * 2-2. This is a single pkt whose size is longer than one block size. 
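+ * Worked example (illustrative numbers): with blocksize = 512 and a
+ * multi-pkt chain totalling 1500 bytes after per-pkt alignment,
+ * 1500 % 512 = 476, so chain_tail_padding = 512 - 476 = 36 bytes,
+ * carried by the last pkt (or by the separate padding pkt).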
+ */ + cur_chain_total_len = prev_chain_total_len + + (head_padding + pkt_len + tail_padding); + if (last_chained_pkt && bus->blocksize != 0 && + (cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) { + modulo = cur_chain_total_len % bus->blocksize; + chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0; + } + +#ifdef DHDENABLE_TAILPAD + if (PKTTAILROOM(osh, pkt) < tail_padding) { + /* We don't have tail room to align by DHD_SDALIGN */ + alloc_new_pkt = TRUE; + bus->tx_tailpad_pktget++; + } else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) { + /* We have tail room for tail_padding of this pkt itself, but not for + * total pkt chain alignment by block size. + * Use the padding packet to avoid memory copy if applicable, + * otherwise, just allocate a new pkt. + */ + if (bus->pad_pkt) { + *pad_pkt_len = chain_tail_padding; + bus->tx_tailpad_chain++; + } else { + alloc_new_pkt = TRUE; + bus->tx_tailpad_pktget++; + } + } else + /* This last pkt's tailroom is sufficient to hold both tail_padding + * of the pkt itself and chain_tail_padding of total pkt chain + */ +#endif /* DHDENABLE_TAILPAD */ + tail_padding += chain_tail_padding; + } + + DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n", + __FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len)); + + if (alloc_new_pkt) { + void *tmp_pkt; + int newpkt_size; + int cur_total_len; + + ASSERT(*pad_pkt_len == 0); + + DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__)); + + /* head pointer is aligned now, no padding needed */ + head_padding = 0; + + /* update the tail padding as it depends on the head padding, since a new packet is + * allocated, the head padding is non longer needed and packet length is chagned + */ + + cur_total_len = prev_chain_total_len + pkt_len; + if (last_chained_pkt && bus->blocksize != 0 && + (cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) { + modulo = cur_total_len % bus->blocksize; + tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0; + } else { + modulo = pkt_len % DHD_SDALIGN; + tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0; + } + + newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN; + bus->dhd->tx_realloc++; + tmp_pkt = PKTGET(osh, newpkt_size, TRUE); + if (tmp_pkt == NULL) { + DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size)); + return BCME_NOMEM; + } + PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN); + bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt)); + *new_pkt = tmp_pkt; + pkt = tmp_pkt; + } + + if (head_padding) + PKTPUSH(osh, pkt, head_padding); + + frame = (uint8*)PKTDATA(osh, pkt); + bzero(frame, head_padding + sdpcm_hdrlen); + pkt_len = (uint16)PKTLEN(osh, pkt); + + /* the header has the followming format + * 4-byte HW frame tag: length, ~length (for glom this is the total length) + * + * 8-byte HW extesion flags (glom mode only) as the following: + * 2-byte packet length, excluding HW tag and padding + * 2-byte frame channel and frame flags (e.g. 
next frame following) + * 2-byte header length + * 2-byte tail padding size + * + * 8-byte SW frame tags as the following + * 4-byte flags: host tx seq, channel, data offset + * 4-byte flags: TBD + */ + + swhdr_offset = SDPCM_FRAMETAG_LEN; + + /* hardware frame tag: + * + * in tx-glom mode, dongle only checks the hardware frame tag in the first + * packet and sees it as the total lenght of the glom (including tail padding), + * for each packet in the glom, the packet length needs to be updated, (see + * below PKTSETLEN) + * + * in non tx-glom mode, PKTLEN still need to include tail padding as to be + * referred to in sdioh_request_buffer(). The tail length will be excluded in + * dhdsdio_txpkt_postprocess(). + */ +#if defined(BCMSDIOH_TXGLOM_EXT) + if (bus->dhd->conf->txglom_bucket_size) + tail_padding = 0; +#endif + *(uint16*)frame = (uint16)htol16(pkt_len); + *(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len); + pkt_len += tail_padding; + + /* hardware extesion flags */ + if (bus->txglom_enable) { + uint32 hwheader1; + uint32 hwheader2; +#ifdef BCMSDIOH_TXGLOM_EXT + uint32 act_len = pkt_len - tail_padding; + uint32 real_pad = 0; + if(bus->dhd->conf->txglom_ext && !last_chained_pkt) { + tail_padding = 0; + if(first_frame == 0) { + // first pkt, add pad to bucket size - recv offset + pkt_len = bus->dhd->conf->txglom_bucket_size - TXGLOM_RECV_OFFSET; + } else { + // add pad to bucket size + pkt_len = bus->dhd->conf->txglom_bucket_size; + } + swhdr_offset += SDPCM_HWEXT_LEN; + hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (last_chained_pkt << 24); + hwheader2 = (pkt_len - act_len) << 16; + htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); + real_pad = pkt_len - act_len; + + if (PKTTAILROOM(osh, pkt) < real_pad) { + DHD_INFO(("%s : insufficient tailroom %d for %d real_pad\n", + __func__, (int)PKTTAILROOM(osh, pkt), real_pad)); + if (PKTPADTAILROOM(osh, pkt, real_pad)) { + DHD_ERROR(("CHK1: padding error size %d\n", real_pad)); + } else + frame = (uint8 *)PKTDATA(osh, pkt); + } + } else +#endif + { + swhdr_offset += SDPCM_HWEXT_LEN; + hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) | + (last_chained_pkt << 24); + hwheader2 = (tail_padding) << 16; + htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); + } + } + PKTSETLEN((osh), (pkt), (pkt_len)); + + /* software frame tags */ + swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) + | (txseq % SDPCM_SEQUENCE_WRAP) | + (((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + swhdr_offset); + htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader)); + + return pkt_len; +} + +static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt) +{ + osl_t *osh; + uint8 *frame; + int data_offset; + int tail_padding; + int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0); + + (void)osh; + osh = bus->dhd->osh; + + /* restore pkt buffer pointer, but keeps the header pushed by dhd_prot_hdrpush */ + frame = (uint8*)PKTDATA(osh, pkt); + + DHD_INFO(("%s PKTLEN before postprocess %d", + __FUNCTION__, PKTLEN(osh, pkt))); + + /* PKTLEN still includes tail_padding, so exclude it. + * We shall have head_padding + original pkt_len for PKTLEN afterwards. 
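+	 * (glom members recover their tail padding from the HW extension
+	 * word: ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16, the exact
+	 * mirror of how hwheader2 was packed in the preprocess step)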
+ */ + if (bus->txglom_enable) { + /* txglom pkts have tail_padding length in HW ext header */ + tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16; + PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding); + DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n", + tail_padding, PKTLEN(osh, pkt))); + } else { + /* non-txglom pkts have head_padding + original pkt length in HW frame tag. + * We cannot refer to this field for txglom pkts as the first pkt of the chain will + * have the field for the total length of the chain. + */ + PKTSETLEN(osh, pkt, *(uint16*)frame); + DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n", + *(uint16*)frame, PKTLEN(osh, pkt))); + } + + data_offset = ltoh32_ua(frame + swhdr_offset); + data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT; + /* Get rid of sdpcm header + head_padding */ + PKTPULL(osh, pkt, data_offset); + + DHD_INFO(("%s data_offset %d, PKTLEN %d\n", + __FUNCTION__, data_offset, PKTLEN(osh, pkt))); + + return BCME_OK; +} + +static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt) +{ + int i; + int ret = 0; + osl_t *osh; + bcmsdh_info_t *sdh; + void *pkt = NULL; + void *pkt_chain; + int total_len = 0; + void *head_pkt = NULL; + void *prev_pkt = NULL; + int pad_pkt_len = 0; + int new_pkt_num = 0; + void *new_pkts[MAX_TX_PKTCHAIN_CNT]; + bool wlfc_enabled = FALSE; + + if (bus->dhd->dongle_reset) + return BCME_NOTREADY; + + if (num_pkt <= 0) + return BCME_BADARG; + + sdh = bus->sdh; + osh = bus->dhd->osh; + /* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */ + new_pkts[0] = NULL; + + for (i = 0; i < num_pkt; i++) { + int pkt_len; + bool last_pkt; + void *new_pkt = NULL; + + pkt = pkts[i]; + ASSERT(pkt); + last_pkt = (i == num_pkt - 1); + pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i, + total_len, last_pkt, &pad_pkt_len, &new_pkt +#if defined(BCMSDIOH_TXGLOM_EXT) + , i +#endif + ); + if (pkt_len <= 0) + goto done; + if (new_pkt) { + pkt = new_pkt; + new_pkts[new_pkt_num++] = new_pkt; + } + total_len += pkt_len; + + PKTSETNEXT(osh, pkt, NULL); + /* insert the packet into the list */ + head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt); + prev_pkt = pkt; + + } + + /* Update the HW frame tag (total length) in the first pkt of the glom */ + if (bus->txglom_enable) { + uint8 *frame; + + total_len += pad_pkt_len; + frame = (uint8*)PKTDATA(osh, head_pkt); + *(uint16*)frame = (uint16)htol16(total_len); + *(((uint16*)frame) + 1) = (uint16)htol16(~total_len); + + } + +#ifdef DHDENABLE_TAILPAD + /* if a padding packet if needed, insert it to the end of the link list */ + if (pad_pkt_len) { + PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len); + PKTSETNEXT(osh, pkt, bus->pad_pkt); + } +#endif /* DHDENABLE_TAILPAD */ + + /* dhd_bcmsdh_send_buf ignores the buffer pointer if he packet + * parameter is not NULL, for non packet chian we pass NULL pkt pointer + * so it will take the aligned length and buffer pointer. + */ + pkt_chain = PKTNEXT(osh, head_pkt) ? 
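+	/* chained tx: hand bcmsdh the head pkt so it walks PKTNEXT();
+	 * single pkt: hand it NULL so the flat buffer/length pair is used
+	 */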
head_pkt : NULL; +#ifdef TPUT_MONITOR + if ((bus->dhd->conf->data_drop_mode == TXPKT_DROP) && (total_len > 500)) + ret = BCME_OK; + else +#endif + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES); + if (ret == BCME_OK) + bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP; + + /* if a padding packet was needed, remove it from the link list as it not a data pkt */ + if (pad_pkt_len && pkt) + PKTSETNEXT(osh, pkt, NULL); + +done: + pkt = head_pkt; + while (pkt) { + void *pkt_next = PKTNEXT(osh, pkt); + PKTSETNEXT(osh, pkt, NULL); + dhdsdio_txpkt_postprocess(bus, pkt); + pkt = pkt_next; + } + + /* new packets might be allocated due to insufficient room for padding, but we + * still have to indicate the original packets to upper layer + */ + for (i = 0; i < num_pkt; i++) { + pkt = pkts[i]; + wlfc_enabled = FALSE; +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) { + wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) != + WLFC_UNSUPPORTED); + } +#endif /* PROP_TXSTATUS */ + if (!wlfc_enabled) { + PKTSETNEXT(osh, pkt, NULL); + dhd_txcomplete(bus->dhd, pkt, ret != 0); + if (free_pkt) + PKTFREE(osh, pkt, TRUE); + } + } + + for (i = 0; i < new_pkt_num; i++) + PKTFREE(osh, new_pkts[i], TRUE); + + return ret; +} + +static uint +dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) +{ + uint cnt = 0; + uint8 tx_prec_map; + uint16 txpktqlen = 0; + uint32 intstatus = 0; + uint retries = 0; + osl_t *osh; + dhd_pub_t *dhd = bus->dhd; + sdpcmd_regs_t *regs = bus->regs; +#if defined(DHD_LOSSLESS_ROAMING) || defined(DHD_PKTDUMP_TOFW) + uint8 *pktdata; + struct ether_header *eh; +#ifdef BDC + struct bdc_header *bdc_header; + uint8 data_offset; +#endif +#endif /* DHD_LOSSLESS_ROAMING */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + osh = dhd->osh; + tx_prec_map = ~bus->flowcontrol; +#ifdef DHD_LOSSLESS_ROAMING + tx_prec_map &= dhd->dequeue_prec_map; +#endif /* DHD_LOSSLESS_ROAMING */ + for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) { + int i; + int num_pkt = 1; + void *pkts[MAX_TX_PKTCHAIN_CNT]; + int prec_out; + uint datalen = 0; + + dhd_os_sdlock_txq(bus->dhd); + if (bus->txglom_enable) { + uint32 glomlimit = (uint32)bus->txglomsize; +#if defined(BCMSDIOH_STD) + if (bus->blocksize == 64) { + glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM); + } +#endif /* BCMSDIOH_STD */ + num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit); + num_pkt = MIN(num_pkt, ARRAYSIZE(pkts)); + } + num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map)); + for (i = 0; i < num_pkt; i++) { + pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out); + if (!pkts[i]) { + DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n", + __FUNCTION__)); + ASSERT(0); + break; + } +#if defined(DHD_LOSSLESS_ROAMING) || defined(DHD_PKTDUMP_TOFW) + pktdata = (uint8 *)PKTDATA(osh, pkts[i]); +#ifdef BDC + /* Skip BDC header */ + bdc_header = (struct bdc_header *)pktdata; + data_offset = bdc_header->dataOffset; + pktdata += BDC_HEADER_LEN + (data_offset << 2); +#endif + eh = (struct ether_header *)pktdata; +#ifdef DHD_LOSSLESS_ROAMING + if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + uint8 prio = (uint8)PKTPRIO(pkts[i]); + + /* Restore to original priority for 802.1X packet */ + if (prio == PRIO_8021D_NC) { + PKTSETPRIO(pkts[i], dhd->prio_8021x); +#ifdef BDC + /* Restore to original priority in BDC header */ + 
bdc_header->priority = + (dhd->prio_8021x & BDC_PRIORITY_MASK); +#endif + } + } +#endif /* DHD_LOSSLESS_ROAMING */ +#ifdef DHD_PKTDUMP_TOFW + dhd_dump_pkt(bus->dhd, BDC_GET_IF_IDX(bdc_header), pktdata, + (uint32)PKTLEN(bus->dhd->osh, pkts[i]), TRUE, NULL, NULL); +#endif +#endif /* DHD_LOSSLESS_ROAMING || DHD_8021X_DUMP */ + if (!bus->dhd->conf->orphan_move) + PKTORPHAN(pkts[i], bus->dhd->conf->tsq); + datalen += PKTLEN(osh, pkts[i]); + } + dhd_os_sdunlock_txq(bus->dhd); + + if (i == 0) + break; + if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK) + dhd->tx_errors++; + else { + dhd->dstats.tx_bytes += datalen; + bus->txglomframes++; + bus->txglompkts += num_pkt; +#ifdef PKT_STATICS + bus->tx_statics.glom_cnt_us[num_pkt-1] = + (bus->tx_statics.glom_cnt[num_pkt-1]*bus->tx_statics.glom_cnt_us[num_pkt-1] + + bcmsdh_get_spend_time(bus->sdh))/(bus->tx_statics.glom_cnt[num_pkt-1] + 1); +#endif + } + cnt += i; +#ifdef PKT_STATICS + if (num_pkt) { + bus->tx_statics.glom_cnt[num_pkt-1]++; + if (num_pkt > bus->tx_statics.glom_max) + bus->tx_statics.glom_max = num_pkt; + } +#endif + + /* In poll mode, need to check for other events */ + if (!bus->intr && cnt) + { + /* Check device status, signal pending interrupt */ + R_SDREG(intstatus, ®s->intstatus, retries); + bus->f2txdata++; + if (bcmsdh_regfail(bus->sdh)) + break; + if (intstatus & bus->hostintmask) + bus->ipend = TRUE; + } + + } + + if (dhd_doflow) { + dhd_os_sdlock_txq(bus->dhd); + txpktqlen = pktq_n_pkts_tot(&bus->txq); + dhd_os_sdunlock_txq(bus->dhd); + } + + /* Do flow-control if needed */ + if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) { + bool wlfc_enabled = FALSE; +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled && dhd_doflow && dhd->txoff) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + } + + return cnt; +} + +static void +dhdsdio_sendpendctl(dhd_bus_t *bus) +{ + bcmsdh_info_t *sdh = bus->sdh; + int ret; + uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN; + + if (bus->txglom_enable) + frame_seq += SDPCM_HWEXT_LEN; + + if (*frame_seq != bus->tx_seq) { + DHD_INFO(("%s IOCTL frame seq lag detected!" + " frm_seq:%d != bus->tx_seq:%d, corrected\n", + __FUNCTION__, *frame_seq, bus->tx_seq)); + *frame_seq = bus->tx_seq; + } + + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len, + NULL, NULL, NULL, 1); + if (ret == BCME_OK) + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + + bus->ctrl_frame_stat = FALSE; + dhd_wait_event_wakeup(bus->dhd); +} + +int +dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + static int err_nodevice = 0; + uint8 *frame; + uint16 len; + uint32 swheader; + uint8 doff = 0; + int ret = -1; + uint8 sdpcm_hdrlen = bus->txglom_enable ? 
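+	/* glom mode reserves an extra SDPCM_HWEXT_LEN of hardware
+	 * extension header in front of every frame
+	 */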
SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Back the pointer to make a room for bus header */ + frame = msg - sdpcm_hdrlen; + len = (msglen += sdpcm_hdrlen); + + /* Add alignment padding (optional for ctl frames) */ + if (dhd_alignctl) { + if ((doff = ((uintptr)frame % DHD_SDALIGN))) { + frame -= doff; + len += doff; + msglen += doff; + bzero(frame, doff + sdpcm_hdrlen); + } + ASSERT(doff < DHD_SDALIGN); + } + doff += sdpcm_hdrlen; + +#ifndef BCMSPI + /* Round send length to next SDIO block */ + if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { + uint16 pad = bus->blocksize - (len % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize)) + len += pad; + } else if (len % DHD_SDALIGN) { + len += DHD_SDALIGN - (len % DHD_SDALIGN); + } +#endif /* BCMSPI */ + + /* Satisfy length-alignment requirements */ + if (forcealign && (len & (ALIGNMENT - 1))) + len = ROUNDUP(len, ALIGNMENT); + + ASSERT(ISALIGNED((uintptr)frame, 2)); + + /* Need to lock here to protect txseq and SDIO tx calls */ + dhd_os_sdlock(bus->dhd); + if (bus->dhd->conf->txctl_tmo_fix > 0 && !TXCTLOK(bus)) { + bus->ctrl_wait = TRUE; + dhd_os_sdunlock(bus->dhd); + wait_event_interruptible_timeout(bus->ctrl_tx_wait, TXCTLOK(bus), + msecs_to_jiffies(bus->dhd->conf->txctl_tmo_fix)); + dhd_os_sdlock(bus->dhd); + bus->ctrl_wait = FALSE; + } + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ + *(uint16*)frame = htol16((uint16)msglen); + *(((uint16*)frame) + 1) = htol16(~msglen); + + if (bus->txglom_enable) { + uint32 hwheader1, hwheader2; + /* Software tag: channel, sequence number, data offset */ + swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) + | bus->tx_seq + | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN); + htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + + SDPCM_HWEXT_LEN + sizeof(swheader)); + + hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24); + hwheader2 = (len - (msglen)) << 16; + htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); + + *(uint16*)frame = htol16(len); + *(((uint16*)frame) + 1) = htol16(~(len)); + } else { + /* Software tag: channel, sequence number, data offset */ + swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) + | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); + } + + if (!TXCTLOK(bus)) + { + DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n", + __FUNCTION__, bus->tx_max, bus->tx_seq)); + bus->ctrl_frame_stat = TRUE; + /* Send from dpc */ + bus->ctrl_frame_buf = frame; + bus->ctrl_frame_len = len; + +#if defined(NDIS) + dhd_os_sdunlock(bus->dhd); + dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat); + dhd_os_sdlock(bus->dhd); +#else + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + if (bus->ctrl_frame_stat) { + dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat); + } +#endif /* NDIS */ + + if (bus->ctrl_frame_stat == FALSE) { + DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__)); + ret = 0; + } else { + bus->dhd->txcnt_timeout++; + if 
(!bus->dhd->hang_was_sent) { +#ifdef CUSTOMER_HW4_DEBUG + /* XXX Add Debug code for find root cause from CSP:565333 */ + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n", + __FUNCTION__, status)); + DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n", + __FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate)); +#endif /* CUSTOMER_HW4_DEBUG */ + DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n", + __FUNCTION__, bus->dhd->txcnt_timeout)); +#ifdef BCMSDIO_RXLIM_POST + DHD_ERROR(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n", + __FUNCTION__, + bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr)); +#endif /* BCMSDIO_RXLIM_POST */ + } +#ifdef DHD_FW_COREDUMP + /* Collect socram dump */ + if ((bus->dhd->memdump_enabled) && + (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)) { + /* collect core dump */ + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_TX; + dhd_os_sdunlock(bus->dhd); + dhd_bus_mem_dump(bus->dhd); + dhd_os_sdlock(bus->dhd); + } +#endif /* DHD_FW_COREDUMP */ + ret = -1; + bus->ctrl_frame_stat = FALSE; + goto done; + } + } + + bus->dhd->txcnt_timeout = 0; + bus->ctrl_frame_stat = TRUE; + + if (ret == -1) { +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("Tx Frame", frame, len); + } else if (DHD_HDRS_ON()) { + prhex("TxHdr", frame, MIN(len, 16)); + } +#endif +#ifdef PKT_STATICS + bus->tx_statics.ctrl_count++; + bus->tx_statics.ctrl_size += len; +#endif + ret = dhd_bcmsdh_send_buffer(bus, frame, len); + } + bus->ctrl_frame_stat = FALSE; + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + + /* XXX Need to validate return code (ranges) */ + if (ret) + bus->dhd->tx_ctlerrs++; + else + bus->dhd->tx_ctlpkts++; + + if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) { +#ifdef DHD_PM_CONTROL_FROM_FILE + if (g_pm_control == TRUE) { + return -BCME_ERROR; + } else { + return -ETIMEDOUT; + } +#else + return -ETIMEDOUT; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + } + if (ret == BCME_NODEVICE) + err_nodevice++; + else + err_nodevice = 0; + + return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? 
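+	/* decoded: ret == 0 -> 0; BCME_NODEVICE repeated at least
+	 * ERROR_BCME_NODEVICE_MAX times -> -ETIMEDOUT; else -> -EIO
+	 */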
-ETIMEDOUT : -EIO : 0; +} + +int +dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + int timeleft; + uint rxlen = 0; + static uint cnt = 0; + uint max_rxcnt; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + + if (bus->dhd->conf->ctrl_resched > 0 && !rxlen && timeleft == 0) { + cnt++; + if (cnt <= bus->dhd->conf->ctrl_resched) { + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + if ((status & I_HMB_HOST_INT) || PKT_AVAILABLE(bus, status)) { + DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, status=0x%x\n", + __FUNCTION__, cnt, status)); + bus->ipend = TRUE; + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + } + } + } else { + cnt = 0; + } + + if (rxlen) { + DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n", + __FUNCTION__, rxlen, msglen)); + } else { + if (timeleft == 0) { +#ifdef DHD_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n", + __FUNCTION__, status)); +#else + DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); +#endif /* DHD_DEBUG */ + if (!bus->dhd->dongle_trap_occured) { +#ifdef DHD_FW_COREDUMP + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; +#endif /* DHD_FW_COREDUMP */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + } else { + DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); + if (!bus->dhd->dongle_trap_occured) { +#ifdef DHD_FW_COREDUMP + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_UNKNOWN; +#endif /* DHD_FW_COREDUMP */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + } +#ifdef DHD_FW_COREDUMP + /* Dump the ram image */ + if (bus->dhd->memdump_enabled && !bus->dhd->dongle_trap_occured) + dhdsdio_mem_dump(bus); +#endif /* DHD_FW_COREDUMP */ + } + if (timeleft == 0) { + if (rxlen == 0) + bus->dhd->rxcnt_timeout++; + DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__, + bus->dhd->rxcnt_timeout, rxlen)); +#ifdef DHD_FW_COREDUMP + /* collect socram dump */ + if (bus->dhd->memdump_enabled) { + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_RX; + dhd_bus_mem_dump(bus->dhd); + } +#endif /* DHD_FW_COREDUMP */ + } else { + bus->dhd->rxcnt_timeout = 0; + } + + if (rxlen) + bus->dhd->rx_ctlpkts++; + else + bus->dhd->rx_ctlerrs++; + + if (bus->dhd->conf->rxcnt_timeout) + max_rxcnt = bus->dhd->conf->rxcnt_timeout; + else + max_rxcnt = MAX_CNTL_RX_TIMEOUT; + if (bus->dhd->rxcnt_timeout >= max_rxcnt) { +#ifdef DHD_PM_CONTROL_FROM_FILE + if (g_pm_control == TRUE) { + return -BCME_ERROR; + } else { + return -ETIMEDOUT; + } +#else + return -ETIMEDOUT; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + } + if (bus->dhd->dongle_trap_occured) + return -EREMOTEIO; + + return rxlen ? 
(int)rxlen : -EIO;	/* XXX Returns EIO error */
+}
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_POLLRATE,
+	IOV_SDREG,
+	IOV_SBREG,
+	IOV_SDCIS,
+#ifdef DHD_BUS_MEM_ACCESS
+	IOV_MEMBYTES,
+#endif /* DHD_BUS_MEM_ACCESS */
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+#ifdef DHD_DEBUG
+	IOV_CHECKDIED,
+	IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_SOCRAM_STATE,
+	IOV_FORCEEVEN,
+	IOV_SDIOD_DRIVE,
+	IOV_READAHEAD,
+	IOV_SDRXCHAIN,
+	IOV_ALIGNCTL,
+	IOV_SDALIGN,
+	IOV_DEVRESET,
+	IOV_CPU,
+#if defined(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL)
+	IOV_WATERMARK,
+	IOV_MESBUSYCTRL,
+#endif /* USE_SDIOFIFO_IOVAR */
+#ifdef BCMINTERNAL
+	IOV_SDRESET,
+	IOV_SDABORT,
+	IOV_FIRSTREAD,
+	IOV_TSTOPH,
+	IOV_RETRYDATA,
+	IOV_CHECKFIFO,
+	IOV_DOFLOW,
+	IOV_SDF2,
+	IOV_CLOCKPOLL,
+	IOV_MAXRAMSIZE,
+	IOV_SIALL,
+#endif /* BCMINTERNAL */
+#ifdef SDTEST
+	IOV_PKTGEN,
+	IOV_EXTLOOP,
+#endif /* SDTEST */
+	IOV_SPROM,
+	IOV_TXBOUND,
+	IOV_RXBOUND,
+	IOV_TXMINMAX,
+	IOV_IDLETIME,
+	IOV_IDLECLOCK,
+	IOV_SD1IDLE,
+	IOV_SLEEP,
+	IOV_DONGLEISOLATION,
+	IOV_KSO,
+	IOV_DEVSLEEP,
+	IOV_DEVCAP,
+	IOV_VARS,
+#ifdef SOFTAP
+	IOV_FWPATH,
+#endif
+	IOV_TXGLOMSIZE,
+	IOV_TXGLOMMODE,
+	IOV_HANGREPORT,
+	IOV_TXINRX_THRES,
+	IOV_SDIO_SUSPEND,
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+	IOV_GDB_SERVER,  /**< starts gdb server on given interface */
+#endif /* DEBUGGER || DHD_DSCOPE */
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+	{"intr",	IOV_INTR,	0, 0,	IOVT_BOOL,	0 },
+	{"sleep",	IOV_SLEEP,	0, 0,	IOVT_BOOL,	0 },
+	{"pollrate",	IOV_POLLRATE,	0, 0,	IOVT_UINT32,	0 },
+	{"idletime",	IOV_IDLETIME,	0, 0,	IOVT_INT32,	0 },
+	{"idleclock",	IOV_IDLECLOCK,	0, 0,	IOVT_INT32,	0 },
+	{"sd1idle",	IOV_SD1IDLE,	0, 0,	IOVT_BOOL,	0 },
+#ifdef DHD_BUS_MEM_ACCESS
+	{"membytes",	IOV_MEMBYTES,	0, 0,	IOVT_BUFFER,	2 * sizeof(int) },
+#endif /* DHD_BUS_MEM_ACCESS */
+	{"ramsize",	IOV_RAMSIZE,	0, 0,	IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0, 0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0, 0,	IOVT_BOOL,	0 },
+	{"socram_state",	IOV_SOCRAM_STATE,	0, 0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0, 0,	IOVT_BUFFER,	0 },
+	{"sdiod_drive",	IOV_SDIOD_DRIVE,	0, 0,	IOVT_UINT32,	0 },
+	{"readahead",	IOV_READAHEAD,	0, 0,	IOVT_BOOL,	0 },
+	{"sdrxchain",	IOV_SDRXCHAIN,	0, 0,	IOVT_BOOL,	0 },
+	{"alignctl",	IOV_ALIGNCTL,	0, 0,	IOVT_BOOL,	0 },
+	{"sdalign",	IOV_SDALIGN,	0, 0,	IOVT_BOOL,	0 },
+	{"devreset",	IOV_DEVRESET,	0, 0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"sdreg",	IOV_SDREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sbreg",	IOV_SBREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_cis",	IOV_SDCIS,	0, 0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"forcealign",	IOV_FORCEEVEN,	0, 0,	IOVT_BOOL,	0 },
+	{"txbound",	IOV_TXBOUND,	0, 0,	IOVT_UINT32,	0 },
+	{"rxbound",	IOV_RXBOUND,	0, 0,	IOVT_UINT32,	0 },
+	{"txminmax",	IOV_TXMINMAX,	0, 0,	IOVT_UINT32,	0 },
+	{"cpu",	IOV_CPU,	0, 0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"checkdied",	IOV_CHECKDIED,	0, 0,	IOVT_BUFFER,	0 },
+	{"serial",	IOV_SERIALCONS,	0, 0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG */
+#ifdef BCMINTERNAL
+	{"siregall",	IOV_SIALL,	0, 0,	IOVT_UINT32,	0 },
+#endif /* BCMINTERNAL */
+#endif /* DHD_DEBUG */
+#if defined(BCMINTERNAL) || defined(DHD_SPROM)
+	{"sprom",	IOV_SPROM,	0, 0,	IOVT_BUFFER,	2 * sizeof(int) },
+#endif /* BCMINTERNAL || DHD_SPROM */
+#ifdef SDTEST
+	{"extloop",	IOV_EXTLOOP,	0, 0,	IOVT_BOOL,	0 },
+	{"pktgen",	IOV_PKTGEN,	0, 0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+#if defined(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL)
+	{"watermark",	IOV_WATERMARK,
0, 0, IOVT_UINT32, 0 }, + {"mesbusyctrl", IOV_MESBUSYCTRL, 0, 0, IOVT_UINT32, 0 }, +#endif /* USE_SDIOFIFO_IOVAR */ +#ifdef BCMINTERNAL + {"firstread", IOV_FIRSTREAD, 0, 0, IOVT_UINT32, 0 }, /* INTERNAL */ + {"tstoph", IOV_TSTOPH, 0, 0, IOVT_BOOL, 0 }, + {"retrydata", IOV_RETRYDATA, 0, 0, IOVT_BOOL, 0 }, + {"checkfifo", IOV_CHECKFIFO, 0, 0, IOVT_BOOL, 0 }, + {"sdf2", IOV_SDF2, 0, 0, IOVT_UINT32, 0 }, + {"sdreset", IOV_SDRESET, 0, 0, IOVT_VOID, 0 }, + {"sdabort", IOV_SDABORT, 0, 0, IOVT_UINT32, 0 }, + {"doflow", IOV_DOFLOW, 0, 0, IOVT_BOOL, 0 }, + {"clockpoll", IOV_CLOCKPOLL, 0, 0, IOVT_BOOL, 0 }, + {"maxsocram", IOV_MAXRAMSIZE, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_DEBUG + {"serial", IOV_SERIALCONS, 0, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ +#endif /* BCMINTERNAL */ + {"devcap", IOV_DEVCAP, 0, 0, IOVT_UINT32, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 }, + {"kso", IOV_KSO, 0, 0, IOVT_UINT32, 0 }, + {"devsleep", IOV_DEVSLEEP, 0, 0, IOVT_UINT32, 0 }, +#ifdef SOFTAP + {"fwpath", IOV_FWPATH, 0, 0, IOVT_BUFFER, 0 }, +#endif + {"txglomsize", IOV_TXGLOMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 }, + {"txinrx_thres", IOV_TXINRX_THRES, 0, 0, IOVT_INT32, 0 }, + {"sdio_suspend", IOV_SDIO_SUSPEND, 0, 0, IOVT_UINT32, 0 }, +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 }, +#endif /* DEBUGGER || DHD_DSCOPE */ + {NULL, 0, 0, 0, 0, 0 } +}; + +static void +dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div) +{ + uint q1, q2; + + if (!div) { + bcm_bprintf(strbuf, "%s N/A", desc); + } else { + q1 = num / div; + q2 = (100 * (num - (q1 * div))) / div; + bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2); + } +} + +void +dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_bus_t *bus = dhdp->bus; +#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKE_EVENT_STATUS) + int i; +#endif + + bcm_bprintf(strbuf, "Bus SDIO structure:\n"); + bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n", + bus->hostintmask, bus->intstatus, bus->sdpcm_ver); + bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n", + bus->fcstate, pktq_n_pkts_tot(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip, + bus->rxlen, bus->rx_seq); + bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n", + bus->intr, bus->intrcount, bus->lastintrs, bus->spurious); + +#ifdef DHD_WAKE_STATUS + bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n", + bcmsdh_get_total_wake(bus->sdh), bus->wake_counts.rxwake, + bus->wake_counts.rcwake); +#ifdef DHD_WAKE_RX_STATUS + bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n", + bus->wake_counts.rx_ucast, bus->wake_counts.rx_mcast, + bus->wake_counts.rx_bcast, bus->wake_counts.rx_arp); + bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n", + bus->wake_counts.rx_multi_ipv4, bus->wake_counts.rx_multi_ipv6, + bus->wake_counts.rx_icmpv6, bus->wake_counts.rx_multi_other); + bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n", + bus->wake_counts.rx_icmpv6_ra, bus->wake_counts.rx_icmpv6_na, + bus->wake_counts.rx_icmpv6_ns); +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + for (i = 0; i < WLC_E_LAST; i++) + if (bus->wake_counts.rc_event[i] != 0) + bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(i), + bus->wake_counts.rc_event[i]); + bcm_bprintf(strbuf, "\n"); +#endif /* DHD_WAKE_EVENT_STATUS */ +#endif /* DHD_WAKE_STATUS */ + + bcm_bprintf(strbuf, 
"pollrate %u pollcnt %u regfails %u\n", + bus->pollrate, bus->pollcnt, bus->regfails); + + bcm_bprintf(strbuf, "\nAdditional counters:\n"); +#ifdef DHDENABLE_TAILPAD + bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n", + bus->tx_tailpad_chain, bus->tx_tailpad_pktget); +#endif /* DHDENABLE_TAILPAD */ + bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n", + bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong, + bus->rxc_errors); + bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n", + bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq); + bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n", + bus->fc_rcvd, bus->fc_xoff, bus->fc_xon); + bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n", + bus->rxglomfail, bus->rxglomframes, bus->rxglompkts); + bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n", + (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata, + bus->f2txdata, bus->f1regdata); + { + dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts), + bus->dhd->rx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets, + (bus->f2txdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Total: pkts/f2rw", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount); + bcm_bprintf(strbuf, "\n\n"); + } + +#ifdef SDTEST + /* XXX Add new stats, include pktq len */ + if (bus->pktgen_count) { + bcm_bprintf(strbuf, "pktgen config and count:\n"); + bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n", + bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print, + bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen); + bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n", + bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + } +#endif /* SDTEST */ +#ifdef DHD_DEBUG + bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n", + bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? 
" " : " not ")); + bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup); +#endif /* DHD_DEBUG */ + bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n", + bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping); +#ifdef BCMINTERNAL + bcm_bprintf(strbuf, "tx_deferred %d, fc 0x%x\n", bus->tx_deferred, bus->flowcontrol); +#ifdef DHD_DEBUG + { + int i; + bcm_bprintf(strbuf, "qcount: "); + for (i = 0; i < 8; i++) + bcm_bprintf(strbuf, " %d , ", qcount[i]); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "tx_packets: "); + for (i = 0; i < 8; i++) + bcm_bprintf(strbuf, " %d , ", tx_packets[i]); + bcm_bprintf(strbuf, "\n"); + } +#endif /* DHD_DEBUG */ +#endif /* BCMINTERNAL */ + dhd_dump_pct(strbuf, "Tx: glom pct", (100 * bus->txglompkts), bus->dhd->tx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->txglompkts, bus->txglomframes); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "txglomframes %u, txglompkts %u\n", bus->txglomframes, bus->txglompkts); + bcm_bprintf(strbuf, "\n"); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0; + bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0; + bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0; +#ifdef DHDENABLE_TAILPAD + bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0; +#endif /* DHDENABLE_TAILPAD */ + bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0; + bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0; + bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0; +#ifdef BCMINTERNAL + bus->tx_deferred = bus->flowcontrol = 0; +#endif + bus->txglomframes = bus->txglompkts = 0; +} + +#ifdef SDTEST +static int +dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + + pktgen.version = DHD_PKTGEN_VERSION; + pktgen.freq = bus->pktgen_freq; + pktgen.count = bus->pktgen_count; + pktgen.print = bus->pktgen_print; + pktgen.total = bus->pktgen_total; + pktgen.minlen = bus->pktgen_minlen; + pktgen.maxlen = bus->pktgen_maxlen; + pktgen.numsent = bus->pktgen_sent; + pktgen.numrcvd = bus->pktgen_rcvd; + pktgen.numfail = bus->pktgen_fail; + pktgen.mode = bus->pktgen_mode; + pktgen.stop = bus->pktgen_stop; + + bcopy(&pktgen, arg, sizeof(pktgen)); + + return 0; +} + +static int +dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + uint oldcnt, oldmode; + + bcopy(arg, &pktgen, sizeof(pktgen)); + if (pktgen.version != DHD_PKTGEN_VERSION) + return BCME_BADARG; + + oldcnt = bus->pktgen_count; + oldmode = bus->pktgen_mode; + + bus->pktgen_freq = pktgen.freq; + bus->pktgen_count = pktgen.count; + bus->pktgen_print = pktgen.print; + bus->pktgen_total = pktgen.total; + bus->pktgen_minlen = pktgen.minlen; + bus->pktgen_maxlen = pktgen.maxlen; + bus->pktgen_mode = pktgen.mode; + bus->pktgen_stop = pktgen.stop; + + bus->pktgen_tick = bus->pktgen_ptick = 0; +#if defined(LINUX) + bus->pktgen_prev_time = jiffies; +#endif /* LINUX */ + bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen); + bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen); + + /* Clear counts for a new pktgen (mode change, or was stopped) */ + if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) { + bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0; + bus->pktgen_prev_rcvd = bus->pktgen_fail = 0; + } + + return 0; +} +#endif /* SDTEST */ + +static int +dhdsdio_membytes(dhd_bus_t *bus, bool write, 
uint32 address, uint8 *data, uint size) +{ + int bcmerror = 0; + uint32 sdaddr; + uint dsize; + uint8 *pdata; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) { + address -= bus->orig_ramsize; + address += SOCDEVRAM_BP_ADDR; + } + + /* Determine initial transfer parameters */ + sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; + if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK) + dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr); + else + dsize = size; + + /* Set the backplane window to include the start address */ + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + goto xfer_done; + } + + /* Do the transfer(s) */ + while (size) { + DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n", + __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr, + (address & SBSDIO_SBWINDOW_MASK))); + if (dsize <= MAX_MEM_BUF) { + pdata = bus->membuf; + if (write) + memcpy(bus->membuf, data, dsize); + } else { + pdata = data; + } + if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, pdata, dsize))) { + DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__)); + break; + } + if (dsize <= MAX_MEM_BUF && !write) + memcpy(data, bus->membuf, dsize); + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + break; + } + sdaddr = 0; + dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size); + } + + } + +xfer_done: + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) { + DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__, + bcmsdh_cur_sbwad(bus->sdh))); + } + + return bcmerror; +} + +static int +dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) +{ + uint32 addr; + int rv, i; + uint32 shaddr = 0; + + if (bus->sih == NULL) { + if (bus->dhd && bus->dhd->dongle_reset) { + DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__)); + return BCME_NOTREADY; + } else { + ASSERT(bus->dhd); + ASSERT(bus->sih); + DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__)); + return BCME_ERROR; + } + } + /* + * If SR is not implemented in 43430 FW we should not adjust shaddr + * XXX Should be REMOVED after SR will be implemented in 43430 FW + */ + if ((CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) && !dhdsdio_sr_cap(bus)) + bus->srmemsize = 0; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + i = 0; + do { + /* Read last word in memory to determine address of sdpcm_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0) + return rv; + + addr = ltoh32(addr); + + DHD_INFO(("sdpcm_shared address 0x%08X\n", addr)); + + /* + * Check if addr is valid. + * NVRAM length at the end of memory should have been overwritten. 
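+		 * For example, a leftover NVRAM length word such as 0xFFEF0010
+		 * (length 0x0010, ~length 0xFFEF) satisfies
+		 * ((~addr >> 16) & 0xffff) == (addr & 0xffff) and is rejected.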
+	 */
+		if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+			if ((bus->srmemsize > 0) && (i++ == 0)) {
+				shaddr -= bus->srmemsize;
+			} else {
+				DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+					__FUNCTION__, addr));
+				return BCME_ERROR;
+			}
+		} else
+			break;
+	} while (i < 2);
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+		return rv;
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+#ifdef BCMSDIO_RXLIM_POST
+	if (sh->flags & SDPCM_SHARED_RXLIM_POST) {
+		if (bus->dhd->conf->rxlim_en)
+			bus->rxlim_en = !!sh->msgtrace_addr;
+		bus->rxlim_addr = sh->msgtrace_addr;
+		DHD_INFO(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n",
+			__FUNCTION__,
+			bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr));
+		sh->flags &= ~SDPCM_SHARED_RXLIM_POST;
+	} else {
+		bus->rxlim_en = 0;
+		DHD_INFO(("%s: FW has no rx limit post support\n", __FUNCTION__));
+	}
+#endif /* BCMSDIO_RXLIM_POST */
+
+#ifdef BCMSDIO_TXSEQ_SYNC
+	if (bus->dhd->conf->txseq_sync) {
+		sh->txseq_sync_addr = ltoh32(sh->txseq_sync_addr);
+		if (sh->flags & SDPCM_SHARED_TXSEQ_SYNC) {
+			uint8 val = 0;
+			DHD_INFO(("%s: TXSEQ_SYNC enabled in fw\n", __FUNCTION__));
+			if (0 == dhdsdio_membytes(bus, FALSE, sh->txseq_sync_addr, (uint8 *)&val, 1)) {
+				if (bus->tx_seq != val) {
+					DHD_INFO(("%s: Sync tx_seq from %d to %d\n",
+						__FUNCTION__, bus->tx_seq, val));
+					bus->tx_seq = val;
+					bus->tx_max = bus->tx_seq + 4;
+				}
+			}
+			sh->flags &= ~SDPCM_SHARED_TXSEQ_SYNC;
+		} else {
+			bus->dhd->conf->txseq_sync = FALSE;
+		}
+	}
+#endif /* BCMSDIO_TXSEQ_SYNC */
+
+	/*
+	 * XXX - Allow a sdpcm_shared_t version mismatch between dhd structure
+	 * version 1 and firmware structure version 3.
+	 * The sdpcm_shared_t structure fields used in this function are in the
+	 * same positions in these two structure versions.
+	 * For some chips in the FALCON release, the dhd driver is from the
+	 * FALCON branch (sdpcm_shared_t structure version 1) and the firmware
+	 * comes from the ROMTERM3 branch (sdpcm_shared_t structure version 3).
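+	 * The first check below accepts exactly that 1-vs-3 pairing; any
+	 * other mismatch is rejected as a version error.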
+ */ + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1) + return BCME_OK; + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { + DHD_ERROR(("%s: sdpcm_shared version %d in dhd " + "is different than sdpcm_shared version %d in dongle\n", + __FUNCTION__, SDPCM_SHARED_VERSION, + sh->flags & SDPCM_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#define CONSOLE_LINE_MAX 192 + +#ifdef DHD_DEBUG +static int +dhdsdio_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return 0; + + if (!KSO_ENAB(bus)) + return 0; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + } + + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + /* xxx this could optimize and read only the portion of the buffer needed, but + * it would also have to handle wrap-around. + */ + addr = ltoh32(c->log.buf); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); +#ifdef LOG_INTO_TCPDUMP + dhd_sendup_log(bus->dhd, line, n); +#endif /* LOG_INTO_TCPDUMP */ + } + } +break2: + + return BCME_OK; +} +#endif /* DHD_DEBUG */ + +static int +dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + char *console_buffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + sdpcm_shared_t l_sdpcm_shared; + struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (DHD_NOCHECKDIED_ON()) + return 0; + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. 
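+	 * The buffer allocated here is bound to a bcmstrbuf below and
+	 * receives the human-readable trap/assert report, which is finally
+	 * dumped via DHD_ERROR at the printbuf label.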
+ */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdsdio_readshared(bus, &l_sdpcm_shared)) < 0) + goto done; + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + l_sdpcm_shared.msgtrace_addr, l_sdpcm_shared.console_addr); + + if ((l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (l_sdpcm_shared.assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + l_sdpcm_shared.assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (l_sdpcm_shared.assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + l_sdpcm_shared.assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", l_sdpcm_shared.assert_line); + } + + if (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + trap_t *tr = &bus->dhd->last_trap_info; + bus->dhd->dongle_trap_occured = TRUE; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + l_sdpcm_shared.trap_addr, + (uint8*)tr, sizeof(trap_t))) < 0) + goto done; + + bus->dongle_trap_addr = ltoh32(l_sdpcm_shared.trap_addr); + + dhd_bus_dump_trap_info(bus, &strbuf); + + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) + goto printbuf; + + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) + goto printbuf; + + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) + goto printbuf; + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) + goto printbuf; + + if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) + goto printbuf; + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* 
Don't use DHD_ERROR macro since we print + * a lot of information quickly. The macro + * will truncate a lot of the printfs + */ + + if (dhd_msg_level & DHD_ERROR_VAL) + printf("CONSOLE: %s\n", line); + } + } + } + } + +printbuf: + if (l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { + DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + } + +#if defined(DHD_FW_COREDUMP) + if (bus->dhd->memdump_enabled && (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP)) { + /* Mem dump to a file on device */ + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; + /* xxx this sdunlock has been put as a WAR here. We tried to come up + * with a better solution but with the current structure of sdlocks it is very + * unlikely to have a better fix for now. The better Rearch of sdio bus + * locking has been put up as a cleanup activity and a thorough + * code walkthrough is needed. + */ + dhd_os_sdunlock(bus->dhd); + dhdsdio_mem_dump(bus); + dhd_os_sdlock(bus->dhd); +#ifdef NDIS + /* Windows would like to crash and collect memory dump for analysis */ + ASSERT(0 && "Dongle firmware died."); + + /* For free drivers ASSERT will not bugcheck */ + KeBugCheckEx(__LINE__, 0, 0, 0, 0); +#endif + } +#endif /* #if defined(DHD_FW_COREDUMP) */ + +done: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + + return bcmerror; +} + +#if defined(DHD_FW_COREDUMP) +int +dhd_bus_mem_dump(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + if (dhdp->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__)); + return 0; + } + return dhdsdio_mem_dump(bus); +} + +int +dhd_bus_get_mem_dump(dhd_pub_t *dhdp) +{ + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + return dhdsdio_get_mem_dump(dhdp->bus); +} + +static int +dhdsdio_get_mem_dump(dhd_bus_t *bus) +{ + int ret = BCME_ERROR; + int size = bus->ramsize; /* Full mem size */ + uint32 start = bus->dongle_ram_base; /* Start address */ + uint read_size = 0; /* Read size of each iteration */ + uint8 *p_buf = NULL, *databuf = NULL; + + /* Get full mem size */ + p_buf = dhd_get_fwdump_buf(bus->dhd, size); + if (!p_buf) { + DHD_ERROR(("%s: Out of memory (%d bytes)\n", + __FUNCTION__, size)); + return BCME_ERROR; + } + + dhd_os_sdlock(bus->dhd); + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Read mem content */ + DHD_ERROR(("Dump dongle memory\n")); + databuf = p_buf; + while (size) { + read_size = MIN(MEMBLOCK, size); + ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size); + if (ret) { + DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); + ret = BCME_ERROR; + break; + } + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + return ret; +} + +static int +dhdsdio_mem_dump(dhd_bus_t *bus) +{ + dhd_pub_t *dhdp; + int ret = BCME_ERROR; + + dhdp = bus->dhd; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return ret; + } + + ret = dhdsdio_get_mem_dump(bus); + if (ret) { + DHD_ERROR(("%s: failed to get mem dump, err=%d\n", + __FUNCTION__, ret)); + } else { + /* schedule a work queue to perform actual memdump. 
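+	 * (dhdsdio_get_mem_dump() above already copied the dongle RAM into
+	 * the dump buffer; writing it out may block, hence the deferral.)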
+ * dhd_mem_dump() performs the job + */ + dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); + /* soc_ram free handled in dhd_{free,clear} */ + } + + return ret; +} +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t * bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdsdio_mem_dump(bus)); +#else + return -1; +#endif +} + +int +dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->up && + 1) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); +err: + return bcmerror; +} + +#ifdef DHD_DEBUG +static int +dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror) +{ + int int_val; + uint32 addr, data, uart_enab = 0; + + addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data); + *bcmerror = 0; + + bcmsdh_reg_write(bus->sdh, addr, 4, 1); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + int_val = bcmsdh_reg_read(bus->sdh, data, 4); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + + if (!set) + return (int_val & uart_enab); + if (enable) + int_val |= uart_enab; + else + int_val &= ~uart_enab; + bcmsdh_reg_write(bus->sdh, data, 4, int_val); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + + return (int_val & uart_enab); +} +#endif /* BCMINTERNAL */ + +static int +dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, uint plen, void *arg, uint len, uint val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + /* Some ioctls use the bus */ + dhd_os_sdlock(bus->dhd); + + /* Check if dongle is in reset. 
If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + /* + * Special handling for keepSdioOn: New SDIO Wake-up Mechanism + */ + if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) { + dhdsdio_clk_kso_iovar(bus, bool_val); + goto exit; + } else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) { +#ifdef BCMINTERNAL + /* XXX: Temp for debugging devsleep */ + if (int_val == 2) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } else if (int_val == 3) { + bus->_slpauto = FALSE; + } else if (int_val == 4) { + bus->_slpauto = TRUE; + } else if (int_val == 5) { + bus->kso = TRUE; + } else if (int_val == 6) { + bus->kso = FALSE; + } else +#endif + { + dhdsdio_clk_devsleep_iovar(bus, bool_val); + if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) { + DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n", + bus->dpc_sched)); + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + } + goto exit; + } + + /* Handle sleep stuff before any clock mucking */ + if (vi->varid == IOV_SLEEP) { + if (IOV_ISSET(actionid)) { + bcmerror = dhdsdio_bussleep(bus, bool_val); + } else { + int_val = (int32)bus->sleeping; + bcopy(&int_val, arg, val_size); + } + goto exit; + } + + /* Request clock to allow SDIO accesses */ + if (!bus->dhd->dongle_reset) { + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + + switch (actionid) { + case IOV_GVAL(IOV_INTR): + int_val = (int32)bus->intr; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_INTR): + bus->intr = bool_val; + bus->intdis = FALSE; + if (bus->dhd->up) { + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + // terence 20141207: enbale intdis + bus->intdis = TRUE; + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + } + break; + + case IOV_GVAL(IOV_POLLRATE): + int_val = (int32)bus->pollrate; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POLLRATE): + bus->pollrate = (uint)int_val; + bus->poll = (bus->pollrate != 0); + break; + + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; + + case IOV_GVAL(IOV_IDLECLOCK): + int_val = (int32)bus->idleclock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLECLOCK): + bus->idleclock = int_val; + break; + + case IOV_GVAL(IOV_SD1IDLE): + int_val = (int32)sd1idle; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SD1IDLE): + sd1idle = bool_val; + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_CHECKDIED): + bcmerror = dhdsdio_checkdied(bus, arg, len); + break; +#endif /* DHD_DEBUG */ + +#ifdef DHD_BUS_MEM_ACCESS + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? 
plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__, + (set ? "write" : "read"), size, address)); + + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* + * If address is start of RAM (i.e. a downloaded image), + * store the reset instruction to be written in 0 + */ + if (set && address == bus->dongle_ram_base) { + bus->resetinstr = *(((uint32*)params) + 2); + } + } + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdsdio_membytes(bus, set, address, data, size); + + break; + } +#endif /* DHD_BUS_MEM_ACCESS */ + + case IOV_GVAL(IOV_RAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_RAMSTART): + int_val = (int32)bus->dongle_ram_base; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_SDIOD_DRIVE): + int_val = (int32)dhd_sdiod_drive_strength; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIOD_DRIVE): + dhd_sdiod_drive_strength = int_val; + si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength); + break; + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_SOCRAM_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdsdio_downloadvars(bus, arg, len); + break; + + case IOV_GVAL(IOV_READAHEAD): + int_val = (int32)dhd_readahead; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_READAHEAD): + if (bool_val && !dhd_readahead) + bus->nextlen = 0; + dhd_readahead = bool_val; + break; + + case IOV_GVAL(IOV_SDRXCHAIN): + int_val = (int32)bus->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDRXCHAIN): + if (bool_val && !bus->sd_rxchain) + bcmerror = BCME_UNSUPPORTED; + else + bus->use_rxchain = bool_val; + break; +#ifndef BCMSPI + case IOV_GVAL(IOV_ALIGNCTL): + int_val = (int32)dhd_alignctl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ALIGNCTL): + dhd_alignctl = bool_val; + break; +#endif /* BCMSPI */ + + case IOV_GVAL(IOV_SDALIGN): + int_val = DHD_SDALIGN; + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_VARS): + if (bus->varsz < (uint)len) + bcopy(bus->vars, arg, bus->varsz); + else + bcmerror = BCME_BUFTOOSHORT; + break; +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG + /* XXX Until these return BCME ranges, make assumptions here */ + case IOV_GVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uintptr addr; + uint size; + + sd_ptr = (sdreg_t *)params; + + addr = ((uintptr)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uintptr addr; + uint size; + + sd_ptr = (sdreg_t *)params; + + addr = ((uintptr)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + /* XXX Same as above */ + /* Same as above, but offset is not backplane (not SDIO core) */ + 
case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE(bus->sih) + sdreg.offset; + size = sdreg.func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE(bus->sih) + sdreg.offset; + size = sdreg.func; + bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + case IOV_GVAL(IOV_SDCIS): + { + *(char *)arg = 0; + + /* XXX Ignoring return codes, should be evident from printed results */ + bcmstrcat(arg, "\nFunc 0\n"); + bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 1\n"); + bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 2\n"); + bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + break; + } + + case IOV_GVAL(IOV_FORCEEVEN): + int_val = (int32)forcealign; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCEEVEN): + forcealign = bool_val; + break; + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_TXMINMAX): + int_val = (int32)dhd_txminmax; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXMINMAX): + dhd_txminmax = (uint)int_val; + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_SERIALCONS): + int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror); + if (bcmerror != 0) + break; + + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SERIALCONS): + dhd_serialconsole(bus, TRUE, bool_val, &bcmerror); + break; +#endif /* DHD_DEBUG */ + +#if defined(BCMINTERNAL) && defined(BCMDBG) + case IOV_SVAL(IOV_SIALL): + DHD_ERROR(("Dumping all the Backplane registers\n")); + si_viewall(bus->sih, TRUE); + break; +#endif /* defined(BCMINTERNAL) && defined(BCMDBG) */ + +#endif /* DHD_DEBUG */ + +#if defined(DHD_SPROM) + case IOV_SVAL(IOV_SPROM): + case IOV_GVAL(IOV_SPROM): + { + uint32 offset; + uint size, dsize; + + bool set = (actionid == IOV_SVAL(IOV_SPROM)); + + ASSERT(plen >= 2*sizeof(int)); + + offset = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Avoid bigger size of srom reads that may be requested from app. + * gSPI has only F1 OTP visible from CC. There is no CIS in gSPI. + */ + if (bus->bus == SPI_BUS) + size = SBSDIO_CIS_SIZE_LIMIT; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on srom %s, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "write" : "read"), offset, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + if ((offset > SROM_MAX) || ((offset + size) > SROM_MAX)) { + DHD_ERROR(("%s: error on srom %s, offset %d size %d exceeds limit %d\n", + __FUNCTION__, (set ? 
"write" : "read"), offset, size, SROM_MAX)); + bcmerror = BCME_BADARG; + break; + } + + if (!set) { + if (!ISALIGNED((uintptr)arg, sizeof(uint16))) { + DHD_ERROR(("%s: srom data pointer %p not word-aligned\n", + __FUNCTION__, arg)); + bcmerror = BCME_BADARG; + break; + } + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + bcmerror = srom_read(bus->sih, DHD_BUS, (void*)bus->regs, bus->dhd->osh, + offset, size, (uint16*)arg, FALSE); + GCC_DIAGNOSTIC_POP(); + + } else { + arg = (void*)((uintptr)arg + 2 * sizeof(int)); + if (!ISALIGNED((uintptr)arg, sizeof(uint16))) { + DHD_ERROR(("%s: srom data pointer %p not word-aligned\n", + __FUNCTION__, arg)); + bcmerror = BCME_BADARG; + break; + } + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + bcmerror = srom_write(bus->sih, DHD_BUS, (void*)bus->regs, bus->dhd->osh, + offset, size, (uint16*)arg); + GCC_DIAGNOSTIC_POP(); + } + break; + } +#endif /* DHD_SPROM */ + +#ifdef SDTEST + case IOV_GVAL(IOV_EXTLOOP): + int_val = (int32)bus->ext_loop; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_EXTLOOP): + bus->ext_loop = bool_val; + break; + + case IOV_GVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_get(bus, arg); + break; + + case IOV_SVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_set(bus, arg); + break; +#endif /* SDTEST */ + +#if defined(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL) + case IOV_GVAL(IOV_WATERMARK): + int_val = (int32)watermark; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WATERMARK): + watermark = (uint)int_val; + watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark; + DHD_ERROR(("Setting watermark as 0x%x.\n", watermark)); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL); + break; + + case IOV_GVAL(IOV_MESBUSYCTRL): + int_val = (int32)mesbusyctrl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MESBUSYCTRL): + mesbusyctrl = (uint)int_val; + mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK) + ? 
SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl; + DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl)); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, + ((uint8)mesbusyctrl | 0x80), NULL); + break; +#endif /* define(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL) */ + +#ifdef BCMINTERNAL + case IOV_GVAL(IOV_FIRSTREAD): + int_val = (int32)firstread; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FIRSTREAD): + if ((int_val < 12) || (int_val > 32)) { + bcmerror = BCME_BADARG; + break; + } + firstread = (uint)int_val; + break; + + case IOV_GVAL(IOV_TSTOPH): + int_val = tstoph; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TSTOPH): + if (tstoph && bus->dhd->busstate == DHD_BUS_DOWN) { + tstoph = bool_val; + bus->dhd->busstate = DHD_BUS_DATA; + if (bus->intr) { + bus->intdis = FALSE; + bcmsdh_intr_enable(bus->sdh); + } + } else { + tstoph = bool_val; + } + break; + + case IOV_GVAL(IOV_RETRYDATA): + int_val = (int32)retrydata; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RETRYDATA): + retrydata = bool_val; + break; + + case IOV_GVAL(IOV_CHECKFIFO): + int_val = (int32)checkfifo; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CHECKFIFO): + checkfifo = bool_val; + break; + + case IOV_GVAL(IOV_SDF2): + case IOV_SVAL(IOV_SDF2): + { + uint8 *buf; + int ret = BCME_OK; + + if (!(buf = MALLOC(bus->dhd->osh, int_val))) { + bcmerror = BCME_NOMEM; + break; + } + + if (actionid == IOV_SVAL(IOV_SDF2)) { + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, buf, int_val, NULL, NULL, NULL, 1); + } else { + ret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, buf, int_val, NULL, NULL, NULL); + } + if (ret != BCME_OK) { + bcmerror = BCME_SDIO_ERROR; + } + + MFREE(bus->dhd->osh, buf, int_val); + + break; + } + + case IOV_SVAL(IOV_CPU): + /* Go to the ARM core */ + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + break; + } + + /* Take the request action */ + if (bool_val) + si_core_reset(bus->sih, 0, 0); + else + si_core_disable(bus->sih, 0); + + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + + /* Return to the SDIO core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + break; + + case IOV_GVAL(IOV_CPU): + /* Go to the ARM core */ + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + break; + } + + /* Get its status */ + int_val = (int32)si_iscoreup(bus->sih); + bcopy(&int_val, arg, val_size); + + /* Return to the SDIO core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + break; + + case IOV_SVAL(IOV_SDRESET): + bcmsdh_reset(bus->sdh); + break; + + case IOV_SVAL(IOV_SDABORT): + if (int_val == 1 || int_val == 2) + bcmsdh_abort(bus->sdh, int_val); + else + bcmerror = BCME_BADARG; + break; + + case IOV_GVAL(IOV_DOFLOW): + int_val = (int32)dhd_doflow; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DOFLOW): + dhd_doflow = bool_val; + /* De flowcontrol if turning off flowcontrol */ + if (!dhd_doflow) + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); + break; + + case IOV_GVAL(IOV_CLOCKPOLL): + int_val = (int32)bus->clockpoll; + bcopy(&int_val, arg, val_size); + break; + + case 
IOV_GVAL(IOV_MAXRAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MAXRAMSIZE): + dhd_dongle_ramsize = int_val; + dhd_dongle_setramsize(bus, dhd_dongle_ramsize); + break; + + case IOV_SVAL(IOV_CLOCKPOLL): + bus->clockpoll = bool_val; + break; +#endif /* BCMINTERNAL */ + + case IOV_GVAL(IOV_DONGLEISOLATION): + int_val = bus->dhd->dongle_isolation; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DONGLEISOLATION): + bus->dhd->dongle_isolation = bool_val; + break; + + case IOV_SVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n", + __FUNCTION__, bool_val, bus->dhd->dongle_reset, + bus->dhd->busstate)); + + ASSERT(bus->dhd->osh); + /* ASSERT(bus->cl_devid); */ + + /* must release sdlock, since devreset also acquires it */ + dhd_os_sdunlock(bus->dhd); + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + dhd_os_sdlock(bus->dhd); + break; + /* + * softap firmware is updated through module parameter or android private command + */ + + case IOV_GVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__)); + + /* Get its status */ + int_val = (bool) bus->dhd->dongle_reset; + bcopy(&int_val, arg, val_size); + + break; + + case IOV_GVAL(IOV_KSO): + int_val = dhdsdio_sleepcsr_get(bus); + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DEVCAP): + int_val = dhdsdio_devcap_get(bus); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DEVCAP): + dhdsdio_devcap_set(bus, (uint8) int_val); + break; + case IOV_GVAL(IOV_TXGLOMSIZE): + int_val = (int32)bus->txglomsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXGLOMSIZE): + if (int_val > SDPCM_MAXGLOM_SIZE) { + bcmerror = BCME_ERROR; + } else { + bus->txglomsize = (uint)int_val; + } + break; + case IOV_SVAL(IOV_HANGREPORT): + bus->dhd->hang_report = bool_val; + DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report)); + break; + + case IOV_GVAL(IOV_HANGREPORT): + int_val = (int32)bus->dhd->hang_report; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_TXINRX_THRES): + int_val = bus->txinrx_thres; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TXINRX_THRES): + if (int_val < 0) { + bcmerror = BCME_BADARG; + } else { + bus->txinrx_thres = int_val; + } + break; + + case IOV_GVAL(IOV_SDIO_SUSPEND): + int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIO_SUSPEND): + if (bool_val) { /* Suspend */ + dhdsdio_suspend(bus); + } + else { /* Resume */ + dhdsdio_resume(bus); + } + break; + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + case IOV_SVAL(IOV_GDB_SERVER): + if (bool_val == TRUE) { + debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih)); + } else { + debugger_close(); + } + break; +#endif /* DEBUGGER || DHD_DSCOPE */ + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + + return bcmerror; +} + +static int +dhdsdio_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize, phys_size; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars are to be written, we still need to set the ramsize. 
*/ + varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + // terence 20150412: fix for nvram failed to download + if (bus->dhd->conf->chip == BCM43340_CHIP_ID || + bus->dhd->conf->chip == BCM43341_CHIP_ID) { + varsize = varsize ? ROUNDUP(varsize, 64) : 0; + varaddr = (bus->ramsize - 64) - varsize; + } + + varaddr += bus->dongle_ram_base; + + if (bus->vars) { + /* XXX: WAR for PR85623 */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) { + if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) { + DHD_ERROR(("PR85623WAR in place\n")); + varsize += 4; + varaddr -= 4; + } + } + + /* XXX In case the controller has trouble with odd bytes... */ + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + + /* Write the vars list */ + bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + return bcmerror; + } + +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) { + MFREE(bus->dhd->osh, vbuffer, varsize); + return BCME_NOMEM; + } + + /* Upload image to verify downloaded contents. */ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + +#ifdef MINIME + phys_size = bus->ramsize; +#else + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; +#endif + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + phys_size, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ +#ifdef DHD_DEBUG + if (bcmerror) { + varsizew = 0; + } else +#endif /* DHD_DEBUG */ + { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} + +bool +dhd_bus_is_multibp_capable(struct dhd_bus *bus) +{ + return MULTIBP_CAP(bus->sih); +} + +static int +dhdsdio_download_state(dhd_bus_t *bus, bool enter) +{ + uint retries; + int bcmerror = 0; + int foundcr4 = 0; + + if (!bus->sih) + return BCME_ERROR; + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). 
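+	 * CR4-based chips have no separate SOCRAM core: enter instead holds
+	 * the CPU with SICF_CPUHALT, and exit writes the reset vector
+	 * (bus->resetinstr) to address 0 before releasing the halt.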
+ */ + if (enter) { + bus->alp_only = TRUE; + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + foundcr4 = 1; + } else { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } + + if (!foundcr4) { + si_core_disable(bus->sih, 0); + if (bcmsdh_regfail(bus->sdh)) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", + __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) { + /* Disabling Remap for SRAM_3 */ + si_socram_set_bankpda(bus->sih, 0x3, 0x0); + } + + /* Clear the top bit of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, + (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + } else { + /* For CR4, + * Halt ARM + * Remove ARM reset + * Read RAM base address [0x18_0000] + * [next] Download firmware + * [done at else] Populate the reset vector + * [done at else] Remove ARM halt + */ + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + } + } else { + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + +#ifdef BCMSDIOLITE + if (!si_setcore(bus->sih, CC_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#else + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#endif + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + /* XXX Change standby configuration here if necessary */ + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + /* cr4 has no socram, but tcm's */ + /* write vars */ + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } +#ifdef BCMSDIOLITE + if (!si_setcore(bus->sih, CC_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#else + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#endif + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); + 
bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdsdio_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + /* verify write */ + bcmerror = dhdsdio_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + + /* now remove reset and halt and continue to run CR4 */ + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Allow HT Clock now that the ARM is running. */ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to SDIOD core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + + return bcmerror; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, uint plen, void *arg, uint len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + uint val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) { + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Turn on clock in case SD command needs backplane */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set); + + /* Check for bus configuration changes of interest */ + + /* If it was divisor change, read the new one */ + if (set && strcmp(name, "sd_divisor") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_divisor = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_divisor)); + } + } + /* If it was a mode change, read the new one */ + if (set && strcmp(name, "sd_mode") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_mode = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_mode)); + } + } + /* Similar check for blocksize change */ + if (set && strcmp(name, "sd_blocksize") == 0) { + int32 fnum = 2; + if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + 
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;
+	int err;
+	bool wlfc_enabled = FALSE;
+	unsigned long flags;
+
+	if (!bus->dhd)
+		return;
+
+	osh = bus->dhd->osh;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitlockfree(bus->sdh);
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
+		/* If the firmware has already hung, disable any interrupts */
+		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->hostintmask = 0;
+		bcmsdh_intr_disable(bus->sdh);
+	} else {
+
+		BUS_WAKE(bus);
+
+		if (KSO_ENAB(bus)) {
+
+			/* Enable clock for device interrupts */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Disable and clear interrupts at the chip level also */
+			W_SDREG(0, &bus->regs->hostintmask, retries);
+			local_hostintmask = bus->hostintmask;
+			bus->hostintmask = 0;
+
+			/* Force clocks on backplane to be sure F2 interrupt propagates */
+			saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if (!err) {
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					(saveclk | SBSDIO_FORCE_HT), &err);
+			}
+			if (err) {
+				DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
+					__FUNCTION__, err));
+			}
+
+			/* Turn off the bus (F2), free any pending packets */
+			/* XXX How to wake up any waiting processes? */
+			/* XXX New API: bcmsdh_fn_set(bus->sdh, SDIO_FUNC_2, FALSE); */
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+#if !defined(NDIS)
+			bcmsdh_intr_disable(bus->sdh); /* XXX bcmsdh_intr_mask(bus->sdh); */
+#endif /* !defined(NDIS) */
+#ifndef BCMSPI
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+#endif /* !BCMSPI */
+
+			/* Clear any pending interrupts now that F2 is disabled */
+			W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+		}
+
+		/* Turn off the backplane clock (only) */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+		/* Change our idea of bus state */
+		DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+	}
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when a new packet arrives from the network stack.
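+	 * The table holds pointers into packets still queued in bus->txq,
+	 * so it must be emptied before pktq_flush() frees them below.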
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	dhd_os_sdlock_txq(bus->dhd);
+	/* Clear the data packet queues */
+	pktq_flush(osh, &bus->txq, TRUE);
+	dhd_os_sdunlock_txq(bus->dhd);
+	}
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	/* XXX More important in disconnect, but no context? */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	/* Initialize tx_max to a reasonable value to start transfers;
+	 * it gets updated to the correct value after receiving the first
+	 * packet from the firmware.
+	 * XXX - Need to find the right mechanism to query the
+	 * firmware when the device is coming up
+	 */
+	bus->tx_max = 4;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
+#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
+extern uint sd_txglom;
+#endif
+void
+dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
+{
+	/* can't enable host txglom by default, some platforms have no
+	 * (or crappy) ADMA support and txglom will cause kernel assertions (e.g.
+	 * panda board)
+	 */
+	dhd_bus_t *bus = dhdp->bus;
+#ifdef BCMSDIOH_TXGLOM
+	uint32 rxglom;
+	int32 ret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BCMSDIOH_STD
+	if (enable)
+		enable = sd_txglom;
+#endif /* BCMSDIOH_STD */
+
+	if (enable) {
+		rxglom = 1;
+		ret = dhd_iovar(dhdp, 0, "bus:rxglom", (char *)&rxglom, sizeof(rxglom), NULL, 0,
+			TRUE);
+		if (ret >= 0)
+			bus->txglom_enable = TRUE;
+		else {
+#ifdef BCMSDIOH_STD
+			sd_txglom = 0;
+#endif /* BCMSDIOH_STD */
+			bus->txglom_enable = FALSE;
+		}
+	} else
+#endif /* BCMSDIOH_TXGLOM */
+		bus->txglom_enable = FALSE;
+	printf("%s: enable %d\n", __FUNCTION__, bus->txglom_enable);
+	dhd_conf_set_txglom_params(bus->dhd, bus->txglom_enable);
+	bcmsdh_set_mode(bus->sdh, bus->dhd->conf->txglom_mode);
+}
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = 0;
+#ifdef BCMSPI
+	uint32 dstatus = 0;	/* gSPI device-status bits */
+#else /* BCMSPI */
+	uint8 saveclk;
+#endif /* BCMSPI */
+#if defined(SDIO_ISR_THREAD)
+	int intr_extn;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	if (bus->sih->chip == BCM43362_CHIP_ID) {
+		printf("%s: delay 100ms for BCM43362\n", __FUNCTION__);
+		OSL_DELAY(100000); // terence 20131209: delay for 43362
+	}
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (bus->clkstate != CLK_AVAIL) {
+		DHD_ERROR(("%s: clock state is wrong. 
state = %d\n", __FUNCTION__, bus->clkstate)); + ret = -1; + goto exit; + } + +#ifdef BCMSPI + /* fake "ready" for spi, wake-wlan would have already enabled F1 and F2 */ + ready = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + enable = 0; + + /* Give the dongle some time to do its thing and set IOR2 */ + dhd_timeout_start(&tmo, WAIT_F2RXFIFORDY * WAIT_F2RXFIFORDY_DELAY * 1000); + while (!enable && !dhd_timeout_expired(&tmo)) { + dstatus = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + + if (enable) { + DHD_ERROR(("Took %u usec before dongle is ready\n", tmo.elapsed)); + enable = ready; + } else { + DHD_ERROR(("dstatus when timed out on f2-fifo not ready = 0x%x\n", dstatus)); + DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed)); + ret = -1; + goto exit; + } + +#else /* !BCMSPI */ + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (!err) { +#ifndef BCMQT + /* QT requires HT clock */ + if (bus->sih->chip == BCM43012_CHIP_ID || + bus->sih->chip == BCM43013_CHIP_ID || + bus->sih->chip == BCM43014_CHIP_ID) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_HT_AVAIL_REQ), &err); + } else +#endif /* BCMQT */ + { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + } + + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + ret = -1; + goto exit; + } + + /* Enable function 2 (frame transfers) */ + /* XXX New API: change to bcmsdh_fn_set(sdh, SDIO_FUNC_2, TRUE); */ + W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT), + &bus->regs->tosbmailboxdata, retries); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + + /* Give the dongle some time to do its thing and set IOR2 */ +#ifdef BCMSLTGT + dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000 * htclkratio); +#else + dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000); +#endif /* BCMSLTGT */ + + ready = 0; + while (ready != enable && !dhd_timeout_expired(&tmo)) + ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL); + +#endif /* !BCMSPI */ + + DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n", + __FUNCTION__, enable, ready, tmo.elapsed)); + +#if defined(SDIO_ISR_THREAD) + if (dhdp->conf->intr_extn) { + intr_extn = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, NULL); + if (intr_extn & 0x1) { + intr_extn |= 0x2; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, intr_extn, NULL); + } + } +#endif + + /* XXX For simplicity, fail and await next call if F2 not ready. + * XXX Should really set timer to poll, and/or enable interrupt; + * XXX then put this process in wait until a result... + */ + + /* If F2 successfully enabled, set core and enable interrupts */ + if (ready == enable) { + /* Make sure we're talking to the core. 
*/ +#ifdef BCMSDIOLITE + bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0); + ASSERT(bus->regs != NULL); +#else + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0))) + bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0); + ASSERT(bus->regs != NULL); +#endif + /* Set up the interrupt mask and enable interrupts */ + bus->hostintmask = HOSTINTMASK; + /* corerev 4 could use the newer interrupt logic to detect the frames */ +#ifndef BCMSPI + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) && + (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) { + bus->hostintmask &= ~I_HMB_FRAME_IND; + bus->hostintmask |= I_XMTDATA_AVAIL; + } +#endif /* BCMSPI */ + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + + /* PR47410 - Lower F2 Watermark to avoid DMA Hang + * in F2 when SD Clock is stopped. + */ + if (bus->sih->buscorerev < 15) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, + (uint8)watermark, &err); + } + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + /* Need to set fn2 block size to match fn1 block size. + * Requests to fn2 go thru fn1. * + * faltwig has this code contitioned with #if !BCMSPI_ANDROID. + * It would be cleaner to use the ->sdh->block_sz[fno] instead of + * 64, but this layer has no access to sdh types. + */ +#if defined(NDIS) + { + uint8 *ptr = NULL; + uint16 block_sz = 64; + ptr = (uint8*) &block_sz; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, + (SDIOD_FBR_BASE(SDIO_FUNC_2) + SDIOD_CCCR_BLKSIZE_0), + *ptr++, &err); + if (err == BCME_OK) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, + (SDIOD_FBR_BASE(SDIO_FUNC_2) + SDIOD_CCCR_BLKSIZE_1), + *ptr++, &err); + if (err != BCME_OK) { + printf("%s: set block size for func 2 failed\n", + __FUNCTION__); + ret = -1; + goto exit; + } + } +#endif /* NDIS */ + + /* XXX These need to change w/API updates */ + /* bcmsdh_intr_unmask(bus->sdh); */ + + bus->intdis = FALSE; + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); +#ifndef BCMSPI_ANDROID + bcmsdh_intr_enable(bus->sdh); +#endif /* !BCMSPI_ANDROID */ + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + +#ifdef DEBUG_LOST_INTERRUPTS + { + uint32 intstatus; + bool hostpending; + uint8 devena, devpend; + uint sdr_retries = 0; + + hostpending = bcmsdh_intr_pending(bus->sdh); + devena = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTEN, NULL); + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTPEND, NULL); + + R_SDREG(intstatus, &bus->regs->intstatus, sdr_retries); + intstatus &= bus->hostintmask; + + DHD_ERROR(("%s: interrupts -- host %s device ena/pend 0x%02x/0x%02x\n" + "intstatus 0x%08x, hostmask 0x%08x\n", __FUNCTION__, + (hostpending ? 
"PENDING" : "NOT PENDING"), + devena, devpend, intstatus, bus->hostintmask)); + } +#endif /* DEBUG_LOST_INTERRUPTS */ + } + +#ifndef BCMSPI + + else { + /* Disable F2 again */ + enable = SDIO_FUNC_ENABLE_1; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + } + + if (dhdsdio_sr_cap(bus)) { + dhdsdio_sr_init(bus); + /* Masking the chip active interrupt permanantly */ + bus->hostintmask &= ~I_CHIPACTIVE; + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n", + __FUNCTION__, bus->hostintmask)); + } else { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + } +#endif /* !BCMSPI */ + + /* If we didn't come up, turn off backplane clock */ + if (dhdp->busstate != DHD_BUS_DATA) + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + +exit: + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); + + /* XXX Temp errnum workaround: return ok, caller checks bus state */ + return ret; +} + +static void +dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + uint16 lastrbc; + uint8 hi, lo; + int err; + + DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__, + (abort ? "abort command, " : ""), (rtx ? ", send NAK" : ""))); + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return; + } + + if (abort) { + bcmsdh_abort(sdh, SDIO_FUNC_2); + } + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__)); + goto fail; + } + bus->f1regdata++; + + /* Wait until the packet has been flushed (device/FIFO stable) */ + for (lastrbc = retries = 0xffff; retries > 0; retries--) { + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_RFAMEBCLO cmd err\n", __FUNCTION__)); + goto fail; + } + + bus->f1regdata += 2; + + if ((hi == 0) && (lo == 0)) + break; + + if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { + DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n", + __FUNCTION__, lastrbc, ((hi << 8) + lo))); + } + lastrbc = (hi << 8) + lo; + } + + if (!retries) { + DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc)); + } else { + DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries))); + } + + if (rtx) { + bus->rxrtx++; + W_SDREG(SMB_NAK, ®s->tosbmailbox, retries); + bus->f1regdata++; + if (retries <= retry_limit) { + bus->rxskip = TRUE; + } + } + + /* Clear partial in any case */ + bus->nextlen = 0; + +fail: + /* If we can't reach the device, signal failure */ + if (err || bcmsdh_regfail(sdh)) { + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + } +} + +static void +dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff) +{ + bcmsdh_info_t *sdh = bus->sdh; + uint rdlen, pad; + + int sdret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Control data already received in aligned rxctl */ + if ((bus->bus == SPI_BUS) && (!bus->usebufpool)) + goto gotpkt; + + ASSERT(bus->rxbuf); + /* Set rxctl for frame (w/optional alignment) */ + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= 
bus->rxbuf); + + /* Copy the already-read portion over */ + bcopy(hdr, bus->rxctl, firstread); + if (len <= firstread) + goto gotpkt; + + /* Copy the full data pkt in gSPI case and process ioctl. */ + if (bus->bus == SPI_BUS) { + bcopy(hdr, bus->rxctl, len); + goto gotpkt; + } + + /* Raise rdlen to next SDIO block to avoid tail command */ + rdlen = len - firstread; + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((len + pad) < bus->dhd->maxctl)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + /* Drop if the read is too big or it exceeds our maximum */ + if ((rdlen + firstread) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n", + __FUNCTION__, rdlen, bus->dhd->maxctl)); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + if ((len - doff) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", + __FUNCTION__, len, (len - doff), bus->dhd->maxctl)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + /* XXX Could block readers with rxlen=0? */ + + /* Read remainder of frame body into the rxctl buffer */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (bus->rxctl + firstread), rdlen, NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret)); + bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */ + dhdsdio_rxfail(bus, TRUE, TRUE); + goto done; + } + +gotpkt: + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("RxCtrl", bus->rxctl, len); + } +#endif + + /* Point to valid data and indicate its length */ + bus->rxctl += doff; + bus->rxlen = len - doff; + +done: + /* Awake any waiters */ + dhd_os_ioctl_resp_wake(bus->dhd); +} +int +dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, + void **pkt, uint32 *pkt_count); + +static uint8 +dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) +{ + uint16 dlen, totlen; + uint8 *dptr, num = 0; + + uint16 sublen, check; + void *pfirst, *plast, *pnext; + void * list_tail[DHD_MAX_IFS] = { NULL }; + void * list_head[DHD_MAX_IFS] = { NULL }; + uint8 idx; + osl_t *osh = bus->dhd->osh; + + int errcode; + uint8 chan, seq, doff, sfdoff; + uint8 txmax; + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + + int ifidx = 0; + bool usechain = bus->use_rxchain; + + /* If packets, issue read(s) and send up packet chain */ + /* Return sequence numbers consumed? 
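+ *
+ * The control-path read sizing above follows one rule: grow the transfer to
+ * a whole block when block mode pays off, otherwise just round to the bus
+ * alignment. Condensed, with the same names as the code above:
+ *
+ *   uint rdlen = len - firstread;
+ *   if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ *       uint pad = bus->blocksize - (rdlen % bus->blocksize);
+ *       if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ *           rdlen += pad;                                // full-block read
+ *   } else if (rdlen % DHD_SDALIGN) {
+ *       rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);    // align only
+ *   }
+ *
+ * (The real code additionally bounds the result by bus->dhd->maxctl.)
+ *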
*/ + + DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom)); + + /* If there's a descriptor, generate the packet chain */ + if (bus->glomd) { + dhd_os_sdlock_rxq(bus->dhd); + + pfirst = plast = pnext = NULL; + dlen = (uint16)PKTLEN(osh, bus->glomd); + dptr = PKTDATA(osh, bus->glomd); + if (!dlen || (dlen & 1)) { + DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n", + __FUNCTION__, dlen)); + dlen = 0; + } + + for (totlen = num = 0; dlen; num++) { + /* Get (and move past) next length */ + sublen = ltoh16_ua(dptr); + dlen -= sizeof(uint16); + dptr += sizeof(uint16); + if ((sublen < SDPCM_HDRLEN) || + ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) { + DHD_ERROR(("%s: descriptor len %d bad: %d\n", + __FUNCTION__, num, sublen)); + pnext = NULL; + break; + } + if (sublen % DHD_SDALIGN) { + DHD_ERROR(("%s: sublen %d not a multiple of %d\n", + __FUNCTION__, sublen, DHD_SDALIGN)); + usechain = FALSE; + } + totlen += sublen; + + /* For last frame, adjust read len so total is a block multiple */ + if (!dlen) { + sublen += (ROUNDUP(totlen, bus->blocksize) - totlen); + totlen = ROUNDUP(totlen, bus->blocksize); + } + + /* Allocate/chain packet for next subframe */ + if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) { + DHD_ERROR(("%s: PKTGET failed, num %d len %d\n", + __FUNCTION__, num, sublen)); + break; + } + ASSERT(!PKTLINK(pnext)); + if (!pfirst) { + ASSERT(!plast); + pfirst = plast = pnext; + } else { + ASSERT(plast); + PKTSETNEXT(osh, plast, pnext); + plast = pnext; + } + + /* Adhere to start alignment requirements */ + PKTALIGN(osh, pnext, sublen, DHD_SDALIGN); + } + + /* If all allocations succeeded, save packet chain in bus structure */ + if (pnext) { + DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n", + __FUNCTION__, totlen, num)); + if (DHD_GLOM_ON() && bus->nextlen) { + if (totlen != bus->nextlen) { + DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d " + "rxseq %d\n", __FUNCTION__, bus->nextlen, + totlen, rxseq)); + } + } + bus->glom = pfirst; + pfirst = pnext = NULL; + } else { + if (pfirst) + PKTFREE(osh, pfirst, FALSE); + bus->glom = NULL; + num = 0; + } + + /* Done with descriptor packet */ + PKTFREE(osh, bus->glomd, FALSE); + bus->glomd = NULL; + bus->nextlen = 0; + + dhd_os_sdunlock_rxq(bus->dhd); + } + + /* Ok -- either we just generated a packet chain, or had one from before */ + if (bus->glom) { + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__)); + for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) { + DHD_GLOM((" %p: %p len 0x%04x (%d)\n", + pnext, (uint8*)PKTDATA(osh, pnext), + PKTLEN(osh, pnext), PKTLEN(osh, pnext))); + } + } + + pfirst = bus->glom; + dlen = (uint16)pkttotlen(osh, pfirst); + + /* Do an SDIO read for the superframe. Configurable iovar to + * read directly into the chained packet, or allocate a large + * packet and and copy into the chain. 
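+ *
+ * The glom descriptor walked above is just an array of little-endian uint16
+ * subframe lengths. A sketch of the parse loop, with allocation and error
+ * handling elided:
+ *
+ *   uint16 dlen = (uint16)PKTLEN(osh, bus->glomd);
+ *   uint8 *dptr = PKTDATA(osh, bus->glomd);
+ *   while (dlen >= sizeof(uint16)) {
+ *       uint16 sublen = ltoh16_ua(dptr);   // length of the next subframe
+ *       dptr += sizeof(uint16);
+ *       dlen -= sizeof(uint16);
+ *       // PKTGET a sublen-byte packet and chain it with PKTSETNEXT
+ *   }
+ *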
+ */ + if (usechain) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, (uint8*)PKTDATA(osh, pfirst), + dlen, pfirst, NULL, NULL); + } else if (bus->dataptr) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, bus->dataptr, + dlen, NULL, NULL, NULL); + sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr); + if (sublen != dlen) { + DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n", + __FUNCTION__, dlen, sublen)); + errcode = -1; + } + pnext = NULL; + BCM_REFERENCE(pnext); + } else { + DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen)); + errcode = -1; + } + bus->f2rxdata++; + ASSERT(errcode != BCME_PENDING); + + /* On failure, kill the superframe, allow a couple retries */ + if (errcode < 0) { + DHD_ERROR(("%s: glom read of %d bytes failed: %d\n", + __FUNCTION__, dlen, errcode)); + bus->dhd->rx_errors++; /* XXX Account for rtx?? */ + + if (bus->glomerr++ < 3) { + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + return 0; + } + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("SUPERFRAME", PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 48)); + } +#endif + + /* Validate the superframe header */ + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + errcode = 0; + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, sublen, check)); + errcode = -1; + } else if (ROUNDUP(sublen, bus->blocksize) != dlen) { + DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n", + __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen)); + errcode = -1; + } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) { + DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__, + SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]))); + errcode = -1; + } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) { + DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || + (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) { + DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n", + __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), + SDPCM_HDRLEN)); + errcode = -1; + } + + /* Check sequence number of superframe SW header */ + if (rxseq != seq) { + DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Remove superframe header, remember offset */ + PKTPULL(osh, pfirst, doff); + sfdoff = doff; + + /* Validate all 
the subframe headers */ + for (num = 0, pnext = pfirst; pnext && !errcode; + num++, pnext = PKTNEXT(osh, pnext)) { + dptr = (uint8 *)PKTDATA(osh, pnext); + dlen = (uint16)PKTLEN(osh, pnext); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("subframe", dptr, 32); + } +#endif + + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (subframe %d): HW hdr error: " + "len/check 0x%04x/0x%04x\n", + __FUNCTION__, num, sublen, check)); + errcode = -1; + } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) { + DHD_ERROR(("%s (subframe %d): length mismatch: " + "len 0x%04x, expect 0x%04x\n", + __FUNCTION__, num, sublen, dlen)); + errcode = -1; + } else if ((chan != SDPCM_DATA_CHANNEL) && + (chan != SDPCM_EVENT_CHANNEL)) { + DHD_ERROR(("%s (subframe %d): bad channel %d\n", + __FUNCTION__, num, chan)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) { + DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n", + __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN)); + errcode = -1; + } + } + + if (errcode) { + /* Terminate frame on error, request a couple retries */ + if (bus->glomerr++ < 3) { + /* Restore superframe header space */ + PKTPUSH(osh, pfirst, sfdoff); + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + bus->nextlen = 0; + return 0; + } + + /* Basic SD framing looks ok - process each packet (header) */ + bus->glom = NULL; + plast = NULL; + + dhd_os_sdlock_rxq(bus->dhd); + for (num = 0; pfirst; rxseq++, pfirst = pnext) { + pnext = PKTNEXT(osh, pfirst); + PKTSETNEXT(osh, pfirst, NULL); + + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n", + __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst), + PKTLEN(osh, pfirst), sublen, chan, seq)); + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL)); + + if (rxseq != seq) { + DHD_GLOM(("%s: rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Subframe Data", dptr, dlen); + } +#endif + + PKTSETLEN(osh, pfirst, sublen); + PKTPULL(osh, pfirst, doff); + + reorder_info_len = sizeof(reorder_info_buf); + + if (PKTLEN(osh, pfirst) == 0) { + PKTFREE(bus->dhd->osh, pfirst, FALSE); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + bus->dhd->rx_errors++; + PKTFREE(osh, pfirst, FALSE); + continue; + } + if (reorder_info_len) { + uint32 free_buf_count; + void *ppfirst; + + ppfirst = pfirst; + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, + reorder_info_len, &ppfirst, &free_buf_count); + + if (free_buf_count == 0) { + continue; + } else { + void *temp; + + /* go to the end of the chain and attach the pnext there */ + temp = ppfirst; + while (PKTNEXT(osh, temp) != NULL) { + temp = PKTNEXT(osh, temp); + } + 
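+ /* The loop above walked to the tail of the chain handed back by the
+ * reorder logic; what follows is a plain singly-linked-list append, one
+ * list per interface index. The same pattern in isolation (generic names,
+ * not the variables used here):
+ *
+ *   if (list_tail[ifidx] == NULL)
+ *       list_head[ifidx] = chain;                  // first chain for ifidx
+ *   else
+ *       PKTSETNEXT(osh, list_tail[ifidx], chain);  // link after old tail
+ *   list_tail[ifidx] = chain_tail;                 // remember the new tail
+ */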
pfirst = temp; + if (list_tail[ifidx] == NULL) + list_head[ifidx] = ppfirst; + else + PKTSETNEXT(osh, list_tail[ifidx], ppfirst); + list_tail[ifidx] = pfirst; + } + + num += (uint8)free_buf_count; + } else { + /* this packet will go up, link back into chain and count it */ + + if (list_tail[ifidx] == NULL) { + list_head[ifidx] = list_tail[ifidx] = pfirst; + } else { + PKTSETNEXT(osh, list_tail[ifidx], pfirst); + list_tail[ifidx] = pfirst; + } + num++; + } +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n", + __FUNCTION__, num, pfirst, + PKTDATA(osh, pfirst), PKTLEN(osh, pfirst), + PKTNEXT(osh, pfirst), PKTLINK(pfirst))); + prhex("", (uint8 *)PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 32)); + } +#endif /* DHD_DEBUG */ + } + dhd_os_sdunlock_rxq(bus->dhd); + + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + if (list_head[idx]) { + void *temp; + uint8 cnt = 0; + temp = list_head[idx]; + do { + temp = PKTNEXT(osh, temp); + cnt++; + } while (temp); + if (cnt) { + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0); + dhd_os_sdlock(bus->dhd); + } + } + } + bus->rxglomframes++; + bus->rxglompkts += num; + } + return num; +} + +/* Return TRUE if there may be more frames to read */ +static uint +dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) +{ + osl_t *osh = bus->dhd->osh; + bcmsdh_info_t *sdh = bus->sdh; + + uint16 len, check; /* Extracted hardware header fields */ + uint8 chan, seq, doff; /* Extracted software header fields */ + uint8 fcbits; /* Extracted fcbits from software header */ + uint8 delta; + + void *pkt; /* Packet for event or data frames */ + uint16 pad; /* Number of pad bytes to read */ + uint16 rdlen; /* Total number of bytes to read */ + uint8 rxseq; /* Next sequence number to expect */ + uint rxleft = 0; /* Remaining number of frames allowed */ + int sdret; /* Return code from bcmsdh calls */ + uint8 txmax; /* Maximum tx sequence offered */ +#ifdef BCMSPI + uint32 dstatus = 0; /* gSPI device status bits of */ +#endif /* BCMSPI */ + bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */ + uint8 *rxbuf; + int ifidx = 0; + uint rxcount = 0; /* Total frames read */ + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + uint pkt_count; + +#if defined(DHD_DEBUG) || defined(SDTEST) + bool sdtest = FALSE; /* To limit message spew from test mode */ +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + bus->readframes = TRUE; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: KSO off\n", __FUNCTION__)); + bus->readframes = FALSE; + return 0; + } + + ASSERT(maxframes); + +#ifdef SDTEST + /* Allow pktgen to override maxframes */ + if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) { + maxframes = bus->pktgen_count; + sdtest = TRUE; + } +#endif + + /* Not finished unless we encounter no more frames indication */ + *finished = FALSE; + +#ifdef BCMSPI + /* Get pktlen from gSPI device F0 reg. */ + if (bus->bus == SPI_BUS) { + /* Peek in dstatus bits and find out size to do rx-read. 
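+ *
+ * The gSPI length decode below is a mask-and-shift on the device status
+ * word, with two quirks: a field value of 0 encodes the 2048-byte maximum,
+ * and dword mode scales the field by 4. In isolation:
+ *
+ *   uint16 nextlen = (dstatus & STATUS_F2_PKT_LEN_MASK) >>
+ *           STATUS_F2_PKT_LEN_SHIFT;
+ *   if (nextlen == 0)
+ *       nextlen = SPI_MAX_PKT_LEN;   // '0' means a maximum-size packet
+ *   if (bus->dwordmode)
+ *       nextlen <<= 2;               // field counts dwords, not bytes
+ *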
*/ + dstatus = bcmsdh_get_dstatus(bus->sdh); + if (dstatus == 0) + DHD_ERROR(("%s:ZERO spi dstatus, a case observed in PR61352 hit !!!\n", + __FUNCTION__)); + + DHD_TRACE(("Device status from regread = 0x%x\n", dstatus)); + DHD_TRACE(("Device status from bit-reconstruction = 0x%x\n", + bcmsdh_get_dstatus((void *)bus->sdh))); + + /* Check underflow also, WAR for PR55150 */ + if ((dstatus & STATUS_F2_PKT_AVAILABLE) && (((dstatus & STATUS_UNDERFLOW)) == 0)) { + bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >> + STATUS_F2_PKT_LEN_SHIFT); + /* '0' size with pkt-available interrupt is eqvt to 2048 bytes */ + bus->nextlen = (bus->nextlen == 0) ? SPI_MAX_PKT_LEN : bus->nextlen; + if (bus->dwordmode) + bus->nextlen = bus->nextlen << 2; + DHD_TRACE(("Entering %s: length to be read from gSPI = %d\n", + __FUNCTION__, bus->nextlen)); + } else { + if (dstatus & STATUS_F2_PKT_AVAILABLE) + DHD_ERROR(("Underflow during %s.\n", __FUNCTION__)); + else + DHD_ERROR(("False pkt-available intr.\n")); + *finished = TRUE; + return (maxframes - rxleft); + } + } +#endif /* BCMSPI */ + + for (rxseq = bus->rx_seq, rxleft = maxframes; + !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN; + rxseq++, rxleft--) { +#ifdef DHDTCPACK_SUP_DBG + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) { + if (bus->dotxinrx == FALSE) + DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n", + __FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode)); + } +#ifdef DEBUG_COUNTER + else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) { + tack_tbl.cnt[bus->dotxinrx ? 6 : 7]++; + } +#endif /* DEBUG_COUNTER */ +#endif /* DHDTCPACK_SUP_DBG */ + /* tx more to improve rx performance */ + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) { + dhdsdio_sendpendctl(bus); + } else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) && + !bus->fcstate && DATAOK(bus) && + (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) { + dhdsdio_sendfromq(bus, dhd_txbound); +#ifdef DHDTCPACK_SUPPRESS + /* In TCPACK_SUP_DELAYTX mode, do txinrx only if + * 1. Any DATA packet to TX + * 2. TCPACK to TCPDATA PSH packets. + * in bus txq. + */ + bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ? + FALSE : TRUE; +#endif + } + + /* Handle glomming separately */ + if (bus->glom || bus->glomd) { + uint8 cnt; + DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n", + __FUNCTION__, bus->glomd, bus->glom)); + cnt = dhdsdio_rxglom(bus, rxseq); + DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt)); + rxseq += cnt - 1; + rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; + continue; + } + + /* Try doing single read if we can */ + if (dhd_readahead && bus->nextlen) { + uint16 nextlen = bus->nextlen; + bus->nextlen = 0; + + if (bus->bus == SPI_BUS) { + rdlen = len = nextlen; + } else { + rdlen = len = nextlen << 4; + + /* Pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + } + + /* We use bus->rxctl buffer in WinXP for initial control pkt receives. + * Later we use buffer-poll for data as well as control packets. + * This is required because dhd receives full frame in gSPI unlike SDIO. + * After the frame is received we have to distinguish whether it is data + * or non-data frame. 
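+ *
+ * Note the unit conversion above for SDIO: the read-ahead length carried in
+ * the previous frame's software header is stored in 16-byte units, so the
+ * transfer size is reconstructed as
+ *
+ *   rdlen = len = nextlen << 4;   // nextlen is in units of 16 bytes
+ *
+ * which is also why header validation rejects frames whenever
+ * (bus->nextlen << 4) exceeds MAX_RX_DATASZ.
+ *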
+ */ + /* Allocate a packet buffer */ + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) { + if (bus->bus == SPI_BUS) { + bus->usebufpool = FALSE; + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + rxbuf = bus->rxctl; + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + +#ifdef BCMSPI + /* PR55150 WAR: Wait for next pkt-available interrupt for + * further processing + */ + if (bcmsdh_get_dstatus((void *)bus->sdh) & + STATUS_UNDERFLOW) { + bus->nextlen = 0; + *finished = TRUE; + DHD_ERROR(("%s: read %d control bytes failed " + "due to spi underflow\n", + __FUNCTION__, rdlen)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } +#endif /* BCMSPI */ + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? FALSE : TRUE); + continue; + } + } else { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " + "expected rxseq %d\n", + __FUNCTION__, len, rdlen, rxseq)); + /* XXX Can't issue retry (NAK), frame not started. */ + /* Just go try again w/normal header read */ + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } else { + if (bus->bus == SPI_BUS) + bus->usebufpool = TRUE; + + ASSERT(!PKTLINK(pkt)); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + rxbuf = (uint8 *)PKTDATA(osh, pkt); + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); +#ifdef BCMSPI + /* PR55150 WAR: Wait for next pkt-available interrupt for further + * processing + */ + if (bcmsdh_get_dstatus((void *)bus->sdh) & STATUS_UNDERFLOW) { + bus->nextlen = 0; + *finished = TRUE; + DHD_ERROR(("%s (nextlen): read %d bytes failed due " + "to spi underflow\n", + __FUNCTION__, rdlen)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } +#endif /* BCMSPI */ + + if (sdret < 0) { + DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; /* XXX Account for rtx?? */ + dhd_os_sdunlock_rxq(bus->dhd); + /* Force retry w/normal header read. Don't attempt NAK for + * gSPI + */ + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + continue; + } + } + dhd_os_sdunlock_rxq(bus->dhd); + + /* Now check the header */ + bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN); + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means readahead info was bad */ + if (!(len|check)) { + DHD_INFO(("%s (nextlen): read zeros in HW header???\n", + __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check" + " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen, + len, check)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + /* XXX Might choose to allow length 4 for signaling */ + DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n", + __FUNCTION__, len)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Check for consistency with readahead info */ +#ifdef BCMSPI + if (bus->bus == SPI_BUS) { + if (bus->dwordmode) { + uint16 spilen; + spilen = ROUNDUP(len, 4); + len_consistent = (nextlen != spilen); + } else + len_consistent = (nextlen != len); + } else +#endif /* BCMSPI */ + len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4)); + if (len_consistent) { + /* Mismatch, force retry w/normal header (may be >4K) */ + DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; " + "expected rxseq %d\n", + __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + +#ifdef BCMSPI + /* Save the readahead length if there is one */ + if (bus->bus == SPI_BUS) { + /* Use reconstructed dstatus bits and find out readahead size */ + dstatus = bcmsdh_get_dstatus((void *)bus->sdh); + DHD_INFO(("Device status from bit-reconstruction = 0x%x\n", + bcmsdh_get_dstatus((void *)bus->sdh))); + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >> + STATUS_F2_PKT_LEN_SHIFT); + bus->nextlen = (bus->nextlen == 0) ? 
+ SPI_MAX_PKT_LEN : bus->nextlen; + if (bus->dwordmode) + bus->nextlen = bus->nextlen << 2; + DHD_INFO(("readahead len from gSPI = %d \n", + bus->nextlen)); + bus->dhd->rx_readahead_cnt ++; + } else { + bus->nextlen = 0; + *finished = TRUE; + } + } else { +#endif /* BCMSPI */ + bus->nextlen = + bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large" + " (%d), seq %d\n", __FUNCTION__, bus->nextlen, + seq)); + bus->nextlen = 0; + } + + bus->dhd->rx_readahead_cnt ++; +#ifdef BCMSPI + } +#endif /* BCMSPI */ + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { +#ifdef BCMSPI + if ((bus->bus == SPI_BUS) && !(dstatus & STATUS_F2_RX_READY)) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_seq + 2; + } else { +#endif /* BCMSPI */ + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; +#ifdef BCMSPI + } +#endif /* BCMSPI */ + } + bus->tx_max = txmax; + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", rxbuf, len); + } else if (DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + if (chan == SDPCM_CONTROL_CHANNEL) { + if (bus->bus == SPI_BUS) { + dhdsdio_read_control(bus, rxbuf, len, doff); + if (bus->usebufpool) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + } + continue; + } else { + DHD_ERROR(("%s (nextlen): readahead on control" + " packet %d?\n", __FUNCTION__, seq)); + /* Force retry w/normal header read */ + bus->nextlen = 0; + dhdsdio_rxfail(bus, FALSE, TRUE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } + + if ((bus->bus == SPI_BUS) && !bus->usebufpool) { + DHD_ERROR(("Received %d bytes on %d channel. 
Running out of " + "rx pktbuf's or not yet malloced.\n", len, chan)); + continue; + } + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* All done with this one -- now deliver the packet */ + goto deliver; + } + /* gSPI frames should not be handled in fractions */ + if (bus->bus == SPI_BUS) { + break; + } + + /* Read frame header (hardware and software) */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + bus->rxhdr, firstread, NULL, NULL, NULL); + bus->f2rxhdrs++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret)); + bus->rx_hdrfail++; +#ifdef BCMINTERNAL + if (tstoph) { + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + continue; + } +#endif + dhdsdio_rxfail(bus, TRUE, TRUE); + continue; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() || DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means no more frames */ + if (!(len|check)) { + *finished = TRUE; + break; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, len, check)); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + /* XXX Might choose to allow length 4 for signaling */ + DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len)); + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN, seq)); + bus->rx_badhdr++; + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Save the readahead length if there is one */ + bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Call a separate function for control frames */ + if 
(chan == SDPCM_CONTROL_CHANNEL) { + dhdsdio_read_control(bus, bus->rxhdr, len, doff); + continue; + } + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) || + (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL)); + + /* Length to read */ + rdlen = (len > firstread) ? (len - firstread) : 0; + + /* May pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + if ((rdlen + firstread) > MAX_RX_DATASZ) { + /* Too long -- skip this frame */ + DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n", + __FUNCTION__, rdlen, chan)); + bus->dhd->rx_dropped++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan)); + continue; + } + dhd_os_sdunlock_rxq(bus->dhd); + + ASSERT(!PKTLINK(pkt)); + + /* XXX Should check len for small packets in case we're done? */ + /* Leave room for what we already read, and align remainder */ + ASSERT(firstread < (PKTLEN(osh, pkt))); + PKTPULL(osh, pkt, firstread); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + + /* Read the remaining frame data */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen, + ((chan == SDPCM_EVENT_CHANNEL) ? "event" : + ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; /* XXX Account for rtx?? 
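+ *
+ * On the hardware-header check used throughout this function: the second
+ * uint16 of the header is the one's complement of the length, so a header
+ * is valid exactly when (len ^ check) == 0xffff. The test
+ *
+ *   if ((uint16)~(len ^ check)) {
+ *       // mismatch: corrupt header, terminate the frame
+ *   }
+ *
+ * is the negation of that invariant: ~(len ^ check) truncated to 16 bits is
+ * non-zero whenever any bit pair violates the complement rule.
+ *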
*/ + dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan)); + continue; + } + + /* Copy the already-read portion */ + PKTPUSH(osh, pkt, firstread); + bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", PKTDATA(osh, pkt), len); + } +#endif + +deliver: + /* Save superframe descriptor and allocate packet frame */ + if (chan == SDPCM_GLOM_CHANNEL) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { + DHD_GLOM(("%s: got glom descriptor, %d bytes:\n", + __FUNCTION__, len)); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("Glom Data", PKTDATA(osh, pkt), len); + } +#endif + PKTSETLEN(osh, pkt, len); + ASSERT(doff == SDPCM_HDRLEN); + PKTPULL(osh, pkt, SDPCM_HDRLEN); + bus->glomd = pkt; + } else { + DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__)); + dhdsdio_rxfail(bus, FALSE, FALSE); + } + continue; + } + + /* Fill in packet len and prio, deliver upward */ + PKTSETLEN(osh, pkt, len); + PKTPULL(osh, pkt, doff); + +#ifdef SDTEST + /* Test channel packets are processed separately */ + if (chan == SDPCM_TEST_CHANNEL) { + dhdsdio_testrcv(bus, pkt, seq); + continue; + } +#endif /* SDTEST */ + + if (PKTLEN(osh, pkt) == 0) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + continue; + } + + if (reorder_info_len) { + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len, + &pkt, &pkt_count); + if (pkt_count == 0) + continue; + } else { + pkt_count = 1; + } + + /* XXX Release the lock around the rx delivery: an OS (like Windows) + * might call tx in the same thread context, resulting in deadlock. 
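+ *
+ * The convention here: drop the bus lock for the duration of the up-call,
+ * then re-acquire before touching bus state again, e.g.
+ *
+ *   dhd_os_sdunlock(bus->dhd);                           // release bus lock
+ *   dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan); // may re-enter tx
+ *   dhd_os_sdlock(bus->dhd);                             // re-acquire
+ *
+ * so anything cached from before the unlock must be revalidated afterwards.
+ *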
+ */
+ /* Unlock during rx call */
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+ dhd_os_sdlock(bus->dhd);
+#if defined(SDIO_ISR_THREAD)
+ /* terence 20150615: fix for below error due to bussleep in watchdog after dhd_os_sdunlock here,
+ * so call BUS_WAKE to wake up bus again
+ * dhd_bcmsdh_recv_buf: Device asleep
+ * dhdsdio_readframes: RXHEADER FAILED: -40
+ * dhdsdio_rxfail: abort command, terminate frame, send NAK
+ */
+ BUS_WAKE(bus);
+#endif
+ }
+ rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+ /* Message if we hit the limit */
+ if (!rxleft && !sdtest)
+ DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+ else
+#endif /* DHD_DEBUG */
+ DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+ if (bus->reqbussleep)
+ {
+ dhdsdio_bussleep(bus, TRUE);
+ bus->reqbussleep = FALSE;
+ }
+ bus->readframes = FALSE;
+
+ return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus, uint32 *hmbd)
+{
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus = 0;
+ uint32 hmb_data;
+ uint8 fcbits;
+ uint retries = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Read mailbox data and ack that we did so */
+ R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+ bus->f1regdata += 2;
+
+ /* Dongle recomposed rx frames, accept them again */
+ if (hmb_data & HMB_DATA_NAKHANDLED) {
+ DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+ /* XXX ASSERT(bus->rxskip); */
+ if (!bus->rxskip) {
+ DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+ }
+ bus->rxskip = FALSE;
+ intstatus |= FRAME_AVAIL_MASK(bus);
+ }
+
+ /*
+ * DEVREADY does not occur with gSPI.
+ */
+ if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+ bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+ DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION));
+ else
+ DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+#ifndef BCMSPI
+ /* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+ (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+ uint32 val;
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ val &= ~CC_XMTDATAAVAIL_MODE;
+ val |= CC_XMTDATAAVAIL_CTRL;
+ W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ }
+#endif /* BCMSPI */
+
+#ifdef DHD_DEBUG
+ /* Retrieve console state address now that firmware should have updated it */
+ {
+ sdpcm_shared_t shared;
+ if (dhdsdio_readshared(bus, &shared) == 0)
+ bus->console_addr = shared.console_addr;
+ }
+#endif /* DHD_DEBUG */
+ }
+
+ /*
+ * Flow Control has been moved into the RX headers and this out-of-band
+ * method isn't used any more. Leave this here to remain backward
+ * compatible with older dongles.
+ */
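+ /* For orientation, the to-host mailbox word decoded in this function
+ * carries single-bit flags (NAKHANDLED, DEVREADY, FWREADY, FWHALT, FC)
+ * plus two packed fields, extracted with the masks used above and below:
+ *
+ *   uint8 ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ *   uint8 fc  = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+ */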
+ if (hmb_data & HMB_DATA_FC) {
+ fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+ if (fcbits & ~bus->flowcontrol)
+ bus->fc_xoff++;
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* At least print a message if FW halted */
+ if (hmb_data & HMB_DATA_FWHALT) {
+ DHD_ERROR(("FIRMWARE HALTED\n"));
+ dhdsdio_checkdied(bus, NULL, 0);
+ }
+
+ /* Shouldn't be any others */
+ if (hmb_data & ~(HMB_DATA_DEVREADY |
+ HMB_DATA_FWHALT |
+ HMB_DATA_NAKHANDLED |
+ HMB_DATA_FC |
+ HMB_DATA_FWREADY |
+ HMB_DATA_FCDATA_MASK |
+ HMB_DATA_VERSION_MASK)) {
+ DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+ }
+
+ if (hmbd) {
+ *hmbd = hmb_data;
+ }
+
+ return intstatus;
+}
+
+#ifdef BCMSDIO_INTSTATUS_WAR
+static uint32
+dhdsdio_read_intstatus_byte(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 newstatus = 0, intstatus_byte = 0;
+ uint retries = 0;
+ int err1 = 0, err2 = 0, err3 = 0, err4 = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* read_intr_mode:
+ * 0: word mode only (default)
+ * 1: byte mode after read word failed
+ * 2: byte mode only
+ */
+ if (bus->dhd->conf->read_intr_mode) {
+ if (bus->dhd->conf->read_intr_mode == 1) {
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ if (!bcmsdh_regfail(bus->sdh)) {
+ goto exit;
+ }
+ }
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 0, &err1);
+ if (!err1)
+ newstatus |= intstatus_byte;
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 1, &err2) << 8;
+ if (!err2)
+ newstatus |= intstatus_byte;
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 2, &err3) << 16;
+ if (!err3)
+ newstatus |= intstatus_byte;
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 3, &err4) << 24;
+ if (!err4)
+ newstatus |= intstatus_byte;
+
+ if (!err1 || !err2 || !err3 || !err4)
+ sdh->regfail = FALSE;
+ }
+ else {
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ }
+
+exit:
+ return newstatus;
+}
+#endif
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus, newstatus = 0;
+ uint retries = 0;
+ uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+ uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+ uint framecnt = 0; /* Temporary counter of tx/rx frames */
+ bool rxdone = TRUE; /* Flag for no more read data */
+ bool resched = FALSE; /* Flag indicating resched wanted */
+ unsigned long flags;
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ bool is_resched_by_readframe = FALSE;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_os_sdlock(bus->dhd);
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+ bus->intstatus = 0;
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+ dhd_os_sdunlock(bus->dhd);
+ return 0;
+ }
+
+ DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+ /* Start with leftover status bits */
+ intstatus = bus->intstatus;
+
+ if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ goto exit;
+ }
+
+ /* If waiting for HTAVAIL, check status */
+ if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+ int err;
+ uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+ /* Check for inconsistent device control */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ } else {
+ ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+ }
+#endif /* DHD_DEBUG */
+
+ /* Read CSR, if clock on switch to AVAIL, else ignore */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+
+ DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+ if (SBSDIO_HTAV(clkctl)) {
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+ __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ if (err) {
+ DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+ __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ bus->clkstate = CLK_AVAIL;
+ } else {
+ goto clkwait;
+ }
+ }
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+ if (bus->clkstate != CLK_AVAIL)
+ goto clkwait;
+
+ /* Pending interrupt indicates new device status */
+ if (bus->ipend) {
+ bus->ipend = FALSE;
+#if defined(BT_OVER_SDIO)
+ bcmsdh_btsdio_process_f3_intr();
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef BCMSDIO_INTSTATUS_WAR
+ newstatus = dhdsdio_read_intstatus_byte(bus);
+#else
+ R_SDREG(newstatus, &regs->intstatus, retries);
+#endif
+ bus->f1regdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ newstatus = 0;
+ newstatus &= bus->hostintmask;
+ bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+ if (newstatus) {
+ bus->f1regdata++;
+#ifndef BCMSPI
+ if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+ (newstatus == I_XMTDATA_AVAIL)) {
+ } else
+#endif /* BCMSPI */
+ W_SDREG(newstatus, &regs->intstatus, retries);
+ }
+ }
+
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+
+ /* Handle flow-control change: read new state in case our ack
+ * crossed another change interrupt. If change still set, assume
+ * FC ON for safety, let next loop through do the debounce.
+ */
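+ /* In sketch form, the debounce below is ack-then-resample:
+ *
+ *   W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries); // ack the change
+ *   R_SDREG(newstatus, &regs->intstatus, retries);       // sample again
+ *   bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ *
+ * If another change raced with the ack, FC is treated as ON and the next
+ * DPC pass settles it.
+ */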
+ if (intstatus & I_HMB_FC_CHANGE) {
+ intstatus &= ~I_HMB_FC_CHANGE;
+ W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata += 2;
+ bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ intstatus |= (newstatus & bus->hostintmask);
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ uint32 hmbdata = 0;
+
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= dhdsdio_hostmail(bus, &hmbdata);
+
+ }
+
+#ifdef DHD_UCODE_DOWNLOAD
+exit_ucode:
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ /* Just being here means nothing more to do for chipactive */
+ if (intstatus & I_CHIPACTIVE) {
+ /* ASSERT(bus->clkstate == CLK_AVAIL); */
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= dhdsdio_hostmail(bus, NULL);
+ }
+
+ /* Generally don't ask for these, can get CRC errors... */
+ /* XXX Besides noting the error, should we ABORT/TERM? */
+ if (intstatus & I_WR_OOSYNC) {
+ DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+ intstatus &= ~I_WR_OOSYNC;
+ }
+
+ if (intstatus & I_RD_OOSYNC) {
+ DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+ intstatus &= ~I_RD_OOSYNC;
+ }
+
+ /* XXX Should reset or something here... */
+ if (intstatus & I_SBINT) {
+ DHD_ERROR(("Dongle reports SBINT\n"));
+ intstatus &= ~I_SBINT;
+ }
+
+ /* Would be active due to wake-wlan in gSPI */
+ if (intstatus & I_CHIPACTIVE) {
+ DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ if (intstatus & I_HMB_FC_STATE) {
+ DHD_INFO(("Dongle reports HMB_FC_STATE\n"));
+ intstatus &= ~I_HMB_FC_STATE;
+ }
+
+ /* Ignore frame indications if rxskip is set */
+ if (bus->rxskip) {
+ intstatus &= ~FRAME_AVAIL_MASK(bus);
+ }
+
+ /* On frame indication, read available frames */
+ if (PKT_AVAILABLE(bus, intstatus)) {
+#ifdef BCMINTERNAL
+ if (checkfifo) {
+ int count, regerrs = 0;
+ uint32 fifoaddr, rdptr, rdoffs, endptrs;
+ uint32 datalow[8], datahigh[8];
+ uint coretype = bus->sih->buscoretype;
+ uint corerev = bus->sdpcmrev;
+
+ /* set fifoaddr to fetch xmt fifo pointers */
+ fifoaddr = (0xB << 16);
+ W_SDREG(fifoaddr, &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(rdptr, &SDPCMFIFOREG(bus, coretype, corerev)->fifodatalow, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+
+ /* Read the first 8 words out of the FIFO */
+ for (count = 0, rdoffs = (rdptr & 0x7F); count < 8; count++) {
+ fifoaddr = (0xA << 16) | rdoffs;
+ W_SDREG(fifoaddr,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(datalow[count],
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifodatalow,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ W_SDREG(fifoaddr,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(datahigh[count],
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifodatahigh,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ rdoffs = (rdoffs + 1) & 0x7F;
+ }
+
+ /* For the heck of it, read the pointers again */
+ fifoaddr = (0xB << 16);
+ W_SDREG(fifoaddr,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(endptrs,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifodatalow, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+
+ printf("Initial read of Transmit DMA Pointers: 0x%08x\n", rdptr);
printf("Transmit DMA Data\n"); + for (count = 0, rdoffs = (rdptr & 0x7F); count < 8; count++) { + printf("0x%08x: 0x%08x 0x%08x\n", rdoffs, + datahigh[count], datalow[count]); + rdoffs = (rdoffs + 1) & 0x7F; + } + printf("Final read of Transmit DMA Pointers: 0x%08x\n", endptrs); + printf("Register errors: %d\n", regerrs); + + checkfifo = FALSE; + } +#endif /* BCMINTERNAL */ + + framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone); + if (rxdone || bus->rxskip) + intstatus &= ~FRAME_AVAIL_MASK(bus); + rxlimit -= MIN(framecnt, rxlimit); + } + + /* Keep still-pending events for next scheduling */ + bus->intstatus = intstatus; + +clkwait: + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) + */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) && + !(bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ +#if !defined(NDIS) + bcmsdh_intr_enable(sdh); +#endif /* !defined(NDIS) */ +#ifdef BCMSPI_ANDROID + if (*dhd_spi_lockcount == 0) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* BCMSPI_ANDROID */ + } + +#if defined(OOB_INTR_ONLY) && !defined(HW_OOB) + /* In case of SW-OOB(using edge trigger), + * Check interrupt status in the dongle again after enable irq on the host. + * and rechedule dpc if interrupt is pended in the dongle. + * There is a chance to miss OOB interrupt while irq is disabled on the host. + * No need to do this with HW-OOB(level trigger) + */ + R_SDREG(newstatus, ®s->intstatus, retries); + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + if (newstatus & bus->hostintmask) { + bus->ipend = TRUE; + resched = TRUE; + } +#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */ + +#ifdef BCMSDIO_RXLIM_POST + if (!DATAOK(bus) && bus->rxlim_en) { + uint8 rxlim = 0; + if (0 == dhdsdio_membytes(bus, FALSE, bus->rxlim_addr, (uint8 *)&rxlim, 1)) { + if (bus->tx_max != rxlim) { + DHD_INFO(("%s: bus->tx_max/rxlim=%d/%d\n", __FUNCTION__, + bus->tx_max, rxlim)); + bus->tx_max = rxlim; + } + } + } +#endif /* BCMSDIO_RXLIM_POST */ + +#ifdef PROP_TXSTATUS + dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE); +#endif + + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) + dhdsdio_sendpendctl(bus); +#ifdef CONSOLE_DPC + else if (DATAOK(bus) && strlen(bus->cons_cmd) && (bus->clkstate == CLK_AVAIL) && + !bus->fcstate) { + dhd_bus_console_in(bus->dhd, bus->cons_cmd, strlen(bus->cons_cmd)); + } +#endif + + /* Send queued frames (limit 1 if rx may still be pending) */ + else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && + pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { + + if (bus->dhd->conf->dhd_txminmax < 0) + framecnt = rxdone ? txlimit : MIN(txlimit, DATABUFCNT(bus)); + else + framecnt = rxdone ? 
txlimit : MIN(txlimit, bus->dhd->conf->dhd_txminmax); + framecnt = dhdsdio_sendfromq(bus, framecnt); + txlimit -= framecnt; + } + /* Resched the DPC if ctrl cmd is pending on bus credit */ + if (bus->ctrl_frame_stat) { + if (bus->dhd->conf->txctl_tmo_fix) { + set_current_state(TASK_INTERRUPTIBLE); + if (!kthread_should_stop()) + schedule_timeout(1); + set_current_state(TASK_RUNNING); + } + resched = TRUE; + } + + /* Resched if events or tx frames are pending, else await next interrupt */ + /* On failed register access, all bets are off: no resched or interrupts */ + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) { + if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) & + SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + /* Bus failed because of KSO */ + DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__)); + bus->kso = FALSE; + } else { + DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n", + __FUNCTION__)); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->intstatus = 0; + /* XXX Under certain conditions it may be reasonable to enable interrupts. + * E.g. if we get occasional 'bcmsdh_regfail' we should be able to continue + * operation. May want to make the decision to enable or not based on count + * of failures, so in case of bus lock up we avoid continuous interrupt. + */ + } + } else if (bus->clkstate == CLK_PENDING) { + /* Awaiting I_CHIPACTIVE; don't resched */ + } else if (bus->intstatus || bus->ipend || + (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) || + PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */ + resched = TRUE; + } + + bus->dpc_sched = resched; + + /* If we're done for now, turn off clock request. */ + /* XXX Leave request on if just waiting for new credit? */ + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + +exit: + + if (!resched) { + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) 
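+ *
+ * The resched decision above boils down to one predicate, roughly:
+ *
+ *   resched = bus->intstatus || bus->ipend ||
+ *           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+ *           DATAOK(bus)) || PKT_AVAILABLE(bus, bus->intstatus);
+ *
+ * except that a downed bus, a failed register access, or a still-pending
+ * clock switch suppresses rescheduling entirely.
+ *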
+ */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) && + (bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); + } + if (dhd_dpcpoll) { + if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) { + resched = TRUE; +#ifdef DEBUG_DPC_THREAD_WATCHDOG + is_resched_by_readframe = TRUE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + } + } + } + +#ifdef TPUT_MONITOR + dhd_conf_tput_monitor(bus->dhd); +#endif + + if (bus->ctrl_wait && TXCTLOK(bus)) + wake_up_interruptible(&bus->ctrl_tx_wait); + dhd_os_sdunlock(bus->dhd); +#ifdef DEBUG_DPC_THREAD_WATCHDOG + if (bus->dhd->dhd_bug_on) { + DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x" + " ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n", + __FUNCTION__, resched, bus->ctrl_frame_stat, + bus->intstatus, bus->ipend, + pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe)); + + bus->dhd->dhd_bug_on = FALSE; + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + return resched; +} + +bool +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched; + + /* Call the DPC directly. */ + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + resched = dhdsdio_dpc(bus); + + return resched; +} + +void +dhdsdio_isr(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bcmsdh_info_t *sdh; + + if (!bus) { + DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); + return; + } + sdh = bus->sdh; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return; + } + /* XXX Overall operation: + * XXX - Mask further interrupts + * XXX - Read/ack intstatus + * XXX - Take action based on bits and state + * XXX - Reenable interrupts (as per state) + */ + + DHD_INTR(("%s: Enter\n", __FUNCTION__)); + + /* Count the interrupt call */ + bus->intrcount++; + bus->ipend = TRUE; + + /* Shouldn't get this interrupt if we're sleeping? */ + if (!SLPAUTO_ENAB(bus)) { + if (bus->sleeping) { + DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n")); + return; + } else if (!KSO_ENAB(bus)) { + DHD_ERROR(("ISR in devsleep 1\n")); + } + } + + /* Disable additional interrupts (is this needed now)? */ + if (bus->intr) { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + } else { + DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n")); + } + +#ifdef BCMSPI_ANDROID + bcmsdh_oob_intr_set(bus->sdh, FALSE); +#endif /* BCMSPI_ANDROID */ +#if !defined(NDIS) + bcmsdh_intr_disable(sdh); /* XXX New API: bcmsdh_intr_mask()? */ +#endif /* !defined(NDIS) */ + bus->intdis = TRUE; + +#if defined(SDIO_ISR_THREAD) + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + /* terence 20150209: dpc should be scheded again if dpc_sched is TRUE or dhd_bus_txdata can + not schedule anymore because dpc_sched is TRUE now. 
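+     * In other words: when dhdsdio_dpc() returns TRUE there is residual or
+     * newly-arrived work, so mark dpc_sched and hand off to the DPC thread
+     * rather than dropping the event.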
+     */
+    if (dhdsdio_dpc(bus)) {
+        bus->dpc_sched = TRUE;
+        dhd_sched_dpc(bus->dhd);
+    }
+    DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+#if !defined(NDIS)
+    bus->dpc_sched = TRUE;
+    dhd_sched_dpc(bus->dhd);
+#endif /* !defined(NDIS) */
+#endif /* defined(SDIO_ISR_THREAD) */
+
+}
+
+#ifdef PKT_STATICS
+void
+dhd_bus_dump_txpktstatics(dhd_pub_t *dhdp)
+{
+    dhd_bus_t *bus = dhdp->bus;
+    uint32 total = 0;
+    uint i;
+
+    printf("%s: TYPE EVENT: %d pkts (size=%d) transferred\n",
+        __FUNCTION__, bus->tx_statics.event_count, bus->tx_statics.event_size);
+    printf("%s: TYPE CTRL: %d pkts (size=%d) transferred\n",
+        __FUNCTION__, bus->tx_statics.ctrl_count, bus->tx_statics.ctrl_size);
+    printf("%s: TYPE DATA: %d pkts (size=%d) transferred\n",
+        __FUNCTION__, bus->tx_statics.data_count, bus->tx_statics.data_size);
+    printf("%s: Glom size distribution:\n", __FUNCTION__);
+    for (i = 0; i < bus->tx_statics.glom_max; i++) {
+        total += bus->tx_statics.glom_cnt[i];
+    }
+    printk(KERN_CONT DHD_LOG_PREFIXS);
+    for (i = 0; i < bus->tx_statics.glom_max; i++) {
+        printk(KERN_CONT "%02d: %5d", i+1, bus->tx_statics.glom_cnt[i]);
+        if ((i+1)%8)
+            printk(KERN_CONT ", ");
+        else {
+            printk("\n");
+            printk(KERN_CONT DHD_LOG_PREFIXS);
+        }
+    }
+    printk("\n");
+    printk(KERN_CONT DHD_LOG_PREFIXS);
+    for (i = 0; i < bus->tx_statics.glom_max; i++) {
+        /* Guard the division in case no glommed packets were recorded */
+        printk(KERN_CONT "%02d:%5d%%", i+1,
+            total ? (bus->tx_statics.glom_cnt[i]*100)/total : 0);
+        if ((i+1)%8)
+            printk(KERN_CONT ", ");
+        else {
+            printk("\n");
+            printk(KERN_CONT DHD_LOG_PREFIXS);
+        }
+    }
+    printk("\n");
+    printf("%s: Glom elapsed time distribution (us):\n", __FUNCTION__);
+    printk(KERN_CONT DHD_LOG_PREFIXS);
+    for (i = 0; i < bus->tx_statics.glom_max; i++) {
+        printk(KERN_CONT "%02d: %5u", i+1, bus->tx_statics.glom_cnt_us[i]);
+        if ((i+1)%8)
+            printk(KERN_CONT ", ");
+        else {
+            printk("\n");
+            printk(KERN_CONT DHD_LOG_PREFIXS);
+        }
+    }
+    printk("\n");
+    if (total) {
+        printf("%s: data(%d)/glom(%d)=%d, glom_max=%d\n",
+            __FUNCTION__, bus->tx_statics.data_count, total,
+            bus->tx_statics.data_count/total, bus->tx_statics.glom_max);
+    }
+    printf("%s: TYPE RX GLOM: %d pkts (size=%d) transferred\n",
+        __FUNCTION__, bus->tx_statics.glom_count, bus->tx_statics.glom_size);
+    printf("%s: TYPE TEST: %d pkts (size=%d) transferred\n",
+        __FUNCTION__, bus->tx_statics.test_count, bus->tx_statics.test_size);
+
+#ifdef KSO_DEBUG
+    printf("%s: kso try distribution(us):\n", __FUNCTION__);
+    printk(KERN_CONT DHD_LOG_PREFIXS);
+    for (i = 0; i < 10; i++) {
+        printk(KERN_CONT "[%d]: %d, ", i, dhdp->conf->kso_try_array[i]);
+    }
+    printk("\n");
+#endif
+}
+
+void
+dhd_bus_clear_txpktstatics(dhd_pub_t *dhdp)
+{
+    dhd_bus_t *bus = dhdp->bus;
+    memset((uint8*) &bus->tx_statics, 0, sizeof(pkt_statics_t));
+}
+#endif
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+    /* Default to specified length, or full range */
+    if (dhd_pktgen_len) {
+        bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+        bus->pktgen_minlen = bus->pktgen_maxlen;
+    } else {
+        bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+        bus->pktgen_minlen = 0;
+    }
+    bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+    /* Default to per-watchdog burst with 10s print time */
+    bus->pktgen_freq = 1;
+    bus->pktgen_print = dhd_watchdog_ms ?
+        (10000 / dhd_watchdog_ms) : 0;
+    bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+    /* Default to echo mode */
+    bus->pktgen_mode = DHD_PKTGEN_ECHO;
+    bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+    void *pkt;
+    uint8 *data;
+    uint pktcount;
+    uint fillbyte;
+    osl_t *osh = bus->dhd->osh;
+    uint16 len;
+#if defined(LINUX)
+    ulong time_lapse;
+    uint sent_pkts;
+    uint rcvd_pkts;
+#endif /* LINUX */
+
+    /* Display current count if appropriate */
+    if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+        bus->pktgen_ptick = 0;
+        printf("%s: send attempts %d, rcvd %d, errors %d\n",
+            __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+
+#if defined(LINUX)
+        /* Print throughput stats only for constant length packet runs */
+        if (bus->pktgen_minlen == bus->pktgen_maxlen) {
+            time_lapse = jiffies - bus->pktgen_prev_time;
+            bus->pktgen_prev_time = jiffies;
+            sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
+            bus->pktgen_prev_sent = bus->pktgen_sent;
+            rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
+            bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
+
+            printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
+                __FUNCTION__,
+                (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
+                (rcvd_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8);
+        }
+#endif /* LINUX */
+    }
+
+    /* For recv mode, just make sure dongle has started sending */
+    if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+        if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+            bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+            dhdsdio_sdtest_set(bus, bus->pktgen_total);
+        }
+        return;
+    }
+
+    /* Otherwise, generate or request the specified number of packets */
+    for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+        /* Stop if total has been reached */
+        if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+            bus->pktgen_count = 0;
+            break;
+        }
+
+        /* Allocate an appropriate-sized packet */
+        if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+            len = SDPCM_TEST_PKT_CNT_FLD_LEN;
+        } else {
+            len = bus->pktgen_len;
+        }
+        if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+            TRUE))) {
+            DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+            break;
+        }
+        PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+        data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+        /* Write test header cmd and extra based on mode */
+        switch (bus->pktgen_mode) {
+        case DHD_PKTGEN_ECHO:
+            *data++ = SDPCM_TEST_ECHOREQ;
+            *data++ = (uint8)bus->pktgen_sent;
+            break;
+
+        case DHD_PKTGEN_SEND:
+            *data++ = SDPCM_TEST_DISCARD;
+            *data++ = (uint8)bus->pktgen_sent;
+            break;
+
+        case DHD_PKTGEN_RXBURST:
+            *data++ = SDPCM_TEST_BURST;
+            *data++ = (uint8)bus->pktgen_count; /* Just for backward compatibility */
+            break;
+
+        default:
+            DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+            PKTFREE(osh, pkt, TRUE);
+            bus->pktgen_count = 0;
+            return;
+        }
+
+        /* Write test header length field */
+        *data++ = (bus->pktgen_len >> 0);
+        *data++ = (bus->pktgen_len >> 8);
+
+        /* Write frame count in a 4 byte field adjacent to SDPCM test header for
+         * burst mode
+         */
+        if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+            *data++ = (uint8)(bus->pktgen_count >> 0);
+            *data++ = (uint8)(bus->pktgen_count >> 8);
+            *data++ = (uint8)(bus->pktgen_count >> 16);
+            *data++ = (uint8)(bus->pktgen_count >> 24);
+        } else {
+
+            /* Then fill in the remainder -- N/A for burst */
+            for (fillbyte = 0; fillbyte < len; fillbyte++)
+                *data++ =
SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent); + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN); + } +#endif + + /* Send it */ + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) { + bus->pktgen_fail++; + if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail) + bus->pktgen_count = 0; + } + bus->pktgen_sent++; + + /* Bump length if not fixed, wrap at max */ + if (++bus->pktgen_len > bus->pktgen_maxlen) + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Special case for burst mode: just send one request! */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) + break; + } +} + +static void +dhdsdio_sdtest_set(dhd_bus_t *bus, uint count) +{ + void *pkt; + uint8 *data; + osl_t *osh = bus->dhd->osh; + + /* Allocate the packet */ + if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + + SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) { + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + return; + } + PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + + SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Fill in the test header */ + *data++ = SDPCM_TEST_SEND; + *data++ = (count > 0)?TRUE:FALSE; + *data++ = (bus->pktgen_maxlen >> 0); + *data++ = (bus->pktgen_maxlen >> 8); + *data++ = (uint8)(count >> 0); + *data++ = (uint8)(count >> 8); + *data++ = (uint8)(count >> 16); + *data++ = (uint8)(count >> 24); + + /* Send it */ + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) + bus->pktgen_fail++; +} + +static void +dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq) +{ + osl_t *osh = bus->dhd->osh; + uint8 *data; + uint pktlen; + + uint8 cmd; + uint8 extra; + uint16 len; + uint16 offset; + + /* Check for min length */ + if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen)); + PKTFREE(osh, pkt, FALSE); + return; + } + + /* Extract header fields */ + data = PKTDATA(osh, pkt); + cmd = *data++; + extra = *data++; + len = *data++; len += *data++ << 8; + DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len)); + /* Check length for relevant commands */ + if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) { + if (pktlen != len + SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + return; + } + } + + /* Process as per command */ + switch (cmd) { + case SDPCM_TEST_ECHOREQ: + /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */ + *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP; + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) { + bus->pktgen_sent++; + } else { + bus->pktgen_fail++; + PKTFREE(osh, pkt, FALSE); + } + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_ECHORSP: + if (bus->ext_loop) { + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + } + + for (offset = 0; offset < len; offset++, data++) { + if (*data != SDPCM_TEST_FILL(offset, extra)) { + DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: " + "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n", + offset, len, SDPCM_TEST_FILL(offset, extra), *data)); + break; + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_DISCARD: + { + int i = 0; + uint8 *prn = data; + uint8 
testval = extra; + for (i = 0; i < len; i++) { + if (*prn != testval) { + DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n", + i, bus->pktgen_rcvd_rcvsession, testval, *prn)); + prn++; testval++; + } + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_BURST: + case SDPCM_TEST_SEND: + default: + DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + break; + } + + /* For recv mode, stop at limit (and tell dongle to stop sending) */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) { + bus->pktgen_rcvd_rcvsession++; + + if (bus->pktgen_total && + (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) { + bus->pktgen_count = 0; + DHD_ERROR(("Pktgen:rcv test complete!\n")); + bus->pktgen_rcv_state = PKTGEN_RCV_IDLE; + dhdsdio_sdtest_set(bus, FALSE); + bus->pktgen_rcvd_rcvsession = 0; + } + } + } +} +#endif /* SDTEST */ + +int dhd_bus_oob_intr_register(dhd_pub_t *dhdp) +{ + int err = 0; + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus); +#endif + return err; +} + +void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) +{ +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + bcmsdh_oob_intr_unregister(dhdp->bus->sdh); +#endif +} + +void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) +{ +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + bcmsdh_oob_intr_set(dhdp->bus->sdh, enable); +#endif +} + +int dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp) +{ + int irq_num = 0; +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + irq_num = bcmsdh_get_oob_intr_num(dhdp->bus->sdh); +#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID */ + return irq_num; +} + +void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub) +{ +#ifdef LINUX + bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh); +#endif +} + +void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub) +{ +#ifdef LINUX + bcmsdh_dev_relax(dhdpub->bus->sdh); +#endif +} + +bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub) +{ + bool enabled = FALSE; + +#ifdef LINUX + enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh); +#endif + return enabled; +} + +extern bool +dhd_bus_watchdog(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus; + unsigned long flags; + + DHD_TIMER(("%s: Enter\n", __FUNCTION__)); + + bus = dhdp->bus; + + if (bus->dhd->dongle_reset) + return FALSE; + + if (bus->dhd->hang_was_sent) { + dhd_os_wd_timer(bus->dhd, 0); + return FALSE; + } + + /* Ignore the timer if simulating bus down */ + if (!SLPAUTO_ENAB(bus) && bus->sleeping) + return FALSE; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) || + DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + return FALSE; + } + DHD_BUS_BUSY_SET_IN_WD(dhdp); + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + dhd_os_sdlock(bus->dhd); + + /* Poll period: check device if appropriate. 
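+     * (bus->polltick counts watchdog ticks; once it reaches bus->pollrate
+     * the device is checked for pending work even if no interrupt was seen.)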
*/ + // terence 20160615: remove !SLPAUTO_ENAB(bus) to fix not able to polling if sr supported + if (1 && (bus->poll && (++bus->polltick >= bus->pollrate))) { + uint32 intstatus = 0; + + /* Reset poll tick */ + bus->polltick = 0; + + /* Check device if no interrupts */ + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { +#ifdef DEBUG_LOST_INTERRUPTS + uint retries = 0; + bool hostpending; + uint8 devena, devpend; + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + hostpending = bcmsdh_intr_pending(bus->sdh); + devena = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTEN, NULL); + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTPEND, NULL); + + R_SDREG(intstatus, &bus->regs->intstatus, retries); + intstatus &= bus->hostintmask; + + if (intstatus && !hostpending) { + DHD_ERROR(("%s: !hpend: ena 0x%02x pend 0x%02x intstatus 0x%08x\n", + __FUNCTION__, devena, devpend, intstatus)); + } +#endif /* DEBUG_LOST_INTERRUPTS */ + +#ifndef BCMSPI + /* XXX Needs to be fixed for polling operation (in CE) */ + if (!bus->dpc_sched) { + uint8 devpend; + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, + SDIOD_CCCR_INTPEND, NULL); + intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2); + } +#else + if (!bus->dpc_sched) { + uint32 devpend; + devpend = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, + SPID_STATUS_REG, NULL); + intstatus = devpend & STATUS_F2_PKT_AVAILABLE; + } +#endif /* !BCMSPI */ + + /* If there is something, make like the ISR and schedule the DPC */ + if (intstatus) { + bus->pollcnt++; + bus->ipend = TRUE; + if (bus->intr) { + bcmsdh_intr_disable(bus->sdh); + } + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + + /* Update interrupt tracking */ + bus->lastintrs = bus->intrcount; + } + + if ((!bus->dpc_sched) && pktq_n_pkts_tot(&bus->txq)) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + +#ifdef DHD_DEBUG + /* Poll for console output periodically */ + if (dhdp->busstate == DHD_BUS_DATA && dhdp->dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhdp->dhd_console_ms) { + bus->console.count -= dhdp->dhd_console_ms; + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (dhdsdio_readconsole(bus) < 0) + dhdp->dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef SDTEST + /* Generate packets if configured */ + if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) { + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + bus->pktgen_tick = 0; + dhdsdio_pktgen(bus); + } +#endif + + /* On idle timeout clear activity flag and/or turn off clock */ +#ifdef DHD_USE_IDLECOUNT + if (bus->activity) + bus->activity = FALSE; + else { + bus->idlecount++; + + /* + * If the condition to switch off the clock is reached And if + * BT is inactive (in case of BT_OVER_SDIO build) turn off clk. + * + * Consider the following case, DHD is configured with + * 1) idletime == DHD_IDLE_IMMEDIATE + * 2) BT is the last user of the clock + * We cannot disable the clock from __dhdsdio_clk_disable + * since WLAN might be using it. If WLAN is active then + * from the respective function/context after doing the job + * the clk is turned off. + * But if WLAN is actually inactive then the watchdog should + * disable the clock. 
So the condition check below should be + * bus->idletime != 0 instead of idletime == 0 + */ + if ((bus->idletime != 0) && (bus->idlecount >= bus->idletime) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__)); + if (!bus->poll && SLPAUTO_ENAB(bus)) { + if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY) + dhd_os_wd_timer(bus->dhd, 0); + } else + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + + bus->idlecount = 0; + } + } +#else + if ((bus->idletime != 0) && (bus->clkstate == CLK_AVAIL) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + if (++bus->idlecount >= bus->idletime) { + bus->idlecount = 0; + if (bus->activity) { + bus->activity = FALSE; +#if !defined(OEM_ANDROID) && !defined(NDIS) +/* XXX + * For Android turn off clocks as soon as possible, to improve power + * efficiency. For non-android, extend clock-active period for voice + * quality reasons (see PR84690/Jira:SWWLAN-7650). + */ + } else { +#endif /* !defined(OEM_ANDROID) && !defined(NDIS) */ + if (!bus->poll && SLPAUTO_ENAB(bus)) { + if (!bus->readframes) + dhdsdio_bussleep(bus, TRUE); + else + bus->reqbussleep = TRUE; + } else { + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + } + } + } +#endif /* DHD_USE_IDLECOUNT */ + + dhd_os_sdunlock(bus->dhd); + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_WD(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + return bus->ipend; +} + +extern int +dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhdp->bus; + uint32 addr, val; + int rv; + void *pkt; + +#ifndef CONSOLE_DPC + /* Exclusive bus access */ + dhd_os_sdlock(bus->dhd); +#endif + + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) { + rv = BCME_UNSUPPORTED; + goto exit; + } + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + rv = BCME_NOTREADY; + goto exit; + } + +#ifndef CONSOLE_DPC + if (!DATAOK(bus)) { + DHD_CTL(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d, pktq_len %d\n", + __FUNCTION__, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq))); + rv = BCME_NOTREADY; + goto exit; + } + + /* Request clock to allow SDIO accesses */ + BUS_WAKE(bus); + /* No pend allowed since txpkt is called later, ht clk has to be on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); +#endif + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); + /* handle difference in definition of hnd_log_t in certain branches */ + if (dhdp->wlc_ver_major < 14) { + addr -= sizeof(uint32); + } + val = htol32(0); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); + /* handle difference in definition of hnd_log_t in certain branches */ + if (dhdp->wlc_ver_major < 14) { + addr -= sizeof(uint32); + } + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Bump dongle by sending an empty packet on the event channel. + * sdpcm_sendup (RX) checks for virtual console input. 
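+     * (The dongle inspects vcons_in only on its RX path, so an otherwise
+     * idle bus needs this nudge; the frame payload itself is presumably
+     * ignored.)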
+ */ + if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) + rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE); + +done: +#ifndef CONSOLE_DPC + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } +#endif + +exit: +#ifdef CONSOLE_DPC + memset(bus->cons_cmd, 0, sizeof(bus->cons_cmd)); +#else + dhd_os_sdunlock(bus->dhd); +#endif + return rv; +} + +#ifdef CONSOLE_DPC +extern int +dhd_bus_txcons(dhd_pub_t *dhdp, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhdp->bus; + int ret = BCME_OK; + + dhd_os_sdlock(bus->dhd); + + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) { + ret = BCME_UNSUPPORTED; + goto exit; + } + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + ret = BCME_NOTREADY; + goto exit; + } + + if (msglen >= sizeof(bus->cons_cmd)) { + DHD_ERROR(("%s: \"%s\"(%d) too long\n", __FUNCTION__, msg, msglen)); + ret = BCME_BADARG; + goto exit; + } + + if (!strlen(bus->cons_cmd)) { + strncpy(bus->cons_cmd, msg, sizeof(bus->cons_cmd)); + DHD_CTL(("%s: \"%s\" delay send, tx_max %d, tx_seq %d, pktq_len %d\n", + __FUNCTION__, bus->cons_cmd, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq))); + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } else { + DHD_CTL(("%s: \"%s\" is pending, tx_max %d, tx_seq %d, pktq_len %d\n", + __FUNCTION__, bus->cons_cmd, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq))); + ret = BCME_NOTREADY; + } + +exit: + dhd_os_sdunlock(bus->dhd); + + return ret; +} +#endif + +#if defined(DHD_DEBUG) && !defined(BCMSDIOLITE) +static void +dhd_dump_cis(uint fn, uint8 *cis) +{ + uint byte, tag, tdata; + DHD_INFO(("Function %d CIS:\n", fn)); + + for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) { + if ((byte % 16) == 0) + DHD_INFO((" ")); + DHD_INFO(("%02x ", cis[byte])); + if ((byte % 16) == 15) + DHD_INFO(("\n")); + if (!tdata--) { + tag = cis[byte]; + if (tag == 0xff) + break; + else if (!tag) + tdata = 0; + else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT) + tdata = cis[byte + 1] + 1; + else + DHD_INFO(("]")); + } + } + if ((byte % 16) != 15) + DHD_INFO(("\n")); +} +#endif /* DHD_DEBUG */ + +static bool +dhdsdio_chipmatch(uint16 chipid) +{ + if (chipid == BCM4330_CHIP_ID) + return TRUE; + if (chipid == BCM43362_CHIP_ID) + return TRUE; + if (chipid == BCM43340_CHIP_ID) + return TRUE; + if (chipid == BCM43341_CHIP_ID) + return TRUE; + if (chipid == BCM4334_CHIP_ID) + return TRUE; + if (chipid == BCM4324_CHIP_ID) + return TRUE; + if (chipid == BCM4335_CHIP_ID) + return TRUE; + if (chipid == BCM4339_CHIP_ID) + return TRUE; + if (BCM4345_CHIP(chipid)) + return TRUE; + if (chipid == BCM4350_CHIP_ID) + return TRUE; + if (chipid == BCM4354_CHIP_ID) + return TRUE; + if (chipid == BCM4358_CHIP_ID) + return TRUE; + if (chipid == BCM43569_CHIP_ID) + return TRUE; + if (chipid == BCM4371_CHIP_ID) + return TRUE; + if (chipid == BCM43430_CHIP_ID) + return TRUE; + if (chipid == BCM43018_CHIP_ID) + return TRUE; + if (BCM4349_CHIP(chipid)) + return TRUE; +#ifdef UNRELEASEDCHIP + if ((chipid == BCM4347_CHIP_ID) || + (chipid == BCM4357_CHIP_ID) || + (chipid == BCM4361_CHIP_ID)) + return TRUE; +#endif + if (chipid == BCM4364_CHIP_ID) + return TRUE; + + if (chipid == BCM43012_CHIP_ID) + return TRUE; + + if (chipid == BCM43014_CHIP_ID) + return TRUE; + + if (chipid == BCM43013_CHIP_ID) + return TRUE; + + if 
(chipid == BCM4369_CHIP_ID) + return TRUE; + + if (BCM4378_CHIP(chipid)) { + return TRUE; + } + + if (chipid == BCM4362_CHIP_ID) + return TRUE; + if (chipid == BCM43751_CHIP_ID) + return TRUE; + if (chipid == BCM43752_CHIP_ID) + return TRUE; + + return FALSE; +} + +static void * +dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, + uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh) +{ + int ret; + dhd_bus_t *bus; + + DHD_MUTEX_LOCK(); + + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. + */ + dhd_txbound = DHD_TXBOUND; + dhd_rxbound = DHD_RXBOUND; +#ifdef BCMSPI + dhd_alignctl = FALSE; +#else + dhd_alignctl = TRUE; +#endif /* BCMSPI */ + sd1idle = TRUE; + dhd_readahead = TRUE; + retrydata = FALSE; + +#ifdef DISABLE_FLOW_CONTROL + dhd_doflow = FALSE; +#endif /* DISABLE_FLOW_CONTROL */ + dhd_dongle_ramsize = 0; + dhd_txminmax = DHD_TXMINMAX; + +#ifdef BCMSPI + forcealign = FALSE; +#else + forcealign = TRUE; +#endif /* !BCMSPI */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid)); + + /* We make assumptions about address window mappings */ + ASSERT((uintptr)regsva == si_enum_base(devid)); + + /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start + * means early parse could fail, so here we should get either an ID + * we recognize OR (-1) indicating we must request power first. + */ + /* Check the Vendor ID */ + switch (venid) { + case 0x0000: + case VENDOR_BROADCOM: + break; + default: + DHD_ERROR(("%s: unknown vendor: 0x%04x\n", + __FUNCTION__, venid)); + goto forcereturn; + } + + /* Check the Device ID and make sure it's one that we support */ + switch (devid) { +#ifdef BCMINTERNAL + case SDIOD_FPGA_ID: + DHD_INFO(("%s: found FPGA Dongle\n", __FUNCTION__)); + break; +#endif /* BCMINTERNAL */ + case 0: + DHD_INFO(("%s: allow device id 0, will check chip internals\n", + __FUNCTION__)); + break; + + default: + DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n", + __FUNCTION__, venid, devid)); + goto forcereturn; + } + + if (osh == NULL) { + DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__)); + goto forcereturn; + } + + /* Allocate private bus interface state */ + if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + goto fail; + } + bzero(bus, sizeof(dhd_bus_t)); + bus->sdh = sdh; + bus->cl_devid = (uint16)devid; + bus->bus = DHD_BUS; + bus->bus_num = bus_no; + bus->slot_num = slot; + bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; + bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ +#ifdef BT_OVER_SDIO + bus->bt_use_count = 0; +#endif + +#if defined(LINUX) && defined(SUPPORT_P2P_GO_PS) + init_waitqueue_head(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + init_waitqueue_head(&bus->ctrl_tx_wait); + + /* attempt to attach to the dongle */ + if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) { + DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Attach to the dhd/OS/network interface */ + if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + 
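+    /* The SI handle and dhd context are now available, so chip-specific OTP
+     * parameters (e.g. MAC address, module name) can be read before the bus
+     * buffers are allocated.
+     */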
+#if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME) + dhd_conf_get_otp(bus->dhd, sdh, bus->sih); +#endif + + /* Allocate buffers */ + if (!(dhdsdio_probe_malloc(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__)); + goto fail; + } + + if (!(dhdsdio_probe_init(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__)); + goto fail; + } + + if (bus->intr) { + /* Register interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__)); + bcmsdh_intr_disable(sdh); /* XXX New API: bcmsdh_intr_mask()? */ + if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) { + DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n", + __FUNCTION__, ret)); + goto fail; + } + DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__)); + } else { + DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n", + __FUNCTION__)); + } + + DHD_INFO(("%s: completed!!\n", __FUNCTION__)); + + /* if firmware path present try to download and bring up bus */ + bus->dhd->hang_report = TRUE; +#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd +#if defined(LINUX) || defined(linux) + if (dhd_download_fw_on_driverload) { +#endif /* LINUX || linux */ + if ((ret = dhd_bus_start(bus->dhd)) != 0) { + DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__)); +#if !defined(OEM_ANDROID) + if (ret == BCME_NOTUP) +#endif /* !OEM_ANDROID */ + goto fail; + } +#if defined(LINUX) || defined(linux) + } + else { + /* Set random MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } +#endif /* LINUX || linux */ +#endif +#if defined(BT_OVER_SDIO) + /* At this point Regulators are turned on and iconditionaly sdio bus is started + * based upon dhd_download_fw_on_driverload check, so + * increase the bus user count, this count will only be disabled inside + * dhd_register_if() function if flag dhd_download_fw_on_driverload is set to false, + * i.e FW download during insmod is not needed, otherwise it will not be decremented + * so that WALN will always hold the bus untill rmmod is done. 
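+     * XXX The user count presumably lets WLAN and BT share the SDIO bus:
+     * the bus is only released once the count drops back to zero.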
+ */ + dhdsdio_bus_usr_cnt_inc(bus->dhd); +#endif /* BT_OVER_SDIO */ + + /* Ok, have the per-port tell the stack we're open for business */ + if (dhd_attach_net(bus->dhd, TRUE) != 0) { + DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__)); + goto fail; + } + +#ifdef BCMHOST_XTAL_PU_TIME_MOD + bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11); + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001); +#endif /* BCMHOST_XTAL_PU_TIME_MOD */ + +#if defined(MULTIPLE_SUPPLICANT) + wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe +#endif /* MULTIPLE_SUPPLICANT */ + DHD_MUTEX_UNLOCK(); + + return bus; + +fail: + dhdsdio_release(bus, osh); + +forcereturn: + DHD_MUTEX_UNLOCK(); + + return NULL; +} + +static bool +dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, + uint16 devid) +{ +#ifndef BCMSPI + int err = 0; + uint8 clkctl = 0; +#endif /* !BCMSPI */ + + bus->alp_only = TRUE; + bus->sih = NULL; + + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, si_enum_base(devid))) { + DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__)); + } + +#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG) + DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n", + bcmsdh_reg_read(bus->sdh, si_enum_base(devid), 4))); +#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */ + +#ifndef BCMSPI /* wake-wlan in gSPI will bring up the htavail/alpavail clocks. */ + + /* Force PLL off until si_attach() programs PLL control regs */ + + /* XXX Ideally should not access F1 power control regs before + * reading CIS and confirming device. But strapping option for + * low-power start requires turning on ALP before reading CIS, + * and at some point bcmsdh should read the CIS for the ID and + * not even tell us if it's some other device. At this point + * (see above) we should know it's us (powered on) or can't read + * CIS so we need to power on and try. + */ + + /* WAR for PR 39902: must force HT off until PLL programmed. 
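+     * (hence DHD_INIT_CLKCTL1 below presumably requests ALP only, leaving
+     * the HT request off until si_attach() has programmed the PLL)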
*/ + /* WAR for PR43618, PR44891: don't do ALPReq until ALPAvail set */ + + /* XXX Replace write/read sequence with single bcmsdh_cfg_raw() call */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err); + if (!err) + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) { + DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + err, DHD_INIT_CLKCTL1, clkctl)); + goto fail; + } + +#endif /* !BCMSPI */ + +#ifdef DHD_DEBUG + if (DHD_INFO_ON()) { + uint fn, numfn; + uint8 *cis = NULL; + int local_err = 0; + +#ifndef BCMSPI + numfn = bcmsdh_query_iofnum(sdh); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); + + /* Make sure ALP is available before trying to read CIS */ + SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); + + /* Now request ALP be put on the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + DHD_INIT_CLKCTL2, &local_err); + /* XXX Account for possible delay between ALP available and on active */ + OSL_DELAY(65); +#else + numfn = 0; /* internally func is hardcoded to 1 as gSPI has cis on F1 only */ +#endif /* !BCMSPI */ +#ifndef BCMSDIOLITE + if (!(cis = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: cis malloc failed\n")); + goto fail; + } + + for (fn = 0; fn <= numfn; fn++) { + bzero(cis, SBSDIO_CIS_SIZE_LIMIT); + if ((err = bcmsdh_cis_read(sdh, fn, cis, + SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", + fn, err)); + break; + } + dhd_dump_cis(fn, cis); + } + MFREE(osh, cis, SBSDIO_CIS_SIZE_LIMIT); +#else + BCM_REFERENCE(cis); + BCM_REFERENCE(fn); +#endif /* DHD_DEBUG */ + if (local_err) { + DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); + goto fail; + } + } +#endif /* DHD_DEBUG */ + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + +#ifdef DHD_DEBUG + DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n", + bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg)); +#endif /* DHD_DEBUG */ + + /* XXX Let the layers below dhd know the chipid and chiprev for + * controlling sw WARs for hw PRs + */ + bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev); + + if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: unsupported chip: 0x%04x\n", + __FUNCTION__, bus->sih->chip)); +#ifdef BCMINTERNAL + if (dhd_anychip) + DHD_ERROR(("Continuing anyway...\n")); + else +#endif /* BCMINTERNAL */ + goto fail; + } + + if (bus->sih->buscorerev >= 12) + dhdsdio_clk_kso_init(bus); + else + bus->kso = TRUE; + + si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength); + +#ifdef BCMINTERNAL + /* Check if there is a PMU in the chip. The FPGA does not have a PMU. */ + if (!(bus->sih->cccaps & CC_CAP_PMU)) { + DHD_NOPMU(bus) = 1; + } +#endif /* BCMINTERNAL */ + + /* Get info on the ARM and SOCRAM cores... 
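+     * (The ARM core type decides how RAM is sized below: si_socram_size()
+     * for ARM7s/CM3 parts, si_tcm_size() plus a per-chip base address for
+     * CR4 parts.)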
*/ + /* XXX Should really be qualified by device id */ + if (!DHD_NOPMU(bus)) { + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + } else { + /* cr4 has a different way to find the RAM size from TCM's */ + if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { + DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4335_CHIP_ID: + case BCM4339_CHIP_ID: + bus->dongle_ram_base = CR4_4335_RAM_BASE; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4358_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM4371_CHIP_ID: + bus->dongle_ram_base = CR4_4350_RAM_BASE; + break; + case BCM4360_CHIP_ID: + bus->dongle_ram_base = CR4_4360_RAM_BASE; + break; + CASE_BCM4345_CHIP: + bus->dongle_ram_base = (bus->sih->chiprev < 6) /* from 4345C0 */ + ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; + break; + case BCM4349_CHIP_GRPID: + /* RAM based changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? + CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9); + break; + case BCM4364_CHIP_ID: + bus->dongle_ram_base = CR4_4364_RAM_BASE; + break; +#ifdef UNRELEASEDCHIP + case BCM4347_CHIP_ID: + case BCM4357_CHIP_ID: + case BCM4361_CHIP_ID: + bus->dongle_ram_base = CR4_4347_RAM_BASE; + break; +#endif + case BCM4362_CHIP_ID: + bus->dongle_ram_base = CR4_4362_RAM_BASE; + break; + case BCM43751_CHIP_ID: + bus->dongle_ram_base = CR4_43751_RAM_BASE; + break; + case BCM43752_CHIP_ID: + bus->dongle_ram_base = CR4_43752_RAM_BASE; + break; + case BCM4369_CHIP_ID: + bus->dongle_ram_base = CR4_4369_RAM_BASE; + break; + case BCM4378_CHIP_GRPID: + bus->dongle_ram_base = CR4_4378_RAM_BASE; + break; + default: + bus->dongle_ram_base = 0; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + } + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_ramsize) + dhd_dongle_setramsize(bus, dhd_dongle_ramsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", + bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); + + bus->srmemsize = si_socram_srmem_size(bus->sih); + } + + /* ...but normally deal with the SDPCMDEV core */ +#ifdef BCMSDIOLITE + if (!(bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find Chip Common core!\n", __FUNCTION__)); + goto fail; + } +#else + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) && + !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__)); + goto fail; + } +#endif + bus->sdpcmrev = si_corerev(bus->sih); + + /* Set core control so an SDIO reset does a backplane reset */ + OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN); +#ifndef BCMSPI + bus->rxint_mode = SDIO_DEVICE_HMB_RXINT; + + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) + { + uint32 val; + + val = R_REG(osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(osh, 
&bus->regs->corecontrol, val); + } +#endif /* BCMSPI */ + + /* XXX Tx needs priority queue, where to determine levels? */ + /* XXX Should it try to do WLC mapping, or just pass through? */ + pktq_init(&bus->txq, (PRIOMASK + 1), QLEN); + + /* Locate an appropriately-aligned portion of hdrbuf */ +#ifndef DYNAMIC_MAX_HDR_READ + bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN); +#endif + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + if ((bus->poll = (bool)dhd_poll)) + bus->pollrate = 1; + + /* Setting default Glom size */ + bus->txglomsize = SDPCM_DEFGLOM_SIZE; + + return TRUE; + +fail: + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + return FALSE; +} + +static bool +dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->maxctl) { + bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN; + if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) { + DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n", + __FUNCTION__, bus->rxblen)); + goto fail; + } + } + /* Allocate buffer to receive glomed packet */ + if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) { + DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n", + __FUNCTION__, MAX_DATA_BUF)); + /* release rxbuf which was already located as above */ + if (!bus->rxblen) + DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen); + goto fail; + } + /* Allocate buffer to membuf */ + bus->membuf = MALLOC(osh, MAX_MEM_BUF); + if (bus->membuf == NULL) { + DHD_ERROR(("%s: MALLOC of %d-byte membuf failed\n", + __FUNCTION__, MAX_MEM_BUF)); + if (bus->databuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif + bus->databuf = NULL; + } + /* release rxbuf which was already located as above */ + if (!bus->rxblen) + DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen); + goto fail; + } + memset(bus->membuf, 0, MAX_MEM_BUF); + + /* Align the buffer */ + if ((uintptr)bus->databuf % DHD_SDALIGN) + bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN)); + else + bus->dataptr = bus->databuf; + + return TRUE; + +fail: + return FALSE; +} + +static bool +dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + int32 fnum; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bus->_srenab = FALSE; + +#ifdef SDTEST + dhdsdio_pktgen_init(bus); +#endif /* SDTEST */ + +#ifndef BCMSPI + /* Disable F2 to clear any intermediate frame state on the dongle */ + /* XXX New API: change to bcmsdh_fn_set(sdh, SDIO_FUNC_2, FALSE); */ + /* XXX Might write SRES instead, or reset ARM (download prep)? */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); +#endif /* !BCMSPI */ + + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->sleeping = FALSE; + bus->rxflow = FALSE; + bus->prev_rxlim_hit = 0; + +#ifndef BCMSPI + /* Done with backplane-dependent accesses, can drop clock... 
*/ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); +#endif /* !BCMSPI */ + + /* ...and initialize clock/power states */ + bus->clkstate = CLK_SDONLY; + bus->idletime = (int32)dhd_idletime; + bus->idleclock = DHD_IDLE_ACTIVE; + + /* Query the SD clock speed */ + if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor")); + bus->sd_divisor = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_divisor", bus->sd_divisor)); + } + + /* Query the SD bus mode */ + if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode")); + bus->sd_mode = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_mode", bus->sd_mode)); + } + + /* Query the F2 block size, set roundup accordingly */ + fnum = 2; + if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + bus->roundup = MIN(max_roundup, bus->blocksize); + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); + bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE); + if (bus->pad_pkt == NULL) + DHD_ERROR(("failed to allocate padding packet\n")); + else { + int alignment_offset = 0; + uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt); + if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN))) + PKTPUSH(osh, bus->pad_pkt, alignment_offset); + PKTSETNEXT(osh, bus->pad_pkt, NULL); + } +#endif /* DHDENABLE_TAILPAD */ + + /* Query if bus module supports packet chaining, default to use if supported */ + if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0, + &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_rxchain = FALSE; + } else { + DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n", + __FUNCTION__, (bus->sd_rxchain ? 
"supports" : "does not support"))); + } + bus->use_rxchain = (bool)bus->sd_rxchain; + bus->txinrx_thres = CUSTOM_TXINRX_THRES; + /* TX first in dhdsdio_readframes() */ + bus->dotxinrx = TRUE; + +#ifdef PKT_STATICS + dhd_bus_clear_txpktstatics(bus->dhd); +#endif + + return TRUE; +} + +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; + + ret = dhdsdio_download_firmware(bus, osh, bus->sdh); + + return ret; +} + +int +dhd_set_bus_params(struct dhd_bus *bus) +{ + int ret = 0; + + if (bus->dhd->conf->dhd_poll >= 0) { + bus->poll = bus->dhd->conf->dhd_poll; + if (!bus->pollrate) + bus->pollrate = 1; + printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll); + } + if (bus->dhd->conf->use_rxchain >= 0) { + bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain; + } + if (bus->dhd->conf->txinrx_thres >= 0) { + bus->txinrx_thres = bus->dhd->conf->txinrx_thres; + } + if (bus->dhd->conf->txglomsize >= 0) { + bus->txglomsize = bus->dhd->conf->txglomsize; + } +#ifdef MINIME + if (bus->dhd->conf->fw_type == FW_TYPE_MINIME) { + bus->ramsize = bus->dhd->conf->ramsize; + printf("%s: set ramsize 0x%x\n", __FUNCTION__, bus->ramsize); + } +#endif +#ifdef DYNAMIC_MAX_HDR_READ + if (bus->dhd->conf->max_hdr_read <= 0) { + bus->dhd->conf->max_hdr_read = MAX_HDR_READ; + } + if (bus->hdrbufp) { + MFREE(bus->dhd->osh, bus->hdrbufp, bus->dhd->conf->max_hdr_read + DHD_SDALIGN); + } + bus->hdrbufp = MALLOC(bus->dhd->osh, bus->dhd->conf->max_hdr_read + DHD_SDALIGN); + if (bus->hdrbufp == NULL) { + DHD_ERROR(("%s: MALLOC of %d-byte hdrbufp failed\n", + __FUNCTION__, bus->dhd->conf->max_hdr_read + DHD_SDALIGN)); + ret = -1; + goto exit; + } + bus->rxhdr = (uint8 *)ROUNDUP((uintptr)bus->hdrbufp, DHD_SDALIGN); + +exit: +#endif + return ret; +} + +static int +dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + +#if defined(SUPPORT_MULTIPLE_REVISION) + if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) { + DHD_ERROR(("%s: fail to concatnate revison \n", + __FUNCTION__)); + return BCME_BADARG; + } +#endif /* SUPPORT_MULTIPLE_REVISION */ + +#if defined(DHD_BLOB_EXISTENCE_CHECK) + dhd_set_blob_support(bus->dhd, bus->fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + + DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + + dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path); + ret = dhd_set_bus_params(bus); + if (ret) { + goto exit; + } + + /* Download the firmware */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + ret = _dhdsdio_download_firmware(bus); + + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + +exit: + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} + +/* Detach and free everything */ +static void +dhdsdio_release(dhd_bus_t *bus, osl_t *osh) +{ + bool dongle_isolation = FALSE; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(osh); + + if (bus->dhd) { +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + debugger_close(); +#endif /* DEBUGGER || DHD_DSCOPE */ + dongle_isolation = bus->dhd->dongle_isolation; + dhd_detach(bus->dhd); + } + + /* De-register interrupt handler */ + bcmsdh_intr_disable(bus->sdh); + bcmsdh_intr_dereg(bus->sdh); + + if (bus->dhd) { + dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_free(bus->dhd); + bus->dhd 
= NULL; + } + + dhdsdio_release_malloc(bus, osh); + +#ifdef DHD_DEBUG + if (bus->console.buf != NULL) + MFREE(osh, bus->console.buf, bus->console.bufsize); +#endif + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); +#endif /* DHDENABLE_TAILPAD */ +#ifdef DYNAMIC_MAX_HDR_READ + if (bus->hdrbufp) { + MFREE(osh, bus->hdrbufp, MAX_HDR_READ + DHD_SDALIGN); + } +#endif + + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->rxbuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->rxbuf, bus->rxblen); +#endif + bus->rxctl = NULL; + bus->rxlen = 0; + } + + if (bus->databuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif + } + + if (bus->membuf) { + MFREE(osh, bus->membuf, MAX_MEM_BUF); + bus->membuf = NULL; + } + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + } + +} + +static void +dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) + return; + + if (bus->sih) { + /* In Win10, system will be BSOD if using "sysprep" to do OS image */ + /* Skip this will not cause the BSOD. */ +#if !defined(BCMLXSDMMC) && !defined(NDIS) + /* XXX - Using the watchdog to reset the chip does not allow + * further SDIO communication. For the SDMMC Driver, this + * causes interrupt to not be de-registered properly. + */ + /* XXX: dongle isolation mode is on don't reset the chip */ + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + if (KSO_ENAB(bus) && (dongle_isolation == FALSE)) + si_watchdog(bus->sih, 4); +#endif /* !defined(BCMLXSDMMC) */ + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + si_detach(bus->sih); + bus->sih = NULL; + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_disconnect(void *ptr) +{ + dhd_bus_t *bus = (dhd_bus_t *)ptr; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_MUTEX_LOCK(); + if (bus) { + ASSERT(bus->dhd); + /* Advertise bus remove during rmmod */ + dhdsdio_advertise_bus_remove(bus->dhd); + dhdsdio_release(bus, bus->dhd->osh); + } + DHD_MUTEX_UNLOCK(); + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static int +dhdsdio_suspend(void *context) +{ + int ret = 0; +#ifdef SUPPORT_P2P_GO_PS + int wait_time = 0; +#endif /* SUPPORT_P2P_GO_PS */ + +#if defined(LINUX) + dhd_bus_t *bus = (dhd_bus_t*)context; + unsigned long flags; + + DHD_ERROR(("%s Enter\n", __FUNCTION__)); + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + + if (bus->dhd->up == FALSE) { + return BCME_OK; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) { + DHD_ERROR(("not in a readystate to LPBK is not inited\n")); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_ERROR; + } + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->dongle_reset) { + DHD_ERROR(("Dongle is in reset state.\n")); + return -EIO; + 
} + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + /* stop all interface network queue. */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); + bus->dhd->busstate = DHD_BUS_SUSPEND; +#if defined(LINUX) || defined(linux) + if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) { + DHD_ERROR(("Tx Request is not ended\n")); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return -EBUSY; + } +#endif /* LINUX || linux */ + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef SUPPORT_P2P_GO_PS + if (bus->idletime > 0) { + wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms); + } +#endif /* SUPPORT_P2P_GO_PS */ + ret = dhd_os_check_wakelock(bus->dhd); +#ifdef SUPPORT_P2P_GO_PS + // terence 20141124: fix for suspend issue + if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) { + if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) { + if (!bus->sleeping) { + ret = 1; + } + } + } +#endif /* SUPPORT_P2P_GO_PS */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + if (ret) { + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); + } + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + +#endif /* LINUX */ + return ret; +} + +static int +dhdsdio_resume(void *context) +{ + dhd_bus_t *bus = (dhd_bus_t*)context; + ulong flags; + + DHD_ERROR(("%s Enter\n", __FUNCTION__)); + + if (bus->dhd->up == FALSE) { + return BCME_OK; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + if (dhd_os_check_if_up(bus->dhd)) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + bus->dhd->busstate = DHD_BUS_DATA; + dhd_os_busbusy_wake(bus->dhd); + /* resume all interface network queue. */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + return 0; +} + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. 
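+ * bcmsdh_register() hands the probe/disconnect/suspend/resume callbacks
+ * below to the SDIO stack; dhdsdio_probe() then runs when a matching card
+ * is enumerated.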
+ */ + +static bcmsdh_driver_t dhd_sdio = { + dhdsdio_probe, + dhdsdio_disconnect, + dhdsdio_suspend, + dhdsdio_resume +}; + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return bcmsdh_register(&dhd_sdio); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_unregister(); +} + +#if defined(BCMLXSDMMC) +/* Register a dummy SDIO client driver in order to be notified of new SDIO device */ +int dhd_bus_reg_sdio_notify(void* semaphore) +{ + return bcmsdh_reg_sdio_notify(semaphore); +} + +void dhd_bus_unreg_sdio_notify(void) +{ + bcmsdh_unreg_sdio_notify(); +} +#endif /* defined(BCMLXSDMMC) */ + +#ifdef BCMEMBEDIMAGE +static int +dhdsdio_download_code_array(struct dhd_bus *bus) +{ + int bcmerror = -1; + int offset = 0; + unsigned char *ularray = NULL; + + DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__)); + + /* Download image */ + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + + if (offset == 0) { + bus->resetinstr = *(((uint32*)dlarray)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + +#ifdef DHD_DEBUG + /* Upload and compare the downloaded code */ + { + ularray = MALLOC(bus->dhd->osh, bus->ramsize); + /* Upload image to verify downloaded contents. 
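+         * (Read back in MEMBLOCK chunks into ularray and memcmp against
+         * dlarray; any difference means the download was corrupted.)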
+ */
+		if (ularray == NULL) {
+			/* MALLOC may fail; abort the verify pass rather than deref NULL */
+			bcmerror = BCME_NOMEM;
+			goto err;
+		}
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+				__FUNCTION__, dlimagename, dlimagever, dlimagedate));
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+				__FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	if (ularray)
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+#ifdef CHECK_DOWNLOAD_FW
+	uint8 *memptr_tmp = NULL; /* terence: used to read back and verify the downloaded firmware */
+#endif
+	uint memblock_size = MEMBLOCK;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	unsigned long initial_jiffies = 0;
+	uint firmware_sz = 0;
+#endif
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	/* XXX: Should succeed in opening image if it is actually given through registry
+	 * entry or in module param.
+	 */
+	image = dhd_os_open_image1(bus->dhd, pfw_path);
+	if (image == NULL) {
+		printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
+		goto err;
+	}
+
+	/* Update the dongle image download block size depending on the F1 block size */
+#ifndef NDIS
+	if (sd_f1_blocksize == 512)
+		memblock_size = MAX_MEMBLOCK;
+#endif /* !NDIS */
+
+	memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+			memblock_size));
+		goto err;
+	}
+#ifdef CHECK_DOWNLOAD_FW
+	if (bus->dhd->conf->fwchk) {
+		memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+		if (memptr_tmp == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+			goto err;
+		}
+	}
+#endif
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	initial_jiffies = jiffies;
+#endif
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* terence 20150412: fix for firmware failed to download.
+		 * These chips need each block zero-padded to a 64-byte boundary.
+		 */
+		if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
+			bus->dhd->conf->chip == BCM43341_CHIP_ID) {
+			if (len % 64 != 0) {
+				memset(memptr + len, 0, 64 - (len % 64));
+				len += (64 - (len % 64));
+			}
+		}
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
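+		/* Write this SDALIGN-aligned block into dongle memory over SDIO;
+		 * for CR4 chips 'offset' was rebased to dongle_ram_base above, so
+		 * it already points into dongle RAM.
+		 */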
bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, memblock_size, offset)); + goto err; + } + +#ifdef CHECK_DOWNLOAD_FW + if (bus->dhd->conf->fwchk) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, memptr_tmp, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + if (memcmp(memptr_tmp, memptr, len)) { + DHD_ERROR(("%s: Downloaded image is corrupted at 0x%08x\n", __FUNCTION__, offset)); + bcmerror = BCME_ERROR; + goto err; + } else + DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); + } +#endif + + offset += memblock_size; +#ifdef DHD_DEBUG_DOWNLOADTIME + firmware_sz += len; +#endif + } + +#ifdef DHD_DEBUG_DOWNLOADTIME + DHD_ERROR(("Firmware download time for %u bytes: %u ms\n", + firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies))); +#endif + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN); +#ifdef CHECK_DOWNLOAD_FW + if (bus->dhd->conf->fwchk) { + if (memptr_tmp) + MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); + } +#endif + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcmerror; +} + +#ifdef DHD_UCODE_DOWNLOAD +/* Currently supported only for the chips in which ucode RAM is AXI addressable */ +static uint32 +dhdsdio_ucode_base(struct dhd_bus *bus) +{ + uint32 ucode_base = 0; + + switch ((uint16)bus->sih->chip) { + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + ucode_base = 0xE8020000; + break; + default: + DHD_ERROR(("%s: Unsupported!\n", __func__)); + break; + } + + return ucode_base; +} + +static int +dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path) +{ + int bcmerror = -1; + int offset = 0; + int len; + uint32 ucode_base; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + uint memblock_size = MEMBLOCK; +#ifdef DHD_DEBUG_DOWNLOADTIME + unsigned long initial_jiffies = 0; + uint firmware_sz = 0; +#endif + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, ucode_path)); + + ucode_base = dhdsdio_ucode_base(bus); + + /* XXX: Should succeed in opening image if it is actually given through registry + * entry or in module param. 
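+ * Unlike the firmware path above, a failed open here is silent: the
+ * function just returns the initial bcmerror (-1).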
+ */ + image = dhd_os_open_image1(bus->dhd, ucode_path); + if (image == NULL) + goto err; + + /* Update the dongle image download block size depending on the F1 block size */ + if (sd_f1_blocksize == 512) + memblock_size = MAX_MEMBLOCK; + + memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + memblock_size)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + +#ifdef DHD_DEBUG_DOWNLOADTIME + initial_jiffies = jiffies; +#endif + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + + bcmerror = dhdsdio_membytes(bus, TRUE, (ucode_base + offset), memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, memblock_size, offset)); + goto err; + } + + offset += memblock_size; +#ifdef DHD_DEBUG_DOWNLOADTIME + firmware_sz += len; +#endif + } + +#ifdef DHD_DEBUG_DOWNLOADTIME + DHD_ERROR(("ucode download time for %u bytes: %u ms\n", + firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies))); +#endif + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN); + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcmerror; +} /* dhdsdio_download_ucode_file */ + +void +dhd_bus_ucode_download(struct dhd_bus *bus) +{ + uint32 shaddr = 0, shdata = 0; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&shdata, 4); + + DHD_TRACE(("%s: shdata:[0x%08x :0x%08x]\n", __func__, shaddr, shdata)); + + if (shdata == UCODE_DOWNLOAD_REQUEST) + { + DHD_ERROR(("%s: Received ucode download request!\n", __func__)); + + /* Download the ucode */ + if (!dhd_get_ucode_path(bus->dhd)) { + DHD_ERROR(("%s: bus->uc_path not set!\n", __func__)); + return; + } + dhdsdio_download_ucode_file(bus, dhd_get_ucode_path(bus->dhd)); + + DHD_ERROR(("%s: Ucode downloaded successfully!\n", __func__)); + + shdata = UCODE_DOWNLOAD_COMPLETE; + dhdsdio_membytes(bus, TRUE, shaddr, (uint8 *)&shdata, 4); + } +} + +#endif /* DHD_UCODE_DOWNLOAD */ + +static int +dhdsdio_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = -1; + uint len; + void * image = NULL; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* For Get nvram from UEFI */ + if (nvram_file_exists) { + image = dhd_os_open_image1(bus->dhd, pnv_path); + if (image == NULL) { + printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path); + goto err; + } + } + + memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + + /* For Get nvram from image or UEFI (when image == NULL ) */ + len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); + + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { + bufp = (char *)memblock; + bufp[len] = 0; + len = process_nvram_vars(bufp, len); + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", 
+ __FUNCTION__, bcmerror)); + } + } else { + DHD_ERROR(("%s: error reading nvram file: %d\n", + __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcmerror; +} + +static int +_dhdsdio_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + return bcmerror; +#endif + } + + /* Keep arm in reset */ + if (dhdsdio_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdsdio_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__)); +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + goto err; +#endif + } else { + embed = FALSE; + dlok = TRUE; + } + } + +#ifdef BCMEMBEDIMAGE + if (embed) { + if (dhdsdio_download_code_array(bus)) { + DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); + goto err; + } else { + dlok = TRUE; + } + } +#else + BCM_REFERENCE(embed); +#endif + if (!dlok) { + DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__)); + goto err; + } + + /* External nvram takes precedence if specified */ + if (dhdsdio_download_nvram(bus)) { + DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); + goto err; + } + + /* Take arm out of reset */ + if (dhdsdio_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} + +static int +dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + int status; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete_fn, handle); + + return status; +} + +static int +dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle, int max_retry) +{ + int ret; + int i = 0; + int retries = 0; + bcmsdh_info_t *sdh; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + sdh = bus->sdh; + do { + ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, + pkt, complete_fn, handle); + + bus->f2txdata++; + ASSERT(ret != BCME_PENDING); + + if (ret == BCME_NODEVICE) { + DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__)); + } else if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + bus->f1regdata++; + bus->dhd->tx_errors++; + bcmsdh_abort(sdh, SDIO_FUNC_2); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + for (i = 0; i < READ_FRM_CNT_RETRIES; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI, + NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO, + NULL); + 
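+				/* SBSDIO_FUNC1_WFRAMEBCHI/LO expose the pending write-frame
+				 * byte count; both reading zero means the terminated frame
+				 * has fully drained and the write can be retried.
+				 */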
bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + } + } while ((ret < 0) && retrydata && ++retries < max_retry); + + return ret; +} + +uint8 +dhd_bus_is_ioready(struct dhd_bus *bus) +{ + uint8 enable; + bcmsdh_info_t *sdh; + ASSERT(bus); + ASSERT(bus->sih != NULL); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + sdh = bus->sdh; + return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL)); +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chiprev; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_sih(struct dhd_bus *bus) +{ + return (void *)bus->sih; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +uint +dhd_bus_hdrlen(struct dhd_bus *bus) +{ + return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; +} + +void +dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val) +{ + bus->dotxinrx = val; +} + +/* + * dhdsdio_advertise_bus_cleanup advertises that clean up is under progress + * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts + * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for + * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so + * they will exit from there itself without marking dhd_bus_busy_state as BUSY. + */ +static void +dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); +#ifdef LINUX + if ((timeleft == 0) || (timeleft == 1)) +#else + if (timeleft == 0) +#endif + { + /* XXX This condition ideally should not occur, this means some + * bus usage context is not clearing the respective usage bit, print + * dhd_bus_busy_state and crash the host for further debugging. 
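+ * A leftover of 0 or 1 jiffies is treated as a timeout here; ASSERT(0)
+ * below only fires on assert-enabled builds, otherwise the stuck
+ * dhd_bus_busy_state bitmask is merely logged.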
+ */ + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_REMOVE; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + int bcmerror = 0; + dhd_bus_t *bus; + unsigned long flags; + + bus = dhdp->bus; + + if (flag == TRUE) { + if (!bus->dhd->dongle_reset) { + DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); + dhdsdio_advertise_bus_cleanup(bus->dhd); + dhd_os_sdlock(dhdp); + dhd_os_wd_timer(dhdp, 0); +#if defined(OEM_ANDROID) +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ +#endif /* OEM_ANDROID */ + /* Expect app to have torn down any connection before calling */ + /* Stop the bus, disable F2 */ + dhd_bus_stop(bus, FALSE); + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + /* Clean up any pending IRQ */ + dhd_enable_oob_intr(bus, FALSE); + bcmsdh_oob_intr_set(bus->sdh, FALSE); + bcmsdh_oob_intr_unregister(bus->sdh); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + + /* Clean tx/rx buffer pointers, detach from the dongle */ + dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE); + + bus->dhd->dongle_reset = TRUE; + DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__)); + bus->dhd->up = FALSE; + dhd_txglom_enable(dhdp, FALSE); + dhd_os_sdunlock(dhdp); + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__)); + /* App can now remove power from device */ + } else + bcmerror = BCME_SDIO_ERROR; + } else { + /* App must have restored power to device before calling */ + + printf("%s: == Power ON ==\n", __FUNCTION__); + + if (bus->dhd->dongle_reset) { + /* Turn on WLAN */ + dhd_os_sdlock(dhdp); + /* Reset SD client */ + bcmsdh_reset(bus->sdh); + + /* Attempt to re-attach & download */ + if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh, + (uint32 *)(uintptr)si_enum_base(bus->cl_devid), + bus->cl_devid)) { + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + /* Attempt to download binary to the dongle */ + if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) && + dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) { + + /* Re-init bus, enable F2 transfer */ + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + dhd_enable_oob_intr(bus, TRUE); + bcmsdh_oob_intr_register(bus->sdh, + dhdsdio_isr, bus); + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#elif defined(FORCE_WOWLAN) + dhd_enable_oob_intr(bus, TRUE); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; + +#if defined(OEM_ANDROID) && 
!defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); +#endif /* defined(OEM_ANDROID) && (!defined(IGNORE_ETH0_DOWN)) */ + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + + DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + } else { + dhd_bus_stop(bus, FALSE); + dhdsdio_release_dongle(bus, bus->dhd->osh, + TRUE, FALSE); + } + } else { + DHD_ERROR(("%s Failed to download binary to the dongle\n", + __FUNCTION__)); + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + bcmerror = BCME_SDIO_ERROR; + } + } else + bcmerror = BCME_SDIO_ERROR; + + dhd_os_sdunlock(dhdp); + } else { + DHD_INFO(("%s called when dongle is not in reset\n", + __FUNCTION__)); +#if defined(OEM_ANDROID) + DHD_INFO(("Will call dhd_bus_start instead\n")); + dhd_bus_resume(dhdp, 1); +#if defined(HW_OOB) || defined(FORCE_WOWLAN) + dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih); // terence 20120615: fix for OOB initial issue +#endif + if ((bcmerror = dhd_bus_start(dhdp)) != 0) + DHD_ERROR(("%s: dhd_bus_start fail with %d\n", + __FUNCTION__, bcmerror)); +#endif /* defined(OEM_ANDROID) */ + } + } + +#ifdef PKT_STATICS + dhd_bus_clear_txpktstatics(dhdp); +#endif + return bcmerror; +} + +#if defined(LINUX) +int dhd_bus_suspend(dhd_pub_t *dhdpub) +{ + return bcmsdh_stop(dhdpub->bus->sdh); +} + +int dhd_bus_resume(dhd_pub_t *dhdpub, int stage) +{ + return bcmsdh_start(dhdpub->bus->sdh, stage); +} +#endif /* defined(LINUX) */ + +/* Get Chip ID version */ +uint dhd_bus_chip_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + if (bus && bus->sih) + return bus->sih->chip; + else + return 0; +} + +/* Get Chip Rev ID version */ +uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + if (bus && bus->sih) + return bus->sih->chiprev; + else + return 0; +} + +/* Get Chip Pkg ID version */ +uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chippkg; +} + +int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num) +{ + *bus_type = bus->bus; + *bus_num = bus->bus_num; + *slot_num = bus->slot_num; + return 0; +} + +int +dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size) +{ + dhd_bus_t *bus; + + bus = dhdp->bus; + return dhdsdio_membytes(bus, set, address, data, size); +} + +#if defined(SUPPORT_MULTIPLE_REVISION) +static int +concate_revision_bcm4335(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + + uint chipver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = "_4335"; +#else + char chipver_tag[4] = {0, }; +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */ + + DHD_TRACE(("%s: BCM4335 Multiple Revision Check\n", __FUNCTION__)); + if (bus->sih->chip != BCM4335_CHIP_ID) { + DHD_ERROR(("%s:Chip is not BCM4335\n", __FUNCTION__)); + return -1; + } + chipver = bus->sih->chiprev; + DHD_ERROR(("CHIP VER = [0x%x]\n", chipver)); + if (chipver == 0x0) { + DHD_ERROR(("----- CHIP bcm4335_A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else if (chipver == 0x1) { + DHD_ERROR(("----- CHIP bcm4335_B0 -----\n")); +#if defined(SUPPORT_MULTIPLE_CHIPS) + strcat(chipver_tag, "_b0"); +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */ + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + return 0; +} + +static int +concate_revision_bcm4339(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + + uint chipver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = "_4339"; +#else + char chipver_tag[4] = {0, }; +#endif /* 
defined(SUPPORT_MULTIPLE_CHIPS) */ + + DHD_TRACE(("%s: BCM4339 Multiple Revision Check\n", __FUNCTION__)); + if (bus->sih->chip != BCM4339_CHIP_ID) { + DHD_ERROR(("%s:Chip is not BCM4339\n", __FUNCTION__)); + return -1; + } + chipver = bus->sih->chiprev; + DHD_ERROR(("CHIP VER = [0x%x]\n", chipver)); + if (chipver == 0x1) { + DHD_ERROR(("----- CHIP bcm4339_A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else { + DHD_ERROR(("----- CHIP bcm4339 unknown revision %d -----\n", + chipver)); + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + return 0; +} + +static int concate_revision_bcm4350(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + uint32 chip_ver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = {0, }; +#else + char chipver_tag[4] = {0, }; +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */ + chip_ver = bus->sih->chiprev; + +#if defined(SUPPORT_MULTIPLE_CHIPS) + if (chip_ver == 3) + strcat(chipver_tag, "_4354"); + else + strcat(chipver_tag, "_4350"); +#endif + + if (chip_ver == 3) { + DHD_ERROR(("----- CHIP 4354 A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver)); + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + return 0; +} + +static int concate_revision_bcm4354(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + uint32 chip_ver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = "_4354"; +#else +#if !defined(CUSTOMER_HW4) + char chipver_tag[4] = {0, }; +#endif /* !CUSTOMER_HW4 */ +#endif /* SUPPORT_MULTIPLE_CHIPS */ + + chip_ver = bus->sih->chiprev; +#if !defined(SUPPORT_MULTIPLE_CHIPS) && defined(CUSTOMER_HW4) + DHD_INFO(("----- CHIP 4354, ver=%x -----\n", chip_ver)); +#else + if (chip_ver == 1) { + DHD_ERROR(("----- CHIP 4354 A1 -----\n")); + strcat(chipver_tag, "_a1"); + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver)); + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); +#endif /* !SUPPORT_MULTIPLE_CHIPS && CUSTOMER_HW4 */ + + return 0; +} + +static int +concate_revision_bcm43454(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + char chipver_tag[10] = {0, }; +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT + int base_system_rev_for_nv = 0; +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */ + + DHD_TRACE(("%s: BCM43454 Multiple Revision Check\n", __FUNCTION__)); + if (bus->sih->chip != BCM43454_CHIP_ID) { + DHD_ERROR(("%s:Chip is not BCM43454!\n", __FUNCTION__)); + return -1; + } +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT + base_system_rev_for_nv = dhd_get_system_rev(); + if (base_system_rev_for_nv > 0) { + DHD_ERROR(("----- Board Rev [%d] -----\n", base_system_rev_for_nv)); + sprintf(chipver_tag, "_r%02d", base_system_rev_for_nv); + } +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */ +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW + DHD_ERROR(("----- Rev [%d] Fot MULTIPLE Board. -----\n", system_hw_rev)); + if ((system_hw_rev >= 8) && (system_hw_rev <= 11)) { + DHD_ERROR(("This HW is Rev 08 ~ 11. 
this is For FD-HW\n")); + strcat(chipver_tag, "_FD"); + } +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */ + + strcat(nv_path, chipver_tag); + return 0; +} + +int +concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + int res = 0; + + if (!bus || !bus->sih) { + DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); + return -1; + } + + switch (bus->sih->chip) { + case BCM4335_CHIP_ID: + res = concate_revision_bcm4335(bus, fw_path, nv_path); + + break; + case BCM4339_CHIP_ID: + res = concate_revision_bcm4339(bus, fw_path, nv_path); + break; + case BCM4350_CHIP_ID: + res = concate_revision_bcm4350(bus, fw_path, nv_path); + break; + case BCM4354_CHIP_ID: + res = concate_revision_bcm4354(bus, fw_path, nv_path); + break; + case BCM43454_CHIP_ID: + res = concate_revision_bcm43454(bus, fw_path, nv_path); + break; + + /* XXX: Add New Multiple CHIP ID */ + default: + DHD_ERROR(("REVISION SPECIFIC feature is not required\n")); + /* XXX: if revision specific feature is not required then return true always */ + return res; + } + + if (res == 0) { +#ifdef BCMDBG + printf("dhd concatenated fw & nv:\n fw_path:%s\n" + " nv_path:%s\n", fw_path, nv_path); + printf("make sure they exist\n"); +#endif + } + return res; +} +#endif /* SUPPORT_MULTIPLE_REVISION */ + +#if defined(NDIS) +void +dhd_bus_reject_ioreqs(dhd_pub_t *dhdp, bool reject) +{ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_reject_ioreqs(dhdp->bus->sdh, reject); +} + +void +dhd_bus_waitfor_iodrain(dhd_pub_t *dhdp) +{ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_waitfor_iodrain(dhdp->bus->sdh); +} +#endif /* (NDIS) */ + +void +dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) +{ + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; +} + +int +dhd_enableOOB(dhd_pub_t *dhd, bool sleep) +{ + dhd_bus_t *bus = dhd->bus; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + if (sleep) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) { + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + return BCME_BUSY; + } + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } else { + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n")); + + /* Make sure we have SD bus access */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + return BCME_OK; +} + +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bool wlfc_enabled = FALSE; + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled) { +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. 
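+ * (The TCP-ACK suppress table caches pointers to queued packets, so it
+ * must be purged before pktq_flush() below frees them.)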
+ */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + /* Clear the data packet queues */ + pktq_flush(dhdp->osh, &bus->txq, TRUE); + } +} + +#ifdef BCMSDIO +int +dhd_sr_config(dhd_pub_t *dhd, bool on) +{ + dhd_bus_t *bus = dhd->bus; + + if (!bus->_srenab) + return -1; + + return dhdsdio_clk_devsleep_iovar(bus, on); +} + +uint16 +dhd_get_chipid(struct dhd_bus *bus) +{ + if (bus && bus->sih) + return (uint16)bus->sih->chip; + else + return 0; +} +#endif /* BCMSDIO */ + +#ifdef DEBUGGER +static uint32 +dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr) +{ + uint32 rval; + + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + rval = bcmsdh_reg_read(bus->sdh, addr, 4); + + dhd_os_sdunlock(bus->dhd); + + return rval; +} + +static void +dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val) +{ + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmsdh_reg_write(bus->sdh, addr, 4, val); + + dhd_os_sdunlock(bus->dhd); +} + +#endif /* DEBUGGER */ + +#if defined(SOFTAP_TPUT_ENHANCE) +void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time) +{ + if (!dhdp || !dhdp->bus) { + DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); + return; + } + dhdp->bus->idletime = idle_time; +} + +void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time) +{ + if (!dhdp || !dhdp->bus) { + DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); + return; + } + + if (!idle_time) { + DHD_ERROR(("%s:Arg idle_time is NULL\n", __FUNCTION__)); + return; + } + *idle_time = dhdp->bus->idletime; +} +#endif /* SOFTAP_TPUT_ENHANCE */ + +#if defined(BT_OVER_SDIO) +uint8 dhd_bus_cfg_read(void *h, uint fun_num, uint32 addr, int *err) +{ + uint8 intrd; + dhd_pub_t *dhdp = (dhd_pub_t *)h; + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + dhd_os_sdlock(bus->dhd); + + intrd = bcmsdh_cfg_read(bus->sdh, fun_num, addr, err); + + dhd_os_sdunlock(bus->dhd); + + return intrd; +} EXPORT_SYMBOL(dhd_bus_cfg_read); + +void dhd_bus_cfg_write(void *h, uint fun_num, uint32 addr, uint8 val, int *err) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)h; + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + dhd_os_sdlock(bus->dhd); + + bcmsdh_cfg_write(bus->sdh, fun_num, addr, val, err); + + dhd_os_sdunlock(bus->dhd); + +} EXPORT_SYMBOL(dhd_bus_cfg_write); + +static int +extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value) +{ + char field [8]; + + strlcpy(field, line + start_pos, sizeof(field)); + + return (sscanf (field, "%hX", value) == 1); +} + +static int +read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, uint16 * hi_addr, + uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes) +{ + int str_len; + uint16 num_data_bytes, addr, data_pos, type, w, i; + uint32 abs_base_addr32 = 0; + *num_bytes = 0; + + while (!*num_bytes) + { + str_len = dhd_os_gets_image(bus->dhd, line, BTFW_MAX_STR_LEN, file); + + DHD_TRACE(("%s: Len :0x%x %s\n", __FUNCTION__, str_len, line)); + + if (str_len == 0) { + break; + } else if (str_len > 9) { + extract_hex_field(line, 1, 2, &num_data_bytes); + extract_hex_field(line, 3, 4, &addr); + extract_hex_field(line, 7, 2, &type); + + data_pos = 9; + for (i = 0; i < num_data_bytes; i++) { + extract_hex_field(line, data_pos, 2, &w); + data_bytes [i] = (uint8)(w & 0x00FF); + data_pos += 2; + } + + if (type == BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS) { + *hi_addr = (data_bytes [0] << 8) | data_bytes [1]; + *addr_mode = BTFW_ADDR_MODE_EXTENDED; + } else if (type == 
BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS) { + *hi_addr = (data_bytes [0] << 8) | data_bytes [1]; + *addr_mode = BTFW_ADDR_MODE_SEGMENT; + } else if (type == BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS) { + abs_base_addr32 = (data_bytes [0] << 24) | (data_bytes [1] << 16) | + (data_bytes [2] << 8) | data_bytes [3]; + *addr_mode = BTFW_ADDR_MODE_LINEAR32; + } else if (type == BTFW_HEX_LINE_TYPE_DATA) { + *dest_addr = addr; + if (*addr_mode == BTFW_ADDR_MODE_EXTENDED) + *dest_addr += (*hi_addr << 16); + else if (*addr_mode == BTFW_ADDR_MODE_SEGMENT) + *dest_addr += (*hi_addr << 4); + else if (*addr_mode == BTFW_ADDR_MODE_LINEAR32) + *dest_addr += abs_base_addr32; + *num_bytes = num_data_bytes; + } + } + } + return (*num_bytes > 0); +} + +static int +_dhdsdio_download_btfw(struct dhd_bus *bus) +{ + int bcm_error = -1; + void *image = NULL; + uint8 *mem_blk = NULL, *mem_ptr = NULL, *data_ptr = NULL; + + uint32 offset_addr = 0, offset_len = 0, bytes_to_write = 0; + + char *line = NULL; + uint32 dest_addr = 0, num_bytes; + uint16 hiAddress = 0; + uint32 start_addr, start_data, end_addr, end_data, i, index, pad, + bt2wlan_pwrup_adr; + + int addr_mode = BTFW_ADDR_MODE_EXTENDED; + + /* Out immediately if no image to download */ + if ((bus->btfw_path == NULL) || (bus->btfw_path[0] == '\0')) { + return 0; + } + + /* XXX: Should succeed in opening image if it is actually given through registry + * entry or in module param. + */ + image = dhd_os_open_image1(bus->dhd, bus->btfw_path); + if (image == NULL) + goto err; + + mem_ptr = mem_blk = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN); + if (mem_blk == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN)); + goto err; + } + if ((uint32)(uintptr)mem_blk % DHD_SDALIGN) + mem_ptr += (DHD_SDALIGN - ((uint32)(uintptr)mem_blk % DHD_SDALIGN)); + + data_ptr = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE - 8); + if (data_ptr == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + BTFW_DOWNLOAD_BLK_SIZE - 8)); + goto err; + } + /* Write to BT register to hold WLAN wake high during BT FW download */ + bt2wlan_pwrup_adr = BTMEM_OFFSET + BT2WLAN_PWRUP_ADDR; + bcmsdh_reg_write(bus->sdh, bt2wlan_pwrup_adr, 4, BT2WLAN_PWRUP_WAKE); + /* + * Wait for at least 2msec for the clock to be ready/Available. 
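+ * (OSL_DELAY() takes a microsecond count, hence the 2000 below.)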
+ */ + OSL_DELAY(2000); + + line = MALLOC(bus->dhd->osh, BTFW_MAX_STR_LEN); + if (line == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, BTFW_MAX_STR_LEN)); + goto err; + } + memset(line, 0, BTFW_MAX_STR_LEN); + + while (read_more_btbytes (bus, image, line, &addr_mode, &hiAddress, &dest_addr, + data_ptr, &num_bytes)) { + + DHD_TRACE(("read %d bytes at address %08X\n", num_bytes, dest_addr)); + + start_addr = BTMEM_OFFSET + dest_addr; + index = 0; + + /* Make sure the start address is 4 byte aligned to avoid alignment issues + * with SD host controllers + */ + if (!ISALIGNED(start_addr, 4)) { + pad = start_addr % 4; + start_addr = ROUNDDN(start_addr, 4); + start_data = bcmsdh_reg_read(bus->sdh, start_addr, 4); + for (i = 0; i < pad; i++, index++) { + mem_ptr[index] = (uint8)((uint8 *)&start_data)[i]; + } + } + bcopy(data_ptr, &(mem_ptr[index]), num_bytes); + index += num_bytes; + + /* Make sure the length is multiple of 4bytes to avoid alignment issues + * with SD host controllers + */ + end_addr = start_addr + index; + if (!ISALIGNED(end_addr, 4)) { + end_addr = ROUNDDN(end_addr, 4); + end_data = bcmsdh_reg_read(bus->sdh, end_addr, 4); + for (i = (index % 4); i < 4; i++, index++) { + mem_ptr[index] = (uint8)((uint8 *)&end_data)[i]; + } + } + + offset_addr = start_addr & 0xFFF; + offset_len = offset_addr + index; + if (offset_len <= 0x1000) { + bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, index); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + } + else { + bytes_to_write = 0x1000 - offset_addr; + bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, + bytes_to_write); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + + OSL_DELAY(10000); + + bcm_error = dhdsdio_membytes(bus, TRUE, (start_addr + bytes_to_write), + (mem_ptr + bytes_to_write), (index - bytes_to_write)); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + } + memset(line, 0, BTFW_MAX_STR_LEN); + } + + bcm_error = 0; +err: + if (mem_blk) + MFREE(bus->dhd->osh, mem_blk, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN); + + if (data_ptr) + MFREE(bus->dhd->osh, data_ptr, BTFW_DOWNLOAD_BLK_SIZE - 8); + + if (line) + MFREE(bus->dhd->osh, line, BTFW_MAX_STR_LEN); + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcm_error; +} + +static int +dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + + DHD_TRACE(("%s: btfw path=%s\n", + __FUNCTION__, bus->btfw_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + dhd_os_sdlock(bus->dhd); + + /* Download the firmware */ + ret = _dhdsdio_download_btfw(bus); + + dhd_os_sdunlock(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + + return ret; +} + +int +dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, + char *pbtfw_path) +{ + int ret; + + bus->btfw_path = pbtfw_path; + + ret = dhdsdio_download_btfw(bus, osh, bus->sdh); + + return ret; +} +#endif /* defined (BT_OVER_SDIO) */ + +void +dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + trap_t *tr = &bus->dhd->last_trap_info; + + bcm_bprintf(strbuf, + "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + "lp 0x%x, rpc 0x%x Trap offset 0x%x, " + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + 
ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr), + ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc), + ltoh32(bus->dongle_trap_addr), + ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), + ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7)); + +} + +static int +dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len) +{ + int ret = -1; + + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(((dhd_bus_t*)bus)->sdh), + SDIO_FUNC_2, F2SYNC, frame, len, NULL, NULL, NULL, TXRETRIES); + + if (ret == BCME_OK) + ((dhd_bus_t*)bus)->tx_seq = (((dhd_bus_t*)bus)->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + + return ret; +} + +/* Function to set the min res mask depending on the chip ID used */ +bool +dhd_bus_set_default_min_res_mask(struct dhd_bus *bus) +{ + if ((bus == NULL) || (bus->sih == NULL)) { + DHD_ERROR(("%s(): Invalid Arguments \r\n", __FUNCTION__)); + return FALSE; + } + + switch (bus->sih->chip) { + case BCM4339_CHIP_ID: + bcmsdh_reg_write(bus->sdh, SI_ENUM_BASE(bus->sih) + 0x618, 4, 0x3fcaf377); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + return FALSE; + } + break; + + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + bcmsdh_reg_write(bus->sdh, + si_get_pmu_reg_addr(bus->sih, OFFSETOF(pmuregs_t, min_res_mask)), + 4, DEFAULT_43012_MIN_RES_MASK); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + return FALSE; + } + break; + + default: + DHD_ERROR(("%s: Unhandled chip id\n", __FUNCTION__)); + return FALSE; + } + + return TRUE; +} + +/* Function to reset PMU registers */ +void +dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp) +{ + struct dhd_bus *bus = dhdp->bus; + bcmsdh_reg_write(bus->sdh, si_get_pmu_reg_addr(bus->sih, + OFFSETOF(pmuregs_t, swscratch)), 4, 0x0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + } +} + +int +dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read) +{ + int bcmerror = 0; + struct dhd_bus *bus = dhdp->bus; + + if (read) { + *data = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + } else { + bcmsdh_reg_write(bus->sdh, addr, size, *data); + } + + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + + return bcmerror; +} + +int dhd_get_idletime(dhd_pub_t *dhd) +{ + return dhd->bus->idletime; +} + +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_bus_get_wakecount(dhd_pub_t *dhd) +{ + if (!dhd->bus) { + return NULL; + } + return &dhd->bus->wake_counts; +} +int +dhd_bus_get_bus_wake(dhd_pub_t *dhd) +{ + return bcmsdh_set_get_wake(dhd->bus->sdh, 0); +} +#endif /* DHD_WAKE_STATUS */ + +int +dhd_bus_sleep(dhd_pub_t *dhdp, bool sleep, uint32 *intstatus) +{ + dhd_bus_t *bus = dhdp->bus; + uint32 retry = 0; + int ret = 0; + + if (bus) { + dhd_os_sdlock(dhdp); + BUS_WAKE(bus); + R_SDREG(*intstatus, &bus->regs->intstatus, retry); + if (sleep) { + if (SLPAUTO_ENAB(bus)) { + ret = dhdsdio_bussleep(bus, sleep); + if (ret != BCME_BUSY) + dhd_os_wd_timer(bus->dhd, 0); + } else + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + dhd_os_sdunlock(dhdp); + } else { + DHD_ERROR(("bus is NULL\n")); + ret = -1; + } + + return ret; +} diff --git a/bcmdhd.101.10.361.x/dhd_sec_feature.h b/bcmdhd.101.10.361.x/dhd_sec_feature.h new file mode 100755 index 0000000..671a71a --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_sec_feature.h @@ -0,0 +1,226 @@ +/* + * Customer HW 4 dependant file + * + * Copyright (C) 2020, 
Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id: dhd_sec_feature.h$
+ */
+
+/* XXX This file is managed by Samsung */
+
+/*
+ * *** Description ***
+ * 1. Module vs COB
+ * If your model's Wi-Fi chip is a COB type, you must add the features below,
+ * because COB-type chips have no CID and no MAC address:
+ * - #undef USE_CID_CHECK
+ * - #define READ_MACADDR
+ * You must also add the feature below to the defconfig file:
+ * - CONFIG_WIFI_BROADCOM_COB
+ *
+ * 2. PROJECTS
+ * If you want to add a feature for one project only, add it in the
+ * 'PROJECTS' part below.
+ *
+ * 3. Region code
+ * If you want to add a feature for models of one region only, use the
+ * region codes below.
+ * - 100 : EUR OPEN
+ * - 101 : EUR ORG
+ * - 200 : KOR OPEN
+ * - 201 : KOR SKT
+ * - 202 : KOR KTT
+ * - 203 : KOR LGT
+ * - 300 : CHN OPEN
+ * - 400 : USA OPEN
+ * - 401 : USA ATT
+ * - 402 : USA TMO
+ * - 403 : USA VZW
+ * - 404 : USA SPR
+ * - 405 : USA USC
+ * The 'REGION CODE' part below this file shows how they are used, and you
+ * can add more region codes there as well.
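+ *
+ * Example (illustrative feature name only): to turn a feature on just for
+ * KOR SKT builds, you would add in the 'REGION CODE' part:
+ *	#if (CONFIG_WLAN_REGION_CODE == 201)
+ *	#define MY_SKT_ONLY_FEATURE
+ *	#endif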
+ */ + +#ifndef _dhd_sec_feature_h_ +#define _dhd_sec_feature_h_ + +#include + +/* For COB type feature */ +#ifdef CONFIG_WIFI_BROADCOM_COB +#undef USE_CID_CHECK +#define READ_MACADDR +#endif /* CONFIG_WIFI_BROADCOM_COB */ + +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_ARCH_MSM8994) || \ + defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) +#define SUPPORT_MULTIPLE_MODULE_CIS +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_ARCH_MSM8994 || + * CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8890 + */ + +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) +#define SUPPORT_BCM4359_MIXED_MODULES +#endif /* CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8890 */ + +#if defined(CONFIG_ARGOS) +#if defined(CONFIG_SPLIT_ARGOS_SET) +#define ARGOS_IRQ_WIFI_TABLE_LABEL "WIFI TX" +#define ARGOS_WIFI_TABLE_LABEL "WIFI RX" +#else /* CONFIG_SPLIT_ARGOS_SET */ +#define ARGOS_IRQ_WIFI_TABLE_LABEL "WIFI" +#define ARGOS_WIFI_TABLE_LABEL "WIFI" +#endif /* CONFIG_SPLIT_ARGOS_SET */ +#define ARGOS_P2P_TABLE_LABEL "P2P" +#endif /* CONFIG_ARGOS */ + +/* PROJECTS START */ + +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ + defined(CONFIG_SOC_EXYNOS8895) +#undef CUSTOM_SET_CPUCORE +#define PRIMARY_CPUCORE 0 +#define DPC_CPUCORE 4 +#define RXF_CPUCORE 5 +#define TASKLET_CPUCORE 5 +#define ARGOS_CPU_SCHEDULER +#define ARGOS_RPS_CPU_CTL + +#ifdef CONFIG_SOC_EXYNOS8895 +#define ARGOS_DPC_TASKLET_CTL +#endif /* CONFIG_SOC_EXYNOS8895 */ + +#ifdef CONFIG_MACH_UNIVERSAL7420 +#define EXYNOS_PCIE_DEBUG +#endif /* CONFIG_MACH_UNIVERSAL7420 */ +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */ + +#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \ + defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \ + defined(CONFIG_SOC_EXYNOS1000) +#define PCIE_IRQ_CPU_CORE 5 +#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 || defined(CONFIG_SOC_EXYNOS9830 */ + +#if defined(DHD_LB) +#if defined(CONFIG_ARCH_SM8150) || defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA) +#define DHD_LB_PRIMARY_CPUS (0x70) +#define DHD_LB_SECONDARY_CPUS (0x0E) +#elif defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \ + defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \ + defined(CONFIG_SOC_EXYNOS1000) +#define DHD_LB_PRIMARY_CPUS (0x70) +#define DHD_LB_SECONDARY_CPUS (0x0E) +#elif defined(CONFIG_SOC_EXYNOS8890) +/* + * Removed core 6~7 from NAPI CPU mask. + * Exynos 8890 disabled core 6~7 by default. 
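+ * With bits 6-7 dropped, 0x30 maps NAPI processing to cores 4-5, and
+ * 0x0E keeps cores 1-3 as the secondary mask.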
+ */ +#define DHD_LB_PRIMARY_CPUS (0x30) +#define DHD_LB_SECONDARY_CPUS (0x0E) +#elif defined(CONFIG_SOC_EXYNOS8895) +/* using whole big core with NAPI mask */ +#define DHD_LB_PRIMARY_CPUS (0xF0) +#define DHD_LB_SECONDARY_CPUS (0x0E) +#elif defined(CONFIG_ARCH_MSM8998) +#define DHD_LB_PRIMARY_CPUS (0x20) +#define DHD_LB_SECONDARY_CPUS (0x0E) +#elif defined(CONFIG_ARCH_MSM8996) +#define DHD_LB_PRIMARY_CPUS (0x0C) +#define DHD_LB_SECONDARY_CPUS (0x03) +#else /* Default LB masks */ +/* using whole big core with NAPI mask */ +#define DHD_LB_PRIMARY_CPUS (0xF0) +#define DHD_LB_SECONDARY_CPUS (0x0E) +#endif /* CONFIG_SOC_EXYNOS8890 */ +#else /* !DHD_LB */ +#define ARGOS_DPC_TASKLET_CTL +#endif /* !DHD_LB */ + +#if defined(CONFIG_ARCH_MSM) || defined(CONFIG_SOC_EXYNOS8895) || \ + defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \ + defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \ + defined(CONFIG_SOC_EXYNOS1000) +#if defined(CONFIG_BCMDHD_PCIE) +#define BCMPCIE_DISABLE_ASYNC_SUSPEND +#endif /* CONFIG_BCMDHD_PCIE */ +#endif /* CONFIG_ARCH_MSM */ +/* PROJECTS END */ + +/* REGION CODE START */ + +#ifndef CONFIG_WLAN_REGION_CODE +#define CONFIG_WLAN_REGION_CODE 100 +#endif /* CONFIG_WLAN_REGION_CODE */ + +#if (CONFIG_WLAN_REGION_CODE >= 100) && (CONFIG_WLAN_REGION_CODE < 200) /* EUR */ +#if (CONFIG_WLAN_REGION_CODE == 101) /* EUR ORG */ +/* GAN LITE NAT KEEPALIVE FILTER */ +#define GAN_LITE_NAT_KEEPALIVE_FILTER +#endif /* CONFIG_WLAN_REGION_CODE == 101 */ +#if (CONFIG_WLAN_REGION_CODE == 150) /* EUR FD(DualSIM) */ +#define SUPPORT_MULTIPLE_BOARD_REV_FROM_HW +#endif /* CONFIG_WLAN_REGION_CODE == 150 */ +#endif /* CONFIG_WLAN_REGION_CODE >= 100 && CONFIG_WLAN_REGION_CODE < 200 */ + +#if (CONFIG_WLAN_REGION_CODE >= 200) && (CONFIG_WLAN_REGION_CODE < 300) /* KOR */ +#undef USE_INITIAL_2G_SCAN +#ifndef ROAM_ENABLE +#define ROAM_ENABLE +#endif /* ROAM_ENABLE */ +#ifndef ROAM_API +#define ROAM_API +#endif /* ROAM_API */ +#ifndef ROAM_CHANNEL_CACHE +#define ROAM_CHANNEL_CACHE +#endif /* ROAM_CHANNEL_CACHE */ +#ifndef OKC_SUPPORT +#define OKC_SUPPORT +#endif /* OKC_SUPPORT */ + +#ifndef ROAM_AP_ENV_DETECTION +#define ROAM_AP_ENV_DETECTION +#endif /* ROAM_AP_ENV_DETECTION */ + +#undef WRITE_MACADDR +#ifndef READ_MACADDR +#define READ_MACADDR +#endif /* READ_MACADDR */ +#endif /* CONFIG_WLAN_REGION_CODE >= 200 && CONFIG_WLAN_REGION_CODE < 300 */ + +#if (CONFIG_WLAN_REGION_CODE >= 300) && (CONFIG_WLAN_REGION_CODE < 400) /* CHN */ +#define BCMWAPI_WPI +#define BCMWAPI_WAI +#endif /* CONFIG_WLAN_REGION_CODE >= 300 && CONFIG_WLAN_REGION_CODE < 400 */ + +#if (CONFIG_WLAN_REGION_CODE == 500) /* JP */ +#if defined(BCM4375_CHIP) +#define DISABLE_HE_ENAB +#endif /* BCM4375_CHIP */ +#endif /* CONFIG_WLAN_REGION_CODE == 500 */ + +/* REGION CODE END */ + +#if !defined(READ_MACADDR) && !defined(WRITE_MACADDR) +#define GET_MAC_FROM_OTP +#define SHOW_NVRAM_TYPE +#endif /* !READ_MACADDR && !WRITE_MACADDR */ + +#define WRITE_WLANINFO + +#endif /* _dhd_sec_feature_h_ */ diff --git a/bcmdhd.101.10.361.x/dhd_static_buf.c b/bcmdhd.101.10.361.x/dhd_static_buf.c new file mode 100755 index 0000000..4ff7e04 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_static_buf.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 Amlogic, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DHD_STATIC_VERSION_STR "101.10.361.16 (wlan=r892223-20220401-1)" +#define STATIC_ERROR_LEVEL (1 << 0) +#define STATIC_TRACE_LEVEL (1 << 1) +#define STATIC_MSG_LEVEL (1 << 0) +uint static_msg_level = STATIC_ERROR_LEVEL | STATIC_MSG_LEVEL; + +#define DHD_STATIC_MSG(x, args...) \ + do { \ + if (static_msg_level & STATIC_MSG_LEVEL) { \ + pr_err("[dhd] STATIC-MSG) %s : " x, __func__, ## args); \ + } \ + } while (0) +#define DHD_STATIC_ERROR(x, args...) \ + do { \ + if (static_msg_level & STATIC_ERROR_LEVEL) { \ + pr_err("[dhd] STATIC-ERROR) %s : " x, __func__, ## args); \ + } \ + } while (0) +#define DHD_STATIC_TRACE(x, args...) \ + do { \ + if (static_msg_level & STATIC_TRACE_LEVEL) { \ + pr_err("[dhd] STATIC-TRACE) %s : " x, __func__, ## args); \ + } \ + } while (0) + +#define BCMDHD_SDIO +#define BCMDHD_PCIE +//#define BCMDHD_USB +#define CONFIG_BCMDHD_VTS := y +#define CONFIG_BCMDHD_DEBUG := y +//#define BCMDHD_UNUSE_MEM + +#ifndef MAX_NUM_ADAPTERS +#define MAX_NUM_ADAPTERS 1 +#endif + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, +#if defined(BCMDHD_SDIO) + DHD_PREALLOC_RXBUF = 1, + DHD_PREALLOC_DATABUF = 2, +#endif /* BCMDHD_SDIO */ + DHD_PREALLOC_OSL_BUF = 3, + DHD_PREALLOC_SKB_BUF = 4, + DHD_PREALLOC_WIPHY_ESCAN0 = 5, + DHD_PREALLOC_WIPHY_ESCAN1 = 6, + DHD_PREALLOC_DHD_INFO = 7, +#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB) + DHD_PREALLOC_DHD_WLFC_INFO = 8, +#endif /* BCMDHD_SDIO | BCMDHD_USB */ +#ifdef BCMDHD_PCIE + DHD_PREALLOC_IF_FLOW_LKUP = 9, +#endif /* BCMDHD_PCIE */ + DHD_PREALLOC_MEMDUMP_BUF = 10, +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) + DHD_PREALLOC_MEMDUMP_RAM = 11, +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ +#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB) + DHD_PREALLOC_DHD_WLFC_HANGER = 12, +#endif /* BCMDHD_SDIO | BCMDHD_USB */ + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_STAT_REPORT_BUF = 18, + DHD_PREALLOC_WL_ESCAN = 19, + DHD_PREALLOC_FW_VERBOSE_RING = 20, + DHD_PREALLOC_FW_EVENT_RING = 21, + DHD_PREALLOC_DHD_EVENT_RING = 22, +#if defined(BCMDHD_UNUSE_MEM) + DHD_PREALLOC_NAN_EVENT_RING = 23, +#endif /* BCMDHD_UNUSE_MEM */ + DHD_PREALLOC_MAX +}; + +#define STATIC_BUF_MAX_NUM 20 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) + +#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB +#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */ +#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */ + +#define DHD_PREALLOC_PROT_SIZE (16 * 1024) +#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024) +#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024) +#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) +#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024) +#define DHD_PREALLOC_DHD_INFO_SIZE (36 * 1024) +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) +#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1290 * 1024) +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ +#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024) +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) +#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE (8 * 1024) +#endif /* CONFIG_BCMDHD_VTS | 
CONFIG_BCMDHD_DEBUG */ +#define DHD_PREALLOC_WL_ESCAN_SIZE (70 * 1024) +#ifdef CONFIG_64BIT +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2) +#else +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif +#define FW_VERBOSE_RING_SIZE (256 * 1024) +#define FW_EVENT_RING_SIZE (64 * 1024) +#define DHD_EVENT_RING_SIZE (64 * 1024) +#define NAN_EVENT_RING_SIZE (64 * 1024) + +#if defined(CONFIG_64BIT) +#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) +#else +#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif /* CONFIG_64BIT */ +#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024) + +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#ifdef BCMDHD_PCIE +#define DHD_SKB_1PAGE_BUF_NUM 0 +#define DHD_SKB_2PAGE_BUF_NUM 192 +#elif defined(BCMDHD_SDIO) +#define DHD_SKB_1PAGE_BUF_NUM 8 +#define DHD_SKB_2PAGE_BUF_NUM 8 +#endif /* BCMDHD_PCIE */ +#define DHD_SKB_4PAGE_BUF_NUM 1 + +/* The number is defined in linux_osl.c + * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM + * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM + */ +#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE) +#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ + (DHD_SKB_2PAGE_BUF_NUM)) +#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM)) +#endif + +void *wlan_static_prot[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_rxbuf [MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_databuf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_osl_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_scan_buf0[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_scan_buf1[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_dhd_info_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_dhd_wlfc_info_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_if_flow_lkup[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_dhd_memdump_ram_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_dhd_wlfc_hanger_buf[MAX_NUM_ADAPTERS] = {NULL}; +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) +void *wlan_static_dhd_log_dump_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_dhd_log_dump_buf_ex[MAX_NUM_ADAPTERS] = {NULL}; +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ +void *wlan_static_wl_escan_info_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_fw_verbose_ring_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_fw_event_ring_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_dhd_event_ring_buf[MAX_NUM_ADAPTERS] = {NULL}; +void *wlan_static_nan_event_ring_buf[MAX_NUM_ADAPTERS] = {NULL}; + +#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE) +static struct sk_buff *wlan_static_skb[MAX_NUM_ADAPTERS][WLAN_SKB_BUF_NUM]; +#endif /* BCMDHD_SDIO | BCMDHD_PCIE */ + +void * +bcmdhd_mem_prealloc( +#ifdef BCMDHD_MDRIVER + uint bus_type, int index, +#endif + int section, unsigned long size) +{ +#ifndef BCMDHD_MDRIVER + int index = 0; +#endif + +#ifdef BCMDHD_MDRIVER + DHD_STATIC_MSG("bus_type %d, index %d, sectoin %d, size %ld\n", + bus_type, index, section, size); +#else + DHD_STATIC_MSG("sectoin %d, size %ld\n", section, size); +#endif + + if (section == DHD_PREALLOC_PROT) + return wlan_static_prot[index]; + +#if defined(BCMDHD_SDIO) + if (section == DHD_PREALLOC_RXBUF) + return wlan_static_rxbuf[index]; + + if (section == DHD_PREALLOC_DATABUF) + return wlan_static_databuf[index]; +#endif /* 
BCMDHD_SDIO */ + +#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE) + if (section == DHD_PREALLOC_SKB_BUF) + return wlan_static_skb[index]; +#endif /* BCMDHD_SDIO | BCMDHD_PCIE */ + + if (section == DHD_PREALLOC_WIPHY_ESCAN0) + return wlan_static_scan_buf0[index]; + + if (section == DHD_PREALLOC_WIPHY_ESCAN1) + return wlan_static_scan_buf1[index]; + + if (section == DHD_PREALLOC_OSL_BUF) { + if (size > DHD_PREALLOC_OSL_BUF_SIZE) { + DHD_STATIC_ERROR("request OSL_BUF(%lu) > %ld\n", + size, DHD_PREALLOC_OSL_BUF_SIZE); + return NULL; + } + return wlan_static_osl_buf[index]; + } + + if (section == DHD_PREALLOC_DHD_INFO) { + if (size > DHD_PREALLOC_DHD_INFO_SIZE) { + DHD_STATIC_ERROR("request DHD_INFO(%lu) > %d\n", + size, DHD_PREALLOC_DHD_INFO_SIZE); + return NULL; + } + return wlan_static_dhd_info_buf[index]; + } +#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB) + if (section == DHD_PREALLOC_DHD_WLFC_INFO) { + if (size > WLAN_DHD_WLFC_BUF_SIZE) { + DHD_STATIC_ERROR("request DHD_WLFC_INFO(%lu) > %d\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_info_buf[index]; + } +#endif /* BCMDHD_SDIO | BCMDHD_USB */ +#ifdef BCMDHD_PCIE + if (section == DHD_PREALLOC_IF_FLOW_LKUP) { + if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) { + DHD_STATIC_ERROR("request DHD_IF_FLOW_LKUP(%lu) > %d\n", + size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); + return NULL; + } + return wlan_static_if_flow_lkup[index]; + } +#endif /* BCMDHD_PCIE */ +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) + if (section == DHD_PREALLOC_MEMDUMP_RAM) { + if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) { + DHD_STATIC_ERROR("request DHD_PREALLOC_MEMDUMP_RAM(%lu) > %d\n", + size, DHD_PREALLOC_MEMDUMP_RAM_SIZE); + return NULL; + } + return wlan_static_dhd_memdump_ram_buf[index]; + } +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ +#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB) + if (section == DHD_PREALLOC_DHD_WLFC_HANGER) { + if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) { + DHD_STATIC_ERROR("request DHD_WLFC_HANGER(%lu) > %d\n", + size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_hanger_buf[index]; + } +#endif /* BCMDHD_SDIO | BCMDHD_USB */ +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) + if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF) { + if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE) { + DHD_STATIC_ERROR("request DHD_PREALLOC_DHD_LOG_DUMP_BUF(%lu) > %d\n", + size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf[index]; + } + if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX) { + if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE) { + DHD_STATIC_ERROR("request DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX(%lu) > %d\n", + size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf_ex[index]; + } +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ + if (section == DHD_PREALLOC_WL_ESCAN) { + if (size > DHD_PREALLOC_WL_ESCAN_SIZE) { + DHD_STATIC_ERROR("request DHD_PREALLOC_WL_ESCAN(%lu) > %d\n", + size, DHD_PREALLOC_WL_ESCAN_SIZE); + return NULL; + } + return wlan_static_wl_escan_info_buf[index]; + } + if (section == DHD_PREALLOC_FW_VERBOSE_RING) { + if (size > FW_VERBOSE_RING_SIZE) { + DHD_STATIC_ERROR("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n", + size, FW_VERBOSE_RING_SIZE); + return NULL; + } + return wlan_static_fw_verbose_ring_buf[index]; + } + if (section == DHD_PREALLOC_FW_EVENT_RING) { + if (size > FW_EVENT_RING_SIZE) { + DHD_STATIC_ERROR("request 
DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n", + size, FW_EVENT_RING_SIZE); + return NULL; + } + return wlan_static_fw_event_ring_buf[index]; + } + if (section == DHD_PREALLOC_DHD_EVENT_RING) { + if (size > DHD_EVENT_RING_SIZE) { + DHD_STATIC_ERROR("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n", + size, DHD_EVENT_RING_SIZE); + return NULL; + } + return wlan_static_dhd_event_ring_buf[index]; + } +#if defined(BCMDHD_UNUSE_MEM) + if (section == DHD_PREALLOC_NAN_EVENT_RING) { + if (size > NAN_EVENT_RING_SIZE) { + DHD_STATIC_ERROR("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n", + size, NAN_EVENT_RING_SIZE); + return NULL; + } + return wlan_static_nan_event_ring_buf[index]; + } +#endif /* BCMDHD_UNUSE_MEM */ + if ((section < 0) || (section > DHD_PREALLOC_MAX)) + DHD_STATIC_ERROR("request section id(%d) is out of max index %d\n", + section, DHD_PREALLOC_MAX); + + DHD_STATIC_ERROR("failed to alloc section %d, size=%ld\n", + section, size); + + return NULL; +} +EXPORT_SYMBOL(bcmdhd_mem_prealloc); + +static void +dhd_deinit_wlan_mem(int index) +{ +#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE) + int i; +#endif /* BCMDHD_SDIO | BCMDHD_PCIE */ + + if (wlan_static_prot[index]) + kfree(wlan_static_prot[index]); +#if defined(BCMDHD_SDIO) + if (wlan_static_rxbuf[index]) + kfree(wlan_static_rxbuf[index]); + if (wlan_static_databuf[index]) + kfree(wlan_static_databuf[index]); +#endif /* BCMDHD_SDIO */ + if (wlan_static_osl_buf[index]) + kfree(wlan_static_osl_buf[index]); + if (wlan_static_scan_buf0[index]) + kfree(wlan_static_scan_buf0[index]); + if (wlan_static_scan_buf1[index]) + kfree(wlan_static_scan_buf1[index]); + if (wlan_static_dhd_info_buf[index]) + kfree(wlan_static_dhd_info_buf[index]); +#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB) + if (wlan_static_dhd_wlfc_info_buf[index]) + kfree(wlan_static_dhd_wlfc_info_buf[index]); +#endif /* BCMDHD_SDIO | BCMDHD_USB */ +#ifdef BCMDHD_PCIE + if (wlan_static_if_flow_lkup[index]) + kfree(wlan_static_if_flow_lkup[index]); +#endif /* BCMDHD_PCIE */ +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) + if (wlan_static_dhd_memdump_ram_buf[index]) + kfree(wlan_static_dhd_memdump_ram_buf[index]); +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ +#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB) + if (wlan_static_dhd_wlfc_hanger_buf[index]) + kfree(wlan_static_dhd_wlfc_hanger_buf[index]); +#endif /* BCMDHD_SDIO | BCMDHD_USB */ +#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG) + if (wlan_static_dhd_log_dump_buf[index]) + kfree(wlan_static_dhd_log_dump_buf[index]); + if (wlan_static_dhd_log_dump_buf_ex[index]) + kfree(wlan_static_dhd_log_dump_buf_ex[index]); +#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */ + if (wlan_static_wl_escan_info_buf[index]) + kfree(wlan_static_wl_escan_info_buf[index]); + if (wlan_static_fw_verbose_ring_buf[index]) + kfree(wlan_static_fw_verbose_ring_buf[index]); + if (wlan_static_fw_event_ring_buf[index]) + kfree(wlan_static_fw_event_ring_buf[index]); + if (wlan_static_dhd_event_ring_buf[index]) + kfree(wlan_static_dhd_event_ring_buf[index]); +#if defined(BCMDHD_UNUSE_MEM) + if (wlan_static_nan_event_ring_buf[index]) + kfree(wlan_static_nan_event_ring_buf[index]); +#endif /* BCMDHD_UNUSE_MEM */ + +#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE) + for (i=0; i> + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef DHD_STATUS_LOGGING + +#define DHD_STATLOG_ERR_INTERNAL(fmt, ...) 
DHD_ERROR(("STATLOG-" fmt, ##__VA_ARGS__)) +#define DHD_STATLOG_INFO_INTERNAL(fmt, ...) DHD_INFO(("STATLOG-" fmt, ##__VA_ARGS__)) + +#define DHD_STATLOG_PRINT(x) DHD_ERROR(x) +#define DHD_STATLOG_ERR(x) DHD_STATLOG_ERR_INTERNAL x +#define DHD_STATLOG_INFO(x) DHD_STATLOG_INFO_INTERNAL x +#define DHD_STATLOG_VALID(stat) (((stat) > (ST(INVALID))) && ((stat) < (ST(MAX)))) + +dhd_statlog_handle_t * +dhd_attach_statlog(dhd_pub_t *dhdp, uint32 num_items, uint32 bdlog_num_items, uint32 logbuf_len) +{ + dhd_statlog_t *statlog = NULL; + void *buf = NULL; + + if (!dhdp) { + DHD_STATLOG_ERR(("%s: dhdp is NULL\n", __FUNCTION__)); + return NULL; + } + + statlog = (dhd_statlog_t *)VMALLOCZ(dhdp->osh, sizeof(dhd_statlog_t)); + if (!statlog) { + DHD_STATLOG_ERR(("%s: failed to allocate memory for dhd_statlog_t\n", + __FUNCTION__)); + return NULL; + } + + /* allocate log buffer */ + statlog->logbuf = (uint8 *)VMALLOCZ(dhdp->osh, logbuf_len); + if (!statlog->logbuf) { + DHD_STATLOG_ERR(("%s: failed to alloc log buffer\n", __FUNCTION__)); + goto error; + } + statlog->logbuf_len = logbuf_len; + + /* alloc ring buffer */ + statlog->bufsize = (uint32)(dhd_ring_get_hdr_size() + + DHD_STATLOG_RING_SIZE(num_items)); + buf = VMALLOCZ(dhdp->osh, statlog->bufsize); + if (!buf) { + DHD_STATLOG_ERR(("%s: failed to allocate memory for ring buffer\n", + __FUNCTION__)); + goto error; + } + + statlog->ringbuf = dhd_ring_init(dhdp, buf, statlog->bufsize, + DHD_STATLOG_ITEM_SIZE, num_items, DHD_RING_TYPE_SINGLE_IDX); + if (!statlog->ringbuf) { + DHD_STATLOG_ERR(("%s: failed to init ring buffer\n", __FUNCTION__)); + VMFREE(dhdp->osh, buf, statlog->bufsize); + goto error; + } + + /* alloc ring buffer for bigdata logging */ + statlog->bdlog_bufsize = (uint32)(dhd_ring_get_hdr_size() + + DHD_STATLOG_RING_SIZE(bdlog_num_items)); + buf = VMALLOCZ(dhdp->osh, statlog->bdlog_bufsize); + if (!buf) { + DHD_STATLOG_ERR(("%s: failed to allocate memory for bigdata logging buffer\n", + __FUNCTION__)); + goto error; + } + + statlog->bdlog_ringbuf = dhd_ring_init(dhdp, buf, statlog->bdlog_bufsize, + DHD_STATLOG_ITEM_SIZE, bdlog_num_items, DHD_RING_TYPE_SINGLE_IDX); + if (!statlog->bdlog_ringbuf) { + DHD_STATLOG_ERR(("%s: failed to init ring buffer for bigdata logging\n", + __FUNCTION__)); + VMFREE(dhdp->osh, buf, statlog->bdlog_bufsize); + goto error; + } + + return (dhd_statlog_handle_t *)statlog; + +error: + if (statlog->logbuf) { + VMFREE(dhdp->osh, statlog->logbuf, logbuf_len); + } + + if (statlog->ringbuf) { + dhd_ring_deinit(dhdp, statlog->ringbuf); + VMFREE(dhdp->osh, statlog->ringbuf, statlog->bufsize); + } + + if (statlog->bdlog_ringbuf) { + dhd_ring_deinit(dhdp, statlog->bdlog_ringbuf); + VMFREE(dhdp->osh, statlog->bdlog_ringbuf, statlog->bdlog_bufsize); + } + + if (statlog) { + VMFREE(dhdp->osh, statlog, sizeof(dhd_statlog_t)); + } + + return NULL; +} + +void +dhd_detach_statlog(dhd_pub_t *dhdp) +{ + dhd_statlog_t *statlog; + + if (!dhdp) { + DHD_STATLOG_ERR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + if (!dhdp->statlog) { + DHD_STATLOG_ERR(("%s: statlog is NULL\n", __FUNCTION__)); + return; + } + + statlog = (dhd_statlog_t *)(dhdp->statlog); + + if (statlog->bdlog_ringbuf) { + dhd_ring_deinit(dhdp, statlog->bdlog_ringbuf); + VMFREE(dhdp->osh, statlog->bdlog_ringbuf, statlog->bdlog_bufsize); + } + + if (statlog->ringbuf) { + dhd_ring_deinit(dhdp, statlog->ringbuf); + VMFREE(dhdp->osh, statlog->ringbuf, statlog->bufsize); + } + + if (statlog->logbuf) { + VMFREE(dhdp->osh, statlog->logbuf, statlog->logbuf_len); + } + + 
VMFREE(dhdp->osh, statlog, sizeof(dhd_statlog_t)); + dhdp->statlog = NULL; +} + +static int +dhd_statlog_ring_log(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx, uint8 dir, + uint16 status, uint16 reason) +{ + dhd_statlog_t *statlog; + stat_elem_t *elem; + + if (!dhdp || !dhdp->statlog) { + DHD_STATLOG_ERR(("%s: dhdp or dhdp->statlog is NULL\n", + __FUNCTION__)); + return BCME_ERROR; + } + + if (ifidx >= DHD_MAX_IFS) { + DHD_STATLOG_ERR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx)); + return BCME_ERROR; + } + + if (!DHD_STATLOG_VALID(stat)) { + DHD_STATLOG_ERR(("%s: invalid stat %d\n", __FUNCTION__, stat)); + return BCME_ERROR; + } + + statlog = (dhd_statlog_t *)(dhdp->statlog); + elem = (stat_elem_t *)dhd_ring_get_empty(statlog->ringbuf); + if (!elem) { + /* no available slot */ + DHD_STATLOG_ERR(("%s: cannot allocate a new element\n", + __FUNCTION__)); + return BCME_ERROR; + } + + elem->ts_tz = OSL_SYSTZTIME_US(); + elem->ts = OSL_LOCALTIME_NS(); + elem->stat = stat; + elem->ifidx = ifidx; + elem->dir = dir; + elem->reason = reason; + elem->status = status; + + /* Logging for the bigdata */ + if (isset(statlog->bdmask, stat)) { + stat_elem_t *elem_bd; + elem_bd = (stat_elem_t *)dhd_ring_get_empty(statlog->bdlog_ringbuf); + if (!elem_bd) { + /* no available slot */ + DHD_STATLOG_ERR(("%s: cannot allocate a new element for bigdata\n", + __FUNCTION__)); + return BCME_ERROR; + } + bcopy(elem, elem_bd, sizeof(stat_elem_t)); + } + + return BCME_OK; +} + +int +dhd_statlog_ring_log_data(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx, + uint8 dir, bool cond) +{ + return cond ? dhd_statlog_ring_log(dhdp, stat, ifidx, + dir ? STDIR(TX) : STDIR(RX), 0, 0) : BCME_OK; +} + +int +dhd_statlog_ring_log_data_reason(dhd_pub_t *dhdp, uint16 stat, + uint8 ifidx, uint8 dir, uint16 reason) +{ + return dhd_statlog_ring_log(dhdp, stat, ifidx, + dir ? 
STDIR(TX) : STDIR(RX), 0, reason); +} + +int +dhd_statlog_ring_log_ctrl(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx, uint16 reason) +{ + return dhd_statlog_ring_log(dhdp, stat, ifidx, ST(DIR_TX), 0, reason); +} + +int +dhd_statlog_process_event(dhd_pub_t *dhdp, int type, uint8 ifidx, + uint16 status, uint16 reason, uint16 flags) +{ + int stat = ST(INVALID); + uint8 dir = STDIR(RX); + + if (!dhdp || !dhdp->statlog) { + DHD_STATLOG_ERR(("%s: dhdp or dhdp->statlog is NULL\n", + __FUNCTION__)); + return BCME_ERROR; + } + + switch (type) { + case WLC_E_SET_SSID: + if (status == WLC_E_STATUS_SUCCESS) { + stat = ST(ASSOC_DONE); + } else if (status == WLC_E_STATUS_TIMEOUT) { + stat = ST(ASSOC_TIMEOUT); + } else if (status == WLC_E_STATUS_FAIL) { + stat = ST(ASSOC_FAIL); + } else if (status == WLC_E_STATUS_NO_ACK) { + stat = ST(ASSOC_NO_ACK); + } else if (status == WLC_E_STATUS_ABORT) { + stat = ST(ASSOC_ABORT); + } else if (status == WLC_E_STATUS_UNSOLICITED) { + stat = ST(ASSOC_UNSOLICITED); + } else if (status == WLC_E_STATUS_NO_NETWORKS) { + stat = ST(ASSOC_NO_NETWORKS); + } else { + stat = ST(ASSOC_OTHERS); + } + break; + case WLC_E_AUTH: + if (status == WLC_E_STATUS_SUCCESS) { + stat = ST(AUTH_DONE); + } else if (status == WLC_E_STATUS_TIMEOUT) { + stat = ST(AUTH_TIMEOUT); + } else if (status == WLC_E_STATUS_FAIL) { + stat = ST(AUTH_FAIL); + } else if (status == WLC_E_STATUS_NO_ACK) { + stat = ST(AUTH_NO_ACK); + } else { + stat = ST(AUTH_OTHERS); + } + dir = STDIR(TX); + break; + case WLC_E_AUTH_IND: + stat = ST(AUTH_DONE); + break; + case WLC_E_DEAUTH: + stat = ST(DEAUTH); + dir = STDIR(TX); + break; + case WLC_E_DEAUTH_IND: + stat = ST(DEAUTH); + break; + case WLC_E_DISASSOC: + stat = ST(DISASSOC); + dir = STDIR(TX); + break; + case WLC_E_LINK: + if (!(flags & WLC_EVENT_MSG_LINK)) { + stat = ST(LINKDOWN); + } + break; + case WLC_E_ROAM_PREP: + stat = ST(REASSOC_START); + break; + case WLC_E_ASSOC_REQ_IE: + stat = ST(ASSOC_REQ); + dir = STDIR(TX); + break; + case WLC_E_ASSOC_RESP_IE: + stat = ST(ASSOC_RESP); + break; + case WLC_E_BSSID: + if (status == WLC_E_STATUS_SUCCESS) { + stat = ST(REASSOC_DONE); + } else { + stat = ST(REASSOC_DONE_OTHERS); + } + break; + case WLC_E_REASSOC: + if (status == WLC_E_STATUS_SUCCESS) { + stat = ST(REASSOC_SUCCESS); + } else { + stat = ST(REASSOC_FAILURE); + } + dir = STDIR(TX); + break; + case WLC_E_ASSOC_IND: + stat = ST(ASSOC_REQ); + break; + default: + break; + } + + /* logging interested events */ + if (DHD_STATLOG_VALID(stat)) { + dhd_statlog_ring_log(dhdp, stat, ifidx, dir, status, reason); + } + + return BCME_OK; +} + +uint32 +dhd_statlog_get_logbuf_len(dhd_pub_t *dhdp) +{ + uint32 length = 0; + dhd_statlog_t *statlog; + + if (dhdp && dhdp->statlog) { + statlog = (dhd_statlog_t *)(dhdp->statlog); + length = statlog->logbuf_len; + } + + return length; +} + +void * +dhd_statlog_get_logbuf(dhd_pub_t *dhdp) +{ + dhd_statlog_t *statlog; + void *ret_addr = NULL; + + if (dhdp && dhdp->statlog) { + statlog = (dhd_statlog_t *)(dhdp->statlog); + ret_addr = (void *)(statlog->logbuf); + } + + return ret_addr; +} + +/* + * called function uses buflen as the DHD_STATLOG_STATSTR_BUF_LEN max. 
+ * So when adding a case, make sure the string is less than + * the DHD_STATLOG_STATSTR_BUF_LEN bytes + */ +static void +dhd_statlog_stat_name(char *buf, uint32 buflen, uint32 state, uint8 dir) +{ + char *stat_str = NULL; + bool tx = (dir == STDIR(TX)); + uint32 max_buf_len = MIN(buflen, DHD_STATLOG_STATSTR_BUF_LEN); + + switch (state) { + case ST(INVALID): + stat_str = "INVALID_STATE"; + break; + case ST(WLAN_POWER_ON): + stat_str = "WLAN_POWER_ON"; + break; + case ST(WLAN_POWER_OFF): + stat_str = "WLAN_POWER_OFF"; + break; + case ST(ASSOC_START): + stat_str = "ASSOC_START"; + break; + case ST(AUTH_DONE): + stat_str = "AUTH_DONE"; + break; + case ST(ASSOC_REQ): + stat_str = tx ? "ASSOC_REQ" : "RX_ASSOC_REQ"; + break; + case ST(ASSOC_RESP): + stat_str = "ASSOC_RESP"; + break; + case ST(ASSOC_DONE): + stat_str = "ASSOC_DONE"; + break; + case ST(DISASSOC_START): + stat_str = "DISASSOC_START"; + break; + case ST(DISASSOC_INT_START): + stat_str = "DISASSOC_INTERNAL_START"; + break; + case ST(DISASSOC_DONE): + stat_str = "DISASSOC_DONE"; + break; + case ST(DISASSOC): + stat_str = tx ? "DISASSOC_EVENT" : "DISASSOC_IND_EVENT"; + break; + case ST(DEAUTH): + stat_str = tx ? "DEAUTH_EVENT" : "DEAUTH_IND_EVENT"; + break; + case ST(LINKDOWN): + stat_str = "LINKDOWN_EVENT"; + break; + case ST(REASSOC_START): + stat_str = "REASSOC_START"; + break; + case ST(REASSOC_INFORM): + stat_str = "REASSOC_INFORM"; + break; + case ST(REASSOC_DONE): + stat_str = "REASSOC_DONE_SUCCESS"; + break; + case ST(EAPOL_M1): + stat_str = tx ? "TX_EAPOL_M1" : "RX_EAPOL_M1"; + break; + case ST(EAPOL_M2): + stat_str = tx ? "TX_EAPOL_M2" : "RX_EAPOL_M2"; + break; + case ST(EAPOL_M3): + stat_str = tx ? "TX_EAPOL_M3" : "RX_EAPOL_M3"; + break; + case ST(EAPOL_M4): + stat_str = tx ? "TX_EAPOL_M4" : "RX_EAPOL_M4"; + break; + case ST(EAPOL_GROUPKEY_M1): + stat_str = tx ? "TX_EAPOL_GROUPKEY_M1" : "RX_EAPOL_GROUPKEY_M1"; + break; + case ST(EAPOL_GROUPKEY_M2): + stat_str = tx ? "TX_EAPOL_GROUPKEY_M2" : "RX_EAPOL_GROUPKEY_M2"; + break; + case ST(EAP_REQ_IDENTITY): + stat_str = tx ? "TX_EAP_REQ_IDENTITY" : "RX_EAP_REQ_IDENTITY"; + break; + case ST(EAP_RESP_IDENTITY): + stat_str = tx ? "TX_EAP_RESP_IDENTITY" : "RX_EAP_RESP_IDENTITY"; + break; + case ST(EAP_REQ_TLS): + stat_str = tx ? "TX_EAP_REQ_TLS" : "RX_EAP_REQ_TLS"; + break; + case ST(EAP_RESP_TLS): + stat_str = tx ? "TX_EAP_RESP_TLS" : "RX_EAP_RESP_TLS"; + break; + case ST(EAP_REQ_LEAP): + stat_str = tx ? "TX_EAP_REQ_LEAP" : "RX_EAP_REQ_LEAP"; + break; + case ST(EAP_RESP_LEAP): + stat_str = tx ? "TX_EAP_RESP_LEAP" : "RX_EAP_RESP_LEAP"; + break; + case ST(EAP_REQ_TTLS): + stat_str = tx ? "TX_EAP_REQ_TTLS" : "RX_EAP_REQ_TTLS"; + break; + case ST(EAP_RESP_TTLS): + stat_str = tx ? "TX_EAP_RESP_TTLS" : "RX_EAP_RESP_TTLS"; + break; + case ST(EAP_REQ_AKA): + stat_str = tx ? "TX_EAP_REQ_AKA" : "RX_EAP_REQ_AKA"; + break; + case ST(EAP_RESP_AKA): + stat_str = tx ? "TX_EAP_RESP_AKA" : "RX_EAP_RESP_AKA"; + break; + case ST(EAP_REQ_PEAP): + stat_str = tx ? "TX_EAP_REQ_PEAP" : "RX_EAP_REQ_PEAP"; + break; + case ST(EAP_RESP_PEAP): + stat_str = tx ? "TX_EAP_RESP_PEAP" : "RX_EAP_RESP_PEAP"; + break; + case ST(EAP_REQ_FAST): + stat_str = tx ? "TX_EAP_REQ_FAST" : "RX_EAP_REQ_FAST"; + break; + case ST(EAP_RESP_FAST): + stat_str = tx ? "TX_EAP_RESP_FAST" : "RX_EAP_RESP_FAST"; + break; + case ST(EAP_REQ_PSK): + stat_str = tx ? "TX_EAP_REQ_PSK" : "RX_EAP_REQ_PSK"; + break; + case ST(EAP_RESP_PSK): + stat_str = tx ? "TX_EAP_RESP_PSK" : "RX_EAP_RESP_PSK"; + break; + case ST(EAP_REQ_AKAP): + stat_str = tx ? 
"TX_EAP_REQ_AKAP" : "RX_EAP_REQ_AKAP"; + break; + case ST(EAP_RESP_AKAP): + stat_str = tx ? "TX_EAP_RESP_AKAP" : "RX_EAP_RESP_AKAP"; + break; + case ST(EAP_SUCCESS): + stat_str = tx ? "TX_EAP_SUCCESS" : "RX_EAP_SUCCESS"; + break; + case ST(EAP_FAILURE): + stat_str = tx ? "TX_EAP_FAILURE" : "RX_EAP_FAILURE"; + break; + case ST(EAPOL_START): + stat_str = tx ? "TX_EAPOL_START" : "RX_EAPOL_START"; + break; + case ST(WSC_START): + stat_str = tx ? "TX_WSC_START" : "RX_WSC_START"; + break; + case ST(WSC_DONE): + stat_str = tx ? "TX_WSC_DONE" : "RX_WSC_DONE"; + break; + case ST(WPS_M1): + stat_str = tx ? "TX_WPS_M1" : "RX_WPS_M1"; + break; + case ST(WPS_M2): + stat_str = tx ? "TX_WPS_M2" : "RX_WPS_M2"; + break; + case ST(WPS_M3): + stat_str = tx ? "TX_WPS_M3" : "RX_WPS_M3"; + break; + case ST(WPS_M4): + stat_str = tx ? "TX_WPS_M4" : "RX_WPS_M4"; + break; + case ST(WPS_M5): + stat_str = tx ? "TX_WPS_M5" : "RX_WPS_M5"; + break; + case ST(WPS_M6): + stat_str = tx ? "TX_WPS_M6" : "RX_WPS_M6"; + break; + case ST(WPS_M7): + stat_str = tx ? "TX_WPS_M7" : "RX_WPS_M7"; + break; + case ST(WPS_M8): + stat_str = tx ? "TX_WPS_M8" : "RX_WPS_M8"; + break; + case ST(8021X_OTHER): + stat_str = tx ? "TX_OTHER_8021X" : "RX_OTHER_8021X"; + break; + case ST(INSTALL_KEY): + stat_str = "INSTALL_KEY"; + break; + case ST(DELETE_KEY): + stat_str = "DELETE_KEY"; + break; + case ST(INSTALL_PMKSA): + stat_str = "INSTALL_PMKSA"; + break; + case ST(INSTALL_OKC_PMK): + stat_str = "INSTALL_OKC_PMK"; + break; + case ST(DHCP_DISCOVER): + stat_str = tx ? "TX_DHCP_DISCOVER" : "RX_DHCP_DISCOVER"; + break; + case ST(DHCP_OFFER): + stat_str = tx ? "TX_DHCP_OFFER" : "RX_DHCP_OFFER"; + break; + case ST(DHCP_REQUEST): + stat_str = tx ? "TX_DHCP_REQUEST" : "RX_DHCP_REQUEST"; + break; + case ST(DHCP_DECLINE): + stat_str = tx ? "TX_DHCP_DECLINE" : "RX_DHCP_DECLINE"; + break; + case ST(DHCP_ACK): + stat_str = tx ? "TX_DHCP_ACK" : "RX_DHCP_ACK"; + break; + case ST(DHCP_NAK): + stat_str = tx ? "TX_DHCP_NAK" : "RX_DHCP_NAK"; + break; + case ST(DHCP_RELEASE): + stat_str = tx ? "TX_DHCP_RELEASE" : "RX_DHCP_RELEASE"; + break; + case ST(DHCP_INFORM): + stat_str = tx ? "TX_DHCP_INFORM" : "RX_DHCP_INFORM"; + break; + case ST(ICMP_PING_REQ): + stat_str = tx ? "TX_ICMP_PING_REQ" : "RX_ICMP_PING_REQ"; + break; + case ST(ICMP_PING_RESP): + stat_str = tx ? "TX_ICMP_PING_RESP" : "RX_ICMP_PING_RESP"; + break; + case ST(ICMP_DEST_UNREACH): + stat_str = tx ? "TX_ICMP_DEST_UNREACH" : "RX_ICMP_DEST_UNREACH"; + break; + case ST(ICMP_OTHER): + stat_str = tx ? "TX_ICMP_OTHER" : "RX_ICMP_OTHER"; + break; + case ST(ARP_REQ): + stat_str = tx ? "TX_ARP_REQ" : "RX_ARP_REQ"; + break; + case ST(ARP_RESP): + stat_str = tx ? "TX_ARP_RESP" : "RX_ARP_RESP"; + break; + case ST(DNS_QUERY): + stat_str = tx ? "TX_DNS_QUERY" : "RX_DNS_QUERY"; + break; + case ST(DNS_RESP): + stat_str = tx ? 
"TX_DNS_RESP" : "RX_DNS_RESP"; + break; + case ST(REASSOC_SUCCESS): + stat_str = "REASSOC_SUCCESS"; + break; + case ST(REASSOC_FAILURE): + stat_str = "REASSOC_FAILURE"; + break; + case ST(AUTH_TIMEOUT): + stat_str = "AUTH_TIMEOUT"; + break; + case ST(AUTH_FAIL): + stat_str = "AUTH_FAIL"; + break; + case ST(AUTH_NO_ACK): + stat_str = "AUTH_NO_ACK"; + break; + case ST(AUTH_OTHERS): + stat_str = "AUTH_FAIL_OTHER_STATUS"; + break; + case ST(ASSOC_TIMEOUT): + stat_str = "ASSOC_TIMEOUT"; + break; + case ST(ASSOC_FAIL): + stat_str = "ASSOC_FAIL"; + break; + case ST(ASSOC_NO_ACK): + stat_str = "ASSOC_NO_ACK"; + break; + case ST(ASSOC_ABORT): + stat_str = "ASSOC_ABORT"; + break; + case ST(ASSOC_UNSOLICITED): + stat_str = "ASSOC_UNSOLICITED"; + break; + case ST(ASSOC_NO_NETWORKS): + stat_str = "ASSOC_NO_NETWORKS"; + break; + case ST(ASSOC_OTHERS): + stat_str = "ASSOC_FAIL_OTHER_STATUS"; + break; + case ST(REASSOC_DONE_OTHERS): + stat_str = "REASSOC_DONE_OTHER_STATUS"; + break; + default: + stat_str = "UNKNOWN_STATUS"; + break; + } + + strncpy(buf, stat_str, max_buf_len); + buf[max_buf_len - 1] = '\0'; +} + +static void +dhd_statlog_get_timestamp(stat_elem_t *elem, uint64 *sec, uint64 *usec) +{ + uint64 ts_nsec, rem_nsec; + + ts_nsec = elem->ts; + rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC); + *sec = ts_nsec; + *usec = (uint64)(rem_nsec / NSEC_PER_USEC); +} + +static void +dhd_statlog_convert_time(stat_elem_t *elem, uint8 *buf, uint32 buflen) +{ +#if defined(LINUX) || defined(linux) + struct rtc_time tm; + uint64 ts_sec, rem_usec; + + if (!buf) { + DHD_STATLOG_ERR(("%s: buf is NULL\n", __FUNCTION__)); + return; + } + + bzero(buf, buflen); + ts_sec = elem->ts_tz; + rem_usec = DIV_AND_MOD_U64_BY_U32(ts_sec, USEC_PER_SEC); + + rtc_time_to_tm((unsigned long)ts_sec, &tm); + snprintf(buf, buflen, DHD_STATLOG_TZFMT_YYMMDDHHMMSSMS, + tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, + (uint32)(rem_usec / USEC_PER_MSEC)); +#endif /* LINUX || linux */ +} + +#ifdef DHD_LOG_DUMP +static int +dhd_statlog_dump(dhd_statlog_t *statlog, char *buf, uint32 buflen) +{ + stat_elem_t *elem; + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; + char stat_str[DHD_STATLOG_STATSTR_BUF_LEN]; + char ts_str[DHD_STATLOG_TZFMT_BUF_LEN]; + uint64 sec = 0, usec = 0; + + if (!statlog) { + DHD_STATLOG_ERR(("%s: statlog is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + bcm_binit(strbuf, buf, buflen); + bzero(stat_str, sizeof(stat_str)); + bzero(ts_str, sizeof(ts_str)); + dhd_ring_whole_lock(statlog->ringbuf); + elem = (stat_elem_t *)dhd_ring_get_first(statlog->ringbuf); + while (elem) { + if (DHD_STATLOG_VALID(elem->stat)) { + dhd_statlog_stat_name(stat_str, sizeof(stat_str), + elem->stat, elem->dir); + dhd_statlog_get_timestamp(elem, &sec, &usec); + dhd_statlog_convert_time(elem, ts_str, sizeof(ts_str)); + bcm_bprintf(strbuf, "[%s][%5lu.%06lu] status=%s, ifidx=%d, " + "reason=%d, status=%d\n", ts_str, (unsigned long)sec, + (unsigned long)usec, stat_str, elem->ifidx, + elem->reason, elem->status); + } + elem = (stat_elem_t *)dhd_ring_get_next(statlog->ringbuf, (void *)elem); + } + dhd_ring_whole_unlock(statlog->ringbuf); + + return (!strbuf->size ? 
BCME_BUFTOOSHORT : strbuf->size); +} + +int +dhd_statlog_write_logdump(dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, unsigned long *pos) +{ + dhd_statlog_t *statlog; + log_dump_section_hdr_t sec_hdr; + char *buf; + uint32 buflen; + int remain_len = 0; + int ret = BCME_OK; + + if (!dhdp || !dhdp->statlog) { + DHD_STATLOG_ERR(("%s: dhdp or dhdp->statlog is NULL\n", + __FUNCTION__)); + return BCME_ERROR; + } + + statlog = (dhd_statlog_t *)(dhdp->statlog); + if (!statlog->logbuf) { + DHD_STATLOG_ERR(("%s: logbuf is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + buf = statlog->logbuf; + buflen = statlog->logbuf_len; + bzero(buf, buflen); + + remain_len = dhd_statlog_dump(statlog, buf, buflen); + if (remain_len < 0) { + DHD_STATLOG_ERR(("%s: failed to write stat info to buffer\n", + __FUNCTION__)); + return BCME_ERROR; + } + + DHD_STATLOG_INFO(("%s: Start to write statlog\n", __FUNCTION__)); + + /* write the section header first */ + ret = dhd_export_debug_data(STATUS_LOG_HDR, fp, user_buf, + strlen(STATUS_LOG_HDR), pos); + if (ret < 0) { + goto exit; + } + + dhd_init_sec_hdr(&sec_hdr); + sec_hdr.type = LOG_DUMP_SECTION_STATUS; + sec_hdr.length = buflen - remain_len; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, + sizeof(sec_hdr), pos); + if (ret < 0) { + goto exit; + } + + /* write status log info */ + ret = dhd_export_debug_data(buf, fp, user_buf, buflen - remain_len, pos); + if (ret < 0) { + DHD_STATLOG_ERR(("%s: failed to write stat info, err=%d\n", + __FUNCTION__, ret)); + } + + DHD_STATLOG_INFO(("%s: Complete to write statlog file, err=%d\n", + __FUNCTION__, ret)); + +exit: + return ret; +} +#endif /* DHD_LOG_DUMP */ + +int +dhd_statlog_generate_bdmask(dhd_pub_t *dhdp, void *reqbuf) +{ + dhd_statlog_t *statlog; + stat_bdmask_req_t *query; + uint8 *req_buf; + uint32 req_buf_len; + int cnt; + + if (!dhdp || !dhdp->statlog) { + DHD_STATLOG_ERR(("%s: dhdp or statlog is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!reqbuf) { + DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__)); + return BCME_ERROR; + } + + statlog = dhdp->statlog; + query = (stat_bdmask_req_t *)reqbuf; + req_buf = query->req_buf; + req_buf_len = query->req_buf_len; + if (!req_buf) { + DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__)); + return BCME_ERROR; + } + + bzero(statlog->bdmask, DHD_STAT_BDMASK_SIZE); + for (cnt = 0; cnt < req_buf_len; cnt++) { + if (DHD_STATLOG_VALID(req_buf[cnt])) { + setbit(statlog->bdmask, req_buf[cnt]); + } + } + + return BCME_OK; +} + +int +dhd_statlog_get_latest_info(dhd_pub_t *dhdp, void *reqbuf) +{ + dhd_statlog_t *statlog; + stat_query_t *query; + stat_elem_t *elem; + uint8 *req_buf, *resp_buf, *sp; + uint32 req_buf_len, resp_buf_len, req_num; + int i, remain_len, cpcnt = 0; + uint8 filter[DHD_STAT_BDMASK_SIZE]; + bool query_bigdata = FALSE; + void *ringbuf; + + if (!dhdp || !dhdp->statlog) { + DHD_STATLOG_ERR(("%s: dhdp or statlog is NULL\n", + __FUNCTION__)); + return BCME_ERROR; + } + + query = (stat_query_t *)reqbuf; + if (!query) { + DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__)); + return BCME_ERROR; + } + + statlog = (dhd_statlog_t *)(dhdp->statlog); + req_buf = query->req_buf; + req_buf_len = query->req_buf_len; + resp_buf = query->resp_buf; + resp_buf_len = query->resp_buf_len; + req_num = MIN(query->req_num, MAX_STATLOG_REQ_ITEM); + if (!resp_buf) { + DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__)); + return BCME_ERROR; + } + + bzero(filter, sizeof(filter)); + if (!req_buf || !req_buf_len) { + query_bigdata = 
TRUE;
+		ringbuf = statlog->bdlog_ringbuf;
+	} else {
+		ringbuf = statlog->ringbuf;
+		/* build a filter from req_buf */
+		for (i = 0; i < req_buf_len; i++) {
+			if (DHD_STATLOG_VALID(req_buf[i])) {
+				setbit(filter, req_buf[i]);
+			}
+		}
+	}
+
+	sp = resp_buf;
+	remain_len = resp_buf_len;
+	dhd_ring_whole_lock(ringbuf);
+	elem = (stat_elem_t *)dhd_ring_get_last(ringbuf);
+	while (elem) {
+		if (query_bigdata || isset(filter, elem->stat)) {
+			/* found the status from the list of interests */
+			if (remain_len < sizeof(stat_elem_t)) {
+				dhd_ring_whole_unlock(ringbuf);
+				return BCME_BUFTOOSHORT;
+			}
+			bcopy((char *)elem, sp, sizeof(stat_elem_t));
+			sp += sizeof(stat_elem_t);
+			remain_len -= sizeof(stat_elem_t);
+			cpcnt++;
+		}
+
+		if (cpcnt >= req_num) {
+			break;
+		}
+
+		/* Proceed to next item */
+		elem = (stat_elem_t *)dhd_ring_get_prev(ringbuf, (void *)elem);
+	}
+	dhd_ring_whole_unlock(ringbuf);
+
+	return cpcnt;
+}
+
+int
+dhd_statlog_query(dhd_pub_t *dhdp, char *cmd, int total_len)
+{
+	stat_elem_t *elem = NULL;
+	stat_query_t query;
+	char *pos, *token;
+	uint8 *req_buf = NULL, *resp_buf = NULL;
+	uint32 req_buf_len = 0, resp_buf_len = 0;
+	ulong req_num, stat_num, stat;
+	char stat_str[DHD_STATLOG_STATSTR_BUF_LEN];
+	uint64 sec = 0, usec = 0;
+	int i, resp_num, err = BCME_OK;
+	char ts_str[DHD_STATLOG_TZFMT_BUF_LEN];
+
+	/*
+	 * DRIVER QUERY_STAT_LOG <total request num> <stat list num> <stat list>
+	 * Note: use the default status list if the 'stat list num' is zero
+	 */
+	pos = cmd;
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+	/* total number of request */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token) {
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	req_num = bcm_strtoul(token, NULL, 0);
+
+	/* total number of status list */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token) {
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	stat_num = bcm_strtoul(token, NULL, 0);
+	if (stat_num) {
+		/* create a status list */
+		req_buf_len = (uint32)(stat_num * sizeof(uint8));
+		req_buf = (uint8 *)MALLOCZ(dhdp->osh, req_buf_len);
+		if (!req_buf) {
+			DHD_STATLOG_ERR(("%s: failed to allocate request buf\n",
+				__FUNCTION__));
+			err = BCME_NOMEM;
+			goto exit;
+		}
+
+		/* parse the status list and update to the request buffer */
+		for (i = 0; i < (uint32)stat_num; i++) {
+			token = bcmstrtok(&pos, " ", NULL);
+			if (!token) {
+				err = BCME_BADARG;
+				goto exit;
+			}
+			stat = bcm_strtoul(token, NULL, 0);
+			req_buf[i] = (uint8)stat;
+		}
+	}
+
+	/* create a response list */
+	resp_buf_len = (uint32)DHD_STATLOG_RING_SIZE(req_num);
+	resp_buf = (uint8 *)MALLOCZ(dhdp->osh, resp_buf_len);
+	if (!resp_buf) {
+		DHD_STATLOG_ERR(("%s: failed to allocate response buf\n",
+			__FUNCTION__));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+
+	/* create query format and query the status */
+	query.req_buf = req_buf;
+	query.req_buf_len = req_buf_len;
+	query.resp_buf = resp_buf;
+	query.resp_buf_len = resp_buf_len;
+	query.req_num = (uint32)req_num;
+	resp_num = dhd_statlog_get_latest_info(dhdp, (void *)&query);
+	if (resp_num < 0) {
+		DHD_STATLOG_ERR(("%s: failed to query the status\n", __FUNCTION__));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+	/* print out the results */
+	DHD_STATLOG_PRINT(("=============== QUERY RESULT ===============\n"));
+	if (resp_num > 0) {
+		elem = (stat_elem_t *)resp_buf;
+		for (i = 0; i < resp_num; i++) {
+			if (DHD_STATLOG_VALID(elem->stat)) {
+				dhd_statlog_stat_name(stat_str, sizeof(stat_str),
+					elem->stat, elem->dir);
+				dhd_statlog_get_timestamp(elem, &sec, &usec);
+				dhd_statlog_convert_time(elem, ts_str, sizeof(ts_str));
+
DHD_STATLOG_PRINT(("[RAWTS:%llu][%s][%5lu.%06lu] status=%s," + " ifidx=%d, reason=%d, status=%d\n", elem->ts_tz, + ts_str, (unsigned long)sec, (unsigned long)usec, + stat_str, elem->ifidx, elem->reason, elem->status)); + } + elem++; + } + } else { + DHD_STATLOG_PRINT(("No data found\n")); + } + +exit: + if (resp_buf) { + MFREE(dhdp->osh, resp_buf, resp_buf_len); + } + + if (req_buf) { + MFREE(dhdp->osh, req_buf, req_buf_len); + } + + return err; +} + +void +dhd_statlog_dump_scr(dhd_pub_t *dhdp) +{ + dhd_statlog_t *statlog; + stat_elem_t *elem; + char stat_str[DHD_STATLOG_STATSTR_BUF_LEN]; + char ts_str[DHD_STATLOG_TZFMT_BUF_LEN]; + uint64 sec = 0, usec = 0; + + if (!dhdp || !dhdp->statlog) { + DHD_STATLOG_ERR(("%s: dhdp or statlog is NULL\n", __FUNCTION__)); + return; + } + + statlog = (dhd_statlog_t *)(dhdp->statlog); + bzero(stat_str, sizeof(stat_str)); + bzero(ts_str, sizeof(ts_str)); + + DHD_STATLOG_PRINT(("=============== START OF CURRENT STATUS INFO ===============\n")); + dhd_ring_whole_lock(statlog->ringbuf); + elem = (stat_elem_t *)dhd_ring_get_first(statlog->ringbuf); + while (elem) { + if (DHD_STATLOG_VALID(elem->stat)) { + dhd_statlog_stat_name(stat_str, sizeof(stat_str), + elem->stat, elem->dir); + dhd_statlog_get_timestamp(elem, &sec, &usec); + dhd_statlog_convert_time(elem, ts_str, sizeof(ts_str)); + DHD_STATLOG_PRINT(("[RAWTS:%llu][%s][%5lu.%06lu] status=%s," + " ifidx=%d, reason=%d, status=%d\n", elem->ts_tz, ts_str, + (unsigned long)sec, (unsigned long)usec, stat_str, + elem->ifidx, elem->reason, elem->status)); + } + elem = (stat_elem_t *)dhd_ring_get_next(statlog->ringbuf, (void *)elem); + } + dhd_ring_whole_unlock(statlog->ringbuf); + DHD_STATLOG_PRINT(("=============== END OF CURRENT STATUS INFO ===============\n")); +} +#endif /* DHD_STATUS_LOGGING */ diff --git a/bcmdhd.101.10.361.x/dhd_statlog.h b/bcmdhd.101.10.361.x/dhd_statlog.h new file mode 100755 index 0000000..c6dc5cf --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_statlog.h @@ -0,0 +1,221 @@ +/* + * DHD debugability: Header file for the Status Information Logging + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_STATLOG_H_
+#define __DHD_STATLOG_H_
+
+#ifdef DHD_STATUS_LOGGING
+
+/* status element */
+typedef struct stat_elem {
+	uint16 stat;	/* store status */
+	uint64 ts;	/* local timestamp(ns) */
+	uint64 ts_tz;	/* timestamp applied timezone(us) */
+	uint8 ifidx;	/* ifidx */
+	uint8 dir;	/* direction (TX/RX) */
+	uint8 reason;	/* reason code from dongle */
+	uint8 status;	/* status code from dongle */
+	uint8 resv[2];	/* reserved for future use */
+} stat_elem_t;
+
+/* status logging info */
+#define DHD_STAT_BDMASK_SIZE 16
+typedef struct dhd_statlog {
+	uint8 *logbuf;		/* log buffer */
+	uint32 logbuf_len;	/* length of the log buffer */
+	void *ringbuf;		/* fixed ring buffer */
+	uint32 bufsize;		/* size of ring buffer */
+	void *bdlog_ringbuf;	/* fixed ring buffer for bigdata logging */
+	uint32 bdlog_bufsize;	/* size of ring buffer for bigdata logging */
+	uint8 bdmask[DHD_STAT_BDMASK_SIZE];	/* bitmask for bigdata */
+} dhd_statlog_t;
+
+/* status query format */
+typedef struct stat_query {
+	uint8 *req_buf;		/* request buffer to interested status */
+	uint32 req_buf_len;	/* length of the request buffer */
+	uint8 *resp_buf;	/* response buffer */
+	uint32 resp_buf_len;	/* length of the response buffer */
+	uint32 req_num;		/* total number of items to query */
+} stat_query_t;
+
+/* bitmask generation request format */
+typedef struct stat_bdmask_req {
+	uint8 *req_buf;		/* request buffer to generate bitmask */
+	uint32 req_buf_len;	/* length of the request buffer */
+} stat_bdmask_req_t;
+
+typedef void * dhd_statlog_handle_t; /* opaque handle to status log */
+
+/* enums */
+#define ST(x)		STATE_## x
+#define STDIR(x)	STATE_DIR_## x
+
+/* status direction */
+typedef enum stat_log_dir {
+	STDIR(TX) = 1,
+	STDIR(RX) = 2,
+	STDIR(MAX) = 3
+} stat_dir_t;
+
+/* status definition */
+typedef enum stat_log_stat {
+	ST(INVALID) = 0,	/* invalid status */
+	ST(WLAN_POWER_ON) = 1,	/* Wi-Fi Power on */
+	ST(WLAN_POWER_OFF) = 2,	/* Wi-Fi Power off */
+	ST(ASSOC_START) = 3,	/* connect to the AP triggered by upper layer */
+	ST(AUTH_DONE) = 4,	/* complete to authenticate with the AP */
+	ST(ASSOC_REQ) = 5,	/* send or receive Assoc Req */
+	ST(ASSOC_RESP) = 6,	/* send or receive Assoc Resp */
+	ST(ASSOC_DONE) = 7,	/* complete to associate with the AP */
+	ST(DISASSOC_START) = 8,	/* disconnect to the associated AP by upper layer */
+	ST(DISASSOC_INT_START) = 9,	/* initiate the disassoc by DHD */
+	ST(DISASSOC_DONE) = 10,	/* complete to disconnect to the associated AP */
+	ST(DISASSOC) = 11,	/* send or receive Disassoc */
+	ST(DEAUTH) = 12,	/* send or receive Deauth */
+	ST(LINKDOWN) = 13,	/* receive the link down event */
+	ST(REASSOC_START) = 14,	/* reassoc the candidate AP */
+	ST(REASSOC_INFORM) = 15,	/* inform reassoc completion to upper layer */
+	ST(REASSOC_DONE) = 16,	/* complete to reassoc */
+	ST(EAPOL_M1) = 17,	/* send or receive the EAPOL M1 */
+	ST(EAPOL_M2) = 18,	/* send or receive the EAPOL M2 */
+	ST(EAPOL_M3) = 19,	/* send or receive the EAPOL M3 */
+	ST(EAPOL_M4) = 20,	/* send or receive the EAPOL M4 */
+	ST(EAPOL_GROUPKEY_M1) = 21,	/* send or receive the EAPOL Group key handshake M1 */
+	ST(EAPOL_GROUPKEY_M2) = 22,	/* send or receive the EAPOL Group key handshake M2 */
+	ST(EAP_REQ_IDENTITY) = 23,	/* send or receive the EAP REQ IDENTITY */
+	ST(EAP_RESP_IDENTITY) = 24,	/* send or receive the EAP RESP IDENTITY */
+	ST(EAP_REQ_TLS) = 25,	/* send or receive the EAP REQ TLS */
+	ST(EAP_RESP_TLS) = 26,	/* send or receive the EAP RESP TLS */
+	ST(EAP_REQ_LEAP) = 27,	/* send or receive the EAP REQ LEAP */
+	ST(EAP_RESP_LEAP) = 28,	/* send or receive the EAP RESP LEAP */
+	ST(EAP_REQ_TTLS) = 29,	/* send or receive the EAP REQ TTLS */
+	ST(EAP_RESP_TTLS) = 30,	/* send or receive the EAP RESP TTLS */
+	ST(EAP_REQ_AKA) = 31,	/* send or receive the EAP REQ AKA */
+	ST(EAP_RESP_AKA) = 32,	/* send or receive the EAP RESP AKA */
+	ST(EAP_REQ_PEAP) = 33,	/* send or receive the EAP REQ PEAP */
+	ST(EAP_RESP_PEAP) = 34,	/* send or receive the EAP RESP PEAP */
+	ST(EAP_REQ_FAST) = 35,	/* send or receive the EAP REQ FAST */
+	ST(EAP_RESP_FAST) = 36,	/* send or receive the EAP RESP FAST */
+	ST(EAP_REQ_PSK) = 37,	/* send or receive the EAP REQ PSK */
+	ST(EAP_RESP_PSK) = 38,	/* send or receive the EAP RESP PSK */
+	ST(EAP_REQ_AKAP) = 39,	/* send or receive the EAP REQ AKAP */
+	ST(EAP_RESP_AKAP) = 40,	/* send or receive the EAP RESP AKAP */
+	ST(EAP_SUCCESS) = 41,	/* send or receive the EAP SUCCESS */
+	ST(EAP_FAILURE) = 42,	/* send or receive the EAP FAILURE */
+	ST(EAPOL_START) = 43,	/* send or receive the EAPOL-START */
+	ST(WSC_START) = 44,	/* send or receive the WSC START */
+	ST(WSC_DONE) = 45,	/* send or receive the WSC DONE */
+	ST(WPS_M1) = 46,	/* send or receive the WPS M1 */
+	ST(WPS_M2) = 47,	/* send or receive the WPS M2 */
+	ST(WPS_M3) = 48,	/* send or receive the WPS M3 */
+	ST(WPS_M4) = 49,	/* send or receive the WPS M4 */
+	ST(WPS_M5) = 50,	/* send or receive the WPS M5 */
+	ST(WPS_M6) = 51,	/* send or receive the WPS M6 */
+	ST(WPS_M7) = 52,	/* send or receive the WPS M7 */
+	ST(WPS_M8) = 53,	/* send or receive the WPS M8 */
+	ST(8021X_OTHER) = 54,	/* send or receive the other 8021X frames */
+	ST(INSTALL_KEY) = 55,	/* install the key */
+	ST(DELETE_KEY) = 56,	/* remove the key */
+	ST(INSTALL_PMKSA) = 57,	/* install PMKID information */
+	ST(INSTALL_OKC_PMK) = 58,	/* install PMKID information for OKC */
+	ST(DHCP_DISCOVER) = 59,	/* send or recv DHCP Discover */
+	ST(DHCP_OFFER) = 60,	/* send or recv DHCP Offer */
+	ST(DHCP_REQUEST) = 61,	/* send or recv DHCP Request */
+	ST(DHCP_DECLINE) = 62,	/* send or recv DHCP Decline */
+	ST(DHCP_ACK) = 63,	/* send or recv DHCP ACK */
+	ST(DHCP_NAK) = 64,	/* send or recv DHCP NACK */
+	ST(DHCP_RELEASE) = 65,	/* send or recv DHCP Release */
+	ST(DHCP_INFORM) = 66,	/* send or recv DHCP Inform */
+	ST(ICMP_PING_REQ) = 67,	/* send or recv ICMP PING Req */
+	ST(ICMP_PING_RESP) = 68,	/* send or recv ICMP PING Resp */
+	ST(ICMP_DEST_UNREACH) = 69,	/* send or recv ICMP DEST UNREACH message */
+	ST(ICMP_OTHER) = 70,	/* send or recv other ICMP */
+	ST(ARP_REQ) = 71,	/* send or recv ARP Req */
+	ST(ARP_RESP) = 72,	/* send or recv ARP Resp */
+	ST(DNS_QUERY) = 73,	/* send or recv DNS Query */
+	ST(DNS_RESP) = 74,	/* send or recv DNS Resp */
+	ST(REASSOC_SUCCESS) = 75,	/* reassociation success */
+	ST(REASSOC_FAILURE) = 76,	/* reassociation failure */
+	ST(AUTH_TIMEOUT) = 77,	/* authentication timeout */
+	ST(AUTH_FAIL) = 78,	/* authentication failure */
+	ST(AUTH_NO_ACK) = 79,	/* authentication failure due to no ACK */
+	ST(AUTH_OTHERS) = 80,	/* authentication failure with other status */
+	ST(ASSOC_TIMEOUT) = 81,	/* association timeout */
+	ST(ASSOC_FAIL) = 82,	/* association failure */
+	ST(ASSOC_NO_ACK) = 83,	/* association failure due to no ACK */
+	ST(ASSOC_ABORT) = 84,	/* association abort */
+	ST(ASSOC_UNSOLICITED) = 85,	/* association unsolicited */
+	ST(ASSOC_NO_NETWORKS) = 86,	/* association failure due to no networks */
+	ST(ASSOC_OTHERS) = 87,	/* association failure with other status */
+	ST(REASSOC_DONE_OTHERS) = 88,	/* complete to reassoc with other reason */
+	ST(MAX) = 89	/* Max Status */
+} stat_log_stat_t;
+
+/* functions */
+extern dhd_statlog_handle_t *dhd_attach_statlog(dhd_pub_t *dhdp, uint32 num_items,
+	uint32 bdlog_num_items, uint32 logbuf_len);
+extern void dhd_detach_statlog(dhd_pub_t *dhdp);
+extern int dhd_statlog_ring_log_data(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx,
+	uint8 dir, bool cond);
+extern int dhd_statlog_ring_log_data_reason(dhd_pub_t *dhdp, uint16 stat,
+	uint8 ifidx, uint8 dir, uint16 reason);
+extern int dhd_statlog_ring_log_ctrl(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx,
+	uint16 reason);
+extern int dhd_statlog_process_event(dhd_pub_t *dhdp, int type, uint8 ifidx,
+	uint16 status, uint16 reason, uint16 flags);
+extern int dhd_statlog_get_latest_info(dhd_pub_t *dhdp, void *reqbuf);
+extern void dhd_statlog_dump_scr(dhd_pub_t *dhdp);
+extern int dhd_statlog_query(dhd_pub_t *dhdp, char *cmd, int total_len);
+extern uint32 dhd_statlog_get_logbuf_len(dhd_pub_t *dhdp);
+extern void *dhd_statlog_get_logbuf(dhd_pub_t *dhdp);
+extern int dhd_statlog_generate_bdmask(dhd_pub_t *dhdp, void *reqbuf);
+#ifdef DHD_LOG_DUMP
+extern int dhd_statlog_write_logdump(dhd_pub_t *dhdp, const void *user_buf,
+	void *fp, uint32 len, unsigned long *pos);
+#endif /* DHD_LOG_DUMP */
+
+/* macros */
+#define MAX_STATLOG_ITEM		512
+#define MAX_STATLOG_REQ_ITEM		32
+#define STATLOG_LOGBUF_LEN		(64 * 1024)
+#define DHD_STATLOG_VERSION_V1		0x1
+#define DHD_STATLOG_VERSION		DHD_STATLOG_VERSION_V1
+#define DHD_STATLOG_ITEM_SIZE		(sizeof(stat_elem_t))
+#define DHD_STATLOG_RING_SIZE(items)	((items) * (DHD_STATLOG_ITEM_SIZE))
+#define DHD_STATLOG_STATSTR_BUF_LEN	32
+#define DHD_STATLOG_TZFMT_BUF_LEN	20
+#define DHD_STATLOG_TZFMT_YYMMDDHHMMSSMS	"%02d%02d%02d%02d%02d%02d%04d"
+
+#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
+	dhd_statlog_ring_log_ctrl((dhdp), (stat), (ifidx), (reason))
+#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
+	dhd_statlog_ring_log_data((dhdp), (stat), (ifidx), (dir), (cond))
+#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
+	dhd_statlog_ring_log_data_reason((dhdp), (stat), (ifidx), \
+		(dir), (reason))
+
+#endif /* DHD_STATUS_LOGGING */
+#endif /* __DHD_STATLOG_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_timesync.c b/bcmdhd.101.10.361.x/dhd_timesync.c
new file mode 100755
index 0000000..618d234
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_timesync.c
@@ -0,0 +1,1239 @@
+/**
+ * @file Broadcom Dongle Host Driver (DHD), time sync protocol handler
+ *
+ * timesync messages are exchanged between the host and device to synchronize the source time
+ * for ingress and egress packets.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * $Id$:
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+extern void dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd);
+
+#define MAX_FW_CLKINFO_TYPES 8
+#define MAX_SIZE_FW_CLKINFO_TYPE (MAX_FW_CLKINFO_TYPES * sizeof(ts_fw_clock_info_t))
+
+#define MAX_FW_TS_LOG_SAMPLES 64
+
+#define BCMMSGBUF_HOST_TS_BADTAG 0xF0
+
+#define DHD_DEFAULT_TIMESYNC_TIMER_VALUE 20	/* ms */
+#define DHD_DEFAULT_TIMESYNC_TIMER_VALUE_MAX 9000	/* ms */
+
+#define MAX_TS_LOG_SAMPLES_DATA 128
+#define TS_NODROP_CONFIG_TO 1
+#define TS_DROP_CONFIG_TO 5
+
+typedef struct clksrc_ts_log {
+	uchar name[4];
+	uint32 inuse;
+	ts_timestamp_srcid_t log[MAX_FW_TS_LOG_SAMPLES];
+} clksrc_ts_log_t;
+
+typedef struct clk_ts_log {
+	uint32 clk_ts_inited;
+	uint32 cur_idx;
+	uint32 seqnum[MAX_FW_TS_LOG_SAMPLES];
+	clksrc_ts_log_t ts_log[MAX_CLKSRC_ID+1];
+} clk_ts_log_t;
+
+typedef struct dhd_ts_xt_id {
+	uint16 host_timestamping_config;
+	uint16 host_clock_selection;
+	uint16 host_clk_info;
+	uint16 d2h_clk_correction;
+} dhd_ts_xt_id_t;
+
+typedef struct dhd_ts_log_ts_item {
+	uint16 flowid;	/* interface, Flow ID */
+	uint8 intf;	/* interface */
+	uint8 rsvd;
+	uint32 ts_low;	/* time stamp values */
+	uint32 ts_high;	/* time stamp values */
+	uint32 proto;
+	uint32 t1;
+	uint32 t2;
+} dhd_ts_log_ts_item_t;
+
+typedef struct dhd_ts_log_ts {
+	uint32 max_idx;
+	uint32 cur_idx;
+	dhd_ts_log_ts_item_t ts_log[MAX_TS_LOG_SAMPLES_DATA];
+} dhd_ts_log_ts_t;
+
+#define MAX_BUF_SIZE_HOST_CLOCK_INFO 512
+
+#define HOST_TS_CONFIG_FW_TIMESTAMP_PERIOD_DEFAULT 1000
+
+struct dhd_ts {
+	dhd_pub_t *dhdp;
+	osl_t *osh;
+	uint32 xt_id;
+	uint16 host_ts_capture_cnt;
+	uint32 fw_ts_capture_cnt;
+	uint32 fw_ts_disc_cnt;
+	uint32 h_clkid_min;
+	uint32 h_clkid_max;
+	uint32 h_tsconf_period;
+
+	/* should these be per clock source */
+	ts_correction_m_t correction_m;
+	ts_correction_m_t correction_b;
+
+	ts_fw_clock_info_t fw_tlv[MAX_FW_CLKINFO_TYPES];
+	uint32 fw_tlv_len;
+	clk_ts_log_t fw_ts_log;
+	uchar host_ts_host_clk_info_buffer[MAX_BUF_SIZE_HOST_CLOCK_INFO];
+	bool host_ts_host_clk_info_buffer_in_use;
+	dhd_ts_xt_id_t xt_ids;
+	uint32 active_ipc_version;
+
+	uint32 fwts2hsts_delay;
+	uint32 fwts2hsts_delay_wdcount;
+	uint32 ts_watchdog_calls;
+	uint64 last_ts_watchdog_time;
+	uint32 pending_requests;
+
+	dhd_ts_log_ts_t tx_timestamps;
+	dhd_ts_log_ts_t rx_timestamps;
+	/* outside modules could stop timesync independent of the user config */
+	bool timesync_disabled;
+	uint32 host_reset_cnt;
+	bool nodrop_config;
+
+	uint32 suspend_req;
+	uint32 resume_req;
+};
+struct dhd_ts *g_dhd_ts;
+static uint32 dhd_timesync_send_D2H_clk_correction(dhd_ts_t *ts);
+static uint32 dhd_timesync_send_host_clk_info(dhd_ts_t *ts);
+static uint32 dhd_timesync_send_host_clock_selection(dhd_ts_t *ts);
+static uint32 dhd_timesync_send_host_timestamping_config(dhd_ts_t *ts, bool inject_err);
+static void dhd_timesync_ts_log_dump_item(dhd_ts_log_ts_t *tsl, struct bcmstrbuf *b);
+
+/* Check for and handle local prot-specific iovar commands */
+
+enum {
+	IOV_TS_INFO_DUMP,
+	IOV_TS_TX_TS_DUMP,
+	IOV_TS_RX_TS_DUMP,
+	IOV_TS_FW_CLKINFO_DUMP,
+	IOV_TS_HCLK_CLKID_MIN,
+	IOV_TS_HCLK_CLKID_MAX,
+	IOV_TS_HTSCONF_PERIOD,
+	IOV_TS_SEND_TSCONFIG,
+	IOV_TS_SEND_HCLK_SEL,
+	IOV_TS_SEND_HCLK_INFO,
+	IOV_TS_SEND_D2H_CRCT,
+	IOV_TS_TXS_LOG,
+	IOV_TS_RXS_LOG,
+	IOV_TS_INJECT_BAD_XTID,
+	IOV_TS_INJECT_BAD_TAG,
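+	/* runtime tuning, drop-policy, and log/tx-behavior controls */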
+	IOV_TS_FWTS2HSTS_DELAY,
+	IOV_TS_NODROP_CONFIG,
+	IOV_TS_CLEAR_LOGS,
+	IOV_TS_NO_RETRY,
+	IOV_TS_NO_AGGR,
+	IOV_TS_FIXED_RATE,
+	IOV_LAST
+};
+const bcm_iovar_t dhd_ts_iovars[] = {
+	{"ts_info_dump", IOV_TS_INFO_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+	{"ts_tx_ts_dump", IOV_TS_TX_TS_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+	{"ts_rx_ts_dump", IOV_TS_RX_TS_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+	{"ts_fw_clkinfo_dump", IOV_TS_FW_CLKINFO_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+	{"ts_hclk_clkid_min", IOV_TS_HCLK_CLKID_MIN, 0, 0, IOVT_UINT32, 0 },
+	{"ts_hclk_clkid_max", IOV_TS_HCLK_CLKID_MAX, 0, 0, IOVT_UINT32, 0 },
+	{"ts_htsconf_period", IOV_TS_HTSCONF_PERIOD, 0, 0, IOVT_UINT32, 0 },
+	{"ts_send_tsconfig", IOV_TS_SEND_TSCONFIG, 0, 0, IOVT_UINT32, 0 },
+	{"ts_send_hostclk_sel", IOV_TS_SEND_HCLK_SEL, 0, 0, IOVT_UINT32, 0 },
+	{"ts_send_hostclk_info", IOV_TS_SEND_HCLK_INFO, 0, 0, IOVT_UINT32, 0 },
+	{"ts_send_d2h_corect ", IOV_TS_SEND_D2H_CRCT, 0, 0, IOVT_UINT32, 0 },
+	{"ts_txs_log", IOV_TS_TXS_LOG, 0, 0, IOVT_UINT32, 0 },
+	{"ts_rxs_log", IOV_TS_RXS_LOG, 0, 0, IOVT_UINT32, 0 },
+
+	/* error injection cases */
+	{"ts_inject_bad_xtid", IOV_TS_INJECT_BAD_XTID, 0, 0, IOVT_UINT32, 0 },
+	{"ts_inject_bad_tag", IOV_TS_INJECT_BAD_TAG, 0, 0, IOVT_UINT32, 0 },
+	{"ts_fwts2hsts_delay", IOV_TS_FWTS2HSTS_DELAY, 0, 0, IOVT_UINT32, 0 },
+	{"ts_nodrop_config", IOV_TS_NODROP_CONFIG, 0, 0, IOVT_UINT32, 0 },
+	{"ts_clear_logs", IOV_TS_CLEAR_LOGS, 0, 0, IOVT_UINT32, 0 },
+	{"ts_set_no_retry", IOV_TS_NO_RETRY, 0, 0, IOVT_UINT32, 0 },
+	{"ts_set_no_aggr", IOV_TS_NO_AGGR, 0, 0, IOVT_UINT32, 0 },
+	{"ts_set_fixed_rate", IOV_TS_FIXED_RATE, 0, 0, IOVT_UINT32, 0 },
+
+	{NULL, 0, 0, 0, 0, 0 }
+};
+
+static int dhd_ts_fw_clksrc_dump(dhd_ts_t *ts, char *buf, int buflen);
+#ifdef CONFIG_PROC_FS
+static int dhd_open_proc_ts_fw_clk_dump(struct inode *inode, struct file *file);
+ssize_t dhd_read_proc_ts_fw_clk_dump(struct file *file, char *user_buf, size_t count, loff_t *loff);
+static int dhd_open_proc_ts_tx_dump(struct inode *inode, struct file *file);
+ssize_t dhd_read_proc_ts_tx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff);
+static int dhd_open_proc_ts_rx_dump(struct inode *inode, struct file *file);
+ssize_t dhd_read_proc_ts_rx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff);
+
+static int
+dhd_open_proc_ts_fw_clk_dump(struct inode *inode, struct file *file)
+{
+	return single_open(file, 0, NULL);
+}
+ssize_t
+dhd_read_proc_ts_fw_clk_dump(struct file *file, char *user_buf, size_t count, loff_t *loff)
+{
+	dhd_ts_t *ts;
+	char *buf;
+	ssize_t ret = 0;
+
+	ts = g_dhd_ts;
+	if (ts == NULL) {
+		return -EAGAIN;
+	}
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(ts->dhdp)) {
+		DHD_INFO(("%s bus is in suspend or suspend in progress\n", __func__));
+		return -EAGAIN;
+	}
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(ts->dhdp)) {
+		DHD_ERROR(("%s rmmod in progress\n", __func__));
+		return -ENOENT;
+	}
+	buf = kmalloc(count, GFP_KERNEL);
+	if (buf == NULL) {
+		DHD_ERROR(("%s failed to allocate buf with size %zu\n", __func__, count));
+		return -ENOMEM;
+	}
+	ret = dhd_ts_fw_clksrc_dump(ts, buf, count);
+	if (ret < 0) {
+		kfree(buf);
+		return 0;
+	}
+	ret = simple_read_from_buffer(user_buf, count, loff, buf, (count - ret));
+	kfree(buf);
+	return ret;
+}
+static int dhd_open_proc_ts_tx_dump(struct inode *inode, struct file *file)
+{
+	return single_open(file, 0, NULL);
+}
+ssize_t
+dhd_read_proc_ts_tx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff)
+{
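+	/* snapshot the tx timestamp log into a kernel buffer, then copy
+	 * the requested region to user space via simple_read_from_buffer()
+	 */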
+	dhd_ts_t *ts;
+	char *buf;
+	ssize_t ret = 0;
+	struct bcmstrbuf strbuf;
+
+	ts = g_dhd_ts;
+	if (ts == NULL) {
+		return -EAGAIN;
+	}
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(ts->dhdp)) {
+		DHD_INFO(("%s bus is in suspend or suspend in progress\n", __func__));
+		return -EAGAIN;
+	}
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(ts->dhdp)) {
+		DHD_ERROR(("%s rmmod in progress\n", __func__));
+		return -ENOENT;
+	}
+	buf = kmalloc(count, GFP_KERNEL);
+	if (buf == NULL) {
+		DHD_ERROR(("%s failed to allocate buf with size %zu\n", __func__, count));
+		return -ENOMEM;
+	}
+	bcm_binit(&strbuf, buf, count);
+	bcm_bprintf(&strbuf, "Tx Log dump\n");
+	dhd_timesync_ts_log_dump_item(&ts->tx_timestamps, &strbuf);
+	ret = simple_read_from_buffer(user_buf, count, loff, buf, (count - strbuf.size));
+	kfree(buf);
+	return ret;
+}
+
+static int dhd_open_proc_ts_rx_dump(struct inode *inode, struct file *file)
+{
+	return single_open(file, 0, NULL);
+}
+
+ssize_t
+dhd_read_proc_ts_rx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff)
+{
+	dhd_ts_t *ts;
+	char *buf;
+	ssize_t ret = 0;
+	struct bcmstrbuf strbuf;
+
+	ts = g_dhd_ts;
+	if (ts == NULL) {
+		return -EAGAIN;
+	}
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(ts->dhdp)) {
+		DHD_INFO(("%s bus is in suspend or suspend in progress\n", __func__));
+		return -EAGAIN;
+	}
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(ts->dhdp)) {
+		DHD_ERROR(("%s rmmod in progress\n", __func__));
+		return -ENOENT;
+	}
+	buf = kmalloc(count, GFP_KERNEL);
+	if (buf == NULL) {
+		DHD_ERROR(("%s failed to allocate buf with size %zu\n", __func__, count));
+		return -ENOMEM;
+	}
+	bcm_binit(&strbuf, buf, count);
+	bcm_bprintf(&strbuf, "Rx Log dump\n");
+	dhd_timesync_ts_log_dump_item(&ts->rx_timestamps, &strbuf);
+	ret = simple_read_from_buffer(user_buf, count, loff, buf, (count - strbuf.size));
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations proc_fops_ts_fw_clk_dump = {
+	.read = dhd_read_proc_ts_fw_clk_dump,
+	.open = dhd_open_proc_ts_fw_clk_dump,
+	.release = seq_release,
+};
+static const struct file_operations proc_fops_ts_tx_dump = {
+	.read = dhd_read_proc_ts_tx_dump,
+	.open = dhd_open_proc_ts_tx_dump,
+	.release = seq_release,
+};
+static const struct file_operations proc_fops_ts_rx_dump = {
+	.read = dhd_read_proc_ts_rx_dump,
+	.open = dhd_open_proc_ts_rx_dump,
+	.release = seq_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+int
+dhd_timesync_detach(dhd_pub_t *dhdp)
+{
+	dhd_ts_t *ts;
+
+	DHD_TRACE(("%s: %d\n", __FUNCTION__, __LINE__));
+
+	if (!dhdp) {
+		return BCME_OK;
+	}
+	ts = dhdp->ts;
+#ifdef CONFIG_PROC_FS
+	remove_proc_entry("ts_fw_clk_dump", NULL);
+	remove_proc_entry("ts_tx_dump", NULL);
+	remove_proc_entry("ts_rx_dump", NULL);
+#endif /* CONFIG_PROC_FS */
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+	MFREE(dhdp->osh, ts, sizeof(dhd_ts_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	g_dhd_ts = NULL;
+	dhdp->ts = NULL;
+	DHD_INFO(("Deallocated DHD TS\n"));
+	return BCME_OK;
+}
+int
+dhd_timesync_attach(dhd_pub_t *dhdp)
+{
+	dhd_ts_t *ts;
+
+	DHD_TRACE(("%s: %d\n", __FUNCTION__, __LINE__));
+	/* Allocate prot structure */
+	if (!(ts = (dhd_ts_t *)DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_PROT,
+		sizeof(dhd_ts_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(ts, 0, sizeof(*ts));
+
+	g_dhd_ts = ts;
+	ts->osh = dhdp->osh;
+	dhdp->ts = ts;
+	ts->dhdp = dhdp;
+
+	ts->correction_m.low = 1;
+	ts->correction_m.high = 1;
+
+	ts->correction_b.low = 0;
+	ts->correction_b.high = 0;
+
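+	/* fwts2hsts_delay defaults to 20 ms; the ts_fwts2hsts_delay iovar
+	 * accepts values above the default, up to
+	 * DHD_DEFAULT_TIMESYNC_TIMER_VALUE_MAX (9000 ms)
+	 */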
+	ts->fwts2hsts_delay = DHD_DEFAULT_TIMESYNC_TIMER_VALUE;
+	ts->fwts2hsts_delay_wdcount = 0;
+
+	ts->tx_timestamps.max_idx = MAX_TS_LOG_SAMPLES_DATA;
+	ts->rx_timestamps.max_idx = MAX_TS_LOG_SAMPLES_DATA;
+
+	ts->xt_id = 1;
+
+	DHD_INFO(("allocated DHD TS\n"));
+
+#ifdef CONFIG_PROC_FS
+	if (proc_create("ts_fw_clk_dump", S_IRUSR, NULL, &proc_fops_ts_fw_clk_dump) == NULL) {
+		DHD_ERROR(("Failed to create /proc/ts_fw_clk_dump procfs interface\n"));
+	}
+	if (proc_create("ts_tx_dump", S_IRUSR, NULL, &proc_fops_ts_tx_dump) == NULL) {
+		DHD_ERROR(("Failed to create /proc/ts_tx_dump procfs interface\n"));
+	}
+	if (proc_create("ts_rx_dump", S_IRUSR, NULL, &proc_fops_ts_rx_dump) == NULL) {
+		DHD_ERROR(("Failed to create /proc/ts_rx_dump procfs interface\n"));
+	}
+#endif /* CONFIG_PROC_FS */
+
+	return BCME_OK;
+
+fail:
+	if (dhdp->ts != NULL) {
+		dhd_timesync_detach(dhdp);
+	}
+	return BCME_NOMEM;
+}
+
+static void
+dhd_timesync_ts_log_dump_item(dhd_ts_log_ts_t *tsl, struct bcmstrbuf *b)
+{
+	uint32 i = 0;
+
+	bcm_bprintf(b, "Max_idx: %d, cur_idx %d\n", tsl->max_idx, tsl->cur_idx);
+	for (i = 0; i < tsl->max_idx; i++) {
+		bcm_bprintf(b, "\t idx: %03d, (%d: %d) timestamp: 0x%08x:0x%08x "
+			" proto: %02d, t1: 0x%08x t2: 0x%08x\n",
+			i, tsl->ts_log[i].intf, tsl->ts_log[i].flowid,
+			tsl->ts_log[i].ts_high, tsl->ts_log[i].ts_low,
+			tsl->ts_log[i].proto, tsl->ts_log[i].t1,
+			tsl->ts_log[i].t2);
+	}
+}
+
+static int
+dhd_timesync_ts_log_dump(dhd_ts_t *ts, char *buf, int buflen, bool tx)
+{
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	if (tx) {
+		bcm_bprintf(strbuf, "Tx Log dump\n");
+		dhd_timesync_ts_log_dump_item(&ts->tx_timestamps, strbuf);
+	} else {
+		bcm_bprintf(strbuf, "Rx Log dump\n");
+		dhd_timesync_ts_log_dump_item(&ts->rx_timestamps, strbuf);
+	}
+	return BCME_OK;
+}
+
+static void
+dhd_timesync_clear_logs(dhd_ts_t *ts)
+{
+	dhd_ts_log_ts_t *tsl;
+
+	tsl = &ts->rx_timestamps;
+	tsl->cur_idx = 0;
+	memset(tsl->ts_log, 0, sizeof(dhd_ts_log_ts_item_t) *
+		MAX_TS_LOG_SAMPLES_DATA);
+
+	tsl = &ts->tx_timestamps;
+	tsl->cur_idx = 0;
+	memset(tsl->ts_log, 0, sizeof(dhd_ts_log_ts_item_t) *
+		MAX_TS_LOG_SAMPLES_DATA);
+
+	return;
+}
+
+void
+dhd_timesync_debug_info_print(dhd_pub_t *dhdp)
+{
+	dhd_ts_t *ts = dhdp->ts;
+	uint64 current_time;
+
+	if (!ts) {
+		DHD_ERROR(("%s: %d ts is NULL\n", __FUNCTION__, __LINE__));
+		return;
+	}
+
+	DHD_ERROR(("\nts info dump: active_ipc_version %d\n", ts->active_ipc_version));
+	current_time = OSL_LOCALTIME_NS();
+	DHD_ERROR(("current_time="SEC_USEC_FMT" last_ts_watchdog_time="SEC_USEC_FMT"\n",
+		GET_SEC_USEC(current_time), GET_SEC_USEC(ts->last_ts_watchdog_time)));
+	DHD_ERROR(("timesync disabled %d\n", ts->timesync_disabled));
+	DHD_ERROR(("Host TS dump cnt %d, fw TS dump cnt %d, discrepancy %d\n",
+		ts->host_ts_capture_cnt, ts->fw_ts_capture_cnt, ts->fw_ts_disc_cnt));
+	DHD_ERROR(("ts_watchdog calls %d reset cnt %d\n",
+		ts->ts_watchdog_calls, ts->host_reset_cnt));
+	DHD_ERROR(("xt_ids tag/ID %d/%d, %d/%d, %d/%d, %d/%d\n",
+		BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG, ts->xt_ids.host_timestamping_config,
+		BCMMSGBUF_HOST_CLOCK_SELECT_TAG, ts->xt_ids.host_clock_selection,
+		BCMMSGBUF_HOST_CLOCK_INFO_TAG, ts->xt_ids.host_clk_info,
+		BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG, ts->xt_ids.d2h_clk_correction));
+	DHD_ERROR(("pending requests %d suspend req %d resume req %d\n",
+		ts->pending_requests, ts->suspend_req, ts->resume_req));
+
+}
+
+static int
+dhd_timesync_dump(dhd_ts_t *ts, char *buf, int buflen)
+{
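+	/* iovar-readable counterpart of dhd_timesync_debug_info_print() */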
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	bcm_bprintf(strbuf, "ts info dump: active_ipc_version %d\n", ts->active_ipc_version);
+	bcm_bprintf(strbuf, "timesync disabled %d\n", ts->timesync_disabled);
+	bcm_bprintf(strbuf, "Host TS dump cnt %d, fw TS dump cnt %d, discrepancy %d\n",
+		ts->host_ts_capture_cnt, ts->fw_ts_capture_cnt, ts->fw_ts_disc_cnt);
+	bcm_bprintf(strbuf, "ts_watchdog calls %d reset cnt %d\n",
+		ts->ts_watchdog_calls, ts->host_reset_cnt);
+	bcm_bprintf(strbuf, "xt_ids tag/ID %d/%d, %d/%d, %d/%d, %d/%d\n",
+		BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG, ts->xt_ids.host_timestamping_config,
+		BCMMSGBUF_HOST_CLOCK_SELECT_TAG, ts->xt_ids.host_clock_selection,
+		BCMMSGBUF_HOST_CLOCK_INFO_TAG, ts->xt_ids.host_clk_info,
+		BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG, ts->xt_ids.d2h_clk_correction);
+	bcm_bprintf(strbuf, "pending requests %d suspend req %d resume req %d\n",
+		ts->pending_requests, ts->suspend_req, ts->resume_req);
+
+	return BCME_OK;
+}
+
+static int
+dhd_timesync_doiovar(dhd_ts_t *ts, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+	void *params, uint plen, void *arg, uint len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_TS_INFO_DUMP):
+		dhd_timesync_dump(ts, arg, len);
+		break;
+	case IOV_GVAL(IOV_TS_TX_TS_DUMP):
+		dhd_timesync_ts_log_dump(ts, arg, len, TRUE);
+		break;
+	case IOV_GVAL(IOV_TS_RX_TS_DUMP):
+		dhd_timesync_ts_log_dump(ts, arg, len, FALSE);
+		break;
+	case IOV_GVAL(IOV_TS_FW_CLKINFO_DUMP):
+		dhd_ts_fw_clksrc_dump(ts, arg, len);
+		break;
+	case IOV_SVAL(IOV_TS_SEND_TSCONFIG):
+		if (ts->active_ipc_version < 7) {
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		bcmerror = dhd_timesync_send_host_timestamping_config(ts, FALSE);
+		break;
+	case IOV_SVAL(IOV_TS_SEND_HCLK_SEL):
+		if (ts->active_ipc_version < 7) {
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		bcmerror = dhd_timesync_send_host_clock_selection(ts);
+		break;
+	case IOV_SVAL(IOV_TS_SEND_HCLK_INFO):
+		if (ts->active_ipc_version < 7) {
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		bcmerror = dhd_timesync_send_host_clk_info(ts);
+		break;
+	case IOV_SVAL(IOV_TS_SEND_D2H_CRCT):
+		if (ts->active_ipc_version < 7) {
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		bcmerror = dhd_timesync_send_D2H_clk_correction(ts);
+		break;
+	case IOV_SVAL(IOV_TS_INJECT_BAD_TAG):
+		if (ts->active_ipc_version < 7) {
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		bcmerror = dhd_timesync_send_host_timestamping_config(ts, TRUE);
+		break;
+	case IOV_SVAL(IOV_TS_INJECT_BAD_XTID): {
+		uint16 old_xt_id;
+
+		if (ts->active_ipc_version < 7) {
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		old_xt_id = ts->xt_id;
+		ts->xt_id += 10; /* will cause the error now */
+		DHD_ERROR(("generating bad XTID transaction for the device exp %d, sending %d",
+			old_xt_id, ts->xt_id));
+		bcmerror = dhd_timesync_send_host_timestamping_config(ts, FALSE);
+		ts->xt_id = old_xt_id;
+		break;
+	}
+	case IOV_GVAL(IOV_TS_FWTS2HSTS_DELAY):
+		bcopy(&ts->fwts2hsts_delay, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_TS_FWTS2HSTS_DELAY):
+		if (ts->active_ipc_version < 7) {
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		if (int_val > DHD_DEFAULT_TIMESYNC_TIMER_VALUE_MAX) {
+			bcmerror = BCME_RANGE;
+			break;
+		}
+		if (int_val <= DHD_DEFAULT_TIMESYNC_TIMER_VALUE) {
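+			/* reject values at or below the 20 ms default */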
+ bcmerror = BCME_RANGE; + break; + } + ts->fwts2hsts_delay = int_val; + break; + case IOV_GVAL(IOV_TS_NODROP_CONFIG): + bcopy(&ts->nodrop_config, arg, val_size); + break; + case IOV_SVAL(IOV_TS_NODROP_CONFIG): + ts->nodrop_config = int_val; + break; + case IOV_GVAL(IOV_TS_NO_RETRY): + int_val = dhd_prot_pkt_noretry(ts->dhdp, 0, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TS_NO_RETRY): + dhd_prot_pkt_noretry(ts->dhdp, int_val, TRUE); + break; + case IOV_GVAL(IOV_TS_NO_AGGR): + int_val = dhd_prot_pkt_noaggr(ts->dhdp, 0, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TS_NO_AGGR): + dhd_prot_pkt_noaggr(ts->dhdp, int_val, TRUE); + break; + case IOV_GVAL(IOV_TS_FIXED_RATE): + int_val = dhd_prot_pkt_fixed_rate(ts->dhdp, 0, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TS_FIXED_RATE): + dhd_prot_pkt_fixed_rate(ts->dhdp, int_val, TRUE); + break; + case IOV_SVAL(IOV_TS_CLEAR_LOGS): + dhd_timesync_clear_logs(ts); + break; + case IOV_GVAL(IOV_TS_TXS_LOG): + int_val = dhd_prot_data_path_tx_timestamp_logging(ts->dhdp, 0, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TS_TXS_LOG): + dhd_prot_data_path_tx_timestamp_logging(ts->dhdp, int_val, TRUE); + break; + case IOV_GVAL(IOV_TS_RXS_LOG): + int_val = dhd_prot_data_path_rx_timestamp_logging(ts->dhdp, 0, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TS_RXS_LOG): + dhd_prot_data_path_rx_timestamp_logging(ts->dhdp, int_val, TRUE); + break; + case IOV_SVAL(IOV_TS_HTSCONF_PERIOD): + if (ts->active_ipc_version < 7) { + bcmerror = BCME_ERROR; + break; + } + ts->h_tsconf_period = int_val; + break; + case IOV_GVAL(IOV_TS_HTSCONF_PERIOD): + if (ts->active_ipc_version < 7) { + bcmerror = BCME_ERROR; + break; + } + bcopy(&ts->h_tsconf_period, arg, val_size); + break; + case IOV_SVAL(IOV_TS_HCLK_CLKID_MAX): + if (ts->active_ipc_version < 7) { + bcmerror = BCME_ERROR; + break; + } + ts->h_clkid_max = int_val; + break; + case IOV_GVAL(IOV_TS_HCLK_CLKID_MAX): + if (ts->active_ipc_version < 7) { + bcmerror = BCME_ERROR; + break; + } + bcopy(&ts->h_clkid_max, arg, val_size); + break; + + case IOV_SVAL(IOV_TS_HCLK_CLKID_MIN): + if (ts->active_ipc_version < 7) { + bcmerror = BCME_ERROR; + break; + } + ts->h_clkid_min = int_val; + break; + case IOV_GVAL(IOV_TS_HCLK_CLKID_MIN): + if (ts->active_ipc_version < 7) { + bcmerror = BCME_ERROR; + break; + } + bcopy(&ts->h_clkid_min, arg, val_size); + break; + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror)); + return bcmerror; +} + +int +dhd_timesync_iovar_op(dhd_ts_t *ts, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + int bcmerror = 0; + int val_size; + const bcm_iovar_t *vi = NULL; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + if ((vi = bcm_iovar_lookup(dhd_ts_iovars, name)) == NULL) { + DHD_TRACE(("%s: not ours\n", name)); + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? 
"set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + + bcmerror = dhd_timesync_doiovar(ts, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +void +dhd_timesync_handle_host_ts_complete(dhd_ts_t *ts, uint16 xt_id, uint16 status) +{ + if (ts == NULL) { + DHD_ERROR(("%s: called with ts null\n", __FUNCTION__)); + return; + } + DHD_INFO(("Host send TS complete, for ID %d, status %d\n", xt_id, status)); + if (xt_id == ts->xt_ids.host_clk_info) { + if (ts->host_ts_host_clk_info_buffer_in_use != TRUE) { + DHD_ERROR(("same ID as the host clock info, but buffer not in use: %d\n", + ts->xt_ids.host_clk_info)); + return; + } + ts->host_ts_host_clk_info_buffer_in_use = FALSE; + } + ts->pending_requests--; +} + +void +dhd_timesync_notify_ipc_rev(dhd_ts_t *ts, uint32 ipc_rev) +{ + if (ts != NULL) + ts->active_ipc_version = ipc_rev; +} + +static int +dhd_ts_fw_clksrc_dump(dhd_ts_t *ts, char *buf, int buflen) +{ + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; + clk_ts_log_t *fw_ts_log; + uint32 i = 0, j = 0; + clksrc_ts_log_t *clk_src; + + fw_ts_log = &ts->fw_ts_log; + + bcm_binit(strbuf, buf, buflen); + + while (i <= MAX_CLKSRC_ID) { + clk_src = &fw_ts_log->ts_log[i]; + if (clk_src->inuse == FALSE) { + bcm_bprintf(strbuf, "clkID %d: not in use\n", i); + } + else { + bcm_bprintf(strbuf, "clkID %d: name %s Max idx %d, cur_idx %d\n", + i, clk_src->name, MAX_FW_TS_LOG_SAMPLES, fw_ts_log->cur_idx); + j = 0; + while (j < MAX_FW_TS_LOG_SAMPLES) { + bcm_bprintf(strbuf, "%03d: %03d: 0x%08x-0x%08x\n", j, + fw_ts_log->seqnum[j], clk_src->log[j].ts_high, + clk_src->log[j].ts_low); + j++; + } + } + i++; + } + return b.size; +} + +static void +dhd_ts_fw_clksrc_log(dhd_ts_t *ts, uchar *tlvs, uint32 tlv_len, uint32 seqnum) +{ + ts_fw_clock_info_t *fw_clock_info; + clksrc_ts_log_t *clk_src; + clk_ts_log_t *fw_ts_log; + + fw_ts_log = &ts->fw_ts_log; + + fw_ts_log->seqnum[fw_ts_log->cur_idx] = seqnum; + while (tlv_len) { + fw_clock_info = (ts_fw_clock_info_t *)tlvs; + clk_src = &fw_ts_log->ts_log[(fw_clock_info->ts.ts_high >> 28) & 0xF]; + if (clk_src->inuse == FALSE) { + bcopy(fw_clock_info->clk_src, clk_src->name, sizeof(clk_src->name)); + clk_src->inuse = TRUE; + } + clk_src->log[fw_ts_log->cur_idx].ts_low = fw_clock_info->ts.ts_low; + clk_src->log[fw_ts_log->cur_idx].ts_high = fw_clock_info->ts.ts_high; + + tlvs += sizeof(*fw_clock_info); + tlv_len -= sizeof(*fw_clock_info); + } + fw_ts_log->cur_idx++; + if (fw_ts_log->cur_idx >= MAX_FW_TS_LOG_SAMPLES) + fw_ts_log->cur_idx = 0; +} + +void +dhd_timesync_handle_fw_timestamp(dhd_ts_t *ts, uchar *tlvs, uint32 tlv_len, uint32 seqnum) +{ + ts_fw_clock_info_t *fw_clock_info; + uint16 tag_id; + + DHD_INFO(("FW sent timestamp message, tlv_len %d, seqnum %d\n", tlv_len, seqnum)); + bcm_print_bytes("fw ts", tlvs, tlv_len); + /* we are expecting only one TLV type from the firmware side */ + /* BCMMSGBUF_FW_CLOCK_INFO_TAG */ + /* Validate the tag ID */ + if (ts == NULL) { + DHD_ERROR(("%s: NULL TS \n", __FUNCTION__)); + return; + } + if (tlvs == NULL) { + DHD_ERROR(("%s: NULL TLV \n", __FUNCTION__)); + return; + } + if (tlv_len < 
BCM_XTLV_HDR_SIZE) {
+		DHD_ERROR(("%s: bad length %d\n", __FUNCTION__, tlv_len));
+		return;
+	}
+	if (tlv_len > MAX_SIZE_FW_CLKINFO_TYPE) {
+		DHD_ERROR(("tlv_len %d more than what is supported in Host %d\n", tlv_len,
+			(uint32)MAX_SIZE_FW_CLKINFO_TYPE));
+		return;
+	}
+	if (tlv_len % (sizeof(*fw_clock_info))) {
+		DHD_ERROR(("bad tlv_len for the packet %d, needs to be a multiple of %d\n", tlv_len,
+			(uint32)(sizeof(*fw_clock_info))));
+		return;
+	}
+
+	/* validate the tag for all the included tag IDs */
+	{
+		uint32 check_len = 0;
+		uchar *tag_ptr = (uchar *)(tlvs);
+		while (check_len < tlv_len) {
+			bcopy(tag_ptr + check_len, &tag_id, sizeof(uint16));
+			DHD_INFO(("FWTS: tag_id %d, offset %d\n",
+				tag_id, check_len));
+			if (tag_id != BCMMSGBUF_FW_CLOCK_INFO_TAG) {
+				DHD_ERROR(("Fatal: invalid tag from FW in TS: %d, offset %d\n",
+					tag_id, check_len));
+				return;
+			}
+			check_len += sizeof(*fw_clock_info);
+		}
+	}
+
+	if (seqnum != (ts->fw_ts_capture_cnt + 1)) {
+		DHD_ERROR(("FW TS discrepancy: out of sequence exp %d, got %d, resyncing %d\n",
+			ts->fw_ts_capture_cnt + 1, seqnum, seqnum));
+		ts->fw_ts_disc_cnt++;
+	}
+	ts->fw_ts_capture_cnt = seqnum;
+
+	/* copy it into local info */
+	bcopy(tlvs, &ts->fw_tlv[0], tlv_len);
+	ts->fw_tlv_len = tlv_len;
+
+	dhd_ts_fw_clksrc_log(ts, tlvs, tlv_len, seqnum);
+	/* launch the watchdog to send the host timestamp as per the programmed delay */
+	if (ts->fwts2hsts_delay_wdcount != 0) {
+		DHD_ERROR(("FATAL: Last Host sync is not sent out yet\n"));
+		return;
+	}
+	if (dhd_watchdog_ms == 0) {
+		DHD_ERROR(("FATAL: WATCHDOG is set to 0, timesync can't work properly\n"));
+		return;
+	}
+	/* schedule sending host time sync values to device */
+	ts->fwts2hsts_delay_wdcount = ts->fwts2hsts_delay / dhd_watchdog_ms;
+	if (ts->fwts2hsts_delay_wdcount == 0)
+		ts->fwts2hsts_delay_wdcount = 1;
+}
+
+static uint32
+dhd_timesync_send_host_timestamping_config(dhd_ts_t *ts, bool inject_err)
+{
+	ts_host_timestamping_config_t ts_config;
+	int ret_val;
+
+	if (ts->timesync_disabled) {
+		DHD_ERROR(("Timesync Disabled: Cannot send HOST TS config msg\n"));
+		return BCME_ERROR;
+	}
+	bzero(&ts_config, sizeof(ts_config));
+
+	ts_config.xtlv.id = BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG;
+	if (inject_err)
+		ts_config.xtlv.id = BCMMSGBUF_HOST_TS_BADTAG;
+
+	ts_config.xtlv.len = sizeof(ts_config) - sizeof(_bcm_xtlv_t);
+	ts_config.period_ms = ts->h_tsconf_period;
+
+	if (ts_config.period_ms) {
+		ts_config.flags |= FLAG_HOST_RESET;
+		ts_config.reset_cnt = ts->host_reset_cnt + 1;
+	}
+
+	if (ts->nodrop_config) {
+		ts_config.flags |= FLAG_CONFIG_NODROP;
+		ts_config.post_delay = TS_NODROP_CONFIG_TO;
+	} else {
+		ts_config.post_delay = TS_DROP_CONFIG_TO;
+	}
+
+	DHD_ERROR(("sending Host Timestamping Config: TLV (ID %d, LEN %d), period %d, seq %d\n",
+		ts_config.xtlv.id, ts_config.xtlv.len, ts_config.period_ms,
+		ts->host_ts_capture_cnt));
+	ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)&ts_config, sizeof(ts_config),
+		ts->host_ts_capture_cnt, ts->xt_id);
+	if (ret_val != 0) {
+		DHD_ERROR(("Fatal: Error sending HOST TS config msg to device: %d\n", ret_val));
+		return BCME_ERROR;
+	}
+
+	if (ts_config.period_ms) {
+		ts->host_reset_cnt++;
+	}
+
+	ts->pending_requests++;
+	ts->xt_ids.host_timestamping_config = ts->xt_id;
+	ts->xt_id++;
+	return BCME_OK;
+}
+
+static uint32
+dhd_timesync_send_host_clock_selection(dhd_ts_t *ts)
+{
+	ts_host_clock_sel_t ts_clk_sel;
+	int ret_val;
+
+	if (ts->timesync_disabled) {
+		DHD_ERROR(("Timesync Disabled: Cannot send HOST clock sel msg\n"));
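+		/* timesync is disabled/enabled across suspend/resume by
+		 * dhd_timesync_control() at the end of this file
+		 */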
return BCME_ERROR; + } + + bzero(&ts_clk_sel, sizeof(ts_clk_sel)); + + ts_clk_sel.xtlv.id = BCMMSGBUF_HOST_CLOCK_SELECT_TAG; + ts_clk_sel.xtlv.len = sizeof(ts_clk_sel) - sizeof(_bcm_xtlv_t); + ts_clk_sel.min_clk_idx = ts->h_clkid_min; + ts_clk_sel.max_clk_idx = ts->h_clkid_max; + DHD_INFO(("sending Host ClockSel Config: TLV (ID %d, LEN %d), min %d, max %d, seq %d\n", + ts_clk_sel.xtlv.id, ts_clk_sel.xtlv.len, ts_clk_sel.min_clk_idx, + ts_clk_sel.max_clk_idx, + ts->host_ts_capture_cnt)); + ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)&ts_clk_sel, sizeof(ts_clk_sel), + ts->host_ts_capture_cnt, ts->xt_id); + if (ret_val != 0) { + DHD_ERROR(("Fatal: Error sending HOST ClockSel msg to device: %d\n", ret_val)); + return BCME_ERROR; + } + ts->xt_ids.host_clock_selection = ts->xt_id; + ts->xt_id++; + ts->pending_requests++; + return BCME_OK; +} + +static uint32 +dhd_timesync_send_host_clk_info(dhd_ts_t *ts) +{ + ts_host_clock_info_t *host_clock_info; + uchar *clk_info_buffer; + uint32 clk_info_bufsize; + int ret_val; + + if (ts->timesync_disabled) { + DHD_ERROR(("Timesync Disabled: Cannot send HOST clock config msg\n")); + return BCME_ERROR; + } + if (ts->host_ts_host_clk_info_buffer_in_use == TRUE) { + DHD_ERROR(("Host Ts Clock info buffer in Use\n")); + return BCME_ERROR; + } + clk_info_buffer = &ts->host_ts_host_clk_info_buffer[0]; + clk_info_bufsize = sizeof(ts->host_ts_host_clk_info_buffer); + + DHD_INFO(("clk_info_buf size %d, tlv_len %d, host clk_info_len %d\n", + clk_info_bufsize, ts->fw_tlv_len, (uint32)sizeof(*host_clock_info))); + + if (clk_info_bufsize < sizeof(*host_clock_info)) { + DHD_ERROR(("clock_info_buf_size too small to fit host clock info %d, %d\n", + clk_info_bufsize, (uint32)sizeof(*host_clock_info))); + return BCME_ERROR; + } + + host_clock_info = (ts_host_clock_info_t *)clk_info_buffer; + host_clock_info->xtlv.id = BCMMSGBUF_HOST_CLOCK_INFO_TAG; + host_clock_info->xtlv.len = sizeof(*host_clock_info) - sizeof(_bcm_xtlv_t); + /* OSL_GET_CYCLES */ + host_clock_info->ticks.low = 0; + host_clock_info->ticks.high = 0; + /* OSL_SYS_UPTIME?? 
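+	 * (ns is left zeroed in this change; a monotonic host time sample
+	 * would fill it, just as OSL_GET_CYCLES above would fill ticks)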
*/
+	host_clock_info->ns.low = 0;
+	host_clock_info->ns.high = 0;
+	clk_info_buffer += (sizeof(*host_clock_info));
+	clk_info_bufsize -= sizeof(*host_clock_info);
+
+	/* copy the device clk config as that is the reference for this */
+	if (clk_info_bufsize < ts->fw_tlv_len) {
+		DHD_ERROR(("clock info buffer is too small to fit dev clk info %d, %d\n",
+			clk_info_bufsize, ts->fw_tlv_len));
+		return BCME_ERROR;
+	}
+	bcopy(ts->fw_tlv, clk_info_buffer, ts->fw_tlv_len);
+	clk_info_bufsize -= ts->fw_tlv_len;
+
+	DHD_INFO(("sending Host TS msg Len %d, xt_id %d, host_ts_capture_count %d\n",
+		(uint32)(sizeof(ts->host_ts_host_clk_info_buffer) - clk_info_bufsize),
+		ts->xt_id, ts->host_ts_capture_cnt));
+
+	bcm_print_bytes("host ts", (uchar *)ts->host_ts_host_clk_info_buffer,
+		sizeof(ts->host_ts_host_clk_info_buffer) - clk_info_bufsize);
+
+	ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)ts->host_ts_host_clk_info_buffer,
+		sizeof(ts->host_ts_host_clk_info_buffer) - clk_info_bufsize,
+		ts->host_ts_capture_cnt, ts->xt_id);
+	if (ret_val != 0) {
+		DHD_ERROR(("Fatal: Error sending HOST clock info msg to device: %d\n", ret_val));
+		return BCME_ERROR;
+	}
+	ts->host_ts_host_clk_info_buffer_in_use = TRUE;
+	ts->xt_ids.host_clk_info = ts->xt_id;
+	ts->xt_id++;
+	ts->pending_requests++;
+	return BCME_OK;
+}
+
+static uint32
+dhd_timesync_send_D2H_clk_correction(dhd_ts_t *ts)
+{
+	ts_d2h_clock_correction_t ts_clk_crtion;
+	int ret_val;
+
+	if (ts->timesync_disabled) {
+		DHD_ERROR(("Timesync Disabled: Cannot send d2h clock correction msg\n"));
+		return BCME_ERROR;
+	}
+
+	bzero(&ts_clk_crtion, sizeof(ts_clk_crtion));
+
+	/* XXX: should this be sent for all the clock sources? */
+
+	ts_clk_crtion.xtlv.id = BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG;
+	ts_clk_crtion.xtlv.len = sizeof(ts_clk_crtion) - sizeof(_bcm_xtlv_t);
+	ts_clk_crtion.clk_id = ts->h_clkid_max;
+	ts_clk_crtion.m.low = ts->correction_m.low;
+	ts_clk_crtion.m.high = ts->correction_m.high;
+	ts_clk_crtion.b.low = ts->correction_b.low;
+	ts_clk_crtion.b.high = ts->correction_b.high;
+
+	DHD_INFO(("sending D2H Correction: ID %d, LEN %d, clkid %d, m %d:%d, b %d:%d, seq %d\n",
+		ts_clk_crtion.xtlv.id, ts_clk_crtion.xtlv.len, ts_clk_crtion.clk_id,
+		ts_clk_crtion.m.high,
+		ts_clk_crtion.m.low,
+		ts_clk_crtion.b.high,
+		ts_clk_crtion.b.low,
+		ts->host_ts_capture_cnt));
+
+	ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)&ts_clk_crtion,
+		sizeof(ts_clk_crtion), ts->host_ts_capture_cnt, ts->xt_id);
+	if (ret_val != 0) {
+		DHD_ERROR(("Fatal: Error sending D2H clock correction msg to device: %d\n", ret_val));
+		return BCME_ERROR;
+	}
+	ts->xt_ids.d2h_clk_correction = ts->xt_id;
+	ts->xt_id++;
+	ts->pending_requests++;
+	return BCME_OK;
+}
+
+bool
+dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp)
+{
+	return (dhdp->ts->fwts2hsts_delay != 0);
+}
+
+bool
+dhd_timesync_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_ts_t *ts = dhdp->ts;
+
+	if (ts == NULL)
+		return FALSE;
+
+	ts->last_ts_watchdog_time = OSL_LOCALTIME_NS();
+	ts->ts_watchdog_calls++;
+
+	/* XXX: this relies on the watchdog running, which may not always hold */
+	if (ts->fwts2hsts_delay_wdcount) {
+		ts->fwts2hsts_delay_wdcount--;
+		if (ts->fwts2hsts_delay != 0 && dhdp->busstate == DHD_BUS_DATA &&
+			(ts->fwts2hsts_delay_wdcount == 0)) {
+			/* see if we need to send the host clock info */
+			dhd_timesync_send_host_clk_info(ts);
+			dhd_msgbuf_delay_post_ts_bufs(dhdp);
+		}
+	}
+	return FALSE;
+}
+
+static void
+dhd_timesync_log_timestamp_item(dhd_ts_log_ts_t *tsl, uint16 flowid, uint8 intf,
+	uint32 ts_low,
uint32 ts_high, dhd_pkt_parse_t *pkt) +{ + tsl->ts_log[tsl->cur_idx].ts_low = ts_low; + tsl->ts_log[tsl->cur_idx].ts_high = ts_high; + tsl->ts_log[tsl->cur_idx].intf = intf; + tsl->ts_log[tsl->cur_idx].proto = pkt->proto; + tsl->ts_log[tsl->cur_idx].t1 = pkt->t1; + tsl->ts_log[tsl->cur_idx].t2 = pkt->t2; + tsl->cur_idx++; + if (tsl->cur_idx == tsl->max_idx) + tsl->cur_idx = 0; +} + +void +dhd_timesync_log_tx_timestamp(dhd_ts_t *ts, uint16 flowid, uint8 intf, + uint32 ts_low, uint32 ts_high, dhd_pkt_parse_t *pkt) +{ + if (ts != NULL) { + dhd_timesync_log_timestamp_item(&ts->tx_timestamps, flowid, intf, + ts_low, ts_high, pkt); + } +} + +void +dhd_timesync_log_rx_timestamp(dhd_ts_t *ts, uint8 intf, uint32 ts_low, uint32 ts_high, + dhd_pkt_parse_t *pkt) +{ + if (ts != NULL) { + dhd_timesync_log_timestamp_item(&ts->rx_timestamps, 0, intf, + ts_low, ts_high, pkt); + } +} + +void +dhd_timesync_control(dhd_pub_t *dhdp, bool disabled) +{ + dhd_ts_t *ts; + if (dhdp == NULL) + return; + + ts = dhdp->ts; + if (ts != NULL) { + if (disabled) { + DHD_ERROR(("resetting the timesync counter, current(%d)\n", + ts->fw_ts_capture_cnt)); + + ts->fw_ts_capture_cnt = 0; + + /* Suspend case: Disable timesync after the config message */ + if ((ts->active_ipc_version >= 7) && (ts->h_tsconf_period != 0)) { + uint32 tsconf_period; + + tsconf_period = ts->h_tsconf_period; + ts->h_tsconf_period = 0; + + dhd_timesync_send_host_timestamping_config(ts, FALSE); + ts->h_tsconf_period = tsconf_period; + } + ts->timesync_disabled = TRUE; + ts->suspend_req++; + } else { + /* Resume case: Enable timesync before the config message */ + DHD_ERROR(("enabling the timesync counter, current(%d)\n", + ts->fw_ts_capture_cnt)); + + ts->timesync_disabled = FALSE; + ts->resume_req++; + + if ((ts->active_ipc_version >= 7) && (ts->h_tsconf_period != 0)) + dhd_timesync_send_host_timestamping_config(ts, FALSE); + } + } + /* XXX: may be all the other internal iovar calls should check for disabled state */ +} diff --git a/bcmdhd.101.10.361.x/dhd_timesync.h b/bcmdhd.101.10.361.x/dhd_timesync.h new file mode 100755 index 0000000..0e3afb3 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_timesync.h @@ -0,0 +1,68 @@ +/* + * Header file describing the common timesync functionality + * + * Provides type definitions and function prototypes used to handle timesync functionality. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ *
+ * $Id$:
+ */
+
+#ifndef _dhd_timesync_h_
+#define _dhd_timesync_h_
+
+typedef struct dhd_ts dhd_ts_t;
+
+/* Linkage, sets "ts" link and updates hdrlen in pub */
+extern int dhd_timesync_attach(dhd_pub_t *dhdp);
+
+/* Periodic watchdog hook; drives the delayed host timestamp sends */
+extern bool dhd_timesync_watchdog(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated timesync memory (including dhd_ts_t) */
+extern int dhd_timesync_detach(dhd_pub_t *dhdp);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_timesync_iovar_op(dhd_ts_t *ts, const char *name, void *params, int plen,
+	void *arg, int len, bool set);
+
+/* handle host time stamp completion */
+extern void dhd_timesync_handle_host_ts_complete(dhd_ts_t *ts, uint16 xt_id, uint16 status);
+
+/* handle fw time stamp event from firmware */
+extern void dhd_timesync_handle_fw_timestamp(dhd_ts_t *ts, uchar *tlv, uint32 tlv_len,
+	uint32 seqnum);
+
+/* get notified of the ipc rev */
+extern void dhd_timesync_notify_ipc_rev(dhd_ts_t *ts, uint32 ipc_rev);
+
+/* log txs timestamps */
+extern void dhd_timesync_log_tx_timestamp(dhd_ts_t *ts, uint16 flowid, uint8 intf,
+	uint32 ts_low, uint32 ts_high, dhd_pkt_parse_t *parse);
+
+/* log rx cpl timestamps */
+extern void dhd_timesync_log_rx_timestamp(dhd_ts_t *ts, uint8 intf,
+	uint32 ts_low, uint32 ts_high, dhd_pkt_parse_t *parse);
+
+/* dynamically disable/enable timesync based on the host driver suspend/resume state */
+extern void dhd_timesync_control(dhd_pub_t *dhdp, bool disabled);
+
+extern void dhd_timesync_debug_info_print(dhd_pub_t *dhdp);
+#endif /* _dhd_timesync_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_wet.c b/bcmdhd.101.10.361.x/dhd_wet.c
new file mode 100755
index 0000000..4537aef
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_wet.c
@@ -0,0 +1,1187 @@
+/**
+ * @file
+ * @brief
+ * Wireless EThernet (WET) Bridge.
+ *
+ * WET STA and WET client are interchangeable in this file and refer to
+ * addressable entities whose traffic is sent and received through this
+ * bridge, including the hosting device.
+ *
+ * Supported protocol families: IP v4.
+ *
+ * Tx: replace frames' source MAC address with the wireless interface's;
+ * update the IP-MAC address mapping table entry.
+ *
+ * Rx: replace frames' destination MAC address with the one found in
+ * the IP-MAC address mapping table.
+ *
+ * All data structures defined in this file are optimized for IP v4. To
+ * support other protocol families, write protocol specific handlers.
+ * Doing so may require data structure changes to expand various address
+ * storages to fit the protocol specific needs, for example, IPX needs 10
+ * octets for its network address. Also one may need to define the data
+ * structures in a more generic way so that they work with all supported
+ * protocol families, for example, the wet_sta structure may be defined
+ * as follows:
+ *
+ *	struct wet_sta {
+ *		uint8 nal;		network address length
+ *		uint8 na[NETA_MAX_LEN];	network address
+ *		uint8 mac[ETHER_ADDR_LEN];
+ *		...
+ *	};
+ *
+ * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +/** + * @file + * @brief + * XXX Twiki: [WirelessEthernet] + */ +#include +#include +#include +#include +#include +#include +#include <802.11.h> +#include +#include +#include <802.3.h> +#include +#include +#include +#include +#include +#include +#include + +#include + +/* IP/MAC address mapping entry */ +typedef struct wet_sta wet_sta_t; +struct wet_sta { + /* client */ + uint8 ip[IPV4_ADDR_LEN]; /* client IP addr */ + struct ether_addr mac; /* client MAC addr */ + uint8 flags[DHCP_FLAGS_LEN]; /* orig. dhcp flags */ + /* internal */ + wet_sta_t *next; /* free STA link */ + wet_sta_t *next_ip; /* hash link by IP */ + wet_sta_t *next_mac; /* hash link by MAC */ +}; +#define WET_NUMSTAS (1 << 8) /* max. # clients, must be multiple of 2 */ +#define WET_STA_HASH_SIZE (WET_NUMSTAS/2) /* must be <= WET_NUMSTAS */ +#define WET_STA_HASH_IP(ip) ((ip)[3]&(WET_STA_HASH_SIZE-1)) /* hash by IP */ +#define WET_STA_HASH_MAC(ea) (((ea)[3]^(ea)[4]^(ea)[5])&(WET_STA_HASH_SIZE-1)) /* hash by MAC */ +#define WET_STA_HASH_UNK -1 /* Unknown hash */ +#define IP_ISMULTI(ip) (((ip) & 0xf0000000) == 0xe0000000) /* Check for multicast by IP */ + +/* WET private info structure */ +struct dhd_wet_info { + /* pointer to dhdpublic info struct */ + dhd_pub_t *pub; + /* Host addresses */ + uint8 ip[IPV4_ADDR_LEN]; + struct ether_addr mac; + /* STA storage, one entry per eth. 
client */ + wet_sta_t sta[WET_NUMSTAS]; + /* Free sta list */ + wet_sta_t *stafree; + /* Used sta hash by IP */ + wet_sta_t *stahash_ip[WET_STA_HASH_SIZE]; + /* Used sta hash by MAC */ + wet_sta_t *stahash_mac[WET_STA_HASH_SIZE]; +}; + +/* forward declarations */ +static int wet_eth_proc(dhd_wet_info_t *weth, void *sdu, + uint8 *frame, int length, int send); +static int wet_vtag_proc(dhd_wet_info_t *weth, void *sdu, + uint8 * eh, uint8 *vtag, int length, int send); +static int wet_ip_proc(dhd_wet_info_t *weth, void *sdu, + uint8 * eh, uint8 *iph, int length, int send); +static int wet_arp_proc(dhd_wet_info_t *weth, void *sdu, + uint8 *eh, uint8 *arph, int length, int send); +static int wet_udp_proc(dhd_wet_info_t *weth, + uint8 *eh, uint8 *iph, uint8 *udph, int length, int send); +static int wet_dhcpc_proc(dhd_wet_info_t *weth, + uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send); +static int wet_dhcps_proc(dhd_wet_info_t *weth, + uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send); +static int wet_sta_alloc(dhd_wet_info_t *weth, wet_sta_t **saddr); +static int wet_sta_update_all(dhd_wet_info_t *weth, + uint8 *iaddr, struct ether_addr *eaddr, wet_sta_t **saddr); +static int wet_sta_update_mac(dhd_wet_info_t *weth, + struct ether_addr *eaddr, wet_sta_t **saddr); +static int wet_sta_remove_mac_entry(dhd_wet_info_t *weth, struct ether_addr *eaddr); +static int wet_sta_find_ip(dhd_wet_info_t *weth, + uint8 *iaddr, wet_sta_t **saddr); +static int wet_sta_find_mac(dhd_wet_info_t *weth, + struct ether_addr *eaddr, wet_sta_t **saddr); +static void csum_fixup_16(uint8 *chksum, + uint8 *optr, int olen, uint8 *nptr, int nlen); + +/* + * Protocol handler. 'ph' points to protocol specific header, + * for example, it points to IP header if it is IP packet. + */ +typedef int (*prot_proc_t)(dhd_wet_info_t *weth, void *sdu, uint8 *eh, + uint8 *ph, int length, int send); +/* Protocol handlers hash table - hash by ether type */ +typedef struct prot_hdlr prot_hdlr_t; +struct prot_hdlr { + uint16 type; /* ether type */ + prot_proc_t prot_proc; + prot_hdlr_t *next; /* next proto handler that has the same hash */ +}; +#define WET_PROT_HASH_SIZE (1 << 3) +#define WET_PROT_HASH(t) ((t)[1]&(WET_PROT_HASH_SIZE-1)) +static prot_hdlr_t ept_tbl[] = { + /* 0 */ {HTON16(ETHER_TYPE_8021Q), wet_vtag_proc, NULL}, /* 0x8100 */ +}; +static prot_hdlr_t prot_hash[WET_PROT_HASH_SIZE] = { + /* 0 */ {HTON16(ETHER_TYPE_IP), wet_ip_proc, &ept_tbl[0]}, /* 0x0800 */ + /* 1 */ {0, NULL, NULL}, /* unused */ + /* 2 */ {0, NULL, NULL}, /* unused */ + /* 3 */ {0, NULL, NULL}, /* unused */ + /* 4 */ {0, NULL, NULL}, /* unused */ + /* 5 */ {0, NULL, NULL}, /* unused */ + /* 6 */ {HTON16(ETHER_TYPE_ARP), wet_arp_proc, NULL}, /* 0x0806 */ + /* 7 */ {0, NULL, NULL}, /* unused */ +}; + +/* + * IPv4 handler. 'ph' points to protocol specific header, + * for example, it points to UDP header if it is UDP packet. 
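+ * For example, UDP (IP_PROT_UDP == 0x11) hashes to slot
+ * 0x11 & (WET_IPV4_HASH_SIZE - 1) == 1, which is where wet_udp_proc
+ * is registered in ipv4_hash[] below.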
+ */ +typedef int (*ipv4_proc_t)(dhd_wet_info_t *weth, uint8 *eh, + uint8 *iph, uint8 *ph, int length, int send); +/* IPv4 handlers hash table - hash by protocol type */ +typedef struct ipv4_hdlr ipv4_hdlr_t; +struct ipv4_hdlr { + uint8 type; /* protocol type */ + ipv4_proc_t ipv4_proc; + ipv4_hdlr_t *next; /* next proto handler that has the same hash */ +}; +#define WET_IPV4_HASH_SIZE (1 << 1) +#define WET_IPV4_HASH(p) ((p)&(WET_IPV4_HASH_SIZE-1)) +static ipv4_hdlr_t ipv4_hash[WET_IPV4_HASH_SIZE] = { + /* 0 */ {0, NULL, NULL}, /* unused */ + /* 1 */ {IP_PROT_UDP, wet_udp_proc, NULL}, /* 0x11 */ +}; + +/* + * UDP handler. 'ph' points to protocol specific header, + * for example, it points to DHCP header if it is DHCP packet. + */ +typedef int (*udp_proc_t)(dhd_wet_info_t *weth, uint8 *eh, + uint8 *iph, uint8 *udph, uint8 *ph, int length, int send); +/* UDP handlers hash table - hash by port number */ +typedef struct udp_hdlr udp_hdlr_t; +struct udp_hdlr { + uint16 port; /* udp dest. port */ + udp_proc_t udp_proc; + udp_hdlr_t *next; /* next proto handler that has the same hash */ +}; +#define WET_UDP_HASH_SIZE (1 << 3) +#define WET_UDP_HASH(p) ((p)[1]&(WET_UDP_HASH_SIZE-1)) +static udp_hdlr_t udp_hash[WET_UDP_HASH_SIZE] = { + /* 0 */ {0, NULL, NULL}, /* unused */ + /* 1 */ {0, NULL, NULL}, /* unused */ + /* 2 */ {0, NULL, NULL}, /* unused */ + /* 3 */ {HTON16(DHCP_PORT_SERVER), wet_dhcpc_proc, NULL}, /* 0x43 */ + /* 4 */ {HTON16(DHCP_PORT_CLIENT), wet_dhcps_proc, NULL}, /* 0x44 */ + /* 5 */ {0, NULL, NULL}, /* unused */ + /* 6 */ {0, NULL, NULL}, /* unused */ + /* 7 */ {0, NULL, NULL}, /* unused */ +}; + +#define WETHWADDR(weth) ((weth)->pub->mac.octet) +#define WETOSH(weth) ((weth)->pub->osh) + +/* special values */ +/* 802.3 llc/snap header */ +static uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; +static uint8 ipv4_bcast[IPV4_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff}; /* IP v4 broadcast address */ +static uint8 ipv4_null[IPV4_ADDR_LEN] = {0x00, 0x00, 0x00, 0x00}; /* IP v4 NULL address */ + +dhd_wet_info_t * +dhd_get_wet_info(dhd_pub_t *pub) +{ + dhd_wet_info_t *p; + int i; + p = (dhd_wet_info_t *)MALLOCZ(pub->osh, sizeof(dhd_wet_info_t)); + if (p == NULL) { + return 0; + } + for (i = 0; i < WET_NUMSTAS - 1; i ++) + p->sta[i].next = &p->sta[i + 1]; + p->stafree = &p->sta[0]; + p->pub = pub; + return p; +} + +void +dhd_free_wet_info(dhd_pub_t *pub, void *wet) +{ + if (wet) { + MFREE(pub->osh, wet, sizeof(dhd_wet_info_t)); + } +} + +void dhd_set_wet_host_ipv4(dhd_pub_t *pub, void *parms, uint32 len) +{ + dhd_wet_info_t *p; + p = (dhd_wet_info_t *)pub->wet_info; + bcopy(parms, p->ip, len); +} + +void dhd_set_wet_host_mac(dhd_pub_t *pub, void *parms, uint32 len) +{ + dhd_wet_info_t *p; + p = (dhd_wet_info_t *)pub->wet_info; + bcopy(parms, &p->mac, len); +} +/* process Ethernet frame */ +/* +* Return: +* = 0 if frame is done ok +* < 0 if unable to handle the frame +* > 0 if no further process +*/ +static int +BCMFASTPATH(wet_eth_proc)(dhd_wet_info_t *weth, void *sdu, uint8 *frame, int length, int send) +{ + uint8 *pt = frame + ETHER_TYPE_OFFSET; + uint16 type; + uint8 *ph; + prot_hdlr_t *phdlr; + /* intercept Ethernet II frame (type > 1500) */ + if (length >= ETHER_HDR_LEN && (pt[0] > (ETHER_MAX_DATA >> 8) || + (pt[0] == (ETHER_MAX_DATA >> 8) && pt[1] > (ETHER_MAX_DATA & 0xff)))) + ; + /* intercept 802.3 LLC/SNAP frame (type <= 1500) */ + else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN) { + uint8 *llc = frame + ETHER_HDR_LEN; + if (bcmp(llc_snap_hdr, llc, 
SNAP_HDR_LEN)) + return 0; + pt = llc + SNAP_HDR_LEN; + } + /* frame too short bail out */ + else { + DHD_ERROR(("wet_eth_proc: %s short eth frame, ignored\n", + send ? "send" : "recv")); + return -1; + } + ph = pt + ETHER_TYPE_LEN; + length -= ph - frame; + + /* Call protocol specific handler to process frame. */ + type = *(uint16 *)pt; + + for (phdlr = &prot_hash[WET_PROT_HASH(pt)]; + phdlr != NULL; phdlr = phdlr->next) { + if (phdlr->type != type || !phdlr->prot_proc) + continue; + return (phdlr->prot_proc)(weth, sdu, frame, ph, length, send); + } + + if (!bcmp(WETHWADDR(weth), frame + ETHER_SRC_OFFSET, ETHER_ADDR_LEN)) { + return 0; + } + else { + DHD_INFO(("%s: %s unknown type (0x%X), ignored %s\n", + __FUNCTION__, send ? "send" : "recv", type, + (type == 0xDD86) ? "IPv6":"")); + /* ignore unsupported protocol from different mac addr than us */ + return BCME_UNSUPPORTED; + } +} + +/* process 8021p/Q tagged frame */ +/* +* Return: +* = 0 if frame is done ok +* < 0 if unable to handle the frame +* > 0 if no further process +*/ +static int +BCMFASTPATH(wet_vtag_proc)(dhd_wet_info_t *weth, void *sdu, + uint8 * eh, uint8 *vtag, int length, int send) +{ + uint16 type; + uint8 *pt; + prot_hdlr_t *phdlr; + + /* check minimum length */ + if (length < ETHERVLAN_HDR_LEN) { + DHD_ERROR(("wet_vtag_proc: %s short VLAN frame, ignored\n", + send ? "send" : "recv")); + return -1; + } + + /* + * FIXME: check recursiveness to prevent stack from overflow + * in case someone sent frames 8100xxxxxxxx8100xxxxxxxx... + */ + + /* Call protocol specific handler to process frame. */ + type = *(uint16 *)(pt = vtag + VLAN_TAG_LEN); + + for (phdlr = &prot_hash[WET_PROT_HASH(pt)]; + phdlr != NULL; phdlr = phdlr->next) { + if (phdlr->type != type || !phdlr->prot_proc) + continue; + return (phdlr->prot_proc)(weth, sdu, eh, + pt + ETHER_TYPE_LEN, length, send); + } + + return 0; +} + +/* process IP frame */ +/* +* Return: +* = 0 if frame is done ok +* < 0 if unable to handle the frame +* > 0 if no further process +*/ +static int +BCMFASTPATH(wet_ip_proc)(dhd_wet_info_t *weth, void *sdu, + uint8 *eh, uint8 *iph, int length, int send) +{ + uint8 type; + int ihl; + wet_sta_t *sta; + ipv4_hdlr_t *iphdlr; + uint8 *iaddr; + struct ether_addr *ea = NULL; + int ret, ea_off = 0; + char eabuf[ETHER_ADDR_STR_LEN]; + BCM_REFERENCE(eabuf); + + /* IPv4 only */ + if (length < 1 || (IP_VER(iph) != IP_VER_4)) { + DHD_INFO(("wet_ip_proc: %s non IPv4 frame, ignored\n", + send ? "send" : "recv")); + return -1; + } + + ihl = IPV4_HLEN(iph); + + /* minimum length */ + if (length < ihl) { + DHD_ERROR(("wet_ip_proc: %s short IPv4 frame, ignored\n", + send ? "send" : "recv")); + return -1; + } + + /* protocol specific handling */ + type = IPV4_PROT(iph); + for (iphdlr = &ipv4_hash[WET_IPV4_HASH(type)]; + iphdlr; iphdlr = iphdlr->next) { + if (iphdlr->type != type || !iphdlr->ipv4_proc) + continue; + if ((ret = (iphdlr->ipv4_proc)(weth, eh, + iph, iph + ihl, length - ihl, send))) + return ret; + } + + /* generic IP packet handling + * Replace source MAC in Ethernet header with wireless's and + * keep track of IP MAC mapping when sending frame. 
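+ * For example (addresses hypothetical): a frame from client
+ * 00:11:22:33:44:55 with source IP 192.168.1.10 leaves with this
+ * device's MAC as source, and 192.168.1.10 -> 00:11:22:33:44:55 is
+ * recorded so the reply can be re-addressed on receive.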
+ */ + if (send) { + uint32 iaddr_dest, iaddr_src; + bool wet_table_upd = TRUE; + iaddr = iph + IPV4_SRC_IP_OFFSET; + iaddr_dest = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); + iaddr_src = ntoh32(*(uint32 *)(iaddr)); + + /* Do not process and update knowledge base on receipt of a local IP + * multicast frame + */ + if (IP_ISMULTI(iaddr_dest) && !iaddr_src) { + DHD_INFO(("recv multicast frame from %s.Don't update hash table\n", + bcm_ether_ntoa((struct ether_addr*) + (eh + ETHER_SRC_OFFSET), eabuf))); + wet_table_upd = FALSE; + } + if (wet_table_upd && wet_sta_update_all(weth, iaddr, + (struct ether_addr*)(eh + ETHER_SRC_OFFSET), &sta) < 0) { + DHD_INFO(("wet_ip_proc: unable to update STA %u.%u.%u.%u %s\n", + iaddr[0], iaddr[1], iaddr[2], iaddr[3], + bcm_ether_ntoa( + (struct ether_addr*)(eh + ETHER_SRC_OFFSET), eabuf))); + return -1; + } + ea = (struct ether_addr *)WETHWADDR(weth); + ea_off = ETHER_SRC_OFFSET; + eacopy(ea, eh + ea_off); + } + /* + * Replace dest MAC in Ethernet header using the found one + * when receiving frame. + */ + /* no action for received bcast/mcast ethernet frame */ + else if (!ETHER_ISMULTI(eh)) { + iaddr = iph + IPV4_DEST_IP_OFFSET; + if (wet_sta_find_ip(weth, iaddr, &sta) < 0) { + DHD_ERROR(("wet_ip_proc: unable to find STA %u.%u.%u.%u\n", + iaddr[0], iaddr[1], iaddr[2], iaddr[3])); + return -1; + } + ea = &sta->mac; + ea_off = ETHER_DEST_OFFSET; + eacopy(ea, eh + ea_off); + } + + return 0; +} + +/* process ARP frame - ARP proxy */ +/* + * Return: + * = 0 if frame is done ok + * < 0 if unable to handle the frame + * > 0 if no further process + */ +static int +BCMFASTPATH(wet_arp_proc)(dhd_wet_info_t *weth, void *sdu, + uint8 *eh, uint8 *arph, int length, int send) +{ + wet_sta_t *sta; + uint8 *iaddr; + char eabuf[ETHER_ADDR_STR_LEN]; + BCM_REFERENCE(eabuf); + + /* + * FIXME: validate ARP header: + * h/w Ethernet 2, proto IP x800, h/w addr size 6, proto addr size 4. + */ + + /* + * Replace source MAC in Ethernet header as well as source MAC in + * ARP protocol header when processing frame sent. + */ + if (send) { + iaddr = arph + ARP_SRC_IP_OFFSET; + if (wet_sta_update_all(weth, iaddr, + (struct ether_addr*)(eh + ETHER_SRC_OFFSET), &sta) < 0) { + DHD_INFO(("wet_arp_proc: unable to update STA %u.%u.%u.%u %s\n", + iaddr[0], iaddr[1], iaddr[2], iaddr[3], + bcm_ether_ntoa( + (struct ether_addr*)(eh + ETHER_SRC_OFFSET), eabuf))); + return -1; + } + bcopy(WETHWADDR(weth), eh + ETHER_SRC_OFFSET, ETHER_ADDR_LEN); + bcopy(WETHWADDR(weth), arph+ARP_SRC_ETH_OFFSET, ETHER_ADDR_LEN); + } + /* + * Replace dest MAC in Ethernet header as well as dest MAC in + * ARP protocol header when processing frame recv'd. Process ARP + * replies and Unicast ARP requests. 
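+ * The ARP target IP is looked up in the IP/MAC table and both the
+ * Ethernet destination and the ARP target hardware address are
+ * rewritten with the client's real MAC.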
+ */ + else if ((*(uint16 *)(arph + ARP_OPC_OFFSET) == HTON16(ARP_OPC_REPLY)) || + ((*(uint16 *)(arph + ARP_OPC_OFFSET) == HTON16(ARP_OPC_REQUEST)) && + (!ETHER_ISMULTI(eh)))) { + iaddr = arph + ARP_TGT_IP_OFFSET; + if (wet_sta_find_ip(weth, iaddr, &sta) < 0) { + DHD_INFO(("wet_arp_proc: unable to find STA %u.%u.%u.%u\n", + iaddr[0], iaddr[1], iaddr[2], iaddr[3])); + return -1; + } + bcopy(&sta->mac, arph + ARP_TGT_ETH_OFFSET, ETHER_ADDR_LEN); + bcopy(&sta->mac, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + } + + return 0; +} + +/* process UDP frame */ +/* + * Return: + * = 0 if frame is done ok + * < 0 if unable to handle the frame + * > 0 if no further process + */ +static int +BCMFASTPATH(wet_udp_proc)(dhd_wet_info_t *weth, + uint8 *eh, uint8 *iph, uint8 *udph, int length, int send) +{ + udp_hdlr_t *udphdlr; + uint16 port; + + /* check frame length, at least UDP_HDR_LEN */ + if ((length -= UDP_HDR_LEN) < 0) { + DHD_ERROR(("wet_udp_proc: %s short UDP frame, ignored\n", + send ? "send" : "recv")); + return -1; + } + + /* + * Unfortunately we must spend some time here to deal with + * some higher layer protocol special processings. + * See individual handlers for protocol specific details. + */ + port = *(uint16 *)(udph + UDP_DEST_PORT_OFFSET); + for (udphdlr = &udp_hash[WET_UDP_HASH((uint8 *)&port)]; + udphdlr; udphdlr = udphdlr->next) { + if (udphdlr->port != port || !udphdlr->udp_proc) + continue; + return (udphdlr->udp_proc)(weth, eh, iph, udph, + udph + UDP_HDR_LEN, length, send); + } + + return 0; +} + +/* + * DHCP is a 'complex' protocol for WET, mainly because it + * uses its protocol body to convey IP/MAC info. It is impossible + * to forward frames correctly back and forth without looking + * into the DHCP's body and interpreting it. See RFC2131 sect. + * 4.1 'Constructing and sending DHCP messages' for details + * of using/parsing various fields in the body. + * + * DHCP pass through: + * + * Must alter DHCP flag to broadcast so that the server + * can reply with the broadcast address before we can + * provide DHCP relay functionality. Otherwise the DHCP + * server will send DHCP replies using the DHCP client's + * MAC address. Such replies will not be delivered simply + * because: + * + * 1. The AP's bridge will not forward the replies back to + * this device through the wireless link because it does not + * know such node exists on this link. The bridge's forwarding + * table on the AP will have this device's MAC address only. + * It does not know anything else behind this device. + * + * 2. The AP's wireless driver won't allow such frames out + * either even if they made their way out the AP's bridge + * through the bridge's DLF broadcasting because there is + * no such STA associated with the AP. + * + * 3. This device's MAC won't allow such frames pass + * through in non-promiscuous mode even when they made + * their way out of the AP's wireless interface somehow. + * + * DHCP relay: + * + * Once the WET is configured with the host MAC address it can + * relay the host request as if it were sent from WET itself. + * + * Once the WET is configured with the host IP address it can + * pretend to be the host and act as a relay agent. 
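+ *
+ * As a sketch of the pass-through case (values hypothetical): a client
+ * DISCOVER carrying flags 0x0000 goes out with the broadcast bit
+ * forced on,
+ *
+ *	flags |= HTON16(DHCP_FLAG_BCAST);	wire flags 0x0000 -> 0x8000
+ *
+ * while the client's original flags are saved in its wet_sta entry so
+ * replies can be restored before delivery.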
+ * + * process DHCP client frame (client to server, or server to relay agent) + * Return: + * = 0 if frame is done ok + * < 0 if unable to handle the frame + * > 0 if no further process + */ +static int +BCMFASTPATH(wet_dhcpc_proc)(dhd_wet_info_t *weth, + uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send) +{ + wet_sta_t *sta; + uint16 flags; + char eabuf[ETHER_ADDR_STR_LEN]; + uint16 port; + uint8 *ipv4; + const struct ether_addr *ether; + BCM_REFERENCE(eabuf); + + /* + * FIXME: validate DHCP body: + * htype Ethernet 1, hlen Ethernet 6, frame length at least 242. + */ + + /* only interested in requests when sending to server */ + if (send && *(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REQUEST) + return 0; + /* only interested in replies when receiving from server as a relay agent */ + if (!send && *(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY) + return 0; + + /* send request */ + if (send) { + /* find existing or alloc new IP/MAC mapping entry */ + if (wet_sta_update_mac(weth, + (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), &sta) < 0) { + DHD_INFO(("wet_dhcpc_proc: unable to update STA %s\n", + bcm_ether_ntoa( + (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf))); + return -1; + } + bcopy(dhcp + DHCP_FLAGS_OFFSET, &flags, DHCP_FLAGS_LEN); + /* We can always relay the host's request when we know its MAC addr. */ + if (!ETHER_ISNULLADDR(weth->mac.octet) && + !bcmp(dhcp + DHCP_CHADDR_OFFSET, &weth->mac, ETHER_ADDR_LEN)) { + /* replace chaddr with host's MAC */ + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN, + WETHWADDR(weth), ETHER_ADDR_LEN); + bcopy(WETHWADDR(weth), dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN); + /* force reply to be unicast */ + flags &= ~HTON16(DHCP_FLAG_BCAST); + } + /* We can relay other clients' requests when we know the host's IP addr. */ + else if (!IPV4_ADDR_NULL(weth->ip)) { + /* we can only handle the first hop otherwise drop it */ + if (!IPV4_ADDR_NULL(dhcp + DHCP_GIADDR_OFFSET)) { + DHD_INFO(("wet_dhcpc_proc: not first hop, ignored\n")); + return -1; + } + /* replace giaddr with host's IP */ + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN, + weth->ip, IPV4_ADDR_LEN); + bcopy(weth->ip, dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN); + /* force reply to be unicast */ + flags &= ~HTON16(DHCP_FLAG_BCAST); + } + /* + * Request comes in when we don't know the host's MAC and/or IP + * addresses hence we can't relay the request. We must notify the + * server of our addressing limitation by turning on the broadcast + * bit at this point as what the function comments point out. 
+ */ + else + flags |= HTON16(DHCP_FLAG_BCAST); + /* update flags */ + bcopy(dhcp + DHCP_FLAGS_OFFSET, sta->flags, DHCP_FLAGS_LEN); + if (flags != *(uint16 *)sta->flags) { + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN, + (uint8 *)&flags, DHCP_FLAGS_LEN); + bcopy((uint8 *)&flags, dhcp + DHCP_FLAGS_OFFSET, + DHCP_FLAGS_LEN); + } + /* replace the Ethernet source MAC with ours */ + bcopy(WETHWADDR(weth), eh + ETHER_SRC_OFFSET, ETHER_ADDR_LEN); + } + /* relay recv'd reply to its destiny */ + else if (!IPV4_ADDR_NULL(weth->ip) && + !bcmp(dhcp + DHCP_GIADDR_OFFSET, weth->ip, IPV4_ADDR_LEN)) { + /* find IP/MAC mapping entry */ + if (wet_sta_find_mac(weth, + (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), &sta) < 0) { + DHD_INFO(("wet_dhcpc_proc: unable to find STA %s\n", + bcm_ether_ntoa( + (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf))); + return -1; + } + /* + * XXX the following code works for the first hop only + */ + /* restore the DHCP giaddr with its original */ + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN, + ipv4_null, IPV4_ADDR_LEN); + bcopy(ipv4_null, dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN); + /* restore the original client's dhcp flags */ + if (bcmp(dhcp + DHCP_FLAGS_OFFSET, sta->flags, DHCP_FLAGS_LEN)) { + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN, + sta->flags, DHCP_FLAGS_LEN); + bcopy(sta->flags, dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN); + } + /* replace the dest UDP port with DHCP client port */ + port = HTON16(DHCP_PORT_CLIENT); + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + udph + UDP_DEST_PORT_OFFSET, UDP_PORT_LEN, + (uint8 *)&port, UDP_PORT_LEN); + bcopy((uint8 *)&port, udph + UDP_DEST_PORT_OFFSET, UDP_PORT_LEN); + /* replace the dest MAC & IP addr with the client's */ + if (*(uint16 *)sta->flags & HTON16(DHCP_FLAG_BCAST)) { + ipv4 = ipv4_bcast; + ether = ðer_bcast; + } + else { + ipv4 = dhcp + DHCP_YIADDR_OFFSET; + ether = &sta->mac; + } + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + iph + IPV4_DEST_IP_OFFSET, IPV4_ADDR_LEN, + ipv4, IPV4_ADDR_LEN); + csum_fixup_16(iph + IPV4_CHKSUM_OFFSET, + iph + IPV4_DEST_IP_OFFSET, IPV4_ADDR_LEN, + ipv4, IPV4_ADDR_LEN); + bcopy(ipv4, iph + IPV4_DEST_IP_OFFSET, IPV4_ADDR_LEN); + bcopy(ether, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + } + /* it should not recv non-relay reply at all, but just in case */ + else { + DHD_INFO(("wet_dhcpc_proc: ignore recv'd frame from %s\n", + bcm_ether_ntoa((struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf))); + return -1; + } + + /* no further processing! */ + return 1; +} + +/* process DHCP server frame (server to client) */ +/* + * Return: + * = 0 if frame is done ok + * < 0 if unable to handle the frame + * > 0 if no further process + */ +static int +BCMFASTPATH(wet_dhcps_proc)(dhd_wet_info_t *weth, + uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send) +{ + wet_sta_t *sta; + char eabuf[ETHER_ADDR_STR_LEN]; + BCM_REFERENCE(eabuf); + + /* + * FIXME: validate DHCP body: + * htype Ethernet 1, hlen Ethernet 6, frame length at least 242. 
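+ *
+ * On a reply the client is found by chaddr, its saved dhcp flags are
+ * restored, and the Ethernet destination is rewritten to the client's
+ * MAC (or broadcast when the client requested it).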
+ */ + + /* only interested in replies when receiving from server */ + if (send || *(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY) + return 0; + + /* find IP/MAC mapping entry */ + if (wet_sta_find_mac(weth, (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), &sta) < 0) { + DHD_INFO(("wet_dhcps_proc: unable to find STA %s\n", + bcm_ether_ntoa((struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf))); + return -1; + } + /* relay the reply to the host when we know the host's MAC addr */ + if (!ETHER_ISNULLADDR(weth->mac.octet) && + !bcmp(dhcp + DHCP_CHADDR_OFFSET, WETHWADDR(weth), ETHER_ADDR_LEN)) { + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN, + weth->mac.octet, ETHER_ADDR_LEN); + bcopy(&weth->mac, dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN); + } + /* restore the original client's dhcp flags if necessary */ + if (bcmp(dhcp + DHCP_FLAGS_OFFSET, sta->flags, DHCP_FLAGS_LEN)) { + csum_fixup_16(udph + UDP_CHKSUM_OFFSET, + dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN, + sta->flags, DHCP_FLAGS_LEN); + bcopy(sta->flags, dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN); + } + /* replace the dest MAC with that of client's */ + if (*(uint16 *)sta->flags & HTON16(DHCP_FLAG_BCAST)) + bcopy((const uint8 *)ðer_bcast, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + else + bcopy(&sta->mac, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + + /* no further processing! */ + return 1; +} + +/* alloc IP/MAC mapping entry + * Returns 0 if succeeded; < 0 otherwise. + */ +static int +wet_sta_alloc(dhd_wet_info_t *weth, wet_sta_t **saddr) +{ + wet_sta_t *sta; + + /* allocate a new one */ + if (!weth->stafree) { + DHD_INFO(("wet_sta_alloc: no room for another STA\n")); + return -1; + } + sta = weth->stafree; + weth->stafree = sta->next; + + /* init them just in case */ + sta->next = NULL; + sta->next_ip = NULL; + sta->next_mac = NULL; + + *saddr = sta; + return 0; +} + +/* update IP/MAC mapping entry and hash + * Returns 0 if succeeded; < 0 otherwise. 
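+ *
+ * Three outcomes: an existing entry found by MAC (re-hashed by IP if
+ * its IP changed), a freshly allocated entry hashed by MAC, or failure
+ * when the free list is empty. Any stale entry holding the same IP is
+ * evicted back to the free list.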
+ */ +static int +BCMFASTPATH(wet_sta_update_all)(dhd_wet_info_t *weth, uint8 *iaddr, struct ether_addr *eaddr, + wet_sta_t **saddr) +{ + wet_sta_t *sta; + int i; + char eabuf[ETHER_ADDR_STR_LEN]; + BCM_REFERENCE(eabuf); + + /* find the existing one and remove it from the old IP hash link */ + if (!wet_sta_find_mac(weth, eaddr, &sta)) { + i = WET_STA_HASH_IP(sta->ip); + if (bcmp(sta->ip, iaddr, IPV4_ADDR_LEN)) { + wet_sta_t *sta2, **next; + for (next = &weth->stahash_ip[i], sta2 = *next; + sta2; sta2 = sta2->next_ip) { + if (sta2 == sta) + break; + next = &sta2->next_ip; + } + if (sta2) { + *next = sta2->next_ip; + sta2->next_ip = NULL; + } + i = WET_STA_HASH_UNK; + } + } + /* allocate a new one and hash it by MAC */ + else if (!wet_sta_alloc(weth, &sta)) { + i = WET_STA_HASH_MAC(eaddr->octet); + bcopy(eaddr, &sta->mac, ETHER_ADDR_LEN); + sta->next_mac = weth->stahash_mac[i]; + weth->stahash_mac[i] = sta; + i = WET_STA_HASH_UNK; + } + /* bail out if we can't find nor create any */ + else { + DHD_INFO(("wet_sta_update_all: unable to alloc STA %u.%u.%u.%u %s\n", + iaddr[0], iaddr[1], iaddr[2], iaddr[3], + bcm_ether_ntoa(eaddr, eabuf))); + return -1; + } + + /* update IP and hash by new IP */ + if (i == WET_STA_HASH_UNK) { + i = WET_STA_HASH_IP(iaddr); + bcopy(iaddr, sta->ip, IPV4_ADDR_LEN); + sta->next_ip = weth->stahash_ip[i]; + weth->stahash_ip[i] = sta; + + /* start here and look for other entries with same IP address */ + { + wet_sta_t *sta2, *prev; + prev = sta; + for (sta2 = sta->next_ip; sta2; sta2 = sta2->next_ip) { + /* does this entry have the same IP address? */ + if (!bcmp(sta->ip, sta2->ip, IPV4_ADDR_LEN)) { + /* sta2 currently points to the entry we need to remove */ + /* fix next pointers */ + prev->next_ip = sta2->next_ip; + sta2->next_ip = NULL; + /* now we need to find this guy in the MAC list and + remove it from that list too. + */ + wet_sta_remove_mac_entry(weth, &sta2->mac); + /* entry should be completely out of the table now, + add it to the free list + */ + memset(sta2, 0, sizeof(wet_sta_t)); + sta2->next = weth->stafree; + weth->stafree = sta2; + + sta2 = prev; + } + prev = sta2; + } + } + } + + *saddr = sta; + return 0; +} + +/* update IP/MAC mapping entry and hash */ +static int +BCMFASTPATH(wet_sta_update_mac)(dhd_wet_info_t *weth, struct ether_addr *eaddr, wet_sta_t **saddr) +{ + wet_sta_t *sta; + int i; + char eabuf[ETHER_ADDR_STR_LEN]; + BCM_REFERENCE(eabuf); + + /* find the existing one */ + if (!wet_sta_find_mac(weth, eaddr, &sta)) + ; + /* allocate a new one and hash it */ + else if (!wet_sta_alloc(weth, &sta)) { + i = WET_STA_HASH_MAC(eaddr->octet); + bcopy(eaddr, &sta->mac, ETHER_ADDR_LEN); + sta->next_mac = weth->stahash_mac[i]; + weth->stahash_mac[i] = sta; + } + /* bail out if we can't find nor create any */ + else { + DHD_INFO(("wet_sta_update_mac: unable to alloc STA %s\n", + bcm_ether_ntoa(eaddr, eabuf))); + return -1; + } + + *saddr = sta; + return 0; +} + +/* Remove MAC entry from hash list + * NOTE: This only removes the entry matching "eaddr" from the MAC + * list. The caller needs to remove from the IP list and + * put back onto the free list to completely remove the entry + * from the WET table. 
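+ * (wet_sta_update_all() above and dhd_wet_sta_delete_list() below
+ * follow exactly that sequence.)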
+ */
+static int
+BCMFASTPATH(wet_sta_remove_mac_entry)(dhd_wet_info_t *weth, struct ether_addr *eaddr)
+{
+	wet_sta_t *sta, *prev;
+	int i = WET_STA_HASH_MAC(eaddr->octet);
+	char eabuf[ETHER_ADDR_STR_LEN];
+	int found = 0;
+	BCM_REFERENCE(eabuf);
+
+	/* find the existing one */
+	for (sta = prev = weth->stahash_mac[i]; sta; sta = sta->next_mac) {
+		if (!bcmp(&sta->mac, eaddr, ETHER_ADDR_LEN)) {
+			found = 1;
+			break;
+		}
+		prev = sta;
+	}
+
+	/* bail out if we can't find it */
+	if (!found) {
+		DHD_INFO(("wet_sta_remove_mac_entry: unable to find STA %s entry\n",
+			bcm_ether_ntoa(eaddr, eabuf)));
+		return -1;
+	}
+
+	/* fix the list */
+	if (prev == sta)
+		weth->stahash_mac[i] = sta->next_mac; /* removing first entry in this bucket */
+	else
+		prev->next_mac = sta->next_mac;
+
+	return 0;
+}
+
+/* find IP/MAC mapping entry by IP address
+ * Returns 0 if succeeded; < 0 otherwise.
+ */
+static int
+BCMFASTPATH(wet_sta_find_ip)(dhd_wet_info_t *weth, uint8 *iaddr, wet_sta_t **saddr)
+{
+	int i = WET_STA_HASH_IP(iaddr);
+	wet_sta_t *sta;
+
+	/* find the existing one by IP */
+	for (sta = weth->stahash_ip[i]; sta; sta = sta->next_ip) {
+		if (bcmp(sta->ip, iaddr, IPV4_ADDR_LEN))
+			continue;
+		*saddr = sta;
+		return 0;
+	}
+
+	/* sta has not been learned */
+	DHD_INFO(("wet_sta_find_ip: unable to find STA %u.%u.%u.%u\n",
+		iaddr[0], iaddr[1], iaddr[2], iaddr[3]));
+	return -1;
+}
+
+/* find IP/MAC mapping entry by MAC address
+ * Returns 0 if succeeded; < 0 otherwise.
+ */
+static int
+BCMFASTPATH(wet_sta_find_mac)(dhd_wet_info_t *weth, struct ether_addr *eaddr, wet_sta_t **saddr)
+{
+	int i = WET_STA_HASH_MAC(eaddr->octet);
+	wet_sta_t *sta;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	BCM_REFERENCE(eabuf);
+
+	/* find the existing one by MAC */
+	for (sta = weth->stahash_mac[i]; sta; sta = sta->next_mac) {
+		if (bcmp(&sta->mac, eaddr, ETHER_ADDR_LEN))
+			continue;
+		*saddr = sta;
+		return 0;
+	}
+
+	/* sta has not been learned */
+	DHD_INFO(("wet_sta_find_mac: unable to find STA %s\n",
+		bcm_ether_ntoa(eaddr, eabuf)));
+	return -1;
+}
+
+/* Adjust 16 bit checksum - taken from RFC 3022.
+ *
+ * The algorithm below is applicable only for even offsets (i.e., optr
+ * below must be at an even offset from start of header) and even lengths
+ * (i.e., olen and nlen below must be even).
+ */
+static void
+BCMFASTPATH(csum_fixup_16)(uint8 *chksum, uint8 *optr, int olen, uint8 *nptr, int nlen)
+{
+	long x, old, new;
+
+	ASSERT(!((uintptr_t)optr&1) && !(olen&1));
+	ASSERT(!((uintptr_t)nptr&1) && !(nlen&1));
+
+	x = (chksum[0]<< 8)+chksum[1];
+	if (!x)
+		return;
+	x = ~x & 0xFFFF;
+	while (olen)
+	{
+		old = (optr[0]<< 8)+optr[1]; optr += 2;
+		x -= old & 0xffff;
+		if (x <= 0) { x--; x &= 0xffff; }
+		olen -= 2;
+	}
+	while (nlen)
+	{
+		new = (nptr[0]<< 8)+nptr[1]; nptr += 2;
+		x += new & 0xffff;
+		if (x & 0x10000) { x++; x &= 0xffff; }
+		nlen -= 2;
+	}
+	x = ~x & 0xFFFF;
+	chksum[0] = (uint8)(x >> 8); chksum[1] = (uint8)x;
+}
+
+/* Process frames in the transmit direction by replacing the source MAC
+ * with the wireless interface's and keeping the IP/MAC address mapping
+ * table up to date.
+ * Return:
+ *	= 0 if frame is done ok;
+ *	< 0 if unable to handle the frame;
+ *
+ * To keep other bridged interfaces from seeing our changes, especially
+ * changes to broadcast frames which they will definitely see, we must
+ * copy the frame to our own buffer, modify it, and only then send it.
+ * Return the new sdu in 'new'.
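+ *
+ * A minimal caller sketch (names hypothetical, not part of this change):
+ *
+ *	void *pkt = NULL;
+ *	if (dhd_wet_send_proc(wet, sdu, &pkt) < 0)
+ *		return;			frame not handled, caller drops it
+ *	'pkt' is then transmitted; for multicast it may be a modified
+ *	copy of the original 'sdu'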
+ */ +int +BCMFASTPATH(dhd_wet_send_proc)(void *wet, void *sdu, void **new) +{ + dhd_wet_info_t *weth = (dhd_wet_info_t *)wet; + uint8 *frame = PKTDATA(WETOSH(weth), sdu); + int length = PKTLEN(WETOSH(weth), sdu); + void *pkt = sdu; + + /* + * FIXME: need to tell if buffer is shared and only + * do copy on shared buffer. + */ + /* + * copy broadcast/multicast frame to our own packet + * otherwise we will screw up others because we alter + * the frame content. + */ + if (length < ETHER_HDR_LEN) { + DHD_ERROR(("dhd_wet_send_proc: unable to process short frame\n")); + return -1; + } + if (ETHER_ISMULTI(frame)) { + length = pkttotlen(WETOSH(weth), sdu); + if (!(pkt = PKTGET(WETOSH(weth), length, TRUE))) { + DHD_ERROR(("dhd_wet_send_proc: unable to alloc, dropped\n")); + return -1; + } + frame = PKTDATA(WETOSH(weth), pkt); + pktcopy(WETOSH(weth), sdu, 0, length, frame); + /* Transfer priority */ + PKTSETPRIO(pkt, PKTPRIO(sdu)); + PKTFREE(WETOSH(weth), sdu, TRUE); + PKTSETLEN(WETOSH(weth), pkt, length); + } + *new = pkt; + + /* process frame */ + return wet_eth_proc(weth, sdu, frame, length, 1) < 0 ? -1 : 0; +} + +/* + * Process frames in receive direction by replacing destination MAC with + * the one found in IP MAC address mapping table. + * Return: + * = 0 if frame is done ok; + * < 0 if unable to handle the frame; + */ +int +BCMFASTPATH(dhd_wet_recv_proc)(void *wet, void *sdu) +{ + dhd_wet_info_t *weth = (dhd_wet_info_t *)wet; + /* process frame */ + return wet_eth_proc(weth, sdu, PKTDATA(WETOSH(weth), sdu), + PKTLEN(WETOSH(weth), sdu), 0) < 0 ? -1 : 0; +} + +/* Delete WET Database */ +void +dhd_wet_sta_delete_list(dhd_pub_t *dhd_pub) +{ + wet_sta_t *sta; + int i, j; + dhd_wet_info_t *weth = dhd_pub->wet_info; + + for (i = 0; i < WET_STA_HASH_SIZE; i ++) { + for (sta = weth->stahash_mac[i]; sta; sta = sta->next_mac) { + wet_sta_t *sta2, **next; + j = WET_STA_HASH_IP(sta->ip); + for (next = &weth->stahash_ip[j], sta2 = *next; + sta2; sta2 = sta2->next_ip) { + if (sta2 == sta) + break; + next = &sta2->next_ip; + } + if (sta2) { + *next = sta2->next_ip; + sta2->next_ip = NULL; + } + j = WET_STA_HASH_UNK; + + wet_sta_remove_mac_entry(weth, &sta->mac); + memset(sta, 0, sizeof(wet_sta_t)); + } + } +} +void +dhd_wet_dump(dhd_pub_t *dhdp, struct bcmstrbuf *b) +{ + char eabuf[ETHER_ADDR_STR_LEN]; + wet_sta_t *sta; + int i; + dhd_wet_info_t *weth = dhdp->wet_info; + + bcm_bprintf(b, "Host MAC: %s\n", bcm_ether_ntoa(&weth->mac, eabuf)); + bcm_bprintf(b, "Host IP: %u.%u.%u.%u\n", + weth->ip[0], weth->ip[1], weth->ip[2], weth->ip[3]); + bcm_bprintf(b, "Entry\tEnetAddr\t\tInetAddr\n"); + for (i = 0; i < WET_NUMSTAS; i ++) { + /* FIXME: it leaves the last sta entry unfiltered, who cares! */ + if (weth->sta[i].next) + continue; + /* format the entry dump */ + sta = &weth->sta[i]; + bcm_bprintf(b, "%u\t%s\t%u.%u.%u.%u\n", + i, bcm_ether_ntoa(&sta->mac, eabuf), + sta->ip[0], sta->ip[1], sta->ip[2], sta->ip[3]); + } +} diff --git a/bcmdhd.101.10.361.x/dhd_wet.h b/bcmdhd.101.10.361.x/dhd_wet.h new file mode 100755 index 0000000..21b8429 --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_wet.h @@ -0,0 +1,60 @@ +/* + * Wireless Ethernet (WET) interface + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + */ + +/** XXX Twiki: [WirelessEthernet] */ + +#ifndef _dhd_wet_h_ +#define _dhd_wet_h_ + +#include +#include +#include + +#define DHD_WET_ENAB 1 +#define WET_ENABLED(dhdp) ((dhdp)->info->wet_mode == DHD_WET_ENAB) + +/* forward declaration */ +typedef struct dhd_wet_info dhd_wet_info_t; + +extern dhd_wet_info_t *dhd_get_wet_info(dhd_pub_t *pub); +extern void dhd_free_wet_info(dhd_pub_t *pub, void *wet); + +/* Process frames in transmit direction */ +extern int dhd_wet_send_proc(void *weth, void *sdu, void **new); +extern void dhd_set_wet_host_ipv4(dhd_pub_t *pub, void *parms, uint32 len); +extern void dhd_set_wet_host_mac(dhd_pub_t *pub, void *parms, uint32 len); +/* Process frames in receive direction */ +extern int dhd_wet_recv_proc(void *weth, void *sdu); +extern void dhd_wet_sta_delete_list(dhd_pub_t *dhd_pub); + +#ifdef PLC_WET +extern void dhd_wet_bssid_upd(dhd_wet_info_t *weth, dhd_bsscfg_t *cfg); +#endif /* PLC_WET */ + +int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val); +int dhd_get_wet_mode(dhd_pub_t *dhdp); +extern void dhd_wet_dump(dhd_pub_t *dhdp, struct bcmstrbuf *b); + +#endif /* _dhd_wet_h_ */ diff --git a/bcmdhd.101.10.361.x/dhd_wlfc.c b/bcmdhd.101.10.361.x/dhd_wlfc.c new file mode 100755 index 0000000..11283aa --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_wlfc.c @@ -0,0 +1,4988 @@ +/* + * DHD PROP_TXSTATUS Module. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + * + */ + +/** XXX Twiki [PropTxStatus] */ + +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include +#include + +#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */ +#include +#include +#endif + +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +/* + * wlfc naming and lock rules: + * + * 1. 
Private functions name like _dhd_wlfc_XXX, declared as static and avoid wlfc lock operation. + * 2. Public functions name like dhd_wlfc_XXX, use wlfc lock if needed. + * 3. Non-Proptxstatus module call public functions only and avoid wlfc lock operation. + * + */ + +#if defined (DHD_WLFC_THREAD) +#define WLFC_THREAD_QUICK_RETRY_WAIT_MS 10 /* 10 msec */ +#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */ +#endif /* defined (DHD_WLFC_THREAD) */ + +#ifdef PROP_TXSTATUS + +#ifdef QMONITOR +#define DHD_WLFC_QMON_COMPLETE(entry) dhd_qmon_txcomplete(&entry->qmon) +#else +#define DHD_WLFC_QMON_COMPLETE(entry) +#endif /* QMONITOR */ + +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + +/** for 'out of order' debug */ +static void +_dhd_wlfc_bprint(athost_wl_status_info_t* wlfc, const char *fmt, ...) +{ + va_list ap; + int r, size; + uint8 *buf; + bool bRetry = FALSE; + + if (!wlfc || !wlfc->log_buf) { + return; + } + + va_start(ap, fmt); + +retry: + buf = wlfc->log_buf + wlfc->log_buf_offset; + size = WLFC_LOG_BUF_SIZE -1 - wlfc->log_buf_offset; + + r = vsnprintf(buf, size, fmt, ap); + /* Non Ansi C99 compliant returns -1, + * Ansi compliant return r >= b->size, + * bcmstdlib returns 0, handle all + */ + /* r == 0 is also the case when strlen(fmt) is zero. + * typically the case when "" is passed as argument. + */ + if ((r == -1) || (r >= size)) { + bRetry = TRUE; + } else { + wlfc->log_buf_offset += r; + } + + if ((wlfc->log_buf_offset >= (WLFC_LOG_BUF_SIZE -1)) || bRetry) { + wlfc->log_buf[wlfc->log_buf_offset] = 0; + wlfc->log_buf_offset = 0; + if (!wlfc->log_buf_full) { + wlfc->log_buf_full = TRUE; + } + + if (bRetry) { + bRetry = FALSE; + goto retry; + } + } + + va_end(ap); + + return; +} /* _dhd_wlfc_bprint */ + +/** for 'out of order' debug */ +static void _dhd_wlfc_print_1k_buf(uint8* buf, int size) +{ + /* print last 1024 bytes */ + if (size > 1024) { + buf += (size - 1024); + } + printf("%s", buf); +} + +/** for 'out of order' debug */ +static void +_dhd_wlfc_print_log(athost_wl_status_info_t* wlfc) +{ + if (!wlfc || !wlfc->log_buf) { + return; + } + + printf("%s: log_buf_full(%d), log_buf_offset(%d)\n", + __FUNCTION__, wlfc->log_buf_full, wlfc->log_buf_offset); + if (wlfc->log_buf_full) { + _dhd_wlfc_print_1k_buf(wlfc->log_buf + wlfc->log_buf_offset, + WLFC_LOG_BUF_SIZE - wlfc->log_buf_offset); + } + wlfc->log_buf[wlfc->log_buf_offset] = 0; + _dhd_wlfc_print_1k_buf(wlfc->log_buf, wlfc->log_buf_offset); + printf("\n%s: done\n", __FUNCTION__); + + wlfc->log_buf_offset = 0; + wlfc->log_buf_full = FALSE; +} + +/** for 'out of order' debug */ +static void +_dhd_wlfc_check_send_order(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry, void* p) +{ + uint8 seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + uint8 gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + + if ((entry->last_send_gen[prec] == gen) && + ((uint8)(entry->last_send_seq[prec] + 1) > seq)) { + printf("%s: prec(%d), last(%u), p(%u)\n", + __FUNCTION__, prec, entry->last_send_seq[prec], seq); + _dhd_wlfc_print_log(wlfc); + } + + entry->last_send_seq[prec] = seq; + entry->last_send_gen[prec] = gen; +} + +/** for 'out of order' debug */ +static void +_dhd_wlfc_check_complete_order(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry, void* p) +{ + uint8 seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + + entry->last_complete_seq[prec] = seq; +} + +#endif /* defined(BCMINTERNAL) && 
defined(OOO_DEBUG) */ + +/** reordering related */ + +#if defined(DHD_WLFC_THREAD) +static void +_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp) +{ +#if defined(LINUX) + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); +#endif /* LINUX */ +} +#endif /* DHD_WLFC_THREAD */ + +static uint16 +_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq) +{ + uint16 seq; + + if (!p) { + return 0xffff; + } + + seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + if (seq < current_seq) { + /* wrap around */ + seq += 256; + } + + return seq; +} + +/** + * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder + * suppressed packets. + * @param[in] pq caller supplied packet queue to enqueue the packet on + * @param[in] prec precedence of the to-be-queued packet + * @param[in] p transmit packet to enqueue + * @param[in] qHead if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order. + * @param[in] current_seq + * @param[in] reOrder reOrder on odd precedence (=suppress queue) + */ +static void +_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, + uint8 current_seq, bool reOrder) +{ + struct pktq_prec *q; + uint16 seq, seq2; + void *p2, *p2_prev; + + if (!p) + return; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) { + /* empty queue */ + q->head = p; + q->tail = p; + } else { + if (reOrder && (prec & 1)) { + seq = _dhd_wlfc_adjusted_seq(p, current_seq); + p2 = qHead ? q->head : q->tail; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) { + /* need reorder */ + p2 = q->head; + p2_prev = NULL; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + while (seq > seq2) { + p2_prev = p2; + p2 = PKTLINK(p2); + if (!p2) { + break; + } + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + } + + if (p2_prev == NULL) { + /* insert head */ + PKTSETLINK(p, q->head); + q->head = p; + } else if (p2 == NULL) { + /* insert tail */ + PKTSETLINK(p2_prev, p); + q->tail = p; + } else { + /* insert after p2_prev */ + PKTSETLINK(p, PKTLINK(p2_prev)); + PKTSETLINK(p2_prev, p); + } + goto exit; + } + } + + if (qHead) { + PKTSETLINK(p, q->head); + q->head = p; + } else { + PKTSETLINK(q->tail, p); + q->tail = p; + } + } + +exit: + + q->n_pkts++; + pq->n_pkts_tot++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; +} /* _dhd_wlfc_prec_enque */ + +/** + * Create a place to store all packet pointers submitted to the firmware until a status comes back, + * suppress or otherwise. + * + * hang-er: noun, a contrivance on which things are hung, as a hook. 
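+ *
+ * In sketch form (state names as used by the hanger code below), each
+ * hanger item pairs a packet pointer with a small state machine:
+ *
+ *	FREE -> INUSE -> (INUSE_SUPPRESSED) -> FREE
+ *
+ * The slot index is packed into the packet's host-to-device tag with
+ * WL_TXSTATUS_SET_HSLOT(), so a later txstatus can locate the packet.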
+ */ +/** @deprecated soon */ +static void* +_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items) +{ + int i; + wlfc_hanger_t* hanger; + + /* allow only up to a specific size for now */ + ASSERT(max_items == WLFC_HANGER_MAXITEMS); + + if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER, + WLFC_HANGER_SIZE(max_items))) == NULL) { + return NULL; + } + memset(hanger, 0, WLFC_HANGER_SIZE(max_items)); + hanger->max_items = max_items; + + for (i = 0; i < hanger->max_items; i++) { + hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + } + return hanger; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger) +{ + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items)); + return BCME_OK; + } + return BCME_BADARG; +} + +/** @deprecated soon */ +static uint16 +_dhd_wlfc_hanger_get_free_slot(void* hanger) +{ + uint32 i; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + i = h->slot_pos + 1; + if (i == h->max_items) { + i = 0; + } + while (i != h->slot_pos) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->slot_pos = i; + return (uint16)i; + } + i++; + if (i == h->max_items) + i = 0; + } + h->failed_slotfind++; + } + return WLFC_HANGER_MAXITEMS; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *gen = 0xff; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *gen = h->items[slot_id].gen; + } + else { + DHD_ERROR(("Error: %s():%d item not used\n", + __FUNCTION__, __LINE__)); + rc = BCME_NOTFOUND; + } + + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h && (slot_id < WLFC_HANGER_MAXITEMS)) { + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE; + h->items[slot_id].pkt = pkt; + h->items[slot_id].pkt_state = 0; + h->items[slot_id].pkt_txstatus = 0; + h->pushed++; + } else { + h->failed_to_push++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *pktout = NULL; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *pktout = h->items[slot_id].pkt; + if (remove_from_hanger) { + h->items[slot_id].state = + WLFC_HANGER_ITEM_STATE_FREE; + h->items[slot_id].pkt = NULL; + h->items[slot_id].gen = 0xff; + h->items[slot_id].identifier = 0; + h->popped++; + } + } else { + h->failed_to_pop++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + /* this packet was not pushed at the time it went to the firmware */ + if 
(slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + if (h) { + h->items[slot_id].gen = gen; + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED; + } else { + rc = BCME_BADARG; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** remove reference of specific packet in hanger */ +/** @deprecated soon */ +static bool +_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt) +{ + int i; + + if (!h || !pkt) { + return FALSE; + } + + i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt))); + + if ((i < h->max_items) && (pkt == h->items[i].pkt)) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + h->items[i].pkt = NULL; + h->items[i].gen = 0xff; + h->items[i].identifier = 0; + return TRUE; + } else { + DHD_ERROR(("Error: %s():%d item not suppressed\n", + __FUNCTION__, __LINE__)); + } + } + + return FALSE; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p) +{ + wlfc_mac_descriptor_t* entry; + uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + + if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[entry_idx]; + else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pktq_penq(&entry->afq, prec, p); + + return BCME_OK; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec, + void **pktout) +{ + wlfc_mac_descriptor_t *entry; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!ctx) { + DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout)); + return BCME_BADARG; + } + + if (pktout) { + *pktout = NULL; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->afq; + + ASSERT(prec < pq->num_prec); + + q = &pq->q[prec]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) + { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + bcm_pkt_validate_chk(p, "_dhd_wlfc_deque_afq"); + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt, + WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head))))); + ctx->stats.ooo_pkts[prec]++; + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->n_pkts--; + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + PKTSETLINK(p, NULL); + + if (pktout) { + *pktout = p; + } + + return BCME_OK; +} /* 
_dhd_wlfc_deque_afq */ + +/** + * Flow control information piggy backs on packets, in the form of one or more TLVs. This function + * pushes one or more TLVs onto a packet that is going to be sent towards the dongle. + * + * @param[in] ctx + * @param[in/out] packet + * @param[in] tim_signal TRUE if parameter 'tim_bmp' is valid + * @param[in] tim_bmp + * @param[in] mac_handle + * @param[in] htodtag + * @param[in] htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon + * earlier between host and firmware. + * @param[in] skip_wlfc_hdr + */ +static int +_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal, + uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr) +{ + uint32 wl_pktinfo = 0; + uint8* wlh; + uint8 dataOffset = 0; + uint8 fillers; + uint8 tim_signal_len = 0; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + struct bdc_header *h; + void *p = *packet; + + if (skip_wlfc_hdr) + goto push_bdc_hdr; + + if (tim_signal) { + tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + } + + /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ + dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len; + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + dataOffset += WLFC_CTL_VALUE_LEN_SEQ; + } + + fillers = ROUNDUP(dataOffset, 4) - dataOffset; + dataOffset += fillers; + + PKTPUSH(ctx->osh, p, dataOffset); + wlh = (uint8*) PKTDATA(ctx->osh, p); + + wl_pktinfo = htol32(htodtag); + + wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG; + wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG; + memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32)); + + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + uint16 wl_seqinfo = htol16(htodseq); + wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ; + memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo, + WLFC_CTL_VALUE_LEN_SEQ); + } + + if (tim_signal_len) { + wlh[dataOffset - fillers - tim_signal_len ] = + WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 1] = + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle; + wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp; + } + if (fillers) + memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers); + +push_bdc_hdr: + PKTPUSH(ctx->osh, p, BDC_HEADER_LEN); + h = (struct bdc_header *)PKTDATA(ctx->osh, p); + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(p)) + h->flags |= BDC_FLAG_SUM_NEEDED; + +#ifdef EXT_STA + /* save pkt encryption exemption info for dongle */ + h->flags &= ~BDC_FLAG_EXEMPT; + h->flags |= (DHD_PKTTAG_EXEMPT(PKTTAG(p)) & BDC_FLAG_EXEMPT); +#endif /* EXT_STA */ + + h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = dataOffset >> 2; + BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p))); + *packet = p; + return BCME_OK; +} /* _dhd_wlfc_pushheader */ + +/** + * Removes (PULLs) flow control related headers from the caller supplied packet, is invoked eg + * when a packet is about to be freed. 
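+ *
+ * Header layout this function unwinds, in sketch form (the lengths are
+ * the ones checked below):
+ *
+ *	[ BDC header | WLFC TLVs + fillers, (dataOffset << 2) bytes | frame ]
+ *
+ * hence the two PKTPULL steps: BDC_HEADER_LEN first, then the TLV area
+ * whose size is recovered from h->dataOffset.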
+ */ +static int +_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf) +{ + struct bdc_header *h; + + if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf); + + /* pull BDC header */ + PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN); + + if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2))); + return BCME_ERROR; + } + + /* pull wl-header */ + PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2)); + return BCME_OK; +} + +/** + * @param[in/out] p packet + */ +static wlfc_mac_descriptor_t* +_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) +{ + int i; + wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes; + uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p)); + uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p)); + wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p)); + int iftype = ctx->destination_entries.interfaces[ifid].iftype; + + /* saved one exists, return it */ + if (entry) + return entry; + + /* Multicast destination, STA and P2P clients get the interface entry. + * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations + * have their own entry. + */ + if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) || + iftype == WLC_E_IF_ROLE_P2P_CLIENT) && + (ctx->destination_entries.interfaces[ifid].occupied)) { + entry = &ctx->destination_entries.interfaces[ifid]; + } + + if (entry && ETHER_ISMULTI(dstn)) { + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + return entry; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (table[i].occupied) { + if (table[i].interface_id == ifid) { + if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) { + entry = &table[i]; + break; + } + } + } + } + + if (entry == NULL) + entry = &ctx->destination_entries.other; + + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + + return entry; +} /* _dhd_wlfc_find_table_entry */ + +/** + * In case a packet must be dropped (because eg the queues are full), various tallies have to be + * be updated. Called from several other functions. + * @param[in] dhdp pointer to public DHD structure + * @param[in] prec precedence of the packet + * @param[in] p the packet to be dropped + * @param[in] bPktInQ TRUE if packet is part of a queue + */ +static int +_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ) +{ + athost_wl_status_info_t* ctx; + void *pout = NULL; + + ASSERT(dhdp && p); + if (prec < 0 || prec >= WLFC_PSQ_PREC_COUNT) { + ASSERT(0); + return BCME_BADARG; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + /* suppressed queue, need pop from hanger */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG + (PKTTAG(p))), &pout, TRUE); + ASSERT(p == pout); + } + + if (!(prec & 1)) { +#ifdef DHDTCPACK_SUPPRESS + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, 0, 0, TRUE); + + /* This packet is about to be freed, so remove it from tcp_ack_info_tbl + * This must be one of... + * 1. A pkt already in delayQ is evicted by another pkt with higher precedence + * in _dhd_wlfc_prec_enq_with_drop() + * 2. A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_enque_delayq(). + * 3. 
A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_rollback_packet_toq(). + */ + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" + " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + + if (bPktInQ) { + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + } + + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--; + ctx->stats.pktout++; + ctx->stats.drop_pkts[prec]++; + + dhd_txcomplete(dhdp, p, FALSE); + PKTFREE(ctx->osh, p, TRUE); + + return 0; +} /* _dhd_wlfc_prec_drop */ + +/** + * Called when eg the host handed a new packet over to the driver, or when the dongle reported + * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit + * packet in the host driver to be (re)transmitted at a later opportunity. + * @param[in] dhdp pointer to public DHD structure + * @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence + */ +static bool +_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead, + uint8 current_seq) +{ + void *p = NULL; + int eprec = -1; /* precedence to evict from */ + athost_wl_status_info_t* ctx; + + ASSERT(dhdp && pq && pkt); + ASSERT(prec >= 0 && prec < pq->num_prec); + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktqprec_full(pq, prec) && !pktq_full(pq)) { + goto exit; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktqprec_full(pq, prec)) { + eprec = prec; + } else if (pktq_full(pq)) { + p = pktq_peek_tail(pq, &eprec); + if (!p) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + if ((eprec > prec) || (eprec < 0)) { + if (!pktqprec_empty(pq, prec)) { + eprec = prec; + } else { + return FALSE; + } + } + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktqprec_empty(pq, eprec)); + /* Evict all fragmented frames */ + dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop); + } + +exit: + /* Enqueue */ + _dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq, + WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)); + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++; + ctx->pkt_cnt_per_ac[prec>>1]++; + ctx->pkt_cnt_in_psq++; + + return TRUE; +} /* _dhd_wlfc_prec_enq_with_drop */ + +/** + * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in + * the event that this 'commit' failed. + */ +static int +_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx, + void* p, ewlfc_packet_state_t pkt_type, uint32 hslot) +{ + /* + * put the packet back to the head of queue + * - suppressed packet goes back to suppress sub-queue + * - pull out the header, if new or delayed packet + * + * Note: hslot is used only when header removal is done. + */ + wlfc_mac_descriptor_t* entry; + int rc = BCME_OK; + int prec, fifo_id; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + fifo_id = prec << 1; + if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) + fifo_id += 1; + if (entry != NULL) { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the firmware (for pspoll etc.) 
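+
+	   In sketch form, the rollback destination is (fifo_id = prec << 1):
+	       suppressed packet  -> sub-queue fifo_id + 1, header kept;
+	       new/delayed packet -> sub-queue fifo_id, header pulled first.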
+ */ + if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) + entry->requested_credit++; + + if (pkt_type == eWLFC_PKTTYPE_DELAYED) { + /* decrement sequence count */ + WLFC_DECR_SEQCOUNT(entry, prec); + /* remove header first */ + rc = _dhd_wlfc_pullheader(ctx, p); + if (rc != BCME_OK) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + goto exit; + } + } + + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE, + WLFC_SEQCOUNT(entry, fifo_id>>1)) + == FALSE) { + /* enque failed */ + DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n", + __FUNCTION__, __LINE__, fifo_id)); + rc = BCME_ERROR; + } + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + +exit: + if (rc != BCME_OK) { + ctx->stats.rollback_failed++; + _dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE); + } else { + ctx->stats.rollback++; + } + + return rc; +} /* _dhd_wlfc_rollback_packet_toq */ + +/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */ +static bool +_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid) +{ + int prec, ac_traffic = WLFC_NO_TRAFFIC; + + for (prec = 0; prec < AC_COUNT; prec++) { + if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) { + if (ac_traffic == WLFC_NO_TRAFFIC) + ac_traffic = prec + 1; + else if (ac_traffic != (prec + 1)) + ac_traffic = WLFC_MULTI_TRAFFIC; + } + } + + if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) { + /* single AC (BE/BK/VI/VO) in queue */ + if (ctx->allow_fc) { + return TRUE; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (ctx->fc_defer_timestamp == 0) { + /* first single ac scenario */ + ctx->fc_defer_timestamp = curr_t; + return FALSE; + } + + /* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */ + delta = curr_t - ctx->fc_defer_timestamp; + if (delta >= WLFC_FC_DEFER_PERIOD_MS) { + ctx->allow_fc = TRUE; + } + } + } else { + /* multiple ACs or BCMC in queue */ + ctx->allow_fc = FALSE; + ctx->fc_defer_timestamp = 0; + } + + return ctx->allow_fc; +} /* _dhd_wlfc_allow_fc */ + +/** + * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on + * low/high watermarks. 
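+ *
+ * The hysteresis below, in sketch form (the watermark names are the real
+ * ones):
+ *
+ *	n_pkts_tot >= WLFC_FLOWCONTROL_HIWATER  -> dhd_txflowcontrol(.., ON)
+ *	n_pkts_tot <= WLFC_FLOWCONTROL_LOWATER  -> dhd_txflowcontrol(.., OFF)
+ *
+ * The gap between the two watermarks keeps the state from toggling on
+ * every enqueue/dequeue.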
+ */ +static void +_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id) +{ + dhd_pub_t *dhdp; + + ASSERT(ctx); + + dhdp = (dhd_pub_t *)ctx->dhdp; + ASSERT(dhdp); + + if (dhdp->skip_fc && dhdp->skip_fc((void *)dhdp, if_id)) + return; + + if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id)) + return; + + if ((pq->n_pkts_tot <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) { + /* start traffic */ + ctx->hostif_flow_state[if_id] = OFF; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n", + pq->n_pkts_tot, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("F")); + + dhd_txflowcontrol(dhdp, if_id, OFF); + + ctx->toggle_host_if = 0; + } + + if (pq->n_pkts_tot >= WLFC_FLOWCONTROL_HIWATER && ctx->hostif_flow_state[if_id] == OFF) { + /* stop traffic */ + ctx->hostif_flow_state[if_id] = ON; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n", + pq->n_pkts_tot, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("N")); + + dhd_txflowcontrol(dhdp, if_id, ON); + + ctx->host_ifidx = if_id; + ctx->toggle_host_if = 1; + } + + return; +} /* _dhd_wlfc_flow_control_check */ + +/** XXX: Warning: this function directly accesses bus-transmit function */ +static int +_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 ta_bmp) +{ + int rc = BCME_OK; + void* p = NULL; + int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + if (dhdp->proptxstatus_txoff) { + rc = BCME_NORESOURCE; + return rc; + } + + /* allocate a dummy packet */ + p = PKTGET(ctx->osh, dummylen, TRUE); + if (p) { + PKTPULL(ctx->osh, p, dummylen); + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0); + _dhd_wlfc_pushheader(ctx, &p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE); + DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1); +#ifdef PROP_TXSTATUS_DEBUG + ctx->stats.signal_only_pkts_sent++; +#endif + +#if defined(BCMPCIE) + /* XXX : RAHUL : Verify the ifidx */ + rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx); +#else + rc = dhd_bus_txdata(dhdp->bus, p); +#endif + if (rc != BCME_OK) { + _dhd_wlfc_pullheader(ctx, p); + PKTFREE(ctx->osh, p, TRUE); + } + } else { + DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", + __FUNCTION__, dummylen)); + rc = BCME_NOMEM; + dhdp->tx_pktgetfail++; + } + + return rc; +} /* _dhd_wlfc_send_signalonly_packet */ + +/** + * Called on eg receiving 'mac close' indication from dongle. Updates the per-MAC administration + * maintained in caller supplied parameter 'entry'. 
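+ *
+ * In sketch form: for a CLOSEd entry, bit 'prec' of traffic_pending_bmp
+ * tracks whether either sub-queue of that precedence still holds packets;
+ * any change is piggybacked to the firmware as a TIM update.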
+ * + * @param[in/out] entry administration about a remote MAC entity + * @param[in] prec precedence queue for this remote MAC entitity + * + * Return value: TRUE if traffic availability changed + */ +static bool +_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + int prec) +{ + bool rc = FALSE; + + if (entry->state == WLFC_STATE_CLOSE) { + if ((pktqprec_n_pkts(&entry->psq, (prec << 1)) == 0) && + (pktqprec_n_pkts(&entry->psq, ((prec << 1) + 1)) == 0)) { + /* no packets in both 'normal' and 'suspended' queues */ + if (entry->traffic_pending_bmp & NBITVAL(prec)) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp & ~ NBITVAL(prec); + } + } else { + /* packets are queued in host for transmission to dongle */ + if (!(entry->traffic_pending_bmp & NBITVAL(prec))) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp | NBITVAL(prec); + } + } + } + + if (rc) { + /* request a TIM update to firmware at the next piggyback opportunity */ + if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) { + entry->send_tim_signal = 1; + /* + XXX: send a header only packet from the same context. + --this should change to sending from a timeout or similar. + */ + _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp); + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + entry->send_tim_signal = 0; + } else { + rc = FALSE; + } + } + + return rc; +} /* _dhd_wlfc_traffic_pending_check */ + +/** + * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues + * the packet to transmit to firmware again at a later opportunity. + */ +static int +_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p) +{ + wlfc_mac_descriptor_t* entry; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_NOTFOUND; + } + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. + */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE, + WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + ctx->stats.delayq_full_error++; + /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */ + WLFC_DBGMESG(("s")); + return BCME_ERROR; + } + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p))); + return BCME_OK; +} + +/** + * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer + * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet. + * + * @param[in/out] ctx driver specific flow control administration + * @param[in/out] entry The remote MAC entity for which the packet is destined. + * @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet. 
+ * @param[in] header_needed True if packet is 'new' to flow control + * @param[out] slot Handle to container in which the packet was 'parked' + */ +static int +_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot) +{ + int rc = BCME_OK; + int hslot = WLFC_HANGER_MAXITEMS; + bool send_tim_update = FALSE; + uint32 htod = 0; + uint16 htodseq = 0; + uint8 free_ctr; + int gen = 0xff; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + void * p = *packet; + + *slot = hslot; + + if (entry == NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, p); + } + + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + if (entry->send_tim_signal) { + /* sends a traffic indication bitmap to the dongle */ + send_tim_update = TRUE; + entry->send_tim_signal = 0; + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + } + + if (header_needed) { + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + hslot = (uint)(entry - &ctx->destination_entries.nodes[0]); + } else { + hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger); + } + gen = entry->generation; + free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(ctx, "d%u.%u.%u-", + (uint8)(entry - &ctx->destination_entries.nodes[0]), gen, free_ctr); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } else { + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p)); + } + + hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + + if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) { + gen = entry->generation; + } else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + } else { + _dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen); + } + + free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(ctx, "s%u.%u.%u-", + (uint8)(entry - &ctx->destination_entries.nodes[0]), gen, free_ctr); + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + _dhd_wlfc_bprint(ctx, "%u.%u-", + IS_WL_TO_REUSE_SEQ(DHD_PKTTAG_H2DSEQ(PKTTAG(p))), + WL_SEQ_GET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(p)))); + } +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + /* remove old header */ + _dhd_wlfc_pullheader(ctx, p); + } + + if (hslot >= WLFC_HANGER_MAXITEMS) { + DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__)); + return BCME_ERROR; + } + + WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr); + WL_TXSTATUS_SET_HSLOT(htod, hslot); + WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p))); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + WL_TXSTATUS_SET_GENERATION(htod, gen); + DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1); + + if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) { + /* + Indicate that this packet is being sent in response to an + explicit request from the firmware side. + */ + WLFC_PKTFLAG_SET_PKTREQUESTED(htod); + } else { + WLFC_PKTFLAG_CLR_PKTREQUESTED(htod); + } + + rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update, + entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE); + if (rc == BCME_OK) { + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod); + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger); + if (header_needed) { + /* + a new header was created for this packet. + push to hanger slot and scrub q. Since bus + send succeeded, increment seq number as well. 
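+
+	   (Sketch of the tag built earlier in this function: htod packs
+	   FREERUNCTR | HSLOT | FIFO | FLAGS | GENERATION, one
+	   WL_TXSTATUS_SET_* macro per field.)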
+ */ + rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + if (rc == BCME_OK) { +#ifdef PROP_TXSTATUS_DEBUG + h->items[hslot].push_time = + OSL_SYSUPTIME(); +#endif + } else { + DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n", + __FUNCTION__, rc)); + } + } else { + /* clear hanger state */ + if (((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt != p) + DHD_ERROR(("%s() pkt not match: cur %p, hanger pkt %p\n", + __FUNCTION__, p, h->items[hslot].pkt)); + ASSERT(h->items[hslot].pkt == p); + bcm_object_feature_set(h->items[hslot].pkt, + BCM_OBJECT_FEATURE_PKT_STATE, 0); + h->items[hslot].pkt_state = 0; + h->items[hslot].pkt_txstatus = 0; + h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE; + } + } + + if ((rc == BCME_OK) && header_needed) { + /* increment free running sequence count */ + WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + } + } + *slot = hslot; + *packet = p; + return rc; +} /* _dhd_wlfc_pretx_pktprocess */ + +/** + * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote + * mac is in the 'open' state, otherwise '0'. + */ +static int +_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, int prec) +{ + wlfc_mac_descriptor_t* interfaces = ctx->destination_entries.interfaces; + + if (entry->interface_id >= WLFC_MAX_IFNUM) { + ASSERT(&ctx->destination_entries.other == entry); + return 1; + } + + if (interfaces[entry->interface_id].iftype == + WLC_E_IF_ROLE_P2P_GO) { + /* - destination interface is of type p2p GO. + For a p2pGO interface, if the destination is OPEN but the interface is + CLOSEd, do not send traffic. But if the dstn is CLOSEd while there is + destination-specific-credit left send packets. This is because the + firmware storing the destination-specific-requested packet in queue. + */ + /* XXX: This behavior will change one PM1 protocol mod is complete */ + if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) { + return 0; + } + } + + /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */ + if ((((entry->state == WLFC_STATE_CLOSE) || + (interfaces[entry->interface_id].state == WLFC_STATE_CLOSE)) && + (entry->requested_credit == 0) && + (entry->requested_packet == 0)) || + (!(entry->ac_bitmap & (1 << prec)))) { + return 0; + } + + return 1; +} /* _dhd_wlfc_is_destination_open */ + +/** + * Dequeues a suppressed or delayed packet from a queue + * @param[in/out] ctx Driver specific flow control administration + * @param[in] prec Precedence of queue to dequeue from + * @param[out] ac_credit_spent Boolean, returns 0 or 1 + * @param[out] needs_hdr Boolean, returns 0 or 1 + * @param[out] entry_out The remote MAC for which the packet is destined + * @param[in] only_no_credit If TRUE, searches all entries instead of just the active ones + * + * Return value: the dequeued packet + */ + +static void* +_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, + uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out, + bool only_no_credit) +{ + wlfc_mac_descriptor_t* entry; + int total_entries; + void* p = NULL; + int i; + uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 
0 : 1; + uint16 qlen; + bool change_entry = FALSE; + + BCM_REFERENCE(qlen); + BCM_REFERENCE(change_entry); + + *entry_out = NULL; + /* most cases a packet will count against FIFO credit */ + *ac_credit_spent = credit_spent; + + /* search all entries, include nodes as well as interfaces */ + if (only_no_credit) { + total_entries = ctx->requested_entry_count; + } else { + total_entries = ctx->active_entry_count; + } + + for (i = 0; i < total_entries; i++) { + if (only_no_credit) { + entry = ctx->requested_entry[i]; + } else { + entry = ctx->active_entry_head; + } + ASSERT(entry); + + if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) && +#ifdef PROPTX_MAXCOUNT + (entry->transit_count < entry->transit_maxcount) && +#endif /* PROPTX_MAXCOUNT */ + (entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) && + (!entry->suppressed)) { + *ac_credit_spent = credit_spent; + if (entry->state == WLFC_STATE_CLOSE) { + *ac_credit_spent = 0; + } + + /* higher precedence will be picked up first, + * i.e. suppressed packets before delayed ones + */ + p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec)); + *needs_hdr = 0; + if (p == NULL) { + /* De-Q from delay Q */ + p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec)); + *needs_hdr = 1; + } + + if (p != NULL) { + bcm_pkt_validate_chk(p, "_dhd_wlfc_deque_afq"); + /* did the packet come from suppress sub-queue? */ + if (entry->requested_credit > 0) { + entry->requested_credit--; +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_sent_packets++; +#endif + } else if (entry->requested_packet > 0) { + entry->requested_packet--; + DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p)); + } + + *entry_out = entry; + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + ctx->pkt_cnt_in_psq--; +#ifdef BULK_DEQUEUE + /* Check pkts in delayq */ + if (entry->state == WLFC_STATE_OPEN) { + entry->release_count[prec]++; + qlen = pktq_mlen(&entry->psq, + (1 << PSQ_SUP_IDX(prec) | 1 << PSQ_DLY_IDX(prec))); + + if (entry->release_count[prec] == ctx->max_release_count || + qlen == 0) { + change_entry = TRUE; + entry->release_count[prec] = 0; + } + + if (change_entry) { + /* move head */ + ctx->active_entry_head = + ctx->active_entry_head->next; + } + } +#endif /* BULK_DEQUEUE */ + _dhd_wlfc_flow_control_check(ctx, &entry->psq, + DHD_PKTTAG_IF(PKTTAG(p))); + /* + * A packet has been picked up, update traffic availability bitmap, + * if applicable. + */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + return p; + } + } + if (!only_no_credit) { + /* move head */ + ctx->active_entry_head = ctx->active_entry_head->next; + } + } + return NULL; +} /* _dhd_wlfc_deque_delayedq */ + +/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */ +static int +_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec) +{ + wlfc_mac_descriptor_t* entry; + + if (pktbuf != NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, pktbuf); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. 
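+
+	   Example, in sketch form: for precedence p, delayed traffic sits in
+	   sub-queue 2*p and suppressed traffic in sub-queue 2*p + 1; the
+	   dequeue side drains 2*p + 1 before 2*p.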
+ */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1), + FALSE, WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + WLFC_DBGMESG(("D")); + ctx->stats.delayq_full_error++; + return BCME_ERROR; + } + +#ifdef QMONITOR + dhd_qmon_tx(&entry->qmon); +#endif + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + } + + return BCME_OK; +} /* _dhd_wlfc_enque_delayq */ + +/** Returns TRUE if caller supplied packet is destined for caller supplied interface */ +static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid) +{ + if (!p || !p_ifid) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p)))); +} + +/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */ +static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry) +{ + if (!p || !entry) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (entry == DHD_PKTTAG_ENTRY(PKTTAG(p)))); +} + +static void +_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt) +{ + dhd_pub_t *dhdp; + bool credit_return = FALSE; + + if (!wlfc || !pkt) { + return; + } + + dhdp = (dhd_pub_t *)(wlfc->dhdp); + if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) && + DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + int lender, credit_returned = 0; + uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt)); + + credit_return = TRUE; + + /* Note that borrower is fifo_id */ + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } + } + + BCM_REFERENCE(credit_return); +#if defined(DHD_WLFC_THREAD) + if (credit_return) { + _dhd_wlfc_thread_wakeup(dhdp); + } +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** Removes and frees a packet from the hanger. Called during eg tx complete. 
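+ *
+ * A sketch of the two-sided completion implemented below: bus-return and
+ * txstatus each OR their flag into item->pkt_state, and only when the
+ * combined state equals WLFC_HANGER_PKT_STATE_COMPLETE is the packet
+ * popped, completed via dhd_txcomplete() and freed.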
*/ +static void +_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state, + int pkt_txstatus) +{ + wlfc_hanger_t* hanger; + wlfc_hanger_item_t* item; + + if (!wlfc) + return; + + hanger = (wlfc_hanger_t*)wlfc->hanger; + if (!hanger) + return; + + if (slot_id == WLFC_HANGER_MAXITEMS) + return; + + item = &hanger->items[slot_id]; + + if (item->pkt) { + item->pkt_state |= pkt_state; + if (pkt_txstatus != -1) + item->pkt_txstatus = (uint8)pkt_txstatus; + bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state); + if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) { + void *p = NULL; + void *pkt = item->pkt; + uint8 old_state = item->state; + int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE); + BCM_REFERENCE(ret); + BCM_REFERENCE(pkt); + ASSERT((ret == BCME_OK) && p && (pkt == p)); + if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n", + pkt, old_state, item->pkt_state); + } + ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED); + + /* free packet */ + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))] + [DHD_PKTTAG_FIFO(PKTTAG(p))]--; + wlfc->stats.pktout++; + dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus); + PKTFREE(wlfc->osh, p, TRUE); + } + } else { + /* free slot */ + if (item->state == WLFC_HANGER_ITEM_STATE_FREE) + DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n", + __FUNCTION__, __LINE__, item->pkt_state, pkt_state)); + item->state = WLFC_HANGER_ITEM_STATE_FREE; + } +} /* _dhd_wlfc_hanger_free_pkt */ + +/** Called during eg detach() */ +static void +_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, + bool dir, f_processpkt_t fn, void *arg, q_type_t q_type) +{ + int prec; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + ASSERT(dhdp); + + /* Optimize flush, if pktq len = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->n_pkts_tot == 0) { + return; + } + + for (prec = 0; prec < pq->num_prec; prec++) { + struct pktq_prec *q; + void *p, *prev = NULL; + + q = &pq->q[prec]; + p = q->head; + while (p) { + bcm_pkt_validate_chk(p, "_dhd_wlfc_pktq_flush"); + if (fn == NULL || (*fn)(p, arg)) { + bool head = (p == q->head); + if (head) + q->head = PKTLINK(p); + else + PKTSETLINK(prev, PKTLINK(p)); + if (q_type == Q_TYPE_PSQ) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + _dhd_wlfc_hanger_remove_reference(ctx->hanger, p); + } + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + ctx->stats.cleanup_psq_cnt++; + if (!(prec & 1)) { + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, + 0, 0, TRUE); +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" 
+ " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, + TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + } else if (q_type == Q_TYPE_AFQ) { + wlfc_mac_descriptor_t* entry = + _dhd_wlfc_find_table_entry(ctx, p); + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(ctx, "[sc]-"); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + _dhd_wlfc_return_implied_credit(ctx, p); + ctx->stats.cleanup_fw_cnt++; + } + PKTSETLINK(p, NULL); + if (dir) { + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->stats.pktout++; + dhd_txcomplete(dhdp, p, FALSE); + } + PKTFREE(ctx->osh, p, dir); + + q->n_pkts--; + pq->n_pkts_tot--; +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + p = (head ? q->head : PKTLINK(prev)); + } else { + prev = p; + p = PKTLINK(p); + } + } + + if (q->head == NULL) { + ASSERT(q->n_pkts == 0); + q->tail = NULL; + } + + } + + if (fn == NULL) + ASSERT(pq->n_pkts_tot == 0); +} /* _dhd_wlfc_pktq_flush */ + +#ifndef BCMDBUS +/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. */ +static void* +_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + return NULL; + + bcm_pkt_validate_chk(p, "_dhd_wlfc_pktq_flush"); + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + PKTSETLINK(p, NULL); + + return p; +} + +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + wlfc_mac_descriptor_t* entry; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! 
Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode) && + !_dhd_wlfc_hanger_remove_reference(h, pkt)) { + DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n", + __FUNCTION__, pkt)); + } + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "[sc]-"); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + _dhd_wlfc_return_implied_credit(wlfc, pkt); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--; + wlfc->stats.pktout++; + wlfc->stats.cleanup_txq_cnt++; + dhd_txcomplete(dhd, pkt, FALSE); + PKTFREE(wlfc->osh, pkt, TRUE); + } +} /* _dhd_wlfc_cleanup_txq */ + +#endif /* !BCMDBUS */ + +/** called during eg detach */ +void +_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int i; + int total_entries; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + + wlfc->stats.cleanup_txq_cnt = 0; + wlfc->stats.cleanup_psq_cnt = 0; + wlfc->stats.cleanup_fw_cnt = 0; + + /* + * flush sequence should be txq -> psq -> hanger/afq, hanger has to be last one + */ +#ifndef BCMDBUS + /* flush bus->txq */ + _dhd_wlfc_cleanup_txq(dhd, fn, arg); +#endif /* BCMDBUS */ + + /* flush psq, search all entries, include nodes as well as interfaces */ + total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t); + table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries; + + for (i = 0; i < total_entries; i++) { + if (table[i].occupied) { + /* release packets held in PSQ (both delayed and suppressed) */ + if (table[i].psq.n_pkts_tot) { + WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n", + __FUNCTION__, i, table[i].psq.n_pkts_tot)); + _dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE, + fn, arg, Q_TYPE_PSQ); + } + + /* free packets held in AFQ */ + if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.n_pkts_tot)) { + _dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE, + fn, arg, Q_TYPE_AFQ); + } + + if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) { + table[i].occupied = 0; + if (table[i].transit_count || table[i].suppr_transit_count) { + DHD_ERROR(("%s: table[%d] transit(%d), suppr_tansit(%d)\n", + __FUNCTION__, i, + table[i].transit_count, + table[i].suppr_transit_count)); + } + } + } + } + + /* + . flush remained pkt in hanger queue, not in bus->txq nor psq. + . the remained pkt was successfully downloaded to dongle already. + . hanger slot state cannot be set to free until receive txstatus update. 
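+
+	   . in sketch form the whole flush is ordered bus txq -> psq/afq ->
+	     hanger, and the hanger is last precisely because its slots stay
+	     pinned until that txstatus arrives.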
+ */ + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + for (i = 0; i < h->max_items; i++) { + if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) || + (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) { + if (fn == NULL || (*fn)(h->items[i].pkt, arg)) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED; + } + } + } + } + + return; +} /* _dhd_wlfc_cleanup */ + +/** Called after eg the dongle signalled a new remote MAC that it connected with to the DHD */ +static int +_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea, + f_processpkt_t fn, void *arg) +{ + int rc = BCME_OK; + uint8 i; + + BCM_REFERENCE(i); + +#ifdef QMONITOR + dhd_qmon_reset(&entry->qmon); +#endif + + if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) { + entry->occupied = 1; + entry->state = WLFC_STATE_OPEN; + entry->requested_credit = 0; + entry->interface_id = ifid; + entry->iftype = iftype; + entry->ac_bitmap = 0xff; /* update this when handling APSD */ +#ifdef BULK_DEQUEUE + for (i = 0; i < AC_COUNT + 1; i++) { + entry->release_count[i] = 0; + } +#endif /* BULK_DEQUEUE */ + /* for an interface entry we may not care about the MAC address */ + if (ea != NULL) + memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN); + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + entry->suppressed = FALSE; + entry->transit_count = 0; +#if defined(WL_EXT_IAPSTA) && defined(PROPTX_MAXCOUNT) + entry->transit_maxcount = wl_ext_get_wlfc_maxcount(ctx->dhdp, ifid); +#endif /* PROPTX_MAXCOUNT */ + entry->suppr_transit_count = 0; + entry->onbus_pkts_count = 0; + } + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid); + + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN); + } + + if (entry->next == NULL) { + /* not linked to anywhere, add to tail */ + if (ctx->active_entry_head) { + entry->prev = ctx->active_entry_head->prev; + ctx->active_entry_head->prev->next = entry; + ctx->active_entry_head->prev = entry; + entry->next = ctx->active_entry_head; + } else { + ASSERT(ctx->active_entry_count == 0); + entry->prev = entry->next = entry; + ctx->active_entry_head = entry; + } + ctx->active_entry_count++; + } else { + DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__, + (int)(entry - &ctx->destination_entries.nodes[0]))); + } + } +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + for (i = 0; i < (AC_COUNT + 1); i++) { + entry->last_send_seq[i] = 255; + entry->last_complete_seq[i] = 255; + } +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) { + /* When the entry is deleted, the packets that are queued in the entry must be + cleanup. The cleanup action should be before the occupied is set as 0. 
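+
+	   (The active-entry list below is a circular, doubly linked list; the
+	   unlink is the usual sketch
+	       entry->prev->next = entry->next;
+	       entry->next->prev = entry->prev;
+	   plus moving active_entry_head if it pointed at this entry.)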
+ */ + _dhd_wlfc_cleanup(ctx->dhdp, fn, arg); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid); + + entry->occupied = 0; + entry->state = WLFC_STATE_CLOSE; + memset(&entry->ea[0], 0, ETHER_ADDR_LEN); + + if (entry->next) { + /* not floating, remove from Q */ + if (ctx->active_entry_count <= 1) { + /* last item */ + ctx->active_entry_head = NULL; + ctx->active_entry_count = 0; + } else { + entry->prev->next = entry->next; + entry->next->prev = entry->prev; + if (entry == ctx->active_entry_head) { + ctx->active_entry_head = entry->next; + } + ctx->active_entry_count--; + } + entry->next = entry->prev = NULL; + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + } + } + return rc; +} /* _dhd_wlfc_mac_entry_update */ + +#ifdef LIMIT_BORROW + +/** LIMIT_BORROW specific function */ +static int +_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac, + bool bBorrowAll) +{ + int lender_ac, borrow_limit = 0; + int rc = -1; + + if (ctx == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return -1; + } + + /* Borrow from lowest priority available AC (including BC/MC credits) */ + for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) { + if (!bBorrowAll) { + borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO; + } else { + borrow_limit = 0; + } + + if (ctx->FIFO_credit[lender_ac] > borrow_limit) { + ctx->credits_borrowed[borrower_ac][lender_ac]++; + ctx->FIFO_credit[lender_ac]--; + rc = lender_ac; + break; + } + } + + return rc; +} + +/** LIMIT_BORROW specific function */ +static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac) +{ + if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) || + (borrower_ac < 0) || (borrower_ac > AC_COUNT)) { + DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n", + __FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac)); + + return BCME_BADARG; + } + + ctx->credits_borrowed[borrower_ac][lender_ac]--; + ctx->FIFO_credit[lender_ac]++; + + return BCME_OK; +} + +#endif /* LIMIT_BORROW */ + +/** + * Called on an interface event (WLC_E_IF) indicated by firmware. 
+ * @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ +static int +_dhd_wlfc_interface_entry_update(void* state, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + wlfc_mac_descriptor_t* entry; + + if (ifid >= WLFC_MAX_IFNUM) + return BCME_BADARG; + + entry = &ctx->destination_entries.interfaces[ifid]; + + return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea, + _dhd_wlfc_ifpkt_fn, &ifid); +} + +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ +static int +_dhd_wlfc_BCMCCredit_support_update(void* state) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + + ctx->bcmc_credit_supported = TRUE; + return BCME_OK; +} + +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ +static int +_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + int i; + + for (i = 0; i <= 4; i++) { + if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) { + DHD_ERROR(("%s: credit[%d] is not returned, (%d %d)\n", + __FUNCTION__, i, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i])); + } + } + + /* update the AC FIFO credit map */ + ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]); + ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]); + ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]); + ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]); + ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]); + + ctx->Init_FIFO_credit[0] = credits[0]; + ctx->Init_FIFO_credit[1] = credits[1]; + ctx->Init_FIFO_credit[2] = credits[2]; + ctx->Init_FIFO_credit[3] = credits[3]; + ctx->Init_FIFO_credit[4] = credits[4]; + + /* credit for ATIM FIFO is not used yet. */ + ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0; + + return BCME_OK; +} + +/** + * Called during committing of a transmit packet from the OS DHD layer to the next layer towards + * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer. + * + * @param[in/out] ctx Driver specific flow control administration + * @param[in] ac Access Category (QoS) of caller supplied packet + * @param[in] commit_info Contains eg the packet to send + * @param[in] fcommit Function pointer to transmit function of next software layer + * @param[in] commit_ctx Opaque context used when calling next layer + */ +static int +_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, + dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx) +{ + uint32 hslot; + int rc; + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + /* + if ac_fifo_credit_spent = 0 + + This packet will not count against the FIFO credit. + To ensure the txstatus corresponding to this packet + does not provide an implied credit (default behavior) + mark the packet accordingly. + + if ac_fifo_credit_spent = 1 + + This is a normal packet and it counts against the FIFO + credit count.
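+ + An illustrative fragment (hypothetical caller; the field is the one used below): + commit_info->ac_fifo_credit_spent = 1 -> a normal pkt that debits FIFO_credit[ac]; + commit_info->ac_fifo_credit_spent = 0 -> eg a pkt released against a requested_credit, + whose txstatus must not hand back an implied credit.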
+ */ + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent); + rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, &commit_info->p, + commit_info->needs_hdr, &hslot); + +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_check_send_order(ctx, commit_info->mac_entry, commit_info->p); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + if (rc == BCME_OK) { + rc = fcommit(commit_ctx, commit_info->p); + if (rc == BCME_OK) { + uint8 gen = WL_TXSTATUS_GET_GENERATION( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))); + dhd_txpkt_log_and_dump(dhdp, commit_info->p, NULL); + ctx->stats.pkt2bus++; + if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) { + ctx->stats.send_pkts[ac]++; + WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); + } + + if (gen != commit_info->mac_entry->generation) { + /* will be suppressed back by design */ + if (!commit_info->mac_entry->suppressed) { + commit_info->mac_entry->suppressed = TRUE; + } + commit_info->mac_entry->suppr_transit_count++; +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(ctx, "[si%u]-", + commit_info->mac_entry->suppr_transit_count); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + commit_info->mac_entry->transit_count++; + commit_info->mac_entry->onbus_pkts_count++; + } else if (commit_info->needs_hdr) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + void *pout = NULL; + /* pop hanger for delayed packet */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE); + ASSERT(commit_info->p == pout); + } + } + } else { + ctx->stats.generic_error++; + } + + if (rc != BCME_OK) { + /* + pretx pkt process or bus commit has failed, rollback. + - remove wl-header for a delayed packet + - save wl-header header for suppressed packets + - reset credit check flag + */ + _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot); + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0); + } + + return rc; +} /* _dhd_wlfc_handle_packet_commit */ + +/** Returns remote MAC descriptor for caller supplied MAC address */ +static uint8 +_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea) +{ + wlfc_mac_descriptor_t* table = + ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes; + uint8 table_index; + + if (ea != NULL) { + for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) { + if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) && + table[table_index].occupied) + return table_index; + } + } + return WLFC_MAC_DESC_ID_INVALID; +} + +/** + * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the + * status of a frame that the dongle attempted to transmit over the wireless medium. 
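+ * + * An illustrative decode of the 32-bit little-endian status word, using the same + * accessor macros as the parsing code below (a sketch, not a separate API): + * status = ltoh32(word); status_flag = WL_TXSTATUS_GET_FLAGS(status); + * hcnt = WL_TXSTATUS_GET_FREERUNCTR(status); hslot = WL_TXSTATUS_GET_HSLOT(status); + * fifo_id = WL_TXSTATUS_GET_FIFO(status); gen = WL_TXSTATUS_GET_GENERATION(status).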
+ */ +static int +dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt) +{ + athost_wl_status_info_t* ctx; + wlfc_mac_descriptor_t* entry = NULL; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!dhd) { + DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd)); + return BCME_BADARG; + } + ctx = (athost_wl_status_info_t*)dhd->wlfc_state; + if (!ctx) { + DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx)); + return BCME_ERROR; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->psq; + + ASSERT(((prec << 1) + 1) < pq->num_prec); + + q = &pq->q[((prec << 1) + 1)]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->n_pkts--; + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + + PKTSETLINK(p, NULL); + + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(ctx, p); + } else { + _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + } + + entry->transit_count++; + + return BCME_OK; +} + +static int +_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac) +{ + uint8 status_flag_ori, status_flag; + uint32 status; + int ret = BCME_OK; + int remove_from_hanger_ori, remove_from_hanger = 1; + void* pktbuf = NULL; + uint8 fifo_id = 0, gen = 0, count = 0, hcnt; + uint16 hslot; + wlfc_mac_descriptor_t* entry = NULL; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + uint16 seq = 0, seq_fromfw = 0, seq_num = 0; + uint16 pktfate_status; + + memcpy(&status, pkt_info, sizeof(uint32)); + status = ltoh32(status); + status_flag = WL_TXSTATUS_GET_FLAGS(status); + hcnt = WL_TXSTATUS_GET_FREERUNCTR(status); + hslot = WL_TXSTATUS_GET_HSLOT(status); + fifo_id = WL_TXSTATUS_GET_FIFO(status); + gen = WL_TXSTATUS_GET_GENERATION(status); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ); + seq = ltoh16(seq); + seq_fromfw = GET_WL_HAS_ASSIGNED_SEQ(seq); + seq_num = WL_SEQ_GET_NUM(seq); + } + + wlfc->stats.txstatus_in += len; + + if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) { + wlfc->stats.d11_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) { + wlfc->stats.wl_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) { + wlfc->stats.wlc_tossed_pkts += len; + } else if (status_flag == 
WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_EXPIRED) { + wlfc->stats.pkt_exptime += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_DROPPED) { + wlfc->stats.pkt_dropped += len; + } + + if (dhd->proptxstatus_txstatus_ignore) { + if (!remove_from_hanger) { + DHD_ERROR(("suppress txstatus: %d\n", status_flag)); + } + return BCME_OK; + } + + status_flag_ori = status_flag; + remove_from_hanger_ori = remove_from_hanger; + + while (count < len) { + if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf); + } else { + status_flag = status_flag_ori; + remove_from_hanger = remove_from_hanger_ori; + ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE); + if (!pktbuf) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, -1); + goto cont; + } else { + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) { + status_flag = WLFC_CTL_PKTFLAG_DISCARD; + remove_from_hanger = 1; + } + } + } + + if ((ret != BCME_OK) || !pktbuf) { + goto cont; + } + + bcm_pkt_validate_chk(pktbuf, "_dhd_wlfc_compressed_txstatus_update"); + + pktfate_status = ltoh16(status_flag_ori) & WLFC_CTL_PKTFLAG_MASK; + dhd_txpkt_log_and_dump(dhd, pktbuf, &pktfate_status); + + /* set fifo_id to correct value because not all FW does that */ + fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + + entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf); + + if (!remove_from_hanger) { + /* this packet was suppressed */ + if (!entry->suppressed || (entry->generation != gen)) { + if (!entry->suppressed) { + entry->suppr_transit_count = entry->transit_count; + if (p_mac) { + *p_mac = entry; + } + } else { + DHD_ERROR(("gen(%d), entry->generation(%d)\n", + gen, entry->generation)); + } + entry->suppressed = TRUE; + +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "[ss%u.%u.%u]-", + (uint8)(entry - &wlfc->destination_entries.nodes[0]), + entry->generation, + entry->suppr_transit_count); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + entry->generation = gen; +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + if (gen == WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)))) { + printf("==%d.%d==\n", gen, hcnt); + } +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + +#ifdef PROP_TXSTATUS_DEBUG + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) + { + uint32 new_t = OSL_SYSUPTIME(); + uint32 old_t; + uint32 delta; + old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time; + + wlfc->stats.latency_sample_count++; + if (new_t > old_t) + delta = new_t - old_t; + else + delta = 0xffffffff + new_t - old_t; + wlfc->stats.total_status_latency += delta; + wlfc->stats.latency_most_recent = delta; + + wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta; + if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32)) + wlfc->stats.idx_delta = 0; + } +#endif /* PROP_TXSTATUS_DEBUG */ + + /* pick up the implicit credit from this packet */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) { + _dhd_wlfc_return_implied_credit(wlfc, pktbuf); + } else { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the destination entry (for pspoll etc.) 
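+ a hypothetical example: a destination in PS mode sends a PS-Poll, the dongle asks + the host for a packet via WLFC_CTL_TYPE_MAC_REQUEST_CREDIT, and the packet released + against that grant is committed with ac_fifo_credit_spent = 0, so its txstatus lands + here and re-arms entry->requested_credit instead of freeing a FIFO credit.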
+ */ + if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) { + entry->requested_credit++; +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* DHD_WLFC_THREAD */ + } +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_acks++; +#endif + } + + if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) || + (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) { + /* save generation bit inside packet */ + WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + WL_SEQ_SET_REUSE(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw); + WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num); + } + + ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf); + if (ret != BCME_OK) { +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "f%u.%u.%u-", + (uint8)(entry - &wlfc->destination_entries.nodes[0]), + gen, + hcnt); + _dhd_wlfc_check_complete_order(wlfc, entry, pktbuf); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + /* delay q is full, drop this packet */ + DHD_WLFC_QMON_COMPLETE(entry); + _dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE); + } else { + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + /* Mark suppressed to avoid a double free + during wlfc cleanup + */ + _dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen); + } +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "r%u.%u.%u.%u-", + status_flag, + (uint8)(entry - &wlfc->destination_entries.nodes[0]), + gen, + hcnt); + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + _dhd_wlfc_bprint(wlfc, "%u.%u-", seq_fromfw, seq_num); + } + +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + } else { +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "c%u.%u.%u.%u-", + status_flag, + (uint8)(entry - &wlfc->destination_entries.nodes[0]), + gen, + hcnt); + _dhd_wlfc_check_complete_order(wlfc, entry, pktbuf); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + + DHD_WLFC_QMON_COMPLETE(entry); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE); + } else { + dhd_txcomplete(dhd, pktbuf, TRUE); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))] + [DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--; + wlfc->stats.pktout++; + /* free the packet */ + PKTFREE(wlfc->osh, pktbuf, TRUE); + } + } + /* pkt back from firmware side */ + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "[sc]-"); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + +cont: + hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK; + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK; + } + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) { + seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK; + } + + count++; + } + + return BCME_OK; +} /* _dhd_wlfc_compressed_txstatus_update */ + +/** + * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle. + * @param[in] credits caller supplied credit that will be added to the host credit. 
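+ * + * A worked example with assumed numbers: if AC1 earlier borrowed two credits from + * AC3 (credits_borrowed[1][3] == 2) and a creditback of 3 arrives for AC1, the loop + * below first repays AC3 (FIFO_credit[3] += 2, credits_borrowed[1][3] = 0) and only + * the remaining single credit is added to FIFO_credit[1], capped at Init_FIFO_credit[1].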
+ */ +static int +_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) +{ + int i; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.fifo_credits_back[i] += credits[i]; +#endif + + /* update FIFO credits */ + if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT) + { + int lender; /* Note that borrower is i */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) { + if (wlfc->credits_borrowed[i][lender] > 0) { + if (credits[i] >= wlfc->credits_borrowed[i][lender]) { + credits[i] -= + (uint8)wlfc->credits_borrowed[i][lender]; + wlfc->FIFO_credit[lender] += + wlfc->credits_borrowed[i][lender]; + wlfc->credits_borrowed[i][lender] = 0; + } else { + wlfc->credits_borrowed[i][lender] -= credits[i]; + wlfc->FIFO_credit[lender] += credits[i]; + credits[i] = 0; + } + } + } + + /* If we have more credits left over, these must belong to the AC */ + if (credits[i] > 0) { + wlfc->FIFO_credit[i] += credits[i]; + } + + if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) { + wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i]; + } + } + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + + return BCME_OK; +} /* _dhd_wlfc_fifocreditback_indicate */ + +#ifndef BCMDBUS +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* entry; + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + uint8 results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ]; + uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0}; + uint32 htod = 0; + uint16 htodseq = 0; + bool bCreditUpdate = FALSE; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + if (!entry) { + PKTFREE(dhd->osh, pkt, TRUE); + continue; + } + if (entry->onbus_pkts_count > 0) { + entry->onbus_pkts_count--; + } + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) { + entry->suppressed = FALSE; + } + /* fake a suppression txstatus */ + htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt)); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS); + WL_TXSTATUS_SET_GENERATION(htod, entry->generation); + htod = htol32(htod); + memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS); + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt)); + if (IS_WL_TO_REUSE_SEQ(htodseq)) { + SET_WL_HAS_ASSIGNED_SEQ(htodseq); + RESET_WL_TO_REUSE_SEQ(htodseq); + } + htodseq = htol16(htodseq); + memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq, + WLFC_CTL_VALUE_LEN_SEQ); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, pkt); + } + _dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL); + + /* fake a fifo credit back */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++; + bCreditUpdate = TRUE; + } + } + + if (bCreditUpdate) 
{ + _dhd_wlfc_fifocreditback_indicate(dhd, credits); + } +} /* _dhd_wlfc_suppress_txq */ + +#endif /* !BCMDBUS */ + +static int +_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value) +{ + uint32 timestamp; + + (void)dhd; + + bcopy(&value[2], &timestamp, sizeof(uint32)); + timestamp = ltoh32(timestamp); + DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp)); + return BCME_OK; +} + +static int +_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi) +{ + (void)dhd; + (void)rssi; + return BCME_OK; +} + +static void +_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i == wlfc->requested_entry_count) { + /* no match entry found */ + ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1)); + wlfc->requested_entry[wlfc->requested_entry_count++] = entry; + } +} + +/** called on eg receiving 'mac open' event from the dongle. */ +static void +_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i < wlfc->requested_entry_count) { + /* found */ + ASSERT(wlfc->requested_entry_count > 0); + wlfc->requested_entry_count--; + if (i != wlfc->requested_entry_count) { + wlfc->requested_entry[i] = + wlfc->requested_entry[wlfc->requested_entry_count]; + } + wlfc->requested_entry[wlfc->requested_entry_count] = NULL; + } +} + +/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */ +static int +_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + int rc; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 existing_index; + uint8 table_index; + uint8 ifid; + uint8* ea; + + WLFC_DBGMESG(("%s(), mac ["MACDBG"],%s,idx:%d,id:0x%02x\n", + __FUNCTION__, MAC2STRDBG(&value[2]), + ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"), + WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0])); + + table = wlfc->destination_entries.nodes; + table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]); + ifid = value[1]; + ea = &value[2]; + + _dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]); + if (type == WLFC_CTL_TYPE_MACDESC_ADD) { + existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]); + if ((existing_index != WLFC_MAC_DESC_ID_INVALID) && + (existing_index != table_index) && table[existing_index].occupied) { + /* + there is an existing different entry, free the old one + and move it to new index if necessary.
+ */ + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index], + eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id, + table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn, + &table[existing_index]); + } + + if (!table[table_index].occupied) { + /* this new MAC entry does not exist, create one */ + table[table_index].mac_handle = value[0]; + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_ADD, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, NULL, NULL); + } else { + /* the space should have been empty, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + + if (type == WLFC_CTL_TYPE_MACDESC_DEL) { + if (table[table_index].occupied) { + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_DEL, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, _dhd_wlfc_entrypkt_fn, &table[table_index]); + } else { + /* the space should have been occupied, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + BCM_REFERENCE(rc); + return BCME_OK; +} /* _dhd_wlfc_mac_table_update */ + +/** Called on a 'mac open' or 'mac close' event indicated by the dongle */ +static int +_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; /* a table maps from mac handle to mac descriptor */ + uint8 mac_handle = value[0]; + int i; + + table = wlfc->destination_entries.nodes; + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { +#ifdef BULK_DEQUEUE + for (i = 0; i < AC_COUNT + 1; i++) { + desc->release_count[i] = 0; + } +#endif /* BULK_DEQUEUE */ + if (type == WLFC_CTL_TYPE_MAC_OPEN) { + desc->state = WLFC_STATE_OPEN; + desc->ac_bitmap = 0xff; + DHD_WLFC_CTRINC_MAC_OPEN(desc); + desc->requested_credit = 0; + desc->requested_packet = 0; + _dhd_wlfc_remove_requested_entry(wlfc, desc); +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "[op%u.%u]-", + (uint8)(table - &wlfc->destination_entries.nodes[0]), + OSL_SYSUPTIME()); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } else { + desc->state = WLFC_STATE_CLOSE; + DHD_WLFC_CTRINC_MAC_CLOSE(desc); +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "[cl%u.%u]-", + (uint8)(table - &wlfc->destination_entries.nodes[0]), + OSL_SYSUPTIME()); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + /* Indicate to firmware if there is any traffic pending. 
*/ + for (i = 0; i < AC_COUNT; i++) { + _dhd_wlfc_traffic_pending_check(wlfc, desc, i); + } + } + } else { + wlfc->stats.psmode_update_failed++; + } + + return BCME_OK; +} /* _dhd_wlfc_psmode_update */ + +/** called upon receiving 'interface open' or 'interface close' event from the dongle */ +static int +_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 if_id = value[0]; + uint8 i; + + BCM_REFERENCE(i); + + if (if_id < WLFC_MAX_IFNUM) { + table = wlfc->destination_entries.interfaces; + if (table[if_id].occupied) { +#ifdef BULK_DEQUEUE + for (i = 0; i < AC_COUNT + 1; i++) { + table[if_id].release_count[i] = 0; + } +#endif /* BULK_DEQUEUE */ + if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) { + table[if_id].state = WLFC_STATE_OPEN; + /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */ +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "[op%u.%u]-", + (uint8)(table - &wlfc->destination_entries.nodes[0]), + OSL_SYSUPTIME()); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } else { + table[if_id].state = WLFC_STATE_CLOSE; + /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */ +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + _dhd_wlfc_bprint(wlfc, "[cl%u.%u]-", + (uint8)(table - &wlfc->destination_entries.nodes[0]), + OSL_SYSUPTIME()); +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + } + return BCME_OK; + } + } + wlfc->stats.interface_update_failed++; + + /* XXX: what is an appropriate error? */ + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */ +static int +_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 credit; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + credit = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_credit = credit; + + /* XXX: toggle AC prec bitmap based on received bmp, exclude ac/bc pkt */ + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + } else { + wlfc->stats.credit_request_failed++; + } + + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */ +static int +_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 packet_count; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + packet_count = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_packet = packet_count; + + /* XXX: toggle AC prec bitmap based on received bmp, exclude ac/bc pkt */ + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + } else { + wlfc->stats.packet_request_failed++; + } + + return BCME_OK; +} + +/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */ +static void +_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len) +{ + if (info_len) { + /* Check copy length to avoid buffer overrun.
In case of length exceeding + * WLHOST_REORDERDATA_TOTLEN, return failure instead of sending an incomplete result + * of length WLHOST_REORDERDATA_TOTLEN + */ + if ((info_buf) && (len <= WLHOST_REORDERDATA_TOTLEN)) { + bcopy(val, info_buf, len); + *info_len = len; + } else { + *info_len = 0; + } + } +} + +/* + * public functions + */ + +bool dhd_wlfc_is_supported(dhd_pub_t *dhd) +{ + bool rc = TRUE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rc = FALSE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +#ifdef BULK_DEQUEUE +#ifndef WLFC_MAX_RELEASE_CNT +#ifdef CUSTOM_AMPDU_MPDU +#define WLFC_MAX_RELEASE_CNT CUSTOM_AMPDU_MPDU +#else +#define WLFC_MAX_RELEASE_CNT 16 +#endif /* CUSTOM_AMPDU_MPDU */ +#endif /* WLFC_MAX_RELEASE_CNT */ +#endif /* BULK_DEQUEUE */ + +int dhd_wlfc_enable(dhd_pub_t *dhd) +{ + int i, rc = BCME_OK; + athost_wl_status_info_t* wlfc; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_enabled || dhd->wlfc_state) { + rc = BCME_OK; + goto exit; + } + + /* allocate space to track txstatus propagated from firmware */ + dhd->wlfc_state = DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_INFO, + sizeof(athost_wl_status_info_t)); + if (dhd->wlfc_state == NULL) { + rc = BCME_NOMEM; + goto exit; + } + + /* initialize state space */ + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + memset(wlfc, 0, sizeof(athost_wl_status_info_t)); + + /* remember osh & dhdp */ + wlfc->osh = dhd->osh; + wlfc->dhdp = dhd; +#ifdef BULK_DEQUEUE + wlfc->max_release_count = WLFC_MAX_RELEASE_CNT; +#endif /* BULK_DEQUEUE */ + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS); + if (wlfc->hanger == NULL) { + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + rc = BCME_NOMEM; + goto exit; + } + } + + dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT; + /* default to check rx pkt */ + dhd->wlfc_rxpkt_chk = TRUE; +#if defined (LINUX) || defined(linux) + if (dhd->op_mode & DHD_FLAG_IBSS_MODE) { + dhd->wlfc_rxpkt_chk = FALSE; + } +#endif /* defined (LINUX) || defined(linux) */ + + /* initialize all interfaces to accept traffic */ + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + wlfc->hostif_flow_state[i] = OFF; + } + + _dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other, + eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL); + + wlfc->allow_credit_borrow = 0; + wlfc->single_ac = 0; + wlfc->single_ac_timestamp = 0; + +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + wlfc->log_buf = MALLOC(dhd->osh, WLFC_LOG_BUF_SIZE); + wlfc->log_buf[WLFC_LOG_BUF_SIZE - 1] = 0; + wlfc->log_buf_offset = 0; + wlfc->log_buf_full = FALSE; +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + +exit: + DHD_ERROR(("%s: ret=%d\n", __FUNCTION__, rc)); + dhd_os_wlfc_unblock(dhd); + + return rc; +} /* dhd_wlfc_enable */ + +#ifdef SUPPORT_P2P_GO_PS + +/** + * Called when the host platform enters a lower power mode, eg right before a system hibernate. + * SUPPORT_P2P_GO_PS specific function.
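+ * + * A sketch of the intended effect, using the flag names from the code below: read the + * firmware "tlv" iovar, clear WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS for + * suspend (dhd_wlfc_resume sets them back), then write the updated mask via the same iovar.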
+ */ +int +dhd_wlfc_suspend(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0) + return 0; + tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +/** + * Called when the host platform resumes from a power management operation, eg resume after a + * system hibernate. SUPPORT_P2P_GO_PS specific function. + */ +int +dhd_wlfc_resume(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == + (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) + return 0; + tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +#endif /* SUPPORT_P2P_GO_PS */ + +/** A flow control header was received from firmware, containing one or more TLVs */ +int +dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf, + uint *reorder_info_len) +{ + uint8 type, len; + uint8* value; + uint8* tmpbuf; + uint16 remainder = (uint16)tlv_hdr_len; + uint16 processed = 0; + athost_wl_status_info_t* wlfc = NULL; + void* entry; + + if ((dhd == NULL) || (pktbuf == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) { + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + } + + tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf); + + if (remainder) { + while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) { + type = tmpbuf[processed]; + if (type == WLFC_CTL_TYPE_FILLER) { + remainder -= 1; + processed += 1; + continue; + } + + len = tmpbuf[processed + 1]; + value = &tmpbuf[processed + 2]; + + if (remainder < (2 + len)) + break; + + remainder -= 2 + len; + processed += 2 + len; + entry = NULL; + + DHD_INFO(("%s():%d type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + + if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS) + _dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf, + reorder_info_len); + + if (wlfc == NULL) { + ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER); + + if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS && + type != WLFC_CTL_TYPE_TRANS_ID) + DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!" 
+ " type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + continue; + } + + if (type == WLFC_CTL_TYPE_TXSTATUS) { + _dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry); + } else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) { + uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS; + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ; + } + _dhd_wlfc_compressed_txstatus_update(dhd, value, + value[compcnt_offset], &entry); + } else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) { + _dhd_wlfc_fifocreditback_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_RSSI) { + _dhd_wlfc_rssi_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) { + _dhd_wlfc_credit_request(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) { + _dhd_wlfc_packet_request(dhd, value); + } else if ((type == WLFC_CTL_TYPE_MAC_OPEN) || + (type == WLFC_CTL_TYPE_MAC_CLOSE)) { + _dhd_wlfc_psmode_update(dhd, value, type); + } else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) || + (type == WLFC_CTL_TYPE_MACDESC_DEL)) { + _dhd_wlfc_mac_table_update(dhd, value, type); + } else if (type == WLFC_CTL_TYPE_TRANS_ID) { + _dhd_wlfc_dbg_senum_check(dhd, value); + } else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) || + (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) { + _dhd_wlfc_interface_update(dhd, value, type); + } + +#ifndef BCMDBUS + if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) { + /* suppress all packets for this mac entry from bus->txq */ + _dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry); + } +#endif /* !BCMDBUS */ + } /* while */ + + if (remainder != 0 && wlfc) { + /* trouble..., something is not right */ + wlfc->stats.tlv_parse_failed++; + } + } /* if */ + + if (wlfc) + wlfc->stats.dhd_hdrpulls++; + + dhd_os_wlfc_unblock(dhd); + return BCME_OK; +} + +KERNEL_THREAD_RETURN_TYPE +dhd_wlfc_transfer_packets(void *data) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)data; + int ac, single_ac = 0, rc = BCME_OK; + dhd_wlfc_commit_info_t commit_info; + athost_wl_status_info_t* ctx; + int bus_retry_count = 0; + int pkt_send = 0; + int pkt_send_per_ac = 0; + + uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */ + uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */ + uint8 packets_map = 0; /* packets in queue, Bitmask for 4 ACs + BC/MC */ + bool no_credit = FALSE; + + int lender; + int pkt_bound = WLFC_PACKET_BOUND; + int highest_lender_ac; + + BCM_REFERENCE(highest_lender_ac); + +#if defined(DHD_WLFC_THREAD) + /* wait till someone wakeup me up, will change it at running time */ +#if defined(LINUX) + int wait_msec = msecs_to_jiffies(0xFFFFFFFF); +#endif /* LINUX */ +#endif /* defined(DHD_WLFC_THREAD) */ + +#if defined(DHD_WLFC_THREAD) + while (1) { + bus_retry_count = 0; + pkt_send = 0; + tx_map = 0; + rx_map = 0; + packets_map = 0; +#if defined(LINUX) + wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead, + dhdp->wlfc_thread_go, wait_msec); + if (kthread_should_stop()) { + break; + } + dhdp->wlfc_thread_go = FALSE; +#endif /* LINUX */ + + dhd_os_wlfc_block(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; +#if defined(DHD_WLFC_THREAD) + if (!ctx) + goto exit; +#endif /* defined(DHD_WLFC_THREAD) */ + + memset(&commit_info, 0, sizeof(commit_info)); + + /* + Commit packets for regular AC traffic. Higher priority first. + First, use up FIFO credits available to each AC. 
Based on distribution + and credits left, borrow from other ACs as applicable + + -NOTE: + If the bus between the host and firmware is overwhelmed by the + traffic from host, it is possible that higher priority traffic + starves the lower priority queue. If that occurs often, we may + have to employ weighted round-robin or ucode scheme to avoid + low priority packet starvation. + */ + +#ifdef BULK_DEQUEUE + pkt_bound = ctx->max_release_count; +#endif + + for (ac = AC_COUNT; ac >= 0; ac--) { + if (dhdp->wlfc_rxpkt_chk) { + /* check rx packet */ + uint32 curr_t = OSL_SYSUPTIME(), delta; + + delta = curr_t - ctx->rx_timestamp[ac]; + if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) { + rx_map |= (1 << ac); + } + } + + if (ctx->pkt_cnt_per_ac[ac] == 0) { + continue; + } + + tx_map |= (1 << ac); + single_ac = ac + 1; + pkt_send_per_ac = 0; + while ((FALSE == dhdp->proptxstatus_txoff) && + (pkt_send_per_ac < pkt_bound)) { + /* packets from delayQ with less priority are fresh and + * they'd need header and have no MAC entry + */ + no_credit = (ctx->FIFO_credit[ac] < 1); + if (dhdp->proptxstatus_credit_ignore || + ((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) { + no_credit = FALSE; + } + + lender = -1; +#ifdef LIMIT_BORROW + if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map) && + dhdp->wlfc_borrow_allowed) { + /* try borrow from lower priority */ +#ifdef BULK_DEQUEUE + /* Enable credit borrow from higher AC + * to make packet chain longer + */ + highest_lender_ac = AC_COUNT; +#else + highest_lender_ac = ac - 1; +#endif /* BULK_DEQUEUE */ + lender = _dhd_wlfc_borrow_credit(ctx, highest_lender_ac, ac, FALSE); + if (lender != -1) { + no_credit = FALSE; + } + } +#endif /* LIMIT_BORROW */ + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + no_credit); + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + if (commit_info.p == NULL) { +#ifdef LIMIT_BORROW + if (lender != -1 && dhdp->wlfc_borrow_allowed) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + break; + } + + if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) { + ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent); + } + /* here we can ensure have credit or no credit needed */ + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + pkt_send_per_ac++; + if (commit_info.ac_fifo_credit_spent && (lender == -1)) { + ctx->FIFO_credit[ac]--; + } +#ifdef LIMIT_BORROW + else if (!commit_info.ac_fifo_credit_spent && (lender != -1) && + dhdp->wlfc_borrow_allowed) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + } else { +#ifdef LIMIT_BORROW + if (lender != -1 && dhdp->wlfc_borrow_allowed) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + if (ctx->pkt_cnt_per_ac[ac]) { + packets_map |= (1 << ac); + } + } + + if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) { + /* nothing send out or remain in queue */ + rc = BCME_OK; + goto exit; + } + + if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) { + /* only one tx ac exist and no higher rx ac */ + if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) { + ac = single_ac - 1; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (single_ac != ctx->single_ac) { + /* new single ac traffic (first single ac or different single ac) */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = curr_t; + ctx->single_ac = (uint8)single_ac; + rc = BCME_OK; + goto exit; + } + /* same ac traffic, check if it lasts enough time */ + delta = curr_t - ctx->single_ac_timestamp; + + if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) { + /* wait enough time, can borrow now */ + ctx->allow_credit_borrow = 1; + ac = single_ac - 1; + } else { + rc = BCME_OK; + goto exit; + } + } + } else { + /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = 0; + ctx->single_ac = 0; + rc = BCME_OK; + goto exit; + } + + if (packets_map == 0) { + /* nothing to send, skip borrow */ + rc = BCME_OK; + goto exit; + } + + /* At this point, borrow all credits only for ac */ + while (FALSE == dhdp->proptxstatus_txoff) { +#ifdef LIMIT_BORROW + if (dhdp->wlfc_borrow_allowed) { + if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) { + break; + } + } + else + break; +#endif + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + FALSE); + if (commit_info.p == NULL) { + /* before borrow only one ac exists and now this only ac is empty */ +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + break; + } + + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + if (commit_info.ac_fifo_credit_spent) { +#ifndef LIMIT_BORROW + ctx->FIFO_credit[ac]--; +#endif + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + } + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + BCM_REFERENCE(pkt_send); + +exit: +#if defined(DHD_WLFC_THREAD) + dhd_os_wlfc_unblock(dhdp); +#if defined(LINUX) + if (ctx && ctx->pkt_cnt_in_psq && pkt_send) { + wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS); + } else { + wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS); + } +#endif /* LINUX */ + } + return 0; +#else + return rc; +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** + * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by + * eg dhd_sendpkt(). + * @param[in] dhdp Pointer to public DHD structure + * @param[in] fcommit Pointer to transmit function of next layer + * @param[in] commit_ctx Opaque context used when calling next layer + * @param[in] pktbuf Packet to send + * @param[in] need_toggle_host_if If TRUE, resets flag ctx->toggle_host_if + */ +int +dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, + struct dhd_bus *commit_ctx, void *pktbuf, bool need_toggle_host_if) +{ + int rc = BCME_OK; + athost_wl_status_info_t* ctx; + +#if defined(DHD_WLFC_THREAD) + if (!pktbuf) + return BCME_OK; +#endif /* defined(DHD_WLFC_THREAD) */ + + if ((dhdp == NULL) || (fcommit == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + if (pktbuf) { + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0); + } + rc = WLFC_UNSUPPORTED; + goto exit; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + +#ifdef BCMDBUS + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + if (pktbuf) { + PKTFREE(ctx->osh, pktbuf, TRUE); + rc = BCME_OK; + } + goto exit; + } +#endif + + if (dhdp->proptxstatus_module_ignore) { + if (pktbuf) { + uint32 htod = 0; + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE); + if (fcommit(commit_ctx, pktbuf)) { + /* free it if failed, otherwise do it in tx complete cb */ + PKTFREE(ctx->osh, pktbuf, TRUE); + } + rc = BCME_OK; + } + goto exit; + } + + if (pktbuf) { + int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + ASSERT(ac <= AC_COUNT); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1); + /* en-queue the packets to respective queue. 
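+ delayed packets sit at even precedence ((ac << 1)) and suppressed packets at odd + precedence ((ac << 1) + 1), which is why the drop path below uses (ac << 1) while the + suppress paths elsewhere in this file use (fifo_id << 1) + 1.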
*/ + rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac); + if (rc) { + _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE); + } else { + ctx->stats.pktin++; + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++; + } + } + + if (!ctx->fcommit) { + ctx->fcommit = fcommit; + } else { + ASSERT(ctx->fcommit == fcommit); + } + if (!ctx->commit_ctx) { + ctx->commit_ctx = commit_ctx; + } else { + ASSERT(ctx->commit_ctx == commit_ctx); + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhdp); +#else + dhd_wlfc_transfer_packets(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + +exit: + dhd_os_wlfc_unblock(dhdp); + return rc; +} /* dhd_wlfc_commit_packets */ + +/** + * Called when the (lower) DBUS layer indicates completion (succesfull or not) of a transmit packet + */ +int +dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) +{ + athost_wl_status_info_t* wlfc; + wlfc_mac_descriptor_t *entry; + void* pout = NULL; + int rtn = BCME_OK; + if ((dhd == NULL) || (txp == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + bcm_pkt_validate_chk(txp, "_dhd_wlfc_compressed_txstatus_update"); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rtn = WLFC_UNSUPPORTED; + goto EXIT; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.signal_only_pkts_freed++; +#endif + /* is this a signal-only packet? */ + _dhd_wlfc_pullheader(wlfc, txp); + PKTFREE(wlfc->osh, txp, TRUE); + goto EXIT; + } + + entry = _dhd_wlfc_find_table_entry(wlfc, txp); + ASSERT(entry); + + if (!success || dhd->proptxstatus_txstatus_ignore) { + WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n", + __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp)))); + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE); + ASSERT(txp == pout); + } + + /* indicate failure and free the packet */ + dhd_txcomplete(dhd, txp, success); + + /* return the credit, if necessary */ + _dhd_wlfc_return_implied_credit(wlfc, txp); + + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) + entry->suppr_transit_count--; + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--; + wlfc->stats.pktout++; + PKTFREE(wlfc->osh, txp, TRUE); + } else { + /* bus confirmed pkt went to firmware side */ + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, txp); + } else { + int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp))); + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_BUSRETURNED, -1); + } + } + + ASSERT(entry->onbus_pkts_count > 0); + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; +EXIT: + dhd_os_wlfc_unblock(dhd); + return rtn; +} /* dhd_wlfc_txcomplete */ + +int +dhd_wlfc_init(dhd_pub_t *dhd) +{ + /* enable all signals & indicate host proptxstatus logic is active */ + uint32 tlv, mode, fw_caps; + int ret = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (dhd->wlfc_enabled) { + DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + dhd->wlfc_enabled = 
TRUE; + dhd_os_wlfc_unblock(dhd); + + tlv = WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE | + WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + /* XXX dhd->wlfc_state = NULL; */ + /* XXX ANDREY:may erase pointer to already created wlfc_state, PR#97824 */ + + /* + try to enable/disable signaling by sending "tlv" iovar. if that fails, + fallback to no flow control? Print a message for now. + */ + + /* enable proptxtstatus signaling by default */ + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. + */ + DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n", + dhd->wlfc_enabled?"enabled":"disabled", tlv)); + } + + mode = 0; + + /* query caps */ + ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0); + + if (!ret) { + DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps)); + + if (WLFC_IS_OLD_DEF(fw_caps)) { +#ifdef BCMDBUS + mode = WLFC_MODE_HANGER; +#else + /* enable proptxtstatus v2 by default */ + mode = WLFC_MODE_AFQ; +#endif /* BCMDBUS */ + } else { + WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps)); +#ifdef BCMDBUS + WLFC_SET_AFQ(mode, 0); +#endif /* BCMDBUS */ + WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps)); + WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps)); + } + ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0); + } + + dhd_os_wlfc_block(dhd); + + dhd->wlfc_mode = 0; + if (ret >= 0) { + if (WLFC_IS_OLD_DEF(mode)) { + WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ)); + } else { + dhd->wlfc_mode = mode; + } + } + + DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); +#ifdef LIMIT_BORROW + dhd->wlfc_borrow_allowed = TRUE; +#endif + dhd_os_wlfc_unblock(dhd); + + if (dhd->plat_init) + dhd->plat_init((void *)dhd); + + return BCME_OK; +} /* dhd_wlfc_init */ + +/** AMPDU host reorder specific function */ +int +dhd_wlfc_hostreorder_init(dhd_pub_t *dhd) +{ + /* enable only ampdu hostreorder here */ + uint32 tlv; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__)); + + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + /* enable proptxtstatus signaling by default */ + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n", + __FUNCTION__)); + } else { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. 
+ */ + DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n", + __FUNCTION__, tlv)); + } + + dhd_os_wlfc_block(dhd); + dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER; + dhd_os_wlfc_unblock(dhd); + /* terence 20161229: enable ampdu_hostreorder if tlv enable hostreorder */ + dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE); + + return BCME_OK; +} + +int +dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + +#ifndef BCMDBUS + _dhd_wlfc_cleanup_txq(dhd, fn, arg); +#endif /* !BCMDBUS */ + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** release all packet resources */ +int +dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + _dhd_wlfc_cleanup(dhd, fn, arg); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +int +dhd_wlfc_deinit(dhd_pub_t *dhd) +{ + /* cleanup all psq related resources */ + athost_wl_status_info_t* wlfc; + uint32 tlv = 0; + uint32 hostreorder = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (!dhd->wlfc_enabled) { + DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + dhd->wlfc_enabled = FALSE; + dhd_os_wlfc_unblock(dhd); + + /* query ampdu hostreorder */ + (void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder", + &hostreorder, WLC_GET_VAR, FALSE, 0); + + if (hostreorder) { + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tvl\n", + __FUNCTION__, __LINE__)); + } + + /* Disable proptxtstatus signaling for deinit */ + (void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + _dhd_wlfc_cleanup(dhd, NULL, NULL); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + int i; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + for (i = 0; i < h->max_items; i++) { + if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) { + _dhd_wlfc_hanger_free_pkt(wlfc, i, + WLFC_HANGER_PKT_STATE_COMPLETE, TRUE); + } + } + + /* delete hanger */ + _dhd_wlfc_hanger_delete(dhd, h); + } + +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + if (wlfc->log_buf) { + MFREE(dhd->osh, wlfc->log_buf, WLFC_LOG_BUF_SIZE); + wlfc->log_buf_offset = 0; + wlfc->log_buf_full = FALSE; + } +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + + /* free top structure */ + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + dhd->proptxstatus_mode = hostreorder ? 
+		WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_deinit)
+		dhd->plat_deinit((void *)dhd);
+	return BCME_OK;
+} /* dhd_wlfc_deinit */
+
+/**
+ * Called on an interface event (WLC_E_IF) indicated by firmware
+ * @param[in] dhdp   Pointer to public DHD structure
+ * @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD
+ */
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data);
+
+	dhd_os_wlfc_unblock(dhdp);
+
+	return rc;
+}
+#ifdef LIMIT_BORROW
+int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+	if (dhdp == NULL || event_data == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+	dhd_os_wlfc_block(dhdp);
+	dhdp->wlfc_borrow_allowed = (bool)(*(uint32 *)event_data);
+	dhd_os_wlfc_unblock(dhdp);
+
+	return BCME_OK;
+}
+#endif /* LIMIT_BORROW */
+
+/**
+ * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast
+ * specific)
+ */
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+/** debug specific function */
+int
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	int i;
+	uint8* ea;
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* h;
+	wlfc_mac_descriptor_t* mac_table;
+	wlfc_mac_descriptor_t* interfaces;
+	char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+	if (!dhdp || !strbuf) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	mac_table = wlfc->destination_entries.nodes;
+	interfaces = wlfc->destination_entries.interfaces;
+	bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+		h = (wlfc_hanger_t*)wlfc->hanger;
+		if (h == NULL) {
+			bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+		} else {
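+			/* hanger accounting: entries still outstanding in the
+			 * dongle = pushed - popped; the f_* counters record
+			 * push, pop and slot-find failures.
+			 */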
bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+				"f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+				h->pushed,
+				h->popped,
+				h->failed_to_push,
+				h->failed_to_pop,
+				h->failed_slotfind,
+				(h->pushed - h->popped));
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+		"(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
+		wlfc->stats.tlv_parse_failed,
+		wlfc->stats.credit_request_failed,
+		wlfc->stats.mac_update_failed,
+		wlfc->stats.psmode_update_failed,
+		wlfc->stats.delayq_full_error,
+		wlfc->stats.rollback_failed);
+
+	bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) "
+		"(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d],"
+		"AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n",
+		wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
+		wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0],
+		wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
+		wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1],
+		wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
+		wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2],
+		wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
+		wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3],
+		wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4],
+		wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]);
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		if (interfaces[i].occupied) {
+			char* iftype_desc;
+
+			if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown";
+			else
+				iftype_desc = iftypes[interfaces[i].iftype];
+
+			ea = interfaces[i].ea;
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+				"["MACDBG"], if:%d, type: %s "
+				"netif_flow_control:%s\n", i,
+				MAC2STRDBG(ea), interfaces[i].interface_id,
+				iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+				? " OFF":" ON"));
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),"
+				"(trans,supp_trans,onbus)"
+				"= (%d,%s,%d),(%d,%d,%d)\n",
+				i,
+				interfaces[i].psq.n_pkts_tot,
+				((interfaces[i].state ==
+				WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+				interfaces[i].requested_credit,
+				interfaces[i].transit_count,
+				interfaces[i].suppr_transit_count,
+				interfaces[i].onbus_pkts_count);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				interfaces[i].psq.q[0].n_pkts,
+				interfaces[i].psq.q[1].n_pkts,
+				interfaces[i].afq.q[0].n_pkts,
+				interfaces[i].psq.q[2].n_pkts,
+				interfaces[i].psq.q[3].n_pkts,
+				interfaces[i].afq.q[1].n_pkts,
+				interfaces[i].psq.q[4].n_pkts,
+				interfaces[i].psq.q[5].n_pkts,
+				interfaces[i].afq.q[2].n_pkts,
+				interfaces[i].psq.q[6].n_pkts,
+				interfaces[i].psq.q[7].n_pkts,
+				interfaces[i].afq.q[3].n_pkts,
+				interfaces[i].psq.q[8].n_pkts,
+				interfaces[i].psq.q[9].n_pkts,
+				interfaces[i].afq.q[4].n_pkts);
+		}
+	}
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (mac_table[i].occupied) {
+			ea = mac_table[i].ea;
+			bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+				"["MACDBG"], if:%d \n", i,
+				MAC2STRDBG(ea), mac_table[i].interface_id);
+
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),"
+				"(trans,supp_trans,onbus)"
+				"= (%d,%s,%d),(%d,%d,%d)\n",
+				i,
+				mac_table[i].psq.n_pkts_tot,
+				((mac_table[i].state ==
+				WLFC_STATE_OPEN) ?
" OPEN":"CLOSE"), + mac_table[i].requested_credit, + mac_table[i].transit_count, + mac_table[i].suppr_transit_count, + mac_table[i].onbus_pkts_count); +#ifdef PROP_TXSTATUS_DEBUG + bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n", + i, mac_table[i].opened_ct, mac_table[i].closed_ct); +#endif + bcm_bprintf(strbuf, "MAC_table[%d].PSQ" + "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," + "(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d)," + "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n", + i, + mac_table[i].psq.q[0].n_pkts, + mac_table[i].psq.q[1].n_pkts, + mac_table[i].afq.q[0].n_pkts, + mac_table[i].psq.q[2].n_pkts, + mac_table[i].psq.q[3].n_pkts, + mac_table[i].afq.q[1].n_pkts, + mac_table[i].psq.q[4].n_pkts, + mac_table[i].psq.q[5].n_pkts, + mac_table[i].afq.q[2].n_pkts, + mac_table[i].psq.q[6].n_pkts, + mac_table[i].psq.q[7].n_pkts, + mac_table[i].afq.q[3].n_pkts, + mac_table[i].psq.q[8].n_pkts, + mac_table[i].psq.q[9].n_pkts, + mac_table[i].afq.q[4].n_pkts); + + } + } + +#ifdef PROP_TXSTATUS_DEBUG + { + int avg; + int moving_avg = 0; + int moving_samples; + + if (wlfc->stats.latency_sample_count) { + moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32); + + for (i = 0; i < moving_samples; i++) + moving_avg += wlfc->stats.deltas[i]; + moving_avg /= moving_samples; + + avg = (100 * wlfc->stats.total_status_latency) / + wlfc->stats.latency_sample_count; + bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = " + "(%d.%d, %03d, %03d)\n", + moving_samples, avg/100, (avg - (avg/100)*100), + wlfc->stats.latency_most_recent, + moving_avg); + } + } + + bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), " + "back = (%d,%d,%d,%d,%d,%d)\n", + wlfc->stats.fifo_credits_sent[0], + wlfc->stats.fifo_credits_sent[1], + wlfc->stats.fifo_credits_sent[2], + wlfc->stats.fifo_credits_sent[3], + wlfc->stats.fifo_credits_sent[4], + wlfc->stats.fifo_credits_sent[5], + + wlfc->stats.fifo_credits_back[0], + wlfc->stats.fifo_credits_back[1], + wlfc->stats.fifo_credits_back[2], + wlfc->stats.fifo_credits_back[3], + wlfc->stats.fifo_credits_back[4], + wlfc->stats.fifo_credits_back[5]); + { + uint32 fifo_cr_sent = 0; + uint32 fifo_cr_acked = 0; + uint32 request_cr_sent = 0; + uint32 request_cr_ack = 0; + uint32 bc_mc_cr_ack = 0; + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) { + fifo_cr_sent += wlfc->stats.fifo_credits_sent[i]; + } + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) { + fifo_cr_acked += wlfc->stats.fifo_credits_back[i]; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_sent += + wlfc->destination_entries.nodes[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_sent += + wlfc->destination_entries.interfaces[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_ack += + wlfc->destination_entries.nodes[i].dstncredit_acks; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_ack += + wlfc->destination_entries.interfaces[i].dstncredit_acks; + } + } + bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d)," + "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)", + fifo_cr_sent, fifo_cr_acked, + request_cr_sent, request_cr_ack, 
+ wlfc->destination_entries.other.dstncredit_acks, + bc_mc_cr_ack, + wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed); + } +#endif /* PROP_TXSTATUS_DEBUG */ + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out)," + "(dropped,hdr_only,wlc_tossed,wlc_dropped,wlc_exptime)" + "(freed,free_err,rollback)) = " + "((%d,%d,%d,%d,%d),(%d,%d,%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.pktin, + wlfc->stats.pkt2bus, + wlfc->stats.txstatus_in, + wlfc->stats.dhd_hdrpulls, + wlfc->stats.pktout, + + wlfc->stats.pktdropped, + wlfc->stats.wlfc_header_only_pkt, + wlfc->stats.wlc_tossed_pkts, + wlfc->stats.pkt_dropped, + wlfc->stats.pkt_exptime, + + wlfc->stats.pkt_freed, + wlfc->stats.pkt_free_err, wlfc->stats.rollback); + + bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = " + "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.d11_suppress, + wlfc->stats.wl_suppress, + wlfc->stats.bad_suppress, + + wlfc->stats.psq_d11sup_enq, + wlfc->stats.psq_wlsup_enq, + wlfc->stats.psq_hostq_enq, + wlfc->stats.mac_handle_notfound, + + wlfc->stats.psq_d11sup_retx, + wlfc->stats.psq_wlsup_retx, + wlfc->stats.psq_hostq_retx); + + bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n", + wlfc->stats.cleanup_txq_cnt, + wlfc->stats.cleanup_psq_cnt, + wlfc->stats.cleanup_fw_cnt); + + bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error); + + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i, + wlfc->pkt_cnt_in_q[i][0], + wlfc->pkt_cnt_in_q[i][1], + wlfc->pkt_cnt_in_q[i][2], + wlfc->pkt_cnt_in_q[i][3], + wlfc->pkt_cnt_in_q[i][4]); + } + bcm_bprintf(strbuf, "\n"); + + dhd_os_wlfc_unblock(dhdp); + return BCME_OK; +} /* dhd_wlfc_dump */ + +int dhd_wlfc_clear_counts(dhd_pub_t *dhd) +{ + athost_wl_status_info_t* wlfc; + wlfc_hanger_t* hanger; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t)); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + hanger = (wlfc_hanger_t*)wlfc->hanger; + + hanger->pushed = 0; + hanger->popped = 0; + hanger->failed_slotfind = 0; + hanger->failed_to_pop = 0; + hanger->failed_to_push = 0; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** returns TRUE if flow control is enabled */ +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_enabled; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_state ? 
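+		/* WLFC_FCMODE_NONE (0) is reported while wlfc_state is not allocated */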
dhd->proptxstatus_mode : 0; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->wlfc_state) { + dhd->proptxstatus_mode = val & 0xff; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called when rx frame is received from the dongle */ +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf) +{ + athost_wl_status_info_t* wlfc; + bool rc = FALSE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return FALSE; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + if (PKTLEN(wlfc->osh, pktbuf) == 0) { + wlfc->stats.wlfc_header_only_pkt++; + rc = TRUE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock) +{ + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + if (bAcquireLock) { + dhd_os_wlfc_block(dhdp); + } + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) || + dhdp->proptxstatus_module_ignore) { + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + return WLFC_UNSUPPORTED; + } + + if (state != dhdp->proptxstatus_txoff) { + dhdp->proptxstatus_txoff = state; + } + + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + + return BCME_OK; +} + +/** Called when eg an rx frame is received from the dongle */ +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio) +{ + athost_wl_status_info_t* wlfc; + int rx_path_ac = -1; + + if ((dhd == NULL) || (prio >= NUMPRIO)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_rxpkt_chk) { + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + rx_path_ac = prio2fifo[prio]; + wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME(); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_module_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) +{ + uint32 tlv = 0; + bool bChanged = FALSE; + + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if ((bool)val != dhd->proptxstatus_module_ignore) { + dhd->proptxstatus_module_ignore = (val != 0); + /* force txstatus_ignore sync with proptxstatus_module_ignore */ + dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore; + if (FALSE == dhd->proptxstatus_module_ignore) { + tlv = WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE; + } + /* always enable host reorder */ + tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + bChanged = TRUE; + } + + 
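+	/* push the recomputed tlv mask to the dongle below, after dropping
+	 * the wlfc lock, since the ioctl may block
+	 */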
dhd_os_wlfc_unblock(dhd);
+
+	if (bChanged) {
+		/* selectively enable proptxstatus signaling */
+		if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
+			DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		} else {
+			DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+	}
+
+#if defined(DHD_WLFC_THREAD)
+	_dhd_wlfc_thread_wakeup(dhd);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_credit_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_credit_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_txstatus_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_txstatus_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_rxpkt_chk;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_rxpkt_chk = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_txpkt_log_and_dump(dhd_pub_t *dhdp, void* pkt, uint16 *pktfate_status)
+{
+	uint32 pktid;
+	uint32 pktlen;
+	uint8 *pktdata;
+#ifdef BDC
+	struct bdc_header *bdch;
+	uint32 bdc_len;
+#endif /* BDC */
+	uint8 ifidx;
+	uint8 hcnt;
+	uint8 fifo_id;
+
+	if (!pkt) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	/* dereference the packet only after the NULL check above */
+	pktlen = PKTLEN(dhdp->osh, pkt);
+	pktdata = PKTDATA(dhdp->osh, pkt);
+	ifidx = DHD_PKTTAG_IF(PKTTAG(pkt));
+	hcnt = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(pkt)));
+	fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+
+	pktid = (ifidx << DHD_PKTID_IF_SHIFT) | (fifo_id << DHD_PKTID_FIFO_SHIFT) | hcnt;
+#ifdef BDC
+	bdch = (struct bdc_header *)pktdata;
+	bdc_len = BDC_HEADER_LEN + (bdch->dataOffset << DHD_WORD_TO_LEN_SHIFT);
+	pktlen -= bdc_len;
+	pktdata = pktdata + bdc_len;
+#endif /* BDC */
+	dhd_handle_pktdata(dhdp, ifidx, pkt, pktdata, pktid, pktlen,
+		pktfate_status, NULL, TRUE, FALSE, TRUE);
+	return BCME_OK;
+}
+
+#ifdef PROPTX_MAXCOUNT
+int dhd_wlfc_update_maxcount(dhd_pub_t *dhdp, uint8 ifid, int maxcount)
+{
+	athost_wl_status_info_t* ctx;
+	int rc = 0;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rc = WLFC_UNSUPPORTED; + goto exit; + } + + if (ifid >= WLFC_MAX_IFNUM) { + DHD_ERROR(("%s: bad ifid\n", __FUNCTION__)); + rc = BCME_BADARG; + goto exit; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + ctx->destination_entries.interfaces[ifid].transit_maxcount = maxcount; +exit: + dhd_os_wlfc_unblock(dhdp); + return rc; +} +#endif /* PROPTX_MAXCOUNT */ +#endif /* PROP_TXSTATUS */ diff --git a/bcmdhd.101.10.361.x/dhd_wlfc.h b/bcmdhd.101.10.361.x/dhd_wlfc.h new file mode 100755 index 0000000..1089a2f --- /dev/null +++ b/bcmdhd.101.10.361.x/dhd_wlfc.h @@ -0,0 +1,596 @@ +/* + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id$ + * + */ +#ifndef __wlfc_host_driver_definitions_h__ +#define __wlfc_host_driver_definitions_h__ + +#ifdef QMONITOR +#include +#endif + +/* #define OOO_DEBUG */ + +#define KERNEL_THREAD_RETURN_TYPE int + +typedef int (*f_commitpkt_t)(struct dhd_bus *ctx, void* p); +typedef bool (*f_processpkt_t)(void* p, void* arg); + +#define WLFC_UNSUPPORTED -9999 + +#define WLFC_NO_TRAFFIC -1 +#define WLFC_MULTI_TRAFFIC 0 + +#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */ + +/** 16 bits will provide an absolute max of 65536 slots */ +#define WLFC_HANGER_MAXITEMS 3072 + +#define WLFC_HANGER_ITEM_STATE_FREE 1 +#define WLFC_HANGER_ITEM_STATE_INUSE 2 +#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3 +#define WLFC_HANGER_ITEM_STATE_FLUSHED 4 + +#define WLFC_HANGER_PKT_STATE_TXSTATUS 1 +#define WLFC_HANGER_PKT_STATE_BUSRETURNED 2 +#define WLFC_HANGER_PKT_STATE_COMPLETE \ + (WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED) + +typedef enum { + Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */ + Q_TYPE_AFQ /**< At Firmware Queue */ +} q_type_t; + +typedef enum ewlfc_packet_state { + eWLFC_PKTTYPE_NEW, /**< unused in the code (Jan 2015) */ + eWLFC_PKTTYPE_DELAYED, /**< packet did not enter wlfc yet */ + eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */ + eWLFC_PKTTYPE_MAX +} ewlfc_packet_state_t; + +typedef enum ewlfc_mac_entry_action { + eWLFC_MAC_ENTRY_ACTION_ADD, + eWLFC_MAC_ENTRY_ACTION_DEL, + eWLFC_MAC_ENTRY_ACTION_UPDATE, + eWLFC_MAC_ENTRY_ACTION_MAX +} ewlfc_mac_entry_action_t; + +typedef struct wlfc_hanger_item { + uint8 state; + uint8 gen; + uint8 pkt_state; /**< bitmask containing eg WLFC_HANGER_PKT_STATE_TXCOMPLETE */ + uint8 pkt_txstatus; + uint32 identifier; + void* pkt; +#ifdef PROP_TXSTATUS_DEBUG + uint32 push_time; +#endif + struct wlfc_hanger_item *next; +} 
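+/* editor's sizing sketch (illustrative): wlfc_hanger_t below ends in a
+ * one-element items[] array, so WLFC_HANGER_SIZE(n) allocates the header
+ * once plus storage for n items, e.g. for n == WLFC_HANGER_MAXITEMS:
+ *   sizeof(wlfc_hanger_t) - sizeof(wlfc_hanger_item_t) +
+ *       WLFC_HANGER_MAXITEMS * sizeof(wlfc_hanger_item_t)
+ */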
wlfc_hanger_item_t; + +/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */ +typedef struct wlfc_hanger { + int max_items; + uint32 pushed; + uint32 popped; + uint32 failed_to_push; + uint32 failed_to_pop; + uint32 failed_slotfind; + uint32 slot_pos; + /** XXX: items[1] should be the last element here. Do not add new elements below it. */ + wlfc_hanger_item_t items[1]; +} wlfc_hanger_t; + +#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \ + sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t))) + +#define WLFC_STATE_OPEN 1 /**< remote MAC is able to receive packets */ +#define WLFC_STATE_CLOSE 2 /**< remote MAC is in power save mode */ + +#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */ +#define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1) + +#define WLFC_PSQ_LEN (4096 * 8) + +#ifdef BCMDBUS +#define WLFC_FLOWCONTROL_HIWATER 512 +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4) +#else +#define WLFC_FLOWCONTROL_HIWATER ((4096 * 8) - 256) +#define WLFC_FLOWCONTROL_LOWATER 256 +#endif + +#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256)) +#undef WLFC_FLOWCONTROL_HIWATER +#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256) +#undef WLFC_FLOWCONTROL_LOWATER +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4) +#endif + +#define WLFC_LOG_BUF_SIZE (1024*1024) + +/** Properties related to a remote MAC entity */ +typedef struct wlfc_mac_descriptor { + uint8 occupied; /**< if 0, this descriptor is unused and thus can be (re)used */ + uint8 interface_id; + uint8 iftype; /**< eg WLC_E_IF_ROLE_STA */ + uint8 state; /**< eg WLFC_STATE_OPEN */ + uint8 ac_bitmap; /**< automatic power save delivery (APSD) */ + uint8 requested_credit; + uint8 requested_packet; /**< unit: [number of packets] */ + uint8 ea[ETHER_ADDR_LEN]; + + /** maintain (MAC,AC) based seq count for packets going to the device. As well as bc/mc. */ + uint8 seq[AC_COUNT + 1]; + uint8 generation; /**< toggles between 0 and 1 */ + struct pktq psq; /**< contains both 'delayed' and 'suppressed' packets */ + /** packets at firmware queue */ + struct pktq afq; +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + uint8 last_send_gen[AC_COUNT+1]; + uint8 last_send_seq[AC_COUNT+1]; + uint8 last_complete_seq[AC_COUNT+1]; +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + /** The AC pending bitmap that was reported to the fw at last change */ + uint8 traffic_lastreported_bmp; + /** The new AC pending bitmap */ + uint8 traffic_pending_bmp; + /** 1= send on next opportunity */ + uint8 send_tim_signal; + uint8 mac_handle; /**< mac handles are assigned by the dongle */ + /** Number of packets at dongle for this entry. */ + int transit_count; + /** Number of suppression to wait before evict from delayQ */ + int suppr_transit_count; + /** pkt sent to bus but no bus TX complete yet */ + int onbus_pkts_count; + /** flag. TRUE when remote MAC is in suppressed state */ + uint8 suppressed; + +#ifdef QMONITOR + dhd_qmon_t qmon; +#endif /* QMONITOR */ + +#ifdef PROP_TXSTATUS_DEBUG + uint32 dstncredit_sent_packets; + uint32 dstncredit_acks; + uint32 opened_ct; + uint32 closed_ct; +#endif +#ifdef PROPTX_MAXCOUNT + /** Max Number of packets at dongle for this entry. 
*/
+	int transit_maxcount;
+#endif /* PROPTX_MAXCOUNT */
+	struct wlfc_mac_descriptor* prev;
+	struct wlfc_mac_descriptor* next;
+#ifdef BULK_DEQUEUE
+	uint16 release_count[AC_COUNT + 1];
+#endif
+} wlfc_mac_descriptor_t;
+
+/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */
+typedef struct dhd_wlfc_commit_info {
+	uint8 needs_hdr;
+	uint8 ac_fifo_credit_spent;
+	ewlfc_packet_state_t pkt_type;
+	wlfc_mac_descriptor_t* mac_entry;
+	void* p;
+} dhd_wlfc_commit_info_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+	entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
+
+typedef struct athost_wl_stat_counters {
+	uint32 pktin;
+	uint32 pktout;
+	uint32 pkt2bus;
+	uint32 pktdropped;
+	uint32 tlv_parse_failed;
+	uint32 rollback;
+	uint32 rollback_failed;
+	uint32 delayq_full_error;
+	uint32 credit_request_failed;
+	uint32 packet_request_failed;
+	uint32 mac_update_failed;
+	uint32 psmode_update_failed;
+	uint32 interface_update_failed;
+	uint32 wlfc_header_only_pkt;
+	uint32 txstatus_in;
+	uint32 d11_suppress;
+	uint32 wl_suppress;
+	uint32 bad_suppress;
+	uint32 pkt_dropped;
+	uint32 pkt_exptime;
+	uint32 pkt_freed;
+	uint32 pkt_free_err;
+	uint32 psq_wlsup_retx;
+	uint32 psq_wlsup_enq;
+	uint32 psq_d11sup_retx;
+	uint32 psq_d11sup_enq;
+	uint32 psq_hostq_retx;
+	uint32 psq_hostq_enq;
+	uint32 mac_handle_notfound;
+	uint32 wlc_tossed_pkts;
+	uint32 dhd_hdrpulls;
+	uint32 generic_error;
+	/* an extra one for bc/mc traffic */
+	uint32 send_pkts[AC_COUNT + 1];
+	uint32 drop_pkts[WLFC_PSQ_PREC_COUNT];
+	uint32 ooo_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+	/** all pkt2bus -> txstatus latency accumulated */
+	uint32 latency_sample_count;
+	uint32 total_status_latency;
+	uint32 latency_most_recent;
+	int idx_delta;
+	uint32 deltas[10];
+	uint32 fifo_credits_sent[6];
+	uint32 fifo_credits_back[6];
+	uint32 dropped_qfull[6];
+	uint32 signal_only_pkts_sent;
+	uint32 signal_only_pkts_freed;
+#endif
+	uint32 cleanup_txq_cnt;
+	uint32 cleanup_psq_cnt;
+	uint32 cleanup_fw_cnt;
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+	(ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+#define WLFC_PACKET_BOUND 10
+#define WLFC_FCMODE_NONE 0
+#define WLFC_FCMODE_IMPLIED_CREDIT 1
+#define WLFC_FCMODE_EXPLICIT_CREDIT 2
+#define WLFC_ONLY_AMPDU_HOSTREORDER 3
+
+/** Reserved credits ratio when borrowed by higher priority */
+#define WLFC_BORROW_LIMIT_RATIO 4
+
+/** How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/** How long to defer flow control in milliseconds */
+#define WLFC_FC_DEFER_PERIOD_MS 200
+
+/** Detection window for rx traffic per AC, in milliseconds */
+#define WLFC_RX_DETECTION_THRESHOLD_MS 100
+
+/** Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+/** flow control specific information, only 1 instance during driver
lifetime */ +typedef struct athost_wl_status_info { + uint8 last_seqid_to_wlc; + + /** OSL handle */ + osl_t *osh; + /** dhd public struct pointer */ + void *dhdp; + + f_commitpkt_t fcommit; + void* commit_ctx; + + /** statistics */ + athost_wl_stat_counters_t stats; + + /** incremented on eg receiving a credit map event from the dongle */ + int Init_FIFO_credit[AC_COUNT + 2]; + /** the additional ones are for bc/mc and ATIM FIFO */ + int FIFO_credit[AC_COUNT + 2]; + /** Credit borrow counts for each FIFO from each of the other FIFOs */ + int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2]; + + /** packet hanger and MAC->handle lookup table */ + void *hanger; + + struct { + /** table for individual nodes */ + wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE]; + /** table for interfaces */ + wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM]; + /* OS may send packets to unknown (unassociated) destinations */ + /** A place holder for bc/mc and packets to unknown destinations */ + wlfc_mac_descriptor_t other; + } destination_entries; + + wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */ + int active_entry_count; + + wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE]; + int requested_entry_count; + + /* pkt counts for each interface and ac */ + int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_per_ac[AC_COUNT+1]; + int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_in_psq; + uint8 allow_fc; /**< Boolean */ + uint32 fc_defer_timestamp; + uint32 rx_timestamp[AC_COUNT+1]; + + /** ON/OFF state for flow control to the host network interface */ + uint8 hostif_flow_state[WLFC_MAX_IFNUM]; + uint8 host_ifidx; + + /** to flow control an OS interface */ + uint8 toggle_host_if; + + /** To borrow credits */ + uint8 allow_credit_borrow; + + /** ac number for the first single ac traffic */ + uint8 single_ac; + + /** Timestamp for the first single ac traffic */ + uint32 single_ac_timestamp; + + bool bcmc_credit_supported; + +#if defined(BCMINTERNAL) && defined(OOO_DEBUG) + uint8* log_buf; + uint32 log_buf_offset; + bool log_buf_full; +#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */ + +#ifdef BULK_DEQUEUE + uint8 max_release_count; +#endif /* total_credit */ +} athost_wl_status_info_t; + +/** Please be mindful that total pkttag space is 32 octets only */ +typedef struct dhd_pkttag { + +#ifdef BCM_OBJECT_TRACE + /* if use this field, keep it at the first 4 bytes */ + uint32 sn; +#endif /* BCM_OBJECT_TRACE */ + + /** + b[15] - 1 = wlfc packet + b[14:13] - encryption exemption + b[12 ] - 1 = event channel + b[11 ] - 1 = this packet was sent in response to one time packet request, + do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET]. + b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on] + b[9 ] - 1 = packet is host->firmware (transmit direction) + - 0 = packet received from firmware (firmware->host) + b[8 ] - 1 = packet was sent due to credit_request (pspoll), + packet does not count against FIFO credit. + - 0 = normal transaction, packet counts against FIFO credit + b[7 ] - 1 = AP, 0 = STA + b[6:4] - AC FIFO number + b[3:0] - interface index + */ + uint16 if_flags; + + /** + * destination MAC address for this packet so that not every module needs to open the packet + * to find this + */ + uint8 dstn_ether[ETHER_ADDR_LEN]; + + /** This 32-bit goes from host to device for every packet. */ + uint32 htod_tag; + + /** This 16-bit is original d11seq number for every suppressed packet. 
*/ + uint16 htod_seq; + + /** This address is mac entry for every packet. */ + void *entry; + + /** bus specific stuff */ + union { + struct { + void *stuff; + uint32 thing1; + uint32 thing2; + } sd; + + /* XXX: using the USB typedef here will complicate life for anybody using dhd.h */ + struct { + void *bus; + void *urb; + } usb; + } bus_specific; +} dhd_pkttag_t; + +#define DHD_PKTTAG_WLFCPKT_MASK 0x1 +#define DHD_PKTTAG_WLFCPKT_SHIFT 15 +#define DHD_PKTTAG_WLFCPKT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \ + (((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT) +#define DHD_PKTTAG_WLFCPKT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK) + +#define DHD_PKTTAG_EXEMPT_MASK 0x3 +#define DHD_PKTTAG_EXEMPT_SHIFT 13 +#define DHD_PKTTAG_EXEMPT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \ + (((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT) +#define DHD_PKTTAG_EXEMPT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK) + +#define DHD_PKTTAG_EVENT_MASK 0x1 +#define DHD_PKTTAG_EVENT_SHIFT 12 +#define DHD_PKTTAG_SETEVENT(tag, event) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \ + (((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT) +#define DHD_PKTTAG_EVENT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK) + +#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1 +#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11 +#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \ + (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) +#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK) + +#define DHD_PKTTAG_SIGNALONLY_MASK 0x1 +#define DHD_PKTTAG_SIGNALONLY_SHIFT 10 +#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \ + (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT) +#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK) + +#define DHD_PKTTAG_PKTDIR_MASK 0x1 +#define DHD_PKTTAG_PKTDIR_SHIFT 9 +#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \ + (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT) +#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK) + +#define DHD_PKTTAG_CREDITCHECK_MASK 0x1 +#define DHD_PKTTAG_CREDITCHECK_SHIFT 8 +#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \ + (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT) +#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + 
DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK) + +#define DHD_PKTTAG_IFTYPE_MASK 0x1 +#define DHD_PKTTAG_IFTYPE_SHIFT 7 +#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \ + (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT) +#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK) + +#define DHD_PKTTAG_FIFO_MASK 0x7 +#define DHD_PKTTAG_FIFO_SHIFT 4 +#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \ + (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT) +#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK) + +#define DHD_PKTTAG_IF_MASK 0xf +#define DHD_PKTTAG_IF_SHIFT 0 +#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \ + (((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT) +#define DHD_PKTTAG_IF(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK) + +#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \ + (dstn_MAC_ea), ETHER_ADDR_LEN) +#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether + +#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue) +#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag) + +#define DHD_PKTTAG_SET_H2DSEQ(tag, seq) ((dhd_pkttag_t*)(tag))->htod_seq = (seq) +#define DHD_PKTTAG_H2DSEQ(tag) (((dhd_pkttag_t*)(tag))->htod_seq) + +#define DHD_PKTTAG_SET_ENTRY(tag, entry) ((dhd_pkttag_t*)(tag))->entry = (entry) +#define DHD_PKTTAG_ENTRY(tag) (((dhd_pkttag_t*)(tag))->entry) + +#define PSQ_SUP_IDX(x) (x * 2 + 1) +#define PSQ_DLY_IDX(x) (x * 2) + +#ifdef PROP_TXSTATUS_DEBUG +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0) +#else +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0) +#endif + +#ifdef BCM_OBJECT_TRACE +#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val) +#define DHD_PKTTAG_SN(tag) (((dhd_pkttag_t*)(tag))->sn) +#endif /* BCM_OBJECT_TRACE */ + +#define DHD_PKTID_IF_SHIFT (16u) +#define DHD_PKTID_FIFO_SHIFT (8u) + +/* public functions */ +int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, + uchar *reorder_info_buf, uint *reorder_info_len); +KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data); +int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, + struct dhd_bus *commit_ctx, void *pktbuf, bool need_toggle_host_if); +int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success); +int dhd_wlfc_init(dhd_pub_t *dhd); +#ifdef SUPPORT_P2P_GO_PS +int dhd_wlfc_suspend(dhd_pub_t *dhd); +int dhd_wlfc_resume(dhd_pub_t *dhd); +#endif /* SUPPORT_P2P_GO_PS */ +int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd); +int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg); +int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg); +int dhd_wlfc_deinit(dhd_pub_t *dhd); +int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea); +int 
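+/* editor's illustration (hypothetical values; macros as defined above):
+ * tagging a packet for fifo 1 on interface 2, then composing the pktid
+ * the way dhd_txpkt_log_and_dump() does:
+ *   DHD_PKTTAG_SETIF(tag, 2);      // if_flags b[3:0] = 2
+ *   DHD_PKTTAG_SETFIFO(tag, 1);    // if_flags b[6:4] = 1
+ *   pktid = (DHD_PKTTAG_IF(tag) << DHD_PKTID_IF_SHIFT) |
+ *       (DHD_PKTTAG_FIFO(tag) << DHD_PKTID_FIFO_SHIFT) | hcnt;
+ */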
dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data); +#ifdef LIMIT_BORROW +int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data); +#endif /* LIMIT_BORROW */ +int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp); +int dhd_wlfc_enable(dhd_pub_t *dhdp); +int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +int dhd_wlfc_clear_counts(dhd_pub_t *dhd); +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val); +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val); +bool dhd_wlfc_is_supported(dhd_pub_t *dhd); +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf); +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock); +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio); + +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val); + +int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val); +int dhd_txpkt_log_and_dump(dhd_pub_t *dhdp, void* pkt, uint16 *pktfate_status); +#ifdef PROPTX_MAXCOUNT +int dhd_wlfc_update_maxcount(dhd_pub_t *dhdp, uint8 ifid, int maxcount); +#endif /* PROPTX_MAXCOUNT */ + +#endif /* __wlfc_host_driver_definitions_h__ */ diff --git a/bcmdhd.101.10.361.x/frag.c b/bcmdhd.101.10.361.x/frag.c new file mode 100755 index 0000000..e49c335 --- /dev/null +++ b/bcmdhd.101.10.361.x/frag.c @@ -0,0 +1,108 @@ +/* + * IE/TLV fragmentation/defragmentation support for + * Broadcom 802.11bang Networking Device Driver + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#include +#include +#include <802.11.h> + +/* defrag a fragmented dot11 ie/tlv. if space does not permit, return the needed + * ie length to contain all the fragments with status BCME_BUFTOOSHORT. + * out_len is in/out parameter, max length on input, used/required length on output + */ +int +bcm_tlv_dot11_defrag(const void *buf, uint buf_len, uint8 id, bool id_ext, + uint8 *out, uint *out_len) +{ + int err = BCME_OK; + const bcm_tlv_t *ie; + uint tot_len = 0; + uint out_left; + + /* find the ie; includes validation */ + ie = bcm_parse_tlvs_dot11(buf, buf_len, id, id_ext); + if (!ie) { + err = BCME_IE_NOTFOUND; + goto done; + } + + out_left = (out && out_len) ? *out_len : 0; + + /* first fragment */ + tot_len = id_ext ? 
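+	/* the extension ID occupies data[0] of the first fragment, so it is
+	 * excluded from the copied payload length when id_ext is set
+	 */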
ie->len - 1 : ie->len;
+
+	/* copy out if output space permits */
+	if (out_left < tot_len) {
+		err = BCME_BUFTOOSHORT;
+		out_left = 0; /* prevent further copy */
+	} else {
+		memcpy(out, &ie->data[id_ext ? 1 : 0], tot_len);
+		out += tot_len;
+		out_left -= tot_len;
+	}
+
+	/* bail if not fragmented, or not fragmentable per 802.11 table 9-77 (11md0.1);
+	 * the latter check can be introduced later
+	 */
+	if (ie->len != BCM_TLV_MAX_DATA_SIZE) {
+		goto done;
+	}
+
+	/* adjust buf_len to length after ie including it */
+	buf_len -= (uint)(((const uint8 *)ie - (const uint8 *)buf));
+
+	/* update length from fragments, okay if no next ie */
+	while ((ie = bcm_next_tlv(ie, &buf_len)) &&
+		(ie->id == DOT11_MNG_FRAGMENT_ID)) {
+		/* note: buf_len starts at next ie and last frag may be partial */
+		if (out_left < ie->len) {
+			err = BCME_BUFTOOSHORT;
+			out_left = 0;
+		} else {
+			memcpy(out, &ie->data[0], ie->len);
+			out += ie->len;
+			out_left -= ie->len;
+		}
+
+		tot_len += ie->len + BCM_TLV_HDR_SIZE;
+
+		/* all but last should be of max size */
+		if (ie->len < BCM_TLV_MAX_DATA_SIZE) {
+			break;
+		}
+	}
+
+done:
+	if (out_len) {
+		*out_len = tot_len;
+	}
+
+	return err;
+}
+
+int
+bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len,
+	uint8 id, bool id_ext, uint *ie_len)
+{
+	return bcm_tlv_dot11_defrag(buf, buf_len, id, id_ext, NULL, ie_len);
+}
diff --git a/bcmdhd.101.10.361.x/frag.h b/bcmdhd.101.10.361.x/frag.h
new file mode 100755
index 0000000..e14edd9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/frag.h
@@ -0,0 +1,32 @@
+/*
+ * IE/TLV (de)fragmentation declarations/definitions for
+ * Broadcom 802.11abgn Networking Device Driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ */
+
+#ifndef __FRAG_H__
+#define __FRAG_H__
+
+int bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len,
+	uint8 id, bool id_ext, uint *ie_len);
+
+#endif /* __FRAG_H__ */
diff --git a/bcmdhd.101.10.361.x/ftdi_sio_external.h b/bcmdhd.101.10.361.x/ftdi_sio_external.h
new file mode 100755
index 0000000..8c021a1
--- /dev/null
+++ b/bcmdhd.101.10.361.x/ftdi_sio_external.h
@@ -0,0 +1,39 @@
+/*
+ * External driver API to ftdi_sio_brcm driver.
+ *
+ * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * $Id: $ + */ + +typedef struct usb_serial_port * gpio_handle; + +#define BITMODE_RESET 0x00 +#define BITMODE_BITBANG 0x01 + +int ftdi_usb_reset(int handle); +int ftdi_set_bitmode(int handle, unsigned char bitmask, unsigned char mode); +int gpio_write_port(int handle, unsigned char pins); +int gpio_write_port_non_block(int handle, unsigned char pins); +int gpio_read_port(int handle, unsigned char *pins); +int handle_add(gpio_handle pointer); +int handle_remove(gpio_handle pointer); +int get_handle(const char *dev_filename); +gpio_handle get_pointer_by_handle(int handle); diff --git a/bcmdhd.101.10.361.x/hnd_pktpool.c b/bcmdhd.101.10.361.x/hnd_pktpool.c new file mode 100755 index 0000000..eee518a --- /dev/null +++ b/bcmdhd.101.10.361.x/hnd_pktpool.c @@ -0,0 +1,2130 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#include +#include +#include +#include +#include +#include +#ifdef BCMRESVFRAGPOOL +#include +#endif /* BCMRESVFRAGPOOL */ +#ifdef BCMFRWDPOOLREORG +#include +#endif /* BCMFRWDPOOLREORG */ + +#if defined(DONGLEBUILD) && defined(SRMEM) +#include +#endif /* DONGLEBUILD && SRMEM */ +#if defined(DONGLEBUILD) +#include +#endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif + +/* Registry size is one larger than max pools, as slot #0 is reserved */ +#define PKTPOOLREG_RSVD_ID (0U) +#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead)) +#define PKTPOOLREG_FREE_PTR (POOLPTR(NULL)) + +#define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp))) +#define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp))) + +/* Tag a registry entry as free for use */ +#define PKTPOOL_REGISTRY_CLR(id) \ + PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR) +#define PKTPOOL_REGISTRY_ISCLR(id) \ + (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR)) + +/* Tag registry entry 0 as reserved */ +#define PKTPOOL_REGISTRY_RSV() \ + PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR) +#define PKTPOOL_REGISTRY_ISRSVD() \ + (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)) + +/* Walk all un-reserved entries in registry */ +#define PKTPOOL_REGISTRY_FOREACH(id) \ + for ((id) = 1U; (id) <= pktpools_max; (id)++) + +enum pktpool_empty_cb_state { + EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */ + EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */ + EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */ +}; + +uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */ +pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */ + +/* number of pktids that are reserved for pktpool usage at the moment + * initializing this with max pktids reserved for pktpool + * pktpool_init, pktpool_fill and pktpool_refill decrements this + * pktpool_reclaim, pktpool_empty and heap_pkt_release increments this + */ +#ifdef DONGLEBUILD +uint32 total_pool_pktid_count = PKTID_POOL; +#else +uint32 total_pool_pktid_count = 0U; +#endif /* DONGLEBUILD */ + +#ifdef POOL_HEAP_RECONFIG +typedef struct pktpool_heap_cb_reg { + pktpool_heap_cb_t fn; + void *ctxt; + uint32 flag; +} pktpool_heap_cb_reg_t; +#define PKTPOOL_MAX_HEAP_CB 2 +pktpool_heap_cb_reg_t pktpool_heap_cb_reg[PKTPOOL_MAX_HEAP_CB]; +uint32 pktpool_heap_rel_active = 0U; + +static void hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag); +static void hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag); +static int hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize); +static void hnd_pktpool_lbuf_free_cb(uint8 poolid); +static pktpool_heap_cb_reg_t *BCMRAMFN(hnd_pool_get_cb_registry)(void); +#endif /* POOL_HEAP_RECONFIG */ + +/* Register/Deregister a pktpool with registry during pktpool_init/deinit */ +static 
int pktpool_register(pktpool_t * poolptr);
+static int pktpool_deregister(pktpool_t * poolptr);
+
+/** add declaration */
+static void pktpool_avail_notify(pktpool_t *pktp);
+
+/** accessor functions required when ROMming this file, forced into RAM */
+
+pktpool_t *
+BCMPOSTTRAPRAMFN(get_pktpools_registry)(int id)
+{
+	return pktpools_registry[id];
+}
+
+static void
+BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
+{
+	pktpools_registry[id] = pp;
+}
+
+static bool
+BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
+{
+	return pktpools_registry[id] == pp;
+}
+
+/** Constructs a pool registry to serve a maximum of total_pools */
+int
+BCMATTACHFN(pktpool_attach)(osl_t *osh, uint32 total_pools)
+{
+	uint32 poolid;
+	BCM_REFERENCE(osh);
+
+	if (pktpools_max != 0U) {
+		return BCME_ERROR;
+	}
+
+	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
+
+	/* Initialize registry: reserve slot#0 and tag others as free */
+	PKTPOOL_REGISTRY_RSV();		/* reserve slot#0 */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* tag all unreserved entries as free */
+		PKTPOOL_REGISTRY_CLR(poolid);
+	}
+
+	pktpools_max = total_pools;
+
+	return (int)pktpools_max;
+}
+
+/** Destructs the pool registry. Ascertain all pools were first de-inited */
+int
+BCMATTACHFN(pktpool_dettach)(osl_t *osh)
+{
+	uint32 poolid;
+	BCM_REFERENCE(osh);
+
+	if (pktpools_max == 0U) {
+		return BCME_OK;
+	}
+
+	/* Ascertain that no pools are still registered */
+	ASSERT(PKTPOOL_REGISTRY_ISRSVD());	/* assert reserved slot */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* ascertain all others are free */
+		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
+	}
+
+	pktpools_max = 0U;	/* restore boot state */
+
+	return BCME_OK;
+}
+
+/** Registers a pool in a free slot; returns the registry slot index */
+static int
+BCMATTACHFN(pktpool_register)(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	if (pktpools_max == 0U) {
+		return PKTPOOL_INVALID_ID;	/* registry has not yet been constructed */
+	}
+
+	ASSERT(pktpools_max != 0U);
+
+	/* find an empty slot in pktpools_registry */
+	PKTPOOL_REGISTRY_FOREACH(poolid) {
+		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
+			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr));	/* register pool */
+			return (int)poolid;	/* return pool ID */
+		}
+	} /* FOREACH */
+
+	return PKTPOOL_INVALID_ID;	/* error: registry is full */
+}
+
+/** Deregisters a pktpool, given the pool pointer; tag slot as free */
+static int
+BCMATTACHFN(pktpool_deregister)(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
+
+	poolid = POOLID(poolptr);
+	ASSERT(poolid <= pktpools_max);
+
+	/* Ascertain that a previously registered poolptr is being de-registered */
+	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
+		PKTPOOL_REGISTRY_CLR(poolid);		/* mark as free */
+	} else {
+		ASSERT(0);
+		return BCME_ERROR;	/* mismatch in registry */
+	}
+
+	return BCME_OK;
+}
+
+/**
+ * pktpool_init:
+ * User provides a pktpool_t structure and specifies the number of packets to
+ * be pre-filled into the pool (n_pkts).
+ * pktpool_init first attempts to register the pool and fetch a unique poolid.
+ * If registration fails, it is considered a BCME_ERROR, caused either by the
+ * registry not being pre-created (pktpool_attach) or by the registry being full.
+ * If registration succeeds, then the requested number of packets will be filled
+ * into the pool as part of initialization. In the event that there is no
+ * available memory to service the request, then BCME_NOMEM will be returned
+ * along with the count of how many packets were successfully allocated.
+ * In dongle builds, prior to memory reclamation, one should limit the number
+ * of packets to be allocated during pktpool_init and fill the pool up after
+ * the reclaim stage.
+ *
+ * @param n_pkts         Number of packets to be pre-filled into the pool
+ * @param max_pkt_bytes  The size of all packets in a pool must be the same. E.g. PKTBUFSZ.
+ * @param type           e.g. 'lbuf_frag'
+ */
+int
+BCMATTACHFN(pktpool_init)(osl_t *osh,
+	pktpool_t *pktp,
+	int *n_pkts,
+	int max_pkt_bytes,
+	bool istx,
+	uint8 type,
+	bool is_heap_pool,
+	uint32 heap_pool_flag,
+	uint16 min_backup_buf)
+{
+	int i, err = BCME_OK;
+	int pktplen;
+	uint8 pktp_id;
+
+	ASSERT(pktp != NULL);
+	ASSERT(osh != NULL);
+	ASSERT(n_pkts != NULL);
+
+	pktplen = *n_pkts;
+
+	bzero(pktp, sizeof(pktpool_t));
+
+	/* assign a unique pktpool id */
+	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
+		return BCME_ERROR;
+	}
+	POOLSETID(pktp, pktp_id);
+
+	pktp->inited = TRUE;
+	pktp->istx = istx ? TRUE : FALSE;
+	pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
+	pktp->type = type;
+
+#ifdef POOL_HEAP_RECONFIG
+	pktp->poolheap_flag = heap_pool_flag;
+	pktp->poolheap_count = 0;
+	pktp->min_backup_buf = min_backup_buf;
+	if (is_heap_pool) {
+		if (rte_freelist_mgr_register(&pktp->mem_handle,
+			hnd_pktpool_heap_get_cb,
+			lb_get_pktalloclen(type, max_pkt_bytes),
+			pktp) != BCME_OK) {
+			return BCME_ERROR;
+		}
+	}
+	pktp->is_heap_pool = is_heap_pool;
+#endif
+	if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
+		return BCME_ERROR;
+	}
+
+	pktp->maxlen = PKTPOOL_LEN_MAX;
+	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
+
+	for (i = 0; i < pktplen; i++) {
+		void *p;
+#ifdef _RTE_
+		/* For rte builds, use PKTALLOC rather than PKTGET.
+		 * Avoids the same pkts being dequeued and enqueued to the pool
+		 * when allocation fails.
+		 */
+		p = PKTALLOC(osh, max_pkt_bytes, type);
+#else
+		p = PKTGET(osh, max_pkt_bytes, TRUE);
+#endif
+
+		if (p == NULL) {
+			/* Not able to allocate all requested pkts,
+			 * so just return what was actually allocated.
+			 * We can add to the pool later.
+			 */
+			if (pktp->freelist == NULL) /* pktpool free list is empty */
+				err = BCME_NOMEM;
+
+			goto exit;
+		}
+
+		PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
+
+		PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
+		pktp->freelist = p;
+
+		pktp->avail++;
+
+		ASSERT(total_pool_pktid_count > 0);
+		total_pool_pktid_count--;
+
+#ifdef BCMDBG_POOL
+		pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+	}
+
+exit:
+	pktp->n_pkts = pktp->avail;
+
+	*n_pkts = pktp->n_pkts; /* number of packets managed by pool */
+	return err;
+} /* pktpool_init */
+
+/**
+ * pktpool_deinit:
+ * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
+ * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
+ * An assert is in place to ensure that there are no packets still lingering
+ * around. Packets freed to a pool after the deinit will cause memory
+ * corruption as the pktpool_t structure no longer exists.
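+ *
+ * Teardown sketch (editor's example, mirroring hnd_pktpool_deinit() below;
+ * assumes every pkt has already been returned to the pool):
+ *
+ *   if (pool->inited)
+ *       pktpool_deinit(osh, pool);   // frees all pooled pkts back to the heap
+ *   hnd_free(pool);                  // then release the pktpool_t itself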
+ */
+int
+BCMATTACHFN(pktpool_deinit)(osl_t *osh, pktpool_t *pktp)
+{
+	uint16 freed = 0;
+
+	ASSERT(osh != NULL);
+	ASSERT(pktp != NULL);
+
+#ifdef BCMDBG_POOL
+	{
+		int i;
+		for (i = 0; i <= pktp->n_pkts; i++) {
+			pktp->dbg_q[i].p = NULL;
+		}
+	}
+#endif
+
+	while (pktp->freelist != NULL) {
+		void * p = pktp->freelist;
+
+		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+		PKTSETFREELIST(p, NULL);
+
+		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+		total_pool_pktid_count++;
+		PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+		freed++;
+		ASSERT(freed <= pktp->n_pkts);
+	}
+
+	pktp->avail -= freed;
+	ASSERT(pktp->avail == 0);
+
+	pktp->n_pkts -= freed;
+
+	pktpool_deregister(pktp); /* release previously acquired unique pool id */
+	POOLSETID(pktp, PKTPOOL_INVALID_ID);
+
+	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	pktp->inited = FALSE;
+
+	/* Are there still pending pkts? */
+	ASSERT(pktp->n_pkts == 0);
+
+	return 0;
+}
+
+int
+pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
+{
+	void *p;
+	int err = 0;
+	int n_pkts, psize, maxlen;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+#ifdef BCMRXDATAPOOL
+	ASSERT((pktp->max_pkt_bytes != 0) || (pktp->type == lbuf_rxfrag));
+#else
+	ASSERT(pktp->max_pkt_bytes != 0);
+#endif /* BCMRXDATAPOOL */
+
+	maxlen = pktp->maxlen;
+	psize = minimal ? (maxlen >> 2) : maxlen;
+	n_pkts = (int)pktp->n_pkts;
+#ifdef POOL_HEAP_RECONFIG
+	/*
+	 * Consider the packets released to the freelist mgr also
+	 * as part of the pool size
+	 */
+	n_pkts += pktp->is_heap_pool ?
+		pktp->poolheap_count : 0;
+#endif
+	for (; n_pkts < psize; n_pkts++) {
+
+#ifdef _RTE_
+		/* For rte builds, use PKTALLOC rather than PKTGET.
+		 * Avoids the same pkts being dequeued and enqueued to the pool
+		 * when allocation fails. All pkts in a pool have the same length.
+		 */
+		p = PKTALLOC(osh, pktp->max_pkt_bytes, pktp->type);
+#else
+		/* allocate at the pool's fixed packet size (all pkts in a pool
+		 * have the same length)
+		 */
+		p = PKTGET(osh, pktp->max_pkt_bytes, TRUE);
+#endif
+
+		if (p == NULL) {
+			err = BCME_NOMEM;
+			break;
+		}
+
+		if (pktpool_add(pktp, p) != BCME_OK) {
+			PKTFREE(osh, p, FALSE);
+			err = BCME_ERROR;
+			break;
+		}
+		ASSERT(total_pool_pktid_count > 0);
+		total_pool_pktid_count--;
+	}
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	if (pktp->cbcnt) {
+		if (pktp->empty == FALSE)
+			pktpool_avail_notify(pktp);
+	}
+
+	return err;
+}
+
+#ifdef BCMPOOLRECLAIM
+/* New API to remove pkts from the pool without a full deinit */
+uint16
+pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt, uint8 action)
+{
+	uint16 freed = 0;
+
+	pktpool_cb_extn_t cb = NULL;
+	void *arg = NULL;
+	void *rem_list_head = NULL;
+	void *rem_list_tail = NULL;
+	bool dont_free = FALSE;
+
+	ASSERT(osh != NULL);
+	ASSERT(pktp != NULL);
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
+		return freed;
+	}
+
+	if (pktp->avail < free_cnt) {
+		free_cnt = pktp->avail;
+	}
+
+	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
+		/* If pool is shared rx frag pool, use call back fn to reclaim host address
+		 * and Rx cpl ID associated with the pkt.
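+		 * With action == FREE_ALL_FRAG_PKTS, only lbufs marked as coming
+		 * from frag-free memory (PKTISFRMFRAG) are actually freed; the
+		 * others are parked on a temporary list (rem_list_head/tail) and
+		 * re-linked to the freelist after the loop below.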
+ */ + ASSERT(pktp->cbext.cb != NULL); + + cb = pktp->cbext.cb; + arg = pktp->cbext.arg; + + } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) { + /* If pool is shared rx pool, use call back fn to freeup Rx cpl ID + * associated with the pkt. + */ + cb = pktp->rxcplidfn.cb; + arg = pktp->rxcplidfn.arg; + } + + while ((pktp->freelist != NULL) && (free_cnt)) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + dont_free = FALSE; + + if (action == FREE_ALL_FRAG_PKTS) { + /* Free lbufs which are marked as frag_free_mem */ + if (!PKTISFRMFRAG(p)) { + dont_free = TRUE; + } + } + + if (dont_free) { + if (rem_list_head == NULL) { + rem_list_head = p; + } else { + PKTSETFREELIST(rem_list_tail, p); + } + rem_list_tail = p; + continue; + } + if (cb != NULL) { + if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) { + PKTSETFREELIST(p, pktp->freelist); + pktp->freelist = p; + break; + } + } + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + pktp->avail--; + pktp->n_pkts--; + + total_pool_pktid_count++; + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + freed++; + free_cnt--; + } + + if (rem_list_head) { + PKTSETFREELIST(rem_list_tail, pktp->freelist); + pktp->freelist = rem_list_head; + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) { + return freed; + } + + return freed; +} +#endif /* #ifdef BCMPOOLRECLAIM */ + +/* New API to empty the pkts from pool, but not deinit +* NOTE: caller is responsible to ensure, +* all pkts are available in pool for free; else LEAK ! +*/ +int +pktpool_empty(osl_t *osh, pktpool_t *pktp) +{ + uint16 freed = 0; + + ASSERT(osh != NULL); + ASSERT(pktp != NULL); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + +#ifdef BCMDBG_POOL + { + int i; + for (i = 0; i <= pktp->n_pkts; i++) { + pktp->dbg_q[i].p = NULL; + } + } +#endif + + while (pktp->freelist != NULL) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + total_pool_pktid_count++; + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + freed++; + ASSERT(freed <= pktp->n_pkts); + } + + pktp->avail -= freed; + ASSERT(pktp->avail == 0); + + pktp->n_pkts -= freed; + + ASSERT(pktp->n_pkts == 0); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +BCMPOSTTRAPFN(pktpool_avail)(pktpool_t *pktpool) +{ + int avail = pktpool->avail; + + if (avail == 0) { + pktpool_emptycb_disable(pktpool, FALSE); + } + + return avail; +} + +static void * +BCMPOSTTRAPFASTPATH(pktpool_deq)(pktpool_t *pktp) +{ + void *p = NULL; + + if (pktp->avail == 0) + return NULL; + + ASSERT_FP(pktp->freelist != NULL); + + p = pktp->freelist; /* dequeue packet from head of pktpool free list */ + pktp->freelist = PKTFREELIST(p); /* free list points to next packet */ + +#if defined(DONGLEBUILD) && defined(SRMEM) + if (SRMEM_ENAB()) { + PKTSRMEM_INC_INUSE(p); + } +#endif /* DONGLEBUILD && SRMEM */ + + PKTSETFREELIST(p, NULL); + + pktp->avail--; + + return p; +} + +static void +BCMPOSTTRAPFASTPATH(pktpool_enq)(pktpool_t *pktp, void *p) +{ + ASSERT_FP(p != NULL); + + PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */ + 
pktp->freelist = p; /* free list points to newly inserted packet */ + +#if defined(DONGLEBUILD) && defined(SRMEM) + if (SRMEM_ENAB()) { + PKTSRMEM_DEC_INUSE(p); + } +#endif /* DONGLEBUILD && SRMEM */ + + pktp->avail++; + ASSERT_FP(pktp->avail <= pktp->n_pkts); +} + +/** utility for registering host addr fill function called from pciedev */ +int +BCMATTACHFN(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + ASSERT(pktp->cbext.cb == NULL); + pktp->cbext.cb = cb; + pktp->cbext.arg = arg; + return 0; +} + +int +BCMATTACHFN(pktpool_rxcplid_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + if (pktp == NULL) + return BCME_ERROR; + ASSERT(pktp->rxcplidfn.cb == NULL); + pktp->rxcplidfn.cb = cb; + pktp->rxcplidfn.arg = arg; + return 0; +} + +/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */ +void +pktpool_invoke_dmarxfill(pktpool_t *pktp) +{ + ASSERT(pktp->dmarxfill.cb); + ASSERT(pktp->dmarxfill.arg); + + if (pktp->dmarxfill.cb) + pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg); +} + +/** Registers callback functions for split rx mode */ +int +BCMATTACHFN(pkpool_haddr_avail_register_cb)(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + pktp->dmarxfill.cb = cb; + pktp->dmarxfill.arg = arg; + + return 0; +} + +/** + * Registers callback functions. + * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function + */ +int +pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + for (i = 0; i < pktp->cbcnt; i++) { + ASSERT(pktp->cbs[i].cb != NULL); + if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) { + pktp->cbs[i].refcnt++; + goto done; + } + } + + i = pktp->cbcnt; + if (i == PKTPOOL_CB_MAX_AVL) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->cbs[i].cb == NULL); + pktp->cbs[i].cb = cb; + pktp->cbs[i].arg = arg; + pktp->cbs[i].refcnt++; + pktp->cbcnt++; + + /* force enable empty callback */ + pktpool_emptycb_disable(pktp, FALSE); +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/* No BCMATTACHFN as it is used in a non-attach function */ +int +pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i, k; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) { + return BCME_ERROR; + } + + ASSERT(cb != NULL); + + for (i = 0; i < pktp->cbcnt; i++) { + ASSERT(pktp->cbs[i].cb != NULL); + if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) { + pktp->cbs[i].refcnt--; + if (pktp->cbs[i].refcnt) { + /* Still there are references to this callback */ + goto done; + } + /* Moving any more callbacks to fill the hole */ + for (k = i+1; k < pktp->cbcnt; i++, k++) { + pktp->cbs[i].cb = pktp->cbs[k].cb; + pktp->cbs[i].arg = pktp->cbs[k].arg; + pktp->cbs[i].refcnt = pktp->cbs[k].refcnt; + } + + /* reset the last callback */ + pktp->cbs[i].cb = NULL; + pktp->cbs[i].arg = NULL; + pktp->cbs[i].refcnt = 0; + + pktp->cbcnt--; + goto done; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) { + return BCME_ERROR; + } + + return err; +} + +/** Registers callback 
functions */ +int +BCMATTACHFN(pktpool_empty_register)(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + i = pktp->ecbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->ecbs[i].cb == NULL); + pktp->ecbs[i].cb = cb; + pktp->ecbs[i].arg = arg; + pktp->ecbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/** Calls registered callback functions */ +static int +BCMPOSTTRAPFN(pktpool_empty_notify)(pktpool_t *pktp) +{ + int i; + + pktp->empty = TRUE; + for (i = 0; i < pktp->ecbcnt; i++) { + ASSERT(pktp->ecbs[i].cb != NULL); + pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg); + } + pktp->empty = FALSE; + + return 0; +} + +#ifdef BCMDBG_POOL +int +pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb); + + i = pktp->dbg_cbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->dbg_cbs[i].cb == NULL); + pktp->dbg_cbs[i].cb = cb; + pktp->dbg_cbs[i].arg = arg; + pktp->dbg_cbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +int pktpool_dbg_notify(pktpool_t *pktp); + +int +pktpool_dbg_notify(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + for (i = 0; i < pktp->dbg_cbcnt; i++) { + ASSERT(pktp->dbg_cbs[i].cb); + pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg); + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_dbg_dump(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p); + printf("%d, p: 0x%x dur:%lu us state:%d\n", i, + pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p)); + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats) +{ + int i; + int state; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + bzero(stats, sizeof(pktpool_stats_t)); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + state = PKTPOOLSTATE(pktp->dbg_q[i].p); + switch (state) { + case POOL_TXENQ: + stats->enq++; break; + case POOL_TXDH: + stats->txdh++; break; + case POOL_TXD11: + stats->txd11++; break; + case POOL_RXDH: + stats->rxdh++; break; + case POOL_RXD11: + stats->rxd11++; break; + case POOL_RXFILL: + stats->rxfill++; break; + case POOL_IDLE: + stats->idle++; break; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + 
	return 0;
+}
+
+int
+pktpool_start_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		goto done;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			pktp->dbg_q[i].cycles = cycles;
+			break;
+		}
+	}
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+
+int pktpool_stop_trigger(pktpool_t *pktp, void *p);
+
+int
+pktpool_stop_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		goto done;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			if (pktp->dbg_q[i].cycles == 0)
+				break;
+
+			if (cycles >= pktp->dbg_q[i].cycles)
+				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
+			else
+				pktp->dbg_q[i].dur =
+					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
+
+			pktp->dbg_q[i].cycles = 0;
+			break;
+		}
+	}
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+#endif /* BCMDBG_POOL */
+
+int
+pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
+{
+	BCM_REFERENCE(osh);
+	ASSERT(pktp);
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	pktp->availcb_excl = NULL;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+
+int
+pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
+{
+	int i;
+	int err;
+	BCM_REFERENCE(osh);
+
+	ASSERT(pktp);
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	ASSERT(pktp->availcb_excl == NULL);
+	for (i = 0; i < pktp->cbcnt; i++) {
+		if (cb == pktp->cbs[i].cb) {
+			pktp->availcb_excl = &pktp->cbs[i];
+			break;
+		}
+	}
+
+	if (pktp->availcb_excl == NULL)
+		err = BCME_ERROR;
+	else
+		err = 0;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return err;
+}
+
+static void
+BCMPOSTTRAPFN(pktpool_avail_notify)(pktpool_t *pktp)
+{
+	int i, k, idx;
+
+	ASSERT(pktp);
+	pktpool_emptycb_disable(pktp, TRUE);
+
+	if (pktp->availcb_excl != NULL) {
+		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
+		return;
+	}
+
+	k = pktp->cbcnt - 1;
+	for (i = 0; i < pktp->cbcnt; i++) {
+		/* Callbacks are disabled at entry to this function.
+		 * If avail is, say, 5 and the first callback consumes exactly 5
+		 * (due to the dma rxpost setting), the remaining callbacks would
+		 * never be notified if an avail check were done here.
+		 * So all cbs are called even when pktp->avail is zero, giving
+		 * each the opportunity to re-enable callbacks if its
+		 * operation is in progress / not completed.
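+		 * Example (editor's note): with cbcnt == 3, one pass calls
+		 * cbs[0], cbs[1], cbs[2]; after cbtoggle flips, the next pass
+		 * calls cbs[2], cbs[1], cbs[0], so no single callback always
+		 * gets first access to freshly available packets.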
+ */ + if (pktp->cbtoggle) + idx = i; + else + idx = k--; + + ASSERT(pktp->cbs[idx].cb != NULL); + pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg); + } + + /* Alternate between filling from head or tail + */ + pktp->cbtoggle ^= 1; + + return; +} + +#ifdef APP_RX +/* Update freelist and avail count for a given packet pool */ +void +BCMFASTPATH(pktpool_update_freelist)(pktpool_t *pktp, void *p, uint pkts_consumed) +{ + ASSERT_FP(pktp->avail >= pkts_consumed); + + pktp->freelist = p; + pktp->avail -= pkts_consumed; +} +#endif /* APP_RX */ + +/** Gets an empty packet from the caller provided pool */ +void * +BCMPOSTTRAPFASTPATH(pktpool_get_ext)(pktpool_t *pktp, uint8 type, uint *pktcnt) +{ + void *p = NULL; + uint pkts_requested = 1; +#if defined(DONGLEBUILD) + uint pkts_avail; + bool rxcpl = (pktp->rxcplidfn.cb != NULL) ? TRUE : FALSE; +#endif /* DONGLEBUILD */ + + if (pktcnt) { + pkts_requested = *pktcnt; + if (pkts_requested == 0) { + goto done; + } + } + +#if defined(DONGLEBUILD) + pkts_avail = pkts_requested; +#endif /* DONGLEBUILD */ + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + /* If there are lesser packets in the pool than requested, call + * pktpool_empty_notify() to reclaim more pkts. + */ + if (pktp->avail < pkts_requested) { + /* Notify and try to reclaim tx pkts */ + if (pktp->ecbcnt) { + pktpool_empty_notify(pktp); + } + + if (pktp->avail < pkts_requested) { + pktpool_emptycb_disable(pktp, FALSE); + if (pktp->avail == 0) { + goto done; + } + } + } + +#ifdef APP_RX + if (pktcnt) { + p = pktp->freelist; + if (pktp->avail < pkts_requested) { + pkts_avail = pktp->avail; + } + + /* For rx frags in APP, we need to return only the head of freelist and + * the caller operates on it and updates the avail count and freelist pointer + * using pktpool_update_freelist(). + */ + if (BCMSPLITRX_ENAB() && ((type == lbuf_rxfrag) || (type == lbuf_rxdata))) { + *pktcnt = pkts_avail; + goto done; + } + } else +#endif /* APP_RX */ + { + ASSERT_FP(pkts_requested == 1); + p = pktpool_deq(pktp); + } + + ASSERT_FP(p); + +#if defined(DONGLEBUILD) +#ifndef APP_RX + if (BCMSPLITRX_ENAB() && (type == lbuf_rxfrag)) { + /* If pool is shared rx pool, use call back fn to populate host address. + * In case of APP, callback may use lesser number of packets than what + * we have given to callback because of some resource crunch and the exact + * number of packets that are used by the callback are returned using + * (*pktcnt) and the pktpool freelist head is updated accordingly. 
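+		 * Editor's note, a caller-side sketch of this contract
+		 * ('used' and 'next_unused' are hypothetical names for what the
+		 * consumer tracks while walking the returned list):
+		 *   uint n = want;
+		 *   void *head = pktpool_get_ext(pktp, lbuf_rxfrag, &n);
+		 *   ... consume 'used' pkts starting at 'head' ...
+		 *   pktpool_update_freelist(pktp, next_unused, used);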
+		 */
+		ASSERT_FP(pktp->cbext.cb != NULL);
+		if (pktp->cbext.cb(pktp, pktp->cbext.arg, p, rxcpl, &pkts_avail)) {
+			pktpool_enq(pktp, p);
+			p = NULL;
+		}
+	}
+#endif /* APP_RX */
+
+	if ((type == lbuf_basic) && rxcpl) {
+		/* If pool is shared rx pool, use call back fn to populate Rx cpl ID */
+		ASSERT_FP(pktp->rxcplidfn.cb != NULL);
+		/* If rxcplblock is allocated */
+		if (pktp->rxcplidfn.cb(pktp, pktp->rxcplidfn.arg, p, TRUE, NULL)) {
+			pktpool_enq(pktp, p);
+			p = NULL;
+		}
+	}
+#endif /* DONGLEBUILD */
+
+done:
+	if ((pktp->avail == 0) && (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
+		pktp->emptycb_disable = EMPTYCB_DISABLED;
+	}
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void
+BCMFASTPATH(pktpool_nfree)(pktpool_t *pktp, void *head, void *tail, uint count)
+{
+#ifdef BCMRXDATAPOOL
+	void *_head = head;
+#endif /* BCMRXDATAPOOL */
+
+	if (count > 1) {
+		pktp->avail += (count - 1);
+
+#ifdef BCMRXDATAPOOL
+		while (--count) {
+			_head = PKTLINK(_head);
+			ASSERT_FP(_head);
+			pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, _head));
+		}
+#endif /* BCMRXDATAPOOL */
+
+		PKTSETFREELIST(tail, pktp->freelist);
+		pktp->freelist = PKTLINK(head);
+		PKTSETLINK(head, NULL);
+	}
+	pktpool_free(pktp, head);
+}
+
+void
+BCMPOSTTRAPFASTPATH(pktpool_free)(pktpool_t *pktp, void *p)
+{
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	ASSERT_FP(p != NULL);
+#ifdef BCMDBG_POOL
+	/* pktpool_stop_trigger(pktp, p); */
+#endif
+
+#ifdef BCMRXDATAPOOL
+	/* Free rx data buffer to rx data buffer pool */
+	if (PKT_IS_RX_PKT(OSH_NULL, p)) {
+		pktpool_t *_pktp = pktpool_shared_rxdata;
+		if (PKTISRXFRAG(OSH_NULL, p)) {
+			_pktp->cbext.cb(_pktp, _pktp->cbext.arg, p, REMOVE_RXCPLID, NULL);
+			PKTRESETRXFRAG(OSH_NULL, p);
+		}
+		pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, p));
+	}
+#endif /* BCMRXDATAPOOL */
+
+	pktpool_enq(pktp, p);
+
+	/**
+	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
+	 * If any avail callback functions are registered, send a notification
+	 * that a new packet is available in the pool.
+	 */
+	if (pktp->cbcnt) {
+		/* To use cpu cycles more efficiently, callbacks can be temporarily
+		 * disabled. This allows feeding on a burst basis as opposed to an
+		 * inefficient per-packet basis.
+		 */
+		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
+			/**
+			 * If the call originated from pktpool_empty_notify, the just
+			 * freed packet is needed in pktpool_get.
+			 * Therefore don't call pktpool_avail_notify.
+			 */
+			if (pktp->empty == FALSE)
+				pktpool_avail_notify(pktp);
+		} else {
+			/**
+			 * The callback is temporarily disabled, log that a packet has
+			 * been freed.
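+			 * The EMPTYCB_SKIPPED state recorded here is what makes a
+			 * later pktpool_emptycb_disable(pktp, FALSE) issue a catch-up
+			 * pktpool_avail_notify() for frees that happened while the
+			 * callback was disabled.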
+ */ + pktp->emptycb_disable = EMPTYCB_SKIPPED; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return; +} + +/** Adds a caller provided (empty) packet to the caller provided pool */ +int +pktpool_add(pktpool_t *pktp, void *p) +{ + int err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(p != NULL); + + if (pktp->n_pkts == pktp->maxlen) { + err = BCME_RANGE; + goto done; + } + + /* pkts in pool have same length */ + ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p)); + PKTSETPOOL(OSH_NULL, p, TRUE, pktp); + + pktp->n_pkts++; + pktpool_enq(pktp, p); + +#ifdef BCMDBG_POOL + pktp->dbg_q[pktp->dbg_qlen++].p = p; +#endif + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/** + * Force pktpool_setmaxlen () into RAM as it uses a constant + * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips. + */ +int +BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen) +{ + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (maxlen > PKTPOOL_LEN_MAX) + maxlen = PKTPOOL_LEN_MAX; + + /* if pool is already beyond maxlen, then just cap it + * since we currently do not reduce the pool len + * already allocated + */ + pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return pktp->maxlen; +} + +void +BCMPOSTTRAPFN(pktpool_emptycb_disable)(pktpool_t *pktp, bool disable) +{ + bool notify = FALSE; + ASSERT(pktp); + + /** + * To more efficiently use the cpu cycles, callbacks can be temporarily disabled. + * If callback is going to be re-enabled, check if any packet got + * freed and added back to the pool while callback was disabled. + * When this is the case do the callback now, provided that callback functions + * are registered and this call did not originate from pktpool_empty_notify. + */ + if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) && + (pktp->emptycb_disable == EMPTYCB_SKIPPED)) { + notify = TRUE; + } + + /* Enable or temporarily disable callback when packet becomes available. */ + if (disable) { + if (pktp->emptycb_disable == EMPTYCB_ENABLED) { + /* mark disabled only if enabled. + * if state is EMPTYCB_SKIPPED, it means already + * disabled and some pkts are freed. So don't lose the state + * of skipped to ensure calling pktpool_avail_notify(). + */ + pktp->emptycb_disable = EMPTYCB_DISABLED; + } + } else { + pktp->emptycb_disable = EMPTYCB_ENABLED; + } + if (notify) { + /* pktpool_emptycb_disable() is called from pktpool_avail_notify() and + * pktp->cbs. To have the result of most recent call, notify after + * emptycb_disable is modified. + * This change also prevents any recursive calls of pktpool_avail_notify() + * from pktp->cbs if pktpool_emptycb_disable() is called from them. 
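+	 * Typical burst usage (editor's sketch):
+	 *   pktpool_emptycb_disable(pktp, TRUE);   // batch begins
+	 *   ...many pktpool_free() calls...        // each marks EMPTYCB_SKIPPED
+	 *   pktpool_emptycb_disable(pktp, FALSE);  // single catch-up notify here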
+	 */
+		pktpool_avail_notify(pktp);
+	}
+}
+
+bool
+pktpool_emptycb_disabled(pktpool_t *pktp)
+{
+	ASSERT(pktp);
+	return pktp->emptycb_disable != EMPTYCB_ENABLED;
+}
+
+#ifdef BCMPKTPOOL
+#include
+
+pktpool_t *pktpool_shared = NULL;
+
+#ifdef BCMFRAGPOOL
+pktpool_t *pktpool_shared_lfrag = NULL;
+#ifdef BCMRESVFRAGPOOL
+pktpool_t *pktpool_resv_lfrag = NULL;
+struct resv_info *resv_pool_info = NULL;
+#endif /* BCMRESVFRAGPOOL */
+#endif /* BCMFRAGPOOL */
+
+#ifdef BCMALFRAGPOOL
+pktpool_t *pktpool_shared_alfrag = NULL;
+pktpool_t *pktpool_shared_alfrag_data = NULL;
+#endif /* BCMALFRAGPOOL */
+
+pktpool_t *pktpool_shared_rxlfrag = NULL;
+
+/* Rx data pool w/o rxfrag structure */
+pktpool_t *pktpool_shared_rxdata = NULL;
+
+static osl_t *pktpool_osh = NULL;
+
+/**
+ * Initializes several packet pools and allocates packets within those pools.
+ */
+int
+BCMATTACHFN(hnd_pktpool_init)(osl_t *osh)
+{
+	int err = BCME_OK;
+	int n, pktsz;
+	bool is_heap_pool;
+
+	BCM_REFERENCE(pktsz);
+	BCM_REFERENCE(is_heap_pool);
+
+	/* Construct a packet pool registry before initializing packet pools */
+	n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
+	if (n != PKTPOOL_MAXIMUM_ID) {
+		ASSERT(0);
+		err = BCME_ERROR;
+		goto error;
+	}
+
+	pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error;
+	}
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+	pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared_lfrag == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error;
+	}
+
+#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
+	resv_pool_info = hnd_resv_pool_alloc(osh);
+	if (resv_pool_info == NULL) {
+		err = BCME_NOMEM;
+		ASSERT(0);
+		goto error;
+	}
+	pktpool_resv_lfrag = resv_pool_info->pktp;
+	if (pktpool_resv_lfrag == NULL) {
+		err = BCME_ERROR;
+		ASSERT(0);
+		goto error;
+	}
+#endif /* BCMRESVFRAGPOOL */
+#endif /* BCMFRAGPOOL */
+
+#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
+	pktpool_shared_alfrag = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared_alfrag == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error;
+	}
+
+	pktpool_shared_alfrag_data = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared_alfrag_data == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error;
+	}
+#endif /* BCMALFRAGPOOL */
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+	pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared_rxlfrag == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error;
+	}
+#endif
+
+#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
+	pktpool_shared_rxdata = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared_rxdata == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error;
+	}
+#endif
+
+	/*
+	 * At this early stage, there's not enough memory to allocate all
+	 * requested pkts in the shared pool. Need to add to the pool
+	 * after reclaim
+	 *
+	 * n = NRXBUFPOST + SDPCMD_RXBUFS;
+	 *
+	 * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
+	 * registry is not initialized or the registry is depleted.
+	 *
+	 * A BCME_NOMEM error only indicates that the requested number of packets
+	 * were not filled into the pool.
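+	 *
+	 * Boot-time lifecycle (editor's summary): most pools are seeded here
+	 * with a single pkt (n = 1; the reserved pool intentionally gets none),
+	 * attach-time memory is then reclaimed, and hnd_pktpool_refill() later
+	 * grows each pool toward its pktpool_setmaxlen() limit.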
+ */ + n = 1; + MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */ + if ((err = pktpool_init(osh, pktpool_shared, + &n, PKTBUFSZ, FALSE, lbuf_basic, FALSE, 0, 0)) != BCME_OK) { + ASSERT(0); + goto error; + } + pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN); + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + n = 1; +#if (((defined(EVENTLOG_D3_PRESERVE) && !defined(EVENTLOG_D3_PRESERVE_DISABLED)) || \ + defined(BCMPOOLRECLAIM))) + is_heap_pool = TRUE; +#else + is_heap_pool = FALSE; +#endif /* (( EVENTLOG_D3_PRESERVE && !EVENTLOG_D3_PRESERVE_DISABLED) || BCMPOOLRECLAIM) */ + + if ((err = pktpool_init(osh, pktpool_shared_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag, + is_heap_pool, POOL_HEAP_FLAG_D3, SHARED_FRAG_POOL_LEN >> 3)) != + BCME_OK) { + ASSERT(0); + goto error; + } + pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN); + +#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED) + n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */ +#ifdef RESV_POOL_HEAP + is_heap_pool = TRUE; +#else + is_heap_pool = FALSE; +#endif /* RESV_POOL_HEAP */ + + if ((err = pktpool_init(osh, pktpool_resv_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag, + is_heap_pool, POOL_HEAP_FLAG_RSRVPOOL, 0)) != BCME_OK) { + ASSERT(0); + goto error; + } + pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN); +#endif /* RESVFRAGPOOL */ +#endif /* BCMFRAGPOOL */ + +#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED) + n = 1; + is_heap_pool = FALSE; + + if ((err = pktpool_init(osh, pktpool_shared_alfrag, &n, PKTFRAGSZ, TRUE, lbuf_alfrag, + is_heap_pool, 0, SHARED_ALFRAG_POOL_LEN >> 3)) != BCME_OK) { + ASSERT(0); + goto error; + } + pktpool_setmaxlen(pktpool_shared_alfrag, SHARED_ALFRAG_POOL_LEN); + + n = 0; + if ((err = pktpool_init(osh, pktpool_shared_alfrag_data, &n, TXPKTALFRAG_DATA_BUFSZ, TRUE, + lbuf_alfrag_data, FALSE, 0, SHARED_ALFRAG_DATA_POOL_LEN >> 3)) != BCME_OK) { + ASSERT(0); + goto error; + } + pktpool_setmaxlen(pktpool_shared_alfrag_data, SHARED_ALFRAG_DATA_POOL_LEN); + +#endif /* BCMCTFRAGPOOL */ + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) +#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED) + n = 1; + if ((err = pktpool_init(osh, pktpool_shared_rxdata, &n, RXPKTFRAGDATASZ, TRUE, lbuf_rxdata, + FALSE, 0, 0)) != BCME_OK) { + ASSERT(0); + goto error; + } + pktpool_setmaxlen(pktpool_shared_rxdata, SHARED_RXDATA_POOL_LEN); + + pktsz = 0; +#else + pktsz = RXPKTFRAGDATASZ; +#endif /* defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED) */ + +#ifdef RESV_POOL_HEAP + is_heap_pool = BCMPOOLRECLAIM_ENAB() ? 
TRUE : FALSE; +#else + is_heap_pool = FALSE; +#endif /* RESV_POOL_HEAP */ + + n = 1; + if ((err = pktpool_init(osh, pktpool_shared_rxlfrag, &n, pktsz, TRUE, lbuf_rxfrag, + is_heap_pool, POOL_HEAP_FLAG_D3, 0)) != BCME_OK) { + ASSERT(0); + goto error; + } + + pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN); +#endif /* defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) */ + +#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) + /* Attach poolreorg module */ + if ((frwd_poolreorg_info = poolreorg_attach(osh, +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_shared_lfrag, +#else + NULL, +#endif /* defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) */ +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + pktpool_shared_rxlfrag, +#else + NULL, +#endif /* BCMRXFRAGPOOL */ + pktpool_shared)) == NULL) { + ASSERT(0); + err = BCME_NOMEM; + goto error; + } +#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */ + + pktpool_osh = osh; + MALLOC_CLEAR_NOPERSIST(osh); + +#ifdef POOL_HEAP_RECONFIG + lbuf_free_cb_set(hnd_pktpool_lbuf_free_cb); +#endif + + return BCME_OK; + +error: + hnd_pktpool_deinit(osh); + + return err; +} /* hnd_pktpool_init */ + +void +BCMATTACHFN(hnd_pktpool_deinit)(osl_t *osh) +{ +#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) + if (frwd_poolreorg_info != NULL) { + poolreorg_detach(frwd_poolreorg_info); + } +#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */ + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + if (pktpool_shared_rxlfrag != NULL) { + if (pktpool_shared_rxlfrag->inited) { + pktpool_deinit(osh, pktpool_shared_rxlfrag); + } + + hnd_free(pktpool_shared_rxlfrag); + pktpool_shared_rxlfrag = (pktpool_t *)NULL; + } +#endif + +#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED) + if (pktpool_shared_rxdata != NULL) { + if (pktpool_shared_rxdata->inited) { + pktpool_deinit(osh, pktpool_shared_rxdata); + } + + hnd_free(pktpool_shared_rxdata); + pktpool_shared_rxdata = (pktpool_t *)NULL; + } +#endif + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + if (pktpool_shared_lfrag != NULL) { + if (pktpool_shared_lfrag->inited) { + pktpool_deinit(osh, pktpool_shared_lfrag); + } + hnd_free(pktpool_shared_lfrag); + pktpool_shared_lfrag = (pktpool_t *)NULL; + } +#endif /* BCMFRAGPOOL */ + +#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED) + if (pktpool_shared_alfrag != NULL) { + if (pktpool_shared_alfrag->inited) { + pktpool_deinit(osh, pktpool_shared_alfrag); + } + hnd_free(pktpool_shared_alfrag); + pktpool_shared_alfrag = (pktpool_t *)NULL; + } + + if (pktpool_shared_alfrag_data != NULL) { + if (pktpool_shared_alfrag_data->inited) { + pktpool_deinit(osh, pktpool_shared_alfrag_data); + } + + hnd_free(pktpool_shared_alfrag_data); + pktpool_shared_alfrag_data = (pktpool_t *)NULL; + } +#endif /* BCMFRAGPOOL */ + +#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED) + if (resv_pool_info != NULL) { + if (pktpool_resv_lfrag != NULL) { + pktpool_resv_lfrag = NULL; + } + hnd_free(resv_pool_info); + } +#endif /* RESVFRAGPOOL */ + + if (pktpool_shared != NULL) { + if (pktpool_shared->inited) { + pktpool_deinit(osh, pktpool_shared); + } + + hnd_free(pktpool_shared); + pktpool_shared = (pktpool_t *)NULL; + } + + pktpool_dettach(osh); + + MALLOC_CLEAR_NOPERSIST(osh); +} + +/** is called at each 'wl up' */ +int +hnd_pktpool_fill(pktpool_t *pktpool, bool minimal) +{ + return 
(pktpool_fill(pktpool_osh, pktpool, minimal));
+}
+
+/** refills pktpools after reclaim, is called once */
+void
+hnd_pktpool_refill(bool minimal)
+{
+	if (POOL_ENAB(pktpool_shared)) {
+#if defined(SRMEM)
+		if (SRMEM_ENAB()) {
+			int maxlen = pktpool_max_pkts(pktpool_shared);
+			int n_pkts = pktpool_tot_pkts(pktpool_shared);
+
+			for (; n_pkts < maxlen; n_pkts++) {
+				void *p;
+				if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
+					break;
+				pktpool_add(pktpool_shared, p);
+			}
+		}
+#endif /* SRMEM */
+		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
+	}
+/* fragpool reclaim */
+#ifdef BCMFRAGPOOL
+	if (POOL_ENAB(pktpool_shared_lfrag)) {
+		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
+	}
+#endif /* BCMFRAGPOOL */
+
+/* alfragpool reclaim */
+#ifdef BCMALFRAGPOOL
+	if (POOL_ENAB(pktpool_shared_alfrag)) {
+		pktpool_fill(pktpool_osh, pktpool_shared_alfrag, minimal);
+	}
+
+	if (POOL_ENAB(pktpool_shared_alfrag_data)) {
+		pktpool_fill(pktpool_osh, pktpool_shared_alfrag_data, minimal);
+	}
+#endif /* BCMALFRAGPOOL */
+
+/* rx fragpool reclaim */
+#ifdef BCMRXFRAGPOOL
+	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
+		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
+	}
+#endif
+
+#ifdef BCMRXDATAPOOL
+	if (POOL_ENAB(pktpool_shared_rxdata)) {
+		pktpool_fill(pktpool_osh, pktpool_shared_rxdata, minimal);
+	}
+#endif
+
+#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
+	if (POOL_ENAB(pktpool_resv_lfrag)) {
+		int resv_size = (pktpool_resv_lfrag->max_pkt_bytes + LBUFFRAGSZ) *
+			pktpool_resv_lfrag->maxlen;
+		hnd_resv_pool_init(resv_pool_info, resv_size);
+		hnd_resv_pool_enable(resv_pool_info);
+	}
+#endif /* BCMRESVFRAGPOOL */
+}
+
+#ifdef POOL_HEAP_RECONFIG
+#define hnd_pktpool_release_active_set(pktp)	(pktpool_heap_rel_active |= (1 << pktp->id))
+#define hnd_pktpool_release_active_reset(pktp)	(pktpool_heap_rel_active &= ~(1 << pktp->id))
+/* Function to enable/disable heap pool usage */
+
+void
+hnd_pktpool_heap_handle(osl_t *osh, uint32 flag, bool enable)
+{
+	int i = 0;
+	pktpool_t *pktp;
+	/*
+	 * Loop through all the registered pktpools.
+	 * Trigger retrieval of pkts from the heap back to the pool if no
+	 * flags are active.
+	 */
+	for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
+		if ((pktp = get_pktpools_registry(i)) != NULL) {
+			if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
+				if (enable) {
+					hnd_pktpool_heap_pkt_release(pktpool_osh, pktp, flag);
+				} else {
+					hnd_pktpool_heap_pkt_retrieve(pktp, flag);
+				}
+			}
+		}
+	}
+}
+
+/* Do memory allocation from pool heap memory */
+void *
+hnd_pktpool_freelist_alloc(uint size, uint alignbits, uint32 flag)
+{
+	int i = 0;
+	pktpool_t *pktp;
+	void *p = NULL;
+	for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
+		if ((pktp = get_pktpools_registry(i)) != NULL) {
+			if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
+				p = rte_freelist_mgr_alloc(size, alignbits, pktp->mem_handle);
+				if (p)
+					break;
+			}
+		}
+	}
+	return p;
+}
+
+/* Release pkts from pool to free heap */
+static void
+hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag)
+{
+	pktpool_cb_extn_t cb = NULL;
+	void *arg = NULL;
+	int i = 0;
+	pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
+
+	pktp->release_active = FALSE;
+	hnd_pktpool_release_active_reset(pktp);
+
+	if (pktp->n_pkts <= pktp->min_backup_buf)
+		return;
+	/* call module specific callbacks */
+	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
+		/* If pool is shared rx frag pool, use call back fn to reclaim host address
+		 * and Rx cpl ID associated with the pkt.
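+		 * Packets released here are handed to rte_freelist_mgr_add() so
+		 * their memory can serve heap allocations; min_backup_buf pkts
+		 * are always kept back so the pool never drains completely.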
+ */ + ASSERT(pktp->cbext.cb != NULL); + cb = pktp->cbext.cb; + arg = pktp->cbext.arg; + } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) { + /* If pool is shared rx pool, use call back fn to freeup Rx cpl ID + * associated with the pkt. + */ + cb = pktp->rxcplidfn.cb; + arg = pktp->rxcplidfn.arg; + } + + while (pktp->avail > pktp->min_backup_buf) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + if (cb != NULL) { + if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) { + PKTSETFREELIST(p, pktp->freelist); + pktp->freelist = p; + break; + } + } + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + lb_set_nofree(p); + total_pool_pktid_count++; + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + rte_freelist_mgr_add(p, pktp->mem_handle); + pktp->avail--; + pktp->n_pkts--; + pktp->poolheap_count++; + } + + /* Execute call back for upper layer which used pkt from heap */ + for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) { + if ((pktp_heap_cb[i].fn != NULL) && + (flag == pktp_heap_cb[i].flag)) + (pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, TRUE); + } + +} + +static pktpool_heap_cb_reg_t * +BCMRAMFN(hnd_pool_get_cb_registry)(void) +{ + return pktpool_heap_cb_reg; +} + +static void +BCMFASTPATH(hnd_pktpool_lbuf_free_cb)(uint8 poolid) +{ + int i = 0; + pktpool_t *pktp; + + if (poolid == PKTPOOL_INVALID_ID && pktpool_heap_rel_active) { + for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) { + if ((pktp = get_pktpools_registry(i)) != NULL) { + if (pktp->is_heap_pool && (pktp->release_active)) { + rte_freelist_mgr_release(pktp->mem_handle); + } + } + } + } +} + +/* Take back pkts from free mem and refill pool */ +static void +hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag) +{ + int i = 0; + pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry(); + pktp->release_active = TRUE; + hnd_pktpool_release_active_set(pktp); + + /* Execute call back for upper layer which used pkt from heap */ + for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) { + if ((pktp_heap_cb[i].fn != NULL) && + (flag == pktp_heap_cb[i].flag)) + (pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, FALSE); + } + + rte_freelist_mgr_release(pktp->mem_handle); +} + +/* Function to add back the pkt to pktpool */ +static int +hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize) +{ + pktpool_t *pktp = (pktpool_t *)ctxt; + struct lbuf *lb; + int ret = BCME_ERROR; + if (pktp != NULL) { + if ((lb = PKTALLOC_ON_LOC(pktpool_osh, pktp->max_pkt_bytes, + pktp->type, pkt, pktsize)) != NULL) { + if ((ret = pktpool_add(pktp, lb)) == BCME_OK) { + pktp->poolheap_count--; + ASSERT(total_pool_pktid_count > 0); + total_pool_pktid_count--; + if (pktp->poolheap_count == 0) { + pktp->release_active = FALSE; + hnd_pktpool_release_active_reset(pktp); + } + if (pktp->cbcnt) { + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } + } else { + /* + * pktpool_add failed indicate already max + * number of pkts are available in pool. 
So + * free this buffer to heap + */ + PKTFREE(pktpool_osh, lb, pktsize); + } + ret = BCME_OK; + } + } + return ret; +} + +int +hnd_pktpool_heap_register_cb(pktpool_heap_cb_t fn, void *ctxt, uint32 flag) +{ + int i = 0; + int err = BCME_ERROR; + pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry(); + + /* Search for free entry */ + for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) { + if (pktp_heap_cb[i].fn == NULL) + break; + } + + if (i < PKTPOOL_MAX_HEAP_CB) { + pktp_heap_cb[i].fn = fn; + pktp_heap_cb[i].ctxt = ctxt; + pktp_heap_cb[i].flag = flag; + err = BCME_OK; + } + return err; +} + +int +hnd_pktpool_heap_deregister_cb(pktpool_heap_cb_t fn) +{ + int i = 0; + int err = BCME_ERROR; + pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry(); + + /* Search for matching entry */ + for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) { + if (pktp_heap_cb[i].fn == fn) + break; + } + + if (i < PKTPOOL_MAX_HEAP_CB) { + pktp_heap_cb[i].fn = NULL; + err = BCME_OK; + } + return err; +} + +uint16 +hnd_pktpool_get_min_bkup_buf(pktpool_t *pktp) +{ + return pktp->min_backup_buf; +} +#endif /* POOL_HEAP_RECONFIG */ + +uint32 +hnd_pktpool_get_total_poolheap_count(void) +{ + return total_pool_pktid_count; +} +#endif /* BCMPKTPOOL */ diff --git a/bcmdhd.101.10.361.x/hnd_pktq.c b/bcmdhd.101.10.361.x/hnd_pktq.c new file mode 100755 index 0000000..bd5cc81 --- /dev/null +++ b/bcmdhd.101.10.361.x/hnd_pktq.c @@ -0,0 +1,1548 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#include +#include +#include +#include +#include + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTQ_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTQ_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTQ_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif /* HND_PKTQ_THREAD_SAFE */ + +/* status during txfifo sync */ +#if defined(PROP_TXSTATUS) +#define TXQ_PKT_DEL 0x01 +#define HEAD_PKT_FLUSHED 0xFF +#endif /* defined(PROP_TXSTATUS) */ +/* + * osl multiple-precedence packet queue + * hi_prec is always >= the number of the highest non-empty precedence + */ +void * +BCMFASTPATH(pktq_penq)(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + ASSERT_FP(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT_FP(!pktq_full(pq)); + ASSERT_FP(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->n_pkts++; + + pq->n_pkts_tot++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(spktq_enq_chain)(struct spktq *dspq, struct spktq *sspq) +{ + struct pktq_prec *dq; + struct pktq_prec *sq; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&dspq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&sspq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + dq = &dspq->q; + sq = &sspq->q; + + if (dq->head) { + PKTSETLINK(OSL_PHYS_TO_VIRT_ADDR(dq->tail), OSL_VIRT_TO_PHYS_ADDR(sq->head)); + } + else { + dq->head = sq->head; + } + + dq->tail = sq->tail; + dq->n_pkts += sq->n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&dspq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&sspq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return dspq; +} + +/* + * osl simple, non-priority packet queue + */ +void * +BCMFASTPATH(spktq_enq)(struct spktq *spq, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(!spktq_full(spq)); + + PKTSETLINK(p, NULL); + + q = &spq->q; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->n_pkts++; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMPOSTTRAPFASTPATH(pktq_penq_head)(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + ASSERT_FP(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT_FP(!pktq_full(pq)); + 
ASSERT_FP(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->n_pkts++; + + pq->n_pkts_tot++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(spktq_enq_head)(struct spktq *spq, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(!spktq_full(spq)); + + PKTSETLINK(p, NULL); + + q = &spq->q; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->n_pkts++; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(pktq_pdeq)(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(spktq_deq)(struct spktq *spq) +{ + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + q = &spq->q; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void* +BCMFASTPATH(spktq_deq_virt)(struct spktq *spq) +{ + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + q = &spq->q; + + if ((p = q->head) == NULL) + goto done; + + p = (void *)OSL_PHYS_TO_VIRT_ADDR(p); + + if ((q->head = (void*)PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(pktq_pdeq_tail)(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p, *prev; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(spktq_deq_tail)(struct spktq *spq) +{ + struct pktq_prec *q; + 
void *p, *prev; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + q = &spq->q; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->n_pkts--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek_tail(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].tail; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +/* + * Append spktq 'list' to the tail of pktq 'pq' + */ +void +BCMFASTPATH(pktq_append)(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT_FP(!pktq_full(pq)); + ASSERT_FP(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, list_q->head); + else + q->head = list_q->head; + + q->tail = list_q->tail; + q->n_pkts += list_q->n_pkts; + pq->n_pkts_tot += list_q->n_pkts; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Append spktq 'list' to the tail of spktq 'spq' + */ +void +BCMFASTPATH(spktq_append)(struct spktq *spq, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT_FP(!spktq_full(spq)); + + q = &spq->q; + + if (q->head) + PKTSETLINK(q->tail, list_q->head); + else + q->head = list_q->head; + + q->tail = list_q->tail; + q->n_pkts += list_q->n_pkts; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Prepend spktq 'list' to the head of pktq 'pq' + */ +void +BCMFASTPATH(pktq_prepend)(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + 
goto done; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT_FP(!pktq_full(pq)); + ASSERT_FP(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + /* set the tail packet of list to point at the former pq head */ + PKTSETLINK(list_q->tail, q->head); + /* the new q head is the head of list */ + q->head = list_q->head; + + /* If the q tail was non-null, then it stays as is. + * If the q tail was null, it is now the tail of list + */ + if (q->tail == NULL) { + q->tail = list_q->tail; + } + + q->n_pkts += list_q->n_pkts; + pq->n_pkts_tot += list_q->n_pkts; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Prepend spktq 'list' to the head of spktq 'spq' + */ +void +BCMFASTPATH(spktq_prepend)(struct spktq *spq, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT_FP(!spktq_full(spq)); + + q = &spq->q; + + /* set the tail packet of list to point at the former pq head */ + PKTSETLINK(list_q->tail, q->head); + /* the new q head is the head of list */ + q->head = list_q->head; + + /* If the q tail was non-null, then it stays as is. + * If the q tail was null, it is now the tail of list + */ + if (q->tail == NULL) { + q->tail = list_q->tail; + } + + q->n_pkts += list_q->n_pkts; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * +BCMFASTPATH(pktq_pdeq_prev)(struct pktq *pq, int prec, void *prev_p) +{ + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if (prev_p == NULL) + goto done; + + if ((p = PKTLINK(prev_p)) == NULL) + goto done; + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + PKTSETLINK(prev_p, PKTLINK(p)); + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(pktq_pdeq_with_fn)(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + goto done; + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef 
WL_TXQ_STALL + q->dequeue_count++; +#endif + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +bool +BCMFASTPATH(pktq_pdel)(struct pktq *pq, void *pktbuf, int prec) +{ + bool ret = FALSE; + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + + /* Should this just assert pktbuf? */ + if (!pktbuf) + goto done; + + q = &pq->q[prec]; + + if (q->head == pktbuf) { + if ((q->head = PKTLINK(pktbuf)) == NULL) + q->tail = NULL; + } else { + for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) + ; + if (p == NULL) + goto done; + + PKTSETLINK(p, PKTLINK(pktbuf)); + if (q->tail == pktbuf) + q->tail = p; + } + + q->n_pkts--; + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + PKTSETLINK(pktbuf, NULL); + ret = TRUE; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +static void +_pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx) +{ + struct pktq_prec wq; + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* move the prec queue aside to a work queue */ + q = &pq->q[prec]; + + wq = *q; + + q->head = NULL; + q->tail = NULL; + q->n_pkts = 0; + +#ifdef WL_TXQ_STALL + q->dequeue_count += wq.n_pkts; +#endif + + pq->n_pkts_tot -= wq.n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + /* start with the head of the work queue */ + while ((p = wq.head) != NULL) { + /* unlink the current packet from the list */ + wq.head = PKTLINK(p); + PKTSETLINK(p, NULL); + wq.n_pkts--; + +#ifdef WL_TXQ_STALL + wq.dequeue_count++; +#endif + + /* call the filter function on current packet */ + ASSERT(fltr != NULL); + switch ((*fltr)(fltr_ctx, p)) { + case PKT_FILTER_NOACTION: + /* put this packet back */ + pktq_penq(pq, prec, p); + break; + + case PKT_FILTER_DELETE: + /* delete this packet */ + ASSERT(defer != NULL); + (*defer)(defer_ctx, p); + break; + + case PKT_FILTER_REMOVE: + /* pkt already removed from list */ + break; + + default: + ASSERT(0); + break; + } + } + + ASSERT(wq.n_pkts == 0); +} + +void +pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx); + + ASSERT(flush != NULL); + (*flush)(flush_ctx); +} + +void +pktq_filter(struct pktq *pq, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + bool filter = FALSE; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* Optimize if pktq n_pkts = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. 
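+     *
+     * Illustrative sketch (not in the original patch): a filter callback
+     * returns one of the PKT_FILTER_* actions handled in _pktq_pfilter()
+     * above, e.g. a drop-matching-packets filter:
+     *
+     *     static int my_fltr(void *ctx, void *pkt)
+     *     {
+     *         return pkt_matches(ctx, pkt) ?
+     *                 PKT_FILTER_DELETE : PKT_FILTER_NOACTION;
+     *     }
+     *
+     * my_fltr and pkt_matches are hypothetical names, and int stands in
+     * for the callback's actual result typedef.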
+ */ + if (pq->n_pkts_tot > 0) { + filter = TRUE; + } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + if (filter) { + int prec; + + PKTQ_PREC_ITER(pq, prec) { + _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx); + } + + ASSERT(flush != NULL); + (*flush)(flush_ctx); + } +} + +void +spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + struct pktq_prec wq; + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + q = &spq->q; + + /* Optimize if pktq_prec n_pkts = 0, just return. */ + if (q->n_pkts == 0) { + (void)HND_PKTQ_MUTEX_RELEASE(&spq->mutex); + return; + } + + wq = *q; + + q->head = NULL; + q->tail = NULL; + q->n_pkts = 0; + +#ifdef WL_TXQ_STALL + q->dequeue_count += wq.n_pkts; +#endif + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return; + + /* start with the head of the work queue */ + + while ((p = wq.head) != NULL) { + /* unlink the current packet from the list */ + wq.head = PKTLINK(p); + PKTSETLINK(p, NULL); + wq.n_pkts--; + +#ifdef WL_TXQ_STALL + wq.dequeue_count++; +#endif + + /* call the filter function on current packet */ + ASSERT(fltr != NULL); + switch ((*fltr)(fltr_ctx, p)) { + case PKT_FILTER_NOACTION: + /* put this packet back */ + spktq_enq(spq, p); + break; + + case PKT_FILTER_DELETE: + /* delete this packet */ + ASSERT(defer != NULL); + (*defer)(defer_ctx, p); + break; + + case PKT_FILTER_REMOVE: + /* pkt already removed from list */ + break; + + default: + ASSERT(0); + break; + } + } + + ASSERT(wq.n_pkts == 0); + + ASSERT(flush != NULL); + (*flush)(flush_ctx); +} + +bool +pktq_init(struct pktq *pq, int num_prec, uint max_pkts) +{ + int prec; + + ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); + + /* pq is variable size; only zero out what's requested */ + bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); + + if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + pq->num_prec = (uint16)num_prec; + + pq->max_pkts = (uint16)max_pkts; + + for (prec = 0; prec < num_prec; prec++) + pq->q[prec].max_pkts = pq->max_pkts; + + return TRUE; +} + +bool +spktq_init(struct spktq *spq, uint max_pkts) +{ + bzero(spq, sizeof(struct spktq)); + + if (HND_PKTQ_MUTEX_CREATE("spktq", &spq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + spq->q.max_pkts = (uint16)max_pkts; + + return TRUE; +} + +bool +spktq_init_list(struct spktq *spq, uint max_pkts, void *head, void *tail, uint16 n_pkts) +{ + if (HND_PKTQ_MUTEX_CREATE("spktq", &spq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(PKTLINK(tail) == NULL); + PKTSETLINK(tail, NULL); + spq->q.head = head; + spq->q.tail = tail; + spq->q.max_pkts = (uint16)max_pkts; + spq->q.n_pkts = n_pkts; + spq->q.stall_count = 0; + spq->q.dequeue_count = 0; + + return TRUE; +} + +bool +pktq_deinit(struct pktq *pq) +{ + BCM_REFERENCE(pq); + if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return TRUE; +} + +bool +spktq_deinit(struct spktq *spq) +{ + BCM_REFERENCE(spq); + if (HND_PKTQ_MUTEX_DELETE(&spq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return TRUE; +} + +void +pktq_set_max_plen(struct pktq *pq, int prec, uint max_pkts) +{ + ASSERT(prec >= 0 && prec < pq->num_prec); + + /* protect shared resource */ + if 
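+    /* Illustrative sketch (not in the original patch): a typical pktq
+     * lifetime using the init/deinit pair above:
+     *
+     *     struct pktq txq;
+     *     if (!pktq_init(&txq, 4, 256))   // 4 precs, 256 pkts (arbitrary)
+     *         return;
+     *     ...
+     *     pktq_flush(osh, &txq, TRUE);    // free anything still queued
+     *     pktq_deinit(&txq);              // deletes the mutex
+     */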
(HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + if (prec < pq->num_prec) + pq->q[prec].max_pkts = (uint16)max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * +BCMFASTPATH(pktq_deq)(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +BCMFASTPATH(pktq_deq_tail)(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL, *prev; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].head; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +spktq_peek(struct spktq *spq) +{ + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (spq->q.n_pkts == 0) + goto done; + + p = spq->q.head; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir) +{ + void *p; + + /* no need for a mutex protection! */ + + /* start with the head of the list */ + while ((p = pktq_pdeq(pq, prec)) != NULL) { + + /* delete this packet */ + PKTFREE(osh, p, dir); + } +} + +void +spktq_flush_ext(osl_t *osh, struct spktq *spq, bool dir, + void (*pktq_flush_cb)(void *ctx, void *pkt), void *pktq_flush_ctx) +{ + void *pkt; + + /* no need for a mutex protection! 
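+     * (each spktq_deq() call in the loop below acquires and releases the
+     * queue mutex internally, so the flush loop needs no extra lock)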
*/ + + /* start with the head of the list */ + while ((pkt = spktq_deq(spq)) != NULL) { + if (pktq_flush_cb != NULL) { + pktq_flush_cb(pktq_flush_ctx, pkt); + } + /* delete this packet */ + PKTFREE(osh, pkt, dir); + } +} + +typedef struct { + spktq_cb_t cb; + void *arg; +} spktq_cbinfo_t; +static spktq_cbinfo_t spktq_cbinfo = {NULL, NULL}; +static spktq_cbinfo_t *spktq_cbinfo_get(void); + +/* Accessor function forced into RAM to keep spktq_cbinfo out of shdat */ +static spktq_cbinfo_t* +BCMRAMFN(spktq_cbinfo_get)(void) +{ + return (&spktq_cbinfo); +} + +void +BCMATTACHFN(spktq_free_register)(spktq_cb_t cb, void *arg) +{ + spktq_cbinfo_t *cbinfop = spktq_cbinfo_get(); + cbinfop->cb = cb; + cbinfop->arg = arg; +} + +void +spktq_cb(void *spq) +{ + spktq_cbinfo_t *cbinfop = spktq_cbinfo_get(); + if (cbinfop->cb) { + cbinfop->cb(cbinfop->arg, spq); + } +} + +void +pktq_flush(osl_t *osh, struct pktq *pq, bool dir) +{ + bool flush = FALSE; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* Optimize flush, if pktq n_pkts_tot = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->n_pkts_tot > 0) { + flush = TRUE; + } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + if (flush) { + int prec; + + PKTQ_PREC_ITER(pq, prec) { + pktq_pflush(osh, pq, prec, dir); + } + } +} + +/* Return sum of lengths of a specific set of precedences */ +int +pktq_mlen(struct pktq *pq, uint prec_bmp) +{ + int prec, len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + len = 0; + + for (prec = 0; prec <= pq->hi_prec; prec++) + if (prec_bmp & (1 << prec)) + len += pq->q[prec].n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return len; +} + +/* Priority peek from a specific set of precedences */ +void * +BCMFASTPATH(pktq_mpeek)(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if (prec_out) + *prec_out = prec; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} +/* Priority dequeue from a specific set of precedences */ +void * +BCMPOSTTRAPFASTPATH(pktq_mdeq)(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0)) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + + // terence 20150308: fix for non-null pointer of skb->prev sent from ndo_start_xmit + if 
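+    /* Illustrative sketch (not in the original patch): prec_bmp carries
+     * one bit per precedence index, so draining only precs 2 and 3 is:
+     *
+     *     while ((p = pktq_mdeq(pq, (1 << 2) | (1 << 3), &prec)) != NULL)
+     *         PKTFREE(osh, p, TRUE);
+     *
+     * Packets are returned highest eligible precedence first.
+     */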
(q->n_pkts == 0) { + q->head = NULL; + q->tail = NULL; + } + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif + + if (prec_out) + *prec_out = prec; + + pq->n_pkts_tot--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +#ifdef HND_PKTQ_THREAD_SAFE +int +pktqprec_avail_pkts(struct pktq *pq, int prec) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].max_pkts - pq->q[prec].n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +BCMFASTPATH(pktqprec_full)(struct pktq *pq, int prec) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT_FP(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].n_pkts >= pq->q[prec].max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +int +pktq_avail(struct pktq *pq) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ret = pq->max_pkts - pq->n_pkts_tot; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +int +spktq_avail(struct spktq *spq) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ret = spq->q.max_pkts - spq->q.n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_full(struct pktq *pq) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ret = pq->n_pkts_tot >= pq->max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +bool +spktq_full(struct spktq *spq) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ret = spq->q.n_pkts >= spq->q.max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +#endif /* HND_PKTQ_THREAD_SAFE */ diff --git a/bcmdhd.101.10.361.x/hndlhl.c b/bcmdhd.101.10.361.x/hndlhl.c new file mode 100755 index 0000000..0695e09 --- /dev/null +++ b/bcmdhd.101.10.361.x/hndlhl.c @@ -0,0 +1,1241 @@ +/* + * Misc utility routines for accessing lhl specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SI_LHL_EXT_WAKE_REQ_MASK_MAGIC 0x7FBBF7FF /* magic number for LHL EXT */ + +/* PmuRev1 has a 24-bit PMU RsrcReq timer. However it pushes all other bits + * upward. To make the code to run for all revs we use a variable to tell how + * many bits we need to shift. + */ +#define FLAGS_SHIFT 14 +#define LHL_ERROR(args) printf args +static const char BCMATTACHDATA(rstr_rfldo3p3_cap_war)[] = "rfldo3p3_cap_war"; +static const char BCMATTACHDATA(rstr_abuck_volt_sleep)[] = "abuck_volt_sleep"; +static const char BCMATTACHDATA(rstr_cbuck_volt_sleep)[] = "cbuck_volt_sleep"; + +void +si_lhl_setup(si_t *sih, osl_t *osh) +{ + if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID)) { + /* Enable PMU sleep mode0 */ +#ifdef BCMQT + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_0); +#else + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_2); +#endif + /* Modify as per the + BCM43012/LHL#LHL-RecommendedsettingforvariousPMUSleepModes: + */ + LHL_REG(sih, lhl_top_pwrup_ctl_adr, LHL_PWRUP_CTL_MASK, LHL_PWRUP_CTL); + LHL_REG(sih, lhl_top_pwrup2_ctl_adr, LHL_PWRUP2_CTL_MASK, LHL_PWRUP2_CTL); + LHL_REG(sih, lhl_top_pwrdn_ctl_adr, LHL_PWRDN_CTL_MASK, LHL_PWRDN_SLEEP_CNT); + LHL_REG(sih, lhl_top_pwrdn2_ctl_adr, LHL_PWRDN2_CTL_MASK, LHL_PWRDN2_CTL); + } + + if (!FWSIGN_ENAB() && si_hib_ext_wakeup_isenab(sih)) { + /* + * Enable wakeup on GPIO1, PCIE clkreq and perst signal, + * GPIO[0] is mapped to GPIO1 + * GPIO[1] is mapped to PCIE perst + * GPIO[2] is mapped to PCIE clkreq + */ + + /* GPIO1 */ + /* Clear any old interrupt status */ + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN); + /* active high level trigger */ + LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_GPIO1_GPIO_PIN], ~0, + 1 << GCI_GPIO_STS_WL_DIN_SELECT); + LHL_REG(sih, gpio_int_en_port_adr[0], + 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN); + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN); + si_gci_set_functionsel(sih, 1, CC_FNSEL_SAMEASPIN); + + /* PCIE perst */ + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN); + LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_PERST_GPIO_PIN], ~0, + (1 << GCI_GPIO_STS_EDGE_TRIG_BIT | + 1 << GCI_GPIO_STS_WL_DIN_SELECT)); + LHL_REG(sih, gpio_int_en_port_adr[0], + 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN); + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN); + + /* PCIE 
clkreq */
+        LHL_REG(sih, gpio_int_st_port_adr[0],
+            1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
+        LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_CLKREQ_GPIO_PIN], ~0,
+            (1 << GCI_GPIO_STS_NEG_EDGE_TRIG_BIT) |
+            (1 << GCI_GPIO_STS_WL_DIN_SELECT));
+        LHL_REG(sih, gpio_int_en_port_adr[0],
+            1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
+        LHL_REG(sih, gpio_int_st_port_adr[0],
+            1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
+    }
+}
+
+static const uint32 lpo_opt_tab[4][2] = {
+    { LPO1_PD_EN, LHL_LPO1_SEL },
+    { LPO2_PD_EN, LHL_LPO2_SEL },
+    { OSC_32k_PD, LHL_32k_SEL},
+    { EXTLPO_BUF_PD, LHL_EXT_SEL }
+};
+
+#define LPO_EN_OFFSET 0u
+#define LPO_SEL_OFFSET 1u
+
+static int
+si_lhl_get_lpo_sel(si_t *sih, uint32 lpo)
+{
+    int sel;
+    if (lpo <= LHL_EXT_SEL) {
+        LHL_REG(sih, lhl_main_ctl_adr, lpo_opt_tab[lpo - 1u][LPO_EN_OFFSET], 0u);
+        sel = lpo_opt_tab[lpo - 1u][LPO_SEL_OFFSET];
+    } else {
+        sel = BCME_NOTFOUND;
+    }
+    return sel;
+}
+
+static void
+si_lhl_detect_lpo(si_t *sih, osl_t *osh)
+{
+    uint clk_det_cnt;
+    int timeout = 0;
+    gciregs_t *gciregs;
+    gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+    ASSERT(gciregs != NULL);
+
+    LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN, 0);
+    LHL_REG(sih, lhl_clk_det_ctl_adr,
+        LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR);
+    timeout = 0;
+    clk_det_cnt =
+        ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+        LHL_CLK_DET_CNT_SHIFT);
+    while (clk_det_cnt != 0 && timeout <= LPO_SEL_TIMEOUT) {
+        OSL_DELAY(10);
+        clk_det_cnt =
+            ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+            LHL_CLK_DET_CNT_SHIFT);
+        timeout++;
+    }
+
+    if (clk_det_cnt != 0) {
+        LHL_ERROR(("Clock not present as clear did not work, timeout = %d\n", timeout));
+        ROMMABLE_ASSERT(0);
+    }
+    LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, 0);
+    LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN,
+        LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN);
+    clk_det_cnt =
+        ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+        LHL_CLK_DET_CNT_SHIFT);
+    timeout = 0;
+
+    while (clk_det_cnt <= CLK_DET_CNT_THRESH && timeout <= LPO_SEL_TIMEOUT) {
+        OSL_DELAY(10);
+        clk_det_cnt =
+            ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+            LHL_CLK_DET_CNT_SHIFT);
+        timeout++;
+    }
+
+    if (timeout >= LPO_SEL_TIMEOUT) {
+        LHL_ERROR(("LPO is not available, timeout = %u\n", timeout));
+        ROMMABLE_ASSERT(0);
+    }
+}
+
+static void
+si_lhl_select_lpo(si_t *sih, osl_t *osh, int sel, uint32 lpo)
+{
+    uint status;
+    int timeout = 0u;
+    gciregs_t *gciregs;
+    uint32 final_clk_sel;
+    uint32 final_lpo_sel;
+    gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+    ASSERT(gciregs != NULL);
+
+    LHL_REG(sih, lhl_main_ctl_adr,
+        LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL, (sel) << LPO_SEL_SHIFT);
+    final_clk_sel = (R_REG(osh, &gciregs->lhl_clk_status_adr)
+        & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL);
+    final_lpo_sel = (unsigned)(((1u << sel) << LPO_FINAL_SEL_SHIFT));
+
+    status = (final_clk_sel == final_lpo_sel) ? 1u : 0u;
+    timeout = 0;
+    while (!status && timeout <= LPO_SEL_TIMEOUT) {
+        OSL_DELAY(10);
+        final_clk_sel = (R_REG(osh, &gciregs->lhl_clk_status_adr)
+            & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL);
+        status = (final_clk_sel == final_lpo_sel) ? 1u : 0u;
+        timeout++;
+    }
+
+    if (timeout >= LPO_SEL_TIMEOUT) {
+        LHL_ERROR(("LPO is not available, timeout = %u\n", timeout));
+        ROMMABLE_ASSERT(0);
+    }
+
+    /* For 4377 and chip revs B0 and greater, do not power off the other LPOs */
+    if (BCM4389_CHIP(sih->chip) || BCM4378_CHIP(sih->chip) || BCM4397_CHIP(sih->chip) ||
+        BCM4388_CHIP(sih->chip) || BCM4387_CHIP(sih->chip) ||
+        (CHIPID(sih->chip) == BCM4377_CHIP_ID)) {
+        LHL_ERROR(("Not powering down the other LPOs\n"));
+    } else {
+        /* Power down the rest of the LPOs */
+
+        if (lpo != LHL_EXT_LPO_ENAB) {
+            LHL_REG(sih, lhl_main_ctl_adr, EXTLPO_BUF_PD, EXTLPO_BUF_PD);
+        }
+
+        if (lpo != LHL_LPO1_ENAB) {
+            LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_EN, LPO1_PD_EN);
+            LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_SEL, LPO1_PD_SEL_VAL);
+        }
+        if (lpo != LHL_LPO2_ENAB) {
+            LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_EN, LPO2_PD_EN);
+            LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_SEL, LPO2_PD_SEL_VAL);
+        }
+        if (lpo != LHL_OSC_32k_ENAB) {
+            LHL_REG(sih, lhl_main_ctl_adr, OSC_32k_PD, OSC_32k_PD);
+        }
+        if (lpo != RADIO_LPO_ENAB) {
+            si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, LPO_SEL, 0);
+        }
+    }
+
+}
+
+/* To skip this function, specify an invalid "lpo_select" value in nvram */
+int
+BCMATTACHFN(si_lhl_set_lpoclk)(si_t *sih, osl_t *osh, uint32 lpo_force)
+{
+    int lhl_wlclk_sel;
+    uint32 lpo = 0;
+
+    /* Apply nvram override to lpo */
+    if (!FWSIGN_ENAB()) {
+        if ((lpo = (uint32)getintvar(NULL, "lpo_select")) == 0) {
+            if (lpo_force == LHL_LPO_AUTO) {
+                lpo = LHL_OSC_32k_ENAB;
+            } else {
+                lpo = lpo_force;
+            }
+        }
+    } else {
+        lpo = lpo_force;
+    }
+
+    lhl_wlclk_sel = si_lhl_get_lpo_sel(sih, lpo);
+
+    if (lhl_wlclk_sel < 0) {
+        return BCME_OK;
+    }
+
+    LHL_REG(sih, lhl_clk_det_ctl_adr,
+        LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL, lhl_wlclk_sel);
+
+    /* Detect the desired LPO */
+    si_lhl_detect_lpo(sih, osh);
+
+    /* Select the desired LPO */
+    si_lhl_select_lpo(sih, osh, lhl_wlclk_sel, lpo);
+
+    return BCME_OK;
+}
+
+void
+BCMATTACHFN(si_lhl_timer_config)(si_t *sih, osl_t *osh, int timer_type)
+{
+    uint origidx;
+    pmuregs_t *pmu = NULL;
+
+    /* Remember original core before switch to chipc/pmu */
+    origidx = si_coreidx(sih);
+    if (AOB_ENAB(sih)) {
+        pmu = si_setcore(sih, PMU_CORE_ID, 0);
+    } else {
+        pmu = si_setcoreidx(sih, SI_CC_IDX);
+    }
+
+    ASSERT(pmu != NULL);
+
+    switch (timer_type) {
+    case LHL_MAC_TIMER:
+        /* Enable MAC Timer interrupt */
+        LHL_REG(sih, lhl_wl_mactim0_intrp_adr,
+            (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER),
+            (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER));
+
+        /* Programs bits for MACPHY_CLK_AVAIL and all its dependent bits in
+         * MacResourceReqMask0.
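+         * (Added note, not in the original patch: the PRRT_ALP_REQ |
+         * PRRT_HQ_REQ | PRRT_INTEN flags written below are positioned
+         * above the timer-count field by FLAGS_SHIFT, per the comment on
+         * FLAGS_SHIFT at the top of this file.)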
+ */ + PMU_REG(sih, mac_res_req_mask, ~0, si_pmu_rsrc_macphy_clk_deps(sih, osh, 0)); + + /* One time init of mac_res_req_timer to enable interrupt and clock request */ + HND_PMU_SYNC_WR(sih, pmu, pmu, osh, + PMUREGADDR(sih, pmu, pmu, mac_res_req_timer), + ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT)); + + /* + * Reset MAC Main Timer if in case it is running due to previous instance + * This also resets the interrupt status + */ + LHL_REG(sih, lhl_wl_mactim_int0_adr, LHL_WL_MACTIMER_MASK, 0x0); + + if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 1) { + LHL_REG(sih, lhl_wl_mactim1_intrp_adr, + (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER), + (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER)); + + PMU_REG(sih, mac_res_req_mask1, ~0, + si_pmu_rsrc_macphy_clk_deps(sih, osh, 1)); + + HND_PMU_SYNC_WR(sih, pmu, pmu, osh, + PMUREGADDR(sih, pmu, pmu, mac_res_req_timer1), + ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT)); + + /* + * Reset MAC Aux Timer if in case it is running due to previous instance + * This also resets the interrupt status + */ + LHL_REG(sih, lhl_wl_mactim_int1_adr, LHL_WL_MACTIMER_MASK, 0x0); + } + + if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 2) { + LHL_REG(sih, lhl_wl_mactim2_intrp_adr, + (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER), + (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER)); + + PMU_REG_NEW(sih, mac_res_req_mask2, ~0, + si_pmu_rsrc_macphy_clk_deps(sih, osh, 2)); + + HND_PMU_SYNC_WR(sih, pmu, pmu, osh, + PMUREGADDR(sih, pmu, pmu, mac_res_req_timer2), + ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT)); + + /* + * Reset Scan MAC Timer if in case it is running due to previous instance + * This also resets the interrupt status + */ + LHL_REG(sih, lhl_wl_mactim_int2_adr, LHL_WL_MACTIMER_MASK, 0x0); + } + + break; + + case LHL_ARM_TIMER: + /* Enable ARM Timer interrupt */ + LHL_REG(sih, lhl_wl_armtim0_intrp_adr, + (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER), + (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER)); + + /* Programs bits for HT_AVAIL and all its dependent bits in ResourceReqMask0 */ + /* Programs bits for CORE_RDY_CB and all its dependent bits in ResourceReqMask0 */ + PMU_REG(sih, res_req_mask, ~0, (si_pmu_rsrc_ht_avail_clk_deps(sih, osh) | + si_pmu_rsrc_cb_ready_deps(sih, osh))); + + /* One time init of res_req_timer to enable interrupt and clock request + * For low power request only ALP (HT_AVAIL is anyway requested by res_req_mask) + */ + HND_PMU_SYNC_WR(sih, pmu, pmu, osh, + PMUREGADDR(sih, pmu, pmu, res_req_timer), + ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT)); + break; + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +BCMATTACHFN(si_lhl_timer_enable)(si_t *sih) +{ + /* Enable clks for pmu int propagation */ + PMU_REG(sih, pmuintctrl0, PMU_INTC_ALP_REQ, PMU_INTC_ALP_REQ); + + PMU_REG(sih, pmuintmask0, RSRC_INTR_MASK_TIMER_INT_0, RSRC_INTR_MASK_TIMER_INT_0); +#ifndef BCMQT + LHL_REG(sih, lhl_main_ctl_adr, LHL_FAST_WRITE_EN, LHL_FAST_WRITE_EN); +#endif /* BCMQT */ + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_USE_LHL_TIMER, PCTL_EXT_USE_LHL_TIMER); +} + +void +BCMPOSTTRAPFN(si_lhl_timer_reset)(si_t *sih, uint coreid, uint coreunit) +{ + switch (coreid) { + case D11_CORE_ID: + switch (coreunit) { + case 0: /* MAC_CORE_UNIT_0 */ + LHL_REG(sih, lhl_wl_mactim_int0_adr, LHL_WL_MACTIMER_MASK, 0x0); + LHL_REG(sih, lhl_wl_mactim0_st_adr, + LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK); + break; + case 1: 
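+        /* Added note (not in the original patch): a typical sequence is
+         * si_lhl_timer_config() once at attach, si_lhl_timer_enable() to
+         * route PMU interrupts and switch the PMU to the LHL timer, and
+         * this reset path whenever the owning core is reinitialized.
+         */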
/* MAC_CORE_UNIT_1 */ + LHL_REG(sih, lhl_wl_mactim_int1_adr, LHL_WL_MACTIMER_MASK, 0x0); + LHL_REG(sih, lhl_wl_mactim1_st_adr, + LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK); + break; + case 2: /* SCAN_CORE_UNIT */ + LHL_REG(sih, lhl_wl_mactim_int2_adr, LHL_WL_MACTIMER_MASK, 0x0); + LHL_REG(sih, lhl_wl_mactim2_st_adr, + LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK); + break; + default: + LHL_ERROR(("Cannot reset lhl timer, wrong coreunit = %d\n", coreunit)); + } + break; + case ARMCR4_CORE_ID: /* intentional fallthrough */ + case ARMCA7_CORE_ID: + LHL_REG(sih, lhl_wl_armtim0_adr, LHL_WL_MACTIMER_MASK, 0x0); + LHL_REG(sih, lhl_wl_armtim0_st_adr, + LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK); + break; + default: + LHL_ERROR(("Cannot reset lhl timer, wrong coreid = 0x%x\n", coreid)); + } +} + +void +si_lhl_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period) +{ + gciregs_t *gciregs; + if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID)) { + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + ASSERT(gciregs != NULL); + W_REG(osh, &gciregs->lhl_wl_ilp_val_adr, ilp_period); + } +} + +lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4369_lhl_reg_set)[] = +{ + /* set wl_sleep_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)}, + + /* set top_pwrsw_en, top_slb_en, top_iso_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)}, + + /* set VMUX_asr_sel_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)}, + + /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */ + {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF}, + + /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E9F97}, + + /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE}, + + /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_DWN_CNT << 16) | + (LHL4369_CSR_MODE_DWN_CNT << 8) | (LHL4369_CSR_ADJ_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_UP_CNT << 16) | + (LHL4369_CSR_MODE_UP_CNT << 8) | (LHL4369_CSR_ADJ_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_DWN_CNT << 24) | + (LHL4369_ASR_ADJ_DWN_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_UP_CNT << 24) | + (LHL4369_ASR_ADJ_UP_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl4_adr, set down count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_DWN_CNT << 24) | + (LHL4369_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4369_ASR_LPPFM_MODE_DWN_CNT << 8) | + (LHL4369_ASR_CLK4M_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl4_adr, set up count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_UP_CNT << 24) | + (LHL4369_ASR_MODE_SEL_UP_CNT << 16)| 
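+    /* (Added reading, not stated in the patch: the *_DWN_CNT/_UP_CNT
+     * values in these tables stagger when each control toggles along the
+     * LHL power-down and power-up sequences.)
+     */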
(LHL4369_ASR_LPPFM_MODE_UP_CNT << 8) | + (LHL4369_ASR_CLK4M_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_DWN_CNT << 24) | + (LHL4369_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4369_SRBG_REF_SEL_DWN_CNT << 8) | + (LHL4369_HPBG_PU_EN_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_UP_CNT << 24) | + (LHL4369_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4369_SRBG_REF_SEL_UP_CNT << 8) | + (LHL4369_HPBG_PU_EN_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_DWN_CNT << 16)}, + + /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_UP_CNT << 16)}, + + /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4369_ASR_TRIM_ADJ_DWN_CNT << 0)}, + + /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl5_adr), ~0, (LHL4369_ASR_TRIM_ADJ_UP_CNT << 0)}, + + /* Change the default down count values for the resources */ + /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4369_PWRSW_EN_DWN_CNT << 24) | + (LHL4369_SLB_EN_DWN_CNT << 16) | (LHL4369_ISO_EN_DWN_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4369_VMUX_ASR_SEL_DWN_CNT << 16)}, + + /* Change the default up count values for the resources */ + /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4369_PWRSW_EN_UP_CNT << 24) | + (LHL4369_SLB_EN_UP_CNT << 16) | (LHL4369_ISO_EN_UP_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4369_VMUX_ASR_SEL_UP_CNT << 16))}, + + /* Enable lhl interrupt */ + {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)}, + + /* Enable LHL Wake up */ + {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)}, + + /* Making forceOTPpwrOn 1 */ + {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)} +}; + +lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4378_lhl_reg_set)[] = +{ + /* set wl_sleep_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)}, + + /* set top_pwrsw_en, top_slb_en, top_iso_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)}, + + /* set VMUX_asr_sel_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)}, + + /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */ + {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF}, + + /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.66V and trim_adj -5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E9F97}, + + /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE}, + + /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_DWN_CNT << 16) | + (LHL4378_CSR_MODE_DWN_CNT << 8) | (LHL4378_CSR_ADJ_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */ + 
{LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_UP_CNT << 16) | + (LHL4378_CSR_MODE_UP_CNT << 8) | (LHL4378_CSR_ADJ_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_DWN_CNT << 24) | + (LHL4378_ASR_ADJ_DWN_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_UP_CNT << 24) | + (LHL4378_ASR_ADJ_UP_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl4_adr, set down count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_DWN_CNT << 24) | + (LHL4378_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4378_ASR_LPPFM_MODE_DWN_CNT << 8) | + (LHL4378_ASR_CLK4M_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl4_adr, set up count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_UP_CNT << 24) | + (LHL4378_ASR_MODE_SEL_UP_CNT << 16)| (LHL4378_ASR_LPPFM_MODE_UP_CNT << 8) | + (LHL4378_ASR_CLK4M_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_DWN_CNT << 24) | + (LHL4378_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4378_SRBG_REF_SEL_DWN_CNT << 8) | + (LHL4378_HPBG_PU_EN_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_UP_CNT << 24) | + (LHL4378_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4378_SRBG_REF_SEL_UP_CNT << 8) | + (LHL4378_HPBG_PU_EN_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK, + (LHL4378_CSR_TRIM_ADJ_DWN_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)}, + + /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl2_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK, + (LHL4378_CSR_TRIM_ADJ_UP_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)}, + + /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4378_ASR_TRIM_ADJ_DWN_CNT << 0)}, + + /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, + (LHL4378_ASR_TRIM_ADJ_UP_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)}, + + /* Change the default down count values for the resources */ + /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4378_PWRSW_EN_DWN_CNT << 24) | + (LHL4378_SLB_EN_DWN_CNT << 16) | (LHL4378_ISO_EN_DWN_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4378_VMUX_ASR_SEL_DWN_CNT << 16)}, + + /* Change the default up count values for the resources */ + /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4378_PWRSW_EN_UP_CNT << 24) | + (LHL4378_SLB_EN_UP_CNT << 16) | (LHL4378_ISO_EN_UP_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4378_VMUX_ASR_SEL_UP_CNT << 16))}, + + /* Enable lhl interrupt */ + {LHL_REG_OFF(gci_intmask), (1 << 30), 
(1 << 30)}, + + /* Enable LHL Wake up */ + {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)}, + + /* Making forceOTPpwrOn 1 */ + {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)} +}; + +lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4387_lhl_reg_set)[] = +{ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), + LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK | + LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK | + LHL_TOP_PWRSEQ_SERDES_SLB_EN_MASK | + LHL_TOP_PWRSEQ_SERDES_CLK_DIS_EN_MASK, + LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK | + LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK | + LHL_TOP_PWRSEQ_SERDES_SLB_EN_MASK | + LHL_TOP_PWRSEQ_SERDES_CLK_DIS_EN_MASK}, + + /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */ + {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF}, + + /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9ED797}, + + /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.64V and trim_adj +5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x076D}, + + /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_DWN_CNT << 16) | + (LHL4378_CSR_MODE_DWN_CNT << 8) | (LHL4378_CSR_ADJ_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_UP_CNT << 16) | + (LHL4378_CSR_MODE_UP_CNT << 8) | (LHL4378_CSR_ADJ_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_DWN_CNT << 24) | + (LHL4378_ASR_ADJ_DWN_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_UP_CNT << 24) | + (LHL4378_ASR_ADJ_UP_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl4_adr, set down count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_DWN_CNT << 24) | + (LHL4378_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4378_ASR_LPPFM_MODE_DWN_CNT << 8) | + (LHL4378_ASR_CLK4M_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl4_adr, set up count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_UP_CNT << 24) | + (LHL4378_ASR_MODE_SEL_UP_CNT << 16)| (LHL4378_ASR_LPPFM_MODE_UP_CNT << 8) | + (LHL4378_ASR_CLK4M_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_DWN_CNT << 24) | + (LHL4378_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4378_SRBG_REF_SEL_DWN_CNT << 8) | + (LHL4378_HPBG_PU_EN_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_UP_CNT << 24) | + (LHL4378_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4378_SRBG_REF_SEL_UP_CNT << 8) | + (LHL4378_HPBG_PU_EN_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK, + (LHL4378_CSR_TRIM_ADJ_DWN_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)}, + + /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl2_adr), 
LHL4378_CSR_TRIM_ADJ_CNT_MASK, + (LHL4378_CSR_TRIM_ADJ_UP_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)}, + + /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, + (LHL4378_ASR_TRIM_ADJ_DWN_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)}, + + /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, + (LHL4378_ASR_TRIM_ADJ_UP_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)}, + + /* Change the default down count values for the resources */ + /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4378_PWRSW_EN_DWN_CNT << 24) | + (LHL4378_SLB_EN_DWN_CNT << 16) | (LHL4378_ISO_EN_DWN_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4387_VMUX_ASR_SEL_DWN_CNT << 16)}, + + /* Change the default up count values for the resources */ + /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4378_PWRSW_EN_UP_CNT << 24) | + (LHL4378_SLB_EN_UP_CNT << 16) | (LHL4378_ISO_EN_UP_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4387_VMUX_ASR_SEL_UP_CNT << 16))}, + + /* Enable lhl interrupt */ + {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)}, + + /* Enable LHL Wake up */ + {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)}, + + /* Making forceOTPpwrOn 1 */ + {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)}, + + /* serdes_clk_dis dn=2, miscldo_pu dn=6; Also include CRWLLHL-48 WAR set bit31 */ + {LHL_REG_OFF(lhl_top_pwrdn3_ctl_adr), ~0, 0x80040c02}, + + /* serdes_clk_dis dn=11, miscldo_pu dn=0 */ + {LHL_REG_OFF(lhl_top_pwrup3_ctl_adr), ~0, 0x00160010} +}; + +lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4387_lhl_reg_set_top_off)[] = +{ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), + LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK | + LHL_TOP_PWRSEQ_TOP_ISO_EN_MASK | + LHL_TOP_PWRSEQ_TOP_SLB_EN_MASK | + LHL_TOP_PWRSEQ_TOP_PWRSW_EN_MASK | + LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK, + LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK | + LHL_TOP_PWRSEQ_TOP_ISO_EN_MASK | + LHL_TOP_PWRSEQ_TOP_SLB_EN_MASK | + LHL_TOP_PWRSEQ_TOP_PWRSW_EN_MASK | + LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK}, + + /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */ + {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F87DB}, + + /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9ED7B7}, + + /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.64V and trim_adj +5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x076D}, + + /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4387_TO_CSR_OVERI_DIS_DWN_CNT << 16) | + (LHL4387_TO_CSR_MODE_DWN_CNT << 8) | (LHL4387_TO_CSR_ADJ_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4387_TO_CSR_OVERI_DIS_UP_CNT << 16) | + (LHL4387_TO_CSR_MODE_UP_CNT << 8) | (LHL4387_TO_CSR_ADJ_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, lp_mode_dn_cnt, + * ASR_adj, vddc_sw_dis + */ + {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4387_TO_VDDC_SW_DIS_DWN_CNT << 24) | + (LHL4387_TO_ASR_ADJ_DWN_CNT << 16) | 
(LHL4387_TO_LP_MODE_DWN_CNT << 8) | + (LHL4387_TO_HPBG_CHOP_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, lp_mode_dn_cnt, + * ASR_adj, vddc_sw_dis + */ + {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4387_TO_VDDC_SW_DIS_UP_CNT << 24) | + (LHL4387_TO_ASR_ADJ_UP_CNT << 16) | (LHL4387_TO_LP_MODE_UP_CNT << 8) | + (LHL4387_TO_HPBG_CHOP_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl4_adr, set down count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4387_TO_ASR_MANUAL_MODE_DWN_CNT << 24) | + (LHL4387_TO_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4387_TO_ASR_LPPFM_MODE_DWN_CNT << 8) | + (LHL4387_TO_ASR_CLK4M_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl4_adr, set up count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4387_TO_ASR_MANUAL_MODE_UP_CNT << 24) | + (LHL4387_TO_ASR_MODE_SEL_UP_CNT << 16)| (LHL4387_TO_ASR_LPPFM_MODE_UP_CNT << 8) | + (LHL4387_TO_ASR_CLK4M_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4387_TO_PFM_PWR_SLICE_DWN_CNT << 24) | + (LHL4387_TO_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4387_TO_SRBG_REF_SEL_DWN_CNT << 8) | + (LHL4387_TO_HPBG_PU_EN_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4387_TO_PFM_PWR_SLICE_UP_CNT << 24) | + (LHL4387_TO_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4387_TO_SRBG_REF_SEL_UP_CNT << 8) | + (LHL4387_TO_HPBG_PU_EN_UP_CNT << 0))}, + + /* ASR_trim_adj downcount=0x3, [30:24] is default value for spmi_*io_sel */ + {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, 0x3}, + + /* ASR_trim_adj upcount=0x1, [30:24] is default value for spmi_*io_sel */ + {LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, 0x1}, + + /* Change the default down count values for the resources */ + /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4387_TO_PWRSW_EN_DWN_CNT << 24) | + (LHL4387_TO_SLB_EN_DWN_CNT << 16) | (LHL4387_TO_ISO_EN_DWN_CNT << 8) | + (LHL4387_TO_TOP_SLP_EN_DWN_CNT))}, + + /* Change the default up count values for the resources */ + /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4387_TO_PWRSW_EN_UP_CNT << 24) | + (LHL4387_TO_SLB_EN_UP_CNT << 16) | (LHL4387_TO_ISO_EN_UP_CNT << 8) | + (LHL4387_TO_TOP_SLP_EN_UP_CNT))}, + + /* lhl_top_pwrup2_ctl, serdes_slb_en_up_cnt=0x7 */ + {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK, 0xe0000}, + + /* lhl_top_pwrdn2_ctl, serdes_slb_en_dn_cnt=0x2 */ + {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK, 0x40000}, + + /* Enable lhl interrupt */ + {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)}, + + /* Enable LHL Wake up */ + {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)}, + + /* Making forceOTPpwrOn 1 */ + {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)}, + + /* lhl_top_pwrup3_ctl, FLL pu power up count=0x8, miscldo pu power up count=0x0, + * serdes_clk_dis up count=0x7 + */ + {LHL_REG_OFF(lhl_top_pwrup3_ctl_adr), ~0, 0xe0010}, + + /* lhl_top_pwrdn3_ctl, FLL pu power up count=0x1,miscldo pu power up count=0x3, + * serdes_clk_dis up count=0x1 + */ + {LHL_REG_OFF(lhl_top_pwrdn3_ctl_adr), ~0, 0x20602} +}; + +lhl_reg_set_t 
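+/* Added note (not in the original patch): BCMATTACHDATA/BCMATTACHFN mark
+ * attach-time-only data and functions so builds that support it can
+ * reclaim them after initialization; this is inferred from the macro
+ * naming, not stated in this patch.
+ */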
BCMATTACHDATA(lv_sleep_mode_4389_lhl_reg_set)[] = +{ + /* set wl_sleep_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)}, + + /* set top_pwrsw_en, top_slb_en, top_iso_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)}, + + /* set VMUX_asr_sel_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)}, + + /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */ + {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF}, + + /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9EDF97}, + + /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.64V and trim_adj +5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07ED}, + + /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_DWN_CNT << 16) | + (LHL4378_CSR_MODE_DWN_CNT << 8) | (LHL4378_CSR_ADJ_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_UP_CNT << 16) | + (LHL4378_CSR_MODE_UP_CNT << 8) | (LHL4378_CSR_ADJ_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_DWN_CNT << 24) | + (LHL4378_ASR_ADJ_DWN_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_UP_CNT << 24) | + (LHL4378_ASR_ADJ_UP_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl4_adr, set down count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_DWN_CNT << 24) | + (LHL4378_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4378_ASR_LPPFM_MODE_DWN_CNT << 8) | + (LHL4378_ASR_CLK4M_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl4_adr, set up count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_UP_CNT << 24) | + (LHL4378_ASR_MODE_SEL_UP_CNT << 16)| (LHL4378_ASR_LPPFM_MODE_UP_CNT << 8) | + (LHL4378_ASR_CLK4M_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_DWN_CNT << 24) | + (LHL4378_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4378_SRBG_REF_SEL_DWN_CNT << 8) | + (LHL4378_HPBG_PU_EN_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_UP_CNT << 24) | + (LHL4378_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4378_SRBG_REF_SEL_UP_CNT << 8) | + (LHL4378_HPBG_PU_EN_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4378_CSR_TRIM_ADJ_DWN_CNT << 16)}, + + /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4378_CSR_TRIM_ADJ_UP_CNT << 16)}, + + /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, + (LHL4378_ASR_TRIM_ADJ_DWN_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)}, + + /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */ + 
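+    /* Added note (not in the original patch): this 4389 table largely
+     * reuses the LHL4378_* sequencing counts plus the LHL4387_VMUX_*
+     * select counts; the CSR/ASR voltage codes in lhl_lp_main_ctl1/2 are
+     * the main values that differ from the 4378 table above.
+     */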
{LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK,
+	(LHL4378_ASR_TRIM_ADJ_UP_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)},
+
+	/* Change the default down count values for the resources */
+	/* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+	{LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4378_PWRSW_EN_DWN_CNT << 24) |
+	(LHL4378_SLB_EN_DWN_CNT << 16) | (LHL4378_ISO_EN_DWN_CNT << 8))},
+
+	/* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+	{LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4387_VMUX_ASR_SEL_DWN_CNT << 16)},
+
+	/* Change the default up count values for the resources */
+	/* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+	{LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4378_PWRSW_EN_UP_CNT << 24) |
+	(LHL4378_SLB_EN_UP_CNT << 16) | (LHL4378_ISO_EN_UP_CNT << 8))},
+
+	/* lhl_top_pwrup2_ctl_adr, set up count for VMUX_asr_sel */
+	{LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4387_VMUX_ASR_SEL_UP_CNT << 16))},
+
+	/* Enable lhl interrupt */
+	{LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+	/* Enable LHL Wake up */
+	{LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+	/* Set forceOTPpwrOn to 1 */
+	{LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)},
+
+	/* serdes_clk_dis dn=2, miscldo_pu dn=6; Also include CRWLLHL-48 WAR: set bit31 */
+	{LHL_REG_OFF(lhl_top_pwrdn3_ctl_adr), ~0, 0x80040c02},
+
+	/* serdes_clk_dis dn=11, miscldo_pu dn=0 */
+	{LHL_REG_OFF(lhl_top_pwrup3_ctl_adr), ~0, 0x00160010}
+};
+
+/* LV sleep mode summary:
+ * LV mode is where both ABUCK and CBUCK are programmed to low voltages during
+ * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to power off.
+ * With ASR ON, LPLDO OFF
+ */
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4369)(si_t *sih)
+{
+	uint i;
+	uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+	lhl_reg_set_t *regs = lv_sleep_mode_4369_lhl_reg_set;
+
+	/* Enable LHL LV mode:
+	 * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+	 */
+	for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4369_lhl_reg_set); i++) {
+		si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+	}
+	if (getintvar(NULL, rstr_rfldo3p3_cap_war)) {
+		si_corereg(sih, coreidx, LHL_REG_OFF(lhl_lp_main_ctl1_adr),
+			BCM_MASK32(23, 0), 0x9E9F9F);
+	}
+}
+
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4378)(si_t *sih)
+{
+	uint i;
+	uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+	lhl_reg_set_t *regs = lv_sleep_mode_4378_lhl_reg_set;
+
+	/* Enable LHL LV mode:
+	 * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+	 */
+	for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4378_lhl_reg_set); i++) {
+		si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+	}
+}
+
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4387)(si_t *sih)
+{
+	uint i;
+	uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+	lhl_reg_set_t *regs;
+	uint32 abuck_volt_sleep, cbuck_volt_sleep;
+	uint regs_size;
+
+	if (BCMSRTOPOFF_ENAB()) {
+		regs = lv_sleep_mode_4387_lhl_reg_set_top_off;
+		regs_size = ARRAYSIZE(lv_sleep_mode_4387_lhl_reg_set_top_off);
+	} else {
+		/* Enable LHL LV mode:
+		 * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+		 */
+		regs = lv_sleep_mode_4387_lhl_reg_set;
+		regs_size = ARRAYSIZE(lv_sleep_mode_4387_lhl_reg_set);
+	}
+
+	for (i = 0; i < regs_size; i++) {
+		si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+	}
+
+	if (getvar(NULL, rstr_cbuck_volt_sleep) !=
NULL) { + cbuck_volt_sleep = getintvar(NULL, rstr_cbuck_volt_sleep); + LHL_REG(sih, lhl_lp_main_ctl1_adr, LHL_CBUCK_VOLT_SLEEP_MASK, + (cbuck_volt_sleep << LHL_CBUCK_VOLT_SLEEP_SHIFT)); + } + + if (getvar(NULL, rstr_abuck_volt_sleep) != NULL) { + abuck_volt_sleep = getintvar(NULL, rstr_abuck_volt_sleep); + LHL_REG(sih, lhl_lp_main_ctl2_adr, LHL_ABUCK_VOLT_SLEEP_MASK, + (abuck_volt_sleep << LHL_ABUCK_VOLT_SLEEP_SHIFT)); + } + + if (BCMSRTOPOFF_ENAB()) { + /* Serdes AFE retention control enable */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_05, + CC_GCI_05_4387C0_AFE_RET_ENB_MASK, + CC_GCI_05_4387C0_AFE_RET_ENB_MASK); + } +} + +void +BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4389)(si_t *sih) +{ + uint i; + uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0); + lhl_reg_set_t *regs = lv_sleep_mode_4389_lhl_reg_set; + uint32 abuck_volt_sleep, cbuck_volt_sleep; + + /* Enable LHL LV mode: + * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en + */ + for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4389_lhl_reg_set); i++) { + si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val); + } + + if (getvar(NULL, rstr_cbuck_volt_sleep) != NULL) { + cbuck_volt_sleep = getintvar(NULL, rstr_cbuck_volt_sleep); + LHL_REG(sih, lhl_lp_main_ctl1_adr, LHL_CBUCK_VOLT_SLEEP_MASK, + (cbuck_volt_sleep << LHL_CBUCK_VOLT_SLEEP_SHIFT)); + } + + if (getvar(NULL, rstr_abuck_volt_sleep) != NULL) { + abuck_volt_sleep = getintvar(NULL, rstr_abuck_volt_sleep); + LHL_REG(sih, lhl_lp_main_ctl2_adr, LHL_ABUCK_VOLT_SLEEP_MASK, + (abuck_volt_sleep << LHL_ABUCK_VOLT_SLEEP_SHIFT)); + } + + OSL_DELAY(100); + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, ~0, 0x00000101); + + /* Clear Misc_LDO override */ + si_pmu_vreg_control(sih, PMU_VREG_5, VREG5_4387_MISCLDO_PU_MASK, 0); +} + +lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4362_lhl_reg_set)[] = +{ + /* set wl_sleep_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)}, + + /* set top_pwrsw_en, top_slb_en, top_iso_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)}, + + /* set VMUX_asr_sel_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)}, + + /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */ + {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF}, + + /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.66V and trim_adj -5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E9F97}, + + /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE}, + + /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4362_CSR_OVERI_DIS_DWN_CNT << 16) | + (LHL4362_CSR_MODE_DWN_CNT << 8) | (LHL4362_CSR_ADJ_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4362_CSR_OVERI_DIS_UP_CNT << 16) | + (LHL4362_CSR_MODE_UP_CNT << 8) | (LHL4362_CSR_ADJ_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4362_VDDC_SW_DIS_DWN_CNT << 24) | + (LHL4362_ASR_ADJ_DWN_CNT << 16) | (LHL4362_HPBG_CHOP_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4362_VDDC_SW_DIS_UP_CNT << 24) | + (LHL4362_ASR_ADJ_UP_CNT << 16) | (LHL4362_HPBG_CHOP_DIS_UP_CNT << 
0))},
+
+	/* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
+	 * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+	 */
+	{LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4362_ASR_MANUAL_MODE_DWN_CNT << 24) |
+	(LHL4362_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4362_ASR_LPPFM_MODE_DWN_CNT << 8) |
+	(LHL4362_ASR_CLK4M_DIS_DWN_CNT << 0))},
+
+	/* lhl_lp_up_ctl4_adr, set up count for ASR fields -
+	 * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+	 */
+	{LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4362_ASR_MANUAL_MODE_UP_CNT << 24) |
+	(LHL4362_ASR_MODE_SEL_UP_CNT << 16) | (LHL4362_ASR_LPPFM_MODE_UP_CNT << 8) |
+	(LHL4362_ASR_CLK4M_DIS_UP_CNT << 0))},
+
+	/* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
+	 * CSR_pfm_pwr_slice_en
+	 */
+	{LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4362_PFM_PWR_SLICE_DWN_CNT << 24) |
+	(LHL4362_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4362_SRBG_REF_SEL_DWN_CNT << 8) |
+	(LHL4362_HPBG_PU_EN_DWN_CNT << 0))},
+
+	/* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
+	 * CSR_pfm_pwr_slice_en
+	 */
+	{LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4362_PFM_PWR_SLICE_UP_CNT << 24) |
+	(LHL4362_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4362_SRBG_REF_SEL_UP_CNT << 8) |
+	(LHL4362_HPBG_PU_EN_UP_CNT << 0))},
+
+	/* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */
+	{LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4362_CSR_TRIM_ADJ_DWN_CNT << 16)},
+
+	/* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */
+	{LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4362_CSR_TRIM_ADJ_UP_CNT << 16)},
+
+	/* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */
+	{LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4362_ASR_TRIM_ADJ_DWN_CNT << 0)},
+
+	/* lhl_lp_up_ctl5_adr, set up count for ASR_trim_adj */
+	{LHL_REG_OFF(lhl_lp_up_ctl5_adr), ~0, (LHL4362_ASR_TRIM_ADJ_UP_CNT << 0)},
+
+	/* Change the default down count values for the resources */
+	/* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+	{LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4362_PWRSW_EN_DWN_CNT << 24) |
+	(LHL4362_SLB_EN_DWN_CNT << 16) | (LHL4362_ISO_EN_DWN_CNT << 8))},
+
+	/* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+	{LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4362_VMUX_ASR_SEL_DWN_CNT << 16)},
+
+	/* Change the default up count values for the resources */
+	/* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+	{LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4362_PWRSW_EN_UP_CNT << 24) |
+	(LHL4362_SLB_EN_UP_CNT << 16) | (LHL4362_ISO_EN_UP_CNT << 8))},
+
+	/* lhl_top_pwrup2_ctl_adr, set up count for VMUX_asr_sel */
+	{LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4362_VMUX_ASR_SEL_UP_CNT << 16))},
+
+	/* Enable lhl interrupt */
+	{LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+	/* Enable LHL Wake up */
+	{LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+	/* Set forceOTPpwrOn to 1 */
+	{LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)}
+};
+
+/* LV sleep mode summary:
+ * LV mode is where both ABUCK and CBUCK are programmed to low voltages during
+ * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to power off.
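+ * (Illustrative restatement, added for clarity: in LV sleep both bucks stay
+ * up at reduced voltage, the always-on rail is fed from ABUCK through the
+ * VMUX, and LPLDO can therefore stay off.)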
+ * With ASR ON, LPLDO OFF + */ +void +BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4362)(si_t *sih) +{ + uint i; + uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0); + lhl_reg_set_t *regs = lv_sleep_mode_4362_lhl_reg_set; + + /* Enable LHL LV mode: + * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en + */ + for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4362_lhl_reg_set); i++) { + si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val); + } +} + +void +si_lhl_mactim0_set(si_t *sih, uint32 val) +{ + LHL_REG(sih, lhl_wl_mactim_int0_adr, LHL_WL_MACTIMER_MASK, val); +} diff --git a/bcmdhd.101.10.361.x/hndmem.c b/bcmdhd.101.10.361.x/hndmem.c new file mode 100755 index 0000000..18dbced --- /dev/null +++ b/bcmdhd.101.10.361.x/hndmem.c @@ -0,0 +1,423 @@ +/* + * Utility routines for configuring different memories in Broadcom chips. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IS_MEMTYPE_VALID(mem) ((mem >= MEM_SOCRAM) && (mem < MEM_MAX)) +#define IS_MEMCONFIG_VALID(cfg) ((cfg >= PDA_CONFIG_CLEAR) && (cfg < PDA_CONFIG_MAX)) + +/* Returns the number of banks in a given memory */ +int +hndmem_num_banks(si_t *sih, int mem) +{ + uint32 savecore, mem_info; + int num_banks = 0; + gciregs_t *gciregs; + osl_t *osh = si_osh(sih); + + if (!IS_MEMTYPE_VALID(mem)) { + goto exit; + } + + savecore = si_coreidx(sih); + + /* TODO: Check whether SOCRAM core is present or not. 
If not, bail out */
+	/* In future we need to add code for TCM based chips as well */
+	if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+		goto exit;
+	}
+
+	if (GCIREV(sih->gcirev) >= 9) {
+		gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+		mem_info = R_REG(osh, &gciregs->wlan_mem_info);
+
+		switch (mem) {
+			case MEM_SOCRAM:
+				num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK) >>
+					WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT;
+				break;
+			case MEM_BM:
+				num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACBM_MASK) >>
+					WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT;
+				break;
+			case MEM_UCM:
+				num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK) >>
+					WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT;
+				break;
+			case MEM_SHM:
+				num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK) >>
+					WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT;
+				break;
+			default:
+				ASSERT(0);
+				break;
+		}
+	} else {
+		/* TODO: Figure out bank information using SOCRAM registers */
+	}
+
+	si_setcoreidx(sih, savecore);
+exit:
+	return num_banks;
+}
+
+/* Returns the size of a given bank in a given memory */
+int
+hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num)
+{
+	uint32 savecore, bank_info, reg_data;
+	int bank_sz = 0;
+	gciregs_t *gciregs;
+	osl_t *osh = si_osh(sih);
+
+	if (!IS_MEMTYPE_VALID(mem)) {
+		goto exit;
+	}
+
+	savecore = si_coreidx(sih);
+
+	/* TODO: Check whether SOCRAM core is present or not. If not, bail out */
+	/* In future we need to add code for TCM based chips as well */
+	if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+		goto exit;
+	}
+
+	if (GCIREV(sih->gcirev) >= 9) {
+		gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+		reg_data = ((mem &
+			GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
+			GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
+			((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
+			<< GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
+		W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
+
+		bank_info = R_REG(osh, &gciregs->wlan_bankxinfo);
+		bank_sz = (bank_info & WLAN_BANKXINFO_BANK_SIZE_MASK) >>
+			WLAN_BANKXINFO_BANK_SIZE_SHIFT;
+	} else {
+		/* TODO: Figure out bank size using SOCRAM registers */
+	}
+
+	si_setcoreidx(sih, savecore);
+exit:
+	return bank_sz;
+}
+
+/* Returns the start address of a given memory */
+uint32
+hndmem_mem_base(si_t *sih, hndmem_type_t mem)
+{
+	uint32 savecore, base_addr = 0;
+
+	/* Currently, only SOCRAM support is available in hardware */
+	if (mem != MEM_SOCRAM) {
+		goto exit;
+	}
+
+	savecore = si_coreidx(sih);
+
+	if (si_setcore(sih, SOCRAM_CORE_ID, 0))
+	{
+		base_addr = si_get_slaveport_addr(sih, CORE_SLAVE_PORT_1,
+			CORE_BASE_ADDR_0, SOCRAM_CORE_ID, 0);
+	} else {
+		/* TODO: Add code to get the base address of TCM */
+		base_addr = 0;
+	}
+
+	si_setcoreidx(sih, savecore);
+
+exit:
+	return base_addr;
+}
+
+#ifdef BCMDEBUG
+char *hndmem_type_str[] =
+	{
+		"SOCRAM",	/* 0 */
+		"BM",		/* 1 */
+		"UCM",		/* 2 */
+		"SHM",		/* 3 */
+	};
+
+/* Dumps the complete memory information */
+void
+hndmem_dump_meminfo_all(si_t *sih)
+{
+	int mem, bank, bank_cnt, bank_sz;
+
+	for (mem = MEM_SOCRAM; mem < MEM_MAX; mem++) {
+		bank_cnt = hndmem_num_banks(sih, mem);
+
+		printf("\nMemtype: %s\n", hndmem_type_str[mem]);
+		for (bank = 0; bank < bank_cnt; bank++) {
+			bank_sz = hndmem_bank_size(sih, mem, bank);
+			printf("Bank-%d: %d KB\n", bank, bank_sz);
+		}
+	}
+}
+#endif /* BCMDEBUG */
+
+/* Configures the Sleep PDA for a particular bank for a given memory type */
+int
+hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem, int bank_num,
+	hndmem_config_t config, uint32 pda)
+{
+	uint32 savecore, reg_data;
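+	/* Note (illustrative, added for clarity): the config modes handled
+	 * below are PDA_CONFIG_SET_FULL (assert the bank's full sleep-PDA and
+	 * kill masks), PDA_CONFIG_SET_PARTIAL (apply the caller-supplied pda
+	 * bitmap with no kill), and PDA_CONFIG_CLEAR (zero both, keeping the
+	 * bank fully powered during sleep).
+	 */
+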
gciregs_t *gciregs; + int err = BCME_OK; + osl_t *osh = si_osh(sih); + + /* TODO: Check whether SOCRAM core is present or not. If not, bail out */ + /* In future we need to add code for TCM based chips as well */ + if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) { + err = BCME_UNSUPPORTED; + goto exit; + } + + /* Sleep PDA is supported only by GCI rev >= 9 */ + if (GCIREV(sih->gcirev) < 9) { + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!IS_MEMTYPE_VALID(mem)) { + err = BCME_BADOPTION; + goto exit; + } + + if (!IS_MEMCONFIG_VALID(config)) { + err = BCME_BADOPTION; + goto exit; + } + + savecore = si_coreidx(sih); + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + + reg_data = ((mem & + GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) << + GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) | + ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK) + << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT); + + W_REG(osh, &gciregs->gci_indirect_addr, reg_data); + + if (config == PDA_CONFIG_SET_PARTIAL) { + W_REG(osh, &gciregs->wlan_bankxsleeppda, pda); + W_REG(osh, &gciregs->wlan_bankxkill, 0); + } + else if (config == PDA_CONFIG_SET_FULL) { + W_REG(osh, &gciregs->wlan_bankxsleeppda, WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK); + W_REG(osh, &gciregs->wlan_bankxkill, WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK); + } else { + W_REG(osh, &gciregs->wlan_bankxsleeppda, 0); + W_REG(osh, &gciregs->wlan_bankxkill, 0); + } + + si_setcoreidx(sih, savecore); + +exit: + return err; +} + +/* Configures the Active PDA for a particular bank for a given memory type */ +int +hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem, + int bank_num, hndmem_config_t config, uint32 pda) +{ + uint32 savecore, reg_data; + gciregs_t *gciregs; + int err = BCME_OK; + osl_t *osh = si_osh(sih); + + if (!IS_MEMTYPE_VALID(mem)) { + err = BCME_BADOPTION; + goto exit; + } + + if (!IS_MEMCONFIG_VALID(config)) { + err = BCME_BADOPTION; + goto exit; + } + + savecore = si_coreidx(sih); + + /* TODO: Check whether SOCRAM core is present or not. 
If not, bail out */
+	/* In future we need to add code for TCM based chips as well */
+	if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (GCIREV(sih->gcirev) >= 9) {
+		gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+		reg_data = ((mem &
+			GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
+			GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
+			((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
+			<< GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
+
+		W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
+
+		if (config == PDA_CONFIG_SET_PARTIAL) {
+			W_REG(osh, &gciregs->wlan_bankxactivepda, pda);
+		}
+		else if (config == PDA_CONFIG_SET_FULL) {
+			W_REG(osh, &gciregs->wlan_bankxactivepda,
+				WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK);
+		} else {
+			W_REG(osh, &gciregs->wlan_bankxactivepda, 0);
+		}
+	} else {
+		/* TODO: Configure SOCRAM PDA using SOCRAM registers */
+		err = BCME_UNSUPPORTED;
+	}
+
+	si_setcoreidx(sih, savecore);
+
+exit:
+	return err;
+}
+
+/* Configures the Sleep PDA for all the banks for a given memory type */
+int
+hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config)
+{
+	int bank;
+	int num_banks = hndmem_num_banks(sih, mem);
+	int err = BCME_OK;
+
+	/* Sleep PDA is supported only by GCI rev >= 9 */
+	if (GCIREV(sih->gcirev) < 9) {
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!IS_MEMTYPE_VALID(mem)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	if (!IS_MEMCONFIG_VALID(config)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	for (bank = 0; bank < num_banks; bank++)
+	{
+		err = hndmem_sleeppda_bank_config(sih, mem, bank, config, 0);
+	}
+
+exit:
+	return err;
+}
+
+/* Configures the Active PDA for all the banks for a given memory type */
+int
+hndmem_activepda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config)
+{
+	int bank;
+	int num_banks = hndmem_num_banks(sih, mem);
+	int err = BCME_OK;
+
+	if (!IS_MEMTYPE_VALID(mem)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	if (!IS_MEMCONFIG_VALID(config)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	for (bank = 0; bank < num_banks; bank++)
+	{
+		err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
+	}
+
+exit:
+	return err;
+}
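+
+/*
+ * Illustrative usage sketch (added for clarity, not part of the driver):
+ * power down every SOCRAM bank that falls inside a hypothetical 256 KB
+ * scratch region, assuming an attached si_t handle "sih".
+ */
+#if 0	/* example only, not compiled */
+static int
+example_retire_scratch_region(si_t *sih)
+{
+	uint32 base = hndmem_mem_base(sih, MEM_SOCRAM);
+
+	/* hndmem_activepda_mem_config() walks the banks and applies the
+	 * PDA setting to each bank covered by [base, base + size)
+	 * (fully covered banks only, for PDA_CONFIG_SET_FULL)
+	 */
+	return hndmem_activepda_mem_config(sih, MEM_SOCRAM, base,
+		256 * 1024, PDA_CONFIG_SET_FULL);
+}
+#endif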
+
+/* Turn off/on all the possible banks in a given memory range.
+ * Currently this works only for SOCRAM as this is restricted by HW.
+ */
+int
+hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem, uint32 mem_start,
+	uint32 size, hndmem_config_t config)
+{
+	int bank, bank_sz, num_banks;
+	int mem_end;
+	int bank_start_addr, bank_end_addr;
+	int err = BCME_OK;
+
+	/* The bank size can be obtained only for SOCRAM/TCM. Support is not available
+	 * for the other memories (BM, UCM and SHM)
+	 */
+	if (mem != MEM_SOCRAM) {
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	num_banks = hndmem_num_banks(sih, mem);
+	bank_start_addr = hndmem_mem_base(sih, mem);
+	mem_end = mem_start + size - 1;
+
+	for (bank = 0; bank < num_banks; bank++)
+	{
+		/* The bank size is specified in the bankXinfo register in terms of KBs */
+		bank_sz = 1024 * hndmem_bank_size(sih, mem, bank);
+
+		bank_end_addr = bank_start_addr + bank_sz - 1;
+
+		if (config == PDA_CONFIG_SET_FULL) {
+			/* Check if the bank is completely overlapping with the given mem range */
+			if ((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) {
+				err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
+			}
+		} else {
+			/* Check if the bank is completely overlapped with the given mem range */
+			if (((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) ||
+				/* Check if the bank is partially overlapped with the given range */
+				((mem_start <= bank_end_addr) && (mem_end >= bank_start_addr))) {
+				err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
+			}
+		}
+
+		bank_start_addr += bank_sz;
+	}
+
+exit:
+	return err;
+}
diff --git a/bcmdhd.101.10.361.x/hndpmu.c b/bcmdhd.101.10.361.x/hndpmu.c
new file mode 100755
index 0000000..7bd12e3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/hndpmu.c
@@ -0,0 +1,9929 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+/**
+ * @file
+ * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
+ * However, in the context of this file the baseband ('BB') PLL/FLL is referred to.
+ *
+ * Throughout this code, the prefixes 'pmu1_' and 'pmu2_' are used.
+ * They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012).
+ * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports
+ * fractional frequency generation. pmu2_ does not support fractional frequency generation.
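+ * (Illustrative aside, added for clarity: this fractional capability is the
+ * reason FVCO targets such as FVCO_960p1 - 960.1 MHz - are defined further
+ * below.)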
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if defined DONGLEBUILD
+#include
+#ifdef __arm__
+#include
+#endif
+#endif /* DONGLEBUILD */
+#if !defined(BCMDONGLEHOST)
+#include
+#include
+#ifdef BCM_OTP_API
+#include
+#endif /* BCM_OTP_API */
+#endif /* !BCMDONGLEHOST */
+#if !defined(BCMDONGLEHOST)
+#include
+#endif
+#include
+#include
+#ifdef EVENT_LOG_COMPILE
+#include
+#endif
+#include
+#include
+
+#include "siutils_priv.h"
+
+#ifdef BCM_AVS
+#include
+#endif
+
+#if defined(EVENT_LOG_COMPILE) && defined(BCMDBG_ERR) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define PMU_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_PMU_ERROR, args)
+#else
+#define PMU_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_PMU_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#elif defined(BCMDBG_ERR)
+#define PMU_ERROR(args) printf args
+#else
+#define PMU_ERROR(args)
+#endif /* defined(BCMDBG_ERR) && defined(ERR_USE_EVENT_LOG) */
+
+#ifdef BCMDBG
+//#define BCMDBG_PMU
+#endif
+
+#ifdef BCMDBG_PMU
+#define PMU_MSG(args) printf args
+#else
+#define PMU_MSG(args)
+#endif /* BCMDBG_PMU */
+
+/* Used for very verbose debugging messages that are not intended
+ * to be enabled except on private builds.
+ */
+#define PMU_NONE(args)
+#define flags_shift 14
+
+/** contains resource bit positions for a specific chip */
+struct rsc_per_chip {
+	uint8 ht_avail;
+	uint8 macphy_clkavail;
+	uint8 ht_start;
+	uint8 otp_pu;
+	uint8 macphy_aux_clkavail;
+	uint8 macphy_scan_clkavail;
+	uint8 cb_ready;
+	uint8 dig_ready;
+};
+
+typedef struct rsc_per_chip rsc_per_chip_t;
+
+#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
+bool _pmustatsenab = TRUE;
+#else
+bool _pmustatsenab = FALSE;
+#endif /* BCMPMU_STATS */
+
+/* 1MHz lpo enable */
+/* PLEASE USE THIS MACRO IN ATTACH PATH ONLY!
*/ +#if defined(BCM_FASTLPO) && !defined(BCM_FASTLPO_DISABLED) + #define FASTLPO_ENAB() (TRUE) +#else + #define FASTLPO_ENAB() (FALSE) +#endif + +/* Disable the power optimization feature */ +bool _bcm_pwr_opt_dis = FALSE; + +#ifdef BCMSRTOPOFF +bool _srtopoff_enab = FALSE; +#endif + +pmuregs_t *hnd_pmur = NULL; /* PMU core regs */ + +#if !defined(BCMDONGLEHOST) +static void si_pmu_chipcontrol_xtal_settings_4369(si_t *sih); +static void si_pmu_chipcontrol_xtal_settings_4362(si_t *sih); +static void si_pmu_chipcontrol_xtal_settings_4378(si_t *sih); + +/* PLL controls/clocks */ +static void si_pmu1_pllinit1(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 xtal); +static void si_pmu_pll_off(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *min_mask, + uint32 *max_mask, uint32 *clk_ctl_st); +static void si_pmu_pll_on(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 min_mask, + uint32 max_mask, uint32 clk_ctl_st); +static void si_pmu_otp_pllcontrol(si_t *sih, osl_t *osh); +static void si_pmu_otp_vreg_control(si_t *sih, osl_t *osh); +static void si_pmu_otp_chipcontrol(si_t *sih, osl_t *osh); +static uint32 si_pmu_def_alp_clock(si_t *sih, osl_t *osh); +static bool si_pmu_update_pllcontrol(si_t *sih, osl_t *osh, uint32 xtal, bool update_required); +static uint32 si_pmu_htclk_mask(si_t *sih); + +static uint32 si_pmu1_cpuclk0(si_t *sih, osl_t *osh, pmuregs_t *pmu); +static uint32 si_pmu1_alpclk0(si_t *sih, osl_t *osh, pmuregs_t *pmu); + +static uint32 si_pmu1_cpuclk0_pll2(si_t *sih); + +/* PMU resources */ +static uint32 si_pmu_res_deps(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 rsrcs, bool all); +static uint si_pmu_res_uptime(si_t *sih, osl_t *osh, pmuregs_t *pmu, + uint8 rsrc, bool pmu_fast_trans_en); +static void si_pmu_res_masks(si_t *sih, uint32 *pmin, uint32 *pmax); + +uint32 si_pmu_get_pmutime_diff(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *prev); +bool si_pmu_wait_for_res_pending(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint usec, + bool cond, uint32 *elapsed_time); + +#ifdef __ARM_ARCH_7A__ +static uint32 si_pmu_mem_ca7clock(si_t *sih, osl_t *osh); +#endif + +static uint8 fastlpo_dis_get(void); +static uint8 fastlpo_pcie_dis_get(void); + +static uint32 si_pmu_bpclk_4387(si_t *sih); + +static int si_pmu_openloop_cal_43012(si_t *sih, uint16 currtemp); + +static uint32 si_pmu_pll6val_armclk_calc(osl_t *osh, pmuregs_t *pmu, uint32 armclk, uint32 xtal, + bool write); +static bool si_pmu_armpll_write_required(si_t *sih, uint32 xtal); + +uint8 si_pmu_pll28nm_calc_ndiv(uint32 fvco, uint32 xtal, uint32 *ndiv_int, uint32 *ndiv_frac); + +void si_pmu_armpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac); +void si_pmu_bbpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac); +void si_pmu_armpll_chmdiv_upd(si_t *sih, uint32 ch0_mdiv, uint32 ch1_mdiv); + +#ifdef BCM_LDO3P3_SOFTSTART +static int si_pmu_ldo3p3_soft_start_get(si_t *sih, osl_t *osh, uint32 bt_or_wl, int *res); +static int si_pmu_ldo3p3_soft_start_set(si_t *sih, osl_t *osh, uint32 bt_or_wl, uint32 slew_rate); +#endif /* BCM_LDO3P3_SOFTSTART */ +#ifdef XTAL_BIAS_FROM_OTP +static void si_pmu_chipcontrol_xtal_bias_from_otp(si_t *sih, uint8* flag, uint8* val); +#ifndef BCM_OTP_API +static void si_pmu_chipcontrol_xtal_bias_cal_done_offsets(si_t *sih, uint16* wrd_offset, + uint8* wrd_shift, uint8* wrd_mask); +static void si_pmu_chipcontrol_xtal_bias_val_offsets(si_t *sih, uint16* wrd_offset, + uint8* wrd_shift, uint8* wrd_mask); +#endif /* !BCM_OTP_API */ +#endif /* XTAL_BIAS_FROM_OTP */ + +/* PMU timer ticks once 
in 32uS */
+#define PMU_US_STEPS (32)
+
+void *g_si_pmutmr_lock_arg = NULL;
+si_pmu_callback_t g_si_pmutmr_lock_cb = NULL, g_si_pmutmr_unlock_cb = NULL;
+
+/* FVCO frequency in [KHz] */
+#define FVCO_640	640000		/**< 640MHz */
+#define FVCO_880	880000		/**< 880MHz */
+#define FVCO_1760	1760000		/**< 1760MHz */
+#define FVCO_1440	1440000		/**< 1440MHz */
+#define FVCO_960	960000		/**< 960MHz */
+#define FVCO_960p1	960100		/**< 960.1MHz */
+#define FVCO_960010	960010		/**< 960.0098MHz */
+#define FVCO_961	961000		/**< 961MHz */
+#define FVCO_960p5	960500		/**< 960.5MHz */
+#define FVCO_963	963000		/**< 963MHz */
+#define FVCO_963p01	963010		/**< 963.01MHz */
+#define FVCO_1000	1000000		/**< 1000MHz */
+#define FVCO_1600	1600000		/**< 1600MHz */
+#define FVCO_1920	1920000		/**< 1920MHz */
+#define FVCO_1938	1938000		/**< 1938MHz */
+#define FVCO_385	385000		/**< 385MHz */
+#define FVCO_400	400000		/**< 400MHz */
+#define FVCO_720	720000		/**< 720MHz */
+#define FVCO_2880	2880000		/**< 2880 MHz */
+#define FVCO_2946	2946000		/**< 2946 MHz */
+#define FVCO_3000	3000000		/**< 3000 MHz */
+#define FVCO_3200	3200000		/**< 3200 MHz */
+#define FVCO_1002p8	1002823		/**< 1002.823MHz */
+
+/* defines to make the code more readable */
+/* But 0 is a valid resource number! */
+#define NO_SUCH_RESOURCE	0	/**< means: chip does not have such a PMU resource */
+
+/* Use these defines instead of 'magic' values when writing to the pllcontrol_addr register */
+#define PMU_PLL_CTRL_REG0	0
+#define PMU_PLL_CTRL_REG1	1
+#define PMU_PLL_CTRL_REG2	2
+#define PMU_PLL_CTRL_REG3	3
+#define PMU_PLL_CTRL_REG4	4
+#define PMU_PLL_CTRL_REG5	5
+#define PMU_PLL_CTRL_REG6	6
+#define PMU_PLL_CTRL_REG7	7
+#define PMU_PLL_CTRL_REG8	8
+#define PMU_PLL_CTRL_REG9	9
+#define PMU_PLL_CTRL_REG10	10
+#define PMU_PLL_CTRL_REG11	11
+#define PMU_PLL_CTRL_REG12	12
+#define PMU_PLL_CTRL_REG13	13
+#define PMU_PLL_CTRL_REG14	14
+#define PMU_PLL_CTRL_REG15	15
+
+#ifndef BCM_OTP_API
+#define OTP_XTAL_BIAS_CAL_DONE_4378_WRD_OFFSET	743
+#define OTP_XTAL_BIAS_CAL_DONE_4378_WRD_SHIFT	8
+#define OTP_XTAL_BIAS_CAL_DONE_4378_WRD_MASK	0x1
+
+#define OTP_XTAL_BIAS_VAL_4378_WRD_OFFSET	743
+#define OTP_XTAL_BIAS_VAL_4378_WRD_SHIFT	0
+#define OTP_XTAL_BIAS_VAL_4378_WRD_MASK		0xFF
+#endif /* !BCM_OTP_API */
+
+/* Changes the drive strength of gpio_12 and gpio_14 from 0x3 to 0x01 */
+#define GPIO_DRIVE_4378_MASK	0x3Fu
+#define GPIO_DRIVE_4378_VAL	0x09u
+
+/**
+ * The chip has one or more PLLs/FLLs (e.g. baseband PLL, USB PHY PLL). The settings of each PLL are
+ * contained within one or more 'PLL control' registers. Since the PLL hardware requires that
+ * changes for one PLL are committed at once, the PMU has a provision for 'updating' all PLL control
+ * registers at once.
+ *
+ * When software wants to change any PLL parameters, it withdraws requests for that PLL clock,
+ * updates the PLL control registers, being careful not to alter any control signals for the other
+ * PLLs, and then writes a 1 to PMUCtl.PllCtnlUpdate to commit the changes. The best usage model
+ * is to bring the PLL down first, then update the PLL control registers.
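+ *
+ * Illustrative sequence (added for clarity; 'new_val' is a hypothetical value):
+ *   si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, ~0, new_val);  <-- stage the write
+ *   si_pmu_pllupd(sih);  <-- commit all staged writes via PCTL_PLL_PLLCTL_UPD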
+ */ +void +si_pmu_pllupd(si_t *sih) +{ + pmu_corereg(sih, SI_CC_IDX, pmucontrol, + PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD); +} + +/* 4360_OTP_PU is used for 4352, not a typo */ +static rsc_per_chip_t rsc_4352 = {NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, + NO_SUCH_RESOURCE, RES4360_OTP_PU, NO_SUCH_RESOURCE, + NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE}; +static rsc_per_chip_t rsc_4360 = {RES4360_HT_AVAIL, NO_SUCH_RESOURCE, + NO_SUCH_RESOURCE, RES4360_OTP_PU, NO_SUCH_RESOURCE, + NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE}; +static rsc_per_chip_t rsc_43602 = {RES43602_HT_AVAIL, RES43602_MACPHY_CLKAVAIL, + RES43602_HT_START, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, + NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE}; +static rsc_per_chip_t rsc_43012 = {RES43012_HT_AVAIL, RES43012_MACPHY_CLK_AVAIL, + RES43012_HT_START, RES43012_OTP_PU, NO_SUCH_RESOURCE, + NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE}; +/* As per the chip team OTP doesn't have the resource in 4369 */ +static rsc_per_chip_t rsc_4369 = {RES4369_HT_AVAIL, RES4369_MACPHY_MAIN_CLK_AVAIL, + RES4369_HT_AVAIL, NO_SUCH_RESOURCE, RES4369_MACPHY_AUX_CLK_AVAIL, + NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, RES4369_DIG_CORE_RDY}; +static rsc_per_chip_t rsc_4378 = {RES4378_HT_AVAIL, RES4378_MACPHY_MAIN_CLK_AVAIL, + RES4378_HT_AVAIL, RES4378_PMU_SLEEP, RES4378_MACPHY_AUX_CLK_AVAIL, + NO_SUCH_RESOURCE, RES4378_CORE_RDY_CB, RES4378_CORE_RDY_DIG}; +static rsc_per_chip_t rsc_4387 = {RES4387_HT_AVAIL, RES4387_MACPHY_CLK_MAIN, + RES4387_HT_AVAIL, RES4387_PMU_SLEEP, RES4387_MACPHY_CLK_AUX, + RES4387_MACPHY_CLK_SCAN, RES4387_CORE_RDY_CB, RES4387_CORE_RDY_DIG}; +static rsc_per_chip_t rsc_4388 = {RES4388_HT_AVAIL, RES4388_MACPHY_CLK_MAIN, + RES4388_HT_AVAIL, RES4388_PMU_LP, RES4388_MACPHY_CLK_AUX, + RES4388_MACPHY_CLK_SCAN, RES4388_CORE_RDY_CB, RES4388_CORE_RDY_DIG}; +static rsc_per_chip_t rsc_4389 = {RES4389_HT_AVAIL, RES4389_MACPHY_CLK_MAIN, + RES4389_HT_AVAIL, RES4389_PMU_LP, RES4389_MACPHY_CLK_AUX, + RES4389_MACPHY_CLK_SCAN, RES4389_CORE_RDY_CB, RES4389_CORE_RDY_DIG}; +static rsc_per_chip_t rsc_4397 = {RES4397_HT_AVAIL, RES4397_MACPHY_CLK_MAIN, + RES4397_HT_AVAIL, RES4397_PMU_LP, RES4397_MACPHY_CLK_AUX, + RES4397_MACPHY_CLK_SCAN, RES4397_CORE_RDY_CB, RES4397_CORE_RDY_DIG}; + +static rsc_per_chip_t rsc_4362 = {RES4362_HT_AVAIL, RES4362_MACPHY_MAIN_CLK_AVAIL, + RES4362_HT_AVAIL, /* macphy aux clk */ + NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, + RES4362_DIG_CORE_RDY}; + +/** +* For each chip, location of resource bits (e.g., ht bit) in resource mask registers may differ. +* This function abstracts the bit position of commonly used resources, thus making the rest of the +* code in hndpmu.c cleaner. 
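+* For example (illustrative): si_pmu_fast_pwrup_delay() further below indexes
+* this table per MAC core unit - unit 0 uses macphy_clkavail, unit 1
+* macphy_aux_clkavail and, on chips that have a scan core, unit 2
+* macphy_scan_clkavail.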
+*/
+static rsc_per_chip_t* BCMRAMFN(si_pmu_get_rsc_positions)(si_t *sih)
+{
+	rsc_per_chip_t *rsc = NULL;
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4352_CHIP_ID:
+	case BCM43526_CHIP_ID:	/* usb variant of 4352 */
+		rsc = &rsc_4352;
+		break;
+	case BCM4360_CHIP_ID:
+	case BCM43460_CHIP_ID:
+		rsc = &rsc_4360;
+		break;
+	CASE_BCM43602_CHIP:
+		rsc = &rsc_43602;
+		break;
+	case BCM43012_CHIP_ID:
+	case BCM43013_CHIP_ID:
+	case BCM43014_CHIP_ID:
+		rsc = &rsc_43012;
+		break;
+	case BCM4369_CHIP_GRPID:
+		rsc = &rsc_4369;
+		break;
+	case BCM4362_CHIP_GRPID:
+		rsc = &rsc_4362;
+		break;
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		rsc = &rsc_4378;
+		break;
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+		rsc = &rsc_4387;
+		break;
+	case BCM4388_CHIP_GRPID:
+		rsc = &rsc_4388;
+		break;
+	case BCM4389_CHIP_GRPID:
+		rsc = &rsc_4389;
+		break;
+	case BCM4397_CHIP_GRPID:
+		rsc = &rsc_4397;
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	return rsc;
+} /* si_pmu_get_rsc_positions */
+
+static const char BCMATTACHDATA(rstr_pllD)[] = "pll%d";
+static const char BCMATTACHDATA(rstr_regD)[] = "reg%d";
+static const char BCMATTACHDATA(rstr_chipcD)[] = "chipc%d";
+static const char BCMATTACHDATA(rstr_rDt)[] = "r%dt";
+static const char BCMATTACHDATA(rstr_rDd)[] = "r%dd";
+static const char BCMATTACHDATA(rstr_Invalid_Unsupported_xtal_value_D)[] =
+	"Invalid/Unsupported xtal value %d";
+static const char BCMATTACHDATA(rstr_xtalfreq)[] = "xtalfreq";
+#if defined(SAVERESTORE) && defined(LDO3P3_MIN_RES_MASK)
+static const char BCMATTACHDATA(rstr_ldo_prot)[] = "ldo_prot";
+#endif /* SAVERESTORE && LDO3P3_MIN_RES_MASK */
+static const char BCMATTACHDATA(rstr_btldo3p3pu)[] = "btldopu";
+#if defined(BCM_FASTLPO_PMU) && !defined(BCM_FASTLPO_PMU_DISABLED)
+static const char BCMATTACHDATA(rstr_fastlpo_dis)[] = "fastlpo_dis";
+#endif /* BCM_FASTLPO_PMU */
+static const char BCMATTACHDATA(rstr_fastlpo_pcie_dis)[] = "fastlpo_pcie_dis";
+static const char BCMATTACHDATA(rstr_memlpldo_volt)[] = "memlpldo_volt";
+static const char BCMATTACHDATA(rstr_lpldo_volt)[] = "lpldo_volt";
+static const char BCMATTACHDATA(rstr_dyn_clksw_en)[] = "dyn_clksw_en";
+static const char BCMATTACHDATA(rstr_abuck_volt)[] = "abuck_volt";
+static const char BCMATTACHDATA(rstr_cbuck_volt)[] = "cbuck_volt";
+static const char BCMATTACHDATA(rstr_csrtune)[] = "csr_tune";
+
+/* The check for OTP parameters for the PLL control registers is done and if found the
+ * registers are updated accordingly.
+ */
+
+/**
+ * As a hardware bug workaround, OTP can contain variables in the form 'pll%d=%d'.
+ * If these variables are present, the corresponding PLL control register(s) are
+ * overwritten, but not yet 'updated'.
+ */
+static void
+BCMATTACHFN(si_pmu_otp_pllcontrol)(si_t *sih, osl_t *osh)
+{
+	char name[16];
+	const char *otp_val;
+	uint8 i;
+	uint32 val;
+	uint8 pll_ctrlcnt = 0;
+
+	if (FWSIGN_ENAB()) {
+		return;
+	}
+
+	if (PMUREV(sih->pmurev) >= 5) {
+		pll_ctrlcnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+	} else {
+		pll_ctrlcnt = (sih->pmucaps & PCAP_PC_MASK) >> PCAP_PC_SHIFT;
+	}
+
+	for (i = 0; i < pll_ctrlcnt; i++) {
+		snprintf(name, sizeof(name), rstr_pllD, i);
+		if ((otp_val = getvar(NULL, name)) == NULL)
+			continue;
+
+		val = (uint32)bcm_strtoul(otp_val, NULL, 0);
+		si_pmu_pllcontrol(sih, i, ~0, val);
+	}
+}
+
+/**
+ * The check for OTP parameters for the Voltage Regulator registers is done and if found the
+ * registers are updated accordingly.
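+ * For example (illustrative, hypothetical value): an OTP/NVRAM entry such as
+ * "reg3=0x00800000" is parsed with bcm_strtoul() and written through
+ * si_pmu_vreg_control() to voltage regulator register 3; "chipc%d" entries
+ * follow the same pattern for the PMU chip-control registers.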
+ */ +static void +BCMATTACHFN(si_pmu_otp_vreg_control)(si_t *sih, osl_t *osh) +{ + char name[16]; + const char *otp_val; + uint8 i; + uint32 val; + uint8 vreg_ctrlcnt = 0; + + if (FWSIGN_ENAB()) { + return; + } + + if (PMUREV(sih->pmurev) >= 5) { + vreg_ctrlcnt = (sih->pmucaps & PCAP5_VC_MASK) >> PCAP5_VC_SHIFT; + } else { + vreg_ctrlcnt = (sih->pmucaps & PCAP_VC_MASK) >> PCAP_VC_SHIFT; + } + + for (i = 0; i < vreg_ctrlcnt; i++) { + snprintf(name, sizeof(name), rstr_regD, i); + if ((otp_val = getvar(NULL, name)) == NULL) + continue; + + val = (uint32)bcm_strtoul(otp_val, NULL, 0); + si_pmu_vreg_control(sih, i, ~0, val); + } +} + +/** + * The check for OTP parameters for the chip control registers is done and if found the + * registers are updated accordingly. + */ +static void +BCMATTACHFN(si_pmu_otp_chipcontrol)(si_t *sih, osl_t *osh) +{ + uint32 val, cc_ctrlcnt, i; + char name[16]; + const char *otp_val; + + if (FWSIGN_ENAB()) { + return; + } + if (PMUREV(sih->pmurev) >= 5) { + cc_ctrlcnt = (sih->pmucaps & PCAP5_CC_MASK) >> PCAP5_CC_SHIFT; + } else { + cc_ctrlcnt = (sih->pmucaps & PCAP_CC_MASK) >> PCAP_CC_SHIFT; + } + + for (i = 0; i < cc_ctrlcnt; i++) { + snprintf(name, sizeof(name), rstr_chipcD, i); + if ((otp_val = getvar(NULL, name)) == NULL) + continue; + + val = (uint32)bcm_strtoul(otp_val, NULL, 0); + si_pmu_chipcontrol(sih, i, 0xFFFFFFFF, val); /* writes to PMU chipctrl reg 'i' */ + } +} + +/** + * A chip contains one or more LDOs (Low Drop Out regulators). During chip bringup, it can turn out + * that the default (POR) voltage of a regulator is not right or optimal. + * This function is called only by si_pmu_swreg_init() for specific chips + */ +void +si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, uint8 ldo, uint8 voltage) +{ + uint8 sr_cntl_shift = 0, rc_shift = 0, shift = 0, mask = 0; + uint8 addr = 0; + uint8 do_reg2 = 0, rshift2 = 0, rc_shift2 = 0, mask2 = 0, addr2 = 0; + + BCM_REFERENCE(osh); + + ASSERT(sih->cccaps & CC_CAP_PMU); + + switch (CHIPID(sih->chip)) { + case BCM4360_CHIP_ID: + case BCM43460_CHIP_ID: + case BCM4352_CHIP_ID: + case BCM43526_CHIP_ID: + switch (ldo) { + case SET_LDO_VOLTAGE_PAREF: + addr = 1; + rc_shift = 0; + mask = 0xf; + break; + default: + ASSERT(FALSE); + break; + } + break; + CASE_BCM43602_CHIP: + switch (ldo) { + case SET_LDO_VOLTAGE_PAREF: + addr = 0; + rc_shift = 29; + mask = 0x7; + do_reg2 = 1; + addr2 = 1; + rshift2 = 3; + mask2 = 0x8; + break; + default: + ASSERT(FALSE); + break; + } + break; + default: + ASSERT(FALSE); + return; + } + + shift = sr_cntl_shift + rc_shift; + + pmu_corereg(sih, SI_CC_IDX, regcontrol_addr, /* PMU VREG register */ + ~0, addr); + pmu_corereg(sih, SI_CC_IDX, regcontrol_data, + mask << shift, (voltage & mask) << shift); + if (do_reg2) { + /* rshift2 - right shift moves mask2 to bit 0, rc_shift2 - left shift in reg */ + si_pmu_vreg_control(sih, addr2, (mask2 >> rshift2) << rc_shift2, + ((voltage & mask2) >> rshift2) << rc_shift2); + } +} /* si_pmu_set_ldo_voltage */ + +/* d11 slow to fast clock transition time in slow clock cycles */ +#define D11SCC_SLOW2FAST_TRANSITION 2 + +/* For legacy chips only, will be discarded eventually */ +static uint16 +BCMINITFN(si_pmu_fast_pwrup_delay_legacy)(si_t *sih, osl_t *osh, pmuregs_t *pmu) +{ + uint pmudelay = PMU_MAX_TRANSITION_DLY; + uint32 ilp; /* ILP clock frequency in [Hz] */ + rsc_per_chip_t *rsc; /* chip specific resource bit positions */ + + /* Should be calculated based on the PMU updown/depend tables */ + switch (CHIPID(sih->chip)) { + case BCM43460_CHIP_ID: + case 
BCM43526_CHIP_ID:
+		pmudelay = 3700;
+		break;
+	case BCM4360_CHIP_ID:
+	case BCM4352_CHIP_ID:
+		if (CHIPREV(sih->chiprev) < 4) {
+			pmudelay = 1500;
+		} else {
+			pmudelay = 3000;
+		}
+		break;
+	case BCM43012_CHIP_ID:
+	case BCM43013_CHIP_ID:
+	case BCM43014_CHIP_ID:
+		pmudelay = 1500;	/* In microseconds for the 43012 chip */
+		break;
+	CASE_BCM43602_CHIP:
+		rsc = si_pmu_get_rsc_positions(sih);
+		/* Retrieve time by reading it out of the hardware */
+		ilp = si_ilp_clock(sih);
+		if (ilp != 0) {
+			pmudelay = (si_pmu_res_uptime(sih, osh, pmu, rsc->macphy_clkavail, FALSE) +
+				D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp - 1) / ilp);
+			pmudelay = (11 * pmudelay) / 10;
+		}
+		break;
+	case BCM4362_CHIP_GRPID:
+		rsc = si_pmu_get_rsc_positions(sih);
+		/* Retrieve time by reading it out of the hardware */
+		ilp = si_ilp_clock(sih);
+		if (ilp != 0) {
+			pmudelay = si_pmu_res_uptime(sih, osh, pmu, rsc->ht_avail, FALSE) +
+				D11SCC_SLOW2FAST_TRANSITION;
+			pmudelay = (11 * pmudelay) / 10;
+			/* With the PWR SW optimization, this additional time needs to be
+			 * added to the fast power-up delay to avoid beacon loss
+			 */
+			pmudelay += 600;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return (uint16)pmudelay;
+} /* si_pmu_fast_pwrup_delay_legacy */
+
+/**
+ * d11 core has a 'fastpwrup_dly' register that must be written to.
+ * This function returns d11 slow to fast clock transition time in [us] units.
+ * It does not write to the d11 core.
+ */
+uint16
+BCMINITFN(si_pmu_fast_pwrup_delay)(si_t *sih, osl_t *osh)
+{
+	uint pmudelay = PMU_MAX_TRANSITION_DLY;
+	pmuregs_t *pmu;
+	uint origidx;
+	rsc_per_chip_t *rsc;	/* chip specific resource bit positions */
+	uint macunit;
+	bool pmu_fast_trans_en;
+
+	ASSERT(sih->cccaps & CC_CAP_PMU);
+
+	if (ISSIM_ENAB(sih)) {
+		pmudelay = 1000;
+		goto exit;
+	}
+
+	macunit = si_coreunit(sih);
+
+	origidx = si_coreidx(sih);
+	/* We still support the 43602, so the AOB check is needed;
+	 * 43602 is the only non-AOB chip supported now
+	 */
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	pmu_fast_trans_en = (R_REG(osh, &pmu->pmucontrol_ext) & PCTL_EXT_FAST_TRANS_ENAB) ?
+		TRUE : FALSE;
+
+	rsc = si_pmu_get_rsc_positions(sih);
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4369_CHIP_GRPID:
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+	case BCM4385_CHIP_GRPID:
+		if (macunit == 0) {
+			pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+				rsc->macphy_clkavail, pmu_fast_trans_en);
+		} else if (macunit == 1) {
+			pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+				rsc->macphy_aux_clkavail, pmu_fast_trans_en);
+		} else {
+			ASSERT(0);
+		}
+		break;
+	case BCM4387_CHIP_GRPID:
+	case BCM4388_CHIP_GRPID:
+	case BCM4389_CHIP_GRPID:
+	case BCM4397_CHIP_GRPID:
+		if (macunit == 0) {
+			pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+				rsc->macphy_clkavail, pmu_fast_trans_en);
+		} else if (macunit == 1) {
+			pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+				rsc->macphy_aux_clkavail, pmu_fast_trans_en);
+		} else if (macunit == 2) {
+			pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+				rsc->macphy_scan_clkavail, pmu_fast_trans_en);
+		} else {
+			ASSERT(0);
+		}
+		break;
+
+	default:
+		pmudelay = si_pmu_fast_pwrup_delay_legacy(sih, osh, pmu);
+		break;
+	}
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+
+exit:
+	return (uint16)pmudelay;
+} /* si_pmu_fast_pwrup_delay */
+
+/*
+ * Get fast pwrup delay for a given resource
+ */
+static uint
+BCMINITFN(si_pmu_fast_pwrup_delay_rsrc)(si_t *sih, osl_t *osh, uint8 rsrc)
+{
+	uint pmudelay = PMU_MAX_TRANSITION_DLY;
+	pmuregs_t *pmu = NULL;
+	bool pmu_fast_trans_en = TRUE;
+	uint origidx;
+
+	origidx = si_coreidx(sih);
+	pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	ASSERT(pmu != NULL);
+
+	pmu_fast_trans_en = (R_REG(osh, &pmu->pmucontrol_ext) & PCTL_EXT_FAST_TRANS_ENAB) ?
+		TRUE : FALSE;
+
+	pmudelay = si_pmu_res_uptime(sih, osh, pmu, rsrc, pmu_fast_trans_en);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+
+	return pmudelay;
+}
+
+/*
+ * Get fast pwrup delay for a given DIG_READY resource
+ */
+uint
+BCMINITFN(si_pmu_fast_pwrup_delay_dig)(si_t *sih, osl_t *osh)
+{
+	uint delay = 0;
+	rsc_per_chip_t *rsc = si_pmu_get_rsc_positions(sih);
+	ASSERT(rsc);
+
+	if (rsc) {
+		delay = si_pmu_fast_pwrup_delay_rsrc(sih, osh, rsc->dig_ready);
+	}
+	return delay;
+}
+
+/*
+ * During chip bringup, it can turn out that the 'hard wired' PMU dependencies are not fully
+ * correct, or that up/down time values can be optimized. The following data structures and arrays
+ * deal with that.
+ */
+
+/* Setup resource up/down timers */
+typedef struct {
+	uint8 resnum;
+	uint32 updown;
+} pmu_res_updown_t;
+
+#define PMU_RES_SUBSTATE_SHIFT 8
+
+/* Setup resource substate transition timer value */
+typedef struct {
+	uint8 resnum;
+	uint8 substate;
+	uint32 tmr;
+} pmu_res_subst_trans_tmr_t;
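+
+/*
+ * Note (illustrative, added for clarity): each pmu_res_updown_t entry packs
+ * the up- and down-transition times for one resource into a single 32-bit
+ * word, one 16-bit field per direction - e.g. the 43602 entry
+ * {RES43602_XTAL_PU, 0x00280002} carries 0x0028 in one half and 0x0002 in
+ * the other. The pmu_res_depend_t rows defined next are applied to every
+ * resource named in res_mask: RES_DEPEND_SET overwrites that resource's
+ * dependency mask with depend_mask, RES_DEPEND_ADD ORs it in, and
+ * RES_DEPEND_REMOVE clears those bits.
+ */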
+
+/* Change resource dependencies masks */
+typedef struct {
+	uint32 res_mask;		/* resources (chip specific) */
+	int8 action;			/* action, e.g. RES_DEPEND_SET */
+	uint32 depend_mask;		/* changes to the dependencies mask */
+	bool (*filter)(si_t *sih);	/* action is taken when filter is NULL or returns TRUE */
+} pmu_res_depend_t;
+
+/* Resource dependencies mask change action */
+#define RES_DEPEND_SET		0	/* Override the dependencies mask */
+#define RES_DEPEND_ADD		1	/* Add to the dependencies mask */
+#define RES_DEPEND_REMOVE	-1	/* Remove from the dependencies mask */
+
+/* Using a safe SAVE_RESTORE up/down time; it will get updated after open-loop cal */
+static const pmu_res_updown_t BCMATTACHDATA(bcm43012a0_res_updown_ds0)[] = {
+	{RES43012_MEMLPLDO_PU, 0x00200020},
+	{RES43012_PMU_SLEEP, 0x00a600a6},
+	{RES43012_FAST_LPO, 0x00D20000},
+	{RES43012_BTLPO_3P3, 0x007D0000},
+	{RES43012_SR_POK, 0x00c80000},
+	{RES43012_DUMMY_PWRSW, 0x01400000},
+	{RES43012_DUMMY_LDO3P3, 0x00000000},
+	{RES43012_DUMMY_BT_LDO3P3, 0x00000000},
+	{RES43012_DUMMY_RADIO, 0x00000000},
+	{RES43012_VDDB_VDDRET, 0x0020000a},
+	{RES43012_HV_LDO3P3, 0x002C0000},
+	{RES43012_XTAL_PU, 0x04000000},
+	{RES43012_SR_CLK_START, 0x00080000},
+	{RES43012_XTAL_STABLE, 0x00000000},
+	{RES43012_FCBS, 0x00000000},
+	{RES43012_CBUCK_MODE, 0x00000000},
+	{RES43012_CORE_READY, 0x00000000},
+	{RES43012_ILP_REQ, 0x00000000},
+	{RES43012_ALP_AVAIL, 0x00280008},
+	{RES43012_RADIOLDO_1P8, 0x00220000},
+	{RES43012_MINI_PMU, 0x00220000},
+	{RES43012_SR_SAVE_RESTORE, 0x02600260},
+	{RES43012_PHY_PWRSW, 0x00800005},
+	{RES43012_VDDB_CLDO, 0x0020000a},
+	{RES43012_SUBCORE_PWRSW, 0x0060000a},
+	{RES43012_SR_SLEEP, 0x00000000},
+	{RES43012_HT_START, 0x00A00000},
+	{RES43012_HT_AVAIL, 0x00000000},
+	{RES43012_MACPHY_CLK_AVAIL, 0x00000000},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4360_res_updown)[] = {
+	{RES4360_BBPLLPWRSW_PU, 0x00200001}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm43602_res_updown)[] = {
+	{RES43602_SR_SAVE_RESTORE, 0x00190019},
+	{RES43602_XTAL_PU, 0x00280002},
+	{RES43602_RFLDO_PU, 0x00430005}
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm43012a0_res_depend_ds0)[] = {
+	{0, 0, 0, NULL}
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm43602_res_depend)[] = {
+	/* JIRA HW43602-131 : PCIe SERDES dependency problem */
+	{
+		PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_SR_CLK_STABLE) |
+		PMURES_BIT(RES43602_SR_SAVE_RESTORE) | PMURES_BIT(RES43602_SR_SLEEP) |
+		PMURES_BIT(RES43602_LQ_START) | PMURES_BIT(RES43602_LQ_AVAIL) |
+		PMURES_BIT(RES43602_WL_CORE_RDY) | PMURES_BIT(RES43602_ILP_REQ) |
+		PMURES_BIT(RES43602_ALP_AVAIL) | PMURES_BIT(RES43602_RFLDO_PU) |
+		PMURES_BIT(RES43602_HT_START) | PMURES_BIT(RES43602_HT_AVAIL) |
+		PMURES_BIT(RES43602_MACPHY_CLKAVAIL),
+		RES_DEPEND_ADD,
+		PMURES_BIT(RES43602_SERDES_PU),
+		NULL
+	},
+	/* set rsrc 7, 8, 9, 12, 13, 14 & 17 add (1<<10 | 1<<4 )] */
+	{
+		PMURES_BIT(RES43602_SR_CLK_START) | PMURES_BIT(RES43602_SR_PHY_PWRSW) |
+		PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_SR_CLK_STABLE) |
+		PMURES_BIT(RES43602_SR_SAVE_RESTORE) | PMURES_BIT(RES43602_SR_SLEEP) |
+		PMURES_BIT(RES43602_WL_CORE_RDY),
+		RES_DEPEND_ADD,
+		PMURES_BIT(RES43602_XTALLDO_PU) | PMURES_BIT(RES43602_XTAL_PU),
+		NULL
+	},
+	/* set rsrc 11 add (1<<13 | 1<<12 | 1<<9 | 1<<8 | 1<<7 )] */
+	{
+		PMURES_BIT(RES43602_PERST_OVR),
+		RES_DEPEND_ADD,
+		PMURES_BIT(RES43602_SR_CLK_START) | PMURES_BIT(RES43602_SR_PHY_PWRSW) |
+		PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_SR_CLK_STABLE) |
+		PMURES_BIT(RES43602_SR_SAVE_RESTORE),
+		NULL
+	},
+	/* set rsrc 19, 21, 22, 23 & 24 remove ~(1<<16 | 1<<15 )] */
+	{
PMURES_BIT(RES43602_ALP_AVAIL) | PMURES_BIT(RES43602_RFLDO_PU) | + PMURES_BIT(RES43602_HT_START) | PMURES_BIT(RES43602_HT_AVAIL) | + PMURES_BIT(RES43602_MACPHY_CLKAVAIL), + RES_DEPEND_REMOVE, + PMURES_BIT(RES43602_LQ_START) | PMURES_BIT(RES43602_LQ_AVAIL), + NULL + } +}; + +#ifndef BCM_BOOTLOADER +/** switch off LPLDO for 12x12 package because it can cause a problem when chip is reset */ +static const pmu_res_depend_t BCMATTACHDATA(bcm43602_12x12_res_depend)[] = { + /* set rsrc 19, 21, 22, 23 & 24 remove ~(1<<16 | 1<<15 )] */ + { /* resources no longer dependent on resource that is going to be removed */ + PMURES_BIT(RES43602_LPLDO_PU) | PMURES_BIT(RES43602_REGULATOR) | + PMURES_BIT(RES43602_PMU_SLEEP) | PMURES_BIT(RES43602_RSVD_3) | + PMURES_BIT(RES43602_XTALLDO_PU) | PMURES_BIT(RES43602_SERDES_PU) | + PMURES_BIT(RES43602_BBPLL_PWRSW_PU) | PMURES_BIT(RES43602_SR_CLK_START) | + PMURES_BIT(RES43602_SR_PHY_PWRSW) | PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | + PMURES_BIT(RES43602_XTAL_PU) | PMURES_BIT(RES43602_PERST_OVR) | + PMURES_BIT(RES43602_SR_CLK_STABLE) | PMURES_BIT(RES43602_SR_SAVE_RESTORE) | + PMURES_BIT(RES43602_SR_SLEEP) | PMURES_BIT(RES43602_LQ_START) | + PMURES_BIT(RES43602_LQ_AVAIL) | PMURES_BIT(RES43602_WL_CORE_RDY) | + PMURES_BIT(RES43602_ILP_REQ) | PMURES_BIT(RES43602_ALP_AVAIL) | + PMURES_BIT(RES43602_RADIO_PU) | PMURES_BIT(RES43602_RFLDO_PU) | + PMURES_BIT(RES43602_HT_START) | PMURES_BIT(RES43602_HT_AVAIL) | + PMURES_BIT(RES43602_MACPHY_CLKAVAIL) | PMURES_BIT(RES43602_PARLDO_PU) | + PMURES_BIT(RES43602_RSVD_26), + RES_DEPEND_REMOVE, + /* resource that is going to be removed */ + PMURES_BIT(RES43602_LPLDO_PU), + NULL + } +}; + +static const pmu_res_depend_t BCMATTACHDATA(bcm43602_res_pciewar)[] = { + { + PMURES_BIT(RES43602_PERST_OVR), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_REGULATOR) | + PMURES_BIT(RES43602_PMU_SLEEP) | + PMURES_BIT(RES43602_XTALLDO_PU) | + PMURES_BIT(RES43602_XTAL_PU) | + PMURES_BIT(RES43602_RADIO_PU), + NULL + }, + { + PMURES_BIT(RES43602_WL_CORE_RDY), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_PERST_OVR), + NULL + }, + { + PMURES_BIT(RES43602_LQ_START), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_PERST_OVR), + NULL + }, + { + PMURES_BIT(RES43602_LQ_AVAIL), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_PERST_OVR), + NULL + }, + { + PMURES_BIT(RES43602_ALP_AVAIL), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_PERST_OVR), + NULL + }, + { + PMURES_BIT(RES43602_HT_START), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_PERST_OVR), + NULL + }, + { + PMURES_BIT(RES43602_HT_AVAIL), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_PERST_OVR), + NULL + }, + { + PMURES_BIT(RES43602_MACPHY_CLKAVAIL), + RES_DEPEND_ADD, + PMURES_BIT(RES43602_PERST_OVR), + NULL + } +}; +#endif /* BCM_BOOTLOADER */ + +static const pmu_res_updown_t BCMATTACHDATA(bcm4360B1_res_updown)[] = { + /* Need to change elements here, should get default values for this - 4360B1 */ + {RES4360_XTAL_PU, 0x00430002}, /* Changed for 4360B1 */ +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4369a0_res_depend)[] = { + {PMURES_BIT(RES4369_DUMMY), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4369_ABUCK), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4369_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4369_MISCLDO), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4369_LDO3P3), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4369_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4369_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4369_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, 
NULL}, + {PMURES_BIT(RES4369_PWRSW_DIG), RES_DEPEND_SET, 0x060000cf, NULL}, + {PMURES_BIT(RES4369_SR_DIG), RES_DEPEND_SET, 0x060001cf, NULL}, + {PMURES_BIT(RES4369_SLEEP_DIG), RES_DEPEND_SET, 0x060003cf, NULL}, + {PMURES_BIT(RES4369_PWRSW_AUX), RES_DEPEND_SET, 0x040000cf, NULL}, + {PMURES_BIT(RES4369_SR_AUX), RES_DEPEND_SET, 0x040008cf, NULL}, + {PMURES_BIT(RES4369_SLEEP_AUX), RES_DEPEND_SET, 0x040018cf, NULL}, + {PMURES_BIT(RES4369_PWRSW_MAIN), RES_DEPEND_SET, 0x040000cf, NULL}, + {PMURES_BIT(RES4369_SR_MAIN), RES_DEPEND_SET, 0x040040cf, NULL}, + {PMURES_BIT(RES4369_SLEEP_MAIN), RES_DEPEND_SET, 0x0400c0cf, NULL}, + {PMURES_BIT(RES4369_DIG_CORE_RDY), RES_DEPEND_SET, 0x060007cf, NULL}, + {PMURES_BIT(RES4369_CORE_RDY_AUX), RES_DEPEND_SET, 0x040038cf, NULL}, + {PMURES_BIT(RES4369_ALP_AVAIL), RES_DEPEND_SET, 0x060207cf, NULL}, + {PMURES_BIT(RES4369_RADIO_AUX_PU), RES_DEPEND_SET, 0x040438df, NULL}, + {PMURES_BIT(RES4369_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x041438df, NULL}, + {PMURES_BIT(RES4369_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0401c0cf, NULL}, + {PMURES_BIT(RES4369_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0441c0df, NULL}, + {PMURES_BIT(RES4369_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x04c1c0df, NULL}, + {PMURES_BIT(RES4369_PCIE_EP_PU), RES_DEPEND_SET, 0x040000cf, NULL}, + {PMURES_BIT(RES4369_COLD_START_WAIT), RES_DEPEND_SET, 0x0000000f, NULL}, + {PMURES_BIT(RES4369_ARMHTAVAIL), RES_DEPEND_SET, 0x060a07cf, NULL}, + {PMURES_BIT(RES4369_HT_AVAIL), RES_DEPEND_SET, 0x060a07cf, NULL}, + {PMURES_BIT(RES4369_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fdf, NULL}, + {PMURES_BIT(RES4369_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7df, NULL}, +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4369a0_res_depend_fastlpo_pcie)[] = { + {PMURES_BIT(RES4369_DUMMY), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4369_ABUCK), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4369_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4369_MISCLDO), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4369_LDO3P3), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4369_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4369_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4369_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, NULL}, + {PMURES_BIT(RES4369_PWRSW_DIG), RES_DEPEND_SET, 0x060000ef, NULL}, + {PMURES_BIT(RES4369_SR_DIG), RES_DEPEND_SET, 0x060001ef, NULL}, + {PMURES_BIT(RES4369_SLEEP_DIG), RES_DEPEND_SET, 0x060003ef, NULL}, + {PMURES_BIT(RES4369_PWRSW_AUX), RES_DEPEND_SET, 0x040000ef, NULL}, + {PMURES_BIT(RES4369_SR_AUX), RES_DEPEND_SET, 0x040008ef, NULL}, + {PMURES_BIT(RES4369_SLEEP_AUX), RES_DEPEND_SET, 0x040018ef, NULL}, + {PMURES_BIT(RES4369_PWRSW_MAIN), RES_DEPEND_SET, 0x040000ef, NULL}, + {PMURES_BIT(RES4369_SR_MAIN), RES_DEPEND_SET, 0x040040ef, NULL}, + {PMURES_BIT(RES4369_SLEEP_MAIN), RES_DEPEND_SET, 0x0400c0ef, NULL}, + {PMURES_BIT(RES4369_DIG_CORE_RDY), RES_DEPEND_SET, 0x060007ef, NULL}, + {PMURES_BIT(RES4369_CORE_RDY_AUX), RES_DEPEND_SET, 0x040038ef, NULL}, + {PMURES_BIT(RES4369_ALP_AVAIL), RES_DEPEND_SET, 0x060207ef, NULL}, + {PMURES_BIT(RES4369_RADIO_AUX_PU), RES_DEPEND_SET, 0x040438ff, NULL}, + {PMURES_BIT(RES4369_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x041438ff, NULL}, + {PMURES_BIT(RES4369_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0401c0ef, NULL}, + {PMURES_BIT(RES4369_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0441c0ff, NULL}, + {PMURES_BIT(RES4369_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x04c1c0ff, NULL}, + {PMURES_BIT(RES4369_PCIE_EP_PU), RES_DEPEND_SET, 0x0400002f, NULL}, + 
{PMURES_BIT(RES4369_COLD_START_WAIT), RES_DEPEND_SET, 0x0000002f, NULL}, + {PMURES_BIT(RES4369_ARMHTAVAIL), RES_DEPEND_SET, 0x060a07ef, NULL}, + {PMURES_BIT(RES4369_HT_AVAIL), RES_DEPEND_SET, 0x060a07ef, NULL}, + {PMURES_BIT(RES4369_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fff, NULL}, + {PMURES_BIT(RES4369_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7ff, NULL}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4369a0_res_updown)[] = { + {RES4369_DUMMY, 0x00220022}, + {RES4369_ABUCK, 0x00c80022}, + {RES4369_PMU_SLEEP, 0x00c80022}, + {RES4369_MISCLDO, 0x00bd0022}, + {RES4369_LDO3P3, 0x00bd0022}, + {RES4369_FAST_LPO_AVAIL, 0x01500022}, + {RES4369_XTAL_PU, 0x07d00022}, + {RES4369_XTAL_STABLE, 0x00220022}, + {RES4369_PWRSW_DIG, 0x02100087}, + {RES4369_SR_DIG, 0x02000200}, + {RES4369_SLEEP_DIG, 0x00220022}, + {RES4369_PWRSW_AUX, 0x03900087}, + {RES4369_SR_AUX, 0x01cc01cc}, + {RES4369_SLEEP_AUX, 0x00220022}, + {RES4369_PWRSW_MAIN, 0x03900087}, + {RES4369_SR_MAIN, 0x02000200}, + {RES4369_SLEEP_MAIN, 0x00220022}, + {RES4369_DIG_CORE_RDY, 0x00220044}, + {RES4369_CORE_RDY_AUX, 0x00220044}, + {RES4369_ALP_AVAIL, 0x00220044}, + {RES4369_RADIO_AUX_PU, 0x006e0022}, + {RES4369_MINIPMU_AUX_PU, 0x00460022}, + {RES4369_CORE_RDY_MAIN, 0x00220022}, + {RES4369_RADIO_MAIN_PU, 0x006e0022}, + {RES4369_MINIPMU_MAIN_PU, 0x00460022}, + {RES4369_PCIE_EP_PU, 0x02100087}, + {RES4369_COLD_START_WAIT, 0x00220022}, + {RES4369_ARMHTAVAIL, 0x00a80022}, + {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL}, + {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022}, + {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4369a0_res_updown_fastlpo_pmu)[] = { + {RES4369_DUMMY, 0x00220022}, + {RES4369_ABUCK, 0x00c80022}, + {RES4369_PMU_SLEEP, 0x00c80022}, + {RES4369_MISCLDO, 0x00bd0022}, + {RES4369_LDO3P3, 0x00bd0022}, + {RES4369_FAST_LPO_AVAIL, 0x01500022}, + {RES4369_XTAL_PU, 0x07d00022}, + {RES4369_XTAL_STABLE, 0x00220022}, + {RES4369_PWRSW_DIG, 0x02100087}, + {RES4369_SR_DIG, 0x02000200}, + {RES4369_SLEEP_DIG, 0x00220022}, + {RES4369_PWRSW_AUX, 0x03900087}, + {RES4369_SR_AUX, 0x01cc01cc}, + {RES4369_SLEEP_AUX, 0x00220022}, + {RES4369_PWRSW_MAIN, 0x03900087}, + {RES4369_SR_MAIN, 0x02000200}, + {RES4369_SLEEP_MAIN, 0x00220022}, + {RES4369_DIG_CORE_RDY, 0x00220044}, + {RES4369_CORE_RDY_AUX, 0x00220044}, + {RES4369_ALP_AVAIL, 0x00220044}, + {RES4369_RADIO_AUX_PU, 0x006e0022}, + {RES4369_MINIPMU_AUX_PU, 0x00460022}, + {RES4369_CORE_RDY_MAIN, 0x00220022}, + {RES4369_RADIO_MAIN_PU, 0x006e0022}, + {RES4369_MINIPMU_MAIN_PU, 0x00460022}, + {RES4369_PCIE_EP_PU, 0x01200087}, + {RES4369_COLD_START_WAIT, 0x00220022}, + {RES4369_ARMHTAVAIL, 0x00a80022}, + {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL}, + {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022}, + {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4369b0_res_updown)[] = { + {RES4369_DUMMY, 0x00220022}, + {RES4369_ABUCK, 0x00c80022}, + {RES4369_PMU_SLEEP, 0x00c80022}, + {RES4369_MISCLDO, 0x00bd0022}, + {RES4369_LDO3P3, 0x01ad0022}, + {RES4369_FAST_LPO_AVAIL, 0x01500022}, + {RES4369_XTAL_PU, 0x05dc0022}, + {RES4369_XTAL_STABLE, 0x00220022}, + {RES4369_PWRSW_DIG, 0x02100087}, + {RES4369_SR_DIG, 0x00A000A0}, + {RES4369_SLEEP_DIG, 0x00220022}, + {RES4369_PWRSW_AUX, 0x03900087}, + {RES4369_SR_AUX, 0x01400140}, + {RES4369_SLEEP_AUX, 0x00220022}, + {RES4369_PWRSW_MAIN, 0x03900087}, + {RES4369_SR_MAIN, 0x01A001A0}, + {RES4369_SLEEP_MAIN, 0x00220022}, + {RES4369_DIG_CORE_RDY, 0x00220044}, + {RES4369_CORE_RDY_AUX, 0x00220044}, 
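+ /*
+ * Each 'updown' word packs the resource up-time in its upper 16 bits and the
+ * down-time in its lower 16 bits (cf. the res_updn_timer nvram override
+ * handling in si_pmu_res_init()), e.g. 0x00220044 programs up 0x22, down 0x44.
+ */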
+ {RES4369_ALP_AVAIL, 0x00220044}, + {RES4369_RADIO_AUX_PU, 0x006e0022}, + {RES4369_MINIPMU_AUX_PU, 0x00460022}, + {RES4369_CORE_RDY_MAIN, 0x00220022}, + {RES4369_RADIO_MAIN_PU, 0x006e0022}, + {RES4369_MINIPMU_MAIN_PU, 0x00460022}, + {RES4369_PCIE_EP_PU, 0x02100087}, + {RES4369_COLD_START_WAIT, 0x00220022}, + {RES4369_ARMHTAVAIL, 0x00a80022}, + {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL}, + {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022}, + {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4369b0_res_updown_fastlpo_pmu)[] = { + {RES4369_DUMMY, 0x00220022}, + {RES4369_ABUCK, 0x00c80022}, + {RES4369_PMU_SLEEP, 0x00c80022}, + {RES4369_MISCLDO, 0x00bd0022}, + {RES4369_LDO3P3, 0x01ad0022}, + {RES4369_FAST_LPO_AVAIL, 0x01500022}, + {RES4369_XTAL_PU, 0x05dc0022}, + {RES4369_XTAL_STABLE, 0x00220022}, + {RES4369_PWRSW_DIG, 0x02100087}, + {RES4369_SR_DIG, 0x02000200}, + {RES4369_SLEEP_DIG, 0x00220022}, + {RES4369_PWRSW_AUX, 0x03900087}, + {RES4369_SR_AUX, 0x01cc01cc}, + {RES4369_SLEEP_AUX, 0x00220022}, + {RES4369_PWRSW_MAIN, 0x03900087}, + {RES4369_SR_MAIN, 0x02000200}, + {RES4369_SLEEP_MAIN, 0x00220022}, + {RES4369_DIG_CORE_RDY, 0x00220044}, + {RES4369_CORE_RDY_AUX, 0x00220044}, + {RES4369_ALP_AVAIL, 0x00220044}, + {RES4369_RADIO_AUX_PU, 0x006e0022}, + {RES4369_MINIPMU_AUX_PU, 0x00460022}, + {RES4369_CORE_RDY_MAIN, 0x00220022}, + {RES4369_RADIO_MAIN_PU, 0x006e0022}, + {RES4369_MINIPMU_MAIN_PU, 0x00460022}, + {RES4369_PCIE_EP_PU, 0x01200087}, + {RES4369_COLD_START_WAIT, 0x00220022}, + {RES4369_ARMHTAVAIL, 0x00a80022}, + {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL}, + {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022}, + {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022}, +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4362_res_depend)[] = { + {PMURES_BIT(RES4362_DUMMY), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4362_ABUCK), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4362_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4362_MISCLDO_PU), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4362_LDO3P3_PU), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4362_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4362_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4362_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, NULL}, + {PMURES_BIT(RES4362_PWRSW_DIG), RES_DEPEND_SET, 0x060000cf, NULL}, + {PMURES_BIT(RES4362_SR_DIG), RES_DEPEND_SET, 0x060001cf, NULL}, + {PMURES_BIT(RES4362_SLEEP_DIG), RES_DEPEND_SET, 0x060003cf, NULL}, + {PMURES_BIT(RES4362_PWRSW_AUX), RES_DEPEND_SET, 0x040000cf, NULL}, + {PMURES_BIT(RES4362_SR_AUX), RES_DEPEND_SET, 0x040008cf, NULL}, + {PMURES_BIT(RES4362_SLEEP_AUX), RES_DEPEND_SET, 0x040018cf, NULL}, + {PMURES_BIT(RES4362_PWRSW_MAIN), RES_DEPEND_SET, 0x040000cf, NULL}, + {PMURES_BIT(RES4362_SR_MAIN), RES_DEPEND_SET, 0x040040cf, NULL}, + {PMURES_BIT(RES4362_SLEEP_MAIN), RES_DEPEND_SET, 0x0400c0cf, NULL}, + {PMURES_BIT(RES4362_DIG_CORE_RDY), RES_DEPEND_SET, 0x060007cf, NULL}, + {PMURES_BIT(RES4362_CORE_RDY_AUX), RES_DEPEND_SET, 0x040038cf, NULL}, + {PMURES_BIT(RES4362_ALP_AVAIL), RES_DEPEND_SET, 0x060207cf, NULL}, + {PMURES_BIT(RES4362_RADIO_AUX_PU), RES_DEPEND_SET, 0x040438df, NULL}, + {PMURES_BIT(RES4362_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x041438df, NULL}, + {PMURES_BIT(RES4362_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0401c0cf, NULL}, + {PMURES_BIT(RES4362_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0441c0df, NULL}, + {PMURES_BIT(RES4362_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x04c1c0df, NULL}, + 
{PMURES_BIT(RES4362_PCIE_EP_PU), RES_DEPEND_SET, 0x040000cf, NULL}, + {PMURES_BIT(RES4362_COLD_START_WAIT), RES_DEPEND_SET, 0x0000000f, NULL}, + {PMURES_BIT(RES4362_ARMHTAVAIL), RES_DEPEND_SET, 0x060a07cf, NULL}, + {PMURES_BIT(RES4362_HT_AVAIL), RES_DEPEND_SET, 0x060a07cf, NULL}, + {PMURES_BIT(RES4362_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fdf, NULL}, + {PMURES_BIT(RES4362_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7df, NULL}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4362_res_updown)[] = { + {RES4362_DUMMY, 0x00220022}, + {RES4362_ABUCK, 0x00c80022}, + {RES4362_PMU_SLEEP, 0x00c80022}, + {RES4362_MISCLDO_PU, 0x00bd0022}, + {RES4362_LDO3P3_PU, 0x01ad0022}, + {RES4362_FAST_LPO_AVAIL, 0x01500022}, + {RES4362_XTAL_PU, 0x05dc0022}, + {RES4362_XTAL_STABLE, 0x00220022}, + {RES4362_PWRSW_DIG, 0x009000ca}, + {RES4362_SR_DIG, 0x00A000A0}, + {RES4362_SLEEP_DIG, 0x00220022}, + {RES4362_PWRSW_AUX, 0x039000ca}, + {RES4362_SR_AUX, 0x01400140}, + {RES4362_SLEEP_AUX, 0x00220022}, + {RES4362_PWRSW_MAIN, 0x039000ca}, + {RES4362_SR_MAIN, 0x01a001a0}, + {RES4362_SLEEP_MAIN, 0x00220022}, + {RES4362_DIG_CORE_RDY, 0x00220044}, + {RES4362_CORE_RDY_AUX, 0x00220044}, + {RES4362_ALP_AVAIL, 0x00220044}, + {RES4362_RADIO_AUX_PU, 0x006e0022}, + {RES4362_MINIPMU_AUX_PU, 0x00460022}, + {RES4362_CORE_RDY_MAIN, 0x00220022}, + {RES4362_RADIO_MAIN_PU, 0x006e0022}, + {RES4362_MINIPMU_MAIN_PU, 0x00460022}, + {RES4362_PCIE_EP_PU, 0x009000ca}, + {RES4362_COLD_START_WAIT, 0x00220022}, + {RES4362_ARMHTAVAIL, 0x00a80022}, + {RES4362_HT_AVAIL, 0x00a80022}, + {RES4362_MACPHY_AUX_CLK_AVAIL, 0x00640022}, + {RES4362_MACPHY_MAIN_CLK_AVAIL, 0x00640022}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4378b0_res_updown)[] = { + {RES4378_ABUCK, 0x00c80022}, + {RES4378_PMU_SLEEP, 0x011c0022}, + {RES4378_MISC_LDO, 0x00c80022}, + {RES4378_XTAL_PU, 0x05dc0022}, + {RES4378_SR_DIG, 0x00700070}, + {RES4378_SR_AUX, 0x01800180}, + {RES4378_SR_MAIN, 0x01a001a0}, + {RES4378_RADIO_AUX_PU, 0x006e0022}, + {RES4378_MINIPMU_AUX_PU, 0x00460022}, + {RES4378_RADIO_MAIN_PU, 0x006e0022}, + {RES4378_MINIPMU_MAIN_PU, 0x00460022}, + {RES4378_CORE_RDY_CB, 0x00220022}, +#ifdef BCMPCIE_TREFUP_HW_SUPPORT + {RES4378_PWRSW_CB, 0x015e00ca}, +#endif + {RES4378_MACPHY_AUX_CLK_AVAIL, 0x00640022}, + {RES4378_MACPHY_MAIN_CLK_AVAIL, 0x00640022}, +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4378b0_res_depend)[] = { + {PMURES_BIT(RES4378_ABUCK), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4378_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4378_MISC_LDO), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4378_LDO3P3_PU), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4378_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4378_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL}, + {PMURES_BIT(RES4378_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, NULL}, + {PMURES_BIT(RES4378_PWRSW_DIG), RES_DEPEND_SET, 0x060000ef, NULL}, + {PMURES_BIT(RES4378_SR_DIG), RES_DEPEND_SET, 0x060001ef, NULL}, + {PMURES_BIT(RES4378_SLEEP_DIG), RES_DEPEND_SET, 0x060003ef, NULL}, + {PMURES_BIT(RES4378_PWRSW_AUX), RES_DEPEND_SET, 0x060000ef, NULL}, + {PMURES_BIT(RES4378_SR_AUX), RES_DEPEND_SET, 0x060008ef, NULL}, + {PMURES_BIT(RES4378_SLEEP_AUX), RES_DEPEND_SET, 0x060018ef, NULL}, + {PMURES_BIT(RES4378_PWRSW_MAIN), RES_DEPEND_SET, 0x060000ef, NULL}, + {PMURES_BIT(RES4378_SR_MAIN), RES_DEPEND_SET, 0x060040ef, NULL}, + {PMURES_BIT(RES4378_SLEEP_MAIN), RES_DEPEND_SET, 0x0600c0ef, NULL}, + {PMURES_BIT(RES4378_CORE_RDY_DIG), RES_DEPEND_SET, 
0x060007ef, NULL}, + {PMURES_BIT(RES4378_CORE_RDY_AUX), RES_DEPEND_SET, 0x06023fef, NULL}, + {PMURES_BIT(RES4378_ALP_AVAIL), RES_DEPEND_SET, 0x000000c7, NULL}, + {PMURES_BIT(RES4378_RADIO_AUX_PU), RES_DEPEND_SET, 0x06063fff, NULL}, + {PMURES_BIT(RES4378_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x06163fff, NULL}, + {PMURES_BIT(RES4378_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0603c7ef, NULL}, + {PMURES_BIT(RES4378_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0643c7ff, NULL}, + {PMURES_BIT(RES4378_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x06c3c7ff, NULL}, +#ifdef BCMPCIE_TREFUP_HW_SUPPORT + {PMURES_BIT(RES4378_CORE_RDY_CB), RES_DEPEND_SET, 0x0400002f, NULL}, +#else + {PMURES_BIT(RES4378_CORE_RDY_CB), RES_DEPEND_SET, 0x040000ef, NULL}, +#endif + {PMURES_BIT(RES4378_PWRSW_CB), RES_DEPEND_SET, 0x0000002f, NULL}, + {PMURES_BIT(RES4378_ARMHTAVAIL), RES_DEPEND_SET, 0x000800c7, NULL}, + {PMURES_BIT(RES4378_HT_AVAIL), RES_DEPEND_SET, 0x000800c7, NULL}, + {PMURES_BIT(RES4378_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fff, NULL}, + {PMURES_BIT(RES4378_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7ff, NULL}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4387b0_res_updown_qt)[] = { + {RES4387_XTAL_PU, 0x012c0033}, + {RES4387_PWRSW_DIG, 0x38993899}, + {RES4387_PWRSW_AUX, 0x38993899}, + {RES4387_PWRSW_SCAN, 0x38993899}, + {RES4387_PWRSW_MAIN, 0x38993899}, + {RES4387_CORE_RDY_CB, 0x00960033}, +}; + +static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4387b0_res_subst_trans_tmr_qt)[] = { + {RES4387_PWRSW_DIG, 0, 0x38993800}, + {RES4387_PWRSW_DIG, 1, 0x36000600}, + {RES4387_PWRSW_DIG, 2, 0x01000002}, + + {RES4387_PWRSW_AUX, 0, 0x38993800}, + {RES4387_PWRSW_AUX, 1, 0x36000600}, + {RES4387_PWRSW_AUX, 2, 0x01000002}, + + {RES4387_PWRSW_SCAN, 0, 0x38993800}, + {RES4387_PWRSW_SCAN, 1, 0x36000600}, + {RES4387_PWRSW_SCAN, 2, 0x01000002}, + + {RES4387_PWRSW_MAIN, 0, 0x38993800}, + {RES4387_PWRSW_MAIN, 1, 0x36000600}, + {RES4387_PWRSW_MAIN, 2, 0x01000002}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4387b0_res_updown)[] = { + {RES4387_PMU_SLEEP, 0x00960022}, + {RES4387_MISC_LDO, 0x00320022}, + {RES4387_XTAL_HQ, 0x00210021}, + {RES4387_XTAL_PU, 0x03e80033}, + {RES4387_PWRSW_DIG, 0x04b002bc}, + {RES4387_PWRSW_AUX, 0x060e03bc}, + {RES4387_PWRSW_SCAN, 0x060e03bc}, + {RES4387_PWRSW_MAIN, 0x060e03bc}, + {RES4387_CORE_RDY_CB, 0x000a0033}, + {RES4387_PWRSW_CB, 0x006400ca}, +}; + +static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4387b0_res_subst_trans_tmr)[] = { + {RES4387_PWRSW_DIG, 0, 0x04b002bc}, + {RES4387_PWRSW_DIG, 1, 0x02500210}, + {RES4387_PWRSW_DIG, 2, 0x00a00010}, + + {RES4387_PWRSW_AUX, 0, 0x060e03ac}, + {RES4387_PWRSW_AUX, 1, 0x028a0134}, + {RES4387_PWRSW_AUX, 2, 0x00320002}, + + {RES4387_PWRSW_MAIN, 0, 0x060e03b2}, + {RES4387_PWRSW_MAIN, 1, 0x028a0134}, + {RES4387_PWRSW_MAIN, 2, 0x00320002}, + + {RES4387_PWRSW_SCAN, 0, 0x060e03b2}, + {RES4387_PWRSW_SCAN, 1, 0x028a0134}, + {RES4387_PWRSW_SCAN, 2, 0x00320002}, +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4387b0_res_depend)[] = { + {PMURES_BIT(RES4387_DUMMY), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_RESERVED_1), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_PMU_SLEEP), RES_DEPEND_SET, 0x1, NULL}, + {PMURES_BIT(RES4387_MISC_LDO), RES_DEPEND_SET, 0x5, NULL}, + {PMURES_BIT(RES4387_RESERVED_4), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_XTAL_HQ), RES_DEPEND_SET, 0xc5, NULL}, + {PMURES_BIT(RES4387_XTAL_PU), RES_DEPEND_SET, 0x5, NULL}, + {PMURES_BIT(RES4387_XTAL_STABLE), RES_DEPEND_SET, 0x45, NULL}, + {PMURES_BIT(RES4387_PWRSW_DIG), RES_DEPEND_SET, 
0x060000CD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_BTMAIN), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_BTSC), RES_DEPEND_SET, 0xC5, NULL}, + {PMURES_BIT(RES4387_PWRSW_AUX), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_PWRSW_SCAN), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060010CD, NULL}, + {PMURES_BIT(RES4387_PWRSW_MAIN), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_RESERVED_15), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_RESERVED_16), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001CD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209CD, NULL}, + {PMURES_BIT(RES4387_ALP_AVAIL), RES_DEPEND_SET, 0xC5, NULL}, + {PMURES_BIT(RES4387_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609CD, NULL}, + {PMURES_BIT(RES4387_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060030CD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241CD, NULL}, + {PMURES_BIT(RES4387_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241CD, NULL}, + {PMURES_BIT(RES4387_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x162830CD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_CB), RES_DEPEND_SET, 0x0400000D, NULL}, + {PMURES_BIT(RES4387_PWRSW_CB), RES_DEPEND_SET, 0x0000000D, NULL}, + {PMURES_BIT(RES4387_ARMCLK_AVAIL), RES_DEPEND_SET, 0x000800CD, NULL}, + {PMURES_BIT(RES4387_HT_AVAIL), RES_DEPEND_SET, 0x000800CD, NULL}, + {PMURES_BIT(RES4387_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161E09ED, NULL}, + {PMURES_BIT(RES4387_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16CA41ED, NULL}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4387c0_res_updown_topoff)[] = { + {RES4387_PMU_SLEEP, 0x02000022}, + {RES4387_MISC_LDO, 0x00320022}, + {RES4387_SERDES_AFE_RET, 0x00010001}, + {RES4387_XTAL_HQ, 0x00210021}, + {RES4387_XTAL_PU, 0x03e80033}, + {RES4387_PWRSW_DIG, 0x00d20102}, + {RES4387_PWRSW_AUX, 0x01c201e2}, + {RES4387_PWRSW_SCAN, 0x01020122}, + {RES4387_PWRSW_MAIN, 0x02220242}, + {RES4387_CORE_RDY_CB, 0x000a0033}, + {RES4387_PWRSW_CB, 0x006400ca}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4387c0_res_updown)[] = { +#ifdef BCM_PMU_FLL_PU_MANAGE + {RES4387_FAST_LPO_AVAIL, 0x00960001}, +#endif + {RES4387_PMU_SLEEP, 0x00960022}, + {RES4387_MISC_LDO, 0x00320022}, + {RES4387_XTAL_HQ, 0x00210021}, + {RES4387_XTAL_PU, 0x03e80033}, + {RES4387_PWRSW_DIG, 0x01320172}, + {RES4387_PWRSW_AUX, 0x01c201e2}, + {RES4387_PWRSW_SCAN, 0x019201b2}, + {RES4387_PWRSW_MAIN, 0x02220242}, + {RES4387_CORE_RDY_CB, 0x000a0033}, + {RES4387_PWRSW_CB, 0x006400ca}, +}; + +static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4387c0_res_subst_trans_tmr)[] = { + {RES4387_PWRSW_DIG, 0, 0x01320142}, + {RES4387_PWRSW_DIG, 1, 0x00e2005a}, + {RES4387_PWRSW_DIG, 2, 0x00c20052}, + {RES4387_PWRSW_DIG, 3, 0x00020002}, + + {RES4387_PWRSW_AUX, 0, 0x01c201b2}, + {RES4387_PWRSW_AUX, 1, 0x0172005a}, + {RES4387_PWRSW_AUX, 2, 0x01520052}, + {RES4387_PWRSW_AUX, 3, 0x00020002}, + + {RES4387_PWRSW_MAIN, 0, 0x02220212}, + {RES4387_PWRSW_MAIN, 1, 0x01d2005a}, + {RES4387_PWRSW_MAIN, 2, 0x01b20052}, + {RES4387_PWRSW_MAIN, 3, 0x00020002}, + + {RES4387_PWRSW_SCAN, 0, 0x01920182}, + {RES4387_PWRSW_SCAN, 1, 0x0142005a}, + {RES4387_PWRSW_SCAN, 2, 0x01220052}, + {RES4387_PWRSW_SCAN, 3, 0x00020002}, +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4387c0_res_depend)[] = { + {PMURES_BIT(RES4387_DUMMY), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_PMU_LP), RES_DEPEND_SET, 0x1, NULL}, + {PMURES_BIT(RES4387_MISC_LDO), RES_DEPEND_SET, 
0x5, NULL}, + {PMURES_BIT(RES4387_SERDES_AFE_RET), RES_DEPEND_SET, 0xD, NULL}, + {PMURES_BIT(RES4387_XTAL_HQ), RES_DEPEND_SET, 0xC5, NULL}, + {PMURES_BIT(RES4387_XTAL_PU), RES_DEPEND_SET, 0x5, NULL}, + {PMURES_BIT(RES4387_XTAL_STABLE), RES_DEPEND_SET, 0x45, NULL}, + {PMURES_BIT(RES4387_PWRSW_DIG), RES_DEPEND_SET, 0x060000DD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_BTMAIN), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_BTSC), RES_DEPEND_SET, 0xC5, NULL}, + {PMURES_BIT(RES4387_PWRSW_AUX), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_PWRSW_SCAN), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060010DD, NULL}, + {PMURES_BIT(RES4387_PWRSW_MAIN), RES_DEPEND_SET, 0xCD, NULL}, + {PMURES_BIT(RES4387_XTAL_PM_CLK), RES_DEPEND_SET, 0xC5, NULL}, + {PMURES_BIT(RES4387_RESERVED_16), RES_DEPEND_SET, 0x0, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001DD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209DD, NULL}, + {PMURES_BIT(RES4387_ALP_AVAIL), RES_DEPEND_SET, 0x80C5, NULL}, + {PMURES_BIT(RES4387_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609DD, NULL}, + {PMURES_BIT(RES4387_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060030DD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241DD, NULL}, + {PMURES_BIT(RES4387_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241DD, NULL}, + {PMURES_BIT(RES4387_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x1628B0DD, NULL}, + {PMURES_BIT(RES4387_CORE_RDY_CB), RES_DEPEND_SET, 0x0400001D, NULL}, + {PMURES_BIT(RES4387_PWRSW_CB), RES_DEPEND_SET, 0x0000001D, NULL}, + {PMURES_BIT(RES4387_ARMCLK_AVAIL), RES_DEPEND_SET, 0x000880CD, NULL}, + {PMURES_BIT(RES4387_HT_AVAIL), RES_DEPEND_SET, 0x000880CD, NULL}, + {PMURES_BIT(RES4387_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161E89FD, NULL}, + {PMURES_BIT(RES4387_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16CAC1FD, NULL}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4388a0_res_updown_qt)[] = { + {RES4388_XTAL_PU, 0x012c0033}, + {RES4388_PWRSW_DIG, 0x38993899}, + {RES4388_PWRSW_AUX, 0x38993899}, + {RES4388_PWRSW_SCAN, 0x38993899}, + {RES4388_PWRSW_MAIN, 0x38993899}, + {RES4388_CORE_RDY_CB, 0x00960033}, +}; + +static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4388a0_res_subst_trans_tmr_qt)[] = { + {RES4388_PWRSW_DIG, 0, 0x38993800}, + {RES4388_PWRSW_DIG, 1, 0x36c00600}, + {RES4388_PWRSW_DIG, 2, 0x360005a0}, + {RES4388_PWRSW_DIG, 3, 0x01000002}, + + {RES4388_PWRSW_AUX, 0, 0x38993800}, + {RES4388_PWRSW_AUX, 1, 0x36c00600}, + {RES4388_PWRSW_AUX, 2, 0x360005a0}, + {RES4388_PWRSW_AUX, 3, 0x01000002}, + + {RES4388_PWRSW_MAIN, 0, 0x38993800}, + {RES4388_PWRSW_MAIN, 1, 0x36c00600}, + {RES4388_PWRSW_MAIN, 2, 0x360005a0}, + {RES4388_PWRSW_MAIN, 3, 0x01000002}, + + {RES4388_PWRSW_SCAN, 0, 0x38993800}, + {RES4388_PWRSW_SCAN, 1, 0x33c00600}, + {RES4388_PWRSW_SCAN, 2, 0x330005a0}, + {RES4388_PWRSW_SCAN, 3, 0x01000002}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4388a0_res_updown)[] = { +#ifdef BCM_PMU_FLL_PU_MANAGE + {RES4388_FAST_LPO_AVAIL, 0x00960001}, +#endif /* BCM_PMU_FLL_PU_MANAGE */ + {RES4388_PMU_LP, 0x00960022}, + {RES4388_MISC_LDO, 0x00320022}, + {RES4388_XTAL_HQ, 0x00210021}, + {RES4388_XTAL_PU, 0x03e80033}, + {RES4388_PWRSW_DIG, 0x042c0349}, + {RES4388_PWRSW_AUX, 0x0740046a}, + {RES4388_PWRSW_SCAN, 0x03c802e8}, + {RES4388_PWRSW_MAIN, 0x08080532}, + {RES4388_CORE_RDY_CB, 0x000a0033}, + {RES4388_PWRSW_CB, 0x006400ca}, + {RES4388_MACPHY_CLK_MAIN, 0x00860022}, +}; + +static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4388a0_res_subst_trans_tmr)[] = { + 
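/*
+ * Each row is {resource, substate, packed transition timer}: si_pmu_res_init()
+ * writes the substate index into res_table_sel (shifted by PMU_RES_SUBSTATE_SHIFT
+ * and OR'd with the resource number) and the timer into rsrc_substate_trans_tmr.
+ */
+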
{RES4388_PWRSW_DIG, 0, 0x0428033c}, + {RES4388_PWRSW_DIG, 1, 0x028c0210}, + {RES4388_PWRSW_DIG, 2, 0x01cc01b0}, + {RES4388_PWRSW_DIG, 3, 0x00a00010}, + + {RES4388_PWRSW_AUX, 0, 0x0740045a}, + {RES4388_PWRSW_AUX, 1, 0x03580202}, + {RES4388_PWRSW_AUX, 2, 0x02f801a2}, + {RES4388_PWRSW_AUX, 3, 0x00a00002}, + + {RES4388_PWRSW_MAIN, 0, 0x08080522}, + {RES4388_PWRSW_MAIN, 1, 0x04200202}, + {RES4388_PWRSW_MAIN, 2, 0x03c001a2}, + {RES4388_PWRSW_MAIN, 3, 0x00a00002}, + + {RES4388_PWRSW_SCAN, 0, 0x03c402d8}, + {RES4388_PWRSW_SCAN, 1, 0x02280210}, + {RES4388_PWRSW_SCAN, 2, 0x016801b0}, + {RES4388_PWRSW_SCAN, 3, 0x00a00010}, +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4388a0_res_depend)[] = { + {PMURES_BIT(RES4388_DUMMY), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4388_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4388_PMU_LP), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4388_MISC_LDO), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4388_SERDES_AFE_RET), RES_DEPEND_SET, 0x0000000d, NULL}, + {PMURES_BIT(RES4388_XTAL_HQ), RES_DEPEND_SET, 0x000000c5, NULL}, + {PMURES_BIT(RES4388_XTAL_PU), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4388_XTAL_STABLE), RES_DEPEND_SET, 0x00000045, NULL}, + {PMURES_BIT(RES4388_PWRSW_DIG), RES_DEPEND_SET, 0x060000dd, NULL}, + {PMURES_BIT(RES4388_BTMC_TOP_RDY), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4388_BTSC_TOP_RDY), RES_DEPEND_SET, 0x000000c5, NULL}, + {PMURES_BIT(RES4388_PWRSW_AUX), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4388_PWRSW_SCAN), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4388_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060211dd, NULL}, + {PMURES_BIT(RES4388_PWRSW_MAIN), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4388_RESERVED_15), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4388_RESERVED_16), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4388_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001dd, NULL}, + {PMURES_BIT(RES4388_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209dd, NULL}, + {PMURES_BIT(RES4388_ALP_AVAIL), RES_DEPEND_SET, 0x000000c5, NULL}, + {PMURES_BIT(RES4388_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609dd, NULL}, + {PMURES_BIT(RES4388_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060231dd, NULL}, + {PMURES_BIT(RES4388_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241dd, NULL}, + {PMURES_BIT(RES4388_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241dd, NULL}, + {PMURES_BIT(RES4388_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x162a31fd, NULL}, + {PMURES_BIT(RES4388_CORE_RDY_CB), RES_DEPEND_SET, 0x040000dd, NULL}, + {PMURES_BIT(RES4388_PWRSW_CB), RES_DEPEND_SET, 0x000000dd, NULL}, + {PMURES_BIT(RES4388_ARMCLKAVAIL), RES_DEPEND_SET, 0x000800cd, NULL}, + {PMURES_BIT(RES4388_HT_AVAIL), RES_DEPEND_SET, 0x000800cd, NULL}, + {PMURES_BIT(RES4388_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161e09fd, NULL}, + {PMURES_BIT(RES4388_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16ca41fd, NULL}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4389b0_res_updown_qt)[] = { + {RES4389_XTAL_PU, 0x012c0033}, + {RES4389_PWRSW_DIG, 0x38993899}, + {RES4389_PWRSW_AUX, 0x38993899}, + {RES4389_PWRSW_SCAN, 0x38993899}, + {RES4389_PWRSW_MAIN, 0x38993899}, + {RES4389_CORE_RDY_CB, 0x00960033}, +}; + +static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4389b0_res_subst_trans_tmr_qt)[] = { + {RES4389_PWRSW_DIG, 0, 0x38993800}, + {RES4389_PWRSW_DIG, 1, 0x36c00600}, + {RES4389_PWRSW_DIG, 2, 0x360005a0}, + {RES4389_PWRSW_DIG, 3, 0x01000002}, + + {RES4389_PWRSW_AUX, 0, 0x38993800}, + {RES4389_PWRSW_AUX, 1, 0x36c00600}, + {RES4389_PWRSW_AUX, 2, 0x360005a0}, + 
{RES4389_PWRSW_AUX, 3, 0x01000002}, + + {RES4389_PWRSW_MAIN, 0, 0x38993800}, + {RES4389_PWRSW_MAIN, 1, 0x36c00600}, + {RES4389_PWRSW_MAIN, 2, 0x360005a0}, + {RES4389_PWRSW_MAIN, 3, 0x01000002}, + + {RES4389_PWRSW_SCAN, 0, 0x38993800}, + {RES4389_PWRSW_SCAN, 1, 0x33c00600}, + {RES4389_PWRSW_SCAN, 2, 0x330005a0}, + {RES4389_PWRSW_SCAN, 3, 0x01000002}, +}; + +static const pmu_res_updown_t BCMATTACHDATA(bcm4389b0_res_updown)[] = { +#ifdef BCM_PMU_FLL_PU_MANAGE + {RES4389_FAST_LPO_AVAIL, 0x001e0001}, +#endif /* BCM_PMU_FLL_PU_MANAGE */ + {RES4389_PMU_LP, 0x00960022}, + {RES4389_MISC_LDO, 0x00320022}, + {RES4389_XTAL_HQ, 0x00210021}, + {RES4389_XTAL_PU, 0x03e80033}, + {RES4389_PWRSW_DIG, 0x042c0349}, + {RES4389_PWRSW_AUX, 0x0740046a}, + {RES4389_PWRSW_SCAN, 0x03c802e8}, + {RES4389_PWRSW_MAIN, 0x08080532}, + {RES4389_CORE_RDY_CB, 0x000a0033}, + {RES4389_PWRSW_CB, 0x006400ca}, + {RES4389_MACPHY_CLK_MAIN, 0x00860022}, +}; + +static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4389b0_res_subst_trans_tmr)[] = { + {RES4389_PWRSW_DIG, 0, 0x0428033c}, + {RES4389_PWRSW_DIG, 1, 0x028c0210}, + {RES4389_PWRSW_DIG, 2, 0x01cc01b0}, + {RES4389_PWRSW_DIG, 3, 0x00a00010}, + + {RES4389_PWRSW_AUX, 0, 0x0740045a}, + {RES4389_PWRSW_AUX, 1, 0x03580202}, + {RES4389_PWRSW_AUX, 2, 0x02f801a2}, + {RES4389_PWRSW_AUX, 3, 0x00a00002}, + + {RES4389_PWRSW_MAIN, 0, 0x08080522}, + {RES4389_PWRSW_MAIN, 1, 0x04200202}, + {RES4389_PWRSW_MAIN, 2, 0x03c001a2}, + {RES4389_PWRSW_MAIN, 3, 0x00a00002}, + + {RES4389_PWRSW_SCAN, 0, 0x03c402d8}, + {RES4389_PWRSW_SCAN, 1, 0x02280210}, + {RES4389_PWRSW_SCAN, 2, 0x016801b0}, + {RES4389_PWRSW_SCAN, 3, 0x00a00010}, +}; + +static pmu_res_depend_t BCMATTACHDATA(bcm4389b0_res_depend)[] = { + {PMURES_BIT(RES4389_DUMMY), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4389_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4389_PMU_LP), RES_DEPEND_SET, 0x00000001, NULL}, + {PMURES_BIT(RES4389_MISC_LDO), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4389_SERDES_AFE_RET), RES_DEPEND_SET, 0x0000000d, NULL}, + {PMURES_BIT(RES4389_XTAL_HQ), RES_DEPEND_SET, 0x000000c5, NULL}, + {PMURES_BIT(RES4389_XTAL_PU), RES_DEPEND_SET, 0x00000005, NULL}, + {PMURES_BIT(RES4389_XTAL_STABLE), RES_DEPEND_SET, 0x00000045, NULL}, + {PMURES_BIT(RES4389_PWRSW_DIG), RES_DEPEND_SET, 0x060000dd, NULL}, + {PMURES_BIT(RES4389_BTMC_TOP_RDY), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4389_BTSC_TOP_RDY), RES_DEPEND_SET, 0x000000c5, NULL}, + {PMURES_BIT(RES4389_PWRSW_AUX), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4389_PWRSW_SCAN), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4389_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060211dd, NULL}, + {PMURES_BIT(RES4389_PWRSW_MAIN), RES_DEPEND_SET, 0x000000cd, NULL}, + {PMURES_BIT(RES4389_RESERVED_15), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4389_RESERVED_16), RES_DEPEND_SET, 0x00000000, NULL}, + {PMURES_BIT(RES4389_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001dd, NULL}, + {PMURES_BIT(RES4389_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209dd, NULL}, + {PMURES_BIT(RES4389_ALP_AVAIL), RES_DEPEND_SET, 0x000000c5, NULL}, + {PMURES_BIT(RES4389_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609dd, NULL}, + {PMURES_BIT(RES4389_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060231dd, NULL}, + {PMURES_BIT(RES4389_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241dd, NULL}, + {PMURES_BIT(RES4389_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241dd, NULL}, + {PMURES_BIT(RES4389_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x162a31fd, NULL}, + {PMURES_BIT(RES4389_CORE_RDY_CB), RES_DEPEND_SET, 0x040000dd, NULL}, 
+ {PMURES_BIT(RES4389_PWRSW_CB), RES_DEPEND_SET, 0x000000dd, NULL},
+ {PMURES_BIT(RES4389_ARMCLKAVAIL), RES_DEPEND_SET, 0x000800cd, NULL},
+ {PMURES_BIT(RES4389_HT_AVAIL), RES_DEPEND_SET, 0x000800cd, NULL},
+ {PMURES_BIT(RES4389_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161e09fd, NULL},
+ {PMURES_BIT(RES4389_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16ca41fd, NULL},
+};
+
+/** Enable or disable the AVB timer clock feature, depending on set_flag */
+void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag)
+{
+ uint32 min_mask = 0, max_mask = 0;
+ pmuregs_t *pmu;
+ uint origidx;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID || CHIPID(sih->chip) == BCM43460_CHIP_ID) &&
+ CHIPREV(sih->chiprev) >= 0x3) {
+ int cst_ht = CST4360_RSRC_INIT_MODE(sih->chipst) & 0x1;
+ if (cst_ht == 0) {
+ /* Enable the AVB timers for proxd feature */
+ min_mask = R_REG(osh, &pmu->min_res_mask);
+ max_mask = R_REG(osh, &pmu->max_res_mask);
+ if (set_flag) {
+ max_mask |= PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU);
+ max_mask |= PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL);
+ min_mask |= PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU);
+ min_mask |= PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL);
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ W_REG(osh, &pmu->max_res_mask, max_mask);
+ } else {
+ AND_REG(osh, &pmu->min_res_mask,
+ ~PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU));
+ AND_REG(osh, &pmu->min_res_mask,
+ ~PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL));
+ AND_REG(osh, &pmu->max_res_mask,
+ ~PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU));
+ AND_REG(osh, &pmu->max_res_mask,
+ ~PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL));
+ }
+ /* Wait for the resource uptime to elapse */
+ OSL_DELAY(100);
+ }
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/**
+ * Determines min/max rsrc masks. Normally hardware contains these masks, and software reads the
+ * masks from hardware. Note that masks are sometimes dependent on chip straps.
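+ * Roughly, the min mask lists the resources that must remain up even in the
+ * lowest power state, while the max mask bounds the set of resources the PMU
+ * may ever bring up; si_pmu_res_init() keeps min a subset of max.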
+ */ +static void +si_pmu_res_masks(si_t *sih, uint32 *pmin, uint32 *pmax) +{ + uint32 min_mask = 0, max_mask = 0; + + /* determine min/max rsrc masks */ + switch (CHIPID(sih->chip)) { + case BCM4360_CHIP_ID: + case BCM4352_CHIP_ID: + if (CHIPREV(sih->chiprev) >= 0x4) { + min_mask = 0x103; + } + /* Continue - Don't break */ + case BCM43460_CHIP_ID: + case BCM43526_CHIP_ID: + if (CHIPREV(sih->chiprev) >= 0x3) { + /* PR 110203 */ + int cst_ht = CST4360_RSRC_INIT_MODE(sih->chipst) & 0x1; + if (cst_ht == 0) + max_mask = 0x1ff; + } + break; + + CASE_BCM43602_CHIP: + /* as a bare minimum, have ALP clock running */ + min_mask = PMURES_BIT(RES43602_LPLDO_PU) | PMURES_BIT(RES43602_REGULATOR) | + PMURES_BIT(RES43602_PMU_SLEEP) | PMURES_BIT(RES43602_XTALLDO_PU) | + PMURES_BIT(RES43602_SERDES_PU) | PMURES_BIT(RES43602_BBPLL_PWRSW_PU) | + PMURES_BIT(RES43602_SR_CLK_START) | PMURES_BIT(RES43602_SR_PHY_PWRSW) | + PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_XTAL_PU) | + PMURES_BIT(RES43602_PERST_OVR) | PMURES_BIT(RES43602_SR_CLK_STABLE) | + PMURES_BIT(RES43602_SR_SAVE_RESTORE) | PMURES_BIT(RES43602_SR_SLEEP) | + PMURES_BIT(RES43602_LQ_START) | PMURES_BIT(RES43602_LQ_AVAIL) | + PMURES_BIT(RES43602_WL_CORE_RDY) | + PMURES_BIT(RES43602_ALP_AVAIL); + + if (sih->chippkg == BCM43602_12x12_PKG_ID) /* LPLDO WAR */ + min_mask &= ~PMURES_BIT(RES43602_LPLDO_PU); + + max_mask = (1<<3) | min_mask | PMURES_BIT(RES43602_RADIO_PU) | + PMURES_BIT(RES43602_RFLDO_PU) | PMURES_BIT(RES43602_HT_START) | + PMURES_BIT(RES43602_HT_AVAIL) | PMURES_BIT(RES43602_MACPHY_CLKAVAIL); + +#if defined(SAVERESTORE) + /* min_mask is updated after SR code is downloaded to txfifo */ + if (SR_ENAB() && sr_isenab(sih)) { + ASSERT(sih->chippkg != BCM43602_12x12_PKG_ID); + min_mask = PMURES_BIT(RES43602_LPLDO_PU); + } +#endif /* SAVERESTORE */ + break; + + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + /* Set the bits for all resources in the max mask except for the SR Engine */ + max_mask = 0x7FFFFFFF; + break; + case BCM4369_CHIP_GRPID: + min_mask = 0x64fffff; +#if defined(SAVERESTORE) + if (SR_ENAB() && sr_isenab(sih)) { + if (si_get_nvram_rfldo3p3_war(sih)) { + min_mask = 0x0000011; + } else { + min_mask = 0x0000001; + } + } +#endif /* SAVERESTORE */ + max_mask = 0x7FFFFFFF; + break; + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + min_mask = 0x064fffff; +#if defined(SAVERESTORE) + if (SR_ENAB()) { + if (!sr_isenab(sih)) { + min_mask = 0x064fffff; + } else { + min_mask = PMURES_BIT(RES4378_DUMMY); + } + } +#endif /* SAVERESTORE */ + max_mask = 0x7FFFFFFF; + break; + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + min_mask = 0x64fffff; +#if defined(SAVERESTORE) + if (SR_ENAB()) { + if (sr_isenab(sih)) { + min_mask = PMURES_BIT(RES4387_DUMMY); + } else { + min_mask = pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0); + if (PMU_FLL_PU_ENAB()) { + min_mask |= PMURES_BIT(RES4387_FAST_LPO_AVAIL) | + PMURES_BIT(RES4387_PMU_LP); + } + } + } +#endif /* SAVERESTORE */ + + max_mask = 0x7FFFFFFF; + break; + + case BCM4388_CHIP_GRPID: + min_mask = 0x64fffff; +#if defined(SAVERESTORE) + if (SR_ENAB()) { + if (sr_isenab(sih)) { + min_mask = PMURES_BIT(RES4388_DUMMY); + } else { + min_mask = pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0); + if (PMU_FLL_PU_ENAB()) { + min_mask |= PMURES_BIT(RES4388_FAST_LPO_AVAIL) | + PMURES_BIT(RES4388_PMU_LP); + } + } + } +#endif /* SAVERESTORE */ + max_mask = 0x7FFFFFFF; + break; + + case BCM4389_CHIP_GRPID: + /* + * check later if this can be replaced with chip 
default value read from + * PMU register - min_res_mask and remove the code in SR_ENAB() portion + */ + min_mask = 0x64fffff; +#if defined(SAVERESTORE) + if (SR_ENAB()) { + if (sr_isenab(sih)) { + min_mask = PMURES_BIT(RES4389_DUMMY); + } else { + min_mask = pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0); + if (PMU_FLL_PU_ENAB()) { + min_mask |= PMURES_BIT(RES4389_FAST_LPO_AVAIL) | + PMURES_BIT(RES4389_PMU_LP); + } + } + } +#endif /* SAVERESTORE */ + max_mask = 0x7FFFFFFF; + break; + + case BCM4397_CHIP_GRPID: + min_mask = 0x64fffff; + max_mask = 0x7FFFFFFF; + break; + + case BCM4362_CHIP_GRPID: + min_mask = 0x64fffff; +#if defined(SAVERESTORE) + if (SR_ENAB() && sr_isenab(sih)) { + min_mask = (PMURES_BIT(RES4362_DUMMY)); + } +#endif /* SAVERESTORE */ + max_mask = 0x7FFFFFFF; + break; + + default: + PMU_ERROR(("MIN and MAX mask is not programmed\n")); + break; + } + + if (!FWSIGN_ENAB()) { + /* nvram override */ + si_nvram_res_masks(sih, &min_mask, &max_mask); + } + + *pmin = min_mask; + *pmax = max_mask; +} /* si_pmu_res_masks */ + +/** + * resource dependencies can change because of the host interface + * selected, to work around an issue, or for more optimal power + * savings after tape out + */ +#ifdef DUAL_PMU_SEQUENCE +static void +si_pmu_resdeptbl_upd(si_t *sih, osl_t *osh, pmuregs_t *pmu, + const pmu_res_depend_t *restable, uint tablesz) +#else +static void +BCMATTACHFN(si_pmu_resdeptbl_upd)(si_t *sih, osl_t *osh, pmuregs_t *pmu, + const pmu_res_depend_t *restable, uint tablesz) +#endif /* DUAL_PMU_SEQUENCE */ +{ + uint i, rsrcs; + + if (tablesz == 0) + return; + + ASSERT(restable != NULL); + + rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT; + /* Program resource dependencies table */ + while (tablesz--) { + if (restable[tablesz].filter != NULL && + !(restable[tablesz].filter)(sih)) + continue; + for (i = 0; i < rsrcs; i ++) { + if ((restable[tablesz].res_mask & + PMURES_BIT(i)) == 0) + continue; + W_REG(osh, &pmu->res_table_sel, i); + switch (restable[tablesz].action) { + case RES_DEPEND_SET: + PMU_MSG(("Changing rsrc %d res_dep_mask to 0x%x\n", i, + restable[tablesz].depend_mask)); + W_REG(osh, &pmu->res_dep_mask, + restable[tablesz].depend_mask); + break; + case RES_DEPEND_ADD: + PMU_MSG(("Adding 0x%x to rsrc %d res_dep_mask\n", + restable[tablesz].depend_mask, i)); + OR_REG(osh, &pmu->res_dep_mask, + restable[tablesz].depend_mask); + break; + case RES_DEPEND_REMOVE: + PMU_MSG(("Removing 0x%x from rsrc %d res_dep_mask\n", + restable[tablesz].depend_mask, i)); + AND_REG(osh, &pmu->res_dep_mask, + ~restable[tablesz].depend_mask); + break; + default: + ASSERT(0); + break; + } + } + } +} /* si_pmu_resdeptbl_upd */ + +static void +BCMATTACHFN(si_pmu_dep_table_fll_pu_fixup)(si_t *sih, osl_t *osh, + pmu_res_depend_t *pmu_res_depend_table, uint pmu_res_depend_table_sz) +{ + uint i; + + if (!PMU_FLL_PU_ENAB()) { + return; + } + + switch (CHIPID(sih->chip)) { + case BCM4387_CHIP_GRPID: + for (i = 0; i < pmu_res_depend_table_sz; i ++) { + if (pmu_res_depend_table[i].res_mask == + PMURES_BIT(RES4387_FAST_LPO_AVAIL)) { + pmu_res_depend_table[i].depend_mask = PMURES_BIT(RES4387_DUMMY) | + PMURES_BIT(RES4387_PMU_LP); + } else if ((pmu_res_depend_table[i].res_mask != + PMURES_BIT(RES4387_DUMMY)) && + (pmu_res_depend_table[i].res_mask != + PMURES_BIT(RES4387_PMU_LP)) && + (pmu_res_depend_table[i].res_mask != + PMURES_BIT(RES4387_RESERVED_16))) { + pmu_res_depend_table[i].depend_mask |= + PMURES_BIT(RES4387_FAST_LPO_AVAIL); + } + } + break; + case BCM4388_CHIP_GRPID: + for (i = 0; i < 
pmu_res_depend_table_sz; i ++) {
+ if (pmu_res_depend_table[i].res_mask ==
+ PMURES_BIT(RES4388_FAST_LPO_AVAIL)) {
+ pmu_res_depend_table[i].depend_mask = PMURES_BIT(RES4388_DUMMY) |
+ PMURES_BIT(RES4388_PMU_LP);
+ } else if ((pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_DUMMY)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_PMU_LP)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_RESERVED_15)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_RESERVED_16))) {
+ pmu_res_depend_table[i].depend_mask |=
+ PMURES_BIT(RES4388_FAST_LPO_AVAIL);
+ }
+ }
+ break;
+ case BCM4389_CHIP_GRPID:
+ for (i = 0; i < pmu_res_depend_table_sz; i ++) {
+ if (pmu_res_depend_table[i].res_mask ==
+ PMURES_BIT(RES4389_FAST_LPO_AVAIL)) {
+ pmu_res_depend_table[i].depend_mask = PMURES_BIT(RES4389_DUMMY) |
+ PMURES_BIT(RES4389_PMU_LP);
+ } else if ((pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4389_DUMMY)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4389_PMU_LP)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4389_RESERVED_16))) {
+ pmu_res_depend_table[i].depend_mask |=
+ PMURES_BIT(RES4389_FAST_LPO_AVAIL);
+ }
+ }
+ break;
+ default:
+ PMU_MSG(("si_pmu_dep_table_fll_pu_fixup: unsupported chip!\n"));
+ ASSERT(0);
+ break;
+ }
+}
+
+/** Initialize PMU hardware resources. */
+void
+BCMATTACHFN(si_pmu_res_init)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ const pmu_res_updown_t *pmu_res_updown_table = NULL;
+ uint pmu_res_updown_table_sz = 0;
+ const pmu_res_subst_trans_tmr_t *pmu_res_subst_trans_tmr_table = NULL;
+ uint pmu_res_subst_trans_tmr_table_sz = 0;
+ pmu_res_depend_t *pmu_res_depend_table = NULL;
+ uint pmu_res_depend_table_sz = 0;
+#ifndef BCM_BOOTLOADER
+ const pmu_res_depend_t *pmu_res_depend_pciewar_table[2] = {NULL, NULL};
+ uint pmu_res_depend_pciewar_table_sz[2] = {0, 0};
+#endif /* BCM_BOOTLOADER */
+ uint32 min_mask = 0, max_mask = 0;
+ char name[8];
+ const char *val;
+ uint i, rsrcs;
+ uint8 fastlpo_dis = fastlpo_dis_get();
+ uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get();
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ /*
+ * Hardware contains the resource updown and dependency tables. Software tables are used
+ * to override the hardware tables only when a chip has a hardware problem.
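+ * The tables are applied by selecting each resource through res_table_sel and
+ * rewriting its res_updn_timer / res_dep_mask registers.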
+ */ + switch (CHIPID(sih->chip)) { + case BCM4360_CHIP_ID: + case BCM4352_CHIP_ID: + if (CHIPREV(sih->chiprev) < 4) { + pmu_res_updown_table = bcm4360_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4360_res_updown); + } else { + /* FOR 4360B1 */ + pmu_res_updown_table = bcm4360B1_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4360B1_res_updown); + } + break; + CASE_BCM43602_CHIP: + pmu_res_updown_table = bcm43602_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm43602_res_updown); + pmu_res_depend_table = bcm43602_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm43602_res_depend); +#ifndef BCM_BOOTLOADER + pmu_res_depend_pciewar_table[0] = bcm43602_res_pciewar; + pmu_res_depend_pciewar_table_sz[0] = ARRAYSIZE(bcm43602_res_pciewar); + if (sih->chippkg == BCM43602_12x12_PKG_ID) { /* LPLDO WAR */ + pmu_res_depend_pciewar_table[1] = bcm43602_12x12_res_depend; + pmu_res_depend_pciewar_table_sz[1] = ARRAYSIZE(bcm43602_12x12_res_depend); + } +#endif /* !BCM_BOOTLOADER */ + break; + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + pmu_res_updown_table = bcm43012a0_res_updown_ds0; + pmu_res_updown_table_sz = ARRAYSIZE(bcm43012a0_res_updown_ds0); + pmu_res_depend_table = bcm43012a0_res_depend_ds0; + pmu_res_depend_table_sz = ARRAYSIZE(bcm43012a0_res_depend_ds0); + break; + case BCM4369_CHIP_GRPID: + /* fastlpo_dis is override for PMU1M, updown times are updated accordingly + * if PMU 1M is enabled only Resource Up/Down times are changed + * Also the Up/Down times are different for A0 and B0 + */ + if (fastlpo_dis) { + /* Only Resource Up/Down times are different b/w A0 and B0 */ + if (CHIPREV(sih->chiprev) == 0) { + pmu_res_updown_table = bcm4369a0_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4369a0_res_updown); + } else { + pmu_res_updown_table = bcm4369b0_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4369b0_res_updown); + } + } else { + if (fastlpo_pcie_dis) { + PMU_ERROR(("INVALID: PCIE 1MHz disabled but PMU 1MHz enabled\n")); + ASSERT(0); + } + /* Only Resource Up/Down times are different b/w A0 and B0 */ + if (CHIPREV(sih->chiprev) == 0) { + pmu_res_updown_table = bcm4369a0_res_updown_fastlpo_pmu; + pmu_res_updown_table_sz = + ARRAYSIZE(bcm4369a0_res_updown_fastlpo_pmu); + } else { + pmu_res_updown_table = bcm4369b0_res_updown_fastlpo_pmu; + pmu_res_updown_table_sz = + ARRAYSIZE(bcm4369b0_res_updown_fastlpo_pmu); + } + } + + /* fastlpo_pcie_dis is override for PCIE1M, resource dependencies are updated + * if pcie 1M is enabled resource dependency are different + * for A0 and B0 chiprev there is no resource dependency change + */ + if (fastlpo_pcie_dis) { + pmu_res_depend_table = bcm4369a0_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4369a0_res_depend); + } else { + pmu_res_depend_table = bcm4369a0_res_depend_fastlpo_pcie; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4369a0_res_depend_fastlpo_pcie); + } + break; + + case BCM4362_CHIP_GRPID: + pmu_res_updown_table = bcm4362_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4362_res_updown); + + GCI_REG_NEW(sih, bt_smem_control1, (0xFF<<16), 0); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL14, + (PMU_CC14_MAIN_VDDB2VDDRET_UP_DLY_MASK | + PMU_CC14_MAIN_VDDB2VDD_UP_DLY_MASK | + PMU_CC14_AUX_VDDB2VDDRET_UP_DLY_MASK | + PMU_CC14_AUX_VDDB2VDD_UP_DLY_MASK | + PMU_CC14_PCIE_VDDB2VDDRET_UP_DLY_MASK | + PMU_CC14_PCIE_VDDB2VDD_UP_DLY_MASK), 0); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL15, + (PMU_CC15_PCIE_VDDB_CURRENT_LIMIT_DELAY_MASK | + PMU_CC15_PCIE_VDDB_FORCE_RPS_PWROK_DELAY_MASK), 0); + + 
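/* Clear the PCIe reset0/1 slow-clock count fields in chipcontrol 10 */
+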
si_pmu_chipcontrol(sih, PMU_CHIPCTL10, + (PMU_CC10_PCIE_RESET0_CNT_SLOW_MASK | + PMU_CC10_PCIE_RESET1_CNT_SLOW_MASK), 0); + + GCI_REG_NEW(sih, bt_smem_control0, (0xF<<16), 0); + GCI_REG_NEW(sih, bt_smem_control0, (0xF<<24), 0); + + pmu_res_depend_table = bcm4362_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4362_res_depend); + break; + + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + if (SR_ENAB()) { + pmu_res_updown_table = bcm4378b0_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4378b0_res_updown); + pmu_res_depend_table = bcm4378b0_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4378b0_res_depend); + } + break; + + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + if (SR_ENAB()) { + if (ISSIM_ENAB(sih)) { + if (PMUREV(sih->pmurev) == 39) { + pmu_res_updown_table = bcm4387c0_res_updown; + pmu_res_updown_table_sz = + ARRAYSIZE(bcm4387c0_res_updown); + + pmu_res_subst_trans_tmr_table = + bcm4387c0_res_subst_trans_tmr; + pmu_res_subst_trans_tmr_table_sz = + ARRAYSIZE(bcm4387c0_res_subst_trans_tmr); + + pmu_res_depend_table = bcm4387c0_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4387c0_res_depend); + } else { + pmu_res_updown_table = bcm4387b0_res_updown_qt; + pmu_res_updown_table_sz = + ARRAYSIZE(bcm4387b0_res_updown_qt); + + pmu_res_subst_trans_tmr_table = + bcm4387b0_res_subst_trans_tmr_qt; + pmu_res_subst_trans_tmr_table_sz = + ARRAYSIZE(bcm4387b0_res_subst_trans_tmr_qt); + + pmu_res_depend_table = bcm4387b0_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4387b0_res_depend); + } + } else { + if (PMUREV(sih->pmurev) == 39) { + if (BCMSRTOPOFF_ENAB()) { + pmu_res_updown_table = bcm4387c0_res_updown_topoff; + pmu_res_updown_table_sz = + ARRAYSIZE(bcm4387c0_res_updown_topoff); + } else { + pmu_res_updown_table = bcm4387c0_res_updown; + pmu_res_updown_table_sz = + ARRAYSIZE(bcm4387c0_res_updown); + } + + pmu_res_subst_trans_tmr_table = + bcm4387c0_res_subst_trans_tmr; + pmu_res_subst_trans_tmr_table_sz = + ARRAYSIZE(bcm4387c0_res_subst_trans_tmr); + + pmu_res_depend_table = bcm4387c0_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4387c0_res_depend); + + if (PMU_FLL_PU_ENAB()) { + si_pmu_dep_table_fll_pu_fixup(sih, osh, + pmu_res_depend_table, + pmu_res_depend_table_sz); + } + } else { + pmu_res_updown_table = bcm4387b0_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4387b0_res_updown); + + pmu_res_subst_trans_tmr_table = + bcm4387b0_res_subst_trans_tmr; + pmu_res_subst_trans_tmr_table_sz = + ARRAYSIZE(bcm4387b0_res_subst_trans_tmr); + + pmu_res_depend_table = bcm4387b0_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4387b0_res_depend); + } + } + } + break; + + case BCM4388_CHIP_GRPID: + if (SR_ENAB()) { + if (ISSIM_ENAB(sih)) { + pmu_res_updown_table = bcm4388a0_res_updown_qt; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4388a0_res_updown_qt); + + pmu_res_subst_trans_tmr_table = bcm4388a0_res_subst_trans_tmr_qt; + pmu_res_subst_trans_tmr_table_sz = + ARRAYSIZE(bcm4388a0_res_subst_trans_tmr_qt); + } else { + pmu_res_updown_table = bcm4388a0_res_updown; + pmu_res_updown_table_sz = ARRAYSIZE(bcm4388a0_res_updown); + + pmu_res_subst_trans_tmr_table = bcm4388a0_res_subst_trans_tmr; + pmu_res_subst_trans_tmr_table_sz = + ARRAYSIZE(bcm4388a0_res_subst_trans_tmr); + } + + pmu_res_depend_table = bcm4388a0_res_depend; + pmu_res_depend_table_sz = ARRAYSIZE(bcm4388a0_res_depend); + + if (PMU_FLL_PU_ENAB()) { + si_pmu_dep_table_fll_pu_fixup(sih, osh, + pmu_res_depend_table, + pmu_res_depend_table_sz); + } + } + break; + + case BCM4389_CHIP_GRPID: + if 
(SR_ENAB()) {
+ if (ISSIM_ENAB(sih)) {
+ pmu_res_updown_table = bcm4389b0_res_updown_qt;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4389b0_res_updown_qt);
+
+ pmu_res_subst_trans_tmr_table = bcm4389b0_res_subst_trans_tmr_qt;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4389b0_res_subst_trans_tmr_qt);
+ } else {
+ pmu_res_updown_table = bcm4389b0_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4389b0_res_updown);
+
+ pmu_res_subst_trans_tmr_table = bcm4389b0_res_subst_trans_tmr;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4389b0_res_subst_trans_tmr);
+ }
+
+ pmu_res_depend_table = bcm4389b0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4389b0_res_depend);
+ if (PMU_FLL_PU_ENAB()) {
+ si_pmu_dep_table_fll_pu_fixup(sih, osh,
+ pmu_res_depend_table, pmu_res_depend_table_sz);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Program up/down timers */
+ while (pmu_res_updown_table_sz--) {
+ ASSERT(pmu_res_updown_table != NULL);
+ PMU_MSG(("Changing rsrc %d res_updn_timer to 0x%x\n",
+ pmu_res_updown_table[pmu_res_updown_table_sz].resnum,
+ pmu_res_updown_table[pmu_res_updown_table_sz].updown));
+ W_REG(osh, &pmu->res_table_sel,
+ pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
+ W_REG(osh, &pmu->res_updn_timer,
+ pmu_res_updown_table[pmu_res_updown_table_sz].updown);
+ }
+
+ if (!FWSIGN_ENAB()) {
+ /* # resources */
+ rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+
+ /* Apply nvram overrides to up/down timers */
+ for (i = 0; i < rsrcs; i ++) {
+ uint32 r_val;
+ snprintf(name, sizeof(name), rstr_rDt, i);
+ if ((val = getvar(NULL, name)) == NULL)
+ continue;
+ r_val = (uint32)bcm_strtoul(val, NULL, 0);
+ /* From PMUrev >= 13, pmu resource updown times are 12 bits (0:11 DT, 16:27 UT) */
+ /* Old nvram values carry 8 bits each for UT/DT; convert the old format here */
+ if (PMUREV(sih->pmurev) >= 13) {
+ if (r_val < (1 << 16)) {
+ uint16 up_time = (r_val >> 8) & 0xFF;
+ r_val &= 0xFF;
+ r_val |= (up_time << 16);
+ }
+ }
+ PMU_MSG(("Applying %s=%s to rsrc %d res_updn_timer\n", name, val, i));
+ W_REG(osh, &pmu->res_table_sel, (uint32)i);
+ W_REG(osh, &pmu->res_updn_timer, r_val);
+ }
+ }
+
+ /* Program Rsrc Substate Transition Timer */
+ while (pmu_res_subst_trans_tmr_table_sz --) {
+ ASSERT(pmu_res_subst_trans_tmr_table != NULL);
+ PMU_MSG(("Changing rsrc %d substate %d res_subst_trans_timer to 0x%x\n",
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].resnum,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].substate,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].tmr));
+ W_REG(osh, &pmu->res_table_sel,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].resnum |
+ (pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].substate
+ << PMU_RES_SUBSTATE_SHIFT));
+ W_REG(osh, &pmu->rsrc_substate_trans_tmr,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].tmr);
+ }
+
+ /* Program resource dependencies table */
+ si_pmu_resdeptbl_upd(sih, osh, pmu, pmu_res_depend_table, pmu_res_depend_table_sz);
+
+ if (!FWSIGN_ENAB()) {
+ /* Apply nvram overrides to dependencies masks */
+ for (i = 0; i < rsrcs; i ++) {
+ snprintf(name, sizeof(name), rstr_rDd, i);
+ if ((val = getvar(NULL, name)) == NULL)
+ continue;
+ PMU_MSG(("Applying %s=%s to rsrc %d res_dep_mask\n", name, val, i));
+ W_REG(osh, &pmu->res_table_sel, (uint32)i);
+ W_REG(osh, &pmu->res_dep_mask, (uint32)bcm_strtoul(val, NULL, 0));
+ }
+ }
+
+#if !defined(BCM_BOOTLOADER)
+ /* Initialize any chip interface dependent PMU rsrc by looking at the
+ * chipstatus register to figure the selected interface + */ + /* this should be a general change to cover all the chips. + * this also should validate the build where the dongle is + * built for SDIO but downloaded on PCIE dev + */ + if (BUSTYPE(sih->bustype) == PCI_BUS || BUSTYPE(sih->bustype) == SI_BUS) { + bool is_pciedev = BCM43602_CHIP(sih->chip); + + for (i = 0; i < ARRAYSIZE(pmu_res_depend_pciewar_table); i++) { + if (is_pciedev && pmu_res_depend_pciewar_table[i] && + pmu_res_depend_pciewar_table_sz[i]) { + si_pmu_resdeptbl_upd(sih, osh, pmu, + pmu_res_depend_pciewar_table[i], + pmu_res_depend_pciewar_table_sz[i]); + } + } + } +#endif /* !BCM_BOOTLOADER */ + /* Determine min/max rsrc masks */ + si_pmu_res_masks(sih, &min_mask, &max_mask); + /* Add min mask dependencies */ + min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, FALSE); + +#ifdef BCM_BOOTLOADER + /* Apply nvram override to max mask */ + if ((val = getvar(NULL, "brmax")) != NULL) { + PMU_MSG(("Applying brmax=%s to max_res_mask\n", val)); + max_mask = (uint32)bcm_strtoul(val, NULL, 0); + } + + /* Apply nvram override to min mask */ + if ((val = getvar(NULL, "brmin")) != NULL) { + PMU_MSG(("Applying brmin=%s to min_res_mask\n", val)); + min_mask = (uint32)bcm_strtoul(val, NULL, 0); + } +#endif /* BCM_BOOTLOADER */ + + /* apply new PLL setting if is ALP strap (need to close out + * if possible apply if is HT strap) + */ + if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || (CHIPID(sih->chip) == BCM4352_CHIP_ID)) && + (CHIPREV(sih->chiprev) < 4) && + ((CST4360_RSRC_INIT_MODE(sih->chipst) & 1) == 0)) { + /* BBPLL */ + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, ~0, 0x09048562); + /* AVB PLL */ + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG14, ~0, 0x09048562); + si_pmu_pllupd(sih); + } else if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID)) && + (CHIPREV(sih->chiprev) >= 4) && + ((CST4360_RSRC_INIT_MODE(sih->chipst) & 1) == 0)) { + /* Changes for 4360B1 */ + + /* Enable REFCLK bit 11 */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL1, 0x800, 0x800); + + /* BBPLL */ + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, ~0, 0x080004e2); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG7, ~0, 0xE); + /* AVB PLL */ + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG14, ~0, 0x080004e2); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG15, ~0, 0xE); + si_pmu_pllupd(sih); + } + /* disable PLL open loop operation */ + si_pll_closeloop(sih); + + if (max_mask) { + /* Ensure there is no bit set in min_mask which is not set in max_mask */ + max_mask |= min_mask; + + /* First set the bits which change from 0 to 1 in max, then update the + * min_mask register and then reset the bits which change from 1 to 0 + * in max. This is required as the bit in MAX should never go to 0 when + * the corresponding bit in min is still 1. Similarly the bit in min cannot + * be 1 when the corresponding bit in max is still 0. + */ + OR_REG(osh, &pmu->max_res_mask, max_mask); + } else { + /* First set the bits which change from 0 to 1 in max, then update the + * min_mask register and then reset the bits which change from 1 to 0 + * in max. This is required as the bit in MAX should never go to 0 when + * the corresponding bit in min is still 1. Similarly the bit in min cannot + * be 1 when the corresponding bit in max is still 0. 
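+ * (In this branch no explicit max mask was computed, so min is OR'd into max
+ * first to preserve the min-subset-of-max invariant before min_res_mask is
+ * written.)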
+ */
+ if (min_mask)
+ OR_REG(osh, &pmu->max_res_mask, min_mask);
+ }
+
+ /* Program min resource mask */
+ if (min_mask) {
+ PMU_MSG(("Changing min_res_mask to 0x%x\n", min_mask));
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ }
+
+ /* Program max resource mask */
+ if (max_mask) {
+ PMU_MSG(("Changing max_res_mask to 0x%x\n", max_mask));
+ W_REG(osh, &pmu->max_res_mask, max_mask);
+ }
+#if defined(SAVERESTORE) && defined(LDO3P3_MIN_RES_MASK)
+ if (SR_ENAB()) {
+ /* Set the default state for LDO3P3 protection */
+ if (getintvar(NULL, rstr_ldo_prot) == 1) {
+ si_pmu_min_res_ldo3p3_set(sih, osh, TRUE);
+ }
+ }
+#endif /* SAVERESTORE && LDO3P3_MIN_RES_MASK */
+
+ /* request htavail thru pcie core */
+ if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (BUSTYPE(sih->bustype) == PCI_BUS) &&
+ (CHIPREV(sih->chiprev) < 4)) {
+ uint32 pcie_clk_ctl_st;
+
+ pcie_clk_ctl_st = si_corereg(sih, 3, 0x1e0, 0, 0);
+ si_corereg(sih, 3, 0x1e0, ~0, (pcie_clk_ctl_st | CCS_HTAREQ));
+ }
+
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+ /* Add some delay; allow resources to come up and settle. */
+ OSL_DELAY(2000);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+} /* si_pmu_res_init */
+
+/* Set up the PLL and query clock speed */
+typedef struct {
+ uint16 fref; /* x-tal frequency in [KHz] */
+ uint8 xf; /* x-tal index as contained in PMU control reg, see PMU programmers guide */
+ uint8 p1div;
+ uint8 p2div;
+ uint8 ndiv_int;
+ uint32 ndiv_frac;
+} pmu1_xtaltab0_t;
+
+/* 'xf' values corresponding to the 'xf' definition in the PMU control register */
+/* unclear why this enum contains '_640_' since the PMU prog guide says nothing about that */
+enum xtaltab0_640 {
+ XTALTAB0_640_12000K = 1,
+ XTALTAB0_640_13000K,
+ XTALTAB0_640_14400K,
+ XTALTAB0_640_15360K,
+ XTALTAB0_640_16200K,
+ XTALTAB0_640_16800K,
+ XTALTAB0_640_19200K,
+ XTALTAB0_640_19800K,
+ XTALTAB0_640_20000K,
+ XTALTAB0_640_24000K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_640_25000K,
+ XTALTAB0_640_26000K,
+ XTALTAB0_640_30000K,
+ XTALTAB0_640_33600K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_640_37400K,
+ XTALTAB0_640_38400K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_640_40000K,
+ XTALTAB0_640_48000K, /* warning: unknown in PMU programmers guide. seems incorrect.
*/ + XTALTAB0_640_52000K +}; + +/* the following table is based on 880Mhz fvco */ +static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_880)[] = { + {12000, 1, 3, 22, 0x9, 0xFFFFEF}, + {13000, 2, 1, 6, 0xb, 0x483483}, + {14400, 3, 1, 10, 0xa, 0x1C71C7}, + {15360, 4, 1, 5, 0xb, 0x755555}, + {16200, 5, 1, 10, 0x5, 0x6E9E06}, + {16800, 6, 1, 10, 0x5, 0x3Cf3Cf}, + {19200, 7, 1, 4, 0xb, 0x755555}, + {19800, 8, 1, 11, 0x4, 0xA57EB}, + {20000, 9, 1, 11, 0x4, 0x0}, + {24000, 10, 3, 11, 0xa, 0x0}, + {25000, 11, 5, 16, 0xb, 0x0}, + {26000, 12, 1, 2, 0x10, 0xEC4EC4}, + {30000, 13, 3, 8, 0xb, 0x0}, + {33600, 14, 1, 2, 0xd, 0x186186}, + {38400, 15, 1, 2, 0xb, 0x755555}, + {40000, 16, 1, 2, 0xb, 0}, + {0, 0, 0, 0, 0, 0} +}; + +/* indices into pmu1_xtaltab0_880[] */ +#define PMU1_XTALTAB0_880_12000K 0 +#define PMU1_XTALTAB0_880_13000K 1 +#define PMU1_XTALTAB0_880_14400K 2 +#define PMU1_XTALTAB0_880_15360K 3 +#define PMU1_XTALTAB0_880_16200K 4 +#define PMU1_XTALTAB0_880_16800K 5 +#define PMU1_XTALTAB0_880_19200K 6 +#define PMU1_XTALTAB0_880_19800K 7 +#define PMU1_XTALTAB0_880_20000K 8 +#define PMU1_XTALTAB0_880_24000K 9 +#define PMU1_XTALTAB0_880_25000K 10 +#define PMU1_XTALTAB0_880_26000K 11 +#define PMU1_XTALTAB0_880_30000K 12 +#define PMU1_XTALTAB0_880_37400K 13 +#define PMU1_XTALTAB0_880_38400K 14 +#define PMU1_XTALTAB0_880_40000K 15 + +/* the following table is based on 1760Mhz fvco */ +static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_1760)[] = { + {12000, 1, 3, 44, 0x9, 0xFFFFEF}, + {13000, 2, 1, 12, 0xb, 0x483483}, + {14400, 3, 1, 20, 0xa, 0x1C71C7}, + {15360, 4, 1, 10, 0xb, 0x755555}, + {16200, 5, 1, 20, 0x5, 0x6E9E06}, + {16800, 6, 1, 20, 0x5, 0x3Cf3Cf}, + {19200, 7, 1, 18, 0x5, 0x17B425}, + {19800, 8, 1, 22, 0x4, 0xA57EB}, + {20000, 9, 1, 22, 0x4, 0x0}, + {24000, 10, 3, 22, 0xa, 0x0}, + {25000, 11, 5, 32, 0xb, 0x0}, + {26000, 12, 1, 4, 0x10, 0xEC4EC4}, + {30000, 13, 3, 16, 0xb, 0x0}, + {38400, 14, 1, 10, 0x4, 0x955555}, + {40000, 15, 1, 4, 0xb, 0}, + {0, 0, 0, 0, 0, 0} +}; + +#define XTAL_FREQ_24000MHZ 24000 +#define XTAL_FREQ_29985MHZ 29985 +#define XTAL_FREQ_30000MHZ 30000 +#define XTAL_FREQ_37400MHZ 37400 +#define XTAL_FREQ_48000MHZ 48000 +#define XTAL_FREQ_59970MHZ 59970 + +/* 'xf' values corresponding to the 'xf' definition in the PMU control register */ +/* unclear why this enum contains '_960_' since the PMU prog guide says nothing about that */ +enum xtaltab0_960 { + XTALTAB0_960_12000K = 1, + XTALTAB0_960_13000K, + XTALTAB0_960_14400K, + XTALTAB0_960_15360K, + XTALTAB0_960_16200K, + XTALTAB0_960_16800K, + XTALTAB0_960_19200K, + XTALTAB0_960_19800K, + XTALTAB0_960_20000K, + XTALTAB0_960_24000K, /* warning: unknown in PMU programmers guide. seems incorrect. */ + XTALTAB0_960_25000K, + XTALTAB0_960_26000K, + XTALTAB0_960_30000K, + XTALTAB0_960_33600K, /* warning: unknown in PMU programmers guide. seems incorrect. */ + XTALTAB0_960_37400K, + XTALTAB0_960_38400K, /* warning: unknown in PMU programmers guide. seems incorrect. */ + XTALTAB0_960_40000K, + XTALTAB0_960_48000K, /* warning: unknown in PMU programmers guide. seems incorrect. */ + XTALTAB0_960_52000K, + XTALTAB0_960_59970K +}; + +/** + * given an x-tal frequency, this table specifies the PLL params to use to generate a 960Mhz output + * clock. This output clock feeds the clock divider network. The defines of the form + * PMU1_XTALTAB0_960_* index into this array. 
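+ *
+ * For each entry here, fvco [KHz] = fref * p2div * (ndiv_int + ndiv_frac / 2^24) / p1div,
+ * e.g. the 37400 row: 37400 * 1 * (0x33 + 0x563EF9 / 2^24) / 2 ~= 960000 KHz.
+ * (The 4369/4362 '_963' variants further below appear to use a 20-bit ndiv_frac
+ * instead: 12000 * (0x50 + 0x40000 / 2^20) = 963000 KHz.)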
+ */ +static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_960)[] = { +/* fref xf p1div p2div ndiv_int ndiv_frac */ + {12000, 1, 1, 1, 0x50, 0x0 }, /* array index 0 */ + {13000, 2, 1, 1, 0x49, 0xD89D89}, + {14400, 3, 1, 1, 0x42, 0xAAAAAA}, + {15360, 4, 1, 1, 0x3E, 0x800000}, + {16200, 5, 1, 1, 0x3B, 0x425ED0}, + {16800, 6, 1, 1, 0x39, 0x249249}, + {19200, 7, 1, 1, 0x32, 0x0 }, + {19800, 8, 1, 1, 0x30, 0x7C1F07}, + {20000, 9, 1, 1, 0x30, 0x0 }, + {24000, 10, 1, 1, 0x28, 0x0 }, + {25000, 11, 1, 1, 0x26, 0x666666}, /* array index 10 */ + {26000, 12, 1, 1, 0x24, 0xEC4EC4}, + {30000, 13, 1, 1, 0x20, 0x0 }, + {33600, 14, 1, 1, 0x1C, 0x924924}, + {37400, 15, 2, 1, 0x33, 0x563EF9}, + {38400, 16, 2, 1, 0x32, 0x0 }, + {40000, 17, 2, 1, 0x30, 0x0 }, + {48000, 18, 2, 1, 0x28, 0x0 }, + {52000, 19, 2, 1, 0x24, 0xEC4EC4}, /* array index 18 */ + {59970, 20, 0, 0, 0, 0 }, + /* TBD: will separate 59970 for 4387B0 for new pll scheme */ + {0, 0, 0, 0, 0, 0 } +}; + +static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_4369_963)[] = { +/* fref xf p1div NA ndiv_int ndiv_frac */ + {12000, 1, 1, 1, 0x50, 0x40000}, /* array index 0 */ + {13000, 2, 1, 1, 0x4A, 0x13B14}, + {14400, 3, 1, 1, 0x42, 0xE0000}, + {15360, 4, 1, 1, 0x3E, 0xB2000}, + {16200, 5, 1, 1, 0x3B, 0x71C72}, + {16800, 6, 1, 1, 0x39, 0x52492}, + {19200, 7, 1, 1, 0x32, 0x28000}, + {19800, 8, 1, 1, 0x30, 0xA2E8C}, + {20000, 9, 1, 1, 0x30, 0x26666}, + {24000, 10, 1, 1, 0x28, 0x20000}, + {25000, 11, 1, 1, 0x26, 0x851EC}, /* array index 10 */ + {26000, 12, 1, 1, 0x25, 0x09D8A}, + {30000, 13, 1, 1, 0x20, 0x1999A}, + {33600, 14, 1, 1, 0x1C, 0xA9249}, + {37400, 15, 1, 1, 0x19, 0xBFA86}, + {38400, 16, 1, 1, 0x19, 0x14000}, + {40000, 17, 1, 1, 0x18, 0x13333}, + {48000, 18, 1, 1, 0x14, 0x10000}, + {52000, 19, 1, 1, 0x12, 0x84EC5}, /* array index 18 */ + {0, 0, 0, 0, 0, 0 } +}; + +static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_4362_963)[] = { +/* fref xf p1div NA ndiv_int ndiv_frac */ + {12000, 1, 1, 1, 0x50, 0x40000}, /* array index 0 */ + {13000, 2, 1, 1, 0x4A, 0x13B14}, + {14400, 3, 1, 1, 0x42, 0xE0000}, + {15360, 4, 1, 1, 0x3E, 0xB2000}, + {16200, 5, 1, 1, 0x3B, 0x71C72}, + {16800, 6, 1, 1, 0x39, 0x52492}, + {19200, 7, 1, 1, 0x32, 0x28000}, + {19800, 8, 1, 1, 0x30, 0xA2E8C}, + {20000, 9, 1, 1, 0x30, 0x26666}, + {24000, 10, 1, 1, 0x28, 0x20000}, + {25000, 11, 1, 1, 0x26, 0x851EC}, /* array index 10 */ + {26000, 12, 1, 1, 0x25, 0x09D8A}, + {30000, 13, 1, 1, 0x20, 0x1999A}, + {33600, 14, 1, 1, 0x1C, 0xA9249}, + {37400, 15, 1, 1, 0x19, 0xBFA86}, + {38400, 16, 1, 1, 0x19, 0x14000}, + {40000, 17, 1, 1, 0x18, 0x13333}, + {48000, 18, 1, 1, 0x14, 0x10000}, + {52000, 19, 1, 1, 0x12, 0x84EC5}, /* array index 18 */ + {0, 0, 0, 0, 0, 0 } +}; + +/* Indices into array pmu1_xtaltab0_960[]. Keep array and these defines synchronized. 
*/ +#define PMU1_XTALTAB0_960_12000K 0 +#define PMU1_XTALTAB0_960_13000K 1 +#define PMU1_XTALTAB0_960_14400K 2 +#define PMU1_XTALTAB0_960_15360K 3 +#define PMU1_XTALTAB0_960_16200K 4 +#define PMU1_XTALTAB0_960_16800K 5 +#define PMU1_XTALTAB0_960_19200K 6 +#define PMU1_XTALTAB0_960_19800K 7 +#define PMU1_XTALTAB0_960_20000K 8 +#define PMU1_XTALTAB0_960_24000K 9 +#define PMU1_XTALTAB0_960_25000K 10 +#define PMU1_XTALTAB0_960_26000K 11 +#define PMU1_XTALTAB0_960_30000K 12 +#define PMU1_XTALTAB0_960_33600K 13 +#define PMU1_XTALTAB0_960_37400K 14 +#define PMU1_XTALTAB0_960_38400K 15 +#define PMU1_XTALTAB0_960_40000K 16 +#define PMU1_XTALTAB0_960_48000K 17 +#define PMU1_XTALTAB0_960_52000K 18 +#define PMU1_XTALTAB0_960_59970K 19 + +#define PMU15_XTALTAB0_12000K 0 +#define PMU15_XTALTAB0_20000K 1 +#define PMU15_XTALTAB0_26000K 2 +#define PMU15_XTALTAB0_37400K 3 +#define PMU15_XTALTAB0_52000K 4 +#define PMU15_XTALTAB0_END 5 + +/* For having the pllcontrol data (info) + * The table with the values of the registers will have one - one mapping. + */ +typedef struct { + uint16 clock; /**< x-tal frequency in [KHz] */ + uint8 mode; /**< spur mode */ + uint8 xf; /**< corresponds with xf bitfield in PMU control register */ +} pllctrl_data_t; + +/* ***************************** tables for 43012a0 *********************** */ + +/** + * PLL control register table giving info about the xtal supported for 43012 + * There should be a one to one mapping between pmu1_pllctrl_tab_43012_960mhz[] and this table. + */ +static const pllctrl_data_t(pmu1_xtaltab0_43012)[] = { +/* clock mode xf */ + {37400, 0, XTALTAB0_960_37400K}, + {37400, 100, XTALTAB0_960_37400K}, + {26000, 0, XTALTAB0_960_26000K}, + {24000, 0, XTALTAB0_960_24000K} +}; + +/* +There should be a one to one mapping between pmu1_pllctrl_tab_43012_640mhz[] +* and this table. PLL control5 register is related to HSIC which is not supported in 43012 +* Use a safe DCO code=56 by default, Across PVT openloop VCO Max=320MHz, Min=100 +* Mhz +*/ +#ifdef BCMQT +static const uint32 (pmu1_pllctrl_tab_43012_1600mhz)[] = { +/* Fvco is taken as 160.1 */ +/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 */ + 0x072fe811, 0x00800000, 0x00000000, 0x038051e8, 0x00000000, 0x00000000, + 0x0e5fd422, 0x00800000, 0x00000000, 0x000011e8, 0x00000000, 0x00000000 +}; +#else +static const uint32 (pmu1_pllctrl_tab_43012_1600mhz)[] = { +/* Fvco is taken as 160.1 */ +/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 */ + 0x07df2411, 0x00800000, 0x00000000, 0x038051e8, 0x00000000, + 0x0e5fd422, 0x00800000, 0x00000000, 0x000011e8, 0x00000000, + 0x1d89dc12, 0x00800000, 0x00000000, 0x06d04de8, 0x00000000, + 0x072fe828, 0x00800000, 0x00000000, 0x06d04de8, 0x00000000 +}; +#endif /* BCMQT */ +/* ************************ tables for 43012a0 END *********************** */ + +/* ***************************** tables for 4369a0 *********************** */ +/* should get real value from hardware guys */ +/** + * PLL control register table giving info about the xtal supported for 4369 + * There should be a one to one mapping between pmu1_pllctrl_tab_4369_960mhz[] and this table. + * Even though macro suggests XTALTAB0_960_37400K --> BBPLL VCO is set to 963MHz + */ +static const pllctrl_data_t BCMATTACHDATA(pmu1_xtaltab0_4369)[] = { +/* clock mode xf */ + {37400, 0, XTALTAB0_960_37400K} +}; + +/** + * PLL control register table giving info about the xtal supported for 4369. + * There should be a one to one mapping between pmu1_pllctrl_tab_4369_963mhz[] and this table. 
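+ * Illustrative check (assuming a 20-bit ndiv_frac): the pmu1_xtaltab0_4369_963
+ * entry {37400, 15, 1, 1, 0x19, 0xBFA86} gives 37400 * (0x19 + 0xBFA86 / 2^20)
+ * ~= 963000 KHz, i.e. the 963MHz BBPLL VCO mentioned above.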
+ */ + +/* For 4369, 960.1MHz BBPLL freq is chosen to avoid the spurs +* freq table : pll1 : fvco 960.1M, pll2 for arm : 400 MHz +*/ +#define PMU_PLL3_4369B0_DEFAULT 0x006ABF86 +static const uint32 BCMATTACHDATA(pmu1_pllctrl_tab_4369_960p1mhz)[] = { +/* Default values for unused registers 4-7 as sw loop execution will go for 8 times */ +/* Fvco is taken as 963M */ +/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 PLL 6 PLL 7 PLL 8 PLL 9 PLL 10 */ + 0x15000000, 0x06050603, 0x01910806, PMU_PLL3_4369B0_DEFAULT, + 0x00000000, 0x32800000, 0xC7AE00A9, 0x40800000, + 0x00000000, 0x00000000, 0x00000000 +}; + +/* ************************ tables for 4369a0 END *********************** */ + +/* ***************************** tables for 4362a0 *********************** */ +/* should get real value from hardware guys */ +/** + * PLL control register table giving info about the xtal supported for 4362 + * There should be a one to one mapping between pmu1_pllctrl_tab_4362_960mhz[] and this table. + * Even though macro suggests XTALTAB0_960_37400K --> BBPLL VCO is set to 963MHz + */ +static const pllctrl_data_t BCMATTACHDATA(pmu1_xtaltab0_4362)[] = { +/* clock mode xf */ + {37400, 0, XTALTAB0_960_37400K} +}; + +/* For 4362, 960.1MHz BBPLL freq is chosen to avoid the spurs +* freq table : pll1 : fvco 960.1M, pll2 for arm : 400 MHz +*/ +/* This freq actually around 960.123 */ +#define PMU_PLL3_4362A0_DEFAULT 0x006ABF86 + +static const uint32 BCMATTACHDATA(pmu1_pllctrl_tab_4362_960p1mhz)[] = { +/* Default values for unused registers 4-7 as sw loop execution will go for 8 times */ +/* Fvco is taken as 963M */ +/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 PLL 6 PLL 7 PLL 8 PLL 9 PLL 10 */ + 0x15000000, 0x06050603, 0x01910806, PMU_PLL3_4362A0_DEFAULT, + 0x00000000, 0x32800000, 0xC7AE00A9, 0x40800000, + 0x00000000, 0x00000000, 0x00000000 +}; + +/* ************************ tables for 4362a0 END *********************** */ + +/* ***************************** tables for 4389 *********************** */ +static const pllctrl_data_t BCMATTACHDATA(pmu1_xtaltab0_4389)[] = { +/* clock mode xf */ + {XTAL_FREQ_59970MHZ, 0, XTALTAB0_960_59970K} +}; + +static const uint32 BCMATTACHDATA(pmu1_pllctrl_tab_4389_963mhz)[] = { +/* Default values for all registers */ +/* Fvco (BBPLL) is taken as 963M */ +/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 PLL 6 PLL 7 PLL 8 PLL 9 PLL 10 */ + 0x29d00000, 0x30100c03, 0x00240c06, 0x597ff060, + 0x00000000, 0x00000800, 0x00321d3a, 0x000551ff, + 0x00000000, 0x10000000, 0x00000000 +}; + +/* ************************ tables for 4389 END *********************** */ + +/** returns a table that instructs how to program the BBPLL for a particular xtal frequency */ +static const pmu1_xtaltab0_t * +BCMPOSTTRAPFN(si_pmu1_xtaltab0)(si_t *sih) +{ +#ifdef BCMDBG_PMU + char chn[8]; +#endif + switch (CHIPID(sih->chip)) { + case BCM4360_CHIP_ID: + case BCM43460_CHIP_ID: + case BCM4352_CHIP_ID: + case BCM43526_CHIP_ID: + CASE_BCM43602_CHIP: + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + return pmu1_xtaltab0_960; + case BCM4369_CHIP_GRPID: + return pmu1_xtaltab0_4369_963; + case BCM4362_CHIP_GRPID: + return pmu1_xtaltab0_4362_963; + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + return pmu1_xtaltab0_960; + default: + PMU_MSG(("si_pmu1_xtaltab0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8))); + break; + } + ASSERT(0); + return NULL; +} /* si_pmu1_xtaltab0 */ + +/** 
returns chip specific PLL settings for default xtal frequency and VCO output frequency */
+static const pmu1_xtaltab0_t *
+BCMPOSTTRAPFN(si_pmu1_xtaldef0)(si_t *sih)
+{
+#ifdef BCMDBG_PMU
+	char chn[8];
+#endif
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4360_CHIP_ID:
+	case BCM4352_CHIP_ID:
+	case BCM43460_CHIP_ID:
+	case BCM43526_CHIP_ID:
+	case BCM43012_CHIP_ID:
+	case BCM43013_CHIP_ID:
+		/* Default to 37400 KHz */
+		return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_37400K];
+	case BCM43014_CHIP_ID:
+		/* Default to 24000 KHz */
+		return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_24000K];
+	CASE_BCM43602_CHIP:
+		return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_40000K];
+
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_37400K];
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+	case BCM4388_CHIP_GRPID:
+	case BCM4389_CHIP_GRPID:
+	case BCM4397_CHIP_GRPID:
+		return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_59970K];
+	case BCM4369_CHIP_GRPID:
+		return &pmu1_xtaltab0_4369_963[PMU1_XTALTAB0_960_37400K];
+	case BCM4362_CHIP_GRPID:
+		return &pmu1_xtaltab0_4362_963[PMU1_XTALTAB0_960_37400K];
+	default:
+		PMU_MSG(("si_pmu1_xtaldef0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8)));
+		break;
+	}
+	ASSERT(0);
+	return NULL;
+} /* si_pmu1_xtaldef0 */
+
+static uint32 fvco_4360 = 0;
+
+/**
+ * store the val on init, then if func is called during normal operation
+ * don't touch core regs anymore
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu_pll1_fvco_4360)(si_t *sih, osl_t *osh)
+{
+	uint32 xf, ndiv_int, ndiv_frac, fvco, pll_reg, p1_div_scale;
+	uint32 r_high, r_low, int_part, frac_part, rounding_const;
+	uint8 p1_div;
+	uint origidx = 0;
+	bcm_int_bitmask_t intr_val;
+
+	if (fvco_4360) {
+		printf("si_pmu_pll1_fvco_4360:attempt to query fvco during normal operation\n");
+		/* this will ensure that the func is called only once upon init */
+		return fvco_4360;
+	}
+
+	/* Remember original core before switch to chipc */
+	si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+	xf = si_pmu_alp_clock(sih, osh)/1000;
+
+	/* pll reg 10: p1div, ndiv_mode, ndiv_int */
+	pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG10, 0, 0);
+	p1_div = pll_reg & 0xf;
+	ndiv_int = (pll_reg >> 7) & 0x1f;
+
+	/* pllctrl11 */
+	pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG11, 0, 0);
+	ndiv_frac = pll_reg & 0xfffff;
+
+	int_part = xf * ndiv_int;
+
+	rounding_const = 1 << (BBPLL_NDIV_FRAC_BITS - 1);
+	math_uint64_multiple_add(&r_high, &r_low, ndiv_frac, xf, rounding_const);
+	math_uint64_right_shift(&frac_part, r_high, r_low, BBPLL_NDIV_FRAC_BITS);
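+
+	/*
+	 * Illustrative numbers (not read from real hardware): with xf = 40000 KHz,
+	 * ndiv_int = 24, ndiv_frac = 0 and p1_div = 1, int_part = 960000 KHz,
+	 * frac_part = 0, and the scaled division below yields fvco = 960000 KHz.
+	 */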
+
+	if (p1_div == 0) {
+		PMU_ERROR(("p1_div calc returned 0! [%d]\n", __LINE__));
+		ROMMABLE_ASSERT(0);
+		p1_div_scale = 0;
+	} else {
+		p1_div_scale = (1 << P1_DIV_SCALE_BITS) / p1_div;
+	}
+
+	rounding_const = 1 << (P1_DIV_SCALE_BITS - 1);
+
+	math_uint64_multiple_add(&r_high, &r_low, (int_part + frac_part),
+		p1_div_scale, rounding_const);
+	math_uint64_right_shift(&fvco, r_high, r_low, P1_DIV_SCALE_BITS);
+
+	/* Return to original core */
+	si_restore_core(sih, origidx, &intr_val);
+
+	fvco_4360 = fvco;
+	return fvco;
+} /* si_pmu_pll1_fvco_4360 */
+
+/**
+ * Specific to 43012: calculates the FVCO frequency from the XTAL frequency.
+ * Returns the FVCO frequency in [khz] units
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu_pll1_fvco_43012)(si_t *sih, osl_t *osh)
+{
+	uint32 xf, ndiv_int, ndiv_frac, fvco, pll_reg, p1_div_scale;
+	uint32 r_high, r_low, int_part, frac_part, rounding_const;
+	uint8 p_div;
+	chipcregs_t *cc;
+	uint origidx = 0;
+	bcm_int_bitmask_t intr_val;
+
+	/* Remember original core before switch to chipc */
+	cc = (chipcregs_t *)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+	ASSERT(cc != NULL);
+	BCM_REFERENCE(cc);
+
+	xf = si_pmu_alp_clock(sih, osh)/1000;
+
+	pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG0, 0, 0);
+
+	ndiv_int = (pll_reg & PMU43012_PLL0_PC0_NDIV_INT_MASK) >>
+			PMU43012_PLL0_PC0_NDIV_INT_SHIFT;
+
+	ndiv_frac = (pll_reg & PMU43012_PLL0_PC0_NDIV_FRAC_MASK) >>
+			PMU43012_PLL0_PC0_NDIV_FRAC_SHIFT;
+
+	pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0);
+
+	p_div = (pll_reg & PMU43012_PLL0_PC3_PDIV_MASK) >>
+			PMU43012_PLL0_PC3_PDIV_SHIFT;
+
+	/* If the p_div value read from PLL control register is zero,
+	 * then return default FVCO value instead of computing the FVCO frequency
+	 * using XTAL frequency
+	 */
+	if (!p_div) {
+		PMU_ERROR(("pll control register read failed [%d]\n", __LINE__));
+		ROMMABLE_ASSERT(0);
+		fvco = 0;
+		goto done;
+	}
+	/* Actual expression is as below */
+	/* fvco1 = ((xf * (1/p1_div)) * (ndiv_int + (ndiv_frac /(1 << 20)))); */
+
+	int_part = xf * ndiv_int;
+	rounding_const = 1 << (PMU43012_PLL_NDIV_FRAC_BITS - 1);
+	math_uint64_multiple_add(&r_high, &r_low, ndiv_frac, xf, rounding_const);
+	math_uint64_right_shift(&frac_part, r_high, r_low, PMU43012_PLL_NDIV_FRAC_BITS);
+
+	p1_div_scale = (1 << PMU43012_PLL_P_DIV_SCALE_BITS) / p_div;
+	rounding_const = 1 << (PMU43012_PLL_P_DIV_SCALE_BITS - 1);
+
+	math_uint64_multiple_add(&r_high, &r_low, (int_part + frac_part),
+		p1_div_scale, rounding_const);
+	math_uint64_right_shift(&fvco, r_high, r_low, PMU43012_PLL_P_DIV_SCALE_BITS);
+
+done:
+	/* Return to original core */
+	si_restore_core(sih, origidx, &intr_val);
+	return fvco;
+} /* si_pmu_pll1_fvco_43012 */
+
+/** returns chip specific default BaseBand pll fvco frequency in [khz] units */
+static uint32
+BCMPOSTTRAPFN(si_pmu1_pllfvco0)(si_t *sih)
+{
+#ifdef BCMDBG_PMU
+	char chn[8];
+#endif
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4352_CHIP_ID:
+	case BCM43526_CHIP_ID:
+		return FVCO_960;
+
+	CASE_BCM43602_CHIP:
+		return FVCO_960;
+	case BCM4360_CHIP_ID:
+	case BCM43460_CHIP_ID:
+	{
+		osl_t *osh;
+		osh = si_osh(sih);
+		return si_pmu_pll1_fvco_4360(sih, osh);
+	}
+	case BCM43012_CHIP_ID:
+	case BCM43013_CHIP_ID:
+	case BCM43014_CHIP_ID:
+	{
+		osl_t *osh;
+		osh = si_osh(sih);
+		return si_pmu_pll1_fvco_43012(sih, osh);
+	}
+	case BCM4369_CHIP_GRPID:
+		return FVCO_960p1;
+	case BCM4362_CHIP_GRPID:
+		return FVCO_960p1;
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		return FVCO_960p1;
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+	case
BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + return FVCO_963p01; + default: + PMU_MSG(("si_pmu1_pllfvco0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8))); + break; + } + ASSERT(0); + return 0; +} /* si_pmu1_pllfvco0 */ + +/** + * returns chip specific default pll fvco frequency in [khz] units + */ +static uint32 +BCMPOSTTRAPFN(si_pmu1_pllfvco0_pll2)(si_t *sih) +{ +#ifdef BCMDBG_PMU + char chn[8]; +#endif + + switch (CHIPID(sih->chip)) { + case BCM4369_CHIP_GRPID: + case BCM4362_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + return si_get_armpllclkfreq(sih) * 1000; + case BCM4389_CHIP_GRPID: + return SI_INFO(sih)->armpllclkfreq ? si_get_armpllclkfreq(sih) * 1000 : FVCO_1002p8; + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + return FVCO_400; + default: + PMU_MSG(("si_pmu1_pllfvco0_pll2 : Unknown chipid %s\n", + bcm_chipname(sih->chip, chn, 8))); + ASSERT(0); + break; + } + return 0; +} /* si_pmu1_pllfvco0_pll2 */ + +/** query alp/xtal clock frequency */ +static uint32 +BCMPOSTTRAPFN(si_pmu1_alpclk0)(si_t *sih, osl_t *osh, pmuregs_t *pmu) +{ + const pmu1_xtaltab0_t *xt; + uint32 xf; + uint8 xtdiv = 1; + + BCM_REFERENCE(sih); + + /* Find the frequency in the table */ + xf = (R_REG(osh, &pmu->pmucontrol) & PCTL_XTALFREQ_MASK) >> + PCTL_XTALFREQ_SHIFT; + for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt ++) + if (xt->xf == xf) + break; + /* Could not find it so assign a default value */ + if (xt == NULL || xt->fref == 0) + xt = si_pmu1_xtaldef0(sih); + ASSERT(xt != NULL && xt->fref != 0); + + switch (CHIPID(sih->chip)) + { + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + /* xtalfreq for 4378B0 is 59.97 MHz and + * but ALP clk is xtal / 2 (29.985 MHz) by default. + */ + xtdiv = 2; + break; + default: + break; + } + + return (xt->fref * 1000) / xtdiv; +} + +/** + * Before the PLL is switched off, the HT clocks need to be deactivated, and reactivated + * when the PLL is switched on again. + * This function returns the chip specific HT clock resources (HT and MACPHY clocks). 
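+ * The returned mask is cleared from the min/max resource masks in si_pmu_pll_off()
+ * and OR'ed back in si_pmu_pll_on() once the PLL registers have been rewritten.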
+ */ +static uint32 +si_pmu_htclk_mask(si_t *sih) +{ + /* chip specific bit position of various resources */ + rsc_per_chip_t *rsc = si_pmu_get_rsc_positions(sih); + + uint32 ht_req = (PMURES_BIT(rsc->ht_avail) | PMURES_BIT(rsc->macphy_clkavail)); + + switch (CHIPID(sih->chip)) + { + CASE_BCM43602_CHIP: + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + case BCM4369_CHIP_GRPID: + case BCM4362_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + ht_req |= PMURES_BIT(rsc->ht_start); + break; + default: + ASSERT(0); + break; + } + + return ht_req; +} /* si_pmu_htclk_mask */ + +/** returns ALP frequency in [Hz] */ +static uint32 +BCMATTACHFN(si_pmu_def_alp_clock)(si_t *sih, osl_t *osh) +{ + uint32 clock = ALP_CLOCK; + + BCM_REFERENCE(osh); + + switch (CHIPID(sih->chip)) { + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + case BCM4369_CHIP_GRPID: + case BCM4362_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + +#ifdef UNRELEASEDCHIP +#endif + + clock = 37400*1000; + break; + CASE_BCM43602_CHIP: + clock = 40000 * 1000; + break; + } + + return clock; +} + +/** + * The BBPLL register set needs to be reprogrammed because the x-tal frequency is not known at + * compile time, or a different spur mode is selected. This function writes appropriate values into + * the BBPLL registers. It returns the 'xf', corresponding to the 'xf' bitfield in the PMU control + * register. + * 'xtal' : xtal frequency in [KHz] + * 'pllctrlreg_update': contains info on what entries to use in 'pllctrlreg_val' for the given + * x-tal frequency and spur mode + * 'pllctrlreg_val' : contains a superset of the BBPLL values to write + * + * Note: if pmu is NULL, this function returns xf, without programming PLL registers. + * This function is only called for pmu1_ type chips, perhaps we should rename it. + */ +static uint8 +si_pmu_pllctrlreg_update(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 xtal, + uint8 spur_mode, const pllctrl_data_t *pllctrlreg_update, uint32 array_size, + const uint32 *pllctrlreg_val) +{ + uint8 indx, reg_offset, xf = 0; + uint8 pll_ctrlcnt = 0; + + ASSERT(pllctrlreg_update); + + if (PMUREV(sih->pmurev) >= 5) { + pll_ctrlcnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT; + } else { + pll_ctrlcnt = (sih->pmucaps & PCAP_PC_MASK) >> PCAP_PC_SHIFT; + } + + /* Program the PLL control register if the xtal value matches with the table entry value */ + for (indx = 0; indx < array_size; indx++) { + /* If the entry does not match the xtal and spur_mode just continue the loop */ + if (!((pllctrlreg_update[indx].clock == (uint16)xtal) && + (pllctrlreg_update[indx].mode == spur_mode))) + continue; + /* + * Don't program the PLL registers if register base is NULL. + * If NULL just return the xref. 
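+ * Layout example: with pll_ctrlcnt == 5, table entry 'indx' consumes
+ * pllctrlreg_val[indx * 5] .. pllctrlreg_val[indx * 5 + 4], i.e. one word per PLL
+ * control register (matching the five-column 43012 table above).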
+ */ + if (pmu) { + for (reg_offset = 0; reg_offset < pll_ctrlcnt; reg_offset++) { + si_pmu_pllcontrol(sih, reg_offset, ~0, + pllctrlreg_val[indx*pll_ctrlcnt + reg_offset]); + } + + /* for 4369, arm clk cycle can be set from nvram - default is 400 MHz */ + if ((BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip)) && + (pll_ctrlcnt > PMU1_PLL0_PLLCTL6)) { + si_pmu_pll6val_armclk_calc(osh, pmu, + si_get_armpllclkfreq(sih), xtal, TRUE); + } + } + xf = pllctrlreg_update[indx].xf; + break; + } + return xf; +} /* si_pmu_pllctrlreg_update */ + +/* + * Calculate p1div, ndiv_int, ndiv_frac for clock ratio. + * Input: fvco, xtal + * Output: ndiv_int, ndiv_frac + * Returns: p1div + * + */ +uint8 +si_pmu_pll28nm_calc_ndiv(uint32 fvco, uint32 xtal, uint32 *ndiv_int, uint32 *ndiv_frac) +{ + uint8 p1div; + uint32 temp_high, temp_low; + ASSERT(xtal <= 0xFFFFFFFF / 1000); + p1div = 1 + (uint8) ((xtal * 1000) / 54000000UL); + *ndiv_int = (fvco * p1div) / xtal; + /* nfrac = 20 */ + /* ndiv_frac = (uint32) (((uint64) (fvco * p1div - xtal * ndiv_int) * (1 << 20)) / xtal) */ + math_uint64_multiple_add(&temp_high, &temp_low, fvco * p1div - xtal * (*ndiv_int), 1 << 20, + 0); + math_uint64_divide(ndiv_frac, temp_high, temp_low, xtal); + return p1div; +} + +void +si_pmu_armpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac) +{ + switch (CHIPID(sih->chip)) { + case BCM4388_CHIP_GRPID: + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4388_ARMPLL_I_NDIV_INT_MASK, + ndiv_int << PMU4388_ARMPLL_I_NDIV_INT_SHIFT); + si_pmu_pllupd(sih); + break; + case BCM4389_CHIP_GRPID: + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4389_ARMPLL_I_NDIV_INT_MASK, + ndiv_int << PMU4389_ARMPLL_I_NDIV_INT_SHIFT); + si_pmu_pllupd(sih); + break; + case BCM4369_CHIP_GRPID: + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, PMU4369_PLL1_PC5_P1DIV_MASK, + ((p1div >> PMU4369_P1DIV_LO_SHIFT) << PMU4369_PLL1_PC5_P1DIV_SHIFT)); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_P1DIV_MASK, + (p1div >> PMU4369_P1DIV_HI_SHIFT)); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_NDIV_INT_MASK, + ndiv_int << PMU4369_PLL1_PC6_NDIV_INT_SHIFT); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_NDIV_FRAC_MASK, + ndiv_frac << PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT); + si_pmu_pllupd(sih); + break; + case BCM4362_CHIP_GRPID: + /* 4362/69 PLL definitions are same. 
so reusing definitions */
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, PMU4369_PLL1_PC5_P1DIV_MASK,
+			((p1div >> PMU4369_P1DIV_LO_SHIFT) << PMU4369_PLL1_PC5_P1DIV_SHIFT));
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_P1DIV_MASK,
+			(p1div >> PMU4369_P1DIV_HI_SHIFT));
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_NDIV_INT_MASK,
+			ndiv_int << PMU4369_PLL1_PC6_NDIV_INT_SHIFT);
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_NDIV_FRAC_MASK,
+			ndiv_frac << PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT);
+		si_pmu_pllupd(sih);
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+}
+
+void
+si_pmu_bbpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac)
+{
+	switch (CHIPID(sih->chip)) {
+	case BCM4369_CHIP_GRPID:
+	case BCM4362_CHIP_GRPID:
+		/* PLL Control 2 Registers are the same for 4368 and 4369 */
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, PMU4369_PLL0_PC2_PDIV_MASK, p1div);
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, PMU4369_PLL0_PC2_NDIV_INT_MASK,
+			ndiv_int << PMU4369_PLL0_PC2_NDIV_INT_SHIFT);
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, PMU4369_PLL0_PC3_NDIV_FRAC_MASK,
+			ndiv_frac << PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT);
+		si_pmu_pllupd(sih);
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+}
+
+void
+si_pmu_armpll_chmdiv_upd(si_t *sih, uint32 ch0_mdiv, uint32 ch1_mdiv)
+{
+	switch (CHIPID(sih->chip)) {
+	default:
+		ASSERT(0);
+		break;
+	}
+}
+
+static bool
+si_pmu_armpll_write_required(si_t *sih, uint32 xtal)
+{
+	uint32 def_xtal = 0;
+	uint32 def_armclk_mhz = 0;
+	uint32 armclk_mhz = si_get_armpllclkfreq(sih);
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4369_CHIP_GRPID:
+		def_xtal = XTAL_FREQ_37400MHZ;
+		def_armclk_mhz = ARMPLL_FREQ_400MHZ;
+		break;
+	case BCM4388_CHIP_GRPID:
+		def_xtal = XTAL_FREQ_59970MHZ;
+		def_armclk_mhz = ARMPLL_FREQ_1000MHZ;
+		break;
+	case BCM4389_CHIP_GRPID:
+		def_xtal = XTAL_FREQ_59970MHZ;
+		def_armclk_mhz = ARMPLL_FREQ_1000MHZ;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * If the programmed xtalfreq is the same as the default xtal, there is no need to
+	 * enable the pll write. Check the armclk and xtalfreq instead of comparing the
+	 * calculated value against the pll register value.
+	 */
+	return (((armclk_mhz == def_armclk_mhz) && (xtal == def_xtal)) ? FALSE : TRUE);
+}
+
+/**
+ * Chip-specific overrides to PLLCONTROL registers during init. If certain conditions (dependent on
+ * x-tal frequency and current ALP frequency) are met, an update of the PLL is required.
+ *
+ * OTP PLLCONTROL overrides take precedence over the values written here.
+ * If update_required==FALSE, this returns TRUE if an update is about to occur,
+ * but no write happens.
+ *
+ * Return value: TRUE if the BBPLL registers 'update' field should be written by the caller.
+ *
+ * This function is only called for pmu1_ type chips, perhaps we should rename it.
+ */
+static bool
+BCMATTACHFN(si_pmu_update_pllcontrol)(si_t *sih, osl_t *osh, uint32 xtal, bool update_required)
+{
+	pmuregs_t *pmu;
+	uint origidx;
+	bool write_en = FALSE;
+	uint8 xf = 0;
+	const pmu1_xtaltab0_t *xt;
+	uint32 tmp;
+	const pllctrl_data_t *pllctrlreg_update = NULL;
+	uint32 array_size = 0;
+	/* points at a set of PLL register values to write for a given x-tal frequency: */
+	const uint32 *pllctrlreg_val = NULL;
+	uint8 ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MASH;
+	uint32 xtalfreq = 0;
+	uint32 ndiv_int;
+	uint32 ndiv_frac;
+	uint8 pdiv;
+
+	BCM_REFERENCE(ndiv_int);
+	BCM_REFERENCE(ndiv_frac);
+	BCM_REFERENCE(pdiv);
+	/* If there is an OTP or NVRAM entry for xtalfreq, program the
+	 * PLL control register even if it is the default xtal.
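+	 * For example (illustrative): an NVRAM/OTP entry "xtalfreq=37400" takes CASE1
+	 * below and forces write_en even on a board already running at 37.4 MHz.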
+	 */
+	xtalfreq = getintvar(NULL, rstr_xtalfreq);
+	/* CASE1 */
+	if (xtalfreq) {
+		write_en = TRUE;
+		xtal = xtalfreq;
+	} else {
+		/* There is NO OTP value */
+		if (xtal) {
+			/* CASE2: If the xtal value was calculated, program the PLL control
+			 * registers only if it is not the default xtal value.
+			 */
+			if (xtal != (si_pmu_def_alp_clock(sih, osh)/1000))
+				write_en = TRUE;
+		} else {
+			/* CASE3: If the xtal obtained is "0", i.e., the clock was not measured,
+			 * then leave the PLL control register as it is but program the xf in
+			 * the pmucontrol register with the default xtal value.
+			 */
+			xtal = si_pmu_def_alp_clock(sih, osh)/1000;
+		}
+	}
+
+	switch (CHIPID(sih->chip)) {
+	case BCM43012_CHIP_ID:
+	case BCM43013_CHIP_ID:
+	case BCM43014_CHIP_ID:
+		pllctrlreg_update = pmu1_xtaltab0_43012;
+		array_size = ARRAYSIZE(pmu1_xtaltab0_43012);
+		pllctrlreg_val = pmu1_pllctrl_tab_43012_1600mhz;
+		break;
+	case BCM4369_CHIP_GRPID:
+		pllctrlreg_update = pmu1_xtaltab0_4369;
+		array_size = ARRAYSIZE(pmu1_xtaltab0_4369);
+		pllctrlreg_val = pmu1_pllctrl_tab_4369_960p1mhz;
+		/* PLL programming defaults to true; disabled later if not needed */
+		write_en = TRUE;
+		break;
+	case BCM4362_CHIP_GRPID:
+		pllctrlreg_update = pmu1_xtaltab0_4362;
+		array_size = ARRAYSIZE(pmu1_xtaltab0_4362);
+		pllctrlreg_val = pmu1_pllctrl_tab_4362_960p1mhz;
+		/* PLL programming defaults to true; disabled later if not needed */
+		write_en = TRUE;
+		break;
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+		/* TBD : bypass PLL programming, so use chip default values */
+		pllctrlreg_update = NULL;
+		array_size = 0;
+		pllctrlreg_val = NULL;
+		write_en = FALSE;
+		break;
+	case BCM4388_CHIP_GRPID:
+		/* TBD : bypass PLL programming, so use chip default values */
+		pllctrlreg_update = NULL;
+		array_size = 0;
+		pllctrlreg_val = NULL;
+		write_en = FALSE;
+		break;
+	case BCM4389_CHIP_GRPID:
+		pllctrlreg_update = pmu1_xtaltab0_4389;
+		array_size = ARRAYSIZE(pmu1_xtaltab0_4389);
+		pllctrlreg_val = pmu1_pllctrl_tab_4389_963mhz;
+		break;
+	CASE_BCM43602_CHIP:
+		/*
+		 * 43602 has only 1 x-tal value; possibly insert a case here when another BBPLL
+		 * frequency than 960Mhz is required (e.g., for spur avoidance)
+		 */
+		/* fall through */
+	default:
+		/* write_en stays FALSE in this case, so the function performs no writes */
+		write_en = FALSE;
+		break;
+	}
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	/* Check if the table has PLL control register values for the requested xtal */
+	if (!update_required && pllctrlreg_update) {
+		/* Here the chipcommon register base is passed as NULL, so that we just get
+		 * the xf for the xtal being programmed but don't program the registers now
+		 * as the PLL is not yet turned OFF.
+		 */
+		xf = si_pmu_pllctrlreg_update(sih, osh, NULL, xtal, 0, pllctrlreg_update,
+			array_size, pllctrlreg_val);
+
+		/* Program the PLL based on the xtal value. */
+		if (xf != 0) {
+			/* Write XtalFreq. Set the divisor also.
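+			 * Worked example (illustrative): xtal = 37400 KHz gives an ILP
+			 * divider field of ((37400 + 127) / 128) - 1 = 292, and xf = 15
+			 * per the xtal tables above.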
+			 */
+			tmp = R_REG(osh, &pmu->pmucontrol) &
+				~(PCTL_ILP_DIV_MASK | PCTL_XTALFREQ_MASK);
+			tmp |= (((((xtal + 127) / 128) - 1) << PCTL_ILP_DIV_SHIFT) &
+				PCTL_ILP_DIV_MASK) |
+				((xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK);
+			W_REG(osh, &pmu->pmucontrol, tmp);
+		} else {
+			write_en = FALSE;
+			if (!FWSIGN_ENAB()) {
+				printf(rstr_Invalid_Unsupported_xtal_value_D, xtal);
+			}
+		}
+
+		write_en = si_pmu_armpll_write_required(sih, xtal);
+	}
+
+	/* If it's a check sequence or if there is nothing to write, return here */
+	if ((update_required == FALSE) || (write_en == FALSE)) {
+		goto exit;
+	}
+
+	/* Update the PLL control register based on the xtal used. */
+	if (pllctrlreg_val) {
+		si_pmu_pllctrlreg_update(sih, osh, pmu, xtal, 0, pllctrlreg_update, array_size,
+			pllctrlreg_val);
+	}
+
+	/* Chip specific changes to the PLL Control registers are done here. */
+	switch (CHIPID(sih->chip)) {
+	case BCM4388_CHIP_ID: {
+		uint32 armclk_mhz = si_get_armpllclkfreq(sih);
+		uint32 vco_freq = (armclk_mhz * PMU4388_APLL_PDIV * 1000);
+
+		ASSERT(vco_freq <= FVCO_3200);
+
+		/*
+		 * ndiv_init = Fvco / Frefeff
+		 * Frefeff = Fref / pdiv
+		 * Fref = xtal / 2
+		 * pdiv = 3
+		 *
+		 * ndiv_init = ((Fvco * pdiv * 1000000) / ((xtal * 1000) / 2)
+		 */
+		ndiv_int = (vco_freq / (xtal / 2));
+		si_pmu_armpll_freq_upd(sih, 0, ndiv_int, 0);
+		break;
+	}
+
+	case BCM4389_CHIP_ID: {
+		uint32 armclk_mhz = si_get_armpllclkfreq(sih);
+		uint32 vco_freq = (armclk_mhz * PMU4389_APLL_PDIV * 1000);
+
+		ASSERT(vco_freq <= FVCO_3200);
+
+		/*
+		 * ndiv_init = Fvco / Frefeff
+		 * Frefeff = Fref / pdiv
+		 * Fref = xtal / 2
+		 * pdiv = 3
+		 *
+		 * ndiv_init = ((Fvco * pdiv * 1000000) / ((xtal * 1000) / 2)
+		 */
+		ndiv_int = (vco_freq / (xtal / 2));
+		si_pmu_armpll_freq_upd(sih, 0, ndiv_int, 0);
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	/* Program the PLL based on the xtal value. */
+	if (xtal != 0) {
+		/* Find the frequency in the table */
+		for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt ++)
+			if (xt->fref == xtal) {
+				break;
+			}
+
+		/* Check current PLL state, bail out if it has been programmed or
+		 * we don't know how to program it. But we might still have some programming
+		 * like changing the ARM clock, etc. So cannot return from here.
+		 */
+		if (xt == NULL || xt->fref == 0) {
+			goto exit;
+		}
+
+		/* If the PLL is already programmed, exit from here.
*/ + if (((R_REG(osh, &pmu->pmucontrol) & + PCTL_XTALFREQ_MASK) >> PCTL_XTALFREQ_SHIFT) == xt->xf) { + goto exit; + } + + PMU_MSG(("XTAL %d.%d MHz (%d)\n", xtal / 1000, xtal % 1000, xt->xf)); + PMU_MSG(("Programming PLL for %d.%d MHz\n", xt->fref / 1000, xt->fref % 1000)); + + if (BCM4389_CHIP(sih->chip)) { + /* Write ndiv_int to pllcontrol[6] */ + tmp = ((xt->ndiv_int << PMU4389_ARMPLL_I_NDIV_INT_SHIFT) + & PMU4389_ARMPLL_I_NDIV_INT_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, + (PMU4389_ARMPLL_I_NDIV_INT_MASK), tmp); + } else if (BCM4388_CHIP(sih->chip)) { + /* Write ndiv_int to pllcontrol[6] */ + tmp = ((xt->ndiv_int << PMU4388_ARMPLL_I_NDIV_INT_SHIFT) + & PMU4388_ARMPLL_I_NDIV_INT_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, + (PMU4388_ARMPLL_I_NDIV_INT_MASK), tmp); + } else if (BCM4369_CHIP(sih->chip) || + BCM4362_CHIP(sih->chip) || + FALSE) { + /* Write pdiv (Actually it is mapped to p1div in the struct) + to pllcontrol[2] + */ + tmp = ((xt->p1div << PMU4369_PLL0_PC2_PDIV_SHIFT) & + PMU4369_PLL0_PC2_PDIV_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, + (PMU4369_PLL0_PC2_PDIV_MASK), tmp); + + /* Write ndiv_int to pllcontrol[2] */ + tmp = ((xt->ndiv_int << PMU4369_PLL0_PC2_NDIV_INT_SHIFT) + & PMU4369_PLL0_PC2_NDIV_INT_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, + (PMU4369_PLL0_PC2_NDIV_INT_MASK), tmp); + + /* Write ndiv_frac to pllcontrol[3] */ + tmp = ((xt->ndiv_frac << PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT) & + PMU4369_PLL0_PC3_NDIV_FRAC_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, + PMU4369_PLL0_PC3_NDIV_FRAC_MASK, tmp); + } else { + /* Write p1div and p2div to pllcontrol[0] */ + tmp = ((xt->p1div << PMU1_PLL0_PC0_P1DIV_SHIFT) & + PMU1_PLL0_PC0_P1DIV_MASK) | + ((xt->p2div << PMU1_PLL0_PC0_P2DIV_SHIFT) & + PMU1_PLL0_PC0_P2DIV_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG0, + (PMU1_PLL0_PC0_P1DIV_MASK | PMU1_PLL0_PC0_P2DIV_MASK), tmp); + + /* Write ndiv_int and ndiv_mode to pllcontrol[2] */ + tmp = ((xt->ndiv_int << PMU1_PLL0_PC2_NDIV_INT_SHIFT) + & PMU1_PLL0_PC2_NDIV_INT_MASK) | + ((ndiv_mode << PMU1_PLL0_PC2_NDIV_MODE_SHIFT) + & PMU1_PLL0_PC2_NDIV_MODE_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, + (PMU1_PLL0_PC2_NDIV_INT_MASK | PMU1_PLL0_PC2_NDIV_MODE_MASK), tmp); + /* Write ndiv_frac to pllcontrol[3] */ + tmp = ((xt->ndiv_frac << PMU1_PLL0_PC3_NDIV_FRAC_SHIFT) & + PMU1_PLL0_PC3_NDIV_FRAC_MASK); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, + PMU1_PLL0_PC3_NDIV_FRAC_MASK, tmp); + } + + /* Write XtalFreq. Set the divisor also. */ + tmp = R_REG(osh, &pmu->pmucontrol) & + ~(PCTL_ILP_DIV_MASK | PCTL_XTALFREQ_MASK); + tmp |= (((((xt->fref + 127) / 128) - 1) << PCTL_ILP_DIV_SHIFT) & + PCTL_ILP_DIV_MASK) | + ((xt->xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK); + W_REG(osh, &pmu->pmucontrol, tmp); + } + +exit: + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return write_en; +} /* si_pmu_update_pllcontrol */ + +/* returns current value of PMUTimer. + also taking care of PR88659 by multiple reads. 
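+ The timer runs in the ILP clock domain, so a single read can race a counter
+ update; reading twice (and once more on mismatch, as below) is assumed to
+ filter out a torn value.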
+*/
+uint32
+BCMPOSTTRAPFN(si_pmu_get_pmutimer)(si_t *sih)
+{
+	osl_t *osh = si_osh(sih);
+	pmuregs_t *pmu;
+	uint origidx;
+	uint32 start;
+	BCM_REFERENCE(sih);
+
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	start = R_REG(osh, &pmu->pmutimer);
+	if (start != R_REG(osh, &pmu->pmutimer))
+		start = R_REG(osh, &pmu->pmutimer);
+
+	si_setcoreidx(sih, origidx);
+
+	return (start);
+}
+
+/* Get current pmu time API */
+uint32
+si_cur_pmu_time(si_t *sih)
+{
+	uint origidx;
+	uint32 pmu_time;
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+
+	pmu_time = si_pmu_get_pmutimer(sih);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+	return (pmu_time);
+}
+
+/**
+ * returns
+ * a) the diff between a 'prev' value of the pmu timer and the current value
+ * b) the current pmutime value in 'prev'
+ * So, 'prev' is an IO parameter.
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_get_pmutime_diff)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *prev)
+{
+	uint32 pmutime_diff = 0, pmutime_val = 0;
+	uint32 prev_val = *prev;
+	BCM_REFERENCE(osh);
+	BCM_REFERENCE(pmu);
+	/* read current value */
+	pmutime_val = si_pmu_get_pmutimer(sih);
+	/* diff between prev and current value, handling the wraparound case as well */
+	pmutime_diff = (pmutime_val >= prev_val) ?
+		(pmutime_val - prev_val) :
+		(~prev_val + pmutime_val + 1);
+	*prev = pmutime_val;
+	return pmutime_diff;
+}
+
+/**
+ * wait for usec for the res_pending register to change.
+ * NOTE: usec SHOULD be > 32uS
+ * if cond = TRUE, res_pending will be read until it becomes == 0;
+ * If cond = FALSE, res_pending will be read until it becomes != 0;
+ * returns TRUE if timedout.
+ * returns elapsed time in this loop in elapsed_time
+ */
+bool
+BCMPOSTTRAPFN(si_pmu_wait_for_res_pending)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint usec,
+	bool cond, uint32 *elapsed_time)
+{
+	/* add 32uSec more */
+	uint countdown = usec;
+	uint32 pmutime_prev = 0, pmutime_elapsed = 0, res_pend;
+	bool pending = FALSE;
+
+	/* store current time */
+	pmutime_prev = si_pmu_get_pmutimer(sih);
+	while (1) {
+		res_pend = R_REG(osh, &pmu->res_pending);
+
+		/* based on the condition, check */
+		if (cond == TRUE) {
+			if (res_pend == 0) break;
+		} else {
+			if (res_pend != 0) break;
+		}
+
+		/* if the required time is over */
+		if ((pmutime_elapsed * PMU_US_STEPS) >= countdown) {
+			/* timeout. so return as still pending */
+			pending = TRUE;
+			break;
+		}
+
+		/* get elapsed time after adding diff between prev and current
+		 * pmutimer value
+		 */
+		pmutime_elapsed += si_pmu_get_pmutime_diff(sih, osh, pmu, &pmutime_prev);
+	}
+
+	*elapsed_time = pmutime_elapsed * PMU_US_STEPS;
+	return pending;
+} /* si_pmu_wait_for_res_pending */
+
+/**
+ * The algorithm for the pending check is:
+ * step1: wait till (res_pending != 0) OR pmu_max_trans_timeout.
+ *	if max_trans_timeout, flag error and exit.
+ *	wait for 1 ILP clk [64uS] based on pmu timer,
+ *	polling to see if res_pending again goes high.
+ *	if res_pending again goes high, go back to step1.
+ * Note: res_pending is checked repeatedly because, in between switching
+ * of dependent resources, res_pending resets to 0 for a short duration
+ * of time before it becomes 1 again.
+ * Note: return 0 is GOOD, 1 is BAD [mainly timeout].
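+ * Note: elapsed time is measured via si_pmu_get_pmutime_diff(), which handles
+ * pmutimer wraparound; e.g. prev = 0xFFFFFF00 and current = 0x40 yields
+ * ~prev + current + 1 = 0x140 ticks.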
+ */ +int +BCMPOSTTRAPFN(si_pmu_wait_for_steady_state)(si_t *sih, osl_t *osh, pmuregs_t *pmu) +{ + si_info_t *sii = SI_INFO(sih); + int stat = 0; + bool timedout = FALSE; + uint32 elapsed = 0, pmutime_total_elapsed = 0; + uint32 pmutime_prev; + + sii->res_pend_count = 0; + + pmutime_prev = si_pmu_get_pmutimer(sih); + + while (1) { + /* wait until all resources are settled down [till res_pending becomes 0] */ + timedout = si_pmu_wait_for_res_pending(sih, osh, pmu, + PMU_MAX_TRANSITION_DLY, TRUE, &elapsed); + + sii->res_state[sii->res_pend_count].low_time = + si_pmu_get_pmutime_diff(sih, osh, pmu, &pmutime_prev); + sii->res_state[sii->res_pend_count].low = R_REG(osh, &pmu->res_pending); + + if (timedout) { + stat = 1; + break; + } + + pmutime_total_elapsed += elapsed; + /* wait to check if any resource comes back to non-zero indicating + * that it pends again. The res_pending goes 0 for 1 ILP clock before + * getting set for next resource in the sequence , so if res_pending + * is 0 for more than 1 ILP clk it means nothing is pending + * to indicate some pending dependency. + */ + pmutime_prev = R_REG(osh, &pmu->pmutimer); + timedout = si_pmu_wait_for_res_pending(sih, osh, pmu, + 64, FALSE, &elapsed); + + pmutime_total_elapsed += elapsed; + + sii->res_state[sii->res_pend_count].high_time = + si_pmu_get_pmutime_diff(sih, osh, pmu, &pmutime_prev); + sii->res_state[sii->res_pend_count].high = R_REG(osh, &pmu->res_pending); + + /* Here, we can also check timedout, but we make sure that, + * we read the res_pending again. + */ + + if (timedout) { + stat = 0; + break; + } + + /* Total wait time for all the waits above added should be + * less than PMU_MAX_TRANSITION_DLY + */ + if (pmutime_total_elapsed >= PMU_MAX_TRANSITION_DLY) { + /* timeout. so return as still pending */ + stat = 1; + break; + } + + sii->res_pend_count++; + sii->res_pend_count %= RES_PEND_STATS_COUNT; + pmutime_prev = R_REG(osh, &pmu->pmutimer); + } + return stat; +} /* si_pmu_wait_for_steady_state */ + +static uint32 +si_pmu_pll_delay_43012(si_t *sih, uint32 delay_us, uint32 poll) +{ + uint32 delay = 0; + + /* In case of NIC builds, we can use OSL_DELAY() for 1 us delay. But in case of DONGLE + * builds, we can't rely on the OSL_DELAY() as it is internally relying on HT clock and + * we are calling this function when ALP clock is present. 
+ */
+#if defined(DONGLEBUILD)
+	uint32 initial, current;
+
+	initial = get_arm_cyclecount();
+	while (delay < delay_us) {
+		if (poll == 1) {
+			if (si_gci_chipstatus(sih, GCI_CHIPSTATUS_07) &
+				GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK) {
+				goto exit;
+			}
+		}
+		current = get_arm_cyclecount();
+		delay = ((current - initial) * 1000) / si_xtalfreq(sih);
+	}
+#else
+	for (delay = 0; delay < delay_us; delay++) {
+		if (poll == 1) {
+			if (si_gci_chipstatus(sih, GCI_CHIPSTATUS_07) &
+				GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK) {
+				goto exit;
+			}
+		}
+		OSL_DELAY(1);
+	}
+#endif /* DONGLEBUILD */
+
+	if (poll == 1) {
+		PMU_ERROR(("si_pmu_pll_delay_43012: PLL not locked!"));
+		ASSERT(0);
+	}
+exit:
+	return delay;
+}
+
+static void
+si_pmu_pll_on_43012(si_t *sih, osl_t *osh, pmuregs_t *pmu, bool openloop_cal)
+{
+	uint32 rsrc_ht, total_time = 0;
+
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_PWROFF, 0);
+	total_time += si_pmu_pll_delay_43012(sih, 2, 0);
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH |
+		PMUCCTL04_43012_FORCE_BBPLL_PWRDN, 0);
+	total_time += si_pmu_pll_delay_43012(sih, 2, 0);
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_ARESET, 0);
+
+	rsrc_ht = R_REG(osh, &pmu->res_state) &
+		((1 << RES43012_HT_AVAIL) | (1 << RES43012_HT_START));
+
+	if (rsrc_ht)
+	{
+		/* Wait for PLL to lock in close-loop */
+		total_time += si_pmu_pll_delay_43012(sih, 200, 1);
+	}
+	else {
+		/* Wait for 1 us for the open-loop clock to start */
+		total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+	}
+
+	if (!openloop_cal) {
+		/* Allow clk to be used if it's not calibration */
+		si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_DRESET, 0);
+		total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+		si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_DISABLE_LQ_AVAIL, 0);
+		si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_DISABLE_HT_AVAIL, 0);
+	}
+
+	PMU_MSG(("si_pmu_pll_on_43012: time taken: %d us\n", total_time));
+}
+
+static void
+si_pmu_pll_off_43012(si_t *sih, osl_t *osh, pmuregs_t *pmu)
+{
+	uint32 total_time = 0;
+	BCM_REFERENCE(osh);
+	BCM_REFERENCE(pmu);
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+		PMUCCTL04_43012_DISABLE_LQ_AVAIL | PMUCCTL04_43012_DISABLE_HT_AVAIL,
+		PMUCCTL04_43012_DISABLE_LQ_AVAIL | PMUCCTL04_43012_DISABLE_HT_AVAIL);
+	total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+		(PMUCCTL04_43012_FORCE_BBPLL_ARESET | PMUCCTL04_43012_FORCE_BBPLL_DRESET |
+		PMUCCTL04_43012_FORCE_BBPLL_PWRDN | PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH),
+		(PMUCCTL04_43012_FORCE_BBPLL_ARESET | PMUCCTL04_43012_FORCE_BBPLL_DRESET |
+		PMUCCTL04_43012_FORCE_BBPLL_PWRDN | PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH));
+	total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+		PMUCCTL04_43012_FORCE_BBPLL_PWROFF,
+		PMUCCTL04_43012_FORCE_BBPLL_PWROFF);
+
+	PMU_MSG(("si_pmu_pll_off_43012: time taken: %d us\n", total_time));
+}
+
+/** Turn Off the PLL - Required before setting the PLL registers */
+static void
+si_pmu_pll_off(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *min_mask,
+	uint32 *max_mask, uint32 *clk_ctl_st)
+{
+	uint32 ht_req;
+
+	/* Save the original register values */
+	*min_mask = R_REG(osh, &pmu->min_res_mask);
+	*max_mask = R_REG(osh, &pmu->max_res_mask);
+	*clk_ctl_st = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0);
+
+	ht_req = si_pmu_htclk_mask(sih);
+	if (ht_req == 0)
+		return;
+
+	if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+
(CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID) || + (BCM4369_CHIP(sih->chip)) || + (BCM4362_CHIP(sih->chip)) || + (BCM4376_CHIP(sih->chip)) || + (BCM4378_CHIP(sih->chip)) || + (BCM4385_CHIP(sih->chip)) || + (BCM4387_CHIP(sih->chip)) || + (BCM4388_CHIP(sih->chip)) || + (BCM4389_CHIP(sih->chip)) || + BCM43602_CHIP(sih->chip) || + 0) { + /* + * If HT_AVAIL is not set, wait to see if any resources are availing HT. + */ + if (((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL) != CCS_HTAVAIL)) + si_pmu_wait_for_steady_state(sih, osh, pmu); + } else { + OR_REG(osh, &pmu->max_res_mask, ht_req); + /* wait for HT to be ready before taking the HT away...HT could be coming up... */ + SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY); + ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL)); + } + + if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID)) { + si_pmu_pll_off_43012(sih, osh, pmu); + } else { + AND_REG(osh, &pmu->min_res_mask, ~ht_req); + AND_REG(osh, &pmu->max_res_mask, ~ht_req); + + SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL) == CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY); + ASSERT(!(si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL)); + OSL_DELAY(100); + } +} /* si_pmu_pll_off */ + +/* below function are for BBPLL parallel purpose */ +/** Turn Off the PLL - Required before setting the PLL registers */ +void +si_pmu_pll_off_PARR(si_t *sih, osl_t *osh, uint32 *min_mask, +uint32 *max_mask, uint32 *clk_ctl_st) +{ + pmuregs_t *pmu; + uint origidx; + bcm_int_bitmask_t intr_val; + uint32 ht_req; + + /* Block ints and save current core */ + si_introff(sih, &intr_val); + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + /* Save the original register values */ + *min_mask = R_REG(osh, &pmu->min_res_mask); + *max_mask = R_REG(osh, &pmu->max_res_mask); + *clk_ctl_st = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0); + ht_req = si_pmu_htclk_mask(sih); + if (ht_req == 0) { + /* Return to original core */ + si_setcoreidx(sih, origidx); + si_intrrestore(sih, &intr_val); + return; + } + + if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID) || + (BCM4369_CHIP(sih->chip)) || + (BCM4362_CHIP(sih->chip)) || + (BCM4376_CHIP(sih->chip)) || + (BCM4378_CHIP(sih->chip)) || + (BCM4385_CHIP(sih->chip)) || + (BCM4387_CHIP(sih->chip)) || + (BCM4388_CHIP(sih->chip)) || + (BCM4389_CHIP(sih->chip)) || + (BCM4397_CHIP(sih->chip)) || + (BCM43602_CHIP(sih->chip)) || + 0) { + /* + * If HT_AVAIL is not set, wait to see if any resources are availing HT. + */ + if (((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL) + != CCS_HTAVAIL)) + si_pmu_wait_for_steady_state(sih, osh, pmu); + } else { + OR_REG(osh, &pmu->max_res_mask, ht_req); + /* wait for HT to be ready before taking the HT away...HT could be coming up... 
*/
+		SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+			& CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY);
+		ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+			& CCS_HTAVAIL));
+	}
+
+	AND_REG(osh, &pmu->min_res_mask, ~ht_req);
+	AND_REG(osh, &pmu->max_res_mask, ~ht_req);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+	si_intrrestore(sih, &intr_val);
+} /* si_pmu_pll_off_PARR */
+
+/** Turn ON/restore the PLL based on the mask received */
+static void
+si_pmu_pll_on(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 min_mask_mask,
+	uint32 max_mask_mask, uint32 clk_ctl_st_mask)
+{
+	uint32 ht_req;
+
+	ht_req = si_pmu_htclk_mask(sih);
+	if (ht_req == 0)
+		return;
+
+	max_mask_mask &= ht_req;
+	min_mask_mask &= ht_req;
+
+	if (max_mask_mask != 0)
+		OR_REG(osh, &pmu->max_res_mask, max_mask_mask);
+
+	if (min_mask_mask != 0)
+		OR_REG(osh, &pmu->min_res_mask, min_mask_mask);
+
+	if (clk_ctl_st_mask & CCS_HTAVAIL) {
+		/* Wait for HT_AVAIL to come back */
+		SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+			& CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY);
+		ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+			& CCS_HTAVAIL));
+	}
+
+	if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+		si_pmu_pll_on_43012(sih, osh, pmu, 0);
+	}
+}
+
+/**
+ * Set up PLL registers in the PMU as per the (optional) OTP values, or, if no OTP values are
+ * present, optionally update with POR override values contained in firmware. Enables the BBPLL
+ * when done.
+ */
+static void
+BCMATTACHFN(si_pmu1_pllinit1)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 xtal)
+{
+	char name[16];
+	const char *otp_val;
+	uint8 i, otp_entry_found = FALSE;
+	uint32 pll_ctrlcnt;
+	uint32 min_mask = 0, max_mask = 0, clk_ctl_st = 0;
+#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED)
+	uint32 otpval = 0, regval = 0;
+#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */
+
+	if (!FWSIGN_ENAB()) {
+		if (PMUREV(sih->pmurev) >= 5) {
+			pll_ctrlcnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+		} else {
+			pll_ctrlcnt = (sih->pmucaps & PCAP_PC_MASK) >> PCAP_PC_SHIFT;
+		}
+
+		/* Check if there is any OTP entry for the PLLcontrol registers */
+		for (i = 0; i < pll_ctrlcnt; i++) {
+			snprintf(name, sizeof(name), rstr_pllD, i);
+			if ((otp_val = getvar(NULL, name)) == NULL)
+				continue;
+
+			/* If an OTP entry is found for a PLL register, then turn off the PLL
+			 * and set the status of the OTP entry accordingly.
+			 */
+			otp_entry_found = TRUE;
+			break;
+		}
+	}
+
+	/* If no OTP parameter is found and no chip-specific updates are needed, return.
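+	 * (e.g., a board with no "pll<n>" OTP entries that is already running at the
+	 * default xtal and ARM clock falls through here without touching the PLL.)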
*/ + if ((otp_entry_found == FALSE) && + (si_pmu_update_pllcontrol(sih, osh, xtal, FALSE) == FALSE)) { +#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) + /* + * For 4369/4362 PLL3 could be prorammed by BT, check the value is default and not + * overrided by BT + */ + if ((BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip)) && + (regval = si_pmu_pllcontrol(sih, 3, 0, 0)) != PMU_PLL3_4369B0_DEFAULT) { + PMU_ERROR(("Default PLL3 value 0x%x is not same as programmed" + "value 0x%x\n", PMU_PLL3_4369B0_DEFAULT, regval)); + hnd_gcisem_set_err(GCI_PLL_LOCK_SEM); + return; + } + + /* Update SW_READY bit indicating WLAN is ready and verified PLL3 */ + si_gci_output(sih, GCI_ECI_SW1(GCI_WLAN_IP_ID), GCI_SWREADY, GCI_SWREADY); +#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */ + return; + } + +#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) + if ((hnd_gcisem_acquire(GCI_PLL_LOCK_SEM, TRUE, GCI_PLL_LOCK_SEM_TIMEOUT) != BCME_OK)) { + PMU_ERROR(("Failed to get GCI PLL Lock semaphore...\n")); + hnd_gcisem_set_err(GCI_PLL_LOCK_SEM); + return; + } + + /* Skip BB PLL programming if BT has already done it, which is indicated by SW_READY bit */ + if (si_gci_input(sih, GCI_ECI_SW1(GCI_BT_IP_ID)) & GCI_SWREADY) { + PMU_MSG(("PLL is already programmed\n")); + + /* Program ARM PLL only if xtalfreq(pllctrl6) programmed is different from xtal */ + if (si_pmu_update_pllcontrol(sih, osh, xtal, FALSE)) { + /* Make sure PLL is off */ + si_pmu_pll_off(sih, osh, pmu, &min_mask, &max_mask, &clk_ctl_st); + + /* for 4369, arm clk cycle can be set from nvram - default is 400 MHz */ + if ((BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip)) && + (pll_ctrlcnt > PMU1_PLL0_PLLCTL6)) { + PMU_MSG(("Programming ARM CLK\n")); + si_pmu_pll6val_armclk_calc(osh, pmu, + si_get_armpllclkfreq(sih), xtal, TRUE); + } + + /* Flush ('update') the deferred pll control registers writes */ + if (PMUREV(sih->pmurev) >= 2) + OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD); + + /* Restore back the register values. This ensures PLL remains on if it + * was originally on and remains off if it was originally off. + */ + si_pmu_pll_on(sih, osh, pmu, min_mask, max_mask, clk_ctl_st); + } + + snprintf(name, sizeof(name), rstr_pllD, 3); + if ((otp_val = getvar(NULL, name)) != NULL) { + otpval = (uint32)bcm_strtoul(otp_val, NULL, 0); + if ((regval = si_pmu_pllcontrol(sih, 3, 0, 0)) != otpval) { + PMU_ERROR(("PLL3 programming value 0x%x is not same as programmed" + "value 0x%x\n", otpval, regval)); + hnd_gcisem_set_err(GCI_PLL_LOCK_SEM); + } + } + goto done; + } +#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */ + + /* Make sure PLL is off */ + si_pmu_pll_off(sih, osh, pmu, &min_mask, &max_mask, &clk_ctl_st); + + /* Update any chip-specific PLL registers. Does not write PLL 'update' bit yet. */ + si_pmu_update_pllcontrol(sih, osh, xtal, TRUE); + + /* Update the PLL register if there is a OTP entry for PLL registers */ + si_pmu_otp_pllcontrol(sih, osh); + + /* Flush ('update') the deferred pll control registers writes */ + if (PMUREV(sih->pmurev) >= 2) + OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD); + + /* Restore back the register values. This ensures PLL remains on if it + * was originally on and remains off if it was originally off. 
+	 */
+	si_pmu_pll_on(sih, osh, pmu, min_mask, max_mask, clk_ctl_st);
+
+	if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+		uint32 origidx;
+		/* PMU clock stretch to be decreased to 8 for HT and ALP
+		 * to reduce DS0 current during high traffic
+		 */
+		W_REG(osh, &pmu->clkstretch, CSTRETCH_REDUCE_8);
+
+		/* SDIOD to request for ALP
+		 * to reduce DS0 current during high traffic
+		 */
+		origidx = si_coreidx(sih);
+		si_setcore(sih, SDIOD_CORE_ID, 0);
+		/* Clear the Bit 8 for ALP REQUEST change */
+		si_wrapperreg(sih, AI_OOBSELOUTB30, (AI_OOBSEL_MASK << AI_OOBSEL_1_SHIFT),
+			OOB_B_ALP_REQUEST << AI_OOBSEL_1_SHIFT);
+		si_setcoreidx(sih, origidx);
+	}
+
+#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED)
+done:
+	/* Update SW_READY bit indicating WLAN is done programming PLL registers */
+	si_gci_output(sih, GCI_ECI_SW1(GCI_WLAN_IP_ID), GCI_SWREADY, GCI_SWREADY);
+	if ((hnd_gcisem_release(GCI_PLL_LOCK_SEM) != BCME_OK)) {
+		PMU_ERROR(("Failed to release GCI PLL Lock semaphore...\n"));
+		hnd_gcisem_set_err(GCI_PLL_LOCK_SEM);
+	}
+#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */
+} /* si_pmu1_pllinit1 */
+
+#if defined(EDV)
+/* returns backplane clk programmed in pll cntl 1 */
+/* WHY NOT JUST CALL si_pmu_si_clock()? */
+uint32 si_pmu_get_backplaneclkspeed(si_t *sih)
+{
+	uint32 FVCO;
+	uint32 tmp, mdiv = 1;
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+	case BCM4388_CHIP_GRPID:
+	case BCM4389_CHIP_GRPID:
+	case BCM4397_CHIP_GRPID:
+		return si_pmu_bpclk_4387(sih);
+	default:
+		break;
+	}
+
+	FVCO = si_pmu1_pllfvco0(sih);
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4369_CHIP_GRPID:
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0);
+		mdiv = (tmp & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT;
+		break;
+	default:
+		ASSERT(FALSE);
+		break;
+	}
+	return FVCO / mdiv * 1000u;
+}
+
+/* Update backplane clock speed */
+void
+si_pmu_update_backplane_clock(si_t *sih, osl_t *osh, uint reg, uint32 mask, uint32 val)
+{
+
+	pmuregs_t *pmu;
+	uint origidx;
+	uint32 max_mask = 0, min_mask = 0, clk_ctl_st = 0;
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+
+	ASSERT(pmu != NULL);
+
+	/* Make sure PLL is off */
+	si_pmu_pll_off(sih, osh, pmu, &min_mask, &max_mask, &clk_ctl_st);
+
+	si_pmu_pllcontrol(sih, reg, mask, val);
+
+	/* Flush ('update') the deferred pll control registers writes */
+	if (PMUREV(sih->pmurev) >= 2)
+		OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+	/* Restore back the register values. This ensures PLL remains on if it
+	 * was originally on and remains off if it was originally off.
+	 */
+	si_pmu_pll_on(sih, osh, pmu, min_mask, max_mask, clk_ctl_st);
+	si_setcoreidx(sih, origidx);
+}
+#endif /* EDV */
+
+/**
+ * returns the backplane clock frequency.
+ * Does this by determining the current Fvco and the setting of the
+ * clock divider that leads up to the backplane. Returns value in [Hz] units.
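+ * Purely illustrative arithmetic: FVCO = 963000 KHz with an mdiv of 6 would yield
+ * 963000 / 6 * 1000 = 160500000 Hz (~160.5 MHz); the real mdiv comes from
+ * pllcontrol[2] below.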
+ */ +static uint32 +BCMPOSTTRAPFN(si_pmu_bpclk_4387)(si_t *sih) +{ + uint32 tmp, mdiv; + uint32 FVCO; /* in [khz] units */ + + FVCO = si_pmu1_pllfvco0(sih); + + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, 0, 0); + mdiv = (tmp & PMU4387_PLL0_PC2_ICH3_MDIV_MASK); + ASSERT(mdiv != 0); + + return FVCO / mdiv * 1000; +} + +/** + * returns the CPU clock frequency. Does this by determining current Fvco and the setting of the + * clock divider that leads up to the ARM. Returns value in [Hz] units. + */ +static uint32 +BCMPOSTTRAPFN(si_pmu1_cpuclk0)(si_t *sih, osl_t *osh, pmuregs_t *pmu) +{ + uint32 tmp, mdiv = 1; +#ifdef BCMDBG + uint32 ndiv_int, ndiv_frac, p2div, p1div, fvco; + uint32 fref; +#endif +#ifdef BCMDBG_PMU + char chn[8]; +#endif + uint32 FVCO; /* in [khz] units */ + + FVCO = si_pmu1_pllfvco0(sih); + + if (BCM43602_CHIP(sih->chip) && +#ifdef DONGLEBUILD +#ifdef __arm__ + (si_arm_clockratio(sih, 0) == 1) && +#endif +#endif /* DONGLEBUILD */ + TRUE) { + /* CR4 running on backplane_clk */ + return si_pmu_si_clock(sih, osh); /* in [hz] units */ + } + + switch (CHIPID(sih->chip)) { + case BCM4360_CHIP_ID: + case BCM43460_CHIP_ID: + case BCM43526_CHIP_ID: + case BCM4352_CHIP_ID: + /* Read m6div from pllcontrol[5] */ + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, 0, 0); + mdiv = (tmp & PMU1_PLL0_PC2_M6DIV_MASK) >> PMU1_PLL0_PC2_M6DIV_SHIFT; + break; +#ifdef DONGLEBUILD + CASE_BCM43602_CHIP: +#ifdef __arm__ + ASSERT(si_arm_clockratio(sih, 0) == 2); +#endif + /* CR4 running on armcr4_clk (Ch5). Read 'bbpll_i_m5div' from pllctl[5] */ + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, 0, 0); + mdiv = (tmp & PMU1_PLL0_PC2_M5DIV_MASK) >> PMU1_PLL0_PC2_M5DIV_SHIFT; + break; + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + /* mdiv is not supported for 43012 and FVCO frequency should be divided by 2 */ + mdiv = 2; + break; +#endif /* DONGLEBUILD */ + + case BCM4369_CHIP_GRPID: + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0); + mdiv = (tmp & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT; + break; + case BCM4362_CHIP_GRPID: + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0); + mdiv = (tmp & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT; + break; + + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0); + mdiv = (tmp & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT; + break; + + default: + PMU_MSG(("si_pmu1_cpuclk0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8))); + ASSERT(0); + break; + } + + ASSERT(mdiv != 0); + +#ifdef BCMDBG + /* Read p2div/p1div from pllcontrol[0] */ + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG0, 0, 0); + p2div = (tmp & PMU1_PLL0_PC0_P2DIV_MASK) >> PMU1_PLL0_PC0_P2DIV_SHIFT; + p1div = (tmp & PMU1_PLL0_PC0_P1DIV_MASK) >> PMU1_PLL0_PC0_P1DIV_SHIFT; + + /* Calculate fvco based on xtal freq and ndiv and pdiv */ + tmp = PMU1_PLL0_PLLCTL2; + + tmp = si_pmu_pllcontrol(sih, tmp, 0, 0); + + if (BCM4362_CHIP(sih->chip) || + BCM4369_CHIP(sih->chip)) { + p2div = 1; + p1div = (tmp & PMU4369_PLL0_PC2_PDIV_MASK) >> PMU4369_PLL0_PC2_PDIV_SHIFT; + ndiv_int = (tmp & PMU4369_PLL0_PC2_NDIV_INT_MASK) >> + PMU4369_PLL0_PC2_NDIV_INT_SHIFT; + } else if (BCM4378_CHIP(sih->chip) || BCM4376_CHIP(sih->chip)) { + p2div = 1; + p1div = (tmp & PMU4378_PLL0_PC2_P1DIV_MASK) >> PMU4378_PLL0_PC2_P1DIV_SHIFT; + ndiv_int = (tmp & PMU4378_PLL0_PC2_NDIV_INT_MASK) >> + PMU4378_PLL0_PC2_NDIV_INT_SHIFT; + } else { + ndiv_int = (tmp & PMU1_PLL0_PC2_NDIV_INT_MASK) >> 
PMU1_PLL0_PC2_NDIV_INT_SHIFT; + } + + ASSERT(p1div != 0); + + tmp = PMU1_PLL0_PLLCTL3; + + tmp = si_pmu_pllcontrol(sih, tmp, 0, 0); + + if (BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip) || + BCM4376_CHIP(sih->chip) || + BCM4378_CHIP(sih->chip) || + FALSE) { + ndiv_frac = + (tmp & PMU4369_PLL0_PC3_NDIV_FRAC_MASK) >> + PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT; + fref = si_pmu1_alpclk0(sih, osh, pmu) / 1000; /* [KHz] */ + + fvco = (fref * ndiv_int) << 8; + fvco += (fref * ((ndiv_frac & 0xfffff) >> 4)) >> 8; + fvco >>= 8; + fvco *= p1div; + fvco /= 1000; + fvco *= 1000; + } else { + ndiv_frac = + (tmp & PMU1_PLL0_PC3_NDIV_FRAC_MASK) >> PMU1_PLL0_PC3_NDIV_FRAC_SHIFT; + + fref = si_pmu1_alpclk0(sih, osh, pmu) / 1000; + + fvco = (fref * ndiv_int) << 8; + fvco += (fref * (ndiv_frac >> 12)) >> 4; + fvco += (fref * (ndiv_frac & 0xfff)) >> 12; + fvco >>= 8; + fvco *= p2div; + fvco /= p1div; + fvco /= 1000; + fvco *= 1000; + } + + PMU_MSG(("si_pmu1_cpuclk0: ndiv_int %u ndiv_frac %u p2div %u p1div %u fvco %u\n", + ndiv_int, ndiv_frac, p2div, p1div, fvco)); + + FVCO = fvco; +#endif /* BCMDBG */ + + return FVCO / mdiv * 1000; /* Return CPU clock in [Hz] */ +} /* si_pmu1_cpuclk0 */ + +/** + * BCM4369/4378/4387 specific function returning the CPU clock frequency. + * Does this by determining current Fvco and the setting of the clock divider that leads up to + * the ARM. + * Returns value in [Hz] units. + */ +static uint32 +BCMPOSTTRAPFN(si_pmu1_cpuclk0_pll2)(si_t *sih) +{ + uint32 FVCO = si_pmu1_pllfvco0_pll2(sih); /* in [khz] units */ + + /* Return ARM/SB clock */ + return FVCO * 1000; +} /* si_pmu1_cpuclk0_pll2 */ + +/** + * Returns the MAC clock frequency. Called when e.g. MAC clk frequency has to change because of + * interference mitigation. + */ +uint32 +si_mac_clk(si_t *sih, osl_t *osh) +{ + uint8 mdiv2 = 0; + uint32 mac_clk = 0; + chipcregs_t *cc; + uint origidx; + bcm_int_bitmask_t intr_val; +#ifdef BCMDBG_PMU + char chn[8]; +#endif + + uint32 FVCO = si_pmu1_pllfvco0(sih); /* in [khz] units */ + + BCM_REFERENCE(osh); + + /* Remember original core before switch to chipc */ + cc = (chipcregs_t *)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val); + ASSERT(cc != NULL); + BCM_REFERENCE(cc); + + switch (CHIPID(sih->chip)) { + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + mdiv2 = 2; + mac_clk = FVCO / mdiv2; + break; + default: + PMU_MSG(("si_mac_clk: Unknown chipid %s\n", + bcm_chipname(CHIPID(sih->chip), chn, 8))); + ASSERT(0); + break; + } + + /* Return to original core */ + si_restore_core(sih, origidx, &intr_val); + + return mac_clk; +} /* si_mac_clk */ + +/* 4387 pll MAC channel divisor - for ftm */ +static uint32 +si_pmu_macdiv_4387(si_t *sih) +{ + uint32 tmp, mdiv; + + /* TODO: when it's needed return different MAC clock freq. + * for different MAC/slice! 
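+	 *
+	 * A per-slice variant would presumably select the PLL control register
+	 * by slice before decoding the divider, along the lines of (hypothetical
+	 * sketch; the slice-to-register mapping is assumed, not taken from the
+	 * source):
+	 *
+	 *   reg = (slice == 0) ? PMU_PLL_CTRL_REG1 : PMU_PLL_CTRL_REG2;
+	 *   tmp = si_pmu_pllcontrol(sih, reg, 0, 0);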
+ */ + tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0); + mdiv = (tmp & PMU4387_PLL0_PC1_ICH2_MDIV_MASK) >> PMU4387_PLL0_PC1_ICH2_MDIV_SHIFT; + ASSERT(mdiv != 0); + + return mdiv; +} + +/** Get chip's FVCO and PLLCTRL1 register value */ +int +si_pmu_fvco_macdiv(si_t *sih, uint32 *fvco, uint32 *div) +{ + chipcregs_t *cc; + uint origidx; + bcm_int_bitmask_t intr_val; + int err = BCME_OK; +#ifdef BCMDBG_PMU + char chn[8]; +#endif + + if (fvco) + *fvco = si_pmu1_pllfvco0(sih)/1000; + + /* Remember original core before switch to chipc */ + cc = (chipcregs_t *)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val); + ASSERT(cc != NULL); + BCM_REFERENCE(cc); + + switch (CHIPID(sih->chip)) { + case BCM4360_CHIP_ID: + case BCM43460_CHIP_ID: + if (div) + *div = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG12, 0, 0) & + PMU1_PLL0_PC1_M1DIV_MASK; + break; + + case BCM43602_CHIP_ID: + if (div) { + *div = (si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG4, 0, 0) & + PMU1_PLL0_PC1_M3DIV_MASK) >> PMU1_PLL0_PC1_M3DIV_SHIFT; + } + break; + case BCM4369_CHIP_GRPID: + case BCM4362_CHIP_GRPID: + if (div) { + *div = (si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL1, 0, 0) + & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT; + } + break; + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + /* mDIV is not supported for 43012 & divisor value is always 2 */ + if (div) + *div = 2; + break; + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + if (div) { + *div = (si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL1, 0, 0) + & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT; + } + break; + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + if (div) { + *div = si_pmu_macdiv_4387(sih); + } + break; + default: + PMU_MSG(("si_mac_clk: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8))); + err = BCME_ERROR; + } + + /* Return to original core */ + si_restore_core(sih, origidx, &intr_val); + + return err; +} + +/** Return TRUE if scan retention memory's sleep/pm signal was asserted */ +bool +BCMPOSTTRAPFN(si_pmu_reset_ret_sleep_log)(si_t *sih, osl_t *osh) +{ + pmuregs_t *pmu; + uint origidx; + uint32 ret_ctl; + bool was_sleep = FALSE; + + /* Remember original core before switch to chipc */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + ret_ctl = R_REG(osh, &pmu->retention_ctl); + if (ret_ctl & RCTL_MEM_RET_SLEEP_LOG_MASK) { + W_REG(osh, &pmu->retention_ctl, ret_ctl); + was_sleep = TRUE; + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return was_sleep; +} + +/** Return TRUE if pmu rsrc XTAL_PU was de-asserted */ +bool +BCMPOSTTRAPFN(si_pmu_reset_chip_sleep_log)(si_t *sih, osl_t *osh) +{ + pmuregs_t *pmu; + uint origidx; + bool was_sleep = FALSE; + + /* Remember original core before switch to chipc */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + if (PMUREV(sih->pmurev) >= 36) { + uint32 pmu_int_sts = R_REG(osh, &pmu->pmuintstatus); + if (pmu_int_sts & PMU_INT_STAT_RSRC_EVENT_INT0_MASK) { + /* write 1 to clear the status */ + W_REG(osh, &pmu->pmuintstatus, PMU_INT_STAT_RSRC_EVENT_INT0_MASK); + was_sleep = TRUE; + } + } else { + was_sleep = si_pmu_reset_ret_sleep_log(sih, osh); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return was_sleep; +} + +/* For 
43602a0 MCH2/MCH5 boards: power up PA Reference LDO */ +void +si_pmu_switch_on_PARLDO(si_t *sih, osl_t *osh) +{ + uint32 mask; + pmuregs_t *pmu; + uint origidx; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + mask = R_REG(osh, &pmu->min_res_mask) | PMURES_BIT(RES43602_PARLDO_PU); + W_REG(osh, &pmu->min_res_mask, mask); + mask = R_REG(osh, &pmu->max_res_mask) | PMURES_BIT(RES43602_PARLDO_PU); + W_REG(osh, &pmu->max_res_mask, mask); + break; + default: + break; + } + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +/* For 43602a0 MCH2/MCH5 boards: power off PA Reference LDO */ +void +si_pmu_switch_off_PARLDO(si_t *sih, osl_t *osh) +{ + uint32 mask; + pmuregs_t *pmu; + uint origidx; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (CHIPID(sih->chip)) { + case BCM43602_CHIP_ID: + case BCM43462_CHIP_ID: + mask = R_REG(osh, &pmu->min_res_mask) & ~PMURES_BIT(RES43602_PARLDO_PU); + W_REG(osh, &pmu->min_res_mask, mask); + mask = R_REG(osh, &pmu->max_res_mask) & ~PMURES_BIT(RES43602_PARLDO_PU); + W_REG(osh, &pmu->max_res_mask, mask); + break; + default: + break; + } + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +/** + * Change VCO frequency (slightly), e.g. to avoid PHY errors due to spurs. + */ +static void +BCMATTACHFN(si_set_bb_vcofreq_frac)(si_t *sih, osl_t *osh, int vcofreq, int frac, int xtalfreq) +{ + uint32 vcofreq_withfrac, p1div, ndiv_int, fraca, ndiv_mode, reg; + /* shifts / masks for PMU PLL control register #2 : */ + uint32 ndiv_int_shift, ndiv_mode_shift, p1div_shift, pllctrl2_mask; + /* shifts / masks for PMU PLL control register #3 : */ + uint32 pllctrl3_mask; + BCM_REFERENCE(osh); + + if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM43526_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID) || + BCM43602_CHIP(sih->chip)) { + if (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL) { + PMU_MSG(("HTAVAIL is set, so not updating BBPLL Frequency \n")); + return; + } + + ndiv_int_shift = 7; + ndiv_mode_shift = 4; + p1div_shift = 0; + pllctrl2_mask = 0xffffffff; + pllctrl3_mask = 0xffffffff; + } else { + /* put more chips here */ + PMU_ERROR(("si_set_bb_vcofreq_frac: only work on 4360, 4352\n")); + return; + } + + vcofreq_withfrac = vcofreq * 10000 + frac; + p1div = 0x1; + ndiv_int = vcofreq / xtalfreq; + ndiv_mode = (vcofreq_withfrac % (xtalfreq * 10000)) ? 
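+	/* Fractional n-divider mode (3) is selected only when the requested VCO
+	 * frequency is not an integer multiple of the xtal frequency; otherwise
+	 * integer mode (0) is used. Example (values from the 4360-family caller
+	 * below): vcofreq = 960, frac = 98, xtalfreq = 40 gives
+	 * vcofreq_withfrac = 9600098, and 9600098 % 400000 = 98 != 0,
+	 * so mode 3 is chosen.
+	 */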
3 : 0; + PMU_ERROR(("ChangeVCO => vco:%d, xtalF:%d, frac: %d, ndivMode: %d, ndivint: %d\n", + vcofreq, xtalfreq, frac, ndiv_mode, ndiv_int)); + + reg = (ndiv_int << ndiv_int_shift) | + (ndiv_mode << ndiv_mode_shift) | + (p1div << p1div_shift); + PMU_ERROR(("Data written into the PLL_CNTRL_ADDR2: %08x\n", reg)); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, pllctrl2_mask, reg); + + if (ndiv_mode) { + /* frac = (vcofreq_withfrac % (xtalfreq * 10000)) * 2^24) / (xtalfreq * 10000) */ + uint32 r1, r0; + math_uint64_multiple_add( + &r1, &r0, vcofreq_withfrac % (xtalfreq * 10000), 1 << 24, 0); + math_uint64_divide(&fraca, r1, r0, xtalfreq * 10000); + PMU_ERROR(("Data written into the PLL_CNTRL_ADDR3 (Fractional): %08x\n", fraca)); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, pllctrl3_mask, fraca); + } + + si_pmu_pllupd(sih); +} /* si_set_bb_vcofreq_frac */ + +/** + * given x-tal frequency, returns BaseBand vcofreq with fraction in 100Hz + * @param xtalfreq In [Mhz] units. + * @return In [100Hz] units. + */ +uint32 +si_pmu_get_bb_vcofreq(si_t *sih, osl_t *osh, int xtalfreq) +{ + uint32 ndiv_int, /* 9 bits integer divider */ + ndiv_mode, + frac = 0, /* 24 bits fractional divider */ + p1div; /* predivider: divides x-tal freq */ + uint32 xtal1, vcofrac = 0, vcofreq; + uint32 r1, r0, reg; + + BCM_REFERENCE(osh); + + if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM43526_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID) || + BCM43602_CHIP(sih->chip)) { + reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, 0, 0); + ndiv_int = reg >> 7; + ndiv_mode = (reg >> 4) & 7; + p1div = 1; /* do not divide x-tal frequency */ + + if (ndiv_mode) + frac = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0); + } else if ((BCM4369_CHIP(sih->chip) && + CST4369_CHIPMODE_PCIE(sih->chipst)) || + BCM4376_CHIP(sih->chip) || + BCM4378_CHIP(sih->chip) || + (BCM4362_CHIP(sih->chip) && + CST4362_CHIPMODE_PCIE(sih->chipst))) { + reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, 0, 0); + ndiv_int = reg >> 20; + p1div = (reg >> 16) & 0xf; + frac = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0) & 0x00fffff; + ndiv_mode = 1; + } else { + /* put more chips here */ + PMU_ERROR(("si_pmu_get_bb_vcofreq: only work on 4360, 4352, 4369, 4378\n")); + ASSERT(FALSE); + return 0; + } + + xtal1 = 10000 * xtalfreq / p1div; /* in [100Hz] units */ + + if (ndiv_mode) { + /* vcofreq fraction = (xtal1 * frac + (1 << 23)) / (1 << 24); + * handle overflow + */ + math_uint64_multiple_add(&r1, &r0, xtal1, frac, 1 << 23); + vcofrac = (r1 << 8) | (r0 >> 24); + } + + if (ndiv_int == 0) { + ASSERT(0); + return 0; + } + + if ((int)xtal1 > (int)((0xffffffff - vcofrac) / ndiv_int)) { + PMU_ERROR(("si_pmu_get_bb_vcofreq: xtalfreq is too big, %d\n", xtalfreq)); + return 0; + } + + vcofreq = xtal1 * ndiv_int + vcofrac; + return vcofreq; +} /* si_pmu_get_bb_vcofreq */ + +/** Enable PMU 1Mhz clock */ +static void +si_pmu_enb_slow_clk(si_t *sih, osl_t *osh, uint32 xtalfreq) +{ + uint32 val; + pmuregs_t *pmu; + uint origidx; + + if (PMUREV(sih->pmurev) < 24) { + PMU_ERROR(("si_pmu_enb_slow_clk: Not supported %d\n", PMUREV(sih->pmurev))); + return; + } + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + /* twiki PmuRev30, OneMhzToggleEn:31, AlpPeriod[23:0] */ + if (PMUREV(sih->pmurev) >= 38) { + /* Use AlpPeriod[23:0] only chip 
default value for PmuRev >= 38 chips
+		 * eg. ROUND(POWER(2,26) / (55.970 / 2 MHz)) for 4387/4385, etc.
+		 */
+		val = R_REG(osh, &pmu->slowclkperiod) | PMU30_ALPCLK_ONEMHZ_ENAB;
+	} else {
+		if (PMUREV(sih->pmurev) >= 30) {
+			/* AlpPeriod = ROUND(POWER(2,26)/ALP_CLK_FREQ_IN_MHz,0) */
+			/* Calculation will be accurate for only one decimal of xtal (like 37.4),
+			 * and will not be accurate for more than one decimal
+			 * of xtal freq (like 37.43).
+			 * Also no rounding is done on the final result.
+			 */
+			ROMMABLE_ASSERT((xtalfreq/100)*100 == xtalfreq);
+			val = (((1 << 26)*10)/(xtalfreq/100));
+			/* set bit 31 to enable OneMhzToggle;
+			 * a 1-usec wide toggle signal will be generated
+			 */
+			val |= PMU30_ALPCLK_ONEMHZ_ENAB;
+		} else { /* twiki PmuRev24, OneMhzToggleEn:16, AlpPeriod[15:0] */
+			if (xtalfreq == 37400) {
+				val = 0x101B6;
+			} else if (xtalfreq == 40000) {
+				val = 0x10199;
+			} else {
+				PMU_ERROR(("si_pmu_enb_slow_clk: xtalfreq is not supported, %d\n",
+					xtalfreq));
+				/* Return to original core */
+				si_setcoreidx(sih, origidx);
+				return;
+			}
+		}
+	}
+
+	W_REG(osh, &pmu->slowclkperiod, val);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+}
+
+/**
+ * Initializes PLL given an x-tal frequency.
+ * Calls si_pmuX_pllinitY() type of functions, where the reasoning behind 'X' and 'Y' is historical
+ * rather than logical.
+ *
+ * xtalfreq : x-tal frequency in [KHz]
+ */
+void
+BCMATTACHFN(si_pmu_pll_init)(si_t *sih, osl_t *osh, uint xtalfreq)
+{
+	pmuregs_t *pmu;
+	uint origidx;
+#ifdef BCMDBG_PMU
+	char chn[8];
+#endif
+	BCM_REFERENCE(pmu1_xtaltab0_880);
+	BCM_REFERENCE(pmu1_xtaltab0_1760);
+
+	ASSERT(sih->cccaps & CC_CAP_PMU);
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4360_CHIP_ID:
+	case BCM43460_CHIP_ID:
+	case BCM4352_CHIP_ID: {
+		if (CHIPREV(sih->chiprev) > 2)
+			si_set_bb_vcofreq_frac(sih, osh, 960, 98, 40);
+		break;
+	}
+	CASE_BCM43602_CHIP:
+		si_set_bb_vcofreq_frac(sih, osh, 960, 98, 40);
+		break;
+	case BCM43012_CHIP_ID:
+	case BCM43013_CHIP_ID:
+	case BCM43014_CHIP_ID:
+	case BCM4369_CHIP_GRPID:
+	case BCM4362_CHIP_GRPID:
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+	case BCM4388_CHIP_GRPID:
+	case BCM4389_CHIP_GRPID:
+		si_pmu1_pllinit1(sih, osh, pmu, xtalfreq); /* nvram PLL overrides + enables PLL */
+		break;
+	default:
+		PMU_MSG(("No PLL init done for chip %s rev %d pmurev %d\n",
+			bcm_chipname(
+			CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), PMUREV(sih->pmurev)));
+		break;
+	}
+
+#ifdef BCMDBG_FORCEHT
+	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), CCS_FORCEHT, CCS_FORCEHT);
+#endif
+
+	si_pmu_enb_slow_clk(sih, osh, xtalfreq);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+} /* si_pmu_pll_init */
+
+/** get alp clock frequency in [Hz] units */
+uint32
+BCMPOSTTRAPFN(si_pmu_alp_clock)(si_t *sih, osl_t *osh)
+{
+	pmuregs_t *pmu;
+	uint origidx;
+	uint32 clock = ALP_CLOCK;
+#ifdef BCMDBG_PMU
+	char chn[8];
+#endif
+
+	ASSERT(sih->cccaps & CC_CAP_PMU);
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4360_CHIP_ID:
+	case
BCM43460_CHIP_ID: + case BCM4352_CHIP_ID: + case BCM43526_CHIP_ID: + if (sih->chipst & CST4360_XTAL_40MZ) + clock = 40000 * 1000; + else + clock = 20000 * 1000; + break; + + CASE_BCM43602_CHIP: + /* always 40Mhz */ + clock = 40000 * 1000; + break; + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: +#ifndef BCMSDIOLITE + case BCM4369_CHIP_GRPID: +#endif /* BCMSDIOLITE */ + case BCM4362_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + clock = si_pmu1_alpclk0(sih, osh, pmu); + break; +#ifdef BCMSDIOLITE + case BCM4369_CHIP_ID: + /* always 25Mhz */ + clock = 25000 * 1000; + break; +#endif /* BCMSDIOLITE */ + default: + PMU_MSG(("No ALP clock specified " + "for chip %s rev %d pmurev %d, using default %d Hz\n", + bcm_chipname( + CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), + PMUREV(sih->pmurev), clock)); + break; + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return clock; /* in [Hz] units */ +} /* si_pmu_alp_clock */ + +/** + * Find the output of the "m" pll divider given pll controls that start with + * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc. + */ +static uint32 +BCMPOSTTRAPFN(si_pmu5_clock)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint pll0, uint m) +{ + uint32 tmp, div, ndiv, p1, p2, fc; + + if ((pll0 & 3) || (pll0 > PMU4716_MAINPLL_PLL0)) { + PMU_ERROR(("si_pmu5_clock: Bad pll0: %d\n", pll0)); + return 0; + } + + /* Strictly there is an m5 divider, but I'm not sure we use it */ + if ((m == 0) || (m > 4)) { + PMU_ERROR(("si_pmu5_clock: Bad m divider: %d\n", m)); + return 0; + } + + W_REG(osh, &pmu->pllcontrol_addr, pll0 + PMU5_PLL_P1P2_OFF); + (void)R_REG(osh, &pmu->pllcontrol_addr); + tmp = R_REG(osh, &pmu->pllcontrol_data); + p1 = (tmp & PMU5_PLL_P1_MASK) >> PMU5_PLL_P1_SHIFT; + p2 = (tmp & PMU5_PLL_P2_MASK) >> PMU5_PLL_P2_SHIFT; + + W_REG(osh, &pmu->pllcontrol_addr, pll0 + PMU5_PLL_M14_OFF); + (void)R_REG(osh, &pmu->pllcontrol_addr); + tmp = R_REG(osh, &pmu->pllcontrol_data); + div = (tmp >> ((m - 1) * PMU5_PLL_MDIV_WIDTH)) & PMU5_PLL_MDIV_MASK; + + W_REG(osh, &pmu->pllcontrol_addr, pll0 + PMU5_PLL_NM5_OFF); + (void)R_REG(osh, &pmu->pllcontrol_addr); + tmp = R_REG(osh, &pmu->pllcontrol_data); + ndiv = (tmp & PMU5_PLL_NDIV_MASK) >> PMU5_PLL_NDIV_SHIFT; + + /* Do calculation in Mhz */ + fc = si_pmu_alp_clock(sih, osh) / 1000000; + fc = (p1 * ndiv * fc) / p2; + + PMU_NONE(("si_pmu5_clock: p1=%d, p2=%d, ndiv=%d(0x%x), m%d=%d; fc=%d, clock=%d\n", + p1, p2, ndiv, ndiv, m, div, fc, fc / div)); + + /* Return clock in Hertz */ + return ((fc / div) * 1000000); +} /* si_pmu5_clock */ + +/** + * Get backplane clock frequency, returns a value in [hz] units. + * For designs that feed the same clock to both backplane and CPU just return the CPU clock speed. 
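+ *
+ * Typical use (illustrative): a caller that wants the backplane frequency in
+ * MHz can simply divide, e.g. bp_mhz = si_pmu_si_clock(sih, osh) / 1000000.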
+ */ +uint32 +BCMPOSTTRAPFN(si_pmu_si_clock)(si_t *sih, osl_t *osh) +{ + pmuregs_t *pmu; + uint origidx; + uint32 clock = HT_CLOCK; /* in [hz] units */ +#ifdef BCMDBG_PMU + char chn[8]; +#endif + + ASSERT(sih->cccaps & CC_CAP_PMU); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (CHIPID(sih->chip)) { + case BCM4360_CHIP_ID: + case BCM43460_CHIP_ID: + case BCM43526_CHIP_ID: + case BCM4352_CHIP_ID: + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + clock = si_pmu1_cpuclk0(sih, osh, pmu); + break; + + CASE_BCM43602_CHIP: { + uint32 mdiv; + /* Ch3 is connected to backplane_clk. Read 'bbpll_i_m3div' from pllctl[4] */ + mdiv = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG4, 0, 0); + mdiv = (mdiv & PMU1_PLL0_PC1_M3DIV_MASK) >> PMU1_PLL0_PC1_M3DIV_SHIFT; + ASSERT(mdiv != 0); + clock = si_pmu1_pllfvco0(sih) / mdiv * 1000; + break; + } + + case BCM4369_CHIP_GRPID: + case BCM4362_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + clock = si_pmu1_cpuclk0(sih, osh, pmu); + break; + + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + clock = si_pmu_bpclk_4387(sih); + break; + + default: + PMU_MSG(("No backplane clock specified " + "for chip %s rev %d pmurev %d, using default %d Hz\n", + bcm_chipname( + CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), + PMUREV(sih->pmurev), clock)); + break; + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return clock; +} /* si_pmu_si_clock */ + +/** returns CPU clock frequency in [hz] units */ +uint32 +BCMPOSTTRAPFN(si_pmu_cpu_clock)(si_t *sih, osl_t *osh) +{ + pmuregs_t *pmu; + uint origidx; + uint32 clock; /* in [hz] units */ + + uint32 tmp; + uint32 armclk_offcnt, armclk_oncnt; + + ASSERT(sih->cccaps & CC_CAP_PMU); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + if (BCM4369_CHIP(sih->chip) || + BCM4376_CHIP(sih->chip) || + BCM4378_CHIP(sih->chip) || + BCM4385_CHIP(sih->chip) || + BCM4387_CHIP(sih->chip) || + BCM4388_CHIP(sih->chip) || + BCM4389_CHIP(sih->chip) || + BCM4397_CHIP(sih->chip) || + BCM4362_CHIP(sih->chip)) { + clock = si_pmu1_cpuclk0_pll2(sih); /* for chips with separate CPU PLL */ + } else if ((PMUREV(sih->pmurev) >= 5) && + !((CHIPID(sih->chip) == BCM4360_CHIP_ID) || + (CHIPID(sih->chip) == BCM4352_CHIP_ID) || + (CHIPID(sih->chip) == BCM43526_CHIP_ID) || + (CHIPID(sih->chip) == BCM43460_CHIP_ID) || + (CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID) || + 0)) { + uint pll = PMU4716_MAINPLL_PLL0; + + if (BCM43602_CHIP(sih->chip)) { + clock = si_pmu1_cpuclk0(sih, osh, pmu); + } else { + clock = si_pmu5_clock(sih, osh, pmu, pll, PMU5_MAINPLL_CPU); + } + } else { + clock = si_pmu_si_clock(sih, osh); + } + + if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID)) { + /* Fout = (on_count + 1) * Fin/(on_count + 1 + off_count) + * ARM clock using Fast divider calculation + * Fin = FVCO/2 + */ + tmp = si_pmu_chipcontrol(sih, PMU1_PLL0_CHIPCTL1, 0, 0); + armclk_offcnt = + (tmp & 
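+		/* Worked example of the formula above (illustrative; the count
+		 * values are assumed): with Fin = FVCO/2 = 80 MHz, on_count = 1
+		 * and off_count = 2, Fout = (1 + 1) * 80 / (1 + 1 + 2) = 40 MHz.
+		 */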
CCTL_43012_ARM_OFFCOUNT_MASK) >> CCTL_43012_ARM_OFFCOUNT_SHIFT; + armclk_oncnt = + (tmp & CCTL_43012_ARM_ONCOUNT_MASK) >> CCTL_43012_ARM_ONCOUNT_SHIFT; + clock = (armclk_oncnt + 1) * clock/(armclk_oncnt + 1 + armclk_offcnt); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); + return clock; +} /* si_pmu_cpu_clock */ + +#ifdef __ARM_ARCH_7A__ +static uint32 +si_pmu_mem_ca7clock(si_t *sih, osl_t *osh) +{ + uint32 clock = 0; + int8 mdiv = 1; + uint idx = si_coreidx(sih); + bool fastclk; + ca7regs_t *regs = si_setcore(sih, ARMCA7_CORE_ID, 0); + + if (regs == NULL) { + goto end; + } + + fastclk = ((R_REG(osh, ARMREG(regs, clk_ctl_st)) & CCS_ARMFASTCLOCKREQ) != 0); + + if (fastclk) { + uint32 fvco = si_pmu_pll28nm_fvco(sih); + if (si_corerev(sih) >= 7) { + mdiv = (R_REG(osh, ARMREG(regs, corecontrol)) & ACC_CLOCKRATIO_MASK) >> + ACC_CLOCKRATIO_SHIFT; + } else { + ASSERT(0); + } + + if (mdiv == 0) { + ASSERT(0); + clock = 0; + } else { + clock = (fvco / mdiv); + } + } else { + clock = si_pmu_si_clock(sih, osh); + } + +end: + si_setcoreidx(sih, idx); + return clock; + +} +#endif /* __ARM_ARCH_7A__ */ + +/** get memory clock frequency, which is the same as the HT clock for newer chips. Returns [Hz]. */ +uint32 +BCMINITFN(si_pmu_mem_clock)(si_t *sih, osl_t *osh) +{ + pmuregs_t *pmu; + uint origidx; + uint32 clock; + + ASSERT(sih->cccaps & CC_CAP_PMU); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + if ((PMUREV(sih->pmurev) >= 5) && + !((BCM4369_CHIP(sih->chip)) || + (BCM4362_CHIP(sih->chip)) || + BCM43602_CHIP(sih->chip) || + (CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID) || + BCM4376_CHIP(sih->chip) || + BCM4378_CHIP(sih->chip) || + BCM4387_CHIP(sih->chip) || + BCM4388_CHIP(sih->chip) || + BCM4389_CHIP(sih->chip) || + BCM4397_CHIP(sih->chip) || + 0)) { + uint pll = PMU4716_MAINPLL_PLL0; + + clock = si_pmu5_clock(sih, osh, pmu, pll, PMU5_MAINPLL_MEM); + } else { +#ifdef __ARM_ARCH_7A__ + clock = si_pmu_mem_ca7clock(sih, osh); +#else /* !__ARM_ARCH_7A__ */ + clock = si_pmu_si_clock(sih, osh); /* mem clk same as backplane clk */ +#endif /* __ARM_ARCH_7A__ */ + } + /* Return to original core */ + si_setcoreidx(sih, origidx); + return clock; +} /* si_pmu_mem_clock */ + +/* + * ilpcycles per sec are now calculated during CPU init in a new way + * for better accuracy. We set it here for compatability. + * + * On platforms that do not do this we resort to the old way. + */ + +#define ILP_CALC_DUR 10 /* ms, make sure 1000 can be divided by it. */ + +static uint32 ilpcycles_per_sec = 0; + +void +BCMPOSTTRAPFN(si_pmu_ilp_clock_set)(uint32 cycles_per_sec) +{ + ilpcycles_per_sec = cycles_per_sec; +} + +/** + * Measure ILP clock frequency. Returns a value in [Hz] units. + * + * The variable ilpcycles_per_sec is used to store the ILP clock speed. The value + * is calculated when the function is called the first time and then cached. + * The change in PMU timer count is measured across a delay of ILP_CALC_DUR msec. + * Before the first time the function is called, one must make sure the HT clock is + * turned on and used to feed the CPU and that OSL_DELAY() is calibrated. 
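+ *
+ * Numeric sketch (illustrative): with ILP_CALC_DUR = 10 ms, a delta of 328
+ * PMU timer ticks across the delay yields
+ *   ilpcycles_per_sec = 328 * (1000 / 10) = 32800 Hz,
+ * i.e. approximately the expected 32.768 kHz LPO.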
+ */
+uint32
+BCMINITFN(si_pmu_ilp_clock)(si_t *sih, osl_t *osh)
+{
+	if (ISSIM_ENAB(sih))
+		return ILP_CLOCK;
+
+	if (ilpcycles_per_sec == 0) {
+		uint32 start, end, delta;
+		pmuregs_t *pmu;
+		uint origidx = si_coreidx(sih);
+
+		if (AOB_ENAB(sih)) {
+			pmu = si_setcore(sih, PMU_CORE_ID, 0);
+		} else {
+			pmu = si_setcoreidx(sih, SI_CC_IDX);
+		}
+		ASSERT(pmu != NULL);
+		start = R_REG(osh, &pmu->pmutimer);
+		/* PR88659: verify pmutimer reads */
+		if (start != R_REG(osh, &pmu->pmutimer))
+			start = R_REG(osh, &pmu->pmutimer);
+		OSL_DELAY(ILP_CALC_DUR * 1000);
+		end = R_REG(osh, &pmu->pmutimer);
+		if (end != R_REG(osh, &pmu->pmutimer))
+			end = R_REG(osh, &pmu->pmutimer);
+		delta = end - start;
+		ilpcycles_per_sec = delta * (1000 / ILP_CALC_DUR);
+		/* Return to original core */
+		si_setcoreidx(sih, origidx);
+	}
+
+	ASSERT(ilpcycles_per_sec != 0);
+	return ilpcycles_per_sec;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+/**
+ * Reads/writes a chipcontrol reg. Performs core switching if required; at function exit the
+ * original core is restored. Depending on chip type, reads/writes chipcontrol regs in the CC core
+ * (older chips) or chipcontrol regs in the PMU core (later chips).
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_chipcontrol)(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+	pmu_corereg(sih, SI_CC_IDX, chipcontrol_addr, ~0, reg);
+	return pmu_corereg(sih, SI_CC_IDX, chipcontrol_data, mask, val);
+}
+
+/**
+ * Reads/writes a voltage regulator (vreg) register. Performs core switching if required; at
+ * function exit the original core is restored. Depending on chip type, writes regulator regs
+ * in the CC core (older chips) or regulator regs in the PMU core (later chips).
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_vreg_control)(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+	pmu_corereg(sih, SI_CC_IDX, regcontrol_addr, ~0, reg);
+	return pmu_corereg(sih, SI_CC_IDX, regcontrol_data, mask, val);
+}
+
+/**
+ * Reads/writes a PLL control register. Performs core switching if required; at function exit the
+ * original core is restored. Depending on chip type, writes PLL control regs in the CC core
+ * (older chips) or PLL control regs in the PMU core (later chips).
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_pllcontrol)(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+	pmu_corereg(sih, SI_CC_IDX, pllcontrol_addr, ~0, reg);
+	return pmu_corereg(sih, SI_CC_IDX, pllcontrol_data, mask, val);
+}
+
+/**
+ * Balance between stable SDIO operation and power consumption is achieved using this function.
+ * Note that each drive strength table is for a specific VDDIO of the SDIO pads; ideally this
+ * function should read the VDDIO itself to select the correct table. For now it has been solved
+ * with the 'BCM_SDIO_VDDIO' preprocessor constant.
+ *
+ * 'drivestrength': desired pad drive strength in mA. Drive strength of 0 requests tri-state (if
+ * hardware supports this); if there is no hw support, drive strength is not programmed.
+ */
+void
+BCMINITFN(si_sdiod_drive_strength_init)(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	/*
+	 * Note:
+	 * This function used to set the SDIO drive strength via PMU_CHIPCTL1 for the
+	 * 43143, 4330, 4334, 4336, 43362 chips. These chips are now no longer supported, so
+	 * the code has been deleted.
+	 * Newer chips have the SDIO drive strength setting via a GCI Chip Control register,
+	 * but the bit definitions are chip-specific. We are keeping this function available
+	 * (accessed via DHD 'sdiod_drive' IOVar) in case these newer chips need to provide access.
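+	 * A future implementation would likely follow the GCI pattern used
+	 * elsewhere in this file, along the lines of (hypothetical sketch;
+	 * CHIPCTRLREG_SDIO_DRIVE, DRIVE_MASK/DRIVE_SHIFT and ma_to_code() are
+	 * invented names, not real definitions):
+	 *   si_gci_chipcontrol(sih, CHIPCTRLREG_SDIO_DRIVE, DRIVE_MASK,
+	 *           ma_to_code(drivestrength) << DRIVE_SHIFT);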
+ */ + UNUSED_PARAMETER(sih); + UNUSED_PARAMETER(osh); + UNUSED_PARAMETER(drivestrength); +} + +#if !defined(BCMDONGLEHOST) +/** initialize PMU */ +void +BCMATTACHFN(si_pmu_init)(si_t *sih, osl_t *osh) +{ + pmuregs_t *pmu; + uint origidx; + + ASSERT(sih->cccaps & CC_CAP_PMU); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + +#if defined(BT_WLAN_REG_ON_WAR) + si_pmu_reg_on_war_ext_wake_perst_clear(sih); + si_pmu_reg_on_war_ext_wake_perst_set(sih); +#endif /* BT_WLAN_REG_ON_WAR */ + + /* Feature is added in PMU rev. 1 but doesn't work until rev. 2 */ + if (PMUREV(sih->pmurev) == 1) + AND_REG(osh, &pmu->pmucontrol, ~PCTL_NOILP_ON_WAIT); + else if (PMUREV(sih->pmurev) >= 2) + OR_REG(osh, &pmu->pmucontrol, PCTL_NOILP_ON_WAIT); + + /* Changes from PMU revision 26 are not included in revision 27 */ + if ((PMUREV(sih->pmurev) >= 26) && (PMUREV(sih->pmurev) != 27)) { + uint32 val = PMU_INTC_ALP_REQ | PMU_INTC_HT_REQ | PMU_INTC_HQ_REQ; + pmu_corereg(sih, SI_CC_IDX, pmuintctrl0, val, val); + + val = RSRC_INTR_MASK_TIMER_INT_0; + pmu_corereg(sih, SI_CC_IDX, pmuintmask0, val, val); + (void)pmu_corereg(sih, SI_CC_IDX, pmuintmask0, 0, 0); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +uint32 +si_pmu_rsrc_macphy_clk_deps(si_t *sih, osl_t *osh, int macunit) +{ + uint32 deps = 0; + rsc_per_chip_t *rsc; + uint origidx; + pmuregs_t *pmu = NULL; + uint8 rsc_num; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + + ASSERT(pmu != NULL); + + rsc = si_pmu_get_rsc_positions(sih); + if (macunit == 0) { + rsc_num = rsc->macphy_clkavail; + } else if (macunit == 1) { + rsc_num = rsc->macphy_aux_clkavail; + } else if (macunit == 2) { + rsc_num = rsc->macphy_scan_clkavail; + } else { + PMU_ERROR(("si_pmu_rsrc_macphy_clk_deps: slice %d is not supported\n", macunit)); + rsc_num = NO_SUCH_RESOURCE; /* to satisfy the compiler */ + ASSERT(0); + } + deps = si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsc_num), TRUE); + deps |= PMURES_BIT(rsc_num); + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return deps; +} + +void +si_pmu_set_mac_rsrc_req_sc(si_t *sih, osl_t *osh) +{ + uint32 deps = 0; + rsc_per_chip_t *rsc; + uint origidx; + pmuregs_t *pmu = NULL; + uint32 rsrc = 0; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + pmu = si_setcore(sih, PMU_CORE_ID, 0); + ASSERT(pmu != NULL); + + rsc = si_pmu_get_rsc_positions(sih); + + rsrc = (PMURES_BIT(rsc->macphy_scan_clkavail) | + PMURES_BIT(rsc->dig_ready)); + + deps = si_pmu_res_deps(sih, osh, pmu, rsrc, TRUE); + deps |= rsrc; + + W_REG(osh, &pmu->mac_res_req_timer2, PMU32_MAC_SCAN_RSRC_REQ_TIMER); + W_REG(osh, &pmu->mac_res_req_mask2, deps); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +uint32 +BCMATTACHFN(si_pmu_rsrc_ht_avail_clk_deps)(si_t *sih, osl_t *osh) +{ + uint32 deps; + rsc_per_chip_t *rsc; + uint origidx; + pmuregs_t *pmu = NULL; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + + ASSERT(pmu != NULL); + + rsc = si_pmu_get_rsc_positions(sih); + deps = si_pmu_res_deps(sih, 
osh, pmu, PMURES_BIT(rsc->ht_avail), FALSE); + deps |= PMURES_BIT(rsc->ht_avail); + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return deps; +} + +uint32 +BCMATTACHFN(si_pmu_rsrc_cb_ready_deps)(si_t *sih, osl_t *osh) +{ + uint32 deps; + rsc_per_chip_t *rsc; + uint origidx; + pmuregs_t *pmu = NULL; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + + ASSERT(pmu != NULL); + + rsc = si_pmu_get_rsc_positions(sih); + if (rsc->cb_ready == NO_SUCH_RESOURCE) { + deps = 0; + } else { + deps = si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsc->cb_ready), FALSE); + deps |= PMURES_BIT(rsc->cb_ready); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return deps; +} + +void +si_pmu_set_mac_rsrc_req(si_t *sih, int macunit) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + if (macunit == 0) { + W_REG(osh, &pmu->mac_res_req_timer, PMU32_MAC_MAIN_RSRC_REQ_TIMER); + W_REG(osh, &pmu->mac_res_req_mask, si_pmu_rsrc_macphy_clk_deps(sih, osh, macunit)); + } else if (macunit == 1) { + W_REG(osh, &pmu->mac_res_req_timer1, PMU32_MAC_AUX_RSRC_REQ_TIMER); + W_REG(osh, &pmu->mac_res_req_mask1, si_pmu_rsrc_macphy_clk_deps(sih, osh, macunit)); + } else if (macunit == 2) { + W_REG(osh, &pmu->mac_res_req_timer2, PMU32_MAC_SCAN_RSRC_REQ_TIMER); + W_REG(osh, &pmu->mac_res_req_mask2, si_pmu_rsrc_macphy_clk_deps(sih, osh, macunit)); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +/** + * Return worst case up time in [ILP cycles] for the given resource. + * + * Example use case: the d11 core needs to be programmed with the max time it + * takes to make the HT clock available. + * + * need to check circular dependancies and prevent dead recursion. 
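+ *
+ * In effect the computation below is (illustrative formula):
+ *
+ *   uptime(r) = own_uptime(r)
+ *             + max over direct dependencies d of uptime(d)
+ *             + (pmu_fast_trans_en ? 0 : PMURES_UP_TRANSITION)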
+ */ +static uint +BCMINITFN(si_pmu_res_uptime)(si_t *sih, osl_t *osh, + pmuregs_t *pmu, uint8 rsrc, bool pmu_fast_trans_en) +{ + uint32 deps; + uint uptime, i, dup, dmax, uptrans, ret; + uint32 min_mask = 0; +#ifndef SR_DEBUG + uint32 max_mask = 0; +#endif /* SR_DEBUG */ + + /* uptime of resource 'rsrc' */ + W_REG(osh, &pmu->res_table_sel, rsrc); + if (PMUREV(sih->pmurev) >= 30) + uptime = (R_REG(osh, &pmu->res_updn_timer) >> 16) & 0x7fff; + else if (PMUREV(sih->pmurev) >= 13) + uptime = (R_REG(osh, &pmu->res_updn_timer) >> 16) & 0x3ff; + else + uptime = (R_REG(osh, &pmu->res_updn_timer) >> 8) & 0xff; + + /* direct dependencies of resource 'rsrc' */ + deps = si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsrc), FALSE); + for (i = 0; i <= PMURES_MAX_RESNUM; i ++) { + if (!(deps & PMURES_BIT(i))) + continue; + deps &= ~si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(i), TRUE); + } +#ifndef SR_DEBUG + si_pmu_res_masks(sih, &min_mask, &max_mask); +#else + /* Recalculate fast pwr up delay if min res mask/max res mask has changed */ + min_mask = R_REG(osh, &pmu->min_res_mask); +#endif /* SR_DEBUG */ + deps &= ~min_mask; + + /* max uptime of direct dependencies */ + dmax = 0; + for (i = 0; i <= PMURES_MAX_RESNUM; i ++) { + if (!(deps & PMURES_BIT(i))) + continue; + dup = si_pmu_res_uptime(sih, osh, pmu, (uint8)i, pmu_fast_trans_en); + if (dmax < dup) + dmax = dup; + } + + PMU_MSG(("si_pmu_res_uptime: rsrc %u uptime %u(deps 0x%08x uptime %u)\n", + rsrc, uptime, deps, dmax)); + + uptrans = pmu_fast_trans_en ? 0 : PMURES_UP_TRANSITION; + ret = uptime + dmax + uptrans; + return ret; +} + +/* Return dependencies (direct or all/indirect) for the given resources */ +/* need to check circular dependencies and prevent dead recursion */ +static uint32 +si_pmu_res_deps(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 rsrcs, bool all) +{ + uint32 deps = 0; + uint32 i; + + for (i = 0; i <= PMURES_MAX_RESNUM; i ++) { + if (!(rsrcs & PMURES_BIT(i))) + continue; + W_REG(osh, &pmu->res_table_sel, i); + deps |= R_REG(osh, &pmu->res_dep_mask); + } + + return !all ? deps : (deps ? (deps | si_pmu_res_deps(sih, osh, pmu, deps, TRUE)) : 0); +} + +static bool +si_pmu_otp_is_ready(si_t *sih) +{ + uint32 otps = 0u; + + if (AOB_ENAB(sih)) { + otps = si_corereg(sih, si_findcoreidx(sih, GCI_CORE_ID, 0u), + OFFSETOF(gciregs_t, otpstatus), 0u, 0u); + } else { + otps = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otpstatus), 0u, 0u); + } + return !!(otps & OTPS_READY); +} + +static bool +si_pmu_otp_is_ready_and_wait(si_t *sih, bool on) +{ + SPINWAIT((si_pmu_otp_is_ready(sih) != on), 3000u); + + if (si_pmu_otp_is_ready(sih) != on) { + PMU_ERROR(("OTP ready bit not %s after wait\n", (on ? "Set" : "Clear"))); + OSL_SYS_HALT(); + } + + return si_pmu_otp_is_ready(sih) == on; +} + +/** + * OTP is powered down/up as a means of resetting it, or for saving current when OTP is unused. + * OTP is powered up/down through PMU resources. 
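+ *
+ * Condensed sketch of the power-up path implemented below (illustrative,
+ * mirroring the code rather than adding to it):
+ *
+ *   min_mask  = R_REG(osh, &pmu->min_res_mask) | rsrcs;
+ *   min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, TRUE);
+ *   W_REG(osh, &pmu->min_res_mask, min_mask);   (then wait for res_state)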
+ * OTP will turn OFF only if its not in the dependency of any "higher" rsrc in min_res_mask + */ +void +si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask) +{ + pmuregs_t *pmu; + uint origidx; + uint32 rsrcs = 0; /* rsrcs to turn on/off OTP power */ + rsc_per_chip_t *rsc; /* chip specific resource bit positions */ + + ASSERT(sih->cccaps & CC_CAP_PMU); + + /* Don't do anything if OTP is disabled */ + if (si_is_otp_disabled(sih)) { + PMU_MSG(("si_pmu_otp_power: OTP is disabled\n")); + return; + } + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + /* + * OTP can't be power cycled by toggling OTP_PU for always on OTP chips. For now + * corerev 45 is the only one that has always on OTP. + * Instead, the chipc register OTPCtrl1 (Offset 0xF4) bit 25 (forceOTPpwrDis) is used. + * Please refer to http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/ChipcommonRev45 + */ + if (CCREV(sih->ccrev) == 45) { + uint32 otpctrl1; + otpctrl1 = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otpcontrol1), 0, 0); + if (on) + otpctrl1 &= ~OTPC_FORCE_PWR_OFF; + else + otpctrl1 |= OTPC_FORCE_PWR_OFF; + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otpcontrol1), ~0, otpctrl1); + /* Return to original core */ + si_setcoreidx(sih, origidx); + return; + } + + switch (CHIPID(sih->chip)) { + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + case BCM4360_CHIP_ID: + case BCM43460_CHIP_ID: + case BCM4352_CHIP_ID: + case BCM43526_CHIP_ID: + case BCM4376_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + +#ifdef UNRELEASEDCHIP +#endif + + rsc = si_pmu_get_rsc_positions(sih); + rsrcs = PMURES_BIT(rsc->otp_pu); + break; + case BCM4378_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + si_gci_direct(sih, GCI_OFFSETOF(sih, otpcontrol), OTPC_FORCE_OTP_PWR_DIS, + on ? 0u : OTPC_FORCE_OTP_PWR_DIS); + if (!si_pmu_otp_is_ready_and_wait(sih, on)) { + PMU_MSG(("OTP ready bit not %s after wait\n", (on ? 
"ON" : "OFF"))); + } + break; + default: + break; + } + + if (rsrcs != 0) { + bool on_check = FALSE; /* Stores otp_ready state */ + uint32 min_mask = 0; + + /* Turn on/off the power */ + if (on) { + min_mask = R_REG(osh, &pmu->min_res_mask); + *min_res_mask = min_mask; + + min_mask |= rsrcs; + min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, TRUE); + on_check = TRUE; + /* Assuming max rsc mask defines OTP_PU, so not programming max */ + PMU_MSG(("Adding rsrc 0x%x to min_res_mask\n", min_mask)); + W_REG(osh, &pmu->min_res_mask, min_mask); + si_pmu_wait_for_steady_state(sih, osh, pmu); + OSL_DELAY(1000); + SPINWAIT(!(R_REG(osh, &pmu->res_state) & rsrcs), + PMU_MAX_TRANSITION_DLY); + ASSERT(R_REG(osh, &pmu->res_state) & rsrcs); + } else { + /* + * Restore back the min_res_mask, + * but keep OTP powered off if allowed by dependencies + */ + if (*min_res_mask) + min_mask = *min_res_mask; + else + min_mask = R_REG(osh, &pmu->min_res_mask); + + min_mask &= ~rsrcs; + /* + * OTP rsrc can be cleared only if its not + * in the dependency of any "higher" rsrc in min_res_mask + */ + min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, TRUE); + on_check = ((min_mask & rsrcs) != 0); + + PMU_MSG(("Removing rsrc 0x%x from min_res_mask\n", min_mask)); + W_REG(osh, &pmu->min_res_mask, min_mask); + si_pmu_wait_for_steady_state(sih, osh, pmu); + } + + if (!si_pmu_otp_is_ready_and_wait(sih, on_check)) { + PMU_MSG(("OTP ready bit not %s after wait\n", (on_check ? "ON" : "OFF"))); + } +#ifdef NOT_YET + /* + * FIXME: Temporarily disabling OTPS_READY ASSERT check. Right now ASSERT in + * ROM is enabled only for 4389B0/C0. Therefore this change anyway will not + * affect other chips. Once the correct spin-wait value is updated by the + * HW team, then this ASSERT will be enabled back. 
+	 */
+	ASSERT(si_pmu_otp_is_ready(sih) == on_check);
+#endif /* NOT_YET */
+	}
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+} /* si_pmu_otp_power */
+
+void
+si_pmu_spuravoid(si_t *sih, osl_t *osh, uint8 spuravoid)
+{
+	uint origidx;
+	bcm_int_bitmask_t intr_val;
+
+	/* Block ints and save current core */
+	si_introff(sih, &intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+	si_intrrestore(sih, &intr_val);
+} /* si_pmu_spuravoid */
+
+/* The functions below are only for BBPLL parallel purposes */
+/* For holding the pllcontrol data values for spuravoid */
+typedef struct {
+	uint8	spuravoid_mode;
+	uint8	pllctrl_reg;
+	uint32	pllctrl_regval;
+} pllctrl_spuravoid_t;
+
+uint32
+si_pmu_pll28nm_fvco(si_t *sih)
+{
+	uint32 r_high, r_low, r;
+	uint32 xf = si_alp_clock(sih);
+	/* PLL registers for 4368 */
+	uint32 pllreg5 = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, 0, 0);
+	uint32 pllreg4 = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG4, 0, 0);
+	/* p1div has lower 2 bits in pll4 and high 2 bits in pll5 */
+	uint8 p1div_lo = (pllreg4 & PMU4368_PLL1_PC4_P1DIV_MASK) >> PMU4368_PLL1_PC4_P1DIV_SHIFT;
+	uint8 p1div_hi = (pllreg5 & PMU4368_PLL1_PC5_P1DIV_MASK) >> PMU4368_PLL1_PC5_P1DIV_SHIFT;
+	uint8 p1div = (p1div_hi << PMU4368_P1DIV_HI_SHIFT) | (p1div_lo << PMU4368_P1DIV_LO_SHIFT);
+	uint32 ndiv_int = (pllreg5 & PMU4368_PLL1_PC5_NDIV_INT_MASK) >>
+		PMU4368_PLL1_PC5_NDIV_INT_SHIFT;
+	uint32 ndiv_frac = (pllreg5 & PMU4368_PLL1_PC5_NDIV_FRAC_MASK) >>
+		PMU4368_PLL1_PC5_NDIV_FRAC_SHIFT;
+
+	if (ISSIM_ENAB(sih)) {
+		/* PLL CTRL registers are meaningless under QT, return the pre-configured freq */
+		return (FVCO_720 * 1000);
+	} else if (p1div == 0) {
+		/* PLL register read fails, return 0 so caller can retry */
+		PMU_ERROR(("p1div is invalid\n"));
+		return 0;
+	}
+
+	/* Calculate xf * (ndiv_frac / (1 << 20) + ndiv_int) / p1div.
+	 * To reduce the inaccuracy in division,
+	 * convert to (xf * ndiv_frac / (1 << 20) + xf * ndiv_int) / p1div
+	 */
+	math_uint64_multiple_add(&r_high, &r_low, xf, ndiv_frac, 0);
+	/* Make sure the calculated 64-bit number is in the safe range (within 52 bits),
+	 * so we have a valid 32-bit result after dividing by 1 << 20
+	 */
+	ASSERT((r_high & 0xFFE00000) == 0);
+	math_uint64_right_shift(&r, r_high, r_low, 20);
+
+	return (r + ndiv_int * xf) / p1div;
+}
+
+bool
+si_pmu_is_otp_powered(si_t *sih, osl_t *osh)
+{
+	uint idx;
+	pmuregs_t *pmu;
+	bool st;
+	rsc_per_chip_t *rsc;	/* chip specific resource bit positions */
+
+	/* Remember original core before switch to chipc/pmu */
+	idx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	si_pmu_wait_for_steady_state(sih, osh, pmu);
+
+	switch (CHIPID(sih->chip)) {
+	case BCM43012_CHIP_ID:
+	case BCM43013_CHIP_ID:
+	case BCM43014_CHIP_ID:
+	case BCM4360_CHIP_ID:
+	case BCM43460_CHIP_ID:
+	case BCM43526_CHIP_ID:
+	case BCM4352_CHIP_ID:
+	case BCM4369_CHIP_GRPID:
+	case BCM4362_CHIP_GRPID:
+	case BCM4376_CHIP_GRPID:
+	case BCM4385_CHIP_GRPID:
+		rsc = si_pmu_get_rsc_positions(sih);
+		st = (R_REG(osh, &pmu->res_state) & PMURES_BIT(rsc->otp_pu)) != 0;
+		break;
+	case BCM4378_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+	case BCM4388_CHIP_GRPID:
+	case BCM4389_CHIP_GRPID:
+	case BCM4397_CHIP_GRPID:
+		st = (!(si_gci_direct(sih, GCI_OFFSETOF(sih, otpcontrol), 0u, 0u) &
+			OTPC_FORCE_OTP_PWR_DIS)) && si_pmu_otp_is_ready_and_wait(sih, TRUE);
+		break;
+	default:
+		st = TRUE;
+		break;
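+		/* Note: chips not listed above have no gateable OTP power
+		 * resource handled here and are reported as always powered.
+		 */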
+ } + + /* Return to original core */ + si_setcoreidx(sih, idx); + return st; +} /* si_pmu_is_otp_powered */ + +/** + * Some chip/boards can be optionally fitted with an external 32Khz clock source for increased power + * savings (due to more accurate sleep intervals). + */ +static void +BCMATTACHFN(si_pmu_set_lpoclk)(si_t *sih, osl_t *osh) +{ + uint32 ext_lpo_sel, int_lpo_sel, timeout = 0, + ext_lpo_avail = 0, lpo_sel = 0; + uint32 ext_lpo_isclock; /* On e.g. 43602a0, either x-tal or clock can be on LPO pins */ + pmuregs_t *pmu; + uint origidx; + + if (!(getintvar(NULL, "boardflags3"))) + return; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + ext_lpo_sel = getintvar(NULL, "boardflags3") & BFL3_FORCE_EXT_LPO_SEL; + int_lpo_sel = getintvar(NULL, "boardflags3") & BFL3_FORCE_INT_LPO_SEL; + ext_lpo_isclock = getintvar(NULL, "boardflags3") & BFL3_EXT_LPO_ISCLOCK; + + BCM_REFERENCE(ext_lpo_isclock); + + if (ext_lpo_sel != 0) { + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + /* External LPO is POR default enabled */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU43602_CC2_XTAL32_SEL, + ext_lpo_isclock ? 0 : PMU43602_CC2_XTAL32_SEL); + break; + default: + /* Force External LPO Power Up */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_EXT_LPO_PU, CC_EXT_LPO_PU); + si_gci_chipcontrol(sih, CHIPCTRLREG6, GC_EXT_LPO_PU, GC_EXT_LPO_PU); + break; + } + + ext_lpo_avail = R_REG(osh, &pmu->pmustatus) & EXT_LPO_AVAIL; + while (ext_lpo_avail == 0 && timeout < LPO_SEL_TIMEOUT) { + OSL_DELAY(1000); + ext_lpo_avail = R_REG(osh, &pmu->pmustatus) & EXT_LPO_AVAIL; + timeout++; + } + + if (timeout >= LPO_SEL_TIMEOUT) { + PMU_ERROR(("External LPO is not available\n")); + } else { + /* External LPO is available, lets use (=select) it */ + OSL_DELAY(1000); + timeout = 0; + + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU43602_CC2_FORCE_EXT_LPO, + PMU43602_CC2_FORCE_EXT_LPO); /* switches to external LPO */ + break; + default: + /* Force External LPO Sel up */ + si_gci_chipcontrol(sih, CHIPCTRLREG6, EXT_LPO_SEL, EXT_LPO_SEL); + /* Clear Force Internal LPO Sel */ + si_gci_chipcontrol(sih, CHIPCTRLREG6, INT_LPO_SEL, 0x0); + OSL_DELAY(1000); + + lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL; + while (lpo_sel != 0 && timeout < LPO_SEL_TIMEOUT) { + OSL_DELAY(1000); + lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL; + timeout++; + } + } + + if (timeout >= LPO_SEL_TIMEOUT) { + PMU_ERROR(("External LPO is not set\n")); + /* Clear Force External LPO Sel */ + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, + PMU43602_CC2_FORCE_EXT_LPO, 0); + break; + default: + si_gci_chipcontrol(sih, CHIPCTRLREG6, EXT_LPO_SEL, 0x0); + break; + } + } else { + /* Clear Force Internal LPO Power Up */ + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + break; + default: + si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_INT_LPO_PU, 0x0); + si_gci_chipcontrol(sih, CHIPCTRLREG6, GC_INT_LPO_PU, 0x0); + break; + } + } /* if (timeout) */ + } /* if (timeout) */ + } else if (int_lpo_sel != 0) { + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + break; /* do nothing, internal LPO is POR default powered and selected */ + default: + /* Force Internal LPO Power Up */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_INT_LPO_PU, CC_INT_LPO_PU); + si_gci_chipcontrol(sih, CHIPCTRLREG6, 
GC_INT_LPO_PU, GC_INT_LPO_PU); + + OSL_DELAY(1000); + + /* Force Internal LPO Sel up */ + si_gci_chipcontrol(sih, CHIPCTRLREG6, INT_LPO_SEL, INT_LPO_SEL); + /* Clear Force External LPO Sel */ + si_gci_chipcontrol(sih, CHIPCTRLREG6, EXT_LPO_SEL, 0x0); + + OSL_DELAY(1000); + + lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL; + timeout = 0; + while (lpo_sel == 0 && timeout < LPO_SEL_TIMEOUT) { + OSL_DELAY(1000); + lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL; + timeout++; + } + if (timeout >= LPO_SEL_TIMEOUT) { + PMU_ERROR(("Internal LPO is not set\n")); + /* Clear Force Internal LPO Sel */ + si_gci_chipcontrol(sih, CHIPCTRLREG6, INT_LPO_SEL, 0x0); + } else { + /* Clear Force External LPO Power Up */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_EXT_LPO_PU, 0x0); + si_gci_chipcontrol(sih, CHIPCTRLREG6, GC_EXT_LPO_PU, 0x0); + } + break; + } + if ((PMUREV(sih->pmurev) >= 33)) { + /* Enabling FAST_SEQ */ + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTSEQ_ENAB, PCTL_EXT_FASTSEQ_ENAB); + } + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} /* si_pmu_set_lpoclk */ + +static int +si_pmu_fast_lpo_locked(si_t *sih, osl_t *osh) +{ + int lock = 0; + switch (CHIPID(sih->chip)) { + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + lock = CHIPC_REG(sih, chipstatus, 0, 0) & CST43012_FLL_LOCK; + break; + case BCM4369_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + lock = si_gci_chipstatus(sih, GCI_CHIPSTATUS_13) & GCI_CS_4369_FLL1MHZ_LOCK_MASK; + break; + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + lock = si_gci_chipstatus(sih, GCI_CHIPSTATUS_15) & GCI_CS_4387_FLL1MHZ_LOCK_MASK; + break; + default: + PMU_MSG(("si_pmu_fast_lpo_locked: LPO enable: unsupported chip!\n")); + } + return lock ? 
1 : 0; +} + +/* Turn ON FAST LPO FLL (1MHz) */ +static void +BCMATTACHFN(si_pmu_fast_lpo_enable)(si_t *sih, osl_t *osh) +{ + int i = 0, lock = 0; + + BCM_REFERENCE(i); + BCM_REFERENCE(lock); + + switch (CHIPID(sih->chip)) { + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_ENAB, PCTL_EXT_FASTLPO_ENAB); + lock = CHIPC_REG(sih, chipstatus, 0, 0) & CST43012_FLL_LOCK; + + for (i = 0; ((i <= 30) && (!lock)); i++) + { + lock = CHIPC_REG(sih, chipstatus, 0, 0) & CST43012_FLL_LOCK; + OSL_DELAY(10); + } + + PMU_MSG(("si_pmu_fast_lpo_enable: duration: %d\n", i*10)); + + if (!lock) { + PMU_MSG(("si_pmu_fast_lpo_enable: FLL lock not present!")); + ROMMABLE_ASSERT(0); + } + + /* Now switch to using FAST LPO clk */ + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_SWENAB, PCTL_EXT_FASTLPO_SWENAB); + break; + case BCM4369_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + { + uint8 fastlpo_dis = fastlpo_dis_get(); + uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get(); + + if (!fastlpo_dis || !fastlpo_pcie_dis) { + /* LHL rev 6 in 4387 requires this bit to be set first */ + if ((LHLREV(sih->lhlrev) >= 6) && !PMU_FLL_PU_ENAB()) { + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, + LHL_PWRSEQCTL_PMU_LPLDO_PD, LHL_PWRSEQCTL_WL_FLLPU_EN); + } + + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_ENAB, PCTL_EXT_FASTLPO_ENAB); + + lock = si_pmu_fast_lpo_locked(sih, osh); + for (i = 0; ((i < 300) && (!lock)); i++) { + lock = si_pmu_fast_lpo_locked(sih, osh); + OSL_DELAY(10); + } + ASSERT(lock); + } + + if (!fastlpo_dis) { + /* Now switch to using FAST LPO clk */ + PMU_REG(sih, pmucontrol_ext, + PCTL_EXT_FASTLPO_SWENAB, PCTL_EXT_FASTLPO_SWENAB); + + OSL_DELAY(1000); + PMU_MSG(("pmu fast lpo enabled\n")); + } + break; + } + default: + PMU_MSG(("si_pmu_fast_lpo_enable: LPO enable: unsupported chip!\n")); + } +} + +/* Turn ON FAST LPO FLL (1MHz) for PCIE */ +bool +BCMATTACHFN(si_pmu_fast_lpo_enable_pcie)(si_t *sih) +{ + if (!FASTLPO_ENAB()) { + return FALSE; + } + + switch (CHIPID(sih->chip)) { + case BCM4369_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + { + uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get(); + + if (!fastlpo_pcie_dis) { + PMU_REG(sih, pmucontrol_ext, + PCTL_EXT_FASTLPO_PCIE_SWENAB, PCTL_EXT_FASTLPO_PCIE_SWENAB); + OSL_DELAY(1000); + PMU_MSG(("pcie fast lpo enabled\n")); + return TRUE; + } + break; + } + default: + PMU_MSG(("si_pmu_fast_lpo_enable_pcie: LPO enable: unsupported chip!\n")); + } + + return FALSE; +} + +/* Turn ON FAST LPO FLL (1MHz) for PMU */ +bool +BCMATTACHFN(si_pmu_fast_lpo_enable_pmu)(si_t *sih) +{ + if (!FASTLPO_ENAB()) { + return FALSE; + } + + switch (CHIPID(sih->chip)) { + case BCM4369_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + { + uint8 fastlpo_dis = fastlpo_dis_get(); + + if (!fastlpo_dis) { + PMU_MSG(("pmu fast lpo enabled\n")); + return TRUE; + } + break; + } + default: + PMU_MSG(("si_pmu_fast_lpo_enable_pmu: LPO enable: unsupported chip!\n")); + } + + return FALSE; +} + +static uint8 +BCMATTACHFN(fastlpo_dis_get)(void) +{ + uint8 fastlpo_dis = 1; + +#if 
defined(BCM_FASTLPO_PMU) && !defined(BCM_FASTLPO_PMU_DISABLED) + if (FASTLPO_ENAB()) { + fastlpo_dis = 0; + if (getvar(NULL, rstr_fastlpo_dis) != NULL) { + fastlpo_dis = (uint8)getintvar(NULL, rstr_fastlpo_dis); + } + } +#endif /* BCM_FASTLPO_PMU */ + return fastlpo_dis; +} + +static uint8 +BCMATTACHFN(fastlpo_pcie_dis_get)(void) +{ + uint8 fastlpo_pcie_dis = 1; + + if (FASTLPO_ENAB()) { + fastlpo_pcie_dis = 0; + if (getvar(NULL, rstr_fastlpo_pcie_dis) != NULL) { + fastlpo_pcie_dis = (uint8)getintvar(NULL, rstr_fastlpo_pcie_dis); + } + } + return fastlpo_pcie_dis; +} + +static void +BCMATTACHFN(si_pmu_fll_preload_enable)(si_t *sih) +{ + if (!PMU_FLL_PU_ENAB()) { + return; + } + + switch (CHIPID(sih->chip)) { + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + { + uint32 fll_dac_out; + + fll_dac_out = (si_gci_chipstatus(sih, GCI_CHIPSTATUS_15) & + GCI_CS_4387_FLL1MHZ_DAC_OUT_MASK) + >> GCI_CS_4387_FLL1MHZ_DAC_OUT_SHIFT; + + LHL_REG(sih, lhl_wl_hw_ctl_adr[1], + LHL_1MHZ_FLL_DAC_EXT_MASK, + (fll_dac_out) << LHL_1MHZ_FLL_DAC_EXT_SHIFT); + LHL_REG(sih, lhl_wl_hw_ctl_adr[1], + LHL_1MHZ_FLL_PRELOAD_MASK, + LHL_1MHZ_FLL_PRELOAD_MASK); + break; + } + default: + PMU_MSG(("si_pmu_fll_preload_enable: unsupported chip!\n")); + ASSERT(0); + break; + } +} + +/* LV sleep mode summary: + * LV mode is where both ABUCK and CBUCK are programmed to low voltages during + * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to power off. + * With ASR ON, LPLDO OFF + */ +#if defined(SAVERESTORE) +static void +BCMATTACHFN(si_set_lv_sleep_mode_pmu)(si_t *sih, osl_t *osh) +{ + /* jtag_udr_write USER_REG9W jtag_serdes_pic_enable 1 */ + if (BCM4369_CHIP(sih->chip) && (CHIPREV(sih->chiprev) == 0)) { + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON, 0); + + //JTAG_SEL override. When this bit is set, jtag_sel 0, Required for JTAG writes + /* Temporarily we are disabling this as it is not required.. 
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, 0x10, 0x10); + jtag_setbit_128(sih, 9, 103, 1); + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, 0x10, 0x0); + */ + + } + + /* Program pmu VREG register for Resource based ABUCK and CBUCK modes + * cbuck rsrc 0 - PWM and abuck rsrc 0 - Auto, rsrc 1 - PWM + */ + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC0_CBUCK_MODE_MASK, + 0x3u << PMU_4369_VREG16_RSRC0_CBUCK_MODE_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC0_ABUCK_MODE_MASK, + 0x3u << PMU_4369_VREG16_RSRC0_ABUCK_MODE_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK, + 0x3u << PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC2_ABUCK_MODE_MASK, + 0x3u << PMU_4369_VREG16_RSRC2_ABUCK_MODE_SHIFT); + + /* asr voltage adjust PWM - 0.8V */ + si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK, + 0x10u << PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT); + + /* Enable rsrc_en_asr_msk[0], msk[1] and msk[2] */ + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN0_ASR_MASK, + 0x1u << PMU_4369_VREG13_RSRC_EN0_ASR_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN1_ASR_MASK, + 0x1u << PMU_4369_VREG13_RSRC_EN1_ASR_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN2_ASR_MASK, + 0x1u << PMU_4369_VREG13_RSRC_EN2_ASR_SHIFT); + + si_pmu_vreg_control(sih, PMU_VREG_14, PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK, + 0x1u << PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT); + + /* disable force_hp_mode and enable wl_pmu_lv_mode */ + si_pmu_vreg_control(sih, PMU_VREG_7, + (PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK | PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK | + PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK), PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK); + + /* Enable MISCLDO only for A0, MEMLPLDO_adj -0.7V, Disable LPLDO power up */ + /* For 4387, should not disable because this is PU when analog PMU is out of sleep + * and bypass when in sleep mode + */ + if (!(BCM4389_CHIP(sih->chip) || BCM4388_CHIP(sih->chip) || BCM4397_CHIP(sih->chip) || + BCM4387_CHIP(sih->chip))) { + si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_MISCLDO_POWER_UP_MASK, + ((CHIPREV(sih->chiprev) == 0) ? 1 : 0) << + PMU_4369_VREG_5_MISCLDO_POWER_UP_SHIFT); + } + si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_LPLDO_POWER_UP_MASK, 0x0u); + si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK, + 0xDu << PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_MASK, + 0xFu << PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_SHIFT); + + /* Enable MEMLPLDO (value 0x08 to enable) and BTLDO is enabled.
At sleep RFLDO is disabled */ + si_pmu_vreg_control(sih, PMU_VREG_6, PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK, + 0x1u << PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT); + + /* Program PMU chip cntrl register to control + * cbuck2vddb_pwrsw_force_on =1 and memlpldo2vddb_pwrsw_force_off = 1 + * cbuck2ret_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1 + * set d11_2x2_bw80_cbuck2vddb_pwrsw_force_on and + * d11_2x2_bw20_cbuck2vddb_pwrsw_force_on cbuck2ret_pwrsw on 4 cores + */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON | PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON | + PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON | PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON), + (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON | PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON)); + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON | + PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON | PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON), + (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON)); + + /* set subcore_cbuck2vddb_pwrsw_force_on */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL5, + (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON | + PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON | PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON), + (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON)); + + /* Set subcore_memlpldo2vddb_pwrsw_force_off, d11_2x2_bw80_memlpldo2vddb_pwrsw_force_off + * and d11_2x2_bw20_memlpldo2vddb_pwrsw_force_off + * Set subcore_memlpldo2vddret_pwrsw_force_off,d11_2x2_bw80_memlpldo2vddret_pwrsw_force_off + * and d11_2x2_bw20_memlpldo2vddret_pwrsw_force_off + */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_SUBCORE_CBUCK2VDDB_OFF | PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF | + PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF)); + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_MAIN_CBUCK2VDDB_OFF | PMU_CC13_MAIN_CBUCK2VDDRET_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF)); + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_AUX_CBUCK2VDDB_OFF | PMU_CC13_AUX_CBUCK2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF)); + + /* PCIE retention mode enable */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL6, + PMU_CC6_ENABLE_PCIE_RETENTION, PMU_CC6_ENABLE_PCIE_RETENTION); +} + +static void +BCMATTACHFN(si_set_lv_sleep_mode_4369)(si_t *sih, osl_t *osh) +{ + si_set_lv_sleep_mode_pmu(sih, osh); + + si_set_lv_sleep_mode_lhl_config_4369(sih); + + /* Enable PMU interrupts */ + CHIPC_REG(sih, intmask, (1u << 4u), (1u << 4u)); +} + +void si_set_abuck_mode_4362(si_t *sih, uint8 mode) +{ + + if (mode < 2 || mode > 4) { + ASSERT(0); + return; + } + + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC0_ABUCK_MODE_MASK, + mode << PMU_4362_VREG16_RSRC0_ABUCK_MODE_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC1_ABUCK_MODE_MASK, + mode << PMU_4362_VREG16_RSRC1_ABUCK_MODE_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC2_ABUCK_MODE_MASK, + mode << PMU_4362_VREG16_RSRC2_ABUCK_MODE_SHIFT); +} + +static void +BCMATTACHFN(si_set_lv_sleep_mode_4378)(si_t *sih, osl_t *osh) +{ + si_set_lv_sleep_mode_pmu(sih, osh); + + si_set_lv_sleep_mode_lhl_config_4378(sih); +} + +static void 
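The LV-sleep programming above is the same masked read-modify-write idiom applied register by register: select a field with a mask, shift the new value into place, and leave every other bit untouched. A minimal self-contained sketch of that idiom, with hypothetical reg_read()/reg_write() stand-ins for the real register accessors:

#include <stdint.h>

static uint32_t regfile[64];   /* fake backing store standing in for MMIO */

static uint32_t reg_read(unsigned idx) { return regfile[idx]; }
static void reg_write(unsigned idx, uint32_t v) { regfile[idx] = v; }

/* Update only the bits selected by 'mask'; all other bits are preserved,
 * mirroring the (mask, val) contract of si_pmu_vreg_control()/si_pmu_chipcontrol().
 */
static uint32_t reg_update(unsigned idx, uint32_t mask, uint32_t val)
{
    uint32_t cur = reg_read(idx);

    cur = (cur & ~mask) | (val & mask);
    reg_write(idx, cur);
    return cur;
}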
+BCMATTACHFN(si_set_lv_sleep_mode_pmu_4387)(si_t *sih, osl_t *osh) +{ + /* Program pmu VREG register for Resource based ABUCK and CBUCK modes + * cbuck rsrc 0 - PWM and abuck rsrc 0 - Auto, rsrc 1 - PWM + */ + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK, + 0x2u << PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT); + + /* asr voltage adjust PWM - 0.8V */ + si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK, + 0x10u << PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT); + + /* Enable rsrc_en_asr_msk[0], msk[1] and msk[2] */ + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN0_ASR_MASK, + 0x1u << PMU_4369_VREG13_RSRC_EN0_ASR_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN1_ASR_MASK, + 0x1u << PMU_4369_VREG13_RSRC_EN1_ASR_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN2_ASR_MASK, + 0x1u << PMU_4369_VREG13_RSRC_EN2_ASR_SHIFT); + + si_pmu_vreg_control(sih, PMU_VREG_14, PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK, + 0x1u << PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT); + + /* disable force_hp_mode and enable wl_pmu_lv_mode */ + si_pmu_vreg_control(sih, PMU_VREG_7, + (PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK | PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK | + PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK), PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK); + + /* Enable MEMLPLDO (value 0x08 to enable) and BTLDO is enabled. At sleep RFLDO is disabled */ + si_pmu_vreg_control(sih, PMU_VREG_6, PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK, + 0x1u << PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT); + + /* For 4387C0, we don't need memlpldo2vddret_on nor cldo2vddb_on. + * We just need to clear the memlpldo2vddb_forceoff to turn on all the memlpldo2vddb pwrsw + */ + if (PMUREV(sih->pmurev) < 39) { + /* Program PMU chip cntrl register to control + * cbuck2vddb_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1 + * cbuck2ret_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1 + * set d11_2x2_bw80_cbuck2vddb_pwrsw_force_on and + * d11_2x2_bw20_cbuck2vddb_pwrsw_force_on cbuck2ret_pwrsw on 4 cores + */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON | PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON | + PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON | + PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON), + (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON | + PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON)); + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON | + PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON | + PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON), + (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON)); + + /* set subcore_cbuck2vddb_pwrsw_force_on */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL5, + (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON | + PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON | + PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON), + (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON | + PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON)); + + /* Set subcore_memlpldo2vddb_pwrsw_force_off, + * d11_2x2_bw80_memlpldo2vddb_pwrsw_force_off + * and d11_2x2_bw20_memlpldo2vddb_pwrsw_force_off + * Set subcore_memlpldo2vddret_pwrsw_force_off, + * d11_2x2_bw80_memlpldo2vddret_pwrsw_force_off + * and d11_2x2_bw20_memlpldo2vddret_pwrsw_force_off + */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_SUBCORE_CBUCK2VDDB_OFF | PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF | + PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | + PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF)); + si_pmu_chipcontrol(sih,
PMU_CHIPCTL13, + (PMU_CC13_MAIN_CBUCK2VDDB_OFF | PMU_CC13_MAIN_CBUCK2VDDRET_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF)); + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_AUX_CBUCK2VDDB_OFF | PMU_CC13_AUX_CBUCK2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF)); + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON, + PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON); + si_pmu_chipcontrol(sih, PMU_CHIPCTL5, + PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON, + PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON); + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | + PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF | + PMU_CC13_CMN_MEMLPLDO2VDDRET_ON, + PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | + PMU_CC13_CMN_MEMLPLDO2VDDRET_ON); + si_pmu_chipcontrol(sih, PMU_CHIPCTL17, + PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON | + PMU_CC17_SCAN_CBUCK2VDDB_ON | + PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF, + PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON | + PMU_CC17_SCAN_CBUCK2VDDB_ON); + } else { + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + PMU_CC13_CMN_MEMLPLDO2VDDRET_ON | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | + PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF, + PMU_CC13_CMN_MEMLPLDO2VDDRET_ON); + si_pmu_chipcontrol(sih, PMU_CHIPCTL17, + PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF, 0); + } + + /* PCIE retention mode enable */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL6, + PMU_CC6_ENABLE_PCIE_RETENTION, PMU_CC6_ENABLE_PCIE_RETENTION); + + /* H/W JIRA http://jira.broadcom.com/browse/HW4387-825 + * B0 only, the h/w bug is fixed in C0 + */ + if (PMUREV(sih->pmurev) == 38) { + si_pmu_vreg_control(sih, PMU_VREG_14, + PMU_VREG14_RSRC_EN_ASR_PWM_PFM_MASK, + PMU_VREG14_RSRC_EN_ASR_PWM_PFM_MASK); + } + + /* WAR for jira HW4387-922 */ + si_pmu_vreg_control(sih, PMU_VREG_1, + PMU_4387_VREG1_CSR_OVERI_DIS_MASK, + PMU_4387_VREG1_CSR_OVERI_DIS_MASK); + + /* Clear Misc_LDO override */ + si_pmu_vreg_control(sih, PMU_VREG_5, VREG5_4387_MISCLDO_PU_MASK, 0); + + si_pmu_vreg_control(sih, PMU_VREG_8, + PMU_4387_VREG8_ASR_OVERI_DIS_MASK, + PMU_4387_VREG8_ASR_OVERI_DIS_MASK); + + if (BCMSRTOPOFF_ENAB()) { + si_pmu_vreg_control(sih, PMU_VREG_6, + PMU_4387_VREG6_WL_PMU_LV_MODE_MASK, 0); + + /* Clear memldo_pu bit as 4387 doesn't plan to use MEMLDO */ + si_pmu_vreg_control(sih, PMU_VREG_6, + PMU_4387_VREG6_MEMLDO_PU_MASK, 0); + } else { + si_pmu_vreg_control(sih, PMU_VREG_6, + PMU_4387_VREG6_WL_PMU_LV_MODE_MASK, + PMU_4387_VREG6_WL_PMU_LV_MODE_MASK); + } +} + +static void +BCMATTACHFN(si_set_lv_sleep_mode_4387)(si_t *sih, osl_t *osh) +{ + si_set_lv_sleep_mode_pmu_4387(sih, osh); + si_set_lv_sleep_mode_lhl_config_4387(sih); +} + +static void +BCMATTACHFN(si_set_lv_sleep_mode_4389)(si_t *sih, osl_t *osh) +{ + si_set_lv_sleep_mode_pmu(sih, osh); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + PMU_CC4_4387_MAIN_PD_CBUCK2VDDRET_ON | + PMU_CC4_4387_AUX_PD_CBUCK2VDDRET_ON, + 0); + si_pmu_chipcontrol(sih, PMU_CHIPCTL5, + PMU_CC5_4387_SUBCORE_CBUCK2VDDRET_ON, + 0); + si_pmu_chipcontrol(sih, PMU_CHIPCTL6, + PMU_CC6_RX4_CLK_SEQ_SELECT_MASK, + 0); + /* Disable lq_clk - HW4387-254 */ + si_pmu_chipcontrol(sih,
PMU_CHIPCTL12, + PMU_CC12_DISABLE_LQ_CLK_ON, + PMU_CC12_DISABLE_LQ_CLK_ON); + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF, + 0); + +#ifdef NOT_YET + /* FIXME: this setting is causing the load switch from CSR to ASR */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | + PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF | + PMU_CC13_CMN_MEMLPLDO2VDDRET_ON, 0); +#endif /* NOT_YET */ + + si_pmu_chipcontrol(sih, PMU_CHIPCTL17, + PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON | + PMU_CC17_SCAN_CBUCK2VDDB_ON | + PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF, + PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON | + PMU_CC17_SCAN_CBUCK2VDDB_ON); + + si_set_lv_sleep_mode_lhl_config_4389(sih); + + si_pmu_vreg_control(sih, PMU_VREG_6, + (PMU_4389_VREG6_WL_PMU_LV_MODE_MASK | PMU_4389_VREG6_MEMLDO_PU_MASK), + PMU_4389_VREG6_WL_PMU_LV_MODE_MASK); + + /* SW WAR for 4389B0(rev 01) issue - HW4387-922. 4389C0(rev 02) already has HW fix */ + if (CHIPREV(sih->chiprev) == 1) { + si_pmu_vreg_control(sih, PMU_VREG_1, + PMU_4387_VREG1_CSR_OVERI_DIS_MASK, + PMU_4387_VREG1_CSR_OVERI_DIS_MASK); + + si_pmu_vreg_control(sih, PMU_VREG_8, + PMU_4387_VREG8_ASR_OVERI_DIS_MASK, + PMU_4387_VREG8_ASR_OVERI_DIS_MASK); + } +} + +static void +BCMATTACHFN(si_set_lv_sleep_mode_4362)(si_t *sih, osl_t *osh) +{ + /* Program pmu VREG register for Resource based ABUCK and CBUCK modes + * cbuck rsrc 0 - PWM and abuck rsrc 0 - Auto, rsrc 1 - PWM + */ + si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC0_CBUCK_MODE_MASK, + 0x3u << PMU_4362_VREG16_RSRC0_CBUCK_MODE_SHIFT); + + si_set_abuck_mode_4362(sih, 0x3u); + + /* asr voltage adjust PWM - 0.8V */ + si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_LPPFM_MASK, + 0x10u << PMU_4362_VREG8_ASR_OVADJ_LPPFM_SHIFT); + + /* Enable rsrc_en_asr_msk[0], msk[1] and msk[2] */ + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4362_VREG13_RSRC_EN0_ASR_MASK, + 0x1u << PMU_4362_VREG13_RSRC_EN0_ASR_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4362_VREG13_RSRC_EN1_ASR_MASK, + 0x1u << PMU_4362_VREG13_RSRC_EN1_ASR_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4362_VREG13_RSRC_EN2_ASR_MASK, + 0x1u << PMU_4362_VREG13_RSRC_EN2_ASR_SHIFT); + + si_pmu_vreg_control(sih, PMU_VREG_14, PMU_4362_VREG14_RSRC_EN_CSR_MASK0_MASK, + 0x1u << PMU_4362_VREG14_RSRC_EN_CSR_MASK0_SHIFT); + + /* disable force_hp_mode and enable wl_pmu_lv_mode */ + si_pmu_vreg_control(sih, PMU_VREG_7, + (PMU_4362_VREG_7_WL_PMU_LV_MODE_MASK | PMU_4362_VREG_7_WL_PMU_LP_MODE_MASK | + PMU_4362_VREG_7_PMU_FORCE_HP_MODE_MASK), PMU_4362_VREG_7_WL_PMU_LV_MODE_MASK); + + /* Enable MISCLDO, MEMLPLDO_adj -0.7V, Disable LPLDO power up */ + si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4362_VREG_5_MISCLDO_POWER_UP_MASK, + 0x1u << PMU_4362_VREG_5_MISCLDO_POWER_UP_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4362_VREG_5_LPLDO_POWER_UP_MASK, 0x0u); + si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK, + 0xBu << PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT); + + /* Enable MEMLPLDO (value 0x08 to enable) and BTLDO is enabled.
At sleep RFLDO is disabled */ + si_pmu_vreg_control(sih, PMU_VREG_6, PMU_4362_VREG_6_MEMLPLDO_POWER_UP_MASK, + 0x1u << PMU_4362_VREG_6_MEMLPLDO_POWER_UP_SHIFT); + + /* Program PMU chip cntrl register to control + * cbuck2vddb_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1 + * cbuck2ret_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1 + * set d11_2x2_bw80_cbuck2vddb_pwrsw_force_on and + * d11_2x2_bw20_cbuck2vddb_pwrsw_force_on cbuck2ret_pwrsw on 4 cores + */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + (PMU_CC4_4362_PD_CBUCK2VDDB_ON | PMU_CC4_4362_PD_CBUCK2VDDRET_ON | + PMU_CC4_4362_PD_MEMLPLDO2VDDB_ON | PMU_CC4_4362_PD_MEMLPDLO2VDDRET_ON), + (PMU_CC4_4362_PD_CBUCK2VDDB_ON | PMU_CC4_4362_PD_CBUCK2VDDRET_ON)); + + /* set subcore_cbuck2vddb_pwrsw_force_on */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL5, + (PMU_CC5_4362_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4362_SUBCORE_CBUCK2VDDRET_ON | + PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDB_ON | PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDRET_ON), + (PMU_CC5_4362_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4362_SUBCORE_CBUCK2VDDRET_ON)); + + /* Set subcore_memlpldo2vddb_pwrsw_force_off, d11_2x2_bw80_memlpldo2vddb_pwrsw_force_off + * and d11_2x2_bw20_memlpldo2vddb_pwrsw_force_off + * Set subcore_memlpldo2vddret_pwrsw_force_off, d11_2x2_bw80_memlpldo2vddret_pwrsw_force_off + * and d11_2x2_bw20_memlpldo2vddret_pwrsw_force_off + */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_SUBCORE_CBUCK2VDDB_OFF | PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF | + PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF)); + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_MAIN_CBUCK2VDDB_OFF | PMU_CC13_MAIN_CBUCK2VDDRET_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF)); + + /* PCIE retention mode enable */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL6, + PMU_CC6_ENABLE_PCIE_RETENTION, PMU_CC6_ENABLE_PCIE_RETENTION); + + si_set_lv_sleep_mode_lhl_config_4362(sih); + + /* Enable PMU interrupts */ + CHIPC_REG(sih, intmask, (1u << 4u), (1u << 4u)); +} + +void +BCMATTACHFN(si_pmu_fis_setup)(si_t *sih) +{ + uint origidx; + pmuregs_t *pmu; + int val; + osl_t *osh = si_osh(sih); + + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (CHIPID(sih->chip)) { + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + val = R_REG(osh, &pmu->max_res_mask); + W_REG(osh, &pmu->fis_start_min_res_mask, val); + + val = R_REG(osh, &pmu->min_res_mask); + W_REG(osh, &pmu->fis_min_res_mask, val); + + W_REG(osh, &pmu->fis_ctrl_status, + (PMU_FIS_DN_TIMER_VAL_4378 << PMU_FIS_DN_TIMER_VAL_SHIFT) + & PMU_FIS_DN_TIMER_VAL_MASK); + break; + case BCM4388_CHIP_GRPID: + val = R_REG(osh, &pmu->max_res_mask); + W_REG(osh, &pmu->fis_start_min_res_mask, val); + + val = R_REG(osh, &pmu->min_res_mask); + W_REG(osh, &pmu->fis_min_res_mask, val); + + W_REG(osh, &pmu->fis_ctrl_status, + ((PMU_FIS_DN_TIMER_VAL_4388 << PMU_FIS_DN_TIMER_VAL_SHIFT) + & PMU_FIS_DN_TIMER_VAL_MASK) | PMU_FIS_PCIE_SAVE_EN_VALUE); + break; + case BCM4389_CHIP_GRPID: + val = R_REG(osh, &pmu->max_res_mask); + W_REG(osh, &pmu->fis_start_min_res_mask, val); + + val = R_REG(osh, &pmu->min_res_mask); + W_REG(osh, &pmu->fis_min_res_mask, val); + + W_REG(osh, &pmu->fis_ctrl_status, + ((PMU_FIS_DN_TIMER_VAL_4389 << PMU_FIS_DN_TIMER_VAL_SHIFT) + &
PMU_FIS_DN_TIMER_VAL_MASK) | PMU_FIS_PCIE_SAVE_EN_VALUE); + break; + + default: + break; + } + si_setcoreidx(sih, origidx); +} +#endif /* defined(SAVERESTORE) */ + +/* + * Enable: Dynamic Clk Switching + * Disable: Mirrored Mode + * use nvram to enable + */ +static void +BCMATTACHFN(si_pmu_dynamic_clk_switch_enab)(si_t *sih) +{ + if (PMUREV(sih->pmurev) >= 36) { + if (getintvar(NULL, rstr_dyn_clksw_en)) { + PMU_REG(sih, pmucontrol_ext, + PCTL_EXT_REQ_MIRROR_ENAB, 0); + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, + CC2_4378_USE_WLAN_BP_CLK_ON_REQ_MASK | + CC2_4378_USE_CMN_BP_CLK_ON_REQ_MASK, + 0); + } + } +} + +/* use pmu rsrc XTAL_PU to count deep sleep of chip */ +static void +BCMATTACHFN(si_pmu_enb_slp_cnt_on_rsrc)(si_t *sih, osl_t *osh) +{ + uint origidx; + pmuregs_t *pmu; + uint32 rsrc_slp = 0xffffffff; + + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (CHIPID(sih->chip)) { + + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + rsrc_slp = RES4378_XTAL_PU; + break; + + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + rsrc_slp = RES4387_XTAL_PU; + break; + + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + rsrc_slp = RES4389_XTAL_PU; + break; + + default: + break; + } + + if (rsrc_slp != 0xffffffff) { + W_REG(osh, &pmu->rsrc_event0, PMURES_BIT(rsrc_slp)); + } + + si_setcoreidx(sih, origidx); +} + +#define MISC_LDO_STEPPING_DELAY (150u) /* 150 us, includes 50us additional margin */ + +/** initialize PMU chip controls and other chip level stuff */ +void +BCMATTACHFN(si_pmu_chip_init)(si_t *sih, osl_t *osh) +{ + ASSERT(sih->cccaps & CC_CAP_PMU); + if (AOB_ENAB(sih)) { + if (hnd_pmur == NULL) { + uint coreidx = si_coreidx(sih); + hnd_pmur = si_setcore(sih, PMU_CORE_ID, 0); + ASSERT(hnd_pmur != NULL); + /* Restore to CC */ + si_setcoreidx(sih, coreidx); + } + } + + si_pmu_otp_chipcontrol(sih, osh); + +#ifdef CHIPC_UART_ALWAYS_ON + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP); +#endif /* CHIPC_UART_ALWAYS_ON */ + + si_pmu_enb_slp_cnt_on_rsrc(sih, osh); + + /* Misc. 
chip control, has nothing to do with PMU */ + switch (CHIPID(sih->chip)) { + + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + { +#ifdef USE_LHL_TIMER + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_LHL_TIMER_SELECT, + PMUCCTL02_43012_LHL_TIMER_SELECT); +#else + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_LHL_TIMER_SELECT, 0); +#endif /* USE_LHL_TIMER */ + + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON, 0); + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL14_43012_DISABLE_LQ_AVAIL, 0); + + PMU_REG_NEW(sih, extwakemask0, + PMU_EXT_WAKE_MASK_0_SDIO, PMU_EXT_WAKE_MASK_0_SDIO); + PMU_REG_NEW(sih, extwakereqmask[0], ~0, si_pmu_rsrc_ht_avail_clk_deps(sih, osh)); + + if (sih->lpflags & LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON) { + /* Force PWM when Radio ON */ + /* 2G_Listen/2G_RX/2G_TX/5G_Listen/5G_RX/5G_TX = PWM */ + si_pmu_vreg_control(sih, PMU_VREG_8, + PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK, + PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0); + si_pmu_vreg_control(sih, PMU_VREG_9, + PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK, + PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0); + } + else { + /* LPPFM opt setting for ePA */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL16, PMU_CC16_CLK4M_DIS, 1); + si_pmu_chipcontrol(sih, PMU_CHIPCTL16, PMU_CC16_FF_ZERO_ADJ, 4); + /* 2G_Listen/2G_RX = LPPFM, 2G_TX/5G_Listen/5G_RX/5G_TX = PWM */ + si_pmu_vreg_control(sih, PMU_VREG_8, + PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK, + PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1); + si_pmu_vreg_control(sih, PMU_VREG_9, + PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK, + PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1); + } + /* Set external LPO */ + si_lhl_set_lpoclk(sih, osh, LHL_LPO_AUTO); + + /* Enabling WL2CDIG sleep */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB, + PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL9, + PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_MASK, + PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_VAL << + PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_SHIFT); + + /* Setting MemLPLDO voltage to 0.74 */ + si_pmu_vreg_control(sih, PMU_VREG_6, VREG6_43012_MEMLPLDO_ADJ_MASK, + 0x8 << VREG6_43012_MEMLPLDO_ADJ_SHIFT); + + /* Setting LPLDO voltage to 0.8 */ + si_pmu_vreg_control(sih, PMU_VREG_6, VREG6_43012_LPLDO_ADJ_MASK, + 0xB << VREG6_43012_LPLDO_ADJ_SHIFT); + + /* Turn off power switch 1P8 in sleep */ + si_pmu_vreg_control(sih, PMU_VREG_7, VREG7_43012_PWRSW_1P8_PU_MASK, 0); + + /* Enable PMU sleep mode0 (DS0-PS0) */ + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, ~0, PMU_SLEEP_MODE_0); + + si_pmu_fast_lpo_enable(sih, osh); + + /* Enable the 'power kill' (power off selected retention memories) */ + GCI_REG_NEW(sih, bt_smem_control0, GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL, + GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL); + + break; + } + case BCM4362_CHIP_GRPID: + { + pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0); + uint32 lpo = LHL_LPO_AUTO; + uint32 lhl_tmr_sel = 0; + + /* DMAHANG WAR:SWWLAN:171729 + * Stretch the ALP and HT clocks after de-asserting + * the request. 
During the RX frame transfer from RXFIFO to + * DP FIFO, in certain cases the clock is getting de-asserted + * by ucode as it does not have visibility beyond BM + */ + W_REG(osh, &pmu->clkstretch, 0x0fff0fff); + +#ifdef USE_LHL_TIMER + lhl_tmr_sel = PMU_CC13_LHL_TIMER_SELECT; +#endif /* USE_LHL_TIMER */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_LHL_TIMER_SELECT, lhl_tmr_sel); + + if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) { + lpo = LHL_EXT_LPO_ENAB; + } + + if (!ISSIM_ENAB(sih)) { + si_lhl_set_lpoclk(sih, osh, lpo); + } + + if (getintvar(NULL, rstr_btldo3p3pu)) { + si_pmu_regcontrol(sih, 4, + PMU_28NM_VREG4_WL_LDO_CNTL_EN, + PMU_28NM_VREG4_WL_LDO_CNTL_EN); + si_pmu_regcontrol(sih, 6, + PMU_28NM_VREG6_BTLDO3P3_PU, + PMU_28NM_VREG6_BTLDO3P3_PU); + } + + /* write the XTAL preferred startup/normal A0/B0 revision */ + si_pmu_chipcontrol_xtal_settings_4362(sih); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL6, + (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION), + (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION)); + + si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_LPPFM_MASK, + 0x02u << PMU_4362_VREG8_ASR_OVADJ_LPPFM_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_PFM_MASK, + 0x02u << PMU_4362_VREG8_ASR_OVADJ_PFM_SHIFT); + si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_PWM_MASK, + 0x02u << PMU_4362_VREG8_ASR_OVADJ_PWM_SHIFT); +#if defined(SAVERESTORE) + if (SR_ENAB()) { + si_set_lv_sleep_mode_4362(sih, osh); + } +#endif /* SAVERESTORE */ + + si_pmu_fast_lpo_enable(sih, osh); + if ((PMUREV(sih->pmurev) >= 33) && FASTLPO_ENAB()) { + /* Enabling FAST_SEQ */ + uint8 fastlpo_dis = fastlpo_dis_get(); + uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get(); + if (!fastlpo_dis || !fastlpo_pcie_dis) { + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTSEQ_ENAB, + PCTL_EXT_FASTSEQ_ENAB); + } + } + + break; + } + + case BCM4369_CHIP_GRPID: + { + pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0); + uint32 lpo = LHL_LPO_AUTO; + uint32 lhl_tmr_sel = 0; + + /* DMAHANG WAR:SWWLAN:171729 + * Stretch the ALP and HT clocks after de-asserting + * the request. 
During the RX frame transfer from RXFIFO to + * DP FIFO, in certain cases the clock is getting de-asserted + * by ucode as it does not have visibility beyond BM + */ +#ifndef ATE_BUILD + W_REG(osh, &pmu->clkstretch, 0x0fff0fff); +#endif + +#ifdef USE_LHL_TIMER + lhl_tmr_sel = PMU_CC13_4369_LHL_TIMER_SELECT; +#endif + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4369_LHL_TIMER_SELECT, lhl_tmr_sel); + + if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) { + lpo = LHL_EXT_LPO_ENAB; + } + + if (!ISSIM_ENAB(sih)) { + si_lhl_set_lpoclk(sih, osh, lpo); + } + + if (getintvar(NULL, rstr_btldo3p3pu)) { + si_pmu_regcontrol(sih, 4, + PMU_28NM_VREG4_WL_LDO_CNTL_EN, + PMU_28NM_VREG4_WL_LDO_CNTL_EN); + si_pmu_regcontrol(sih, 6, + PMU_28NM_VREG6_BTLDO3P3_PU, + PMU_28NM_VREG6_BTLDO3P3_PU); + } + + /* write the XTAL preferred startup/normal A0/B0 revision */ + si_pmu_chipcontrol_xtal_settings_4369(sih); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL6, + (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION), + (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION)); + + /* write the PWRSW CLK start/stop delay only for A0 revision */ + if (CHIPREV(sih->chiprev) == 0) { + si_pmu_chipcontrol(sih, PMU_CHIPCTL1, PMU_CC1_PWRSW_CLKSTRSTP_DELAY_MASK, + PMU_CC1_PWRSW_CLKSTRSTP_DELAY); + } + +#if defined(SAVERESTORE) + if (SR_ENAB()) { + si_set_lv_sleep_mode_4369(sih, osh); + } +#endif /* SAVERESTORE */ + + si_pmu_fast_lpo_enable(sih, osh); +#ifdef BCM_LDO3P3_SOFTSTART + if (CHIPID(sih->chip) != BCM4377_CHIP_ID) { + si_pmu_ldo3p3_soft_start_wl_set(sih, osh, 3); + } +#endif + if ((PMUREV(sih->pmurev) >= 33) && FASTLPO_ENAB()) { + /* Enabling FAST_SEQ */ + uint8 fastlpo_dis = fastlpo_dis_get(); + uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get(); + if (!fastlpo_dis || !fastlpo_pcie_dis) { + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTSEQ_ENAB, + PCTL_EXT_FASTSEQ_ENAB); + } + } + + break; + } + + CASE_BCM43602_CHIP: /* fall through */ + /* Set internal/external LPO */ + si_pmu_set_lpoclk(sih, osh); + break; + + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + { + pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0); + uint32 lpo = LHL_LPO_AUTO; + uint32 lhl_tmr_sel = 0; + +#ifdef USE_LHL_TIMER + lhl_tmr_sel = PMU_CC13_4378_LHL_TIMER_SELECT; +#endif + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4378_LHL_TIMER_SELECT, lhl_tmr_sel); + + if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) { + lpo = LHL_EXT_LPO_ENAB; + } + + if (!ISSIM_ENAB(sih)) { + si_lhl_set_lpoclk(sih, osh, lpo); + } + + /* JIRA: SWWLAN-228979 + * BT LDO is required for Aux 2G Tx only. 
Keep powered down until Aux is up + */ + si_pmu_bt_ldo_pu(sih, FALSE); + + /* Updating xtal pmu registers to combat slow powerup issue */ + si_pmu_chipcontrol_xtal_settings_4378(sih); + + if (LHL_IS_PSMODE_1(sih)) { + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_07, + ((1 << GCI_CC7_AAON_BYPASS_PWRSW_SEL) | + (1 << GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON)), + 0); + } + + si_lhl_setup(sih, osh); + + /* Setting MemLPLDO voltage */ + if (getvar(NULL, rstr_memlpldo_volt) != NULL) { + int memlpldo_volt = getintvar(NULL, rstr_memlpldo_volt); + + if (memlpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 && + memlpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) { + si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4378_MEMLPLDO_ADJ_MASK, + memlpldo_volt << VREG5_4378_MEMLPLDO_ADJ_SHIFT); + } else { + PMU_MSG(("Invalid memlpldo value: %d\n", memlpldo_volt)); + } + } + + /* Setting LPLDO voltage */ + if (getvar(NULL, rstr_lpldo_volt) != NULL) { + int lpldo_volt = getintvar(NULL, rstr_lpldo_volt); + + if (lpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 && + lpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) { + si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4378_LPLDO_ADJ_MASK, + lpldo_volt << VREG5_4378_LPLDO_ADJ_SHIFT); + } else { + PMU_MSG(("Invalid lpldo value: %d\n", lpldo_volt)); + } + } + + /* Enable fast LPO */ + si_pmu_fast_lpo_enable(sih, osh); + +#if defined(SAVERESTORE) + if (SR_ENAB()) { + si_set_lv_sleep_mode_4378(sih, osh); + } +#endif /* SAVERESTORE */ + + si_pmu_dynamic_clk_switch_enab(sih); + + if (CHIPID(sih->chip) == BCM4378_CHIP_GRPID) { + si_pmu_vreg_control(sih, PMU_VREG_0, + VREG0_4378_CSR_VOLT_ADJ_PWM_MASK | + VREG0_4378_CSR_VOLT_ADJ_PFM_MASK | + VREG0_4378_CSR_VOLT_ADJ_LP_PFM_MASK | + VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_MASK, + (CSR_VOLT_ADJ_PWM_4378 << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT) | + (CSR_VOLT_ADJ_PFM_4378 << VREG0_4378_CSR_VOLT_ADJ_PFM_SHIFT) | + (CSR_VOLT_ADJ_LP_PFM_4378 << VREG0_4378_CSR_VOLT_ADJ_LP_PFM_SHIFT) | + (CSR_OUT_VOLT_TRIM_ADJ_4378 << + VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_SHIFT)); +#ifdef BCM_LDO3P3_SOFTSTART + si_pmu_ldo3p3_soft_start_wl_set(sih, osh, 0x03u); + si_pmu_ldo3p3_soft_start_bt_set(sih, osh, 0x03u); +#endif + } else { + /* 4368 */ + int nvcsr; + if ((nvcsr = getintvar(NULL, rstr_csrtune))) { + si_pmu_vreg_control(sih, PMU_VREG_0, + VREG0_4378_CSR_VOLT_ADJ_PWM_MASK | + VREG0_4378_CSR_VOLT_ADJ_PFM_MASK | + VREG0_4378_CSR_VOLT_ADJ_LP_PFM_MASK, + (nvcsr << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT) | + (nvcsr << VREG0_4378_CSR_VOLT_ADJ_PFM_SHIFT) | + (nvcsr << VREG0_4378_CSR_VOLT_ADJ_LP_PFM_SHIFT)); + } + } + } + break; + + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + { + pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0); + uint32 lpo = LHL_LPO_AUTO; + uint32 lhl_tmr_sel = 0; + uint32 abuck_volt, cbuck_volt; + uint32 min_mask; + uint32 misc_ldo_volt, curr_misc_ldo_volt, i; + +#ifdef DONGLEBUILD + si_set_arm_clkfreq_high(sih); +#endif + + if (PMU_FLL_PU_ENAB()) { + min_mask = R_REG(osh, &pmu->min_res_mask) | + PMURES_BIT(RES4387_FAST_LPO_AVAIL) | + PMURES_BIT(RES4387_PMU_LP); + W_REG(osh, &pmu->min_res_mask, min_mask); + } + +#ifdef USE_LHL_TIMER + lhl_tmr_sel = PMU_CC13_4387_LHL_TIMER_SELECT; +#endif + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_LHL_TIMER_SELECT, lhl_tmr_sel); + +#if defined(SAVERESTORE) + if (SR_ENAB()) { + si_set_lv_sleep_mode_4387(sih, osh); + } +#endif /* SAVERESTORE */ + + if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) { + lpo = LHL_EXT_LPO_ENAB; + } + + if (!ISSIM_ENAB(sih)) { + si_lhl_set_lpoclk(sih, osh, lpo); + } + + if (getintvar(NULL, rstr_btldo3p3pu)) {
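The memlpldo_volt/lpldo_volt handling above is a recurring pattern: an optional NVRAM variable, a bounds check against the legal field encodings, and a fallback with a log message when the value is out of range. A self-contained sketch of the pattern (stub accessors and bounds are stand-ins for the driver environment):

#include <stdio.h>

/* Stand-ins for the driver's getvar()/getintvar() NVRAM accessors. */
static const char *nvram_getvar(const char *name) { (void)name; return "13"; }
static int nvram_getint(const char *name) { (void)name; return 13; }

/* Return the override if present and within [lo, hi], else the default. */
static int bounded_override(const char *name, int lo, int hi, int dflt)
{
    int v;

    if (nvram_getvar(name) == NULL)
        return dflt;
    v = nvram_getint(name);
    if (v < lo || v > hi) {
        printf("Invalid %s value: %d\n", name, v);  /* mirrors the PMU_MSG path */
        return dflt;
    }
    return v;   /* caller shifts the code into the register field */
}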
si_pmu_regcontrol(sih, 4, + PMU_28NM_VREG4_WL_LDO_CNTL_EN, + PMU_28NM_VREG4_WL_LDO_CNTL_EN); + si_pmu_regcontrol(sih, 6, + PMU_4387_VREG6_BTLDO3P3_PU, + PMU_4387_VREG6_BTLDO3P3_PU); + } + + if (LHL_IS_PSMODE_1(sih)) { + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_07, + ((1 << GCI_CC7_AAON_BYPASS_PWRSW_SEL) | + (1 << GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON)), + 0); + } + + si_lhl_setup(sih, osh); + + /* Setting MemLPLDO voltage */ + if (getvar(NULL, rstr_memlpldo_volt) != NULL) { + int memlpldo_volt = getintvar(NULL, rstr_memlpldo_volt); + + if (memlpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 && + memlpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) { + si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_MEMLPLDO_ADJ_MASK, + memlpldo_volt << VREG5_4387_MEMLPLDO_ADJ_SHIFT); + } else { + PMU_MSG(("Invalid memlpldo value: %d\n", memlpldo_volt)); + } + } + + /* Setting LPLDO voltage */ + if (getvar(NULL, rstr_lpldo_volt) != NULL) { + int lpldo_volt = getintvar(NULL, rstr_lpldo_volt); + + if (lpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 && + lpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) { + si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_LPLDO_ADJ_MASK, + lpldo_volt << VREG5_4387_LPLDO_ADJ_SHIFT); + } else { + PMU_MSG(("Invalid lpldo value: %d\n", lpldo_volt)); + } + } + + /* Setting misc ldo voltage to 0.85625V but need stepping up */ + curr_misc_ldo_volt = (si_pmu_regcontrol(sih, PMU_VREG_5, 0, 0) & + VREG5_4387_MISC_LDO_ADJ_MASK) >> VREG5_4387_MISC_LDO_ADJ_SHIFT; + + /* Only after POR, chip default is 0.8V */ + if (curr_misc_ldo_volt == PMU_VREG5_MISC_LDO_VOLT_0p800) { + misc_ldo_volt = PMU_VREG5_MISC_LDO_VOLT_0p856; /* 0.85625V */ + + for (i = PMU_VREG5_MISC_LDO_VOLT_0p818; i <= misc_ldo_volt; i ++) { + si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_MISC_LDO_ADJ_MASK, + i << VREG5_4387_MISC_LDO_ADJ_SHIFT); + OSL_DELAY(MISC_LDO_STEPPING_DELAY); + } + } + + /* Enable fast LPO */ + si_pmu_fast_lpo_enable(sih, osh); + + if (PMU_FLL_PU_ENAB()) { + /* Wait until fast LPO is stable */ + OSL_DELAY(500u); + si_pmu_fll_preload_enable(sih); + } + + si_pmu_dynamic_clk_switch_enab(sih); + + /* HQ settings */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25, + 0xFFFFFFFF, XTAL_HQ_SETTING_4387); + + /* LQ settings */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_26, + 0xFFFFFFFF, XTAL_LQ_SETTING_4387); + + /* Enable Radiodig Clk Gating */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_ENAB_RADIO_REG_CLK, 0); + + /* Set up HW based switch-off of select BBPLL channels when SCAN-only mode + * + * Assign bbpll_ch_control_grp_pd_trigger_mask = {gci_chip_cntrl[559:554], + * gci_chip_cntrl[543:522], 1'b0, gci_chip_cntrl[521], 1'b0}; + */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_16, + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_MASK | + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_MASK, + (((GRP_PD_TRIGGER_MASK_4387 >> 1) & 0x1) << /* bit 1 */ + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_SHIFT) | + (((GRP_PD_TRIGGER_MASK_4387 >> 3) & 0x3FFFFF) << /* bit 24:3 */ + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_SHIFT)); + + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_17, + CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_MASK | + CC_GCI_17_BBPLL_CH_CTRL_EN_MASK, + (((GRP_PD_TRIGGER_MASK_4387 >> 25) & 0x3F) << /* bits 30:25 */ + CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_SHIFT) | + CC_GCI_17_BBPLL_CH_CTRL_EN_MASK); + + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_20, + CC_GCI_20_BBPLL_CH_CTRL_GRP_MASK, + (GRP_PD_MASK_4387 << CC_GCI_20_BBPLL_CH_CTRL_GRP_SHIFT)); + + if (CHIPID(sih->chip) == BCM4397_CHIP_GRPID) { + /* For Phy Reg Access configure IHRP access */ + si_gci_chipcontrol(sih, 
CC_GCI_CHIPCTRL_28, + GCI_CC28_IHRP_SEL_MASK, + 0u << GCI_CC28_IHRP_SEL_SHIFT); + } + + if (getvar(NULL, rstr_abuck_volt) != NULL) { + abuck_volt = getintvar(NULL, rstr_abuck_volt); + } else { + abuck_volt = ABUCK_VOLT_SW_DEFAULT_4387; + } + + if (CHIPID(sih->chip) == BCM4397_CHIP_GRPID) { + /* For Phy Reg Access configure IHRP access */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_28, + GCI_CC28_IHRP_SEL_MASK, + 0u << GCI_CC28_IHRP_SEL_SHIFT); + } + + /* For 4397, PMU has only 11 Regulator Registers */ + if (sih->chip != BCM4397_CHIP_ID) { + si_pmu_vreg_control(sih, PMU_VREG_13, + PMU_VREG13_ASR_OVADJ_PWM_MASK, + abuck_volt << PMU_VREG13_ASR_OVADJ_PWM_SHIFT); + } + if (BCM_PWR_OPT_ENAB()) { + if (getvar(NULL, rstr_cbuck_volt) != NULL) { + cbuck_volt = getintvar(NULL, rstr_cbuck_volt); + } else { + cbuck_volt = CBUCK_VOLT_SW_DEFAULT_4387; + } + + si_pmu_vreg_control(sih, PMU_VREG_0, + VREG0_4378_CSR_VOLT_ADJ_PWM_MASK, + cbuck_volt << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT); + } + + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FAST_TRANS_ENAB, PCTL_EXT_FAST_TRANS_ENAB); + + if (si_hib_ext_wakeup_isenab(sih)) { + /* pull up common BP */ + int rsrc_num = RES4387_CORE_RDY_CB; + uint32 deps = PMURES_BIT(rsrc_num) | + si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsrc_num), TRUE); + W_REG(osh, &pmu->extwakereqmask[0], deps); + } + + si_pmu_chipcontrol(sih, PMU_CHIPCTL17, + PMU_CC17_SCAN_DIG_SR_CLK_MASK, + SCAN_DIG_SR_CLK_40_MHZ << PMU_CC17_SCAN_DIG_SR_CLK_SHIFT); + } + break; + + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + { + pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0); + uint32 lpo = LHL_LPO_AUTO; + uint32 lhl_tmr_sel = 0; + uint32 abuck_volt, cbuck_volt; + uint32 min_mask; + + if (PMU_FLL_PU_ENAB()) { + min_mask = R_REG(osh, &pmu->min_res_mask) | + PMURES_BIT(RES4389_FAST_LPO_AVAIL) | + PMURES_BIT(RES4389_PMU_LP); + W_REG(osh, &pmu->min_res_mask, min_mask); + } + +#ifdef USE_LHL_TIMER + lhl_tmr_sel = PMU_CC13_4387_LHL_TIMER_SELECT; +#endif + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_LHL_TIMER_SELECT, lhl_tmr_sel); + +#if defined(SAVERESTORE) + if (SR_ENAB()) { + si_set_lv_sleep_mode_4389(sih, osh); + } +#endif /* SAVERESTORE */ + /* SET CB2WL Intr PWR Request irrespective of Default value */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU_CC2_CB2WL_INTR_PWRREQ_EN, + PMU_CC2_CB2WL_INTR_PWRREQ_EN); + + if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) { + lpo = LHL_EXT_LPO_ENAB; + } + + if (!ISSIM_ENAB(sih)) { + si_lhl_set_lpoclk(sih, osh, lpo); + } + + if (getintvar(NULL, rstr_btldo3p3pu)) { + si_pmu_regcontrol(sih, 4, + PMU_28NM_VREG4_WL_LDO_CNTL_EN, + PMU_28NM_VREG4_WL_LDO_CNTL_EN); + si_pmu_regcontrol(sih, 6, + PMU_4387_VREG6_BTLDO3P3_PU, + PMU_4387_VREG6_BTLDO3P3_PU); + } + + if (LHL_IS_PSMODE_1(sih)) { + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_07, + ((1 << GCI_CC7_AAON_BYPASS_PWRSW_SEL) | + (1 << GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON)), + 0); + } + + si_lhl_setup(sih, osh); + + /* Setting MemLPLDO voltage */ + if (getvar(NULL, rstr_memlpldo_volt) != NULL) { + int memlpldo_volt = getintvar(NULL, rstr_memlpldo_volt); + + if (memlpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 && + memlpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) { + si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_MEMLPLDO_ADJ_MASK, + memlpldo_volt << VREG5_4387_MEMLPLDO_ADJ_SHIFT); + } else { + PMU_MSG(("Invalid memlpldo value: %d\n", memlpldo_volt)); + } + } + + /* Setting LPLDO voltage */ + if (getvar(NULL, rstr_lpldo_volt) != NULL) { + int lpldo_volt = getintvar(NULL, rstr_lpldo_volt); + + if (lpldo_volt >= 
PMU_VREG5_LPLDO_VOLT_0_90 && + lpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) { + si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_LPLDO_ADJ_MASK, + lpldo_volt << VREG5_4387_LPLDO_ADJ_SHIFT); + } else { + PMU_MSG(("Invalid lpldo value: %d\n", lpldo_volt)); + } + } + + /* Enable fast LPO */ + si_pmu_fast_lpo_enable(sih, osh); + + if (PMU_FLL_PU_ENAB()) { + /* Wait until fast LPO is stable */ + OSL_DELAY(500u); + si_pmu_fll_preload_enable(sih); + } + + si_pmu_dynamic_clk_switch_enab(sih); + + /* HQ settings */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25, + 0xFFFFFFFF, XTAL_HQ_SETTING_4387); + + /* LQ settings */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_26, + 0xFFFFFFFF, XTAL_LQ_SETTING_4387); + + /* Enable Radiodig Clk Gating */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_ENAB_RADIO_REG_CLK, 0); + + /* Set up HW based switch-off of select BBPLL channels when SCAN-only mode + * + * Assign bbpll_ch_control_grp_pd_trigger_mask = {gci_chip_cntrl[559:554], + * gci_chip_cntrl[543:522], 1'b0, gci_chip_cntrl[521], 1'b0}; + */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_16, + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_MASK | + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_MASK, + (((GRP_PD_TRIGGER_MASK_4387 >> 1) & 0x1) << /* bit 1 */ + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_SHIFT) | + (((GRP_PD_TRIGGER_MASK_4387 >> 3) & 0x3FFFFF) << /* bit 24:3 */ + CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_SHIFT)); + + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_17, + CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_MASK | + CC_GCI_17_BBPLL_CH_CTRL_EN_MASK, + (((GRP_PD_TRIGGER_MASK_4387 >> 25) & 0x3F) << /* bits 30:25 */ + CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_SHIFT) | + CC_GCI_17_BBPLL_CH_CTRL_EN_MASK); + + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_20, + CC_GCI_20_BBPLL_CH_CTRL_GRP_MASK, + (GRP_PD_MASK_4387 << CC_GCI_20_BBPLL_CH_CTRL_GRP_SHIFT)); + + if (getvar(NULL, rstr_abuck_volt) != NULL) { + abuck_volt = getintvar(NULL, rstr_abuck_volt); + } else { + abuck_volt = ABUCK_VOLT_SW_DEFAULT_4387; + } + + if (CHIPID(sih->chip) == BCM4397_CHIP_GRPID) { + /* For Phy Reg Access configure IHRP access */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_28, + GCI_CC28_IHRP_SEL_MASK, + 0u << GCI_CC28_IHRP_SEL_SHIFT); + } + + /* For 4397, PMU has only 11 Regulator Registers */ + if (sih->chip != BCM4397_CHIP_ID) { + si_pmu_vreg_control(sih, PMU_VREG_13, + PMU_VREG13_ASR_OVADJ_PWM_MASK, + abuck_volt << PMU_VREG13_ASR_OVADJ_PWM_SHIFT); + } + + if (BCM_PWR_OPT_ENAB()) { + if (getvar(NULL, rstr_cbuck_volt) != NULL) { + cbuck_volt = getintvar(NULL, rstr_cbuck_volt); + } else { + cbuck_volt = CBUCK_VOLT_SW_DEFAULT_4387; + } + + si_pmu_vreg_control(sih, PMU_VREG_0, + VREG0_4378_CSR_VOLT_ADJ_PWM_MASK, + cbuck_volt << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT); + } + + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FAST_TRANS_ENAB, PCTL_EXT_FAST_TRANS_ENAB); + + if (PMUREV(sih->pmurev) == 39) { + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_04, + CC_GCI_04_4387C0_XTAL_PM_CLK, + CC_GCI_04_4387C0_XTAL_PM_CLK); + } + } + break; + + default: + break; + } +} /* si_pmu_chip_init */ + +/** Reference: http://confluence.broadcom.com/display/WLAN/Open+loop+Calibration+Sequence */ +int +si_pmu_openloop_cal(si_t *sih, uint16 currtemp) +{ + int err = BCME_OK; + switch (CHIPID(sih->chip)) { + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + err = si_pmu_openloop_cal_43012(sih, currtemp); + break; + + default: + PMU_MSG(("si_pmu_openloop_cal: chip not supported!\n")); + break; + } + return err; +} + +static int +si_pmu_openloop_cal_43012(si_t *sih, uint16 currtemp) +{ 
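The temperature compensation in this function boils down to interpolating between two factory characterization lines and extrapolating the result to 125 C, all in scaled integer math. A self-contained distillation of that calculation (hypothetical helper name; constants and steps copied from the function body below; temp_c must be non-zero, as the caller guarantees):

#include <stdint.h>

/* Two reference lines y1(T) = a1*T + b1 and y2(T) = a2*T + b2 bracket the
 * DCO response; the code measured at temp_c selects a third line between
 * them, which is then extrapolated to 125 C. Values are scaled by 100 to
 * preserve precision in integer arithmetic.
 */
static int32_t dco_code_at_125c(int32_t dco_code_raw, int32_t temp_c)
{
    const int32_t a1 = -27, b1 = 18704;   /* first reference line  */
    const int32_t a2 = -15, b2 = 7531;    /* second reference line */
    int32_t y1 = a1 * temp_c + b1;
    int32_t y2 = a2 * temp_c + b2;
    int32_t dco = dco_code_raw * 100;     /* fixed-point scale x100 */
    int32_t b3 = b1 + ((b2 - b1) / (y2 - y1)) * (dco - y1);
    int32_t a3, y3;

    if (b3 > dco) {
        a3 = (b3 - dco) / temp_c;
        y3 = b3 - a3 * 125;               /* extrapolate to 125 C */
    } else {
        a3 = (dco - b3) / temp_c;
        y3 = b3 + a3 * 125;
    }
    return y3 / 100;                      /* back to code units */
}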
+ int32 a1 = -27, a2 = -15, b1 = 18704, b2 = 7531, a3, y1, y2, b3, y3; + int32 xtal, array_size = 0, dco_code = 0, origidx = 0, pll_reg = 0, err; + bcm_int_bitmask_t intr_val; + pmuregs_t *pmu = NULL; + const pllctrl_data_t *pllctrlreg_update; + const uint32 *pllctrlreg_val; + osl_t *osh = si_osh(sih); + uint32 final_dco_code = si_get_openloop_dco_code(sih); + + xtal = si_xtalfreq(sih); + err = BCME_OK; + + origidx = si_coreidx(sih); + pmu = si_setcore(sih, PMU_CORE_ID, 0); + if (!pmu) { + PMU_MSG(("si_pmu_openloop_cal_43012: NULL pmu pointer \n")); + err = BCME_ERROR; + goto done; + } + + if (final_dco_code == 0) { + currtemp = (currtemp == 0)?-1: currtemp; + + SPINWAIT(((si_corereg(sih, SI_CC_IDX, + OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY); + ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) + & CCS_HTAVAIL)); + + /* Stop using PLL clks, by programming the disable_ht_avail */ + /* and disable_lq_avail in the pmu chip control bit */ + /* Turn Off PLL */ + si_pmu_pll_off_43012(sih, osh, pmu); + + /* Program PLL for 320MHz VCO */ + pllctrlreg_update = pmu1_xtaltab0_43012; + array_size = ARRAYSIZE(pmu1_xtaltab0_43012); + pllctrlreg_val = pmu1_pllctrl_tab_43012_1600mhz; + si_pmu_pllctrlreg_update(sih, osh, pmu, xtal, 100, + pllctrlreg_update, array_size, pllctrlreg_val); + + /* Update PLL control register */ + /* Set the Update bit (bit 10) in PMU for PLL registers */ + OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD); + + /* Turn PLL ON but ensure that force_bbpll_dreset */ + /* bit is set , so that PLL 320Mhz clocks cannot be consumed */ + si_pmu_pll_on_43012(sih, osh, pmu, 1); + + /* Settings to get dco_code on PLL test outputs and then read */ + /* from gci chip status */ + pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0); + pll_reg = (pll_reg & (~0x3C000)) | (0x4<<14); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, ~0, pll_reg); + OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD); + + pll_reg = pll_reg | (1<<17); + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, ~0, pll_reg); + OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD); + + /* Get the DCO code from GCI CHIP STATUS Register 7 , bits 27 downto 16 */ + dco_code = (si_gci_chipstatus(sih, GCI_CHIPSTATUS_07)); + dco_code = ((dco_code & 0x0FFF0000) >> 16); + dco_code = (dco_code >> 4); + + /* The DCO code obtained above and the temperature */ + /* sensed at this time will give us the DCO code */ + /* that needs to be programmed to ensure VCO does not crosses 160 MHz at 125C */ + y1 = ((a1 * currtemp) + b1); + y2 = ((a2 * currtemp) + b2); + dco_code = (dco_code * 100); + b3 = b1 + (((b2-b1)/(y2 - y1)) * (dco_code - y1)); + if (b3 > dco_code) { + a3 = (b3 - dco_code) / currtemp; + y3 = b3 - (a3 * 125); + } + else { + a3 = (dco_code - b3) / currtemp; + y3 = b3 + (a3 * 125); + } + y3 = (y3/100); + PMU_MSG(("DCO_CODE = %d\n", y3)); + + /* Turning ON PLL at 160.1 MHz for Normal Operation */ + si_pmu_pll_off_43012(sih, osh, pmu); + pllctrlreg_update = pmu1_xtaltab0_43012; + array_size = ARRAYSIZE(pmu1_xtaltab0_43012); + pllctrlreg_val = pmu1_pllctrl_tab_43012_1600mhz; + si_pmu_pllctrlreg_update(sih, osh, pmu, xtal, 0, + pllctrlreg_update, array_size, pllctrlreg_val); + + /* Update PLL control register */ + /* Set the Update bit (bit 10) in PMU for PLL registers */ + OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD); + + si_pmu_pll_on_43012(sih, osh, pmu, 0); + y3 = (y3 << 4); + final_dco_code = y3; + PMU_MSG(("openloop_dco_code = %x\n", final_dco_code)); + 
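The read-back/compare/program step that follows slices the 12-bit DCO code out of bits 27:16 of PLL control 3. A generic extract/insert pair (hypothetical helpers, not driver API) shows the bit manipulation in isolation:

#include <stdint.h>

#define DCO_CODE_SHIFT 16u
#define DCO_CODE_MASK  0x0FFF0000u   /* bits 27:16 of PLL control 3 */

/* Pull the current 12-bit code out of the register value. */
static inline uint32_t dco_code_get(uint32_t pllreg)
{
    return (pllreg & DCO_CODE_MASK) >> DCO_CODE_SHIFT;
}

/* Return the register value with a new code placed in the field. */
static inline uint32_t dco_code_set(uint32_t pllreg, uint32_t code)
{
    return (pllreg & ~DCO_CODE_MASK) | ((code << DCO_CODE_SHIFT) & DCO_CODE_MASK);
}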
} + + pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0); + y3 = (pll_reg >> 16) & 0xFFF; + + if (final_dco_code != (uint32)y3) { + + /* Program the DCO code to bits 27 */ + /* downto 16 of the PLL control 3 register */ + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, + 0x0FFF0000, (final_dco_code << 16)); + + /* Enable Extra post divison for Open Loop */ + /* by writing 1 to bit 14 of above register */ + si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0x00004000, (1<<14)); + + /* Update PLL control register */ + /* Set the Update bit (bit 10) in PMU for PLL registers */ + OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD); + /* After cal openloop VCO Max=320MHz, Min=240Mhz (with extra margin */ + /* 230-220MHz). Update SAVE_RESTORE up/down times accordingly */ + W_REG(osh, &pmu->res_table_sel, RES43012_SR_SAVE_RESTORE); + W_REG(osh, &pmu->res_updn_timer, 0x01800180); + } + + si_restore_core(sih, origidx, &intr_val); + si_set_openloop_dco_code(sih, final_dco_code); +done: + return err; +} + +void +si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh) +{ +#if !defined(BCMDONGLEHOST) + chipcregs_t *cc; + uint origidx; + uint32 xtalfreq; + + /* PMU specific initializations */ + if (!PMUCTL_ENAB(sih)) + return; + /* Remember original core before switch to chipc */ + origidx = si_coreidx(sih); + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT(cc != NULL); + if (cc == NULL) + return; + + xtalfreq = getintvar(NULL, rstr_xtalfreq); + /* + * workaround for chips that don't support external LPO, thus ALP clock + * can not be measured accurately: + */ + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + xtalfreq = XTAL_FREQ_54MHZ; + break; + default: + break; + } + /* If xtalfreq var not available, try to measure it */ + if (xtalfreq == 0) + xtalfreq = si_pmu_measure_alpclk(sih, osh); + si_pmu_enb_slow_clk(sih, osh, xtalfreq); + /* Return to original core */ + si_setcoreidx(sih, origidx); +#endif /* !BCMDONGLEHOST */ +} + +/** initialize PMU registers in case default values proved to be suboptimal */ +void +BCMATTACHFN(si_pmu_swreg_init)(si_t *sih, osl_t *osh) +{ + ASSERT(sih->cccaps & CC_CAP_PMU); + + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + /* adjust PA Vref to 2.80V */ + si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_PAREF, 0x0c); + break; + case BCM4378_CHIP_GRPID: + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_04, GPIO_DRIVE_4378_MASK, + GPIO_DRIVE_4378_VAL); + /* fall through */ + case BCM4376_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: +#ifdef BCM_AVS + if (BCM_AVS_ENAB()) { + si_pmu_set_avs(sih); + } +#endif + break; + default: + break; + } + si_pmu_otp_vreg_control(sih, osh); +} /* si_pmu_swreg_init */ + +/** Wait for a particular clock level to be on the backplane */ +uint32 +si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh, uint32 clk, uint32 delay_val) +{ + pmuregs_t *pmu; + uint origidx; + uint32 val; + + ASSERT(sih->cccaps & CC_CAP_PMU); + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + if (delay_val) + SPINWAIT(((R_REG(osh, &pmu->pmustatus) & clk) != clk), delay_val); + val = R_REG(osh, &pmu->pmustatus) & clk; + + /* Return to original core */ + si_setcoreidx(sih, origidx); + return (val); +} + +#define EXT_ILP_HZ 32768 + +/** + * Measures the ALP clock frequency in KHz. Returns 0 if not possible. + * Possible only if PMU rev >= 10 and there is an external LPO 32768Hz crystal. 
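 * A worked example of the measurement arithmetic below (the ilp_ctr value is
 * hypothetical): if the counter latches ilp_ctr = 4578 ALP ticks per 4 ILP
 * ticks, then alp_hz = 4578 * 32768 / 4 = 37,502,976 Hz, and the
 * round-to-nearest-100KHz conversion yields
 * alp_khz = (37,502,976 + 50,000) / 100,000 * 100 = 37,500 KHz, i.e. a
 * 37.5 MHz ALP clock; the integer divisions perform the rounding.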
+ */ +uint32 +BCMATTACHFN(si_pmu_measure_alpclk)(si_t *sih, osl_t *osh) +{ + uint32 alp_khz; + uint32 pmustat_lpo = 0; + pmuregs_t *pmu; + uint origidx; + + if (PMUREV(sih->pmurev) < 10) + return 0; + + ASSERT(sih->cccaps & CC_CAP_PMU); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) || + (CHIPID(sih->chip) == BCM43013_CHIP_ID) || + (CHIPID(sih->chip) == BCM43014_CHIP_ID) || + (PMUREV(sih->pmurev) >= 22)) + pmustat_lpo = !(R_REG(osh, &pmu->pmucontrol) & PCTL_LPO_SEL); + else + pmustat_lpo = R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL; + + if (pmustat_lpo) { + uint32 ilp_ctr, alp_hz; + + /* Enable the reg to measure the freq, in case disabled before */ + W_REG(osh, &pmu->pmu_xtalfreq, 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT); + + /* Delay for well over 4 ILP clocks */ + OSL_DELAY(1000); + + /* Read the latched number of ALP ticks per 4 ILP ticks */ + ilp_ctr = R_REG(osh, &pmu->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK; + + /* Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT bit to save power */ + W_REG(osh, &pmu->pmu_xtalfreq, 0); + + /* Calculate ALP frequency */ + alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4; + + /* Round to nearest 100KHz, and at the same time convert to KHz */ + alp_khz = (alp_hz + 50000) / 100000 * 100; + } else + alp_khz = 0; + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return alp_khz; +} /* si_pmu_measure_alpclk */ + +/** Update min/max resources after SR-ASM download to d11 txfifo */ +void +si_pmu_res_minmax_update(si_t *sih, osl_t *osh) +{ + uint32 min_mask = 0, max_mask = 0; + pmuregs_t *pmu; + uint origidx; + bcm_int_bitmask_t intr_val; + + /* Block ints and save current core */ + si_introff(sih, &intr_val); + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (CHIPID(sih->chip)) { + CASE_BCM43602_CHIP: + max_mask = 0; /* Only care about min_mask for now */ + break; + case BCM43012_CHIP_ID: + case BCM43013_CHIP_ID: + case BCM43014_CHIP_ID: + min_mask = RES43012_PMU_SLEEP; + break; + case BCM4369_CHIP_GRPID: + case BCM4362_CHIP_GRPID: + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + si_pmu_res_masks(sih, &min_mask, &max_mask); + max_mask = 0; /* Don't need to update max */ + break; + default: + break; + } + if (min_mask) { + /* Add min mask dependencies */ + min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, FALSE); + W_REG(osh, &pmu->min_res_mask, min_mask); + } + if (max_mask) { + max_mask |= si_pmu_res_deps(sih, osh, pmu, max_mask, FALSE); + W_REG(osh, &pmu->max_res_mask, max_mask); + } + + si_pmu_wait_for_steady_state(sih, osh, pmu); + + /* Return to original core */ + si_setcoreidx(sih, origidx); + si_intrrestore(sih, &intr_val); +} /* si_pmu_res_minmax_update */ + +#ifdef DONGLEBUILD + +#define PMUCAP_DUMP_TAG_SIZE_BYTES 4 + +/* Move the below definitions to .ro_ontrap section so they + * won't be reused when reusing rodata section after trap. 
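 * (BCMPOST_TRAP_RODATA is presumably a section-placement macro; its actual
 * definition lives elsewhere in the tree. A generic sketch of the technique,
 * with illustrative names and assuming a GCC-style toolchain:
 *
 *   #include <stdint.h>
 *
 *   // route a const table into a section that survives rodata reuse
 *   #define SECTION_RO_ONTRAP __attribute__((section(".ro_ontrap")))
 *
 *   static const uint32_t SECTION_RO_ONTRAP trap_safe_regs[] = {
 *       0x1e0u, 0x1e8u,   // hypothetical register offsets
 *   };
 *
 * The linker script must then keep .ro_ontrap out of the reclaimed region.)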
+ */ +static const uint32 BCMPOST_TRAP_RODATA(chipc_regs_to_dump)[] = { + OFFSETOF(chipcregs_t, clk_ctl_st), + OFFSETOF(chipcregs_t, powerctl) +}; + +static const uint BCMPOST_TRAP_RODATA(pmuregsdump)[] = { + OFFSETOF(pmuregs_t, pmucontrol), + OFFSETOF(pmuregs_t, pmucapabilities), + OFFSETOF(pmuregs_t, pmustatus), + OFFSETOF(pmuregs_t, res_state), + OFFSETOF(pmuregs_t, res_pending), + OFFSETOF(pmuregs_t, pmutimer), + OFFSETOF(pmuregs_t, min_res_mask), + OFFSETOF(pmuregs_t, max_res_mask), + OFFSETOF(pmuregs_t, clkstretch), + OFFSETOF(pmuregs_t, res_req_timer), + OFFSETOF(pmuregs_t, res_req_mask), + OFFSETOF(pmuregs_t, mac_res_req_timer), + OFFSETOF(pmuregs_t, mac_res_req_mask), + OFFSETOF(pmuregs_t, pmuintmask0), + OFFSETOF(pmuregs_t, pmuintstatus), + OFFSETOF(pmuregs_t, pmuintctrl0), + OFFSETOF(pmuregs_t, extwakeupstatus), + OFFSETOF(pmuregs_t, extwakemask0) +}; + +static const uint BCMPOST_TRAP_RODATA(pmuregsdump_mac_res1)[] = { + OFFSETOF(pmuregs_t, mac_res_req_timer1), + OFFSETOF(pmuregs_t, mac_res_req_mask1) +}; + +static const uint BCMPOST_TRAP_RODATA(pmuregsdump_mac_res2)[] = { + OFFSETOF(pmuregs_t, mac_res_req_timer2), + OFFSETOF(pmuregs_t, mac_res_req_mask2) +}; + +static const uint BCMPOST_TRAP_RODATA(pmuregsdump_pmu_int1)[] = { + OFFSETOF(pmuregs_t, pmuintmask1), + OFFSETOF(pmuregs_t, pmuintctrl1) +}; + +/* Pointer to the location in ROdata where the PMU registers are stored. + * It is good to avoid re-reading PMU registers: 1. reading the registers is slow; + * 2. as part of trap handling these registers are dumped to the RO data section anyway, + * so they can be read directly from the ROdata section and sent to the host. + * These registers will be dumped in RODATA first and then hnd_minidump_pmuegs_dump() + * will pick them up. For it to pick them up, it + * needs to know where they are stored.
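 * To make the layout concrete: the dump is a sequence of tagged groups, each
 * tag word packing a count in the high 16 bits and a base register/selector in
 * the low 16 bits, followed by that group's words (address/value pairs for the
 * direct register lists; other groups carry single values). A minimal
 * host-side walker for the pair-style groups (a hypothetical illustration,
 * not the actual minidump consumer):
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   static void walk_pmucap_dump(const uint32_t *buf, uint32_t nwords)
 *   {
 *       uint32_t i = 0;
 *       while (i < nwords) {
 *           uint32_t tag  = buf[i++];
 *           uint32_t cnt  = tag >> 16;       // number of entries in the group
 *           uint32_t base = tag & 0xFFFFu;   // base register or table selector
 *           uint32_t k;
 *           printf("group: base 0x%04x, %u entries\n",
 *                  (unsigned)base, (unsigned)cnt);
 *           for (k = 0; k < cnt && (i + 1) < nwords; k++, i += 2) {
 *               printf("  0x%08x 0x%08x\n",
 *                      (unsigned)buf[i], (unsigned)buf[i + 1]);
 *           }
 *       }
 *   }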
+ */
+/* Length of the reg dump containing address, value pairs */
+#define SI_PMU_REG_DUMP_BASE_SIZE	(ARRAYSIZE(pmuregsdump) * 2u * sizeof(uint32))
+#define SI_PMU_REG_DUMP_MACRSRC1_SIZE	(ARRAYSIZE(pmuregsdump_mac_res1) * 2u * sizeof(uint32))
+#define SI_PMU_REG_DUMP_MACRSRC2_SIZE	(ARRAYSIZE(pmuregsdump_mac_res2) * 2u * sizeof(uint32))
+#define SI_PMU_REG_DUMP_INTRCV1_SIZE	(ARRAYSIZE(pmuregsdump_pmu_int1) * 2u * sizeof(uint32))
+
+static uint32 *rodata_pmuregdump_ptr = NULL;
+
+/** size of the buffer needed to store the PMU register dump, specifically the PMU indirect registers */
+uint32
+BCMATTACHFN(si_pmu_dump_buf_size_pmucap)(si_t *sih)
+{
+	uint32 buf_size = 0;
+	uint32 pmu_size = 0;
+	uint32 cnt;
+
+	if (PMUREV(sih->pmurev) < 5)
+		return 0;
+
+	/* pmu resources: resource mask and resource updown timer */
+	cnt = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+	if (cnt) {
+		buf_size += (cnt * 2 * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+	}
+	/* pll controls */
+	cnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+	if (cnt) {
+		buf_size += (cnt * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+	}
+
+	/* voltage controls */
+	cnt = (sih->pmucaps & PCAP5_VC_MASK) >> PCAP5_VC_SHIFT;
+	if (cnt) {
+		buf_size += (cnt * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+	}
+
+	/* chip controls */
+	cnt = (sih->pmucaps & PCAP5_CC_MASK) >> PCAP5_CC_SHIFT;
+	if (cnt) {
+		buf_size += (cnt * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+	}
+
+	/* include chip common registers from the list */
+	/* cnt indicates how many registers; tag_id 0 says these are address/value pairs */
+	if (ARRAYSIZE(chipc_regs_to_dump)) {
+		buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+		/* address/value pairs */
+		buf_size += sizeof(chipc_regs_to_dump);
+		buf_size += sizeof(chipc_regs_to_dump);
+	}
+
+	/* include PMU registers from the list 'pmuregsdumpXX' */
+	if ((PMUREV(sih->pmurev) > 27) && ARRAYSIZE(pmuregsdump) != 0) {
+		uint8 rsrc_cnt = si_pmu_get_mac_rsrc_req_tmr_cnt(sih);
+		buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+		pmu_size += sizeof(pmuregsdump);
+		if (ARRAYSIZE(pmuregsdump_mac_res1) != 0 && rsrc_cnt > 1) {
+			buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+			pmu_size += sizeof(pmuregsdump_mac_res1);
+		}
+		if (ARRAYSIZE(pmuregsdump_mac_res2) != 0 && rsrc_cnt > 2) {
+			buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+			pmu_size += sizeof(pmuregsdump_mac_res2);
+		}
+		if (ARRAYSIZE(pmuregsdump_pmu_int1) != 0 &&
+			si_pmu_get_pmu_interrupt_rcv_cnt(sih) > 1) {
+			buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+			pmu_size += sizeof(pmuregsdump_pmu_int1);
+		}
+		/* address/value pairs */
+		buf_size += (pmu_size << 1);
+	}
+
+	return buf_size;
+}
+
+/**
+ * Routine to dump the registers into the user-specified buffer
+ * needed to store the PMU register dump, specifically the PMU indirect registers.
+ * The format is sets of: count, base register, register values.
+*/
+uint32
+BCMPOSTTRAPFN(si_pmu_dump_pmucap_binary)(si_t *sih, uchar *p)
+{
+	uint32 cnt, i;
+	osl_t *osh;
+	pmuregs_t *pmu;
+	uint origidx;
+	uint mac_res_cnt;
+	uint pmu_int_rcv_cnt;
+	uint32 pmu_totalsize = 0;
+
+	uint32 *p32 = (uint32 *)p;
+
+	if (PMUREV(sih->pmurev) < 5)
+		return 0;
+
+	origidx = si_coreidx(sih);
+
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	}
+	else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	osh = si_osh(sih);
+
+	cnt = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+	if (cnt) {
+		*p32++ = (cnt << 16 | RSRCTABLEADDR);
+		for (i = 0; i < cnt; i++) {
+			W_REG(osh, &pmu->res_table_sel, i);
+			*p32++ = R_REG(osh, &pmu->res_dep_mask);
+			*p32++ = R_REG(osh, &pmu->res_updn_timer);
+		}
+	}
+
+	cnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+	if (cnt) {
+		*p32++ = (cnt << 16 | PMU_PLL_CONTROL_ADDR);
+		for (i = 0; i < cnt; i++) {
+			*p32++ = si_pmu_pllcontrol(sih, i, 0, 0);
+		}
+	}
+
+	cnt = (sih->pmucaps & PCAP5_VC_MASK) >> PCAP5_VC_SHIFT;
+	if (cnt) {
+		*p32++ = (cnt << 16 | PMU_REG_CONTROL_ADDR);
+		for (i = 0; i < cnt; i++) {
+			*p32++ = si_pmu_vreg_control(sih, i, 0, 0);
+		}
+	}
+	cnt = (sih->pmucaps & PCAP5_CC_MASK) >> PCAP5_CC_SHIFT;
+	if (cnt) {
+		*p32++ = (cnt << 16 | CC_CHIPCTL_ADDR);
+		for (i = 0; i < cnt; i++) {
+			*p32++ = si_pmu_chipcontrol(sih, i, 0, 0);
+		}
+	}
+	if (ARRAYSIZE(chipc_regs_to_dump)) {
+		uint32 *addr;
+		*p32++ = (ARRAYSIZE(chipc_regs_to_dump) << 16 | 0);
+		for (i = 0; i < ARRAYSIZE(chipc_regs_to_dump); i++) {
+			addr = (uint32 *)(SI_ENUM_BASE(sih) + chipc_regs_to_dump[i]);
+			*p32++ = (uint32)addr;
+			*p32++ = R_REG(osh, addr);
+		}
+	}
+
+	if ((PMUREV(sih->pmurev) > 27)) {
+		volatile uint32 *addr;
+		*p32++ = (ARRAYSIZE(pmuregsdump) << 16 | 1);
+		for (i = 0; i < ARRAYSIZE(pmuregsdump); i++) {
+			addr = (volatile uint32*)((volatile char*)pmu + pmuregsdump[i]);
+			*p32++ = (uint32)addr;
+			*p32++ = R_REG(osh, addr);
+		}
+		pmu_totalsize += (ARRAYSIZE(pmuregsdump));
+		mac_res_cnt = si_pmu_get_mac_rsrc_req_tmr_cnt(sih);
+		if (mac_res_cnt > 1) {
+			*p32++ = (ARRAYSIZE(pmuregsdump_mac_res1) << 16 | 1);
+			for (i = 0; i < ARRAYSIZE(pmuregsdump_mac_res1); i++) {
+				addr = (volatile uint32*)((volatile char*)pmu +
+					pmuregsdump_mac_res1[i]);
+				*p32++ = (uint32)addr;
+				*p32++ = R_REG(osh, addr);
+			}
+			pmu_totalsize += (ARRAYSIZE(pmuregsdump_mac_res1));
+		}
+		if (mac_res_cnt > 2) {
+			*p32++ = (ARRAYSIZE(pmuregsdump_mac_res2) << 16 | 1);
+			for (i = 0; i < ARRAYSIZE(pmuregsdump_mac_res2); i++) {
+				addr = (volatile uint32*)((volatile char*)pmu +
+					pmuregsdump_mac_res2[i]);
+				*p32++ = (uint32)addr;
+				*p32++ = R_REG(osh, addr);
+			}
+			pmu_totalsize += (ARRAYSIZE(pmuregsdump_mac_res2));
+		}
+		pmu_int_rcv_cnt = si_pmu_get_pmu_interrupt_rcv_cnt(sih);
+		if (pmu_int_rcv_cnt > 1) {
+			*p32++ = (ARRAYSIZE(pmuregsdump_pmu_int1) << 16 | 1);
+			for (i = 0; i < ARRAYSIZE(pmuregsdump_pmu_int1); i++) {
+				addr = (volatile uint32*)((volatile char*)pmu +
+					pmuregsdump_pmu_int1[i]);
+				*p32++ = (uint32)addr;
+				*p32++ = R_REG(osh, addr);
+			}
+			pmu_totalsize += (ARRAYSIZE(pmuregsdump_pmu_int1));
+		}
+		/* Mark the location where these registers are dumped to avoid a re-read in
+		 * trap context.
+		 */
+		rodata_pmuregdump_ptr = (p32 - (2 * pmu_totalsize));
+	}
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+	return 1;
+} /* si_pmu_dump_pmucap_binary */
+
+#endif /* DONGLEBUILD */
+/**
+ * Function to enable the specified resources in the min_mask, along with their
+ * dependencies. It can also be used to bring the device back to its default
+ * min resource mask.
+ */
+int
+si_pmu_min_res_set(si_t *sih, osl_t *osh, uint min_mask, bool set)
+{
+	uint32 min_res, max_res;
+	uint origidx;
+	bcm_int_bitmask_t intr_val;
+	pmuregs_t *pmu;
+
+	/* Block ints and save current core */
+	si_introff(sih, &intr_val);
+
+	/* Remember original core before switch to chipc */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	si_pmu_res_masks(sih, &min_res, &max_res);
+	min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, TRUE);
+
+	/*
+	 * If set is enabled, the resources specified in the min_mask are brought up.
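+	 * (The set path below uses OR_REG, so resource bits already present in
+	 * min_res_mask stay enabled rather than being overwritten.)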
If not set, + * go to the default min_resource of the device. + */ + if (set) { + OR_REG(osh, &pmu->min_res_mask, min_mask); + } else { + min_mask &= ~min_res; + AND_REG(osh, &pmu->min_res_mask, ~min_mask); + } + + si_pmu_wait_for_steady_state(sih, osh, pmu); + + /* Return to original core */ + si_setcoreidx(sih, origidx); + si_intrrestore(sih, &intr_val); + + return min_mask; +} + +void +si_pmu_bt_ldo_pu(si_t *sih, bool up) +{ + si_pmu_regcontrol(sih, PMU_VREG_6, PMU_28NM_VREG6_BTLDO3P3_PU, + (up == TRUE) ? PMU_28NM_VREG6_BTLDO3P3_PU : 0x00); +} + +#ifdef BCM_LDO3P3_SOFTSTART +int si_pmu_ldo3p3_soft_start_wl_get(si_t *sih, osl_t *osh, int *res) +{ + uint32 bt_or_wl = 0u; + return si_pmu_ldo3p3_soft_start_get(sih, osh, bt_or_wl, res); +} + +int si_pmu_ldo3p3_soft_start_bt_get(si_t *sih, osl_t *osh, int *res) +{ + uint32 bt_or_wl = 1u; + return si_pmu_ldo3p3_soft_start_get(sih, osh, bt_or_wl, res); +} + +static int +si_pmu_soft_start_params(si_t *sih, uint32 bt_or_wl, uint *en_reg, uint32 *en_shift, + uint32 *en_mask, uint32 *en_val, uint *val_reg, uint32 *val_shift, uint32 *val_mask) +{ + switch (CHIPID(sih->chip)) { + case BCM4369_CHIP_GRPID: + *en_reg = SOFT_START_EN_REG_4369; + *en_shift = SOFT_START_EN_SHIFT_4369(bt_or_wl); + *en_mask = SOFT_START_EN_MASK_4369; + *en_val = SOFT_START_EN_VALUE_4369; + *val_reg = SLEW_RATE_VALUE_REG_4369; + *val_shift = SLEW_RATE_SHIFT_4369(bt_or_wl); + *val_mask = SLEW_RATE_MASK_4369; + break; + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + *en_reg = SOFT_START_EN_REG_4378; + *en_shift = SOFT_START_EN_SHIFT_4378(bt_or_wl); + *en_mask = SOFT_START_EN_MASK_4378; + *en_val = SOFT_START_EN_VALUE_4378; + *val_reg = SLEW_RATE_VALUE_REG_4378; + *val_shift = SLEW_RATE_SHIFT_4378(bt_or_wl); + *val_mask = SLEW_RATE_MASK_4378; + if (BCM4378_CHIP(sih->chip) && PMUREV(sih->pmurev) == 37) { + *en_val = SOFT_START_EN_VALUE_4378_REV37; + } + break; + case BCM4387_CHIP_GRPID: + if (bt_or_wl == 0) { + return BCME_UNSUPPORTED; + } + *en_reg = SOFT_START_EN_REG_4387; + *en_shift = SOFT_START_EN_SHIFT_4387(bt_or_wl); + *en_mask = SOFT_START_EN_MASK_4387; + *en_val = SOFT_START_EN_VALUE_4387; + *val_reg = SLEW_RATE_VALUE_REG_4387; + *val_shift = SLEW_RATE_SHIFT_4387(bt_or_wl); + *val_mask = SLEW_RATE_MASK_4387; + break; + default: + /* Add support */ + ASSERT(0); + break; + } + return BCME_OK; +} + +static int si_pmu_ldo3p3_soft_start_get(si_t *sih, osl_t *osh, uint32 bt_or_wl, int *res) +{ + uint en_reg = 0, val_reg = 0; + uint32 en_shift = 0, en_mask = 0, en_val = 0, val_shift = 0, val_mask = 0; + uint32 soft_start_en, slew_rate; + int ret = si_pmu_soft_start_params(sih, bt_or_wl, &en_reg, &en_shift, &en_mask, &en_val, + &val_reg, &val_shift, &val_mask); + + if (BCME_OK != ret) { + return ret; + } + soft_start_en = (si_pmu_vreg_control(sih, en_reg, 0, 0) >> en_shift); + soft_start_en &= en_mask; + if (en_val == 0u) { + soft_start_en = !soft_start_en; + } + if (soft_start_en) { + slew_rate = (si_pmu_vreg_control(sih, val_reg, 0, 0) >> val_shift); + slew_rate &= val_mask; + *res = slew_rate; + } else { + *res = -1; + } + return BCME_OK; +} + +int si_pmu_ldo3p3_soft_start_wl_set(si_t *sih, osl_t *osh, uint32 slew_rate) +{ + uint32 bt_or_wl = 0u; + return si_pmu_ldo3p3_soft_start_set(sih, osh, bt_or_wl, slew_rate); +} + +int si_pmu_ldo3p3_soft_start_bt_set(si_t *sih, osl_t *osh, uint32 slew_rate) +{ + uint32 bt_or_wl = 1u; + return si_pmu_ldo3p3_soft_start_set(sih, osh, bt_or_wl, slew_rate); +} + +static int si_pmu_ldo3p3_soft_start_set(si_t *sih, osl_t *osh, uint32 
bt_or_wl, uint32 slew_rate)
+{
+	uint en_reg = 0, val_reg = 0;
+	uint32 en_shift = 0, en_mask = 0, en_val = 0, val_shift = 0, val_mask = 0;
+	int ret = si_pmu_soft_start_params(sih, bt_or_wl, &en_reg, &en_shift, &en_mask, &en_val,
+		&val_reg, &val_shift, &val_mask);
+	uint32 dis_val = en_val ? 0u : 1u;
+
+	if (BCME_OK != ret) {
+		return ret;
+	}
+
+	if (slew_rate != (uint32)(~0u)) {
+
+		/* Without disabling the soft start bit,
+		 * programming a new slew rate value
+		 * doesn't take effect
+		 */
+
+		/* Disable soft start */
+		si_pmu_vreg_control(sih, en_reg, (en_mask << en_shift), (dis_val << en_shift));
+
+		/* Program Slew rate */
+		si_pmu_vreg_control(sih, val_reg, (val_mask << val_shift),
+			((slew_rate & val_mask) << val_shift));
+
+		/* Enable Soft start */
+		si_pmu_vreg_control(sih, en_reg, (en_mask << en_shift), (en_val << en_shift));
+	} else {
+		/* A slew rate value of 0xFFFF is used as a special value
+		 * to disable/reset the soft start feature
+		 */
+
+		/* Disable soft start */
+		si_pmu_vreg_control(sih, en_reg, (en_mask << en_shift), (dis_val << en_shift));
+
+		/* Set slew rate value to zero */
+		si_pmu_vreg_control(sih, val_reg, (val_mask << val_shift), 0u);
+	}
+	return BCME_OK;
+}
+#endif /* BCM_LDO3P3_SOFTSTART */
+
+#ifdef LDO3P3_MIN_RES_MASK
+static bool ldo3p3_min_res_enabled = FALSE;
+/** Set ldo 3.3V mask in the min resources mask register */
+int
+si_pmu_min_res_ldo3p3_set(si_t *sih, osl_t *osh, bool on)
+{
+	uint32 min_mask = 0;
+	uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4369_CHIP_GRPID:
+	case BCM4362_CHIP_GRPID:
+		min_mask = PMURES_BIT(RES4369_LDO3P3_PU);
+		if (on) {
+			si_corereg(sih, coreidx, LHL_REG_OFF(lhl_lp_main_ctl1_adr),
+				BCM_MASK32(23, 0), 0x9E9F9F);
+		} else {
+			si_corereg(sih, coreidx, LHL_REG_OFF(lhl_lp_main_ctl1_adr),
+				BCM_MASK32(23, 0), 0x9E9F97);
+		}
+		break;
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		min_mask = PMURES_BIT(RES4378_LDO3P3_PU);
+		break;
+	default:
+		return BCME_UNSUPPORTED;
+	}
+
+	si_pmu_min_res_set(sih, osh, min_mask, on);
+	ldo3p3_min_res_enabled = on;
+
+	return BCME_OK;
+}
+
+int
+si_pmu_min_res_ldo3p3_get(si_t *sih, osl_t *osh, int *res)
+{
+	*res = (int)ldo3p3_min_res_enabled;
+	return BCME_OK;
+}
+#endif /* LDO3P3_MIN_RES_MASK */
+int
+si_pmu_min_res_otp_pu_set(si_t *sih, osl_t *osh, bool on)
+{
+	uint32 min_mask = 0;
+	rsc_per_chip_t *rsc;
+
+	rsc = si_pmu_get_rsc_positions(sih);
+	if (rsc) {
+		min_mask = PMURES_BIT(rsc->otp_pu);
+	} else {
+		return BCME_UNSUPPORTED;
+	}
+	si_pmu_min_res_set(sih, osh, min_mask, on);
+	return BCME_OK;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+uint32
+BCMPOSTTRAPFN(si_pmu_wake_bit_offset)(si_t *sih)
+{
+	uint32 wakebit;
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4369_CHIP_GRPID:
+		wakebit = PMU_CC2_GCI2_WAKE;
+		break;
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		wakebit = CC2_4378_GCI2WAKE_MASK;
+		break;
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+		wakebit = CC2_4387_GCI2WAKE_MASK;
+		break;
+	case BCM4388_CHIP_GRPID:
+	case BCM4389_CHIP_GRPID:
+	case BCM4397_CHIP_GRPID:
+		wakebit = CC2_4389_GCI2WAKE_MASK;
+		break;
+	default:
+		wakebit = 0;
+		ASSERT(0);
+		break;
+	}
+
+	return wakebit;
+}
+
+#ifdef ATE_BUILD
+void hnd_pmu_clr_int_sts_req_active(osl_t *hnd_osh, si_t *sih)
+{
+	uint32 res_req_timer;
+	pmuregs_t *pmu;
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+	W_REG(hnd_osh, &pmu->pmuintstatus,
+		RSRC_INTR_MASK_TIMER_INT_0);
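+	/* The (void) register reads below are assumed to act as write flushes,
+	 * forcing the posted writes to pmuintstatus and res_req_timer to complete
+	 * before this function returns.
+	 */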
+	(void)R_REG(hnd_osh, &pmu->pmuintstatus);
+	res_req_timer = R_REG(hnd_osh, &pmu->res_req_timer);
+	W_REG(hnd_osh, &pmu->res_req_timer,
+		res_req_timer & ~(PRRT_REQ_ACTIVE << flags_shift));
+	(void)R_REG(hnd_osh, &pmu->res_req_timer);
+}
+#endif /* ATE_BUILD */
+
+void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask)
+{
+	pmuregs_t *pmu;
+	uint origidx;
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	}
+	else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	W_REG(osh, &pmu->min_res_mask, min_res_mask);
+	OSL_DELAY(100);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+}
+
+bool
+si_pmu_cap_fast_lpo(si_t *sih)
+{
+	return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ? TRUE : FALSE;
+}
+
+int
+si_pmu_fast_lpo_disable(si_t *sih)
+{
+	if (!si_pmu_cap_fast_lpo(sih)) {
+		PMU_ERROR(("si_pmu_fast_lpo_disable: No Fast LPO capability\n"));
+		return BCME_ERROR;
+	}
+
+	PMU_REG(sih, pmucontrol_ext,
+		PCTL_EXT_FASTLPO_ENAB |
+		PCTL_EXT_FASTLPO_SWENAB |
+		PCTL_EXT_FASTLPO_PCIE_SWENAB,
+		0);
+	OSL_DELAY(1000);
+	return BCME_OK;
+}
+
+/*
+* 4389B0/C0 - WL and BT turn on WAR,
+* set below bits in PMU chip control 6
+* - global bit[195] / bit[3] - enable legacy pmu_wakeup to make
+* domain 1 (WL) power request
+* - global bit[206] / bit[14] - perst_wake_en
+*/
+void
+si_pmu_dmn1_perst_wakeup(si_t *sih, bool set)
+{
+	if (PMUREV(sih->pmurev) == 40) {
+		if (set) {
+			si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+				(PMU_CC6_ENABLE_DMN1_WAKEUP |
+				PMU_CC6_ENABLE_PMU_WAKEUP_PERST),
+				(PMU_CC6_ENABLE_DMN1_WAKEUP |
+				PMU_CC6_ENABLE_PMU_WAKEUP_PERST));
+		} else {
+			si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+				(PMU_CC6_ENABLE_DMN1_WAKEUP |
+				PMU_CC6_ENABLE_PMU_WAKEUP_PERST),
+				0);
+		}
+	}
+}
+
+#if !defined(BCMDONGLEHOST)
+
+/* write :
+ * TRUE - Programs the PLLCTRL6 with xtal and returns the value written in the pllctrl6 register.
+ * FALSE - returns 0 if the xtal programming is the same as the pllctrl6 register, else returns
+ * the pllctrl6 value. This will not program any register.
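+ * (Reading the math in the function body: if armclk is in MHz and xtal in kHz --
+ * an assumption based on the armclk * 1000 scaling -- then q is the integer part
+ * of armclk * 1000 * P1DIV / xtal, and r reconstructs the fractional part of that
+ * same ratio scaled by PRE_SCALE * POST_SCALE; q and r are then packed into the
+ * NDIV_INT and NDIV_FRAC fields of pll6val.)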
+ */ +static uint32 +si_pmu_pll6val_armclk_calc(osl_t *osh, pmuregs_t *pmu, uint32 armclk, uint32 xtal, bool write) +{ + uint32 q, r; + uint32 xtal_scale; + uint32 pll6val; + if (armclk == 0 || xtal == 0) { + PMU_ERROR((" si_pmu_pll6val_armclk_calc: invalid armclk = %d or xtal = %d\n", + armclk, xtal)); + return 0; + } + q = (armclk * 1000 * PMU4369_PLL6VAL_P1DIV) / xtal; + xtal_scale = xtal / 100; + r = ((armclk * 10 * PMU4369_PLL6VAL_P1DIV * PMU4369_PLL6VAL_PRE_SCALE) / xtal_scale) - + (q * PMU4369_PLL6VAL_PRE_SCALE); + r *= PMU4369_PLL6VAL_POST_SCALE; + + pll6val = (r << PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT) | + (q << PMU4369_PLL1_PC6_NDIV_INT_SHIFT) | PMU4369_PLL6VAL_P1DIV_BIT3_2; + + PMU_MSG(("si_pmu_pll6val_armclk_calc, armclk %d, xtal %d, q %d, r 0x%8x, pll6val 0x%8x\n", + armclk, xtal, q, r, pll6val)); + + if (write) { + W_REG(osh, &pmu->pllcontrol_addr, PMU1_PLL0_PLLCTL6); + W_REG(osh, &pmu->pllcontrol_data, pll6val); + } else { + W_REG(osh, &pmu->pllcontrol_addr, PMU1_PLL0_PLLCTL6); + if (pll6val == R_REG(osh, &pmu->pllcontrol_data)) + return 0; + } + + return pll6val; +} + +static void +BCMATTACHFN(si_pmu_chipcontrol_xtal_settings_4369)(si_t *sih) +{ + +/* 4369 XTAL Bias settings */ +/* + Reg name startup Normal + xtal_bias_adj 0xFF 0x1A + xtal_coresize_nmos 0x3f 0x3f + xtal_coresize_pmos 0x3f 0x3f + xtal_sel_bias_res 0x2 0x6 + xt_res_bypass 0x0 0x1 +*/ + uint32 u32Val; + uint32 u32Mask; + u32Val = (PMU_CC0_4369B0_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL | + PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_VAL); + + u32Mask = (PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK | + PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_MASK); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL0, u32Mask, u32Val); + + u32Val = (PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL); + u32Mask = (PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK); + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, u32Mask, u32Val); + + u32Val = (PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_VAL | + PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_VAL | + PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_VAL); + + u32Mask = (PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_MASK | + PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_MASK | + PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_MASK); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL3, u32Mask, u32Val); + +} + +static void +BCMATTACHFN(si_pmu_chipcontrol_xtal_settings_4362)(si_t *sih) +{ + /* 4369 XTAL Bias settings */ + /* + Reg name startup Normal + xtal_bias_adj 0xFF 0x1A + xtal_coresize_nmos 0x3f 0x3f + xtal_coresize_pmos 0x3f 0x3f + xtal_sel_bias_res 0x2 0x6 + xt_res_bypass 0x0 0x1 + */ + uint32 u32Val; + uint32 u32Mask; + u32Val = (PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL | + PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_VAL); + + u32Mask = (PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK | + PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_MASK); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL0, u32Mask, u32Val); + + u32Val = (PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL); + u32Mask = (PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK); + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, u32Mask, u32Val); + + u32Val = (PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_VAL | + PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_VAL | + PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_VAL); + + u32Mask = (PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_MASK | + PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_MASK | + PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_MASK); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL3, u32Mask, u32Val); + +} + +/* 4378 based on 4369 XTAL Bias settings + * Reg name startup Normal + * xtal_bias_adj 0xFF 0x1A + * xtal_coresize_nmos 0x3f 0x3f + * xtal_coresize_pmos 0x3f 0x3f 
+ * xtal_sel_bias_res		0x2	0x2
+ * xt_res_bypass		0x0	0x2
+ */
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_settings_4378)(si_t *sih)
+{
+	uint32 u32Val;
+	uint32 u32Mask;
+	uint16 xtal_bias_adj;
+	uint8 xtal_bias_adj_otp = 0, xtal_bias_cal_otp_done = 0;
+
+#ifdef XTAL_BIAS_FROM_OTP
+	/* Read the xtal bias cal done bit and the xtal bias from OTP */
+	si_pmu_chipcontrol_xtal_bias_from_otp(sih, &xtal_bias_cal_otp_done, &xtal_bias_adj_otp);
+#endif /* XTAL_BIAS_FROM_OTP */
+
+	/*
+	 * If the xtal_bias_cal_done flag is read as non-zero, write the xtal bias from OTP
+	 * into the PMU control register; otherwise write the default value of 0x1a.
+	 */
+	xtal_bias_adj = (uint16)xtal_bias_adj_otp;
+	xtal_bias_adj = xtal_bias_cal_otp_done != 0 ? (xtal_bias_adj << 6) :
+		PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL;
+
+	u32Val = (xtal_bias_adj | PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_VAL);
+
+	u32Mask = (PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK |
+		PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_MASK);
+
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL0, u32Mask, u32Val);
+
+	u32Val = (PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL);
+	u32Mask = (PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK);
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL2, u32Mask, u32Val);
+
+	u32Val = (PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_VAL |
+		PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_VAL |
+		PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_VAL);
+
+	u32Mask = (PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_MASK |
+		PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_MASK |
+		PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_MASK);
+
+	si_pmu_chipcontrol(sih, PMU_CHIPCTL3, u32Mask, u32Val);
+
+}
+
+#ifdef XTAL_BIAS_FROM_OTP
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_bias_from_otp)(si_t *sih, uint8* flag, uint8* val)
+{
+	uint8 xtal_bias_adj = 0, xtal_bias_cal_otp_done = 0;
+#ifndef BCM_OTP_API
+	uint16 datum, offset;
+	uint8 shift, mask;
+#endif /* !BCM_OTP_API */
+
+	/* Read the XTAL BIAS CAL value from OTP.
+	 * 1) Read the xtal cal done bit and the xtal bias value from OTP.
+	 * 2) OTP memory is zero by default, so the chips which aren't OTP programmed will read a
+	 *    '0' for xtal_bias_cal_otp_done.
+	 */
+#ifdef BCM_OTP_API
+	otp_read_8b_field(sih, BCM_OTP_FLD_XTAL_BIAS_FLAG, &xtal_bias_cal_otp_done);
+	if (xtal_bias_cal_otp_done) {
+		otp_read_8b_field(sih, BCM_OTP_FLD_XTAL_BIAS_ADJ, &xtal_bias_adj);
+	}
+#else
+	si_pmu_chipcontrol_xtal_bias_cal_done_offsets(sih, &offset, &shift, &mask);
+	if (!otp_read_word(sih, offset, &datum)) {
+		xtal_bias_cal_otp_done = ((datum >> shift) & mask);
+	}
+
+	si_pmu_chipcontrol_xtal_bias_val_offsets(sih, &offset, &shift, &mask);
+	if (xtal_bias_cal_otp_done && (!otp_read_word(sih, offset, &datum)))
+	{
+		xtal_bias_adj = ((datum >> shift) & mask);
+	}
+#endif /* BCM_OTP_API */
+	*flag = xtal_bias_cal_otp_done;
+	*val = xtal_bias_adj;
+}
+
+#ifndef BCM_OTP_API
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_bias_cal_done_offsets)(si_t *sih, uint16* wrd_offset,
+	uint8* wrd_shift, uint8* wrd_mask)
+{
+	/* Offset is a 16-bit-aligned address, shift is the starting bit position of the value,
+	 * and mask defines the bitwidth of the value. Each value in the array is for one of the
+	 * cores.
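+	 * (For illustration, under the usual word-addressing convention this implies
+	 * a field starting at absolute OTP bit position B is described by
+	 * wrd_offset = B / 16 and wrd_shift = B % 16, with wrd_mask covering the
+	 * field width -- an assumed reading of the 16-bit-aligned offsets above.)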
+	 */
+	/* XTAL BIAS CAL done 11896 */
+	switch (CHIPID(sih->chip)) {
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		*wrd_offset = OTP_XTAL_BIAS_CAL_DONE_4378_WRD_OFFSET;
+		*wrd_shift = OTP_XTAL_BIAS_CAL_DONE_4378_WRD_SHIFT;
+		*wrd_mask = OTP_XTAL_BIAS_CAL_DONE_4378_WRD_MASK;
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+}
+
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_bias_val_offsets)(si_t *sih, uint16* wrd_offset,
+	uint8* wrd_shift, uint8* wrd_mask)
+{
+	/* Offset is a 16-bit-aligned address, shift is the starting bit position of the value,
+	 * and mask defines the bitwidth of the value. Each value in the array is for one of the
+	 * cores.
+	 */
+	/* XTAL BIAS value 11888 */
+	switch (CHIPID(sih->chip)) {
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		*wrd_offset = OTP_XTAL_BIAS_VAL_4378_WRD_OFFSET;
+		*wrd_shift = OTP_XTAL_BIAS_VAL_4378_WRD_SHIFT;
+		*wrd_mask = OTP_XTAL_BIAS_VAL_4378_WRD_MASK;
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+}
+#endif /* !BCM_OTP_API */
+#endif /* XTAL_BIAS_FROM_OTP */
+
+#endif /* !BCMDONGLEHOST */
+
+#ifdef BCMPMU_STATS
+/*
+ * 8 pmu statistics timer default map
+ *
+ * for CORE_RDY_AUX measure, set as below for timer 6 and 7 instead of CORE_RDY_MAIN.
+ * //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
+ * { SRC_CORE_RDY_AUX, FALSE, TRUE, LEVEL_HIGH},
+ * //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
+ * { SRC_CORE_RDY_AUX, FALSE, TRUE, EDGE_RISE}
+ */
+static pmu_stats_timer_t pmustatstimer[] = {
+	{ SRC_LINK_IN_L12, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//link_in_l12
+	{ SRC_LINK_IN_L23, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//link_in_l23
+	{ SRC_PM_ST_IN_D0, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//pm_st_in_d0
+	{ SRC_PM_ST_IN_D3, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//pm_st_in_d3
+	//deep-sleep duration : pmu_rsrc_state(XTAL_PU)
+	{ SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_LEVEL_LOW},
+	//deep-sleep entry count : pmu_rsrc_state(XTAL_PU)
+	{ SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_EDGE_FALL},
+	//core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
+	{ SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},
+	//core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
+	{ SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_EDGE_RISE}
+};
+
+static void
+si_pmustatstimer_update(osl_t *osh, pmuregs_t *pmu, uint8 timerid)
+{
+	uint32 stats_timer_ctrl;
+
+	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
+	stats_timer_ctrl =
+		((pmustatstimer[timerid].src_num << PMU_ST_SRC_SHIFT) &
+		PMU_ST_SRC_MASK) |
+		((pmustatstimer[timerid].cnt_mode << PMU_ST_CNT_MODE_SHIFT) &
+		PMU_ST_CNT_MODE_MASK) |
+		((pmustatstimer[timerid].enable << PMU_ST_EN_SHIFT) & PMU_ST_EN_MASK) |
+		((pmustatstimer[timerid].int_enable << PMU_ST_INT_EN_SHIFT) & PMU_ST_INT_EN_MASK);
+	W_REG(osh, &pmu->pmu_statstimer_ctrl, stats_timer_ctrl);
+	W_REG(osh, &pmu->pmu_statstimer_N, 0);
+}
+
+void
+si_pmustatstimer_int_enable(si_t *sih)
+{
+	pmuregs_t *pmu;
+	uint origidx;
+	osl_t *osh = si_osh(sih);
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_int_disable(si_t *sih)
+{
+	pmuregs_t *pmu;
+	uint origidx;
+	osl_t *osh = si_osh(sih);
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu =
si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + AND_REG(osh, &pmu->pmuintmask0, ~PMU_INT_STAT_TIMER_INT_MASK); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_init(si_t *sih) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + uint32 core_cap_ext; + uint8 max_stats_timer_num; + int8 i; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + core_cap_ext = R_REG(osh, &pmu->core_cap_ext); + + max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1; + + for (i = 0; i < max_stats_timer_num; i++) { + si_pmustatstimer_update(osh, pmu, i); + } + + OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_dump(si_t *sih) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + uint32 core_cap_ext, pmucapabilities, AlpPeriod, ILPPeriod, pmuintmask0, pmuintstatus; + uint8 max_stats_timer_num, max_stats_timer_src_num; + uint32 stat_timer_ctrl, stat_timer_N; + uint8 i; + uint32 current_time_ms = OSL_SYSUPTIME(); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmucapabilities = R_REG(osh, &pmu->pmucapabilities); + core_cap_ext = R_REG(osh, &pmu->core_cap_ext); + AlpPeriod = R_REG(osh, &pmu->slowclkperiod); + ILPPeriod = R_REG(osh, &pmu->ILPPeriod); + + max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> + PCAP_EXT_ST_NUM_SHIFT) + 1; + max_stats_timer_src_num = ((core_cap_ext & PCAP_EXT_ST_SRC_NUM_MASK) >> + PCAP_EXT_ST_SRC_NUM_SHIFT) + 1; + + pmuintstatus = R_REG(osh, &pmu->pmuintstatus); + pmuintmask0 = R_REG(osh, &pmu->pmuintmask0); + + PMU_ERROR(("si_pmustatstimer_dump : TIME %d\n", current_time_ms)); + + PMU_ERROR(("\tMAX Timer Num %d, MAX Source Num %d\n", + max_stats_timer_num, max_stats_timer_src_num)); + PMU_ERROR(("\tpmucapabilities 0x%8x, core_cap_ext 0x%8x, AlpPeriod 0x%8x, ILPPeriod 0x%8x, " + "pmuintmask0 0x%8x, pmuintstatus 0x%8x, pmurev %d\n", + pmucapabilities, core_cap_ext, AlpPeriod, ILPPeriod, + pmuintmask0, pmuintstatus, PMUREV(sih->pmurev))); + + for (i = 0; i < max_stats_timer_num; i++) { + W_REG(osh, &pmu->pmu_statstimer_addr, i); + stat_timer_ctrl = R_REG(osh, &pmu->pmu_statstimer_ctrl); + stat_timer_N = R_REG(osh, &pmu->pmu_statstimer_N); + PMU_ERROR(("\t Timer %d : control 0x%8x, %d\n", + i, stat_timer_ctrl, stat_timer_N)); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_start(si_t *sih, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].enable = TRUE; + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + OR_REG(osh, &pmu->pmu_statstimer_ctrl, PMU_ST_ENAB << PMU_ST_EN_SHIFT); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_stop(si_t *sih, uint8 timerid) +{ + pmuregs_t 
*pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].enable = FALSE; + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + AND_REG(osh, &pmu->pmu_statstimer_ctrl, ~(PMU_ST_ENAB << PMU_ST_EN_SHIFT)); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_clear(si_t *sih, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + W_REG(osh, &pmu->pmu_statstimer_N, 0); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_clear_overflow(si_t *sih) +{ + uint8 i; + uint32 core_cap_ext; + uint8 max_stats_timer_num; + uint32 timerN; + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + core_cap_ext = R_REG(osh, &pmu->core_cap_ext); + max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1; + + for (i = 0; i < max_stats_timer_num; i++) { + W_REG(osh, &pmu->pmu_statstimer_addr, i); + timerN = R_REG(osh, &pmu->pmu_statstimer_N); + if (timerN == 0xFFFFFFFF) { + PMU_ERROR(("pmustatstimer overflow clear - timerid : %d\n", i)); + si_pmustatstimer_clear(sih, i); + } + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +uint32 +si_pmustatstimer_read(si_t *sih, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + uint32 stats_timer_N; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + stats_timer_N = R_REG(osh, &pmu->pmu_statstimer_N); + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return stats_timer_N; +} + +void +si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].src_num = src_num; + si_pmustatstimer_update(osh, pmu, timerid); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].cnt_mode = cnt_mode; + si_pmustatstimer_update(osh, pmu, timerid); + + /* Return to 
original core */
+	si_setcoreidx(sih, origidx);
+}
+#endif /* BCMPMU_STATS */
+
+#ifdef DONGLEBUILD
+/* Note: this could be called from trap context,
+ * so observe caution. Do NOT ASSERT() in this function.
+ * The len parameter is dual-purpose - on input it is the length of the
+ * buffer provided; on output it is the amount of data written in
+ * bytes.
+ */
+/* This includes address/data pairs.
+ * Note the presence of arg2. arg2 could further define what subset of information
+ * needs to be dumped. Some external entities such as SMD could optionally pass
+ * arg2 to define the subset of information needed.
+ */
+int
+BCMPOSTTRAPFN(si_pmu_regs_in_rodata_dump)(void *sih, void *arg2,
+	uint32 *bufptr, uint16 *len)
+{
+	int rc = BCME_OK;
+	uint16 totalsize = SI_PMU_REG_DUMP_BASE_SIZE;
+
+	if ((bufptr == NULL) || (len == NULL)) {
+		rc = BCME_NOMEM;
+		goto fail;
+	}
+
+	/* Are the PMU registers available in rodata? If not, bail out.
+	 * Avoid a re-read: if the data is not there, there could have been
+	 * an error in reading these regs.
+	 */
+	if (rodata_pmuregdump_ptr == NULL) {
+		rc = BCME_ERROR;
+		goto fail;
+	}
+
+	if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 1) {
+		totalsize += SI_PMU_REG_DUMP_MACRSRC1_SIZE;
+	}
+	if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 2) {
+		totalsize += SI_PMU_REG_DUMP_MACRSRC2_SIZE;
+	}
+	if (si_pmu_get_pmu_interrupt_rcv_cnt(sih) > 1) {
+		totalsize += SI_PMU_REG_DUMP_INTRCV1_SIZE;
+	}
+
+	/* Make sure there is enough space for the address/value pairs */
+	if (len && *len < totalsize) {
+		rc = BCME_BUFTOOSHORT;
+		goto fail;
+	}
+
+	/* Write registers to supplied buffer */
+	/* Note that the dump size (totalsize) needs to be
+	 * a multiple of a word size
+	 */
+	memcpy((uint8*)bufptr, rodata_pmuregdump_ptr, totalsize);
+
+	*len = totalsize;
+fail:
+	return rc;
+
+}
+#endif /* DONGLEBUILD */
+
+/* query the # of mac resource request timers */
+uint
+BCMPOSTTRAPFN(si_pmu_get_mac_rsrc_req_tmr_cnt)(si_t *sih)
+{
+	if (PMUREV(sih->pmurev) >= 26) {
+		uint32 core_cap_ext = PMU_REG(sih, core_cap_ext, 0, 0);
+		uint mac_rsrc_cnt =
+			((core_cap_ext & PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_MASK) >>
+			PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_SHIFT) + 1;
+		return mac_rsrc_cnt;
+	}
+
+	return si_numd11coreunits(sih);
+}
+
+/* query the # of pmu interrupt receivers */
+uint
+BCMPOSTTRAPFN(si_pmu_get_pmu_interrupt_rcv_cnt)(si_t *sih)
+{
+	if (PMUREV(sih->pmurev) >= 26) {
+		uint32 core_cap_ext = PMU_REG(sih, core_cap_ext, 0, 0);
+		uint pmu_intr_rcvr_cnt =
+			((core_cap_ext & PCAP_EXT_PMU_INTR_RCVR_CNT_MASK) >>
+			PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT) + 1;
+		return pmu_intr_rcvr_cnt;
+	}
+
+	return si_numd11coreunits(sih);
+}
+
+#ifdef DONGLEBUILD
+int
+si_pmu_mem_pwr_off(si_t *sih, int core_idx)
+{
+	int ret = BCME_OK;
+
+	if (si_setcore(sih, D11_CORE_ID, core_idx) == NULL) {
+		/* core_idx doesn't exist */
+		return BCME_BADOPTION;
+	}
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4385_CHIP_GRPID:
+	case BCM4387_CHIP_GRPID:
+		if (core_idx == 0) {
+			si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+				(PMU_CC4_4387_MAIN_PD_CBUCK2VDDB_ON |
+				PMU_CC4_4387_MAIN_PD_CBUCK2VDDRET_ON |
+				PMU_CC4_4387_MAIN_PD_MEMLPLDO2VDDB_ON |
+				PMU_CC4_4387_MAIN_PD_MEMLPDLO2VDDRET_ON),
+				0);
+
+			si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+				(PMU_CC13_MAIN_CBUCK2VDDB_OFF |
+				PMU_CC13_MAIN_CBUCK2VDDRET_OFF |
+				PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+				PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF),
+				(PMU_CC13_MAIN_CBUCK2VDDB_OFF |
+				PMU_CC13_MAIN_CBUCK2VDDRET_OFF |
+				PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+				PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF));
+
+			/* LQ settings */
+			si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25,
+ 0xFFFFFFFF, XTAL_LQ_SETTING_4387); + } else if (core_idx == 1) { + si_pmu_chipcontrol(sih, PMU_CHIPCTL4, + (PMU_CC4_4387_AUX_PD_CBUCK2VDDB_ON | + PMU_CC4_4387_AUX_PD_CBUCK2VDDRET_ON | + PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDB_ON | + PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDRET_ON), + 0); + + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_AUX_CBUCK2VDDB_OFF | + PMU_CC13_AUX_CBUCK2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF), + (PMU_CC13_AUX_CBUCK2VDDB_OFF | + PMU_CC13_AUX_CBUCK2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF)); + } else if (core_idx == 2) { + si_pmu_chipcontrol(sih, PMU_CHIPCTL17, + (PMU_CC17_SCAN_CBUCK2VDDB_ON | + PMU_CC17_SCAN_MEMLPLDO2VDDB_ON | + PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON), + 0); + si_pmu_chipcontrol(sih, PMU_CHIPCTL17, + (PMU_CC17_SCAN_CBUCK2VDDB_OFF | + PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF | + PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF), + (PMU_CC17_SCAN_CBUCK2VDDB_OFF | + PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF | + PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF)); + } + break; + + default: + ret = BCME_UNSUPPORTED; + break; + } + + return ret; +} + +int +BCMPOSTTRAPFN(si_pmu_mem_pwr_on)(si_t *sih) +{ + int ret = BCME_OK; + + switch (CHIPID(sih->chip)) { + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_MAIN_CBUCK2VDDB_OFF | + PMU_CC13_MAIN_CBUCK2VDDRET_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | + PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF), + PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF); + si_pmu_chipcontrol(sih, PMU_CHIPCTL13, + (PMU_CC13_AUX_CBUCK2VDDB_OFF | + PMU_CC13_AUX_CBUCK2VDDRET_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | + PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF), + PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF); + si_pmu_chipcontrol(sih, PMU_CHIPCTL17, + (PMU_CC17_SCAN_CBUCK2VDDB_OFF | + PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF | + PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF), + PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF); + + /* HQ settings */ + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25, + 0xFFFFFFFF, XTAL_HQ_SETTING_4387); + break; + + default: + ret = BCME_UNSUPPORTED; + break; + } + + return ret; +} + +void +BCMPOSTTRAPFN(si_pmu_disable_intr_pwrreq)(si_t *sih) +{ + if (MULTIBP_CAP(sih)) { + switch (CHIPID(sih->chip)) { + case BCM4376_CHIP_GRPID: + case BCM4378_CHIP_GRPID: + case BCM4385_CHIP_GRPID: + case BCM4387_CHIP_GRPID: + case BCM4388_CHIP_GRPID: + case BCM4389_CHIP_GRPID: + case BCM4397_CHIP_GRPID: + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU_CC2_CB2WL_INTR_PWRREQ_EN, 0); + si_pmu_chipcontrol(sih, PMU_CHIPCTL6, PMU_CC6_ENABLE_DMN1_WAKEUP, 0); + break; + default: + PMU_ERROR(("si_pmu_disable_intr_pwrreq: add support for this chip!\n")); + OSL_SYS_HALT(); + break; + } + } +} + +void +BCMPOSTTRAPFN(si_pmu_clear_intmask)(si_t *sih) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + uint pmu_intr_recvr_cnt; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + + ASSERT(pmu != NULL); + W_REG(osh, &pmu->pmuintmask0, 0); + + pmu_intr_recvr_cnt = ((R_REG(osh, &pmu->core_cap_ext) & PCAP_EXT_PMU_INTR_RCVR_CNT_MASK) + >> PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT) + 1; + + if (pmu_intr_recvr_cnt > 1) { + W_REG(osh, &pmu->pmuintmask1, 0); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} +#endif /* DONGLEBUILD */ + +int +si_pmu_res_state_pwrsw_main_wait(si_t *sih) +{ + int ret = BCME_OK; + + switch (CHIPID(sih->chip)) { + case BCM4387_CHIP_GRPID: + if 
(PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(RES4387_PWRSW_MAIN)) { + SPINWAIT((PMU_REG(sih, res_state, 0, 0) & + PMURES_BIT(RES4387_PWRSW_MAIN)), 10000); + OSL_DELAY(1000); + } + ret = (PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(RES4387_PWRSW_MAIN)) ? + BCME_ERROR : BCME_OK; + break; + default: + PMU_ERROR(("si_pmu_res_state_pwrsw_main_wait: add support for this chip!\n")); + OSL_SYS_HALT(); + break; + } + + return ret; +} + +int +si_pmu_lvm_csr_update(si_t *sih, bool lvm) +{ + +#ifdef BCMDVFS + if (BCMDVFS_ENAB() && si_dvfs_enable_status(sih)) { + uint32 ndv_volt = lvm ? DVFS_VOLTAGE_NDV : DVFS_VOLTAGE_NDV_NON_LVM; + si_dvfs_set_ndv_voltage(sih, ndv_volt); + } else +#endif /* BCMDVFS */ + { + uint32 cbuck_volt = lvm ? CBUCK_VOLT_SW_DEFAULT_4387 : CBUCK_VOLT_NON_LVM; + si_pmu_vreg_control(sih, PMU_VREG_0, + VREG0_4378_CSR_VOLT_ADJ_PWM_MASK, + cbuck_volt << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT); + } + return BCME_OK; +} + +#if defined(BT_WLAN_REG_ON_WAR) +void +si_pmu_reg_on_war_ext_wake_perst_set(si_t *sih) +{ + uint origidx = si_coreidx(sih); + pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0); + osl_t *osh = si_osh(sih); + + if (PMUREV(sih->pmurev) == 40) { + /* + * set PCIEPerstReq (bit-5) as a wake-up source in + * ExtWakeMask0 (0x760) register + */ + W_REG(osh, &pmu->extwakemask0, PMU_EXT_WAKE_MASK_0_PCIE_PERST); + + /* + * configure the wakemask as "common backplane" resources to + * be up during wake-up in ExtWakeReqMask0 (0x770) register + */ + W_REG(osh, &pmu->extwakereqmask[0], REG_ON_WAR_PMU_EXT_WAKE_REQ_MASK0_VAL); + } + + si_setcoreidx(sih, origidx); +} + +void +si_pmu_reg_on_war_ext_wake_perst_clear(si_t *sih) +{ + uint32 val = 0; + uint origidx = si_coreidx(sih); + pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0); + osl_t *osh = si_osh(sih); + + if (PMUREV(sih->pmurev) == 40) { + /* clear all set bits in ExtWakeupStatus (0x744) register */ + val = R_REG(osh, &pmu->extwakeupstatus); + W_REG(osh, &pmu->extwakeupstatus, val); + } + + si_setcoreidx(sih, origidx); +} +#endif /* BT_WLAN_REG_ON_WAR */ + +void +si_pmu_res_state_wait(si_t *sih, uint rsrc) +{ + SPINWAIT(!(PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(rsrc)), PMU_MAX_TRANSITION_DLY); + ASSERT(PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(rsrc)); +} diff --git a/bcmdhd.101.10.361.x/include/802.11.h b/bcmdhd.101.10.361.x/include/802.11.h new file mode 100755 index 0000000..c1cc979 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/802.11.h @@ -0,0 +1,5920 @@ +/* + * Fundamental types and constants relating to 802.11 + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <ethernet.h>
+#endif
+
+/* Include WPA definitions here for compatibility */
+#include <wpa.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define DOT11_TU_TO_US			1024	/* 802.11 Time Unit is 1024 microseconds */
+#define DOT11_SEC_TO_TU			977u	/* 1000000 / DOT11_TU_TO_US = ~977 TU */
+
+/* Generic 802.11 frame constants */
+#define DOT11_A3_HDR_LEN		24	/* d11 header length with A3 */
+#define DOT11_A4_HDR_LEN		30	/* d11 header length with A4 */
+#define DOT11_MAC_HDR_LEN		DOT11_A3_HDR_LEN	/* MAC header length */
+#define DOT11_FCS_LEN			4u	/* d11 FCS length */
+#define DOT11_ICV_LEN			4	/* d11 ICV length */
+#define DOT11_ICV_AES_LEN		8	/* d11 ICV/AES length */
+#define DOT11_MAX_ICV_AES_LEN		16	/* d11 MAX ICV/AES length */
+#define DOT11_QOS_LEN			2	/* d11 QoS length */
+#define DOT11_HTC_LEN			4	/* d11 HT Control field length */
+
+#define DOT11_KEY_INDEX_SHIFT		6	/* d11 key index shift */
+#define DOT11_IV_LEN			4	/* d11 IV length */
+#define DOT11_IV_TKIP_LEN		8	/* d11 IV TKIP length */
+#define DOT11_IV_AES_OCB_LEN		4	/* d11 IV/AES/OCB length */
+#define DOT11_IV_AES_CCM_LEN		8	/* d11 IV/AES/CCM length */
+#define DOT11_IV_WAPI_LEN		18	/* d11 IV WAPI length */
+/* TODO: Need to change DOT11_IV_MAX_LEN to 18, but currently unable to change as the old
+ * branches are still referencing this component.
+ */
+#define DOT11_IV_MAX_LEN		8	/* maximum iv len for any encryption */
+
+/* Includes MIC */
+#define DOT11_MAX_MPDU_BODY_LEN		2304	/* max MPDU body length */
+/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */
+#define DOT11_MAX_MPDU_LEN		(DOT11_A4_HDR_LEN + \
+					 DOT11_QOS_LEN + \
+					 DOT11_IV_AES_CCM_LEN + \
+					 DOT11_MAX_MPDU_BODY_LEN + \
+					 DOT11_ICV_LEN + \
+					 DOT11_FCS_LEN)	/* d11 max MPDU length */
+
+#define DOT11_MAX_SSID_LEN		32	/* d11 max ssid length */
+
+/* dot11RTSThreshold */
+#define DOT11_DEFAULT_RTS_LEN		2347	/* d11 default RTS length */
+#define DOT11_MAX_RTS_LEN		2347	/* d11 max RTS length */
+
+/* dot11FragmentationThreshold */
+#define DOT11_MIN_FRAG_LEN		256	/* d11 min fragmentation length */
+#define DOT11_MAX_FRAG_LEN		2346	/* Max frag is also limited by aMPDUMaxLength
+						 * of the attached PHY
+						 */
+#define DOT11_DEFAULT_FRAG_LEN		2346	/* d11 default fragmentation length */
+
+/* dot11BeaconPeriod */
+#define DOT11_MIN_BEACON_PERIOD		1	/* d11 min beacon period */
+#define DOT11_MAX_BEACON_PERIOD		0xFFFF	/* d11 max beacon period */
+
+/* dot11DTIMPeriod */
+#define DOT11_MIN_DTIM_PERIOD		1	/* d11 min DTIM period */
+#define DOT11_MAX_DTIM_PERIOD		0xFF	/* d11 max DTIM period */
+
+/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */
+#define DOT11_LLC_SNAP_HDR_LEN		8	/* d11 LLC/SNAP header length */
+/* minimum LLC header length; DSAP, SSAP, 8 bit Control (unnumbered) */
+#define DOT11_LLC_HDR_LEN_MIN		3
+#define DOT11_OUI_LEN			3	/* d11 OUI length */
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT11_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* RFC1042 header used by 802.11 per 802.1H */
+#define RFC1042_HDR_LEN	(ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)	/* RFC1042 header length */
+
+#define SFH_LLC_SNAP_SZ	(RFC1042_HDR_LEN)
+
+#define COPY_SFH_LLCSNAP(dst, src) \
+	do { \
+		*((uint32 *)dst + 0) = *((uint32 *)src + 0); \
+		*((uint32 *)dst + 1) =
*((uint32 *)src + 1); \ + *((uint32 *)dst + 2) = *((uint32 *)src + 2); \ + *((uint32 *)dst + 3) = *((uint32 *)src + 3); \ + *((uint32 *)dst + 4) = *((uint32 *)src + 4); \ + *(uint16 *)((uint32 *)dst + 5) = *(uint16 *)((uint32 *)src + 5); \ + } while (0) + +/* Generic 802.11 MAC header */ +/** + * N.B.: This struct reflects the full 4 address 802.11 MAC header. + * The fields are defined such that the shorter 1, 2, and 3 + * address headers just use the first k fields. + */ +BWL_PRE_PACKED_STRUCT struct dot11_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr a1; /* address 1 */ + struct ether_addr a2; /* address 2 */ + struct ether_addr a3; /* address 3 */ + uint16 seq; /* sequence control */ + struct ether_addr a4; /* address 4 */ +} BWL_POST_PACKED_STRUCT; + +/* Control frames */ + +BWL_PRE_PACKED_STRUCT struct dot11_rts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_RTS_LEN 16 /* d11 RTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTS_LEN 10u /* d11 CTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ack_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACK_LEN 10 /* d11 ACK frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame { + uint16 fc; /* frame control */ + uint16 durid; /* AID */ + struct ether_addr bssid; /* receiver address, STA in AP */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_PS_POLL_LEN 16 /* d11 PS poll frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr bssid; /* transmitter address, STA in AP */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CS_END_LEN 16 /* d11 CF-END frame length */ + +/** + * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling + * category+OUI+vendor specific content ( this can be variable) + */ +BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1040]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t; + +/** generic vendor specific action frame with variable length */ +BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t; + +#define DOT11_ACTION_VS_HDR_LEN 6 + +#define BCM_ACTION_OUI_BYTE0 0x00 +#define BCM_ACTION_OUI_BYTE1 0x90 +#define BCM_ACTION_OUI_BYTE2 0x4c + +/* BA/BAR Control parameters */ +#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 /* normal ack */ +#define DOT11_BA_CTL_POLICY_NOACK 0x0001 /* no ack */ +#define DOT11_BA_CTL_POLICY_MASK 0x0001 /* ack policy mask */ + +#define DOT11_BA_CTL_MTID 0x0002 /* multi tid BA */ +#define DOT11_BA_CTL_COMPRESSED 0x0004 /* compressed bitmap */ + +#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 /* num msdu in bitmap mask */ +#define 
DOT11_BA_CTL_NUMMSDU_SHIFT 6 /* num msdu in bitmap shift */ + +#define DOT11_BA_CTL_TID_MASK 0xF000 /* tid mask */ +#define DOT11_BA_CTL_TID_SHIFT 12 /* tid shift */ + +/** control frame header (BA/BAR) */ +BWL_PRE_PACKED_STRUCT struct dot11_ctl_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTL_HDR_LEN 16 /* control frame hdr len */ + +/** BAR frame payload */ +BWL_PRE_PACKED_STRUCT struct dot11_bar { + uint16 bar_control; /* BAR Control */ + uint16 seqnum; /* Starting Sequence control */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BAR_LEN 4 /* BAR frame payload length */ + +#define DOT11_BA_BITMAP_LEN 128 /* bitmap length */ +#define DOT11_BA_CMP_BITMAP_LEN 8 /* compressed bitmap length */ +/** BA frame payload */ +BWL_PRE_PACKED_STRUCT struct dot11_ba { + uint16 ba_control; /* BA Control */ + uint16 seqnum; /* Starting Sequence control */ + uint8 bitmap[DOT11_BA_BITMAP_LEN]; /* Block Ack Bitmap */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BA_LEN 4 /* BA frame payload len (wo bitmap) */ + +/** Management frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_management_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr da; /* receiver address */ + struct ether_addr sa; /* transmitter address */ + struct ether_addr bssid; /* BSS ID */ + uint16 seq; /* sequence control */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_management_header dot11_management_header_t; +#define DOT11_MGMT_HDR_LEN 24u /* d11 management header length */ + +/* Management frame payloads */ + +BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { + uint32 timestamp[2]; + uint16 beacon_interval; + uint16 capability; + uint8 ies[]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */ +#define DOT11_BCN_PRB_FIXED_LEN 12u /* 802.11 beacon/probe frame fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_auth { + uint16 alg; /* algorithm */ + uint16 seq; /* sequence control */ + uint16 status; /* status code */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */ +#define DOT11_AUTH_SEQ_STATUS_LEN 4 /* length of auth frame without challenge IE and + * without algorithm + */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_REQ_FIXED_LEN 4 /* length of assoc frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ + struct ether_addr ap; /* Current AP address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_REASSOC_REQ_FIXED_LEN 10 /* length of assoc frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp { + uint16 capability; /* capability information */ + uint16 status; /* status code */ + uint16 aid; /* association ID */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_RESP_FIXED_LEN 6 /* length of assoc resp frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_measure { + uint8 category; + uint8 action; + uint8 token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACTION_MEASURE_LEN 3 /* d11 action measurement header length */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width { + uint8 category; + uint8 action; + uint8 
ch_width; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops { + uint8 category; + uint8 action; + uint8 control; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query { + uint8 category; + uint8 action; + uint16 id; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode { + uint8 category; + uint8 action; + uint8 mode; +} BWL_POST_PACKED_STRUCT; + +/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */ +#define DOT11_ACTION_GID_MEMBERSHIP_LEN 8 /* bytes */ +#define DOT11_ACTION_GID_USER_POS_LEN 16 /* bytes */ +BWL_PRE_PACKED_STRUCT struct dot11_action_group_id { + uint8 category; + uint8 action; + uint8 membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN]; + uint8 user_position[DOT11_ACTION_GID_USER_POS_LEN]; +} BWL_POST_PACKED_STRUCT; + +#define SM_PWRSAVE_ENABLE 1 +#define SM_PWRSAVE_MODE 2 + +/* ************* 802.11h related definitions. ************* */ +BWL_PRE_PACKED_STRUCT struct dot11_power_cnst { + uint8 id; + uint8 len; + uint8 power; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cnst dot11_power_cnst_t; + +BWL_PRE_PACKED_STRUCT struct dot11_power_cap { + int8 min; + int8 max; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cap dot11_power_cap_t; + +BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep { + uint8 id; + uint8 len; + uint8 tx_pwr; + uint8 margin; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tpc_rep dot11_tpc_rep_t; +#define DOT11_MNG_IE_TPC_REPORT_SIZE (sizeof(dot11_tpc_rep_t)) +#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */ + +BWL_PRE_PACKED_STRUCT struct dot11_supp_channels { + uint8 id; + uint8 len; + uint8 first_channel; + uint8 num_channels; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_supp_channels dot11_supp_channels_t; + +/** + * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband + * offset for 40MHz operation. The possible 3 values are: + * 1 = above control channel + * 3 = below control channel + * 0 = no extension channel + */ +BWL_PRE_PACKED_STRUCT struct dot11_extch { + uint8 id; /* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */ + uint8 len; /* IE length */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extch dot11_extch_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ + uint8 type; /* type indicates what follows */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t; + +#define BRCM_EXTCH_IE_LEN 5 +#define BRCM_EXTCH_IE_TYPE 53 /* 802.11n ID not yet assigned */ +#define DOT11_EXTCH_IE_LEN 1 +#define DOT11_EXT_CH_MASK 0x03 /* extension channel mask */ +#define DOT11_EXT_CH_UPPER 0x01 /* ext. ch. on upper sb */ +#define DOT11_EXT_CH_LOWER 0x03 /* ext. ch. on lower sb */ +#define DOT11_EXT_CH_NONE 0x00 /* no extension ch. 
*/
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+ uint8 category;
+ uint8 action;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_frmhdr dot11_action_frmhdr_t;
+
+/* Action Field length */
+#define DOT11_ACTION_CATEGORY_LEN 1u
+#define DOT11_ACTION_ACTION_LEN 1u
+#define DOT11_ACTION_DIALOG_TOKEN_LEN 1u
+#define DOT11_ACTION_CAPABILITY_LEN 2u
+#define DOT11_ACTION_STATUS_CODE_LEN 2u
+#define DOT11_ACTION_REASON_CODE_LEN 2u
+#define DOT11_ACTION_TARGET_CH_LEN 1u
+#define DOT11_ACTION_OPER_CLASS_LEN 1u
+
+#define DOT11_ACTION_FRMHDR_LEN 2
+
+/** CSA IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+ uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 mode; /* mode 0 or 1 */
+ uint8 channel; /* channel to switch to */
+ uint8 count; /* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */
+/* CSA mode - 802.11h-2003 $7.3.2.20 */
+#define DOT11_CSA_MODE_ADVISORY 0 /* no DOT11_CSA_MODE_NO_TX restriction imposed */
+#define DOT11_CSA_MODE_NO_TX 1 /* no transmission upon receiving CSA frame. */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+ uint8 category;
+ uint8 action;
+ dot11_chan_switch_ie_t chan_switch_ie; /* for switch IE */
+ dot11_brcm_extch_ie_t extch_ie; /* extension channel offset */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+ uint8 mode; /* mode 0 or 1 */
+ uint8 reg; /* regulatory class */
+ uint8 channel; /* channel to switch to */
+ uint8 count; /* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+
+/** 11n Extended Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+ uint8 id; /* id DOT11_MNG_EXT_CSA_ID */
+ uint8 len; /* length of IE */
+ struct dot11_csa_body b; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN 4 /* length of extended channel switch IE body */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ dot11_ext_csa_ie_t chan_switch_ie; /* for switch IE */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ struct dot11_csa_body b; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+
+/** Wide Bandwidth Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch {
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 channel_width; /* new channel width */
+ uint8 center_frequency_segment_0; /* center frequency segment 0 */
+ uint8 center_frequency_segment_1; /* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t;
+
+#define DOT11_WIDE_BW_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */
+
+/** Channel Switch Wrapper IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper {
+ uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID */
+ uint8 len; /* length of IE */
+ dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t;
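+/* Illustrative sketch (not part of the original header): reacting to a
+ * received CSA element. Mode DOT11_CSA_MODE_NO_TX means the receiver must
+ * suspend transmission until the switch; count gives the number of beacon
+ * intervals remaining. The helper name is hypothetical; bool/TRUE come from
+ * typedefs.h in this tree.
+ */
+static bool
+example_csa_requires_tx_hold(const dot11_chan_switch_ie_t *csa)
+{
+	/* TRUE when the AP asked for silence until the channel switch */
+	return (csa->mode == DOT11_CSA_MODE_NO_TX);
+}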
+/* Proposed wide bandwidth channel IE */
+typedef enum wide_bw_chan_width {
+ WIDE_BW_CHAN_WIDTH_20 = 0,
+ WIDE_BW_CHAN_WIDTH_40 = 1,
+ WIDE_BW_CHAN_WIDTH_80 = 2,
+ WIDE_BW_CHAN_WIDTH_160 = 3,
+ WIDE_BW_CHAN_WIDTH_80_80 = 4
+} wide_bw_chan_width_t;
+
+/** Wide Bandwidth Channel IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel {
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_ID */
+ uint8 len; /* length of IE */
+ uint8 channel_width; /* channel width */
+ uint8 center_frequency_segment_0; /* center frequency segment 0 */
+ uint8 center_frequency_segment_1; /* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel dot11_wide_bw_chan_ie_t;
+
+#define DOT11_WIDE_BW_IE_LEN 3 /* length of IE data, not including 2 byte header */
+/** VHT Transmit Power Envelope IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope {
+ uint8 id; /* id DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID */
+ uint8 len; /* length of IE */
+ uint8 transmit_power_info;
+ uint8 local_max_transmit_power_20;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t;
+
+/* vht transmit power envelope IE length depends on channel width */
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ 1
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ 2
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ 3
+
+/* TPE Transmit Power Information Field */
+#define DOT11_TPE_INFO_MAX_TX_PWR_CNT_MASK 0x07u
+#define DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_MASK 0x38u
+#define DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_SHIFT 3u
+#define DOT11_TPE_INFO_MAX_TX_PWR_CAT_MASK 0xC0u
+#define DOT11_TPE_INFO_MAX_TX_PWR_CAT_SHIFT 6u
+
+/* TPE Transmit Power Information Field Accessors */
+#define DOT11_TPE_INFO_MAX_TX_PWR_CNT(x) \
+ ((x) & DOT11_TPE_INFO_MAX_TX_PWR_CNT_MASK)
+#define DOT11_TPE_INFO_MAX_TX_PWR_INTRPN(x) \
+ (((x) & DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_MASK) >> \
+ DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_SHIFT)
+#define DOT11_TPE_INFO_MAX_TX_PWR_CAT(x) \
+ (((x) & DOT11_TPE_INFO_MAX_TX_PWR_CAT_MASK) >> \
+ DOT11_TPE_INFO_MAX_TX_PWR_CAT_SHIFT)
+
+/* Maximum Transmit Power Interpretation subfield */
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_LOCAL_EIRP 0u
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_LOCAL_EIRP_PSD 1u
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_REG_CLIENT_EIRP 2u
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_REG_CLIENT_EIRP_PSD 3u
+
+/* Maximum Transmit Power category subfield */
+#define DOT11_TPE_MAX_TX_PWR_CAT_DEFAULT 0u
+
+/* Maximum Transmit Power category subfield in US */
+#define DOT11_TPE_MAX_TX_PWR_CAT_US_DEFAULT 0u
+#define DOT11_TPE_MAX_TX_PWR_CAT_US_SUB_DEV 1u
+
+/* Maximum Transmit Power Count subfield values when
+ * Maximum Transmit Power Interpretation subfield is 0 or 2
+ */
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_MHZ 0u
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_40_MHZ 1u
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_40_80_MHZ 2u
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_40_80_160_MHZ 3u
+
+/* Maximum Transmit Power Count subfield values when
+ * Maximum Transmit Power Interpretation subfield is 1 or 3
+ */
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_0 0u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_1 1u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_2 2u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_3 4u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_4 8u
+
+#define DOT11_TPE_MAX_TX_PWR_EIRP_MIN -128 /* 0.5 dB steps */
+#define DOT11_TPE_MAX_TX_PWR_EIRP_MAX 126 /* 0.5 dB steps */
+#define DOT11_TPE_MAX_TX_PWR_EIRP_NO_LIMIT 127 /* 0.5 dB steps */
+
+#define DOT11_TPE_MAX_TX_PWR_PSD_BLOCKED -128
+#define DOT11_TPE_MAX_TX_PWR_PSD_NO_LIMIT 127u
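+/* Illustrative sketch (not part of the original header): splitting the TPE
+ * Transmit Power Information octet with the accessor macros above. The count,
+ * interpretation and category subfields determine how many max transmit power
+ * octets follow and how they are encoded. The helper name is hypothetical.
+ */
+static void
+example_decode_tpe_info(uint8 info, uint8 *pwr_cnt, uint8 *intrpn, uint8 *cat)
+{
+	*pwr_cnt = (uint8)DOT11_TPE_INFO_MAX_TX_PWR_CNT(info);		/* bits 0..2 */
+	*intrpn = (uint8)DOT11_TPE_INFO_MAX_TX_PWR_INTRPN(info);	/* bits 3..5 */
+	*cat = (uint8)DOT11_TPE_INFO_MAX_TX_PWR_CAT(info);		/* bits 6..7 */
+}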
+/** Transmit Power Envelope IE data structure as per 11ax draft */
+BWL_PRE_PACKED_STRUCT struct dot11_transmit_power_envelope {
+ uint8 id; /* id DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID */
+ uint8 len; /* length of IE */
+ uint8 transmit_power_info;
+ uint8 max_transmit_power[]; /* Variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_transmit_power_envelope dot11_transmit_power_envelope_ie_t;
+/* id (1) + len (1) + transmit_power_info(1) + max_transmit_power(1) */
+#define DOT11_TPE_ELEM_MIN_LEN 4u
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+ uint8 id;
+ uint8 len;
+ uint8 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN 1 /* length of OBSS Coexistence INFO IE */
+
+#define DOT11_OBSS_COEX_INFO_REQ 0x01
+#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02
+#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+ uint8 id;
+ uint8 len;
+ uint8 regclass;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 /* fixed length of regclass */
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_COEX 1
+#define DOT11_EXTCAP_LEN_BT 3
+#define DOT11_EXTCAP_LEN_IW 4
+#define DOT11_EXTCAP_LEN_SI 6
+
+#define DOT11_EXTCAP_LEN_TDLS 5
+#define DOT11_11AC_EXTCAP_LEN_TDLS 8
+
+#define DOT11_EXTCAP_LEN_FMS 2
+#define DOT11_EXTCAP_LEN_PROXY_ARP 2
+#define DOT11_EXTCAP_LEN_TFS 3
+#define DOT11_EXTCAP_LEN_WNM_SLEEP 3
+#define DOT11_EXTCAP_LEN_TIMBC 3
+#define DOT11_EXTCAP_LEN_BSSTRANS 3
+#define DOT11_EXTCAP_LEN_DMS 4
+#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6
+#define DOT11_EXTCAP_LEN_TDLS_WBW 8
+#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8
+#define DOT11_EXTCAP_LEN_TWT 10u
+#define DOT11_EXTCAP_LEN_BCN_PROT 11u
+
+/* TDLS Capabilities */
+#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */
+#define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */
+#define DOT11_TDLS_CAP_PEER_PSM 20 /* TDLS Peer PSM support */
+#define DOT11_TDLS_CAP_CH_SW 30 /* TDLS Channel switch */
+#define DOT11_TDLS_CAP_PROH 38 /* TDLS prohibited */
+#define DOT11_TDLS_CAP_CH_SW_PROH 39 /* TDLS Channel switch prohibited */
+#define DOT11_TDLS_CAP_TDLS_WIDER_BW 61 /* TDLS Wider Bandwidth */
+
+#define TDLS_CAP_MAX_BIT 39 /* TDLS max bit defined in ext cap */
+
+/* FIXME: remove redundant DOT11_CAP_SAE_HASH_TO_ELEMENT */
+#define DOT11_CAP_SAE_HASH_TO_ELEMENT 5u /* SAE Hash-to-element support */
+#define DOT11_EXT_RSN_CAP_SAE_H2E 5u /* SAE Hash-to-element support */
+/* FIXME: Use these temporary IDs until ANA assigns IDs */
+#define DOT11_EXT_RSN_CAP_SAE_PK 6u /* SAE-PK support */
+/* Last bit in extended rsn capabilities (RSNXE) */
+#define DOT11_EXT_RSN_CAP_MAX_BIT DOT11_EXT_RSN_CAP_SAE_PK
+
+BWL_PRE_PACKED_STRUCT struct dot11_rsnxe {
+ uint8 id; /* id DOT11_MNG_RSNXE_ID */
+ uint8 len;
+ uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rsnxe dot11_rsnxe_t;
+
+#define RSNXE_CAP_LENGTH_MASK (0x0f)
+#define RSNXE_CAP_LENGTH(cap) ((uint8)(cap) & RSNXE_CAP_LENGTH_MASK)
+#define RSNXE_SET_CAP_LENGTH(cap, len)\
+ (cap = (cap & ~RSNXE_CAP_LENGTH_MASK) | ((uint8)(len) & RSNXE_CAP_LENGTH_MASK))
+
+BWL_PRE_PACKED_STRUCT struct dot11_rejected_groups_ie {
+ uint8 id; /* DOT11_MNG_EXT_ID */
+ uint8 len;
+ uint8 id_ext; /* DOT11_MNG_REJECTED_GROUPS_ID */
+ uint16 groups[];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rejected_groups_ie dot11_rejected_groups_ie_t;
+
+/* 802.11h/802.11k Measurement Request/Report IEs */
+/* Measurement Type field */
+#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_MCDIAGS 10 /* d11 measurement multicast diagnostics */
+#define DOT11_MEASURE_TYPE_CIVICLOC 11 /* d11 measurement location civic */
+#define DOT11_MEASURE_TYPE_LOC_ID 12 /* d11 measurement location identifier */
+#define DOT11_MEASURE_TYPE_DIRCHANQ 13 /* d11 measurement dir channel quality */
+#define DOT11_MEASURE_TYPE_DIRMEAS 14 /* d11 measurement directional */
+#define DOT11_MEASURE_TYPE_DIRSTATS 15 /* d11 measurement directional stats */
+#define DOT11_MEASURE_TYPE_FTMRANGE 16 /* d11 measurement Fine Timing */
+#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */
+
+/* Measurement Request Modes */
+#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */
+#define DOT11_MEASURE_MODE_ENABLE (1<<1) /* d11 measurement enable */
+#define DOT11_MEASURE_MODE_REQUEST (1<<2) /* d11 measurement request */
+#define DOT11_MEASURE_MODE_REPORT (1<<3) /* d11 measurement report */
+#define DOT11_MEASURE_MODE_DUR (1<<4) /* d11 measurement dur mandatory */
+/* Measurement Report Modes */
+#define DOT11_MEASURE_MODE_LATE (1<<0) /* d11 measurement late */
+#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) /* d11 measurement incapable */
+#define DOT11_MEASURE_MODE_REFUSED (1<<2) /* d11 measurement refused */
+/* Basic Measurement Map bits */
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) /* d11 measurement basic map BSS */
+#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) /* d11 measurement map OFDM */
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) /* d11 measurement map unknown */
+#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) /* d11 measurement map radar */
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) /* d11 measurement map unmeasured */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14 /* d11 measurement request IE length */
+/* length of Measure Request IE data not including variable len */
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */
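+/* Illustrative sketch (not part of the original header): filling a CCA
+ * measurement request with dot11_meas_req_t. The helper name is hypothetical;
+ * htol16() (bcmendian.h) and memset() are assumed to be available here. The
+ * tag value 38 is DOT11_MNG_MEASURE_REQUEST_ID, declared further down this
+ * file; the TSF start time is left zeroed purely for illustration.
+ */
+static void
+example_fill_cca_meas_req(dot11_meas_req_t *req, uint8 token, uint8 channel, uint16 dur_tu)
+{
+	req->id = 38;				/* DOT11_MNG_MEASURE_REQUEST_ID */
+	req->len = DOT11_MNG_IE_MREQ_LEN;	/* token..duration = 14 bytes */
+	req->token = token;			/* dialog token chosen by the requester */
+	req->mode = DOT11_MEASURE_MODE_ENABLE;	/* request mode bits */
+	req->type = DOT11_MEASURE_TYPE_CCA;	/* CCA busy-fraction measurement */
+	req->channel = channel;
+	memset(req->start_time, 0, sizeof(req->start_time));
+	req->duration = htol16(dur_tu);		/* TUs, little-endian on the wire */
+}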
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 subject;
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT lci;
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 subject;
+   uint8 type; /* type of civic location */
+   uint8 siu; /* service interval units */
+   uint16 si; /* service interval */
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT civic;
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 subject;
+   uint8 siu; /* service interval units */
+   uint16 si; /* service interval */
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT locid;
+  BWL_PRE_PACKED_STRUCT struct {
+   uint16 max_init_delay; /* maximum random initial delay */
+   uint8 min_ap_count;
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT ftm_range;
+ } BWL_POST_PACKED_STRUCT req;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req_loc dot11_meas_req_loc_t;
+#define DOT11_MNG_IE_MREQ_MIN_LEN 4 /* d11 measurement request IE min length */
+#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN 4 /* d11 measurement request IE LCI fixed length */
+#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN 8 /* d11 measurement request IE civic fixed length */
+#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN 6 /* d11 measurement request IE FTM range fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement {
+ uint8 subelement;
+ uint8 length;
+ uint8 lci_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lci_subelement dot11_lci_subelement_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_colocated_bssid_list_se {
+ uint8 sub_id;
+ uint8 length;
+ uint8 max_bssid_ind; /* MaxBSSID Indicator */
+ struct ether_addr bssid[1]; /* variable */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_colocated_bssid_list_se dot11_colocated_bssid_list_se_t;
+#define DOT11_LCI_COLOCATED_BSSID_LIST_FIXED_LEN 3
+#define DOT11_LCI_COLOCATED_BSSID_SUBELEM_ID 7
+
+BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement {
+ uint8 type; /* type of civic location */
+ uint8 subelement;
+ uint8 length;
+ uint8 civic_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_civic_subelement dot11_civic_subelement_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 channel;
+   uint8 start_time[8];
+   uint16 duration;
+   uint8 map;
+  } BWL_POST_PACKED_STRUCT basic;
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 subelement;
+   uint8 length;
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT lci;
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 type; /* type of civic location */
+   uint8 subelement;
+   uint8 length;
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT civic;
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 exp_tsf[8];
+   uint8 subelement;
+   uint8 length;
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT locid;
+  BWL_PRE_PACKED_STRUCT struct {
+   uint8 entry_count;
+   uint8 data[1];
+  } BWL_POST_PACKED_STRUCT ftm_range;
+  uint8 data[1];
+ } BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+#define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE min length */
+#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 measurement report IE LCI fixed length */
+#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 measurement report IE civic fixed length */
+#define DOT11_MNG_IE_MREP_LOCID_FIXED_LEN 13 /* d11 measurement report IE location id fixed length */
+#define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 measurement report IE basic fixed length */
+#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4
+
+/* length of Measure Report IE data not including variable len */
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement report IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN 12 /* d11 measurement basic report length */
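+/* Illustrative sketch (not part of the original header): checking the map
+ * octet of a received basic measurement report against the map bits defined
+ * above. The helper name is hypothetical; bool/TRUE come from typedefs.h.
+ */
+static bool
+example_basic_rep_channel_unusable(const dot11_meas_rep_basic_t *rep)
+{
+	/* TRUE if radar was seen, or the STA could not measure the channel */
+	return (rep->map & (DOT11_MEASURE_BASIC_MAP_RADAR |
+		DOT11_MEASURE_BASIC_MAP_UNMEAS)) != 0;
+}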
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+ uint8 id;
+ uint8 len;
+ uint8 count; /* TBTTs until beacon interval in quiet starts */
+ uint8 period; /* Beacon intervals between periodic quiet periods */
+ uint16 duration; /* Length of quiet period, in TU's */
+ uint16 offset; /* TU's offset from TBTT in Count field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+ uint8 channel;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+ uint8 id;
+ uint8 len;
+ uint8 eaddr[ETHER_ADDR_LEN];
+ uint8 interval;
+ chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+/* WME Elements */
+#define WME_OUI "\x00\x50\xf2" /* WME OUI */
+#define WME_OUI_LEN 3
+#define WME_OUI_TYPE 2 /* WME type */
+#define WME_TYPE 2 /* WME type, deprecated */
+#define WME_SUBTYPE_IE 0 /* Information Element */
+#define WME_SUBTYPE_PARAM_IE 1 /* Parameter Element */
+#define WME_SUBTYPE_TSPEC 2 /* Traffic Specification */
+#define WME_VER 1 /* WME version */
+
+/* WME Access Category Indices (ACIs) */
+#define AC_BE 0 /* Best Effort */
+#define AC_BK 1 /* Background */
+#define AC_VI 2 /* Video */
+#define AC_VO 3 /* Voice */
+#define AC_COUNT 4 /* number of ACs */
+
+typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */
+
+#define AC_BITMAP_NONE 0x0 /* No ACs */
+#define AC_BITMAP_ALL 0xf /* All ACs */
+#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
+
+/* Management PKT Lifetime indices */
+/* 'WLTEST' flag checks were removed while merging BIS120RC4 to DINGO2 */
+#define MGMT_ALL 0xffff
+#define MGMT_AUTH_LT FC_SUBTYPE_AUTH
+#define MGMT_ASSOC_LT FC_SUBTYPE_ASSOC_REQ
+
+/** WME Information Element (IE) */
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7 /* WME IE length */
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+ uint8 ACI;
+ uint8 ECW;
+ uint16 TXOP; /* stored little endian (ls octet first) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+/** WME Parameter Element (PE) */
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN 24 /* WME Parameter IE length */
+
+/* QoS Info field for IE as sent from AP */
+#define WME_QI_AP_APSD_MASK 0x80 /* U-APSD Supported mask */
+#define WME_QI_AP_APSD_SHIFT 7 /* U-APSD Supported shift */
+#define WME_QI_AP_COUNT_MASK 0x0f /* Parameter set count mask */
+#define WME_QI_AP_COUNT_SHIFT 0 /* Parameter set count shift */
+
+/* QoS Info field for IE as sent from STA */
+#define WME_QI_STA_MAXSPLEN_MASK 0x60 /* Max Service Period Length mask */
+#define WME_QI_STA_MAXSPLEN_SHIFT 5 /* Max Service Period Length shift */
+#define WME_QI_STA_APSD_ALL_MASK 0xf /* APSD all AC bits mask */
+#define WME_QI_STA_APSD_ALL_SHIFT 0 /* APSD all AC bits shift */
+#define WME_QI_STA_APSD_BE_MASK 0x8 /* APSD AC_BE mask */
+#define WME_QI_STA_APSD_BE_SHIFT 3 /* APSD AC_BE shift */
+#define WME_QI_STA_APSD_BK_MASK 0x4 /* APSD AC_BK mask */
+#define WME_QI_STA_APSD_BK_SHIFT 2 /* APSD AC_BK shift */
+#define WME_QI_STA_APSD_VI_MASK 0x2 /* APSD AC_VI mask */
+#define WME_QI_STA_APSD_VI_SHIFT 1 /* APSD AC_VI shift */
+#define WME_QI_STA_APSD_VO_MASK 0x1 /* APSD AC_VO mask */
+#define WME_QI_STA_APSD_VO_SHIFT 0 /* APSD AC_VO shift */
+
+/* ACI */
+#define EDCF_AIFSN_MIN 1 /* AIFSN minimum value */
+#define EDCF_AIFSN_MAX 15 /* AIFSN maximum value */
+#define EDCF_AIFSN_MASK 0x0f /* AIFSN mask */
+#define EDCF_ACM_MASK 0x10 /* ACM mask */
+#define EDCF_ACI_MASK 0x60 /* ACI mask */
+#define EDCF_ACI_SHIFT 5 /* ACI shift */
+#define EDCF_AIFSN_SHIFT 12 /* 4 MSB(0xFFF) in ifs_ctl for AC idx */
+
+/* ECW */
+#define EDCF_ECW_MIN 0 /* cwmin/cwmax exponent minimum value */
+#define EDCF_ECW_MAX 15 /* cwmin/cwmax exponent maximum value */
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK 0x0f /* cwmin exponent form mask */
+#define EDCF_ECWMAX_MASK 0xf0 /* cwmax exponent form mask */
+#define EDCF_ECWMAX_SHIFT 4 /* cwmax exponent form shift */
+
+/* TXOP */
+#define EDCF_TXOP_MIN 0 /* TXOP minimum value */
+#define EDCF_TXOP_MAX 65535 /* TXOP maximum value */
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+
+/* Default BE ACI value for non-WME connection STA */
+#define NON_EDCF_AC_BE_ACI_STA 0x02
+
+/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
+#define EDCF_AC_BE_ACI_STA 0x03 /* STA ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_STA 0xA4 /* STA ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_STA 0x0000 /* STA TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_STA 0x27 /* STA ACI value for background AC */
+#define EDCF_AC_BK_ECW_STA 0xA4 /* STA ECW value for background AC */
+#define EDCF_AC_BK_TXOP_STA 0x0000 /* STA TXOP value for background AC */
+#define EDCF_AC_VI_ACI_STA 0x42 /* STA ACI value for video AC */
+#define EDCF_AC_VI_ECW_STA 0x43 /* STA ECW value for video AC */
+#define EDCF_AC_VI_TXOP_STA 0x005e /* STA TXOP value for video AC */
+#define EDCF_AC_VO_ACI_STA 0x62 /* STA ACI value for voice AC */
+#define EDCF_AC_VO_ECW_STA 0x32 /* STA ECW value for voice AC */
+#define EDCF_AC_VO_TXOP_STA 0x002f /* STA TXOP value for voice AC */
+
+/* Default EDCF parameters that AP uses; WMM draft Table 14 */
+#define EDCF_AC_BE_ACI_AP 0x03 /* AP ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_AP 0x64 /* AP ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_AP 0x0000 /* AP TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_AP 0x27 /* AP ACI value for background AC */
+#define EDCF_AC_BK_ECW_AP 0xA4 /* AP ECW value for background AC */
+#define EDCF_AC_BK_TXOP_AP 0x0000 /* AP TXOP value for background AC */
+#define EDCF_AC_VI_ACI_AP 0x41 /* AP ACI value for video AC */
+#define EDCF_AC_VI_ECW_AP 0x43 /* AP ECW value for video AC */
+#define EDCF_AC_VI_TXOP_AP 0x005e /* AP TXOP value for video AC */
+#define EDCF_AC_VO_ACI_AP 0x61 /* AP ACI value for voice AC */
+#define EDCF_AC_VO_ECW_AP 0x32 /* AP ECW value for voice AC */
+#define EDCF_AC_VO_TXOP_AP 0x002f /* AP TXOP value for voice AC */
+
+/** EDCA Parameter IE */
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN 18 /* EDCA Parameter IE length */
+
+/** QoS Capability IE */
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
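+/* Illustrative sketch (not part of the original header): expanding one
+ * edcf_acparam_t entry into MAC parameters. EDCF_ECW2CW() turns a contention
+ * window exponent into a window size, and EDCF_TXOP2USEC() scales the TXOP
+ * limit (32us units). ltoh16() (bcmendian.h) is assumed; the helper name is
+ * hypothetical.
+ */
+static void
+example_decode_acparam(const edcf_acparam_t *ac, uint *aifsn, uint *cwmin,
+	uint *cwmax, uint *txop_us)
+{
+	*aifsn = ac->ACI & EDCF_AIFSN_MASK;
+	*cwmin = EDCF_ECW2CW(ac->ECW & EDCF_ECWMIN_MASK);
+	*cwmax = EDCF_ECW2CW((ac->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT);
+	*txop_us = EDCF_TXOP2USEC(ltoh16(ac->TXOP));
+}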
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+ uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */
+ uint8 length;
+ uint16 station_count; /* total number of STAs associated */
+ uint8 channel_utilization; /* % of time, normalized to 255, QAP sensed medium busy */
+ uint16 aac; /* available admission capacity */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */
+
+#define WLC_QBSS_LOAD_CHAN_FREE_MAX 0xff /* max for channel free score */
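+/* Illustrative sketch (not part of the original header): scaling the QBSS
+ * load element's channel_utilization (busy time normalized to 255) to a
+ * percentage. station_count and aac are little-endian on the wire and would
+ * need ltoh16() (bcmendian.h) before use. The helper name is hypothetical.
+ */
+static uint
+example_qbss_util_percent(const dot11_qbss_load_ie_t *ie)
+{
+	return ((uint)ie->channel_utilization * 100) / 255;
+}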
+/* Estimated Service Parameters (ESP) IE - 802.11-2016 9.4.2.174 */
+typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie {
+ uint8 id;
+ uint8 length;
+ uint8 id_ext;
+ /* variable len info */
+ uint8 esp_info_lists[];
+} BWL_POST_PACKED_STRUCT dot11_esp_ie_t;
+
+#define DOT11_ESP_IE_HDR_SIZE (OFFSETOF(dot11_esp_ie_t, esp_info_lists))
+
+/* ESP Information list - 802.11-2016 9.4.2.174 */
+typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie_info_list {
+ /* access category, data format, ba win size */
+ uint8 ac_df_baws;
+ /* estimated air time fraction */
+ uint8 eat_frac;
+ /* data PPDU duration target (50us units) */
+ uint8 ppdu_dur;
+} BWL_POST_PACKED_STRUCT dot11_esp_ie_info_list_t;
+
+#define DOT11_ESP_IE_INFO_LIST_SIZE (sizeof(dot11_esp_ie_info_list_t))
+
+#define DOT11_ESP_NBR_INFO_LISTS 4u /* max nbr of esp information lists */
+#define DOT11_ESP_INFO_LIST_AC_BK 0u /* access category of esp information list AC_BK */
+#define DOT11_ESP_INFO_LIST_AC_BE 1u /* access category of esp information list AC_BE */
+#define DOT11_ESP_INFO_LIST_AC_VI 2u /* access category of esp information list AC_VI */
+#define DOT11_ESP_INFO_LIST_AC_VO 3u /* access category of esp information list AC_VO */
+
+#define DOT11_ESP_INFO_LIST_DF_MASK 0x18 /* Data Format Mask */
+#define DOT11_ESP_INFO_LIST_BAWS_MASK 0xE0 /* BA window size mask */
+
+/* nom_msdu_size */
+#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */
+#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */
+
+/* surplus_bandwidth */
+/* Represented as 3 bits of integer, binary point, 13 bits fraction */
+#define INTEGER_SHIFT 13 /* integer shift */
+#define FRACTION_MASK 0x1FFF /* fraction mask */
+
+/** Management Notification Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+ uint8 category; /* DOT11_ACTION_NOTIFICATION */
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4 /* Fixed length */
+
+/** Timeout Interval IE */
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+ uint8 ti_type;
+ uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE 1
+#define TI_TYPE_KEY_LIFETIME 2
+
+#ifndef CISCO_AIRONET_OUI
+#define CISCO_AIRONET_OUI "\x00\x40\x96" /* Cisco AIRONET OUI */
+#endif
+/* QoS FastLane IE. */
+BWL_PRE_PACKED_STRUCT struct ccx_qfl_ie {
+ uint8 id; /* 221, DOT11_MNG_VS_ID */
+ uint8 length; /* 5 */
+ uint8 oui[3]; /* 00:40:96 */
+ uint8 type; /* 11 */
+ uint8 data;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ccx_qfl_ie ccx_qfl_ie_t;
+#define CCX_QFL_IE_TYPE 11
+#define CCX_QFL_ENABLE_SHIFT 5
+#define CCX_QFL_ENALBE (1 << CCX_QFL_ENABLE_SHIFT)
+
+/* WME Action Codes */
+#define WME_ADDTS_REQUEST 0 /* WME ADDTS request */
+#define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */
+#define WME_DELTS_REQUEST 2 /* WME DELTS request */
+
+/* WME Setup Response Status Codes */
+#define WME_ADMISSION_ACCEPTED 0 /* WME admission accepted */
+#define WME_INVALID_PARAMETERS 1 /* WME invalid parameters */
+#define WME_ADMISSION_REFUSED 3 /* WME admission refused */
+
+/* Macro to take a pointer to a beacon or probe response
+ * body and return the char* pointer to the SSID info element
+ */
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+/* Authentication frame payload constants */
+#define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */
+#define DOT11_SHARED_KEY 1 /* d11 shared authentication */
+#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */
+#define DOT11_SAE 3 /* d11 simultaneous authentication of equals */
+#define DOT11_FILS_SKEY 4 /* d11 fils shared key authentication w/o pfs */
+#define DOT11_FILS_SKEY_PFS 5 /* d11 fils shared key authentication w/ pfs */
+#define DOT11_FILS_PKEY 6 /* d11 fils public key authentication */
+#define DOT11_MAX_AUTH_ALG DOT11_FILS_PKEY /* maximum value of an auth alg */
+#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */
+
+/* Frame control macros */
+#define FC_PVER_MASK 0x3 /* PVER mask */
+#define FC_PVER_SHIFT 0 /* PVER shift */
+#define FC_TYPE_MASK 0xC /* type mask */
+#define FC_TYPE_SHIFT 2 /* type shift */
+#define FC_SUBTYPE_MASK 0xF0 /* subtype mask */
+#define FC_SUBTYPE_SHIFT 4 /* subtype shift */
+#define FC_TODS 0x100 /* to DS */
+#define FC_TODS_SHIFT 8 /* to DS shift */
+#define FC_FROMDS 0x200 /* from DS */
+#define FC_FROMDS_SHIFT 9 /* from DS shift */
+#define FC_MOREFRAG 0x400 /* more frag. */
+#define FC_MOREFRAG_SHIFT 10 /* more frag. shift */
+#define FC_RETRY 0x800 /* retry */
+#define FC_RETRY_SHIFT 11 /* retry shift */
+#define FC_PM 0x1000 /* PM */
+#define FC_PM_SHIFT 12 /* PM shift */
+#define FC_MOREDATA 0x2000 /* more data */
+#define FC_MOREDATA_SHIFT 13 /* more data shift */
+#define FC_WEP 0x4000 /* WEP */
+#define FC_WEP_SHIFT 14 /* WEP shift */
+#define FC_ORDER 0x8000 /* order */
+#define FC_ORDER_SHIFT 15 /* order shift */
+
+/* sequence control macros */
+#define SEQNUM_SHIFT 4 /* seq. number shift */
+#define SEQNUM_MAX 0x1000 /* max seqnum + 1 */
+#define FRAGNUM_MASK 0xF /* frag. number mask */
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG 0 /* management type */
+#define FC_TYPE_CTL 1 /* control type */
+#define FC_TYPE_DATA 2 /* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ 0 /* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP 1 /* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ 2 /* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */
+#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */
+#define FC_SUBTYPE_BEACON 8 /* beacon */
+#define FC_SUBTYPE_ATIM 9 /* ATIM */
+#define FC_SUBTYPE_DISASSOC 10 /* disassoc. 
*/ +#define FC_SUBTYPE_AUTH 11 /* authentication */ +#define FC_SUBTYPE_DEAUTH 12 /* de-authentication */ +#define FC_SUBTYPE_ACTION 13 /* action */ +#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */ + +/* Control Subtypes */ +#define FC_SUBTYPE_TRIGGER 2 /* Trigger frame */ +#define FC_SUBTYPE_NDPA 5 /* NDPA */ +#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */ +#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */ +#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */ +#define FC_SUBTYPE_PS_POLL 10 /* PS poll */ +#define FC_SUBTYPE_RTS 11 /* RTS */ +#define FC_SUBTYPE_CTS 12 /* CTS */ +#define FC_SUBTYPE_ACK 13 /* ACK */ +#define FC_SUBTYPE_CF_END 14 /* CF-END */ +#define FC_SUBTYPE_CF_END_ACK 15 /* CF-END ACK */ + +/* Data Subtypes */ +#define FC_SUBTYPE_DATA 0 /* Data */ +#define FC_SUBTYPE_DATA_CF_ACK 1 /* Data + CF-ACK */ +#define FC_SUBTYPE_DATA_CF_POLL 2 /* Data + CF-Poll */ +#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 /* Data + CF-Ack + CF-Poll */ +#define FC_SUBTYPE_NULL 4 /* Null */ +#define FC_SUBTYPE_CF_ACK 5 /* CF-Ack */ +#define FC_SUBTYPE_CF_POLL 6 /* CF-Poll */ +#define FC_SUBTYPE_CF_ACK_POLL 7 /* CF-Ack + CF-Poll */ +#define FC_SUBTYPE_QOS_DATA 8 /* QoS Data */ +#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 /* QoS Data + CF-Ack */ +#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 /* QoS Data + CF-Poll */ +#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 /* QoS Data + CF-Ack + CF-Poll */ +#define FC_SUBTYPE_QOS_NULL 12 /* QoS Null */ +#define FC_SUBTYPE_QOS_CF_POLL 14 /* QoS CF-Poll */ +#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 /* QoS CF-Ack + CF-Poll */ + +/* Data Subtype Groups */ +#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0) +#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0) +#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0) +#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0) +#define FC_SUBTYPE_ANY_PSPOLL(s) (((s) & 10) != 0) + +/* Type/Subtype Combos */ +#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) /* FC kind mask */ + +#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) /* FC kind */ + +#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) /* Subtype from FC */ +#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) /* Type from FC */ + +#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) /* assoc. request */ +#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) /* assoc. response */ +#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) /* reassoc. request */ +#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc. 
response */
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */
+#define FC_ATIM FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM) /* ATIM */
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) /* authentication */
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) /* deauthentication */
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */
+
+#define FC_CTL_TRIGGER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_TRIGGER) /* Trigger frame */
+#define FC_CTL_NDPA FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_NDPA) /* NDPA frame */
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) /* PS poll */
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) /* RTS */
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) /* CTS */
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) /* ACK */
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) /* CF-END */
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) /* CF-END ACK */
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) /* data */
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) /* null data */
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) /* data CF ACK */
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) /* QoS data */
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) /* QoS null */
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT 0 /* QoS priority shift */
+#define QOS_PRIO_MASK 0x0007 /* QoS priority mask */
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) /* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT 0 /* QoS TID shift */
+#define QOS_TID_MASK 0x000f /* QoS TID mask */
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) /* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT 4 /* QoS End of Service Period shift */
+#define QOS_EOSP_MASK 0x0010 /* QoS End of Service Period mask */
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) /* QoS EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK 0 /* Normal Ack */
+#define QOS_ACK_NO_ACK 1 /* No Ack (e.g. mcast) */
+#define QOS_ACK_NO_EXP_ACK 2 /* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK 3 /* Block Ack */
+#define QOS_ACK_SHIFT 5 /* QoS ACK shift */
+#define QOS_ACK_MASK 0x0060 /* QoS ACK mask */
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) /* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT 7 /* AMSDU shift */
+#define QOS_AMSDU_MASK 0x0080 /* AMSDU mask */
+
+/* QOS Mesh Flags */
+#define QOS_MESH_CTL_FLAG 0x0100u // Mesh Control Present
+#define QOS_MESH_PSL_FLAG 0x0200u // Mesh Power Save Level
+#define QOS_MESH_RSPI_FLAG 0x0400u // Mesh RSPI
+
+/* QOS Mesh Accessor macros */
+#define QOS_MESH_CTL(qos) (((qos) & QOS_MESH_CTL_FLAG) != 0)
+#define QOS_MESH_PSL(qos) (((qos) & QOS_MESH_PSL_FLAG) != 0)
+#define QOS_MESH_RSPI(qos) (((qos) & QOS_MESH_RSPI_FLAG) != 0)
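+/* Illustrative sketch (not part of the original header): classifying a frame
+ * from its frame control field and, for QoS data, extracting the TID and ack
+ * policy from the QoS control field. Both fields are little-endian on the
+ * wire; ltoh16() (bcmendian.h) and TRUE/FALSE (typedefs.h) are assumed. The
+ * helper name is hypothetical.
+ */
+static bool
+example_parse_qos_data(uint16 fc_le, uint16 qos_le, uint8 *tid, uint8 *ack_policy)
+{
+	uint16 fc = ltoh16(fc_le);
+
+	if (FC_TYPE(fc) != FC_TYPE_DATA || !FC_SUBTYPE_ANY_QOS(FC_SUBTYPE(fc)))
+		return FALSE;				/* not a QoS data subtype */
+
+	*tid = (uint8)QOS_TID(ltoh16(qos_le));
+	*ack_policy = (uint8)QOS_ACK(ltoh16(qos_le));	/* e.g. QOS_ACK_NORMAL_ACK */
+	return TRUE;
+}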
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN 2 /* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN 2 /* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN 2 /* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN 2 /* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN 6 /* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN 2 /* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN 2 /* d11 management reason length */
+#define DOT11_MNG_AID_LEN 2 /* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN 2 /* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN 8 /* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK 0x3fff /* d11 AID mask */
+#define DOT11_AID_OCTET_VAL_SHIFT 3u /* AID octet value shift */
+#define DOT11_AID_BIT_POS_IN_OCTET 0x07 /* AID bit position in octet */
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED 0 /* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED 1 /* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL 2 /* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING 3 /* Deauthenticated because sending station
+ * is leaving (or has left) IBSS or ESS
+ */
+#define DOT11_RC_INACTIVITY 4 /* Disassociated due to inactivity */
+#define DOT11_RC_BUSY 5 /* Disassociated because AP is unable to handle
+ * all currently associated stations
+ */
+#define DOT11_RC_INVAL_CLASS_2 6 /* Class 2 frame received from
+ * nonauthenticated station
+ */
+#define DOT11_RC_INVAL_CLASS_3 7 /* Class 3 frame received from
+ * nonassociated station
+ */
+#define DOT11_RC_DISASSOC_LEAVING 8 /* Disassociated because sending station is
+ * leaving (or has left) BSS
+ */
+#define DOT11_RC_NOT_AUTH 9 /* Station requesting (re)association is not
+ * authenticated with responding station
+ */
+#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */
+
+/* 12 is unused by STA but could be used by AP/GO */
+#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Mgmt */
+
+/* 13-23 are WPA/802.11i reason codes defined in wpa.h */
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES 34 /* excessive number of frames needing ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP 35 /* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS 36 /* QSTA is leaving the QBSS (or resetting) */
+#define DOT11_RC_BAD_MECHANISM 37 /* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT 39 /* timeout */
+
+#define DOT11_RC_MESH_PEERING_CANCELLED 52
+#define DOT11_RC_MESH_MAX_PEERS 53
+#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN 54
+#define DOT11_RC_MESH_CLOSE_RECVD 55
+#define DOT11_RC_MESH_MAX_RETRIES 56
+#define DOT11_RC_MESH_CONFIRM_TIMEOUT 57
+#define DOT11_RC_MESH_INVALID_GTK 58
+#define DOT11_RC_MESH_INCONSISTENT_PARAMS 59
+
+#define DOT11_RC_MESH_INVALID_SEC_CAP 60
+#define DOT11_RC_MESH_PATHERR_NOPROXYINFO 61
+#define DOT11_RC_MESH_PATHERR_NOFWINFO 62
+#define DOT11_RC_MESH_PATHERR_DSTUNREACH 63
+#define DOT11_RC_MESH_MBSSMAC_EXISTS 64
+#define 
DOT11_RC_MESH_CHANSWITCH_REGREQ 65 +#define DOT11_RC_MESH_CHANSWITCH_UNSPEC 66 + +#define DOT11_RC_POOR_RSSI_CONDITIONS 71 /* Poor RSSI */ +#define DOT11_RC_MAX 71 /* Reason codes > 71 are reserved */ + +#define DOT11_RC_TDLS_PEER_UNREACH 25 +#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26 + +/* Status Codes */ +#define DOT11_SC_SUCCESS 0 /* Successful */ +#define DOT11_SC_FAILURE 1 /* Unspecified failure */ +#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 /* TDLS wakeup schedule rejected but alternative */ + /* schedule provided */ +#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 /* TDLS wakeup schedule rejected */ +#define DOT11_SC_TDLS_SEC_DISABLED 5 /* TDLS Security disabled */ +#define DOT11_SC_LIFETIME_REJ 6 /* Unacceptable lifetime */ +#define DOT11_SC_NOT_SAME_BSS 7 /* Not in same BSS */ +#define DOT11_SC_CAP_MISMATCH 10 /* Cannot support all requested + * capabilities in the Capability + * Information field + */ +#define DOT11_SC_REASSOC_FAIL 11 /* Reassociation denied due to inability + * to confirm that association exists + */ +#define DOT11_SC_ASSOC_FAIL 12 /* Association denied due to reason + * outside the scope of this standard + */ +#define DOT11_SC_AUTH_MISMATCH 13 /* Responding station does not support + * the specified authentication + * algorithm + */ +#define DOT11_SC_AUTH_SEQ 14 /* Received an Authentication frame + * with authentication transaction + * sequence number out of expected + * sequence + */ +#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 /* Authentication rejected because of + * challenge failure + */ +#define DOT11_SC_AUTH_TIMEOUT 16 /* Authentication rejected due to timeout + * waiting for next frame in sequence + */ +#define DOT11_SC_ASSOC_BUSY_FAIL 17 /* Association denied because AP is + * unable to handle additional + * associated stations + */ +#define DOT11_SC_ASSOC_RATE_MISMATCH 18 /* Association denied due to requesting + * station not supporting all of the + * data rates in the BSSBasicRateSet + * parameter + */ +#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 /* Association denied due to requesting + * station not supporting the Short + * Preamble option + */ +#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 /* Association denied due to requesting + * station not supporting the PBCC + * Modulation option + */ +#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 /* Association denied due to requesting + * station not supporting the Channel + * Agility option + */ +#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 /* Association denied because Spectrum + * Management capability is required. + */ +#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 /* Association denied because the info + * in the Power Cap element is + * unacceptable. 
 */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 /* Association denied because the info
+ * in the Supported Channel element is
+ * unacceptable
+ */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 /* Association denied due to requesting
+ * station not supporting the Short Slot
+ * Time option
+ */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26 /* Association denied because requesting station
+ * does not support the DSSS-OFDM option
+ */
+#define DOT11_SC_ASSOC_HT_REQUIRED 27 /* Association denied because the requesting
+ * station does not support HT features
+ */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 /* Association denied due to AP
+ * being unable to reach the R0 Key Holder
+ */
+#define DOT11_SC_ASSOC_TRY_LATER 30 /* Association denied temporarily, try again later
+ */
+#define DOT11_SC_ASSOC_MFP_VIOLATION 31 /* Association denied due to Robust Management
+ * frame policy violation
+ */
+
+#define DOT11_SC_POOR_RSSI_CONDN 34 /* Association denied due to poor RSSI */
+#define DOT11_SC_DECLINED 37 /* request declined */
+#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */
+#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */
+#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */
+#define DOT11_SC_INVALID_MDID 54 /* Association denied due to invalid MDID */
+#define DOT11_SC_INVALID_FTIE 55 /* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED 59 /* ad proto not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ 60 /* no outstanding req */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER 61 /* no response from server */
+#define DOT11_SC_TIMEOUT 62 /* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE 63 /* query rsp too large */
+#define DOT11_SC_SERVER_UNREACHABLE 65 /* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */
+
+#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED 76 /* Anti-clogging token required */
+#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP 77 /* Invalid (unsupported) finite cyclic group */
+#define DOT11_SC_TRANSMIT_FAILURE 79 /* transmission failure */
+
+#define DOT11_SC_TCLAS_RESOURCES_EXHAUSTED 81u /* TCLAS resources exhausted */
+
+#define DOT11_SC_TCLAS_PROCESSING_TERMINATED 97 /* End traffic classification */
+
+#define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting
+ * station does not support VHT features.
+ */
+#define DOT11_SC_UNKNOWN_PASSWORD_IDENTIFIER 123u /* mismatch of password id */
+
+#define DOT11_SC_SAE_HASH_TO_ELEMENT 126u /* SAE Hash-to-element PWE required */
+#define DOT11_SC_SAE_PK 127u /* SAE PK required */
+
+/* Requested TCLAS processing has been terminated by the AP due to insufficient QoS capacity. */
+#define DOT11_SC_TCLAS_PROCESSING_TERMINATED_INSUFFICIENT_QOS 128u
+
+/* Requested TCLAS processing has been terminated by the AP due to conflict with
+ * higher layer QoS policies.
+ */
+#define DOT11_SC_TCLAS_PROCESSING_TERMINATED_POLICY_CONFLICT 129u
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN 2 /* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN 3 /* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT 0 /* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD 1 /* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL 2 /* d11 management TIM BITMAP control */
+#define DOT11_MNG_TIM_PVB 3 /* d11 management TIM PVB */
+
+#define DOT11_MNG_TIM_BITMAP_CTL_BCMC_MASK 0x01 /* Mask for bcmc bit in tim bitmap ctrl */
+#define DOT11_MNG_TIM_BITMAP_CTL_PVBOFF_MASK 0xFE /* Mask for partial virtual bitmap */
+
+/* TLV defines */
+#define TLV_TAG_OFF 0 /* tag offset */
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_BODY_LEN_MAX 255 /* max body length */
+#define TLV_EXT_HDR_LEN 3u /* extended IE header length */
+#define TLV_EXT_BODY_OFF 3u /* extended IE body offset */
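+/* Illustrative sketch (not part of the original header): walking the IEs in a
+ * management frame body with the TLV offsets above. A production version
+ * exists in this tree as bcm_parse_tlvs() (bcmutils.h); this one only shows
+ * the layout. The helper name is hypothetical; NULL comes from typedefs.h.
+ */
+static const uint8 *
+example_find_ie(const uint8 *body, uint body_len, uint8 tag)
+{
+	while (body_len >= TLV_HDR_LEN) {
+		uint8 ie_len = body[TLV_LEN_OFF];
+
+		if (body_len < (uint)(TLV_HDR_LEN + ie_len))
+			break;				/* truncated element */
+		if (body[TLV_TAG_OFF] == tag)
+			return body;			/* points at the tag octet */
+		body += TLV_HDR_LEN + ie_len;
+		body_len -= TLV_HDR_LEN + ie_len;
+	}
+	return NULL;
+}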
+
+/* Management Frame Information Element IDs */
+enum dot11_tag_ids {
+ DOT11_MNG_SSID_ID = 0, /* d11 management SSID id */
+ DOT11_MNG_RATES_ID = 1, /* d11 management rates id */
+ DOT11_MNG_FH_PARMS_ID = 2, /* d11 management FH parameter id */
+ DOT11_MNG_DS_PARMS_ID = 3, /* d11 management DS parameter id */
+ DOT11_MNG_CF_PARMS_ID = 4, /* d11 management CF parameter id */
+ DOT11_MNG_TIM_ID = 5, /* d11 management TIM id */
+ DOT11_MNG_IBSS_PARMS_ID = 6, /* d11 management IBSS parameter id */
+ DOT11_MNG_COUNTRY_ID = 7, /* d11 management country id */
+ DOT11_MNG_HOPPING_PARMS_ID = 8, /* d11 management hopping parameter id */
+ DOT11_MNG_HOPPING_TABLE_ID = 9, /* d11 management hopping table id */
+ DOT11_MNG_FTM_SYNC_INFO_ID = 9, /* 11mc D4.3 */
+ DOT11_MNG_REQUEST_ID = 10, /* d11 management request id */
+ DOT11_MNG_QBSS_LOAD_ID = 11, /* d11 management QBSS Load id */
+ DOT11_MNG_EDCA_PARAM_ID = 12, /* 11E EDCA Parameter id */
+ DOT11_MNG_TSPEC_ID = 13, /* d11 management TSPEC id */
+ DOT11_MNG_TCLAS_ID = 14, /* d11 management TCLAS id */
+ DOT11_MNG_CHALLENGE_ID = 16, /* d11 management challenge id */
+ DOT11_MNG_PWR_CONSTRAINT_ID = 32, /* 11H PowerConstraint */
+ DOT11_MNG_PWR_CAP_ID = 33, /* 11H PowerCapability */
+ DOT11_MNG_TPC_REQUEST_ID = 34, /* 11H TPC Request */
+ DOT11_MNG_TPC_REPORT_ID = 35, /* 11H TPC Report */
+ DOT11_MNG_SUPP_CHANNELS_ID = 36, /* 11H Supported Channels */
+ DOT11_MNG_CHANNEL_SWITCH_ID = 37, /* 11H ChannelSwitch Announcement */
+ DOT11_MNG_MEASURE_REQUEST_ID = 38, /* 11H MeasurementRequest */
+ DOT11_MNG_MEASURE_REPORT_ID = 39, /* 11H MeasurementReport */
+ DOT11_MNG_QUIET_ID = 40, /* 11H Quiet */
+ DOT11_MNG_IBSS_DFS_ID = 41, /* 11H IBSS_DFS */
+ DOT11_MNG_ERP_ID = 42, /* d11 management ERP id */
+ DOT11_MNG_TS_DELAY_ID = 43, /* d11 management TS Delay id */
+ DOT11_MNG_TCLAS_PROC_ID = 44, /* d11 management TCLAS processing id */
+ DOT11_MNG_HT_CAP = 45, /* d11 mgmt HT cap id */
+ DOT11_MNG_QOS_CAP_ID = 46, /* 11E QoS Capability id */
+ DOT11_MNG_NONERP_ID = 47, /* d11 management NON-ERP id */
+ DOT11_MNG_RSN_ID = 48, /* d11 management RSN id */
+ DOT11_MNG_EXT_RATES_ID = 50, /* d11 management ext. rates id */
+ DOT11_MNG_AP_CHREP_ID = 51, /* 11k AP Channel report id */
+ DOT11_MNG_NEIGHBOR_REP_ID = 52, /* 11k & 11v Neighbor report id */
+ DOT11_MNG_RCPI_ID = 53, /* 11k RCPI */
+ DOT11_MNG_MDIE_ID = 54, /* 11r Mobility domain id */
+ DOT11_MNG_FTIE_ID = 55, /* 11r Fast Bss Transition id */
+ DOT11_MNG_FT_TI_ID = 56, /* 11r Timeout Interval id */
+ DOT11_MNG_RDE_ID = 57, /* 11r RIC Data Element id */
+ DOT11_MNG_REGCLASS_ID = 59, /* d11 management regulatory class id */
+ DOT11_MNG_EXT_CSA_ID = 60, /* d11 Extended CSA */
+ DOT11_MNG_HT_ADD = 61, /* d11 mgmt additional HT info */
+ DOT11_MNG_EXT_CHANNEL_OFFSET = 62, /* d11 mgmt ext channel offset */
+ DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID = 63, /* 11k bss average access delay */
+ DOT11_MNG_ANTENNA_ID = 64, /* 11k antenna id */
+ DOT11_MNG_RSNI_ID = 65, /* 11k RSNI id */
+ DOT11_MNG_MEASUREMENT_PILOT_TX_ID = 66, /* 11k measurement pilot tx info id */
+ DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID = 67, /* 11k bss aval admission cap id */
+ DOT11_MNG_BSS_AC_ACCESS_DELAY_ID = 68, /* 11k bss AC access delay id */
+ DOT11_MNG_WAPI_ID = 68, /* d11 management WAPI id */
+ DOT11_MNG_TIME_ADVERTISE_ID = 69, /* 11p time advertisement */
+ DOT11_MNG_RRM_CAP_ID = 70, /* 11k radio measurement capability */
+ DOT11_MNG_MULTIPLE_BSSID_ID = 71, /* 11k multiple BSSID id */
+ DOT11_MNG_HT_BSS_COEXINFO_ID = 72, /* d11 mgmt OBSS Coexistence INFO */
+ DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID = 73, /* d11 mgmt OBSS Intolerant Channel list */
+ DOT11_MNG_HT_OBSS_ID = 74, /* d11 mgmt OBSS HT info */
+ DOT11_MNG_MMIE_ID = 76, /* d11 mgmt MIC IE */
+ DOT11_MNG_NONTRANS_BSSID_CAP_ID = 83, /* 11k nontransmitted BSSID capability */
+ DOT11_MNG_MULTIPLE_BSSIDINDEX_ID = 85, /* 11k multiple BSSID index */
+ DOT11_MNG_FMS_DESCR_ID = 86, /* 11v FMS descriptor */
+ DOT11_MNG_FMS_REQ_ID = 87, /* 11v FMS request id */
+ DOT11_MNG_FMS_RESP_ID = 88, /* 11v FMS response id */
+ DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID = 90, /* 11v bss max idle id */
+ DOT11_MNG_TFS_REQUEST_ID = 91, /* 11v tfs request id */
+ DOT11_MNG_TFS_RESPONSE_ID = 92, /* 11v tfs response id */
+ DOT11_MNG_WNM_SLEEP_MODE_ID = 93, /* 11v wnm-sleep mode id */
+ DOT11_MNG_TIMBC_REQ_ID = 94, /* 11v TIM broadcast request id */
+ DOT11_MNG_TIMBC_RESP_ID = 95, /* 11v TIM broadcast response id */
+ DOT11_MNG_CHANNEL_USAGE = 97, /* 11v channel usage */
+ DOT11_MNG_TIME_ZONE_ID = 98, /* 11v time zone */
+ DOT11_MNG_DMS_REQUEST_ID = 99, /* 11v dms request id */
+ DOT11_MNG_DMS_RESPONSE_ID = 100, /* 11v dms response id */
+ DOT11_MNG_LINK_IDENTIFIER_ID = 101, /* 11z TDLS Link Identifier IE */
+ DOT11_MNG_WAKEUP_SCHEDULE_ID = 102, /* 11z TDLS Wakeup Schedule IE */
+ DOT11_MNG_CHANNEL_SWITCH_TIMING_ID = 104, /* 11z TDLS Channel Switch Timing IE */
+ DOT11_MNG_PTI_CONTROL_ID = 105, /* 11z TDLS PTI Control IE */
+ DOT11_MNG_PU_BUFFER_STATUS_ID = 106, /* 11z TDLS PU Buffer Status IE */
+ DOT11_MNG_INTERWORKING_ID = 107, /* 11u interworking */
+ DOT11_MNG_ADVERTISEMENT_ID = 108, /* 11u advertisement protocol */
+ DOT11_MNG_EXP_BW_REQ_ID = 109, /* 11u expedited bandwidth request */
+ DOT11_MNG_QOS_MAP_ID = 110, /* 11u QoS map set */
+ DOT11_MNG_ROAM_CONSORT_ID = 111, /* 11u roaming consortium */
+ DOT11_MNG_EMERGCY_ALERT_ID = 112, /* 11u emergency alert identifier */
+ DOT11_MNG_MESH_CONFIG = 113, /* Mesh Configuration */
+ DOT11_MNG_MESH_ID = 114, /* Mesh ID */
+ DOT11_MNG_MESH_PEER_MGMT_ID = 117, /* Mesh PEER MGMT IE */
+ DOT11_MNG_EXT_CAP_ID = 127, /* d11 mgmt ext capability */
+ DOT11_MNG_EXT_PREQ_ID = 130, /* Mesh PREQ IE */
+ DOT11_MNG_EXT_PREP_ID = 131, /* Mesh PREP IE */
+ DOT11_MNG_EXT_PERR_ID = 132, /* Mesh PERR IE */
+ DOT11_MNG_VHT_CAP_ID = 191, /* d11 mgmt VHT cap id */
+ DOT11_MNG_VHT_OPERATION_ID = 192, /* d11 mgmt VHT op id */
+ DOT11_MNG_EXT_BSSLOAD_ID = 193, /* d11 mgmt VHT extended bss load id */
+ DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID = 194, /* Wide BW Channel Switch IE */
+ DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID = 195, /* VHT transmit Power Envelope IE */
+ DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID = 196, /* Channel Switch Wrapper IE */
+ DOT11_MNG_AID_ID = 197, /* Association ID IE */
+ DOT11_MNG_OPER_MODE_NOTIF_ID = 199, /* d11 mgmt VHT oper mode notif */
+ DOT11_MNG_RNR_ID = 201,
+ /* FIXME: Use these temp. IDs until ANA assigns IDs */
+ DOT11_MNG_FTM_PARAMS_ID = 206, /* mcd3.2/2014 this is not final yet */
+ DOT11_MNG_TWT_ID = 216, /* 11ah D5.0 */
+ DOT11_MNG_WPA_ID = 221, /* d11 management WPA id */
+ DOT11_MNG_PROPR_ID = 221, /* d11 management proprietary id */
+ /* should start using this one instead of above two */
+ DOT11_MNG_VS_ID = 221, /* d11 management Vendor Specific IE */
+ DOT11_MNG_MESH_CSP_ID = 222, /* d11 Mesh Channel Switch Parameter */
+ DOT11_MNG_FILS_IND_ID = 240, /* 11ai FILS Indication element */
+ DOT11_MNG_FRAGMENT_ID = 242, /* IE's fragment ID */
+ DOT11_MNG_RSNXE_ID = 244, /* RSN Extension Element (RSNXE) ID */
+
+ /* The following ID extensions should be defined >= 255
+  * i.e. the values should include 255 (DOT11_MNG_ID_EXT_ID + ID Extension).
+  */
+ DOT11_MNG_ID_EXT_ID = 255 /* Element ID Extension 11mc D4.3 */
+};
+
+/* FILS and OCE ext ids */
+#define FILS_EXTID_MNG_REQ_PARAMS 2u /* FILS Request Parameters element */
+#define DOT11_MNG_FILS_REQ_PARAMS (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_REQ_PARAMS)
+#define FILS_EXTID_MNG_KEY_CONFIRMATION_ID 3u /* FILS Key Confirmation element */
+#define DOT11_MNG_FILS_KEY_CONFIRMATION (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_KEY_CONFIRMATION_ID)
+#define FILS_EXTID_MNG_SESSION_ID 4u /* FILS Session element */
+#define DOT11_MNG_FILS_SESSION (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_SESSION_ID)
+#define FILS_EXTID_MNG_HLP_CONTAINER_ID 5u /* FILS HLP Container element */
+#define DOT11_MNG_FILS_HLP_CONTAINER (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_HLP_CONTAINER_ID)
+#define FILS_EXTID_MNG_KEY_DELIVERY_ID 7u /* FILS Key Delivery element */
+#define DOT11_MNG_FILS_KEY_DELIVERY (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_KEY_DELIVERY_ID)
+#define FILS_EXTID_MNG_WRAPPED_DATA_ID 8u /* FILS Wrapped Data element */
+#define DOT11_MNG_FILS_WRAPPED_DATA (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_WRAPPED_DATA_ID)
+
+#define OCE_EXTID_MNG_ESP_ID 11u /* Estimated Service Parameters element */
+#define DOT11_MNG_ESP (DOT11_MNG_ID_EXT_ID + OCE_EXTID_MNG_ESP_ID)
+#define FILS_EXTID_MNG_PUBLIC_KEY_ID 12u /* FILS Public Key element */
+#define DOT11_MNG_FILS_PUBLIC_KEY (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_PUBLIC_KEY_ID)
+#define FILS_EXTID_MNG_NONCE_ID 13u /* FILS Nonce element */
+#define DOT11_MNG_FILS_NONCE (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_NONCE_ID)
+
+#define EXT_MNG_OWE_DH_PARAM_ID 32u /* OWE DH Param ID - RFC 8110 */
+#define DOT11_MNG_OWE_DH_PARAM_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_OWE_DH_PARAM_ID)
+#define EXT_MSG_PASSWORD_IDENTIFIER_ID 33u /* Password ID EID */
+#define DOT11_MSG_PASSWORD_IDENTIFIER_ID (DOT11_MNG_ID_EXT_ID + \
+ EXT_MSG_PASSWORD_IDENTIFIER_ID)
+#define EXT_MNG_HE_CAP_ID 35u /* HE Capabilities, 11ax */
+#define DOT11_MNG_HE_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_CAP_ID)
+#define EXT_MNG_HE_OP_ID 36u /* HE Operation IE, 11ax */
*/ +#define DOT11_MNG_HE_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_OP_ID) +#define EXT_MNG_UORA_ID 37u /* UORA Parameter Set */ +#define DOT11_MNG_UORA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_UORA_ID) +#define EXT_MNG_MU_EDCA_ID 38u /* MU EDCA Parameter Set */ +#define DOT11_MNG_MU_EDCA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_MU_EDCA_ID) +#define EXT_MNG_SRPS_ID 39u /* Spatial Reuse Parameter Set */ +#define DOT11_MNG_SRPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SRPS_ID) +#define EXT_MNG_BSSCOLOR_CHANGE_ID 42u /* BSS Color Change Announcement */ +#define DOT11_MNG_BSSCOLOR_CHANGE_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_BSSCOLOR_CHANGE_ID) +#define OCV_EXTID_MNG_OCI_ID 54u /* OCI element */ +#define DOT11_MNG_OCI_ID (DOT11_MNG_ID_EXT_ID + OCV_EXT_OCI_ID) +#define EXT_MNG_SHORT_SSID_ID 58u /* SHORT SSID ELEMENT */ +#define DOT11_MNG_SHORT_SSID_LIST_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SHORT_SSID_ID) +#define EXT_MNG_HE_6G_CAP_ID 59u /* HE Extended Capabilities, 11ax */ +#define DOT11_MNG_HE_6G_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_6G_CAP_ID) + +#define MSCS_EXTID_MNG_DESCR_ID 88u /* Ext ID for the MSCS descriptor */ +#define DOT11_MNG_MSCS_DESCR_ID (DOT11_MNG_ID_EXT_ID + MSCS_EXTID_MNG_DESCR_ID) + +#define TCLAS_EXTID_MNG_MASK_ID 89u /* Ext ID for the TCLAS Mask element */ +#define DOT11_MNG_TCLASS_MASK_ID (DOT11_MNG_ID_EXT_ID + TCLAS_EXTID_MNG_MASK_ID) + +#define SAE_EXT_REJECTED_GROUPS_ID 92u /* SAE Rejected Groups element */ +#define DOT11_MNG_REJECTED_GROUPS_ID (DOT11_MNG_ID_EXT_ID + SAE_EXT_REJECTED_GROUPS_ID) +#define SAE_EXT_ANTICLOG_TOKEN_CONTAINER_ID 93u /* SAE Anti-clogging token container */ +#define DOT11_MNG_ANTICLOG_TOKEN_CONTAINER_ID (DOT11_MNG_ID_EXT_ID + \ + SAE_EXT_ANTICLOG_TOKEN_CONTAINER_ID) +#define EXT_MNG_EHT_CAP_ID 100u /* EHT Capabilities IE FIXME */ +#define DOT11_MNG_EHT_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_EHT_CAP_ID) +#define EXT_MNG_EHT_OP_ID 101u /* EHT Operation IE # FIXME */ +#define DOT11_MNG_EHT_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_EHT_OP_ID) + +/* unassigned IDs for ranging parameter elements. To be updated after final + * assignement. + */ +#define DOT11_MNG_FTM_RANGING_EXT_ID 100u /* 11AZ sounding mode parameter element */ +#define DOT11_MNG_FTM_ISTA_AVAIL_EXT_ID 101u /* 11 AZ TN ISTA avaialability window */ +#define DOT11_MNG_FTM_RSTA_AVAIL_EXT_ID 102u /* 11 AZ TN RSTA avaialability window */ +#define DOT11_MNG_FTM_SECURE_LTF_EXT_ID 103u /* 11 AZ Secure LTF parameter element */ + +#define DOT11_FTM_NTB_SUB_ELT_ID 0u /* non-TB ranging parameter sub-element ID */ +#define DOT11_FTM_TB_SUB_ELT_ID 1u /* TB ranging parameter sub-element ID */ + +/* deprecated definitions, do not use, to be deleted later */ +#define FILS_HLP_CONTAINER_EXT_ID FILS_EXTID_MNG_HLP_CONTAINER_ID +#define DOT11_ESP_EXT_ID OCE_EXTID_MNG_ESP_ID +#define FILS_REQ_PARAMS_EXT_ID FILS_EXTID_MNG_REQ_PARAMS +#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */ +#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_RAPS_ID) +/* End of deprecated definitions */ + +#define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\ + ((_ie)->id == DOT11_MNG_ID_EXT_ID) && \ + ((_ie)->len > 0) && \ + ((_id) == ((uint8 *)(_ie) + TLV_HDR_LEN)[0])) + +#define DOT11_MNG_IE_ID_EXT_INIT(_ie, _id, _len) do {\ + (_ie)->id = DOT11_MNG_ID_EXT_ID; \ + (_ie)->len = _len; \ + (_ie)->id_ext = _id; \ + } while (0) + +/* Rate Defines */ + +/* Valid rates for the Supported Rates and Extended Supported Rates IEs. + * Encoding is the rate in 500kbps units, rouding up for fractional values. 
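+
+/* Illustrative usage sketch (not part of the original header; the guard
+ * macro, the ext_ie_hdr layout, and the find_ext_ie() helper are
+ * hypothetical): scanning a raw buffer of IEs for an Element-ID-Extension
+ * element, e.g. the HE Capabilities IE, with DOT11_MNG_IE_ID_EXT_MATCH.
+ * TLV_HDR_LEN is the usual 2-octet id/len header length from bcmutils.h.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+BWL_PRE_PACKED_STRUCT struct ext_ie_hdr {
+	uint8 id;	/* DOT11_MNG_ID_EXT_ID (255) */
+	uint8 len;	/* covers id_ext and the body */
+	uint8 id_ext;	/* e.g. EXT_MNG_HE_CAP_ID */
+} BWL_POST_PACKED_STRUCT;
+typedef struct ext_ie_hdr ext_ie_hdr_t;
+
+static const ext_ie_hdr_t *
+find_ext_ie(const uint8 *buf, uint buflen, uint8 ext_id)
+{
+	while (buflen >= TLV_HDR_LEN) {
+		const ext_ie_hdr_t *ie = (const ext_ie_hdr_t *)buf;
+		uint ie_len = TLV_HDR_LEN + ie->len;
+		if (ie_len > buflen)
+			break;		/* truncated IE, stop parsing */
+		if (DOT11_MNG_IE_ID_EXT_MATCH(ie, ext_id))
+			return ie;	/* found, e.g. the HE cap element */
+		buf += ie_len;
+		buflen -= ie_len;
+	}
+	return NULL;
+}
+#endif /* DOT11_USAGE_EXAMPLES */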
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rounding up for fractional values.
+ * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
+
+#define DOT11_RATE_1M	2	/* 1 Mbps in 500kbps units */
+#define DOT11_RATE_2M	4	/* 2 Mbps in 500kbps units */
+#define DOT11_RATE_5M5	11	/* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M	22	/* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M	12	/* 6 Mbps in 500kbps units */
+#define DOT11_RATE_9M	18	/* 9 Mbps in 500kbps units */
+#define DOT11_RATE_12M	24	/* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M	36	/* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M	48	/* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M	72	/* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M	96	/* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M	108	/* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX	108	/* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * The supported rates octets are defined with the MSB indicating a Basic Rate
+ * and bits 0-6 as the rate value.
+ */
+#define DOT11_RATE_BASIC	0x80	/* flag for a Basic Rate */
+#define DOT11_RATE_MASK		0x7F	/* mask for numeric part of rate */
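+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): decoding one octet of a Supported Rates IE
+ * into its basic-rate flag and a rate in kbps, using only the masks above.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static void
+decode_rate_octet(uint8 rate_octet, bool *is_basic, uint32 *rate_kbps)
+{
+	*is_basic = (rate_octet & DOT11_RATE_BASIC) != 0;
+	/* rate value is in 500 kbps units, e.g. DOT11_RATE_5M5 (11) -> 5500 */
+	*rate_kbps = (uint32)(rate_octet & DOT11_RATE_MASK) * 500u;
+}
+#endif /* DOT11_USAGE_EXAMPLES */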
+
+/* BSS Membership Selector parameters
+ * 802.11-2016 (and 802.11ax-D1.1), Sec 9.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT		0xFF	/* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT	0xFE	/* Basic 0x80 + 126, VHT Required to join */
+#define DOT11_BSS_MEMBERSHIP_HE		0xFD	/* Basic 0x80 + 125, HE Required to join */
+#define DOT11_BSS_SAE_HASH_TO_ELEMENT	123u	/* SAE Hash-to-element Required to join */
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN	1	/* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT	0x01	/* NonERP (802.11b) STAs are present
+						 * in the BSS
+						 */
+#define DOT11_MNG_USE_PROTECTION	0x02	/* Use protection mechanisms for
+						 * ERP-OFDM frames
+						 */
+#define DOT11_MNG_BARKER_PREAMBLE	0x04	/* Short Preambles: 0 == allowed,
+						 * 1 == not allowed
+						 */
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN	4	/* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE	4	/* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS		0x0001	/* d11 cap. ESS */
+#define DOT11_CAP_IBSS		0x0002	/* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE	0x0004	/* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ	0x0008	/* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY	0x0010	/* d11 cap. privacy */
+#define DOT11_CAP_SHORT		0x0020	/* d11 cap. short */
+#define DOT11_CAP_PBCC		0x0040	/* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY	0x0080	/* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM	0x0100	/* d11 cap. spectrum */
+#define DOT11_CAP_QOS		0x0200	/* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT	0x0400	/* d11 cap. shortslot */
+#define DOT11_CAP_APSD		0x0800	/* d11 cap. apsd */
+#define DOT11_CAP_RRM		0x1000	/* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM	0x2000	/* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA	0x4000	/* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA	0x8000	/* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT		0u
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING	2u
+/* scheduled PSMP support bit position */
+#define DOT11_EXT_CAP_SPSMP			6u
+/* Flexible Multicast Service */
+#define DOT11_EXT_CAP_FMS			11u
+/* proxy ARP service support bit position */
+#define DOT11_EXT_CAP_PROXY_ARP			12u
+/* Civic Location */
+#define DOT11_EXT_CAP_CIVIC_LOC			14u
+/* Geospatial Location */
+#define DOT11_EXT_CAP_LCI			15u
+/* Traffic Filter Service */
+#define DOT11_EXT_CAP_TFS			16u
+/* WNM-Sleep Mode */
+#define DOT11_EXT_CAP_WNM_SLEEP			17u
+/* TIM Broadcast service */
+#define DOT11_EXT_CAP_TIMBC			18u
+/* BSS Transition Management support bit position */
+#define DOT11_EXT_CAP_BSSTRANS_MGMT		19u
+/* Multiple BSSID support bit position */
+#define DOT11_EXT_CAP_MULTIBSSID		22u
+/* Direct Multicast Service */
+#define DOT11_EXT_CAP_DMS			26u
+/* Interworking support bit position */
+#define DOT11_EXT_CAP_IW			31u
+/* QoS map support bit position */
+#define DOT11_EXT_CAP_QOS_MAP			32u
+/* Service Interval granularity bit position and mask */
+#define DOT11_EXT_CAP_SI			41u
+#define DOT11_EXT_CAP_SI_MASK			0x0E
+/* Location Identifier service */
+#define DOT11_EXT_CAP_IDENT_LOC			44u
+/* WNM notification */
+#define DOT11_EXT_CAP_WNM_NOTIF			46u
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
+#define DOT11_EXT_CAP_OPER_MODE_NOTIF		62u
+/* Fine timing measurement - D3.0 */
+#define DOT11_EXT_CAP_FTM_RESPONDER		70u
+#define DOT11_EXT_CAP_FTM_INITIATOR		71u	/* tentative 11mcd3.0 */
+#define DOT11_EXT_CAP_FILS			72u	/* FILS Capability */
+/* TWT support */
+#define DOT11_EXT_CAP_TWT_REQUESTER		77u
+#define DOT11_EXT_CAP_TWT_RESPONDER		78u
+#define DOT11_EXT_CAP_OBSS_NB_RU_OFDMA		79u
+/* FIXME: Use these temp. IDs until ANA assigns IDs */
+#define DOT11_EXT_CAP_EMBSS_ADVERTISE		80u
+/* SAE password ID */
+#define DOT11_EXT_CAP_SAE_PWD_ID_INUSE		81u
+#define DOT11_EXT_CAP_SAE_PWD_ID_USED_EXCLUSIVE	82u
+/* Beacon Protection Enabled 802.11 D3.0 - 9.4.2.26
+ * This field is reserved for a STA.
+ */
+#define DOT11_EXT_CAP_BCN_PROT			84u
+
+/* Mirrored SCS (MSCS) support */
+#define DOT11_EXT_CAP_MSCS			85u
+
+/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset.
+ * Note: DOT11_EXT_CAP_MAX_IDX must only be used in the attach path.
+ * It will cause ROM invalidation otherwise.
+ */
+#define DOT11_EXT_CAP_MAX_IDX			85u
+
+/* Remove this hack (DOT11_EXT_CAP_MAX_BIT_IDX) when no one
+ * references DOT11_EXTCAP_LEN_MAX
+ */
+#define DOT11_EXT_CAP_MAX_BIT_IDX		95u	/* !!!update this please!!! */
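+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): testing one of the extended-capability bit
+ * positions above in a raw Extended Capabilities IE body. Bit k lives in
+ * octet k/8 at position k%8, per 802.11 bit numbering.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static bool
+ext_cap_isset(const uint8 *ext_cap, uint ext_cap_len, uint bit)
+{
+	if ((bit >> 3u) >= ext_cap_len)
+		return FALSE;	/* IE too short to carry this bit */
+	return (ext_cap[bit >> 3u] & (1u << (bit & 7u))) != 0;
+}
+/* e.g. ext_cap_isset(ie_body, ie_len, DOT11_EXT_CAP_BSSTRANS_MGMT) */
+#endif /* DOT11_USAGE_EXAMPLES */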
+
+/* Remove DOT11_EXTCAP_LEN_MAX when no one references it */
+/* extended capability */
+#ifndef DOT11_EXTCAP_LEN_MAX
+#define DOT11_EXTCAP_LEN_MAX	((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3)
+#endif
+/* Remove dot11_extcap when no one references it */
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+	uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
+
+/* VHT Operating mode bit fields - (11ac D8.0/802.11-2016 - 9.4.1.53) */
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT	0
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK	0x3
+#define DOT11_OPER_MODE_160_8080_BW_SHIFT	2
+#define DOT11_OPER_MODE_160_8080_BW_MASK	0x04
+#define DOT11_OPER_MODE_NOLDPC_SHIFT		3
+#define DOT11_OPER_MODE_NOLDPC_MASK		0x08
+#define DOT11_OPER_MODE_RXNSS_SHIFT		4
+#define DOT11_OPER_MODE_RXNSS_MASK		0x70
+#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT	7
+#define DOT11_OPER_MODE_RXNSS_TYPE_MASK		0x80
+
+#define DOT11_OPER_MODE_RESET_CHAN_WIDTH_160MHZ(oper_mode) \
+	(oper_mode & (~(DOT11_OPER_MODE_CHANNEL_WIDTH_MASK | \
+		DOT11_OPER_MODE_160_8080_BW_MASK)))
+#define DOT11_OPER_MODE_SET_CHAN_WIDTH_160MHZ(oper_mode) \
+	(oper_mode = (DOT11_OPER_MODE_RESET_CHAN_WIDTH_160MHZ(oper_mode) | \
+		(DOT11_OPER_MODE_80MHZ | DOT11_OPER_MODE_160_8080_BW_MASK)))
+
+#ifdef DOT11_OPER_MODE_LEFT_SHIFT_FIX
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1u) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_D8_OPER_MODE(type, nss, ldpc, bw160_8080, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1u) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((ldpc) << DOT11_OPER_MODE_NOLDPC_SHIFT & DOT11_OPER_MODE_NOLDPC_MASK) |\
+	((bw160_8080) << DOT11_OPER_MODE_160_8080_BW_SHIFT &\
+		DOT11_OPER_MODE_160_8080_BW_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#else
+
+/* avoid invalidation from above fix on release branches, can be removed when older release
+ * branches no longer use component/proto from trunk
+ */
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_D8_OPER_MODE(type, nss, ldpc, bw160_8080, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((ldpc) << DOT11_OPER_MODE_NOLDPC_SHIFT & DOT11_OPER_MODE_NOLDPC_MASK) |\
+	((bw160_8080) << DOT11_OPER_MODE_160_8080_BW_SHIFT &\
+		DOT11_OPER_MODE_160_8080_BW_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#endif /* DOT11_OPER_MODE_LEFT_SHIFT_FIX */
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \
+	(((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\
+		>> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT)
+#define DOT11_OPER_MODE_160_8080(mode) \
+	(((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)\
+		>> DOT11_OPER_MODE_160_8080_BW_SHIFT)
+#define DOT11_OPER_MODE_NOLDPC(mode) \
+	(((mode) & DOT11_OPER_MODE_NOLDPC_MASK)\
+		>> DOT11_OPER_MODE_NOLDPC_SHIFT)
+#define DOT11_OPER_MODE_RXNSS(mode) \
+	((((mode) & DOT11_OPER_MODE_RXNSS_MASK) \
+		>> DOT11_OPER_MODE_RXNSS_SHIFT) + 1)
+#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \
+	(((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\
+		>> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT)
+
+#define DOT11_OPER_MODE_20MHZ		0
+#define DOT11_OPER_MODE_40MHZ		1
+#define DOT11_OPER_MODE_80MHZ		2
+#define DOT11_OPER_MODE_160MHZ		3
+#define DOT11_OPER_MODE_8080MHZ		3
+#define DOT11_OPER_MODE_1608080MHZ	1
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_160_8080_BW_MASK))
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_160_8080_BW_MASK))
+
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
+BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
+	uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t;
+
+#define DOT11_OPER_MODE_NOTIF_IE_LEN	1
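+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): composing an operating mode notification
+ * octet for 2 RX spatial streams at 80 MHz and reading the fields back with
+ * the accessor macros above. BCM_REFERENCE() (bcmdefs.h) only quiets
+ * unused-variable warnings.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static void
+oper_mode_example(void)
+{
+	uint8 mode = DOT11_OPER_MODE(0, 2, DOT11_OPER_MODE_80MHZ);	/* 0x12 */
+	uint rxnss = DOT11_OPER_MODE_RXNSS(mode);		/* 2 streams */
+	uint width = DOT11_OPER_MODE_CHANNEL_WIDTH(mode);	/* DOT11_OPER_MODE_80MHZ */
+	BCM_REFERENCE(rxnss);
+	BCM_REFERENCE(width);
+}
+#endif /* DOT11_USAGE_EXAMPLES */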
+
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT	0x01	/* 20/40 BSS Coexistence Management support */
+
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN		2	/* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF		0	/* category offset */
+#define DOT11_ACTION_ACT_OFF		1	/* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK	0x80	/* category error mask */
+#define DOT11_ACTION_CAT_MASK		0x7F	/* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG	0	/* category spectrum management */
+#define DOT11_ACTION_CAT_QOS		1	/* category QoS */
+#define DOT11_ACTION_CAT_DLS		2	/* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK	3	/* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC		4	/* category public */
+#define DOT11_ACTION_CAT_RRM		5	/* category radio measurements */
+#define DOT11_ACTION_CAT_FBT		6	/* category fast bss transition */
+#define DOT11_ACTION_CAT_HT		7	/* category for HT */
+#define DOT11_ACTION_CAT_SA_QUERY	8	/* security association query */
+#define DOT11_ACTION_CAT_PDPA		9	/* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM		10	/* category for WNM */
+#define DOT11_ACTION_CAT_UWNM		11	/* category for Unprotected WNM */
+#define DOT11_ACTION_CAT_MESH		13	/* category for Mesh */
+#define DOT11_ACTION_CAT_SELFPROT	15	/* category for Mesh, self protected */
+#define DOT11_ACTION_NOTIFICATION	17
+
+#define DOT11_ACTION_RAV_STREAMING	19	/* category for Robust AV streaming:
+						 * SCS, MSCS, etc.
+						 */
+
+#define DOT11_ACTION_CAT_VHT		21	/* VHT action */
+#define DOT11_ACTION_CAT_S1G		22	/* S1G action */
+/* FIXME: Use temp. ID until ANA assigns one */
+#define DOT11_ACTION_CAT_HE		27	/* HE action frame */
+#define DOT11_ACTION_CAT_FILS		26	/* FILS action frame */
+#define DOT11_ACTION_CAT_VSP		126	/* protected vendor specific */
+#define DOT11_ACTION_CAT_VS		127	/* category Vendor Specific */
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ		0	/* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP		1	/* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ		2	/* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP		3	/* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA		5	/* d11 extended CSA for 11n */
+
+/* QoS action ids */
+#define DOT11_QOS_ACTION_ADDTS_REQ	0	/* d11 action ADDTS request */
+#define DOT11_QOS_ACTION_ADDTS_RESP	1	/* d11 action ADDTS response */
+#define DOT11_QOS_ACTION_DELTS		2	/* d11 action DELTS */
+#define DOT11_QOS_ACTION_SCHEDULE	3	/* d11 action schedule */
+#define DOT11_QOS_ACTION_QOS_MAP	4	/* d11 action QOS map */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH	0	/* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS	1	/* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG	0	/* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_PUB_ACTION_VENDOR_SPEC	9	/* Vendor specific */
+#define DOT11_PUB_ACTION_GAS_CB_REQ	12	/* GAS Comeback Request */
+#define DOT11_PUB_ACTION_FTM_REQ	32	/* FTM request */
+#define DOT11_PUB_ACTION_FTM		33	/* FTM measurement */
+/* unassigned value. Will change after final assignment.
+ * for now, use 34 (same as FILS DISC) due to QT/TB/chipsim support from uCode
+ */
+#define DOT11_PUB_ACTION_FTM_LMR	34	/* FTM 11AZ Location Management Report */
+
+#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_START	1u	/* FTM request start trigger */
+#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_STOP	0u	/* FTM request stop trigger */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ	0	/* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP	1	/* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA		2	/* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP	0x0001	/* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK	0x0002	/* policy mask (ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT	1	/* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK	0x003c	/* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT	2	/* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK	0xffc0	/* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT	6	/* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED	0	/* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE	1	/* immediate BA policy */
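+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): packing the ADDBA parameter set for TID 5,
+ * immediate block ack policy, A-MSDU allowed, and a 64-frame reorder buffer,
+ * using only the masks and shifts defined above.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static uint16
+addba_param_set_example(void)
+{
+	uint16 param = 0;
+	param |= DOT11_ADDBA_PARAM_AMSDU_SUP;
+	param |= (DOT11_ADDBA_POLICY_IMMEDIATE << DOT11_ADDBA_PARAM_POLICY_SHIFT) &
+		DOT11_ADDBA_PARAM_POLICY_MASK;
+	param |= (5 << DOT11_ADDBA_PARAM_TID_SHIFT) & DOT11_ADDBA_PARAM_TID_MASK;
+	param |= (64 << DOT11_ADDBA_PARAM_BSIZE_SHIFT) & DOT11_ADDBA_PARAM_BSIZE_MASK;
+	return param;	/* host order; the frame field is little-endian on air */
+}
+#endif /* DOT11_USAGE_EXAMPLES */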
+
+/* Fast Transition action types */
+#define DOT11_FT_ACTION_FT_RESERVED	0
+#define DOT11_FT_ACTION_FT_REQ		1	/* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES		2	/* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON		3	/* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK		4	/* FBT ack */
+
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ		0	/* DLS Request */
+#define DOT11_DLS_ACTION_RESP		1	/* DLS Response */
+#define DOT11_DLS_ACTION_TD		2	/* DLS Teardown */
+
+/* Robust Audio Video streaming action types */
+#define DOT11_RAV_SCS_REQ		0	/* SCS Request */
+#define DOT11_RAV_SCS_RES		1	/* SCS Response */
+#define DOT11_RAV_GM_REQ		2	/* Group Membership Request */
+#define DOT11_RAV_GM_RES		3	/* Group Membership Response */
+#define DOT11_RAV_MSCS_REQ		4	/* MSCS Request */
+#define DOT11_RAV_MSCS_RES		5	/* MSCS Response */
+
+/* Wireless Network Management (WNM) action types */
+#define DOT11_WNM_ACTION_EVENT_REQ		0
+#define DOT11_WNM_ACTION_EVENT_REP		1
+#define DOT11_WNM_ACTION_DIAG_REQ		2
+#define DOT11_WNM_ACTION_DIAG_REP		3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ		4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP		5
+#define DOT11_WNM_ACTION_BSSTRANS_QUERY		6
+#define DOT11_WNM_ACTION_BSSTRANS_REQ		7
+#define DOT11_WNM_ACTION_BSSTRANS_RESP		8
+#define DOT11_WNM_ACTION_FMS_REQ		9
+#define DOT11_WNM_ACTION_FMS_RESP		10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ	11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP	12
+#define DOT11_WNM_ACTION_TFS_REQ		13
+#define DOT11_WNM_ACTION_TFS_RESP		14
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ		15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ		16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP		17
+#define DOT11_WNM_ACTION_TIMBC_REQ		18
+#define DOT11_WNM_ACTION_TIMBC_RESP		19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD	20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ		21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP	22
+#define DOT11_WNM_ACTION_DMS_REQ		23
+#define DOT11_WNM_ACTION_DMS_RESP		24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ	25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ		26
+#define DOT11_WNM_ACTION_NOTFCTN_RESP		27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP	28
+
+/* Unprotected Wireless Network Management (WNM) action types */
+#define DOT11_UWNM_ACTION_TIM			0
+#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT	1
+
+#define DOT11_MNG_COUNTRY_ID_LEN	3
+
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF		0	/* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT	1	/* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2	/* Operating mode notif'n */
+
+/* FILS category action types - 802.11ai D11.0 - 9.6.8.1 */
+#define DOT11_FILS_ACTION_DISCOVERY	34	/* FILS Discovery */
+
+/** DLS Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;			/* DLS action: req (0) */
+	struct ether_addr da;		/* destination address */
+	struct ether_addr sa;		/* source address */
+	uint16 cap;			/* capability */
+	uint16 timeout;			/* timeout value */
+	uint8 data[1];			/* IE: support rate, extended support rate, HT cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN	18	/* Fixed length */
+
+/** DLS response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;			/* DLS action: resp (1) */
+	uint16 status;			/* status code field */
+	struct ether_addr da;		/* destination address */
+	struct ether_addr sa;		/* source address */
+	uint8 data[1];			/* optional: capability, rate ... */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN	16	/* Fixed length */
+
+/* ************* 802.11v related definitions. ************* */
+
+/** BSS Transition Management Query frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_query (6) */
+	uint8 token;			/* dialog token */
+	uint8 reason;			/* transition query reason */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
+#define DOT11_BSSTRANS_QUERY_LEN	4	/* Fixed length */
+
+/* BTM transition reason */
+#define DOT11_BSSTRANS_REASON_UNSPECIFIED		0
+#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS		1
+#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY		2
+#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY	3
+#define DOT11_BSSTRANS_REASON_FIRST_ASSOC		4
+#define DOT11_BSSTRANS_REASON_LOAD_BALANCING		5
+#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND		6
+#define DOT11_BSSTRANS_REASON_DEAUTH_RX			7
+#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL	8
+#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL		9
+#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL	10
+#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL		11
+#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS	12
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX	13
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX	14
+#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL	15
+#define DOT11_BSSTRANS_REASON_LOW_RSSI			16
+#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211	17
+#define DOT11_BSSTRANS_REASON_RX_BTM_REQ		18
+#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED	19
+#define DOT11_BSSTRANS_REASON_LEAVING_ESS		20
+
+/** BSS Transition Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_req (7) */
+	uint8 token;			/* dialog token */
+	uint8 reqmode;			/* transition request mode */
+	uint16 disassoc_tmr;		/* disassociation timer */
+	uint8 validity_intrvl;		/* validity interval */
+	uint8 data[1];			/* optional: BSS term duration, ... */
+					/* ...session info URL, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
+#define DOT11_BSSTRANS_REQ_LEN		7	/* Fixed length */
+#define DOT11_BSSTRANS_REQ_FIXED_LEN	7u	/* Fixed length */
+
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
+#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL		0x01
+#define DOT11_BSSTRANS_REQMODE_ABRIDGED			0x02
+#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT	0x04
+#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL		0x08
+#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT	0x10
+
+/** BSS Transition Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_resp (8) */
+	uint8 token;			/* dialog token */
+	uint8 status;			/* transition status */
+	uint8 term_delay;		/* BSS termination delay */
+	uint8 data[1];			/* optional: BSSID target, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
+#define DOT11_BSSTRANS_RESP_LEN		5	/* Fixed length */
+
+/* BSS Mgmt Transition Response Status Field */
+#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT		0
+#define DOT11_BSSTRANS_RESP_STATUS_REJECT		1
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN	2
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP	3
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED	4
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ	5
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED 6
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS	7
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS	8
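+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): interpreting the request mode field of a
+ * received BSS Transition Management Request using the flag masks above.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static bool
+btm_req_is_disassoc_imminent(const dot11_bsstrans_req_t *req)
+{
+	/* when this flag is set, disassoc_tmr gives the time until disassociation */
+	return (req->reqmode & DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT) != 0;
+}
+#endif /* DOT11_USAGE_EXAMPLES */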
+
+/** BSS Max Idle Period element */
+BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
+	uint8 id;			/* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
+	uint8 len;
+	uint16 max_idle_period;		/* in unit of 1000 TUs */
+	uint8 idle_opt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN	3	/* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED	1	/* BSS max idle option */
+
+/** TIM Broadcast request element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
+	uint8 id;			/* 94, DOT11_MNG_TIMBC_REQ_ID */
+	uint8 len;
+	uint8 interval;			/* in unit of beacon interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
+#define DOT11_TIMBC_REQ_IE_LEN		1	/* Fixed length */
+
+/** TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* TIM broadcast request element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req dot11_timbc_req_t;
+#define DOT11_TIMBC_REQ_LEN		3	/* Fixed length */
+
+/** TIM Broadcast response element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
+	uint8 id;			/* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
+	uint8 len;
+	uint8 status;			/* status of add request */
+	uint8 interval;			/* in unit of beacon interval */
+	int32 offset;			/* in unit of ms */
+	uint16 high_rate;		/* in unit of 0.5 Mb/s */
+	uint16 low_rate;		/* in unit of 0.5 Mb/s */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
+#define DOT11_TIMBC_DENY_RESP_IE_LEN	1	/* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN	10	/* Accept. Fixed length */
+
+#define DOT11_TIMBC_STATUS_ACCEPT		0
+#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP	1
+#define DOT11_TIMBC_STATUS_DENY			2
+#define DOT11_TIMBC_STATUS_OVERRIDDEN		3
+#define DOT11_TIMBC_STATUS_RESERVED		4
+
+/** TIM Broadcast response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* TIM broadcast response element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp dot11_timbc_resp_t;
+#define DOT11_TIMBC_RESP_LEN		3	/* Fixed length */
+
+/** TIM element */
+BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
+	uint8 id;			/* 5, DOT11_MNG_TIM_ID */
+	uint8 len;			/* 4 - 255 */
+	uint8 dtim_count;		/* DTIM decrementing counter */
+	uint8 dtim_period;		/* DTIM period */
+	uint8 bitmap_control;		/* AID 0 + bitmap offset */
+	uint8 pvb[1];			/* Partial Virtual Bitmap, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tim_ie dot11_tim_ie_t;
+#define DOT11_TIM_IE_FIXED_LEN		3	/* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN	5	/* Fixed length, with id and len */
+
+/** TIM Broadcast frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc {
+	uint8 category;			/* category of action frame (11) */
+	uint8 action;			/* action: TIM (0) */
+	uint8 check_beacon;		/* need to check-beacon */
+	uint8 tsf[8];			/* Time Synchronization Function */
+	dot11_tim_ie_t tim_ie;		/* TIM element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc dot11_timbc_t;
+#define DOT11_TIMBC_HDR_LEN	(sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
+#define DOT11_TIMBC_FIXED_LEN	(sizeof(dot11_timbc_t) - 1)	/* Fixed length */
+#define DOT11_TIMBC_LEN		11	/* Fixed length */
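+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): checking whether an AID is set in the TIM
+ * element's partial virtual bitmap. Bits 1-7 of bitmap_control carry the
+ * bitmap offset, i.e. the number of the first carried octet (always even),
+ * and bit 0 is the AID-0/multicast indication.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static bool
+tim_aid_isset(const dot11_tim_ie_t *tim, uint16 aid)
+{
+	uint pvb_len = tim->len - DOT11_TIM_IE_FIXED_LEN;	/* octets in pvb[] */
+	uint n1 = tim->bitmap_control & 0xFEu;	/* first bitmap octet number */
+	uint octet = aid >> 3u;
+
+	if (octet < n1 || (octet - n1) >= pvb_len)
+		return FALSE;	/* AID falls outside the transmitted window */
+	return (tim->pvb[octet - n1] & (1u << (aid & 7u))) != 0;
+}
+#endif /* DOT11_USAGE_EXAMPLES */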
+
+/** TCLAS frame classifier type */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
+	uint8 type;
+	uint8 mask;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
+#define DOT11_TCLAS_FC_HDR_LEN		2	/* Fixed length */
+
+#define DOT11_TCLAS_MASK_0		0x1
+#define DOT11_TCLAS_MASK_1		0x2
+#define DOT11_TCLAS_MASK_2		0x4
+#define DOT11_TCLAS_MASK_3		0x8
+#define DOT11_TCLAS_MASK_4		0x10
+#define DOT11_TCLAS_MASK_5		0x20
+#define DOT11_TCLAS_MASK_6		0x40
+#define DOT11_TCLAS_MASK_7		0x80
+
+#define DOT11_TCLAS_FC_0_ETH		0
+#define DOT11_TCLAS_FC_1_IP		1
+#define DOT11_TCLAS_FC_2_8021Q		2
+#define DOT11_TCLAS_FC_3_OFFSET		3
+#define DOT11_TCLAS_FC_4_IP_HIGHER	4
+#define DOT11_TCLAS_FC_5_8021D		5
+
+/** TCLAS frame classifier type 0 parameters for Ethernet */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
+	uint8 type;
+	uint8 mask;
+	uint8 sa[ETHER_ADDR_LEN];
+	uint8 da[ETHER_ADDR_LEN];
+	uint16 eth_type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
+#define DOT11_TCLAS_FC_0_ETH_LEN	16
+
+/** TCLAS frame classifier type 1 parameters for IPV4 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint32 src_ip;
+	uint32 dst_ip;
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 protocol;
+	uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
+#define DOT11_TCLAS_FC_1_IPV4_LEN	18
+
+/** TCLAS frame classifier type 2 parameters for 802.1Q */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
+	uint8 type;
+	uint8 mask;
+	uint16 tci;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
+#define DOT11_TCLAS_FC_2_8021Q_LEN	4
+
+/** TCLAS frame classifier type 3 parameters for filter offset */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
+	uint8 type;
+	uint8 mask;
+	uint16 offset;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
+#define DOT11_TCLAS_FC_3_FILTER_LEN	4
+
+/** TCLAS frame classifier type 4 parameters for IPV4 are the same as TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
+#define DOT11_TCLAS_FC_4_IPV4_LEN	DOT11_TCLAS_FC_1_IPV4_LEN
+
+/** TCLAS frame classifier type 4 parameters for IPV6 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint8 saddr[16];
+	uint8 daddr[16];
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 nexthdr;
+	uint8 flow_lbl[3];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
+#define DOT11_TCLAS_FC_4_IPV6_LEN	44
+
+/** TCLAS frame classifier type 5 parameters for 802.1D */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
+	uint8 type;
+	uint8 mask;
+	uint8 pcp;
+	uint8 cfi;
+	uint16 vid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
+#define DOT11_TCLAS_FC_5_8021D_LEN	6
+
+/** TCLAS frame classifier type parameters */
+BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
+	uint8 data[1];
+	dot11_tclas_fc_hdr_t hdr;
+	dot11_tclas_fc_0_eth_t t0_eth;
+	dot11_tclas_fc_1_ipv4_t t1_ipv4;
+	dot11_tclas_fc_2_8021q_t t2_8021q;
+	dot11_tclas_fc_3_filter_t t3_filter;
+	dot11_tclas_fc_4_ipv4_t t4_ipv4;
+	dot11_tclas_fc_4_ipv6_t t4_ipv6;
+	dot11_tclas_fc_5_8021d_t t5_8021d;
+} BWL_POST_PACKED_STRUCT;
+typedef union dot11_tclas_fc dot11_tclas_fc_t;
+
+#define DOT11_TCLAS_FC_MIN_LEN		4	/* Classifier Type 2 has the min size */
+#define DOT11_TCLAS_FC_MAX_LEN		254
+
+/** TCLAS element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
+	uint8 id;			/* 14, DOT11_MNG_TCLAS_ID */
+	uint8 len;
+	uint8 user_priority;
+	dot11_tclas_fc_t fc;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_ie dot11_tclas_ie_t;
+#define DOT11_TCLAS_IE_LEN		3u	/* Fixed length, includes id and len */
+
+/** TCLAS processing element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
+	uint8 id;			/* 44, DOT11_MNG_TCLAS_PROC_ID */
+	uint8 len;
+	uint8 process;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
+#define DOT11_TCLAS_PROC_IE_LEN		3	/* Fixed length, includes id and len */
+
+#define DOT11_TCLAS_PROC_LEN		1u	/* Proc ie length is always 1 byte */
+
+#define DOT11_TCLAS_PROC_MATCHALL	0	/* All high-level elements need to match */
+#define DOT11_TCLAS_PROC_MATCHONE	1	/* One high-level element needs to match */
+#define DOT11_TCLAS_PROC_NONMATCH	2	/* No match to any high-level element */
+
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN		57	/* Fixed length */
+
+/** TCLAS Mask element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_mask_ie {
+	uint8 id;			/* DOT11_MNG_ID_EXT_ID (255) */
+	uint8 len;
+	uint8 id_ext;			/* TCLAS_EXTID_MNG_MASK_ID (89) */
+	dot11_tclas_fc_t fc;		/* Variable length frame classifier (fc) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_mask_ie dot11_tclas_mask_ie_t;
+#define DOT11_TCLAS_MASK_IE_LEN		1u	/* Fixed length, excludes id and len */
+#define DOT11_TCLAS_MASK_IE_HDR_LEN	3u	/* Fixed length */
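+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): filling a type-1 IPv4 classifier that
+ * matches on IP version and destination port. Classifier parameter 4 is the
+ * destination port, hence DOT11_TCLAS_MASK_4; hton16() is the byte-order
+ * helper from bcmendian.h since the fields are carried big-endian.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static void
+tclas_ipv4_dport_example(dot11_tclas_fc_1_ipv4_t *fc, uint16 dport)
+{
+	memset(fc, 0, sizeof(*fc));
+	fc->type = DOT11_TCLAS_FC_1_IP;
+	fc->mask = DOT11_TCLAS_MASK_0 | DOT11_TCLAS_MASK_4;	/* version + dest port */
+	fc->version = 4;
+	fc->dst_port = hton16(dport);
+}
+#endif /* DOT11_USAGE_EXAMPLES */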
+
+/* Bitmap definitions for the User Priority Bitmap
+ * Each bit in the bitmap corresponds to a user priority.
+ */
+#define DOT11_UP_CTRL_UP_0		0u
+#define DOT11_UP_CTRL_UP_1		1u
+#define DOT11_UP_CTRL_UP_2		2u
+#define DOT11_UP_CTRL_UP_3		3u
+#define DOT11_UP_CTRL_UP_4		4u
+#define DOT11_UP_CTRL_UP_5		5u
+#define DOT11_UP_CTRL_UP_6		6u
+#define DOT11_UP_CTRL_UP_7		7u
+
+/* User priority control (up_ctl) macros */
+#define DOT11_UPC_UP_BITMAP_MASK	0xFFu	/* UP bitmap mask */
+#define DOT11_UPC_UP_BITMAP_SHIFT	0u	/* UP bitmap shift */
+#define DOT11_UPC_UP_LIMIT_MASK		0x700u	/* UP limit mask */
+#define DOT11_UPC_UP_LIMIT_SHIFT	8u	/* UP limit shift */
+
+/* MSCS Request Types */
+#define DOT11_MSCS_REQ_TYPE_ADD		0u
+#define DOT11_MSCS_REQ_TYPE_REMOVE	1u
+#define DOT11_MSCS_REQ_TYPE_CHANGE	2u
+
+/** MSCS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_descr_ie {
+	uint8 id;			/* DOT11_MNG_ID_EXT_ID (255) */
+	uint8 len;
+	uint8 id_ext;			/* MSCS_EXTID_MNG_DESCR_ID (88) */
+	uint8 req_type;			/* MSCS request type */
+	uint16 up_ctl;			/* User priority control:
+					 * Bits 0..7, up_bitmap (8 bits);
+					 * Bits 8..10, up_limit (3 bits);
+					 * Bits 11..15, reserved (5 bits)
+					 */
+	uint32 stream_timeout;
+	uint8 data[];
+	/* optional tclas mask elements */	/* dot11_tclas_mask_ie_t */
+	/* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_descr_ie dot11_mscs_descr_ie_t;
+#define DOT11_MSCS_DESCR_IE_LEN		8u	/* Fixed length, excludes id and len */
+#define DOT11_MSCS_DESCR_IE_HDR_LEN	10u	/* Entire descriptor header length */
+
+/** MSCS Request frame, refer to section 9.4.18.6 in the spec P802.11REVmd_D3.1 */
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_req {
+	uint8 category;			/* ACTION_RAV_STREAMING (19) */
+	uint8 robust_action;		/* action: MSCS Req (4), MSCS Res (5), etc. */
+	uint8 dialog_token;		/* To identify the MSCS request and response */
+	dot11_mscs_descr_ie_t mscs_descr;	/* MSCS descriptor */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_req dot11_mscs_req_t;
+#define DOT11_MSCS_REQ_HDR_LEN		3u	/* Fixed length */
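+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): packing the MSCS descriptor up_ctl field
+ * for a UP bitmap admitting UP 6 and UP 7 with a UP limit of 7.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static uint16
+mscs_up_ctl_example(void)
+{
+	uint16 up_ctl = 0;
+	/* bits 0..7: user priority bitmap */
+	up_ctl |= (((1u << DOT11_UP_CTRL_UP_6) | (1u << DOT11_UP_CTRL_UP_7))
+		<< DOT11_UPC_UP_BITMAP_SHIFT) & DOT11_UPC_UP_BITMAP_MASK;
+	/* bits 8..10: user priority limit */
+	up_ctl |= (7u << DOT11_UPC_UP_LIMIT_SHIFT) & DOT11_UPC_UP_LIMIT_MASK;
+	return up_ctl;	/* 0x07C0; stored little-endian in the element */
+}
+#endif /* DOT11_USAGE_EXAMPLES */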
+
+/** MSCS Response frame, refer to section 9.4.18.7 in the spec P802.11REVmd_D3.1 */
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_res {
+	uint8 category;			/* ACTION_RAV_STREAMING (19) */
+	uint8 robust_action;		/* action: MSCS Req (4), MSCS Res (5), etc. */
+	uint8 dialog_token;		/* To identify the MSCS request and response */
+	uint16 status;			/* status code */
+	uint8 data[];			/* optional MSCS descriptor */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_res dot11_mscs_res_t;
+#define DOT11_MSCS_RES_HDR_LEN		5u	/* Fixed length */
+
+/* MSCS subelement */
+#define DOT11_MSCS_SUBELEM_ID_STATUS	1u	/* MSCS subelement ID for the status */
+
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_subelement {
+	uint8 id;			/* MSCS specific subelement ID */
+	uint8 len;			/* Length in bytes */
+	uint8 data[];			/* Subelement specific data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_subelement dot11_mscs_subelement_t;
+#define DOT11_MSCS_DESCR_SUBELEM_IE_STATUS_LEN	2u	/* Subelement ID status length */
+
+/** TFS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
+	uint8 id;			/* 91, DOT11_MNG_TFS_REQUEST_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 actcode;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
+#define DOT11_TFS_REQ_IE_LEN		2	/* Fixed length, without id and len */
+
+/** TFS request action codes (bitfield) */
+#define DOT11_TFS_ACTCODE_DELETE	1
+#define DOT11_TFS_ACTCODE_NOTIFY	2
+
+/** TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID		1
+#define DOT11_TFS_REQ_VENDOR_SE_ID	221
+
+/** TFS subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 data[1];			/* TCLAS element(s) + optional TCLAS proc */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_se dot11_tfs_se_t;
+
+/** TFS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
+	uint8 id;			/* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
+#define DOT11_TFS_RESP_IE_LEN		1u	/* Fixed length, without id and len */
+
+/** TFS response subelement IDs (same subelements, but different IDs than in the TFS request) */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID	1
+#define DOT11_TFS_RESP_TFS_SE_ID	2
+#define DOT11_TFS_RESP_VENDOR_SE_ID	221
+
+/** TFS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
+	uint8 sub_id;			/* 1, DOT11_TFS_RESP_TFS_STATUS_SE_ID */
+	uint8 len;
+	uint8 resp_st;
+	uint8 data[1];			/* Potential dot11_tfs_se_t included */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
+#define DOT11_TFS_STATUS_SE_LEN		1	/* Fixed length, without id and len */
+
+/* Following definitions should be merged into the FMS_TFS macros below */
+/* TFS Response status codes. Identical to FMS Element status, without N/A */
+#define DOT11_TFS_STATUS_ACCEPT			0
+#define DOT11_TFS_STATUS_DENY_FORMAT		1
+#define DOT11_TFS_STATUS_DENY_RESOURCE		2
+#define DOT11_TFS_STATUS_DENY_POLICY		4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY		7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP	14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT		0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT	1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE	2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI	3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY	4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI	6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY		7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI	8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE		9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY	10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE	11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO	12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI	13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP	14
+
+/** TFS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: TFS request (13) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req dot11_tfs_req_t;
+#define DOT11_TFS_REQ_LEN		3	/* Fixed length */
+
+/** TFS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: TFS response (14) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp dot11_tfs_resp_t;
+#define DOT11_TFS_RESP_LEN		3	/* Fixed length */
+
+/** TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: TFS notify request (15) */
+	uint8 tfs_id_cnt;		/* TFS IDs count */
+	uint8 tfs_id[1];		/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN	3	/* Fixed length */
+
+/** TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: TFS notify response (28) */
+	uint8 tfs_id_cnt;		/* TFS IDs count */
+	uint8 tfs_id[1];		/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN	3	/* Fixed length */
+
+/** WNM-Sleep Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: wnm-sleep request (16) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
+#define DOT11_WNM_SLEEP_REQ_LEN		3	/* Fixed length */
+
+/** WNM-Sleep Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: wnm-sleep response (17) */
+	uint8 token;			/* dialog token */
+	uint16 key_len;			/* key data length */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
+#define DOT11_WNM_SLEEP_RESP_LEN	5	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK	0
+#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK	1
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_length;
+	uint8 rsc[8];
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN	11	/* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN	43	/* without sub_id and len */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_id;
+	uint8 pn[6];
+	uint8 key[16];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN	24	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
+	uint8 id;			/* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
+	uint8 len;
+	uint8 act_type;
+	uint8 resp_status;
+	uint16 interval;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
+#define DOT11_WNM_SLEEP_IE_LEN		4	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER	0
+#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT	1
+
+#define DOT11_WNM_SLEEP_RESP_ACCEPT	0
+#define DOT11_WNM_SLEEP_RESP_UPDATE	1
+#define DOT11_WNM_SLEEP_RESP_DENY	2
+#define DOT11_WNM_SLEEP_RESP_DENY_TEMP	3
+#define DOT11_WNM_SLEEP_RESP_DENY_KEY	4
+#define DOT11_WNM_SLEEP_RESP_DENY_INUSE	5
+#define DOT11_WNM_SLEEP_RESP_LAST	6
+
+/** DMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: dms request (23) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req dot11_dms_req_t;
+#define DOT11_DMS_REQ_LEN		3	/* Fixed length */
+
+/** DMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: dms response (24) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp dot11_dms_resp_t;
+#define DOT11_DMS_RESP_LEN		3	/* Fixed length */
+
+/** DMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
+	uint8 id;			/* 99, DOT11_MNG_DMS_REQUEST_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
+#define DOT11_DMS_REQ_IE_LEN		2	/* Fixed length */
+
+/** DMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
+	uint8 id;			/* 100, DOT11_MNG_DMS_RESPONSE_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
+#define DOT11_DMS_RESP_IE_LEN		2	/* Fixed length */
+
+/** DMS request descriptor */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
+#define DOT11_DMS_REQ_DESC_LEN		3	/* Fixed length */
+
+#define DOT11_DMS_REQ_TYPE_ADD		0
+#define DOT11_DMS_REQ_TYPE_REMOVE	1
+#define DOT11_DMS_REQ_TYPE_CHANGE	2
+
+/** DMS response status */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint16 lsc;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
+#define DOT11_DMS_RESP_STATUS_LEN	5	/* Fixed length */
+
+#define DOT11_DMS_RESP_TYPE_ACCEPT	0
+#define DOT11_DMS_RESP_TYPE_DENY	1
+#define DOT11_DMS_RESP_TYPE_TERM	2
+
+#define DOT11_DMS_RESP_LSC_UNSUPPORTED	0xFFFF
+
+/** WNM-Notification Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_notif_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: Notification request (26) */
+	uint8 token;			/* dialog token */
+	uint8 type;			/* type */
+	uint8 data[1];			/* Sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_notif_req dot11_wnm_notif_req_t;
+#define DOT11_WNM_NOTIF_REQ_LEN		4	/* Fixed length */
+
+/** FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: fms request (9) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN		3	/* Fixed length */
+
+/** FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: fms response (10) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN		3	/* Fixed length */
+
+/** FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+	uint8 id;
+	uint8 len;
+	uint8 num_fms_cnt;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN		1	/* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX		0x8
+#define DOT11_FMS_CNTR_ID_MASK		0x7
+#define DOT11_FMS_CNTR_ID_SHIFT		0x0
+#define DOT11_FMS_CNTR_COUNT_MASK	0xf1
+#define DOT11_FMS_CNTR_SHIFT		0x3
+
+/** FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;		/* token used to identify fms stream set */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN	1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+	uint8 mask;
+	uint8 mcs_idx;
+	uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK	0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET	0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK	0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET	3
+#define DOT11_RATE_ID_FIELD_LEN		sizeof(dot11_rate_id_field_t)
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 interval;
+	uint8 max_interval;
+	dot11_rate_id_field_t rate;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN		6	/* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS		1	/* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS		221	/* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN	1	/* Fixed length */
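+
+/* Illustrative sketch (not part of the original header; the guard macro and
+ * helper name are hypothetical): packing the mask octet of the FMS rate
+ * identification field above from an MCS selector and a rate type, purely
+ * with the mask/offset defines.
+ */
+#ifdef DOT11_USAGE_EXAMPLES
+static uint8
+fms_rate_id_mask_example(uint8 mcs_sel, uint8 rate_type)
+{
+	return (uint8)(((mcs_sel << DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET) &
+		DOT11_RATE_ID_FIELD_MCS_SEL_MASK) |
+		((rate_type << DOT11_RATE_ID_FIELD_RATETYPE_OFFSET) &
+		DOT11_RATE_ID_FIELD_RATETYPE_MASK));
+}
+#endif /* DOT11_USAGE_EXAMPLES */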
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS	1	/* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS	2	/* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS	221	/* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 status;
+	uint8 interval;
+	uint8 max_interval;
+	uint8 fmsid;
+	uint8 counter;
+	dot11_rate_id_field_t rate;
+	uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN		15	/* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 fmsid;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN	1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category;			/* category of action frame (3) */
+	uint8 action;			/* action: addba req */
+	uint8 token;			/* identifier */
+	uint16 addba_param_set;		/* parameter set */
+	uint16 timeout;			/* timeout in seconds */
+	uint16 start_seqnum;		/* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN		9	/* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category;			/* category of action frame (3) */
+	uint8 action;			/* action: addba resp */
+	uint8 token;			/* identifier */
+	uint16 status;			/* status of add request */
+	uint16 addba_param_set;		/* negotiated parameter set */
+	uint16 timeout;			/* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN		9	/* length of addba resp frame */
+
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK	0x0800	/* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT	11	/* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK	0xf000	/* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT	12	/* tid shift */
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+	uint8 category;			/* category of action frame (3) */
+	uint8 action;			/* action: delba */
+	uint16 delba_param_set;		/* parameter set */
+	uint16 reason;			/* reason for delba */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN			6	/* length of delba frame */
+
+/* SA Query action field value */
+#define SA_QUERY_REQUEST		0
+#define SA_QUERY_RESPONSE		1
+
+/* ************* 802.11r related definitions. ************* */
+
+/** Over-the-DS Fast Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft req */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN		14
+
+/** Over-the-DS Fast Transition Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft resp */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint16 status;			/* status code */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN		16
+
+/** RDE RIC Data Element. */
+BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
+	uint8 id;			/* 11r, DOT11_MNG_RDE_ID */
+	uint8 length;
+	uint8 rde_id;			/* RDE identifier. 
*/ + uint8 rd_count; /* Resource Descriptor Count. */ + uint16 status; /* Status Code. */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rde_ie dot11_rde_ie_t; + +/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */ +#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t) + +/* ************* 802.11k related definitions. ************* */ + +/* Radio measurements enabled capability ie */ +#define DOT11_RRM_CAP_LEN 5 /* length of rrm cap bitmap */ +#define RCPI_IE_LEN 1 +#define RSNI_IE_LEN 1 +BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie { + uint8 cap[DOT11_RRM_CAP_LEN]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t; + +/* Bitmap definitions for cap ie */ +#define DOT11_RRM_CAP_LINK 0 +#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1 +#define DOT11_RRM_CAP_PARALLEL 2 +#define DOT11_RRM_CAP_REPEATED 3 +#define DOT11_RRM_CAP_BCN_PASSIVE 4 +#define DOT11_RRM_CAP_BCN_ACTIVE 5 +#define DOT11_RRM_CAP_BCN_TABLE 6 +#define DOT11_RRM_CAP_BCN_REP_COND 7 +#define DOT11_RRM_CAP_FM 8 +#define DOT11_RRM_CAP_CLM 9 +#define DOT11_RRM_CAP_NHM 10 +#define DOT11_RRM_CAP_SM 11 +#define DOT11_RRM_CAP_LCIM 12 +#define DOT11_RRM_CAP_LCIA 13 +#define DOT11_RRM_CAP_TSCM 14 +#define DOT11_RRM_CAP_TTSCM 15 +#define DOT11_RRM_CAP_AP_CHANREP 16 +#define DOT11_RRM_CAP_RMMIB 17 +/* bit18-bit23, not used for RRM_IOVAR */ +#define DOT11_RRM_CAP_MPC0 24 +#define DOT11_RRM_CAP_MPC1 25 +#define DOT11_RRM_CAP_MPC2 26 +#define DOT11_RRM_CAP_MPTI 27 +#define DOT11_RRM_CAP_NBRTSFO 28 +#define DOT11_RRM_CAP_RCPI 29 +#define DOT11_RRM_CAP_RSNI 30 +#define DOT11_RRM_CAP_BSSAAD 31 +#define DOT11_RRM_CAP_BSSAAC 32 +#define DOT11_RRM_CAP_AI 33 +#define DOT11_RRM_CAP_FTM_RANGE 34 +#define DOT11_RRM_CAP_CIVIC_LOC 35 +#define DOT11_RRM_CAP_IDENT_LOC 36 +#define DOT11_RRM_CAP_LAST 36 + +#ifdef WL11K_ALL_MEAS +#define DOT11_RRM_CAP_LINK_ENAB (1 << DOT11_RRM_CAP_LINK) +#define DOT11_RRM_CAP_FM_ENAB (1 << (DOT11_RRM_CAP_FM - 8)) +#define DOT11_RRM_CAP_CLM_ENAB (1 << (DOT11_RRM_CAP_CLM - 8)) +#define DOT11_RRM_CAP_NHM_ENAB (1 << (DOT11_RRM_CAP_NHM - 8)) +#define DOT11_RRM_CAP_SM_ENAB (1 << (DOT11_RRM_CAP_SM - 8)) +#define DOT11_RRM_CAP_LCIM_ENAB (1 << (DOT11_RRM_CAP_LCIM - 8)) +#define DOT11_RRM_CAP_TSCM_ENAB (1 << (DOT11_RRM_CAP_TSCM - 8)) +#ifdef WL11K_AP +#define DOT11_RRM_CAP_MPC0_ENAB (1 << (DOT11_RRM_CAP_MPC0 - 24)) +#define DOT11_RRM_CAP_MPC1_ENAB (1 << (DOT11_RRM_CAP_MPC1 - 24)) +#define DOT11_RRM_CAP_MPC2_ENAB (1 << (DOT11_RRM_CAP_MPC2 - 24)) +#define DOT11_RRM_CAP_MPTI_ENAB (1 << (DOT11_RRM_CAP_MPTI - 24)) +#else +#define DOT11_RRM_CAP_MPC0_ENAB 0 +#define DOT11_RRM_CAP_MPC1_ENAB 0 +#define DOT11_RRM_CAP_MPC2_ENAB 0 +#define DOT11_RRM_CAP_MPTI_ENAB 0 +#endif /* WL11K_AP */ +#define DOT11_RRM_CAP_CIVIC_LOC_ENAB (1 << (DOT11_RRM_CAP_CIVIC_LOC - 32)) +#define DOT11_RRM_CAP_IDENT_LOC_ENAB (1 << (DOT11_RRM_CAP_IDENT_LOC - 32)) +#else +#define DOT11_RRM_CAP_LINK_ENAB 0 +#define DOT11_RRM_CAP_FM_ENAB 0 +#define DOT11_RRM_CAP_CLM_ENAB 0 +#define DOT11_RRM_CAP_NHM_ENAB 0 +#define DOT11_RRM_CAP_SM_ENAB 0 +#define DOT11_RRM_CAP_LCIM_ENAB 0 +#define DOT11_RRM_CAP_TSCM_ENAB 0 +#define DOT11_RRM_CAP_MPC0_ENAB 0 +#define DOT11_RRM_CAP_MPC1_ENAB 0 +#define DOT11_RRM_CAP_MPC2_ENAB 0 +#define DOT11_RRM_CAP_MPTI_ENAB 0 +#define DOT11_RRM_CAP_CIVIC_LOC_ENAB 0 +#define DOT11_RRM_CAP_IDENT_LOC_ENAB 0 +#endif /* WL11K_ALL_MEAS */ +#ifdef WL11K_NBR_MEAS +#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB (1 << DOT11_RRM_CAP_NEIGHBOR_REPORT) +#else +#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB 0 +#endif /* WL11K_NBR_MEAS */ 
+#ifdef WL11K_BCN_MEAS +#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB (1 << DOT11_RRM_CAP_BCN_PASSIVE) +#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB (1 << DOT11_RRM_CAP_BCN_ACTIVE) +#else +#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB 0 +#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB 0 +#endif /* WL11K_BCN_MEAS */ +#define DOT11_RRM_CAP_MPA_MASK 0x7 +/* Operating Class (formerly "Regulatory Class") definitions */ +#define DOT11_OP_CLASS_NONE 255 + +BWL_PRE_PACKED_STRUCT struct do11_ap_chrep { + uint8 id; + uint8 len; + uint8 reg; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct do11_ap_chrep dot11_ap_chrep_t; + +/* Radio Measurements action ids */ +#define DOT11_RM_ACTION_RM_REQ 0 /* Radio measurement request */ +#define DOT11_RM_ACTION_RM_REP 1 /* Radio measurement report */ +#define DOT11_RM_ACTION_LM_REQ 2 /* Link measurement request */ +#define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */ +#define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */ +#define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */ +#define DOT11_PUB_ACTION_MP 7 /* Measurement Pilot public action id */ + +/** Generic radio measurement action frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_rm_action { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_action dot11_rm_action_t; +#define DOT11_RM_ACTION_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint16 reps; /* no. of repetitions */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq dot11_rmreq_t; +#define DOT11_RMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_rm_ie { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_ie dot11_rm_ie_t; +#define DOT11_RM_IE_LEN 5 + +/* Definitions for "mode" bits in rm req */ +#define DOT11_RMREQ_MODE_PARALLEL 1 +#define DOT11_RMREQ_MODE_ENABLE 2 +#define DOT11_RMREQ_MODE_REQUEST 4 +#define DOT11_RMREQ_MODE_REPORT 8 +#define DOT11_RMREQ_MODE_DURMAND 0x10 /* Duration Mandatory */ + +/* Definitions for "mode" bits in rm rep */ +#define DOT11_RMREP_MODE_LATE 1 +#define DOT11_RMREP_MODE_INCAPABLE 2 +#define DOT11_RMREP_MODE_REFUSED 4 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn { + uint8 id; /* use dot11_rm_ie_t ? 
*/ + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 bcn_mode; + struct ether_addr bssid; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t; +#define DOT11_RMREQ_BCN_LEN 18u + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 frame_info; + uint8 rcpi; + uint8 rsni; + struct ether_addr bssid; + uint8 antenna_id; + uint32 parent_tsf; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t; +#define DOT11_RMREP_BCN_LEN 26 + +/* Beacon request measurement mode */ +#define DOT11_RMREQ_BCN_PASSIVE 0 +#define DOT11_RMREQ_BCN_ACTIVE 1 +#define DOT11_RMREQ_BCN_TABLE 2 + +/* Sub-element IDs for Beacon Request */ +#define DOT11_RMREQ_BCN_SSID_ID 0 +#define DOT11_RMREQ_BCN_REPINFO_ID 1 +#define DOT11_RMREQ_BCN_REPDET_ID 2 +#define DOT11_RMREQ_BCN_REQUEST_ID 10 +#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID +#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID 164 + +/* Reporting Detail element definition */ +#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */ +#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */ +#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */ + +/* Reporting Information (reporting condition) element definition */ +#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */ +#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued after each measurement */ + +/* Last Beacon Report Indication Request definition */ +#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ENAB 1 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind_req { + uint8 id; /* DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID */ + uint8 len; /* length of remaining fields */ + uint8 data; /* data = 1 means last bcn rpt ind requested */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_last_bcn_rpt_ind_req dot11_rmrep_last_bcn_rpt_ind_req_t; + +/* Sub-element IDs for Beacon Report */ +#define DOT11_RMREP_BCN_FRM_BODY 1 +#define DOT11_RMREP_BCN_FRM_BODY_FRAG_ID 2 +#define DOT11_RMREP_BCN_LAST_RPT_IND 164 +#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */ + +/* Refer IEEE P802.11-REVmd/D1.0 9.4.2.21.7 Beacon report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_fragmt_id { + uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */ + uint8 len; /* length of remaining fields */ + /* More fragments(B15), fragment Id(B8-B14), Bcn rpt instance ID (B0 - B7) */ + uint16 frag_info_rpt_id; +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_rmrep_bcn_frm_body_fragmt_id dot11_rmrep_bcn_frm_body_fragmt_id_t; + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_frag_id { + uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */ + uint8 len; /* length of remaining fields */ + uint8 bcn_rpt_id; /* Bcn rpt instance ID */ + uint8 frag_info; /* fragment Id(7 bits) | More fragments(1 bit) */ +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_rmrep_bcn_frm_body_frag_id dot11_rmrep_bcn_frm_body_frag_id_t; +#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2u +#define DOT11_RMREP_BCNRPT_FRAG_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_frag_id_t) +#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1u +#define DOT11_RMREP_BCNRPT_FRAGMT_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_fragmt_id_t) +#define DOT11_RMREP_BCNRPT_BCN_RPT_ID_MASK 0x00FFu +#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_SHIFT 8u +#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_MASK 0x7F00u +#define 
DOT11_RMREP_BCNRPT_MORE_FRAG_SHIFT 15u +#define DOT11_RMREP_BCNRPT_MORE_FRAG_MASK 0x8000u + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind { + uint8 id; /* DOT11_RMREP_BCN_LAST_RPT_IND */ + uint8 len; /* length of remaining fields */ + uint8 data; /* data = 1 is last bcn rpt */ +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_rmrep_last_bcn_rpt_ind dot11_rmrep_last_bcn_rpt_ind_t; +#define DOT11_RMREP_LAST_BCN_RPT_IND_DATA_LEN 1 +#define DOT11_RMREP_LAST_BCN_RPT_IND_SE_LEN sizeof(dot11_rmrep_last_bcn_rpt_ind_t) + +/* Sub-element IDs for Frame Report */ +#define DOT11_RMREP_FRAME_COUNT_REPORT 1 + +/* Channel load request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload { + uint8 id; /* use dot11_rm_ie_t ? */ + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t; +#define DOT11_RMREQ_CHANLOAD_LEN 11 + +/** Channel load report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 channel_load; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t; +#define DOT11_RMREP_CHANLOAD_LEN 13 + +/** Noise histogram request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise { + uint8 id; /* use dot11_rm_ie_t ? */ + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_noise dot11_rmreq_noise_t; +#define DOT11_RMREQ_NOISE_LEN 11 + +/** Noise histogram report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 antid; + uint8 anpi; + uint8 ipi0_dens; + uint8 ipi1_dens; + uint8 ipi2_dens; + uint8 ipi3_dens; + uint8 ipi4_dens; + uint8 ipi5_dens; + uint8 ipi6_dens; + uint8 ipi7_dens; + uint8 ipi8_dens; + uint8 ipi9_dens; + uint8 ipi10_dens; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_noise dot11_rmrep_noise_t; +#define DOT11_RMREP_NOISE_LEN 25 + +/** Frame request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame { + uint8 id; /* use dot11_rm_ie_t ? */ + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 req_type; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_frame dot11_rmreq_frame_t; +#define DOT11_RMREQ_FRAME_LEN 18 + +/** Frame report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frame dot11_rmrep_frame_t; +#define DOT11_RMREP_FRAME_LEN 12 + +/** Frame report entry */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry { + struct ether_addr ta; + struct ether_addr bssid; + uint8 phy_type; + uint8 avg_rcpi; + uint8 last_rsni; + uint8 last_rcpi; + uint8 ant_id; + uint16 frame_cnt; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t; +#define DOT11_RMREP_FRMENTRY_LEN 19 + +/** STA statistics request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat { + uint8 id; /* use dot11_rm_ie_t ? 
*/ + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + struct ether_addr peer; + uint16 interval; + uint16 duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_stat dot11_rmreq_stat_t; +#define DOT11_RMREQ_STAT_LEN 16 + +/** STA statistics report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat { + uint16 duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_stat dot11_rmrep_stat_t; + +/* Statistics Group Report: Group IDs */ +enum { + DOT11_RRM_STATS_GRP_ID_0 = 0, + DOT11_RRM_STATS_GRP_ID_1, + DOT11_RRM_STATS_GRP_ID_2, + DOT11_RRM_STATS_GRP_ID_3, + DOT11_RRM_STATS_GRP_ID_4, + DOT11_RRM_STATS_GRP_ID_5, + DOT11_RRM_STATS_GRP_ID_6, + DOT11_RRM_STATS_GRP_ID_7, + DOT11_RRM_STATS_GRP_ID_8, + DOT11_RRM_STATS_GRP_ID_9, + DOT11_RRM_STATS_GRP_ID_10, + DOT11_RRM_STATS_GRP_ID_11, + DOT11_RRM_STATS_GRP_ID_12, + DOT11_RRM_STATS_GRP_ID_13, + DOT11_RRM_STATS_GRP_ID_14, + DOT11_RRM_STATS_GRP_ID_15, + DOT11_RRM_STATS_GRP_ID_16 +}; + +/* Statistics Group Report: Group Data length */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28 +typedef struct rrm_stat_group_0 { + uint32 txfrag; + uint32 txmulti; + uint32 txfail; + uint32 rxframe; + uint32 rxmulti; + uint32 rxbadfcs; + uint32 txframe; +} rrm_stat_group_0_t; + +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_1 24 +typedef struct rrm_stat_group_1 { + uint32 txretry; + uint32 txretries; + uint32 rxdup; + uint32 txrts; + uint32 rtsfail; + uint32 ackfail; +} rrm_stat_group_1_t; + +/* group 2-9 use same qos data structure (tid 0-7), total 52 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_2_9 52 +typedef struct rrm_stat_group_qos { + uint32 txfrag; + uint32 txfail; + uint32 txretry; + uint32 txretries; + uint32 rxdup; + uint32 txrts; + uint32 rtsfail; + uint32 ackfail; + uint32 rxfrag; + uint32 txframe; + uint32 txdrop; + uint32 rxmpdu; + uint32 rxretries; +} rrm_stat_group_qos_t; + +/* dot11BSSAverageAccessDelay Group (only available at an AP): 8 byte */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_10 8 +typedef BWL_PRE_PACKED_STRUCT struct rrm_stat_group_10 { + uint8 apavgdelay; + uint8 avgdelaybe; + uint8 avgdelaybg; + uint8 avgdelayvi; + uint8 avgdelayvo; + uint16 stacount; + uint8 chanutil; +} BWL_POST_PACKED_STRUCT rrm_stat_group_10_t; + +/* AMSDU, 40 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_11 40 +typedef struct rrm_stat_group_11 { + uint32 txamsdu; + uint32 amsdufail; + uint32 amsduretry; + uint32 amsduretries; + uint32 txamsdubyte_h; + uint32 txamsdubyte_l; + uint32 amsduackfail; + uint32 rxamsdu; + uint32 rxamsdubyte_h; + uint32 rxamsdubyte_l; +} rrm_stat_group_11_t; + +/* AMPDU, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_12 36 +typedef struct rrm_stat_group_12 { + uint32 txampdu; + uint32 txmpdu; + uint32 txampdubyte_h; + uint32 txampdubyte_l; + uint32 rxampdu; + uint32 rxmpdu; + uint32 rxampdubyte_h; + uint32 rxampdubyte_l; + uint32 ampducrcfail; +} rrm_stat_group_12_t; + +/* BACK etc, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_13 36 +typedef struct rrm_stat_group_13 { + uint32 rximpbarfail; + uint32 rxexpbarfail; + uint32 chanwidthsw; + uint32 txframe20mhz; + uint32 txframe40mhz; + uint32 rxframe20mhz; + uint32 rxframe40mhz; + uint32 psmpgrantdur; + uint32 psmpuseddur; +} rrm_stat_group_13_t; + +/* RD Dual CTS etc, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_14 36 +typedef struct rrm_stat_group_14 { + uint32 grantrdgused; + uint32 grantrdgunused; + uint32 txframeingrantrdg; + uint32 txbyteingrantrdg_h; + uint32 txbyteingrantrdg_l; + uint32 dualcts; + uint32 
dualctsfail; + uint32 rtslsi; + uint32 rtslsifail; +} rrm_stat_group_14_t; + +/* bf and STBC etc, 20 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_15 20 +typedef struct rrm_stat_group_15 { + uint32 bfframe; + uint32 stbccts; + uint32 stbcctsfail; + uint32 nonstbccts; + uint32 nonstbcctsfail; +} rrm_stat_group_15_t; + +/* RSNA, 28 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_16 28 +typedef struct rrm_stat_group_16 { + uint32 rsnacmacicverr; + uint32 rsnacmacreplay; + uint32 rsnarobustmgmtccmpreplay; + uint32 rsnatkipicverr; + uint32 rsnatkipicvreplay; + uint32 rsnaccmpdecrypterr; + uint32 rsnaccmpreplay; +} rrm_stat_group_16_t; + +/* Transmit stream/category measurement request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream { + uint8 id; /* use dot11_rm_ie_t ? */ + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 interval; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 bin0_range; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t; +#define DOT11_RMREQ_TXSTREAM_LEN 17 + +/** Transmit stream/category measurement report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream { + uint32 starttime[2]; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 reason; + uint32 txmsdu_cnt; + uint32 msdu_discarded_cnt; + uint32 msdufailed_cnt; + uint32 msduretry_cnt; + uint32 cfpolls_lost_cnt; + uint32 avrqueue_delay; + uint32 avrtx_delay; + uint8 bin0_range; + uint32 bin0; + uint32 bin1; + uint32 bin2; + uint32 bin3; + uint32 bin4; + uint32 bin5; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t; +#define DOT11_RMREP_TXSTREAM_LEN 71 + +typedef struct rrm_tscm { + uint32 msdu_tx; + uint32 msdu_exp; + uint32 msdu_fail; + uint32 msdu_retries; + uint32 cfpolls_lost; + uint32 queue_delay; + uint32 tx_delay_sum; + uint32 tx_delay_cnt; + uint32 bin0_range_us; + uint32 bin0; + uint32 bin1; + uint32 bin2; + uint32 bin3; + uint32 bin4; + uint32 bin5; +} rrm_tscm_t; +enum { + DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */ + DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? */ + DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2 /* Where is he/she? */ +}; + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + + /* Following 3 fields are unused. Keep for ROM compatibility. 
*/ + uint8 lat_res; + uint8 lon_res; + uint8 alt_res; + + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t; +#define DOT11_RMREQ_LCI_LEN 9 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 lci_sub_id; + uint8 lci_sub_len; + /* optional LCI field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t; + +#define DOT11_FTM_LCI_SUBELEM_ID 0 +#define DOT11_FTM_LCI_SUBELEM_LEN 2 +#define DOT11_FTM_LCI_FIELD_LEN 16 +#define DOT11_FTM_LCI_UNKNOWN_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 civloc_type; + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t; +#define DOT11_RMREQ_CIVIC_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 civloc_type; + uint8 civloc_sub_id; + uint8 civloc_sub_len; + /* optional location civic field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t; + +#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776 0 +#define DOT11_FTM_CIVIC_SUBELEM_ID 0 +#define DOT11_FTM_CIVIC_SUBELEM_LEN 2 +#define DOT11_FTM_CIVIC_LOC_SI_NONE 0 +#define DOT11_FTM_CIVIC_TYPE_LEN 1 +#define DOT11_FTM_CIVIC_UNKNOWN_LEN 3 + +/* Location Identifier measurement request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_locid { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 siu; + uint16 si; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_locid dot11_rmreq_locid_t; +#define DOT11_RMREQ_LOCID_LEN 9 + +/* Location Identifier measurement report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_locid { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 exp_tsf[8]; + uint8 locid_sub_id; + uint8 locid_sub_len; + /* optional location identifier field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_locid dot11_rmrep_locid_t; +#define DOT11_LOCID_UNKNOWN_LEN 10 +#define DOT11_LOCID_SUBELEM_ID 0 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel { + uint8 id; + uint8 len; + uint16 max_age; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t; +#define DOT11_FTM_RANGE_SUBELEM_ID 4 +#define DOT11_FTM_RANGE_SUBELEM_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + /* neighbor report sub-elements */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t; +#define DOT11_RMREQ_FTM_RANGE_LEN 8 + +#define DOT11_FTM_RANGE_LEN 3 +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 range[DOT11_FTM_RANGE_LEN]; + uint8 max_err[DOT11_FTM_RANGE_LEN]; + uint8 rsvd; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t; +#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15 + +enum { + DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 2, + 
DOT11_FTM_RANGE_ERROR_AP_FAILED = 3, + DOT11_FTM_RANGE_ERROR_TX_FAILED = 8, + DOT11_FTM_RANGE_ERROR_MAX +}; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 code; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t; +#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT 11 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 entry_count; + uint8 data[2]; /* includes pad */ + /* + dot11_ftm_range_entry_t entries[entry_count]; + uint8 error_count; + dot11_ftm_error_entry_t errors[error_count]; + */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t; + +#define DOT11_FTM_RANGE_REP_MIN_LEN 6 /* No extra byte for error_count */ +#define DOT11_FTM_RANGE_ENTRY_CNT_MAX 15 +#define DOT11_FTM_RANGE_ERROR_CNT_MAX 11 +#define DOT11_FTM_RANGE_REP_FIXED_LEN 1 /* No extra byte for error_count */ +/** Measurement pause request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time { + uint8 id; /* use dot11_rm_ie_t ? */ + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 pause_time; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t; +#define DOT11_RMREQ_PAUSE_LEN 7 + +/* Neighbor Report subelements ID (11k & 11v) */ +#define DOT11_NGBR_TSF_INFO_SE_ID 1 +#define DOT11_NGBR_CCS_SE_ID 2 +#define DOT11_NGBR_BSSTRANS_PREF_SE_ID 3 +#define DOT11_NGBR_BSS_TERM_DUR_SE_ID 4 +#define DOT11_NGBR_BEARING_SE_ID 5 +#define DOT11_NGBR_WIDE_BW_CHAN_SE_ID 6 /* proposed */ + +/** Neighbor Report, BSS Transition Candidate Preference subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se { + uint8 sub_id; + uint8 len; + uint8 preference; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t; +#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1 +#define DOT11_NGBR_BSSTRANS_PREF_SE_IE_LEN 3 +#define DOT11_NGBR_BSSTRANS_PREF_SE_HIGHEST 0xff + +/** Neighbor Report, BSS Termination Duration subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se { + uint8 sub_id; + uint8 len; + uint8 tsf[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t; +#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN 10 + +/* Neighbor Report BSSID Information Field */ +#define DOT11_NGBR_BI_REACHABILTY_UNKN 0x0002 +#define DOT11_NGBR_BI_REACHABILTY 0x0003 +#define DOT11_NGBR_BI_SEC 0x0004 +#define DOT11_NGBR_BI_KEY_SCOPE 0x0008 +#define DOT11_NGBR_BI_CAP 0x03f0 +#define DOT11_NGBR_BI_CAP_SPEC_MGMT 0x0010 +#define DOT11_NGBR_BI_CAP_QOS 0x0020 +#define DOT11_NGBR_BI_CAP_APSD 0x0040 +#define DOT11_NGBR_BI_CAP_RDIO_MSMT 0x0080 +#define DOT11_NGBR_BI_CAP_DEL_BA 0x0100 +#define DOT11_NGBR_BI_CAP_IMM_BA 0x0200 +#define DOT11_NGBR_BI_MOBILITY 0x0400 +#define DOT11_NGBR_BI_HT 0x0800 +#define DOT11_NGBR_BI_VHT 0x1000 +#define DOT11_NGBR_BI_FTM 0x2000 + +/** Neighbor Report element (11k & 11v) */ +BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; /* Operating class */ + uint8 channel; + uint8 phytype; + uint8 data[1]; /* Variable size subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t; +#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13u + +/* MLME Enumerations */ +#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 
infrastructure */ +#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */ +#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */ +#define DOT11_BSSTYPE_MESH 3 /* d11 Mesh */ +#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */ +#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */ + +/** Link Measurement */ +BWL_PRE_PACKED_STRUCT struct dot11_lmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 txpwr; /* Transmit Power Used */ + uint8 maxtxpwr; /* Max Transmit Power */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmreq dot11_lmreq_t; +#define DOT11_LMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_lmrep { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + dot11_tpc_rep_t tpc; /* TPC element */ + uint8 rxant; /* Receive Antenna ID */ + uint8 txant; /* Transmit Antenna ID */ + uint8 rcpi; /* RCPI */ + uint8 rsni; /* RSNI */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmrep dot11_lmrep_t; +#define DOT11_LMREP_LEN 11 + +#define DOT11_MP_CAP_SPECTRUM 0x01 /* d11 cap. spectrum */ +#define DOT11_MP_CAP_SHORTSLOT 0x02 /* d11 cap. shortslot */ +/* Measurement Pilot */ +BWL_PRE_PACKED_STRUCT struct dot11_mprep { + uint8 cap_info; /* Condensed capability Info. */ + uint8 country[2]; /* Condensed country string */ + uint8 opclass; /* Op. Class */ + uint8 channel; /* Channel */ + uint8 mp_interval; /* Measurement Pilot Interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mprep dot11_mprep_t; +#define DOT11_MPREP_LEN 6 + +/* 802.11 BRCM "Compromise" Pre N constants */ +#define PREN_PREAMBLE 24 /* green field preamble time */ +#define PREN_MM_EXT 12 /* extra mixed mode preamble time */ +#define PREN_PREAMBLE_EXT 4 /* extra preamble (multiply by unique_streams-1) */ + +/* 802.11N PHY constants */ +#define RIFS_11N_TIME 2 /* NPHY RIFS time */ + +/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3 + * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2 + */ +/* HT-SIG1 */ +#define HT_SIG1_MCS_MASK 0x00007F +#define HT_SIG1_CBW 0x000080 +#define HT_SIG1_HT_LENGTH 0xFFFF00 + +/* HT-SIG2 */ +#define HT_SIG2_SMOOTHING 0x000001 +#define HT_SIG2_NOT_SOUNDING 0x000002 +#define HT_SIG2_RESERVED 0x000004 +#define HT_SIG2_AGGREGATION 0x000008 +#define HT_SIG2_STBC_MASK 0x000030 +#define HT_SIG2_STBC_SHIFT 4 +#define HT_SIG2_FEC_CODING 0x000040 +#define HT_SIG2_SHORT_GI 0x000080 +#define HT_SIG2_ESS_MASK 0x000300 +#define HT_SIG2_ESS_SHIFT 8 +#define HT_SIG2_CRC 0x03FC00 +#define HT_SIG2_TAIL 0x1C0000 + +/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */ +#define HT_T_LEG_PREAMBLE 16 +#define HT_T_L_SIG 4 +#define HT_T_SIG 8 +#define HT_T_LTF1 4 +#define HT_T_GF_LTF1 8 +#define HT_T_LTFs 4 +#define HT_T_STF 4 +#define HT_T_GF_STF 8 +#define HT_T_SYML 4 + +#define HT_N_SERVICE 16 /* bits in SERVICE field */ +#define HT_N_TAIL 6 /* tail bits per BCC encoder */ + +/* 802.11 A PHY constants */ +#define APHY_SLOT_TIME 9 /* APHY slot time */ +#define APHY_SIFS_TIME 16 /* APHY SIFS time */ +#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) /* APHY DIFS time */ +#define APHY_PREAMBLE_TIME 16 /* APHY preamble time */ +#define APHY_SIGNAL_TIME 4 /* APHY signal time */ +#define APHY_SYMBOL_TIME 4 /* APHY symbol time */ +#define APHY_SERVICE_NBITS 16 /* APHY service nbits */ +#define APHY_TAIL_NBITS 6 /* APHY tail nbits */ +#define APHY_CWMIN 15 /* APHY cwmin */ +#define APHY_PHYHDR_DUR 20 /* APHY 
PHY Header Duration */ + +/* 802.11 B PHY constants */ +#define BPHY_SLOT_TIME 20 /* BPHY slot time */ +#define BPHY_SIFS_TIME 10 /* BPHY SIFS time */ +#define BPHY_DIFS_TIME 50 /* BPHY DIFS time */ +#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */ +#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */ +#define BPHY_CWMIN 31 /* BPHY cwmin */ +#define BPHY_SHORT_PHYHDR_DUR 96 /* BPHY Short PHY Header Duration */ +#define BPHY_LONG_PHYHDR_DUR 192 /* BPHY Long PHY Header Duration */ + +/* 802.11 G constants */ +#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */ + +#define PHY_CWMAX 1023 /* PHY cwmax */ + +#define DOT11_MAXNUMFRAGS 16 /* max # fragments per MSDU */ + +/* 802.11 VHT constants */ + +typedef int vht_group_id_t; + +/* for VHT-A1 */ +/* SIG-A1 reserved bits */ +#define VHT_SIGA1_CONST_MASK 0x800004 + +#define VHT_SIGA1_BW_MASK 0x000003 +#define VHT_SIGA1_20MHZ_VAL 0x000000 +#define VHT_SIGA1_40MHZ_VAL 0x000001 +#define VHT_SIGA1_80MHZ_VAL 0x000002 +#define VHT_SIGA1_160MHZ_VAL 0x000003 + +#define VHT_SIGA1_STBC 0x000008 + +#define VHT_SIGA1_GID_MASK 0x0003f0 +#define VHT_SIGA1_GID_SHIFT 4 +#define VHT_SIGA1_GID_TO_AP 0x00 +#define VHT_SIGA1_GID_NOT_TO_AP 0x3f +#define VHT_SIGA1_GID_MAX_GID 0x3f + +#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00 +#define VHT_SIGA1_NSTS_SHIFT 10 +#define VHT_SIGA1_MAX_USERPOS 3 + +#define VHT_SIGA1_PARTIAL_AID_MASK 0x3fe000 +#define VHT_SIGA1_PARTIAL_AID_SHIFT 13 + +#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED 0x400000 + +/* for VHT-A2 */ +#define VHT_SIGA2_GI_NONE 0x000000 +#define VHT_SIGA2_GI_SHORT 0x000001 +#define VHT_SIGA2_GI_W_MOD10 0x000002 +#define VHT_SIGA2_CODING_LDPC 0x000004 +#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM 0x000008 +#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100 +#define VHT_SIGA2_MCS_SHIFT 4 + +#define VHT_SIGA2_B9_RESERVED 0x000200 +#define VHT_SIGA2_TAIL_MASK 0xfc0000 +#define VHT_SIGA2_TAIL_VALUE 0x000000 + +/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */ +#define VHT_T_LEG_PREAMBLE 16 +#define VHT_T_L_SIG 4 +#define VHT_T_SIG_A 8 +#define VHT_T_LTF 4 +#define VHT_T_STF 4 +#define VHT_T_SIG_B 4 +#define VHT_T_SYML 4 + +#define VHT_N_SERVICE 16 /* bits in SERVICE field */ +#define VHT_N_TAIL 6 /* tail bits per BCC encoder */ + +/** dot11Counters Table - 802.11 spec., Annex D */ +typedef struct d11cnt { + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount */ + uint32 rxundec; /* dot11WEPUndecryptableCount */ +} d11cnt_t; + +/* OUI for BRCM proprietary IE */ +#define BRCM_PROP_OUI "\x00\x90\x4C" /* Broadcom proprietary OUI */ + +/* Broadcom Proprietary OUI type list. Please update below page when adding a new type. + * Twiki http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlBrcmPropIE + */ +/* The following BRCM_PROP_OUI types are currently in use (defined in + * relevant subsections). 
Each of them will be in a separate proprietary(221) IE + * #define RWL_WIFI_DEFAULT 0 + * #define SES_VNDR_IE_TYPE 1 (defined in src/ses/shared/ses.h) + * #define VHT_FEATURES_IE_TYPE 4 + * #define RWL_WIFI_FIND_MY_PEER 9 + * #define RWL_WIFI_FOUND_PEER 10 + * #define PROXD_IE_TYPE 11 + */ + +#define BRCM_FTM_IE_TYPE 14 + +/* #define HT_CAP_IE_TYPE 51 + * #define HT_ADD_IE_TYPE 52 + * #define BRCM_EXTCH_IE_TYPE 53 + * #define MEMBER_OF_BRCM_PROP_IE_TYPE 54 + * #define BRCM_RELMACST_IE_TYPE 55 + * #define BRCM_EVT_WL_BSS_INFO 64 + * #define RWL_ACTION_WIFI_FRAG_TYPE 85 + * #define BTC_INFO_BRCM_PROP_IE_TYPE 90 + * #define ULB_BRCM_PROP_IE_TYPE 91 + * #define SDB_BRCM_PROP_IE_TYPE 92 + */ + +/* Action frame type for RWL */ +#define RWL_WIFI_DEFAULT 0 +#define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding server */ +#define RWL_WIFI_FOUND_PEER 10 /* Server response to the client */ +#define RWL_ACTION_WIFI_FRAG_TYPE 85 /* Fragment indicator for receiver */ + +#define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */ +#define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */ + +/* Action frame type for FTM Initiator Report */ +#define BRCM_FTM_VS_AF_TYPE 14 +enum { + BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */ + BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ +}; + +/* Action frame type for vendor specific action frames */ +#define VS_AF_TYPE 221 + +#ifdef WL_VS_AFTX +/* Vendor specific action frame subtype for transmit using SU EDCA */ +#define VS_AF_SUBTYPE_SUEDCA 1 + +#define VENDOR_PROP_OUI "\x00\x17\xF2" +#endif /* WL_VS_AFTX */ + +/* + * This BRCM_PROP_OUI types is intended for use in events to embed additional + * data, and would not be expected to appear on the air -- but having an IE + * format allows IE frame data with extra data in events in that allows for + * more flexible parsing. + */ +#define BRCM_EVT_WL_BSS_INFO 64 + +/** + * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI). 
+ * DPT uses this format with type set to DPT_IE_TYPE + */ +BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ + uint8 type; /* type of this IE */ + uint16 cap; /* DPT capabilities */ +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_prop_ie_s brcm_prop_ie_t; + +#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */ + +#define DPT_IE_TYPE 2 + +#define BRCM_SYSCAP_IE_TYPE 3 +#define WET_TUNNEL_IE_TYPE 3 + +/* brcm syscap_ie cap */ +#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */ + +/* BRCM OUI: Used in the proprietary(221) IE in all broadcom devices */ +#define BRCM_OUI "\x00\x10\x18" /* Broadcom OUI */ + +/** BRCM info element */ +BWL_PRE_PACKED_STRUCT struct brcm_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; /* Proprietary OUI, BRCM_OUI */ + uint8 ver; /* type/ver of this IE */ + uint8 assoc; /* # of assoc STAs */ + uint8 flags; /* misc flags */ + uint8 flags1; /* misc flags */ + uint16 amsdu_mtu_pref; /* preferred A-MSDU MTU */ + uint8 flags2; /* Bit 0: DTPC TX cap, Bit 1: DTPC Recv Cap */ +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_ie brcm_ie_t; +#define BRCM_IE_LEN 12u /* BRCM IE length */ +#define BRCM_IE_VER 2u /* BRCM IE version */ +#define BRCM_IE_LEGACY_AES_VER 1u /* BRCM IE legacy AES version */ + +/* brcm_ie flags */ +#define BRF_ABCAP 0x1 /* afterburner is obsolete, defined for backward compat */ +#define BRF_ABRQRD 0x2 /* afterburner is obsolete, defined for backward compat */ +#define BRF_LZWDS 0x4 /* lazy wds enabled */ +#define BRF_BLOCKACK 0x8 /* BlockACK capable */ +#define BRF_ABCOUNTER_MASK 0xf0 /* afterburner is obsolete, defined for backward compat */ +#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */ +#define BRF_MEDIA_CLIENT 0x20 /* re-use afterburner bit to indicate media client device */ + +/** + * Support for Broadcom proprietary HT MCS rates. Re-uses afterburner bits since + * afterburner is not used anymore. Checks for BRF_ABCAP to stay compliant with 'old' + * images in the field. 
+ */
+#define GET_BRF_PROP_11N_MCS(brcm_ie) \
+	(!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
+
+/* brcm_ie flags1 */
+#define BRF1_AMSDU		0x1	/* A-MSDU capable */
+#define BRF1_WNM		0x2	/* WNM capable */
+#define BRF1_WMEPS		0x4	/* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX		0x8	/* AP has fixed PS mode out-of-order packets */
+#define BRF1_RX_LARGE_AGG	0x10	/* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS	0x20	/* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP		0x40	/* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS		0x80	/* DWDS capable */
+
+/* brcm_ie flags2 */
+#define BRF2_DTPC_TX		0x1u	/* DTPC: DTPC TX Cap */
+#define BRF2_DTPC_RX		0x2u	/* DTPC: DTPC RX Cap */
+#define BRF2_DTPC_TX_RX		0x3u	/* DTPC: Enable Both DTPC TX and RX Cap */
+
+/** Vendor IE structure */
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+	uchar id;
+	uchar len;
+	uchar oui [3];
+	uchar data [1];			/* Variable size data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN		2u	/* id + len field */
+#define VNDR_IE_MIN_LEN		3u	/* size of the oui field */
+#define VNDR_IE_FIXED_LEN	(VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
+
+#define VNDR_IE_MAX_LEN		255u	/* vendor IE max length, without ID and len */
+
+/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
+BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
+	uchar id;
+	uchar len;
+	uchar oui[3];
+	uint8 type;			/* type indicates what follows */
+	struct ether_addr ea;		/* Device Primary MAC Address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
+
+#define MEMBER_OF_BRCM_PROP_IE_LEN	10	/* IE max length */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN	(sizeof(member_of_brcm_prop_ie_t))
+#define MEMBER_OF_BRCM_PROP_IE_TYPE	54	/* used in prop IE 221 only */
+
+/** BRCM Reliable Multicast IE */
+BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
+	uint8 id;
+	uint8 len;
+	uint8 oui[3];
+	uint8 type;			/* type indicates what follows */
+	struct ether_addr ea;		/* The ack sender's MAC Address */
+	struct ether_addr mcast_ea;	/* The multicast MAC address */
+	uint8 updtmo;	/* time interval (seconds) for client to send null packet to report its rssi */
+} BWL_POST_PACKED_STRUCT;
+typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
+
+/* IE length */
+/* BRCM_PROP_IE_LEN = sizeof(relmcast_brcm_prop_ie_t)-((sizeof (id) + sizeof (len)))? */
+#define RELMCAST_BRCM_PROP_IE_LEN	(sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8)))
+
+#define RELMCAST_BRCM_PROP_IE_TYPE	55	/* used in prop IE 221 only */
+
+/* BRCM BTC IE */
+BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie {
+	uint8 id;
+	uint8 len;
+	uint8 oui[3];
+	uint8 type;			/* type indicates what follows */
+	uint32 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t;
+
+#define BTC_INFO_BRCM_PROP_IE_TYPE	90
+#define BRCM_BTC_INFO_TYPE_LEN	(sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8)))
+
+/* ************* HT definitions.
************* */ +#define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */ +#define MAX_MCS_NUM (128) /* max mcs number = 128 */ +#define BASIC_HT_MCS 0xFFu /* HT MCS supported rates */ + +BWL_PRE_PACKED_STRUCT struct ht_cap_ie { + uint16 cap; + uint8 params; + uint8 supp_mcs[MCSSET_LEN]; + uint16 ext_htcap; + uint32 txbf_cap; + uint8 as_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_cap_ie ht_cap_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie { + uint8 id; + uint8 len; + ht_cap_ie_t ht_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t; + +/* CAP IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. IE until this is resolved */ +/* the capability IE is primarily used to convey this nodes abilities */ +BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ + uint8 type; /* type indicates what follows */ + ht_cap_ie_t cap_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; + +#define HT_PROP_IE_OVERHEAD 4 /* overhead bytes for prop oui ie */ +#define HT_CAP_IE_LEN 26 /* HT capability len (based on .11n d2.0) */ +#define HT_CAP_IE_TYPE 51 /* used in prop IE 221 only */ + +#define HT_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */ +#define HT_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */ +#define HT_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */ +#define HT_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */ +#define HT_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */ +#define HT_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */ +#define HT_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */ +#define HT_CAP_GF 0x0010 /* Greenfield preamble support */ +#define HT_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */ +#define HT_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */ +#define HT_CAP_TX_STBC 0x0080 /* Tx STBC support */ +#define HT_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */ +#define HT_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */ +#define HT_CAP_DELAYED_BA 0x0400 /* delayed BA support */ +#define HT_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */ + +#define HT_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */ +#define HT_CAP_PSMP 0x2000 /* Power Save Multi Poll support */ +#define HT_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */ +#define HT_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */ + +#define HT_CAP_RX_STBC_NO 0x0 /* no rx STBC support */ +#define HT_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */ +#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */ +#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */ + +#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX 0x1 +#define HT_CAP_TXBF_CAP_NDP_RX 0x8 +#define HT_CAP_TXBF_CAP_NDP_TX 0x10 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI 0x100 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING 0x200 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING 0x400 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK 0x1800 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT 11 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK 0x6000 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT 13 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK 0x18000 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT 15 +#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT 19 +#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT 21 +#define 
HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT 23 +#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK 0x1800000 + +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT 27 +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK 0x18000000 + +#define HT_CAP_TXBF_FB_TYPE_NONE 0 +#define HT_CAP_TXBF_FB_TYPE_DELAYED 1 +#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 2 +#define HT_CAP_TXBF_FB_TYPE_BOTH 3 + +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK 0x400 +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT 10 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15 + +#define HT_CAP_MCS_FLAGS_SUPP_BYTE 12 /* byte offset in HT Cap Supported MCS for various flags */ +#define HT_CAP_MCS_RX_8TO15_BYTE_OFFSET 1 +#define HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL 0x02 +#define HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK 0x0C + +#define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */ +#define VHT_MPDU_MSDU_DELTA 56 /* Difference in spec - vht mpdu, amsdu len */ +/* Max AMSDU len - per spec */ +#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA) + +#define HT_MAX_AMSDU 7935 /* max amsdu size (bytes) per the HT spec */ +#define HT_MIN_AMSDU 3835 /* min amsdu size (bytes) per the HT spec */ + +#define HT_PARAMS_RX_FACTOR_MASK 0x03 /* ampdu rcv factor mask */ +#define HT_PARAMS_DENSITY_MASK 0x1C /* ampdu density mask */ +#define HT_PARAMS_DENSITY_SHIFT 2 /* ampdu density shift */ + +/* HT/AMPDU specific define */ +#define AMPDU_MAX_MPDU_DENSITY 7 /* max mpdu density; in 1/4 usec units */ +#define AMPDU_DENSITY_NONE 0 /* No density requirement */ +#define AMPDU_DENSITY_1over4_US 1 /* 1/4 us density */ +#define AMPDU_DENSITY_1over2_US 2 /* 1/2 us density */ +#define AMPDU_DENSITY_1_US 3 /* 1 us density */ +#define AMPDU_DENSITY_2_US 4 /* 2 us density */ +#define AMPDU_DENSITY_4_US 5 /* 4 us density */ +#define AMPDU_DENSITY_8_US 6 /* 8 us density */ +#define AMPDU_DENSITY_16_US 7 /* 16 us density */ +#define AMPDU_RX_FACTOR_8K 0 /* max rcv ampdu len (8kb) */ +#define AMPDU_RX_FACTOR_16K 1 /* max rcv ampdu len (16kb) */ +#define AMPDU_RX_FACTOR_32K 2 /* max rcv ampdu len (32kb) */ +#define AMPDU_RX_FACTOR_64K 3 /* max rcv ampdu len (64kb) */ + +/* AMPDU RX factors for VHT rates */ +#define AMPDU_RX_FACTOR_128K 4 /* max rcv ampdu len (128kb) */ +#define AMPDU_RX_FACTOR_256K 5 /* max rcv ampdu len (256kb) */ +#define AMPDU_RX_FACTOR_512K 6 /* max rcv ampdu len (512kb) */ +#define AMPDU_RX_FACTOR_1024K 7 /* max rcv ampdu len (1024kb) */ + +#define AMPDU_RX_FACTOR_BASE 8*1024 /* ampdu factor base for rx len */ +#define AMPDU_RX_FACTOR_BASE_PWR 13 /* ampdu factor base for rx len in power of 2 */ + +#define AMPDU_DELIMITER_LEN 4u /* length of ampdu delimiter */ +#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */ + +#define HT_CAP_EXT_PCO 0x0001 +#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006 +#define HT_CAP_EXT_PCO_TTIME_SHIFT 1 +#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300 +#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8 +#define HT_CAP_EXT_HTC 0x0400 +#define HT_CAP_EXT_RD_RESP 0x0800 + +/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */ +BWL_PRE_PACKED_STRUCT struct ht_add_ie { + uint8 ctl_ch; /* control channel number */ + uint8 byte1; /* ext ch,rec. ch. width, RIFS support */ + uint16 opmode; /* operation mode */ + uint16 misc_bits; /* misc bits */ + uint8 basic_mcs[MCSSET_LEN]; /* required MCS set */ +} BWL_POST_PACKED_STRUCT; +typedef struct ht_add_ie ht_add_ie_t; + +/* ADD IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. 
IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+	uint8 id;			/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8 len;			/* IE length */
+	uint8 oui[3];			/* Proprietary OUI, BRCM_PROP_OUI */
+	uint8 type;			/* indicates what follows */
+	ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN	22		/* HT capability len (based on .11n d1.0) */
+#define HT_ADD_IE_TYPE	52		/* faked out as current spec is illegal */
+
+/* byte1 defn's */
+#define HT_BW_ANY		0x04	/* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED	0x08	/* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK		0x0003	/* protection mode mask */
+#define HT_OPMODE_SHIFT		0	/* protection mode shift */
+#define HT_OPMODE_PURE		0x0000	/* protection mode PURE */
+#define HT_OPMODE_OPTIONAL	0x0001	/* protection mode optional */
+#define HT_OPMODE_HT20IN40	0x0002	/* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED		0x0003	/* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF		0x0004	/* protection mode non-GF */
+#define DOT11N_TXBURST		0x0008	/* Tx burst limit */
+#define DOT11N_OBSS_NONHT	0x0010	/* OBSS Non-HT STA present */
+#define HT_OPMODE_CCFS2_MASK	0x1fe0	/* Channel Center Frequency Segment 2 mask */
+#define HT_OPMODE_CCFS2_SHIFT	5	/* Channel Center Frequency Segment 2 shift */
+
+/* misc_bits defn's */
+#define HT_BASIC_STBC_MCS	0x007f	/* basic STBC MCS */
+#define HT_DUAL_STBC_PROT	0x0080	/* Dual STBC Protection */
+#define HT_SECOND_BCN		0x0100	/* Secondary beacon support */
+#define HT_LSIG_TXOP		0x0200	/* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE		0x0400	/* PCO active */
+#define HT_PCO_PHASE		0x0800	/* PCO phase */
+#define HT_DUALCTS_PROTECTION	0x0080	/* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT	6160	/* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT	3080	/* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
+#define GET_HT_OPMODE(add_ie)		((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					>> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_MIXED)	/* mixed mode present */
+#define HT_HT20_PRESENT(add_ie)		((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_HT20IN40)	/* 20MHz HT present */
+#define HT_OPTIONAL_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_OPTIONAL)	/* Optional protection present */
+#define HT_USE_PROTECTION(add_ie)	(HT_HT20_PRESENT((add_ie)) || \
+					HT_MIXEDMODE_PRESENT((add_ie)))	/* use protection */
+#define HT_NONGF_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+					== HT_OPMODE_NONGF)	/* non-GF present */
+#define DOT11N_TXBURST_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+					== DOT11N_TXBURST)	/* Tx Burst present */
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+					== DOT11N_OBSS_NONHT)	/* OBSS Non-HT present */
+#define HT_OPMODE_CCFS2_GET(add_ie)	((ltoh16_ua(&(add_ie)->opmode) & HT_OPMODE_CCFS2_MASK) \
+					>> HT_OPMODE_CCFS2_SHIFT)	/* get CCFS2 */
+#define HT_OPMODE_CCFS2_SET(add_ie, ccfs2)	do {	/* set CCFS2 */ \
+		(add_ie)->opmode &= htol16(~HT_OPMODE_CCFS2_MASK); \
+		(add_ie)->opmode |= htol16(((ccfs2) << HT_OPMODE_CCFS2_SHIFT) & HT_OPMODE_CCFS2_MASK); \
+} while (0)
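+
+/* Illustrative sketch (compiled out): typical use of the opmode macros above
+ * on a received HT Operation element. ht_bss_use_protection() is a
+ * hypothetical helper, not part of this driver; GET_HT_OPMODE() already
+ * applies HT_OPMODE_MASK/HT_OPMODE_SHIFT, so its result compares directly
+ * against the HT_OPMODE_* values.
+ */
+#if 0
+static bool
+ht_bss_use_protection(ht_add_ie_t *add_ie)
+{
+	switch (GET_HT_OPMODE(add_ie)) {
+	case HT_OPMODE_MIXED:		/* non-HT STAs are associated */
+	case HT_OPMODE_HT20IN40:	/* 20MHz-only HT STAs in a 40MHz BSS */
+		return TRUE;
+	case HT_OPMODE_PURE:
+	case HT_OPMODE_OPTIONAL:
+	default:
+		return FALSE;
+	}
+}
+#endif /* illustrative example */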
+
+/* Macros for HT MCS field access */
+#define HT_CAP_MCS_BITMASK(supp_mcs) \
+	((supp_mcs)[HT_CAP_MCS_RX_8TO15_BYTE_OFFSET])
+#define HT_CAP_MCS_TX_RX_UNEQUAL(supp_mcs) \
+	((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL)
+#define HT_CAP_MCS_TX_STREAM_SUPPORT(supp_mcs) \
+	((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK)
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+	uint16 passive_dwell;
+	uint16 active_dwell;
+	uint16 bss_widthscan_interval;
+	uint16 passive_total;
+	uint16 active_total;
+	uint16 chanwidth_transition_dly;
+	uint16 activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+	uint8 id;
+	uint8 len;
+	obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN	sizeof(obss_params_t)	/* HT OBSS len (based on 802.11n d3.0) */
+
+/* HT control field */
+#define HT_CTRL_LA_TRQ		0x00000002	/* sounding request */
+#define HT_CTRL_LA_MAI		0x0000003C	/* MCS request or antenna selection indication */
+#define HT_CTRL_LA_MAI_SHIFT	2
+#define HT_CTRL_LA_MAI_MRQ	0x00000004	/* MCS request */
+#define HT_CTRL_LA_MAI_MSI	0x00000038	/* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI		0x000001C0	/* MFB sequence identifier */
+#define HT_CTRL_LA_MFSI_SHIFT	6
+#define HT_CTRL_LA_MFB_ASELC	0x0000FE00	/* MCS feedback, antenna selection command/data */
+#define HT_CTRL_LA_MFB_ASELC_SH	9
+#define HT_CTRL_LA_ASELC_CMD	0x00000C00	/* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA	0x0000F000	/* ASEL data */
+#define HT_CTRL_CAL_POS		0x00030000	/* Calibration position */
+#define HT_CTRL_CAL_SEQ		0x000C0000	/* Calibration sequence */
+#define HT_CTRL_CSI_STEERING	0x00C00000	/* CSI/Steering */
+#define HT_CTRL_CSI_STEER_SHIFT	22
+#define HT_CTRL_CSI_STEER_NFB	0		/* no feedback required */
+#define HT_CTRL_CSI_STEER_CSI	1		/* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM	2		/* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM	3		/* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE	0x01000000	/* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT	0x40000000	/* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU	0x80000000	/* RDG/More PPDU */
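+
+/* Illustrative sketch (compiled out): extracting the 2-bit CSI/Steering
+ * subfield from a host-endian +HTC (HT Control) word with the masks above.
+ * htc_csi_steering() is a hypothetical helper, not part of this driver.
+ */
+#if 0
+static uint8
+htc_csi_steering(uint32 htc)
+{
+	/* result is one of HT_CTRL_CSI_STEER_NFB/CSI/NCOM/COM (0..3) */
+	return (uint8)((htc & HT_CTRL_CSI_STEERING) >> HT_CTRL_CSI_STEER_SHIFT);
+}
+#endif /* illustrative example */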
+/* ************* VHT definitions. ************* */
+
+/**
+ * VHT Capabilities IE (sec 8.4.2.160)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+	uint32 vht_cap_info;
+	/* supported MCS set - 64 bit field */
+	uint16 rx_mcs_map;
+	uint16 rx_max_rate;
+	uint16 tx_mcs_map;
+	uint16 tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+/* 4B cap_info + 8B supp_mcs */
+#define VHT_CAP_IE_LEN 12
+
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK		0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK	0x0000000c
+#define VHT_CAP_INFO_LDPC			0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ			0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ			0x00000040
+#define VHT_CAP_INFO_TX_STBC			0x00000080
+#define VHT_CAP_INFO_RX_STBC_MASK		0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT		8u
+#define VHT_CAP_INFO_SU_BEAMFMR			0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE		0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK		0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT	13u
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK	0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT	16u
+#define VHT_CAP_INFO_MU_BEAMFMR			0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE		0x00100000
+#define VHT_CAP_INFO_TXOPPS			0x00200000
+#define VHT_CAP_INFO_HTCVHT			0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK	0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT	23u
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK	0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT	26u
+#define VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK	0xc0000000
+#define VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT	30u
+
+/* get Extended NSS BW Support passing vht cap info */
+#define VHT_CAP_EXT_NSS_BW_SUP(cap_info) \
+	(((cap_info) & VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK) >> VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT)
+
+/* VHT CAP INFO extended NSS BW support - refer to IEEE 802.11 REVmc D8.0 Figure 9-559 */
+#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160	1 /* 160MHz at half NSS CAP */
+#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160_80P80	2 /* 160 & 80p80 MHz at half NSS CAP */
+
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK	0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT	0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK	0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT	0
+
+/* defines for field(s) in vht_cap_ie->rx_max_rate */
+#define VHT_CAP_MAX_NSTS_MASK		0xe000
+#define VHT_CAP_MAX_NSTS_SHIFT		13
+
+/* defines for field(s) in vht_cap_ie->tx_max_rate */
+#define VHT_CAP_EXT_NSS_BW_CAP		0x2000
+
+#define VHT_CAP_MCS_MAP_0_7		0
+#define VHT_CAP_MCS_MAP_0_8		1
+#define VHT_CAP_MCS_MAP_0_9		2
+#define VHT_CAP_MCS_MAP_NONE		3
+#define VHT_CAP_MCS_MAP_S		2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M		0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
+#define VHT_CAP_MCS_MAP_NONE_ALL	0xffff
+
+/* VHT rates bitmap */
+#define VHT_CAP_MCS_0_7_RATEMAP		0x00ff
+#define VHT_CAP_MCS_0_8_RATEMAP		0x01ff
+#define VHT_CAP_MCS_0_9_RATEMAP		0x03ff
+#define VHT_CAP_MCS_FULL_RATEMAP	VHT_CAP_MCS_0_9_RATEMAP
+
+#define VHT_PROP_MCS_MAP_10_11		0
+#define VHT_PROP_MCS_MAP_UNUSED1	1
+#define VHT_PROP_MCS_MAP_UNUSED2	2
+#define VHT_PROP_MCS_MAP_NONE		3
+#define VHT_PROP_MCS_MAP_NONE_ALL	0xffff
+
+/* VHT prop rates bitmap */
+#define VHT_PROP_MCS_10_11_RATEMAP	0x0c00
+#define VHT_PROP_MCS_FULL_RATEMAP	VHT_PROP_MCS_10_11_RATEMAP
+
+#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3)
+/* remove after moving define to wlc_rate.h */
+/* mcsmap with MCS0-9 for Nss = 3 */
+#define VHT_CAP_MCS_MAP_0_9_NSS3 \
+	((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
+	(VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
+	(VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */
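+
+/* Illustrative sketch (compiled out): reading two of the vht_cap_info
+ * bit-fields defined above from a host-endian 32-bit word. Both helpers are
+ * hypothetical, not part of this driver.
+ */
+#if 0
+static uint8
+vht_cap_rx_stbc_nss(uint32 vht_cap_info)
+{
+	/* 0 = no Rx STBC; 1..4 = max spatial streams receivable with STBC */
+	return (uint8)((vht_cap_info & VHT_CAP_INFO_RX_STBC_MASK) >>
+		VHT_CAP_INFO_RX_STBC_SHIFT);
+}
+
+static uint32
+vht_cap_max_ampdu_len(uint32 vht_cap_info)
+{
+	/* max A-MPDU length is 2^(13 + exponent) - 1 bytes, exponent = 0..7 */
+	uint32 exp = (vht_cap_info & VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK) >>
+		VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT;
+	return (1u << (13u + exp)) - 1u;
+}
+#endif /* illustrative example */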
+#define VHT_CAP_MCS_MAP_NSS_MAX	8
+
+/* get mcsmap with given mcs for given nss streams */
+#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
+	do { \
+		int i; \
+		for (i = 1; i <= nss; i++) { \
+			VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \
+		} \
+	} while (0)
+
+/* Map the mcs code to mcs bit map */
+#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
+	((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \
+	(mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \
+	(mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0)
+
+/* Map the proprietary mcs code to proprietary mcs bitmap */
+#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \
+	((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0)
+
+/* Map the mcs bit map to mcs code */
+#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
+	((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \
+	(mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \
+	(mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+/* Map the proprietary mcs map to proprietary mcs code */
+#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \
+	(((mcs_map & 0xc00) == 0xc00) ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE)
+
+/** VHT Capabilities Supported Channel Width */
+typedef enum vht_cap_chan_width {
+	VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY	= 0x00,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160		= 0x04,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080	= 0x08
+} vht_cap_chan_width_t;
+
+/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
+typedef enum vht_cap_max_mpdu_len {
+	VHT_CAP_MPDU_MAX_4K	= 0x00,
+	VHT_CAP_MPDU_MAX_8K	= 0x01,
+	VHT_CAP_MPDU_MAX_11K	= 0x02
+} vht_cap_max_mpdu_len_t;
+
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
+#define VHT_MPDU_LIMIT_4K	3895
+#define VHT_MPDU_LIMIT_8K	7991
+#define VHT_MPDU_LIMIT_11K	11454
+
+/**
+ * VHT Operation IE (sec 8.4.2.161)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+	uint8 chan_width;
+	uint8 chan1;
+	uint8 chan2;
+	uint16 supp_mcs; /* same def as above in vht cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+/* 3B VHT Op info + 2B Basic MCS */
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+	VHT_OP_CHAN_WIDTH_20_40	= 0,
+	VHT_OP_CHAN_WIDTH_80	= 1,
+	VHT_OP_CHAN_WIDTH_160	= 2, /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
+	VHT_OP_CHAN_WIDTH_80_80	= 3 /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
+} vht_op_chan_width_t;
+
+#define VHT_OP_INFO_LEN	3
+
+/* AID length */
+#define AID_IE_LEN	2
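+
+/* Illustrative sketch (compiled out): the 2-bit MCS map codes and the per-MCS
+ * rate bitmaps above are two encodings of the same information, and the
+ * VHT_MCS_CODE_TO_MCS_MAP()/VHT_MCS_MAP_TO_MCS_CODE() macros convert between
+ * them. vht_mcs_code_roundtrip() is a hypothetical demonstration only.
+ */
+#if 0
+static uint16
+vht_mcs_code_roundtrip(void)
+{
+	/* VHT_CAP_MCS_MAP_0_9 (2) -> 0x03ff bitmap -> back to the code 2 */
+	uint16 ratemap = VHT_MCS_CODE_TO_MCS_MAP(VHT_CAP_MCS_MAP_0_9);
+	return (uint16)VHT_MCS_MAP_TO_MCS_CODE(ratemap);
+}
+#endif /* illustrative example */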
+/**
+ * BRCM vht features IE header
+ * The header is the fixed part of the IE
+ * On the 5GHz band this is the entire IE,
+ * on 2.4GHz the VHT IEs as defined in the 802.11ac
+ * specification follow
+ *
+ * VHT features rates bitmap.
+ * Bit0:	5G MCS 0-9 BW 160MHz
+ * Bit1:	5G MCS 0-9 support BW 80MHz
+ * Bit2:	5G MCS 0-9 support BW 20MHz
+ * Bit3:	2.4G MCS 0-9 support BW 20MHz
+ * Bits:4-7	Reserved for future use
+ *
+ */
+#define VHT_FEATURES_IE_TYPE	0x4
+BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
+	uint8 oui[3];		/* Proprietary OUI, BRCM_PROP_OUI */
+	uint8 type;		/* type of this IE = 4 */
+	uint8 rate_mask;	/* VHT rate mask */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
+
+/* Def for rx & tx basic mcs maps - each SS num has 2 bits of info */
+#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
+	(((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \
+	do { \
+	(mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \
+	(mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \
+	} while (0)
+#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \
+	(VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
+
+/* Get the max ss supported from the mcs map */
+#define VHT_MAX_SS_SUPPORTED(mcsMap) \
+	VHT_MCS_SS_SUPPORTED(8, mcsMap) ? 8 : \
+	VHT_MCS_SS_SUPPORTED(7, mcsMap) ? 7 : \
+	VHT_MCS_SS_SUPPORTED(6, mcsMap) ? 6 : \
+	VHT_MCS_SS_SUPPORTED(5, mcsMap) ? 5 : \
+	VHT_MCS_SS_SUPPORTED(4, mcsMap) ? 4 : \
+	VHT_MCS_SS_SUPPORTED(3, mcsMap) ? 3 : \
+	VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 2 : \
+	VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0
+
+#ifdef IBSS_RMC
+/* customer's OUI */
+#define RMC_PROP_OUI	"\x00\x16\x32"
+#endif
+
+/* ************* WPA definitions. ************* */
+#define WPA_OUI		"\x00\x50\xF2"	/* WPA OUI */
+#define WPA_OUI_LEN	3		/* WPA OUI length */
+#define WPA_OUI_TYPE	1
+#define WPA_VERSION	1		/* WPA version */
+#define WPA_VERSION_LEN 2		/* WPA version length */
+
+/* ************* WPA2 definitions. ************* */
+#define WPA2_OUI	"\x00\x0F\xAC"	/* WPA2 OUI */
+#define WPA2_OUI_LEN	3		/* WPA2 OUI length */
+#define WPA2_VERSION	1		/* WPA2 version */
+#define WPA2_VERSION_LEN 2		/* WPA2 version length */
+#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION	/* Max supported version */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI		"\x00\x50\xF2"	/* WPS OUI */
+#define WPS_OUI_LEN	3		/* WPS OUI length */
+#define WPS_OUI_TYPE	4
+
+/* ************* TPC definitions. ************* */
+#define TPC_OUI		"\x00\x50\xF2"	/* TPC OUI */
+#define TPC_OUI_LEN	3		/* TPC OUI length */
+#define TPC_OUI_TYPE	8
+#define WFA_OUI_TYPE_TPC 8		/* deprecated */
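+
+/* Illustrative sketch (compiled out): classifying a vendor-specific (221) IE
+ * by OUI and type with the constants above. is_wpa_ie() is a hypothetical
+ * helper, not part of this driver; 'ie' must already be length-validated.
+ */
+#if 0
+static bool
+is_wpa_ie(const vndr_ie_t *ie)
+{
+	/* vndr_ie_t.len counts from the OUI onward: OUI (3) + type (1) minimum */
+	return (ie->len >= VNDR_IE_MIN_LEN + 1) &&
+		(memcmp(ie->oui, WPA_OUI, WPA_OUI_LEN) == 0) &&
+		(ie->data[0] == WPA_OUI_TYPE);
+}
+#endif /* illustrative example */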
************* */ +#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */ +#define WFA_OUI_LEN 3 /* WFA OUI length */ +#define WFA_OUI_TYPE_P2P 9 + +/* WFA definitions for LEGACY P2P */ +#ifdef WL_LEGACY_P2P +#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */ +#define APPLE_OUI_LEN 3 +#define APPLE_OUI_TYPE_P2P 5 +#endif /* WL_LEGACY_P2P */ + +#ifndef WL_LEGACY_P2P +#define P2P_OUI WFA_OUI +#define P2P_OUI_LEN WFA_OUI_LEN +#define P2P_OUI_TYPE WFA_OUI_TYPE_P2P +#else +#define P2P_OUI APPLE_OUI +#define P2P_OUI_LEN APPLE_OUI_LEN +#define P2P_OUI_TYPE APPLE_OUI_TYPE_P2P +#endif /* !WL_LEGACY_P2P */ + +#ifdef WLTDLS +#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */ +#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */ +#define WFA_OUI_TYPE_WFD 10 +#endif /* WLTDLS */ +#define WFA_OUI_TYPE_HS20 0x10 +#define WFA_OUI_TYPE_OSEN 0x12 +#define WFA_OUI_TYPE_NAN 0x13 +#define WFA_OUI_TYPE_MBO 0x16 +#define WFA_OUI_TYPE_MBO_OCE 0x16 +#define WFA_OUI_TYPE_OWE 0x1C +#define WFA_OUI_TYPE_SAE_PK 0x1F +#define WFA_OUI_TYPE_TD_INDICATION 0x20 + +#define SAE_PK_MOD_LEN 32u +BWL_PRE_PACKED_STRUCT struct dot11_sae_pk_element { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[WFA_OUI_LEN]; /* WFA_OUI */ + uint8 type; /* SAE-PK */ + uint8 data[SAE_PK_MOD_LEN]; /* Modifier. 32Byte fixed */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_sae_pk_element dot11_sae_pk_element_t; + +/* RSN authenticated key management suite */ +#define RSN_AKM_NONE 0 /* None (IBSS) */ +#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */ +#define RSN_AKM_PSK 2 /* Pre-shared Key */ +#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */ +#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */ +/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more + * Just kept here to avoid build issue in BISON/CARIBOU branch + */ +#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */ +#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ +#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */ +#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ +#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */ +#define RSN_AKM_SAE_PSK 8 /* AKM for SAE with 4-way handshake */ +#define RSN_AKM_SAE_FBT 9 /* AKM for SAE with FBT */ +#define RSN_AKM_SUITEB_SHA256_1X 11 /* Suite B SHA256 */ +#define RSN_AKM_SUITEB_SHA384_1X 12 /* Suite B-192 SHA384 */ +#define RSN_AKM_FBT_SHA384_1X 13 /* FBT SHA384 */ +#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */ +#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */ +#define RSN_AKM_FBT_SHA256_FILS 16 +#define RSN_AKM_FBT_SHA384_FILS 17 +#define RSN_AKM_OWE 18 /* RFC 8110 OWE */ +#define RSN_AKM_FBT_SHA384_PSK 19 +#define RSN_AKM_PSK_SHA384 20 +/* OSEN authenticated key management suite */ +#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */ +/* WFA DPP RSN authenticated key management */ +#define RSN_AKM_DPP 02u /* DPP RSN */
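A sketch (editorial example, not part of the patch) of how one of these AKM numbers pairs with WPA2_OUI to form the 4-byte RSN AKM suite selector:

#if 0 /* illustrative example only */
uint8 akm_suite[4];
memcpy(akm_suite, WPA2_OUI, WPA2_OUI_LEN); /* 00:0F:AC */
akm_suite[3] = RSN_AKM_PSK;                /* 00:0F:AC:02 -> WPA2-PSK */
#endif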
+ +/* Key related defines */ +#define DOT11_MAX_DEFAULT_KEYS 4 /* number of default keys */ +#define DOT11_MAX_IGTK_KEYS 2 +#define DOT11_MAX_BIGTK_KEYS 2 +#define DOT11_MAX_KEY_SIZE 32 /* max size of any key */ +#define DOT11_MAX_IV_SIZE 16 /* max size of any IV */ +#define DOT11_EXT_IV_FLAG (1<<5) /* flag to indicate IV is > 4 bytes */ +#define DOT11_WPA_KEY_RSC_LEN 8 /* WPA RSC key len */ + +#define WEP1_KEY_SIZE 5 /* max size of any WEP key */ +#define WEP1_KEY_HEX_SIZE 10 /* size of WEP key in hex. */ +#define WEP128_KEY_SIZE 13 /* max size of any WEP key */ +#define WEP128_KEY_HEX_SIZE 26 /* size of WEP key in hex. */ +#define TKIP_MIC_SIZE 8 /* size of TKIP MIC */ +#define TKIP_EOM_SIZE 7 /* max size of TKIP EOM */ +#define TKIP_EOM_FLAG 0x5a /* TKIP EOM flag byte */ +#define TKIP_KEY_SIZE 32 /* size of any TKIP key, includes MIC keys */ +#define TKIP_TK_SIZE 16 +#define TKIP_MIC_KEY_SIZE 8 +#define TKIP_MIC_AUTH_TX 16 /* offset to Authenticator MIC TX key */ +#define TKIP_MIC_AUTH_RX 24 /* offset to Authenticator MIC RX key */ +#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX /* offset to Supplicant MIC RX key */ +#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX /* offset to Supplicant MIC TX key */ +#define AES_KEY_SIZE 16 /* size of AES key */ +#define AES_MIC_SIZE 8 /* size of AES MIC */ +#define BIP_KEY_SIZE 16 /* size of BIP key */ +#define BIP_MIC_SIZE 8 /* size of BIP MIC */ + +#define AES_GCM_MIC_SIZE 16 /* size of MIC for 128-bit GCM - .11adD9 */ + +#define AES256_KEY_SIZE 32 /* size of AES 256 key - .11acD5 */ +#define AES256_MIC_SIZE 16 /* size of MIC for 256 bit keys, incl BIP */ + +/* WCN */ +#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */ +#define WCN_TYPE 4 /* WCN type */ + +#ifdef BCMWAPI_WPI +#define SMS4_KEY_LEN 16 +#define SMS4_WPI_CBC_MAC_LEN 16 +#endif + +/* 802.11r protocol definitions */ + +/** Mobility Domain IE */ +BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie { + uint8 id; + uint8 len; /* DOT11_MDID_IE_DATA_LEN (3) */ + uint16 mdid; /* Mobility Domain Id */ + uint8 cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mdid_ie dot11_mdid_ie_t; + +/* length of data portion of Mobility Domain IE */ +#define DOT11_MDID_IE_DATA_LEN 3 +#define DOT11_MDID_LEN 2 +#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */ +#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */ + +/* BITs in FTIE mic control field */ +#define DOT11_FTIE_RSNXE_USED 0x1u + +/* Fast Bss Transition IE */ +#ifdef FT_IE_VER_V2 +typedef BWL_PRE_PACKED_STRUCT struct dot11_ft_ie_v2 { + uint8 id; + uint8 len; + uint16 mic_control; + /* dynamic offset to following mic[], anonce[], snonce[] */ +} BWL_POST_PACKED_STRUCT dot11_ft_ie_v2; +typedef struct dot11_ft_ie_v2 dot11_ft_ie_t; +#else +BWL_PRE_PACKED_STRUCT struct dot11_ft_ie { + uint8 id; + uint8 len; /* At least equal to DOT11_FT_IE_FIXED_LEN (82) */ + uint16 mic_control; /* Mic Control */ + uint8 mic[16]; + uint8 anonce[32]; + uint8 snonce[32]; + /* Optional sub-elements follow */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_ie dot11_ft_ie_t; + +/* Fixed length of data portion of Fast BSS Transition IE. There could be + * optional parameters, which if present, could raise the FT IE length to 255.
+ */ +#define DOT11_FT_IE_FIXED_LEN 82 +#endif /* FT_IE_VER_V2 */ + +#ifdef FT_IE_VER_V2 +#define DOT11_FT_IE_LEN(mic_len) (sizeof(dot11_ft_ie_v2) + mic_len + EAPOL_WPA_KEY_NONCE_LEN *2) +#define FT_IE_MIC(pos) ((uint8 *)pos + sizeof(dot11_ft_ie_v2)) +#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len) +#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len + \ + EAPOL_WPA_KEY_NONCE_LEN) +#else +#define DOT11_FT_IE_LEN(mic_len) sizeof(dot11_ft_ie) +#define FT_IE_MIC(pos) ((uint8 *)&pos->mic) +#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)&pos->anonce) +#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)&pos->snonce) +#endif /* FT_IE_VER_V2 */ +#define TIE_TYPE_RESERVED 0 +#define TIE_TYPE_REASSOC_DEADLINE 1 +#define TIE_TYPE_KEY_LIEFTIME 2 +#define TIE_TYPE_ASSOC_COMEBACK 3 +BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie { + uint8 id; + uint8 len; + uint8 type; /* timeout interval type */ + uint32 value; /* timeout interval value */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timeout_ie dot11_timeout_ie_t; + +/** GTK ie */ +BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie { + uint8 id; + uint8 len; + uint16 key_info; + uint8 key_len; + uint8 rsc[8]; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_gtk_ie dot11_gtk_ie_t; + +/** Management MIC ie */ +BWL_PRE_PACKED_STRUCT struct mmic_ie { + uint8 id; /* IE ID: DOT11_MNG_MMIE_ID */ + uint8 len; /* IE length */ + uint16 key_id; /* key id */ + uint8 ipn[6]; /* ipn */ + uint8 mic[16]; /* mic */ +} BWL_POST_PACKED_STRUCT; +typedef struct mmic_ie mmic_ie_t; + +#define DOT11_MMIC_IE_HDR_SIZE (OFFSETOF(mmic_ie_t, mic)) + +/* 802.11r-2008, 11A.10.3 - RRB frame format */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame { + uint8 frame_type; /* 1 for RRB */ + uint8 packet_type; /* 0 for Request 1 for Response */ + uint16 len; + uint8 cur_ap_addr[ETHER_ADDR_LEN]; + uint8 data[1]; /* IEs Received/Sent in FT Action Req/Resp Frame */ +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t; + +#define DOT11_FT_RRB_FIXED_LEN 10 +#define DOT11_FT_REMOTE_FRAME_TYPE 1 +#define DOT11_FT_PACKET_REQ 0 +#define DOT11_FT_PACKET_RESP 1 + +#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00" +#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF" + +#ifdef BCMWAPI_WAI +#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */ +#define WAPI_VERSION 1 /* WAPI version */ +#define WAPI_VERSION_LEN 2 /* WAPI version length */ +#define WAPI_OUI "\x00\x14\x72" /* WAPI OUI */ +#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */ +#endif /* BCMWAPI_WAI */ + +/* ************* WMM Parameter definitions. 
************* */ +#define WMM_OUI "\x00\x50\xF2" /* WMM OUI */ +#define WMM_OUI_LEN 3 /* WMM OUI length */ +#define WMM_OUI_TYPE 2 /* WMM OUI type */ +#define WMM_VERSION 1 +#define WMM_VERSION_LEN 1 + +/* WMM OUI subtype */ +#define WMM_OUI_SUBTYPE_PARAMETER 1 +#define WMM_PARAMETER_IE_LEN 24 + +/** Link Identifier Element */ +BWL_PRE_PACKED_STRUCT struct link_id_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + struct ether_addr tdls_init_mac; + struct ether_addr tdls_resp_mac; +} BWL_POST_PACKED_STRUCT; +typedef struct link_id_ie link_id_ie_t; +#define TDLS_LINK_ID_IE_LEN 18u + +/** Link Wakeup Schedule Element */ +BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie { + uint8 id; + uint8 len; + uint32 offset; /* in ms between TSF0 and start of 1st Awake Window */ + uint32 interval; /* in ms between the start of 2 Awake Windows */ + uint32 awake_win_slots; /* in backoff slots, duration of Awake Window */ + uint32 max_wake_win; /* in ms, max duration of Awake Window */ + uint16 idle_cnt; /* number of consecutive Awake Windows */ +} BWL_POST_PACKED_STRUCT; +typedef struct wakeup_sch_ie wakeup_sch_ie_t; +#define TDLS_WAKEUP_SCH_IE_LEN 18 + +/** Channel Switch Timing Element */ +BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie { + uint8 id; + uint8 len; + uint16 switch_time; /* in ms, time to switch channels */ + uint16 switch_timeout; /* in ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct channel_switch_timing_ie channel_switch_timing_ie_t; +#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4 + +/** PTI Control Element */ +BWL_PRE_PACKED_STRUCT struct pti_control_ie { + uint8 id; + uint8 len; + uint8 tid; + uint16 seq_control; +} BWL_POST_PACKED_STRUCT; +typedef struct pti_control_ie pti_control_ie_t; +#define TDLS_PTI_CONTROL_IE_LEN 3 + +/** PU Buffer Status Element */ +BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie { + uint8 id; + uint8 len; + uint8 status; +} BWL_POST_PACKED_STRUCT; +typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; +#define TDLS_PU_BUFFER_STATUS_IE_LEN 1 +#define TDLS_PU_BUFFER_STATUS_AC_BK 1 +#define TDLS_PU_BUFFER_STATUS_AC_BE 2 +#define TDLS_PU_BUFFER_STATUS_AC_VI 4 +#define TDLS_PU_BUFFER_STATUS_AC_VO 8 + +/* TDLS Action Field Values */ +#define TDLS_SETUP_REQ 0 +#define TDLS_SETUP_RESP 1 +#define TDLS_SETUP_CONFIRM 2 +#define TDLS_TEARDOWN 3 +#define TDLS_PEER_TRAFFIC_IND 4 +#define TDLS_CHANNEL_SWITCH_REQ 5 +#define TDLS_CHANNEL_SWITCH_RESP 6 +#define TDLS_PEER_PSM_REQ 7 +#define TDLS_PEER_PSM_RESP 8 +#define TDLS_PEER_TRAFFIC_RESP 9 +#define TDLS_DISCOVERY_REQ 10 + +/* 802.11z TDLS Public Action Frame action field */ +#define TDLS_DISCOVERY_RESP 14 + +/* 802.11u GAS action frames */ +#define GAS_REQUEST_ACTION_FRAME 10 +#define GAS_RESPONSE_ACTION_FRAME 11 +#define GAS_COMEBACK_REQUEST_ACTION_FRAME 12 +#define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13 + +/* FTM - fine timing measurement public action frames */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_req { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (32) */ + uint8 trigger; /* trigger/continue?
*/ + /* optional lci, civic loc, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_req dot11_ftm_req_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (33) */ + uint8 dialog; /* dialog token */ + uint8 follow_up; /* follow up dialog token */ + uint8 tod[6]; /* t1 - last depart timestamp */ + uint8 toa[6]; /* t4 - last ack arrival timestamp */ + uint8 tod_err[2]; /* t1 error */ + uint8 toa_err[2]; /* t4 error */ + /* optional lci report, civic loc report, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm dot11_ftm_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_lmr { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (33) */ + uint8 dialog; /* dialog token */ + uint8 tod[6]; /* RSTA t3 or ISTA t1: + * last departure of NDP + */ + uint8 toa[6]; /* RSTA t2 or ISTA t4: + * last arrival of NDP + */ + uint8 tod_err[2]; /* t3 or t1 error */ + uint8 toa_err[2]; /* t2 or t4 error */ + uint16 cfo; /* I2R LMR: clock difference between ISTA and RSTA. */ + uint8 sec_ltf_params[]; /* Optional Secure LTF parameters */ + /* no AOA feedback */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_lmr dot11_ftm_lmr_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_ranging_ndpa { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ + uint8 dialog_token; /* sounding dialog token */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_ranging_ndpa dot11_ftm_ranging_ndpa_t; + +/* NDPA types = dialog token byte lower 2 bits */ +#define DOT11_NDPA_TYPE_MASK 0x03 +#define DOT11_NDPA_TYPE_VHT 0x00 +#define DOT11_NDPA_TYPE_RANGING 0x01 +#define DOT11_NDPA_TYPE_HE 0x02 + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 1 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x80 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 7 +#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0x7fff +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 0 +#define DOT11_FTM_ERR_MAX_ERR(_err) (((((_err)[1] & 0x7f) << 8) | (_err)[0])) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + uint16 _not_cont; \ + _val2 = (((_val) & DOT11_FTM_ERR_MAX_ERR_MASK) << DOT11_FTM_ERR_MAX_ERR_SHIFT); \ + _val2 = (_val2 > 0x3fff) ? 
0 : _val2; /* not expecting > 16ns error */ \ + _not_cont = DOT11_FTM_ERR_NOT_CONT(_err); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = (_val2 >> 8) & 0xff; \ + DOT11_FTM_ERR_SET_NOT_CONT(_err, _not_cont); \ +} while (0) + +#if defined(DOT11_FTM_ERR_ROM_COMPAT) +/* incorrect defs - here for ROM compatibility */ +#undef DOT11_FTM_ERR_NOT_CONT_OFFSET +#undef DOT11_FTM_ERR_NOT_CONT_MASK +#undef DOT11_FTM_ERR_NOT_CONT_SHIFT +#undef DOT11_FTM_ERR_NOT_CONT +#undef DOT11_FTM_ERR_SET_NOT_CONT + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0 +#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#undef DOT11_FTM_ERR_MAX_ERR_OFFSET +#undef DOT11_FTM_ERR_MAX_ERR_MASK +#undef DOT11_FTM_ERR_MAX_ERR_SHIFT +#undef DOT11_FTM_ERR_MAX_ERR +#undef DOT11_FTM_ERR_SET_MAX_ERR + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7 +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1 +#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + _val2 = (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\ + ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = _val2 >> 8 & 0xff; \ +} while (0) +#endif /* DOT11_FTM_ERR_ROM_COMPAT */
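A sketch (editorial example, not part of the patch) of packing the 2-octet FTM error field with the default, non-ROM-compat accessors:

#if 0 /* illustrative example only */
uint8 err[2] = {0, 0};
DOT11_FTM_ERR_SET_MAX_ERR(err, 100); /* values above 0x3fff are clamped to 0 */
DOT11_FTM_ERR_SET_NOT_CONT(err, 1);  /* flag a non-continuous measurement */
/* DOT11_FTM_ERR_MAX_ERR(err) now reads back 100 */
#endif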
+ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_params { + uint8 id; /* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */ + uint8 len; + uint8 info[9]; +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_ftm_params dot11_ftm_params_t; +#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2) + +/* common part for both TB and NTB */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_ranging_params { + uint8 id; /* 255 */ + uint8 len; + uint8 ext_id; /* DOT11_MNG_FTM_RANGING_EXT_ID */ + uint8 info[6]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_ranging_params dot11_ftm_ranging_params_t; +#define DOT11_FTM_CMN_RANGING_PARAMS_IE_LEN (sizeof(dot11_ftm_ranging_params_t) - TLV_EXT_HDR_LEN) + +/* FTM NTB specific */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_ntb_params { + uint8 id; /* DOT11_FTM_NTB_SUB_ELT_ID */ + uint8 len; + uint8 info[6]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_ntb_params dot11_ftm_ntb_params_t; + +#define DOT11_FTM_NTB_PARAMS_SUB_IE_LEN (sizeof(dot11_ftm_ntb_params_t)) +#define DOT11_FTM_NTB_PARAMS_IE_LEN DOT11_FTM_CMN_RANGING_PARAMS_IE_LEN + \ + DOT11_FTM_NTB_PARAMS_SUB_IE_LEN + +/* FTM TB specific */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_tb_params { + uint8 id; /* DOT11_FTM_TB_SUB_ELT_ID */ + uint8 len; + uint8 info[1]; /* variable length, minimum 1 */ +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_ftm_tb_params dot11_ftm_tb_params_t; +#define DOT11_FTM_TB_PARAMS_IE_LEN sizeof(dot11_ftm_tb_params_t) + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_sec_ltf_params { + uint8 id; /* 255 */ + uint8 len; + uint8 ext_id; /* DOT11_MNG_FTM_SECURE_LTF_EXT_ID */ + uint8 info[11]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_sec_ltf_params dot11_ftm_sec_ltf_params_t; +#define DOT11_FTM_SEC_LTF_PARAMS_IE_LEN (sizeof(dot11_ftm_sec_ltf_params_t) - 3) + +#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift)) +#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\ + uint8 _ptmp = (_p)->info[_off] & ~(_mask); \ + (_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \ +} while (0) + +#define FTM_PARAMS_STATUS_OFFSET 0 +#define FTM_PARAMS_STATUS_MASK 0x03 +#define FTM_PARAMS_STATUS_SHIFT 0 +#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \ + FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT) +#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status) + +#define FTM_PARAMS_VALUE_OFFSET 0 +#define FTM_PARAMS_VALUE_MASK 0x7c +#define FTM_PARAMS_VALUE_SHIFT 2 +#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \ + FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT) +#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value) +#define FTM_PARAMS_MAX_VALUE 32 + +#define FTM_PARAMS_NBURSTEXP_OFFSET 1 +#define FTM_PARAMS_NBURSTEXP_MASK 0x0f +#define FTM_PARAMS_NBURSTEXP_SHIFT 0 +#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \ + FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT) +#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \ + _bexp) + +#define FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p)) + +enum { + FTM_PARAMS_NBURSTEXP_NOPREF = 15 +}; + +enum { + FTM_PARAMS_BURSTTMO_NOPREF = 15 +}; + +#define FTM_PARAMS_BURSTTMO_OFFSET 1 +#define FTM_PARAMS_BURSTTMO_MASK 0xf0 +#define FTM_PARAMS_BURSTTMO_SHIFT 4 +#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \ + FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT) +/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */ +#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2) + +#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250) +#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \ + (_val) == FTM_PARAMS_BURSTTMO_NOPREF) +#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */ +#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */ + +#define FTM_PARAMS_MINDELTA_OFFSET 2 +#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100) +#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \ + (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \ +} while (0) + +enum { + FTM_PARAMS_MINDELTA_NOPREF = 0 +}; + +#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3]) +#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \ + (_p)->info[3] = (_partial_tsf) & 0xff; \ + (_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL +#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10 +#define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16 +#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff + +/* FTM can indicate up to 62k TUs forward and 1k TU backward */ +#define FTM_PARAMS_TSF_FW_HI (63487 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_BW_LOW (64512 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_BW_HI (65535 << 10) /* in micro sec */ +#define
FTM_PARAMS_TSF_FW_MAX FTM_PARAMS_TSF_FW_HI +#define FTM_PARAMS_TSF_BW_MAX (FTM_PARAMS_TSF_BW_HI - FTM_PARAMS_TSF_BW_LOW) + +#define FTM_PARAMS_PTSFNOPREF_OFFSET 5 +#define FTM_PARAMS_PTSFNOPREF_MASK 0x1 +#define FTM_PARAMS_PTSFNOPREF_SHIFT 0 +#define FTM_PARAMS_PTSFNOPREF(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_PTSFNOPREF_OFFSET, \ + FTM_PARAMS_PTSFNOPREF_MASK, FTM_PARAMS_PTSFNOPREF_SHIFT) +#define FTM_PARAMS_SET_PTSFNOPREF(_p, _nopref) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_PTSFNOPREF_OFFSET, FTM_PARAMS_PTSFNOPREF_MASK, \ + FTM_PARAMS_PTSFNOPREF_SHIFT, _nopref) + +#define FTM_PARAMS_ASAP_OFFSET 5 +#define FTM_PARAMS_ASAP_MASK 0x4 +#define FTM_PARAMS_ASAP_SHIFT 2 +#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \ + FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT) +#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap) + +/* FTM1 - AKA ASAP Capable */ +#define FTM_PARAMS_FTM1_OFFSET 5 +#define FTM_PARAMS_FTM1_MASK 0x02 +#define FTM_PARAMS_FTM1_SHIFT 1 +#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \ + FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT) +#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1) + +#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5 +#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8 +#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3 +#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \ + FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT) +#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \ + FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms) + +enum { + FTM_PARAMS_FTMS_PER_BURST_NOPREF = 0 +}; + +#define FTM_PARAMS_CHAN_INFO_OFFSET 6 +#define FTM_PARAMS_CHAN_INFO_MASK 0xfc +#define FTM_PARAMS_CHAN_INFO_SHIFT 2 +#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \ + FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT) +#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci) + +/* burst period - units of 100ms */ +#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7]) +#define FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\ + (_p)->info[7] = (_bp) & 0xff; \ + (_p)->info[8] = ((_bp) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_BURST_PERIOD_MS(_p) (FTM_PARAMS_BURST_PERIOD(_p) * 100) + +enum { + FTM_PARAMS_BURST_PERIOD_NOPREF = 0 +}; + +/* FTM status values - last updated from 11mcD4.0 */ +enum { + FTM_PARAMS_STATUS_RESERVED = 0, + FTM_PARAMS_STATUS_SUCCESSFUL = 1, + FTM_PARAMS_STATUS_INCAPABLE = 2, + FTM_PARAMS_STATUS_FAILED = 3, + /* Below are obsolete */ + FTM_PARAMS_STATUS_OVERRIDDEN = 4, + FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5, + FTM_PARAMS_STATUS_ASAP_FAILED = 6, + /* rest are reserved */ +};
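A sketch (editorial example, not part of the patch) of filling a minimal FTM Parameters element with the FTM_PARAMS_SET_* accessors; DOT11_MNG_FTM_PARAM_ID is assumed to come from elsewhere in this header set, as the struct comment above suggests:

#if 0 /* illustrative example only */
dot11_ftm_params_t params;
memset(&params, 0, sizeof(params));
params.id = DOT11_MNG_FTM_PARAM_ID;
params.len = DOT11_FTM_PARAMS_IE_LEN;
FTM_PARAMS_SET_STATUS(&params, FTM_PARAMS_STATUS_SUCCESSFUL);
FTM_PARAMS_SET_FTMS_PER_BURST(&params, 4);
FTM_PARAMS_SET_BURST_PERIOD(&params, 10); /* 10 x 100ms = 1s */
#endif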
+ +enum { + FTM_PARAMS_CHAN_INFO_NO_PREF = 0, + FTM_PARAMS_CHAN_INFO_RESERVE1 = 1, + FTM_PARAMS_CHAN_INFO_RESERVE2 = 2, + FTM_PARAMS_CHAN_INFO_RESERVE3 = 3, + FTM_PARAMS_CHAN_INFO_NON_HT_5 = 4, + FTM_PARAMS_CHAN_INFO_RESERVE5 = 5, + FTM_PARAMS_CHAN_INFO_NON_HT_10 = 6, + FTM_PARAMS_CHAN_INFO_RESERVE7 = 7, + FTM_PARAMS_CHAN_INFO_NON_HT_20 = 8, /* excludes 2.4G, and High rate DSSS */ + FTM_PARAMS_CHAN_INFO_HT_MF_20 = 9, + FTM_PARAMS_CHAN_INFO_VHT_20 = 10, + FTM_PARAMS_CHAN_INFO_HT_MF_40 = 11, + FTM_PARAMS_CHAN_INFO_VHT_40 = 12, + FTM_PARAMS_CHAN_INFO_VHT_80 = 13, + FTM_PARAMS_CHAN_INFO_VHT_80_80 = 14, + FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS = 15, + FTM_PARAMS_CHAN_INFO_VHT_160 = 16, + /* Reserved from 17 - 30 */ + FTM_PARAMS_CHAN_INFO_DMG_2160 = 31, + /* Reserved from 32 - 63 */ + FTM_PARAMS_CHAN_INFO_MAX = 63 +}; + +/* tag_ID/length/value_buffer tuple */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 id; + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT ftm_vs_tlv_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie { + uint8 id; /* DOT11_MNG_VS_ID */ + uint8 len; /* length following */ + uint8 oui[3]; /* BRCM_PROP_OUI (or Customer) */ + uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */ + uint8 version; + ftm_vs_tlv_t tlvs[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t; + +/* same as payload of dot11_ftm_vs_ie. +* This definition helps in having struct access +* of the payload while building FTM VS IE from other modules (NAN) +*/ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie_pyld { + uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */ + uint8 version; + ftm_vs_tlv_t tlvs[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_ie_pyld dot11_ftm_vs_ie_pyld_t; + +/* ftm vs api version */ +#define BCM_FTM_VS_PARAMS_VERSION 0x01 + +/* ftm vendor specific information tlv types */ +enum { + FTM_VS_TLV_NONE = 0, + FTM_VS_TLV_REQ_PARAMS = 1, /* additional request params (in FTM_REQ) */ + FTM_VS_TLV_MEAS_INFO = 2, /* measurement information (in FTM_MEAS) */ + FTM_VS_TLV_SEC_PARAMS = 3, /* security parameters (in either) */ + FTM_VS_TLV_SEQ_PARAMS = 4, /* toast parameters (FTM_REQ, BRCM proprietary) */ + FTM_VS_TLV_MF_BUF = 5, /* multi frame buffer - may span ftm vs ie's */ + FTM_VS_TLV_TIMING_PARAMS = 6, /* timing adjustments */ + FTM_VS_TLV_MF_STATS_BUF = 7 /* multi frame statistics buffer */ + /* add additional types above */ +}; + +/* the following definitions are *DEPRECATED* and moved to implementation files. They + * are retained here because some branches prior to May 2016 still use them + */ +#define FTM_TPK_LEN 16u +#define FTM_RI_RR_BUF_LEN 32u +#define FTM_TPK_RI_RR_LEN 13 +#define FTM_TPK_RI_RR_LEN_SECURE_2_0 28 +#define FTM_TPK_RI_PHY_LEN 7u +#define FTM_TPK_RR_PHY_LEN 7u +#define FTM_TPK_DATA_BUFFER_LEN 88u +#define FTM_TPK_LEN_SECURE_2_0 64u +#define FTM_TPK_RI_PHY_LEN_SECURE_2_0 14u +#define FTM_TPK_RR_PHY_LEN_SECURE_2_0 14u + +#define FTM_RI_RR_BUF_LEN_20MHZ 32u +#define FTM_RI_RR_BUF_LEN_80MHZ 64u + +#define FTM_RI_RR_BUF_LEN_FROM_CHANSPEC(chanspec) \ + (CHSPEC_IS20((chanspec)) ? \ + FTM_RI_RR_BUF_LEN_20MHZ : FTM_RI_RR_BUF_LEN_80MHZ) + +#define FTM_TPK_RI_RR_LEN_SECURE_2_0_20MHZ 28u +#define FTM_TPK_RI_RR_LEN_SECURE_2_0_80MHZ 62u +#define FTM_TPK_RI_RR_LEN_SECURE_2_0_2G FTM_TPK_RI_RR_LEN_SECURE_2_0 +#define FTM_TPK_RI_RR_LEN_SECURE_2_0_5G FTM_TPK_RI_RR_LEN_SECURE_2_0_80MHZ + +#define FTM_TPK_RI_RR_LEN_FROM_CHANSPEC(chanspec) \ + (CHSPEC_IS20((chanspec)) ? FTM_TPK_RI_RR_LEN_SECURE_2_0_20MHZ : \ + FTM_TPK_RI_RR_LEN_SECURE_2_0_80MHZ) + +#define FTM_TPK_RI_PHY_LEN_SECURE_2_0_20MHZ 14u +#define FTM_TPK_RI_PHY_LEN_SECURE_2_0_80MHZ 31u +#define FTM_TPK_RR_PHY_LEN_SECURE_2_0_80MHZ 31u + +#define FTM_TPK_RI_PHY_LEN_FROM_CHANSPEC(chanspec) \ + (CHSPEC_IS20((chanspec)) ? FTM_TPK_RI_PHY_LEN_SECURE_2_0_20MHZ : \ + FTM_TPK_RI_PHY_LEN_SECURE_2_0_80MHZ) + +#define FTM_TPK_RR_PHY_LEN_SECURE_2_0_20MHZ 14u + +#define FTM_TPK_RR_PHY_LEN_FROM_CHANSPEC(chanspec) \ + (CHSPEC_IS20((chanspec)) ?
FTM_TPK_RR_PHY_LEN_SECURE_2_0_20MHZ : \ + FTM_TPK_RR_PHY_LEN_SECURE_2_0_80MHZ) + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_params { + uint8 id; /* DOT11_MNG_VS_ID */ + uint8 len; + uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ + uint8 bcm_vs_id; + ftm_vs_tlv_t ftm_tpk_ri_rr[1]; /* ftm_TPK_ri_rr place holder */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_params dot11_ftm_vs_tpk_ri_rr_params_t; +#define DOT11_FTM_VS_LEN (sizeof(dot11_ftm_vs_tpk_ri_rr_params_t) - TLV_HDR_LEN) +/* end *DEPRECATED* ftm definitions */ + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_sync_info { + uint8 id; /* Extended - 255 11mc D4.3 */ + uint8 len; + uint8 id_ext; + uint8 tsf_sync_info[4]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_sync_info dot11_ftm_sync_info_t; + +/* ftm tsf sync info ie len - includes id ext */ +#define DOT11_FTM_SYNC_INFO_IE_LEN (sizeof(dot11_ftm_sync_info_t) - TLV_HDR_LEN) + +#define DOT11_FTM_IS_SYNC_INFO_IE(_ie) (\ + DOT11_MNG_IE_ID_EXT_MATCH(_ie, DOT11_MNG_FTM_SYNC_INFO) && \ + (_ie)->len == DOT11_FTM_SYNC_INFO_IE_LEN) + +BWL_PRE_PACKED_STRUCT struct dot11_dh_param_ie { + uint8 id; /* OWE */ + uint8 len; + uint8 ext_id; /* EXT_MNG_OWE_DH_PARAM_ID */ + uint16 group; + uint8 pub_key[0]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dh_param_ie dot11_dh_param_ie_t; + +#define DOT11_DH_EXTID_OFFSET (OFFSETOF(dot11_dh_param_ie_t, ext_id)) + +#define DOT11_OWE_DH_PARAM_IE(_ie) (\ + DOT11_MNG_IE_ID_EXT_MATCH(_ie, EXT_MNG_OWE_DH_PARAM_ID)) + +#define DOT11_MNG_OWE_IE_ID_EXT_INIT(_ie, _id, _len) do {\ + (_ie)->id = DOT11_MNG_ID_EXT_ID; \ + (_ie)->len = _len; \ + (_ie)->ext_id = _id; \ +} while (0) + +/* 802.11u interworking access network options */ +#define IW_ANT_MASK 0x0f +#define IW_INTERNET_MASK 0x10 +#define IW_ASRA_MASK 0x20 +#define IW_ESR_MASK 0x40 +#define IW_UESA_MASK 0x80 + +/* 802.11u interworking access network type */ +#define IW_ANT_PRIVATE_NETWORK 0 +#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST 1 +#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK 2 +#define IW_ANT_FREE_PUBLIC_NETWORK 3 +#define IW_ANT_PERSONAL_DEVICE_NETWORK 4 +#define IW_ANT_EMERGENCY_SERVICES_NETWORK 5 +#define IW_ANT_TEST_NETWORK 14 +#define IW_ANT_WILDCARD_NETWORK 15 + +#define IW_ANT_LEN 1 +#define IW_VENUE_LEN 2 +#define IW_HESSID_LEN 6 +#define IW_HESSID_OFF (IW_ANT_LEN + IW_VENUE_LEN) +#define IW_MAX_LEN (IW_ANT_LEN + IW_VENUE_LEN + IW_HESSID_LEN) + +/* 802.11u advertisement protocol */ +#define ADVP_ANQP_PROTOCOL_ID 0 +#define ADVP_MIH_PROTOCOL_ID 1 + +/* 802.11u advertisement protocol masks */ +#define ADVP_QRL_MASK 0x7f +#define ADVP_PAME_BI_MASK 0x80 + +/* 802.11u advertisement protocol values */ +#define ADVP_QRL_REQUEST 0x00 +#define ADVP_QRL_RESPONSE 0x7f +#define ADVP_PAME_BI_DEPENDENT 0x00 +#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK + +/* 802.11u ANQP information ID */ +#define ANQP_ID_QUERY_LIST 256 +#define ANQP_ID_CAPABILITY_LIST 257 +#define ANQP_ID_VENUE_NAME_INFO 258 +#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259 +#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO 260 +#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261 +#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO 262 +#define ANQP_ID_NAI_REALM_LIST 263 +#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264 +#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265 +#define ANQP_ID_AP_CIVIC_LOCATION 266 +#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267 +#define ANQP_ID_DOMAIN_NAME_LIST 268 +#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269 +#define ANQP_ID_EMERGENCY_NAI 271 +#define ANQP_ID_NEIGHBOR_REPORT 272 +#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797 
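ANQP elements start with a 2-octet little-endian Info ID; a sketch (editorial example, not part of the patch) of matching one against the IDs above:

#if 0 /* illustrative example only */
uint8 anqp_hdr[2] = {0x00, 0x01}; /* hypothetical first two octets of an ANQP element */
uint16 info_id = anqp_hdr[0] | (anqp_hdr[1] << 8);
if (info_id == ANQP_ID_QUERY_LIST) { /* 256: handle the query list */ }
#endif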
+ +/* 802.11u ANQP ID len */ +#define ANQP_INFORMATION_ID_LEN 2 + +/* 802.11u ANQP OUI */ +#define ANQP_OUI_SUBTYPE 9 + +/* 802.11u venue name */ +#define VENUE_LANGUAGE_CODE_SIZE 3 +#define VENUE_NAME_SIZE 255 + +/* 802.11u venue groups */ +#define VENUE_UNSPECIFIED 0 +#define VENUE_ASSEMBLY 1 +#define VENUE_BUSINESS 2 +#define VENUE_EDUCATIONAL 3 +#define VENUE_FACTORY 4 +#define VENUE_INSTITUTIONAL 5 +#define VENUE_MERCANTILE 6 +#define VENUE_RESIDENTIAL 7 +#define VENUE_STORAGE 8 +#define VENUE_UTILITY 9 +#define VENUE_VEHICULAR 10 +#define VENUE_OUTDOOR 11 + +/* 802.11u network authentication type indicator */ +#define NATI_UNSPECIFIED -1 +#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0 +#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1 +#define NATI_HTTP_HTTPS_REDIRECTION 2 +#define NATI_DNS_REDIRECTION 3 + +/* 802.11u IP address type availability - IPv6 */ +#define IPA_IPV6_SHIFT 0 +#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT) +#define IPA_IPV6_NOT_AVAILABLE 0x00 +#define IPA_IPV6_AVAILABLE 0x01 +#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02 + +/* 802.11u IP address type availability - IPv4 */ +#define IPA_IPV4_SHIFT 2 +#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT) +#define IPA_IPV4_NOT_AVAILABLE 0x00 +#define IPA_IPV4_PUBLIC 0x01 +#define IPA_IPV4_PORT_RESTRICT 0x02 +#define IPA_IPV4_SINGLE_NAT 0x03 +#define IPA_IPV4_DOUBLE_NAT 0x04 +#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05 +#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06 +#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07 + +/* 802.11u NAI realm encoding */ +#define REALM_ENCODING_RFC4282 0 +#define REALM_ENCODING_UTF8 1 + +/* 802.11u IANA EAP method type numbers */ +#define REALM_EAP_TLS 13 +#define REALM_EAP_LEAP 17 +#define REALM_EAP_SIM 18 +#define REALM_EAP_TTLS 21 +#define REALM_EAP_AKA 23 +#define REALM_EAP_PEAP 25 +#define REALM_EAP_FAST 43 +#define REALM_EAP_PSK 47 +#define REALM_EAP_AKAP 50 +#define REALM_EAP_EXPANDED 254 + +/* 802.11u authentication ID */ +#define REALM_EXPANDED_EAP 1 +#define REALM_NON_EAP_INNER_AUTHENTICATION 2 +#define REALM_INNER_AUTHENTICATION_EAP 3 +#define REALM_EXPANDED_INNER_EAP 4 +#define REALM_CREDENTIAL 5 +#define REALM_TUNNELED_EAP_CREDENTIAL 6 +#define REALM_VENDOR_SPECIFIC_EAP 221 + +/* 802.11u non-EAP inner authentication type */ +#define REALM_RESERVED_AUTH 0 +#define REALM_PAP 1 +#define REALM_CHAP 2 +#define REALM_MSCHAP 3 +#define REALM_MSCHAPV2 4 + +/* 802.11u credential type */ +#define REALM_SIM 1 +#define REALM_USIM 2 +#define REALM_NFC 3 +#define REALM_HARDWARE_TOKEN 4 +#define REALM_SOFTOKEN 5 +#define REALM_CERTIFICATE 6 +#define REALM_USERNAME_PASSWORD 7 +#define REALM_SERVER_SIDE 8 +#define REALM_RESERVED_CRED 9 +#define REALM_VENDOR_SPECIFIC_CRED 10 + +/* 802.11u 3GPP PLMN */ +#define G3PP_GUD_VERSION 0 +#define G3PP_PLMN_LIST_IE 0 + +/* AP Location Public ID Info encoding */ +#define PUBLIC_ID_URI_FQDN_SE_ID 0 +/* URI/FQDN Descriptor field values */ +#define LOCATION_ENCODING_HELD 1 +#define LOCATION_ENCODING_SUPL 2 +#define URI_FQDN_SIZE 255 + +/** hotspot2.0 indication element (vendor specific) */ +BWL_PRE_PACKED_STRUCT struct hs20_ie { + uint8 oui[3]; + uint8 type; + uint8 config; +} BWL_POST_PACKED_STRUCT; +typedef struct hs20_ie hs20_ie_t; +#define HS20_IE_LEN 5 /* HS20 IE length */ + +/* Short SSID list Extended Capabilities element */ +BWL_PRE_PACKED_STRUCT struct short_ssid_list_ie { + uint8 id; + uint8 len; + uint8 id_ext; + uint8 data[1]; /* Capabilities Information */ +} BWL_POST_PACKED_STRUCT; + +typedef struct short_ssid_list_ie short_ssid_list_ie_t; +#define 
SHORT_SSID_LIST_IE_FIXED_LEN 3 /* SHORT SSID LIST IE LENGTH */ + +/** IEEE 802.11 Annex E */ +typedef enum { + DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */ + DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */ + DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */ + DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */ + DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */ + DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */ + DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */ + DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */ + DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */ + DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */ + DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */ + DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */ + DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */ + DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */ + DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */ + DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */ +} dot11_op_class_t; + +/* QoS map */ +#define QOS_MAP_FIXED_LENGTH (8 * 2) /* DSCP ranges fixed with 8 entries */ + +/* BCM proprietary IE type for AIBSS */ +#define BCM_AIBSS_IE_TYPE 56 + +/* BCM proprietary flag type for WL_DISCO_VSIE */ +#define SSE_OUI "\x00\x00\xF0" +#define VENDOR_ENTERPRISE_STA_OUI_TYPE 0x22 +#define MAX_VSIE_DISASSOC (1) +#define DISCO_VSIE_LEN 0x09u + +/* Single PMK IE */ +#define CCX_SPMK_TYPE 3 /* CCX Extended Cap IE type for SPMK */ +/* CCX Extended Capability IE */ +BWL_PRE_PACKED_STRUCT struct ccx_spmk_cap_ie { + uint8 id; /* 221, DOT11_MNG_PROPR_ID */ + uint8 len; + uint8 oui[DOT11_OUI_LEN]; /* 00:40:96, CISCO_AIRONET_OUI */ + uint8 type; /* 11 */ + uint8 cap; +} BWL_POST_PACKED_STRUCT; +typedef struct ccx_spmk_cap_ie ccx_spmk_cap_ie_t; + +/* OWE definitions */ +/* ID + len + OUI + OI type + BSSID + SSID_len */ +#define OWE_TRANS_MODE_IE_FIXED_LEN 13u + +/* Supported Operating Classes element */ +BWL_PRE_PACKED_STRUCT struct supp_op_classes_ie { + uint8 id; + uint8 len; + uint8 cur_op_class; + uint8 op_classes[]; /* Supported Operating Classes */ +} BWL_POST_PACKED_STRUCT; +typedef struct supp_op_classes_ie supp_op_classes_ie_t; + +/* Transition mode (bit number) */ +#define TRANSISION_MODE_WPA3_PSK 0u +#define TRANSITION_MODE_SAE_PK 1u +#define TRANSITION_MODE_WPA3_ENTERPRISE 2u +#define TRANSITION_MODE_ENHANCED_OPEN 3u + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _802_11_H_ */ diff --git a/bcmdhd.101.10.361.x/include/802.11ah.h b/bcmdhd.101.10.361.x/include/802.11ah.h new file mode 100755 index 0000000..637284b --- /dev/null +++ b/bcmdhd.101.10.361.x/include/802.11ah.h @@ -0,0 +1,281 @@ +/* + * Basic types and constants relating to 802.11ah standard. + * This is a portion of 802.11ah definition. The rest are in 802.11.h. + * + * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _802_11ah_h_ +#define _802_11ah_h_ + +#include <typedefs.h> + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + +/** + * TWT IE (sec 9.4.2.200) + */ + +/* TWT element - top (Figure 9-589av) */ +BWL_PRE_PACKED_STRUCT struct twt_ie_top { + uint8 id; + uint8 len; + uint8 ctrl; /* Control */ +} BWL_POST_PACKED_STRUCT; + +typedef struct twt_ie_top twt_ie_top_t; + +/* S1G Action IDs */ +#define S1G_ACTION_TWT_SETUP 6u +#define S1G_ACTION_TWT_TEARDOWN 7u +#define S1G_ACTION_TWT_INFO 11u + +/* S1G Action frame offsets */ +#define S1G_AF_CAT_OFF 0u +#define S1G_AF_ACT_OFF 1u + +/* TWT Setup */ +#define S1G_AF_TWT_SETUP_TOKEN_OFF 2u +#define S1G_AF_TWT_SETUP_TWT_IE_OFF 3u + +/* TWT Teardown */ +#define S1G_AF_TWT_TEARDOWN_FLOW_OFF 2u + +/* TWT Information */ +#define S1G_AF_TWT_INFO_OFF 2u + +#define TWT_BCAST_WAKE_TIME_OFFSET 10u +#define TWT_BCAST_WAKE_TIME_SHIFT 10u +#define TWT_BCAST_WAKE_TIME_MASK 0x03FFFC00u +#define TWT_BCAST_WAKE_TIME_ZERO_BIT_SZ 10u + +/* Control field (Figure 9-589aw) */ +#define TWT_CTRL_NDP_PAGING_IND 0x01u /* NDP Paging Indication */ +#define TWT_CTRL_RESP_PM_MODE 0x02u /* Responder PM Mode */ +#define TWT_CTRL_NEGO_TYPE_IDX 2u +#define TWT_CTRL_NEGO_TYPE_MASK 0x0Cu /* TWT Negotiation Type */ +#define TWT_CTRL_NEGO_TYPE_SHIFT 2u +#define TWT_CTRL_INFO_FRM_DISABLED 0x10u /* TWT info frame disabled */ +#define TWT_CTRL_WAKEDUR_UNIT 0x20u /* Wake duration unit */ + +/* TWT Negotiation Type (Table 9-262j1) */ +typedef enum twt_ctrl_nego_type { + TWT_CTRL_NEGO_TYPE_0 = 0, /* Individual TWT Setup */ + TWT_CTRL_NEGO_TYPE_1 = 1, /* Wake TBTT Negotiation */ + TWT_CTRL_NEGO_TYPE_2 = 2, /* Broadcast TWT IE in Beacon */ + TWT_CTRL_NEGO_TYPE_3 = 3, /* Broadcast TWT memberships */ +} twt_ctrl_nego_type_t; + +/* Request Type field (Figure 9-589ay) */ +#define TWT_REQ_TYPE_REQUEST 0x0001u /* Request */ +#define TWT_REQ_TYPE_SETUP_CMD_MASK 0x000eu /* Setup Command */ +#define TWT_REQ_TYPE_SETUP_CMD_SHIFT 1u +#define TWT_REQ_TYPE_TRIGGER 0x0010u /* Trigger */ +#define TWT_REQ_TYPE_IMPLICIT 0x0020u /* Implicit */ +#define TWT_REQ_TYPE_LAST_BCAST_PARAM 0x0020u /* Last Broadcast Parameter Set */ +#define TWT_REQ_TYPE_FLOW_TYPE 0x0040u /* Flow Type */ +#define TWT_REQ_TYPE_FLOW_ID_MASK 0x0380u /* Flow Identifier */ +#define TWT_REQ_TYPE_FLOW_ID_SHIFT 7u +#define TWT_REQ_TYPE_BTWT_RECOMM_MASK 0x0380u /* Broadcast TWT Recommendation */ +#define TWT_REQ_TYPE_BTWT_RECOMM_SHIFT 7u +#define TWT_REQ_TYPE_WAKE_EXP_MASK 0x7c00u /* Wake Interval Exponent */ +#define TWT_REQ_TYPE_WAKE_EXP_SHIFT 10u +#define TWT_REQ_TYPE_PROTECTION 0x8000u /* Protection */
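A sketch (editorial example, not part of the patch) of composing the 16-bit Request Type field from the masks above:

#if 0 /* illustrative example only */
uint16 rt = 0;
rt |= TWT_REQ_TYPE_REQUEST; /* STA is the TWT requester */
rt |= TWT_REQ_TYPE_TRIGGER | TWT_REQ_TYPE_IMPLICIT;
rt |= (3u << TWT_REQ_TYPE_FLOW_ID_SHIFT) & TWT_REQ_TYPE_FLOW_ID_MASK;
rt |= (10u << TWT_REQ_TYPE_WAKE_EXP_SHIFT) & TWT_REQ_TYPE_WAKE_EXP_MASK;
#endif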
+ +/* Setup Command field (Table 9-262k) */ +#define TWT_SETUP_CMD_REQUEST_TWT 0u /* Request TWT */ +#define TWT_SETUP_CMD_SUGGEST_TWT 1u /* Suggest TWT */ +#define TWT_SETUP_CMD_DEMAND_TWT 2u /* Demand TWT */ +#define TWT_SETUP_CMD_GROUPING_TWT 3u /* Grouping TWT */ +#define TWT_SETUP_CMD_ACCEPT_TWT 4u /* Accept TWT */ +#define TWT_SETUP_CMD_ALTERNATE_TWT 5u /* Alternate TWT */ +#define TWT_SETUP_CMD_DICTATE_TWT 6u /* Dictate TWT */ +#define TWT_SETUP_CMD_REJECT_TWT 7u /* Reject TWT */ + +/* Broadcast TWT Recommendation field (Table 9-262k1) */ +#define TWT_BCAST_FRAME_RECOMM_0 0u /* No constraints on frames in Broadcast TWT SP */ +#define TWT_BCAST_FRAME_RECOMM_1 1u /* Do not contain RUs for random access */ +#define TWT_BCAST_FRAME_RECOMM_2 2u /* Can contain RUs for random access */ +#define TWT_BCAST_FRAME_RECOMM_3 3u + +/* Request Type subfield - 2 octets */ +typedef uint16 twt_request_type_t; /* 16 bit request type */ + +/* Target Wake Time - 8 octets or 0 octet */ +typedef uint64 twt_target_wake_time_t; /* 64 bit TSF time of TWT Responding STA */ +typedef uint16 twt_bcast_wake_time_t; /* 16 bit Wake Time of Bcast scheduling STA */ +typedef uint16 twt_bcast_twt_info_t; /* 16 bit Broadcast TWT Info subfield */ + +/* TWT Group Assignment Info - 9 octets (long format) or 3 octets (short format) or 0 octet */ +/* Group Assignment Info field - short format - Zero Offset Present field is 0 */ +BWL_PRE_PACKED_STRUCT struct twt_grp_short { + uint8 grpid_n_0off; /* Group ID and Zero Offset Present */ + uint16 unit_n_off; /* TWT Unit and TWT Offset */ +} BWL_POST_PACKED_STRUCT; + +typedef struct twt_grp_short twt_grp_short_t; + +/* Group Assignment Info field - long format - Zero Offset Present field is 1 */ +#define TWT_ZERO_OFF_GRP_LEN 6u +BWL_PRE_PACKED_STRUCT struct twt_grp_long { + uint8 grpid_n_0off; /* Group ID and Zero Offset Present */ + uint8 grp_0off[TWT_ZERO_OFF_GRP_LEN]; /* Zero Offset of Group */ + uint16 unit_n_off; /* Unit and Offset */ +} BWL_POST_PACKED_STRUCT; + +typedef struct twt_grp_long twt_grp_long_t; + +/* TWT Unit and TWT Offset field */ +#define TWT_UNIT_MASK 0x000fu /* TWT Unit */ +#define TWT_OFFSET_MASK 0xfff0u /* TWT Offset */ +#define TWT_OFFSET_SHIFT 4u + +/* TWT Unit field (table 8-248m) */ +#define TWT_UNIT_32us 0u +#define TWT_UNIT_256us 1u +#define TWT_UNIT_1024us 2u +#define TWT_UNIT_8ms192us 3u +#define TWT_UNIT_32ms768us 4u +#define TWT_UNIT_262ms144us 5u +#define TWT_UNIT_1s048576us 6u +#define TWT_UNIT_8s388608us 7u +#define TWT_UNIT_33s554432us 8u +#define TWT_UNIT_268s435456us 9u +#define TWT_UNIT_1073s741824us 10u +#define TWT_UNIT_8589s934592us 11u + +/* TWT element - bottom */ +BWL_PRE_PACKED_STRUCT struct twt_ie_itwt_bottom { + uint8 nom_wake_dur; /* Nominal Minimum Wake Duration */ + uint16 wake_int_mant; /* TWT Wake Interval Mantissa */ + uint8 channel; /* TWT Channel */ + /* NDP Paging field */ +} BWL_POST_PACKED_STRUCT; + +typedef struct twt_ie_itwt_bottom twt_ie_itwt_bottom_t; + +/* TWT element - bottom */ +BWL_PRE_PACKED_STRUCT struct twt_ie_btwt_bottom { + uint8 nom_wake_dur; /* Nominal Minimum Wake Duration */ + uint16 wake_int_mant; /* TWT Wake Interval Mantissa */ + twt_bcast_twt_info_t btwt_info; /* Broadcast TWT Info */ + /* NDP Paging field */ +} BWL_POST_PACKED_STRUCT; + +typedef struct twt_ie_btwt_bottom twt_ie_btwt_bottom_t;
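Per the TWT definitions, the wake interval works out to mantissa << exponent microseconds; a sketch (editorial example, not part of the patch) with hypothetical parsed values:

#if 0 /* illustrative example only */
uint16 wake_int_mant = 512u; /* from the element bottom */
uint16 wake_exp = 10u;       /* from the Request Type Wake Interval Exponent field */
uint64 wake_interval_us = (uint64)wake_int_mant << wake_exp; /* 512 * 2^10 us, ~524 ms */
#endif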
+ +/* TWT IE structure for broadcast TWT */ +typedef struct twt_last_bcast_ie { + twt_ie_top_t top; /* Element id, len, control fields */ + twt_request_type_t req_type; /* request type field */ + twt_bcast_wake_time_t twt; /* twt field */ + twt_ie_btwt_bottom_t btwt_bottom; /* wake dur, int, BID Info */ +} twt_last_bcast_ie_t; + +/* Nominal Minimum Wake Duration */ +#define TWT_WAKE_DUR_UNIT_256us 256u /* Nom. Min. Wake Duration is in 256us units */ +#define TWT_WAKE_DUR_UNIT_1ms 1024u /* Nom. Min. Wake Duration is in 1ms units */ + +/* to be deprecated */ +#define TWT_NOM_WAKE_DUR_UNIT 256u /* Nominal Minimum Wake Duration is in 256us units */ + +/* TWT IE field lengths */ +#define TWT_IE_NOM_MIN_TWT_WK_DUR_SZ 1u /* 1 byte */ +#define TWT_IE_TWT_WAKE_INT_MANT_SZ 2u /* 2 bytes */ +#define TWT_IE_BCAST_TWT_INFO_SZ 2u /* 2 bytes */ +#define TWT_IE_TWT_CHANNEL_SZ 1u /* 1 byte */ + +/* Broadcast TWT info subfield format (figure 9-589ay1) */ +#define TWT_BTWT_PERSIST_EXPO_MASK 0x0007u /* Broadcast TWT Persistence Exponent */ +#define TWT_BCAST_TWT_ID_MASK 0x00F8u /* Broadcast TWT ID */ +#define TWT_BCAST_TWT_ID_SHIFT 3u +#define TWT_BTWT_PERSIST_MANT_MASK 0xFF00u /* Broadcast TWT Persistence Mantissa */ +#define TWT_BTWT_PERSIST_MANT_SHIFT 8u + +#define TWT_BTWT_PERSIST_INDEFINITE 0xFFu + +/* NDP Paging field - 4 octets or 0 octet */ +typedef uint32 twt_ndp_paging_t; + +#define TWT_NDP_PAGING_PID 0x000001ffu /* P-ID */ +#define TWT_NDP_PAGING_MAX_PERIOD 0x0001fe00u /* Max NDP Paging Period */ +#define TWT_NDP_PAGING_PART_TSF_OFF 0x001e0000u /* Partial TSF Offset */ +#define TWT_NDP_PAGING_ACTION 0x00e00000u /* Action */ +#define TWT_NDP_PAGING_MIN_SLEEP 0x3f000000u /* Min Sleep Duration */ + +/* Action field (table 8-248n) */ +#define TWT_ACTION_SEND_PSP_TRIG 0u /* Send a PS-Poll or uplink trigger frame */ +#define TWT_ACTION_WAKE_MIN_SLEEP 1u /* Wake up at the time indicated by + * Min Sleep Duration + */ +#define TWT_ACTION_WAKE_RCV_BCN 2u /* Wake up to receive the Beacon */ +#define TWT_ACTION_WAKE_RCV_DTIM 3u /* Wake up to receive the DTIM Beacon */ +#define TWT_ACTION_WAKE_IND_TIME 4u /* Wake up at the time indicated by the sum of + * the Min Sleep Duration field and the ASD subfield + * in the APDI field of the NDP Paging frame + */ + +/* TWT Teardown for Negotiation type 0 or 1 */ +#define TWT_TEARDOWN_FLOW_ID_MASK 0x07u +/* TWT Teardown for Negotiation type 3 */ +#define TWT_TEARDOWN_BTWT_ID_MASK 0x1Fu + +#define TWT_TEARDOWN_NEGO_TYPE_MASK 0x60u +#define TWT_TEARDOWN_NEGO_TYPE_SHIFT 5u +/* Teardown All TWT indication */ +#define TWT_TEARDOWN_ALL_TWT 0x80u + +/* TWT Information field byte 0 */ +#define TWT_INFO_FLOW_ID_MASK 0x07u +#define TWT_INFO_RESP_REQ 0x08u +#define TWT_INFO_NEXT_TWT_REQ 0x10u +#define TWT_INFO_NEXT_TWT_SIZE_MASK 0x60u +#define TWT_INFO_NEXT_TWT_SIZE_SHIFT 0x5u +#define TWT_INFO_ALL_TWT 0x80u + +/* Next TWT Subfield Size field encoding */ +#define TWT_INFO_NEXT_TWT_SIZE_0_IDX 0u /* 0 byte */ +#define TWT_INFO_NEXT_TWT_SIZE_32_IDX 1u /* 4 bytes */ +#define TWT_INFO_NEXT_TWT_SIZE_48_IDX 2u /* 6 bytes */ +#define TWT_INFO_NEXT_TWT_SIZE_64_IDX 3u /* 8 bytes */ + +/* Next TWT Subfield Size field */ +#define TWT_INFO_NEXT_TWT_SIZE_0 0u /* 0 byte */ +#define TWT_INFO_NEXT_TWT_SIZE_32 4u /* 4 bytes */ +#define TWT_INFO_NEXT_TWT_SIZE_48 6u /* 6 bytes */ +#define TWT_INFO_NEXT_TWT_SIZE_64 8u /* 8 bytes */ + +/* Old macro definitions - To be removed - Start here */ +#define TWT_BCAST_MAX_VALID_FLOW_ID 3u +#define TWT_CTRL_BCAST 0x04u /* Broadcast */ +#define TWT_CTRL_WAKE_TBTT_NEGO 0x08u /* Wake TBTT Negotiation */ +#define TWT_SETUP_CMD_GRPING_TWT 3u /* Grouping TWT */ +#define TWT_SETUP_CMD_ALTER_TWT 5u /* Alternate TWT */ +#define TWT_IE_BCAST_TWT_ID_SZ 1u /* 1 byte */ +#define TWT_INFO_BROADCAST_RESCHED 0x80u + +typedef struct
twt_ie_itwt_bottom twt_ie_bottom_t; +/* Old macro definitions - To be removed - End here */ + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _802_11ah_h_ */ diff --git a/bcmdhd.101.10.361.x/include/802.11ax.h b/bcmdhd.101.10.361.x/include/802.11ax.h new file mode 100755 index 0000000..49c5e48 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/802.11ax.h @@ -0,0 +1,1180 @@ +/* + * Basic types and constants relating to 802.11ax/HE STA + * This is a portion of 802.11ax definition. The rest are in 802.11.h. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _802_11ax_h_ +#define _802_11ax_h_ + +#include <typedefs.h> +#include <802.11.h> +#include + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + +/* HT Control Field: (Table 9-9a) */ +#define HTC_HE_VARIANT 0x03u +#define HTC_HEVAR_SHIFT 0 /* HE VARIANT shift */ +#define HTC_HEVAR(htc) (((htc) & HTC_HE_VARIANT) >> HTC_HEVAR_SHIFT) + +/* HT Control IDs: (Table 9-18a & Table 9-9a) */ +#define HTC_HE_CTLID_SHIFT 0x02u /* HTC HE CTLID shift */ +#define HTC_HE_CTLID_MASK 0x0Fu /* HTC HE CTLID mask */ +#define HTC_HE_CTLID(htc) (((htc) >> HTC_HE_CTLID_SHIFT) & HTC_HE_CTLID_MASK) + +#define HTC_HE_CTLID_TRS 0x0u /* Triggered response scheduling */ +#define HTC_HE_CTLID_OMI 0x1u /* Operating mode */ +#define HTC_HE_CTLID_HLA 0x2u /* HE link adaptation */ +#define HTC_HE_CTLID_BSR 0x3u /* Buffer status report */ +#define HTC_HE_CTLID_UPH 0x4u /* UL power headroom */ +#define HTC_HE_CTLID_BQR 0x5u /* Bandwidth query report */ +#define HTC_HE_CTLID_CAS 0x6u /* Command and status */ + +/* HTC-Control field definitions: (Table 9.9a HTC Control field) */ +#define HTC_HE_CTL_SIZE 30u /* HTC Control field size */ +#define HTC_HE_CTL_DEFAULT 0xFFFFFFFC + +/* A-Control offset definitions: (Figure 9.18a Control ID subfield values) */ +#define HE_ACTRL_TRS_FSZ 26u +#define HE_ACTRL_OMI_FSZ 12u +#define HE_ACTRL_HLA_FSZ 26u +#define HE_ACTRL_BSR_FSZ 26u +#define HE_ACTRL_UPH_FSZ 8u +#define HE_ACTRL_BQR_FSZ 10u +#define HE_ACTRL_CAS_FSZ 8u
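A sketch (editorial example, not part of the patch) of tagging an HT Control word as HE variant and pulling back the first Control ID:

#if 0 /* illustrative example only */
uint32 htc = HTC_HE_VARIANT | (HTC_HE_CTLID_OMI << HTC_HE_CTLID_SHIFT);
if (HTC_HEVAR(htc) == HTC_HE_VARIANT) {
	uint8 ctl_id = HTC_HE_CTLID(htc); /* -> HTC_HE_CTLID_OMI */
}
#endif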
+ +/* OM-Control Field definitions: (Figure 9.15d Control Information subfield for OM Control) */ +#define HE_OMI_RXNSS_FSZ 3 +#define HE_OMI_RXNSS_IDX 0 +#define HE_OMI_RXNSS_MASK 0x07u +#define HE_OMI_CHW_FSZ 2 +#define HE_OMI_CHW_IDX 3 +#define HE_OMI_CHW_MASK 0x18u +#define HE_OMI_ULMU_DIS_FSZ 1 +#define HE_OMI_ULMU_DIS_IDX 5 +#define HE_OMI_ULMU_DIS_MASK 0x20u +#define HE_OMI_TXNSTS_FSZ 3 +#define HE_OMI_TXNSTS_IDX 6 +#define HE_OMI_TXNSTS_MASK 0x1c0u +#define HE_OMI_ERSU_DIS_FSZ 1 +#define HE_OMI_ERSU_DIS_IDX 9 +#define HE_OMI_ERSU_DIS_MASK 0x200u +#define HE_OMI_DLMU_RSD_RCM_FSZ 1 +#define HE_OMI_DLMU_RSD_RCM_IDX 10 +#define HE_OMI_DLMU_RSD_RCM_MASK 0x400u +#define HE_OMI_ULMU_DATA_DIS_FSZ 1 +#define HE_OMI_ULMU_DATA_DIS_IDX 11 +#define HE_OMI_ULMU_DATA_DIS_MASK 0x800u + +/* OM-Control Channel Width Subfield definition, as per 9.2.4.6a.2 OM Control */ +#define OMI_CHW_20MHZ 0 +#define OMI_CHW_40MHZ 1 +#define OMI_CHW_80MHZ 2 +#define OMI_CHW_160MHZ_80P80MHZ 3 + +/* Table 9-18d ACI Bitmap subfield encoding */ +#define HE_BSR_ACI_MAP_BE 0u +#define HE_BSR_ACI_MAP_BK 1u +#define HE_BSR_ACI_MAP_VI 2u +#define HE_BSR_ACI_MAP_VO 3u + +/* GI And LTF Type subfield encoding (Table 9-31d) */ +#define HE_LTF_1_GI_1_6us (0u) +#define HE_LTF_2_GI_1_6us (1u) +#define HE_LTF_4_GI_3_2us (2u) + +/* special STA-IDs (Section 27.11.1) */ +#define HE_STAID_BSS_BCAST 0 +#define HE_STAID_UNASSOCIATED_STA 2045u +#define HE_STAID_NO_USER 2046u +#define HE_STAID_MBSS_BCAST 2047u +#define HE_STAID_MASK 0x07FFu +#define HE_AID12_MASK 0x0FFFu + +/** + * HE Capabilities element (sec 9.4.2.218) + */ + +/* HE MAC Capabilities Information field (figure 9-589ck) */ +#define HE_MAC_CAP_INFO_SIZE 6u +typedef uint8 he_mac_cap_t[HE_MAC_CAP_INFO_SIZE]; + +/* bit position and field width */ +#define HE_MAC_HTC_HE_SUPPORT_IDX 0 /* HTC HE Support */ +#define HE_MAC_HTC_HE_SUPPORT_FSZ 1 +#define HE_MAC_TWT_REQ_SUPPORT_IDX 1 /* TWT Requestor Support */ +#define HE_MAC_TWT_REQ_SUPPORT_FSZ 1 +#define HE_MAC_TWT_RESP_SUPPORT_IDX 2 /* TWT Responder Support */ +#define HE_MAC_TWT_RESP_SUPPORT_FSZ 1 +#define HE_MAC_FRAG_SUPPORT_IDX 3 /* Fragmentation Support */ +#define HE_MAC_FRAG_SUPPORT_FSZ 2 +#define HE_MAC_MAX_MSDU_FRAGS_IDX 5 /* Max. Fragmented MSDUs */ +#define HE_MAC_MAX_MSDU_FRAGS_FSZ 3 +#define HE_MAC_MIN_FRAG_SIZE_IDX 8 /* Min. Fragment Size */ +#define HE_MAC_MIN_FRAG_SIZE_FSZ 2 +#define HE_MAC_TRIG_MAC_PAD_DUR_IDX 10 /* Trigger Frame MAC Pad Dur */ +#define HE_MAC_TRIG_MAC_PAD_DUR_FSZ 2 +#define HE_MAC_MULTI_TID_AGG_RX_IDX 12 /* Multi TID Agg. Rx support */ +#define HE_MAC_MULTI_TID_AGG_RX_FSZ 3 +#define HE_MAC_LINK_ADAPT_IDX 15 /* HE Link Adaptation Support */ +#define HE_MAC_LINK_ADAPT_FSZ 2 +#define HE_MAC_ALL_ACK_SUPPORT_IDX 17 /* All Ack Support */ +#define HE_MAC_ALL_ACK_SUPPORT_FSZ 1 +#define HE_MAC_TRS_SUPPORT_IDX 18 /* TRS Support */ +#define HE_MAC_TRS_SUPPORT_FSZ 1 +#define HE_MAC_BSR_SUPPORT_IDX 19 /* BSR Support */ +#define HE_MAC_BSR_SUPPORT_FSZ 1 +#define HE_MAC_BCAST_TWT_SUPPORT_IDX 20 /* Broadcast TWT Support */ +#define HE_MAC_BCAST_TWT_SUPPORT_FSZ 1 +#define HE_MAC_32BA_BITMAP_SUPPORT_IDX 21 /* 32-bit BA Bitmap Support */ +#define HE_MAC_32BA_BITMAP_SUPPORT_FSZ 1 +#define HE_MAC_MU_CASCADE_SUPPORT_IDX 22 /* MU Cascade Support */ +#define HE_MAC_MU_CASCADE_SUPPORT_FSZ 1 +#define HE_MAC_ACK_ENAB_AGG_SUPPORT_IDX 23 /* Ack Enabled Agg.
Support */ +#define HE_MAC_ACK_ENAB_AGG_SUPPORT_FSZ 1 +/* bit 24 - Reserved */ +#define HE_MAC_OMI_CONTROL_SUPPORT_IDX 25 /* OMI Control Support */ +#define HE_MAC_OMI_CONTROL_SUPPORT_FSZ 1 +#define HE_MAC_OFDMA_RA_SUPPORT_IDX 26 /* OFDMA RA Support */ +#define HE_MAC_OFDMA_RA_SUPPORT_FSZ 1 +#define HE_MAC_MAX_AMPDU_LEN_EXP_IDX 27 /* Max AMPDU Length Exponent */ +#define HE_MAC_MAX_AMPDU_LEN_EXP_FSZ 2 +#define HE_MAC_AMSDU_FRAG_SUPPORT_IDX 29 /* AMSDU Fragmentation Support */ +#define HE_MAC_AMSDU_FRAG_SUPPORT_FSZ 1 +#define HE_MAC_FLEX_TWT_SCHEDULE_IDX 30 /* Flexible TWT Schedule Support */ +#define HE_MAC_FLEX_TWT_SCHEDULE_FSZ 1 +#define HE_MAC_RX_MBSS_CTL_FRAME_IDX 31 /* Rx control frames to Multi BSS */ +#define HE_MAC_RX_MBSS_CTL_FRAME_FSZ 1 +#define HE_MAC_RX_AGG_BSRP_BQRP_IDX 32 /* Aggregated BSRP BQRP Rx */ +#define HE_MAC_RX_AGG_BSRP_BQRP_FSZ 1 +#define HE_MAC_QTP_SUPPORT_IDX 33 /* Support Quiet time period */ +#define HE_MAC_QTP_SUPPORT_FSZ 1 +#define HE_MAC_BQR_SUPPORT_IDX 34 /* Support BQR */ +#define HE_MAC_BQR_SUPPORT_FSZ 1 +#define HE_MAC_SRP_RESPONDER_IDX 35 /* SRP responder Support */ +#define HE_MAC_SRP_RESPONDER_FSZ 1 +#define HE_MAC_NDP_FDBK_SUPPORT_IDX 36 /* NDP feedback report Support */ +#define HE_MAC_NDP_FDBK_SUPPORT_FSZ 1 +#define HE_MAC_OPS_SUPPORT_IDX 37 /* OPS support */ +#define HE_MAC_OPS_SUPPORT_FSZ 1 +#define HE_MAC_AMSDU_IN_AMPDU_IDX 38 /* AMSDU in AMPDU support */ +#define HE_MAC_AMSDU_IN_AMPDU_FSZ 1 +#define HE_MAC_MULTI_TID_AGG_TX_IDX 39 /* Multi TID Agg. Tx support */ +#define HE_MAC_MULTI_TID_AGG_TX_FSZ 3 +#define HE_MAC_SST_SUPPORT_IDX 42 /* Sub-channel Selective channel */ +#define HE_MAC_SST_SUPPORT_FSZ 1 +#define HE_MAC_UL_2X_996_TONE_RU_SUPP_IDX 43 /* UL 2X 996 tone RU Support */ +#define HE_MAC_UL_2X_996_TONE_RU_SUPP_FSZ 1 +#define HE_MAC_UL_MU_DATA_DISABLE_RX_IDX 44 /* OM - UL MU Data Disable RX */ +#define HE_MAC_UL_MU_DATA_DISABLE_RX_FSZ 1 +#define HE_MAC_DYNAMIC_SM_PWR_SAVE_IDX 45 /* HE Dynamic SM Power Save */ +#define HE_MAC_DYNAMIC_SM_PWR_SAVE_FSZ 1 +#define HE_MAC_PUNCT_SOUNDING_SUPP_IDX 46 /* Punctured Sounding Support */ +#define HE_MAC_PUNCT_SOUNDING_SUPP_FSZ 1 +#define HE_MAC_HT_VHT_TRIG_FRAME_RX_IDX 47 /* HT And VHT Trigger Frame RX Support */ +#define HE_MAC_HT_VHT_TRIG_FRAME_RX_FSZ 1
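Each IDX/FSZ pair above names the bit offset and width of a subfield inside the 6-byte MAC capabilities array; a sketch (editorial example, not part of the patch) of a generic reader, with a hypothetical helper name and assuming the usual LSB-first 802.11 bit numbering:

#if 0 /* illustrative example only */
static uint32
he_cap_get_field(const uint8 *cap, uint32 idx, uint32 fsz)
{
	uint32 v = 0, i;
	for (i = 0; i < fsz; i++) {
		uint32 bit = idx + i;
		v |= (uint32)((cap[bit >> 3] >> (bit & 7u)) & 1u) << i;
	}
	return v;
}
/* e.g. he_cap_get_field(mac_cap, HE_MAC_TWT_REQ_SUPPORT_IDX, HE_MAC_TWT_REQ_SUPPORT_FSZ) */
#endif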
HE_PHY_DOPPLER_RX_FSZ 1 +#define HE_PHY_FULL_BW_UL_MU_MIMO_IDX 22 /* Full bandwidth UL MU MIMO */ +#define HE_PHY_FULL_BW_UL_MU_MIMO_FSZ 1 +#define HE_PHY_PART_BW_UL_MU_MIMO_IDX 23 /* Partial bandwidth UL MU MIMO */ +#define HE_PHY_PART_BW_UL_MU_MIMO_FSZ 1 +#define HE_PHY_DCM_MAX_CONST_TX_IDX 24 /* DCM Max Constellation Tx */ +#define HE_PHY_DCM_MAX_CONST_TX_FSZ 2 +#define HE_PHY_DCM_MAX_NSS_TX_IDX 26 /* DCM Max NSS Tx */ +#define HE_PHY_DCM_MAX_NSS_TX_FSZ 1 +#define HE_PHY_DCM_MAX_CONST_RX_IDX 27 /* DCM Max Constellation Rx */ +#define HE_PHY_DCM_MAX_CONST_RX_FSZ 2 +#define HE_PHY_DCM_MAX_NSS_RX_IDX 29 /* DCM Max NSS Rx */ +#define HE_PHY_DCM_MAX_NSS_RX_FSZ 1 +#define HE_PHY_RX_MU_PPDU_IDX 30 /* Rx HE MU PPDU From nonAP STA */ +#define HE_PHY_RX_MU_PPDU_FSZ 1 +#define HE_PHY_SU_BEAMFORMER_IDX 31 /* SU Beamformer */ +#define HE_PHY_SU_BEAMFORMER_FSZ 1 +#define HE_PHY_SU_BEAMFORMEE_IDX 32 /* SU Beamformee */ +#define HE_PHY_SU_BEAMFORMEE_FSZ 1 +#define HE_PHY_MU_BEAMFORMER_IDX 33 /* MU Beamformer */ +#define HE_PHY_MU_BEAMFORMER_FSZ 1 +#define HE_PHY_BEAMFORMEE_STS_BELOW80MHZ_IDX 34 /* Beamformee STS For <= 80MHz */ +#define HE_PHY_BEAMFORMEE_STS_BELOW80MHZ_FSZ 3 +#define HE_PHY_BEAMFORMEE_STS_ABOVE80MHZ_IDX 37 /* Beamformee STS For >80 MHz */ +#define HE_PHY_BEAMFORMEE_STS_ABOVE80MHZ_FSZ 3 +#define HE_PHY_SOUND_DIM_BELOW80MHZ_IDX 40 /* Num. Sounding Dim.<= 80 MHz */ +#define HE_PHY_SOUND_DIM_BELOW80MHZ_FSZ 3 +#define HE_PHY_SOUND_DIM_ABOVE80MHZ_IDX 43 /* Num. Sounding Dim.> 80 MHz */ +#define HE_PHY_SOUND_DIM_ABOVE80MHZ_FSZ 3 +#define HE_PHY_SU_FEEDBACK_NG16_SUPPORT_IDX 46 /* Ng=16 For SU Feedback */ +#define HE_PHY_SU_FEEDBACK_NG16_SUPPORT_FSZ 1 +#define HE_PHY_MU_FEEDBACK_NG16_SUPPORT_IDX 47 /* Ng=16 For MU Feedback */ +#define HE_PHY_MU_FEEDBACK_NG16_SUPPORT_FSZ 1 +#define HE_PHY_SU_CODEBOOK_SUPPORT_IDX 48 /* Codebook Sz {4, 2} For SU */ +#define HE_PHY_SU_CODEBOOK_SUPPORT_FSZ 1 +#define HE_PHY_MU_CODEBOOK_SUPPORT_IDX 49 /* Codebook Size {7, 5} For MU */ +#define HE_PHY_MU_CODEBOOK_SUPPORT_FSZ 1 +#define HE_PHY_TRG_SU_BFM_FEEDBACK_IDX 50 /* Triggered SU TXBF Feedback */ +#define HE_PHY_TRG_SU_BFM_FEEDBACK_FSZ 1 +#define HE_PHY_TRG_MU_BFM_FEEDBACK_IDX 51 /* Triggered MU TXBF partial BW Feedback */ +#define HE_PHY_TRG_MU_BFM_FEEDBACK_FSZ 1 +#define HE_PHY_TRG_CQI_FEEDBACK_IDX 52 /* Triggered CQI Feedback */ +#define HE_PHY_TRG_CQI_FEEDBACK_FSZ 1 +#define HE_PHY_PART_BW_EXT_RANGE_IDX 53 /* Partial BW Extended Range */ +#define HE_PHY_PART_BW_EXT_RANGE_FSZ 1 +#define HE_PHY_DL_MU_MIMO_PART_BW_IDX 54 /* Partial Bandwidth DL MU MIMO */ +#define HE_PHY_DL_MU_MIMO_PART_BW_FSZ 1 +#define HE_PHY_PPE_THRESH_PRESENT_IDX 55 /* PPE Threshold Present */ +#define HE_PHY_PPE_THRESH_PRESENT_FSZ 1 +#define HE_PHY_SRP_SR_SUPPORT_IDX 56 /* SRP based SR Support */ +#define HE_PHY_SRP_SR_SUPPORT_FSZ 1 +#define HE_PHY_POWER_BOOST_FACTOR_IDX 57 /* Power Boost Factor Support */ +#define HE_PHY_POWER_BOOST_FACTOR_FSZ 1 +#define HE_PHY_4X_LTF_0_8_GI_SUPPORT_IDX 58 /* HE SU PPDU And HE MU PPDU with + * 4x HE-LTF And 0.8 us GI + */ +#define HE_PHY_4X_LTF_0_8_GI_SUPPORT_FSZ 1 +#define HE_PHY_MAX_NC_IDX 59 /* Maximum NC */ +#define HE_PHY_MAX_NC_FSZ 3 +#define HE_PHY_STBC_TX_ABOVE_80_IDX 62 /* STBC Tx above 80 MHz */ +#define HE_PHY_STBC_TX_ABOVE_80_FSZ 1 +#define HE_PHY_STBC_RX_ABOVE_80_IDX 63 /* STBC Rx above 80 MHz */ +#define HE_PHY_STBC_RX_ABOVE_80_FSZ 1 +#define HE_PHY_ER_SU_4X_LTF_0_8_GI_IDX 64 /* ER SU PPDU 4x HE-LTF 0.8 GI */ +#define HE_PHY_ER_SU_4X_LTF_0_8_GI_FSZ 1 +#define HE_PHY_20_IN_40_2G_IDX 65 /* 20 
in 40 MHz HE PPDU in 2G */ +#define HE_PHY_20_IN_40_2G_FSZ 1 +#define HE_PHY_20_IN_160_80P80_IDX 66 /* 20 in 160/80+80 MHz HE PPDU */ +#define HE_PHY_20_IN_160_80P80_FSZ 1 +#define HE_PHY_80_IN_160_80P80_IDX 67 /* 80 in 160/80+80 MHz HE PPDU */ +#define HE_PHY_80_IN_160_80P80_FSZ 1 +#define HE_PHY_ER_SU_1X_LTF_0_8_GI_IDX 68 /* HE ER SU 1x HE-LTF 0.8 GI */ +#define HE_PHY_ER_SU_1X_LTF_0_8_GI_FSZ 1 +#define HE_PHY_MIDAMBLE_2X_1X_LTF_IDX 69 /* Midamble TX/RX 2x & 1x HE LTF */ +#define HE_PHY_MIDAMBLE_2X_1X_LTF_FSZ 1 +#define HE_PHY_DCM_MAX_BW_IDX 70 /* DCM Max BW */ +#define HE_PHY_DCM_MAX_BW_FSZ 2 +#define HE_PHY_ABOVE16_OFDM_SYM_IDX 72 /* Longer than 16 HE-SIGB OFDM + * Symbol support + */ +#define HE_PHY_ABOVE16_OFDM_SYM_FSZ 1 +#define HE_PHY_NON_TRIG_CQI_FDBK_IDX 73 /* Non-triggered CQI feedback Support */ +#define HE_PHY_NON_TRIG_CQI_FDBK_FSZ 1 +#define HE_PHY_1024_QAM_TX_BELOW_242_RU_IDX 74 /* Tx 1024 QAM in < 242 RU Tone Support */ +#define HE_PHY_1024_QAM_TX_BELOW_242_RU_FSZ 1 +#define HE_PHY_1024_QAM_RX_BELOW_242_RU_IDX 75 /* Rx 1024 QAM in < 242 RU Tone Support */ +#define HE_PHY_1024_QAM_RX_BELOW_242_RU_FSZ 1 +#define HE_PHY_RX_FULL_BW_MU_COMP_SIGB_IDX 76 /* Rx Full BW MU PPDU with Comp. SIGB */ +#define HE_PHY_RX_FULL_BW_MU_COMP_SIGB_FSZ 1 +#define HE_PHY_RX_FULL_BW_MU_NON_COMP_SIGB_IDX 77 /* Rx Full BW MU PPDU Non-Comp SIGB */ +#define HE_PHY_RX_FULL_BW_MU_NON_COMP_SIGB_FSZ 1 + +/* HE Mac Capabilities values */ +/* b3-b4: Fragmentation Support field (table 9-262z) */ +#define HE_MAC_FRAG_NOSUPPORT 0 /* dynamic fragmentation not supported */ +#define HE_MAC_FRAG_PER_MPDU 1 /* dynamic fragmentation of MPDU/SMPDU */ +#define HE_MAC_FRAG_ONE_PER_AMPDU 2 /* upto 1 fragment per AMPDU/MMPDU */ +#define HE_MAC_FRAG_MULTI_PER_AMPDU 3 /* multiple fragment per AMPDU */ + +/* b5-b7 : Maximum Number Of Fragmented MSDUs/AMSDUs Exponent */ +#define HE_MAC_MAXFRAG_NUM_NO_RESTRICT 7 + +/* b8-b9: Minimum payload size of first fragment */ +#define HE_MAC_MINFRAG_NO_RESTRICT 0 /* no restriction on min. payload size */ +#define HE_MAC_MINFRAG_SIZE_128 1 /* minimum payload size of 128 Bytes */ +#define HE_MAC_MINFRAG_SIZE_256 2 /* minimum payload size of 256 Bytes */ +#define HE_MAC_MINFRAG_SIZE_512 3 /* minimum payload size of 512 Bytes */ + +/* b10-b11: Trigger Frame MAC Padding Duration */ +#define HE_MAC_TRIG_MAC_PAD_0 0 +#define HE_MAC_TRIG_MAC_PAD_8us 1 +#define HE_MAC_TRIG_MAC_PAD_16us 2 + +/* b15-b16: HE Link Adaptation */ +#define HE_MAC_SEND_NO_MFB 0 /* if STA does not provide HE MFB */ +#define HE_MAC_SEND_UNSOLICATED_MFB 2 /* if STA provides unsolicited HE MFB */ +#define HE_MAC_SEND_MFB_IN_RESPONSE 3 /* if STA can provide HE MFB in response to + * HE MRQ and if the STA provides unsolicited HE MFB. + */ + +/* b27-b28: Max. AMPDU Length HE Exponent */ +/* Use Max AMPDU length exponent from VHT or HT */ +#define HE_MAC_MAX_AMPDU_EXP_ADOPT_VHT (0) +/* Max. AMPDU length = + * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_1) -1 (if this value in VHT CAP is 7) or + * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_1) -1 (if this value in HT CAP is 3). + */ +#define HE_MAC_MAX_AMPDU_EXP_HE_1 (1) +/* Max. AMPDU length = + * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_2) -1 (if this value in VHT CAP is 7) or + * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_2) -1 (if this value in HT CAP is 3). + */ +#define HE_MAC_MAX_AMPDU_EXP_HE_2 (2) +/* Max. AMPDU length = + * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_3) -1 (if this value in VHT CAP is 7) or + * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_3) -1 (if this value in HT CAP is 3). 
+ */
+#define HE_MAC_MAX_AMPDU_EXP_HE_3 (3)
+
+/* HE PHY Capabilities values */
+/* b1-b7: Channel Width Support field */
+#define HE_PHY_CH_WIDTH_2G_40 0x01
+#define HE_PHY_CH_WIDTH_5G_80 0x02
+#define HE_PHY_CH_WIDTH_5G_160 0x04
+#define HE_PHY_CH_WIDTH_5G_80P80 0x08
+#define HE_PHY_CH_WIDTH_2G_242RU 0x10
+#define HE_PHY_CH_WIDTH_5G_242RU 0x20
+
+/* b8-b11: Preamble puncturing Rx */
+/* Rx of 80 MHz preamble where secondary 20 MHz subchannel is punctured */
+#define HE_PHY_PREAMBLE_PUNC_RX_0 0x1
+/* Rx of 80 MHz preamble where one of two 20 MHz subchannels in secondary 40 MHz is punctured */
+#define HE_PHY_PREAMBLE_PUNC_RX_1 0x2
+/* Rx of 160 MHz or 80+80 MHz preamble where, within the primary 80 MHz of
+ * the preamble, only the secondary 20 MHz is punctured
+ */
+#define HE_PHY_PREAMBLE_PUNC_RX_2 0x4
+/* Rx of 160 MHz or 80+80 MHz preamble where, within the primary 80 MHz of
+ * the preamble, the primary 40 MHz is present
+ */
+#define HE_PHY_PREAMBLE_PUNC_RX_3 0x8
+
+/* b24-b26: DCM Encoding Tx */
+#define HE_PHY_TX_DCM_ENC_NOSUPPORT 0x00
+#define HE_PHY_TX_DCM_ENC_BPSK 0x01
+#define HE_PHY_TX_DCM_ENC_QPSK 0x02
+#define HE_PHY_TX_DCM_ENC_QAM 0x03
+
+#define HE_PHY_TX_DCM_1_SS 0x00
+#define HE_PHY_TX_DCM_2_SS 0x01
+
+/* b27-b29: DCM Encoding Rx */
+#define HE_PHY_RX_DCM_ENC_NOSUPPORT 0x00
+#define HE_PHY_RX_DCM_ENC_BPSK 0x01
+#define HE_PHY_RX_DCM_ENC_QPSK 0x02
+#define HE_PHY_RX_DCM_ENC_QAM 0x03
+
+#define HE_PHY_RX_DCM_1_SS 0x00
+#define HE_PHY_RX_DCM_2_SS 0x01
+
+/* b70-b71: DCM Max BW */
+#define HE_PHY_DCM_MAX_BW_20 0
+#define HE_PHY_DCM_MAX_BW_40 1
+#define HE_PHY_DCM_MAX_BW_80 2
+#define HE_PHY_DCM_MAX_BW_160 3
+
+/* HE Duration based RTS Threshold Figure 9-589cr */
+#define HE_RTS_THRES_DISABLED 1023
+#define HE_RTS_THRES_ALL_FRAMES 0
+#define HE_RTS_THRES_MASK 0x03ff
+
+/* Tx Rx HE MCS Support field format : Table 9-589cm */
+#define HE_TX_RX_MCS_NSS_SUP_FIELD_MIN_SIZE 4u
+
+/**
+* Bandwidth configuration indices used in the HE TX-RX MCS support field
+* Section 9.4.2.218.4
+*/
+#define HE_BW20_CFG_IDX 0
+#define HE_BW40_CFG_IDX 1
+#define HE_BW80_CFG_IDX 2
+#define HE_BW80P80_CFG_IDX 3
+#define HE_BW160_CFG_IDX 4
+#define HE_MAX_BW_CFG 5
+
+#define HE_MCS_CODE_0_7 0u
+#define HE_MCS_CODE_0_9 1u
+#define HE_MCS_CODE_0_11 2u
+#define HE_MCS_CODE_NONE 3u
+#define HE_MCS_CODE_SIZE 2u /* num bits */
+#define HE_MCS_CODE_MASK 0x3u /* mask for 1-stream */
+
+/* Defines for The Max HE MCS For n SS subfield (where n = 1, ..., 8) */
+#define HE_MCS_MAP_NSS_MAX 8u /* Max number of streams possible */
+#define HE_MCS_NSS_SET_MASK 0xffffu /* Field is to be 16 bits long */
+#define HE_MCS_NSS_GET_SS_IDX(nss) (((nss)-1u) * HE_MCS_CODE_SIZE)
+#define HE_MCS_NSS_GET_MCS(nss, mcs_nss_map) \
+	(((mcs_nss_map) >> HE_MCS_NSS_GET_SS_IDX(nss)) & HE_MCS_CODE_MASK)
+#define HE_MCS_NSS_SET_MCS(nss, mcs_code, mcs_nss_map) \
+	do { \
+		(mcs_nss_map) &= (~(HE_MCS_CODE_MASK << HE_MCS_NSS_GET_SS_IDX(nss))); \
+		(mcs_nss_map) |= (((mcs_code) & HE_MCS_CODE_MASK) << HE_MCS_NSS_GET_SS_IDX(nss)); \
+		(mcs_nss_map) &= (HE_MCS_NSS_SET_MASK); \
+	} while (0)
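A short usage sketch for the get/set macros above (illustrative only): start with every stream at HE_MCS_CODE_NONE, advertise MCS 0-11 on the first two streams, then read one entry back.

/* Illustrative only: build and query a 16-bit HE MCS-NSS unit map. */
uint16 mcs_nss_map = HE_MCS_NSS_SET_MASK; /* all streams = HE_MCS_CODE_NONE */

HE_MCS_NSS_SET_MCS(1u, HE_MCS_CODE_0_11, mcs_nss_map);
HE_MCS_NSS_SET_MCS(2u, HE_MCS_CODE_0_11, mcs_nss_map);

if (HE_MCS_NSS_GET_MCS(1u, mcs_nss_map) != HE_MCS_CODE_NONE) {
	/* stream 1 carries data; streams 3-8 remain unsupported */
}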
+
+#define HE_BW80_ORDR_IDX 0u
+#define HE_BW160_ORDR_IDX 1u
+#define HE_BW80P80_ORDR_IDX 2u
+
+#define HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN 2u /* 2 bytes */
+#define HE_MCS_NSS_SUP_FLD_UNIT_MAP_SZ (HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN * 8u) /* 16 bits */
+
+/* Two unit-maps (TX+RX) */
+#define HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN (HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN * 2u)
+#define HE_MCS_NSS_SUP_FLD_TXRX_MAP_SZ (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN * 8u) /* 32 bits */
+
+/* One TX-RX unit-map (80 MHz) */
+#define HE_MCS_NSS_SUP_FLD_MIN_LEN (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN)
+/* Three TX-RX unit-maps (80 MHz, 160MHz, 80+80MHz) */
+#define HE_MCS_NSS_SUP_FLD_MAX_LEN (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN * 3u)
+
+/* HE Capabilities element */
+BWL_PRE_PACKED_STRUCT struct he_cap_ie {
+	uint8 id;
+	uint8 len;
+	uint8 id_ext;
+	he_mac_cap_t mac_cap; /* MAC Capabilities Information */
+	he_phy_cap_t phy_cap; /* PHY Capabilities Information */
+	/* he_tx_rx_mcs_nss_sup_t tx_rx_mcs_nss_sup; */ /* Tx Rx HE MCS NSS Support (variable) */
+	/* he_ppe_ths_t ppe_ths; */ /* PPE Thresholds (optional) */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_cap_ie he_cap_ie_t;
+
+/* Multiple BSSID element */
+BWL_PRE_PACKED_STRUCT struct nontrans_BSSID_cap {
+	uint8 id; /* 83 */
+	uint8 len;
+	uint16 capability;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct nontrans_BSSID_cap nontrans_BSSID_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct multi_BSSID_index {
+	uint8 id; /* 85 */
+	uint8 len; /* 3 in beacon, 1 in probe response */
+	uint8 bssid_index; /* between 1 and 2^n - 1 */
+	uint8 dtim_period; /* only valid in beacon */
+	uint8 dtim_count; /* only valid in beacon */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct multi_BSSID_index multi_BSSID_index_t;
+
+BWL_PRE_PACKED_STRUCT struct fms_descriptor {
+	uint8 id; /* 86 */
+	uint8 len;
+	uint8 num_FMS_counters;
+	uint8 *FMS_counters;
+	uint8 *FMSID;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct fms_descriptor fms_descriptor_t;
+
+BWL_PRE_PACKED_STRUCT struct nontrans_BSSID_profile_subie {
+	uint8 subie_id; /* 0 */
+	uint8 subie_len;
+	uint8 moreie[1];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct nontrans_BSSID_profile_subie nontrans_BSSID_profile_subie_t;
+
+BWL_PRE_PACKED_STRUCT struct multi_BSSID_ie {
+	uint8 id;
+	uint8 len;
+	uint8 maxBSSID_indicator;
+	nontrans_BSSID_profile_subie_t profile[1];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct multi_BSSID_ie multi_BSSID_ie_t;
+#define DOT11_MULTIPLE_BSSID_PROFILE_SUBID 0
+
+/* Table 9-262ab, Highest MCS Supported subfield encoding */
+#define HE_CAP_MCS_CODE_0_7 0
+#define HE_CAP_MCS_CODE_0_8 1
+#define HE_CAP_MCS_CODE_0_9 2
+#define HE_CAP_MCS_CODE_0_10 3
+#define HE_CAP_MCS_CODE_0_11 4
+#define HE_CAP_MCS_CODE_SIZE 3 /* num bits for 1-stream */
+#define HE_CAP_MCS_CODE_MASK 0x7 /* mask for 1-stream */
+
+#define HE_CAP_MCS_MAP_NSS_MAX 8u /* Max number of streams possible */
+
+#define HE_MAX_RU_COUNT 4u /* Max number of RU allocation possible */
+
+#define HE_NSSM1_IDX 0 /* Offset of NSSM1 field */
+#define HE_NSSM1_LEN 3 /* length of NSSM1 field in bits */
+
+#define HE_RU_INDEX_MASK_IDX 3 /* Offset of RU index mask field */
+#define HE_RU_INDEX_MASK_LEN 4u /* length of RU Index mask field in bits */
+
+/* PPE Threshold field (figure 9-589co) */
+#define HE_PPE_THRESH_NSS_RU_FSZ 3u
+
+/* PPE Threshold Info field (figure 9-589cp) */
+/* ruc: RU Count; NSSnM1: NSSn - 1; RUmM1: RUm - 1 */
+/* bit offset in PPE Threshold field */
+#define HE_PPET16_BIT_OFFSET(ruc, NSSnM1, RUmM1) \
+	(HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((NSSnM1) * (ruc) + (RUmM1)) * 6)
+
+#define HE_PPET8_BIT_OFFSET(ruc, NSSnM1, RUmM1) \
+	(HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((NSSnM1) * (ruc) + (RUmM1)) * 6 + 3)
+
+/* Total PPE Threshold field byte length (Figure 9-589cq) */
+#define HE_PPE_THRESH_LEN(nss, ruc) \
+	(CEIL((HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((nss) * (ruc) * 6)), 8))
+
+/* RU Allocation Index encoding (table 9-262ae) */
+#define HE_RU_ALLOC_IDX_242 0 /* RU alloc: 242 tones */
+#define HE_RU_ALLOC_IDX_484 1 /* RU alloc: 484 tones - 40Mhz */
+#define HE_RU_ALLOC_IDX_996 2 /* RU alloc: 996 tones - 80Mhz */ +#define HE_RU_ALLOC_IDX_2x996 3 /* RU alloc: 2x996 tones - 80p80/160Mhz */ + +/* Constellation Index encoding (table 9-262ac) */ +#define HE_CONST_IDX_BPSK 0 +#define HE_CONST_IDX_QPSK 1 +#define HE_CONST_IDX_16QAM 2 +#define HE_CONST_IDX_64QAM 3 +#define HE_CONST_IDX_256QAM 4 +#define HE_CONST_IDX_1024QAM 5 +#define HE_CONST_IDX_RSVD 6 +#define HE_CONST_IDX_NONE 7 + +/* Min HE cap ie length when only 80Mhz is supported */ +#define HE_CAP_IE_MIN_LEN (sizeof(he_cap_ie_t) - TLV_HDR_LEN + HE_MCS_NSS_SUP_FLD_MIN_LEN) + +/* Max HE cap ie length considering MAX NSS and RU */ +#define HE_CAP_IE_MAX_LEN (sizeof(he_cap_ie_t) - TLV_HDR_LEN + HE_MCS_NSS_SUP_FLD_MAX_LEN + \ + HE_PPE_THRESH_LEN(HE_CAP_MCS_MAP_NSS_MAX, HE_MAX_RU_COUNT)) +/** + * HE Operation IE (Section 9.4.2.238) + */ +/* HE Operation Parameters field (figure 9-589cr) */ +#define HE_OP_PARAMS_SIZE 3u +typedef uint8 he_op_parms_t[HE_OP_PARAMS_SIZE]; + +/* bit position and field width */ +#define HE_OP_DEF_PE_DUR_IDX 0u /* Default PE Duration */ +#define HE_OP_DEF_PE_DUR_FSZ 3u +#define HE_OP_TWT_REQD_IDX 3u /* TWT Required */ +#define HE_OP_TWT_REQD_FSZ 1u +#define HE_OP_TXOP_DUR_RTS_THRESH_IDX 4u /* TXOP Duration Based RTS Threshold */ +#define HE_OP_TXOP_DUR_RTS_THRESH_FSZ 10u +#define HE_OP_VHT_OP_PRESENT_IDX 14u /* VHT Oper Info Present */ +#define HE_OP_VHT_OP_PRESENT_FSZ 1u +#define HE_OP_COL_LOC_BSS_IDX 15u +#define HE_OP_COL_LOC_BSS_FSZ 1u +#define HE_OP_ER_SU_DISABLE_IDX 16u +#define HE_OP_ER_SU_DISABLE_FSZ 1u +#define HE_OP_6G_OP_INFO_PRESENT_IDX 17u +#define HE_OP_6G_OP_INFO_PRESENT_FSZ 1u + +/* BSS Color Information field (figure 9-589cs) */ +#define HE_OP_BSS_COLOR_IDX 0 /* BSS Color */ +#define HE_OP_BSS_COLOR_FSZ 6 +#define HE_OP_PART_BSS_COLOR_IDX 6 /* Partial BSS Color */ +#define HE_OP_PART_BSS_COLOR_FSZ 1 +#define HE_OP_DISABLE_BSSCOLOR_IDX 7 /* BSS Color Disable */ +#define HE_OP_DISABLE_BSSCOLOR_FSZ 1 + +/* b4-b13: TXOP Duration RTS threshold */ +#define HE_OP_TXOP_RTS_THRESH_DISABLED 1023u + +#define HE_BASIC_MCS_NSS_SIZE 2u +typedef uint8 he_basic_mcs_nss_set_t[HE_BASIC_MCS_NSS_SIZE]; + +#define HE_OP_MAX_BSSID_IND_LEN 1u +#define HE_OP_6G_OPER_INFO_LEN 5u +/* HE Operation element */ +BWL_PRE_PACKED_STRUCT struct he_op_ie { + uint8 id; + uint8 len; + uint8 id_ext; + he_op_parms_t parms; + uint8 bsscolor_info; + he_basic_mcs_nss_set_t mcs_nss_op; /* Basic HE MCS & NSS Set */ +} BWL_POST_PACKED_STRUCT; + +typedef struct he_op_ie he_op_ie_t; + +#define HE_OP_IE_MIN_LEN (sizeof(he_op_ie_t) - TLV_HDR_LEN) +#define HE_OP_IE_MAX_LEN (sizeof(he_op_ie_t) - TLV_HDR_LEN + VHT_OP_INFO_LEN +\ + HE_OP_MAX_BSSID_IND_LEN + HE_OP_6G_OPER_INFO_LEN) + +#define HE_6G_OP_BW_20 0u +#define HE_6G_OP_BW_40 1u +#define HE_6G_OP_BW_80 2u +#define HE_6G_OP_BW_160_80P80 3u + +/* Regulatory Info subfield in the United States */ +#define HE_6G_OP_REG_INFO_INDOOR_AP_US 0u +#define HE_6G_OP_REG_INFO_SP_AP_US 1u + +/* Figure 9-788l Control field format in Draft P802.11ax_D6.0 */ +#define HE_6G_CTL_CHBW_MASK 0x03u +#define HE_6G_OP_CTL_CHBW(ctl) (ctl & HE_6G_CTL_CHBW_MASK) +#define HE_6G_CTL_DUP_BCN_MASK 0x04u +#define HE_6G_CTL_REG_INFO_MASK 0x38u +#define HE_6G_CTL_REG_INFO_SHIFT 3u +#define HE_6G_OP_CTL_REG_INFO(ctl) \ + ((ctl & HE_6G_CTL_REG_INFO_MASK) >> HE_6G_CTL_REG_INFO_SHIFT) + +/* HE 6G Operation info */ +BWL_PRE_PACKED_STRUCT struct he_6g_op_info { + uint8 pri_chan; + uint8 control; + uint8 seg0; + uint8 seg1; + uint8 min_rate; +} BWL_POST_PACKED_STRUCT; + +typedef struct 
he_6g_op_info he_6g_op_info_t; + +/* HE Extended Capabilities element */ +BWL_PRE_PACKED_STRUCT struct he_6g_cap_ie { + uint8 id; + uint8 len; + uint8 id_ext; + uint16 cap_info; /* Capabilities Information */ +} BWL_POST_PACKED_STRUCT; + +typedef struct he_6g_cap_ie he_6g_cap_ie_t; +#define HE_6G_CAP_IE_LEN sizeof(he_6g_cap_ie_t) + +/* HE Capabilities Information bit position and fieldwidth. + * Figure 9-787ai Capabilities Information field format in + * Draft P802.11ax_D5.0. + */ +#define HE_6G_CAP_MIN_MPDU_START_MASK 0x0007u +#define HE_6G_CAP_MAX_AMPDU_LEN_EXP_MASK 0x0038u +#define HE_6G_CAP_MAX_AMPDU_LEN_EXP_SHIFT 3u +#define HE_6G_CAP_MAX_MPDU_LEN_MASK 0x00C0u +#define HE_6G_CAP_MAX_MPDU_LEN_SHIFT 6u +#define HE_6G_CAP_SM_PW_SAVE_MASK 0x0600u +#define HE_6G_CAP_SM_PW_SAVE_SHIFT 9u +#define HE_6G_CAP_RD_RESPONDER_MASK 0x0800u +#define HE_6G_CAP_RD_RESPONDER_SHIFT 11u +#define HE_6G_CAP_RX_ANT_PATN_CONST_MASK 0x1000u +#define HE_6G_CAP_RX_ANT_PATN_CONST_SHIFT 12u +#define HE_6G_CAP_TX_ANT_PATN_CONST_MASK 0x2000u +#define HE_6G_CAP_TX_ANT_PATN_CONST_SHIFT 13u + +#define HE_6G_CAP_MIN_MPDU_START(cap) ((cap) & HE_6G_CAP_MIN_MPDU_START_MASK) +#define HE_6G_CAP_MAX_AMPDU_LEN_EXP(cap) (((cap) & HE_6G_CAP_MAX_AMPDU_LEN_EXP_MASK) >> \ + HE_6G_CAP_MAX_AMPDU_LEN_EXP_SHIFT) +#define HE_6G_CAP_MAX_MPDU_LEN(cap) (((cap) & HE_6G_CAP_MAX_MPDU_LEN_MASK) >> \ + HE_6G_CAP_MAX_MPDU_LEN_SHIFT) +#define HE_6G_CAP_SM_PW_SAVE(cap) (((cap) & HE_6G_CAP_SM_PW_SAVE_MASK) >> \ + HE_6G_CAP_SM_PW_SAVE_SHIFT) +#define HE_6G_CAP_RD_RESPONDER(cap) (((cap) & HE_6G_CAP_RD_RESPONDER_MASK) != 0) +#define HE_6G_CAP_RX_ANT_PATN_CONST(cap) (((cap) & HE_6G_CAP_RX_ANT_PATN_CONST_MASK) != 0) +#define HE_6G_CAP_TX_ANT_PATN_CONST(cap) (((cap) & HE_6G_CAP_TX_ANT_PATN_CONST_MASK) != 0) + +/** + * UORA parameter set element (sec 9.4.2.244) + */ +BWL_PRE_PACKED_STRUCT struct he_uora_ie { + uint8 id; + uint8 len; + uint8 id_ext; + uint8 ocw_range; +} BWL_POST_PACKED_STRUCT; + +typedef struct he_uora_ie he_uora_ie_t; + +/* Bit field Masks */ +#define HE_UORA_EOCW_MIN_IDX 0u +#define HE_UORA_EOCW_MIN_FSZ 3u +#define HE_UORA_EOCW_MAX_IDX 3u +#define HE_UORA_EOCW_MAX_FSZ 3u +/* Reserved -bit6 -7 */ + +/** + * MU EDCA parameter set element (sec 9.4.2.245) + */ +BWL_PRE_PACKED_STRUCT struct he_mu_ac_param_record { + uint8 aci_aifsn; + uint8 ecw_min_max; + uint8 muedca_timer; +} BWL_POST_PACKED_STRUCT; + +typedef struct he_mu_ac_param_record he_mu_ac_param_record_t; + +BWL_PRE_PACKED_STRUCT struct he_muedca_ie { + uint8 id; + uint8 len; + uint8 id_ext; + uint8 mu_qos_info; + he_mu_ac_param_record_t param_ac[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; + +typedef struct he_muedca_ie he_muedca_ie_t; + +#define HE_MU_EDCA_PARAM_UPD_CNT_IDX 0u /* EDCA Parameter Set Update Count */ +#define HE_MU_EDCA_PARAM_UPD_CNT_LEN 4u + +#define HE_MU_SIGA_SIGB_MCS_DPCU 0 +#define HE_MU_SIGA_SIGB_SYMS_DPCU 3u +#define HE_MU_SIGA_GI_LTF_DPCU 3u + +/** + * Spatial Reuse Parameter Set element (sec 9.4.2.241) + */ +/* bit position and field width */ +#define HE_SRP_CTRL_SRP_DISALLOW_IDX 0 /* SRP Disallowed */ +#define HE_SRP_CTRL_SRP_DISALLOW_FSZ 1 +#define HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW_IDX 1 /* NonSRG OBSS PD SR Disallowed */ +#define HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW_FSZ 1 +#define HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT_IDX 2 /* NonSRG Offset Present */ +#define HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT_FSZ 1 +#define HE_SRP_CTRL_SRG_INFO_PRESENT_IDX 3 /* SRG Information Present */ +#define HE_SRP_CTRL_SRG_INFO_PRESENT_FSZ 1 +#define 
HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED_IDX 4 /* HESIGA_SRP_value15_allowed */
+#define HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED_FSZ 1
+/* Reserved b5-b7 */
+
+/* Spatial Reuse Parameter Set element */
+BWL_PRE_PACKED_STRUCT struct he_srp_ie {
+	uint8 id;
+	uint8 len;
+	uint8 id_ext;
+	uint8 sr_control;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_srp_ie he_srp_ie_t;
+
+#define HE_SRP_NON_SRG_OBSS_PD_MAX_OFFSET_LEN 1u
+#define HE_SRP_SRG_OBSS_PD_MIN_OFFSET_LEN 1u
+#define HE_SRP_SRG_OBSS_PD_MAX_OFFSET_LEN 1u
+#define HE_SRP_SRG_BSSCOLOR_BITMAP_LEN 8u
+#define HE_SRP_SRG_PARTIAL_BSSID_BITMAP_LEN 8u
+
+#define HE_SRP_IE_MIN_LEN (sizeof(he_srp_ie_t) - TLV_HDR_LEN)
+#define HE_SRP_IE_MAX_LEN (sizeof(he_srp_ie_t) - TLV_HDR_LEN +\
+	HE_SRP_NON_SRG_OBSS_PD_MAX_OFFSET_LEN + HE_SRP_SRG_OBSS_PD_MIN_OFFSET_LEN +\
+	HE_SRP_SRG_OBSS_PD_MAX_OFFSET_LEN + HE_SRP_SRG_BSSCOLOR_BITMAP_LEN +\
+	HE_SRP_SRG_PARTIAL_BSSID_BITMAP_LEN)
+
+/* Bit field Masks */
+#define HE_SRP_CTRL_SRP_DISALLOW (1 << HE_SRP_CTRL_SRP_DISALLOW_IDX)
+#define HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW (1 << HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW_IDX)
+#define HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT (1 << HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT_IDX)
+#define HE_SRP_CTRL_SRG_INFO_PRESENT (1 << HE_SRP_CTRL_SRG_INFO_PRESENT_IDX)
+#define HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED (1 << HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED_IDX)
+
+/**
+ * ref: (Table 28-21 Page 473 D3.0)
+ *
+ * -Spatial Reuse field encoding for an HE SU PPDU, HE ER SU PPDU, and HE MU PPDU
+ */
+#define HE_SRP_DISALLOW 0u /* SRP_DISALLOW */
+/* Values 1 to 12 are reserved */
+#define HE_SR_RESTRICTED 13u /* SR Restricted */
+#define HE_SR_DELAY 14u /* SR Delay */
+#define HE_SRP_AND_NON_SRG_OBSS_PD_PROHIBITED 15u /* SRP_AND_NON_SRG_OBSS_PD_PROHIBITED */
+#define HE_SRP_MASK 0x0Fu
+
+/**
+ * BSS Color Change Announcement element (sec 9.4.2.243)
+ */
+/* bit position and field width */
+#define HE_BSSCOLOR_CHANGE_NEWCOLOR_IDX 0 /* New BSSColor info */
+#define HE_BSSCOLOR_CHANGE_NEWCOLOR_FSZ 6u
+
+/* HE Bsscolor change element */
+BWL_PRE_PACKED_STRUCT struct he_bsscolor_change_ie {
+	uint8 id;
+	uint8 len;
+	uint8 id_ext;
+	uint8 color_switch_cntdwn;
+	uint8 new_bsscolor_info;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_bsscolor_change_ie he_bsscolor_change_ie_t;
+
+/* HE SU bit position and field width */
+#define HE_SU_PPDU_FORMAT_IDX 0u
+#define HE_SU_PPDU_FORMAT_FSZ 1u
+#define HE_SU_PPDU_BEAM_CHANGE_IDX 1u
+#define HE_SU_PPDU_BEAM_CHANGE_FSZ 1u
+#define HE_SU_PPDU_DL_UL_IDX 2u
+#define HE_SU_PPDU_DL_UL_FSZ 1u
+#define HE_SU_PPDU_MCS_IDX 3u
+#define HE_SU_PPDU_MCS_FSZ 4u
+#define HE_SU_PPDU_DCM_IDX 7u
+#define HE_SU_PPDU_DCM_FSZ 1u
+#define HE_SU_PPDU_BSS_COLOR_IDX 8u
+#define HE_SU_PPDU_BSS_COLOR_FSZ 6u
+#define HE_SU_PPDU_SR_IDX 15u
+#define HE_SU_PPDU_SR_FSZ 4u
+#define HE_SU_PPDU_BW_IDX 19u
+#define HE_SU_PPDU_BW_FSZ 2u
+#define HE_SU_PPDU_GI_IDX 21u
+#define HE_SU_PPDU_GI_FSZ 2u
+#define HE_SU_PPDU_LTF_SIZE_IDX 21u
+#define HE_SU_PPDU_LTF_SIZE_FSZ 2u
+#define HE_SU_PPDU_NUM_LTF_IDX 21u
+#define HE_SU_PPDU_NUM_LTF_FSZ 2u
+#define HE_SU_PPDU_NSTS_IDX 23u
+#define HE_SU_PPDU_NSTS_FSZ 3u
+#define HE_SU_PPDU_DOPPLER_NOTSET_NSTS_IDX 23u
+#define HE_SU_PPDU_DOPPLER_NOTSET_NSTS_FSZ 3u
+#define HE_SU_PPDU_DOPPLER_SET_NSTS_IDX 23u
+#define HE_SU_PPDU_DOPPLER_SET_NSTS_FSZ 2u
+#define HE_SU_PPDU_MIDAMBLE_IDX 25u
+#define HE_SU_PPDU_MIDAMBLE_FSZ 1u
+#define HE_SU_PPDU_TXOP_IDX 26u
+#define HE_SU_PPDU_TXOP_FSZ 7u
+#define HE_SU_PPDU_CODING_IDX 33u
+#define HE_SU_PPDU_CODING_FSZ 1u
+#define
HE_SU_PPDU_LDPC_IDX 34u +#define HE_SU_PPDU_LDPC_FSZ 1u +#define HE_SU_PPDU_STBC_IDX 35u +#define HE_SU_PPDU_STBC_FSZ 1u +#define HE_SU_PPDU_TXBF_IDX 36u +#define HE_SU_PPDU_TXBF_FSZ 1u +#define HE_SU_PPDU_PADDING_IDX 37u +#define HE_SU_PPDU_PADDING_FSZ 2u +#define HE_SU_PPDU_PE_IDX 39u +#define HE_SU_PPDU_PE_FSZ 1u +#define HE_SU_PPDU_DOPPLER_IDX 41u +#define HE_SU_PPDU_DOPPLER_FSZ 1u + +/* For HE SU/RE SIG A : PLCP0 bit fields [32bit] */ +#define HE_SU_RE_SIGA_FORMAT_MASK 0x00000001u +#define HE_SU_RE_SIGA_RE_VAL 0x00000000u +#define HE_SU_RE_SIGA_SU_VAL 0x00000001u +#define HE_SU_RE_SIGA_FORMAT_SHIFT 0u +#define HE_SU_RE_SIGA_BEAM_CHANGE_SHIFT 1u +#define HE_SU_RE_SIGA_UL_DL_SHIFT 2u +#define HE_SU_RE_SIGA_MCS_MASK 0x00000078u +#define HE_SU_RE_SIGA_MCS_SHIFT 3u +#define HE_SU_RE_SIGA_DCM_MASK 0x00000080u +#define HE_SU_RE_SIGA_DCM_SHIFT 7u +#define HE_SU_RE_SIGA_BSS_COLOR_SHIFT 8u /* Bits 13:8 */ +#define HE_SU_RE_SIGA_BSS_COLOR_MASK 0x00003F00u +#define HE_SU_RE_SIGA_RSVD_PLCP0_VAL 0x00004000u +#define HE_SU_RE_SIGA_SRP_VAL_SHIFT 15u /* Bits 18:15 */ +#define HE_SU_RE_SIGA_SRP_VAL_MASK 0x00078000u +#define HE_SU_SIGA_BW_MASK 0x00180000u +#define HE_SU_SIGA_BW_SHIFT 19u +#define HE_RE_SIGA_TONE_MASK 0x00180000u +#define HE_RE_SIGA_TONE_SHIFT 19u +#define HE_SU_RE_SIGA_20MHZ_VAL 0x00000000u +#define HE_SU_RE_SIGA_40MHZ_VAL 0x00080000u +#define HE_SU_RE_SIGA_80MHZ_VAL 0x00100000u +#define HE_SU_RE_SIGA_160MHZ_VAL 0x00180000u +#define HE_SU_RE_SIGA_GI_LTF_MASK 0x00600000u +#define HE_SU_RE_SIGA_1xLTF_GI8us_VAL 0x00000000u +#define HE_SU_RE_SIGA_2xLTF_GI8us_VAL 0x00200000u +#define HE_SU_RE_SIGA_2xLTF_GI16us_VAL 0x00400000u +#define HE_SU_RE_SIGA_4xLTF_GI32us_VAL 0x00600000u +#define HE_SU_RE_SIGA_GI_LTF_SHIFT 21u +#define HE_SU_RE_SIGA_NSTS_MASK 0x03800000u +#define HE_SU_RE_SIGA_NSTS_SHIFT 23u +#define HE_SU_RE_SIGA_TXOP_PLCP0_MASK 0xFC000000u +#define HE_SU_RE_SIGA_TXOP_PLCP0_SHIFT 26u + +/* For HE SU SIG EXT : PLCP0 bit fields [32bit] */ +#define HE_SU_SIG_EXT_GI_LTF_MASK 0x00000003u +#define HE_SU_SIG_EXT_1xLTF_GI8us_VAL 0x00000000u +#define HE_SU_SIG_EXT_2xLTF_GI8us_VAL 0x00000001u +#define HE_SU_SIG_EXT_2xLTF_GI16us_VAL 0x00000002u +#define HE_SU_SIG_EXT_4xLTF_GI32us_VAL 0x00000003u +#define HE_SU_SIG_EXT_STBC_MASK 0x00000040u +#define HE_SU_SIG_EXT_STBC_SHIFT 6u +#define HE_SU_SIG_EXT_LDPC_MASK 0x00000080u +#define HE_SU_SIG_EXT_LDPC_SHIFT 7u +#define HE_SU_SIG_EXT_MCS_MASK 0x0000f000u +#define HE_SU_SIG_EXT_MCS_SHIFT 12u +#define HE_SU_SIG_EXT_DCM_MASK 0x00010000u +#define HE_SU_SIG_EXT_DCM_SHIFT 16u +#define HE_SU_SIG_EXT_NSTS_MASK 0x000e0000u +#define HE_SU_SIG_EXT_NSTS_SHIFT 17u +#define HE_SU_SIG_EXT_CODING_MASK 0x00800000u +#define HE_SU_SIG_EXT_CODING_SHIFT 23u + +/* HE mu ppdu - bit position and field width */ +#define HE_MU_PPDU_DL_UL_IDX 0u +#define HE_MU_PPDU_DL_UL_FSZ 1u +#define HE_MU_PPDU_SIGB_MCS_IDX 1u +#define HE_MU_PPDU_SIGB_MCS_FSZ 3u +#define HE_MU_PPDU_SIGB_DCM_IDX 4u +#define HE_MU_PPDU_SIGB_DCM_FSZ 1u +#define HE_MU_PPDU_BSS_COLOR_IDX 5u +#define HE_MU_PPDU_BSS_COLOR_FSZ 6u +#define HE_MU_PPDU_SR_IDX 11u +#define HE_MU_PPDU_SR_FSZ 4u + +#define HE_MU_PPDU_SIGB_SYM_MU_MIMO_USER_IDX 18u +#define HE_MU_PPDU_SIGB_SYM_MU_MIMO_USER_FSZ 3u + +#define HE_MU_PPDU_PRE_PUNCR_SIGA_IDX 15u +#define HE_MU_PPDU_PRE_PUNCR_SIGA_FSZ 2u + +#define HE_MU_PPDU_BW_SIGA_IDX 15u +#define HE_MU_PPDU_BW_SIGA_FSZ 2u +#define HE_MU_PPDU_BW_SIGA_KNOWN_IDX 17u +#define HE_MU_PPDU_BW_SIGA_KNOWN_FSZ 1u + +#define HE_MU_PPDU_SIGB_SYMB_IDX 18u +#define HE_MU_PPDU_SIGB_SYMB_FSZ 4u + +#define 
HE_MU_PPDU_SIGB_COMP_IDX 22u +#define HE_MU_PPDU_SIGB_COMP_FSZ 1u +#define HE_MU_PPDU_GI_IDX 23u +#define HE_MU_PPDU_GI_FSZ 2u +#define HE_MU_PPDU_LTF_SIZE_IDX 23u +#define HE_MU_PPDU_LTF_SIZE_FSZ 2u +#define HE_MU_PPDU_NUM_LTF_IDX 23u +#define HE_MU_PPDU_NUM_LTF_FSZ 2u +#define HE_MU_PPDU_DOPPLER_IDX 25u +#define HE_MU_PPDU_DOPPLER_FSZ 1u +#define HE_MU_PPDU_TXOP_IDX 26u +#define HE_MU_PPDU_TXOP_FSZ 7u +#define HE_MU_PPDU_MIDAMBLE_IDX 34u +#define HE_MU_PPDU_MIDAMBLE_FSZ 3u +#define HE_MU_PPDU_LDPC_IDX 37u +#define HE_MU_PPDU_LDPC_FSZ 1u +#define HE_MU_PPDU_STBC_IDX 38u +#define HE_MU_PPDU_STBC_FSZ 1u +#define HE_MU_PPDU_PADDING_IDX 39u +#define HE_MU_PPDU_PADDING_FSZ 2u +#define HE_MU_PPDU_PE_IDX 41u +#define HE_MU_PPDU_PE_FSZ 1u + +/* he trigger ppdu - bit position and field width */ +#define HE_TRIG_PPDU_BSS_COLOR_IDX 1u +#define HE_TRIG_PPDU_BSS_COLOR_FSZ 6u + +/* full spatial reuse field */ +#define HE_TRIG_PPDU_SR_IDX 7u +#define HE_TRIG_PPDU_SR_FSZ 16u + +#define HE_TRIG_PPDU_SR1_IDX 7u +#define HE_TRIG_PPDU_SR1_FSZ 4u +#define HE_TRIG_PPDU_SR2_IDX 11u +#define HE_TRIG_PPDU_SR2_FSZ 4u +#define HE_TRIG_PPDU_SR3_IDX 15u +#define HE_TRIG_PPDU_SR3_FSZ 4u +#define HE_TRIG_PPDU_SR4_IDX 19u +#define HE_TRIG_PPDU_SR4_FSZ 4u +#define HE_TRIG_PPDU_TXOP_IDX 26u +#define HE_TRIG_PPDU_TXOP_FSZ 7u + +/* For HE MU SIG A : PLCP0 bit fields [32bit] */ +#define HE_MU_SIGA_UL_DL_SHIFT 0 +#define HE_MU_SIGA_UL_TB_PPDU 0 +#define HE_MU_SIGA_SIGB_MCS_MASK 0x000000E +#define HE_MU_SIGA_SIGB_MCS_SHIFT 1 +#define HE_MU_SIGA_SIGB_DCM_SHIFT 4 +#define HE_MU_SIGA_SIGB_DCM_DISABLED 0 +#define HE_MU_SIGA_BW_SHIFT 15 +#define HE_MU_SIGA_BW_80_UNPUNCTURED 2 +#define HE_MU_SIGA_BW_SEC_20_PUNCTURED 4 +#define HE_MU_SIGA_BW_SEC_40_PUNCTURED 5 +#define HE_MU_SIGA_SIGB_SYMS_SHIFT 18 +#define HE_MU_SIGA_GI_LTF_MASK 0x01800000 +#define HE_MU_SIGA_GI_LTF_SHIFT 23 + +/* For HE MU SIG A : PLCP1 bit fields [32bit] */ +#define HE_MU_SIGA_STBC_MASK 0x00000040 +#define HE_MU_SIGA_STBC_SHIFT 6 + +/* For HE SU/RE SIG A : PLCP1 bit fields [16bit] */ +#define HE_SU_RE_SIGA_TXOP_PLCP1_MASK 0x0001 +#define HE_SU_RE_SIGA_TXOP_PLCP1_SHIFT 0 +#define HE_SU_RE_SIGA_CODING_MASK 0x0002 +#define HE_SU_RE_SIGA_CODING_SHIFT 1 +#define HE_SU_RE_SIGA_LDPC_EXTRA_MASK 0x0004 +#define HE_SU_RE_SIGA_LDPC_EXTRA_SHIFT 2 +#define HE_SU_RE_SIGA_STBC_MASK 0x0008 +#define HE_SU_RE_SIGA_STBC_SHIFT 3 +#define HE_SU_RE_SIGA_BEAMFORM_MASK 0x0010 +#define HE_SU_RE_SIGA_BEAMFORM_SHIFT 4 +#define HE_SU_RE_SIGA_RSVD_PLCP1_VAL 0x0100 + +/* For HE MU SIG A : PLCP1 bit fields [16bit] */ +#define HE_MU_SIGA_RSVD_SHIFT 1 +#define HE_MU_SIGA_LTF_SYMS_SHIFT 2 + +/* For HE SU SIG A : RX PLCP4 bit fields [8bit] */ +#define HE_SU_SIGA2_STBC_RX_MASK 0x08u + +/* For HE ER SIG A : RX PLCP4 bit fields [8bit] */ +#define HE_ER_SIGA2_STBC_RX_MASK 0x08u + +/* For HE MU SIG A : RX PLCP4 bit fields [8bit] */ +#define HE_MU_SIGA2_STBC_RX_MASK 0x40u + +/* This marks the end of a packed structure section. 
 */
+#include <packed_section_end.h>
+
+/* HE Action Frame */
+/* FIXME: use temporary Offsets until the spec assigns them */
+#define HE_AF_CAT_OFF 0
+#define HE_AF_ACT_OFF 1
+
+/* TWT Setup */
+#define HE_AF_TWT_SETUP_TOKEN_OFF 2
+#define HE_AF_TWT_SETUP_TWT_IE_OFF 3
+
+/* TWT Teardown */
+#define HE_AF_TWT_TEARDOWN_FLOW_OFF 2
+
+/* TWT Information */
+#define HE_AF_TWT_INFO_OFF 2
+
+/* HE Action ID */
+/* FIXME: use temporary IDs until ANA assigns them */
+#define HE_ACTION_TWT_SETUP 1
+#define HE_ACTION_TWT_TEARDOWN 2
+#define HE_ACTION_TWT_INFO 3
+
+/* HE Basic trigger frame common info fields */
+#define HE_TRIG_CMNINFO_SZ 8
+typedef uint8 he_trig_cmninfo_set_t[HE_TRIG_CMNINFO_SZ];
+
+/* bit position and field width */
+#define HE_TRIG_CMNINFO_FRMTYPE_INDX 0 /* Trigger frame type */
+#define HE_TRIG_CMNINFO_FRMTYPE_FSZ 4
+#define HE_TRIG_CMNINFO_LSIGLEN_INDX 4 /* L-sig length */
+#define HE_TRIG_CMNINFO_LSIGLEN_FSZ 12
+#define HE_TRIG_CMNINFO_CASCADEIND_INDX 16 /* Cascade indication */
+#define HE_TRIG_CMNINFO_CASCADEIND_FSZ 1
+#define HE_TRIG_CMNINFO_CSREQ_INDX 17 /* Carrier sense required */
+#define HE_TRIG_CMNINFO_CSREQ_FSZ 1
+#define HE_TRIG_CMNINFO_BWINFO_INDX 18 /* BW info */
+#define HE_TRIG_CMNINFO_BWINFO_FSZ 2
+#define HE_TRIG_CMNINFO_GI_LTF_INDX 20 /* Cp-LTF size */
+#define HE_TRIG_CMNINFO_GI_LTF_FSZ 2
+#define HE_TRIG_CMNINFO_MUMIMO_LTF_INDX 22 /* HE-LTF mask enable */
+#define HE_TRIG_CMNINFO_MUMIMO_LTF_FSZ 1
+#define HE_TRIG_CMNINFO_HELTF_SYM_INDX 23 /* HE-LTF symbols */
+#define HE_TRIG_CMNINFO_HELTF_SYM_FSZ 3
+#define HE_TRIG_CMNINFO_STBC_INDX 26 /* STBC support */
+#define HE_TRIG_CMNINFO_STBC_FSZ 1
+#define HE_TRIG_CMNINFO_LDPC_EXTSYM_INDX 27 /* LDPC extra symbol */
+#define HE_TRIG_CMNINFO_LDPC_EXTSYM_FSZ 1
+#define HE_TRIG_CMNINFO_AP_TXPWR_INDX 28 /* AP TX power */
+#define HE_TRIG_CMNINFO_AP_TXPWR_FSZ 6
+#define HE_TRIG_CMNINFO_AFACT_INDX 34 /* a-factor */
+#define HE_TRIG_CMNINFO_AFACT_FSZ 2
+#define HE_TRIG_CMNINFO_PEDISAMBIG_INDX 36 /* PE disambiguity */
+#define HE_TRIG_CMNINFO_PEDISAMBIG_FSZ 1
+#define HE_TRIG_CMNINFO_SPTIAL_REUSE_INDX 37 /* spatial re-use */
+#define HE_TRIG_CMNINFO_SPTIAL_REUSE_FSZ 16
+#define HE_TRIG_CMNINFO_DOPPLER_INDX 53 /* Doppler support */
+#define HE_TRIG_CMNINFO_DOPPLER_FSZ 1
+#define HE_TRIG_CMNINFO_HESIGA_RSVD_INDX 54 /* rsvd bits from HE-SIGA */
+#define HE_TRIG_CMNINFO_HESIGA_RSVD_FSZ 9
+#define HE_TRIG_CMNINFO_RSVD_INDX 63 /* reserved bit from HE-SIGA */
+#define HE_TRIG_CMNINFO_RSVD_FSZ 1
+
+/* HE Basic trigger frame user info fields */
+#define HE_TRIG_USRINFO_SZ 5
+typedef uint8 he_trig_usrinfo_set_t[HE_TRIG_USRINFO_SZ];
+
+/* bit position and field width */
+#define HE_TRIG_USRINFO_AID_INDX 0 /* AID */
+#define HE_TRIG_USRINFO_AID_FSZ 12
+#define HE_TRIG_USRINFO_RU_ALLOC_INDX 12 /* RU allocation index */
+#define HE_TRIG_USRINFO_RU_ALLOC_FSZ 8
+#define HE_TRIG_USRINFO_CODING_INDX 20 /* coding type (BCC/LDPC) */
+#define HE_TRIG_USRINFO_CODING_FSZ 1
+#define HE_TRIG_USRINFO_MCS_INDX 21 /* MCS index value */
+#define HE_TRIG_USRINFO_MCS_FSZ 4
+#define HE_TRIG_USRINFO_DCM_INDX 25 /* Dual carrier modulation */
+#define HE_TRIG_USRINFO_DCM_FSZ 1
+#define HE_TRIG_USRINFO_SSALLOC_STRMOFFSET_INDX 26 /* stream offset */
+#define HE_TRIG_USRINFO_SSALLOC_STRMOFFSET_FSZ 3
+#define HE_TRIG_USRINFO_SSALLOC_NSS_INDX 29 /* number of spatial streams */
+#define HE_TRIG_USRINFO_SSALLOC_NSS_FSZ 3
+#define HE_TRIG_USRINFO_TARGET_RSSI_INDX 32 /* Target RSSI */
+#define HE_TRIG_USRINFO_TARGET_RSSI_FSZ 7
+#define HE_TRIG_USRINFO_RSVD_INDX 39 /* Reserved bit */
+#define HE_TRIG_USRINFO_RSVD_FSZ 1
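The common-info (8-byte) and user-info (5-byte) fields above are again bit offsets into byte arrays, so they can be decoded with the same kind of generic extractor sketched earlier for the capability fields. Illustrative only, reusing the hypothetical he_cap_get_field() helper from that sketch:

/* Illustrative only: pull AID12 and RU Allocation out of one User Info
 * field of a received Basic Trigger frame.
 */
static void
he_trig_usrinfo_parse(const uint8 *usrinfo, unsigned int *aid, unsigned int *ru)
{
	*aid = he_cap_get_field(usrinfo, HE_TRIG_USRINFO_AID_INDX,
		HE_TRIG_USRINFO_AID_FSZ);
	*ru = he_cap_get_field(usrinfo, HE_TRIG_USRINFO_RU_ALLOC_INDX,
		HE_TRIG_USRINFO_RU_ALLOC_FSZ);
}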
+
+/* Different types of trigger frame */
+#define HE_TRIG_TYPE_BASIC_FRM 0 /* basic trigger frame */
+#define HE_TRIG_TYPE_BEAM_RPT_POLL_FRM 1 /* beamforming report poll frame */
+#define HE_TRIG_TYPE_MU_BAR_FRM 2 /* MU-BAR frame */
+#define HE_TRIG_TYPE_MU_RTS__FRM 3 /* MU-RTS frame */
+#define HE_TRIG_TYPE_BSR_FRM 4 /* Buffer status report poll */
+
+/* HE Timing related parameters (Table 28-9) */
+#define HE_T_LEG_STF 8
+#define HE_T_LEG_LTF 8
+#define HE_T_LEG_LSIG 4
+#define HE_T_RL_SIG 4
+#define HE_T_SIGA 8
+#define HE_T_STF 4 /* STF for SU / MU HE PPDUs */
+#define HE_T_TB_PPDU_STF 8 /* STF for HE trigger based PPDUs */
+#define HE_T_LEG_PREAMBLE (HE_T_LEG_STF + HE_T_LEG_LTF + HE_T_LEG_LSIG)
+#define HE_T_LEG_SYMB 4
+#define HE_RU_26_TONE 26
+#define HE_RU_52_TONE 52
+#define HE_RU_106_TONE 106
+#define HE_RU_242_TONE 242
+#define HE_RU_484_TONE 484
+#define HE_RU_996_TONE 996
+#define HE_RU_2x996_TONE 1992
+#define HE_MAX_26_TONE_RU_INDX 36
+#define HE_MAX_52_TONE_RU_INDX 52
+#define HE_MAX_106_TONE_RU_INDX 60
+#define HE_MAX_242_TONE_RU_INDX 64
+#define HE_MAX_484_TONE_RU_INDX 66
+#define HE_MAX_996_TONE_RU_INDX 67
+#define HE_MAX_2x996_TONE_RU_INDX 68
+
+/**
+ * ref: (Table 28-9 Page 285)
+ *
+ * - for calculation purpose - in multiples of 10 (*10)
+ */
+#define HE_T_LTF_1X 32
+#define HE_T_LTF_2X 64
+#define HE_T_LTF_4X 128
+#define HE_T_SYM1 136 /* OFDM symbol duration with base GI */
+#define HE_T_SYM2 144 /* OFDM symbol duration with double GI */
+#define HE_T_SYM4 160 /* OFDM symbol duration with quad GI */
+
+#define HE_N_LEG_SYM 3 /* bytes per legacy symbol */
+#define HE_N_TAIL 6 /* tail field bits for BCC */
+#define HE_N_SERVICE 16 /* bits in service field */
+#define HE_T_MAX_PE 16 /* max Packet extension duration */
+
+#endif /* _802_11ax_h_ */
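Since the HE_T_LTF_* and HE_T_SYM* values above are scaled by 10, duration arithmetic has to divide back down. A minimal sketch of that scaling (illustrative only; it ignores the HE preamble fields and packet extension):

/* Illustrative only: airtime in us for nsym data symbols at base GI
 * after the legacy preamble. HE_T_SYM1 is in 0.1 us units;
 * HE_T_LEG_PREAMBLE (20) is already in microseconds.
 */
static unsigned int
he_payload_airtime_us(unsigned int nsym)
{
	return HE_T_LEG_PREAMBLE + (nsym * HE_T_SYM1 + 9u) / 10u; /* round up */
}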
diff --git a/bcmdhd.101.10.361.x/include/802.11e.h b/bcmdhd.101.10.361.x/include/802.11e.h
new file mode 100755
index 0000000..0fbf58c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11e.h
@@ -0,0 +1,133 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */
+	uint8 type; /* WME_TYPE */
+	uint8 subtype; /* WME_SUBTYPE_TSPEC */
+	uint8 version; /* WME_VERSION */
+	tsinfo_t tsinfo; /* TS Info bit field */
+	uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval; /* Minimum Service Interval (us) */
+	uint32 max_srv_interval; /* Maximum Service Interval (us) */
+	uint32 inactivity_interval; /* Inactivity Interval (us) */
+	uint32 suspension_interval; /* Suspension Interval (us) */
+	uint32 srv_start_time; /* Service Start Time (us) */
+	uint32 min_data_rate; /* Minimum Data Rate (bps) */
+	uint32 mean_data_rate; /* Mean Data Rate (bps) */
+	uint32 peak_data_rate; /* Peak Data Rate (bps) */
+	uint32 max_burst_size; /* Maximum Burst Size (bytes) */
+	uint32 delay_bound; /* Delay Bound (us) */
+	uint32 min_phy_rate; /* Minimum PHY Rate (bps) */
+	uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time; /* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2 bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */
+#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */
+#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */
+#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
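A brief usage sketch for the TSINFO accessors above (illustrative only); the GET macros return subfields already shifted down to bit 0:

/* Illustrative only: classify a received TSPEC by TID and direction. */
static int
tspec_is_bidirectional(const tsinfo_t *ti, uint8 *tid)
{
	*tid = (uint8)WLC_CAC_GET_TID(*ti);
	return (WLC_CAC_GET_DIR(*ti) ==
	        (TS_INFO_BIDIRECTIONAL >> TS_INFO_DIRECTION_SHIFT));
}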
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT 1000 /* default ADDTS response timeout in ms */
+	/* DEFVAL dot11ADDTSResponseTimeout = 1s */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
+#ifdef BCMCCX
+#define CCX_STATUS_ASSOC_DENIED_UNKNOWN 0xc8 /* unspecified QoS related failure */
+#define CCX_STATUS_ASSOC_DENIED_AP_POLICY 0xc9 /* TSPEC refused due to AP policy */
+#define CCX_STATUS_ASSOC_DENIED_NO_BW 0xca /* Assoc denied due to AP insufficient BW */
+#define CCX_STATUS_ASSOC_DENIED_BAD_PARAM 0xcb /* one or more TSPEC with invalid parameter */
+#endif /* BCMCCX */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
+#define DOT11E_STATUS_END_TS 37 /* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
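Of the refusal codes above, only DOT11E_STATUS_ADDTS_REFUSED_AWHILE invites a later retry; a caller might encode that as below (illustrative only):

/* Illustrative only: whether a refused ADDTS is worth retrying later. */
static int
addts_should_retry(uint8 status)
{
	return (status == DOT11E_STATUS_ADDTS_REFUSED_AWHILE);
}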
diff --git a/bcmdhd.101.10.361.x/include/802.11r.h b/bcmdhd.101.10.361.x/include/802.11r.h
new file mode 100755
index 0000000..7bb8728
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11r.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * Fundamental constants relating to 802.11r
+ */
+
+#ifndef _802_11r_H_
+#define _802_11r_H_
+
+#define FBT_R0KH_ID_LEN 49 /* includes null termination */
+#define FBT_REASSOC_TIME_DEF 1000
+
+#define DOT11_FBT_SUBELEM_ID_R1KH_ID 1
+#define DOT11_FBT_SUBELEM_ID_GTK 2
+#define DOT11_FBT_SUBELEM_ID_R0KH_ID 3
+#define DOT11_FBT_SUBELEM_ID_IGTK 4
+#define DOT11_FBT_SUBELEM_ID_OCI 5u
+
+/*
+* FBT Subelement id lengths
+*/
+
+#define DOT11_FBT_SUBELEM_R1KH_LEN 6
+/* GTK_FIXED_LEN = Key_Info (2Bytes) + Key_Length (1Byte) + RSC (8Bytes) */
+#define DOT11_FBT_SUBELEM_GTK_FIXED_LEN 11
+/* GTK_MIN_LEN = GTK_FIXED_LEN + key (min 16 Bytes) + key_wrap (8Bytes) */
+#define DOT11_FBT_SUBELEM_GTK_MIN_LEN (DOT11_FBT_SUBELEM_GTK_FIXED_LEN + 24)
+/* GTK_MAX_LEN = GTK_FIXED_LEN + key (max 32 Bytes) + key_wrap (8Bytes) */
+#define DOT11_FBT_SUBELEM_GTK_MAX_LEN (DOT11_FBT_SUBELEM_GTK_FIXED_LEN + 40)
+#define DOT11_FBT_SUBELEM_R0KH_MIN_LEN 1
+#define DOT11_FBT_SUBELEM_R0KH_MAX_LEN 48
+/* IGTK_LEN = KeyID (2Bytes) + IPN (6Bytes) + Key_Length (1Byte) +
+* Wrapped_Key (key (16Bytes) + key_wrap (8Bytes))
+*/
+#define DOT11_FBT_SUBELEM_IGTK_LEN 33
+#define DOT11_FBT_SUBELEM_OCI_LEN 3u
+
+#endif /* #ifndef _802_11r_H_ */
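The GTK subelement bounds above combine the fixed header with the key and 8-byte key-wrap lengths, so a parser would typically range-check the subelement length before touching the key material (illustrative sketch only):

/* Illustrative only: length sanity check for an FBT GTK subelement. */
static int
fbt_gtk_subelem_len_ok(uint8 len)
{
	return (len >= DOT11_FBT_SUBELEM_GTK_MIN_LEN &&
	        len <= DOT11_FBT_SUBELEM_GTK_MAX_LEN);
}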
diff --git a/bcmdhd.101.10.361.x/include/802.11s.h b/bcmdhd.101.10.361.x/include/802.11s.h
new file mode 100755
index 0000000..7c0869f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11s.h
@@ -0,0 +1,337 @@
+/*
+ * Fundamental types and constants relating to 802.11s Mesh
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _802_11s_h_
+#define _802_11s_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define DOT11_MESH_FLAGS_AE_MASK 0x3u
+#define DOT11_MESH_FLAGS_AE_SHIFT 0u
+
+/* Mesh Control Flags: Address Extension mode values */
+#define DOT11_MESH_AE_NONE 0u
+#define DOT11_MESH_AE_A4 1u
+#define DOT11_MESH_AE_A5_A6 2u
+
+#define DOT11_MESH_CONNECTED_AS_SET 7
+#define DOT11_MESH_NUMBER_PEERING_SET 1
+#define DOT11_MESH_MESH_GWSET 0
+
+#define DOT11_MESH_ACTION_LINK_MET_REP 0
+#define DOT11_MESH_ACTION_PATH_SEL 1
+#define DOT11_MESH_ACTION_GATE_ANN 2
+#define DOT11_MESH_ACTION_CONG_CONT_NOTIF 3
+#define DOT11_MESH_ACTION_MCCA_SETUP_REQ 4
+#define DOT11_MESH_ACTION_MCCA_SETUP_REP 5
+#define DOT11_MESH_ACTION_MCCA_ADVT_REQ 6
+#define DOT11_MESH_ACTION_MCCA_ADVT 7
+#define DOT11_MESH_ACTION_MCCA_TEARDOWN 8
+#define DOT11_MESH_ACTION_TBTT_ADJ_REQ 9
+#define DOT11_MESH_ACTION_TBTT_ADJ_RESP 10
+
+/* self-protected action field values: 7-57v24 */
+#define DOT11_SELFPROT_ACTION_MESH_PEER_OPEN 1
+#define DOT11_SELFPROT_ACTION_MESH_PEER_CONFM 2
+#define DOT11_SELFPROT_ACTION_MESH_PEER_CLOSE 3
+#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_INF 4
+#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_ACK 5
+
+#define DOT11_MESH_AUTH_PROTO_NONE 0
+#define DOT11_MESH_AUTH_PROTO_SAE 1
+#define DOT11_MESH_AUTH_PROTO_8021X 2
+#define DOT11_MESH_AUTH_PROTO_VS 255
+
+#define DOT11_MESH_PATHSEL_LEN 2
+#define DOT11_MESH_PERR_LEN1 2 /* Least PERR length fixed */
+#define DOT11_MESH_PERR_LEN2 13 /* Least PERR length variable */
+#define DOT11_MESH_PREP_LEN 31 /* Least PREP length */
+#define DOT11_MESH_PREQ_LEN 37 /* Least PREQ length */
+
+#define DOT11_MESH_PATHSEL_PROTID_HWMP 1
+#define DOT11_MESH_PATHSEL_METRICID_ALM 1 /* Air link metric */
+#define DOT11_MESH_CONGESTCTRL_NONE 0
+#define DOT11_MESH_CONGESTCTRL_SP 1
+#define DOT11_MESH_SYNCMETHOD_NOFFSET 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_meshctrl_hdr {
+	uint8 flags; /* flag bits such as ae etc */
+	uint8 ttl; /* time to live */
+	uint32 seq; /* sequence control */
+	struct ether_addr a5; /* optional address 5 */
+	struct ether_addr a6; /* optional address 6 */
+} BWL_POST_PACKED_STRUCT;
+
+#define DOT11_MESH_CONTROL_MIN_LEN 6u
+#define DOT11_MESH_CONTROL_A4_LEN 12u
+#define DOT11_MESH_CONTROL_A5A6_LEN 18u
+
+/* Mesh Path Selection Action Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_mesh_pathsel {
+	uint8 category;
+	uint8 meshaction;
+	uint8 data[];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mesh_pathsel dot11_mesh_pathsel_t;
+
+/* Mesh PREQ IE */
+BWL_PRE_PACKED_STRUCT struct mesh_preq_ie {
+	uint8 id;
+	uint8 len;
+	uint8 flags;
+	uint8 hop_count;
+	uint8 ttl;
+	uint32 pathdis_id;
+	struct ether_addr originator_addr;
+	uint32 originator_seq;
+	union {
+		BWL_PRE_PACKED_STRUCT struct {
+			struct ether_addr target_ext_add;
+			uint32 lifetime;
+			uint32 metric;
+			uint8 target_count;
+			uint8 data[];
+		} BWL_POST_PACKED_STRUCT oea;
+
+		BWL_PRE_PACKED_STRUCT struct {
+			uint32 lifetime;
+			uint32 metric;
+			uint8 target_count;
+			uint8 data[];
+		} BWL_POST_PACKED_STRUCT noea;
+	} u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_preq_ie mesh_preq_ie_t;
+
+/* Target info (part of Mesh PREQ IE) */
+BWL_PRE_PACKED_STRUCT struct mesh_targetinfo {
+	uint8 target_flag;
+	struct ether_addr target_addr;
+	uint32 target_seq;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_targetinfo mesh_targetinfo_t;
+
+/* Mesh PREP IE */
+BWL_PRE_PACKED_STRUCT struct mesh_prep_ie {
+	uint8 id;
+	uint8 len;
+	uint8 flags;
+	uint8 hop_count;
+	uint8 ttl;
+	struct ether_addr target_addr;
+	uint32 target_seq;
+	union {
+		BWL_PRE_PACKED_STRUCT struct {
+			struct ether_addr
target_ext_add; + uint32 lifetime; + uint32 metric; + uint8 target_count; + struct ether_addr originator_addr; + uint32 originator_seq; + } BWL_POST_PACKED_STRUCT oea; + + BWL_PRE_PACKED_STRUCT struct { + uint32 lifetime; + uint32 metric; + uint8 target_count; + struct ether_addr originator_addr; + uint32 originator_seq; + } BWL_POST_PACKED_STRUCT noea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_prep_ie mesh_prep_ie_t; + +/* Mesh PERR IE */ +struct mesh_perr_ie { + uint8 id; + uint8 len; + uint8 ttl; + uint8 num_dest; + uint8 data[]; +}; +typedef struct mesh_perr_ie mesh_perr_ie_t; + +/* Destination info is part of PERR IE */ +BWL_PRE_PACKED_STRUCT struct mesh_perr_destinfo { + uint8 flags; + struct ether_addr destination_addr; + uint32 dest_seq; + union { + BWL_PRE_PACKED_STRUCT struct { + struct ether_addr dest_ext_addr; + } BWL_POST_PACKED_STRUCT dea; + + BWL_PRE_PACKED_STRUCT struct { + /* 1 byte reason code to be populated manually in software */ + uint16 reason_code; + } BWL_POST_PACKED_STRUCT nodea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_perr_destinfo mesh_perr_destinfo_t; + +/* Mesh peering action frame hdr */ +BWL_PRE_PACKED_STRUCT struct mesh_peering_frmhdr { + uint8 category; + uint8 action; + union { + struct { + uint16 capability; + } open; + struct { + uint16 capability; + uint16 AID; + } confirm; + uint8 data[1]; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peering_frmhdr mesh_peering_frmhdr_t; + +/* Mesh peering mgmt IE */ +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_common { + uint16 mesh_peer_prot_id; + uint16 local_link_id; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_common mesh_peer_mgmt_ie_common_t; +#define MESH_PEER_MGMT_IE_OPEN_LEN (4) + +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_cfm { + mesh_peer_mgmt_ie_common_t common; + uint16 peer_link_id; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_cfm mesh_peer_mgmt_ie_cfm_t; +#define MESH_PEER_MGMT_IE_CONF_LEN (6) + +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_close { + mesh_peer_mgmt_ie_common_t common; + /* uint16 peer_link_id; + * simplicity: not supported, TODO for future + */ + uint16 reason_code; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_close mesh_peer_mgmt_ie_close_t; +#define MESH_PEER_MGMT_IE_CLOSE_LEN (6) + +struct mesh_config_ie { + uint8 activ_path_sel_prot_id; + uint8 activ_path_sel_metric_id; + uint8 cong_ctl_mode_id; + uint8 sync_method_id; + uint8 auth_prot_id; + uint8 mesh_formation_info; + uint8 mesh_cap; +}; +typedef struct mesh_config_ie mesh_config_ie_t; +#define MESH_CONFIG_IE_LEN (7) + +/* Mesh peering states */ +#define MESH_PEERING_IDLE 0 +#define MESH_PEERING_OPEN_SNT 1 +#define MESH_PEERING_CNF_RCVD 2 +#define MESH_PEERING_OPEN_RCVD 3 +#define MESH_PEERING_ESTAB 4 +#define MESH_PEERING_HOLDING 5 +#define MESH_PEERING_LAST_STATE 6 +/* for debugging: mapping strings */ +#define MESH_PEERING_STATE_STRINGS \ + {"IDLE ", "OPNSNT", "CNFRCV", "OPNRCV", "ESTAB ", "HOLDNG"} + +#ifdef WLMESH +typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info { + /* mesh_peer_instance as given in the spec. Note that, peer address + * is stored in scb + */ + uint16 mesh_peer_prot_id; + uint16 local_link_id; + uint16 peer_link_id; + /* AID generated by *peer* to self & received in peer_confirm */ + uint16 peer_aid; + + /* TODO: no mention in spec? possibly used in PS case. Note that aid generated + * from self to peer is stored in scb. 
+	 */
+	uint8 state;
+	/* TODO: struct mesh_peer_info *next; this field is required
+	 * if multiple peerings per same src is allowed, which is
+	 * true as per spec.
+	 */
+} BWL_POST_PACKED_STRUCT mesh_peer_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_ext {
+	mesh_peer_info_t peer_info;
+	uint16 local_aid; /* AID generated by *local* to peer */
+	struct ether_addr ea; /* peer ea */
+	uint32 entry_state; /* see MESH_PEER_ENTRY_STATE_ACTIVE etc; valid
+	                     * ONLY for internal peering requests
+	                     */
+	int rssi;
+} BWL_POST_PACKED_STRUCT mesh_peer_info_ext_t;
+
+/* #ifdef WLMESH */
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_dump {
+	uint32 buflen;
+	uint32 version;
+	uint32 count; /* number of results */
+	mesh_peer_info_ext_t mpi_ext[1];
+} BWL_POST_PACKED_STRUCT mesh_peer_info_dump_t;
+#define WL_MESH_PEER_RES_FIXED_SIZE (sizeof(mesh_peer_info_dump_t) - sizeof(mesh_peer_info_ext_t))
+
+#endif /* WLMESH */
+
+/* Once an entry is added into mesh_peer_list, if peering is lost, it will
+* get retried for peering MAX_MESH_SELF_PEER_ENTRY_RETRIES times. Afterwards, it
+* won't get retried and is moved to the MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT state,
+* until the user adds it again explicitly, at which point its entry_state is changed
+* to MESH_SELF_PEER_ENTRY_STATE_ACTIVE and it is tried again.
+*/
+#define MAX_MESH_SELF_PEER_ENTRY_RETRIES 3
+#define MESH_SELF_PEER_ENTRY_STATE_ACTIVE 1
+#define MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT 2
+
+/** Mesh Channel Switch Parameter IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_mcsp_body {
+	uint8 ttl; /* remaining number of hops allowed for this element. */
+	uint8 flags; /* attributes of this channel switch attempt */
+	uint8 reason; /* reason for the mesh channel switch */
+	uint16 precedence; /* random value in the range 0 to 65535 */
+} BWL_POST_PACKED_STRUCT;
+
+#define DOT11_MCSP_TTL_DEFAULT 1
+#define DOT11_MCSP_FLAG_TRANS_RESTRICT 0x1 /* no transmit except frames with mcsp */
+#define DOT11_MCSP_FLAG_INIT 0x2 /* initiates the channel switch attempt */
+#define DOT11_MCSP_FLAG_REASON 0x4 /* validity of reason code field */
+#define DOT11_MCSP_REASON_REGULATORY 0 /* meet regulatory requirements */
+#define DOT11_MCSP_REASON_UNSPECIFIED 1 /* unspecified reason */
+
+BWL_PRE_PACKED_STRUCT struct dot11_mesh_csp {
+	uint8 id; /* id DOT11_MNG_MESH_CSP_ID */
+	uint8 len; /* length of IE */
+	struct dot11_mcsp_body body; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mesh_csp dot11_mesh_csp_ie_t;
+#define DOT11_MESH_CSP_IE_LEN 5 /* length of mesh channel switch parameter IE body */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _802_11s_h_ */
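The Address Extension mode carried in the Mesh Control flags selects one of the three header lengths defined earlier in this file; a receiver might resolve it as below (illustrative only):

/* Illustrative only: mesh control header length from the AE mode bits. */
static unsigned int
mesh_ctl_hdr_len(uint8 flags)
{
	switch ((flags & DOT11_MESH_FLAGS_AE_MASK) >> DOT11_MESH_FLAGS_AE_SHIFT) {
	case DOT11_MESH_AE_A4:
		return DOT11_MESH_CONTROL_A4_LEN;
	case DOT11_MESH_AE_A5_A6:
		return DOT11_MESH_CONTROL_A5A6_LEN;
	default: /* DOT11_MESH_AE_NONE */
		return DOT11_MESH_CONTROL_MIN_LEN;
	}
}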
diff --git a/bcmdhd.101.10.361.x/include/802.1d.h b/bcmdhd.101.10.361.x/include/802.1d.h
new file mode 100755
index 0000000..a05bb28
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.1d.h
@@ -0,0 +1,47 @@
+/*
+ * Fundamental types and constants relating to 802.1D
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define PRIO_8021D_NONE 2 /* None = - */
+#define PRIO_8021D_BK 1 /* BK - Background */
+#define PRIO_8021D_BE 0 /* BE - Best-effort */
+#define PRIO_8021D_EE 3 /* EE - Excellent-effort */
+#define PRIO_8021D_CL 4 /* CL - Controlled Load */
+#define PRIO_8021D_VI 5 /* Vi - Video */
+#define PRIO_8021D_VO 6 /* Vo - Voice */
+#define PRIO_8021D_NC 7 /* NC - Network Control */
+#define MAXPRIO 7 /* 0-7 */
+#define NUMPRIO (MAXPRIO + 1)
+
+#define ALLPRIO -1 /* All priority */
+
+/* Converts prio to precedence since the numerical value of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? (((prio) ^ 2)) : (prio))
+
+#endif /* _802_1_D_ */
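Because BE (0) and NONE (2) trade numerical places, PRIO2PREC() swaps exactly those two values and passes the rest through; a quick illustration (expected results in comments):

/* Illustrative only: PRIO2PREC() swaps BE and NONE, leaves others alone. */
int prec_be = PRIO2PREC(PRIO_8021D_BE); /* 2 */
int prec_none = PRIO2PREC(PRIO_8021D_NONE); /* 0 */
int prec_vo = PRIO2PREC(PRIO_8021D_VO); /* 6 */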
+ */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN	6	/* 802.3 SNAP header length */
+#define DOT3_OUI_LEN	3	/* 802.3 OUI length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT3_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _802_3_h_ */
diff --git a/bcmdhd.101.10.361.x/include/aidmp.h b/bcmdhd.101.10.361.x/include/aidmp.h
new file mode 100755
index 0000000..57c60ae
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/aidmp.h
@@ -0,0 +1,438 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + */ + +#ifndef _AIDMP_H +#define _AIDMP_H + +/* Manufacturer Ids */ +#define MFGID_ARM 0x43b +#define MFGID_BRCM 0x4bf +#define MFGID_MIPS 0x4a7 + +/* Component Classes */ +#define CC_SIM 0 +#define CC_EROM 1 +#define CC_CORESIGHT 9 +#define CC_VERIF 0xb +#define CC_OPTIMO 0xd +#define CC_GEN 0xe +#define CC_PRIMECELL 0xf + +/* Enumeration ROM registers */ +#define ER_EROMENTRY 0x000 +#define ER_REMAPCONTROL 0xe00 +#define ER_REMAPSELECT 0xe04 +#define ER_MASTERSELECT 0xe10 +#define ER_ITCR 0xf00 +#define ER_ITIP 0xf04 + +/* Erom entries */ +#define ER_TAG 0xe +#define ER_TAG1 0x6 +#define ER_VALID 1 +#define ER_CI 0 +#define ER_MP 2 +#define ER_ADD 4 +#define ER_END 0xe +#define ER_BAD 0xffffffff +#define ER_SZ_MAX 4096 /* 4KB */ + +/* EROM CompIdentA */ +#define CIA_MFG_MASK 0xfff00000u +#define CIA_MFG_SHIFT 20u +#define CIA_CID_MASK 0x000fff00u +#define CIA_CID_SHIFT 8u +#define CIA_CCL_MASK 0x000000f0u +#define CIA_CCL_SHIFT 4u + +/* EROM CompIdentB */ +#define CIB_REV_MASK 0xff000000u +#define CIB_REV_SHIFT 24u +#define CIB_NSW_MASK 0x00f80000u +#define CIB_NSW_SHIFT 19u +#define CIB_NMW_MASK 0x0007c000u +#define CIB_NMW_SHIFT 14u +#define CIB_NSP_MASK 0x00003e00u +#define CIB_NSP_SHIFT 9u +#define CIB_NMP_MASK 0x000001f0u +#define CIB_NMP_SHIFT 4u + +/* EROM MasterPortDesc */ +#define MPD_MUI_MASK 0x0000ff00u +#define MPD_MUI_SHIFT 8u +#define MPD_MP_MASK 0x000000f0u +#define MPD_MP_SHIFT 4u + +/* EROM AddrDesc */ +#define AD_ADDR_MASK 0xfffff000u +#define AD_SP_MASK 0x00000f00u +#define AD_SP_SHIFT 8u +#define AD_ST_MASK 0x000000c0u +#define AD_ST_SHIFT 6u +#define AD_ST_SLAVE 0x00000000u +#define AD_ST_BRIDGE 0x00000040u +#define AD_ST_SWRAP 0x00000080u +#define AD_ST_MWRAP 0x000000c0u +#define AD_SZ_MASK 0x00000030u +#define AD_SZ_SHIFT 4u +#define AD_SZ_4K 0x00000000u +#define AD_SZ_8K 0x00000010u +#define AD_SZ_16K 0x00000020u +#define AD_SZ_SZD 0x00000030u +#define AD_AG32 0x00000008u +#define AD_ADDR_ALIGN 0x00000fffu +#define AD_SZ_BASE 0x00001000u /* 4KB */ + +/* EROM SizeDesc */ +#define SD_SZ_MASK 0xfffff000u +#define SD_SG32 0x00000008u +#define SD_SZ_ALIGN 0x00000fffu + +#define WRAPPER_TIMEOUT_CONFIG 0x4u + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +typedef volatile struct _aidmp { + uint32 oobselina30; /* 0x000 */ + uint32 oobselina74; /* 0x004 */ + uint32 PAD[6]; + uint32 oobselinb30; /* 0x020 */ + uint32 oobselinb74; /* 0x024 */ + uint32 PAD[6]; + uint32 oobselinc30; /* 0x040 */ + uint32 oobselinc74; /* 0x044 */ + uint32 PAD[6]; + uint32 oobselind30; /* 0x060 */ + uint32 oobselind74; /* 0x064 */ + uint32 PAD[38]; + uint32 oobselouta30; /* 0x100 */ + uint32 oobselouta74; /* 0x104 */ + uint32 PAD[6]; + uint32 oobseloutb30; /* 0x120 */ + uint32 oobseloutb74; /* 0x124 */ + uint32 PAD[6]; + uint32 oobseloutc30; /* 0x140 */ + uint32 oobseloutc74; /* 0x144 */ + uint32 PAD[6]; + uint32 oobseloutd30; /* 0x160 */ + uint32 oobseloutd74; /* 0x164 */ + uint32 PAD[38]; + uint32 oobsynca; /* 0x200 */ + uint32 oobseloutaen; /* 0x204 */ + uint32 PAD[6]; + uint32 oobsyncb; /* 0x220 */ + uint32 oobseloutben; /* 0x224 */ + uint32 PAD[6]; + uint32 oobsyncc; /* 0x240 */ + uint32 oobseloutcen; /* 0x244 */ + uint32 PAD[6]; + uint32 oobsyncd; /* 0x260 */ + uint32 oobseloutden; /* 0x264 */ + uint32 PAD[38]; + uint32 oobaextwidth; /* 0x300 */ + uint32 oobainwidth; /* 0x304 */ + uint32 oobaoutwidth; /* 0x308 */ + uint32 PAD[5]; + uint32 oobbextwidth; /* 0x320 */ + uint32 oobbinwidth; /* 0x324 */ + uint32 oobboutwidth; /* 0x328 */ + uint32 PAD[5]; + uint32 
oobcextwidth; /* 0x340 */ + uint32 oobcinwidth; /* 0x344 */ + uint32 oobcoutwidth; /* 0x348 */ + uint32 PAD[5]; + uint32 oobdextwidth; /* 0x360 */ + uint32 oobdinwidth; /* 0x364 */ + uint32 oobdoutwidth; /* 0x368 */ + uint32 PAD[37]; + uint32 ioctrlset; /* 0x400 */ + uint32 ioctrlclear; /* 0x404 */ + uint32 ioctrl; /* 0x408 */ + uint32 PAD[61]; + uint32 iostatus; /* 0x500 */ + uint32 PAD[127]; + uint32 ioctrlwidth; /* 0x700 */ + uint32 iostatuswidth; /* 0x704 */ + uint32 PAD[62]; + uint32 resetctrl; /* 0x800 */ + uint32 resetstatus; /* 0x804 */ + uint32 resetreadid; /* 0x808 */ + uint32 resetwriteid; /* 0x80c */ + uint32 PAD[60]; + uint32 errlogctrl; /* 0x900 */ + uint32 errlogdone; /* 0x904 */ + uint32 errlogstatus; /* 0x908 */ + uint32 errlogaddrlo; /* 0x90c */ + uint32 errlogaddrhi; /* 0x910 */ + uint32 errlogid; /* 0x914 */ + uint32 errloguser; /* 0x918 */ + uint32 errlogflags; /* 0x91c */ + uint32 PAD[56]; + uint32 intstatus; /* 0xa00 */ + uint32 PAD[255]; + uint32 config; /* 0xe00 */ + uint32 PAD[63]; + uint32 itcr; /* 0xf00 */ + uint32 PAD[3]; + uint32 itipooba; /* 0xf10 */ + uint32 itipoobb; /* 0xf14 */ + uint32 itipoobc; /* 0xf18 */ + uint32 itipoobd; /* 0xf1c */ + uint32 PAD[4]; + uint32 itipoobaout; /* 0xf30 */ + uint32 itipoobbout; /* 0xf34 */ + uint32 itipoobcout; /* 0xf38 */ + uint32 itipoobdout; /* 0xf3c */ + uint32 PAD[4]; + uint32 itopooba; /* 0xf50 */ + uint32 itopoobb; /* 0xf54 */ + uint32 itopoobc; /* 0xf58 */ + uint32 itopoobd; /* 0xf5c */ + uint32 PAD[4]; + uint32 itopoobain; /* 0xf70 */ + uint32 itopoobbin; /* 0xf74 */ + uint32 itopoobcin; /* 0xf78 */ + uint32 itopoobdin; /* 0xf7c */ + uint32 PAD[4]; + uint32 itopreset; /* 0xf90 */ + uint32 PAD[15]; + uint32 peripherialid4; /* 0xfd0 */ + uint32 peripherialid5; /* 0xfd4 */ + uint32 peripherialid6; /* 0xfd8 */ + uint32 peripherialid7; /* 0xfdc */ + uint32 peripherialid0; /* 0xfe0 */ + uint32 peripherialid1; /* 0xfe4 */ + uint32 peripherialid2; /* 0xfe8 */ + uint32 peripherialid3; /* 0xfec */ + uint32 componentid0; /* 0xff0 */ + uint32 componentid1; /* 0xff4 */ + uint32 componentid2; /* 0xff8 */ + uint32 componentid3; /* 0xffc */ +} aidmp_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +/* Out-of-band Router registers */ +#define OOB_BUSCONFIG 0x020 +#define OOB_STATUSA 0x100 +#define OOB_STATUSB 0x104 +#define OOB_STATUSC 0x108 +#define OOB_STATUSD 0x10c +#define OOB_ENABLEA0 0x200 +#define OOB_ENABLEA1 0x204 +#define OOB_ENABLEA2 0x208 +#define OOB_ENABLEA3 0x20c +#define OOB_ENABLEB0 0x280 +#define OOB_ENABLEB1 0x284 +#define OOB_ENABLEB2 0x288 +#define OOB_ENABLEB3 0x28c +#define OOB_ENABLEC0 0x300 +#define OOB_ENABLEC1 0x304 +#define OOB_ENABLEC2 0x308 +#define OOB_ENABLEC3 0x30c +#define OOB_ENABLED0 0x380 +#define OOB_ENABLED1 0x384 +#define OOB_ENABLED2 0x388 +#define OOB_ENABLED3 0x38c +#define OOB_ITCR 0xf00 +#define OOB_ITIPOOBA 0xf10 +#define OOB_ITIPOOBB 0xf14 +#define OOB_ITIPOOBC 0xf18 +#define OOB_ITIPOOBD 0xf1c +#define OOB_ITOPOOBA 0xf30 +#define OOB_ITOPOOBB 0xf34 +#define OOB_ITOPOOBC 0xf38 +#define OOB_ITOPOOBD 0xf3c + +/* DMP wrapper registers */ +#define AI_OOBSELINA30 0x000 +#define AI_OOBSELINA74 0x004 +#define AI_OOBSELINB30 0x020 +#define AI_OOBSELINB74 0x024 +#define AI_OOBSELINC30 0x040 +#define AI_OOBSELINC74 0x044 +#define AI_OOBSELIND30 0x060 +#define AI_OOBSELIND74 0x064 +#define AI_OOBSELOUTA30 0x100 +#define AI_OOBSELOUTA74 0x104 +#define AI_OOBSELOUTB30 0x120 +#define AI_OOBSELOUTB74 0x124 +#define AI_OOBSELOUTC30 0x140 +#define AI_OOBSELOUTC74 0x144 +#define AI_OOBSELOUTD30 
0x160 +#define AI_OOBSELOUTD74 0x164 +#define AI_OOBSYNCA 0x200 +#define AI_OOBSELOUTAEN 0x204 +#define AI_OOBSYNCB 0x220 +#define AI_OOBSELOUTBEN 0x224 +#define AI_OOBSYNCC 0x240 +#define AI_OOBSELOUTCEN 0x244 +#define AI_OOBSYNCD 0x260 +#define AI_OOBSELOUTDEN 0x264 +#define AI_OOBAEXTWIDTH 0x300 +#define AI_OOBAINWIDTH 0x304 +#define AI_OOBAOUTWIDTH 0x308 +#define AI_OOBBEXTWIDTH 0x320 +#define AI_OOBBINWIDTH 0x324 +#define AI_OOBBOUTWIDTH 0x328 +#define AI_OOBCEXTWIDTH 0x340 +#define AI_OOBCINWIDTH 0x344 +#define AI_OOBCOUTWIDTH 0x348 +#define AI_OOBDEXTWIDTH 0x360 +#define AI_OOBDINWIDTH 0x364 +#define AI_OOBDOUTWIDTH 0x368 + +#if !defined(IL_BIGENDIAN) +#define AI_IOCTRLSET 0x400 +#define AI_IOCTRLCLEAR 0x404 +#define AI_IOCTRL 0x408 +#define AI_IOCTRL_BOOKER 0x248 /* Starting from OOBR base - 0x18006000 */ +#define AI_IOSTATUS 0x500 +#define AI_RESETCTRL 0x800 +#define AI_RESETSTATUS 0x804 +#endif /* IL_BIGENDIAN */ + +#define AI_IOCTRLWIDTH 0x700 +#define AI_IOSTATUSWIDTH 0x704 + +#define AI_RESETREADID 0x808 +#define AI_RESETWRITEID 0x80c +#define AI_ERRLOGCTRL 0x900 +#define AI_ERRLOGDONE 0x904 +#define AI_ERRLOGSTATUS 0x908 +#define AI_ERRLOGADDRLO 0x90c +#define AI_ERRLOGADDRHI 0x910 +#define AI_ERRLOGID 0x914 +#define AI_ERRLOGUSER 0x918 +#define AI_ERRLOGFLAGS 0x91c +#define AI_INTSTATUS 0xa00 +#define AI_CONFIG 0xe00 +#define AI_ITCR 0xf00 +#define AI_ITIPOOBA 0xf10 +#define AI_ITIPOOBB 0xf14 +#define AI_ITIPOOBC 0xf18 +#define AI_ITIPOOBD 0xf1c +#define AI_ITIPOOBAOUT 0xf30 +#define AI_ITIPOOBBOUT 0xf34 +#define AI_ITIPOOBCOUT 0xf38 +#define AI_ITIPOOBDOUT 0xf3c +#define AI_ITOPOOBA 0xf50 +#define AI_ITOPOOBB 0xf54 +#define AI_ITOPOOBC 0xf58 +#define AI_ITOPOOBD 0xf5c +#define AI_ITOPOOBAIN 0xf70 +#define AI_ITOPOOBBIN 0xf74 +#define AI_ITOPOOBCIN 0xf78 +#define AI_ITOPOOBDIN 0xf7c +#define AI_ITOPRESET 0xf90 +#define AI_PERIPHERIALID4 0xfd0 +#define AI_PERIPHERIALID5 0xfd4 +#define AI_PERIPHERIALID6 0xfd8 +#define AI_PERIPHERIALID7 0xfdc +#define AI_PERIPHERIALID0 0xfe0 +#define AI_PERIPHERIALID1 0xfe4 +#define AI_PERIPHERIALID2 0xfe8 +#define AI_PERIPHERIALID3 0xfec +#define AI_COMPONENTID0 0xff0 +#define AI_COMPONENTID1 0xff4 +#define AI_COMPONENTID2 0xff8 +#define AI_COMPONENTID3 0xffc + +/* resetctrl */ +#define AIRC_RESET 1 + +/* errlogctrl */ +#define AIELC_TO_EXP_MASK 0x0000001f0 /* backplane timeout exponent */ +#define AIELC_TO_EXP_SHIFT 4 +#define AIELC_TO_ENAB_SHIFT 9 /* backplane timeout enable */ + +/* errlogdone */ +#define AIELD_ERRDONE_MASK 0x3 + +/* errlogstatus */ +#define AIELS_SLAVE_ERR 0x1 +#define AIELS_TIMEOUT 0x2 +#define AIELS_DECODE 0x3 +#define AIELS_ERROR_MASK 0x3 +#define AIELS_MULTIPLE_ERRORS 0x4 +#define ERRLOGID_AXIID_MASK 0xF + +/* errorlog status bit map, for SW use */ +#define AXI_WRAP_STS_NONE (0) +#define AXI_WRAP_STS_TIMEOUT (1<<0) +#define AXI_WRAP_STS_SLAVE_ERR (1<<1) +#define AXI_WRAP_STS_DECODE_ERR (1<<2) +#define AXI_WRAP_STS_PCI_RD_ERR (1<<3) +#define AXI_WRAP_STS_WRAP_RD_ERR (1<<4) +#define AXI_WRAP_STS_SET_CORE_FAIL (1<<5) +#define AXI_WRAP_STS_MULTIPLE_ERRORS (1<<6) + +/* errlogFrags */ +#define AXI_ERRLOG_FLAGS_WRITE_REQ (1<<24) + +/* config */ +#define AICFG_OOB 0x00000020 +#define AICFG_IOS 0x00000010 +#define AICFG_IOC 0x00000008 +#define AICFG_TO 0x00000004 +#define AICFG_ERRL 0x00000002 +#define AICFG_RST 0x00000001 + +/* bit defines for AI_OOBSELOUTB74 reg */ +#define OOB_SEL_OUTEN_B_5 15 +#define OOB_SEL_OUTEN_B_6 23 + +/* AI_OOBSEL for A/B/C/D, 0-7 */ +#define AI_OOBSEL_MASK 0x1F +#define AI_OOBSEL_0_SHIFT 0 +#define 
AI_OOBSEL_1_SHIFT	8
+#define AI_OOBSEL_2_SHIFT	16
+#define AI_OOBSEL_3_SHIFT	24
+#define AI_OOBSEL_4_SHIFT	0
+#define AI_OOBSEL_5_SHIFT	8
+#define AI_OOBSEL_6_SHIFT	16
+#define AI_OOBSEL_7_SHIFT	24
+#define AI_IOCTRL_ENABLE_D11_PME	(1 << 14)
+
+/* bits specific to AI_OOBSELOUTB30 */
+#define OOB_B_ALP_REQUEST		0
+#define OOB_B_HT_REQUEST		1
+#define OOB_B_ILP_REQUEST		2
+#define OOB_B_ALP_AVAIL_REQUEST		3
+#define OOB_B_HT_AVAIL_REQUEST		4
+
+/* mask for interrupts from each core to wrapper */
+#define AI_OOBSELINA74_CORE_MASK	0x80808080
+#define AI_OOBSELINA30_CORE_MASK	0x80808080
+
+#define AI_OOBSEL_30_0_INTR_MASK	0x00000080
+#define AI_OOBSEL_30_3_INTR_MASK	0x80000000
+
+#define AI_OOBSEL_74_4_INTR_MASK	0x00000080
+#define AI_OOBSEL_74_7_INTR_MASK	0x80000000
+
+/* axi id mask in the error log id */
+#define AI_ERRLOGID_AXI_ID_MASK		0x07
+#define AI_ERRLOGID_AXI_ID_MASK_EXTD	0x1F
+
+#endif	/* _AIDMP_H */
diff --git a/bcmdhd.101.10.361.x/include/bcm_fwtrace.h b/bcmdhd.101.10.361.x/include/bcm_fwtrace.h
new file mode 100755
index 0000000..ae0836a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_fwtrace.h
@@ -0,0 +1,111 @@
+
+/*
+ * Firmware trace implementation common header file between DHD and the firmware.
+ *
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcm_fwtrace_h
+#define _bcm_fwtrace_h
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+#define FWTRACE_VERSION	1u
+
+/*
+ * Number of trace entries per trace buffer.
+ * Both DHD and FW must use the same number.
+ */
+#define FWTRACE_NUM_ENTRIES	(2u * 1024u)	/* 2K entries, power of 2 */
+/*
+ * Number of buffers provided by the host.
+ * DHD may allocate a smaller number of trace buffers based on contiguous memory availability.
+ */
+#define FWTRACE_NUM_HOST_BUFFERS	32u
+
+/* Magic value to differentiate between regular trace data vs. other blobs */
+#define FWTRACE_BLOB_MAGIC	(0xFFu)
+#define FWTRACE_BLOB_MGIC_SHIFT	(24u)
+
+/* The lower 24 bits of fwtrace_entry->func_ptr are used to push different types of
+ * information to the host, such as the ACK bitmap, the interrupts the DPC is going
+ * to process, etc.
+ */
+#define FWTRACE_BLOB_TYPE_MASK	(0xFFFFFFu)
+#define FWTRACE_BLOB_TYPE_SHIFT	(0)
+
+#define FWTRACE_BLOB_TYPE_NUM_PKTS	(0x1u)
+#define FWTRACE_BLOB_TYPE_ACK_BMAP1	(0x2u)	/* Ack bits (0-31) */
+#define FWTRACE_BLOB_TYPE_ACK_BMAP2	(0x4u)	/* Ack bits (32-63) */
+#define FWTRACE_BLOB_TYPE_ACK_BMAP3	(0x8u)	/* Ack bits (64-95) */
+#define FWTRACE_BLOB_TYPE_ACK_BMAP4	(0x10u)	/* Ack bits (96-127) */
+#define FWTRACE_BLOB_TYPE_INTR1	(0x20u)	/* interrupts the DPC is going to process */
+#define FWTRACE_BLOB_TYPE_INTR2	(0x40u)	/* interrupts the DPC is going to process */
+/* The blob data for LFRAGS_INFO will contain
+ * Bit31-16: Available buffer/lfrags info
+ * Bit15-0 : # of lfrags requested by FW in the fetch request
+ */
+#define FWTRACE_BLOB_TYPE_LFRAGS_INFO	(0x80u)	/* Available and fetch requested lfrags */
+
+#define FWTRACE_BLOB_DATA_MASK	(0xFFFFFu)
+
+#define FWTRACE_BLOB_ADD_CUR	(0)	/* updates within the existing trace entry */
+#define FWTRACE_BLOB_ADD_NEW	(1u)	/* creates a new trace entry */
+
+/*
+ * Host sends host memory location to FW via iovar.
+ * FW will push trace information here.
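+ *
+ * Illustrative sketch (editorial addition, not from the original header) of
+ * how the host side might fill this in; 'host_buf_pa' is a hypothetical
+ * 64-bit DMA address obtained from the host DMA allocator:
+ *
+ *   fwtrace_hostaddr_info_t haddr_info;
+ *   haddr_info.haddr    = host_buf_pa;
+ *   haddr_info.buf_len  = sizeof(fwtrace_buf_t);
+ *   haddr_info.num_bufs = FWTRACE_NUM_HOST_BUFFERS;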
+ */
+typedef struct fwtrace_hostaddr_info {
+	bcm_addr64_t haddr;	/* host address for the firmware to DMA trace data */
+	uint32	buf_len;
+	uint32	num_bufs;	/* Number of trace buffers */
+} fwtrace_hostaddr_info_t;
+
+/*
+ * Each trace buffer pushed to the host starts with this header.
+ */
+typedef struct fwtrace_dma_header_info {
+	uint16	length;		/* length in bytes */
+	uint16	seq_num;	/* sequence number */
+	uint32	version;
+	uint32	hostmem_addr;
+} fwtrace_dma_header_info_t;
+
+/*
+ * Content of each trace entry
+ */
+typedef struct fwtrace_entry {
+	uint32 func_ptr;
+	/* How pkts_cycles is used:
+	 * Bit31-23: (if present) the number of packets processed by the
+	 *           current function
+	 * Bit22-1 : the CPU cycles, in units of 2 cycles; multiply by 2 to
+	 *           get the actual cycle count
+	 * Bit0    : whether this entry is valid or not
+	 */
+	uint32 pkts_cycles;
+} fwtrace_entry_t;
+
+#define FWTRACE_CYCLES_VALID	(1u << 0u)
+
+/*
+ * Format of firmware trace buffer pushed to host memory
+ */
+typedef struct fwtrace_buf {
+	fwtrace_dma_header_info_t info;	/* includes the sequence number and the length */
+	fwtrace_entry_t	entry[FWTRACE_NUM_ENTRIES];
+} fwtrace_buf_t;
+
+void fwtracing_add_blob(uint32 update_type, uint32 trace_type, uint32 blob);
+#endif	/* _bcm_fwtrace_h */
diff --git a/bcmdhd.101.10.361.x/include/bcm_l2_filter.h b/bcmdhd.101.10.361.x/include/bcm_l2_filter.h
new file mode 100755
index 0000000..d594285
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_l2_filter.h
@@ -0,0 +1,99 @@
+/*
+ * L2 Filter handling functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ */
+#ifndef _l2_filter_h_
+#define _l2_filter_h_
+
+/* Proxy ARP processing return values */
+#define PARP_DROP	0
+#define PARP_NOP	1
+#define PARP_TAKEN	2
+/* Adjust for ETHER_HDR_LEN pull in linux
+ * which makes pkt nonaligned
+ */
+#define ALIGN_ADJ_BUFLEN	2
+
+#define BCM_PARP_TABLE_SIZE	32	/* proxyarp hash table bucket size */
+#define BCM_PARP_TABLE_MASK	0x1f	/* proxyarp hash table index mask */
+#define BCM_PARP_TABLE_INDEX(val)	(val & BCM_PARP_TABLE_MASK)
+#define BCM_PARP_TIMEOUT	600	/* proxyarp cache entry timeout duration (10 min) */
+
+#define BCM_PARP_IS_TIMEOUT(pub_tick, entry)	\
+		(pub_tick - entry->used > BCM_PARP_TIMEOUT)
+
+#define BCM_PARP_ANNOUNCE_WAIT	2	/* proxyarp announce wait duration (2 sec) */
+
+#define BCM_PARP_ANNOUNCE_WAIT_REACH(pub_tick, entry) \
+		(pub_tick - entry->used > BCM_PARP_ANNOUNCE_WAIT)
+
+#define BCM_ARP_TABLE_UPDATE_TIMEOUT	100
+
+/* Taken from wlc_tdls.h for block_tdls iovar */
+#define TDLS_PAYLOAD_TYPE	2
+#define TDLS_PAYLOAD_TYPE_LEN	1
+
+/* TDLS Action Category code */
+#define TDLS_ACTION_CATEGORY_CODE	12
+
+typedef struct parp_entry {
+	struct parp_entry	*next;
+	uint32			used;	/* time stamp */
+	struct ether_addr	ea;
+	bcm_tlv_t		ip;
+} parp_entry_t;
+
+typedef struct arp_table arp_table_t;
+
+extern int bcm_l2_filter_gratuitous_arp(osl_t *osh, void *pktbuf);
+extern int bcm_l2_filter_block_ping(osl_t *osh, void *pktbuf);
+extern int bcm_l2_filter_get_mac_addr_dhcp_pkt(osl_t *osh, void *pktbuf,
+	int ifidx, uint8** addr);
+
+arp_table_t* init_l2_filter_arp_table(osl_t* osh);
+void deinit_l2_filter_arp_table(osl_t* osh, arp_table_t* ptable);
+int get_pkt_ether_type(osl_t *osh, void *skb, uint8 **data_ptr,
+	int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
+int get_pkt_ip_type(osl_t *osh, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr);
+int bcm_l2_filter_parp_addentry(osl_t *osh, arp_table_t* arp_tbl, struct ether_addr *ea,
+	uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt);
+int bcm_l2_filter_parp_delentry(osl_t *osh, arp_table_t* arp_tbl, struct ether_addr *ea,
+	uint8 *ip, uint8 ip_ver, bool cached);
+parp_entry_t *bcm_l2_filter_parp_findentry(arp_table_t* arp_tbl, uint8 *ip,
+	uint8 ip_ver, bool cached, unsigned int entry_tickcnt);
+
+int bcm_l2_filter_parp_modifyentry(arp_table_t* arp_tbl, struct ether_addr *ea,
+	uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt);
+extern void bcm_l2_filter_arp_table_update(osl_t *osh, arp_table_t* arp_tbl, bool all,
+	uint8 *del_ea, bool periodic, unsigned int tickcnt);
+
+void *bcm_l2_filter_proxyarp_alloc_reply(osl_t* osh, uint16 pktlen, struct ether_addr *src_ea,
+	struct ether_addr *dst_ea, uint16 ea_type, bool snap, void **p);
+void bcm_l2_filter_parp_get_smac(arp_table_t* ptable, void* smac);
+void bcm_l2_filter_parp_get_cmac(arp_table_t* ptable, void* cmac);
+void bcm_l2_filter_parp_set_smac(arp_table_t* ptable, void* smac);
+void bcm_l2_filter_parp_set_cmac(arp_table_t* ptable, void* cmac);
+bcm_tlv_t* parse_nd_options(void *buf, int buflen, uint key);
+uint16 calc_checksum(uint8 *src_ipa, uint8 *dst_ipa, uint32 ul_len, uint8 prot, uint8 *ul_data);
+extern int bcm_l2_filter_block_tdls(osl_t *osh, void *pktbuf);
+#endif /* _l2_filter_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcm_mpool_pub.h b/bcmdhd.101.10.361.x/include/bcm_mpool_pub.h
new file mode 100755
index 0000000..76c4ce8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_mpool_pub.h
@@ -0,0 +1,344 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API
Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogeneous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ *    pool manager: A singleton object that acts as a factory for
+ *                  pool allocators. It is also used for global
+ *                  instrumentation, such as reporting all blocks
+ *                  in use across all data structures. The pool manager
+ *                  creates and provides individual memory pools
+ *                  upon request to application code.
+ *
+ *    memory pool:  An object for allocating homogeneous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ *    bcm_mpm_*     Memory pool manager
+ *    bcm_mp_*      Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ *    prealloc: The contiguous memory block of objects can either be supplied
+ *              by the client or malloc'ed by the memory manager. The objects are
+ *              allocated out of a block of memory and freed back to the block.
+ *
+ *    heap:     The memory pool allocator uses the heap (malloc/free) for memory.
+ *              In this case, the pool allocator is just providing statistics
+ *              and instrumentation on top of the heap, without modifying the heap
+ *              allocation implementation.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h>	/* needed for uint16 */
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+	char name[BCM_MP_NAMELEN];	/* Name of this pool.
*/
+	unsigned int objsz;	/* Object size allocated in this pool */
+	uint16 nobj;		/* Total number of objects in this pool */
+	uint16 num_alloc;	/* Number of objects currently allocated */
+	uint16 high_water;	/* Max number of allocated objects. */
+	uint16 failed_alloc;	/* Failed allocations. */
+} bcm_mp_stats_t;
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    osh:       INPUT  Operating system handle. Needed for heap memory allocation.
+ *    max_pools: INPUT  Maximum number of mempools supported.
+ *    mgr:       OUTPUT The handle is written with the new pools manager object/handle.
+ *
+ * Returns:
+ *    BCME_OK     Object initialized successfully. May be used.
+ *    BCME_NOMEM  Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    mgr:     INPUT  Pointer to pool manager handle.
+ *
+ * Returns:
+ *    BCME_OK  Memory pool manager successfully de-initialized.
+ *    other    Indicated error occurred during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ *                                  pool uses a contiguous block of pre-alloced
+ *                                  memory. The memory block may either be provided
+ *                                  by the client or dynamically allocated by the
+ *                                  pool manager.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *                     Must be >= sizeof(void *).
+ *    nobj:     INPUT  Maximum number of concurrently existing objects to support
+ *    memstart  INPUT  Pointer to the memory to use, or NULL to malloc()
+ *    memsize   INPUT  Number of bytes referenced from memstart (for error checking).
+ *                     Must be 0 if 'memstart' is NULL.
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+                                 unsigned int obj_sz,
+                                 int nobj,
+                                 void *memstart,
+                                 unsigned int memsize,
+                                 const char poolname[BCM_MP_NAMELEN],
+                                 bcm_mp_pool_h *newp);
+
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ *                                  all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ *                              pool allocator uses the heap (malloc/free) for memory.
+ *                              In this case, the pool allocator is just providing
+ *                              statistics and instrumentation on top of the heap,
+ *                              without modifying the heap allocation implementation.
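+ *
+ * Illustrative call sequence (editorial sketch, not from the original header;
+ * 'my_obj_t' and 'err' are hypothetical):
+ *
+ *   bcm_mp_pool_h pool;
+ *   int err = bcm_mpm_create_heap_pool(mgr, sizeof(my_obj_t), "myobj", &pool);
+ *   if (err == BCME_OK) {
+ *       my_obj_t *obj = (my_obj_t *)bcm_mp_alloc(pool);  // take an object
+ *       ...
+ *       bcm_mp_free(pool, obj);                          // return it
+ *       bcm_mpm_delete_heap_pool(mgr, &pool);
+ *   }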
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+                             const char poolname[BCM_MP_NAMELEN],
+                             bcm_mp_pool_h *newp);
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ *                              all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pools manager
+ *    stats:    OUTPUT Array of pool statistics.
+ *    nentries: MOD    Max elements in 'stats' array on INPUT. Actual number
+ *                     of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ *    mgr:     INPUT  The handle to the pools manager
+ *    b:       OUTPUT Output buffer.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ *                          compensate for alignment requirements of the objects.
+ *                          This function provides the padded object size. If clients
+ *                          pre-allocate a memory slab for a memory pool, the
+ *                          padded object size should be used by the client to allocate
+ *                          the memory slab (in order to provide sufficient space for
+ *                          the maximum number of objects).
+ *
+ * Parameters:
+ *    mgr:            INPUT  The handle to the pools manager.
+ *    obj_sz:         INPUT  Input object size.
+ *    padded_obj_sz:  OUTPUT Padded object size.
+ *
+ * Returns:
+ *    BCME_OK      Ok
+ *    BCME_BADARG  Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool.
+ *
+ * Returns:
+ *    A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ *    pool:  INPUT   The handle to the pool.
+ *    objp:  INPUT   A pointer to the object to free.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ *    pool:     INPUT    The handle to the pool
+ *    stats:    OUTPUT   Pool statistics
+ *
+ * Returns:
+ *    none; statistics are copied into 'stats' (the function returns void).
+ *
+ */
+void bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool
+ *    b        OUTPUT   Output buffer
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+#endif /* _BCM_MPOOL_PUB_H */
diff --git a/bcmdhd.101.10.361.x/include/bcm_ring.h b/bcmdhd.101.10.361.x/include/bcm_ring.h
new file mode 100755
index 0000000..0a45432
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_ring.h
@@ -0,0 +1,585 @@
+/*
+ * bcm_ring.h : Ring context abstraction
+ * The ring context tracks the WRITE and READ indices where elements may be
+ * produced and consumed respectively. All elements in the ring need to be
+ * fixed size.
+ *
+ * NOTE: A ring of size N may only hold N-1 elements.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+#ifndef __bcm_ring_included__
+#define __bcm_ring_included__
+/*
+ * API Notes:
+ *
+ * Ring manipulation API allows for:
+ * Pending operations: Often before some work can be completed, it may be
+ * desired that several resources are available, e.g. space for production in
+ * a ring. Approaches such as #1) reserving resources one by one and returning
+ * them if another required resource is not available, or #2) employing a
+ * two-pass algorithm of first testing whether all resources are available,
+ * have an impact on performance-critical code. The approach taken here is more
+ * akin to approach #2, where a test for resource availability essentially also
+ * provides the index for production in an uncommitted state.
+ * The same approach is taken for the consumer side.
+ *
+ * - Pending production: Fetch the next index where a ring element may be
+ *   produced. The caller may not commit the WRITE of the element.
+ * - Pending consumption: Fetch the next index where a ring element may be
+ *   consumed. The caller may not commit the READ of the element.
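+ *
+ * Editorial usage sketch (not from the original header) of the pending
+ * producer pattern; 'buffer', 'DEPTH' and 'elem_t' are hypothetical:
+ *
+ *   int pend;
+ *   int idx = bcm_ring_prod_pend(ring, &pend, DEPTH);
+ *   if (idx != BCM_RING_FULL) {
+ *       ((elem_t *)buffer)[idx] = elem;   // WRITE the element
+ *       bcm_ring_prod_done(ring, pend);   // commit the production
+ *   }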
+ *
+ * Producer side API:
+ * - bcm_ring_is_full  : Test whether ring is full
+ * - bcm_ring_prod     : Fetch index where an element may be produced (commit)
+ * - bcm_ring_prod_pend: Fetch index where an element may be produced (pending)
+ * - bcm_ring_prod_done: Commit a previous pending produce fetch
+ * - bcm_ring_prod_avail: Fetch total number free slots eligible for production
+ *
+ * Consumer side API:
+ * - bcm_ring_is_empty : Test whether ring is empty
+ * - bcm_ring_cons     : Fetch index where an element may be consumed (commit)
+ * - bcm_ring_cons_pend: Fetch index where an element may be consumed (pending)
+ * - bcm_ring_cons_done: Commit a previous pending consume fetch
+ * - bcm_ring_cons_avail: Fetch total number elements eligible for consumption
+ *
+ * - bcm_ring_sync_read: Sync read offset in peer ring, from local ring
+ * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Design Notes:
+ * Following items are not tracked in a ring context (design decision)
+ * - width of a ring element.
+ * - depth of the ring.
+ * - base of the buffer, where the elements are stored.
+ * - count of number of free slots in the ring
+ *
+ * Implementation Notes:
+ * - When BCM_RING_DEBUG is enabled, an explicit bcm_ring_init() is required.
+ * - BCM_RING_EMPTY and BCM_RING_FULL are (-1)
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Usage Notes:
+ * An application may instantiate a ring of some fixed-size elements, by defining
+ * - a ring data buffer to store the ring elements.
+ * - depth of the ring (max number of elements managed by ring context).
+ *   Preferably, depth may be represented as a constant.
+ * - width of a ring element: to be used in pointer arithmetic with the ring's
+ *   data buffer base and an index to fetch the ring element.
+ *
+ * Use bcm_workq_t to instantiate a pair of workq constructs, one for the
+ * producer and the other for the consumer, both pointing to the same circular
+ * buffer. The producer may operate on its own local workq and flush the write
+ * index to the consumer. Likewise the consumer may use its local workq and
+ * flush the read index to the producer. This way we do not repeatedly access
+ * the peer's context. The two peers may reside on different CPU cores with a
+ * private L1 data cache.
+ * +----------------------------------------------------------------------------
+ *
+ * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
+ * vim: set ts=4 noet sw=4 tw=80:
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+#ifdef ____cacheline_aligned
+#define __ring_aligned ____cacheline_aligned
+#else
+#define __ring_aligned
+#endif
+
+/* Conditional compile for debug */
+/* #define BCM_RING_DEBUG */
+
+#define BCM_RING_EMPTY (-1)
+#define BCM_RING_FULL (-1)
+#define BCM_RING_NULL ((bcm_ring_t *)NULL)
+
+#if defined(BCM_RING_DEBUG)
+#define RING_ASSERT(exp) ASSERT(exp)
+#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \
+                                 ((ring)->self == (ring)))
+#else /* ! BCM_RING_DEBUG */
+#define RING_ASSERT(exp) do {} while (0)
+#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL)
+#endif /* !
BCM_RING_DEBUG */ + +#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0) + +/* + * +---------------------------------------------------------------------------- + * Ring Context + * +---------------------------------------------------------------------------- + */ +typedef struct bcm_ring { /* Ring context */ +#if defined(BCM_RING_DEBUG) + struct bcm_ring *self; /* ptr to self for IS VALID test */ +#endif /* BCM_RING_DEBUG */ + int write __ring_aligned; /* WRITE index in a circular ring */ + int read __ring_aligned; /* READ index in a circular ring */ +} bcm_ring_t; + +static INLINE void bcm_ring_init(bcm_ring_t *ring); +static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from); +static INLINE bool bcm_ring_is_empty(const bcm_ring_t *ring); + +static INLINE int __bcm_ring_next_write(const bcm_ring_t *ring, const int ring_size); + +static INLINE bool __bcm_ring_full(const bcm_ring_t *ring, int next_write); +static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write); +static INLINE int bcm_ring_prod_pend(const bcm_ring_t *ring, int *pend_write, + const int ring_size); +static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read); +static INLINE int bcm_ring_cons_pend(const bcm_ring_t *ring, int *pend_read, + const int ring_size); +static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self); +static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self); + +static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE void bcm_ring_cons_all(bcm_ring_t *ring); + +/** + * bcm_ring_init - initialize a ring context. + * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_init(bcm_ring_t *ring) +{ + ASSERT(ring != (bcm_ring_t *)NULL); +#if defined(BCM_RING_DEBUG) + ring->self = ring; +#endif /* BCM_RING_DEBUG */ + ring->write = 0; + ring->read = 0; +} + +/** + * bcm_ring_copy - copy construct a ring + * @to: pointer to the new ring context + * @from: pointer to orig ring context + */ +static INLINE void +bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from) +{ + bcm_ring_init(to); + + to->write = from->write; + to->read = from->read; +} + +/** + * bcm_ring_is_empty - "Boolean" test whether ring is empty. + * @ring: pointer to a ring context + * + * PS. does not return BCM_RING_EMPTY value. + */ +static INLINE bool +bcm_ring_is_empty(const bcm_ring_t *ring) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + return (ring->read == ring->write); +} + +/** + * __bcm_ring_next_write - determine the index where the next write may occur + * (with wrap-around). + * @ring: pointer to a ring context + * @ring_size: size of the ring + * + * PRIVATE INTERNAL USE ONLY. + */ +static INLINE int +__bcm_ring_next_write(const bcm_ring_t *ring, const int ring_size) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + return ((ring->write + 1) % ring_size); +} + +/** + * __bcm_ring_full - support function for ring full test. + * @ring: pointer to a ring context + * @next_write: next location in ring where an element is to be produced + * + * PRIVATE INTERNAL USE ONLY. 
+ */
+static INLINE bool
+__bcm_ring_full(const bcm_ring_t *ring, int next_write)
+{
+	return (next_write == ring->read);
+}
+
+/**
+ * bcm_ring_is_full - "Boolean" test whether a ring is full.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * Note: does not return the BCM_RING_FULL value.
+ */
+static INLINE bool
+bcm_ring_is_full(bcm_ring_t *ring, const int ring_size)
+{
+	int next_write;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	next_write = __bcm_ring_next_write(ring, ring_size);
+	return __bcm_ring_full(ring, next_write);
+}
+
+/**
+ * bcm_ring_prod_done - commit a previously pending index where production
+ * was requested.
+ * @ring: pointer to a ring context
+ * @write: index into ring up to where production was done.
+ * +----------------------------------------------------------------------------
+ */
+static INLINE void
+bcm_ring_prod_done(bcm_ring_t *ring, int write)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(ring));
+	ring->write = write;
+}
+
+/**
+ * bcm_ring_prod_pend - Fetch, in "pend" mode, the index where an element may be
+ * produced.
+ * @ring: pointer to a ring context
+ * @pend_write: next index, after the returned index
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_pend(const bcm_ring_t *ring, int *pend_write, const int ring_size)
+{
+	int rtn;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	*pend_write = __bcm_ring_next_write(ring, ring_size);
+	if (__bcm_ring_full(ring, *pend_write)) {
+		*pend_write = BCM_RING_FULL;
+		rtn = BCM_RING_FULL;
+	} else {
+		/* production is not committed, caller needs to explicitly commit */
+		rtn = ring->write;
+	}
+	return rtn;
+}
+
+/**
+ * bcm_ring_prod - Fetch and "commit" the next index where a ring element may
+ * be produced.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod(bcm_ring_t *ring, const int ring_size)
+{
+	int next_write, prod_write;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+
+	next_write = __bcm_ring_next_write(ring, ring_size);
+	if (__bcm_ring_full(ring, next_write)) {
+		prod_write = BCM_RING_FULL;
+	} else {
+		prod_write = ring->write;
+		bcm_ring_prod_done(ring, next_write); /* "commit" production */
+	}
+	return prod_write;
+}
+
+/**
+ * bcm_ring_cons_done - commit a previously pending read
+ * @ring: pointer to a ring context
+ * @read: index up to which elements have been consumed.
+ */
+static INLINE void
+bcm_ring_cons_done(bcm_ring_t *ring, int read)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(ring));
+	ring->read = read;
+}
+
+/**
+ * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring
+ * element may be consumed.
+ * @ring: pointer to a ring context
+ * @pend_read: index into ring up to which elements may be consumed.
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_pend(const bcm_ring_t *ring, int *pend_read, const int ring_size)
+{
+	int rtn;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	if (bcm_ring_is_empty(ring)) {
+		*pend_read = BCM_RING_EMPTY;
+		rtn = BCM_RING_EMPTY;
+	} else {
+		*pend_read = (ring->read + 1) % ring_size;
+		/* consumption is not committed, caller needs to explicitly commit */
+		rtn = ring->read;
+	}
+	return rtn;
+}
+
+/**
+ * bcm_ring_cons - fetch and "commit" the next index where a ring element may
+ * be consumed.
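+ *
+ * Editorial usage sketch (not from the original header); 'buffer', 'DEPTH'
+ * and 'elem_t' are hypothetical:
+ *
+ *   int idx;
+ *   while ((idx = bcm_ring_cons(ring, DEPTH)) != BCM_RING_EMPTY)
+ *       process(((elem_t *)buffer) + idx);   // READ is already committed
+ *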
+ * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons(bcm_ring_t *ring, const int ring_size) +{ + int cons_read; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (bcm_ring_is_empty(ring)) { + cons_read = BCM_RING_EMPTY; + } else { + cons_read = ring->read; + ring->read = (ring->read + 1) % ring_size; /* read is committed */ + } + return cons_read; +} + +/** + * bcm_ring_sync_read - on consumption, update peer's read index. + * @peer: pointer to peer's producer ring context + * @self: pointer to consumer's ring context + */ +static INLINE void +bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->read = self->read; /* flush read update to peer producer */ +} + +/** + * bcm_ring_sync_write - on consumption, update peer's write index. + * @peer: pointer to peer's consumer ring context + * @self: pointer to producer's ring context + */ +static INLINE void +bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->write = self->write; /* flush write update to peer consumer */ +} + +/** + * bcm_ring_prod_avail - fetch total number of available empty slots in the + * ring for production. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size) +{ + int prod_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->write >= ring->read) { + prod_avail = (ring_size - (ring->write - ring->read) - 1); + } else { + prod_avail = (ring->read - (ring->write + 1)); + } + ASSERT(prod_avail < ring_size); + return prod_avail; +} + +/** + * bcm_ring_cons_avail - fetch total number of available elements for consumption. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size) +{ + int cons_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->read == ring->write) { + cons_avail = 0; + } else if (ring->read > ring->write) { + cons_avail = ((ring_size - ring->read) + ring->write); + } else { + cons_avail = ring->write - ring->read; + } + ASSERT(cons_avail < ring_size); + return cons_avail; +} + +/** + * bcm_ring_cons_all - set ring in state where all elements are consumed. + * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_cons_all(bcm_ring_t *ring) +{ + ring->read = ring->write; +} + +/** + * Work Queue + * A work Queue is composed of a ring of work items, of a specified depth. + * It HAS-A bcm_ring object, comprising of a RD and WR offset, to implement a + * producer/consumer circular ring. + */ + +struct bcm_workq { + bcm_ring_t ring; /* Ring context abstraction */ + struct bcm_workq *peer; /* Peer workq context */ + void *buffer; /* Buffer storage for work items in workQ */ + int ring_size; /* Depth of workQ */ +} __ring_aligned; + +typedef struct bcm_workq bcm_workq_t; + +/* #define BCM_WORKQ_DEBUG */ +#if defined(BCM_WORKQ_DEBUG) +#define WORKQ_ASSERT(exp) ASSERT(exp) +#else /* ! BCM_WORKQ_DEBUG */ +#define WORKQ_ASSERT(exp) do {} while (0) +#endif /* ! 
BCM_WORKQ_DEBUG */
+
+#define WORKQ_AUDIT(workq) \
+	WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \
+	WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \
+	WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \
+	WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size);
+
+#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL)
+
+#define WORKQ_PEER(workq) ((workq)->peer)
+#define WORKQ_RING(workq) (&((workq)->ring))
+#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring))
+
+#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \
+	WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \
+	WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \
+	((__elem_type *)((__workq)->buffer)) + (__index); \
+})
+
+static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+	void *buffer, int ring_size);
+
+static INLINE bool bcm_workq_is_empty(const bcm_workq_t *workq_prod);
+
+static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons);
+
+static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons);
+
+/**
+ * bcm_workq_init - initialize a workq
+ * @workq: pointer to a workq context
+ * @workq_peer: pointer to the peer workq context
+ * @buffer: pointer to a pre-allocated circular buffer to serve as a ring
+ * @ring_size: size of the ring in terms of max number of elements.
+ */
+static INLINE void
+bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+	void *buffer, int ring_size)
+{
+	ASSERT(workq != BCM_WORKQ_NULL);
+	ASSERT(workq_peer != BCM_WORKQ_NULL);
+	ASSERT(buffer != NULL);
+	ASSERT(ring_size > 0);
+
+	WORKQ_PEER(workq) = workq_peer;
+	WORKQ_PEER(workq_peer) = workq;
+
+	bcm_ring_init(WORKQ_RING(workq));
+	bcm_ring_init(WORKQ_RING(workq_peer));
+
+	workq->buffer = workq_peer->buffer = buffer;
+	workq->ring_size = workq_peer->ring_size = ring_size;
+}
+
+/**
+ * bcm_workq_is_empty - test whether there is work
+ * @workq_prod: producer's workq
+ */
+static INLINE bool
+bcm_workq_is_empty(const bcm_workq_t *workq_prod)
+{
+	return bcm_ring_is_empty(WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring
+ * @workq_prod: producer's workq whose write index must be synced to peer
+ */
+static INLINE void
+bcm_workq_prod_sync(bcm_workq_t *workq_prod)
+{
+	WORKQ_AUDIT(workq_prod);
+
+	/* cons::write <--- prod::write */
+	bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring
+ * @workq_cons: consumer's workq whose read index must be synced to peer
+ */
+static INLINE void
+bcm_workq_cons_sync(bcm_workq_t *workq_cons)
+{
+	WORKQ_AUDIT(workq_cons);
+
+	/* prod::read <--- cons::read */
+	bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons));
+}
+
+/**
+ * bcm_workq_prod_refresh - Fetch the updated consumer's read index
+ * @workq_prod: producer's workq whose read index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_prod_refresh(bcm_workq_t *workq_prod)
+{
+	WORKQ_AUDIT(workq_prod);
+
+	/* prod::read <--- cons::read */
+	bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_refresh - Fetch the updated producer's write index
+ * @workq_cons: consumer's workq whose write index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_cons_refresh(bcm_workq_t *workq_cons)
+{
+	WORKQ_AUDIT(workq_cons);
+
+	/* cons::write <--- prod::write */
+	bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons));
+}
+
+#endif /* ! __bcm_ring_included__ */
diff --git a/bcmdhd.101.10.361.x/include/bcmarp.h b/bcmdhd.101.10.361.x/include/bcmarp.h
new file mode 100755
index 0000000..2e6d92d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmarp.h
@@ -0,0 +1,84 @@
+/*
+ * Fundamental constants relating to ARP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmarp_h_
+#define _bcmarp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <ethernet.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define ARP_OPC_OFFSET		6	/* option code offset */
+#define ARP_SRC_ETH_OFFSET	8	/* src h/w address offset */
+#define ARP_SRC_IP_OFFSET	14	/* src IP address offset */
+#define ARP_TGT_ETH_OFFSET	18	/* target h/w address offset */
+#define ARP_TGT_IP_OFFSET	24	/* target IP address offset */
+
+#define ARP_OPC_REQUEST		1	/* ARP request */
+#define ARP_OPC_REPLY		2	/* ARP reply */
+
+#define ARP_DATA_LEN		28	/* ARP data length */
+
+#define HTYPE_ETHERNET		1	/* htype for ethernet */
+BWL_PRE_PACKED_STRUCT struct bcmarp {
+	uint16	htype;				/* Header type (1 = ethernet) */
+	uint16	ptype;				/* Protocol type (0x800 = IP) */
+	uint8	hlen;				/* Hardware address length (Eth = 6) */
+	uint8	plen;				/* Protocol address length (IP = 4) */
+	uint16	oper;				/* ARP_OPC_... */
+	uint8	src_eth[ETHER_ADDR_LEN];	/* Source hardware address */
+	uint8	src_ip[IPV4_ADDR_LEN];		/* Source protocol address (not aligned) */
+	uint8	dst_eth[ETHER_ADDR_LEN];	/* Destination hardware address */
+	uint8	dst_ip[IPV4_ADDR_LEN];		/* Destination protocol address */
+} BWL_POST_PACKED_STRUCT;
+
+/* Ethernet header + Arp message */
+BWL_PRE_PACKED_STRUCT struct bcmetharp {
+	struct ether_header	eh;
+	struct bcmarp	arp;
+} BWL_POST_PACKED_STRUCT;
+
+/* IPv6 Neighbor Advertisement */
+#define NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET	8	/* src IPv6 address offset */
+#define NEIGHBOR_ADVERTISE_TYPE_OFFSET		40	/* type offset */
+#define NEIGHBOR_ADVERTISE_CHECKSUM_OFFSET	42	/* checksum offset */
+#define NEIGHBOR_ADVERTISE_FLAGS_OFFSET		44	/* R, S and O flags offset */
+#define NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET	48	/* target IPv6 address offset */
+#define NEIGHBOR_ADVERTISE_OPTION_OFFSET	64	/* options offset */
+#define NEIGHBOR_ADVERTISE_TYPE		136
+#define NEIGHBOR_SOLICITATION_TYPE	135
+
+#define OPT_TYPE_SRC_LINK_ADDR	1
+#define OPT_TYPE_TGT_LINK_ADDR	2
+
+#define NEIGHBOR_ADVERTISE_DATA_LEN	72	/* neighbor advertisement data length */
+#define NEIGHBOR_ADVERTISE_FLAGS_VALUE	0x60	/* R=0, S=1 and O=1 */
+
+/* This marks the end of a packed structure section.
+ */
+#include <packed_section_end.h>
+
+#endif /* !defined(_bcmarp_h_) */
diff --git a/bcmdhd.101.10.361.x/include/bcmbloom.h b/bcmdhd.101.10.361.x/include/bcmbloom.h
new file mode 100755
index 0000000..dabfb26
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmbloom.h
@@ -0,0 +1,73 @@
+/*
+ * Bloom filter support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmbloom_h_
+#define _bcmbloom_h_
+
+#include <typedefs.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else
+#include <stddef.h>	/* For size_t */
+#endif
+
+struct bcm_bloom_filter;
+typedef struct bcm_bloom_filter bcm_bloom_filter_t;
+
+typedef void* (*bcm_bloom_alloc_t)(void *ctx, uint size);
+typedef void (*bcm_bloom_free_t)(void *ctx, void *buf, uint size);
+typedef uint (*bcm_bloom_hash_t)(void* ctx, uint idx, const uint8 *tag, uint len);
+
+/* create/allocate a bloom filter. filter size can be 0 for validate-only filters */
+int bcm_bloom_create(bcm_bloom_alloc_t alloc_cb,
+	bcm_bloom_free_t free_cb, void *callback_ctx, uint max_hash,
+	uint filter_size /* bytes */, bcm_bloom_filter_t **bloom);
+
+/* destroy bloom filter */
+int bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb);
+
+/* add a hash function to filter, return an index */
+int bcm_bloom_add_hash(bcm_bloom_filter_t *filter, bcm_bloom_hash_t hash, uint *idx);
+
+/* remove the hash function at index from filter */
+int bcm_bloom_remove_hash(bcm_bloom_filter_t *filter, uint idx);
+
+/* check if the given tag is a member of the filter. If buf is NULL and/or buf_len is 0
+ * then use the internal state. BCME_OK if member, BCME_NOTFOUND if not,
+ * or other error (e.g. BADARG)
+ */
+bool bcm_bloom_is_member(bcm_bloom_filter_t *filter,
+	const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len);
+
+/* add a member to the filter. invalid for validate-only filters */
+int bcm_bloom_add_member(bcm_bloom_filter_t *filter, const uint8 *tag, uint tag_len);
+
+/* no support for remove member */
+
+/* get the filter data from state. BCME_BUFTOOSHORT w/ required length in buf_len
+ * if supplied size is insufficient
+ */
+int bcm_bloom_get_filter_data(bcm_bloom_filter_t *filter,
+	uint buf_size, uint8 *buf, uint *buf_len);
+
+#endif /* _bcmbloom_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmcdc.h b/bcmdhd.101.10.361.x/include/bcmcdc.h
new file mode 100755
index 0000000..cc03c7a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmcdc.h
@@ -0,0 +1,115 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
diff --git a/bcmdhd.101.10.361.x/include/bcmcdc.h b/bcmdhd.101.10.361.x/include/bcmcdc.h new file mode 100755 index 0000000..cc03c7a --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmcdc.h @@ -0,0 +1,115 @@ +/* + * CDC network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ +#ifndef _bcmcdc_h_ +#define _bcmcdc_h_ +#include <typedefs.h> + +typedef struct cdc_ioctl { + uint32 cmd; /* ioctl command value */ + uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */ + uint32 flags; /* flag definitions given below */ + uint32 status; /* status code returned from the device */ +} cdc_ioctl_t; + +/* Max valid buffer size that can be sent to the dongle */ +#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN + +/* len field is divided into input and output buffer lengths */ +#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */ + /* excluding IOCTL header */ +#define CDCL_IOC_OUTLEN_SHIFT 0 +#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */ +#define CDCL_IOC_INLEN_SHIFT 16 + +/* CDC flag definitions */ +#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */ +#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */ +#define CDCF_IOC_OVL_IDX_MASK 0x3c /* overlay region index mask */ +#define CDCF_IOC_OVL_RSV 0x40 /* 1=reserve this overlay region */ +#define CDCF_IOC_OVL 0x80 /* 1=this ioctl corresponds to an overlay */ +#define CDCF_IOC_ACTION_MASK 0xfe /* SET/GET, OVL_IDX, OVL_RSV, OVL mask */ +#define CDCF_IOC_ACTION_SHIFT 1 /* SET/GET, OVL_IDX, OVL_RSV, OVL shift */ +#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */ +#define CDCF_IOC_IF_SHIFT 12 +#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */ +#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */ + +#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT) +#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT) + +#define CDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)) +#define CDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT))) + +/* + * BDC header + * + * The BDC header is used on data packets to convey priority across USB. + */ + +struct bdc_header { + uint8 flags; /* Flags */ + uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 USB flow control info */ + uint8 flags2; + uint8 dataOffset; /* Offset from end of BDC header to packet data, in + * 4-byte words. Leaves room for optional headers. + */ +}; + +#define BDC_HEADER_LEN 4 + +/* flags field bitmap */ +#define BDC_FLAG_EXEMPT 0x03 /* EXT_STA: encryption exemption (host -> dongle?
*/ +#define BDC_FLAG_80211_PKT 0x01 /* Packet is in 802.11 format (dongle -> host) */ +#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */ +#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums: host->device */ +#define BDC_FLAG_EVENT_MSG 0x08 /* Payload contains an event msg: device->host */ +#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ +#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ + +/* priority field bitmap */ +#define BDC_PRIORITY_MASK 0x07 +#define BDC_PRIORITY_FC_MASK 0xf0 /* flow control info mask */ +#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */ + +/* flags2 field bitmap */ +#define BDC_FLAG2_IF_MASK 0x0f /* interface index (host <-> dongle) */ +#define BDC_FLAG2_IF_SHIFT 0 +#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */ + /* FLOW CONTROL info only */ + +/* version numbers */ +#define BDC_PROTO_VER_1 1 /* Old Protocol version */ +#define BDC_PROTO_VER 2 /* Protocol version */ + +/* flags2.if field access macros */ +#define BDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT)) +#define BDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT))) + +#endif /* _bcmcdc_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmdefs.h b/bcmdhd.101.10.361.x/include/bcmdefs.h new file mode 100755 index 0000000..58e1ca3 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmdefs.h @@ -0,0 +1,909 @@ +/* + * Misc system wide definitions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _bcmdefs_h_ +#define _bcmdefs_h_ + +/* + * One doesn't need to include this file explicitly, gets included automatically if + * typedefs.h is included. + */ + +/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function + * arguments or local variables. + */ +#define BCM_REFERENCE(data) ((void)(data)) + +/* Allow for suppressing unused variable warnings. */ +#ifdef __GNUC__ +#define UNUSED_VAR __attribute__ ((unused)) +#else +#define UNUSED_VAR +#endif + +/* GNU GCC 4.6+ supports selectively turning off a warning. + * Define these diagnostic macros to help suppress cast-qual warning + * until all the work can be done to fix the casting issues. 
+ */ +#if (defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) || \ + defined(__clang__)) +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF() \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wnull-dereference\"") +#define GCC_DIAGNOSTIC_POP() \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \ + __pragma(warning(push)) \ + __pragma(warning(disable:4090)) +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF() \ + __pragma(warning(push)) +#define GCC_DIAGNOSTIC_POP() \ + __pragma(warning(pop)) +#else +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF() +#define GCC_DIAGNOSTIC_POP() +#endif /* Diagnostic macros not defined */ + +/* Macros to allow Coverity modeling constructs in source code */ +#if defined(__COVERITY__) + +/* Coverity Doc: + * Indicates to the TAINTED_SCALAR checker and the INTEGER_OVERFLOW checker + * that a function taints its argument + */ +#define COV_TAINTED_DATA_ARG(arg) __coverity_tainted_data_argument__(arg) + +/* Coverity Doc: + * Indicates to the TAINTED_SCALAR checker and the INTEGER_OVERFLOW checker + * that a function is a tainted data sink for an argument. + */ +#define COV_TAINTED_DATA_SINK(arg) __coverity_tainted_data_sink__(arg) + +/* Coverity Doc: + * Models a function that cannot take a negative number as an argument. Used in + * conjunction with other models to indicate that negative arguments are invalid. + */ +#define COV_NEG_SINK(arg) __coverity_negative_sink__(arg) + +#else + +#define COV_TAINTED_DATA_ARG(arg) do { } while (0) +#define COV_TAINTED_DATA_SINK(arg) do { } while (0) +#define COV_NEG_SINK(arg) do { } while (0) + +#endif /* __COVERITY__ */ + +/* Compile-time assert can be used in place of ASSERT if the expression evaluates + * to a constant at compile time. + */ +#define STATIC_ASSERT(expr) { \ + /* Make sure the expression is constant. */ \ + typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \ + /* Make sure the expression is true. */ \ + typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \ +}
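STATIC_ASSERT works by declaring a char array whose size is -1 when the expression is false, so a violated layout or typedef assumption fails at build time rather than at run time; each invocation carries its own braces, so several can appear in one function. A small sketch, assuming the driver's typedefs (demo_rec_t and the checked sizes are invented for illustration):

/* A false expression here would declare char[-1] and stop the build. */
typedef struct { uint8 a; uint8 b; uint16 c; } demo_rec_t;

static void demo_static_asserts(void)
{
	STATIC_ASSERT(sizeof(demo_rec_t) == 4);  /* packing assumption */
	STATIC_ASSERT(sizeof(uint32) == 4);      /* typedef sanity check */
}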
+ +/* Reclaiming text and data : + * The following macros specify special linker sections that can be reclaimed + * after a system is considered 'up'. + * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN, + * as in most cases, the attach function calls the detach function to clean up on error). + */ +#if defined(BCM_RECLAIM) + +extern bool bcm_reclaimed; +extern bool bcm_attach_part_reclaimed; +extern bool bcm_preattach_part_reclaimed; +extern bool bcm_postattach_part_reclaimed; + +#define RECLAIMED() (bcm_reclaimed) +#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed) +#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed) +#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed) + +/* Place _fn/_data symbols in various reclaimed output sections */ +#define BCMATTACHDATA(_data) __attribute__ ((__section__ (".dataini2." #_data))) _data +#define BCMATTACHFN(_fn) __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn +#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini3." #_data))) _data +#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini3." #_fn), noinline)) _fn +#define BCMPOSTATTACHDATA(_data) __attribute__ ((__section__ (".dataini5." #_data))) _data +#define BCMPOSTATTACHFN(_fn) __attribute__ ((__section__ (".textini5." #_fn), noinline)) _fn + +/* Relocate attach symbols to save-restore region to increase pre-reclaim heap size. */ +#define BCM_SRM_ATTACH_DATA(_data) __attribute__ ((__section__ (".datasrm." #_data))) _data +#define BCM_SRM_ATTACH_FN(_fn) __attribute__ ((__section__ (".textsrm." #_fn), noinline)) _fn + +/* Explicitly place data in .rodata section so it can be write-protected after attach */ +#define BCMRODATA(_data) __attribute__ ((__section__ (".shrodata." #_data))) _data + +#ifdef BCMDBG_SR +/* + * Don't reclaim so we can compare SR ASM + */ +#define BCMPREATTACHDATASR(_data) _data +#define BCMPREATTACHFNSR(_fn) _fn +#define BCMATTACHDATASR(_data) _data +#define BCMATTACHFNSR(_fn) _fn +#else +#define BCMPREATTACHDATASR(_data) BCMPREATTACHDATA(_data) +#define BCMPREATTACHFNSR(_fn) BCMPREATTACHFN(_fn) +#define BCMATTACHDATASR(_data) BCMATTACHDATA(_data) +#define BCMATTACHFNSR(_fn) BCMATTACHFN(_fn) +#endif + +#define BCMINITDATA(_data) _data +#define BCMINITFN(_fn) _fn +#ifndef CONST +#define CONST const +#endif + +/* Non-manufacture or internal attach function/data */ +#if !(defined(WLTEST) || defined(ATE_BUILD)) +#define BCMNMIATTACHFN(_fn) BCMATTACHFN(_fn) +#define BCMNMIATTACHDATA(_data) BCMATTACHDATA(_data) +#else +#define BCMNMIATTACHFN(_fn) _fn +#define BCMNMIATTACHDATA(_data) _data +#endif /* WLTEST || ATE_BUILD */ + +#if !defined(ATE_BUILD) && defined(BCM_CISDUMP_NO_RECLAIM) +#define BCMCISDUMPATTACHFN(_fn) _fn +#define BCMCISDUMPATTACHDATA(_data) _data +#else +#define BCMCISDUMPATTACHFN(_fn) BCMNMIATTACHFN(_fn) +#define BCMCISDUMPATTACHDATA(_data) BCMNMIATTACHDATA(_data) +#endif /* !ATE_BUILD && BCM_CISDUMP_NO_RECLAIM */ + +/* SROM with OTP support */ +#if defined(BCMOTPSROM) +#define BCMSROMATTACHFN(_fn) _fn +#define BCMSROMATTACHDATA(_data) _data +#else +#define BCMSROMATTACHFN(_fn) BCMNMIATTACHFN(_fn) +#define BCMSROMATTACHDATA(_data) BCMNMIATTACHDATA(_data) +#endif /* BCMOTPSROM */ + +#if defined(BCM_CISDUMP_NO_RECLAIM) +#define BCMSROMCISDUMPATTACHFN(_fn) _fn +#define BCMSROMCISDUMPATTACHDATA(_data) _data +#else +#define BCMSROMCISDUMPATTACHFN(_fn) BCMSROMATTACHFN(_fn) +#define BCMSROMCISDUMPATTACHDATA(_data) BCMSROMATTACHDATA(_data) +#endif /* BCM_CISDUMP_NO_RECLAIM */ + +#define BCMUNINITFN(_fn) _fn + +#else /* BCM_RECLAIM */ + +#define bcm_reclaimed (1) +#define bcm_attach_part_reclaimed (1) +#define bcm_preattach_part_reclaimed (1) +#define bcm_postattach_part_reclaimed (1) +#define BCMATTACHDATA(_data) _data +#define BCMATTACHFN(_fn) _fn +#define BCM_SRM_ATTACH_DATA(_data) _data +#define BCM_SRM_ATTACH_FN(_fn) _fn +/* BCMRODATA data is written into at attach time so it cannot be in .rodata */ +#define BCMRODATA(_data) __attribute__ ((__section__ (".data." #_data))) _data +#define BCMPREATTACHDATA(_data) _data +#define BCMPREATTACHFN(_fn) _fn +#define BCMPOSTATTACHDATA(_data) _data +#define BCMPOSTATTACHFN(_fn) _fn +#define BCMINITDATA(_data) _data +#define BCMINITFN(_fn) _fn +#define BCMUNINITFN(_fn) _fn +#define BCMNMIATTACHFN(_fn) _fn +#define BCMNMIATTACHDATA(_data) _data +#define BCMSROMATTACHFN(_fn) _fn +#define BCMSROMATTACHDATA(_data) _data +#define BCMPREATTACHFNSR(_fn) _fn +#define BCMPREATTACHDATASR(_data) _data +#define BCMATTACHFNSR(_fn) _fn +#define BCMATTACHDATASR(_data) _data +#define BCMSROMATTACHFN(_fn) _fn +#define BCMSROMATTACHDATA(_data) _data +#define BCMCISDUMPATTACHFN(_fn) _fn +#define BCMCISDUMPATTACHDATA(_data) _data +#define BCMSROMCISDUMPATTACHFN(_fn) _fn +#define BCMSROMCISDUMPATTACHDATA(_data) _data +#define CONST const + +#define RECLAIMED() (bcm_reclaimed) +#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed) +#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed) +#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed) + +#endif /* BCM_RECLAIM */
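Note the idiom used by all of these attach macros: the macro wraps the symbol name, not the whole declaration, which lets it splice a section attribute in front of the name. A hypothetical use, with both identifiers invented for illustration:

/* With BCM_RECLAIM defined, BCMATTACHDATA(demo_attach_tbl) expands to
 *   __attribute__ ((__section__ (".dataini2.demo_attach_tbl"))) demo_attach_tbl
 * so the table lands in a section the firmware frees after attach;
 * without BCM_RECLAIM both macros collapse to plain names. */
static const uint16 BCMATTACHDATA(demo_attach_tbl)[4] = { 1, 2, 4, 8 };

static int BCMATTACHFN(demo_attach)(void *ctx)
{
	BCM_REFERENCE(ctx);
	return demo_attach_tbl[0];  /* runs once during attach, then reclaimable */
}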
+ +#define BCMUCODEDATA(_data) BCMINITDATA(_data) + +#if defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED) && !defined(DONGLEBUILD) +#define BCMUCODEFN(_fn) BCMINITFN(_fn) +#else +#define BCMUCODEFN(_fn) BCMATTACHFN(_fn) +#endif /* BCM_AQM_DMA_DESC */ + +/* This feature is for dongle builds only. + * In ROM builds, use BCMFASTPATH() to mark functions that will be excluded from the ROM bits if + * the BCMFASTPATH_EXCLUDE_FROM_ROM flag is defined (defined by default). + * In romoffload or ram builds, all functions marked by BCMFASTPATH() will be placed + * in the "text_fastpath" section and will be used by the trap handler. + */ +#ifndef BCMFASTPATH +#if defined(DONGLEBUILD) +#if defined(BCMROMBUILD) +#if defined(BCMFASTPATH_EXCLUDE_FROM_ROM) + #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_ram." #_fn))) _fn +#else /* BCMFASTPATH_EXCLUDE_FROM_ROM */ + #define BCMFASTPATH(_fn) _fn +#endif /* BCMFASTPATH_EXCLUDE_FROM_ROM */ +#else /* BCMROMBUILD */ +#ifdef BCMFASTPATH_O3OPT +#ifdef ROM_ENAB_RUNTIME_CHECK + #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_fastpath." #_fn))) _fn +#else + #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_fastpath." #_fn))) \ + __attribute__ ((optimize(3))) _fn +#endif /* ROM_ENAB_RUNTIME_CHECK */ +#else + #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_fastpath." #_fn))) _fn +#endif /* BCMFASTPATH_O3OPT */ +#endif /* BCMROMBUILD */ +#else /* DONGLEBUILD */ + #define BCMFASTPATH(_fn) _fn +#endif /* DONGLEBUILD */ +#endif /* BCMFASTPATH */
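BCMFASTPATH() follows the same name-wrapping idiom; on dongle builds the tagged body lands in the .text_fastpath (or .text_ram) section described above, and on host builds the macro disappears. A hypothetical example, with the name, signature and body invented for illustration:

/* On a dongle build this is placed in .text_fastpath.demo_rx_frame. */
static int BCMFASTPATH(demo_rx_frame)(void *pkt, uint len)
{
	return (pkt != NULL && len != 0u) ? 0 : -1;
}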
/* Use the BCMRAMFN/BCMRAMDATA() macros to tag functions/data in source that must be included in RAM + * (excluded from ROM). This should eliminate the need to manually specify these functions/data in + * the ROM config file. It should only be used in special cases where the function must be in RAM + * for *all* ROM-based chips. + */ +#if defined(BCMROMBUILD) + #define BCMRAMFN(_fn) __attribute__ ((__section__ (".text_ram." #_fn), noinline)) _fn + #define BCMRAMDATA(_data) __attribute__ ((__section__ (".rodata_ram." #_data))) _data +#else + #define BCMRAMFN(_fn) _fn + #define BCMRAMDATA(_data) _data +#endif /* ROMBUILD */ + +/* Use BCMSPECSYM() macro to tag symbols going to a special output section in the binary. */ +#define BCMSPECSYM(_sym) __attribute__ ((__section__ (".special." #_sym))) _sym + +#define STATIC static + +/* Functions that do not examine any values except their arguments, and have no effects except + * the return value, should use this keyword. Note that a function that has pointer arguments + * and examines the data pointed to must not be declared as BCMCONSTFN. + */ +#ifdef __GNUC__ +#define BCMCONSTFN __attribute__ ((const)) +#else +#define BCMCONSTFN +#endif /* __GNUC__ */ + +/* Bus types */ +#define SI_BUS 0 /* SOC Interconnect */ +#define PCI_BUS 1 /* PCI target */ +#define PCMCIA_BUS 2 /* PCMCIA target */ +#define SDIO_BUS 3 /* SDIO target */ +#define JTAG_BUS 4 /* JTAG */ +#define USB_BUS 5 /* USB (does not support R/W REG) */ +#define SPI_BUS 6 /* gSPI target */ +#define RPC_BUS 7 /* RPC target */ + +/* Allows size optimization for single-bus image */ +#ifdef BCMBUSTYPE +#define BUSTYPE(bus) (BCMBUSTYPE) +#else +#define BUSTYPE(bus) (bus) +#endif + +#ifdef BCMBUSCORETYPE +#define BUSCORETYPE(ct) (BCMBUSCORETYPE) +#else +#define BUSCORETYPE(ct) (ct) +#endif + +/* Allows size optimization for single-backplane image */ +#ifdef BCMCHIPTYPE +#define CHIPTYPE(bus) (BCMCHIPTYPE) +#else +#define CHIPTYPE(bus) (bus) +#endif + +/* Allows size optimization for SPROM support */ +#if defined(BCMSPROMBUS) +#define SPROMBUS (BCMSPROMBUS) +#else +#define SPROMBUS (PCI_BUS) +#endif + +/* Allows size optimization for single-chip image */ +/* These macros are NOT meant to encourage writing chip-specific code. + * Use them only when it is appropriate, for example in PMU PLL/CHIP/SWREG + * controls and in chip-specific workarounds. + */ +#ifdef BCMCHIPID +#define CHIPID(chip) (BCMCHIPID) +#else +#define CHIPID(chip) (chip) +#endif + +#ifdef BCMCHIPREV +#define CHIPREV(rev) (BCMCHIPREV) +#else +#define CHIPREV(rev) (rev) +#endif + +#ifdef BCMPCIEREV +#define PCIECOREREV(rev) (BCMPCIEREV) +#else +#define PCIECOREREV(rev) (rev) +#endif + +#ifdef BCMPMUREV +#define PMUREV(rev) (BCMPMUREV) +#else +#define PMUREV(rev) (rev) +#endif + +#ifdef BCMCCREV +#define CCREV(rev) (BCMCCREV) +#else +#define CCREV(rev) (rev) +#endif + +#ifdef BCMGCIREV +#define GCIREV(rev) (BCMGCIREV) +#else +#define GCIREV(rev) (rev) +#endif + +#ifdef BCMCR4REV +#define CR4REV(rev) (BCMCR4REV) +#define CR4REV_GE(rev, val) ((BCMCR4REV) >= (val)) +#else +#define CR4REV(rev) (rev) +#define CR4REV_GE(rev, val) ((rev) >= (val)) +#endif + +#ifdef BCMLHLREV +#define LHLREV(rev) (BCMLHLREV) +#else +#define LHLREV(rev) (rev) +#endif + +#ifdef BCMSPMISREV +#define SPMISREV(rev) (BCMSPMISREV) +#else +#define SPMISREV(rev) (rev) +#endif + +/* Defines for DMA Address Width - Shared between OSL and HNDDMA */ +#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */ +#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */ +#define DMADDR_MASK_26 0xFC000000 /* Address mask for 26-bits */ +#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */ + +#define DMADDRWIDTH_26 26 /* 26-bit addressing capability */ +#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */ +#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */ +#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */ +#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */ + +typedef struct { + uint32 loaddr; + uint32 hiaddr; +} dma64addr_t; + +#define PHYSADDR64HI(_pa) ((_pa).hiaddr) +#define PHYSADDR64HISET(_pa, _val) \ + do { \ + (_pa).hiaddr = (_val); \ + } while (0) +#define PHYSADDR64LO(_pa) ((_pa).loaddr) +#define PHYSADDR64LOSET(_pa, _val) \ + do { \ + (_pa).loaddr = (_val); \ + } while (0)
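A small sketch of how a 64-bit bus address is split into the hiaddr/loaddr words with the setter macros above; demo_split is an invented name, not driver code:

/* Shows the hi/lo split a 64-bit-capable DMA engine expects when
 * BCMDMA64OSL selects dma64addr_t as the dmaaddr_t type below. */
static dma64addr_t demo_split(unsigned long long busaddr)
{
	dma64addr_t pa;

	PHYSADDR64HISET(pa, (uint32)(busaddr >> 32));
	PHYSADDR64LOSET(pa, (uint32)(busaddr & 0xffffffffu));
	return pa;
}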
+ +#ifdef BCMDMA64OSL +typedef dma64addr_t dmaaddr_t; +#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa) +#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val) +#define PHYSADDRLO(_pa) PHYSADDR64LO(_pa) +#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val) +#define PHYSADDRTOULONG(_pa, _ulong) \ + do { \ + _ulong = ((unsigned long long)(_pa).hiaddr << 32) | ((_pa).loaddr); \ + } while (0) + +#else +typedef uint32 dmaaddr_t; +#define PHYSADDRHI(_pa) (0u) +#define PHYSADDRHISET(_pa, _val) +#define PHYSADDRLO(_pa) ((_pa)) +#define PHYSADDRLOSET(_pa, _val) \ + do { \ + (_pa) = (_val); \ + } while (0) +#endif /* BCMDMA64OSL */ + +#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0) + +/* One physical DMA segment */ +typedef struct { + dmaaddr_t addr; + uint32 length; +} hnddma_seg_t; + +#if defined(__linux__) +#define MAX_DMA_SEGS 8 +#else +#define MAX_DMA_SEGS 4 +#endif + +typedef struct { + void *oshdmah; /* Opaque handle for OSL to store its information */ + uint origsize; /* Size of the virtual packet */ + uint nsegs; + hnddma_seg_t segs[MAX_DMA_SEGS]; +} hnddma_seg_map_t; + +/* packet headroom necessary to accommodate the largest header in the system, (i.e. TXOFF). + * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL. + * There is a compile time check in wlc.c which ensures that this value is at least as big + * as TXOFF. This value is used in dma_rxfill (hnddma.c). + */ + +#ifndef BCMEXTRAHDROOM +#define BCMEXTRAHDROOM 204 +#endif + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef SDALIGN +#define SDALIGN 32 +#endif + +/* Headroom required for dongle-to-host communication. Packets allocated + * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should + * leave this much room in front for low-level message headers which may + * be needed to get across the dongle bus to the host. (These messages + * don't go over the network, so room for the full WL header above would + * be a waste.) +*/ +/* + * set the numbers to be MAX of all the devices, to avoid problems with ROM builds + * USB BCMDONGLEHDRSZ and BCMDONGLEPADSZ is 0 + * SDIO BCMDONGLEHDRSZ 12 and BCMDONGLEPADSZ 16 +*/ +#define BCMDONGLEHDRSZ 12 +#define BCMDONGLEPADSZ 16 + +#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ) + +#ifdef BCMDBG + +#ifndef BCMDBG_ERR +#define BCMDBG_ERR +#endif /* BCMDBG_ERR */ + +#ifndef BCMDBG_ASSERT +#define BCMDBG_ASSERT +#endif /* BCMDBG_ASSERT */ + +#endif /* BCMDBG */ + +#if defined(NO_BCMDBG_ASSERT) + #undef BCMDBG_ASSERT + #undef BCMASSERT_LOG +#endif + +#if defined(BCMDBG_ASSERT) || defined(BCMASSERT_LOG) +#define BCMASSERT_SUPPORT +#endif /* BCMDBG_ASSERT || BCMASSERT_LOG */ + +/* Macros for doing definition and get/set of bitfields + * Usage example, e.g. a three-bit field (bits 4-6): + * #define <NAME>_M BITFIELD_MASK(3) + * #define <NAME>_S 4 + * ... + * regval = R_REG(osh, &regs->regfoo); + * field = GFIELD(regval, <NAME>); + * regval = SFIELD(regval, <NAME>, 1); + * W_REG(osh, &regs->regfoo, regval); + */ +#define BITFIELD_MASK(width) \ + (((unsigned)1 << (width)) - 1) +#define GFIELD(val, field) \ + (((val) >> field ## _S) & field ## _M) +#define SFIELD(val, field, bits) \ + (((val) & (~(field ## _M << field ## _S))) | \ + ((unsigned)(bits) << field ## _S))
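A worked version of the usage comment above, for a three-bit field at bits 4..6; DEMO_FLD is an invented field name, and the _M/_S suffix convention is what the token-pasting in GFIELD/SFIELD relies on:

#define DEMO_FLD_M	BITFIELD_MASK(3)  /* 0x7 */
#define DEMO_FLD_S	4

static uint32 demo_field_rmw(uint32 regval)
{
	uint32 field = GFIELD(regval, DEMO_FLD);  /* extract bits 6:4 */
	(void)field;
	return SFIELD(regval, DEMO_FLD, 5u);      /* write 5 into bits 6:4 */
}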
+ +/* define BCMSMALL to remove misc features for memory-constrained environments */ +#ifdef BCMSMALL +#undef BCMSPACE +#define bcmspace FALSE /* if (bcmspace) code is discarded */ +#else +#define BCMSPACE +#define bcmspace TRUE /* if (bcmspace) code is retained */ +#endif + +/* ROM_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also + * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles). + */ +#if defined(BCMROMBUILD) +#ifndef ROM_ENAB_RUNTIME_CHECK + #define ROM_ENAB_RUNTIME_CHECK +#endif +#endif /* BCMROMBUILD */ + +#ifdef BCM_SH_SFLASH + extern bool _bcm_sh_sflash; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCM_SH_SFLASH_ENAB() (_bcm_sh_sflash) +#elif defined(BCM_SH_SFLASH_DISABLED) + #define BCM_SH_SFLASH_ENAB() (0) +#else + #define BCM_SH_SFLASH_ENAB() (1) +#endif +#else + #define BCM_SH_SFLASH_ENAB() (0) +#endif /* BCM_SH_SFLASH */ + +#ifdef BCM_SFLASH + extern bool _bcm_sflash; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCM_SFLASH_ENAB() (_bcm_sflash) +#elif defined(BCM_SFLASH_DISABLED) + #define BCM_SFLASH_ENAB() (0) +#else + #define BCM_SFLASH_ENAB() (1) +#endif +#else + #define BCM_SFLASH_ENAB() (0) +#endif /* BCM_SFLASH */ + +#ifdef BCM_DELAY_ON_LTR + extern bool _bcm_delay_on_ltr; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCM_DELAY_ON_LTR_ENAB() (_bcm_delay_on_ltr) +#elif defined(BCM_DELAY_ON_LTR_DISABLED) + #define BCM_DELAY_ON_LTR_ENAB() (0) +#else + #define BCM_DELAY_ON_LTR_ENAB() (1) +#endif +#else + #define BCM_DELAY_ON_LTR_ENAB() (0) +#endif /* BCM_DELAY_ON_LTR */ + +/* Max.
nvram variable table size */ +#ifndef MAXSZ_NVRAM_VARS +#ifdef LARGE_NVRAM_MAXSZ +#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2) +#else +#if defined(BCMROMBUILD) || defined(DONGLEBUILD) +/* SROM12 changes */ +#define MAXSZ_NVRAM_VARS 6144 /* should be reduced */ +#else +#define LARGE_NVRAM_MAXSZ 8192 +#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2) +#endif /* BCMROMBUILD || DONGLEBUILD */ +#endif /* LARGE_NVRAM_MAXSZ */ +#endif /* !MAXSZ_NVRAM_VARS */ + +#ifdef ATE_BUILD +#ifndef ATE_NVRAM_MAXSIZE +#define ATE_NVRAM_MAXSIZE 32000 +#endif /* ATE_NVRAM_MAXSIZE */ +#endif /* ATE_BUILD */ + +#ifdef BCMLFRAG /* BCMLFRAG support enab macros */ + extern bool _bcmlfrag; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMLFRAG_ENAB() (_bcmlfrag) +#elif defined(BCMLFRAG_DISABLED) + #define BCMLFRAG_ENAB() (0) +#else + #define BCMLFRAG_ENAB() (1) +#endif +#else + #define BCMLFRAG_ENAB() (0) +#endif /* BCMLFRAG_ENAB */ + +#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */ +extern bool _pciedevenab; +#if defined(ROM_ENAB_RUNTIME_CHECK) + #define BCMPCIEDEV_ENAB() (_pciedevenab) +#elif defined(BCMPCIEDEV_ENABLED) + #define BCMPCIEDEV_ENAB() 1 +#else + #define BCMPCIEDEV_ENAB() 0 +#endif +#else + #define BCMPCIEDEV_ENAB() 0 +#endif /* BCMPCIEDEV */ + +#ifdef BCMRESVFRAGPOOL /* BCMRESVFRAGPOOL support enab macros */ +extern bool _resvfragpool_enab; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMRESVFRAGPOOL_ENAB() (_resvfragpool_enab) +#elif defined(BCMRESVFRAGPOOL_DISABLED) + #define BCMRESVFRAGPOOL_ENAB() (0) +#else + #define BCMRESVFRAGPOOL_ENAB() (1) +#endif +#else + #define BCMRESVFRAGPOOL_ENAB() 0 +#endif /* BCMPCIEDEV */ + +#ifdef BCMSDIODEV /* BCMSDIODEV support enab macros */ +extern bool _sdiodevenab; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMSDIODEV_ENAB() (_sdiodevenab) +#elif defined(BCMSDIODEV_ENABLED) + #define BCMSDIODEV_ENAB() 1 +#else + #define BCMSDIODEV_ENAB() 0 +#endif +#else + #define BCMSDIODEV_ENAB() 0 +#endif /* BCMSDIODEV */ + +#ifdef BCMSPMIS +extern bool _bcmspmi_enab; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMSPMIS_ENAB() (_bcmspmi_enab) +#elif defined(BCMSPMIS_DISABLED) + #define BCMSPMIS_ENAB() 0 +#else + #define BCMSPMIS_ENAB() 1 +#endif +#else + #define BCMSPMIS_ENAB() 0 +#endif /* BCMSPMIS */ + +#ifdef BCMDVFS /* BCMDVFS support enab macros */ +extern bool _dvfsenab; +#if defined(ROM_ENAB_RUNTIME_CHECK) + #define BCMDVFS_ENAB() (_dvfsenab) +#elif !defined(BCMDVFS_DISABLED) + #define BCMDVFS_ENAB() (1) +#else + #define BCMDVFS_ENAB() (0) +#endif +#else + #define BCMDVFS_ENAB() (0) +#endif /* BCMDVFS */ + +/* Max size for reclaimable NVRAM array */ +#ifndef ATE_BUILD +#ifdef DL_NVRAM +#define NVRAM_ARRAY_MAXSIZE DL_NVRAM +#else +#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS +#endif /* DL_NVRAM */ +#else +#define NVRAM_ARRAY_MAXSIZE ATE_NVRAM_MAXSIZE +#endif /* ATE_BUILD */ + +extern uint32 gFWID; + +#ifdef BCMFRWDPKT /* BCMFRWDPKT support enab macros */ + extern bool _bcmfrwdpkt; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMFRWDPKT_ENAB() (_bcmfrwdpkt) +#elif defined(BCMFRWDPKT_DISABLED) + #define BCMFRWDPKT_ENAB() (0) +#else + #define BCMFRWDPKT_ENAB() (1) +#endif +#else + #define BCMFRWDPKT_ENAB() (0) +#endif /* BCMFRWDPKT */ + +#ifdef BCMFRWDPOOLREORG /* BCMFRWDPOOLREORG support enab macros */ + extern bool _bcmfrwdpoolreorg; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMFRWDPOOLREORG_ENAB() 
(_bcmfrwdpoolreorg) +#elif defined(BCMFRWDPOOLREORG_DISABLED) + #define BCMFRWDPOOLREORG_ENAB() (0) +#else + #define BCMFRWDPOOLREORG_ENAB() (1) +#endif +#else + #define BCMFRWDPOOLREORG_ENAB() (0) +#endif /* BCMFRWDPOOLREORG */ + +#ifdef BCMPOOLRECLAIM /* BCMPOOLRECLAIM support enab macros */ + extern bool _bcmpoolreclaim; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMPOOLRECLAIM_ENAB() (_bcmpoolreclaim) +#elif defined(BCMPOOLRECLAIM_DISABLED) + #define BCMPOOLRECLAIM_ENAB() (0) +#else + #define BCMPOOLRECLAIM_ENAB() (1) +#endif +#else + #define BCMPOOLRECLAIM_ENAB() (0) +#endif /* BCMPOOLRECLAIM */
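All of these feature blocks follow one pattern: ROM and NIC builds test a runtime bool, while fixed dongle builds collapse the macro to a constant 0 or 1 so the compiler can discard the disabled paths entirely. The shape, written out once with an invented feature name (BCMDEMOFEAT and _bcmdemofeat are not part of the driver):

#ifdef BCMDEMOFEAT
	extern bool _bcmdemofeat;
#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
	#define BCMDEMOFEAT_ENAB()	(_bcmdemofeat)	/* runtime switch */
#elif defined(BCMDEMOFEAT_DISABLED)
	#define BCMDEMOFEAT_ENAB()	(0)		/* constant: callers compile away */
#else
	#define BCMDEMOFEAT_ENAB()	(1)		/* constant: callers stay */
#endif
#else
	#define BCMDEMOFEAT_ENAB()	(0)
#endif /* BCMDEMOFEAT */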
#_fn))) _fn +#else /* BCMFASTPATH_EXCLUDE_FROM_ROM */ +#define BCMPOSTTRAPFASTPATH(fn) BCMPOSTTRAPFN(fn) +#endif /* BCMFASTPATH_EXCLUDE_FROM_ROM */ +#else +#if defined(DONGLEBUILD) +#define BCMPOSTTRAPFN(_fn) __attribute__ ((__section__ (".text_posttrap." #_fn))) _fn +#else +#define BCMPOSTTRAPFN(_fn) _fn +#endif /* DONGLEBUILD */ +#define BCMPOSTTRAPRAMFN(fn) BCMPOSTTRAPFN(fn) +#define BCMPOSTTRAPFASTPATH(fn) BCMPOSTTRAPFN(fn) +#endif /* ROMBUILD */ + +typedef struct bcm_rng * bcm_rng_handle_t; + +/* Use BCM_FUNC_PTR() to tag function pointers for ASLR code implementation. It will perform + * run-time relocation of a function pointer by translating it from a physical to virtual address. + * + * BCM_FUNC_PTR() should only be used where the function name is referenced (corresponding to the + * relocation entry for that symbol). It should not be used when the function pointer is invoked. + */ +void* BCM_ASLR_CODE_FNPTR_RELOCATOR(void *func_ptr); +#if defined(BCM_ASLR_CODE_FNPTR_RELOC) + /* 'func_ptr_err_chk' performs a compile time error check to ensure that only a constant + * function name is passed as an argument to BCM_FUNC_PTR(). This ensures that the macro is + * only used for function pointer references, and not for function pointer invocations. + */ + #define BCM_FUNC_PTR(func) \ + ({ static void *func_ptr_err_chk __attribute__ ((unused)) = (func); \ + BCM_ASLR_CODE_FNPTR_RELOCATOR(func); }) +#else + #define BCM_FUNC_PTR(func) (func) +#endif /* BCM_ASLR_CODE_FNPTR_RELOC */ + +/* + * Timestamps have this tag appended following a null byte which + * helps comparison/hashing scripts find and ignore them. + */ +#define TIMESTAMP_SUFFIX "" + +#ifdef ASLR_STACK +/* MMU main thread stack data */ +#define BCM_MMU_MTH_STK_DATA(_data) __attribute__ ((__section__ (".mmu_mth_stack." #_data))) _data +#endif /* ASLR_STACK */ + +/* Special section for MMU page-tables. */ +#define BCM_MMU_PAGE_TABLE_DATA(_data) \ + __attribute__ ((__section__ (".mmu_pagetable." #_data))) _data + +/* Some phy initialization code/data can't be reclaimed in dualband mode */ +#if defined(DBAND) +#define WLBANDINITDATA(_data) _data +#define WLBANDINITFN(_fn) _fn +#else +#define WLBANDINITDATA(_data) BCMINITDATA(_data) +#define WLBANDINITFN(_fn) BCMINITFN(_fn) +#endif + +/* Tag struct members to make it explicitly clear that they are physical addresses. These are + * typically used in data structs shared by the firmware and host code (or off-line utilities). The + * use of the macro avoids customer visible API/name changes. + */ +#if defined(BCM_PHYS_ADDR_NAME_CONVERSION) + #define PHYS_ADDR_N(name) name ## _phys +#else + #define PHYS_ADDR_N(name) name +#endif + +/* + * A compact form for a list of valid register address offsets. + * Used for when dumping the contents of the register set for the user. + * + * bmp_cnt has either bitmap or count. If the MSB (bit 31) is set, then + * bmp_cnt[30:0] has count, i.e, number of valid registers whose values are + * contigous from the start address. If MSB is zero, then the value + * should be considered as a bitmap of 31 discreet addresses from the base addr. + * Note: the data type for bmp_cnt is chosen as an array of uint8 to avoid padding. 
+ */ +typedef struct _regs_bmp_list { + uint16 addr; /* start address offset */ + uint8 bmp_cnt[4]; /* bit[31]=1, bit[30:0] is count else it is a bitmap */ +} regs_list_t; + +#endif /* _bcmdefs_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmdevs.h b/bcmdhd.101.10.361.x/include/bcmdevs.h new file mode 100755 index 0000000..87a884c --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmdevs.h @@ -0,0 +1,626 @@ +/* + * Broadcom device-specific manifest constants. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _BCMDEVS_H +#define _BCMDEVS_H + +/* PCI vendor IDs */ +#define VENDOR_EPIGRAM 0xfeda +#define VENDOR_BROADCOM 0x14e4 +#define VENDOR_3COM 0x10b7 +#define VENDOR_NETGEAR 0x1385 +#define VENDOR_DIAMOND 0x1092 +#define VENDOR_INTEL 0x8086 +#define VENDOR_DELL 0x1028 +#define VENDOR_HP 0x103c +#define VENDOR_HP_COMPAQ 0x0e11 +#define VENDOR_APPLE 0x106b +#define VENDOR_SI_IMAGE 0x1095 /* Silicon Image, used by Arasan SDIO Host */ +#define VENDOR_BUFFALO 0x1154 /* Buffalo vendor id */ +#define VENDOR_TI 0x104c /* Texas Instruments */ +#define VENDOR_RICOH 0x1180 /* Ricoh */ +#define VENDOR_JMICRON 0x197b + +/* precommit failed when this is removed */ +/* BLAZAR_BRANCH_101_10_DHD_001/build/dhd/linux-fc19/brix-brcm */ +/* TBD: Revisit later */ +#ifdef BCMINTERNAL +#define VENDOR_JINVANI 0x1947 /* Jinvani Systech, Inc. 
*/ +#endif + +/* PCMCIA vendor IDs */ +#define VENDOR_BROADCOM_PCMCIA 0x02d0 + +/* SDIO vendor IDs */ +#define VENDOR_BROADCOM_SDIO 0x00BF + +/* DONGLE VID/PIDs */ +#define BCM_DNGL_VID 0x0a5c +#define BCM_DNGL_BL_PID_4328 0xbd12 +#define BCM_DNGL_BL_PID_4332 0xbd18 +#define BCM_DNGL_BL_PID_4360 0xbd1d + +#define BCM_DNGL_BDC_PID 0x0bdc +#define BCM_DNGL_JTAG_PID 0x4a44 + +/* Pseudo IDs */ +#define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */ +#define BCM_JTAGM_ID 0x43f1 /* BCM jtagm device id */ +#define SDIOH_FPGA_ID 0x43f2 /* sdio host fpga */ +#define BCM_SDIOH_ID 0x43f3 /* BCM sdio host id */ +#define SDIOD_FPGA_ID 0x43f4 /* sdio device fpga */ +#define SPIH_FPGA_ID 0x43f5 /* PCI SPI Host Controller FPGA */ +#define BCM_SPIH_ID 0x43f6 /* Synopsis SPI Host Controller */ +#define MIMO_FPGA_ID 0x43f8 /* FPGA mimo minimacphy device id */ +#define BCM_JTAGM2_ID 0x43f9 /* BCM alternate jtagm device id */ +#define SDHCI_FPGA_ID 0x43fa /* Standard SDIO Host Controller FPGA */ +#define BCM4710_DEVICE_ID 0x4710 /* 4710 primary function 0 */ +#define BCM47XX_AUDIO_ID 0x4711 /* 47xx audio codec */ +#define BCM47XX_V90_ID 0x4712 /* 47xx v90 codec */ +#define BCM47XX_ENET_ID 0x4713 /* 47xx enet */ +#define BCM47XX_EXT_ID 0x4714 /* 47xx external i/f */ +#define BCM47XX_GMAC_ID 0x4715 /* 47xx Unimac based GbE */ +#define BCM47XX_USBH_ID 0x4716 /* 47xx usb host */ +#define BCM47XX_USBD_ID 0x4717 /* 47xx usb device */ +#define BCM47XX_IPSEC_ID 0x4718 /* 47xx ipsec */ +#define BCM47XX_ROBO_ID 0x4719 /* 47xx/53xx roboswitch core */ +#define BCM47XX_USB20H_ID 0x471a /* 47xx usb 2.0 host */ +#define BCM47XX_USB20D_ID 0x471b /* 47xx usb 2.0 device */ +#define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */ +#define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */ +#define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */ +#define BCM47XX_USB30H_ID 0x472a /* 47xx usb 3.0 host */ +#define BCM47XX_USB30D_ID 0x472b /* 47xx usb 3.0 device */ +#define BCM47XX_USBHUB_ID 0x472c /* 47xx usb hub */ +#define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */ +#define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */ +#define JINVANI_SDIOH_ID 0x4743 /* Jinvani SDIO Gold Host */ +#define BCM27XX_SDIOH_ID 0x2702 /* BCM27xx Standard SDIO Host */ +#define PCIXX21_FLASHMEDIA_ID 0x803b /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH_ID 0x803c /* TI PCI xx21 Standard Host Controller */ +#define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */ +#define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */ + +/* PCI Device IDs */ +/* DEPRECATED but used */ +#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */ +/* DEPRECATED */ + +#define BCM4360_D11AC_ID 0x43a0 +#define BCM4360_D11AC2G_ID 0x43a1 +#define BCM4360_D11AC5G_ID 0x43a2 +#define BCM4352_D11AC_ID 0x43b1 /* 4352 802.11ac dualband device */ +#define BCM4352_D11AC2G_ID 0x43b2 /* 4352 802.11ac 2.4G device */ +#define BCM4352_D11AC5G_ID 0x43b3 /* 4352 802.11ac 5G device */ +#define BCM43602_D11AC_ID 0x43ba /* ac dualband PCI devid SPROM programmed */ +#define BCM43602_D11AC2G_ID 0x43bb /* 43602 802.11ac 2.4G device */ +#define BCM43602_D11AC5G_ID 0x43bc /* 43602 802.11ac 5G device */ + +#define BCM43012_D11N_ID 0xA804 /* 43012 802.11n dualband device */ +#define BCM43012_D11N2G_ID 0xA805 /* 43012 802.11n 2.4G device */ +#define BCM43012_D11N5G_ID 0xA806 /* 43012 802.11n 5G device */ +#define BCM43014_D11N_ID 0x4495 /* 43014 802.11n dualband device */ +#define BCM43014_D11N2G_ID 0x4496 /* 
43014 802.11n 2.4G device */ +#define BCM43014_D11N5G_ID 0x4497 /* 43014 802.11n 5G device */ +#define BCM43013_D11N_ID 0x4498 /* 43013 802.11n dualband device */ +#define BCM43013_D11N2G_ID 0x4499 /* 43013 802.11n 2.4G device */ +#define BCM43013_D11N5G_ID 0x449a /* 43013 802.11n 5G device */ + +/* PCI Subsystem ID */ +#define BCM4376_D11AX_ID 0x4445 /* 4376 802.11ax dualband device */ +#define BCM4376_D11AX2G_ID 0x4436 /* 4376 802.11ax 2.4G device */ +#define BCM4376_D11AX5G_ID 0x4437 /* 4376 802.11ax 5G device */ + +#define BCM4378_D11AX_ID 0x4425 /* 4378 802.11ax dualband device */ +#define BCM4378_D11AX2G_ID 0x4426 /* 4378 802.11ax 2.4G device */ +#define BCM4378_D11AX5G_ID 0x4427 /* 4378 802.11ax 5G device */ + +#define BCM4387_D11AX_ID 0x4433 /* 4387 802.11ax dualband device */ +#define BCM4388_D11AX_ID 0x4434 /* 4388 802.11ax dualband device */ +#define BCM4385_D11AX_ID 0x4442 /* 4385 802.11ax dualband device */ +#define BCM4389_D11AX_ID 0x4441 /* 4389 802.11ax dualband device */ +#define BCM4397_D11AX_ID 0x4443 /* 4397 802.11ax dualband device */ + +#define BCM4362_D11AX_ID 0x4490 /* 4362 802.11ax dualband device */ +#define BCM4362_D11AX2G_ID 0x4491 /* 4362 802.11ax 2.4G device */ +#define BCM4362_D11AX5G_ID 0x4492 /* 4362 802.11ax 5G device */ +#define BCM43751_D11AX_ID 0x449a /* 43751 802.11ac dualband device */ +#define BCM43751_D11AX2G_ID 0x449b /* 43751 802.11ac 2.4G device */ +#define BCM43751_D11AX5G_ID 0x449c /* 43751 802.11ac 5G device */ +#define BCM43752_D11AX_ID 0x449d /* 43752 802.11ax dualband device */ +#define BCM43752_D11AX2G_ID 0x449e /* 43752 802.11ax 2.4G device */ +#define BCM43752_D11AX5G_ID 0x449f /* 43752 802.11ax 5G device */ + +/* TBD change below values */ +#define BCM4369_D11AX_ID 0x4470 /* 4369 802.11ax dualband device */ +#define BCM4369_D11AX2G_ID 0x4471 /* 4369 802.11ax 2.4G device */ +#define BCM4369_D11AX5G_ID 0x4472 /* 4369 802.11ax 5G device */ + +#define BCM4375_D11AX_ID 0x4475 /* 4375 802.11ax dualband device */ +#define BCM4375_D11AX2G_ID 0x4476 /* 4375 802.11ax 2.4G device */ +#define BCM4375_D11AX5G_ID 0x4477 /* 4375 802.11ax 5G device */ + +#define BCM4377_D11AX_ID 0x4480 /* 4377 802.11ax dualband device */ +#define BCM4377_D11AX2G_ID 0x4481 /* 4377 802.11ax 2.4G device */ +#define BCM4377_D11AX5G_ID 0x4482 /* 4377 802.11ax 5G device */ + +/* 4377 802.11ax dualband device with multifunction */ +#define BCM4377_M_D11AX_ID 0x4488 + +/* Chip IDs */ + +#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */ +#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */ +#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */ +#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */ +#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */ +#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */ +#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */ +#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */ +#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */ +#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */ +#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */ +#define BCM43526_CHIP_ID 0xAA06 +#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */ +#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */ +#define BCM43562_CHIP_ID 0xAA2A /* 43562 chipcommon chipid */ +#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */ +#define BCM43013_CHIP_ID 0xA805 /* 43013 chipcommon chipid */ +#define BCM43014_CHIP_ID 0xA806 /* 43014 chipcommon chipid */ +#define 
BCM4369_CHIP_ID 0x4369 /* 4369 chipcommon chipid */ +#define BCM4375_CHIP_ID 0x4375 /* 4375 chipcommon chipid */ +#define BCM4376_CHIP_ID 0x4376 /* 4376 chipcommon chipid */ +#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */ +#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */ +#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */ +#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */ + +#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */ +#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */ +#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */ +#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */ +#define BCM4362_CHIP_ID 0x4362 /* 4362 chipcommon chipid */ +#define BCM43751_CHIP_ID 0xAAE7 /* 43751 chipcommon chipid */ +#define BCM43752_CHIP_ID 0xAAE8 /* 43752 chipcommon chipid */ +#define BCM4369_CHIP_ID 0x4369 /* 4369 chipcommon chipid */ +#define BCM4377_CHIP_ID 0x4377 /* 4377 chipcommon chipid */ +#define BCM4378_CHIP_ID 0x4378 /* 4378 chipcommon chipid */ +#define BCM4385_CHIP_ID 0x4385 /* 4385 chipcommon chipid */ +#define BCM4387_CHIP_ID 0x4387 /* 4387 chipcommon chipid */ +#define BCM4388_CHIP_ID 0x4388 /* 4388 chipcommon chipid */ +#define BCM4389_CHIP_ID 0x4389 /* 4389 chipcommon chipid */ +#define BCM4397_CHIP_ID 0x4397 /* 4397 chipcommon chipid */ + +#define BCM4362_CHIP(chipid) (CHIPID(chipid) == BCM4362_CHIP_ID) +#define BCM4362_CHIP_GRPID BCM4362_CHIP_ID + +#define BCM4369_CHIP(chipid) ((CHIPID(chipid) == BCM4369_CHIP_ID) || \ + (CHIPID(chipid) == BCM4377_CHIP_ID) || \ + (CHIPID(chipid) == BCM4375_CHIP_ID)) +#define BCM4369_CHIP_GRPID BCM4369_CHIP_ID: \ + case BCM4377_CHIP_ID: \ + case BCM4375_CHIP_ID + +#define BCM4385_CHIP(chipid) (CHIPID(chipid) == BCM4385_CHIP_ID) +#define BCM4385_CHIP_GRPID BCM4385_CHIP_ID + +#define BCM4378_CHIP(chipid) (CHIPID(chipid) == BCM4378_CHIP_ID) +#define BCM4378_CHIP_GRPID BCM4378_CHIP_ID + +#define BCM4376_CHIP_GRPID BCM4376_CHIP_ID +#define BCM4376_CHIP(chipid) (CHIPID(chipid) == BCM4376_CHIP_ID) + +#define BCM4387_CHIP(chipid) (CHIPID(chipid) == BCM4387_CHIP_ID) +#define BCM4387_CHIP_GRPID BCM4387_CHIP_ID + +#define BCM4388_CHIP(chipid) (CHIPID(chipid) == BCM4388_CHIP_ID) +#define BCM4388_CHIP_GRPID BCM4388_CHIP_ID + +#define BCM4389_CHIP(chipid) (CHIPID(chipid) == BCM4389_CHIP_ID) +#define BCM4389_CHIP_GRPID BCM4389_CHIP_ID + +#define BCM4397_CHIP(chipid) (CHIPID(chipid) == BCM4397_CHIP_ID) +#define BCM4397_CHIP_GRPID BCM4397_CHIP_ID + +#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */ +#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */ +#define BCM43522_CHIP_ID 0xaa02 /* 43522 chipcommon chipid */ +#define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \ + (CHIPID(chipid) == BCM43462_CHIP_ID) || \ + (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */ +#define BCM43012_CHIP(chipid) ((CHIPID(chipid) == BCM43012_CHIP_ID) || \ + (CHIPID(chipid) == BCM43013_CHIP_ID) || \ + (CHIPID(chipid) == BCM43014_CHIP_ID)) +#define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \ + case BCM43462_CHIP_ID: /* fallthrough */ \ + case BCM43522_CHIP_ID + +/* Package IDs */ + +#define HDLSIM_PKG_ID 14 /* HDL simulator package id */ +#define HWSIM_PKG_ID 15 /* Hardware simulator package id */ + +#define PCIXX21_FLASHMEDIA0_ID 0x8033 /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH0_ID 0x8034 /* TI PCI xx21 Standard Host Controller */ + +#define BCM43602_12x12_PKG_ID (0x1) /* 12x12 pins package, used for e.g. 
router designs */ + +/* 43012 package ID's + http://confluence.broadcom.com/display/WLAN/BCM43012+Variants%2Cpackage%2Cballmap%2Cfloorplan# + BCM43012Variants,package,ballmap,floorplan-PackageOptions +*/ +#define BCM943012_WLCSPOLY_PKG_ID 0x0 /* WLCSP Oly package */ +#define BCM943012_FCBGA_PKG_ID 0x3 /* FCBGA debug package */ +#define BCM943012_WLCSPWE_PKG_ID 0x1 /* WLCSP WE package */ +#define BCM943012_FCBGAWE_PKG_ID 0x5 /* FCBGA WE package */ +#define BCM943012_WLBGA_PKG_ID 0x2 /* WLBGA package */ + +/* boardflags */ +#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */ +#define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */ +#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio radio disable indication */ +#define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */ +#define BFL_DIS_256QAM 0x00000008 + /* for 4360, this bit is to disable 256QAM support */ +#define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */ +#define BFL_TSSIAVG 0x00000010 /* TSSI averaging for ACPHY chips */ +#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */ +#define BFL_CCKHIPWR 0x00000040 /* Can do high-power CCK transmission */ +#define BFL_ENETADM 0x00000080 /* Board has ADMtek switch */ +#define BFL_ENETVLAN 0x00000100 /* Board has VLAN capability */ +#define BFL_LTECOEX 0x00000200 /* LTE Coex enabled */ +#define BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ +#define BFL_FEM 0x00000800 /* Board supports the Front End Module */ +#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_HGPA 0x00002000 /* Board has a high gain PA */ +#define BFL_BTC2WIRE_ALTGPIO 0x00004000 /* Board's BTC 2wire is in the alternate gpios */ +#define BFL_ALTIQ 0x00008000 /* Alternate I/Q settings */ +#define BFL_NOPA 0x00010000 /* Board has no PA */ +#define BFL_RSSIINV 0x00020000 /* Board's RSSI uses positive slope(not TSSI) */ +#define BFL_PAREF 0x00040000 /* Board uses the PARef LDO */ +#define BFL_3TSWITCH 0x00080000 /* Board uses a triple throw switch shared with BT */ +#define BFL_PHASESHIFT 0x00100000 /* Board can support phase shifter */ +#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */ +#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */ +#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */ +#define BFL_CCKFAVOREVM 0x01000000 /* Favor CCK EVM over spectral mask */ +#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */ +#define BFL_LNLDO2_2P5 0x04000000 /* Select 2.5V as LNLDO2 output voltage */ +/* BFL_FASTPWR and BFL_UCPWRCTL_MININDX are non-overlaping features and use the same bit */ +#define BFL_FASTPWR 0x08000000 /* Fast switch/antenna powerup (no POR WAR) */ +#define BFL_UCPWRCTL_MININDX 0x08000000 /* Enforce min power index to avoid FEM damage */ +#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_TRSW_1by2 0x20000000 /* Board has 2 TRSW's in 1by2 designs */ +#define BFL_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL_LO_TRSW_R_5GHz 0x40000000 /* In 5G do not throw TRSW to T for clipLO gain */ +#define BFL_ELNA_GAINDEF 0x80000000 /* Backoff InitGain based on elna_2g/5g field + * when this flag is set + */ +#define BFL_EXTLNA_TX 0x20000000 /* Temp boardflag to indicate to */ + +/* boardflags2 */ +#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb 
regulator */ +#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */ +#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */ +#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */ +#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */ +#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */ +#define BFL2_WLCX_ATLAS 0x00000040 /* Board flag to initialize ECI for WLCX on FL-ATLAS */ +#define BFL2_BTC3WIRE 0x00000080 /* Board support legacy 3 wire or 4 wire */ +#define BFL2_BTCLEGACY 0x00000080 /* Board support legacy 3/4 wire, to replace + * BFL2_BTC3WIRE + */ +#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */ +#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */ +#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */ +#define BFL2_TRISTATE_LED 0x00000800 /* Tri-state the LED */ +#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */ +#define BFL2_BPHY_ALL_TXCORES 0x00004000 /* Transmit bphy frames using all tx cores */ +#define BFL2_FCC_BANDEDGE_WAR 0x00008000 /* Activates WAR to improve FCC bandedge performance */ +#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000 /* Reducing DAC Spurs */ +#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */ +#define BFL2_REDUCED_PA_TURNONTIME 0x00010000 /* Flag to reduce PA turn on Time */ +#define BFL2_IPALVLSHIFT_3P3 0x00020000 /* Flag to Activate the PR 74115 PA Level Shift + * Workaround where the gpaio pin is connected to 3.3V + */ +#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */ +#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio on */ + /* Most drivers will turn it off without this flag */ + /* to save power. */ + +#define BFL2_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ELNACTRL_TRSW_2G 0x00400000 /* AZW4329: 2G gmode_elna_gain controls TR Switch */ +#define BFL2_BT_SHARE_ANT0 0x00800000 /* share core0 antenna with BT */ +#define BFL2_TEMPSENSE_HIGHER 0x01000000 /* The tempsense threshold can sustain higher value + * than programmed. The exact delta is decided by + * driver per chip/boardtype. This can be used + * when tempsense qualification happens after shipment + */ +#define BFL2_BTC3WIREONLY 0x02000000 /* standard 3 wire btc only. 
4 wire not supported */ +#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */ +#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */ + /* ucode control of eLNA during Tx */ +#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */ +#define BFL2_DYNAMIC_VMID 0x10000000 /* boardflag to enable dynamic Vmid idle TSSI CAL */ +#define BFL2_LNA1BYPFORTR2G 0x40000000 /* acphy, enable lna1 bypass for clip gain, 2g */ +#define BFL2_LNA1BYPFORTR5G 0x80000000 /* acphy, enable lna1 bypass for clip gain, 5g */ + +/* SROM 11 - 11ac boardflag definitions */ +#define BFL_SROM11_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_SROM11_WLAN_BT_SH_XTL 0x00000002 /* bluetooth and wlan share same crystal */ +#define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */ +#define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15 +#define BFL_SROM11_PRECAL_TX_IDX 0x00040000 /* Dedicated TX IQLOCAL IDX values */ + /* per subband, as derived from 43602A1 MCH5 */ +#define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL 0x00020000 /* Keep ext. PA's on in TX IQLO CAL */ + +/* boardflags3 */ +#define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */ +#define BFL3_RCAL_WAR 0x00000008 /* acphy, rcal war active on this board (4335a0) */ +#define BFL3_TXGAINTBLID 0x00000070 /* acphy, txgain table id */ +#define BFL3_TXGAINTBLID_SHIFT 0x4 /* acphy, txgain table id shift bit */ +#define BFL3_TSSI_DIV_WAR 0x00000080 /* acphy, Seperate paparam for 20/40/80 */ +#define BFL3_TSSI_DIV_WAR_SHIFT 0x7 /* acphy, Seperate paparam for 20/40/80 shift bit */ +#define BFL3_FEMTBL_FROM_NVRAM 0x00000100 /* acphy, femctrl table is read from nvram */ +#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8 /* acphy, femctrl table is read from nvram */ +#define BFL3_AGC_CFG_2G 0x00000200 /* acphy, gain control configuration for 2G */ +#define BFL3_AGC_CFG_5G 0x00000400 /* acphy, gain control configuration for 5G */ +#define BFL3_PPR_BIT_EXT 0x00000800 /* acphy, bit position for 1bit extension for ppr */ +#define BFL3_PPR_BIT_EXT_SHIFT 11 /* acphy, bit shift for 1bit extension for ppr */ +#define BFL3_BBPLL_SPR_MODE_DIS 0x00001000 /* acphy, disables bbpll spur modes */ +#define BFL3_RCAL_OTP_VAL_EN 0x00002000 /* acphy, to read rcal_trim value from otp */ +#define BFL3_2GTXGAINTBL_BLANK 0x00004000 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK 0x00008000 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_PHASETRACK_MAX_ALPHABETA 0x00010000 /* acphy, to max out alpha,beta to 511 */ +#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16 /* acphy, to max out alpha,beta to 511 */ +/* acphy, to use backed off 
gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN 0x00060000 +/* acphy, to use backed off gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17 +#define BFL3_5G_SPUR_WAR 0x00080000 /* acphy, enable spur WAR in 5G band */ + +/* acphy: lpmode2g and lpmode_5g related boardflags */ +#define BFL3_ACPHY_LPMODE_2G 0x00300000 /* bits 20:21 for lpmode_2g choice */ +#define BFL3_ACPHY_LPMODE_2G_SHIFT 20 + +#define BFL3_ACPHY_LPMODE_5G 0x00C00000 /* bits 22:23 for lpmode_5g choice */ +#define BFL3_ACPHY_LPMODE_5G_SHIFT 22 + +#define BFL3_1X1_RSDB_ANT 0x01000000 /* to find if 2-ant RSDB board or 1-ant RSDB board */ +#define BFL3_1X1_RSDB_ANT_SHIFT 24 + +#define BFL3_EXT_LPO_ISCLOCK 0x02000000 /* External LPO is clock, not x-tal */ +#define BFL3_FORCE_INT_LPO_SEL 0x04000000 /* Force internal lpo */ +#define BFL3_FORCE_EXT_LPO_SEL 0x08000000 /* Force external lpo */ + +#define BFL3_EN_BRCM_IMPBF 0x10000000 /* acphy, Allow BRCM Implicit TxBF */ + +#define BFL3_PADCAL_OTP_VAL_EN 0x20000000 /* acphy, to read pad cal values from otp */ + +#define BFL3_AVVMID_FROM_NVRAM 0x40000000 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM 0x80000000 /* Read Vlin En from NVRAM */ + +#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */ + +/* boardflags4 for SROM12/SROM13 */ + +/* To distinguish between normal and 4dB pad board */ +#define BFL4_SROM12_4dBPAD (1u << 0) + +/* Determine power detector type for 2G */ +#define BFL4_SROM12_2G_DETTYPE (1u << 1u) + +/* Determine power detector type for 5G */ +#define BFL4_SROM12_5G_DETTYPE (1u << 2u) + +/* using pa_dettype from SROM13 flags */ +#define BFL4_SROM13_DETTYPE_EN (1u << 3u) + +/* using cck spur reduction setting */ +#define BFL4_SROM13_CCK_SPUR_EN (1u << 4u) + +/* using 1.5V cbuck board */ +#define BFL4_SROM13_1P5V_CBUCK (1u << 7u) + +/* Enable/disable bit for sw chain mask */ +#define BFL4_SROM13_EN_SW_TXRXCHAIN_MASK (1u << 8u) + +#define BFL4_BTCOEX_OVER_SECI 0x00000400u /* Enable btcoex over gci seci */ + +/* RFFE rFEM 5G and 2G present bit */ +#define BFL4_FEM_RFFE (1u << 21u) + +/* papd params */ +#define PAPD_TX_ATTN_2G 0xFF +#define PAPD_TX_ATTN_5G 0xFF00 +#define PAPD_TX_ATTN_5G_SHIFT 8 +#define PAPD_RX_ATTN_2G 0xFF +#define PAPD_RX_ATTN_5G 0xFF00 +#define PAPD_RX_ATTN_5G_SHIFT 8 +#define PAPD_CAL_IDX_2G 0xFF +#define PAPD_CAL_IDX_5G 0xFF00 +#define PAPD_CAL_IDX_5G_SHIFT 8 +#define PAPD_BBMULT_2G 0xFF +#define PAPD_BBMULT_5G 0xFF00 +#define PAPD_BBMULT_5G_SHIFT 8 +#define TIA_GAIN_MODE_2G 0xFF +#define TIA_GAIN_MODE_5G 0xFF00 +#define TIA_GAIN_MODE_5G_SHIFT 8 +#define PAPD_EPS_OFFSET_2G 0xFFFF +#define PAPD_EPS_OFFSET_5G 0xFFFF0000 +#define PAPD_EPS_OFFSET_5G_SHIFT 16 +#define PAPD_CALREF_DB_2G 0xFF +#define PAPD_CALREF_DB_5G 0xFF00 +#define PAPD_CALREF_DB_5G_SHIFT 8 + +/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */ +#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */ +#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */ +#define BOARD_GPIO_BTCMOD_IN 0x010 /* bit 4 is the alternate BT Coexistence Input */ +#define BOARD_GPIO_BTCMOD_OUT 0x020 /* bit 5 is the alternate BT Coexistence Out */ +#define BOARD_GPIO_BTC_IN 0x080 /* bit 7 is BT Coexistence Input */ +#define BOARD_GPIO_BTC_OUT 0x100 /* bit 8 is BT Coexistence Out */ +#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */ +#define BOARD_GPIO_12 0x1000 /* gpio 12 */ +#define
BOARD_GPIO_13 0x2000 /* gpio 13 */ +#define BOARD_GPIO_BTC4_IN 0x0800 /* gpio 11, coex4, in */ +#define BOARD_GPIO_BTC4_BT 0x2000 /* gpio 12, coex4, bt active */ +#define BOARD_GPIO_BTC4_STAT 0x4000 /* gpio 14, coex4, status */ +#define BOARD_GPIO_BTC4_WLAN 0x8000 /* gpio 15, coex4, wlan active */ +#define BOARD_GPIO_1_WLAN_PWR 0x02 /* throttle WLAN power on X21 board */ +#define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */ +#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */ +#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */ +#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */ + +#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */ + +#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */ +#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */ + +/* need to be moved to a chip specific header file */ +/* power control defines */ +#define PLL_DELAY 150 /* us pll on delay */ +#define FREF_DELAY 200 /* us fref change delay */ +#define MIN_SLOW_CLK 32 /* us Slow clock period */ +#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */ + +/* Board IDs */ + +/* Reference Board Types */ +#define BU4710_BOARD 0x0400 +#define VSIM4710_BOARD 0x0401 +#define QT4710_BOARD 0x0402 + +#define BCM94710D_BOARD 0x041a +#define BCM94710R1_BOARD 0x041b +#define BCM94710R4_BOARD 0x041c +#define BCM94710AP_BOARD 0x041d + +#define BU2050_BOARD 0x041f + +/* BCM4318 boards */ +#define BU4318_BOARD 0x0447 +#define CB4318_BOARD 0x0448 +#define MPG4318_BOARD 0x0449 +#define MP4318_BOARD 0x044a +#define SD4318_BOARD 0x044b +#define BCM94318MPGH_BOARD 0x0463 + +/* 4321 boards */ +#define BU4321_BOARD 0x046b +#define BU4321E_BOARD 0x047c +#define MP4321_BOARD 0x046c +#define CB2_4321_BOARD 0x046d +#define CB2_4321_AG_BOARD 0x0066 +#define MC4321_BOARD 0x046e + +/* 4360 Boards */ +#define BCM94360X52C 0X0117 +#define BCM94360X52D 0X0137 +#define BCM94360X29C 0X0112 +#define BCM94360X29CP2 0X0134 +#define BCM94360X29CP3 0X013B +#define BCM94360X51 0x0111 +#define BCM94360X51P2 0x0129 +#define BCM94360X51P3 0x0142 +#define BCM94360X51A 0x0135 +#define BCM94360X51B 0x0136 +#define BCM94360CS 0x061B +#define BCM94360J28_D11AC2G 0x0c00 +#define BCM94360J28_D11AC5G 0x0c01 +#define BCM94360USBH5_D11AC5G 0x06aa +#define BCM94360MCM5 0x06d8 + +/* need to update si_fixup_vid_overrides() for additional platforms */ + +/* 43012 wlbga Board */ +#define BCM943012WLREF_SSID 0x07d7 + +/* 43012 fcbga Board */ +#define BCM943012FCREF_SSID 0x07d4 + +/* 43602 Boards, unclear yet what boards will be created. 
*/ +#define BCM943602RSVD1_SSID 0x06a5 +#define BCM943602RSVD2_SSID 0x06a6 +#define BCM943602X87 0X0133 +#define BCM943602X87P2 0X0152 +#define BCM943602X87P3 0X0153 /* need to update si_fixup_vid_overrides() */ +#define BCM943602X238 0X0132 +#define BCM943602X238D 0X014A +#define BCM943602X238DP2 0X0155 /* J117 */ +#define BCM943602X238DP3 0X0156 /* J94 */ +#define BCM943602X100 0x0761 /* Dev only */ +#define BCM943602X100GS 0x0157 /* Woody */ +#define BCM943602X100P2 0x015A /* Buzz, Zurg */ + +/* 4375B0 WLCSP SEMCO Board */ +#define BCM94375B0_WLCSP_SSID 0x086b + +/* # of GPIO pins */ +#define GPIO_NUMPINS 32 + +/* chip RAM specifications */ +#define RDL_RAM_SIZE_4360 0xA0000 +#define RDL_RAM_BASE_4360 0x60000000 + +/* generic defs for nvram "muxenab" bits +* Note: these differ for 4335a0. refer bcmchipc.h for specific mux options. +*/ +#define MUXENAB_UART 0x00000001 +#define MUXENAB_GPIO 0x00000002 +#define MUXENAB_ERCX 0x00000004 /* External Radio BT coex */ +#define MUXENAB_JTAG 0x00000008 +#define MUXENAB_HOST_WAKE 0x00000010 /* configure GPIO for SDIO host_wake */ +#define MUXENAB_I2S_EN 0x00000020 +#define MUXENAB_I2S_MASTER 0x00000040 +#define MUXENAB_I2S_FULL 0x00000080 +#define MUXENAB_SFLASH 0x00000100 +#define MUXENAB_RFSWCTRL0 0x00000200 +#define MUXENAB_RFSWCTRL1 0x00000400 +#define MUXENAB_RFSWCTRL2 0x00000800 +#define MUXENAB_SECI 0x00001000 +#define MUXENAB_BT_LEGACY 0x00002000 +#define MUXENAB_HOST_WAKE1 0x00004000 /* configure alternative GPIO for SDIO host_wake */ + +/* Boot flags */ +#define FLASH_KERNEL_NFLASH 0x00000001 +#define FLASH_BOOT_NFLASH 0x00000002 + +#endif /* _BCMDEVS_H */ diff --git a/bcmdhd.101.10.361.x/include/bcmdevs_legacy.h b/bcmdhd.101.10.361.x/include/bcmdevs_legacy.h new file mode 100755 index 0000000..6e57c42 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmdevs_legacy.h @@ -0,0 +1,188 @@ +/* + * Broadcom device-specific manifest constants used by DHD, but deprecated in firmware. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#ifndef _bcmdevs_legacy_h_ +#define _bcmdevs_legacy_h_ + +/* DONGLE VID/PIDs */ +#define BCM_DNGL_BL_PID_4322 0xbd13 +#define BCM_DNGL_BL_PID_4319 0xbd16 +#define BCM_DNGL_BL_PID_43236 0xbd17 +#define BCM_DNGL_BL_PID_43143 0xbd1e +#define BCM_DNGL_BL_PID_43242 0xbd1f +#define BCM_DNGL_BL_PID_4350 0xbd23 +#define BCM_DNGL_BL_PID_43569 0xbd27 + +/* PCI Device IDs */ +#define BCM4335_D11AC_ID 0x43ae +#define BCM4335_D11AC2G_ID 0x43af +#define BCM4335_D11AC5G_ID 0x43b0 +#define BCM4345_D11AC_ID 0x43ab /* 4345 802.11ac dualband device */ +#define BCM4345_D11AC2G_ID 0x43ac /* 4345 802.11ac 2.4G device */ +#define BCM4345_D11AC5G_ID 0x43ad /* 4345 802.11ac 5G device */ +#define BCM43452_D11AC_ID 0x47ab /* 43452 802.11ac dualband device */ +#define BCM43452_D11AC2G_ID 0x47ac /* 43452 802.11ac 2.4G device */ +#define BCM43452_D11AC5G_ID 0x47ad /* 43452 802.11ac 5G device */ +#define BCM4347_D11AC_ID 0x440a /* 4347 802.11ac dualband device */ +#define BCM4347_D11AC2G_ID 0x440b /* 4347 802.11ac 2.4G device */ +#define BCM4347_D11AC5G_ID 0x440c /* 4347 802.11ac 5G device */ +#define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */ +#define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */ +#define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */ + +#define BCM4350_D11AC_ID 0x43a3 +#define BCM4350_D11AC2G_ID 0x43a4 +#define BCM4350_D11AC5G_ID 0x43a5 +#define BCM4354_D11AC_ID 0x43df /* 4354 802.11ac dualband device */ +#define BCM4354_D11AC2G_ID 0x43e0 /* 4354 802.11ac 2.4G device */ +#define BCM4354_D11AC5G_ID 0x43e1 /* 4354 802.11ac 5G device */ +#define BCM4355_D11AC_ID 0x43dc /* 4355 802.11ac dualband device */ +#define BCM4355_D11AC2G_ID 0x43fc /* 4355 802.11ac 2.4G device */ +#define BCM4355_D11AC5G_ID 0x43fd /* 4355 802.11ac 5G device */ +#define BCM4356_D11AC_ID 0x43ec /* 4356 802.11ac dualband device */ +#define BCM4356_D11AC2G_ID 0x43ed /* 4356 802.11ac 2.4G device */ +#define BCM4356_D11AC5G_ID 0x43ee /* 4356 802.11ac 5G device */ +#define BCM43569_D11AC_ID 0x43d9 +#define BCM43569_D11AC2G_ID 0x43da +#define BCM43569_D11AC5G_ID 0x43db +#define BCM4358_D11AC_ID 0x43e9 /* 4358 802.11ac dualband device */ +#define BCM4358_D11AC2G_ID 0x43ea /* 4358 802.11ac 2.4G device */ +#define BCM4358_D11AC5G_ID 0x43eb /* 4358 802.11ac 5G device */ + +#define BCM4359_D11AC_ID 0x43ef /* 4359 802.11ac dualband device */ +#define BCM4359_D11AC2G_ID 0x43fe /* 4359 802.11ac 2.4G device */ +#define BCM4359_D11AC5G_ID 0x43ff /* 4359 802.11ac 5G device */ +#define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */ +#define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */ +#define BCM43596_D11AC5G_ID 0x4417 /* 43596 802.11ac 5G device */ +#define BCM43597_D11AC_ID 0x441c /* 43597 802.11ac dualband device */ +#define BCM43597_D11AC2G_ID 0x441d /* 43597 802.11ac 2.4G device */ +#define BCM43597_D11AC5G_ID 0x441e /* 43597 802.11ac 5G device */ +#define BCM4361_D11AC_ID 0x441f /* 4361 802.11ac dualband device */ +#define BCM4361_D11AC2G_ID 0x4420 /* 4361 802.11ac 2.4G device */ +#define BCM4361_D11AC5G_ID 0x4421 /* 4361 802.11ac 5G device */ +#define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */ +#define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */ +#define BCM4364_D11AC5G_ID 0x446b /* 4364 802.11ac 5G device */ +#define BCM4371_D11AC_ID 0x440d /* 4371 802.11ac dualband device */ +#define BCM4371_D11AC2G_ID 0x440e /* 4371 802.11ac 2.4G device */ +#define BCM4371_D11AC5G_ID 0x440f /* 4371 802.11ac 5G device */ + +/* Chip IDs 
*/ +#define BCM43018_CHIP_ID 43018 /* 43018 chipcommon chipid */ +#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */ +#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */ +#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */ +#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */ +#define BCM43452_CHIP_ID 43452 /* 43452 chipcommon chipid */ +#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */ +#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */ +#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */ +#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */ + +#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \ + CHIPID(chipid) == BCM43452_CHIP_ID || \ + CHIPID(chipid) == BCM43454_CHIP_ID || \ + CHIPID(chipid) == BCM43455_CHIP_ID || \ + CHIPID(chipid) == BCM43457_CHIP_ID || \ + CHIPID(chipid) == BCM43458_CHIP_ID) + +#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \ + case BCM43454_CHIP_ID: /* fallthrough */ \ + case BCM43455_CHIP_ID: /* fallthrough */ \ + case BCM43457_CHIP_ID: /* fallthrough */ \ + case BCM43458_CHIP_ID + +#define BCM4347_CHIP_ID 0x4347 /* 4347 chipcommon chipid */ +#define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \ + (CHIPID(chipid) == BCM4357_CHIP_ID) || \ + (CHIPID(chipid) == BCM4361_CHIP_ID)) +#define BCM4347_CHIP_GRPID BCM4347_CHIP_ID: \ + case BCM4357_CHIP_ID: \ + case BCM4361_CHIP_ID + +#define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */ +#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */ +#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */ +#define BCM43567_CHIP_ID 0xAA2F /* 43567 chipcommon chipid */ +#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */ +#define BCM4357_CHIP_ID 0x4357 /* 4357 chipcommon chipid */ +#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */ +#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */ +#define BCM43596_CHIP_ID 43596 /* 43596 chipcommon chipid */ +#define BCM4361_CHIP_ID 0x4361 /* 4361 chipcommon chipid */ +#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */ +#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */ + +#define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon chipid */ +#define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */ +#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */ +#define BCM4355_CHIP(chipid) (CHIPID(chipid) == BCM4355_CHIP_ID) +#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \ + (CHIPID(chipid) == BCM4355_CHIP_ID) || \ + (CHIPID(chipid) == BCM4359_CHIP_ID)) +#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \ + case BCM4355_CHIP_ID: \ + case BCM4359_CHIP_ID +#define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \ + (CHIPID(chipid) == BCM4354_CHIP_ID) || \ + (CHIPID(chipid) == BCM43567_CHIP_ID) || \ + (CHIPID(chipid) == BCM43569_CHIP_ID) || \ + (CHIPID(chipid) == BCM43570_CHIP_ID) || \ + (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */ + +#define BCM43598_CHIP_ID 0xaa4c /* 43598 chipcommon chipid */ +#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */ +#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ +#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */ +#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */ +#define BCM43556_CHIP_ID 0xAA24 /* 43556 chipcommon chipid */ +#define BCM43558_CHIP_ID 0xAA26 /* 43558 chipcommon chipid */ +#define BCM43566_CHIP_ID 0xAA2E /* 43566 chipcommon chipid */ +#define BCM43568_CHIP_ID 0xAA30 /* 43568 chipcommon chipid */
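/* Usage sketch (illustrative only, not part of this patch): the *_CHIP()
 * predicates and CASE_* macros above give two ways to match a whole chip
 * family. CHIPID() is the chip-id normalization macro assumed to come from
 * bcmdevs.h; the sizes returned below are made up for illustration except
 * RDL_RAM_SIZE_4350, which is defined later in this header.
 */
static int legacy_chip_ram_size(uint chipid)
{
	if (BCM4349_CHIP(chipid))	/* expression form: 4349/4355/4359 */
		return 0x180000;	/* hypothetical value */

	switch (CHIPID(chipid)) {
	CASE_BCM4345_CHIP:		/* expands to the 4345-family case labels */
		return 0x48000;		/* hypothetical value */
	case BCM4350_CHIP_ID:
		return RDL_RAM_SIZE_4350;
	default:
		return -1;
	}
}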
+ +/* Board Flags */ + +/* Package IDs */ + +/* Board IDs */ + +/* chip RAM specifications */ +#define RDL_RAM_BASE_4319 0x60000000 +#define RDL_RAM_BASE_4329 0x60000000 +#define RDL_RAM_SIZE_4319 0x48000 +#define RDL_RAM_SIZE_4329 0x48000 +#define RDL_RAM_SIZE_43236 0x70000 +#define RDL_RAM_BASE_43236 0x60000000 +#define RDL_RAM_SIZE_4328 0x60000 +#define RDL_RAM_BASE_4328 0x80000000 +#define RDL_RAM_SIZE_4322 0x60000 +#define RDL_RAM_BASE_4322 0x60000000 +#define RDL_RAM_SIZE_43242 0x90000 +#define RDL_RAM_BASE_43242 0x60000000 +#define RDL_RAM_SIZE_43143 0x70000 +#define RDL_RAM_BASE_43143 0x60000000 +#define RDL_RAM_SIZE_4350 0xC0000 +#define RDL_RAM_BASE_4350 0x180800 + +#endif /* _bcmdevs_legacy_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmdhcp.h b/bcmdhd.101.10.361.x/include/bcmdhcp.h new file mode 100755 index 0000000..0051ebf --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmdhcp.h @@ -0,0 +1,86 @@ +/* + * Fundamental constants relating to DHCP Protocol + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#ifndef _bcmdhcp_h_ +#define _bcmdhcp_h_ + +/* DHCP params */ +#define DHCP_TYPE_OFFSET 0 /* DHCP type (request|reply) offset */ +#define DHCP_TID_OFFSET 4 /* DHCP transaction id offset */ +#define DHCP_FLAGS_OFFSET 10 /* DHCP flags offset */ +#define DHCP_CIADDR_OFFSET 12 /* DHCP client IP address offset */ +#define DHCP_YIADDR_OFFSET 16 /* DHCP your IP address offset */ +#define DHCP_GIADDR_OFFSET 24 /* DHCP relay agent IP address offset */ +#define DHCP_CHADDR_OFFSET 28 /* DHCP client h/w address offset */ +#define DHCP_OPT_OFFSET 236 /* DHCP options offset */ + +#define DHCP_OPT_MSGTYPE 53 /* DHCP message type */ +#define DHCP_OPT_MSGTYPE_REQ 3 +#define DHCP_OPT_MSGTYPE_ACK 5 /* DHCP message type - ACK */ + +#define DHCP_OPT_CODE_OFFSET 0 /* Option identifier */ +#define DHCP_OPT_LEN_OFFSET 1 /* Option data length */ +#define DHCP_OPT_DATA_OFFSET 2 /* Option data */ + +#define DHCP_OPT_CODE_CLIENTID 61 /* Option identifier */ + +#define DHCP_TYPE_REQUEST 1 /* DHCP request (discover|request) */ +#define DHCP_TYPE_REPLY 2 /* DHCP reply (offer|ack) */ + +#define DHCP_PORT_SERVER 67 /* DHCP server UDP port */ +#define DHCP_PORT_CLIENT 68 /* DHCP client UDP port */ + +#define DHCP_FLAG_BCAST 0x8000 /* DHCP broadcast flag */ + +#define DHCP_FLAGS_LEN 2 /* DHCP flags field length */ + +#define DHCP6_TYPE_SOLICIT 1 /* DHCP6 solicit */ +#define DHCP6_TYPE_ADVERTISE 2 /* DHCP6 advertise */ +#define DHCP6_TYPE_REQUEST 3 /* DHCP6 request */ +#define DHCP6_TYPE_CONFIRM 4 /* DHCP6 confirm */ +#define DHCP6_TYPE_RENEW 5 /* DHCP6 renew */ +#define DHCP6_TYPE_REBIND 6 /* DHCP6 rebind */ +#define DHCP6_TYPE_REPLY 7 /* DHCP6 reply */ +#define DHCP6_TYPE_RELEASE 8 /* DHCP6 release */ +#define DHCP6_TYPE_DECLINE 9 /* DHCP6 decline */ +#define DHCP6_TYPE_RECONFIGURE 10 /* DHCP6 reconfigure */ +#define DHCP6_TYPE_INFOREQ 11 /* DHCP6 information request */ +#define DHCP6_TYPE_RELAYFWD 12 /* DHCP6 relay forward */ +#define DHCP6_TYPE_RELAYREPLY 13 /* DHCP6 relay reply */ + +#define DHCP6_TYPE_OFFSET 0 /* DHCP6 type offset */ + +#define DHCP6_MSG_OPT_OFFSET 4 /* Offset of options in client server messages */ +#define DHCP6_RELAY_OPT_OFFSET 34 /* Offset of options in relay messages */ + +#define DHCP6_OPT_CODE_OFFSET 0 /* Option identifier */ +#define DHCP6_OPT_LEN_OFFSET 2 /* Option data length */ +#define DHCP6_OPT_DATA_OFFSET 4 /* Option data */ + +#define DHCP6_OPT_CODE_CLIENTID 1 /* DHCP6 CLIENTID option */ +#define DHCP6_OPT_CODE_SERVERID 2 /* DHCP6 SERVERID option */ + +#define DHCP6_PORT_SERVER 547 /* DHCP6 server UDP port */ +#define DHCP6_PORT_CLIENT 546 /* DHCP6 client UDP port */ + +#endif /* #ifndef _bcmdhcp_h_ */
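/* Usage sketch (illustrative only, not part of this patch): classifying the
 * DHCP message type of a UDP payload with the offsets above. DHCP_OPT_OFFSET
 * points at the 4-byte magic cookie, so the option walk starts 4 bytes past
 * it; pad (0) and end (255) option codes are handled minimally. uint8/uint
 * are assumed from typedefs.h.
 */
static int dhcp_msg_type(const uint8 *payload, uint len)
{
	uint off = DHCP_OPT_OFFSET + 4;	/* skip the magic cookie */

	if (len <= off || payload[DHCP_TYPE_OFFSET] > DHCP_TYPE_REPLY)
		return -1;	/* not a plausible DHCP message */

	while (off + DHCP_OPT_DATA_OFFSET < len) {
		uint8 code = payload[off + DHCP_OPT_CODE_OFFSET];

		if (code == 0) {	/* pad: single byte, no length field */
			off++;
			continue;
		}
		if (code == 255)	/* end of options */
			break;
		if (code == DHCP_OPT_MSGTYPE)
			return payload[off + DHCP_OPT_DATA_OFFSET];	/* e.g. DHCP_OPT_MSGTYPE_ACK */
		off += DHCP_OPT_DATA_OFFSET + payload[off + DHCP_OPT_LEN_OFFSET];
	}
	return -1;
}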
diff --git a/bcmdhd.101.10.361.x/include/bcmendian.h b/bcmdhd.101.10.361.x/include/bcmendian.h new file mode 100755 index 0000000..097e5ea --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmendian.h @@ -0,0 +1,451 @@ +/* + * Byte order utilities + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * This file by default provides proper behavior on little-endian architectures. + * On big-endian architectures, IL_BIGENDIAN should be defined. + */ + +#ifndef _BCMENDIAN_H_ +#define _BCMENDIAN_H_ + +#include <typedefs.h> + +/* Reverse the bytes in a 16-bit value */ +#define BCMSWAP16(val) \ + ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \ + (((uint16)(val) & (uint16)0xff00U) >> 8))) + +/* Reverse the bytes in a 32-bit value */ +#define BCMSWAP32(val) \ + ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \ + (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \ + (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \ + (((uint32)(val) & (uint32)0xff000000U) >> 24))) + +/* Reverse the two 16-bit halves of a 32-bit value */ +#define BCMSWAP32BY16(val) \ + ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \ + (((uint32)(val) & (uint32)0xffff0000U) >> 16))) + +/* Reverse the bytes in a 64-bit value */ +#define BCMSWAP64(val) \ + ((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \ + (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \ + (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \ + (((uint64)(val) & 0x00000000ff000000ULL) << 8) | \ + (((uint64)(val) & 0x000000ff00000000ULL) >> 8) | \ + (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \ + (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \ + (((uint64)(val) & 0xff00000000000000ULL) >> 56))) + +/* Reverse the two 32-bit halves of a 64-bit value */ +#define BCMSWAP64BY32(val) \ + ((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \ + (((uint64)(val) & 0xffffffff00000000ULL) >> 32))) + +/* Byte swapping macros + * Host <=> Network (Big Endian) for 16- and 32-bit values + * Host <=> Little-Endian for 16- and 32-bit values + */ +#ifndef hton16 +#ifndef IL_BIGENDIAN +#define HTON16(i) BCMSWAP16(i) +#define hton16(i) bcmswap16(i) +#define HTON32(i) BCMSWAP32(i) +#define hton32(i) bcmswap32(i) +#define NTOH16(i) BCMSWAP16(i) +#define ntoh16(i) bcmswap16(i) +#define NTOH32(i) BCMSWAP32(i) +#define ntoh32(i) bcmswap32(i) +#define LTOH16(i) (i) +#define ltoh16(i) (i) +#define LTOH32(i) (i) +#define ltoh32(i) (i) +#define HTOL16(i) (i) +#define htol16(i) (i) +#define HTOL32(i) (i) +#define htol32(i) (i) +#define HTOL64(i) (i) +#define htol64(i) (i) +#else /* IL_BIGENDIAN */ +#define HTON16(i) (i) +#define hton16(i) (i) +#define HTON32(i) (i) +#define hton32(i) (i) +#define NTOH16(i) (i) +#define ntoh16(i) (i) +#define NTOH32(i) (i) +#define ntoh32(i) (i) +#define LTOH16(i) BCMSWAP16(i) +#define ltoh16(i) bcmswap16(i) +#define LTOH32(i) BCMSWAP32(i) +#define ltoh32(i) bcmswap32(i) +#define HTOL16(i) BCMSWAP16(i) +#define htol16(i) bcmswap16(i) +#define HTOL32(i)
BCMSWAP32(i) +#define htol32(i) bcmswap32(i) +#define HTOL64(i) BCMSWAP64(i) +#define htol64(i) bcmswap64(i) +#endif /* IL_BIGENDIAN */ +#endif /* hton16 */ + +#ifndef IL_BIGENDIAN +#define ltoh16_buf(buf, i) +#define htol16_buf(buf, i) +#define ltoh32_buf(buf, i) +#define htol32_buf(buf, i) +#define ltoh64_buf(buf, i) +#define htol64_buf(buf, i) +#else +#define ltoh16_buf(buf, i) bcmswap16_buf((uint16 *)(buf), (i)) +#define htol16_buf(buf, i) bcmswap16_buf((uint16 *)(buf), (i)) +#define ltoh32_buf(buf, i) bcmswap32_buf((uint16 *)(buf), (i)) +#define htol32_buf(buf, i) bcmswap32_buf((uint16 *)(buf), (i)) +#define ltoh64_buf(buf, i) bcmswap64_buf((uint16 *)(buf), (i)) +#define htol64_buf(buf, i) bcmswap64_buf((uint16 *)(buf), (i)) +#endif /* IL_BIGENDIAN */ + +/* Unaligned loads and stores in host byte order */ +#ifndef IL_BIGENDIAN +#define load32_ua(a) ltoh32_ua(a) +#define store32_ua(a, v) htol32_ua_store(v, a) +#define load16_ua(a) ltoh16_ua(a) +#define store16_ua(a, v) htol16_ua_store(v, a) +#define load64_ua(a) ltoh64_ua(a) +#define store64_ua(a, v) htol64_ua_store(v, a) +#else +#define load32_ua(a) ntoh32_ua(a) +#define store32_ua(a, v) hton32_ua_store(v, a) +#define load16_ua(a) ntoh16_ua(a) +#define store16_ua(a, v) hton16_ua_store(v, a) +#define load64_ua(a) ntoh64_ua(a) +#define store64_ua(a, v) hton64_ua_store(v, a) +#endif /* IL_BIGENDIAN */ + +#define _LTOH16_UA(cp) ((uint16)(cp)[0] | ((uint16)(cp)[1] << 8)) +#define _LTOH32_UA(cp) ((uint32)(cp)[0] | ((uint32)(cp)[1] << 8) | \ + ((uint32)(cp)[2] << 16) | ((uint32)(cp)[3] << 24)) +#define _NTOH16_UA(cp) (((uint16)(cp)[0] << 8) | (uint16)(cp)[1]) +#define _NTOH32_UA(cp) (((uint32)(cp)[0] << 24) | ((uint32)(cp)[1] << 16) | \ + ((uint32)(cp)[2] << 8) | (uint32)(cp)[3]) + +#define _LTOH64_UA(cp) ((uint64)(cp)[0] | ((uint64)(cp)[1] << 8) | \ + ((uint64)(cp)[2] << 16) | ((uint64)(cp)[3] << 24) | \ + ((uint64)(cp)[4] << 32) | ((uint64)(cp)[5] << 40) | \ + ((uint64)(cp)[6] << 48) | ((uint64)(cp)[7] << 56)) + +#define _NTOH64_UA(cp) ((uint64)(cp)[7] | ((uint64)(cp)[6] << 8) | \ + ((uint64)(cp)[5] << 16) | ((uint64)(cp)[4] << 24) | \ + ((uint64)(cp)[3] << 32) | ((uint64)(cp)[2] << 40) | \ + ((uint64)(cp)[1] << 48) | ((uint64)(cp)[0] << 56)) + +#define ltoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? (uint16)_LTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? (uint32)_LTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#define ntoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? (uint16)_NTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? (uint32)_NTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0)
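/* Usage sketch (illustrative only, not part of this patch): the *_ua forms
 * read and write through byte pointers, so they are safe on unaligned
 * addresses and convert byte order in the same step. uint8/uint16/uint32
 * are assumed from typedefs.h; the buffer offsets below are arbitrary.
 */
static void endian_demo(uint8 *frame)
{
	uint16 le16 = ltoh16_ua(frame + 1);	/* unaligned little-endian 16-bit load */
	uint32 be32 = ntoh32_ua(frame + 3);	/* unaligned network-order 32-bit load */

	htol32_ua_store(0x11223344u, frame + 7);	/* stores bytes 44 33 22 11 */
	hton16_ua_store(le16, frame + 11);		/* re-emit in network order */
	(void)be32;
	(void)bcmswap32(0x11223344u);			/* whole-register swap: 0x44332211 */
}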
+ +#ifdef __GNUC__ + +/* GNU macro versions avoid referencing the argument multiple times, while also + * avoiding the -fno-inline used in ROM builds. + */ + +#define bcmswap16(val) ({ \ + uint16 _val = (val); \ + BCMSWAP16(_val); \ +}) + +#define bcmswap32(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32(_val); \ +}) + +#define bcmswap64(val) ({ \ + uint64 _val = (val); \ + BCMSWAP64(_val); \ +}) + +#define bcmswap32by16(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32BY16(_val); \ +}) + +#define bcmswap16_buf(buf, len) ({ \ + uint16 *_buf = (uint16 *)(buf); \ + uint _wds = (len) / 2; \ + while (_wds--) { \ + *_buf = bcmswap16(*_buf); \ + _buf++; \ + } \ +}) + +#define bcmswap32_buf(buf, len) ({ \ + uint32 *_buf = (uint32 *)(buf); \ + uint _wds = (len) / 4; \ + while (_wds--) { \ + *_buf = bcmswap32(*_buf); \ + _buf++; \ + } \ +}) + +#define bcmswap64_buf(buf, len) ({ \ + uint64 *_buf = (uint64 *)(buf); \ + uint _wds = (len) / 8; \ + while (_wds--) { \ + *_buf = bcmswap64(*_buf); \ + _buf++; \ + } \ +}) + +#define htol16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = _val >> 8; \ +}) + +#define htol32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = (_val >> 8) & 0xff; \ + _bytes[2] = (_val >> 16) & 0xff; \ + _bytes[3] = _val >> 24; \ +}) + +#define htol64_ua_store(val, bytes) ({ \ + uint64 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + int _ii; \ + for (_ii = 0; _ii < (int)sizeof(_val); ++_ii) { \ + *_bytes++ = _val & 0xff; \ + _val >>= 8; \ + } \ +}) + +#define hton16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 8; \ + _bytes[1] = _val & 0xff; \ +}) + +#define hton32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 24; \ + _bytes[1] = (_val >> 16) & 0xff; \ + _bytes[2] = (_val >> 8) & 0xff; \ + _bytes[3] = _val & 0xff; \ +}) + +#define ltoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH16_UA(_bytes); \ +}) + +#define ltoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH32_UA(_bytes); \ +}) + +#define ltoh64_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH64_UA(_bytes); \ +}) + +#define ntoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH16_UA(_bytes); \ +}) + +#define ntoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH32_UA(_bytes); \ +}) + +#define ntoh64_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH64_UA(_bytes); \ +}) + +#else /* !__GNUC__ */ + +/* Inline versions avoid referencing the argument multiple times */ +static INLINE uint16 +bcmswap16(uint16 val) +{ + return BCMSWAP16(val); +} + +static INLINE uint32 +bcmswap32(uint32 val) +{ + return BCMSWAP32(val); +} + +static INLINE uint64 +bcmswap64(uint64 val) +{ + return BCMSWAP64(val); +} + +static INLINE uint32 +bcmswap32by16(uint32 val) +{ + return BCMSWAP32BY16(val); +} + +/* Reverse pairs of bytes in a buffer (not for high-performance use) */ +/* buf - start of buffer of shorts to swap */ +/* len - byte length of buffer */ +static INLINE void +bcmswap16_buf(uint16 *buf, uint len) +{ + len = len / 2; + + while (len--) { + *buf = bcmswap16(*buf); + buf++; + } +} + +/* + * Store 16-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = val >> 8; +} + +/* + * Store 32-bit value to unaligned little-endian byte array.
+ */ +static INLINE void +htol32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = (val >> 8) & 0xff; + bytes[2] = (val >> 16) & 0xff; + bytes[3] = val >> 24; +} + +/* + * Store 64-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol64_ua_store(uint64 val, uint8 *bytes) +{ + int i; + for (i = 0; i < sizeof(val); ++i) { + *bytes++ = (uint8)(val & 0xff); + val >>= 8; + } +} + +/* + * Store 16-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val >> 8; + bytes[1] = val & 0xff; +} + +/* + * Store 32-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val >> 24; + bytes[1] = (val >> 16) & 0xff; + bytes[2] = (val >> 8) & 0xff; + bytes[3] = val & 0xff; +} + +/* + * Load 16-bit value from unaligned little-endian byte array. + */ +static INLINE uint16 +ltoh16_ua(const void *bytes) +{ + return _LTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned little-endian byte array. + */ +static INLINE uint32 +ltoh32_ua(const void *bytes) +{ + return _LTOH32_UA((const uint8 *)bytes); +} + +/* + * Load 64-bit value from unaligned little-endian byte array. + */ +static INLINE uint64 +ltoh64_ua(const void *bytes) +{ + return _LTOH64_UA((const uint8 *)bytes); +} + +/* + * Load 16-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint16 +ntoh16_ua(const void *bytes) +{ + return _NTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint32 +ntoh32_ua(const void *bytes) +{ + return _NTOH32_UA((const uint8 *)bytes); +} + +/* + * Load 64-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint64 +ntoh64_ua(const void *bytes) +{ + return _NTOH64_UA((const uint8 *)bytes); +} + +#endif /* !__GNUC__ */ +#endif /* !_BCMENDIAN_H_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmerror.h b/bcmdhd.101.10.361.x/include/bcmerror.h new file mode 100755 index 0000000..ef2a440 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmerror.h @@ -0,0 +1,573 @@ +/* + * Common header file for all error codes. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + * + * + */ + +#ifndef _bcmerror_h_ +#define _bcmerror_h_ + +#include <typedefs.h> + +/* Use error codes from this file only if BCMUTILS_ERR_CODES is defined. */ +#ifdef BCMUTILS_ERR_CODES + +/* NOTE re: Module specific error codes. + * + * BCME_.. error codes are extended by various features - e.g. FTM, NAN, SAE etc.
+ * The current process is to allocate a range of 1024 negative 32 bit integers to + * each module that extends the error codes to indicate a module specific status. + * + * The next range to use is below. If that range is used for a new feature, please + * update the range to be used by the next feature. + * + * Next available (inclusive) range: [-8*1024 + 1, -7*1024] + * + * Common error codes use BCME_ prefix. Firmware (wl) components should use the + * convention to prefix the error code name with WL_<module>_E_ (e.g. WL_NAN_E_?). + * Non-wl components, other than common error codes use BCM_<module>_E_ + * prefix (e.g. BCM_FWSIGN_E_). + * + * End Note + */ + +typedef int bcmerror_t; + +/* + * error codes could be added but the defined ones shouldn't be changed/deleted + * these error codes are exposed to the user code + * whenever a new error code is added to this list + * please update errorstring table with the related error string and + * update osl files with os specific errorcode map +*/ + +#define BCME_OK 0 /* Success */ +#define BCME_ERROR -1 /* Error generic */ +#define BCME_BADARG -2 /* Bad Argument */ +#define BCME_BADOPTION -3 /* Bad option */ +#define BCME_NOTUP -4 /* Not up */ +#define BCME_NOTDOWN -5 /* Not down */ +#define BCME_NOTAP -6 /* Not AP */ +#define BCME_NOTSTA -7 /* Not STA */ +#define BCME_BADKEYIDX -8 /* BAD Key Index */ +#define BCME_RADIOOFF -9 /* Radio Off */ +#define BCME_NOTBANDLOCKED -10 /* Not band locked */ +#define BCME_NOCLK -11 /* No Clock */ +#define BCME_BADRATESET -12 /* BAD Rate valueset */ +#define BCME_BADBAND -13 /* BAD Band */ +#define BCME_BUFTOOSHORT -14 /* Buffer too short */ +#define BCME_BUFTOOLONG -15 /* Buffer too long */ +#define BCME_BUSY -16 /* Busy */ +#define BCME_NOTASSOCIATED -17 /* Not Associated */ +#define BCME_BADSSIDLEN -18 /* Bad SSID len */ +#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */ +#define BCME_BADCHAN -20 /* Bad Channel */ +#define BCME_BADADDR -21 /* Bad Address */ +#define BCME_NORESOURCE -22 /* Not Enough Resources */ +#define BCME_UNSUPPORTED -23 /* Unsupported */ +#define BCME_BADLEN -24 /* Bad length */ +#define BCME_NOTREADY -25 /* Not Ready */ +#define BCME_EPERM -26 /* Not Permitted */ +#define BCME_NOMEM -27 /* No Memory */ +#define BCME_ASSOCIATED -28 /* Associated */ +#define BCME_RANGE -29 /* Not In Range */ +#define BCME_NOTFOUND -30 /* Not Found */ +#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */ +#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */ +#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */ +#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */ +#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */ +#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */ +#define BCME_VERSION -37 /* Incorrect version */ +#define BCME_TXFAIL -38 /* TX failure */ +#define BCME_RXFAIL -39 /* RX failure */ +#define BCME_NODEVICE -40 /* Device not present */ +#define BCME_NMODE_DISABLED -41 /* NMODE disabled */ +#define BCME_MSCH_DUP_REG -42 /* Duplicate slot registration */ +#define BCME_SCANREJECT -43 /* reject scan request */ +#define BCME_USAGE_ERROR -44 /* WLCMD usage error */ +#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */ +#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */ +#define BCME_DISABLED -47 /* Disabled in this build */ +#define BCME_DECERR -48 /* Decrypt error */ +#define BCME_ENCERR -49 /* Encrypt error */ +#define BCME_MICERR -50 /* Integrity/MIC error */ +#define BCME_REPLAY -51 /* Replay */ +#define BCME_IE_NOTFOUND -52 /* IE not found */ +#define
BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */ +#define BCME_NOT_GC -54 /* expecting a group client */ +#define BCME_PRS_REQ_FAILED -55 /* GC presence req failed to send */ +#define BCME_NO_P2P_SE -56 /* Could not find P2P-Subelement */ +#define BCME_NOA_PND -57 /* NoA pending, CB should be NULL */ +#define BCME_FRAG_Q_FAILED -58 /* queueing 80211 frag failed */ +#define BCME_GET_AF_FAILED -59 /* Get p2p AF pkt failed */ +#define BCME_MSCH_NOTREADY -60 /* scheduler not ready */ +#define BCME_IOV_LAST_CMD -61 /* last batched iov sub-command */ +#define BCME_MINIPMU_CAL_FAIL -62 /* MiniPMU cal failed */ +#define BCME_RCAL_FAIL -63 /* Rcal failed */ +#define BCME_LPF_RCCAL_FAIL -64 /* RCCAL failed */ +#define BCME_DACBUF_RCCAL_FAIL -65 /* RCCAL failed */ +#define BCME_VCOCAL_FAIL -66 /* VCOCAL failed */ +#define BCME_BANDLOCKED -67 /* interface is restricted to a band */ +#define BCME_BAD_IE_DATA -68 /* Received ie with invalid/bad data */ +#define BCME_REG_FAILED -69 /* Generic registration failed */ +#define BCME_NOCHAN -70 /* Registration with 0 chans in list */ +#define BCME_PKTTOSS -71 /* Pkt tossed */ +#define BCME_DNGL_DEVRESET -72 /* dongle re-attach during DEVRESET */ +#define BCME_ROAM -73 /* Roam related failures */ +#define BCME_NO_SIG_FILE -74 /* Signature file is missing */ + +#define BCME_LAST BCME_NO_SIG_FILE + +#define BCME_NOTENABLED BCME_DISABLED + +/* This error code is *internal* to the driver, and is not propagated to users. It should + * only be used by IOCTL patch handlers as an indication that it did not handle the IOCTL. + * (Since the error code is internal, an entry in 'BCMERRSTRINGTABLE' is not required, + * nor does it need to be part of any OSL driver-to-OS error code mapping). + */ +#define BCME_IOCTL_PATCH_UNSUPPORTED -9999 +#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED) + #error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED" +#endif + +/* These are a collection of BCME Error strings */ +#define BCMERRSTRINGTABLE { \ + "OK", \ + "Undefined error", \ + "Bad Argument", \ + "Bad Option", \ + "Not up", \ + "Not down", \ + "Not AP", \ + "Not STA", \ + "Bad Key Index", \ + "Radio Off", \ + "Not band locked", \ + "No clock", \ + "Bad Rate valueset", \ + "Bad Band", \ + "Buffer too short", \ + "Buffer too long", \ + "Busy", \ + "Not Associated", \ + "Bad SSID len", \ + "Out of Range Channel", \ + "Bad Channel", \ + "Bad Address", \ + "Not Enough Resources", \ + "Unsupported", \ + "Bad length", \ + "Not Ready", \ + "Not Permitted", \ + "No Memory", \ + "Associated", \ + "Not In Range", \ + "Not Found", \ + "WME Not Enabled", \ + "TSPEC Not Found", \ + "ACM Not Supported", \ + "Not WME Association", \ + "SDIO Bus Error", \ + "Dongle Not Accessible", \ + "Incorrect version", \ + "TX Failure", \ + "RX Failure", \ + "Device Not Present", \ + "NMODE Disabled", \ + "Host Offload in device", \ + "Scan Rejected", \ + "WLCMD usage error", \ + "WLCMD ioctl error", \ + "RWL serial port error", \ + "Disabled", \ + "Decrypt error", \ + "Encrypt error", \ + "MIC error", \ + "Replay", \ + "IE not found", \ + "Data not found", \ + "NOT GC", \ + "PRS REQ FAILED", \ + "NO P2P SubElement", \ + "NOA Pending", \ + "FRAG Q FAILED", \ + "GET ActionFrame failed", \ + "scheduler not ready", \ + "Last IOV batched sub-cmd", \ + "Mini PMU Cal failed", \ + "R-cal failed", \ + "LPF RC Cal failed", \ + "DAC buf RC Cal failed", \ + "VCO Cal failed", \ + "band locked", \ + "Received ie with invalid data", \ + "registration failed", \ + "Registration with zero channels", \ + "pkt toss", \ + "Dongle Devreset", \ + "Critical roam in progress", \ + "Signature file is missing", \ +}
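/* Usage sketch (illustrative only, not part of this patch): the table is laid
 * out so that entry [-err] names error code 'err'; the driver's own string
 * helper elsewhere in the tree follows the same scheme. Minimal version:
 */
static const char *bcm_err_str(int err)
{
	static const char * const errstr[] = BCMERRSTRINGTABLE;

	if (err > 0 || err < BCME_LAST)
		return "Unknown error";
	return errstr[-err];
}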
"Dongle Devreset", \ + "Critical roam in progress", \ + "Signature file is missing", \ +} + +/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */ +enum { + WL_PROXD_E_LAST = -1057, + WL_PROXD_E_ASSOC_INPROG = -1057, + WL_PROXD_E_NOAVAIL = -1056, + WL_PROXD_E_EXT_SCHED = -1055, + WL_PROXD_E_NOT_BCM = -1054, + WL_PROXD_E_FRAME_TYPE = -1053, + WL_PROXD_E_VERNOSUPPORT = -1052, + WL_PROXD_E_SEC_NOKEY = -1051, + WL_PROXD_E_SEC_POLICY = -1050, + WL_PROXD_E_SCAN_INPROCESS = -1049, + WL_PROXD_E_BAD_PARTIAL_TSF = -1048, + WL_PROXD_E_SCANFAIL = -1047, + WL_PROXD_E_NOTSF = -1046, + WL_PROXD_E_POLICY = -1045, + WL_PROXD_E_INCOMPLETE = -1044, + WL_PROXD_E_OVERRIDDEN = -1043, + WL_PROXD_E_ASAP_FAILED = -1042, + WL_PROXD_E_NOTSTARTED = -1041, + WL_PROXD_E_INVALIDMEAS = -1040, + WL_PROXD_E_INCAPABLE = -1039, + WL_PROXD_E_MISMATCH = -1038, + WL_PROXD_E_DUP_SESSION = -1037, + WL_PROXD_E_REMOTE_FAIL = -1036, + WL_PROXD_E_REMOTE_INCAPABLE = -1035, + WL_PROXD_E_SCHED_FAIL = -1034, + WL_PROXD_E_PROTO = -1033, + WL_PROXD_E_EXPIRED = -1032, + WL_PROXD_E_TIMEOUT = -1031, + WL_PROXD_E_NOACK = -1030, + WL_PROXD_E_DEFERRED = -1029, + WL_PROXD_E_INVALID_SID = -1028, + WL_PROXD_E_REMOTE_CANCEL = -1027, + WL_PROXD_E_CANCELED = -1026, /**< local */ + WL_PROXD_E_INVALID_SESSION = -1025, + WL_PROXD_E_BAD_STATE = -1024, + WL_PROXD_E_START = -1024, + WL_PROXD_E_ERROR = -1, + WL_PROXD_E_OK = 0 +}; +typedef int32 wl_proxd_status_t; + +/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */ +enum { + /* add new status here... */ + WL_NAN_E_NO_ACTION = -2136, /* status for no action */ + WL_NAN_E_INVALID_TOKEN = -2135, /* invalid token or mismatch */ + WL_NAN_E_INVALID_ATTR = -2134, /* generic invalid attr error */ + WL_NAN_E_INVALID_NDL_ATTR = -2133, /* invalid NDL attribute */ + WL_NAN_E_SCB_NORESOURCE = -2132, /* no more peer scb available */ + WL_NAN_E_PEER_NOTAVAIL = -2131, + WL_NAN_E_SCB_EXISTS = -2130, + WL_NAN_E_INVALID_PEER_NDI = -2129, + WL_NAN_E_INVALID_LOCAL_NDI = -2128, + WL_NAN_E_ALREADY_EXISTS = -2127, /* generic NAN error for duplication */ + WL_NAN_E_EXCEED_MAX_NUM_MAPS = -2126, + WL_NAN_E_INVALID_DEV_CHAN_SCHED = -2125, + WL_NAN_E_INVALID_PEER_BLOB_TYPE = -2124, + WL_NAN_E_INVALID_LCL_BLOB_TYPE = -2123, + WL_NAN_E_BCMC_PDPA = -2122, /* BCMC NAF PDPA */ + WL_NAN_E_TIMEOUT = -2121, + WL_NAN_E_HOST_CFG = -2120, + WL_NAN_E_NO_ACK = -2119, + WL_NAN_E_SECINST_FAIL = -2118, + WL_NAN_E_REJECT_NDL = -2117, /* generic NDL rejection error */ + WL_NAN_E_INVALID_NDP_ATTR = -2116, + WL_NAN_E_HOST_REJECTED = -2115, + WL_NAN_E_PCB_NORESOURCE = -2114, + WL_NAN_E_NDC_EXISTS = -2113, + WL_NAN_E_NO_NDC_ENTRY_AVAIL = -2112, + WL_NAN_E_INVALID_NDC_ENTRY = -2111, + WL_NAN_E_SD_TX_LIST_FULL = -2110, + WL_NAN_E_SVC_SUB_LIST_FULL = -2109, + WL_NAN_E_SVC_PUB_LIST_FULL = -2108, + WL_NAN_E_SDF_MAX_LEN_EXCEEDED = -2107, + WL_NAN_E_ZERO_CRB = -2106, /* no CRB between local and peer */ + WL_NAN_E_PEER_NDC_NOT_SELECTED = -2105, /* peer ndc not selected */ + WL_NAN_E_DAM_CHAN_CONFLICT = -2104, /* dam schedule channel conflict */ + WL_NAN_E_DAM_SCHED_PERIOD = -2103, /* dam schedule period mismatch */ + WL_NAN_E_LCL_NDC_NOT_SELECTED = -2102, /* local selected ndc not configured */ + WL_NAN_E_NDL_QOS_INVALID_NA = -2101, /* na doesn't comply with ndl qos */ + WL_NAN_E_CLEAR_NAF_WITH_SA_AS_RNDI = -2100, /* rx clear naf with peer rndi */ + WL_NAN_E_SEC_CLEAR_PKT = -2099, /* rx clear pkt from a peer with sec_sa */ + WL_NAN_E_PROT_NON_PDPA_NAF = -2098, /* rx protected non PDPA frame */ + WL_NAN_E_DAM_DOUBLE_REMOVE = -2097, /* 
/* remove peer schedule already removed */ + WL_NAN_E_DAM_DOUBLE_MERGE = -2096, /* merge peer schedule already merged */ + WL_NAN_E_DAM_REJECT_INVALID = -2095, /* reject for invalid schedule */ + WL_NAN_E_DAM_REJECT_RANGE = -2094, + WL_NAN_E_DAM_REJECT_QOS = -2093, + WL_NAN_E_DAM_REJECT_NDC = -2092, + WL_NAN_E_DAM_REJECT_PEER_IMMUT = -2091, + WL_NAN_E_DAM_REJECT_LCL_IMMUT = -2090, + WL_NAN_E_DAM_EXCEED_NUM_SCHED = -2089, + WL_NAN_E_DAM_INVALID_SCHED_MAP = -2088, /* invalid schedule map list */ + WL_NAN_E_DAM_INVALID_LCL_SCHED = -2087, + WL_NAN_E_INVALID_MAP_ID = -2086, + WL_NAN_E_CHAN_OVERLAP_ACROSS_MAP = -2085, + WL_NAN_E_INVALID_CHAN_LIST = -2084, + WL_NAN_E_INVALID_RANGE_TBMP = -2083, + WL_NAN_E_INVALID_IMMUT_SCHED = -2082, + WL_NAN_E_INVALID_NDC_ATTR = -2081, + WL_NAN_E_INVALID_TIME_BITMAP = -2080, + WL_NAN_E_INVALID_NA_ATTR = -2079, + WL_NAN_E_NO_NA_ATTR_IN_AVAIL_MAP = -2078, /* no na attr saved in avail map */ + WL_NAN_E_INVALID_MAP_IDX = -2077, + WL_NAN_E_SEC_SA_NOTFOUND = -2076, + WL_NAN_E_BSSCFG_NOTFOUND = -2075, + WL_NAN_E_SCB_NOTFOUND = -2074, + WL_NAN_E_NCS_SK_KDESC_TYPE = -2073, + WL_NAN_E_NCS_SK_KEY_DESC_VER = -2072, /* key descr ver */ + WL_NAN_E_NCS_SK_KEY_TYPE = -2071, /* key descr type */ + WL_NAN_E_NCS_SK_KEYINFO_FAIL = -2070, /* key info (generic) */ + WL_NAN_E_NCS_SK_KEY_LEN = -2069, /* key len */ + WL_NAN_E_NCS_SK_KDESC_NOT_FOUND = -2068, /* key desc not found */ + WL_NAN_E_NCS_SK_INVALID_PARAMS = -2067, /* invalid args */ + WL_NAN_E_NCS_SK_KDESC_INVALID = -2066, /* key descr is not valid */ + WL_NAN_E_NCS_SK_NONCE_MISMATCH = -2065, + WL_NAN_E_NCS_SK_KDATA_SAVE_FAIL = -2064, /* not able to save key data */ + WL_NAN_E_NCS_SK_AUTH_TOKEN_CALC_FAIL = -2063, + WL_NAN_E_NCS_SK_PTK_CALC_FAIL = -2062, + WL_NAN_E_INVALID_STARTOFFSET = -2061, + WL_NAN_E_BAD_NA_ENTRY_TYPE = -2060, + WL_NAN_E_INVALID_CHANBMP = -2059, + WL_NAN_E_INVALID_OP_CLASS = -2058, + WL_NAN_E_NO_IES = -2057, + WL_NAN_E_NO_PEER_ENTRY_AVAIL = -2056, + WL_NAN_E_INVALID_PEER = -2055, + WL_NAN_E_PEER_EXISTS = -2054, + WL_NAN_E_PEER_NOTFOUND = -2053, + WL_NAN_E_NO_MEM = -2052, + WL_NAN_E_INVALID_OPTION = -2051, + WL_NAN_E_INVALID_BAND = -2050, + WL_NAN_E_INVALID_MAC = -2049, + WL_NAN_E_BAD_INSTANCE = -2048, + /* NAN status code reserved from -2048 to -3071 */ + /* Do NOT add new status below -2048 */ + WL_NAN_E_ERROR = -1, + WL_NAN_E_OK = 0 +};
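/* Usage sketch (illustrative only, not part of this patch): per the range
 * allocation note at the top of this file, each feature owns a 1K block of
 * negative codes, so the owning module can be recovered from the value alone.
 * The SAE range tested here is declared just below.
 */
static const char *bcm_status_owner(int status)
{
	if (status >= BCME_LAST && status <= BCME_OK)
		return "common";	/* BCME_* */
	if (status <= WL_PROXD_E_START && status >= WL_PROXD_E_LAST)
		return "proxd";		/* -1024..-1057 in use */
	if (status <= -2048 && status >= -3071)
		return "nan";		/* reserved -2048..-3071 */
	if (status <= -3072 && status >= -4095)
		return "sae";		/* reserved -3072..-4095 */
	return "other/reserved";
}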
+ +/* SAE (Simultaneous Authentication of Equals) status codes. + * SAE status codes are reserved from -3072 to -4095 (1K) + */ +enum { + WL_SAE_E_AUTH_FAILURE = -3072, + /* Discard silently */ + WL_SAE_E_AUTH_DISCARD = -3073, + /* Authentication in progress */ + WL_SAE_E_AUTH_CONTINUE = -3074, + /* Invalid scalar/elt */ + WL_SAE_E_AUTH_COMMIT_INVALID = -3075, + /* Invalid confirm token */ + WL_SAE_E_AUTH_CONFIRM_INVALID = -3076, + /* Peer scalar validation failure */ + WL_SAE_E_CRYPTO_SCALAR_VALIDATION = -3077, + /* Peer element prime validation failure */ + WL_SAE_E_CRYPTO_ELE_PRIME_VALIDATION = -3078, + /* Peer element is not on the curve */ + WL_SAE_E_CRYPTO_ELE_NOT_ON_CURVE = -3079, + /* Generic EC error (elliptic curve related) */ + WL_SAE_E_CRYPTO_EC_ERROR = -3080, + /* Both local and peer mac addrs are same */ + WL_SAE_E_CRYPTO_EQUAL_MACADDRS = -3081, + /* Loop exceeded in deriving the scalar */ + WL_SAE_E_CRYPTO_SCALAR_ITER_EXCEEDED = -3082, + /* ECC group is unsupported */ + WL_SAE_E_CRYPTO_UNSUPPORTED_GROUP = -3083, + /* Exceeded the hunting-and-pecking counter */ + WL_SAE_E_CRYPTO_PWE_COUNTER_EXCEEDED = -3084, + /* SAE crypto component is not initialized */ + WL_SAE_E_CRYPTO_NOT_INITED = -3085, + /* bn_get has failed */ + WL_SAE_E_CRYPTO_BN_GET_ERROR = -3086, + /* bn_set has failed */ + WL_SAE_E_CRYPTO_BN_SET_ERROR = -3087, + /* PMK is not computed yet */ + WL_SAE_E_CRYPTO_PMK_UNAVAILABLE = -3088, + /* Peer confirm did not match */ + WL_SAE_E_CRYPTO_CONFIRM_MISMATCH = -3089, + /* Element K is at infinity on the curve */ + WL_SAE_E_CRYPTO_KEY_AT_INFINITY = -3090, + /* SAE Crypto private data magic number mismatch */ + WL_SAE_E_CRYPTO_PRIV_MAGIC_MISMATCH = -3091, + /* Max retry exhausted */ + WL_SAE_E_MAX_RETRY_LIMIT_REACHED = -3092, + /* peer sent password ID mismatch to local */ + WL_SAE_E_AUTH_PEER_PWDID_MISMATCH = -3093, + /* user not configured password */ + WL_SAE_E_AUTH_PASSWORD_NOT_CONFIGURED = -3094, + /* user not configured password ID */ + WL_SAE_E_AUTH_PWDID_NOT_CONFIGURED = -3095, + /* Anti-clogging token mismatch */ + WL_SAE_E_AUTH_ANTI_CLOG_MISMATCH = -3096, + /* SAE PWE method mismatch */ + WL_SAE_E_AUTH_PWE_MISMATCH = -3097 +}; + +/* + * Firmware signing error code range: -4096...-5119 + */ +enum { + /* okay */ + BCM_FWSIGN_E_OK = 0, + + /* Operation is in progress */ + BCM_FWSIGN_E_INPROGRESS = -4096, + + /* version mismatch */ + BCM_FWSIGN_E_VERSION = -4097, + + /* key not found */ + BCM_FWSIGN_E_KEY_NOT_FOUND = -4098, + + /* key found, but is not valid (revoked) */ + BCM_FWSIGN_E_KEY_NOT_VALID = -4099, + + /* Cipher suite id mismatch for the key */ + BCM_FWSIGN_E_CS_ID_MISMATCH = -4100, + + /* Signature does not match */ + BCM_FWSIGN_E_SIGNATURE = -4101, + + /* Continue */ + BCM_FWSIGN_E_CONTINUE = -4102, + + /* Heap is too small */ + BCM_FWSIGN_E_HEAP_TOO_SMALL = -4103, + + /* Allocation of bn ctx failed */ + BCM_FWSIGN_E_BN_CTX_ALLOC_FAILED = -4104, + + /* possible bug */ + BCM_FWSIGN_E_BUGCHECK = -4105, + + /* chosen key is invalid */ + BCM_FWSIGN_E_INVALID_KEY = -4106, + + /* signature is invalid */ + BCM_FWSIGN_E_INVALID_SIGNATURE = -4107, + + /* signature tlv missing */ + BCM_FWSIGN_E_NO_CSID_SIG = -4108, + + /* chosen key is invalid */ + BCM_FWSIGN_E_REVOKED_KEY = -4109, + + /* signature has no matching valid key in ROM */ + BCM_FWSIGN_E_NO_OTP_FOR_ROM_KEY = -4110, + + /* Compression not supported */ + BCM_FWSIGN_E_COMPNOTSUP = -4111, + + /* OTP read error */ + BCM_FWSIGN_E_OTP_READ = -4112, + + /* heap address overlaps with FW address space */ + BCM_FWSIGN_E_HEAP_OVR_FW = -4113, + + /* heap address overlaps with
bootloader data/bss region */ + BCM_FWSIGN_E_HEAP_OVR_BSS = -4114, + + /* heap address overlaps with bootloader stack region */ + BCM_FWSIGN_E_HEAP_OVR_STACK = -4115, + + /* firmware encryption header tlv is missing */ + BCM_FWSIGN_E_NO_FWENC_HDR = -4116, + + /* firmware encryption algo not supported */ + BCM_FWSIGN_E_FWENC_ALGO_NOTSUP = -4117, + + /* firmware encryption tag tlv is missing */ + BCM_FWSIGN_E_NO_FW_TAG = -4118, + + /* firmware encryption tag tlv is not valid */ + BCM_FWSIGN_E_FW_TAG_INVALID_TLV = -4119, + + /* firmware encryption tag verification fail */ + BCM_FWSIGN_E_FW_TAG_MISMATCH = -4120, + + /* signature package is invalid */ + BCM_FWSIGN_E_PACKAGE_INVALID = -4121, + + /* last error */ + BCM_FWSIGN_E_LAST = -5119 +}; +typedef int32 bcm_fwsign_status_t; + +/* PMK manager block. Event codes from -5120 to -6143 */ +/* PSK hashing event codes */ +enum { + WL_PMK_E_PSK_HASH_FAILED = -5120, + WL_PMK_E_PSK_HASH_DONE = -5121, + WL_PMK_E_PSK_HASH_RUNNING = -5122, + WL_PMK_E_PSK_INVALID = -5123, + WL_PMK_E_PSK_NOMEM = -5124 +}; + +/* + * SOE (Security Offload Engine) status codes. + * SOE status codes are reserved from -6144 to -7167 (1K) + */ +enum { + /* Invalid operational context */ + WL_SOE_E_BAD_OP_CONTEXT = -6144, + + /* Invalid operational type */ + WL_SOE_E_BAD_OP_TYPE = -6145, + + /* Failure to get NAF3 encoded scalar */ + WL_SOE_E_BN_NAF3_GET_ERROR = -6146, + + /* Failure to get NAF3 params */ + WL_SOE_E_NAF3_PARAMS_GET_ERROR = -6147 +}; + +/* BCM crypto ASN.1 status codes. */ +/* Reserved range is from -7168 to -8191 */ +enum { + /* tag mismatch */ + BCM_CRYPTO_E_ASN1_TAG_MISMATCH = -7168, + + /* OID mismatch */ + BCM_CRYPTO_E_ASN1_OID_MISMATCH = -7169, + + /* Bad key type */ + BCM_CRYPTO_E_ASN1_BAD_KEY_TYPE = -7170, + + /* value length is invalid */ + BCM_CRYPTO_E_ASN1_INVALID_LENGTH = -7171, + + /* Invalid public key length */ + BCM_CRYPTO_E_ASN1_INVALID_PKLEN = -7172, + + /* Unsupported elliptic curve group */ + BCM_CRYPTO_E_ASN1_UNSUPPORTED_ECG = -7173 +}; + +#endif /* BCMUTILS_ERR_CODES */ + +#endif /* _bcmerror_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmeth.h b/bcmdhd.101.10.361.x/include/bcmeth.h new file mode 100755 index 0000000..f433437 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmeth.h @@ -0,0 +1,109 @@ +/* + * Broadcom Ethernet-type protocol definitions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +/* + * Broadcom Ethernet protocol defines + */ + +#ifndef _BCMETH_H_ +#define _BCMETH_H_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + +/* This marks the start of a packed structure section.
*/ +#include <packed_section_start.h> + +/* ETHER_TYPE_BRCM is defined in ethernet.h */ + +/* + * Following the 2-byte BRCM ether_type is a 16-bit BRCM subtype field + * in one of two formats: (only subtypes 32768-65535 are in use now) + * + * subtypes 0-32767: + * 8 bit subtype (0-127) + * 8 bit length in bytes (0-255) + * + * subtypes 32768-65535: + * 16 bit big-endian subtype + * 16 bit big-endian length in bytes (0-65535) + * + * length is the number of additional bytes beyond the 4 or 6 byte header + * + * Reserved values: + * 0 reserved + * 5-15 reserved for iLine protocol assignments + * 17-126 reserved, assignable + * 127 reserved + * 32768 reserved + * 32769-65534 reserved, assignable + * 65535 reserved + */ + +/* + * While adding the subtypes and their specific processing code make sure + * bcmeth_hdr_t is the first data structure in the user specific data structure definition + */ + +#define BCMILCP_SUBTYPE_RATE 1 +#define BCMILCP_SUBTYPE_LINK 2 +#define BCMILCP_SUBTYPE_CSA 3 +#define BCMILCP_SUBTYPE_LARQ 4 +#define BCMILCP_SUBTYPE_VENDOR 5 +#define BCMILCP_SUBTYPE_FLH 17 + +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 +#define BCMILCP_SUBTYPE_CERT 32770 +#define BCMILCP_SUBTYPE_SES 32771 + +#define BCMILCP_BCM_SUBTYPE_RESERVED 0 +#define BCMILCP_BCM_SUBTYPE_EVENT 1 +#define BCMILCP_BCM_SUBTYPE_SES 2 +/* + * The EAPOL type is not used anymore. Instead EAPOL messages are now embedded + * within BCMILCP_BCM_SUBTYPE_EVENT type messages + */ +/* #define BCMILCP_BCM_SUBTYPE_EAPOL 3 */ +#define BCMILCP_BCM_SUBTYPE_DPT 4 +#define BCMILCP_BCM_SUBTYPE_DNGLEVENT 5 + +#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8 +#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0 +#define BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD 2 + +/* These fields are stored in network order */ +typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr +{ + uint16 subtype; /* Vendor specific..32769 */ + uint16 length; + uint8 version; /* Version is 0 */ + uint8 oui[3]; /* Broadcom OUI */ + /* user specific Data */ + uint16 usr_subtype; +} BWL_POST_PACKED_STRUCT bcmeth_hdr_t; + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _BCMETH_H_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmevent.h b/bcmdhd.101.10.361.x/include/bcmevent.h new file mode 100755 index 0000000..e7876c7 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmevent.h @@ -0,0 +1,1617 @@ +/* + * Broadcom Event protocol definitions + * + * Dependencies: bcmeth.h + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software.
+ +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _BCMETH_H_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmevent.h b/bcmdhd.101.10.361.x/include/bcmevent.h new file mode 100755 index 0000000..e7876c7 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmevent.h @@ -0,0 +1,1617 @@ +/* + * Broadcom Event protocol definitions + * + * Dependencies: bcmeth.h + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + */ + +/* + * Broadcom Ethernet Events protocol defines + * + */ + +#ifndef _BCMEVENT_H_ +#define _BCMEVENT_H_ + +#include <typedefs.h> +/* #include <ethernet.h> -- TODO: req., excluded due to overwhelming coupling (break up ethernet.h) */ +#include <bcmeth.h> +#if defined(HEALTH_CHECK) || defined(DNGL_EVENT_SUPPORT) +#include <dnglevent.h> +#endif /* HEALTH_CHECK || DNGL_EVENT_SUPPORT */ + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + +#define BCM_EVENT_MSG_VERSION 2 /* wl_event_msg_t struct version */ +#define BCM_MSG_IFNAME_MAX 16 /* max length of interface name */ + +/* flags */ +#define WLC_EVENT_MSG_LINK 0x01 /* link is up */ +#define WLC_EVENT_MSG_FLUSHTXQ 0x02 /* flush tx queue on MIC error */ +#define WLC_EVENT_MSG_GROUP 0x04 /* group MIC error */ +#define WLC_EVENT_MSG_UNKBSS 0x08 /* unknown source bsscfg */ +#define WLC_EVENT_MSG_UNKIF 0x10 /* unknown source OS i/f */ + +/* these fields are stored in network order */ + +/* version 1 */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; /* see flags above */ + uint32 event_type; /* Message (see below) */ + uint32 status; /* Status code (see below) */ + uint32 reason; /* Reason code (if applicable) */ + uint32 auth_type; /* WLC_E_AUTH */ + uint32 datalen; /* data buf */ + struct ether_addr addr; /* Station address (if applicable) */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ +} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t; + +/* the current version */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; /* see flags above */ + uint32 event_type; /* Message (see below) */ + uint32 status; /* Status code (see below) */ + uint32 reason; /* Reason code (if applicable) */ + uint32 auth_type; /* WLC_E_AUTH */ + uint32 datalen; /* data buf */ + struct ether_addr addr; /* Station address (if applicable) */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ + uint8 ifidx; /* destination OS i/f index */ + uint8 bsscfgidx; /* source bsscfg index */ +} BWL_POST_PACKED_STRUCT wl_event_msg_t; + +/* used by driver msgs */ +typedef BWL_PRE_PACKED_STRUCT struct bcm_event { + struct ether_header eth; + bcmeth_hdr_t bcm_hdr; + wl_event_msg_t event; + /* data portion follows */ +} BWL_POST_PACKED_STRUCT bcm_event_t;
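+ +/* + * A minimal host-side sketch of unpacking a received bcm_event_t, assuming + * the frame was already verified to carry ETHER_TYPE_BRCM and the event + * subtype; the helper name is hypothetical (real drivers use + * is_wlc_event_frame(), declared further below). Fields arrive in network + * order, and wl_event_to_host_order() converts them in place. + */ +static int ex_unpack_bcm_event(const void *buf, uint buflen, wl_event_msg_t *out) +{ + const bcm_event_t *be = (const bcm_event_t *)buf; + + if (buflen < sizeof(*be)) + return -1; /* too short for eth + bcmeth + event headers */ + *out = be->event; + wl_event_to_host_order(out); + if (out->version != BCM_EVENT_MSG_VERSION && out->version != 1) + return -1; /* only the v1 and v2 layouts are defined here */ + return 0; +}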
+ +/* + * used by host event + * note: if additional event types are added, it should go with is_wlc_event_frame() as well. + */ +typedef union bcm_event_msg_u { + wl_event_msg_t event; +#if defined(HEALTH_CHECK) || defined(DNGL_EVENT_SUPPORT) + bcm_dngl_event_msg_t dngl_event; +#endif /* HEALTH_CHECK || DNGL_EVENT_SUPPORT */ + + /* add new event here */ +} bcm_event_msg_u_t; + +#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header)) + +/* Event messages */ +#define WLC_E_SET_SSID 0 /* indicates status of set SSID */ +#define WLC_E_JOIN 1 /* differentiates join IBSS from found (WLC_E_START) IBSS */ +#define WLC_E_START 2 /* STA started an IBSS or AP started a BSS */ +#define WLC_E_AUTH 3 /* 802.11 AUTH request */ +#define WLC_E_AUTH_IND 4 /* 802.11 AUTH indication */ +#define WLC_E_DEAUTH 5 /* 802.11 DEAUTH request */ +#define WLC_E_DEAUTH_IND 6 /* 802.11 DEAUTH indication */ +#define WLC_E_ASSOC 7 /* 802.11 ASSOC request */ +#define WLC_E_ASSOC_IND 8 /* 802.11 ASSOC indication */ +#define WLC_E_REASSOC 9 /* 802.11 REASSOC request */ +#define WLC_E_REASSOC_IND 10 /* 802.11 REASSOC indication */ +#define WLC_E_DISASSOC 11 /* 802.11 DISASSOC request */ +#define WLC_E_DISASSOC_IND 12 /* 802.11 DISASSOC indication */ +#define WLC_E_QUIET_START 13 /* 802.11h Quiet period started */ +#define WLC_E_QUIET_END 14 /* 802.11h Quiet period ended */ +#define WLC_E_BEACON_RX 15 /* BEACONS received/lost indication */ +#define WLC_E_LINK 16 /* generic link indication */ +#define WLC_E_MIC_ERROR 17 /* TKIP MIC error occurred */ +#define WLC_E_NDIS_LINK 18 /* NDIS style link indication */ +#define WLC_E_ROAM 19 /* roam complete: indicate status & reason */ +#define WLC_E_TXFAIL 20 /* change in dot11FailedCount (txfail) */ +#define WLC_E_PMKID_CACHE 21 /* WPA2 pmkid cache indication */ +#define WLC_E_RETROGRADE_TSF 22 /* current AP's TSF value went backward */ +#define WLC_E_PRUNE 23 /* AP was pruned from join list for reason */ +#define WLC_E_AUTOAUTH 24 /* report AutoAuth table entry match for join attempt */ +#define WLC_E_EAPOL_MSG 25 /* Event encapsulating an EAPOL message */ +#define WLC_E_SCAN_COMPLETE 26 /* Scan results are ready or scan was aborted */ +#define WLC_E_ADDTS_IND 27 /* indicate to host addts fail/success */ +#define WLC_E_DELTS_IND 28 /* indicate to host delts fail/success */ +#define WLC_E_BCNSENT_IND 29 /* indicate to host of beacon transmit */ +#define WLC_E_BCNRX_MSG 30 /* Send the received beacon up to the host */ +#define WLC_E_BCNLOST_MSG 31 /* indicate to host loss of beacon */ +#define WLC_E_ROAM_PREP 32 /* before attempting to roam association */ +#define WLC_E_PFN_NET_FOUND 33 /* PFN network found event */ +#define WLC_E_PFN_NET_LOST 34 /* PFN network lost event */ +#define WLC_E_RESET_COMPLETE 35 +#define WLC_E_JOIN_START 36 +#define WLC_E_ROAM_START 37 /* roam attempt started: indicate reason */ +#define WLC_E_ASSOC_START 38 +#define WLC_E_IBSS_ASSOC 39 +#define WLC_E_RADIO 40 +#define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */ + +#define WLC_E_PROBREQ_MSG 44 /* probe request received */ +#define WLC_E_SCAN_CONFIRM_IND 45 +#define WLC_E_PSK_SUP 46 /* WPA Handshake fail */ +#define WLC_E_COUNTRY_CODE_CHANGED 47 +#define WLC_E_EXCEEDED_MEDIUM_TIME 48 /* WMMAC exceeded medium time */ +#define WLC_E_ICV_ERROR 49 /* WEP ICV error occurred */ +#define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */ +#define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted frame */ +#define WLC_E_TRACE 52 +#define WLC_E_IF 54 /* I/F change (for dongle host notification) */ +#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen
state expires */ +#define WLC_E_RSSI 56 /* indicate RSSI change based on configured levels */ +#define WLC_E_PFN_BEST_BATCHING 57 /* PFN best network batching event */ +#define WLC_E_EXTLOG_MSG 58 +#define WLC_E_ACTION_FRAME 59 /* Action frame Rx */ +#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */ +#define WLC_E_PRE_ASSOC_IND 61 /* assoc request received */ +#define WLC_E_PRE_REASSOC_IND 62 /* re-assoc request received */ +#ifdef CSI_SUPPORT +#define WLC_E_CSI 63 +#else +#define WLC_E_CHANNEL_ADOPTED 63 /* channel adopted (obsoleted) */ +#endif /* CSI_SUPPORT */ +#define WLC_E_AP_STARTED 64 /* AP started */ +#define WLC_E_DFS_AP_STOP 65 /* AP stopped due to DFS */ +#define WLC_E_DFS_AP_RESUME 66 /* AP resumed due to DFS */ +#define WLC_E_WAI_STA_EVENT 67 /* WAI stations event */ +#define WLC_E_WAI_MSG 68 /* event encapsulating a WAI message */ +#define WLC_E_ESCAN_RESULT 69 /* escan result event */ +#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70 /* action frame off channel complete */ +#define WLC_E_PROBRESP_MSG 71 /* probe response received */ +#define WLC_E_P2P_PROBREQ_MSG 72 /* P2P Probe request received */ +#define WLC_E_DCS_REQUEST 73 +/* will enable this after proptxstatus code is merged back to ToT */ +#define WLC_E_FIFO_CREDIT_MAP 74 /* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */ +#define WLC_E_ACTION_FRAME_RX 75 /* Received action frame event WITH + * wl_event_rx_frame_data_t header + */ +#define WLC_E_WAKE_EVENT 76 /* Wake Event timer fired, used for wake WLAN test mode */ +#define WLC_E_RM_COMPLETE 77 /* Radio measurement complete */ +#define WLC_E_HTSFSYNC 78 /* Synchronize TSF with the host */ +#define WLC_E_OVERLAY_REQ 79 /* request an overlay IOCTL/iovar from the host */ +#define WLC_E_CSA_COMPLETE_IND 80 /* 802.11 CHANNEL SWITCH ACTION completed */ +#define WLC_E_EXCESS_PM_WAKE_EVENT 81 /* excess PM Wake Event to inform host */ +#define WLC_E_PFN_SCAN_NONE 82 /* no PFN networks around */ +/* PFN BSSID network found event, conflict/share with WLC_E_PFN_SCAN_NONE */ +#define WLC_E_PFN_BSSID_NET_FOUND 82 +#define WLC_E_PFN_SCAN_ALLGONE 83 /* last found PFN network gets lost */ +/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */ +#define WLC_E_PFN_BSSID_NET_LOST 83 +#define WLC_E_GTK_PLUMBED 84 +#define WLC_E_ASSOC_IND_NDIS 85 /* 802.11 ASSOC indication for NDIS only */ +#define WLC_E_REASSOC_IND_NDIS 86 /* 802.11 REASSOC indication for NDIS only */ +#define WLC_E_ASSOC_REQ_IE 87 +#define WLC_E_ASSOC_RESP_IE 88 +#define WLC_E_ASSOC_RECREATED 89 /* association recreated on resume */ +#define WLC_E_ACTION_FRAME_RX_NDIS 90 /* rx action frame event for NDIS only */ +#define WLC_E_AUTH_REQ 91 /* authentication request received */ +#define WLC_E_TDLS_PEER_EVENT 92 /* discovered peer, connected/disconnected peer */ +#define WLC_E_SPEEDY_RECREATE_FAIL 93 /* fast assoc recreation failed */ +#define WLC_E_NATIVE 94 /* port-specific event and payload (e.g.
NDIS) */ +#define WLC_E_PKTDELAY_IND 95 /* event for tx pkt delay suddenly jumping */ + +#ifdef WLAWDL +#define WLC_E_AWDL_AW 96 /* AWDL AW period starts */ +#define WLC_E_AWDL_ROLE 97 /* AWDL Master/Slave/NE master role event */ +#define WLC_E_AWDL_EVENT 98 /* Generic AWDL event */ +#endif /* WLAWDL */ + +#define WLC_E_PSTA_PRIMARY_INTF_IND 99 /* psta primary interface indication */ +#define WLC_E_NAN 100 /* NAN event - Reserved for future */ +#define WLC_E_BEACON_FRAME_RX 101 +#define WLC_E_SERVICE_FOUND 102 /* desired service found */ +#define WLC_E_GAS_FRAGMENT_RX 103 /* GAS fragment received */ +#define WLC_E_GAS_COMPLETE 104 /* GAS sessions all complete */ +#define WLC_E_P2PO_ADD_DEVICE 105 /* New device found by p2p offload */ +#define WLC_E_P2PO_DEL_DEVICE 106 /* device has been removed by p2p offload */ +#define WLC_E_WNM_STA_SLEEP 107 /* WNM event to notify STA enter sleep mode */ +#define WLC_E_TXFAIL_THRESH 108 /* Indication of MAC tx failures (exhaustion of + * 802.11 retries) exceeding threshold(s) + */ +#define WLC_E_PROXD 109 /* Proximity Detection event */ +#define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */ +#define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, reusing event 110 */ +#define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */ +#define WLC_E_MIMO_PWR_SAVE 115 /* Inform host MIMO PWR SAVE learning events */ +#define WLC_E_LEAKY_AP_STATS 116 /* Inform host leaky Ap stats events */ +#define WLC_E_ALLOW_CREDIT_BORROW 117 /* Allow or disallow wlfc credit borrowing in DHD */ +#define WLC_E_MSCH 120 /* Multiple channel scheduler event */ +#define WLC_E_CSA_START_IND 121 +#define WLC_E_CSA_DONE_IND 122 +#define WLC_E_CSA_FAILURE_IND 123 +#define WLC_E_CCA_CHAN_QUAL 124 /* CCA based channel quality report */ +#define WLC_E_BSSID 125 /* to report change in BSSID while roaming */ +#define WLC_E_TX_STAT_ERROR 126 /* tx error indication */ +#define WLC_E_BCMC_CREDIT_SUPPORT 127 /* credit check for BCMC supported */ +#define WLC_E_PEER_TIMEOUT 128 /* silently drop a STA because of inactivity */ +#define WLC_E_BT_WIFI_HANDOVER_REQ 130 /* Handover Request Initiated */ +#define WLC_E_SPW_TXINHIBIT 131 /* Southpaw TxInhibit notification */ +#define WLC_E_FBT_AUTH_REQ_IND 132 /* FBT Authentication Request Indication */ +#define WLC_E_RSSI_LQM 133 /* Enhancement addition for WLC_E_RSSI */ +#define WLC_E_PFN_GSCAN_FULL_RESULT 134 /* Full probe/beacon (IEs etc) results */ +#define WLC_E_PFN_SWC 135 /* Significant change in rssi of bssids being tracked */ +#define WLC_E_AUTHORIZED 136 /* a STA has been authorized for traffic */ +#define WLC_E_PROBREQ_MSG_RX 137 /* probe req with wl_event_rx_frame_data_t header */ +#define WLC_E_PFN_SCAN_COMPLETE 138 /* PFN completed scan of network list */ +#define WLC_E_RMC_EVENT 139 /* RMC Event */ +#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */ +#define WLC_E_RRM 141 /* RRM Event */ +#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */ +#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */ +#define WLC_E_ULP 146 /* ULP entered indication */ +#define WLC_E_MACDBG 147 /* Ucode debugging event */ +#define WLC_E_RESERVED 148 /* reserved */ +#define WLC_E_PRE_ASSOC_RSEP_IND 149 /* assoc resp received */ +#define WLC_E_PSK_AUTH 150 /* PSK AUTH WPA2-PSK 4 WAY Handshake failure */ +#define WLC_E_TKO 151 /* TCP keepalive offload */ +#define WLC_E_SDB_TRANSITION 152 /* SDB mode-switch event */ +#define WLC_E_NATOE_NFCT 153 /* natoe event */ +#define WLC_E_TEMP_THROTTLE 154 /* Temperature throttling control event */ +#define
WLC_E_LINK_QUALITY 155 /* Link quality measurement complete */ +#define WLC_E_BSSTRANS_RESP 156 /* BSS Transition Response received */ +#define WLC_E_TWT_SETUP 157 /* TWT Setup Complete event */ +#define WLC_E_HE_TWT_SETUP 157 /* TODO:Remove after merging TWT changes to trunk */ +#define WLC_E_NAN_CRITICAL 158 /* NAN Critical Event */ +#define WLC_E_NAN_NON_CRITICAL 159 /* NAN Non-Critical Event */ +#define WLC_E_RADAR_DETECTED 160 /* Radar Detected event */ +#define WLC_E_RANGING_EVENT 161 /* Ranging event */ +#define WLC_E_INVALID_IE 162 /* Received invalid IE */ +#define WLC_E_MODE_SWITCH 163 /* Mode switch event */ +#define WLC_E_PKT_FILTER 164 /* Packet filter event */ +#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing tx/rxchain */ +#define WLC_E_FBT 166 /* FBT event */ +#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */ +#define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN Backoff event */ +#define WLC_E_AGGR_EVENT 169 /* Aggregated event */ +#define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */ +#define WLC_E_SCAN_START 172 /* Deprecated */ +#define WLC_E_SCAN 172 /* Scan event */ +#define WLC_E_MBO 173 /* MBO event */ +#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */ +#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */ +#define WLC_E_ADPS 176 /* ADPS event */ +#define WLC_E_SLOTTED_BSS_PEER_OP 177 /* Per peer SCB delete */ +#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */ +#define WLC_E_ONBODY_STATUS_CHANGE 180 /* Indication of onbody status change */ +#define WLC_E_BCNRECV_ABORTED 181 /* Fake AP bcnrecv aborted roam event */ +#define WLC_E_PMK_INFO 182 /* PMK,PMKID information event */ +#define WLC_E_BSSTRANS 183 /* BSS Transition request / Response */ +#define WLC_E_WA_LQM 184 /* link quality monitoring */ +#define WLC_E_ACTION_FRAME_OFF_CHAN_DWELL_COMPLETE 185 /* action frame off channel + * dwell time complete + */ +#define WLC_E_WSEC 186 /* wsec keymgmt event */ +#define WLC_E_OBSS_DETECTION 187 /* OBSS HW event */ +#define WLC_E_AP_BCN_MUTE 188 /* Beacon mute mitigation event */ +#define WLC_E_SC_CHAN_QUAL 189 /* Event to indicate the SC channel quality */ +#define WLC_E_DYNSAR 190 /* Dynamic SAR indicate optimize on/off */ +#define WLC_E_ROAM_CACHE_UPDATE 191 /* Roam cache update indication */ +#define WLC_E_AP_BCN_DRIFT 192 /* Beacon Drift event */ +#define WLC_E_PFN_SCAN_ALLGONE_EXT 193 /* last found PFN network gets lost. */ +#define WLC_E_AUTH_START 194 /* notify upper layer to start auth */ +#define WLC_E_TWT_TEARDOWN 195 /* TWT Teardown Complete Event */ +#define WLC_E_TWT_INFO_FRM 196 /* TWT Info Event Notification */ +#define WLC_E_LAST 197 /* highest val + 1 for range checking */ +#if (WLC_E_LAST > 197) +#error "WLC_E_LAST: Invalid value for last event; must be <= 197." +#endif /* WLC_E_LAST */
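+ +/* + * A small usage sketch for the range guard above, assuming host byte order: + * WLC_E_LAST is "highest val + 1", so any event_type at or beyond it is + * unknown to this header. The helper name is hypothetical; + * bcmevent_get_name() is declared just below. + */ +static const char *ex_event_name(uint32 event_type) +{ + if (event_type >= WLC_E_LAST) + return "unknown"; /* future/out-of-range event */ + return bcmevent_get_name(event_type); +}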
+ +/* define an API for getting the string name of an event */ +extern const char *bcmevent_get_name(uint event_type); + +/* conversion between host and network order for events */ +extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern void wl_event_to_network_order(wl_event_msg_t * evt); + +/* validate if the event is proper and if valid copy event header to event */ +extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype, + bcm_event_msg_u_t *out_event); + +#define WLC_ROAM_EVENT_V1 0x1u + +/* tlv ids for roam event */ +#define WLC_ROAM_NO_NETWORKS_TLV_ID 1 + +/* No Networks reasons */ +#define WLC_E_REASON_NO_NETWORKS 0x0u /* value 0 means no networks found */ +#define WLC_E_REASON_NO_NETWORKS_BY_SCORE 0x01u /* bit 0x01 indicates filtered by score */ + +/* bit mask field indicating fail reason */ +typedef uint32 wlc_roam_fail_reason_t; + +typedef struct wlc_roam_event_header { + uint16 version; /* version */ + uint16 length; /* total length */ +} wlc_roam_event_header_t; + +typedef struct wlc_roam_event { + wlc_roam_event_header_t header; + uint8 xtlvs[]; /* data */ +} wl_roam_event_t; + +#define WLC_ROAM_PREP_EVENT_V1 0x1u +#define WLC_ROAM_START_EVENT_V1 0x1u + +typedef struct wlc_roam_start_event { + uint16 version; /* version */ + uint16 length; /* total length */ + int16 rssi; /* current bss rssi */ + int8 pad[2]; /* padding */ + uint8 xtlvs[]; /* optional xtlvs */ +} wlc_roam_start_event_t; + +typedef struct wlc_roam_prep_event { + uint16 version; /* version */ + uint16 length; /* total length */ + int16 rssi; /* target bss rssi */ + int8 pad[2]; /* padding */ + uint8 xtlvs[]; /* optional xtlvs */ +} wlc_roam_prep_event_t; + +#define WLC_ROAM_CACHE_UPDATE_EVENT_V1 0x1u + +/* WLC_E_ROAM_CACHE_UPDATE event data prototype */ +typedef struct wlc_roam_cache_update_event { + uint16 version; /* version */ + uint16 length; /* total length */ + uint8 xtlvs[]; /* optional xtlvs */ +} wlc_roam_cache_update_event_t; + +typedef enum wlc_roam_cache_update_reason { + WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE = 1, /* new roam cache */ + WLC_ROAM_CACHE_UPDATE_JOIN = 2, /* join bss */ + WLC_ROAM_CACHE_UPDATE_RSSI_DELTA = 3, /* rssi delta */ + WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA = 4, /* motion rssi delta */ + WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS = 5, /* channel missed */ + WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN = 6, /* start split scan */ + WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN = 7, /* start full scan */ + WLC_ROAM_CACHE_UPDATE_INIT_ASSOC = 8, /* init before assoc */ + WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED = 9, /* full scan failed */ + WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND = 10, /* no ap found */ + WLC_ROAM_CACHE_UPDATE_MISSING_AP = 11, /* cached ap not found */ + WLC_ROAM_CACHE_UPDATE_START_PART_SCAN = 12, /* RCC */ + WLC_ROAM_CACHE_UPDATE_RCC_MODE = 13, /* RCC */ + WLC_ROAM_CACHE_UPDATE_RCC_CHANNELS = 14, /* RCC */ + WLC_ROAM_CACHE_UPDATE_START_LP_FULL_SCAN = 15 /* start low power full scan */ +} wlc_roam_cache_update_reason_t;
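+ +/* + * A minimal validity sketch for the roam event payloads above, assuming + * 'len' is the received event datalen; the helper name is hypothetical. + * The version and self-described total length are checked before any + * xtlvs are walked. + */ +static bool ex_roam_event_ok(const wl_roam_event_t *ev, uint len) +{ + return (len >= sizeof(ev->header)) && + (ev->header.version == WLC_ROAM_EVENT_V1) && + (ev->header.length <= len); +}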
+ +/* + * Please do not insert/delete events in the middle causing renumbering. + * It is a problem for host-device compatibility, especially with ROMmed chips. + */ + +/* Translate between internal and exported status codes */ +/* Event status codes */ +#define WLC_E_STATUS_SUCCESS 0 /* operation was successful */ +#define WLC_E_STATUS_FAIL 1 /* operation failed */ +#define WLC_E_STATUS_TIMEOUT 2 /* operation timed out */ +#define WLC_E_STATUS_NO_NETWORKS 3 /* failed due to no matching network found */ +#define WLC_E_STATUS_ABORT 4 /* operation was aborted */ +#define WLC_E_STATUS_NO_ACK 5 /* protocol failure: packet not ack'd */ +#define WLC_E_STATUS_UNSOLICITED 6 /* AUTH or ASSOC packet was unsolicited */ +#define WLC_E_STATUS_ATTEMPT 7 /* attempt to assoc to an auto auth configuration */ +#define WLC_E_STATUS_PARTIAL 8 /* scan results are incomplete */ +#define WLC_E_STATUS_NEWSCAN 9 /* scan aborted by another scan */ +#define WLC_E_STATUS_NEWASSOC 10 /* scan aborted due to assoc in progress */ +#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */ +#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */ +#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */ +#ifdef BCMCCX +#define WLC_E_STATUS_CCXFASTRM 14 /* scan aborted due to CCX fast roam */ +#endif /* BCMCCX */ +#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */ +#define WLC_E_STATUS_ERROR 16 /* request failed due to error */ +#define WLC_E_STATUS_SLOTTED_PEER_ADD 17 /* Slotted scb for peer addition status */ +#define WLC_E_STATUS_SLOTTED_PEER_DEL 18 /* Slotted scb for peer deletion status */ +#define WLC_E_STATUS_RXBCN 19 /* Rx Beacon event for FAKEAP feature */ +#define WLC_E_STATUS_RXBCN_ABORT 20 /* Rx Beacon abort event for FAKEAP feature */ +#define WLC_E_STATUS_LOWPOWER_ON_LOWSPAN 21 /* LOWPOWER scan request during LOWSPAN */ +#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables.
*/ + +/* 4-way handshake event type */ +#define WLC_E_PSK_AUTH_SUB_EAPOL_START 1 /* EAPOL start */ +#define WLC_E_PSK_AUTH_SUB_EAPOL_DONE 2 /* EAPOL end */ +/* GTK event type */ +#define WLC_E_PSK_AUTH_SUB_GTK_DONE 3 /* GTK end */ + +/* 4-way handshake event status code */ +#define WLC_E_STATUS_PSK_AUTH_WPA_TIMOUT 1 /* operation timed out */ +#define WLC_E_STATUS_PSK_AUTH_MIC_WPA_ERR 2 /* MIC error */ +#define WLC_E_STATUS_PSK_AUTH_IE_MISMATCH_ERR 3 /* IE Mismatch error */ +#define WLC_E_STATUS_PSK_AUTH_REPLAY_COUNT_ERR 4 +#define WLC_E_STATUS_PSK_AUTH_PEER_BLACKISTED 5 /* Blacklisted peer */ +#define WLC_E_STATUS_PSK_AUTH_GTK_REKEY_FAIL 6 /* GTK event status code */ + +/* SDB transition status code */ +#define WLC_E_STATUS_SDB_START 1 +#define WLC_E_STATUS_SDB_COMPLETE 2 +/* Slice-swap status code */ +#define WLC_E_STATUS_SLICE_SWAP_START 3 +#define WLC_E_STATUS_SLICE_SWAP_COMPLETE 4 + +/* SDB transition reason code */ +#define WLC_E_REASON_HOST_DIRECT 0 +#define WLC_E_REASON_INFRA_ASSOC 1 +#define WLC_E_REASON_INFRA_ROAM 2 +#define WLC_E_REASON_INFRA_DISASSOC 3 +#define WLC_E_REASON_NO_MODE_CHANGE_NEEDED 4 + +#ifdef WLAWDL +#define WLC_E_REASON_AWDL_ENABLE 5 +#define WLC_E_REASON_AWDL_DISABLE 6 +#endif /* WLAWDL */ + +/* TX STAT ERROR REASON CODE */ +#define WLC_E_REASON_TXBACKOFF_NOT_DECREMENTED 0x1 + +/* WLC_E_SDB_TRANSITION event data */ +#define WL_MAX_BSSCFG 4 +#define WL_EVENT_SDB_TRANSITION_VER 1 +typedef struct wl_event_sdb_data { + uint8 wlunit; /* Core index */ + uint8 is_iftype; /* Interface Type (Station, SoftAP, P2P_GO, P2P_GC) */ + uint16 chanspec; /* Interface Channel/Chanspec */ + char ssidbuf[(4 * 32) + 1]; /* SSID_FMT_BUF_LEN: ((4 * DOT11_MAX_SSID_LEN) + 1) */ +} wl_event_sdb_data_t; + +typedef struct wl_event_sdb_trans { + uint8 version; /* Event Data Version */ + uint8 rsdb_mode; + uint8 enable_bsscfg; + uint8 reserved; + struct wl_event_sdb_data values[WL_MAX_BSSCFG]; +} wl_event_sdb_trans_t; + +/* reason codes for WLC_E_GTK_KEYROT_NO_CHANSW event */ +#define WLC_E_GTKKEYROT_SCANDELAY 0 /* Delay scan while gtk in progress */ + +#ifdef WLAWDL +#define WLC_E_GTKKEYROT_SKIPCHANSW_AWDL 1 /* Avoid chansw by awdl while gtk in progress */ +#endif /* WLAWDL */ + +#define WLC_E_GTKKEYROT_SKIPCHANSW_P2P 2 /* Avoid chansw by p2p while gtk in progress */ + +/* roam reason codes */ +#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */ +#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */ +#define WLC_E_REASON_DEAUTH 2 /* roamed due to DEAUTH indication */ +#define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */ +#define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */ + +/* Roam codes (5-7) used primarily by CCX */ +#define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */ +#define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */ +#define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */ +#define WLC_E_REASON_BETTER_AP 8 /* roamed due to finding better AP */ +#define WLC_E_REASON_MINTXRATE 9 /* roamed because at mintxrate for too long */ +#define WLC_E_REASON_TXFAIL 10 /* We can hear AP, but AP can't hear us */ +#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */ +#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */ +#define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */ +#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */ +#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */ +#define
WLC_E_REASON_SILENT_ROAM 16 /* roamed due to Silent roam */ +#define WLC_E_REASON_INACTIVITY 17 /* full roam scan due to inactivity */ +#define WLC_E_REASON_ROAM_SCAN_TIMEOUT 18 /* roam scan timer timeout */ +#define WLC_E_REASON_REASSOC 19 /* roamed due to reassoc iovar */ +#define WLC_E_REASON_LAST 20 /* NOTE: increment this as you add reasons above */ + +/* prune reason codes */ +#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */ +#define WLC_E_PRUNE_BCAST_BSSID 2 /* AP uses a broadcast BSSID */ +#define WLC_E_PRUNE_MAC_DENY 3 /* STA's MAC addr is in AP's MAC deny list */ +#define WLC_E_PRUNE_MAC_NA 4 /* STA's MAC addr is not in AP's MAC allow list */ +#define WLC_E_PRUNE_REG_PASSV 5 /* AP not allowed due to regulatory restriction */ +#define WLC_E_PRUNE_SPCT_MGMT 6 /* AP does not support STA locale spectrum mgmt */ +#define WLC_E_PRUNE_RADAR 7 /* AP is on a radar channel of STA locale */ +#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */ +#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */ +#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */ +#ifdef BCMCCX +#define WLC_E_PRUNE_CCXFAST_PREVAP 11 /* CCX FAST ROAM: prune previous AP */ +#endif /* def BCMCCX */ +#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */ +#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */ +#ifdef BCMCCX +#define WLC_E_PRUNE_CCXFAST_DROAM 14 /* CCX FAST ROAM: prune unqualified AP */ +#endif /* def BCMCCX */ +#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */ +#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */ +#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */ +#ifdef BCMCCX +#define WLC_E_PRUNE_AP_BLOCKED 18 /* prune blocked AP */ +#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */ +#endif /* BCMCCX */ +#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */ +#define WLC_E_PRUNE_ASSOC_RETRY_DELAY 21 /* MBO assoc retry delay */ +#define WLC_E_PRUNE_RSSI_ASSOC_REJ 22 /* OCE RSSI-based assoc rejection */ +#define WLC_E_PRUNE_MAC_AVOID 23 /* AP's MAC addr is in STA's MAC avoid list */ +#define WLC_E_PRUNE_TRANSITION_DISABLE 24 /* AP's Transition Disable Policy */ + +/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */ +#define WLC_E_SUP_OTHER 0 /* Other reason */ +#define WLC_E_SUP_DECRYPT_KEY_DATA 1 /* Decryption of key data failed */ +#define WLC_E_SUP_BAD_UCAST_WEP128 2 /* Illegal use of ucast WEP128 */ +#define WLC_E_SUP_BAD_UCAST_WEP40 3 /* Illegal use of ucast WEP40 */ +#define WLC_E_SUP_UNSUP_KEY_LEN 4 /* Unsupported key length */ +#define WLC_E_SUP_PW_KEY_CIPHER 5 /* Unicast cipher mismatch in pairwise key */ +#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 /* WPA IE contains > 1 RSN IE in key msg 3 */ +#define WLC_E_SUP_MSG3_IE_MISMATCH 7 /* WPA IE mismatch in key message 3 */ +#define WLC_E_SUP_NO_INSTALL_FLAG 8 /* INSTALL flag unset in 4-way msg */ +#define WLC_E_SUP_MSG3_NO_GTK 9 /* encapsulated GTK missing from msg 3 */ +#define WLC_E_SUP_GRP_KEY_CIPHER 10 /* Multicast cipher mismatch in group key */ +#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 /* encapsulated GTK missing from group msg 1 */ +#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 /* GTK decrypt failure */ +#define WLC_E_SUP_SEND_FAIL 13 /* message send failure */ +#define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */ +#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */ +#define WLC_E_SUP_WPA_PSK_M1_TMO 16 /* WPA PSK 4-way handshake 
M1 timeout */ +#define WLC_E_SUP_WPA_PSK_M3_TMO 17 /* WPA PSK 4-way handshake M3 timeout */ +#define WLC_E_SUP_GTK_UPDATE_FAIL 18 /* GTK update failure */ +#define WLC_E_SUP_TK_UPDATE_FAIL 19 /* TK update failure */ +#define WLC_E_SUP_KEY_INSTALL_FAIL 20 /* Buffered key install failure */ +#define WLC_E_SUP_PTK_UPDATE 21 /* PTK update */ +#define WLC_E_SUP_MSG1_PMKID_MISMATCH 22 /* MSG1 PMKID not matched to PMKSA cache list */ + +/* event msg for WLC_E_SUP_PTK_UPDATE */ +typedef struct wlc_sup_ptk_update { + uint16 version; /* 0x0001 */ + uint16 length; /* length of data that follows */ + uint32 tsf_low; /* tsf at which ptk updated by internal supplicant */ + uint32 tsf_high; + uint8 key_id; /* always 0 for PTK update */ + uint8 tid; /* tid for the PN below - PTK refresh is per key */ + uint16 pn_low; + uint32 pn_high; /* local highest PN of any tid of the key when M4 was sent */ +} wlc_sup_ptk_update_t; + +/* sub event of WLC_E_WSEC */ +typedef enum { + WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR = 0x01 +} wl_wsec_event_type_t; + +/* sub event msg - WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR */ +struct wlc_wsec_ptk_pn_sync_error_v1 { + uint32 tsf_low; /* tsf at which PN sync error happened */ + uint32 tsf_high; + uint8 key_id; /* always 0 for PTK update */ + uint8 tid; /* tid for the PN below - PTK refresh is per key */ + uint16 PAD1; + uint16 rx_seqn; /* d11 seq number */ + uint16 pn_low; + uint32 pn_high; /* local PN window start for the tid */ + uint16 key_idx; /* key idx in the keymgmt */ + uint16 rx_pn_low; + uint32 rx_pn_high; /* Rx PN window start for the tid */ + uint32 span_time; /* time elapsed since replay */ + uint32 span_pkts; /* pkt count since replay */ +}; + +typedef struct wlc_wsec_ptk_pn_sync_error_v1 wlc_wsec_ptk_pn_sync_error_t; + +/* WLC_E_WSEC event msg */ +typedef struct wlc_wsec_event { + uint16 version; /* 0x0001 */ + uint16 length; /* length of data that follows */ + uint16 type; /* wl_wsec_event_type_t */ + uint16 PAD1; + union { + wlc_wsec_ptk_pn_sync_error_t pn_sync_err; + } data; +} wlc_wsec_event_t; + +/* Ucode reason codes carried in the WLC_E_MACDBG event */ +#define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */ +#define WLC_E_MACDBG_LIST_PSMX 1 /* Dump list update for PSMx registers */ +#define WLC_E_MACDBG_REGALL 2 /* Dump all registers */ + +/* Event data for events that include frames received over the air */ +/* WLC_E_PROBRESP_MSG + * WLC_E_P2P_PROBREQ_MSG + * WLC_E_ACTION_FRAME_RX + */ + +#ifdef WLAWDL +#define WLC_E_AWDL_SCAN_START 1 /* Scan start indication to host */ +#define WLC_E_AWDL_SCAN_DONE 0 /* Scan Done indication to host */ +#endif /* WLAWDL */ + +#define MAX_PHY_CORE_NUM 4u + +#define BCM_RX_FRAME_DATA_VERSION_2 2u + +typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v2 { + uint16 version; + uint16 len; + uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */ + uint16 pad; + int32 rssi; + uint32 mactime; + uint32 rate; + int8 per_core_rssi[MAX_PHY_CORE_NUM]; +} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v2_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v1 { + uint16 version; + uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */ + int32 rssi; + uint32 mactime; + uint32 rate; +} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v1_t; + +#define BCM_RX_FRAME_DATA_VERSION_1 1u + +#ifndef WL_EVENT_RX_FRAME_DATA_ALIAS +#define BCM_RX_FRAME_DATA_VERSION BCM_RX_FRAME_DATA_VERSION_1 +typedef wl_event_rx_frame_data_v1_t wl_event_rx_frame_data_t; +#endif
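+ +/* + * A minimal sketch for telling the two rx frame data layouts apart, assuming + * the same network byte order as the event header fields above; the helper + * name is hypothetical. Both versions begin with a 16-bit version field, so + * it can be peeked before choosing the v1 or v2 structure. + */ +static uint16 ex_rx_frame_data_version(const void *data) +{ + const uint8 *p = (const uint8 *)data; + return (uint16)(((uint16)p[0] << 8) | p[1]); /* big-endian peek */ +}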
+ +/* WLC_E_IF event data */ +typedef struct wl_event_data_if { + uint8 ifidx; /* RTE virtual device index (for dongle) */ + uint8 opcode; /* see I/F opcode */ + uint8 reserved; /* bit mask (WLC_E_IF_FLAGS_XXX ) */ + uint8 bssidx; /* bsscfg index */ + uint8 role; /* see I/F role */ +} wl_event_data_if_t; + +/* WLC_E_NATOE event data */ +typedef struct wl_event_data_natoe { + uint32 natoe_active; + uint32 sta_ip; + uint16 start_port; + uint16 end_port; +} wl_event_data_natoe_t; + +/* opcode in WLC_E_IF event */ +#define WLC_E_IF_ADD 1 /* bsscfg add */ +#define WLC_E_IF_DEL 2 /* bsscfg delete */ +#define WLC_E_IF_CHANGE 3 /* bsscfg role change */ + +/* I/F role code in WLC_E_IF event */ +#define WLC_E_IF_ROLE_STA 0 /* Infra STA */ +#define WLC_E_IF_ROLE_AP 1 /* Access Point */ +#define WLC_E_IF_ROLE_WDS 2 /* WDS link */ +#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ +#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ + +#ifdef WLAWDL +#define WLC_E_IF_ROLE_AWDL 7 /* AWDL */ +#endif /* WLAWDL */ + +#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ +#define WLC_E_IF_ROLE_NAN 9 /* NAN */ + +#define WLC_E_IF_ROLE_MESH 10u /* identifies the role as MESH */ + +/* WLC_E_RSSI event data */ +typedef struct wl_event_data_rssi { + int32 rssi; + int32 snr; + int32 noise; +} wl_event_data_rssi_t; + +#define WL_EVENT_WA_LQM_VER 0 /* initial version */ + +#define WL_EVENT_WA_LQM_BASIC 0 /* event sub-types */ +typedef struct { /* payload of subevent in xtlv */ + int32 rssi; + int32 snr; + uint32 tx_rate; + uint32 rx_rate; +} wl_event_wa_lqm_basic_t; + +typedef struct wl_event_wa_lqm { + uint16 ver; /* version */ + uint16 len; /* total length of structure */ + uint8 subevent[]; /* sub-event data in bcm_xtlv_t format */ +} wl_event_wa_lqm_t; + +/* WLC_E_IF flag */ +#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */ + +/* Reason codes for LINK */ +#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */ +#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ +#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ +#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ +#define WLC_E_LINK_ASSOC_FAIL 5 /* Link down due to assoc to new AP during roam */ +#define WLC_E_LINK_REASSOC_ROAM_FAIL 6 /* Link down due to reassoc roaming failed */ +#define WLC_E_LINK_LOWRSSI_ROAM_FAIL 7 /* Link down due to Low rssi roaming failed */ +#define WLC_E_LINK_NO_FIRST_BCN_RX 8 /* Link down due to 1st beacon rx failure */ + +/* WLC_E_NDIS_LINK event data */ +typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms { + struct ether_addr peer_mac; /* 6 bytes */ + uint16 chanspec; /* 2 bytes */ + uint32 link_speed; /* current datarate in units of 500 Kbit/s */ + uint32 max_link_speed; /* max possible datarate for link in units of 500 Kbit/s */ + int32 rssi; /* average rssi */ +} BWL_POST_PACKED_STRUCT ndis_link_parms_t; + +/* reason codes for WLC_E_OVERLAY_REQ event */ +#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */ +#define WLC_E_OVL_UPDATE_IND 1 /* device indication of host overlay update */ + +/* reason codes for WLC_E_TDLS_PEER_EVENT event */ +#define WLC_E_TDLS_PEER_DISCOVERED 0 /* peer is ready to establish TDLS */ +#define WLC_E_TDLS_PEER_CONNECTED 1 +#define WLC_E_TDLS_PEER_DISCONNECTED 2 + +/* reason codes for WLC_E_RMC_EVENT event */ +#define WLC_E_REASON_RMC_NONE 0 +#define WLC_E_REASON_RMC_AR_LOST 1 +#define WLC_E_REASON_RMC_AR_NO_ACK 2 + +#ifdef WLTDLS +/* TDLS Action Category code */ +#define TDLS_AF_CATEGORY 12 +/* Wi-Fi Display (WFD)
Vendor Specific Category */ +/* used for WFD Tunneled Probe Request and Response */ +#define TDLS_VENDOR_SPECIFIC 127 +/* TDLS Action Field Values */ +#define TDLS_ACTION_SETUP_REQ 0 +#define TDLS_ACTION_SETUP_RESP 1 +#define TDLS_ACTION_SETUP_CONFIRM 2 +#define TDLS_ACTION_TEARDOWN 3 +#define WLAN_TDLS_SET_PROBE_WFD_IE 11 +#define WLAN_TDLS_SET_SETUP_WFD_IE 12 +#define WLAN_TDLS_SET_WFD_ENABLED 13 +#define WLAN_TDLS_SET_WFD_DISABLED 14 +#endif + +/* WLC_E_RANGING_EVENT subtypes */ +#define WLC_E_RANGING_RESULTS 0 + +#define PHY_CAL_EVT_VERSION 1 +typedef struct wlc_phy_cal_info { + uint16 version; /* structure version */ + uint16 length; /* length of the rest of the structure */ + uint16 chanspec; + uint8 start; + uint8 phase; + int16 temp; + uint8 reason; + uint8 slice; +} wlc_phy_cal_info_t; + +#ifdef WLAWDL +/* WLC_E_AWDL_EVENT subtypes */ +#define WLC_E_AWDL_SCAN_STATUS 0 +#define WLC_E_AWDL_RX_ACT_FRAME 1 +#define WLC_E_AWDL_RX_PRB_RESP 2 +#define WLC_E_AWDL_PHYCAL_STATUS 3 +#define WLC_E_AWDL_WOWL_NULLPKT 4 +#define WLC_E_AWDL_OOB_AF_STATUS 5 +/* WLC_E_AWDL_RANGING_RESULTS will be removed and only WLC_E_AWDL_UNUSED will be here + * Keeping both of them to avoid compilation error on trunk + * It will be removed after wlc_ranging merge from IGUANA + */ +#define WLC_E_AWDL_RANGING_RESULTS 6 +#define WLC_E_AWDL_UNUSED 6 +#define WLC_E_AWDL_SUB_PEER_STATE 7 +#define WLC_E_AWDL_SUB_INTERFACE_STATE 8 +#define WLC_E_AWDL_UCAST_AF_TXSTATUS 9 +#define WLC_E_AWDL_NAN_CLUSTER_MERGE 10 +#define WLC_E_AWDL_NAN_RX_BEACON 11 +#define WLC_E_AWDL_SD_DISCOVERY_RESULT 12 +#define WLC_E_AWDL_SD_REPLIED 13 +#define WLC_E_AWDL_SD_TERMINATED 14 +#define WLC_E_AWDL_SD_RECEIVE 15 +#define WLC_E_AWDL_SD_VNDR_IE 16 +#define WLC_E_AWDL_SD_DEVICE_STATE_IE 17 +#define WLC_E_AWDL_DFSP_NOTIF 18 +#define WLC_E_AWDL_DFSP_SUSPECT 19 +#define WLC_E_AWDL_DFSP_RESUME 20 + +/* WLC_E_AWDL_SCAN_STATUS status values */ +#define WLC_E_AWDL_SCAN_START 1 /* Scan start indication to host */ +#define WLC_E_AWDL_SCAN_DONE 0 /* Scan Done indication to host */ +#define WLC_E_AWDL_PHYCAL_START 1 /* Phy calibration start indication to host */ +#define WLC_E_AWDL_PHYCAL_DONE 0 /* Phy calibration done indication to host */ + +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 subscribe_id; /* local subscribe instance id */ + uint8 publish_id; /* publisher's instance id */ + struct ether_addr addr; /* publisher's address */ + uint8 service_info_len; /* length of the service specific information in data[] */ + uint8 data[1]; /* service specific info */ +} BWL_POST_PACKED_STRUCT awdl_sd_discovery_result_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 instance_id; + struct ether_addr addr; /* publisher's address */ +} BWL_POST_PACKED_STRUCT awdl_sd_replied_event_t; + +#define AWDL_SD_TERM_REASON_TIMEOUT 1 +#define AWDL_SD_TERM_REASON_USERREQ 2 +#define AWDL_SD_TERM_REASON_FAIL 3 +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 instance_id; /* publish instance id */ + uint8 reason; /* 1=timeout, 2=user request, 3=failure */ +} BWL_POST_PACKED_STRUCT awdl_sd_term_event_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 instance_id; /* local publish/subscribe instance id */ + uint8 sender_instance_id; + struct ether_addr addr; /* sender's address */ + uint8 service_info_len; /* length of the service specific information in data[] */ + uint8 data[1]; /* service specific info */ +} BWL_POST_PACKED_STRUCT awdl_sd_receive_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + struct ether_addr addr; /* sender's address */ + uint16 len; /* length of data[] */ +
uint8 data[1]; /* vndr specific info */ +} BWL_POST_PACKED_STRUCT awdl_sd_vndr_ie_event_t; + +#endif /* WLAWDL */ + +/* GAS event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas { + uint16 channel; /* channel of GAS protocol */ + uint8 dialog_token; /* GAS dialog token */ + uint8 fragment_id; /* fragment id */ + uint16 status_code; /* status code on GAS completion */ + uint16 data_len; /* length of data to follow */ + uint8 data[1]; /* variable length specified by data_len */ +} BWL_POST_PACKED_STRUCT wl_event_gas_t; + +/* service discovery TLV */ +typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv { + uint16 length; /* length of response_data */ + uint8 protocol; /* service protocol type */ + uint8 transaction_id; /* service transaction id */ + uint8 status_code; /* status code */ + uint8 data[1]; /* response data */ +} BWL_POST_PACKED_STRUCT wl_sd_tlv_t; + +/* service discovery event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd { + uint16 channel; /* channel */ + uint8 count; /* number of tlvs */ + wl_sd_tlv_t tlv[1]; /* service discovery TLV */ +} BWL_POST_PACKED_STRUCT wl_event_sd_t; + +/* WLC_E_PKT_FILTER event sub-classification codes */ +#define WLC_E_PKT_FILTER_TIMEOUT 1 /* Matching packet not received in last timeout seconds */ + +/* Note: proxd has a new API (ver 3.0) deprecates the following */ + +/* Reason codes for WLC_E_PROXD */ +#define WLC_E_PROXD_FOUND 1 /* Found a proximity device */ +#define WLC_E_PROXD_GONE 2 /* Lost a proximity device */ +#define WLC_E_PROXD_START 3 /* used by: target */ +#define WLC_E_PROXD_STOP 4 /* used by: target */ +#define WLC_E_PROXD_COMPLETED 5 /* used by: initiator completed */ +#define WLC_E_PROXD_ERROR 6 /* used by both initiator and target */ +#define WLC_E_PROXD_COLLECT_START 7 /* used by: target & initiator */ +#define WLC_E_PROXD_COLLECT_STOP 8 /* used by: target */ +#define WLC_E_PROXD_COLLECT_COMPLETED 9 /* used by: initiator completed */ +#define WLC_E_PROXD_COLLECT_ERROR 10 /* used by both initiator and target */ +#define WLC_E_PROXD_NAN_EVENT 11 /* used by both initiator and target */ +#define WLC_E_PROXD_TS_RESULTS 12 /* used by: initiator completed */ + +/* proxd_event data */ +typedef struct ftm_sample { + uint32 value; /* RTT in ns */ + int8 rssi; /* RSSI */ +} ftm_sample_t; + +typedef struct ts_sample { + uint32 t1; + uint32 t2; + uint32 t3; + uint32 t4; +} ts_sample_t; + +typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint8 OFDM_frame_type; /* legacy or VHT */ + uint8 bandwidth; /* Bandwidth is 20, 40, 80 MHz */ + struct ether_addr peer_mac; /* (e.g. for tgt: initiator's) */ + uint32 distance; /* dst to tgt, units meter */ + uint32 meanrtt; /* mean delta */ + uint32 modertt; /* Mode delta */ + uint32 medianrtt; /* median RTT */ + uint32 sdrtt; /* Standard deviation of RTT */ + int32 gdcalcresult; /* Software or Hardware. Kind of redundant, but if */ + /* frame type is VHT, then we should do it by hardware */ + int16 avg_rssi; /* avg rssi across the ftm frames */ + int16 validfrmcnt; /* Firmware's valid frame counts */ + int32 peer_router_info; /* Peer router information if available in TLV, */ + /* We will add this field later */ + int32 var1; /* average of group delay */ + int32 var2; /* average of threshold crossing */ + int32 var3; /* difference between group delay and threshold crossing */ +
/* raw Fine Time Measurements (ftm) data */ + uint16 ftm_unit; /* ftm cnt resolution in picoseconds, 6250ps - default */ + uint16 ftm_cnt; /* num of rtd measurements/length in the ftm buffer */ + ftm_sample_t ftm_buff[1]; /* 1 ... ftm_cnt */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint16 ts_cnt; /* number of timestamp measurements */ + ts_sample_t ts_buff[1]; /* Timestamps */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t; + +#ifdef WLAWDL +/* WLC_E_AWDL_AW event data */ +typedef BWL_PRE_PACKED_STRUCT struct awdl_aws_event_data { + uint32 fw_time; /* firmware PMU time */ + struct ether_addr current_master; /* Current master Mac addr */ + uint16 aw_counter; /* AW seq# */ + uint8 aw_ext_count; /* AW extension count */ + uint8 aw_role; /* AW role */ + uint8 flags; /* AW event flag */ + uint16 aw_chan; + uint8 infra_rssi; /* rssi on the infra channel */ + uint32 infra_rxbcn_count; /* number of beacons received */ + struct ether_addr top_master; /* Top master */ +} BWL_POST_PACKED_STRUCT awdl_aws_event_data_t; + +/* For awdl_aws_event_data_t.flags */ +#define AWDL_AW_LAST_EXT 0x01 + +/* WLC_E_AWDL_OOB_AF_STATUS event data */ +typedef BWL_PRE_PACKED_STRUCT struct awdl_oob_af_status_data { + uint32 tx_time_diff; + uint16 pkt_tag; + uint8 tx_chan; +} BWL_POST_PACKED_STRUCT awdl_oob_af_status_data_t; +#endif /* WLAWDL */ + +/* Video Traffic Interference Monitor Event */ +#define INTFER_EVENT_VERSION 1 +#define INTFER_STREAM_TYPE_NONTCP 1 +#define INTFER_STREAM_TYPE_TCP 2 +#define WLINTFER_STATS_NSMPLS 4 +typedef struct wl_intfer_event { + uint16 version; /* version */ + uint16 status; /* status */ + uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */ +} wl_intfer_event_t; + +#define RRM_EVENT_VERSION 0 +typedef struct wl_rrm_event { + int16 version; + int16 len; + int16 cat; /* Category */ + int16 subevent; + char payload[1]; /* Measurement payload */ +} wl_rrm_event_t; + +/* WLC_E_PSTA_PRIMARY_INTF_IND event data */ +typedef struct wl_psta_primary_intf_event { + struct ether_addr prim_ea; /* primary intf ether addr */ +} wl_psta_primary_intf_event_t; + +/* WLC_E_DPSTA_INTF_IND event data */ +typedef enum { + WL_INTF_PSTA = 1, + WL_INTF_DWDS = 2 +} wl_dpsta_intf_type; + +typedef struct wl_dpsta_intf_event { + wl_dpsta_intf_type intf_type; /* dwds/psta intf register */ +} wl_dpsta_intf_event_t; + +/* ********** NAN protocol events/subevents ********** */ +#ifndef NAN_EVENT_BUFFER_SIZE +#define NAN_EVENT_BUFFER_SIZE 512 /* max size */ +#endif /* NAN_EVENT_BUFFER_SIZE */ +/* NAN Events sent by firmware */ + +/* + * If you make changes to this enum, don't forget to update the mask (if need be).
+ */ +typedef enum wl_nan_events { + WL_NAN_EVENT_START = 1, /* NAN cluster started */ + WL_NAN_EVENT_JOIN = 2, /* To be deprecated */ + WL_NAN_EVENT_ROLE = 3, /* Role changed */ + WL_NAN_EVENT_SCAN_COMPLETE = 4, /* To be deprecated */ + WL_NAN_EVENT_DISCOVERY_RESULT = 5, /* Subscribe Received */ + WL_NAN_EVENT_REPLIED = 6, /* Publish Sent */ + WL_NAN_EVENT_TERMINATED = 7, /* sub / pub is terminated */ + WL_NAN_EVENT_RECEIVE = 8, /* Follow up Received */ + WL_NAN_EVENT_STATUS_CHG = 9, /* change in nan_mac status */ + WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */ + WL_NAN_EVENT_STOP = 11, /* To be deprecated */ + WL_NAN_EVENT_P2P = 12, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, /* Unused */ + WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */ + WL_NAN_EVENT_DATA_IF_ADD = 18, /* Unused */ + WL_NAN_EVENT_DATA_PEER_ADD = 19, /* Event for peer add */ + /* nan 2.0 */ + WL_NAN_EVENT_PEER_DATAPATH_IND = 20, /* Incoming DP req */ + WL_NAN_EVENT_DATAPATH_ESTB = 21, /* DP Established */ + WL_NAN_EVENT_SDF_RX = 22, /* SDF payload */ + WL_NAN_EVENT_DATAPATH_END = 23, /* DP Terminate recvd */ + WL_NAN_EVENT_BCN_RX = 24, /* received beacon payload */ + WL_NAN_EVENT_PEER_DATAPATH_RESP = 25, /* Peer's DP response */ + WL_NAN_EVENT_PEER_DATAPATH_CONF = 26, /* Peer's DP confirm */ + WL_NAN_EVENT_RNG_REQ_IND = 27, /* Range Request */ + WL_NAN_EVENT_RNG_RPT_IND = 28, /* Range Report */ + WL_NAN_EVENT_RNG_TERM_IND = 29, /* Range Termination */ + WL_NAN_EVENT_PEER_DATAPATH_SEC_INST = 30, /* Peer's DP sec install */ + WL_NAN_EVENT_TXS = 31, /* for tx status of follow-up and SDFs */ + WL_NAN_EVENT_DW_START = 32, /* dw start */ + WL_NAN_EVENT_DW_END = 33, /* dw end */ + WL_NAN_EVENT_CHAN_BOUNDARY = 34, /* channel switch event */ + WL_NAN_EVENT_MR_CHANGED = 35, /* AMR or IMR changed event during DW */ + WL_NAN_EVENT_RNG_RESP_IND = 36, /* Range Response Rx */ + WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF = 37, /* Peer's schedule update notification */ + WL_NAN_EVENT_PEER_SCHED_REQ = 38, /* Peer's schedule request */ + WL_NAN_EVENT_PEER_SCHED_RESP = 39, /* Peer's schedule response */ + WL_NAN_EVENT_PEER_SCHED_CONF = 40, /* Peer's schedule confirm */ + WL_NAN_EVENT_SENT_DATAPATH_END = 41, /* Sent DP terminate frame */ + WL_NAN_EVENT_SLOT_START = 42, /* SLOT_START event */ + WL_NAN_EVENT_SLOT_END = 43, /* SLOT_END event */ + WL_NAN_EVENT_HOST_ASSIST_REQ = 44, /* Requesting host assist */ + WL_NAN_EVENT_RX_MGMT_FRM = 45, /* NAN management frame received */ + WL_NAN_EVENT_DISC_CACHE_TIMEOUT = 46, /* Disc cache timeout */ + WL_NAN_EVENT_OOB_AF_TXS = 47, /* OOB AF transmit status */ + WL_NAN_EVENT_OOB_AF_RX = 48, /* OOB AF receive event */ + + /* keep WL_NAN_EVENT_INVALID as the last element */ + WL_NAN_EVENT_INVALID /* delimiter for max value */ +} nan_app_events_e; + +/* remove after precommit */ +#define NAN_EV_MASK(ev) (1 << (ev - 1)) +#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0) + +#define NAN_EV_MASK_SET(var, evt) \ + (((uint32)evt < WL_NAN_EVMASK_EXTN_LEN * 8) ? \ + ((*((uint8 *)var + ((evt - 1)/8))) |= (1 << ((evt - 1) %8))) : 0) +#define IS_NAN_EVENT_ON(var, evt) \ + (((uint32)evt < WL_NAN_EVMASK_EXTN_LEN * 8) && \ + (((*((uint8 *)var + ((evt - 1)/8))) & (1 << ((evt - 1) %8))) != 0))
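+ +/* + * A usage sketch for the extended NAN event mask macros above, assuming a + * zeroed caller buffer of WL_NAN_EVMASK_EXTN_LEN bytes (that length comes + * from the wlioctl definitions); the helper name is hypothetical. Event + * values are 1-based, so WL_NAN_EVENT_START lands on bit 0 of byte 0. + */ +static void ex_nan_evmask_demo(uint8 *mask) +{ + NAN_EV_MASK_SET(mask, WL_NAN_EVENT_START); + if (IS_NAN_EVENT_ON(mask, WL_NAN_EVENT_START)) { + /* bit is set; firmware may now deliver WL_NAN_EVENT_START */ + } +}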
+ +/* ******************* end of NAN section *************** */ + +typedef enum wl_scan_events { + WL_SCAN_START = 1, + WL_SCAN_END = 2 +} wl_scan_events; + +/* WLC_E_ULP event data */ +#define WL_ULP_EVENT_VERSION 1 +#define WL_ULP_DISABLE_CONSOLE 1 /* Disable console message on ULP entry */ +#define WL_ULP_UCODE_DOWNLOAD 2 /* Download ULP ucode file */ + +typedef struct wl_ulp_event { + uint16 version; + uint16 ulp_dongle_action; +} wl_ulp_event_t; + +/* TCP keepalive event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_tko { + uint8 index; /* TCP connection index, 0 to max-1 */ + uint8 pad[3]; /* 4-byte struct alignment */ +} BWL_POST_PACKED_STRUCT wl_event_tko_t; + +typedef struct { + uint8 radar_type; /* one of RADAR_TYPE_XXX */ + uint16 min_pw; /* minimum pulse-width (usec * 20) */ + uint16 max_pw; /* maximum pulse-width (usec * 20) */ + uint16 min_pri; /* minimum pulse repetition interval (usec) */ + uint16 max_pri; /* maximum pulse repetition interval (usec) */ + uint16 subband; /* subband/frequency */ +} radar_detected_event_info_t; +typedef struct wl_event_radar_detect_data { + + uint32 version; + uint16 current_chanspec; /* chanspec on which the radar is received */ + uint16 target_chanspec; /* Target chanspec after detection of radar on current_chanspec */ + radar_detected_event_info_t radar_info[2]; +} wl_event_radar_detect_data_t; + +#define WL_EVENT_MODESW_VER_1 1 +#define WL_EVENT_MODESW_VER_CURRENT WL_EVENT_MODESW_VER_1 + +#define WL_E_MODESW_FLAG_MASK_DEVICE 0x01u /* mask of device: belongs to local or peer */ +#define WL_E_MODESW_FLAG_MASK_FROM 0x02u /* mask of origin: firmware or user */ +#define WL_E_MODESW_FLAG_MASK_STATE 0x0Cu /* mask of state: modesw progress state */ + +#define WL_E_MODESW_FLAG_DEVICE_LOCAL 0x00u /* flag - device: info is about self/local */ +#define WL_E_MODESW_FLAG_DEVICE_PEER 0x01u /* flag - device: info is about peer */ + +#define WL_E_MODESW_FLAG_FROM_FIRMWARE 0x00u /* flag - from: request is from firmware */ +#define WL_E_MODESW_FLAG_FROM_USER 0x02u /* flag - from: request is from user/iov */ + +#define WL_E_MODESW_FLAG_STATE_REQUESTED 0x00u /* flag - state: mode switch request */ +#define WL_E_MODESW_FLAG_STATE_INITIATED 0x04u /* flag - state: switch initiated */ +#define WL_E_MODESW_FLAG_STATE_COMPLETE 0x08u /* flag - state: switch completed/success */ +#define WL_E_MODESW_FLAG_STATE_FAILURE 0x0Cu /* flag - state: failed to switch */ + +/* Get sizeof *X including variable data's length where X is pointer to wl_event_mode_switch_t */ +#define WL_E_MODESW_SIZE(X) (sizeof(*(X)) + (X)->length) + +/* Get variable data's length where X is pointer to wl_event_mode_switch_t */ +#define WL_E_MODESW_DATA_SIZE(X) (((X)->length > sizeof(*(X))) ? ((X)->length - sizeof(*(X))) : 0)
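+ +/* + * A decoding sketch for the flag layout above; the helper name is + * hypothetical. The masks split 'flags' into device (bit 0), origin (bit 1) + * and modesw progress state (bits 3:2); the WL_E_MODESW_FLAG_STATE_* values + * are already positioned, so masking alone recovers the state. + */ +static bool ex_modesw_completed(uint32 flags) +{ + return (flags & WL_E_MODESW_FLAG_MASK_STATE) == WL_E_MODESW_FLAG_STATE_COMPLETE; +}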
+ +#define WL_E_MODESW_REASON_UNKNOWN 0u /* reason: UNKNOWN */ +#define WL_E_MODESW_REASON_ACSD 1u /* reason: ACSD (based on events from FW) */ +#define WL_E_MODESW_REASON_OBSS_DBS 2u /* reason: OBSS DBS (e.g. on interference) */ +#define WL_E_MODESW_REASON_DFS 3u /* reason: DFS (e.g. on subband radar) */ +#define WL_E_MODESW_REASON_DYN160 4u /* reason: DYN160 (160/2x2 - 80/4x4) */ + +/* event structure for WLC_E_MODE_SWITCH */ +typedef struct { + uint16 version; + uint16 length; /* size including 'data' field */ + uint16 opmode_from; + uint16 opmode_to; + uint32 flags; /* bit 0: peer(/local==0); + * bit 1: user(/firmware==0); + * bits 3,2: 00==requested, 01==initiated, + * 10==complete, 11==failure; + * rest: reserved + */ + uint16 reason; /* value 0: unknown, 1: ACSD, 2: OBSS_DBS, + * 3: DFS, 4: DYN160, rest: reserved + */ + uint16 data_offset; /* offset to 'data' from beginning of this struct. + * fields may be added between data_offset and data + */ + /* ADD NEW FIELDS HERE */ + uint8 data[]; /* reason specific data; could be empty */ +} wl_event_mode_switch_t; + +/* when reason in WLC_E_MODE_SWITCH is DYN160, data will carry the following structure */ +typedef struct { + uint16 trigger; /* value 0: MU to SU, 1: SU to MU, 2: metric_dyn160, 3:re-/assoc, + * 4: disassoc, 5: rssi, 6: traffic, 7: interference, + * 8: chanim_stats + */ + struct ether_addr sta_addr; /* causal STA's MAC address when known */ + uint16 metric_160_80; /* latest dyn160 metric */ + uint8 nss; /* NSS of the STA */ + uint8 bw; /* BW of the STA */ + int8 rssi; /* RSSI of the STA */ + uint8 traffic; /* internal metric of traffic */ +} wl_event_mode_switch_dyn160; + +#define WL_EVENT_FBT_VER_1 1 + +#define WL_E_FBT_TYPE_FBT_OTD_AUTH 1 +#define WL_E_FBT_TYPE_FBT_OTA_AUTH 2 + +/* event structure for WLC_E_FBT */ +typedef struct { + uint16 version; + uint16 length; /* size including 'data' field */ + uint16 type; /* value 0: unknown, 1: FBT OTD Auth Req */ + uint16 data_offset; /* offset to 'data' from beginning of this struct. + * fields may be added between data_offset and data + */ + /* ADD NEW FIELDS HERE */ + uint8 data[]; /* type specific data; could be empty */ +} wl_event_fbt_t; + +/* TWT Setup Completion is designed to notify the user of TWT Setup process + * status. When the 'status' field is BCME_OK, the user must check the + * 'setup_cmd' field value in the 'wl_twt_sdesc_t' structure at the end of + * the event data to see the response from the TWT Responding STA; when + * the 'status' field is BCME_ERROR or any value other than BCME_OK, the + * user must not use anything from the 'wl_twt_sdesc_t' structure, as it + * then holds the TWT Requesting STA's own TWT parameters. + */
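+ +/* + * A host-side sketch of the rule above; the helper name is hypothetical and + * the completion struct and reason codes are defined just below. Only when + * 'status' is BCME_OK (0, from the bcmerror codes) should the trailing + * wl_twt_sdesc_t be read as the responder's parameters. + */ +static bool ex_twt_setup_accepted(int32 status, uint8 reason_code) +{ + return (status == 0 /* BCME_OK */) && + (reason_code == 0 /* WL_TWT_SETUP_RC_ACCEPT */); +}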
+ +#define WL_TWT_SETUP_CPLT_VER 0u + +/* TWT Setup Reason code */ +typedef enum wl_twt_setup_rc { + WL_TWT_SETUP_RC_ACCEPT = 0, /* TWT Setup Accepted */ + WL_TWT_SETUP_RC_REJECT = 1, /* TWT Setup Rejected */ + WL_TWT_SETUP_RC_TIMEOUT = 2, /* TWT Setup Time-out */ + WL_TWT_SETUP_RC_IE = 3, /* TWT Setup IE Validation failed */ + WL_TWT_SETUP_RC_PARAMS = 4, /* TWT Setup IE Params invalid */ + WL_TWT_SETUP_RC_ERROR = 5, /* Generic Error cases */ +} wl_twt_setup_rc_t; + +/* TWT Setup Completion event data */ +typedef struct wl_twt_setup_cplt { + uint16 version; + uint16 length; /* the byte count of fields from 'dialog' onwards */ + uint8 dialog; /* Setup frame dialog token */ + uint8 reason_code; /* see WL_TWT_SETUP_RC_XXXX */ + uint8 pad[2]; + int32 status; + /* wl_twt_sdesc_t desc; - defined in wlioctl.h */ +} wl_twt_setup_cplt_t; + +#define WL_TWT_TEARDOWN_CPLT_VER 0u + +/* TWT teardown Reason code */ +typedef enum wl_twt_td_rc { + WL_TWT_TD_RC_SUCCESS = 0, /* Teardown complete Successful */ + WL_TWT_TD_RC_HOST = 1, /* Teardown triggered by Host */ + WL_TWT_TD_RC_PEER = 2, /* Peer initiated teardown */ + WL_TWT_TD_RC_MCHAN = 3, /* Teardown due to MCHAN Active */ + WL_TWT_TD_RC_MCNX = 4, /* Teardown due to MultiConnection */ + WL_TWT_TD_RC_SETUP_FAIL = 5, /* Setup fail midway. Teardown all connections */ + WL_TWT_TD_RC_SCHED = 6, /* Teardown by TWT Scheduler */ + WL_TWT_TD_RC_CSA = 7, /* Teardown due to CSA */ + WL_TWT_TD_RC_BTCX = 8, /* Teardown due to BTCX */ + WL_TWT_TD_RC_ERROR = 9, /* Generic Error cases */ +} wl_twt_td_rc_t; + +/* TWT Teardown complete event data */ +typedef struct wl_twt_teardown_cplt { + uint16 version; + uint16 length; /* the byte count of fields from 'reason_code' onwards */ + uint8 reason_code; /* WL_TWT_TD_RC_XXXX */ + uint8 pad[3]; + int32 status; + /* wl_twt_teardesc_t; - defined in wlioctl.h */ +} wl_twt_teardown_cplt_t; + +#define WL_TWT_INFO_CPLT_VER 0u + +/* TWT Info Reason code */ +typedef enum wl_twt_info_rc { + WL_TWT_INFO_RC_HOST = 0, /* Host initiated Info complete */ + WL_TWT_INFO_RC_PEER = 1, /* Peer initiated TWT Info */ + WL_TWT_INFO_RC_ERROR = 2, /* generic error conditions */ +} wl_twt_info_rc_t; + +/* TWT Info complete event data */ +typedef struct wl_twt_info_cplt { + uint16 version; + uint16 length; /* the byte count of fields from 'reason_code' onwards */ + uint8 reason_code; /* WL_TWT_INFO_RC_XXXX */ + uint8 pad[3]; + int32 status; + /* wl_twt_infodesc_t; - defined in wlioctl.h */ +} wl_twt_info_cplt_t; + +#define WL_INVALID_IE_EVENT_VERSION 0 + +/* Invalid IE Event data */ +typedef struct wl_invalid_ie_event { + uint16 version; + uint16 len; /* Length of the invalid IE copy */ + uint16 type; /* Type/subtype of the frame which contains the invalid IE */ + uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */ + uint8 ie[]; /* Variable length buffer for the invalid IE copy */ +} wl_invalid_ie_event_t; + +/* Fixed header portion of Invalid IE Event */ +typedef struct wl_invalid_ie_event_hdr { + uint16 version; + uint16 len; /* Length of the invalid IE copy */ + uint16 type; /* Type/subtype of the frame which contains the invalid IE */ + uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */ + /* var length IE data follows */ +} wl_invalid_ie_event_hdr_t; + +typedef enum ie_error_code { + IE_ERROR_OUT_OF_RANGE = 0x01 +} ie_error_code_t; + +/* This marks the end of a packed structure section.
*/
+#include <packed_section_end.h>
+
+/* reason of channel switch */
+typedef enum {
+ CHANSW_DFS = 10, /* channel switch due to DFS module */
+ CHANSW_HOMECH_REQ = 14, /* channel switch due to HOME Channel Request */
+ CHANSW_STA = 15, /* channel switch due to STA */
+ CHANSW_SOFTAP = 16, /* channel switch due to SoftAP */
+ CHANSW_AIBSS = 17, /* channel switch due to AIBSS */
+ CHANSW_NAN = 18, /* channel switch due to NAN */
+ CHANSW_NAN_DISC = 19, /* channel switch due to NAN Disc */
+ CHANSW_NAN_SCHED = 20, /* channel switch due to NAN Sched */
+
+#ifdef WLAWDL
+ CHANSW_AWDL_AW = 21, /* channel switch due to AWDL aw */
+ CHANSW_AWDL_SYNC = 22, /* channel switch due to AWDL sync */
+ CHANSW_AWDL_CAL = 23, /* channel switch due to AWDL Cal */
+ CHANSW_AWDL_PSF = 24, /* channel switch due to AWDL PSF */
+ CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */
+#endif /* WLAWDL */
+
+ CHANSW_TDLS = 26, /* channel switch due to TDLS */
+ CHANSW_PROXD = 27, /* channel switch due to PROXD */
+ CHANSW_SLOTTED_BSS = 28, /* channel switch due to slotted bss */
+ CHANSW_SLOTTED_CMN_SYNC = 29, /* channel switch due to Common Sync Layer */
+ CHANSW_SLOTTED_BSS_CAL = 30, /* channel switch due to Cal request from slotted bss */
+ CHANSW_MAX_NUMBER = 31 /* max channel switch reason */
+} wl_chansw_reason_t;
+
+#define CHANSW_REASON(reason) (1 << reason)
+
+#define EVENT_AGGR_DATA_HDR_LEN 8
+
+typedef struct event_aggr_data {
+ uint16 num_events; /* No of events aggregated */
+ uint16 len; /* length of the aggregated events, excludes padding */
+ uint8 pad[4]; /* Padding to make aggr event packet header aligned
+ * on 64-bit boundary, for a 64-bit host system.
+ */
+ uint8 data[]; /* Aggregate buffer containing Events */
+} event_aggr_data_t;
+
+/* WLC_E_TVPM_MITIGATION event structure version */
+#define WL_TVPM_MITIGATION_VERSION 1
+
+/* TVPM mitigation on/off status bits */
+#define WL_TVPM_MITIGATION_TXDC 0x1
+#define WL_TVPM_MITIGATION_TXPOWER 0x2
+#define WL_TVPM_MITIGATION_TXCHAINS 0x4
+
+/* Event structure for WLC_E_TVPM_MITIGATION */
+typedef struct wl_event_tvpm_mitigation {
+ uint16 version; /* structure version */
+ uint16 length; /* length of this structure */
+ uint32 timestamp_ms; /* millisecond timestamp */
+ uint8 slice; /* slice number */
+ uint8 pad;
+ uint16 on_off; /* mitigation status bits */
+} wl_event_tvpm_mitigation_t;
+
+/* Event structures for sub health checks of PHY */
+
+#define WL_PHY_HC_DESENSE_STATS_VER (1)
+typedef struct wl_hc_desense_stats {
+ uint16 version;
+ uint16 chanspec;
+ int8 allowed_weakest_rssi; /* based on weakest link RSSI */
+ uint8 ofdm_desense; /* Desense requested for OFDM */
+ uint8 bphy_desense; /* Desense requested for bphy */
+ int8 glitch_upd_wait; /* wait post ACI mitigation */
+} wl_hc_desense_stats_v1_t;
+
+#define WL_PHY_HC_TEMP_STATS_VER (1)
+typedef struct wl_hc_temp_stats {
+ uint16 version;
+ uint16 chanspec;
+ int16 curtemp; /* Temperature */
+ uint8 temp_disthresh; /* Threshold to reduce tx chain */
+ uint8 temp_enthresh; /* Threshold to increase tx chains */
+ uint tempsense_period; /* Temperature check period */
+ bool heatedup; /* 1: temp throttling on */
+ uint8 bitmap; /* Indicating rx and tx chains */
+ uint8 pad[2];
+} wl_hc_temp_stats_v1_t;
+
+#define WL_PHY_HC_TEMP_STATS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 chanspec;
+ int16 curtemp; /* Temperature */
+ uint8 pad[2];
+} wl_hc_temp_stats_v2_t;
+
+#define WL_PHY_HC_VCOCAL_STATS_VER (1)
+typedef struct wl_hc_vcocal_stats {
+ uint16 version;
+ uint16 chanspec;
+
int16 curtemp; /* Temperature */ + /* Ring buffer - Maintains history of previous 16 wake/sleep cycles */ + uint16 vcocal_status_wake; + uint16 vcocal_status_sleep; + uint16 plllock_status_wake; + uint16 plllock_status_sleep; + /* Cal Codes */ + uint16 cc_maincap; + uint16 cc_secondcap; + uint16 cc_auxcap; +} wl_hc_vcocal_stats_v1_t; + +#define WL_PHY_HC_TXPWR_STATS_VER (1) +typedef struct wl_hc_tx_stats { + uint16 version; + uint16 chanspec; + int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */ + int8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */ + int8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */ + uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */ + int16 temp; /* Temperature */ + uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */ + int8 min_txpower; /* min tx power per ant */ + uint8 pad[3]; +} wl_hc_txpwr_stats_v1_t; + +#define WL_PHY_HC_TXPWR_STATS_VER_2 (2) +typedef struct { + uint16 version; + uint16 chanspec; + int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */ + uint8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */ + uint8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */ + uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */ + int16 temp; /* Temperature */ + uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */ + int8 min_txpower; /* min tx power per ant */ + uint8 pad[3]; +} wl_hc_txpwr_stats_v2_t; + +typedef enum wl_mbo_event_type { + WL_MBO_E_CELLULAR_NW_SWITCH = 1, + WL_MBO_E_BTM_RCVD = 2, + /* ADD before this */ + WL_MBO_E_LAST = 3 /* highest val + 1 for range checking */ +} wl_mbo_event_type_t; + +/* WLC_E_MBO event structure version */ +#define WL_MBO_EVT_VER 1 + +struct wl_event_mbo { + uint16 version; /* structure version */ + uint16 length; /* length of the rest of the structure from type */ + wl_mbo_event_type_t type; /* Event type */ + uint8 data[]; /* Variable length data */ +}; + +/* WLC_E_MBO_CELLULAR_NW_SWITCH event structure version */ +#define WL_MBO_CELLULAR_NW_SWITCH_VER 1 + +/* WLC_E_MBO_CELLULAR_NW_SWITCH event data */ +struct wl_event_mbo_cell_nw_switch { + uint16 version; /* structure version */ + uint16 length; /* length of the rest of the structure from reason */ + /* Reason of switch as per MBO Tech spec */ + uint8 reason; + /* pad */ + uint8 pad; + /* delay after which re-association can be tried to current BSS (seconds) */ + uint16 reassoc_delay; + /* How long current association will be there (milli seconds). + * This is zero if not known or value is overflowing. 
+ */ + uint32 assoc_time_remain; +}; + +/* WLC_E_MBO_BTM_RCVD event structure version */ +#define WL_BTM_EVENT_DATA_VER_1 1 +/* Specific btm event type data */ +struct wl_btm_event_type_data { + uint16 version; + uint16 len; + uint8 transition_reason; /* transition reason code */ + uint8 pad[3]; /* pad */ +}; + +/* WLC_E_PRUNE event structure version */ +#define WL_BSSID_PRUNE_EVT_VER_1 1 +/* MBO-OCE params */ +struct wl_bssid_prune_evt_info { + uint16 version; + uint16 len; + uint8 SSID[32]; + uint32 time_remaining; /* Time remaining */ + struct ether_addr BSSID; + uint8 SSID_len; + uint8 reason; /* Reason code */ + int8 rssi_threshold; /* RSSI threshold */ + uint8 pad[3]; /* pad */ +}; + +/* WLC_E_ADPS status */ +enum { + WL_E_STATUS_ADPS_DEAUTH = 0, + WL_E_STATUS_ADPS_MAX +}; + +/* WLC_E_ADPS event data */ +#define WL_EVENT_ADPS_VER_1 1 + +/* WLC_E_ADPS event type */ +#define WL_E_TYPE_ADPS_BAD_AP 1 + +typedef struct wl_event_adps_bad_ap { + uint32 status; + uint32 reason; + struct ether_addr ea; /* bssid */ +} wl_event_adps_bad_ap_t; + +typedef struct wl_event_adps { + uint16 version; /* structure version */ + uint16 length; /* length of structure */ + uint32 type; /* event type */ + uint8 data[]; /* variable length data */ +} wl_event_adps_v1_t; + +typedef wl_event_adps_v1_t wl_event_adps_t; + +#define WLC_USER_E_KEY_UPDATE 1 /* Key add/remove */ +#define WLC_USER_E_FORCE_FLUSH 2 /* SDC force flush */ + +/* OBSS HW event data */ +typedef struct wlc_obss_hw_event_data { + uint16 available_chanspec; /* Contains band, channel and BW info */ +} wlc_obss_hw_event_data_t; + +/* status when WLC_E_OBSS_DETECTION */ +#define WLC_OBSS_BW_UPDATED 1 /* Sent when BW is update at SW */ +#define WLC_OBSS_BW_AVAILABLE 2 /* Sent When a change in BW is detected / noticed */ + +/* WLC_E_DYNSAR event structure version */ +#define WL_DYNSAR_VERSION 1 + +/* bits used in status field */ +#define WL_STATUS_DYNSAR_PWR_OPT (1 << 0) /* power optimized */ +#define WL_STATUS_DYNSAR_FAILSAFE (1 << 1) /* radio is using failsafe cap values */ +#define WL_STATUS_DYNSAR_NOMUTE_OPT (1 << 2) /* ack mute */ + +/* Event structure for WLC_E_DYNSAR */ +typedef struct wl_event_dynsar { + uint16 version; /* structure version */ + uint16 length; /* length of this structure */ + uint32 timestamp_ms; /* millisecond timestamp */ + uint8 opt; /* optimization power offset */ + uint8 slice; /* slice number */ + uint8 status; /* WL_STATUS_DYNSAR_XXX, to indicate which optimization + * is being applied + */ + uint8 pad; +} wl_event_dynsar_t; + +/* status when WLC_E_AP_BCN_MUTE event is sent */ +#define BCN_MUTE_MITI_ACTIVE 1u /* Mitigation is activated when probe response received + * but Beacon is not received + */ +#define BCN_MUTE_MITI_END 2u /* Sent when beacon is received */ +#define BCN_MUTE_MITI_TIMEOUT 3u /* Mitigation period is reached */ + +/* bcn_mute_miti event data */ +#define WLC_BCN_MUTE_MITI_EVENT_DATA_VER_1 1u +typedef struct wlc_bcn_mute_miti_event_data_v1 { + uint16 version; /* Structure version number */ + uint16 length; /* Length of the whole struct */ + uint16 uatbtt_count; /* Number of UATBTT during mitigation */ + uint8 PAD[2]; /* Pad to fit to 32 bit alignment */ +} wlc_bcn_mute_miti_event_data_v1_t; + +/* bcn_drift event data */ +#define WLC_BCN_DRIFT_EVENT_DATA_VER_1 (1u) +typedef struct wlc_bcn_drift_event_data_v1 { + uint16 version; /* Structure version number */ + uint16 length; /* Length of the whole struct */ + int16 drift; /* in ms */ + int16 jitter; /* in ms */ +} wlc_bcn_drift_event_data_v1_t; + 
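
All of the event payloads above share the same version/length convention, so a host-side consumer should validate both fields before touching the rest of the structure. A minimal sketch of that pattern follows; the handler name and the printf sink are hypothetical and not part of this driver, and BCME_* codes are assumed from bcmerror.h:

/* Sketch: defensive parse of a versioned event payload (hypothetical consumer). */
static int
handle_bcn_drift(const void *data, uint datalen)
{
	wlc_bcn_drift_event_data_v1_t evt;

	if (data == NULL || datalen < sizeof(evt)) {
		return BCME_BADLEN;
	}
	memcpy(&evt, data, sizeof(evt)); /* event payloads may be unaligned */
	if (evt.version != WLC_BCN_DRIFT_EVENT_DATA_VER_1 || evt.length < sizeof(evt)) {
		return BCME_VERSION;
	}
	/* drift and jitter are in ms per the structure comments above */
	printf("bcn drift %d ms, jitter %d ms\n", evt.drift, evt.jitter);
	return BCME_OK;
}
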
+#endif /* _BCMEVENT_H_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmicmp.h b/bcmdhd.101.10.361.x/include/bcmicmp.h
new file mode 100755
index 0000000..31e809a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmicmp.h
@@ -0,0 +1,83 @@
+/*
+ * Fundamental constants relating to ICMP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmicmp_h_
+#define _bcmicmp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define ICMP_TYPE_ECHO_REQUEST 8 /* ICMP type echo request */
+#define ICMP_TYPE_ECHO_REPLY 0 /* ICMP type echo reply */
+
+#define ICMP_CHKSUM_OFFSET 2 /* ICMP body checksum offset */
+
+/* ICMP6 error and control message types */
+#define ICMP6_DEST_UNREACHABLE 1
+#define ICMP6_PKT_TOO_BIG 2
+#define ICMP6_TIME_EXCEEDED 3
+#define ICMP6_PARAM_PROBLEM 4
+#define ICMP6_ECHO_REQUEST 128
+#define ICMP6_ECHO_REPLY 129
+#define ICMP_MCAST_LISTENER_QUERY 130
+#define ICMP_MCAST_LISTENER_REPORT 131
+#define ICMP_MCAST_LISTENER_DONE 132
+#define ICMP6_RTR_SOLICITATION 133
+#define ICMP6_RTR_ADVERTISEMENT 134
+#define ICMP6_NEIGH_SOLICITATION 135
+#define ICMP6_NEIGH_ADVERTISEMENT 136
+#define ICMP6_REDIRECT 137
+
+#define ICMP6_RTRSOL_OPT_OFFSET 8
+#define ICMP6_RTRADV_OPT_OFFSET 16
+#define ICMP6_NEIGHSOL_OPT_OFFSET 24
+#define ICMP6_NEIGHADV_OPT_OFFSET 24
+#define ICMP6_REDIRECT_OPT_OFFSET 40
+
+BWL_PRE_PACKED_STRUCT struct icmp6_opt {
+ uint8 type; /* Option identifier */
+ uint8 length; /* Length including type and length */
+ uint8 data[0]; /* Variable length data */
+} BWL_POST_PACKED_STRUCT;
+
+#define ICMP6_OPT_TYPE_SRC_LINK_LAYER 1
+#define ICMP6_OPT_TYPE_TGT_LINK_LAYER 2
+#define ICMP6_OPT_TYPE_PREFIX_INFO 3
+#define ICMP6_OPT_TYPE_REDIR_HDR 4
+#define ICMP6_OPT_TYPE_MTU 5
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmicmp_hdr {
+ uint8 type; /* Echo or Echo-reply */
+ uint8 code; /* Always 0 */
+ uint16 chksum; /* Icmp packet checksum */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _bcmicmp_h_ */
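
The icmp6_opt layout above is the standard RFC 4861 type/length option format, with the length field counted in 8-octet units. A bounds-checked walk over the options region of an ND message could look like the following sketch (not driver code; assumes typedefs.h and the definitions above):

/* Sketch: bounds-checked walk of ICMPv6 ND options (RFC 4861 TLVs).
 * 'body'/'len' cover the ICMPv6 message; 'off' is one of the
 * ICMP6_*_OPT_OFFSET constants above.
 */
static const struct icmp6_opt *
find_nd_opt(const uint8 *body, uint len, uint off, uint8 want_type)
{
	while (off + sizeof(struct icmp6_opt) <= len) {
		const struct icmp6_opt *opt = (const struct icmp6_opt *)(body + off);
		uint opt_len = (uint)opt->length * 8u; /* length unit is 8 octets */

		if (opt->length == 0 || off + opt_len > len) {
			return NULL; /* malformed option */
		}
		if (opt->type == want_type) {
			return opt;
		}
		off += opt_len;
	}
	return NULL;
}

For example, find_nd_opt(body, len, ICMP6_NEIGHSOL_OPT_OFFSET, ICMP6_OPT_TYPE_SRC_LINK_LAYER) would locate the source link-layer address option of a neighbor solicitation.
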
diff --git a/bcmdhd.101.10.361.x/include/bcmiov.h b/bcmdhd.101.10.361.x/include/bcmiov.h
new file mode 100755
index 0000000..05a9f58
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmiov.h
@@ -0,0 +1,353 @@
+/*
+ * bcmiov.h
+ * Common iovar handling/parsing support - batching, parsing, sub-cmd dispatch etc.
+ * To be used in firmware and host apps or dhd - reducing code size,
+ * duplication, and maintenance overhead.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmiov_h_
+#define _bcmiov_h_
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmtlv.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else
+#include <stddef.h> /* For size_t */
+#endif /* BCMDRIVER */
+
+/* Forward declarations */
+typedef uint16 bcm_iov_cmd_id_t;
+typedef uint16 bcm_iov_cmd_flags_t;
+typedef uint16 bcm_iov_cmd_mflags_t;
+typedef struct bcm_iov_cmd_info bcm_iov_cmd_info_t;
+typedef struct bcm_iov_cmd_digest bcm_iov_cmd_digest_t;
+typedef struct bcm_iov_cmd_tlv_info bcm_iov_cmd_tlv_info_t;
+typedef struct bcm_iov_buf bcm_iov_buf_t;
+typedef struct bcm_iov_batch_buf bcm_iov_batch_buf_t;
+typedef struct bcm_iov_parse_context bcm_iov_parse_context_t;
+typedef struct bcm_iov_sub_cmd_context bcm_iov_sub_cmd_context_t;
+
+typedef void* (*bcm_iov_malloc_t)(void* alloc_ctx, size_t len);
+typedef void (*bcm_iov_free_t)(void* alloc_ctx, void *buf, size_t len);
+
+typedef uint8 bcm_iov_tlp_data_type_t;
+typedef struct bcm_iov_tlp bcm_iov_tlp_t;
+typedef struct bcm_iov_tlp_node bcm_iov_tlp_node_t;
+typedef struct bcm_iov_batch_subcmd bcm_iov_batch_subcmd_t;
+
+/*
+ * iov validation handler - All the common checks that are required
+ * for processing of iovars for any given command.
+ */
+typedef int (*bcm_iov_cmd_validate_t)(const bcm_iov_cmd_digest_t *dig,
+ uint32 actionid, const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
+
+/* iov get handler - process subcommand specific input and return output.
+ * input and output may overlap, so the callee needs to check if
+ * that is supported. For xtlv data a tlv digest is provided to make
+ * parsing simpler. Output tlvs may be packed into output buffer using
+ * bcm xtlv support. olen is input/output parameter. On input contains
+ * max available obuf length and callee must fill the correct length
+ * to represent the length of output returned.
+ */
+typedef int (*bcm_iov_cmd_get_t)(const bcm_iov_cmd_digest_t *dig,
+ const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
+
+/* iov set handler - process subcommand specific input and return output
+ * input and output may overlap, so the callee needs to check if
+ * that is supported. olen is input/output parameter. On input contains
+ * max available obuf length and callee must fill the correct length
+ * to represent the length of output returned.
+ */
+typedef int (*bcm_iov_cmd_set_t)(const bcm_iov_cmd_digest_t *dig,
+ const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
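
As a usage sketch (hypothetical module code, not part of this file), a get handler matching bcm_iov_cmd_get_t fills obuf and reports the produced length back through olen:

/* Sketch: a trivial get handler for a fixed-size, non-XTLV subcommand.
 * 'my_fw_version' is a hypothetical module value; BCME_* codes are
 * assumed from bcmerror.h.
 */
static int
my_cmd_get(const bcm_iov_cmd_digest_t *dig, const uint8 *ibuf, size_t ilen,
	uint8 *obuf, size_t *olen)
{
	uint32 my_fw_version = 0x01020304u; /* hypothetical payload */

	BCM_REFERENCE(dig);
	BCM_REFERENCE(ibuf);
	BCM_REFERENCE(ilen);
	if (*olen < sizeof(my_fw_version)) {
		return BCME_BUFTOOSHORT;
	}
	memcpy(obuf, &my_fw_version, sizeof(my_fw_version));
	*olen = sizeof(my_fw_version); /* report the length actually produced */
	return BCME_OK;
}
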
+/* iov (sub-cmd) batch - a vector of commands. count can be zero
+ * to support a version query. Each command is a tlv - whose data
+ * portion may have an optional return status, followed by a fixed
+ * length data header, optionally followed by tlvs.
+ * cmd = type|length|[header][tlvs]
+ */
+
+/*
+ * Batch sub-commands have status length included in the
+ * response length packed in TLV.
+ */
+#define BCM_IOV_STATUS_LEN sizeof(uint32)
+
+/* batch version is indicated by setting high bit. */
+#define BCM_IOV_BATCH_MASK 0x8000
+
+/*
+ * Batched commands will have the following memory layout
+ * +--------+---------+--------+-------+
+ * |version |count | is_set |sub-cmd|
+ * +--------+---------+--------+-------+
+ * version >= 0x8000
+ * count = number of sub-commands encoded in the iov buf
+ * sub-cmd one or more sub-commands for processing
+ * Where sub-cmd is padded byte buffer with memory layout as follows
+ * +--------+---------+-----------------------+-------------+------
+ * |cmd-id |length |IN(options) OUT(status)|command data |......
+ * +--------+---------+-----------------------+-------------+------
+ * cmd-id =sub-command ID
+ * length = length of this sub-command
+ * IN(options) = On input processing options/flags for this command
+ * OUT(status) on output processing status for this command
+ * command data = encapsulated IOVAR data as a single structure or packed TLVs for each
+ * individual sub-command.
+ */
+struct bcm_iov_batch_subcmd {
+ uint16 id;
+ uint16 len;
+ union {
+ uint32 options;
+ uint32 status;
+ } u;
+ uint8 data[1];
+};
+
+struct bcm_iov_batch_buf {
+ uint16 version;
+ uint8 count;
+ uint8 is_set; /* to differentiate set or get */
+ struct bcm_iov_batch_subcmd cmds[0];
+};
+
+/* non-batched command version = major|minor w/ major <= 127 */
+struct bcm_iov_buf {
+ uint16 version;
+ uint16 len;
+ bcm_iov_cmd_id_t id;
+ uint16 data[1]; /* 32 bit alignment may be repurposed by the command */
+ /* command specific data follows */
+};
+
+/* iov options flags */
+enum {
+ BCM_IOV_CMD_OPT_ALIGN_NONE = 0x0000,
+ BCM_IOV_CMD_OPT_ALIGN32 = 0x0001,
+ BCM_IOV_CMD_OPT_TERMINATE_SUB_CMDS = 0x0002
+};
+
+/* iov command flags */
+enum {
+ BCM_IOV_CMD_FLAG_NONE = 0,
+ BCM_IOV_CMD_FLAG_STATUS_PRESENT = (1 << 0), /* status present at data start - output only */
+ BCM_IOV_CMD_FLAG_XTLV_DATA = (1 << 1), /* data is a set of xtlvs */
+ BCM_IOV_CMD_FLAG_HDR_IN_LEN = (1 << 2), /* length starts at version - non-batched only */
+ BCM_IOV_CMD_FLAG_NOPAD = (1 << 3) /* No padding needed after iov_buf */
+};
+
+/* information about the command, xtlv options and xtlvs_off are meaningful
+ * only if XTLV_DATA cmd flag is selected
+ */
+struct bcm_iov_cmd_info {
+ bcm_iov_cmd_id_t cmd; /* the (sub)command - module specific */
+ bcm_iov_cmd_flags_t flags; /* checked by bcmiov but set by module */
+ bcm_iov_cmd_mflags_t mflags; /* owned and checked by module */
+ bcm_xtlv_opts_t xtlv_opts;
+ bcm_iov_cmd_validate_t validate_h; /* command validation handler */
+ bcm_iov_cmd_get_t get_h;
+ bcm_iov_cmd_set_t set_h;
+ uint16 xtlvs_off; /* offset to beginning of xtlvs in cmd data */
+ uint16 min_len_set;
+ uint16 max_len_set;
+ uint16 min_len_get;
+ uint16 max_len_get;
+};
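
Given the layout documented above, a batch response can be walked subcommand by subcommand; each entry advances by the TLV length (which includes the status word) rounded up to the negotiated alignment. A sketch for a little-endian host, assuming 32-bit alignment between subcommands and that the enclosing iovar response has already been length-checked:

/* Sketch: visit each subcommand of a batch buffer ('buflen' covers 'buf'). */
static void
walk_batch(const bcm_iov_batch_buf_t *buf, uint buflen)
{
	const uint8 *end = (const uint8 *)buf + buflen;
	const uint8 *p = (const uint8 *)buf->cmds;
	uint8 i;

	if (!(buf->version & BCM_IOV_BATCH_MASK)) {
		return; /* not a batched response */
	}
	for (i = 0; i < buf->count; i++) {
		const bcm_iov_batch_subcmd_t *sub = (const bcm_iov_batch_subcmd_t *)p;

		if ((p + OFFSETOF(bcm_iov_batch_subcmd_t, u)) > end ||
		    ((const uint8 *)&sub->u + sub->len) > end) {
			break; /* truncated or malformed batch */
		}
		printf("subcmd 0x%04x len %u status 0x%08x\n",
			sub->id, sub->len, sub->u.status);
		p = (const uint8 *)&sub->u + ROUNDUP(sub->len, sizeof(uint32));
	}
}

OFFSETOF and ROUNDUP here are the usual bcmdefs.h/bcmutils.h helpers.
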
+/* tlv digest to support parsing of xtlvs for commands w/ tlv data; the tlv
+ * digest is available in the handler for the command. The count and order in
+ * which tlvs appear in the digest are exactly the same as the order of tlvs
+ * passed in the registration for the command. Unknown tlvs are ignored.
+ * If registered tlvs are missing datap will be NULL. Common iov processing
+ * acquires an input digest to process the input buffer. The handler is
+ * responsible for constructing an output digest and using packing functions
+ * to generate the output buffer. The handler may use the input digest as
+ * output digest once the tlv data is extracted and used. Multiple tlv support
+ * involves allocation of tlp nodes, except the first, as required.
+ */
+
+/* tlp data type indicates if the data is not used/invalid, input or output */
+enum {
+ BCM_IOV_TLP_NODE_INVALID = 0,
+ BCM_IOV_TLP_NODE_IN = 1,
+ BCM_IOV_TLP_NODE_OUT = 2
+};
+
+struct bcm_iov_tlp {
+ uint16 type;
+ uint16 len;
+ uint16 nodeix; /* node index */
+};
+
+/* tlp data for a given tlv - multiple tlvs of same type chained */
+struct bcm_iov_tlp_node {
+ uint8 *next; /* multiple tlv support */
+ bcm_iov_tlp_data_type_t type;
+ uint8 *data; /* pointer to data in buffer or state */
+};
+
+struct bcm_iov_cmd_digest {
+ uint32 version; /* Version */
+ void *cmd_ctx;
+ struct wlc_bsscfg *bsscfg;
+ const bcm_iov_cmd_info_t *cmd_info;
+ uint16 max_tlps; /* number of tlps allocated */
+ uint16 max_nodes; /* number of nodes allocated */
+ uint16 num_tlps; /* number of tlps valid */
+ uint16 num_nodes; /* number of nodes valid */
+ uint16 tlps_off; /* offset to tlps */
+ uint16 nodes_off; /* offset to nodes */
+ /*
+ * bcm_iov_tlp_t tlps[max_tlps];
+ * bcm_iov_tlp_node_t nodes[max_nodes]
+ */
+};
+
+/* get length callback - default length is min_len taken from digest */
+typedef size_t (*bcm_iov_xtlv_get_len_t)(const bcm_iov_cmd_digest_t *dig,
+ const bcm_iov_cmd_tlv_info_t *tlv_info);
+
+/* pack to buffer data callback. under some conditions it might
+ * not be a straight copy and can refer to context(ual) information and
+ * endian conversions...
+ */
+typedef void (*bcm_iov_xtlv_pack_t)(const bcm_iov_cmd_digest_t *dig,
+ const bcm_iov_cmd_tlv_info_t *tlv_info,
+ uint8 *out_buf, const uint8 *in_data, size_t len);
+
+struct bcm_iov_cmd_tlv_info {
+ uint16 id;
+ uint16 min_len; /* inclusive */
+ uint16 max_len; /* inclusive */
+ bcm_iov_xtlv_get_len_t get_len;
+ bcm_iov_xtlv_pack_t pack;
+};
+
+/*
+ * module private parse context. Default version type len is uint16
+ */
+
+/* Command parsing options with respect to validation */
+/* Possible values for parse context options */
+/* Bit 0 - Validate only */
+#define BCM_IOV_PARSE_OPT_BATCH_VALIDATE 0x00000001
+
+typedef uint32 bcm_iov_parse_opts_t;
+
+/* get digest callback */
+typedef int (*bcm_iov_get_digest_t)(void *cmd_ctx, bcm_iov_cmd_digest_t **dig);
+
+typedef struct bcm_iov_parse_config {
+ bcm_iov_parse_opts_t options; /* to handle different ver lengths */
+ bcm_iov_malloc_t alloc_fn;
+ bcm_iov_free_t free_fn;
+ bcm_iov_get_digest_t dig_fn;
+ int max_regs;
+ void *alloc_ctx;
+} bcm_iov_parse_config_t;
+
+/* API */
+
+/* All calls return an integer status code BCME_* unless otherwise indicated */
+
+/* return length of allocation for 'num_cmds' commands. data_len
+ * includes length of data for all the commands excluding the headers
+ */
+size_t bcm_iov_get_alloc_len(int num_cmds, size_t data_len);
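
Typical module wiring fills a bcm_iov_parse_config_t with its allocator callbacks and then creates a context and registers its command table with the functions declared just below. A sketch (the my_* symbols are hypothetical; MALLOCZ/MFREE are the osl.h allocation macros):

/* Sketch: allocator callbacks matching bcm_iov_malloc_t/bcm_iov_free_t. */
static void *
my_alloc(void *alloc_ctx, size_t len)
{
	return MALLOCZ((osl_t *)alloc_ctx, len);
}

static void
my_free(void *alloc_ctx, void *buf, size_t len)
{
	MFREE((osl_t *)alloc_ctx, buf, len);
}

/* Sketch: create a parse context and register one command table. */
static int
my_module_attach(osl_t *osh, void *cmd_ctx,
	const bcm_iov_cmd_info_t *cmds, size_t num_cmds)
{
	bcm_iov_parse_config_t cfg;
	bcm_iov_parse_context_t *parse_ctx = NULL;
	int err;

	memset(&cfg, 0, sizeof(cfg));
	cfg.options = BCM_IOV_PARSE_OPT_BATCH_VALIDATE;
	cfg.alloc_fn = my_alloc;
	cfg.free_fn = my_free;
	cfg.dig_fn = NULL; /* let bcmiov allocate and release the digest */
	cfg.max_regs = 1;
	cfg.alloc_ctx = osh;

	err = bcm_iov_create_parse_context(&cfg, &parse_ctx);
	if (err == BCME_OK) {
		err = bcm_iov_register_commands(parse_ctx, cmd_ctx,
			cmds, num_cmds, NULL, 0);
	}
	return err;
}
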
+/* create parsing context using allocator provided; max_regs provides
+ * the number of allowed registrations for commands using the context.
+ * Sub-components of a module may register their own commands independently
+ * using the parsing context. If the digest callback is NULL or returns NULL,
+ * the (input) digest is allocated using the provided allocators and released on
+ * completion of processing.
+ */
+int bcm_iov_create_parse_context(const bcm_iov_parse_config_t *parse_cfg,
+ bcm_iov_parse_context_t **parse_ctx);
+
+/* free the parsing context; ctx is set to NULL on exit */
+int bcm_iov_free_parse_context(bcm_iov_parse_context_t **ctx, bcm_iov_free_t free_fn);
+
+/* Return the command context for the module */
+void *bcm_iov_get_cmd_ctx_info(bcm_iov_parse_context_t *parse_ctx);
+
+/* register a command info vector along with supported tlvs. Each command
+ * may support a subset of tlvs
+ */
+int bcm_iov_register_commands(bcm_iov_parse_context_t *parse_ctx, void *cmd_ctx,
+ const bcm_iov_cmd_info_t *info, size_t num_cmds,
+ const bcm_iov_cmd_tlv_info_t *tlv_info, size_t num_tlvs);
+
+/* pack the xtlvs provided in the digest. may return BCME_BUFTOOSHORT, but the
+ * out_len is set to required length in that case.
+ */
+int bcm_iov_pack_xtlvs(const bcm_iov_cmd_digest_t *dig, bcm_xtlv_opts_t xtlv_opts,
+ uint8 *out_buf, size_t out_size, size_t *out_len);
+
+#ifdef BCMDRIVER
+/* wlc modules register their iovar(s) using the parsing context w/ wlc layer
+ * during attach.
+ */
+struct wlc_if;
+struct wlc_info;
+extern struct wlc_bsscfg *bcm_iov_bsscfg_find_from_wlcif(struct wlc_info *wlc,
+ struct wlc_if *wlcif);
+int bcm_iov_doiovar(void *parse_ctx, uint32 id, void *params, uint params_len,
+ void *arg, uint arg_len, uint vsize, struct wlc_if *intf);
+#endif /* BCMDRIVER */
+
+/* parsing context helpers */
+
+/* get the maximum number of tlvs - can be used to allocate digest for all
+ * commands. the digest can be shared. Negative values are BCM_*, >=0, the
+ * number of tlvs
+ */
+int bcm_iov_parse_get_max_tlvs(const bcm_iov_parse_context_t *ctx);
+
+/* common packing support */
+
+/* pack a buffer of uint8s - memcpy wrapper */
+int bcm_iov_pack_buf(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
+ const uint8 *data, size_t len);
+
+#define bcm_iov_packv_u8 bcm_iov_pack_buf
+
+/*
+ * pack a buffer with uint16s - serialized in LE order, data points to uint16
+ * length is not checked.
+ */
+int bcm_iov_packv_u16(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
+ const uint16 *data, int n);
+
+/*
+ * pack a buffer with uint32s - serialized in LE order - data points to uint32
+ * length is not checked.
+ */
+int bcm_iov_packv_u32(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
+ const uint32 *data, int n);
+
+#endif /* _bcmiov_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmip.h b/bcmdhd.101.10.361.x/include/bcmip.h
new file mode 100755
index 0000000..898a231
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmip.h
@@ -0,0 +1,286 @@
+/*
+ * Fundamental constants relating to IP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET 0x0 /* offset to version field */
+#define IP_VER_MASK 0xf0 /* version mask */
+#define IP_VER_SHIFT 4 /* version shift */
+#define IP_VER_4 4 /* version number for IPV4 */
+#define IP_VER_6 6 /* version number for IPV6 */
+
+#define IP_VER(ip_body) \
+ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP 0x1 /* ICMP protocol */
+#define IP_PROT_IGMP 0x2 /* IGMP protocol */
+#define IP_PROT_TCP 0x6 /* TCP protocol */
+#define IP_PROT_UDP 0x11 /* UDP protocol type */
+#define IP_PROT_GRE 0x2f /* GRE protocol type */
+#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET 1 /* type of service offset */
+#define IPV4_PKTLEN_OFFSET 2 /* packet length offset */
+#define IPV4_PKTFLAG_OFFSET 6 /* more-frag,dont-frag flag offset */
+#define IPV4_PROT_OFFSET 9 /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */
+#define IPV4_OPTIONS_OFFSET 20 /* IP options offset */
+#define IPV4_MIN_HEADER_LEN 20 /* Minimum size for an IP header (no options) */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */
+#define IPV4_VER_SHIFT 4 /* IPV4 version shift */
+
+#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */
+#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_HLEN_MIN (4 * 5) /* IPV4 header minimum length */
+
+#define IPV4_ADDR_LEN 4 /* IPV4 address length */
+
+#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+ ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */
+#define IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */
+
+#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */
+#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */
+
+#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */
+#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */
+
+#define IPV4_TOS_ROUTINE 0
+#define IPV4_TOS_PRIORITY 1
+#define IPV4_TOS_IMMEDIATE 2
+#define IPV4_TOS_FLASH 3
+#define IPV4_TOS_FLASHOVERRIDE 4
+#define IPV4_TOS_CRITICAL 5
+#define IPV4_TOS_INETWORK_CTRL 6
+#define IPV4_TOS_NETWORK_CTRL 7
+
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV 0x8000 /* Reserved */
+#define IPV4_FRAG_DONT 0x4000 /* Don't fragment */
+#define IPV4_FRAG_MORE 0x2000 /* More fragments */
+#define IPV4_FRAG_OFFSET_MASK 0x1fff /* Fragment offset */
+
+#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */
+
+/* IPV4 packet formats */
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+ uint8 addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+ uint8 version_ihl; /* Version and Internet Header Length */
+ uint8 tos; /* Type Of Service */
+
uint16 tot_len; /* Number of bytes in packet (max 65535) */ + uint16 id; + uint16 frag; /* 3 flag bits and fragment offset */ + uint8 ttl; /* Time To Live */ + uint8 prot; /* Protocol */ + uint16 hdr_chksum; /* IP header checksum */ + uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ +} BWL_POST_PACKED_STRUCT; + +/* IPV6 field offsets */ +#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */ +#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */ +#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */ +#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */ +#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */ + +/* IPV6 field decodes */ +#define IPV6_TRAFFIC_CLASS(ipv6_body) \ + (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \ + ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4)) + +#define IPV6_FLOW_LABEL(ipv6_body) \ + (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \ + (((uint8 *)(ipv6_body))[2] << 8) | \ + (((uint8 *)(ipv6_body))[3])) + +#define IPV6_PAYLOAD_LEN(ipv6_body) \ + ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \ + ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1]) + +#define IPV6_NEXT_HDR(ipv6_body) \ + (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET]) + +#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body) + +#define IPV6_ADDR_LEN 16 /* IPV6 address length */ + +/* IPV4 TOS or IPV6 Traffic Classifier or 0 */ +#define IP_TOS46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0) + +#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT); + +/* IPV4 or IPV6 Protocol Classifier or 0 */ +#define IP_PROT46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? 
IPV6_PROT(ip_body) : 0)
+
+/* IPV6 extension headers (options) */
+#define IPV6_EXTHDR_HOP 0
+#define IPV6_EXTHDR_ROUTING 43
+#define IPV6_EXTHDR_FRAGMENT 44
+#define IPV6_EXTHDR_AUTH 51
+#define IPV6_EXTHDR_NONE 59
+#define IPV6_EXTHDR_DEST 60
+
+#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \
+ ((prot) == IPV6_EXTHDR_ROUTING) || \
+ ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+ ((prot) == IPV6_EXTHDR_AUTH) || \
+ ((prot) == IPV6_EXTHDR_NONE) || \
+ ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 40
+
+#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+ uint8 nexthdr;
+ uint8 hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+ uint8 nexthdr;
+ uint8 rsvd;
+ uint16 frag_off;
+ uint32 ident;
+} BWL_POST_PACKED_STRUCT;
+
+/* deprecated and replaced by ipv6_exthdr_len_check */
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)
+{
+ uint16 len = 0, hlen;
+ struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+ while (IPV6_EXTHDR(eh->nexthdr)) {
+ if (eh->nexthdr == IPV6_EXTHDR_NONE)
+ return -1;
+ else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+ hlen = 8U;
+ else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+ hlen = (uint16)((eh->hdrlen + 2U) << 2U);
+ else
+ hlen = (uint16)IPV6_EXTHDR_LEN(eh);
+
+ len += hlen;
+ eh = (struct ipv6_exthdr *)(h + len);
+ }
+
+ *proto = eh->nexthdr;
+ return len;
+}
+
+/* determine length of exthdr with length checking */
+static INLINE int32
+ipv6_exthdr_len_check(uint8 *h, uint16 plen, uint8 *proto)
+{
+ uint16 len = 0, hlen;
+ struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+ /* must have at least one exthdr */
+ if (plen < sizeof(struct ipv6_exthdr)) {
+ return -1;
+ }
+
+ /* length check before accessing next exthdr */
+ while ((plen >= len + sizeof(struct ipv6_exthdr)) && IPV6_EXTHDR(eh->nexthdr)) {
+ if (eh->nexthdr == IPV6_EXTHDR_NONE) {
+ return -1;
+ } else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT) {
+ hlen = 8U;
+ } else if (eh->nexthdr == IPV6_EXTHDR_AUTH) {
+ hlen = (uint16)((eh->hdrlen + 2U) << 2U);
+ } else {
+ hlen = (uint16)IPV6_EXTHDR_LEN(eh);
+ }
+
+ /* check exthdr length */
+ if (plen < len + hlen) {
+ /* invalid exthdr */
+ return -1;
+ }
+ len += hlen;
+ eh = (struct ipv6_exthdr *)(h + len);
+ }
+
+ /* length check before accessing next exthdr */
+ if (plen >= len + sizeof(struct ipv6_exthdr)) {
+ *proto = eh->nexthdr;
+ } else {
+ *proto = 0;
+ }
+ return len;
+}
+#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000)
+
+#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \
+{ \
+ ether[0] = 0x01; \
+ ether[1] = 0x00; \
+ ether[2] = 0x5E; \
+ ether[3] = (ipv4 & 0x7f0000) >> 16; \
+ ether[4] = (ipv4 & 0xff00) >> 8; \
+ ether[5] = (ipv4 & 0xff); \
+}
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define IPV4_ADDR_STR "%d.%d.%d.%d"
+#define IPV4_ADDR_TO_STR(addr) ((uint32)addr & 0xff000000) >> 24, \
+ ((uint32)addr & 0x00ff0000) >> 16, \
+ ((uint32)addr & 0x0000ff00) >> 8, \
+ ((uint32)addr & 0x000000ff)
+
+#endif /* _bcmip_h_ */
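
The checked walker above is the intended entry point for locating the upper-layer header of an IPv6 packet. A usage sketch (not driver code) that combines it with the version and next-header macros from this file:

/* Sketch: find the upper-layer protocol and its offset in an IPv6 packet.
 * 'pkt'/'pktlen' cover the IPv6 header onward. Returns 0 on success.
 */
static int
ipv6_upper_layer(uint8 *pkt, uint16 pktlen, uint8 *proto, uint16 *off)
{
	int32 ext_len;

	if (pktlen < IPV6_MIN_HLEN || IP_VER(pkt) != IP_VER_6) {
		return -1;
	}
	*proto = IPV6_NEXT_HDR(pkt);
	*off = IPV6_MIN_HLEN;
	if (IPV6_EXTHDR(*proto)) {
		ext_len = ipv6_exthdr_len_check(pkt + IPV6_MIN_HLEN,
			(uint16)(pktlen - IPV6_MIN_HLEN), proto);
		if (ext_len < 0) {
			return -1; /* malformed extension header chain */
		}
		*off += (uint16)ext_len;
	}
	return 0; /* *proto/*off now locate e.g. the TCP/UDP header */
}
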
diff --git a/bcmdhd.101.10.361.x/include/bcmipv6.h b/bcmdhd.101.10.361.x/include/bcmipv6.h
new file mode 100755
index 0000000..89a1515
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmipv6.h
@@ -0,0 +1,160 @@
+/*
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Extension headers */
+#define IPV6_EXT_HOP 0
+#define IPV6_EXT_ROUTE 43
+#define IPV6_EXT_FRAG 44
+#define IPV6_EXT_DEST 60
+#define IPV6_EXT_ESEC 50
+#define IPV6_EXT_AUTH 51
+
+/* Minimum size (extension header "word" length) */
+#define IPV6_EXT_WORD 8
+
+/* Offsets for most extension headers */
+#define IPV6_EXT_NEXTHDR 0
+#define IPV6_EXT_HDRLEN 1
+
+/* Constants specific to fragmentation header */
+#define IPV6_FRAG_MORE_MASK 0x0001
+#define IPV6_FRAG_MORE_SHIFT 0
+#define IPV6_FRAG_OFFS_MASK 0xfff8
+#define IPV6_FRAG_OFFS_SHIFT 3
+
+/* For icmpv6 */
+#define ICMPV6_HEADER_TYPE 0x3A
+#define ICMPV6_PKT_TYPE_RA 134
+#define ICMPV6_PKT_TYPE_NS 135
+#define ICMPV6_PKT_TYPE_NA 136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1
+
+#define ICMPV6_ND_OPT_LEN_LINKADDR 1
+
+#define IPV6_VERSION 6
+#define IPV6_HOP_LIMIT 255
+
+#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \
+ a[5] | a[6] | a[7] | a[8] | a[9] | \
+ a[10] | a[11] | a[12] | a[13] | \
+ a[14] | a[15]) == 0)
+
+#define IPV6_ADDR_LOCAL(a) (((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+ uint8 addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+/* use masks, htonl instead of bit fields */
+#ifndef IL_BIGENDIAN
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+ uint8 icmp6_type;
+ uint8 icmp6_code;
+ uint16 icmp6_cksum;
+ BWL_PRE_PACKED_STRUCT union {
+ uint32 reserved;
+ BWL_PRE_PACKED_STRUCT struct nd_advt {
+ uint32 reserved1:5,
+ override:1,
+ solicited:1,
+ router:1,
+ reserved2:24;
+ } BWL_POST_PACKED_STRUCT nd_advt;
+ } BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+ uint8 priority:4,
+ version:4;
+ uint8 flow_lbl[3];
+ uint16 payload_len;
+ uint8 nexthdr;
+ uint8 hop_limit;
+ struct ipv6_addr saddr;
+ struct ipv6_addr daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct bcm_nd_msg {
+ struct icmp6_hdr icmph;
+ struct ipv6_addr target;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Solicitation/Advertisement Optional Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+ uint8 type;
+ uint8 len;
+ uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Fragmentation Header */
+BWL_PRE_PACKED_STRUCT struct ipv6_frag {
+ uint8 nexthdr;
+ uint8 reserved;
+ uint16 frag_offset;
+ uint32 ident;
+} BWL_POST_PACKED_STRUCT;
+
+#endif /* IL_BIGENDIAN */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+static const struct ipv6_addr all_node_ipv6_maddr = {
+ { 0xff, 0x2, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 1
+ }};
+
+#define IPV6_ISMULTI(a) (a[0] == 0xff)
+
+#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \
+{ \
+ ether[0] = 0x33; \
+ ether[1] = 0x33; \
+ ether[2] = ipv6[12]; \
+ ether[3] = ipv6[13]; \
+ ether[4] = ipv6[14]; \
+ ether[5] = ipv6[15]; \
+}
+
+#endif /* !defined(_bcmipv6_h_) */
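
The IPV6_MCAST_TO_ETHER_MCAST() macro above applies the standard RFC 2464 rule: 33:33 followed by the low four bytes of the group address. A usage sketch (ETHER_ADDR_LEN comes from ethernet.h, just as for nd_msg_opt above):

/* Sketch: derive the L2 destination MAC for an IPv6 multicast group. */
static bool
ipv6_mcast_dest(const struct ipv6_addr *grp, uint8 ether[ETHER_ADDR_LEN])
{
	const uint8 *ipv6 = grp->addr;

	if (!IPV6_ISMULTI(ipv6)) {
		return FALSE;
	}
	IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether);
	return TRUE; /* e.g. ff02::1 (all_node_ipv6_maddr) maps to 33:33:00:00:00:01 */
}
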
diff --git a/bcmdhd.101.10.361.x/include/bcmmsgbuf.h b/bcmdhd.101.10.361.x/include/bcmmsgbuf.h
new file mode 100755
index 0000000..d9177d7
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmmsgbuf.h
@@ -0,0 +1,1706 @@
+/*
+ * MSGBUF network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+#ifndef _bcmmsgbuf_h_
+#define _bcmmsgbuf_h_
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <bcmpcie.h>
+
+#define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN
+
+#define D2H_EPOCH_MODULO 253 /* sequence number wrap */
+#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1)
+
+#define H2D_EPOCH_MODULO 253 /* sequence number wrap */
+#define H2D_EPOCH_INIT_VAL (H2D_EPOCH_MODULO + 1)
+
+/* Txpost base workitem size w/o any extended tags */
+#define H2DRING_TXPOST_BASE_ITEMSIZE 48u
+
+/*
+ * The workitem size - H2DRING_TXPOST_ITEMSIZE is fixed at compile time
+ * only for FW, depending on the BCMPCIE_EXT_TXPOST_SUPPORT flag.
+ * For DHD the work item size is decided dynamically based on
+ * the dongle capability announced in the PCIE_SHARED2 flags which
+ * is read by DHD during dhdpcie_readshared(). Because this
+ * happens before DHD allocs memory for the flowrings, the workitem
+ * size can be dynamic for DHD.
+ */
+#define H2DRING_TXPOST_EXT_ITEMSIZE 56
+#if defined(BCMPCIE_EXT_TXPOST_SUPPORT)
+#define H2DRING_TXPOST_ITEMSIZE H2DRING_TXPOST_EXT_ITEMSIZE
+#else
+#define H2DRING_TXPOST_ITEMSIZE H2DRING_TXPOST_BASE_ITEMSIZE
+#endif
+#define H2DRING_RXPOST_ITEMSIZE 32
+#define H2DRING_CTRL_SUB_ITEMSIZE 40
+
+#define D2HRING_TXCMPLT_ITEMSIZE 24
+#define D2HRING_RXCMPLT_ITEMSIZE 40
+
+#define D2HRING_TXCMPLT_ITEMSIZE_PREREV7 16
+#define D2HRING_RXCMPLT_ITEMSIZE_PREREV7 32
+
+#define D2HRING_CTRL_CMPLT_ITEMSIZE 24
+#define H2DRING_INFO_BUFPOST_ITEMSIZE H2DRING_CTRL_SUB_ITEMSIZE
+#define D2HRING_INFO_BUFCMPLT_ITEMSIZE D2HRING_CTRL_CMPLT_ITEMSIZE
+
+#define D2HRING_SNAPSHOT_CMPLT_ITEMSIZE 20
+
+#define H2DRING_DYNAMIC_INFO_MAX_ITEM 32
+#define D2HRING_DYNAMIC_INFO_MAX_ITEM 32
+
+#define H2DRING_TXPOST_MAX_ITEM 512
+
+#if defined(DHD_HTPUT_TUNABLES)
+#define H2DRING_RXPOST_MAX_ITEM 2048
+#define D2HRING_RXCMPLT_MAX_ITEM 1024
+#define D2HRING_TXCMPLT_MAX_ITEM 2048
+/* Only a few htput flowrings use the htput max items; others use the normal max items */
+#define H2DRING_HTPUT_TXPOST_MAX_ITEM 2048
+#define H2DRING_CTRL_SUB_MAX_ITEM 128
+#else
+#define H2DRING_RXPOST_MAX_ITEM 512
+#define D2HRING_TXCMPLT_MAX_ITEM 1024
+#define D2HRING_RXCMPLT_MAX_ITEM 512
+#define H2DRING_CTRL_SUB_MAX_ITEM 64
+#endif /* DHD_HTPUT_TUNABLES */
+
+#define D2HRING_EDL_HDR_SIZE 48u
+#define D2HRING_EDL_ITEMSIZE 2048u
+#define D2HRING_EDL_MAX_ITEM 256u
+#define D2HRING_EDL_WATERMARK (D2HRING_EDL_MAX_ITEM >> 5u)
+
+#ifdef BCM_ROUTER_DHD
+#define D2HRING_CTRL_CMPLT_MAX_ITEM 256
+#else
+#define D2HRING_CTRL_CMPLT_MAX_ITEM 64
+#endif
+
+/* Max pktids for each type of pkt, shared between host and dongle */
+#define MAX_PKTID_CTRL (1024)
+#define MAX_PKTID_RX (4 * 1024)
+#define MAX_PKTID_TX (36 * 1024)
+
+enum {
+ DNGL_TO_HOST_MSGBUF,
+ HOST_TO_DNGL_MSGBUF
+};
+
+enum {
+ HOST_TO_DNGL_TXP_DATA,
+ HOST_TO_DNGL_RXP_DATA,
+ HOST_TO_DNGL_CTRL,
+ DNGL_TO_HOST_DATA,
+ DNGL_TO_HOST_CTRL
+};
+
+#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? TRUE : FALSE
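
The comment above notes that only the firmware fixes the txpost work item size at compile time; the host picks it at run time from the dongle's announced capability. A sizing sketch, where the 'ext_txpost' flag stands in for the PCIE_SHARED2 capability bit read in dhdpcie_readshared():

/* Sketch: bytes of ring memory needed for the common txpost ring. */
static uint32
txpost_ring_mem_size(bool ext_txpost)
{
	uint16 item_size = ext_txpost ? H2DRING_TXPOST_EXT_ITEMSIZE :
		H2DRING_TXPOST_BASE_ITEMSIZE;

	/* 512 items of 48 or 56 bytes each */
	return (uint32)H2DRING_TXPOST_MAX_ITEM * item_size;
}
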
+#define PCIEDEV_FIRMWARE_TSINFO 0x1
+#define PCIEDEV_FIRMWARE_TSINFO_FIRST 0x1
+#define PCIEDEV_FIRMWARE_TSINFO_MIDDLE 0x2
+#define PCIEDEV_BTLOG_POST 0x3
+#define PCIEDEV_BT_SNAPSHOT_POST 0x4
+
+#ifdef PCIE_API_REV1
+
+#define BCMMSGBUF_DUMMY_REF(a, b) do {BCM_REFERENCE((a));BCM_REFERENCE((b));} while (0)
+
+#define BCMMSGBUF_API_IFIDX(a) 0
+#define BCMMSGBUF_API_SEQNUM(a) 0
+#define BCMMSGBUF_IOCTL_XTID(a) 0
+#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->cmd_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_SET_API_SEQNUM(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID(a) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+
+#else /* PCIE_API_REV1 */
+
+#define BCMMSGBUF_API_IFIDX(a) ((a)->if_id)
+#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->pkt_id)
+#define BCMMSGBUF_API_SEQNUM(a) ((a)->u.seq.seq_no)
+#define BCMMSGBUF_IOCTL_XTID(a) ((a)->xt_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b) (BCMMSGBUF_API_IFIDX((a)) = (b))
+#define BCMMSGBUF_SET_API_SEQNUM(a, b) (BCMMSGBUF_API_SEQNUM((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b) (BCMMSGBUF_IOCTL_XTID((a)) = (b))
+
+#endif /* PCIE_API_REV1 */
+
+/* utility data structures */
+
+union addr64 {
+ struct {
+ uint32 low;
+ uint32 high;
+ };
+ struct {
+ uint32 low_addr;
+ uint32 high_addr;
+ };
+ uint64 u64;
+} DECLSPEC_ALIGN(8);
+
+typedef union addr64 bcm_addr64_t;
+
+/* IOCTL req Hdr */
+/* cmn Msg Hdr */
+typedef struct cmn_msg_hdr {
+ /** message type */
+ uint8 msg_type;
+ /** interface index this is valid for */
+ uint8 if_id;
+ /* flags */
+ uint8 flags;
+ /** sequence number */
+ uint8 epoch;
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id;
+} cmn_msg_hdr_t;
+
+/* cmn aggregated work item msg hdr */
+typedef struct cmn_aggr_msg_hdr {
+ /** aggregate message type */
+ uint8 msg_type;
+ /** aggregation count */
+ uint8 aggr_cnt;
+ /* current phase */
+ uint8 phase;
+ /* flags or sequence number */
+ union {
+ uint8 flags; /* H2D direction */
+ uint8 epoch; /* D2H direction */
+ };
+} cmn_aggr_msg_hdr_t;
+
+/** cmn aggregated completion work item msg hdr */
+typedef struct compl_aggr_msg_hdr {
+ /** interface index this is valid for */
+ uint8 if_id;
+ /** status for the completion */
+ int8 status;
+ /** submission flow ring id which generated this status */
+ uint16 ring_id;
+} compl_aggr_msg_hdr_t;
+
+/** message type */
+typedef enum bcmpcie_msgtype {
+ MSG_TYPE_GEN_STATUS = 0x1,
+ MSG_TYPE_RING_STATUS = 0x2,
+ MSG_TYPE_FLOW_RING_CREATE = 0x3,
+ MSG_TYPE_FLOW_RING_CREATE_CMPLT = 0x4,
+ /* Enum value as copied from BISON 7.15: new generic message */
+ MSG_TYPE_RING_CREATE_CMPLT = 0x4,
+ MSG_TYPE_FLOW_RING_DELETE = 0x5,
+ MSG_TYPE_FLOW_RING_DELETE_CMPLT = 0x6,
+ /* Enum value as copied from BISON 7.15: new generic message */
+ MSG_TYPE_RING_DELETE_CMPLT = 0x6,
+ MSG_TYPE_FLOW_RING_FLUSH = 0x7,
+ MSG_TYPE_FLOW_RING_FLUSH_CMPLT = 0x8,
+ MSG_TYPE_IOCTLPTR_REQ = 0x9,
+ MSG_TYPE_IOCTLPTR_REQ_ACK = 0xA,
+ MSG_TYPE_IOCTLRESP_BUF_POST = 0xB,
+ MSG_TYPE_IOCTL_CMPLT = 0xC,
+ MSG_TYPE_EVENT_BUF_POST = 0xD,
+ MSG_TYPE_WL_EVENT = 0xE,
+ MSG_TYPE_TX_POST = 0xF,
+ MSG_TYPE_TX_STATUS = 0x10,
+ MSG_TYPE_RXBUF_POST = 0x11,
+ MSG_TYPE_RX_CMPLT = 0x12,
+ MSG_TYPE_LPBK_DMAXFER = 0x13,
+ MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14,
+ MSG_TYPE_FLOW_RING_RESUME = 0x15,
+ MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16,
+ MSG_TYPE_FLOW_RING_SUSPEND = 0x17,
+ MSG_TYPE_FLOW_RING_SUSPEND_CMPLT = 0x18,
+ MSG_TYPE_INFO_BUF_POST = 0x19,
+ MSG_TYPE_INFO_BUF_CMPLT = 0x1A,
+ MSG_TYPE_H2D_RING_CREATE = 0x1B,
+ MSG_TYPE_D2H_RING_CREATE = 0x1C,
+ MSG_TYPE_H2D_RING_CREATE_CMPLT = 0x1D,
+ MSG_TYPE_D2H_RING_CREATE_CMPLT = 0x1E,
+ MSG_TYPE_H2D_RING_CONFIG = 0x1F,
+ MSG_TYPE_D2H_RING_CONFIG = 0x20,
+ MSG_TYPE_H2D_RING_CONFIG_CMPLT = 0x21,
+ MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22,
+ MSG_TYPE_H2D_MAILBOX_DATA = 0x23,
+ MSG_TYPE_D2H_MAILBOX_DATA = 0x24,
+ MSG_TYPE_TIMSTAMP_BUFPOST = 0x25,
+ MSG_TYPE_HOSTTIMSTAMP = 0x26,
+ MSG_TYPE_HOSTTIMSTAMP_CMPLT = 0x27,
+ MSG_TYPE_FIRMWARE_TIMESTAMP = 0x28,
+ MSG_TYPE_SNAPSHOT_UPLOAD = 0x29,
+ MSG_TYPE_SNAPSHOT_CMPLT = 0x2A,
+ MSG_TYPE_H2D_RING_DELETE = 0x2B,
+ MSG_TYPE_D2H_RING_DELETE = 0x2C,
+ MSG_TYPE_H2D_RING_DELETE_CMPLT = 0x2D,
+ MSG_TYPE_D2H_RING_DELETE_CMPLT = 0x2E,
+ MSG_TYPE_TX_POST_AGGR = 0x2F,
+ MSG_TYPE_TX_STATUS_AGGR = 0x30,
+ MSG_TYPE_RXBUF_POST_AGGR = 0x31,
+ MSG_TYPE_RX_CMPLT_AGGR = 0x32,
+ MSG_TYPE_API_MAX_RSVD = 0x3F
+} bcmpcie_msg_type_t;
+
+/* message type used in internal queue */
+typedef enum bcmpcie_msgtype_int {
+ MSG_TYPE_INTERNAL_USE_START = 0x40, /* internal pkt */
+ MSG_TYPE_EVENT_PYLD = 0x41, /* wl event pkt */
+ MSG_TYPE_IOCT_PYLD = 0x42, /* ioctl compl pkt */
+ MSG_TYPE_RX_PYLD = 0x43,
+ MSG_TYPE_HOST_FETCH = 0x44,
+ MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45, /* loopback pkt */
+ MSG_TYPE_TXMETADATA_PYLD = 0x46, /* transmit status pkt */
+ MSG_TYPE_INDX_UPDATE = 0x47, /* write indx updated */
+ MSG_TYPE_INFO_PYLD = 0x48,
+ MSG_TYPE_TS_EVENT_PYLD = 0x49,
+ MSG_TYPE_PVT_BTLOG_CMPLT = 0x4A,
+ MSG_TYPE_BTLOG_PYLD = 0x4B,
+ MSG_TYPE_HMAPTEST_PYLD = 0x4C,
+ MSG_TYPE_PVT_BT_SNAPSHOT_CMPLT = 0x4D,
+ MSG_TYPE_BT_SNAPSHOT_PYLD = 0x4E,
+ MSG_TYPE_LPBK_DMAXFER_PYLD_ADDR = 0x4F /* loopback from addr pkt */
+} bcmpcie_msgtype_int_t;
+
+typedef enum bcmpcie_msgtype_u {
+ MSG_TYPE_TX_BATCH_POST = 0x80,
+ MSG_TYPE_IOCTL_REQ = 0x81,
+ MSG_TYPE_HOST_EVNT = 0x82, /* console related */
+ MSG_TYPE_LOOPBACK = 0x83
+} bcmpcie_msgtype_u_t;
+
+/**
+ * D2H ring host wakeup soft doorbell, override the PCIE doorbell.
+ * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE
+ * Transl0 to write specified value to host address.
+ *
+ * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register
+ * and value is Core/Thread context. Host will ensure routing the 32bit address
+ * offered to PCIE to the mapped register.
+ * + * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL + */ +typedef struct bcmpcie_soft_doorbell { + uint32 value; /* host defined value to be written, eg HW threadid */ + bcm_addr64_t haddr; /* host address, eg thread wakeup register address */ + uint16 items; /* interrupt coalescing: item count before wakeup */ + uint16 msecs; /* interrupt coalescing: timeout in millisecs */ +} bcmpcie_soft_doorbell_t; + +/** + * D2H interrupt using MSI instead of INTX + * Host configures MSI vector offset for each D2H interrupt + * + * D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL + */ +typedef enum bcmpcie_msi_intr_idx { + MSI_INTR_IDX_CTRL_CMPL_RING = 0, + MSI_INTR_IDX_TXP_CMPL_RING = 1, + MSI_INTR_IDX_RXP_CMPL_RING = 2, + MSI_INTR_IDX_INFO_CMPL_RING = 3, + MSI_INTR_IDX_MAILBOX = 4, + MSI_INTR_IDX_MAX = 5 +} bcmpcie_msi_intr_idx_t; + +#define BCMPCIE_D2H_MSI_OFFSET_SINGLE 0 +typedef enum bcmpcie_msi_offset_type { + BCMPCIE_D2H_MSI_OFFSET_MB0 = 2, + BCMPCIE_D2H_MSI_OFFSET_MB1 = 3, + BCMPCIE_D2H_MSI_OFFSET_DB0 = 4, + BCMPCIE_D2H_MSI_OFFSET_DB1 = 5, + BCMPCIE_D2H_MSI_OFFSET_H1_DB0 = 6, + BCMPCIE_D2H_MSI_OFFSET_MAX = 7 +} bcmpcie_msi_offset_type_t; + +typedef struct bcmpcie_msi_offset { + uint16 intr_idx; /* interrupt index */ + uint16 msi_offset; /* msi vector offset */ +} bcmpcie_msi_offset_t; + +typedef struct bcmpcie_msi_offset_config { + uint32 len; + bcmpcie_msi_offset_t bcmpcie_msi_offset[MSI_INTR_IDX_MAX]; +} bcmpcie_msi_offset_config_t; + +#define BCMPCIE_D2H_MSI_OFFSET_DEFAULT BCMPCIE_D2H_MSI_OFFSET_DB1 + +#define BCMPCIE_D2H_MSI_SINGLE 0xFFFE + +/* if_id */ +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX 0x7 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT 0 +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX 0x1F +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) + +/* flags */ +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1 +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2 +#define BCMPCIE_CMNHDR_FLAGS_TS_SEQNUM_INIT 0x4 +#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80 +#define BCMPCIE_CMNHDR_PHASE_BIT_INIT 0x80 + +/* IOCTL request message */ +typedef struct ioctl_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** ioctl command type */ + uint32 cmd; + /** ioctl transaction ID, to pair with a ioctl response */ + uint16 trans_id; + /** input arguments buffer len */ + uint16 input_buf_len; + /** expected output len */ + uint16 output_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 rsvd[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; + /* rsvd */ + uint32 rsvd1[2]; +} ioctl_req_msg_t; + +/** buffer post messages for device to use to return IOCTL responses, Events */ +typedef struct ioctl_resp_evt_buf_post_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** length of the host buffer supplied */ + uint16 host_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 reserved[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_buf_addr; + uint32 rsvd[4]; +} ioctl_resp_evt_buf_post_msg_t; + +/* buffer post messages for device to use to return dbg buffers */ +typedef ioctl_resp_evt_buf_post_msg_t info_buf_post_msg_t; + +#ifdef DHD_EFI +#define DHD_INFOBUF_RX_BUFPOST_PKTSZ 1800 +#else +#define DHD_INFOBUF_RX_BUFPOST_PKTSZ (2 * 1024) +#endif + +#define DHD_BTLOG_RX_BUFPOST_PKTSZ (2 * 1024) + +/* An 
infobuf host buffer starts with a 32 bit (LE) version. */
+#define PCIE_INFOBUF_V1 1
+/* Infobuf v1 type MSGTRACE's data is exactly the same as the MSGTRACE data that
+ * is wrapped previously/also in a WLC_E_TRACE event. See structure
+ * msgtrace_hdr_t in msgtrace.h.
+*/
+#define PCIE_INFOBUF_V1_TYPE_MSGTRACE 1
+
+/* Infobuf v1 type LOGTRACE data is exactly the same as the LOGTRACE data that
+ * is wrapped previously/also in a WLC_E_TRACE event. See structure
+ * msgtrace_hdr_t in msgtrace.h. (The only difference between a MSGTRACE
+ * and a LOGTRACE is the "trace type" field.)
+*/
+#define PCIE_INFOBUF_V1_TYPE_LOGTRACE 2
+
+/* An infobuf version 1 host buffer has a single TLV. The information on the
+ * version 1 types follow this structure definition. (int's LE)
+*/
+typedef struct info_buf_payload_hdr_s {
+ uint16 type;
+ uint16 length;
+} info_buf_payload_hdr_t;
+
+/* BT logs/memory to DMA directly from BT memory to host */
+typedef struct info_buf_btlog_s {
+ void (*status_cb)(void *ctx, void *p, int error); /* obsolete - to be removed */
+ void *ctx;
+ dma64addr_t src_addr;
+ uint32 length;
+ bool (*pcie_status_cb)(osl_t *osh, void *p, int error);
+ uint32 bt_intstatus;
+ int error;
+} info_buf_btlog_t;
+
+/** snapshot upload request message */
+typedef struct snapshot_upload_request_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** length of the snapshot buffer supplied */
+ uint32 snapshot_buf_len;
+ /** type of snapshot */
+ uint8 snapshot_type;
+ /** snapshot param */
+ uint8 snapshot_param;
+ /** to align the host address on 8 byte boundary */
+ uint8 reserved[2];
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_buf_addr;
+ uint32 rsvd[4];
+} snapshot_upload_request_msg_t;
+
+/** snapshot types */
+typedef enum bcmpcie_snapshot_type {
+ SNAPSHOT_TYPE_BT = 0, /* Bluetooth SRAM and patch RAM */
+ SNAPSHOT_TYPE_WLAN_SOCRAM = 1, /* WLAN SOCRAM */
+ SNAPSHOT_TYPE_WLAN_HEAP = 2, /* WLAN HEAP */
+ SNAPSHOT_TYPE_WLAN_REGISTER = 3 /* WLAN registers */
+} bcmpcie_snapshot_type_t;
+
+#define PCIE_DMA_XFER_FLG_D11_LPBK_MASK 0xF
+#define PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT 2
+#define PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK 3
+#define PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT 0
+
+typedef struct pcie_dma_xfer_params {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_input_buf_addr;
+
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_ouput_buf_addr;
+
+ /** length of transfer */
+ uint32 xfer_len;
+ /** delay before doing the src txfer */
+ uint32 srcdelay;
+ /** delay before doing the dest txfer */
+ uint32 destdelay;
+ uint8 rsvd[3];
+ /* bit0: D11 DMA loopback flag */
+ uint8 flags;
+} pcie_dma_xfer_params_t;
+
+#define BCMPCIE_FLOW_RING_INTF_HP2P 0x01u /* bit0 */
+#define BCMPCIE_FLOW_RING_OPT_EXT_TXSTATUS 0x02u /* bit1 */
+#define BCMPCIE_FLOW_RING_INTF_MESH 0x04u /* bit2, identifies the mesh flow ring */
+
+/** Complete msgbuf hdr for flow ring update from host to dongle */
+typedef struct tx_flowring_create_request {
+ cmn_msg_hdr_t msg;
+ uint8 da[ETHER_ADDR_LEN];
+ uint8 sa[ETHER_ADDR_LEN];
+ uint8 tid;
+ uint8 if_flags;
+ uint16 flow_ring_id;
+ uint8 tc;
+ /* priority_ifrmmask is to define core mask in ifrm mode.
+ * currently it is not used for priority, so it is used solely for the ifrm mask
+ */
+ uint8 priority_ifrmmask;
+ uint16 int_vector;
+ uint16 max_items;
+ uint16 len_item;
+ bcm_addr64_t flow_ring_ptr;
+} tx_flowring_create_request_t;
+
+typedef struct tx_flowring_delete_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_flowring_delete_request_t;
+
+typedef tx_flowring_delete_request_t d2h_ring_delete_req_t;
+typedef tx_flowring_delete_request_t h2d_ring_delete_req_t;
+
+typedef struct tx_flowring_flush_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_flowring_flush_request_t;
+
+/** Subtypes for ring_config_req control message */
+typedef enum ring_config_subtype {
+ /** Default D2H PCIE doorbell override using ring_config_req msg */
+ D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */
+ D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2 /* MSI configuration */
+} ring_config_subtype_t;
+
+typedef struct ring_config_req { /* pulled from upcoming rev6 ... */
+ cmn_msg_hdr_t msg;
+ uint16 subtype;
+ uint16 ring_id;
+ uint32 rsvd;
+ union {
+ uint32 data[6];
+ /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */
+ bcmpcie_soft_doorbell_t soft_doorbell;
+ /** D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL */
+ bcmpcie_msi_offset_config_t msi_offset;
+ };
+} ring_config_req_t;
+
+/* data structure to use to create on the fly d2h rings */
+typedef struct d2h_ring_create_req {
+ cmn_msg_hdr_t msg;
+ uint16 ring_id;
+ uint16 ring_type;
+ uint32 flags;
+ bcm_addr64_t ring_ptr;
+ uint16 max_items;
+ uint16 len_item;
+ uint32 rsvd[3];
+} d2h_ring_create_req_t;
+
+/* data structure to use to create on the fly h2d rings */
+#define MAX_COMPLETION_RING_IDS_ASSOCIATED 4
+typedef struct h2d_ring_create_req {
+ cmn_msg_hdr_t msg;
+ uint16 ring_id;
+ uint8 ring_type;
+ uint8 n_completion_ids;
+ uint32 flags;
+ bcm_addr64_t ring_ptr;
+ uint16 max_items;
+ uint16 len_item;
+ uint16 completion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
+ uint32 rsvd;
+} h2d_ring_create_req_t;
+
+typedef struct d2h_ring_config_req {
+ cmn_msg_hdr_t msg;
+ uint16 d2h_ring_config_subtype;
+ uint16 d2h_ring_id;
+ uint32 d2h_ring_config_data[4];
+ uint32 rsvd[3];
+} d2h_ring_config_req_t;
+
+typedef struct h2d_ring_config_req {
+ cmn_msg_hdr_t msg;
+ uint16 h2d_ring_config_subtype;
+ uint16 h2d_ring_id;
+ uint32 h2d_ring_config_data;
+ uint32 rsvd[6];
+} h2d_ring_config_req_t;
+
+typedef struct h2d_mailbox_data {
+ cmn_msg_hdr_t msg;
+ uint32 mail_box_data;
+ uint32 rsvd[7];
+} h2d_mailbox_data_t;
+typedef struct host_timestamp_msg {
+ cmn_msg_hdr_t msg;
+ uint16 xt_id; /* transaction ID */
+ uint16 input_data_len; /* data len at the host_buf_addr, data in TLVs */
+ uint16 seqnum; /* number of times host captured the timestamp */
+ uint16 rsvd;
+ /* always align on 8 byte boundary */
+ bcm_addr64_t host_buf_addr;
+ /* rsvd */
+ uint32 rsvd1[4];
+} host_timestamp_msg_t;
+
+/* buffer post message for timestamp events MSG_TYPE_TIMSTAMP_BUFPOST */
+typedef ioctl_resp_evt_buf_post_msg_t ts_buf_post_msg_t;
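
The submit-item unions that follow overlay every message type with a check[] byte array so that each work item occupies exactly one ring slot. The same invariant can be enforced at compile time; a sketch using the classic negative-array-size idiom (not the driver's own macro):

/* Sketch: fail the build if a control message outgrows its ring slot. */
#define RING_ITEM_FITS(type, slot) \
	typedef char type ## _fits_slot[(sizeof(type) <= (slot)) ? 1 : -1]

RING_ITEM_FITS(ioctl_req_msg_t, H2DRING_CTRL_SUB_ITEMSIZE);
RING_ITEM_FITS(host_timestamp_msg_t, H2DRING_CTRL_SUB_ITEMSIZE);
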
host_timestamp_msg_t host_ts;
+ ts_buf_post_msg_t ts_buf_post;
+ d2h_ring_delete_req_t d2h_delete;
+ h2d_ring_delete_req_t h2d_delete;
+ unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE];
+} ctrl_submit_item_t;
+
+typedef struct info_ring_submit_item {
+ info_buf_post_msg_t info_buf_post;
+ unsigned char check[H2DRING_INFO_BUFPOST_ITEMSIZE];
+} info_sumbit_item_t;
+
+/** Control Completion messages (20 bytes) */
+typedef struct compl_msg_hdr {
+ union {
+ /** status for the completion */
+ int16 status;
+
+ /* mutually exclusive with pkt fate debug feature */
+ struct pktts_compl_hdr {
+ uint16 d_t4; /* Delta TimeStamp 3: T4-tref */
+ } tx_pktts;
+ };
+ /** submission flow ring id which generated this status */
+ union {
+ uint16 ring_id;
+ uint16 flow_ring_id;
+ };
+} compl_msg_hdr_t;
+
+/** XOR checksum or a magic number to audit DMA done */
+typedef uint32 dma_done_t;
+
+#define MAX_CLKSRC_ID 0xF
+#define TX_PKT_RETRY_CNT_0_MASK 0x000000FF
+#define TX_PKT_RETRY_CNT_0_SHIFT 0
+#define TX_PKT_RETRY_CNT_1_MASK 0x0000FF00
+#define TX_PKT_RETRY_CNT_1_SHIFT 8
+#define TX_PKT_RETRY_CNT_2_MASK 0x00FF0000
+#define TX_PKT_RETRY_CNT_2_SHIFT 16
+#define TX_PKT_BAND_INFO 0x0F000000
+#define TX_PKT_BAND_INFO_SHIFT 24
+#define TX_PKT_VALID_INFO 0xF0000000
+#define TX_PKT_VALID_INFO_SHIFT 28
+
+typedef struct ts_timestamp_srcid {
+ union {
+ uint32 ts_low; /* time stamp low 32 bits */
+ uint32 rate_spec; /* use ratespec */
+ };
+ union {
+ uint32 ts_high; /* time stamp high 28 bits */
+ union {
+ uint32 ts_high_ext :28; /* time stamp high 28 bits */
+ uint32 clk_id_ext :3; /* clock ID source */
+ uint32 phase :1; /* Phase bit */
+ dma_done_t marker_ext;
+ };
+ uint32 tx_pkt_band_retry_info;
+ };
+} ts_timestamp_srcid_t;
+
+typedef ts_timestamp_srcid_t ipc_timestamp_t;
+
+typedef struct ts_timestamp {
+ uint32 low;
+ uint32 high;
+} ts_timestamp_t;
+
+typedef ts_timestamp_t tick_count_64_t;
+typedef ts_timestamp_t ts_timestamp_ns_64_t;
+typedef ts_timestamp_t ts_correction_m_t;
+typedef ts_timestamp_t ts_correction_b_t;
+
+typedef struct _pktts {
+ uint32 tref; /* Ref Clk in uSec (currently, tsf) */
+ uint16 d_t2; /* Delta TimeStamp 1: T2-tref */
+ uint16 d_t3; /* Delta TimeStamp 2: T3-tref */
+} pktts_t;
+
+/* completion header status codes */
+#define BCMPCIE_SUCCESS 0
+#define BCMPCIE_NOTFOUND 1
+#define BCMPCIE_NOMEM 2
+#define BCMPCIE_BADOPTION 3
+#define BCMPCIE_RING_IN_USE 4
+#define BCMPCIE_RING_ID_INVALID 5
+#define BCMPCIE_PKT_FLUSH 6
+#define BCMPCIE_NO_EVENT_BUF 7
+#define BCMPCIE_NO_RX_BUF 8
+#define BCMPCIE_NO_IOCTLRESP_BUF 9
+#define BCMPCIE_MAX_IOCTLRESP_BUF 10
+#define BCMPCIE_MAX_EVENT_BUF 11
+#define BCMPCIE_BAD_PHASE 12
+#define BCMPCIE_INVALID_CPL_RINGID 13
+#define BCMPCIE_RING_TYPE_INVALID 14
+#define BCMPCIE_NO_TS_EVENT_BUF 15
+#define BCMPCIE_MAX_TS_EVENT_BUF 16
+#define BCMPCIE_PCIE_NO_BTLOG_BUF 17
+#define BCMPCIE_BT_DMA_ERR 18
+#define BCMPCIE_BT_DMA_DESCR_FETCH_ERR 19
+#define BCMPCIE_SNAPSHOT_ERR 20
+#define BCMPCIE_NOT_READY 21
+#define BCMPCIE_INVALID_DATA 22
+#define BCMPCIE_NO_RESPONSE 23
+#define BCMPCIE_NO_CLOCK 24
+
+/** IOCTL completion response */
+typedef struct ioctl_compl_resp_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /** response buffer len where a host buffer is involved */
+ uint16 resp_len;
+ /** transaction id to pair with a request */
+ uint16 trans_id;
+ /** cmd id */
+ uint32 cmd;
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+}
ioctl_comp_resp_msg_t; + +/** IOCTL request acknowledgement */ +typedef struct ioctl_req_ack_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** cmd id */ + uint32 cmd; + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ioctl_req_ack_msg_t; + +/** WL event message: send from device to host */ +typedef struct wlevent_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** event data len valid with the event buffer */ + uint16 event_data_len; + /** sequence number */ + uint16 seqnum; + /** rsvd */ + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} wlevent_req_msg_t; + +/** dma xfer complete message */ +typedef struct pcie_dmaxfer_cmplt { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_dmaxfer_cmplt_t; + +/** general status message */ +typedef struct pcie_gen_status { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_gen_status_t; + +/** ring status message */ +typedef struct pcie_ring_status { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** message which firmware couldn't decode */ + uint16 write_idx; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_ring_status_t; + +typedef struct ring_create_response { + cmn_msg_hdr_t cmn_hdr; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ring_create_response_t; + +typedef ring_create_response_t tx_flowring_create_response_t; +typedef ring_create_response_t h2d_ring_create_response_t; +typedef ring_create_response_t d2h_ring_create_response_t; + +typedef struct tx_flowring_delete_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint16 read_idx; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_delete_response_t; + +typedef tx_flowring_delete_response_t h2d_ring_delete_response_t; +typedef tx_flowring_delete_response_t d2h_ring_delete_response_t; + +typedef struct tx_flowring_flush_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_flush_response_t; + +/** Common layout of all d2h control messages */ +typedef struct ctrl_compl_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ctrl_compl_msg_t; + +typedef struct ring_config_resp { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint16 subtype; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ring_config_resp_t; + +typedef struct d2h_mailbox_data { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 d2h_mailbox_data; + 
uint32 rsvd[1]; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} d2h_mailbox_data_t; + +/* dbg buf completion msg: send from device to host */ +typedef struct info_buf_resp { + /* common message header */ + cmn_msg_hdr_t cmn_hdr; + /* completion message header */ + compl_msg_hdr_t compl_hdr; + /* event data len valid with the event buffer */ + uint16 info_data_len; + /* sequence number */ + uint16 seqnum; + /* destination */ + uint8 dest; + /* rsvd */ + uint8 rsvd[3]; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} info_buf_resp_t; + +/* snapshot completion msg: send from device to host */ +typedef struct snapshot_resp { + /* common message header */ + cmn_msg_hdr_t cmn_hdr; + /* completion message header */ + compl_msg_hdr_t compl_hdr; + /* snapshot length uploaded */ + uint32 resp_len; + /* snapshot type */ + uint8 type; + /* rsvd */ + uint8 rsvd[3]; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} snapshot_resp_t; + +typedef struct info_ring_cpl_item { + info_buf_resp_t info_buf_post; + unsigned char check[D2HRING_INFO_BUFCMPLT_ITEMSIZE]; +} info_cpl_item_t; + +typedef struct host_timestamp_msg_cpl { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint16 xt_id; /* transaction ID */ + uint16 rsvd; + uint32 rsvd1; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} host_timestamp_msg_cpl_t; + +typedef struct fw_timestamp_event_msg { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + /* fw captures time stamp info and passed that to host in TLVs */ + uint16 buf_len; /* length of the time stamp data copied in host buf */ + uint16 seqnum; /* number of times fw captured time stamp */ + uint32 rsvd; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} fw_timestamp_event_msg_t; + +typedef union ctrl_completion_item { + ioctl_comp_resp_msg_t ioctl_resp; + wlevent_req_msg_t event; + ioctl_req_ack_msg_t ioct_ack; + pcie_dmaxfer_cmplt_t pcie_xfer_cmplt; + pcie_gen_status_t pcie_gen_status; + pcie_ring_status_t pcie_ring_status; + tx_flowring_create_response_t txfl_create_resp; + tx_flowring_delete_response_t txfl_delete_resp; + tx_flowring_flush_response_t txfl_flush_resp; + ctrl_compl_msg_t ctrl_compl; + ring_config_resp_t ring_config_resp; + d2h_mailbox_data_t d2h_mailbox_data; + info_buf_resp_t dbg_resp; + h2d_ring_create_response_t h2d_ring_create_resp; + d2h_ring_create_response_t d2h_ring_create_resp; + host_timestamp_msg_cpl_t host_ts_cpl; + fw_timestamp_event_msg_t fw_ts_event; + h2d_ring_delete_response_t h2d_ring_delete_resp; + d2h_ring_delete_response_t d2h_ring_delete_resp; + unsigned char ctrl_response[D2HRING_CTRL_CMPLT_ITEMSIZE]; +} ctrl_completion_item_t; + +/** H2D Rxpost ring work items */ +typedef struct host_rxbuf_post { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** provided meta data buffer len */ + uint16 metadata_buf_len; + /** provided data buffer len to receive data */ + uint16 data_buf_len; + /** alignment to make the host buffers start on 8 byte boundary */ + uint32 rsvd; + /** provided meta data buffer */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; +} host_rxbuf_post_t; + +typedef union rxbuf_submit_item { + host_rxbuf_post_t rxpost; + unsigned char check[H2DRING_RXPOST_ITEMSIZE]; +} rxbuf_submit_item_t; + +/* D2H Rxcompletion ring work items for IPC rev7 */ +typedef struct host_rxbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t 
cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /** filled up meta data len */
+ uint16 metadata_len;
+ /** filled up buffer len to receive data */
+ uint16 data_len;
+ /** offset in the host rx buffer where the data starts */
+ uint16 data_offset;
+ /** flags */
+ uint16 flags;
+ /** rx status */
+ uint32 rx_status_0;
+ uint32 rx_status_1;
+
+ union { /* size per IPC = (3 x uint32) bytes */
+ struct {
+ /* used by Monitor mode */
+ uint32 marker;
+ /* timestamp */
+ ipc_timestamp_t ts;
+ };
+
+ /* LatTS_With_XORCSUM */
+ struct {
+ /* latency timestamp */
+ pktts_t rx_pktts;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker_ext;
+ };
+ };
+} host_rxbuf_cmpl_t;
+
+typedef union rxbuf_complete_item {
+ host_rxbuf_cmpl_t rxcmpl;
+ unsigned char check[D2HRING_RXCMPLT_ITEMSIZE];
+} rxbuf_complete_item_t;
+
+typedef struct host_txbuf_post_v1 {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+ /** flags */
+ uint8 flags;
+ /** number of segments */
+ uint8 seg_cnt;
+
+ /** provided meta data buffer for txstatus */
+ bcm_addr64_t metadata_buf_addr;
+ /** provided data buffer containing Tx payload */
+ bcm_addr64_t data_buf_addr;
+ /** provided meta data buffer len */
+ uint16 metadata_buf_len;
+ /** provided data buffer len */
+ uint16 data_len;
+ union {
+ struct {
+ /** extended transmit flags */
+ uint8 ext_flags;
+ uint8 scale_factor;
+
+ /** user defined rate */
+ uint8 rate;
+ uint8 exp_time;
+ };
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+ };
+} host_txbuf_post_v1_t;
+
+typedef enum pkt_csum_type_shift {
+ PKT_CSUM_TYPE_IPV4_SHIFT = 0, /* pkt has IPv4 hdr */
+ PKT_CSUM_TYPE_IPV6_SHIFT = 1, /* pkt has IPv6 hdr */
+ PKT_CSUM_TYPE_TCP_SHIFT = 2, /* pkt has TCP hdr */
+ PKT_CSUM_TYPE_UDP_SHIFT = 3, /* pkt has UDP hdr */
+ PKT_CSUM_TYPE_NWK_CSUM_SHIFT = 4, /* pkt requires IP csum offload */
+ PKT_CSUM_TYPE_TRANS_CSUM_SHIFT = 5, /* pkt requires TCP/UDP csum offload */
+ PKT_CSUM_TYPE_PSEUDOHDR_CSUM_SHIFT = 6, /* pkt requires pseudo header csum offload */
+} pkt_type_shift_t;
+
+typedef struct pkt_info_cso {
+ /* packet csum type = ipv4/v6|udp|tcp|nwk_csum|trans_csum|ph_csum */
+ uint8 ver;
+ uint8 pkt_csum_type;
+ uint8 nwk_hdr_len; /* IP header length */
+ uint8 trans_hdr_len; /* TCP header length */
+} pkt_info_cso_t;
+
+typedef struct host_txbuf_post_v2 {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+ /** flags */
+ uint8 flags;
+ /** number of segments */
+ uint8 seg_cnt;
+
+ /** provided meta data buffer for txstatus */
+ bcm_addr64_t metadata_buf_addr;
+ /** provided data buffer containing Tx payload */
+ bcm_addr64_t data_buf_addr;
+ /** provided meta data buffer len */
+ uint16 metadata_buf_len;
+ /** provided data buffer len */
+ uint16 data_len;
+ struct {
+ /** extended transmit flags */
+ uint8 ext_flags;
+ uint8 scale_factor;
+
+ /** user defined rate */
+ uint8 rate;
+ uint8 exp_time;
+ };
+ /** additional information on the packet required for CSO */
+ pkt_info_cso_t pktinfo;
+ uint32 PAD;
+} host_txbuf_post_v2_t;
+
+#if defined(BCMPCIE_EXT_TXPOST_SUPPORT) || defined(TX_CSO)
+typedef host_txbuf_post_v2_t host_txbuf_post_t;
+#else
+typedef host_txbuf_post_v1_t host_txbuf_post_t;
+#endif
+
+#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01
+#define BCMPCIE_PKT_FLAGS_FRAME_802_11 0x02
+
+#define BCMPCIE_PKT_FLAGS_FRAME_NORETRY 0x01 /*
Disable retry on this frame */ +#define BCMPCIE_PKT_FLAGS_FRAME_NOAGGR 0x02 /* Disable aggregation for this frame */ +#define BCMPCIE_PKT_FLAGS_FRAME_UDR 0x04 /* User defined rate for this frame */ +#define BCMPCIE_PKT_FLAGS_FRAME_ATTR_MASK 0x07 /* Attribute mask */ + +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */ +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */ + +#define BCMPCIE_PKT_FLAGS_EPOCH_SHIFT 3u +#define BCMPCIE_PKT_FLAGS_EPOCH_MASK (1u << BCMPCIE_PKT_FLAGS_EPOCH_SHIFT) + +#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5 +#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) +#define BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU 0x00 +#define BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT 0x01 +#define BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT 0x02 +#define BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT 0x03 +#define BCMPCIE_PKT_FLAGS_MONITOR_SHIFT 8 +#define BCMPCIE_PKT_FLAGS_MONITOR_MASK (3 << BCMPCIE_PKT_FLAGS_MONITOR_SHIFT) + +#define BCMPCIE_PKT_FLAGS_FRAME_MESH 0x400u +/* Indicate RX checksum verified and passed */ +#define BCMPCIE_PKT_FLAGS_RCSUM_VALID 0x800u + +/* These are added to fix up compile issues */ +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3 +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11 +#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT +#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK + +#define BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC 0x10u +#define BCMPCIE_TXPOST_RATE_EXT_USAGE 0x80 /* The rate field has extended usage */ +#define BCMPCIE_TXPOST_RATE_PROFILE_IDX_MASK 0x07 /* The Tx profile index in the rate field */ + +/* H2D Txpost ring work items */ +typedef union txbuf_submit_item { + host_txbuf_post_t txpost; + unsigned char check[H2DRING_TXPOST_ITEMSIZE]; +} txbuf_submit_item_t; + +/* D2H Txcompletion ring work items - extended for IOC rev7 */ +typedef struct host_txbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + + union { /* size per IPC = (3 x uint32) bytes */ + /* Usage 1: TxS_With_TimeSync */ + struct { + struct { + union { + /** provided meta data len */ + uint16 metadata_len; + /** provided extended TX status */ + uint16 tx_status_ext; + }; /*Ext_TxStatus */ + + /** WLAN side txstatus */ + uint16 tx_status; + }; /* TxS */ + /* timestamp */ + ipc_timestamp_t ts; + }; /* TxS_with_TS */ + + /* Usage 2: LatTS_With_XORCSUM */ + struct { + /* latency timestamp */ + pktts_t tx_pktts; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker_ext; + }; + }; + +} host_txbuf_cmpl_t; + +typedef union txbuf_complete_item { + host_txbuf_cmpl_t txcmpl; + unsigned char check[D2HRING_TXCMPLT_ITEMSIZE]; +} txbuf_complete_item_t; + +#define METADATA_VER_1 1u +#define METADATA_VER_2 2u +#define PCIE_METADATA_VER METADATA_VER_2 + +/* version and length are not part of this structure. + * dhd queries version and length through bus iovar "bus:metadata_info". 
+ */ +struct metadata_txcmpl_v1 { + uint32 tref; /* TSF or Ref Clock in uSecs */ + uint16 d_t2; /* T2-fwt1 delta */ + uint16 d_t3; /* T3-fwt1 delta */ + uint16 d_t4; /* T4-fwt1 delta */ + uint16 rsvd; /* reserved */ +}; + +struct metadata_txcmpl_v2 { + uint32 tref; /* TSF or Ref Clock in uSecs */ + uint16 d_t2; /* T2-fwt1 delta */ + uint16 d_t3; /* T3-fwt1 delta */ + uint16 d_t4; /* T4-fwt1 delta */ + + uint16 u_t1; /* PSM Packet Fetch Time in 32us */ + uint16 u_t2; /* Medium Access Delay delta */ + uint16 u_t3; /* Rx duration delta */ + uint16 u_t4; /* Mac Suspend Duration delta */ + uint16 u_t5; /* TxStatus Time in 32us */ + + uint16 u_c1; /* Number of times Tx was enabled */ + uint16 u_c2; /* Other AC TxStatus count */ + uint16 u_c3; /* DataRetry count */ + uint16 u_c4; /* RTS */ + uint16 u_c5; /* CTS */ + uint16 u_c6; /* debug 1 */ + uint16 u_c7; /* debug 2 */ + uint16 u_c8; /* debug 3 */ +}; +typedef struct metadata_txcmpl_v2 metadata_txcmpl_t; + +#define BCMPCIE_D2H_METADATA_HDRLEN 4 +#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4) + +/** ret buf struct */ +typedef struct ret_buf_ptr { + uint32 low_addr; + uint32 high_addr; +} ret_buf_t; + +#ifdef PCIE_API_REV1 + +/* ioctl specific hdr */ +typedef struct ioctl_hdr { + uint16 cmd; + uint16 retbuf_len; + uint32 cmd_id; +} ioctl_hdr_t; + +typedef struct ioctlptr_hdr { + uint16 cmd; + uint16 retbuf_len; + uint16 buflen; + uint16 rsvd; + uint32 cmd_id; +} ioctlptr_hdr_t; + +#else /* PCIE_API_REV1 */ + +typedef struct ioctl_req_hdr { + uint32 pkt_id; /**< Packet ID */ + uint32 cmd; /**< IOCTL ID */ + uint16 retbuf_len; + uint16 buflen; + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +} ioctl_req_hdr_t; + +#endif /* PCIE_API_REV1 */ + +/** Complete msgbuf hdr for ioctl from host to dongle */ +typedef struct ioct_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctl_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif + ret_buf_t ret_buf; +} ioct_reqst_hdr_t; + +typedef struct ioctptr_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctlptr_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif + ret_buf_t ret_buf; + ret_buf_t ioct_buf; +} ioctptr_reqst_hdr_t; + +/** ioctl response header */ +typedef struct ioct_resp_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + uint32 cmd_id; +#else + uint32 pkt_id; +#endif + uint32 status; + uint32 ret_len; + uint32 inline_data; +#ifdef PCIE_API_REV1 +#else + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +#endif +} ioct_resp_hdr_t; + +/* ioct resp header used in dongle */ +/* ret buf hdr will be stripped off inside dongle itself */ +typedef struct msgbuf_ioctl_resp { + ioct_resp_hdr_t ioct_hdr; + ret_buf_t ret_buf; /**< ret buf pointers */ +} msgbuf_ioct_resp_t; + +/** WL event hdr info */ +typedef struct wl_event_hdr { + cmn_msg_hdr_t msg; + uint16 event; + uint8 flags; + uint8 rsvd; + uint16 retbuf_len; + uint16 rsvd1; + uint32 rxbufid; +} wl_event_hdr_t; + +#define TXDESCR_FLOWID_PCIELPBK_1 0xFF +#define TXDESCR_FLOWID_PCIELPBK_2 0xFE + +typedef struct txbatch_lenptr_tup { + uint32 pktid; + uint16 pktlen; + uint16 rsvd; + ret_buf_t ret_buf; /**< ret buf pointers */ +} txbatch_lenptr_tup_t; + +typedef struct txbatch_cmn_msghdr { + cmn_msg_hdr_t msg; + uint8 priority; + uint8 hdrlen; + uint8 pktcnt; + uint8 flowid; + uint8 txhdr[ETHER_HDR_LEN]; + uint16 rsvd; +} txbatch_cmn_msghdr_t; + +typedef struct txbatch_msghdr { + txbatch_cmn_msghdr_t txcmn; + txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */ +} txbatch_msghdr_t; + 
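The txbatch_msghdr_t above ends in the pre-C99 zero-length array tx_tup[0]: a batch message is a fixed txbatch_cmn_msghdr_t followed immediately by pktcnt bufid/len/pointer tuples, so the size of the work item varies with the packet count. Below is a minimal editorial sketch of how such a message would be sized and walked, using only the definitions above; the two helper functions are hypothetical and not part of this driver.

/* Hypothetical helpers illustrating the flexible-array layout of
 * txbatch_msghdr_t; not part of the bcmdhd sources.
 */
static uint32
txbatch_msg_size(uint8 pktcnt)
{
	/* The tuples start right after the fixed header because tx_tup[]
	 * is a zero-length (flexible) array member.
	 */
	return (uint32)(sizeof(txbatch_cmn_msghdr_t) +
	                pktcnt * sizeof(txbatch_lenptr_tup_t));
}

static void
txbatch_msg_walk(txbatch_msghdr_t *m)
{
	uint8 i;
	for (i = 0; i < m->txcmn.pktcnt; i++) {
		txbatch_lenptr_tup_t *tup = &m->tx_tup[i];
		/* tup->pktid names the host packet, tup->pktlen gives its
		 * length, and tup->ret_buf carries the 64-bit host address
		 * split into low_addr/high_addr (see ret_buf_t above).
		 */
		(void)tup;
	}
}

Because msgbuf rings carry fixed-size work items, a producer would bound pktcnt so that txbatch_msg_size(pktcnt) still fits within the ring's per-item length.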
+/* TX desc posting header */
+typedef struct tx_lenptr_tup {
+ uint16 pktlen;
+ uint16 rsvd;
+ ret_buf_t ret_buf; /**< ret buf pointers */
+} tx_lenptr_tup_t;
+
+typedef struct txdescr_cmn_msghdr {
+ cmn_msg_hdr_t msg;
+ uint8 priority;
+ uint8 hdrlen;
+ uint8 descrcnt;
+ uint8 flowid;
+ uint32 pktid;
+} txdescr_cmn_msghdr_t;
+
+typedef struct txdescr_msghdr {
+ txdescr_cmn_msghdr_t txcmn;
+ uint8 txhdr[ETHER_HDR_LEN];
+ uint16 rsvd;
+ tx_lenptr_tup_t tx_tup[0]; /**< Based on descriptor count */
+} txdescr_msghdr_t;
+
+/** Tx status header info */
+typedef struct txstatus_hdr {
+ cmn_msg_hdr_t msg;
+ uint32 pktid;
+} txstatus_hdr_t;
+
+/** RX bufid-len-ptr tuple */
+typedef struct rx_lenptr_tup {
+ uint32 rxbufid;
+ uint16 len;
+ uint16 rsvd2;
+ ret_buf_t ret_buf; /**< ret buf pointers */
+} rx_lenptr_tup_t;
+
+/** Rx descr Post hdr info */
+typedef struct rxdesc_msghdr {
+ cmn_msg_hdr_t msg;
+ uint16 rsvd0;
+ uint8 rsvd1;
+ uint8 descnt;
+ rx_lenptr_tup_t rx_tup[0];
+} rxdesc_msghdr_t;
+
+/** RX complete tuples */
+typedef struct rxcmplt_tup {
+ uint16 retbuf_len;
+ uint16 data_offset;
+ uint32 rxstatus0;
+ uint32 rxstatus1;
+ uint32 rxbufid;
+} rxcmplt_tup_t;
+
+/** RX complete message hdr */
+typedef struct rxcmplt_hdr {
+ cmn_msg_hdr_t msg;
+ uint16 rsvd0;
+ uint16 rxcmpltcnt;
+ rxcmplt_tup_t rx_tup[0];
+} rxcmplt_hdr_t;
+
+typedef struct hostevent_hdr {
+ cmn_msg_hdr_t msg;
+ uint32 evnt_pyld;
+} hostevent_hdr_t;
+
+typedef struct dma_xfer_params {
+ uint32 src_physaddr_hi;
+ uint32 src_physaddr_lo;
+ uint32 dest_physaddr_hi;
+ uint32 dest_physaddr_lo;
+ uint32 len;
+ uint32 srcdelay;
+ uint32 destdelay;
+} dma_xfer_params_t;
+
+enum {
+ HOST_EVENT_CONS_CMD = 1
+};
+
+/* defines for flags */
+#define MSGBUF_IOC_ACTION_MASK 0x1
+
+#define MAX_SUSPEND_REQ 15
+
+typedef struct tx_idle_flowring_suspend_request {
+ cmn_msg_hdr_t msg;
+ uint16 ring_id[MAX_SUSPEND_REQ]; /* ring Id's */
+ uint16 num; /* number of flowid's to suspend */
+} tx_idle_flowring_suspend_request_t;
+
+typedef struct tx_idle_flowring_suspend_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ dma_done_t marker;
+} tx_idle_flowring_suspend_response_t;
+
+typedef struct tx_idle_flowring_resume_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_idle_flowring_resume_request_t;
+
+typedef struct tx_idle_flowring_resume_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ dma_done_t marker;
+} tx_idle_flowring_resume_response_t;
+
+/* timesync related additions */
+
+/* defined similar to bcm_xtlv_t */
+typedef struct _bcm_xtlv {
+ uint16 id; /* TLV identifier */
+ uint16 len; /* TLV length in bytes */
+} _bcm_xtlv_t;
+
+#define BCMMSGBUF_FW_CLOCK_INFO_TAG 0
+#define BCMMSGBUF_HOST_CLOCK_INFO_TAG 1
+#define BCMMSGBUF_HOST_CLOCK_SELECT_TAG 2
+#define BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG 3
+#define BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG 4
+#define BCMMSGBUF_MAX_TSYNC_TAG 5
+
+/* Flags in fw clock info TLV */
+#define CAP_DEVICE_TS (1 << 0)
+#define CAP_CORRECTED_TS (1 << 1)
+#define TS_CLK_ACTIVE (1 << 2)
+
+typedef struct ts_fw_clock_info {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_FW_CLOCK_INFO_TAG */
+ ts_timestamp_srcid_t ts; /* tick count */
+ uchar clk_src[4]; /* clock source acronym ILP/AVB/TSF */
+ uint32 nominal_clock_freq;
+ uint32 reset_cnt;
+ uint8 flags;
+ uint8 rsvd[3];
+} ts_fw_clock_info_t;
+
+typedef struct ts_host_clock_info {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */
+ tick_count_64_t ticks; /* 64 bit host tick
counter */
+ ts_timestamp_ns_64_t ns; /* 64 bit host time in nano seconds */
+} ts_host_clock_info_t;
+
+typedef struct ts_host_clock_sel {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_SELECT_TAG */
+ uint32 seqnum; /* number of times GPIO time sync toggled */
+ uint8 min_clk_idx; /* clock identifier configured for packet time stamping */
+ uint8 max_clk_idx; /* clock identifier configured for packet time stamping */
+ uint16 rsvd[1];
+} ts_host_clock_sel_t;
+
+typedef struct ts_d2h_clock_correction {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG */
+ uint8 clk_id; /* clock source in the device */
+ uint8 rsvd[3];
+ ts_correction_m_t m; /* slope 'm' in y = m*x + b */
+ ts_correction_b_t b; /* intercept 'b' in y = m*x + b */
+} ts_d2h_clock_correction_t;
+
+typedef struct ts_host_timestamping_config {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG */
+ /* time period to capture the device time stamp and toggle WLAN_TIME_SYNC_GPIO */
+ uint16 period_ms;
+ uint8 flags;
+ uint8 post_delay;
+ uint32 reset_cnt;
+} ts_host_timestamping_config_t;
+
+/* Flags in host timestamping config TLV */
+#define FLAG_HOST_RESET (1 << 0)
+#define IS_HOST_RESET(x) ((x) & FLAG_HOST_RESET)
+#define CLEAR_HOST_RESET(x) ((x) & ~FLAG_HOST_RESET)
+
+#define FLAG_CONFIG_NODROP (1 << 1)
+#define IS_CONFIG_NODROP(x) ((x) & FLAG_CONFIG_NODROP)
+#define CLEAR_CONFIG_NODROP(x) ((x) & ~FLAG_CONFIG_NODROP)
+
+/* HP2P RLLW Extended TxStatus info when host enables the same */
+#define D2H_TXSTATUS_EXT_PKT_WITH_OVRRD 0x8000 /**< set when pkt had override bit on */
+#define D2H_TXSTATUS_EXT_PKT_XMIT_ON5G 0x4000 /**< set when pkt xmitted on 5G */
+#define D2H_TXSTATUS_EXT_PKT_BT_DENY 0x2000 /**< set when WLAN is given prio over BT */
+#define D2H_TXSTATUS_EXT_PKT_NAV_SWITCH 0x1000 /**< set when band switched due to NAV intr */
+#define D2H_TXSTATUS_EXT_PKT_HOF_SWITCH 0x0800 /**< set when band switched due to HOF intr */
+
+/* H2D Txpost aggregated work item */
+#define TXBUF_AGGR_CNT (2u)
+
+/* aggregated work item of txpost v2 */
+typedef struct host_txbuf_post_aggr_v2 {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** data buffer len to transmit */
+ uint16 data_buf_len[TXBUF_AGGR_CNT];
+
+ /** address of data buffer to transmit */
+ bcm_addr64_t data_buf_addr[TXBUF_AGGR_CNT];
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXBUF_AGGR_CNT];
+
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+
+ /* reserved bytes */
+ uint16 reserved;
+
+ /** additional information on the packet required for CSO */
+ pkt_info_cso_t pktinfo[TXBUF_AGGR_CNT];
+} host_txbuf_post_aggr_v2_t;
+
+/* aggregated work item of txpost v1 */
+typedef struct host_txbuf_post_aggr_v1 {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** data buffer len to transmit */
+ uint16 data_buf_len[TXBUF_AGGR_CNT];
+
+ /** address of data buffer to transmit */
+ bcm_addr64_t data_buf_addr[TXBUF_AGGR_CNT];
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXBUF_AGGR_CNT];
+
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+
+ /* pad bytes */
+ uint16 PAD;
+} host_txbuf_post_aggr_v1_t;
+
+#if defined(BCMPCIE_EXT_TXPOST_SUPPORT) || defined(TX_CSO)
+typedef host_txbuf_post_aggr_v2_t host_txbuf_post_aggr_t;
+#else
+typedef host_txbuf_post_aggr_v1_t host_txbuf_post_aggr_t;
+#endif
+
+/* D2H Txcompletion ring aggregated work item */
+#define TXCPL_AGGR_CNT (4u)
+
+/* head aggregated work item of txcpl */
+typedef struct host_txbuf_cmpl_aggr {
+ /**
common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** completion aggregated message header */
+ compl_aggr_msg_hdr_t compl_aggr_hdr;
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXCPL_AGGR_CNT];
+} host_txbuf_cmpl_aggr_t;
+
+#define TXCPL_AGGR_CNT_EXT (6u)
+/* non-head aggregated work item of txcpl */
+typedef struct host_txbuf_cmpl_aggr_ext {
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXCPL_AGGR_CNT_EXT];
+} host_txbuf_cmpl_aggr_ext_t;
+
+/* H2D Rxpost ring aggregated work items */
+#define RXBUF_AGGR_CNT (2u)
+
+/* aggregated work item of rxpost */
+typedef struct host_rxbuf_post_aggr {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** data buffer len to transmit */
+ uint16 data_buf_len[RXBUF_AGGR_CNT];
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[RXBUF_AGGR_CNT];
+
+ /** address of data buffer to transmit */
+ bcm_addr64_t data_buf_addr[RXBUF_AGGR_CNT];
+} host_rxbuf_post_aggr_t;
+
+/* D2H Rxcompletion ring for aggregated work items */
+#define RXCPL_AGGR_CNT (2u)
+
+/* each rx buffer work item */
+typedef struct host_rxbuf_cmpl_pkt {
+ /** offset in the host rx buffer where the data starts */
+ uint16 data_offset;
+ /** filled up buffer len to receive data */
+ uint16 data_len;
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id;
+} host_rxbuf_cmpl_item_t;
+
+/* head aggregated work item of rxcpl */
+typedef struct host_rxbuf_cmpl_aggr {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** completion aggregated message header */
+ compl_aggr_msg_hdr_t compl_aggr_hdr;
+
+ /** rxbuffer work item */
+ host_rxbuf_cmpl_item_t item[RXCPL_AGGR_CNT];
+} host_rxbuf_cmpl_aggr_t;
+
+#define RXCPL_AGGR_CNT_EXT (5u)
+/* non-head aggregated work item of rxcpl */
+typedef struct host_rxbuf_cmpl_aggr_ext {
+ /** rxbuffer work item */
+ host_rxbuf_cmpl_item_t item[RXCPL_AGGR_CNT_EXT];
+} host_rxbuf_cmpl_aggr_ext_t;
+
+/* txpost extended tag types */
+typedef uint8 txpost_ext_tag_type_t;
+enum {
+ TXPOST_EXT_TAG_TYPE_RSVD = 0u, /* Reserved */
+ TXPOST_EXT_TAG_TYPE_CSO = 1u,
+ TXPOST_EXT_TAG_TYPE_MESH = 2u,
+ TXPOST_EXT_TAG_TYPE_MAX = 3u /* NOTE: increment this as you add types above */
+};
+
+/* Fixed lengths for each extended tag */
+typedef uint8 txpost_ext_tag_len_t;
+enum {
+ TXPOST_EXT_TAG_LEN_RSVD = 0u, /* Reserved */
+ TXPOST_EXT_TAG_LEN_CSO = 4u,
+ TXPOST_EXT_TAG_LEN_MESH = 20u
+};
+
+/*
+ * Note: The only requirement is that the overall size of the work item be a multiple of 8.
+ * However, each individual ext tag is not necessarily a multiple of 8.
+ */
+
+#endif /* _bcmmsgbuf_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmnvram.h b/bcmdhd.101.10.361.x/include/bcmnvram.h
new file mode 100755
index 0000000..498fb99
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmnvram.h
@@ -0,0 +1,162 @@
+/*
+ * NVRAM variable manipulation
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmnvram_h_
+#define _bcmnvram_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+struct nvram_header {
+ uint32 magic;
+ uint32 len;
+ uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ uint32 config_ncdl; /* ncdl values for memc */
+};
+
+struct nvram_tuple {
+ char *name;
+ char *value;
+ struct nvram_tuple *next;
+};
+
+#ifdef BCMDRIVER
+#include <siutils.h>
+
+/*
+ * Initialize NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern int nvram_init(si_t *sih);
+
+extern int nvram_file_read(char **nvramp, int *nvraml);
+
+/*
+ * Append a chunk of nvram variables to the global list
+ */
+extern int nvram_append(void *si, char *vars, uint varsz, uint16 prio);
+
+/*
+ * Check for reset button press for restoring factory defaults.
+ */
+extern int nvram_reset(si_t *sih);
+
+/*
+ * Disable NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern void nvram_exit(si_t *sih);
+
+/*
+ * Get the value of an NVRAM variable. The pointer returned may be
+ * invalid after a set.
+ * @param name name of variable to get
+ * @return value of variable or NULL if undefined
+ */
+extern char * nvram_get(const char *name);
+
+/*
+ * Get the value of an NVRAM variable.
+ * @param name name of variable to get
+ * @return value of variable or the empty string ("") if undefined
+ */
+static INLINE char *
+nvram_safe_get(const char *name)
+{
+ char *p = nvram_get(name);
+ return p ? p : "";
+}
+
+/*
+ * Set the value of an NVRAM variable. The name and value strings are
+ * copied into private storage. Pointers to previously set values
+ * may become invalid. The new value may be immediately
+ * retrieved but will not be permanently stored until a commit.
+ * @param name name of variable to set
+ * @param value value of variable
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_set(const char *name, const char *value);
+
+/*
+ * Unset an NVRAM variable. Pointers to previously set values
+ * remain valid until a set.
+ * @param name name of variable to unset
+ * @return 0 on success and errno on failure
+ * NOTE: use nvram_commit to commit this change to flash.
+ */
+extern int nvram_unset(const char *name);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_commit(void);
+
+/*
+ * Get all NVRAM variables (format name=value\0 ... \0\0).
+ * @param buf buffer to store variables + * @param count size of buffer in bytes + * @return 0 on success and errno on failure + */ +extern int nvram_getall(char *nvram_buf, int count); + +/* + * returns the crc value of the nvram + * @param nvh nvram header pointer + */ +uint8 nvram_calc_crc(struct nvram_header * nvh); + +extern void nvram_printall(void); + +#endif /* BCMDRIVER */ +#endif /* _LANGUAGE_ASSEMBLY */ + +#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ +#define NVRAM_VERSION 1 +#define NVRAM_HEADER_SIZE 20 +/* This definition is for precommit staging, and will be removed */ +#define NVRAM_SPACE 0x8000 +#define MAX_NVRAM_SPACE 0x10000 +#define DEF_NVRAM_SPACE 0x8000 +#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */ + +#define NVRAM_MAX_VALUE_LEN 255 +#define NVRAM_MAX_PARAM_LEN 64 + +#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */ +#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */ + +#define BCM_JUMBO_NVRAM_DELIMIT '\n' +#define BCM_JUMBO_START "Broadcom Jumbo Nvram file" + +#if defined(BCMSDIODEV) || defined(BCMHOSTVARS) +extern char *_vars; +extern uint _varsz; +#endif + +#endif /* _bcmnvram_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmpcie.h b/bcmdhd.101.10.361.x/include/bcmpcie.h new file mode 100755 index 0000000..f464e20 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmpcie.h @@ -0,0 +1,559 @@ +/* + * Broadcom PCIE + * Software-specific definitions shared between device and host side + * Explains the shared area between host and dongle + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _bcmpcie_h_ +#define _bcmpcie_h_ + +#include + +#define ADDR_64(x) (x.addr) +#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr)) +#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr)) + +typedef struct { + uint32 low_addr; + uint32 high_addr; +} sh_addr_t; + +/* May be overridden by 43xxxxx-roml.mk */ +#if !defined(BCMPCIE_MAX_TX_FLOWS) +#define BCMPCIE_MAX_TX_FLOWS 40 +#endif /* ! BCMPCIE_MAX_TX_FLOWS */ + +#define PCIE_SHARED_VERSION_9 0x00009 +#define PCIE_SHARED_VERSION_8 0x00008 +#define PCIE_SHARED_VERSION_7 0x00007 +#define PCIE_SHARED_VERSION_6 0x00006 /* rev6 is compatible with rev 5 */ +#define PCIE_SHARED_VERSION_5 0x00005 /* rev6 is compatible with rev 5 */ +/** + * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that + * is located in device memory. 
+ */
+#define PCIE_SHARED_VERSION_MASK 0x000FF
+#define PCIE_SHARED_ASSERT_BUILT 0x00100
+#define PCIE_SHARED_ASSERT 0x00200
+#define PCIE_SHARED_TRAP 0x00400
+#define PCIE_SHARED_IN_BRPT 0x00800
+#define PCIE_SHARED_SET_BRPT 0x01000
+#define PCIE_SHARED_PENDING_BRPT 0x02000
+/* BCMPCIE_SUPPORT_TX_PUSH_RING 0x04000 obsolete */
+#define PCIE_SHARED_EVT_SEQNUM 0x08000
+#define PCIE_SHARED_DMA_INDEX 0x10000
+
+/**
+ * There are host types where a device interrupt can 'race ahead' of data written by the device into
+ * host memory. The dongle can avoid this condition using a variety of techniques (read barrier,
+ * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately
+ * these techniques have drawbacks on router platforms. For these platforms, it was decided to not
+ * avoid the condition, but to detect the condition instead and act on it.
+ * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM
+ */
+#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000
+#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000
+#define PCIE_SHARED_D2H_SYNC_MODE_MASK \
+ (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM)
+#define PCIE_SHARED_IDLE_FLOW_RING 0x80000
+#define PCIE_SHARED_2BYTE_INDICES 0x100000
+
+#define PCIE_SHARED_FAST_DELETE_RING 0x00000020 /* Fast Delete Ring */
+#define PCIE_SHARED_EVENT_BUF_POOL_MAX 0x000000c0 /* event buffer pool max bits */
+#define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS 6 /* event buffer pool max bit position */
+
+/* dongle supports fatal buf log collection */
+#define PCIE_SHARED_FATAL_LOGBUG_VALID 0x200000
+
+/* Implicit DMA with corerev 19 and after */
+#define PCIE_SHARED_IDMA 0x400000
+
+/* MSI support */
+#define PCIE_SHARED_D2H_MSI_MULTI_MSG 0x800000
+
+/* IFRM with corerev 19 and after */
+#define PCIE_SHARED_IFRM 0x1000000
+
+/**
+ * From Rev6 and above, suspend/resume can be done using two handshake methods.
+ * 1. Using ctrl post/ctrl cmpl messages (Default rev6)
+ * 2. Using Mailbox data (old method as used in rev5)
+ * This shared flag indicates whether to override the rev6 default method and use mailbox for
+ * suspend/resume.
+ */
+#define PCIE_SHARED_USE_MAILBOX 0x2000000
+
+/* Firmware compiled for mfgbuild purposes */
+#define PCIE_SHARED_MFGBUILD_FW 0x4000000
+
+/* Firmware could use DB0 value as host timestamp */
+#define PCIE_SHARED_TIMESTAMP_DB0 0x8000000
+/* Firmware could use Hostready (IPC rev7) */
+#define PCIE_SHARED_HOSTRDY_SUPPORT 0x10000000
+
+/* When set, firmware does not support the OOB Device Wake based DS protocol */
+#define PCIE_SHARED_NO_OOB_DW 0x20000000
+
+/* When set, firmware supports the Inband DS protocol */
+#define PCIE_SHARED_INBAND_DS 0x40000000
+
+/* use DAR registers */
+#define PCIE_SHARED_DAR 0x80000000
+
+/**
+ * Following are the shared2 flags. All bits in flags have been used.
A flags2 + * field got added and the definition for these flags come here: + */ +/* WAR: D11 txstatus through unused status field of PCIe completion header */ +#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */ +#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002 +#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */ +#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */ +#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */ +#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020 /* Fast Delete ring support */ +#define PCIE_SHARED2_EVTBUF_MAX_MASK 0x000000C0 /* 0:32, 1:64, 2:128, 3: 256 */ + +/* using flags2 to indicate firmware support added to reuse timesync to update PKT txstatus */ +#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100 +#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */ +#define PCIE_SHARED2_FW_HC_ON_TRAP 0x00000400 +#define PCIE_SHARED2_HSCB 0x00000800 /* Host SCB support */ + +#define PCIE_SHARED2_EDL_RING 0x00001000 /* Support Enhanced Debug Lane */ +#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */ +#define PCIE_SHARED2_PCIE_ENUM_RESET_FLR 0x00004000 /* BT producer index reset WAR */ +#define PCIE_SHARED2_PKT_TIMESTAMP 0x00008000 /* Timestamp in packet */ + +#define PCIE_SHARED2_HP2P 0x00010000u /* HP2P feature */ +#define PCIE_SHARED2_HWA 0x00020000u /* HWA feature */ +#define PCIE_SHARED2_TRAP_ON_HOST_DB7 0x00040000u /* can take a trap on DB7 from host */ + +#define PCIE_SHARED2_DURATION_SCALE 0x00100000u +#define PCIE_SHARED2_ETD_ADDR_SUPPORT 0x00800000u + +#define PCIE_SHARED2_TXCSO 0x00200000u /* Tx Checksum offload support */ +#define PCIE_SHARED2_TXPOST_EXT 0x00400000u /* extended txpost work item support */ + +#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000 +#define PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000 + +#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09 +#define PCIE_SHARED_H2D_MAGIC 0x12345678 + +typedef uint16 pcie_hwa_db_index_t; /* 16 bit HWA index (IPC Rev 7) */ +#define PCIE_HWA_DB_INDEX_SZ (2u) /* 2 bytes sizeof(pcie_hwa_db_index_t) */ + +/** + * Message rings convey messages between host and device. They are unidirectional, and are located + * in host memory. 
+ * + * This is the minimal set of message rings, known as 'common message rings': + */ +#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0 +#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1 +#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2 +#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3 +#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4 +#define BCMPCIE_COMMON_MSGRING_MAX_ID 4 + +#define BCMPCIE_H2D_COMMON_MSGRINGS 2 +#define BCMPCIE_D2H_COMMON_MSGRINGS 3 +#define BCMPCIE_COMMON_MSGRINGS 5 + +#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows)) + +/* different ring types */ +#define BCMPCIE_H2D_RING_TYPE_CTRL_SUBMIT 0x1 +#define BCMPCIE_H2D_RING_TYPE_TXFLOW_RING 0x2 +#define BCMPCIE_H2D_RING_TYPE_RXBUFPOST 0x3 +#define BCMPCIE_H2D_RING_TYPE_TXSUBMIT 0x4 +#define BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT 0x5 +#define BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT 0x6 + +#define BCMPCIE_D2H_RING_TYPE_CTRL_CPL 0x1 +#define BCMPCIE_D2H_RING_TYPE_TX_CPL 0x2 +#define BCMPCIE_D2H_RING_TYPE_RX_CPL 0x3 +#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4 +#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5 +#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6 +#define BCMPCIE_D2H_RING_TYPE_EDL 0x7 +#define BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL 0x8 +#define BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL 0x9 + +/** + * H2D and D2H, WR and RD index, are maintained in the following arrays: + * - Array of all H2D WR Indices + * - Array of all H2D RD Indices + * - Array of all D2H WR Indices + * - Array of all D2H RD Indices + * + * The offset of the WR or RD indexes (for common rings) in these arrays are + * listed below. Arrays ARE NOT indexed by a ring's id. + * + * D2H common rings WR and RD index start from 0, even though their ringids + * start from BCMPCIE_H2D_COMMON_MSGRINGS + */ + +#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id) + +enum h2dring_idx { + /* H2D common rings */ + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT), + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT), + + /* First TxPost's WR or RD index starts after all H2D common rings */ + BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS) +}; + +#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \ + ((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS) + +enum d2hring_idx { + /* D2H Common Rings */ + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE), + BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE), + BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE) +}; + +/** + * Macros for managing arrays of RD WR indices: + * rw_index_sz: + * - in dongle, rw_index_sz is known at compile time + * - in host/DHD, rw_index_sz is derived from advertized pci_shared flags + * + * ring_idx: See h2dring_idx and d2hring_idx + */ + +/** Offset of a RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \ + ((rw_index_sz) * (ring_idx)) + +/** Fetch the address of RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \ + (void *)((uint32)(indices_array_base) + \ + BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx))) + +/** H2D DMA Indices array size: given max flow rings */ +#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \ + ((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows)) + +/** D2H DMA Indices 
array size */ +#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \ + ((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS) + +/* Backwards compatibility for legacy branches. */ +#if !defined(PHYS_ADDR_N) + #define PHYS_ADDR_N(name) name +#endif + +/** + * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used + * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated + * both in host as well as device memory. + */ +typedef struct ring_mem { + uint16 idx; /* ring id */ + uint8 type; + uint8 rsvd; + uint16 max_item; /* Max number of items in flow ring */ + uint16 len_items; /* Items are fixed size. Length in bytes of one item */ + sh_addr_t base_addr; /* 64 bits address, either in host or device memory */ +} ring_mem_t; + +/** + * Per flow ring, information is maintained in device memory, eg at what address the ringmem and + * ringstate are located. The flow ring itself can be instantiated in either host or device memory. + * + * Perhaps this type should be renamed to make clear that it resides in device memory only. + */ +typedef struct ring_info { + uint32 PHYS_ADDR_N(ringmem_ptr); /* ring mem location in dongle memory */ + + /* Following arrays are indexed using h2dring_idx and d2hring_idx, and not + * by a ringid. + */ + + /* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */ + uint32 PHYS_ADDR_N(h2d_w_idx_ptr); /* Array of all H2D ring's WR indices */ + uint32 PHYS_ADDR_N(h2d_r_idx_ptr); /* Array of all H2D ring's RD indices */ + uint32 PHYS_ADDR_N(d2h_w_idx_ptr); /* Array of all D2H ring's WR indices */ + uint32 PHYS_ADDR_N(d2h_r_idx_ptr); /* Array of all D2H ring's RD indices */ + + /* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host. + * Host may directly fetch WR and RD indices from these host-side arrays. + * + * 64bit ptr to arrays of WR or RD indices for all rings in host memory. + */ + sh_addr_t h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */ + sh_addr_t h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */ + sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */ + sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */ + + uint16 max_tx_flowrings; /* maximum number of H2D rings: common + flow */ + uint16 max_submission_queues; /* maximum number of H2D rings: common + flow */ + uint16 max_completion_rings; /* maximum number of H2D rings: common + flow */ + uint16 max_vdevs; /* max number of virtual interfaces supported */ + + sh_addr_t ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */ + + /* 32bit ptr to arrays of HWA DB indices for all rings in dongle memory */ + uint32 PHYS_ADDR_N(h2d_hwa_db_idx_ptr); /* Array of all H2D rings HWA DB indices */ + uint32 PHYS_ADDR_N(d2h_hwa_db_idx_ptr); /* Array of all D2H rings HWA DB indices */ + +} ring_info_t; + +/** + * A structure located in TCM that is shared between host and device, primarily used during + * initialization. 
+ */ +typedef struct { + /** shared area version captured at flags 7:0 */ + uint32 flags; + + uint32 PHYS_ADDR_N(trap_addr); + uint32 PHYS_ADDR_N(assert_exp_addr); + uint32 PHYS_ADDR_N(assert_file_addr); + uint32 assert_line; + uint32 PHYS_ADDR_N(console_addr); /**< Address of hnd_cons_t */ + + uint32 PHYS_ADDR_N(msgtrace_addr); + + uint32 fwid; + + /* Used for debug/flow control */ + uint16 total_lfrag_pkt_cnt; + uint16 max_host_rxbufs; /* rsvd in spec */ + + uint32 dma_rxoffset; /* rsvd in spec */ + + /** these will be used for sleep request/ack, d3 req/ack */ + uint32 PHYS_ADDR_N(h2d_mb_data_ptr); + uint32 PHYS_ADDR_N(d2h_mb_data_ptr); + + /* information pertinent to host IPC/msgbuf channels */ + /** location in the TCM memory which has the ring_info */ + uint32 PHYS_ADDR_N(rings_info_ptr); + + /** block of host memory for the scratch buffer */ + uint32 host_dma_scratch_buffer_len; + sh_addr_t host_dma_scratch_buffer; + + /* location in host memory for scb host offload structures */ + sh_addr_t host_scb_addr; + uint32 host_scb_size; + + /* anonymous union for overloading fields in structure */ + union { + uint32 buzz_dbg_ptr; /* BUZZZ state format strings and trace buffer */ + struct { + /* Host provided trap buffer length in words */ + uint16 device_trap_debug_buffer_len; + uint16 rsvd2; + }; + }; + + /* rev6 compatible changes */ + uint32 flags2; + uint32 host_cap; + + /* location in the host address space to write trap indication. + * At this point for the current rev of the spec, firmware will + * support only indications to 32 bit host addresses. + * This essentially is device_trap_debug_buffer_addr + */ + sh_addr_t host_trap_addr; + + /* location for host fatal error log buffer start address */ + uint32 PHYS_ADDR_N(device_fatal_logbuf_start); + + /* location in host memory for offloaded modules */ + sh_addr_t hoffload_addr; + uint32 flags3; + uint32 host_cap2; + uint32 host_cap3; /* host indicates its txpost ext tag capabilities */ + uint32 PHYS_ADDR_N(etd_addr); + + /* Device advertises the txpost extended tag capabilities */ + uint32 device_txpost_ext_tags_bitmask; + +} pciedev_shared_t; + +/* Device F/W provides the following access function: + * pciedev_shared_t *hnd_get_pciedev_shared(void); + */ + +/* host capabilities */ +#define HOSTCAP_PCIEAPI_VERSION_MASK 0x000000FF +#define HOSTCAP_H2D_VALID_PHASE 0x00000100 +#define HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE 0x00000200 +#define HOSTCAP_H2D_ENABLE_HOSTRDY 0x00000400 +#define HOSTCAP_DB0_TIMESTAMP 0x00000800 +#define HOSTCAP_DS_NO_OOB_DW 0x00001000 +#define HOSTCAP_DS_INBAND_DW 0x00002000 +#define HOSTCAP_H2D_IDMA 0x00004000 +#define HOSTCAP_H2D_IFRM 0x00008000 +#define HOSTCAP_H2D_DAR 0x00010000 +#define HOSTCAP_EXTENDED_TRAP_DATA 0x00020000 +#define HOSTCAP_TXSTATUS_METADATA 0x00040000 +#define HOSTCAP_BT_LOGGING 0x00080000 +#define HOSTCAP_SNAPSHOT_UPLOAD 0x00100000 +#define HOSTCAP_FAST_DELETE_RING 0x00200000 +#define HOSTCAP_PKT_TXSTATUS 0x00400000 +#define HOSTCAP_UR_FW_NO_TRAP 0x00800000 /* Don't trap on UR */ +#define HOSTCAP_TX_CSO 0x01000000 +#define HOSTCAP_HSCB 0x02000000 +/* Host support for extended device trap debug buffer */ +#define HOSTCAP_EXT_TRAP_DBGBUF 0x04000000 +#define HOSTCAP_TXPOST_EXT 0x08000000 +/* Host support for enhanced debug lane */ +#define HOSTCAP_EDL_RING 0x10000000 +#define HOSTCAP_PKT_TIMESTAMP 0x20000000 +#define HOSTCAP_PKT_HP2P 0x40000000 +#define HOSTCAP_HWA 0x80000000 + +#define HOSTCAP2_DURATION_SCALE_MASK 0x0000003Fu + +/* extended trap debug buffer allocation sizes. 
Note that this buffer can be used for + * other trap related purposes also. + */ +#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN (64u * 1024u) +#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN (96u * 1024u) +#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MAX (256u * 1024u) + +/** + * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware + * support. + */ + +/* H2D mail box Data */ +#define H2D_HOST_D3_INFORM 0x00000001 +#define H2D_HOST_DS_ACK 0x00000002 +#define H2D_HOST_DS_NAK 0x00000004 +#define H2D_HOST_D0_INFORM_IN_USE 0x00000008 +#define H2D_HOST_D0_INFORM 0x00000010 +#define H2DMB_DS_ACTIVE 0x00000020 +#define H2DMB_DS_DEVICE_WAKE 0x00000040 +#define H2D_HOST_IDMA_INITED 0x00000080 +#define H2D_HOST_ACK_NOINT 0x00010000 /* d2h_ack interrupt ignore */ +#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */ +#define H2D_FW_TRAP 0x20000000 /**< h2d force TRAP */ +#define H2DMB_DS_HOST_SLEEP_INFORM H2D_HOST_D3_INFORM +#define H2DMB_DS_DEVICE_SLEEP_ACK H2D_HOST_DS_ACK +#define H2DMB_DS_DEVICE_SLEEP_NAK H2D_HOST_DS_NAK +#define H2DMB_D0_INFORM_IN_USE H2D_HOST_D0_INFORM_IN_USE +#define H2DMB_D0_INFORM H2D_HOST_D0_INFORM +#define H2DMB_FW_TRAP H2D_FW_TRAP +#define H2DMB_HOST_CONS_INT H2D_HOST_CONS_INT +#define H2DMB_DS_DEVICE_WAKE_ASSERT H2DMB_DS_DEVICE_WAKE +#define H2DMB_DS_DEVICE_WAKE_DEASSERT H2DMB_DS_ACTIVE + +/* D2H mail box Data */ +#define D2H_DEV_D3_ACK 0x00000001 +#define D2H_DEV_DS_ENTER_REQ 0x00000002 +#define D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008 +#define D2H_DEV_IDMA_INITED 0x00000010 +#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK +#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ +#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE + +#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ + D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED) +#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) + +/* trap data codes */ +#define D2H_DEV_FWHALT 0x10000000 +#define D2H_DEV_EXT_TRAP_DATA 0x20000000 +#define D2H_DEV_TRAP_IN_TRAP 0x40000000 +#define D2H_DEV_TRAP_HOSTDB 0x80000000 /* trap as set by host DB */ +#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000 +/* Indicates trap due to HMAP violation */ +#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000 +/* Indicates whether HMAP violation was Write */ +#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000 +#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000 +#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */ + +#define D2HMB_FWHALT D2H_DEV_FWHALT +#define D2HMB_TRAP_IN_TRAP D2H_DEV_TRAP_IN_TRAP +#define D2HMB_EXT_TRAP_DATA D2H_DEV_EXT_TRAP_DATA +#define D2H_FWTRAP_MAC_SSSR_RDY 0x00010000u /* MAC SSSR prepped */ + +/* Size of Extended Trap data Buffer */ +#define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096 + +/** These macro's operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */ +#define PREVTXP(i, d) (((i) == 0) ? ((d) - 1) : ((i) - 1)) +#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1)) +#define NEXTNTXP(i, n, d) ((((i)+(n)) >= (d)) ? 0 : ((i)+(n))) +#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w))) +#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1) + +/* Function can be used to notify host of FW halt */ +#define READ_AVAIL_SPACE(w, r, d) ((w >= r) ? (uint32)(w - r) : (uint32)(d - r)) +#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? 
(d - w) : (r - w)) +#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1) +#define CHECK_WRITE_SPACE(r, w, d) ((r) > (w)) ? \ + (uint32)((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? \ + (uint32)((d) - (w) - 1) : (uint32)((d) - (w)) + +#define CHECK_NOWRITE_SPACE(r, w, d) \ + (((uint32)(r) == (uint32)((w) + 1)) || (((r) == 0) && ((w) == ((d) - 1)))) + +/* These should be moved into pciedev.h --- */ +#define WRT_PEND(x) ((x)->wr_pending) +#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) /**< advanced by producer */ +#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a)) + +#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) /**< advanced by consumer */ +#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a)) + +#define MODULO_RING_IDX(x, y) ((x) % (y)->bitmap_size) + +#define RING_READ_PTR(x) ((x)->ringstate->r_offset) +#define RING_WRITE_PTR(x) ((x)->ringstate->w_offset) +#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) +#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) +#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) +#define HOST_RING_BASE(x) ((x)->dma_buf.va) +#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \ + ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x)))) + +/* Trap types copied in the pciedev_shared.trap_addr */ +#define FW_INITIATED_TRAP_TYPE (0x1 << 7) +#define HEALTHCHECK_NODS_TRAP_TYPE (0x1 << 6) + +/* Device supported txpost extended tag capabilities */ +#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_RSVD (1u << 0u) /* Reserved */ +#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_CSO (1u << 1u) /* CSO */ +#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_MESH (1u << 2u) /* MESH */ + +#define RING_MESH(x) (((x)->txpost_ext_cap_flags) & PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_MESH) + +#endif /* _bcmpcie_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmpcispi.h b/bcmdhd.101.10.361.x/include/bcmpcispi.h new file mode 100755 index 0000000..bd04557 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmpcispi.h @@ -0,0 +1,204 @@ +/* + * Broadcom PCI-SPI Host Controller Register Definitions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ +#ifndef _BCM_PCI_SPI_H +#define _BCM_PCI_SPI_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* ++---------------------------------------------------------------------------+ +| | +| 7 6 5 4 3 2 1 0 | +| 0x0000 SPI_CTRL SPIE SPE 0 MSTR CPOL CPHA SPR1 SPR0 | +| 0x0004 SPI_STAT SPIF WCOL ST1 ST0 WFFUL WFEMP RFFUL RFEMP | +| 0x0008 SPI_DATA Bits 31:0, data to send out on MOSI | +| 0x000C SPI_EXT ICNT1 ICNT0 BSWAP *HSMODE ESPR1 ESPR0 | +| 0x0020 GPIO_OE 0=input, 1=output PWR_OE CS_OE | +| 0x0024 GPIO_DATA CARD:1=missing, 0=present CARD PWR_DAT CS_DAT | +| 0x0040 INT_EDGE 0=level, 1=edge DEV_E SPI_E | +| 0x0044 INT_POL 1=active high, 0=active low DEV_P SPI_P | +| 0x0048 INTMASK DEV SPI | +| 0x004C INTSTATUS DEV SPI | +| 0x0060 HEXDISP Reset value: 0x14e443f5. In hexdisp mode, value | +| shows on the Raggedstone1 4-digit 7-segment display. | +| 0x0064 CURRENT_MA Low 16 bits indicate card current consumption in mA | +| 0x006C DISP_SEL Display mode (0=hexdisp, 1=current) DSP | +| 0x00C0 PLL_CTL bit31=ext_clk, remainder unused. | +| 0x00C4 PLL_STAT LOCK | +| 0x00C8 CLK_FREQ | +| 0x00CC CLK_CNT | +| | +| *Notes: HSMODE is not implemented, never set this bit! | +| BSWAP is available in rev >= 8 | +| | ++---------------------------------------------------------------------------+ +*/ + +typedef volatile struct { + uint32 spih_ctrl; /* 0x00 SPI Control Register */ + uint32 spih_stat; /* 0x04 SPI Status Register */ + uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */ + uint32 spih_ext; /* 0x0C SPI Extension Register */ + uint32 PAD[4]; /* 0x10-0x1F PADDING */ + + uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */ + uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */ + uint32 PAD[6]; /* 0x28-0x3F PADDING */ + + uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */ + uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */ + /* 1=Active High) */ + uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */ + uint32 spih_int_status; /* 0x4C SPI Interrupt Status */ + uint32 PAD[4]; /* 0x50-0x5F PADDING */ + + uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */ + uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */ + uint32 PAD[1]; /* 0x68 PADDING */ + uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */ + uint32 PAD[4]; /* 0x70-0x7F PADDING */ + uint32 PAD[8]; /* 0x80-0x9F PADDING */ + uint32 PAD[8]; /* 0xA0-0xBF PADDING */ + uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */ + uint32 spih_pll_status; /* 0xC4 PLL Status Register */ + uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */ + uint32 spih_clk_count; /* 0xCC External Clock Count Register */ + +} spih_regs_t; + +typedef volatile struct { + uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */ + uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */ + + uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */ + uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */ + uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */ + uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */ + uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */ + uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */ + uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 
Translation Address register */ + uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */ + uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */ + uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */ + uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */ + uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */ + uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */ + uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */ + uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */ + uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */ + uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */ + uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */ + uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */ + uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */ + uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */ + uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */ + uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */ + uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */ + uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */ + uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */ + + uint32 PAD[5]; /* 0x16C-0x17F PADDING */ + + uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */ + uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */ + uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */ + uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */ + uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */ + uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */ + uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */ + uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */ + uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */ + uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */ + uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */ + uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */ + uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */ + uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */ + uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */ + uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */ + uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */ + uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */ + uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */ + uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */ + uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */ + uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */ + uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */ + uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */ + uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */ + uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */ + + uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */ + uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */ + uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */ +} spih_pciregs_t; + +/* + * 
PCI Core interrupt enable and status bit definitions. + */ + +/* PCI Core ICR Register bit definitions */ +#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */ +#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */ +#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */ +#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */ +#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */ +#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */ + +/* PCI Core ISR Register bit definitions */ +#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */ +#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */ +#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */ +#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */ +#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */ + +/* Registers on the Wishbone bus */ +#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */ +#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */ +#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */ + +/* GPIO Bit definitions */ +#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */ +#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */ +#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */ + +/* SPI Status Register Bit definitions */ +#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */ +#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */ +#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */ +#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */ +#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */ +#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */ + +#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */ + +#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */ +#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */ + +/* Spin bit loop bound check */ +#define SPI_SPIN_BOUND 0xf4240 /* 1 million */ + +#endif /* _BCM_PCI_SPI_H */ diff --git a/bcmdhd.101.10.361.x/include/bcmperf.h b/bcmdhd.101.10.361.x/include/bcmperf.h new file mode 100755 index 0000000..fd37d27 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmperf.h @@ -0,0 +1,33 @@ +/* + * Performance counters software interface. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
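The FIFO status bits and `SPI_SPIN_BOUND` above suggest the bounded polling loop a host would run while waiting for read data. The sketch below is illustrative only: `spi_read_status()` is a stand-in (stubbed here so the unit compiles) for an MMIO read of the `spih_stat` register.

#include <stdint.h>
#include <stdbool.h>

#define SPIH_RFEMPTY	(1 << 0)	/* SPI Read FIFO Empty, from the status bits above */
#define SPI_SPIN_BOUND	0xf4240	/* 1 million iterations */

/* Stub for illustration; a real driver reads spih_stat over the bus. */
static uint32_t spi_read_status(void) { return 0; /* pretend data is ready */ }

/* Poll until the read FIFO has data, or give up after SPI_SPIN_BOUND spins. */
static bool spi_wait_rx_data(void)
{
	uint32_t spins = 0;
	while (spi_read_status() & SPIH_RFEMPTY) {
		if (++spins > SPI_SPIN_BOUND)
			return false;	/* bounded: never hang the CPU on a dead device */
	}
	return true;
}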
+ *
+ *
+ * <>
+ */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define BCMPERF_GETICACHE_MISS(x)	((x) = 0)
+#define BCMPERF_GETICACHE_HIT(x)	((x) = 0)
+#define BCMPERF_GETINSTRCOUNT(x)	((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmproto.h b/bcmdhd.101.10.361.x/include/bcmproto.h
new file mode 100755
index 0000000..2770caf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmproto.h
@@ -0,0 +1,275 @@
+/*
+ * Fundamental constants relating to IP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmproto_h_
+#define _bcmproto_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#include "eapol.h"
+#include "802.3.h"
+#include "vlan.h"
+#include "bcmtcp.h"
+/* copy from igsc.h */
+#define IGMP_HLEN 8
+
+enum frame_l2_hdr {
+FRAME_L2_SNAP_H = 1,
+FRAME_L2_SNAPVLAN_H,
+FRAME_L2_ETH_H,
+FRAME_L2_ETHVLAN_H,
+FRAME_L2_ERROR,
+};
+
+enum frame_l3_hdr {
+FRAME_L3_IP_H = 4,
+FRAME_L3_IP6_H = 6,
+FRAME_L3_ARP_H,
+FRAME_L3_8021X_EAPOLKEY_H,
+FRAME_L3_ERROR,
+};
+
+enum frame_l4_hdr {
+FRAME_L4_ICMP_H = 1,
+FRAME_L4_IGMP_H = 2,
+FRAME_L4_TCP_H = 6,
+FRAME_L4_UDP_H = 17,
+FRAME_L4_ICMP6_H = 58,
+FRAME_L4_ERROR,
+};
+
+typedef struct {
+	uint8 *l2;
+	uint8 l2_t;
+	uint16 l2_len;
+	uint8 *l3;
+	uint8 l3_t;
+	uint16 l3_len;
+	uint8 *l4;
+	uint8 l4_t;
+	uint16 l4_len;
+} frame_proto_t;
+
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+/* Generic header parser function */
+static INLINE int
+hnd_frame_proto(uint8 *p, int plen, frame_proto_t *fp)
+{
+	struct dot3_mac_llc_snap_header *sh = (struct dot3_mac_llc_snap_header *)p;
+	struct dot3_mac_llc_snapvlan_header *svh = (struct dot3_mac_llc_snapvlan_header *)p;
+	struct ether_header *eh = (struct ether_header *)p;
+	struct ethervlan_header *evh = (struct ethervlan_header *)p;
+	uint16 type;
+	uint16 len;
+
+	if (p == NULL || plen <= 0) {
+		return BCME_ERROR;
+	}
+
+	if (plen < (int)sizeof(*eh)) {
+		return BCME_BUFTOOSHORT;
+	}
+	type = ntoh16(eh->ether_type);
+
+	bzero(fp, sizeof(frame_proto_t));
+
+	/* L2 header/pointer check */
+	fp->l2 = p;
+	fp->l2_len = (uint16)plen;
+	if (type < ETHER_TYPE_MIN) {
+		if (plen < (int)sizeof(*sh)) {
+			return BCME_BUFTOOSHORT;
+		}
+		if (bcmp(&sh->dsap, llc_snap_hdr, SNAP_HDR_LEN) == 0) {
+			type = ntoh16(sh->type);
+			if (type == ETHER_TYPE_8021Q) {
+				/* VLAN-tagged SNAP: the real ethertype follows the tag */
+				fp->l2_t = FRAME_L2_SNAPVLAN_H;
+				type = ntoh16(svh->ether_type);
+				p += sizeof(struct dot3_mac_llc_snapvlan_header);
+				if ((plen -= sizeof(struct dot3_mac_llc_snapvlan_header)) <= 0) {
+					return BCME_ERROR;
+				}
+			}
+			else {
+				/* plain SNAP: 'type' already holds the ethertype */
+				fp->l2_t = FRAME_L2_SNAP_H;
+				p += sizeof(struct dot3_mac_llc_snap_header);
+				if ((plen -= sizeof(struct dot3_mac_llc_snap_header)) <= 0) {
+					return BCME_ERROR;
+				}
+			}
+		}
+		else {
+			return BCME_ERROR;
+		}
+	}
+	else {
+		if (type == ETHER_TYPE_8021Q) {
+			fp->l2_t = FRAME_L2_ETHVLAN_H;
+			type = ntoh16(evh->ether_type);
+			p += ETHERVLAN_HDR_LEN;
+			if ((plen -= ETHERVLAN_HDR_LEN) <= 0) {
+				return BCME_ERROR;
+			}
+		}
+		else {
+			fp->l2_t = FRAME_L2_ETH_H;
+			p += ETHER_HDR_LEN;
+			if ((plen -= ETHER_HDR_LEN) <= 0) {
+				return BCME_ERROR;
+			}
+		}
+	}
+	/* L3 header/pointer check */
+	fp->l3 = p;
+	fp->l3_len = (uint16)plen;
+	switch (type) {
+	case ETHER_TYPE_ARP: {
+		if ((plen -= ARP_DATA_LEN) < 0) {
+			return BCME_ERROR;
+		}
+
+		fp->l3_t = FRAME_L3_ARP_H;
+		/* no layer 4 protocol, return */
+		return BCME_OK;
+		break;
+	}
+	case ETHER_TYPE_IP: {
+		struct ipv4_hdr *iph = (struct ipv4_hdr *)p;
+		len = IPV4_HLEN(iph);
+
+		if ((plen -= len) <= 0) {
+			return BCME_ERROR;
+		}
+
+		if (IP_VER(iph) == IP_VER_4 && len >= IPV4_MIN_HEADER_LEN) {
+			fp->l3_t = FRAME_L3_IP_H;
+			type = IPV4_PROT(iph);
+			p += len;
+		}
+		else {
+			/* not a valid ipv4 packet */
+			return BCME_ERROR;
+		}
+		break;
+	}
+	case ETHER_TYPE_IPV6: {
+		struct ipv6_hdr *ip6h = (struct ipv6_hdr *)p;
+
+		if ((plen -= IPV6_MIN_HLEN) <= 0) {
+			return BCME_ERROR;
+		}
+
+		if (IP_VER(ip6h) == IP_VER_6) {
+			fp->l3_t = FRAME_L3_IP6_H;
+			type = IPV6_PROT(ip6h);
+			p += IPV6_MIN_HLEN;
+			if (IPV6_EXTHDR(type)) {
+				uint8 proto = 0;
+				int32 exth_len = ipv6_exthdr_len_check(p, plen, &proto);
+				if (exth_len < 0 || ((plen -= exth_len) <= 0))
+					return BCME_ERROR;
+				type = proto;
+				p += exth_len;
+			}
+		}
+		else {
+			/* not a valid ipv6 packet */
+			return BCME_ERROR;
+		}
+		break;
+	}
+	case ETHER_TYPE_802_1X: {
+		eapol_hdr_t *eapolh = (eapol_hdr_t *)p;
+
+		if ((plen -= EAPOL_HDR_LEN) <= 0) {
+			return BCME_ERROR;
+		}
+
+		if (eapolh->type == EAPOL_KEY) {
+			fp->l3_t = FRAME_L3_8021X_EAPOLKEY_H;
+			return BCME_OK;
+		}
+		else {
+			/* not an EAPOL-Key frame */
+			return BCME_ERROR;
+		}
+
+		break;
+	}
+	default:
+		/* not an interesting case */
+		return BCME_ERROR;
+		break;
+	}
+
+	/* L4 header/pointer check */
+	fp->l4 = p;
+	fp->l4_len = (uint16)plen;
+	switch (type) {
+	case IP_PROT_ICMP:
+		fp->l4_t = FRAME_L4_ICMP_H;
+		if ((plen -= sizeof(struct bcmicmp_hdr)) < 0) {
+			return BCME_ERROR;
+		}
+		break;
+	case IP_PROT_IGMP:
+		fp->l4_t = FRAME_L4_IGMP_H;
+		if ((plen -= IGMP_HLEN) < 0) {
+			return BCME_ERROR;
+		}
+		break;
+	case IP_PROT_TCP:
+		fp->l4_t = FRAME_L4_TCP_H;
+		if ((plen -= sizeof(struct bcmtcp_hdr)) < 0) {
+			return BCME_ERROR;
+		}
+		break;
+	case IP_PROT_UDP:
+		fp->l4_t = FRAME_L4_UDP_H;
+		if ((plen -= sizeof(struct bcmudp_hdr)) < 0) {
+			return BCME_ERROR;
+		}
+		break;
+	case IP_PROT_ICMP6:
+		fp->l4_t = FRAME_L4_ICMP6_H;
+		if ((plen -= sizeof(struct icmp6_hdr)) < 0) {
+			return BCME_ERROR;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return BCME_OK;
+}
+
+#define SNAP_HDR_LEN 6	/* 802.3 LLC/SNAP header length */
+
+#define FRAME_DROP 0
+#define FRAME_NOP 1
+#define FRAME_TAKEN 2
+
+#endif /* _bcmproto_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmrand.h b/bcmdhd.101.10.361.x/include/bcmrand.h
new file mode 100755
index 0000000..3bebca6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmrand.h
@@ -0,0 +1,65 @@
+/*
+ * bcmrand.h.
+ *
+ * Copyright (C) 2020, Broadcom.
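Typical use of the parser just defined: hand it a raw frame and dispatch on the recovered layer tags. A sketch assuming the surrounding DHD headers (`frame_proto_t`, `hnd_frame_proto`, the `FRAME_L4_*` enums, and the `BCME_*` codes) are in scope; the helper name is illustrative:

/* Classify a received frame; returns TRUE for TCP or UDP payloads. */
static bool pkt_is_tcp_udp(uint8 *frame, int frame_len)
{
	frame_proto_t fp;

	/* hnd_frame_proto() fills fp.l2/l3/l4 pointers and type tags,
	 * returning BCME_OK only if every present layer parsed cleanly.
	 */
	if (hnd_frame_proto(frame, frame_len, &fp) != BCME_OK)
		return FALSE;

	return (fp.l4_t == FRAME_L4_TCP_H || fp.l4_t == FRAME_L4_UDP_H);
}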
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _bcmrand_h_ +#define _bcmrand_h_ + +/* When HOST driver is for PCIE dongle image, we suppose the HOST must provide the entropy + * input if it does not define the macro BCM_RNG_NO_HOST_ENTROPY + */ +#if defined(BCMPCIEDEV) && !defined(BCMFUZZ) +#if !defined(BCM_RNG_HOST_ENTROPY) && !defined(BCM_RNG_NO_HOST_ENTROPY) +#define BCM_RNG_HOST_ENTROPY +#define BCM_RNG_PCIEDEV_DEFAULT +#endif /* !BCM_RNG_HOST_ENTROPY && !BCM_RNG_NO_HOST_ENTROPY */ +#endif /* BCMPCIEDEV */ + +/* the format of current TCM layout during boot + * + * Code Unused memory Random numbers Random number Magic number NVRAM NVRAM + * byte Count 0xFEEDC0DE Size + * |<-----Variable---->|<---Variable--->|<-----4 bytes-->|<---4 bytes---->|<---V--->|<--4B--->| + * |<------------- BCM_ENTROPY_HOST_MAXSIZE --------->| + */ + +/* The HOST need to provided 64 bytes (512 bits) entropy for the bcm SW RNG */ +#define BCM_ENTROPY_MAGIC_SIZE 4u +#define BCM_ENTROPY_COUNT_SIZE 4u +#define BCM_ENTROPY_SEED_NBYTES 64u +#define BCM_ENTROPY_NONCE_NBYTES 16u +#define BCM_ENTROPY_HOST_NBYTES 128u + +#ifdef DBG_RNG_SEC_TEST +#define BCM_ENTROPY_MAX_NBYTES 128u +#else +#define BCM_ENTROPY_MAX_NBYTES 512u +#endif /* DBG_RNG_SEC_TEST */ +#define BCM_ENTROPY_HOST_MAXSIZE \ + (BCM_ENTROPY_MAGIC_SIZE + BCM_ENTROPY_COUNT_SIZE + BCM_ENTROPY_MAX_NBYTES) + +/* Constant for calculate the location of host entropy input */ +#define BCM_NVRAM_OFFSET_TCM 4u +#define BCM_NVRAM_IMG_COMPRS_FACTOR 4u +#define BCM_NVRAM_RNG_SIGNATURE 0xFEEDC0DEu + +#endif /* _bcmrand_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmsdbus.h b/bcmdhd.101.10.361.x/include/bcmsdbus.h new file mode 100755 index 0000000..d2d89e8 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmsdbus.h @@ -0,0 +1,187 @@ +/* + * Definitions for API from sdio common code (bcmsdh) to individual + * host controller drivers. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
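The layout constants above put the host entropy region at 4 magic bytes plus 4 count bytes plus up to 512 entropy bytes, i.e. 520 bytes in a non-`DBG_RNG_SEC_TEST` build. A standalone C11 check of that arithmetic, with the relevant defines copied from the header:

/* Copied from bcmrand.h (non-DBG_RNG_SEC_TEST configuration) */
#define BCM_ENTROPY_MAGIC_SIZE	4u
#define BCM_ENTROPY_COUNT_SIZE	4u
#define BCM_ENTROPY_MAX_NBYTES	512u
#define BCM_ENTROPY_HOST_MAXSIZE \
	(BCM_ENTROPY_MAGIC_SIZE + BCM_ENTROPY_COUNT_SIZE + BCM_ENTROPY_MAX_NBYTES)

/* magic + count + max entropy bytes = 520 */
_Static_assert(BCM_ENTROPY_HOST_MAXSIZE == 520u, "entropy region size mismatch");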
The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _sdio_api_h_ +#define _sdio_api_h_ + +#if defined (BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ + +/* + * The following were: + * incorrectly in bcmsdio.h + * incorrectly named using SDIOH which indicates BRCM SDIO FPGA host controller + */ + +#define SDIOH_API_RC_SUCCESS (0x00) +#define SDIOH_API_RC_FAIL (0x01) +#define SDIOH_API_SUCCESS(status) (status == 0) + +#define SDIOH_READ 0 /* Read request */ +#define SDIOH_WRITE 1 /* Write request */ + +#define SDIOH_DATA_FIX 0 /* Fixed addressing */ +#define SDIOH_DATA_INC 1 /* Incremental addressing */ + +#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */ +#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */ +#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */ + +#define SDIOH_DATA_PIO 0 /* PIO mode */ +#define SDIOH_DATA_DMA 1 /* DMA mode */ + +/* Max number of glommed pkts */ +#ifdef CUSTOM_MAX_TXGLOM_SIZE +#define SDPCM_MAXGLOM_SIZE CUSTOM_MAX_TXGLOM_SIZE +#else +#define SDPCM_MAXGLOM_SIZE 36 +#endif /* CUSTOM_MAX_TXGLOM_SIZE */ + +#define SDPCM_TXGLOM_CPY 0 /* SDIO 2.0 should use copy mode */ +#define SDPCM_TXGLOM_MDESC 1 /* SDIO 3.0 should use multi-desc mode */ + +#ifdef CUSTOM_DEF_TXGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE CUSTOM_DEF_TXGLOM_SIZE +#else +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif /* CUSTOM_DEF_TXGLOM_SIZE */ + +#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE +#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!" +#undef SDPCM_DEFGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif + +#ifdef PKT_STATICS +typedef struct pkt_statics { + uint16 event_count; + uint32 event_size; + uint16 ctrl_count; + uint32 ctrl_size; + uint32 data_count; + uint32 data_size; + uint32 glom_cnt[SDPCM_MAXGLOM_SIZE]; + uint16 glom_max; + uint16 glom_count; + uint32 glom_size; + uint16 test_count; + uint32 test_size; + uint32 glom_cnt_us[SDPCM_MAXGLOM_SIZE]; +} pkt_statics_t; +#endif + +typedef int SDIOH_API_RC; + +/* SDio Host structure */ +typedef struct sdioh_info sdioh_info_t; + +/* callback function, taking one arg */ +typedef void (*sdioh_cb_fn_t)(void *); +#if defined (BT_OVER_SDIO) +extern +void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func); +#endif /* defined (BT_OVER_SDIO) */ + +extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh); +extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si); + +/* query whether SD interrupt is enabled or not */ +extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff); + +/* enable or disable SD interrupt */ +extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable); + +#if defined(DHD_DEBUG) || defined(BCMDBG) +extern bool sdioh_interrupt_pending(sdioh_info_t *si); +#endif + +/* read or write one byte using cmd52 */ +extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte); + +/* read or write 2/4 bytes using cmd53 */ +extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc, + uint addr, uint32 *word, uint nbyte); + +/* read or write any buffer using cmd53 */ +extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc, + uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer, + void *pkt); + +/* get cis data */ +extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 
length); +extern SDIOH_API_RC sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset); + +extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); +extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); + +/* query number of io functions */ +extern uint sdioh_query_iofnum(sdioh_info_t *si); + +/* handle iovars */ +extern int sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, uint len, bool set); + +/* Issue abort to the specified function and clear controller as needed */ +extern int sdioh_abort(sdioh_info_t *si, uint fnc); + +/* Start and Stop SDIO without re-enumerating the SD card. */ +extern int sdioh_start(sdioh_info_t *si, int stage); +extern int sdioh_stop(sdioh_info_t *si); + +/* Wait system lock free */ +extern int sdioh_waitlockfree(sdioh_info_t *si); + +/* Reset and re-initialize the device */ +extern int sdioh_sdio_reset(sdioh_info_t *si); + +#ifdef BCMSPI +/* Function to pass gSPI specific device-status bits to dhd. */ +extern uint32 sdioh_get_dstatus(sdioh_info_t *si); + +/* chipid and chiprev info for lower layers to control sw WAR's for hw bugs. */ +extern void sdioh_chipinfo(sdioh_info_t *si, uint32 chip, uint32 chiprev); +extern void sdioh_dwordmode(sdioh_info_t *si, bool set); +#endif /* BCMSPI */ + +#if defined(BCMSDIOH_STD) + /* + * Only STD host supports cmd14 sleep. + * Using define instead of empty stubs for other hosts for now. + */ + #define SDIOH_SLEEP_ENABLED +#endif +extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab); + +/* GPIO support */ +extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd); +extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab); +extern uint sdioh_set_mode(sdioh_info_t *sd, uint mode); +#ifdef PKT_STATICS +extern uint32 sdioh_get_spend_time(sdioh_info_t *sd); +#endif + +#endif /* _sdio_api_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmsdh.h b/bcmdhd.101.10.361.x/include/bcmsdh.h new file mode 100755 index 0000000..81c3438 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmsdh.h @@ -0,0 +1,290 @@ +/* + * SDIO host client driver interface of Broadcom HNBU + * export functions to client drivers + * abstract OS and BUS specific details of SDIO + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
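A typical read-modify-write over the CMD52 byte interface declared above might look like the following sketch; the helper name is invented for illustration and the register address is whatever CCCR offset the caller needs:

/* Set a bit in a function-0 (CCCR) register via two CMD52 transactions. */
static SDIOH_API_RC sdioh_set_cccr_bit(sdioh_info_t *sd, uint addr, uint8 bit)
{
	uint8 val = 0;
	SDIOH_API_RC rc;

	rc = sdioh_request_byte(sd, SDIOH_READ, 0, addr, &val);	/* func 0 */
	if (!SDIOH_API_SUCCESS(rc))
		return rc;

	val |= bit;
	return sdioh_request_byte(sd, SDIOH_WRITE, 0, addr, &val);
}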
+ * + * + * <> + */ + +/** + * @file bcmsdh.h + */ + +#ifndef _bcmsdh_h_ +#define _bcmsdh_h_ + +#define BCMSDH_ERROR_VAL 0x0001 /* Error */ +#define BCMSDH_INFO_VAL 0x0002 /* Info */ +extern const uint bcmsdh_msglevel; + +#ifdef BCMDBG +#define BCMSDH_ERROR(x) do { if (bcmsdh_msglevel & BCMSDH_ERROR_VAL) printf x; } while (0) +#define BCMSDH_INFO(x) do { if (bcmsdh_msglevel & BCMSDH_INFO_VAL) printf x; } while (0) +#else /* BCMDBG */ +#define BCMSDH_ERROR(x) +#define BCMSDH_INFO(x) +#endif /* BCMDBG */ + +#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || defined(BCMSDIOH_SPI)) +#define BCMSDH_ADAPTER +#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */ + +/* forward declarations */ +typedef struct bcmsdh_info bcmsdh_info_t; +typedef void (*bcmsdh_cb_fn_t)(void *); + +#if defined(NDIS) && (NDISVER >= 0x0630) && defined(BCMDONGLEHOST) +extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, + void **regsva, uint irq, shared_info_t *sh); +#else + +#if defined(BT_OVER_SDIO) +typedef enum { + NO_HANG_STATE = 0, + HANG_START_STATE = 1, + HANG_RECOVERY_STATE = 2 +} dhd_hang_state_t; +#endif + +extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva); +/** + * BCMSDH API context + */ +struct bcmsdh_info +{ + bool init_success; /* underlying driver successfully attached */ + void *sdioh; /* handler for sdioh */ + uint32 vendevid; /* Target Vendor and Device ID on SD bus */ + osl_t *osh; + bool regfail; /* Save status of last reg_read/reg_write call */ + uint32 sbwad; /* Save backplane window address */ + void *os_cxt; /* Pointer to per-OS private data */ + bool force_sbwad_calc; /* forces calculation of sbwad instead of using cached value */ +#ifdef DHD_WAKE_STATUS + unsigned int total_wake_count; + int pkt_wake; +#endif /* DHD_WAKE_STATUS */ +}; +#endif /* defined(NDIS) && (NDISVER >= 0x0630) && defined(BCMDONGLEHOST) */ + +/* Detach - freeup resources allocated in attach */ +extern int bcmsdh_detach(osl_t *osh, void *sdh); + +/* Query if SD device interrupts are enabled */ +extern bool bcmsdh_intr_query(void *sdh); + +/* Enable/disable SD interrupt */ +extern int bcmsdh_intr_enable(void *sdh); +extern int bcmsdh_intr_disable(void *sdh); + +/* Register/deregister device interrupt handler. */ +extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); +extern int bcmsdh_intr_dereg(void *sdh); +/* Enable/disable SD card interrupt forward */ +extern void bcmsdh_intr_forward(void *sdh, bool pass); + +#if defined(DHD_DEBUG) || defined(BCMDBG) +/* Query pending interrupt status from the host controller */ +extern bool bcmsdh_intr_pending(void *sdh); +#endif + +/* Register a callback to be called if and when bcmsdh detects + * device removal. No-op in the case of non-removable/hardwired devices. + */ +extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); + +/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). + * fn: function number + * addr: unmodified SDIO-space address + * data: data byte to write + * err: pointer to error code (or NULL) + */ +extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err); +extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err); + +/* Read/Write 4bytes from/to cfg space */ +extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err); +extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err); + +/* Read CIS content for specified function. 
+ * fn: function whose CIS is being requested (0 is common CIS) + * cis: pointer to memory location to place results + * length: number of bytes to read + * Internally, this routine uses the values from the cis base regs (0x9-0xB) + * to form an SDIO-space address to read the data from. + */ +extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length); +extern int bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint offset); + +/* Synchronous access to device (client) core registers via CMD53 to F1. + * addr: backplane address (i.e. >= regsva from attach) + * size: register width in bytes (2 or 4) + * data: data for register write + */ +extern uint32 bcmsdh_reg_read(void *sdh, uintptr addr, uint size); +extern uint32 bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data); + +/* set sb address window */ +extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set); + +/* Indicate if last reg read/write failed */ +/* Replace this with status pointers in reg_read/write */ +extern bool bcmsdh_regfail(void *sdh); + +/* Buffer transfer to/from device (client) core via cmd53. + * fn: function number + * addr: backplane address (i.e. >= regsva from attach) + * flags: backplane width, address increment, sync/async + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * pkt: pointer to packet associated with buf (if any) + * complete: callback function for command completion (async only) + * handle: handle for completion callback (first arg in callback) + * Returns 0 or error code. + * NOTE: Async operation is not currently supported. + */ + +typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting); +extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); +extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); + +extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len); +extern void bcmsdh_glom_clear(void *sdh); +extern uint bcmsdh_set_mode(void *sdh, uint mode); +extern bool bcmsdh_glom_enabled(void); +#ifdef PKT_STATICS +extern uint32 bcmsdh_get_spend_time(void *sdh) ; +#endif +/* Flags bits */ +#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */ +#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */ +#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */ +#define SDIO_BYTE_MODE 0x8 /* Byte mode request(non-block mode) */ + +/* Pending (non-error) return code */ +#define BCME_PENDING 1 + +/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). + * rw: read or write (0/1) + * addr: direct SDIO address + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * Returns 0 or error code. + */ +extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes); + +/* Issue an abort to the specified function */ +extern int bcmsdh_abort(void *sdh, uint fn); + +/* Start SDIO Host Controller communication */ +extern int bcmsdh_start(void *sdh, int stage); + +/* Stop SDIO Host Controller communication */ +extern int bcmsdh_stop(void *sdh); + +/* Wait system lock free */ +extern int bcmsdh_waitlockfree(void *sdh); + +/* Bogosity alert. 
This should only know about devids gleaned through + * the standard CIS (versus some client dependent method), and we already + * have an interface for the CIS. + * Remove me. + */ +/* Returns the "Device ID" of target device on the SDIO bus. */ +extern int bcmsdh_query_device(void *sdh); + +/* Returns the number of IO functions reported by the device */ +extern uint bcmsdh_query_iofnum(void *sdh); + +/* Miscellaneous knob tweaker. */ +extern int bcmsdh_iovar_op(void *sdh, const char *name, + void *params, uint plen, void *arg, uint len, bool set); + +/* Reset and reinitialize the device */ +extern int bcmsdh_reset(bcmsdh_info_t *sdh); + +/* helper functions */ + +/* callback functions */ +typedef struct { + /* probe the device */ + void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot, + uint16 func, uint bustype, void * regsva, osl_t * osh, + void * param); + /* remove the device */ + void (*remove)(void *context); + /* can we suspend now */ + int (*suspend)(void *context); + /* resume from suspend */ + int (*resume)(void *context); +} bcmsdh_driver_t; + +/* platform specific/high level functions */ +extern int bcmsdh_register(bcmsdh_driver_t *driver); +extern void bcmsdh_unregister(void); +extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device); +extern void bcmsdh_device_remove(void * sdh); + +extern int bcmsdh_reg_sdio_notify(void* semaphore); +extern void bcmsdh_unreg_sdio_notify(void); + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) +extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context); +extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh); +extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable); +extern int bcmsdh_get_oob_intr_num(bcmsdh_info_t *bcmsdh); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ +extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh); +extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh); +extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh); + +int bcmsdh_suspend(bcmsdh_info_t *bcmsdh); +int bcmsdh_resume(bcmsdh_info_t *bcmsdh); + +/* Function to pass device-status bits to DHD. */ +extern uint32 bcmsdh_get_dstatus(void *sdh); + +/* Function to return current window addr */ +extern uint32 bcmsdh_cur_sbwad(void *sdh); + +/* function to force sbwad calculation instead of using cached value */ +extern void bcmsdh_force_sbwad_calc(void *sdh, bool force); + +/* Function to pass chipid and rev to lower layers for controlling pr's */ +extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev); + +#ifdef BCMSPI +extern void bcmsdh_dwordmode(void *sdh, bool set); +#endif /* BCMSPI */ + +extern int bcmsdh_sleep(void *sdh, bool enab); + +/* GPIO support */ +extern int bcmsdh_gpio_init(void *sd); +extern bool bcmsdh_gpioin(void *sd, uint32 gpio); +extern int bcmsdh_gpioouten(void *sd, uint32 gpio); +extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab); + +#endif /* _bcmsdh_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h b/bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h new file mode 100755 index 0000000..d9a67e0 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h @@ -0,0 +1,142 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 2020, Broadcom. 
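Since `bcmsdh_reg_read()` returns only the data word, callers detect transfer errors through `bcmsdh_regfail()`, exactly the pairing the comment above asks to eventually replace with status pointers. A sketch, assuming the usual `BCME_*` codes are in scope and with an illustrative helper name:

/* Read a 4-byte backplane register and detect a failed SDIO transfer. */
static int read_core_reg(void *sdh, uintptr addr, uint32 *out)
{
	uint32 val = bcmsdh_reg_read(sdh, addr, sizeof(uint32));

	if (bcmsdh_regfail(sdh))
		return BCME_ERROR;	/* transaction failed; val is garbage */

	*out = val;
	return BCME_OK;
}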
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef __BCMSDH_SDMMC_H__ +#define __BCMSDH_SDMMC_H__ + +#ifdef BCMDBG +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0) +#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0) +#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0) +#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0) +#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) +#define sd_cost(x) do { if (sd_msglevel & SDH_COST_VAL) printf x; } while (0) +#else +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#define sd_cost(x) do { if (sd_msglevel & SDH_COST_VAL) printf x; } while (0) +#endif + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); + +#ifdef BCMPERFSTATS +#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0) +#else +#define sd_log(x) +#endif + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SD4 2 +#define CLIENT_INTR 0x100 /* Get rid of this! */ +#define SDIOH_SDMMC_MAX_SG_ENTRIES 64 + +struct sdioh_info { + osl_t *osh; /* osh handler */ + void *bcmsdh; /* upper layer handle */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + uint16 intmask; /* Current active interrupts */ + + int intrcount; /* Client interrupts */ + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + bool use_rxchain; + struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES]; + struct sdio_func fake_func0; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; + uint sd_clk_rate; + uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */ +#ifdef PKT_STATICS + uint32 sdio_spent_time_us; +#endif +#if !defined(OOB_INTR_ONLY) + struct mutex claim_host_mutex; // terence 20140926: fix for claim host issue +#endif +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdh_sdmmc.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); + +/************************************************************** + * Internal interfaces: bcmsdh_sdmmc.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size); +extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq); +extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); + +#ifdef GLOBAL_SDMMC_INSTANCE +typedef struct _BCMSDH_SDMMC_INSTANCE { + sdioh_info_t *sd; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; +} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE; +#endif + +#endif /* __BCMSDH_SDMMC_H__ */ diff --git a/bcmdhd.101.10.361.x/include/bcmsdpcm.h b/bcmdhd.101.10.361.x/include/bcmsdpcm.h new file mode 100755 index 0000000..3e01299 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmsdpcm.h @@ -0,0 +1,304 @@ +/* + * Broadcom SDIO/PCMCIA + * Software-specific definitions shared between device and host side + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
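Note the single parenthesized argument taken by the `sd_*` message macros above: the caller supplies a complete printf argument list, so a disabled level compiles away to nothing. A self-contained illustration of the same pattern:

#include <stdio.h>

#define SDH_ERROR_VAL 0x0001
static unsigned int sd_msglevel = SDH_ERROR_VAL;

/* Same shape as the driver's macro: 'x' is a full parenthesized printf
 * argument list, so sd_err(("%d\n", v)) expands to printf ("%d\n", v).
 */
#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)

int main(void)
{
	int err = -12;
	sd_err(("request failed, err %d\n", err));	/* note the double parentheses */
	return 0;
}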
+ * + * + * <> + */ + +#ifndef _bcmsdpcm_h_ +#define _bcmsdpcm_h_ + +/* + * Software allocation of To SB Mailbox resources + */ + +/* intstatus bits */ +#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */ +#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */ +#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */ +#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */ + +#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT) + +/* tosbmailbox bits corresponding to intstatus bits */ +#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */ +#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */ +#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */ +#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */ +#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ + +/* tosbmailboxdata */ + +#ifdef DS_PROT +/* Bit msgs for custom deep sleep protocol */ +#define SMB_DATA_D3INFORM 0x100 /* host announcing D3 entry */ +#define SMB_DATA_DSACK 0x200 /* host acking a deepsleep request */ +#define SMB_DATA_DSNACK 0x400 /* host nacking a deepsleep request */ +#endif /* DS_PROT */ +/* force a trap */ +#define SMB_DATA_TRAP 0x800 /* host forcing trap */ + +#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ +#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ + +/* + * Software allocation of To Host Mailbox resources + */ + +/* intstatus bits */ +#define I_HMB_INT_ACK I_HMB_SW0 /* To Host Mailbox Dev Interrupt ACK */ +#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ +#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ +#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ +#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */ + +#define I_TOHOSTMAIL (I_HMB_INT_ACK | I_HMB_FRAME_IND | I_HMB_HOST_INT) + +/* tohostmailbox bits corresponding to intstatus bits */ +#define HMB_INT_ACK (1 << 0) /* To Host Mailbox Dev Interrupt ACK */ +#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ +#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ +#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ + +/* tohostmailboxdata */ +#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */ +#define HMB_DATA_DEVREADY 0x02 /* we're ready to to talk to host after enable */ +#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */ +#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */ +#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */ + +#ifdef DS_PROT +/* Bit msgs for custom deep sleep protocol */ +#define HMB_DATA_DSREQ 0x100 /* firmware requesting deepsleep entry */ +#define HMB_DATA_DSEXIT 0x200 /* firmware announcing deepsleep exit */ +#define HMB_DATA_D3ACK 0x400 /* firmware acking a D3 notice from host */ +#define HMB_DATA_D3EXIT 0x800 /* firmware announcing D3 exit */ +#define HMB_DATA_DSPROT_MASK 0xf00 +#endif /* DS_PROT */ + +#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */ +#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */ + +#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */ +#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */ + +/* + * Software-defined protocol header + */ +/* Replace all this with packed struct */ 
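The TODO above suggests replacing the byte-offset macros defined just below with a packed struct; a hypothetical equivalent of the 8-byte SW header, with fields ordered by the `SDPCM_*_OFFSET` values, could look like this:

#include <stdint.h>

/* Hypothetical packed view of the 64-bit SDPCM software header; offsets
 * mirror the macros below (seq=0, chan/flags=1, nextlen=2, doffset=3,
 * fcmask=4, window=5, version=6, spare=7).
 */
#pragma pack(push, 1)
typedef struct sdpcm_sw_header {
	uint8_t sequence;	/* rx/tx sequence number */
	uint8_t channel_flags;	/* channel in low nibble, flags in high nibble */
	uint8_t nextlen;	/* next frame length, in 16-byte units */
	uint8_t doffset;	/* data offset from start of frame */
	uint8_t fcmask;		/* per-priority flow-control bitmap */
	uint8_t window;		/* credit-based flow-control window */
	uint8_t version;	/* protocol version */
	uint8_t spare;		/* unused */
} sdpcm_sw_header_t;
#pragma pack(pop)

_Static_assert(sizeof(sdpcm_sw_header_t) == 8, "SW header is 64 bits");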
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK	0x000000ff	/* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p)	(((uint8 *)p)[0] & 0xff)	/* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK	0x00000f00	/* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT	8	/* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p)	(((uint8 *)p)[1] & 0x0f)	/* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK	0x0000f000	/* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT	12	/* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p)	((((uint8 *)p)[1] & 0xf0) >> 4)	/* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK	0x00ff0000	/* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT	16	/* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p)	((((uint8 *)p)[2] & 0xff) << 4)	/* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET	2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET	3	/* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p)	(((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK	0xff000000
+#define SDPCM_DOFFSET_SHIFT	24
+
+#define SDPCM_FCMASK_OFFSET	4	/* Flow control */
+#define SDPCM_FCMASK_VALUE(p)	(((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET	5	/* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p)	(((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET	6	/* Version # */
+#define SDPCM_VERSION_VALUE(p)	(((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET	7	/* Spare */
+#define SDPCM_UNUSED_VALUE(p)	(((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN	8	/* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL	0	/* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL	1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL	3	/* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL	15	/* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL	15
+
+#define SDPCM_SEQUENCE_WRAP	256	/* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0	0x01
+#define SDPCM_FLAG_RESVD1	0x02
+#define SDPCM_FLAG_GSPI_TXENAB	0x04	/* GSPI Tx enable (PR55150 only) */
+#define SDPCM_FLAG_GLOMDESC	0x08	/* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG	(SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p)	(((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN	4	/* Generally: Cmd(1), Ext(1), Len(2);
+					 * Semantics of Ext byte depend on command.
+					 * Len is current or requested frame length, not
+					 * including test header; sent little-endian.
+					 */
+#define SDPCM_TEST_PKT_CNT_FLD_LEN	4	/* Packet count field length */
+#define SDPCM_TEST_DISCARD	0x01	/* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ	0x02	/* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP	0x03	/* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST	0x04	/* Receiver to send a burst. Ext is a frame count
+					 * (backward compatibility). Set frame count in a
+					 * 4-byte field adjacent to the HDR.
+					 */
+#define SDPCM_TEST_SEND		0x05	/* Receiver sets send mode.
Ext is boolean on/off + * Set frame count in a 4 byte filed adjacent to + * the HDR + */ + +/* Handy macro for filling in datagen packets with a pattern */ +#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno)) + +/* + * Software counters (first part matches hardware counters) + */ + +typedef volatile struct { + uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */ + uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */ + uint32 wroutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */ + uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */ + uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */ + uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */ + uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */ + uint32 rxdescuflo; /* receive descriptor underflows */ + uint32 rxfifooflo; /* receive fifo overflows */ + uint32 txfifouflo; /* transmit fifo underflows */ + uint32 runt; /* runt (too short) frames recv'd from bus */ + uint32 badlen; /* frame's rxh len does not match its hw tag len */ + uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */ + uint32 seqbreak; /* break in sequence # space from one rx frame to the next */ + uint32 rxfcrc; /* frame rx header indicates crc error */ + uint32 rxfwoos; /* frame rx header indicates write out of sync */ + uint32 rxfwft; /* frame rx header indicates write frame termination */ + uint32 rxfabort; /* frame rx header indicates frame aborted */ + uint32 woosint; /* write out of sync interrupt */ + uint32 roosint; /* read out of sync interrupt */ + uint32 rftermint; /* read frame terminate interrupt */ + uint32 wftermint; /* write frame terminate interrupt */ +} sdpcmd_cnt_t; + +/* + * Register Access Macros + */ + +#define SDIODREV_IS(var, val) ((var) == (val)) +#define SDIODREV_GE(var, val) ((var) >= (val)) +#define SDIODREV_GT(var, val) ((var) > (val)) +#define SDIODREV_LT(var, val) ((var) < (val)) +#define SDIODREV_LE(var, val) ((var) <= (val)) + +#define SDIODDMAREG32(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv)) + +#define SDIODDMAREG64(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv)) + +#define SDIODDMAREG(h, dir, chnl) \ + (SDIODREV_LT((h)->corerev, 1) ? \ + SDIODDMAREG32((h), (dir), (chnl)) : \ + SDIODDMAREG64((h), (dir), (chnl))) + +#define PCMDDMAREG(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv)) + +#define SDPCMDMAREG(h, dir, chnl, coreid) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODDMAREG(h, dir, chnl) : \ + PCMDDMAREG(h, dir, chnl)) + +#define SDIODFIFOREG(h, corerev) \ + (SDIODREV_LT((corerev), 1) ? 
\ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo))) + +#define PCMDFIFOREG(h) \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo)) + +#define SDPCMFIFOREG(h, coreid, corerev) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODFIFOREG(h, corerev) : \ + PCMDFIFOREG(h)) + +/* + * Shared structure between dongle and the host. + * The structure contains pointers to trap or assert information. + */ +#define SDPCM_SHARED_VERSION 0x0001 +#define SDPCM_SHARED_VERSION_MASK 0x00FF +#define SDPCM_SHARED_ASSERT_BUILT 0x0100 +#define SDPCM_SHARED_ASSERT 0x0200 +#define SDPCM_SHARED_TRAP 0x0400 +#define SDPCM_SHARED_IN_BRPT 0x0800 +#define SDPCM_SHARED_SET_BRPT 0x1000 +#define SDPCM_SHARED_PENDING_BRPT 0x2000 +#define SDPCM_SHARED_FATAL_LOGBUF_VALID 0x100000 +#define SDPCM_SHARED_RXLIM_POST 0x4000 +#define SDPCM_SHARED_TXSEQ_SYNC 0x4000 + +typedef struct { + uint32 flags; + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /* Address of hnd_cons_t */ + uint32 msgtrace_addr; + uint32 fwid; + uint32 device_fatal_logbuf_start; +#ifdef BCMSDIO_TXSEQ_SYNC + uint32 txseq_sync_addr; +#endif /* BCMSDIO_TXSEQ_SYNC */ +} sdpcm_shared_t; + +/* Device F/W provides the following access function: + * sdpcm_shared_t *hnd_get_sdpcm_shared(void); + */ + +#endif /* _bcmsdpcm_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmspi.h b/bcmdhd.101.10.361.x/include/bcmspi.h new file mode 100755 index 0000000..bfdbab2 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmspi.h @@ -0,0 +1,37 @@ +/* + * Broadcom SPI Low-Level Hardware Driver API + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
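Host code typically validates an `sdpcm_shared_t` snapshot before trusting any address it carries; a sketch of the usual checks against the flag bits above, with error handling and console I/O simplified and the driver types assumed in scope:

/* Validate a shared-struct snapshot read back from the dongle. */
static int sdpcm_shared_check(const sdpcm_shared_t *sh)
{
	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION)
		return BCME_ERROR;	/* host/dongle protocol mismatch */

	if (sh->flags & SDPCM_SHARED_TRAP)
		printf("dongle trap at 0x%08x\n", sh->trap_addr);

	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) &&
	    (sh->flags & SDPCM_SHARED_ASSERT))
		printf("assert at line %u\n", sh->assert_line);

	return BCME_OK;
}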
+ * + * + * <> + */ +#ifndef _BCM_SPI_H +#define _BCM_SPI_H + +extern void spi_devintr_off(sdioh_info_t *sd); +extern void spi_devintr_on(sdioh_info_t *sd); +extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); +extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); +extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); +extern bool spi_hw_attach(sdioh_info_t *sd); +extern bool spi_hw_detach(sdioh_info_t *sd); +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +extern void spi_spinbits(sdioh_info_t *sd); +extern void spi_waitbits(sdioh_info_t *sd, bool yield); + +#endif /* _BCM_SPI_H */ diff --git a/bcmdhd.101.10.361.x/include/bcmspibrcm.h b/bcmdhd.101.10.361.x/include/bcmspibrcm.h new file mode 100755 index 0000000..298edda --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmspibrcm.h @@ -0,0 +1,165 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ */
+#ifndef _BCM_SPI_BRCM_H
+#define _BCM_SPI_BRCM_H
+
+#ifndef SPI_MAX_IOFUNCS
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+#endif
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#if defined(BCMDBG) || defined(DHD_DEBUG)
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+#ifdef BCMPERFSTATS
+#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_F1 64
+#define BLOCK_SIZE_F2 2048
+#define BLOCK_SIZE_F3 2048
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+#define ERROR_UF 2
+#define ERROR_OF 3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+#ifndef BCMSPI_ANDROID
+ void *bar0; /* BAR0 for PCI Device */
+#endif /* !BCMSPI_ANDROID */
+ osl_t *osh; /* osl handle */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+ uint lockcount; /* nest count of spi_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s.
*/
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 card_dstatus; /* 32bit device status */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SPI_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx count */
+ uint32 wordlen; /* host processor 16/32 bits */
+ uint32 prev_fun;
+ uint32 chip;
+ uint32 chiprev;
+ bool resp_delay_all;
+ bool dwordmode;
+ bool resp_delay_new;
+
+ struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S 31
+#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S 30
+#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S 28
+#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S 11
+#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */
+#define SPI_LEN_S 0
+
+#endif /* _BCM_SPI_BRCM_H */ diff --git a/bcmdhd.101.10.361.x/include/bcmsrom.h b/bcmdhd.101.10.361.x/include/bcmsrom.h new file mode 100755 index 0000000..f3008b2 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmsrom.h @@ -0,0 +1,72 @@
+/*
+ * Misc useful routines to access NIC local SROM/OTP.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmsrom_h_
+#define _bcmsrom_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
+
+#include <bcmsrom_fmt.h>
+
+typedef struct srom_info {
+ char *_srom_vars;
+ bool is_caldata_prsnt;
+} srom_info_t;
+
+/* Prototypes */
+extern int srom_var_init(si_t *sih, uint bus, volatile void *curmap, osl_t *osh,
+ char **vars, uint *count);
+extern void srom_var_deinit(si_t *sih);
+
+extern int srom_read(si_t *sih, uint bus, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint nbytes, uint16 *buf,
+ bool check_crc);
+
+extern int srom_write(si_t *sih, uint bus, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint nbytes, uint16 *buf);
+
+extern int srom_write_short(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint16 value);
+extern int srom_otp_cisrwvar(si_t *sih, osl_t *osh, char *vars, int *count);
+extern int srom_otp_write_region_crc(si_t *sih, uint nbytes, uint16* buf16, bool write);
+
+/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP,
+ * and extract it into name=value pairs
+ */
+extern int srom_parsecis(si_t *sih, osl_t *osh, uint8 **pcis, uint ciscnt,
+ char **vars, uint *count);
+extern int _initvars_srom_pci_caldata(si_t *sih, uint16 *srom, uint32 sromrev);
+extern void srom_set_sromvars(char *vars);
+extern char * srom_get_sromvars(void);
+extern srom_info_t * srom_info_init(osl_t *osh);
+extern int get_srom_pci_caldata_size(uint32 sromrev);
+extern uint32 get_srom_size(uint32 sromrev);
+
+/* Return sprom size in 16-bit words */
+extern uint srom_size(si_t *sih, osl_t *osh);
+
+extern bool srom_caldata_prsnt(si_t *sih);
+extern int srom_get_caldata(si_t *sih, uint16 *srom);
+#endif /* _bcmsrom_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmsrom_fmt.h b/bcmdhd.101.10.361.x/include/bcmsrom_fmt.h new file mode 100755 index 0000000..97e3e4d --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmsrom_fmt.h @@ -0,0 +1,1028 @@
+/*
+ * SROM format definition.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmsrom_fmt_h_
+#define _bcmsrom_fmt_h_
+
+#define SROM_MAXREV 18 /* max revision supported by driver */
+
+/* Maximum srom: 16 Kilobits == 2048 bytes */
+
+#define SROM_MAX 2048
+#define SROM_MAXW 1024
+
+#ifdef LARGE_NVRAM_MAXSZ
+#define VARS_MAX LARGE_NVRAM_MAXSZ
+#else
+#if defined(BCMROMBUILD) || defined(DONGLEBUILD)
+#define VARS_MAX 4096
+#else
+#define LARGE_NVRAM_MAXSZ 8192
+#define VARS_MAX LARGE_NVRAM_MAXSZ
+#endif /* BCMROMBUILD || DONGLEBUILD */
+#endif /* LARGE_NVRAM_MAXSZ */
+
+/* PCI fields */
+#define PCI_F0DEVID 48
+
+/* SROM Rev 2: 1 Kilobit map for 11a/b/g devices.
+ * SROM Rev 3: Upward compatible modification for lpphy and PCIe
+ * hardware workaround.
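+ *
+ * All offsets below are indices into the array of 16-bit words read from
+ * the SROM image; e.g. SROM_CCODE (word 59) sits at byte offset 118 in
+ * the 64-word rev-2 map.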
+ */ + +#define SROM_WORDS 64 +#define SROM_SIGN_MINWORDS 128 +#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */ + +#define SROM_SSID 2 +#define SROM_SVID 3 + +#define SROM_WL1LHMAXP 29 + +#define SROM_WL1LPAB0 30 +#define SROM_WL1LPAB1 31 +#define SROM_WL1LPAB2 32 + +#define SROM_WL1HPAB0 33 +#define SROM_WL1HPAB1 34 +#define SROM_WL1HPAB2 35 + +#define SROM_MACHI_IL0 36 +#define SROM_MACMID_IL0 37 +#define SROM_MACLO_IL0 38 +#define SROM_MACHI_ET0 39 +#define SROM_MACMID_ET0 40 +#define SROM_MACLO_ET0 41 +#define SROM_MACHI_ET1 42 +#define SROM_MACMID_ET1 43 +#define SROM_MACLO_ET1 44 +#define SROM3_MACHI 37 +#define SROM3_MACMID 38 +#define SROM3_MACLO 39 + +#define SROM_BXARSSI2G 40 +#define SROM_BXARSSI5G 41 + +#define SROM_TRI52G 42 +#define SROM_TRI5GHL 43 + +#define SROM_RXPO52G 45 + +#define SROM2_ENETPHY 45 + +#define SROM_AABREV 46 +/* Fields in AABREV */ +#define SROM_BR_MASK 0x00ff +#define SROM_CC_MASK 0x0f00 +#define SROM_CC_SHIFT 8 +#define SROM_AA0_MASK 0x3000 +#define SROM_AA0_SHIFT 12 +#define SROM_AA1_MASK 0xc000 +#define SROM_AA1_SHIFT 14 + +#define SROM_WL0PAB0 47 +#define SROM_WL0PAB1 48 +#define SROM_WL0PAB2 49 + +#define SROM_LEDBH10 50 +#define SROM_LEDBH32 51 + +#define SROM_WL10MAXP 52 + +#define SROM_WL1PAB0 53 +#define SROM_WL1PAB1 54 +#define SROM_WL1PAB2 55 + +#define SROM_ITT 56 + +#define SROM_BFL 57 +#define SROM_BFL2 28 +#define SROM3_BFL2 61 + +#define SROM_AG10 58 + +#define SROM_CCODE 59 + +#define SROM_OPO 60 + +#define SROM3_LEDDC 62 + +#define SROM_CRCREV 63 + +/* SROM Rev 4: Reallocate the software part of the srom to accomodate + * MIMO features. It assumes up to two PCIE functions and 440 bytes + * of useable srom i.e. the useable storage in chips with OTP that + * implements hardware redundancy. 
+ */ + +#define SROM4_WORDS 220 + +#define SROM4_SIGN 32 +#define SROM4_SIGNATURE 0x5372 + +#define SROM4_BREV 33 + +#define SROM4_BFL0 34 +#define SROM4_BFL1 35 +#define SROM4_BFL2 36 +#define SROM4_BFL3 37 +#define SROM5_BFL0 37 +#define SROM5_BFL1 38 +#define SROM5_BFL2 39 +#define SROM5_BFL3 40 + +#define SROM4_MACHI 38 +#define SROM4_MACMID 39 +#define SROM4_MACLO 40 +#define SROM5_MACHI 41 +#define SROM5_MACMID 42 +#define SROM5_MACLO 43 + +#define SROM4_CCODE 41 +#define SROM4_REGREV 42 +#define SROM5_CCODE 34 +#define SROM5_REGREV 35 + +#define SROM4_LEDBH10 43 +#define SROM4_LEDBH32 44 +#define SROM5_LEDBH10 59 +#define SROM5_LEDBH32 60 + +#define SROM4_LEDDC 45 +#define SROM5_LEDDC 45 + +#define SROM4_AA 46 +#define SROM4_AA2G_MASK 0x00ff +#define SROM4_AA2G_SHIFT 0 +#define SROM4_AA5G_MASK 0xff00 +#define SROM4_AA5G_SHIFT 8 + +#define SROM4_AG10 47 +#define SROM4_AG32 48 + +#define SROM4_TXPID2G 49 +#define SROM4_TXPID5G 51 +#define SROM4_TXPID5GL 53 +#define SROM4_TXPID5GH 55 + +#define SROM4_TXRXC 61 +#define SROM4_TXCHAIN_MASK 0x000f +#define SROM4_TXCHAIN_SHIFT 0 +#define SROM4_RXCHAIN_MASK 0x00f0 +#define SROM4_RXCHAIN_SHIFT 4 +#define SROM4_SWITCH_MASK 0xff00 +#define SROM4_SWITCH_SHIFT 8 + +/* Per-path fields */ +#define MAX_PATH_SROM 4 +#define SROM4_PATH0 64 +#define SROM4_PATH1 87 +#define SROM4_PATH2 110 +#define SROM4_PATH3 133 + +#define SROM4_2G_ITT_MAXP 0 +#define SROM4_2G_PA 1 +#define SROM4_5G_ITT_MAXP 5 +#define SROM4_5GLH_MAXP 6 +#define SROM4_5G_PA 7 +#define SROM4_5GL_PA 11 +#define SROM4_5GH_PA 15 + +/* Fields in the ITT_MAXP and 5GLH_MAXP words */ +#define B2G_MAXP_MASK 0xff +#define B2G_ITT_SHIFT 8 +#define B5G_MAXP_MASK 0xff +#define B5G_ITT_SHIFT 8 +#define B5GH_MAXP_MASK 0xff +#define B5GL_MAXP_SHIFT 8 + +/* All the miriad power offsets */ +#define SROM4_2G_CCKPO 156 +#define SROM4_2G_OFDMPO 157 +#define SROM4_5G_OFDMPO 159 +#define SROM4_5GL_OFDMPO 161 +#define SROM4_5GH_OFDMPO 163 +#define SROM4_2G_MCSPO 165 +#define SROM4_5G_MCSPO 173 +#define SROM4_5GL_MCSPO 181 +#define SROM4_5GH_MCSPO 189 +#define SROM4_CDDPO 197 +#define SROM4_STBCPO 198 +#define SROM4_BW40PO 199 +#define SROM4_BWDUPPO 200 + +#define SROM4_CRCREV 219 + +/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6. + * This is acombined srom for both MIMO and SISO boards, usable in + * the .130 4Kilobit OTP with hardware redundancy. 
+ */ + +#define SROM8_SIGN 64 + +#define SROM8_BREV 65 + +#define SROM8_BFL0 66 +#define SROM8_BFL1 67 +#define SROM8_BFL2 68 +#define SROM8_BFL3 69 + +#define SROM8_MACHI 70 +#define SROM8_MACMID 71 +#define SROM8_MACLO 72 + +#define SROM8_CCODE 73 +#define SROM8_REGREV 74 + +#define SROM8_LEDBH10 75 +#define SROM8_LEDBH32 76 + +#define SROM8_LEDDC 77 + +#define SROM8_AA 78 + +#define SROM8_AG10 79 +#define SROM8_AG32 80 + +#define SROM8_TXRXC 81 + +#define SROM8_BXARSSI2G 82 +#define SROM8_BXARSSI5G 83 +#define SROM8_TRI52G 84 +#define SROM8_TRI5GHL 85 +#define SROM8_RXPO52G 86 + +#define SROM8_FEM2G 87 +#define SROM8_FEM5G 88 +#define SROM8_FEM_ANTSWLUT_MASK 0xf800 +#define SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SROM8_FEM_TR_ISO_MASK 0x0700 +#define SROM8_FEM_TR_ISO_SHIFT 8 +#define SROM8_FEM_PDET_RANGE_MASK 0x00f8 +#define SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006 +#define SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SROM8_FEM_TSSIPOS_MASK 0x0001 +#define SROM8_FEM_TSSIPOS_SHIFT 0 + +#define SROM8_THERMAL 89 + +/* Temp sense related entries */ +#define SROM8_MPWR_RAWTS 90 +#define SROM8_TS_SLP_OPT_CORRX 91 +/* FOC: freiquency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */ +#define SROM8_FOC_HWIQ_IQSWP 92 + +#define SROM8_EXTLNAGAIN 93 + +/* Temperature delta for PHY calibration */ +#define SROM8_PHYCAL_TEMPDELTA 94 + +/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */ +#define SROM8_MPWR_1_AND_2 95 + +/* Per-path offsets & fields */ +#define SROM8_PATH0 96 +#define SROM8_PATH1 112 +#define SROM8_PATH2 128 +#define SROM8_PATH3 144 + +#define SROM8_2G_ITT_MAXP 0 +#define SROM8_2G_PA 1 +#define SROM8_5G_ITT_MAXP 4 +#define SROM8_5GLH_MAXP 5 +#define SROM8_5G_PA 6 +#define SROM8_5GL_PA 9 +#define SROM8_5GH_PA 12 + +/* All the miriad power offsets */ +#define SROM8_2G_CCKPO 160 + +#define SROM8_2G_OFDMPO 161 +#define SROM8_5G_OFDMPO 163 +#define SROM8_5GL_OFDMPO 165 +#define SROM8_5GH_OFDMPO 167 + +#define SROM8_2G_MCSPO 169 +#define SROM8_5G_MCSPO 177 +#define SROM8_5GL_MCSPO 185 +#define SROM8_5GH_MCSPO 193 + +#define SROM8_CDDPO 201 +#define SROM8_STBCPO 202 +#define SROM8_BW40PO 203 +#define SROM8_BWDUPPO 204 + +/* SISO PA parameters are in the path0 spaces */ +#define SROM8_SISO 96 + +/* Legacy names for SISO PA paramters */ +#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP) +#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA) +#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1) +#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2) +#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP) +#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP) +#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA) +#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1) +#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2) +#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA) +#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1) +#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2) +#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA) +#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1) +#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2) + +#define SROM8_CRCREV 219 + +/* SROM REV 9 */ +#define SROM9_2GPO_CCKBW20 160 +#define SROM9_2GPO_CCKBW20UL 161 +#define SROM9_2GPO_LOFDMBW20 162 +#define SROM9_2GPO_LOFDMBW20UL 164 + +#define SROM9_5GLPO_LOFDMBW20 166 +#define SROM9_5GLPO_LOFDMBW20UL 168 +#define SROM9_5GMPO_LOFDMBW20 170 +#define SROM9_5GMPO_LOFDMBW20UL 172 +#define SROM9_5GHPO_LOFDMBW20 174 +#define 
SROM9_5GHPO_LOFDMBW20UL 176 + +#define SROM9_2GPO_MCSBW20 178 +#define SROM9_2GPO_MCSBW20UL 180 +#define SROM9_2GPO_MCSBW40 182 + +#define SROM9_5GLPO_MCSBW20 184 +#define SROM9_5GLPO_MCSBW20UL 186 +#define SROM9_5GLPO_MCSBW40 188 +#define SROM9_5GMPO_MCSBW20 190 +#define SROM9_5GMPO_MCSBW20UL 192 +#define SROM9_5GMPO_MCSBW40 194 +#define SROM9_5GHPO_MCSBW20 196 +#define SROM9_5GHPO_MCSBW20UL 198 +#define SROM9_5GHPO_MCSBW40 200 + +#define SROM9_PO_MCS32 202 +#define SROM9_PO_LOFDM40DUP 203 +#define SROM9_EU_EDCRSTH 204 +#define SROM10_EU_EDCRSTH 204 +#define SROM8_RXGAINERR_2G 205 +#define SROM8_RXGAINERR_5GL 206 +#define SROM8_RXGAINERR_5GM 207 +#define SROM8_RXGAINERR_5GH 208 +#define SROM8_RXGAINERR_5GU 209 +#define SROM8_SUBBAND_PPR 210 +#define SROM8_PCIEINGRESS_WAR 211 +#define SROM8_EU_EDCRSTH 212 +#define SROM9_SAR 212 + +#define SROM8_NOISELVL_2G 213 +#define SROM8_NOISELVL_5GL 214 +#define SROM8_NOISELVL_5GM 215 +#define SROM8_NOISELVL_5GH 216 +#define SROM8_NOISELVL_5GU 217 +#define SROM8_NOISECALOFFSET 218 + +#define SROM9_REV_CRC 219 + +#define SROM10_CCKPWROFFSET 218 +#define SROM10_SIGN 219 +#define SROM10_SWCTRLMAP_2G 220 +#define SROM10_CRCREV 229 + +#define SROM10_WORDS 230 +#define SROM10_SIGNATURE SROM4_SIGNATURE + +/* SROM REV 11 */ +#define SROM11_BREV 65 + +#define SROM11_BFL0 66 +#define SROM11_BFL1 67 +#define SROM11_BFL2 68 +#define SROM11_BFL3 69 +#define SROM11_BFL4 70 +#define SROM11_BFL5 71 + +#define SROM11_MACHI 72 +#define SROM11_MACMID 73 +#define SROM11_MACLO 74 + +#define SROM11_CCODE 75 +#define SROM11_REGREV 76 + +#define SROM11_LEDBH10 77 +#define SROM11_LEDBH32 78 + +#define SROM11_LEDDC 79 + +#define SROM11_AA 80 + +#define SROM11_AGBG10 81 +#define SROM11_AGBG2A0 82 +#define SROM11_AGA21 83 + +#define SROM11_TXRXC 84 + +#define SROM11_FEM_CFG1 85 +#define SROM11_FEM_CFG2 86 + +/* Masks and offsets for FEM_CFG */ +#define SROM11_FEMCTRL_MASK 0xf800 +#define SROM11_FEMCTRL_SHIFT 11 +#define SROM11_PAPDCAP_MASK 0x0400 +#define SROM11_PAPDCAP_SHIFT 10 +#define SROM11_TWORANGETSSI_MASK 0x0200 +#define SROM11_TWORANGETSSI_SHIFT 9 +#define SROM11_PDGAIN_MASK 0x01f0 +#define SROM11_PDGAIN_SHIFT 4 +#define SROM11_EPAGAIN_MASK 0x000e +#define SROM11_EPAGAIN_SHIFT 1 +#define SROM11_TSSIPOSSLOPE_MASK 0x0001 +#define SROM11_TSSIPOSSLOPE_SHIFT 0 +#define SROM11_GAINCTRLSPH_MASK 0xf800 +#define SROM11_GAINCTRLSPH_SHIFT 11 + +#define SROM11_THERMAL 87 +#define SROM11_MPWR_RAWTS 88 +#define SROM11_TS_SLP_OPT_CORRX 89 +#define SROM11_XTAL_FREQ 90 +#define SROM11_5GB0_4080_W0_A1 91 +#define SROM11_PHYCAL_TEMPDELTA 92 +#define SROM11_MPWR_1_AND_2 93 +#define SROM11_5GB0_4080_W1_A1 94 +#define SROM11_TSSIFLOOR_2G 95 +#define SROM11_TSSIFLOOR_5GL 96 +#define SROM11_TSSIFLOOR_5GM 97 +#define SROM11_TSSIFLOOR_5GH 98 +#define SROM11_TSSIFLOOR_5GU 99 + +/* Masks and offsets for Thermal parameters */ +#define SROM11_TEMPS_PERIOD_MASK 0xf0 +#define SROM11_TEMPS_PERIOD_SHIFT 4 +#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f +#define SROM11_TEMPS_HYSTERESIS_SHIFT 0 +#define SROM11_TEMPCORRX_MASK 0xfc +#define SROM11_TEMPCORRX_SHIFT 2 +#define SROM11_TEMPSENSE_OPTION_MASK 0x3 +#define SROM11_TEMPSENSE_OPTION_SHIFT 0 + +#define SROM11_PDOFF_2G_40M_A0_MASK 0x000f +#define SROM11_PDOFF_2G_40M_A0_SHIFT 0 +#define SROM11_PDOFF_2G_40M_A1_MASK 0x00f0 +#define SROM11_PDOFF_2G_40M_A1_SHIFT 4 +#define SROM11_PDOFF_2G_40M_A2_MASK 0x0f00 +#define SROM11_PDOFF_2G_40M_A2_SHIFT 8 +#define SROM11_PDOFF_2G_40M_VALID_MASK 0x8000 +#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15 + +#define 
SROM11_PDOFF_2G_40M 100 +#define SROM11_PDOFF_40M_A0 101 +#define SROM11_PDOFF_40M_A1 102 +#define SROM11_PDOFF_40M_A2 103 +#define SROM11_5GB0_4080_W2_A1 103 +#define SROM11_PDOFF_80M_A0 104 +#define SROM11_PDOFF_80M_A1 105 +#define SROM11_PDOFF_80M_A2 106 +#define SROM11_5GB1_4080_W0_A1 106 + +#define SROM11_SUBBAND5GVER 107 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_11 3 +#define SROM11_PATH0 108 +#define SROM11_PATH1 128 +#define SROM11_PATH2 148 + +#define SROM11_2G_MAXP 0 +#define SROM11_5GB1_4080_PA 0 +#define SROM11_2G_PA 1 +#define SROM11_5GB2_4080_PA 2 +#define SROM11_RXGAINS1 4 +#define SROM11_RXGAINS 5 +#define SROM11_5GB3_4080_PA 5 +#define SROM11_5GB1B0_MAXP 6 +#define SROM11_5GB3B2_MAXP 7 +#define SROM11_5GB0_PA 8 +#define SROM11_5GB1_PA 11 +#define SROM11_5GB2_PA 14 +#define SROM11_5GB3_PA 17 + +/* Masks and offsets for rxgains */ +#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS2GTRISOA_MASK 0x0078 +#define SROM11_RXGAINS2GTRISOA_SHIFT 3 +#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0 +#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GHTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078 +#define SROM11_RXGAINS5GMTRISOA_SHIFT 3 +#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0 + +/* Power per rate */ +#define SROM11_CCKBW202GPO 168 +#define SROM11_CCKBW20UL2GPO 169 +#define SROM11_MCSBW202GPO 170 +#define SROM11_MCSBW202GPO_1 171 +#define SROM11_MCSBW402GPO 172 +#define SROM11_MCSBW402GPO_1 173 +#define SROM11_DOT11AGOFDMHRBW202GPO 174 +#define SROM11_OFDMLRBW202GPO 175 + +#define SROM11_MCSBW205GLPO 176 +#define SROM11_MCSBW205GLPO_1 177 +#define SROM11_MCSBW405GLPO 178 +#define SROM11_MCSBW405GLPO_1 179 +#define SROM11_MCSBW805GLPO 180 +#define SROM11_MCSBW805GLPO_1 181 +#define SROM11_RPCAL_2G 182 +#define SROM11_RPCAL_5GL 183 +#define SROM11_MCSBW205GMPO 184 +#define SROM11_MCSBW205GMPO_1 185 +#define SROM11_MCSBW405GMPO 186 +#define SROM11_MCSBW405GMPO_1 187 +#define SROM11_MCSBW805GMPO 188 +#define SROM11_MCSBW805GMPO_1 189 +#define SROM11_RPCAL_5GM 190 +#define SROM11_RPCAL_5GH 191 +#define SROM11_MCSBW205GHPO 192 +#define SROM11_MCSBW205GHPO_1 193 +#define SROM11_MCSBW405GHPO 194 +#define SROM11_MCSBW405GHPO_1 195 +#define SROM11_MCSBW805GHPO 196 +#define SROM11_MCSBW805GHPO_1 197 +#define SROM11_RPCAL_5GU 198 +#define SROM11_PDOFF_2G_CCK 199 +#define SROM11_MCSLR5GLPO 200 +#define SROM11_MCSLR5GMPO 201 +#define SROM11_MCSLR5GHPO 202 + +#define SROM11_SB20IN40HRPO 203 +#define SROM11_SB20IN80AND160HR5GLPO 204 +#define SROM11_SB40AND80HR5GLPO 205 +#define SROM11_SB20IN80AND160HR5GMPO 206 +#define SROM11_SB40AND80HR5GMPO 207 +#define SROM11_SB20IN80AND160HR5GHPO 208 +#define SROM11_SB40AND80HR5GHPO 209 +#define SROM11_SB20IN40LRPO 210 +#define SROM11_SB20IN80AND160LR5GLPO 211 +#define SROM11_SB40AND80LR5GLPO 212 +#define SROM11_TXIDXCAP2G 
212 +#define SROM11_SB20IN80AND160LR5GMPO 213 +#define SROM11_SB40AND80LR5GMPO 214 +#define SROM11_TXIDXCAP5G 214 +#define SROM11_SB20IN80AND160LR5GHPO 215 +#define SROM11_SB40AND80LR5GHPO 216 + +#define SROM11_DOT11AGDUPHRPO 217 +#define SROM11_DOT11AGDUPLRPO 218 + +/* MISC */ +#define SROM11_PCIEINGRESS_WAR 220 +#define SROM11_SAR 221 + +#define SROM11_NOISELVL_2G 222 +#define SROM11_NOISELVL_5GL 223 +#define SROM11_NOISELVL_5GM 224 +#define SROM11_NOISELVL_5GH 225 +#define SROM11_NOISELVL_5GU 226 + +#define SROM11_RXGAINERR_2G 227 +#define SROM11_RXGAINERR_5GL 228 +#define SROM11_RXGAINERR_5GM 229 +#define SROM11_RXGAINERR_5GH 230 +#define SROM11_RXGAINERR_5GU 231 + +#define SROM11_EU_EDCRSTH 232 +#define SROM12_EU_EDCRSTH 232 + +#define SROM11_SIGN 64 +#define SROM11_CRCREV 233 + +#define SROM11_WORDS 234 +#define SROM11_SIGNATURE 0x0634 + +/* SROM REV 12 */ +#define SROM12_SIGN 64 +#define SROM12_WORDS 512 +#define SROM12_SIGNATURE 0x8888 +#define SROM12_CRCREV 511 + +#define SROM12_BFL6 486 +#define SROM12_BFL7 487 + +#define SROM12_MCSBW205GX1PO 234 +#define SROM12_MCSBW205GX1PO_1 235 +#define SROM12_MCSBW405GX1PO 236 +#define SROM12_MCSBW405GX1PO_1 237 +#define SROM12_MCSBW805GX1PO 238 +#define SROM12_MCSBW805GX1PO_1 239 +#define SROM12_MCSLR5GX1PO 240 +#define SROM12_SB40AND80LR5GX1PO 241 +#define SROM12_SB20IN80AND160LR5GX1PO 242 +#define SROM12_SB20IN80AND160HR5GX1PO 243 +#define SROM12_SB40AND80HR5GX1PO 244 + +#define SROM12_MCSBW205GX2PO 245 +#define SROM12_MCSBW205GX2PO_1 246 +#define SROM12_MCSBW405GX2PO 247 +#define SROM12_MCSBW405GX2PO_1 248 +#define SROM12_MCSBW805GX2PO 249 +#define SROM12_MCSBW805GX2PO_1 250 +#define SROM12_MCSLR5GX2PO 251 +#define SROM12_SB40AND80LR5GX2PO 252 +#define SROM12_SB20IN80AND160LR5GX2PO 253 +#define SROM12_SB20IN80AND160HR5GX2PO 254 +#define SROM12_SB40AND80HR5GX2PO 255 + +/* MISC */ +#define SROM12_RXGAINS10 483 +#define SROM12_RXGAINS11 484 +#define SROM12_RXGAINS12 485 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_12 3 +#define SROM12_PATH0 256 +#define SROM12_PATH1 328 +#define SROM12_PATH2 400 + +#define SROM12_5GB42G_MAXP 0 +#define SROM12_2GB0_PA 1 +#define SROM12_2GB0_PA_W0 1 +#define SROM12_2GB0_PA_W1 2 +#define SROM12_2GB0_PA_W2 3 +#define SROM12_2GB0_PA_W3 4 + +#define SROM12_RXGAINS 5 +#define SROM12_5GB1B0_MAXP 6 +#define SROM12_5GB3B2_MAXP 7 + +#define SROM12_5GB0_PA 8 +#define SROM12_5GB0_PA_W0 8 +#define SROM12_5GB0_PA_W1 9 +#define SROM12_5GB0_PA_W2 10 +#define SROM12_5GB0_PA_W3 11 + +#define SROM12_5GB1_PA 12 +#define SROM12_5GB1_PA_W0 12 +#define SROM12_5GB1_PA_W1 13 +#define SROM12_5GB1_PA_W2 14 +#define SROM12_5GB1_PA_W3 15 + +#define SROM12_5GB2_PA 16 +#define SROM12_5GB2_PA_W0 16 +#define SROM12_5GB2_PA_W1 17 +#define SROM12_5GB2_PA_W2 18 +#define SROM12_5GB2_PA_W3 19 + +#define SROM12_5GB3_PA 20 +#define SROM12_5GB3_PA_W0 20 +#define SROM12_5GB3_PA_W1 21 +#define SROM12_5GB3_PA_W2 22 +#define SROM12_5GB3_PA_W3 23 + +#define SROM12_5GB4_PA 24 +#define SROM12_5GB4_PA_W0 24 +#define SROM12_5GB4_PA_W1 25 +#define SROM12_5GB4_PA_W2 26 +#define SROM12_5GB4_PA_W3 27 + +#define SROM12_2G40B0_PA 28 +#define SROM12_2G40B0_PA_W0 28 +#define SROM12_2G40B0_PA_W1 29 +#define SROM12_2G40B0_PA_W2 30 +#define SROM12_2G40B0_PA_W3 31 + +#define SROM12_5G40B0_PA 32 +#define SROM12_5G40B0_PA_W0 32 +#define SROM12_5G40B0_PA_W1 33 +#define SROM12_5G40B0_PA_W2 34 +#define SROM12_5G40B0_PA_W3 35 + +#define SROM12_5G40B1_PA 36 +#define SROM12_5G40B1_PA_W0 36 +#define SROM12_5G40B1_PA_W1 37 +#define SROM12_5G40B1_PA_W2 38 +#define 
SROM12_5G40B1_PA_W3 39 + +#define SROM12_5G40B2_PA 40 +#define SROM12_5G40B2_PA_W0 40 +#define SROM12_5G40B2_PA_W1 41 +#define SROM12_5G40B2_PA_W2 42 +#define SROM12_5G40B2_PA_W3 43 + +#define SROM12_5G40B3_PA 44 +#define SROM12_5G40B3_PA_W0 44 +#define SROM12_5G40B3_PA_W1 45 +#define SROM12_5G40B3_PA_W2 46 +#define SROM12_5G40B3_PA_W3 47 + +#define SROM12_5G40B4_PA 48 +#define SROM12_5G40B4_PA_W0 48 +#define SROM12_5G40B4_PA_W1 49 +#define SROM12_5G40B4_PA_W2 50 +#define SROM12_5G40B4_PA_W3 51 + +#define SROM12_5G80B0_PA 52 +#define SROM12_5G80B0_PA_W0 52 +#define SROM12_5G80B0_PA_W1 53 +#define SROM12_5G80B0_PA_W2 54 +#define SROM12_5G80B0_PA_W3 55 + +#define SROM12_5G80B1_PA 56 +#define SROM12_5G80B1_PA_W0 56 +#define SROM12_5G80B1_PA_W1 57 +#define SROM12_5G80B1_PA_W2 58 +#define SROM12_5G80B1_PA_W3 59 + +#define SROM12_5G80B2_PA 60 +#define SROM12_5G80B2_PA_W0 60 +#define SROM12_5G80B2_PA_W1 61 +#define SROM12_5G80B2_PA_W2 62 +#define SROM12_5G80B2_PA_W3 63 + +#define SROM12_5G80B3_PA 64 +#define SROM12_5G80B3_PA_W0 64 +#define SROM12_5G80B3_PA_W1 65 +#define SROM12_5G80B3_PA_W2 66 +#define SROM12_5G80B3_PA_W3 67 + +#define SROM12_5G80B4_PA 68 +#define SROM12_5G80B4_PA_W0 68 +#define SROM12_5G80B4_PA_W1 69 +#define SROM12_5G80B4_PA_W2 70 +#define SROM12_5G80B4_PA_W3 71 + +/* PD offset */ +#define SROM12_PDOFF_2G_CCK 472 + +#define SROM12_PDOFF_20in40M_5G_B0 473 +#define SROM12_PDOFF_20in40M_5G_B1 474 +#define SROM12_PDOFF_20in40M_5G_B2 475 +#define SROM12_PDOFF_20in40M_5G_B3 476 +#define SROM12_PDOFF_20in40M_5G_B4 477 + +#define SROM12_PDOFF_40in80M_5G_B0 478 +#define SROM12_PDOFF_40in80M_5G_B1 479 +#define SROM12_PDOFF_40in80M_5G_B2 480 +#define SROM12_PDOFF_40in80M_5G_B3 481 +#define SROM12_PDOFF_40in80M_5G_B4 482 + +#define SROM12_PDOFF_20in80M_5G_B0 488 +#define SROM12_PDOFF_20in80M_5G_B1 489 +#define SROM12_PDOFF_20in80M_5G_B2 490 +#define SROM12_PDOFF_20in80M_5G_B3 491 +#define SROM12_PDOFF_20in80M_5G_B4 492 + +#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */ +#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */ + +#define SROM13_SIGN 64 +#define SROM13_WORDS 590 +#define SROM13_SIGNATURE 0x4d55 +#define SROM13_CRCREV 589 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_13 4 +#define SROM13_PATH0 256 +#define SROM13_PATH1 328 +#define SROM13_PATH2 400 +#define SROM13_PATH3 512 +#define SROM13_RXGAINS 5 + +#define SROM13_XTALFREQ 90 + +#define SROM13_PDOFFSET20IN40M2G 94 +#define SROM13_PDOFFSET20IN40M2GCORE3 95 +#define SROM13_SB20IN40HRLRPOX 96 + +#define SROM13_RXGAINS1CORE3 97 + +#define SROM13_PDOFFSET20IN40M5GCORE3 98 +#define SROM13_PDOFFSET20IN40M5GCORE3_1 99 + +#define SROM13_ANTGAIN_BANDBGA 100 + +#define SROM13_PDOFFSET40IN80M5GCORE3 105 +#define SROM13_PDOFFSET40IN80M5GCORE3_1 106 + +/* power per rate */ +#define SROM13_MCS1024QAM2GPO 108 +#define SROM13_MCS1024QAM5GLPO 109 +#define SROM13_MCS1024QAM5GLPO_1 110 +#define SROM13_MCS1024QAM5GMPO 111 +#define SROM13_MCS1024QAM5GMPO_1 112 +#define SROM13_MCS1024QAM5GHPO 113 +#define SROM13_MCS1024QAM5GHPO_1 114 +#define SROM13_MCS1024QAM5GX1PO 115 +#define SROM13_MCS1024QAM5GX1PO_1 116 +#define SROM13_MCS1024QAM5GX2PO 117 +#define SROM13_MCS1024QAM5GX2PO_1 118 + +#define SROM13_MCSBW1605GLPO 119 +#define SROM13_MCSBW1605GLPO_1 120 +#define SROM13_MCSBW1605GMPO 121 +#define SROM13_MCSBW1605GMPO_1 122 +#define SROM13_MCSBW1605GHPO 123 +#define SROM13_MCSBW1605GHPO_1 124 + +#define SROM13_MCSBW1605GX1PO 125 +#define SROM13_MCSBW1605GX1PO_1 126 +#define SROM13_MCSBW1605GX2PO 127 +#define 
SROM13_MCSBW1605GX2PO_1 128 + +#define SROM13_ULBPPROFFS5GB0 129 +#define SROM13_ULBPPROFFS5GB1 130 +#define SROM13_ULBPPROFFS5GB2 131 +#define SROM13_ULBPPROFFS5GB3 132 +#define SROM13_ULBPPROFFS5GB4 133 +#define SROM13_ULBPPROFFS2G 134 + +#define SROM13_MCS8POEXP 135 +#define SROM13_MCS8POEXP_1 136 +#define SROM13_MCS9POEXP 137 +#define SROM13_MCS9POEXP_1 138 +#define SROM13_MCS10POEXP 139 +#define SROM13_MCS10POEXP_1 140 +#define SROM13_MCS11POEXP 141 +#define SROM13_MCS11POEXP_1 142 +#define SROM13_ULBPDOFFS5GB0A0 143 +#define SROM13_ULBPDOFFS5GB0A1 144 +#define SROM13_ULBPDOFFS5GB0A2 145 +#define SROM13_ULBPDOFFS5GB0A3 146 +#define SROM13_ULBPDOFFS5GB1A0 147 +#define SROM13_ULBPDOFFS5GB1A1 148 +#define SROM13_ULBPDOFFS5GB1A2 149 +#define SROM13_ULBPDOFFS5GB1A3 150 +#define SROM13_ULBPDOFFS5GB2A0 151 +#define SROM13_ULBPDOFFS5GB2A1 152 +#define SROM13_ULBPDOFFS5GB2A2 153 +#define SROM13_ULBPDOFFS5GB2A3 154 +#define SROM13_ULBPDOFFS5GB3A0 155 +#define SROM13_ULBPDOFFS5GB3A1 156 +#define SROM13_ULBPDOFFS5GB3A2 157 +#define SROM13_ULBPDOFFS5GB3A3 158 +#define SROM13_ULBPDOFFS5GB4A0 159 +#define SROM13_ULBPDOFFS5GB4A1 160 +#define SROM13_ULBPDOFFS5GB4A2 161 +#define SROM13_ULBPDOFFS5GB4A3 162 +#define SROM13_ULBPDOFFS2GA0 163 +#define SROM13_ULBPDOFFS2GA1 164 +#define SROM13_ULBPDOFFS2GA2 165 +#define SROM13_ULBPDOFFS2GA3 166 + +#define SROM13_RPCAL5GB4 199 +#define SROM13_RPCAL2GCORE3 101 +#define SROM13_RPCAL5GB01CORE3 102 +#define SROM13_RPCAL5GB23CORE3 103 + +#define SROM13_SW_TXRX_MASK 104 + +#define SROM13_EU_EDCRSTH 232 + +#define SROM13_SWCTRLMAP4_CFG 493 +#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0 494 +#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0 495 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0 496 +#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0 497 +#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0 498 +#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0 499 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0 500 +#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0 501 +#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4 502 +#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4 503 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4 504 +#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4 505 +#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4 506 +#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4 507 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4 508 +#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4 509 + +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 + +#define SROM13_NOISELVLCORE3 584 +#define SROM13_NOISELVLCORE3_1 585 +#define SROM13_RXGAINERRCORE3 586 +#define SROM13_RXGAINERRCORE3_1 587 + +#define SROM13_PDOFF_2G_CCK_20M 167 + +#define SROM15_CALDATA_WORDS 943 +#define SROM15_CAL_OFFSET_LOC 68 +#define MAX_IOCTL_TXCHUNK_SIZE 1500 +#define SROM15_MAX_CAL_SIZE 1886 +#define SROM15_SIGNATURE 0x110c +#define SROM15_WORDS 1024 +#define SROM15_MACHI 65 +#define SROM15_CRCREV 1023 +#define SROM15_BRDREV 69 +#define SROM15_CCODE 70 +#define SROM15_REGREV 71 +#define SROM15_SIGN 64 + +#define SROM16_SIGN 128 +#define SROM16_WORDS 1024 +#define SROM16_SFLASH_WORDS 2048U +#define SROM16_SIGNATURE 0x4357 +#define SROM16_CRCREV 1023 +#define SROM16_MACHI (SROM16_SIGN + 1) +#define SROM16_CALDATA_OFFSET_LOC (SROM16_SIGN + 4) +#define SROM16_BOARDREV (SROM16_SIGN + 5) +#define SROM16_CCODE (SROM16_SIGN + 6) +#define SROM16_REGREV (SROM16_SIGN + 7) + +#define SROM_CALDATA_WORDS 832 + +#define SROM17_SIGN 64 +#define SROM17_BRDREV 65 +#define SROM17_MACADDR 66 +#define SROM17_CCODE 69 +#define SROM17_CALDATA 70 +#define SROM17_GCALTMP 71 + +#define SROM17_C0SRD202G 72 +#define 
SROM17_C0SRD202G_1 73 +#define SROM17_C0SRD205GL 74 +#define SROM17_C0SRD205GL_1 75 +#define SROM17_C0SRD205GML 76 +#define SROM17_C0SRD205GML_1 77 +#define SROM17_C0SRD205GMU 78 +#define SROM17_C0SRD205GMU_1 79 +#define SROM17_C0SRD205GH 80 +#define SROM17_C0SRD205GH_1 81 + +#define SROM17_C1SRD202G 82 +#define SROM17_C1SRD202G_1 83 +#define SROM17_C1SRD205GL 84 +#define SROM17_C1SRD205GL_1 85 +#define SROM17_C1SRD205GML 86 +#define SROM17_C1SRD205GML_1 87 +#define SROM17_C1SRD205GMU 88 +#define SROM17_C1SRD205GMU_1 89 +#define SROM17_C1SRD205GH 90 +#define SROM17_C1SRD205GH_1 91 + +#define SROM17_TRAMMAGIC 92 +#define SROM17_TRAMMAGIC_1 93 +#define SROM17_TRAMDATA 94 + +#define SROM17_WORDS 256 +#define SROM17_CRCREV 255 +#define SROM17_CALDATA_WORDS 161 +#define SROM17_SIGNATURE 0x1103 /* 4355 in hex format */ + +#define SROM18_SIGN 112 +#define SROM18_WORDS 1024 +#define SROM18_SIGNATURE 0x4377 +#define SROM18_CRCREV 1023 +#define SROM18_MACHI (SROM18_SIGN + 1) +#define SROM18_CALDATA_OFFSET_LOC (SROM18_SIGN + 4) +#define SROM18_BOARDREV (SROM18_SIGN + 5) +#define SROM18_CCODE (SROM18_SIGN + 6) +#define SROM18_REGREV (SROM18_SIGN + 7) +#define SROM18_CALDATA_WORDS (SROM18_WORDS - SROM18_CALDATA_OFFSET_LOC) + +typedef struct { + uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */ + uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */ + uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */ + uint8 triso; /* TR switch isolation */ + uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */ +} srom_fem_t; + +#endif /* _bcmsrom_fmt_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmsrom_tbl.h b/bcmdhd.101.10.361.x/include/bcmsrom_tbl.h new file mode 100755 index 0000000..2485603 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmsrom_tbl.h @@ -0,0 +1,1303 @@ +/* + * Table that encodes the srom formats for PCI/PCIe NICs. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmsrom_tbl_h_
+#define _bcmsrom_tbl_h_
+
+#include <typedefs.h>
+#include <bcmsrom_fmt.h>
+
+typedef struct {
+ const char *name;
+ uint32 revmask;
+ uint32 flags;
+ uint16 off;
+ uint16 mask;
+} sromvar_t;
+
+#define SRFL_MORE 1 /* value continues as described by the next entry */
+#define SRFL_NOFFS 2 /* value bits can't be all one's */
+#define SRFL_PRHEX 4 /* value is in hexadecimal format */
+#define SRFL_PRSIGN 8 /* value is in signed decimal format */
+#define SRFL_CCODE 0x10 /* value is in country code format */
+#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
+#define SRFL_UNUSED 0x40 /* unused, was SRFL_LEDDC */
+#define SRFL_NOVAR 0x80 /* do not generate an nvram param, entry is for mfgc */
+#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST
+ * ONE in the array should have this flag set.
+ */
+#define PRHEX_N_MORE (SRFL_PRHEX | SRFL_MORE)
+
+#define SROM_DEVID_PCIE 48
+
+/**
+ * Assumptions:
+ * - Ethernet address spans across 3 consecutive words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple words
+ * (even multiple fields in the same word) with each entry except the last having
+ * its SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
+ * bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table. Other
+ * entries must have non-NULL name.
+ */
+#if !defined(SROM15_MEMOPT)
+static const sromvar_t BCMATTACHDATA(pci_sromvars)[] = {
+/* name revmask flags off mask */
+#if defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED)
+ {"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff},
+#else
+ {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff},
+#endif /* BCMPCIEDEV && BCMPCIEDEV_ENABLED */
+ {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
+ {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
+ {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
+ {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
+ {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM_BFL2, 0xffff},
+ {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM3_BFL2, 0xffff},
+ {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff},
+ {"", 0, 0, SROM4_BFL1, 0xffff},
+ {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff},
+ {"", 0, 0, SROM5_BFL1, 0xffff},
+ {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff},
+ {"", 0, 0, SROM8_BFL1, 0xffff},
+ {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff},
+ {"", 0, 0, SROM4_BFL3, 0xffff},
+ {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff},
+ {"", 0, 0, SROM5_BFL3, 0xffff},
+ {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff},
+ {"", 0, 0, SROM8_BFL3, 0xffff},
+ {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
+ {"subvid", 0xfffffffc, SRFL_PRHEX, SROM_SVID, 0xffff},
+ {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
+ {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff},
+ {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff},
+ {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff},
+ {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff},
+ {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
+ {"regrev", 0x00000008, 0, SROM_OPO, 0xff00},
+ {"regrev", 0x00000010, 0, SROM4_REGREV, 0xffff},
+ {"regrev", 0x000000e0, 0, SROM5_REGREV, 0xffff},
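+ /* revmask is a bitmask of the sromrevs an entry applies to (bit N
+ * <=> sromrev N); e.g. the 0x000000e0 entry above covers sromrevs 5-7
+ * and the 0x00000700 entry below covers sromrevs 8-10.
+ */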
+ {"regrev", 0x00000700, 0, SROM8_REGREV, 0xffff}, + {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff}, + {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff}, + {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff}, + {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff}, + {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff}, + {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff}, + {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff}, + {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff}, + {"pa0itssit", 0x00000700, 0, SROM8_W0_ITTMAXP, 0xff00}, + {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff}, + {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff}, + {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff}, + {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK}, + {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff}, + {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff}, + {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK}, + {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00}, + {"aa5g", 0x00000700, 0, SROM8_AA, 0xff00}, + {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff}, + {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00}, + {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff}, + {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00}, + {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff}, + {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00}, + {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff}, + {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00}, + {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff}, + {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00}, + {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff}, + {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff}, + {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff}, + {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff}, + {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff}, + {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff}, + {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff}, + {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff}, + {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff}, + {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00}, + {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00}, + {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00}, + {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff}, + {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff}, + {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff}, + {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff}, + {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff}, + {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff}, + {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff}, + {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff}, + {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff}, + {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff}, + {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00}, + {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff}, + {"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00}, + {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff}, + {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f}, + {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f}, + {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800}, + 
{"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f}, + {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f}, + {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff}, + {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00}, + {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00}, + {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff}, + {"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00}, + {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00}, + {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00}, + {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00}, + {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK}, + {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK}, + {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK}, + {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK}, + {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff}, + {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00}, + {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff}, + {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00}, + {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff}, + {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00}, + {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff}, + {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00}, + {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff}, + {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00}, + {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff}, + {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00}, + {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff}, + {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00}, + {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff}, + {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00}, + + {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff}, + {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff}, + {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff}, + {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff}, + {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff}, + {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff}, + {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff}, + {"macaddr", 0x00000008, 
SRFL_ETHADDR, SROM3_MACHI, 0xffff}, + {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff}, + {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff}, + + {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00}, + {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff}, + {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff}, + {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300}, + {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f}, + {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010}, + {"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020}, + {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff}, + {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00}, + {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80}, + + {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff}, + {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff}, + {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff}, + {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff}, + {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff}, 
+ {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff}, + {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff}, + {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff}, + {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff}, + {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff}, + {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff}, + {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff}, + {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff}, + {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff}, + {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff}, + + /* power per rate from sromrev 9 */ + {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff}, + {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff}, + {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5glpo", 
0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
+ {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
+ {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff},
+ {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff},
+ {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf},
+ {"eu_edthresh2g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0xff00},
+ {"eu_edthresh2g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0xff00},
+ {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0},
+ {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800},
+ {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f},
+ {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0},
+ {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800},
+ {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f},
+ {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0},
+ {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800},
+ {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f},
+ {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0},
+ {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800},
+ {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f},
+ {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0},
+ {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800},
+
{"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff}, + {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00}, + {"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00}, + {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f}, + {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0}, + {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00}, + {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f}, + {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0}, + {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00}, + {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f}, + {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0}, + {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00}, + {"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f}, + {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0}, + {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00}, + {"noisecaloffset", 0x00000300, 0, SROM8_NOISECALOFFSET, 0x00ff}, + {"noisecaloffset5g", 0x00000300, 0, SROM8_NOISECALOFFSET, 0xff00}, + {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7}, + + {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff}, + {"eu_edthresh2g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0xff00}, + /* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */ + {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff}, + {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff}, + + /* sromrev 11 */ + {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL4, 0xffff}, + {"", 0, 0, SROM11_BFL5, 0xffff}, + {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff}, + {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff}, + {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff}, + {"regrev", 0xfffff800, 0, SROM11_REGREV, 0xffff}, + {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff}, + {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00}, + {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0xff00}, + {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0x00ff}, + {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00}, + {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff}, + {"aga1", 0xfffff800, 0, SROM11_AGA21, 0xff00}, + {"aga2", 0xfffff800, 0, SROM11_AGA21, 0x00ff}, + {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK}, + + {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001}, + {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e}, + {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0}, + {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200}, + {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400}, + {"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800}, + + {"tssiposslope5g", 
0xfffff800, 0, SROM11_FEM_CFG2, 0x0001}, + {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e}, + {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0}, + {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200}, + {"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400}, + {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800}, + + {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00}, + {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff}, + {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff}, + {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300}, + {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff}, + {"txpwrbckof", 0x00000800, SRFL_PRHEX, SROM11_PATH0 + SROM11_2G_MAXP, 0xff00}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */ + {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff}, + {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80}, + {"tssifloor2g", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_2G, 0x03ff}, + {"tssifloor5g", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_5GU, 0x03ff}, + {"pdoffset2g40ma0", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x000f}, + {"pdoffset2g40ma1", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x00f0}, + {"pdoffset2g40ma2", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x0f00}, + {"pdoffset2g40mvalid", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x8000}, + {"pdoffset40ma0", 0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff}, + {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff}, + {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff}, + {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff}, + {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff}, + {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff}, + + {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff}, + {"paparambwver", 0xfffff800, 
0, SROM11_MCSLR5GLPO, 0xf000}, + {"rx5ggainwar", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0x2000}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */ + {"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 40 MHz BW */ + {"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 80 MHz BW */ + {"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 2G Band, CCK */ + {"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | 
SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff}, + + /* power per rate */ + {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff}, + {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff}, + {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff}, + {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff}, + {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff}, + {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff}, + {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff}, + {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff}, + {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff}, + {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff}, + {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff}, + {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff}, + {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff}, + {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff}, + {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff}, + {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0x0fff}, + {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff}, + {"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff}, + {"sb20in40hrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff}, + {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff}, + {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff}, + {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff}, + {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff}, + {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff}, + {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff}, + {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff}, + {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff}, + {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff}, + {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff}, + {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff}, + {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff}, + {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff}, + {"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff}, + {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff}, + + /* Misc */ + {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff}, + {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00}, + + {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00}, + {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f}, + 
{"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f}, + {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0}, + {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00}, + {"eu_edthresh2g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0xff00}, + + {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800}, + {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5ga2", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800}, + {"rpcal2g", 0xfffff800, 0, SROM11_RPCAL_2G, 0xffff}, + {"rpcal5gb0", 0xfffff800, 0, SROM11_RPCAL_5GL, 0xffff}, + {"rpcal5gb1", 0xfffff800, 0, SROM11_RPCAL_5GM, 0xffff}, + {"rpcal5gb2", 0xfffff800, 0, SROM11_RPCAL_5GH, 0xffff}, + {"rpcal5gb3", 0xfffff800, 0, SROM11_RPCAL_5GU, 0xffff}, + {"txidxcap2g", 0xfffff800, 0, SROM11_TXIDXCAP2G, 0x0ff0}, + {"txidxcap5g", 0xfffff800, 0, SROM11_TXIDXCAP5G, 0x0ff0}, + {"pdoffsetcckma0", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x000f}, + {"pdoffsetcckma1", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x00f0}, + {"pdoffsetcckma2", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x0f00}, + + /* sromrev 12 */ + {"boardflags4", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_BFL6, 0xffff}, + {"", 0, 0, SROM12_BFL7, 0xffff}, + {"pdoffsetcck", 0xfffff000, 0, SROM12_PDOFF_2G_CCK, 0xffff}, + {"pdoffset20in40m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B0, 0xffff}, + {"pdoffset20in40m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B1, 0xffff}, + {"pdoffset20in40m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B2, 0xffff}, + {"pdoffset20in40m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B3, 0xffff}, + {"pdoffset20in40m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B4, 0xffff}, + {"pdoffset40in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B0, 0xffff}, + {"pdoffset40in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B1, 0xffff}, + {"pdoffset40in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B2, 0xffff}, + {"pdoffset40in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B3, 0xffff}, + {"pdoffset40in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B4, 0xffff}, + {"pdoffset20in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B0, 0xffff}, + {"pdoffset20in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B1, 0xffff}, + {"pdoffset20in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B2, 
0xffff}, + {"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff}, + {"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff}, + + /* power per rate */ + {"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff}, + {"mcsbw405gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX1PO_1, 0xffff}, + {"mcsbw805gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX1PO_1, 0xffff}, + {"mcsbw205gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX2PO_1, 0xffff}, + {"mcsbw405gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX2PO_1, 0xffff}, + {"mcsbw805gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX2PO_1, 0xffff}, + + {"sb20in80and160hr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160lr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160hr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + {"sb20in80and160lr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + + {"rxgains5gmelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0007}, + {"rxgains5gmelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0007}, + {"rxgains5gmelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0007}, + {"rxgains5gmtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0078}, + {"rxgains5gmtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0078}, + {"rxgains5gmtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0078}, + {"rxgains5gmtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0080}, + {"rxgains5gmtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0080}, + {"rxgains5gmtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0080}, + {"rxgains5ghelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0700}, + {"rxgains5ghelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0700}, + {"rxgains5ghelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0700}, + {"rxgains5ghtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x7800}, + {"rxgains5ghtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x7800}, + {"rxgains5ghtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x7800}, + {"rxgains5ghtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x8000}, + {"rxgains5ghtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x8000}, + {"rxgains5ghtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x8000}, + {"eu_edthresh2g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0xff00}, + + {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff}, + {"", 0, 0, SROM12_GPDN_H, 0xffff}, + + {"rpcal2gcore3", 0xffffe000, 0, SROM13_RPCAL2GCORE3, 0x00ff}, + {"rpcal5gb0core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0x00ff}, + {"rpcal5gb1core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0xff00}, + {"rpcal5gb2core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0x00ff}, + {"rpcal5gb3core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0xff00}, + + {"sw_txchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x000f}, + {"sw_rxchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x00f0}, + + 
{"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00}, + + {"agbg3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0xff00}, + {"aga3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0x00ff}, + {"noiselvl2ga3", 0xffffe000, 0, SROM13_NOISELVLCORE3, 0x001f}, + {"noiselvl5ga3", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_NOISELVLCORE3_1, 0x03e0}, + {"rxgainerr2ga3", 0xffffe000, 0, SROM13_RXGAINERRCORE3, 0x001f}, + {"rxgainerr5ga3", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_RXGAINERRCORE3_1, 0x03e0}, + {"rxgains5gmelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0007}, + {"rxgains5gmtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0078}, + {"rxgains5gmtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0080}, + {"rxgains5ghelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0700}, + {"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800}, + {"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000}, + + /* pdoffset */ + {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff}, + {"pdoffsetcck20m", 0xffffe000, 0, SROM13_PDOFF_2G_CCK_20M, 0xffff}, + + /* power per rate */ + {"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff}, + {"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GLPO_1, 0xffff}, + {"mcs1024qam5gmpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GMPO_1, 0xffff}, + {"mcs1024qam5ghpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GHPO_1, 0xffff}, + {"mcs1024qam5gx1po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX1PO_1, 0xffff}, + {"mcs1024qam5gx2po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX2PO_1, 0xffff}, + + {"mcsbw1605glpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GLPO_1, 0xffff}, + {"mcsbw1605gmpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GMPO_1, 0xffff}, + {"mcsbw1605ghpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GHPO_1, 0xffff}, + {"mcsbw1605gx1po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX1PO_1, 0xffff}, + {"mcsbw1605gx2po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX2PO_1, 0xffff}, + + {"ulbpproffs2g", 0xffffe000, 0, 
SROM13_ULBPPROFFS2G, 0xffff}, + + {"mcs8poexp", 0xffffe000, SRFL_MORE, SROM13_MCS8POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS8POEXP_1, 0xffff}, + {"mcs9poexp", 0xffffe000, SRFL_MORE, SROM13_MCS9POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS9POEXP_1, 0xffff}, + {"mcs10poexp", 0xffffe000, SRFL_MORE, SROM13_MCS10POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS10POEXP_1, 0xffff}, + {"mcs11poexp", 0xffffe000, SRFL_MORE, SROM13_MCS11POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS11POEXP_1, 0xffff}, + + {"ulbpdoffs5gb0a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A0, 0xffff}, + {"ulbpdoffs5gb0a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A1, 0xffff}, + {"ulbpdoffs5gb0a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A2, 0xffff}, + {"ulbpdoffs5gb0a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A3, 0xffff}, + {"ulbpdoffs5gb1a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A0, 0xffff}, + {"ulbpdoffs5gb1a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A1, 0xffff}, + {"ulbpdoffs5gb1a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A2, 0xffff}, + {"ulbpdoffs5gb1a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A3, 0xffff}, + {"ulbpdoffs5gb2a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A0, 0xffff}, + {"ulbpdoffs5gb2a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A1, 0xffff}, + {"ulbpdoffs5gb2a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A2, 0xffff}, + {"ulbpdoffs5gb2a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A3, 0xffff}, + {"ulbpdoffs5gb3a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A0, 0xffff}, + {"ulbpdoffs5gb3a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A1, 0xffff}, + {"ulbpdoffs5gb3a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A2, 0xffff}, + {"ulbpdoffs5gb3a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A3, 0xffff}, + {"ulbpdoffs5gb4a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A0, 0xffff}, + {"ulbpdoffs5gb4a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A1, 0xffff}, + {"ulbpdoffs5gb4a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A2, 0xffff}, + {"ulbpdoffs5gb4a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A3, 0xffff}, + {"ulbpdoffs2ga0", 0xffffe000, 0, SROM13_ULBPDOFFS2GA0, 0xffff}, + {"ulbpdoffs2ga1", 0xffffe000, 0, SROM13_ULBPDOFFS2GA1, 0xffff}, + {"ulbpdoffs2ga2", 0xffffe000, 0, SROM13_ULBPDOFFS2GA2, 0xffff}, + {"ulbpdoffs2ga3", 0xffffe000, 0, SROM13_ULBPDOFFS2GA3, 0xffff}, + + {"rpcal5gb4", 0xffffe000, 0, SROM13_RPCAL5GB4, 0xffff}, + + {"sb20in40hrlrpox", 0xffffe000, 0, SROM13_SB20IN40HRLRPOX, 0xffff}, + + {"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff}, + {"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff}, + {"swctrlmap4_TX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX5g_fem7to4", 
0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff}, + {NULL, 0, 0, 0, 0} +}; +#endif /* !defined(SROM15_MEMOPT) */ + +static const sromvar_t BCMATTACHDATA(pci_srom15vars)[] = { + {"macaddr", 0x00008000, SRFL_ETHADDR, SROM15_MACHI, 0xffff}, + {"caldata_offset", 0x00008000, 0, SROM15_CAL_OFFSET_LOC, 0xffff}, + {"boardrev", 0x00008000, SRFL_PRHEX, SROM15_BRDREV, 0xffff}, + {"ccode", 0x00008000, SRFL_CCODE, SROM15_CCODE, 0xffff}, + {"regrev", 0x00008000, 0, SROM15_REGREV, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t BCMATTACHDATA(pci_srom16vars)[] = { + {"macaddr", 0x00010000, SRFL_ETHADDR, SROM16_MACHI, 0xffff}, + {"caldata_offset", 0x00010000, 0, SROM16_CALDATA_OFFSET_LOC, 0xffff}, + {"boardrev", 0x00010000, SRFL_PRHEX, SROM16_BOARDREV, 0xffff}, + {"ccode", 0x00010000, SRFL_CCODE, SROM16_CCODE, 0xffff}, + {"regrev", 0x00010000, 0, SROM16_REGREV, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t BCMATTACHDATA(pci_srom17vars)[] = { + {"boardrev", 0x00020000, SRFL_PRHEX, SROM17_BRDREV, 0xffff}, + {"macaddr", 0x00020000, SRFL_ETHADDR, SROM17_MACADDR, 0xffff}, + {"ccode", 0x00020000, SRFL_CCODE, SROM17_CCODE, 0xffff}, + {"caldata_offset", 0x00020000, 0, SROM17_CALDATA, 0xffff}, + {"gain_cal_temp", 0x00020000, SRFL_PRHEX, SROM17_GCALTMP, 0xffff}, + {"rssi_delta_2gb0_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD202G, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD202G_1, 0xffff}, + {"rssi_delta_5gl_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GL, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GL_1, 0xffff}, + {"rssi_delta_5gml_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GML, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GML_1, 0xffff}, + {"rssi_delta_5gmu_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GMU, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GMU_1, 0xffff}, + {"rssi_delta_5gh_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GH, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GH_1, 0xffff}, + {"rssi_delta_2gb0_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD202G, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD202G_1, 0xffff}, + {"rssi_delta_5gl_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GL, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GL_1, 0xffff}, + {"rssi_delta_5gml_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GML, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GML_1, 0xffff}, + {"rssi_delta_5gmu_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GMU, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GMU_1, 0xffff}, + {"rssi_delta_5gh_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GH, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GH_1, 0xffff}, + {"txpa_trim_magic", 0x00020000, PRHEX_N_MORE, SROM17_TRAMMAGIC, 0xffff}, + {"", 0x00020000, 0, SROM17_TRAMMAGIC_1, 0xffff}, + {"txpa_trim_data", 0x00020000, SRFL_PRHEX, SROM17_TRAMDATA, 0xffff}, + {NULL, 0, 0, 0, 0x00} +}; + +static const sromvar_t BCMATTACHDATA(pci_srom18vars)[] = { + {"macaddr", 0x00040000, SRFL_ETHADDR, SROM18_MACHI, 0xffff}, + {"caldata_offset", 0x00040000, 0, SROM18_CALDATA_OFFSET_LOC, 0xffff}, + {"boardrev", 0x00040000, SRFL_PRHEX, SROM18_BOARDREV, 0xffff}, + {"ccode", 0x00040000, SRFL_CCODE, SROM18_CCODE, 0xffff}, + {"regrev", 0x00040000, 0, SROM18_REGREV, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t BCMATTACHDATA(perpath_pci_sromvars)[] = { + {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x000000f0, 
0, SROM4_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff}, + {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff}, + {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff}, + {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff}, + {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff}, + {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff}, + {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff}, + {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff}, + {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff}, + {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff}, + {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff}, + {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff}, + {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff}, + {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff}, + {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff}, + {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff}, + {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff}, + {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff}, + + /* sromrev 11 */ + {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff}, + {"pa2ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff}, + {"rxgains5gmelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0007}, + {"rxgains5gmtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x0078}, + {"rxgains5gmtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x0080}, + {"rxgains5ghelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0700}, + {"rxgains5ghtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x7800}, + {"rxgains5ghtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x8000}, + {"rxgains2gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x8000}, + {"maxp5ga", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff}, + {"", 0x00000800, SRFL_ARRAY, 
SROM11_5GB1B0_MAXP, 0xff00}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff}, + {"", 0x00000800, 0, SROM11_5GB3B2_MAXP, 0xff00}, + {"pa5ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff}, + + /* sromrev 12 */ + {"maxp5gb4a", 0xfffff000, 0, SROM12_5GB42G_MAXP, 0x00ff00}, + {"pa2ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2GB0_PA_W3, 0x00ffff}, + + {"pa2g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2G40B0_PA_W3, 0x00ffff}, + {"maxp5gb0a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff}, + {"maxp5gb1a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff00}, + {"maxp5gb2a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff}, + {"maxp5gb3a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff00}, + + {"pa5ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5GB4_PA_W3, 0x00ffff}, + + {"pa5g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W1, 0x00ffff}, + 
{"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G40B4_PA_W3, 0x00ffff}, + + {"pa5g80a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G80B4_PA_W3, 0x00ffff}, + /* sromrev 13 */ + {"rxgains2gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x8000}, + {NULL, 0, 0, 0, 0} +}; + +typedef struct { + uint8 tag; /* Broadcom subtag name */ + uint32 revmask; /* Supported cis_sromrev bitmask. Some of the parameters in + * different tuples have the same name. 
Therefore, the MFGc tool
+ * needs to know which tuple to generate when seeing these
+ * parameters (given that we know sromrev from user input, like the
+ * nvram file).
+ */
+ uint8 len; /* Length field of the tuple, note that it includes the
+ * subtag name (1 byte): 1 + tuple content length
+ */
+ const char *params; /* Each param is in this form: length(1 byte ascii) + var name
+ * Note that the order here has to match the parsing
+ * order in parsecis() in src/shared/bcmsrom.c
+ */
+} cis_tuple_t;
+
+#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */
+/* quick hacks for supporting standard CIS tuples. */
+#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */
+#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */
+#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */
+
+/** this array is used by CIS creating/writing applications */
+static const cis_tuple_t cis_hnbuvars[] = {
+/* tag revmask len params */
+ {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */
+ {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */
+ {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */
+ /* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+ {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"},
+ {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"},
+ /* NOTE: subdevid is also written to boardtype.
+ * Need to write HNBU_BOARDTYPE to change it if it is different.
+ */
+ {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"},
+ {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"},
+ {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"},
+ {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"},
+ {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */
+ {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"},
+ {HNBU_BOARDFLAGS, 0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 "
+ "4boardflags4 4boardflags5 "},
+ {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"},
+ {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"},
+ {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"},
+ {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 "
+ "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit "
+ "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"},
+ {HNBU_RDLID, 0xffffffff, 3, "2rdlid"},
+ {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g "
+ "0rssisav2g 0bxa2g"}, /* special case */
+ {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g "
+ "0rssisav5g 0bxa5g"}, /* special case */
+ {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"},
+ {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"},
+ {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"},
+ {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"},
+ {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"},
+ {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"},
+ {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */
+ {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"},
+ {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"},
+ {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"},
+ {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"},
+ {HNBU_REGREV, 0xffffffff, 3, "2regrev"},
+ {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g "
+ "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */
+ {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 "
+ "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 "
+ "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"},
+ {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1
2pa2gw1a1 " + "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 " + "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"}, + {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo " + "4ofdm5ghpo"}, + {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 " + "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"}, + {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 " + "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"}, + {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 " + "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 " + "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 " + "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"}, + {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"}, + {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"}, + {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"}, + {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"}, + {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"}, + {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"}, + {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"}, + {HNBU_USBFS, 0xffffffff, 2, "1usbfs"}, + {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"}, + {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"}, + {HNBU_CUSTOM1, 0xffffffff, 5, "4customvar1"}, + {OTP_RAW, 0xffffffff, 0, ""}, /* special case */ + {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"}, + {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"}, + {HNBU_CCKBW202GPO, 0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"}, + {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"}, + {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo " + "4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"}, + {HNBU_MCS2GPO, 0xffffffff, 17, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"}, + {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"}, + {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"}, + {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"}, + {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"}, + {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"}, + {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis " + "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option " + "1phycal_tempdelta"}, /* special case */ + {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"}, + {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g " + "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g " + "0tssiposslope5g"}, /* special case */ + {HNBU_ACPA_C0, 0x00001800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 " + "1*4maxp5ga0 2*12pa5ga0"}, + {HNBU_ACPA_C1, 0x00001800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"}, + {HNBU_ACPA_C2, 0x00001800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"}, + {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"}, + {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 " + "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"}, + {HNBU_ACPPR_2GPO, 0xfffff800, 13, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo " + "2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo " + "2sb20in80ofdmlrbw202gpo"}, + {HNBU_ACPPR_5GPO, 0xfffff800, 59, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo " + "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo " + "4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po " + 
"2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"}, + {HNBU_MCS5Gx1PO, 0xfffff800, 9, "4mcsbw205gx1po 4mcsbw405gx1po"}, + {HNBU_ACPPR_SBPO, 0xfffff800, 49, "2sb20in40hrpo 2sb20in80and160hr5glpo " + "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo " + "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo " + "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo " + "4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo " + "2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po " + }, + {HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo " + "2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo " + "2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo " + "2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo " + "2sb20in80p80lr5gpo 2dot11agduppo"}, + {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 " + "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"}, + {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 " + "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"}, + {HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"}, + {HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"}, + {HNBU_UUID, 0xffffffff, 17, "16uuid"}, + {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"}, + {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 " + "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 " + "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 " + "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */ + {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 " + "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 " + "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 " + "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case */ + {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 " + "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 " + "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 " + "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */ + {HNBU_TXDUTY, 0xfffff800, 9, "2tx_duty_cycle_ofdm_40_5g " + "2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"}, + {HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 " + "0pdoffset2g40ma2 0pdoffset2g40mvalid"}, + {HNBU_ACPA_CCK_C0, 0xfffff800, 7, "2*3pa2gccka0"}, + {HNBU_ACPA_CCK_C1, 0xfffff800, 7, "2*3pa2gccka1"}, + {HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"}, + {HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"}, + {HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"}, + {HNBU_ACPA_4X4C0, 0xffffe000, 23, "1maxp2ga0 2*4pa2ga0 2*4pa2g40a0 " + "1maxp5gb0a0 1maxp5gb1a0 1maxp5gb2a0 1maxp5gb3a0 1maxp5gb4a0"}, + {HNBU_ACPA_4X4C1, 0xffffe000, 23, "1maxp2ga1 2*4pa2ga1 2*4pa2g40a1 " + "1maxp5gb0a1 1maxp5gb1a1 1maxp5gb2a1 1maxp5gb3a1 1maxp5gb4a1"}, + {HNBU_ACPA_4X4C2, 0xffffe000, 23, "1maxp2ga2 2*4pa2ga2 2*4pa2g40a2 " + "1maxp5gb0a2 1maxp5gb1a2 1maxp5gb2a2 1maxp5gb3a2 1maxp5gb4a2"}, + {HNBU_ACPA_4X4C3, 0xffffe000, 23, "1maxp2ga3 2*4pa2ga3 2*4pa2g40a3 " + "1maxp5gb0a3 1maxp5gb1a3 1maxp5gb2a3 1maxp5gb3a3 1maxp5gb4a3"}, + {HNBU_ACPA_BW20_4X4C0, 
0xffffe000, 41, "2*20pa5ga0"},
+ {HNBU_ACPA_BW40_4X4C0, 0xffffe000, 41, "2*20pa5g40a0"},
+ {HNBU_ACPA_BW80_4X4C0, 0xffffe000, 41, "2*20pa5g80a0"},
+ {HNBU_ACPA_BW20_4X4C1, 0xffffe000, 41, "2*20pa5ga1"},
+ {HNBU_ACPA_BW40_4X4C1, 0xffffe000, 41, "2*20pa5g40a1"},
+ {HNBU_ACPA_BW80_4X4C1, 0xffffe000, 41, "2*20pa5g80a1"},
+ {HNBU_ACPA_BW20_4X4C2, 0xffffe000, 41, "2*20pa5ga2"},
+ {HNBU_ACPA_BW40_4X4C2, 0xffffe000, 41, "2*20pa5g40a2"},
+ {HNBU_ACPA_BW80_4X4C2, 0xffffe000, 41, "2*20pa5g80a2"},
+ {HNBU_ACPA_BW20_4X4C3, 0xffffe000, 41, "2*20pa5ga3"},
+ {HNBU_ACPA_BW40_4X4C3, 0xffffe000, 41, "2*20pa5g40a3"},
+ {HNBU_ACPA_BW80_4X4C3, 0xffffe000, 41, "2*20pa5g80a3"},
+ {HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"},
+ {HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"},
+ {HNBU_TXBFRPCALS, 0xfffff800, 11,
+ "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */
+ {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"},
+ {HNBU_MACADDR2, 0xffffffff, 7, "6macaddr2"}, /* special case */
+ {HNBU_RSSI_DELTA_2G_B0, 0xffffffff, 17, "1*16rssi_delta_2gb0"},
+ {HNBU_RSSI_DELTA_2G_B1, 0xffffffff, 17, "1*16rssi_delta_2gb1"},
+ {HNBU_RSSI_DELTA_2G_B2, 0xffffffff, 17, "1*16rssi_delta_2gb2"},
+ {HNBU_RSSI_DELTA_2G_B3, 0xffffffff, 17, "1*16rssi_delta_2gb3"},
+ {HNBU_RSSI_DELTA_2G_B4, 0xffffffff, 17, "1*16rssi_delta_2gb4"},
+ {HNBU_RSSI_CAL_FREQ_GRP_2G, 0xffffffff, 8, "1*7rssi_cal_freq_grp"},
+ {HNBU_RSSI_DELTA_5GL, 0xffffffff, 25, "1*24rssi_delta_5gl"},
+ {HNBU_RSSI_DELTA_5GML, 0xffffffff, 25, "1*24rssi_delta_5gml"},
+ {HNBU_RSSI_DELTA_5GMU, 0xffffffff, 25, "1*24rssi_delta_5gmu"},
+ {HNBU_RSSI_DELTA_5GH, 0xffffffff, 25, "1*24rssi_delta_5gh"},
+ {HNBU_ACPA_6G_C0, 0x00000800, 45, "2subband6gver 1*6maxp6ga0 2*18pa6ga0 "},
+ {HNBU_ACPA_6G_C1, 0x00000800, 43, "1*6maxp6ga1 2*18pa6ga1 "},
+ {HNBU_ACPA_6G_C2, 0x00000800, 43, "1*6maxp6ga2 2*18pa6ga2 "},
+ {0xFF, 0xffffffff, 0, ""}
+};
+
+#endif /* _bcmsrom_tbl_h_ */
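Each row of the sromvar tables above is a (name, revmask, flags, offset, mask) tuple: revmask bit n marks the entry valid for sromrev n, and the mask selects a field inside one 16-bit SROM word. The sketch below shows how such an entry is consumed, in the spirit of the parsing loop in bcmsrom.c; the standalone struct, the helper name, and the numeric word offset are placeholders invented for this example, not driver definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's sromvar_t; the fields mirror the table columns. */
typedef struct {
	const char *name;	/* NVRAM variable name */
	uint32_t revmask;	/* bit n set => entry is valid for sromrev n */
	uint32_t flags;		/* SRFL_* flags */
	uint16_t off;		/* word offset into the 16-bit SROM image */
	uint16_t mask;		/* bit mask within that word */
} example_sromvar_t;

/* Mask the word, then shift the value down so the mask's lowest bit lands at bit 0. */
static uint16_t srom_read_field(const uint16_t *srom, const example_sromvar_t *v)
{
	uint16_t val = srom[v->off] & v->mask;
	uint16_t m = v->mask;

	while (m != 0 && (m & 1u) == 0) {
		val >>= 1;
		m >>= 1;
	}
	return val;
}

int main(void)
{
	/* Mirrors {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff}:
	 * valid for sromrev 8..10, low byte of the THERMAL word.
	 * Offset 10 is a placeholder, not the real SROM8_THERMAL value.
	 */
	uint16_t srom[64] = {0};
	const example_sromvar_t tempoffset = {"tempoffset", 0x00000700, 0, 10, 0x00ff};

	srom[10] = 0x2a1c;	/* high byte tempthresh=0x2a, low byte tempoffset=0x1c */
	printf("%s=0x%x\n", tempoffset.name, srom_read_field(srom, &tempoffset));
	return 0;
}

The same mask-and-shift reading applies to every table above; entries sharing a name but carrying different revmasks simply select different word layouts per SROM revision.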
+ * + * + * <> + */ + +#ifndef _bcmstdlib_s_h_ +#define _bcmstdlib_s_h_ + +#ifndef BWL_NO_INTERNAL_STDLIB_S_SUPPORT +#if !defined(__STDC_WANT_SECURE_LIB__) && \ + !(defined(__STDC_LIB_EXT1__) && defined(__STDC_WANT_LIB_EXT1__)) +extern int memmove_s(void *dest, size_t destsz, const void *src, size_t n); +extern int memcpy_s(void *dest, size_t destsz, const void *src, size_t n); +extern int memset_s(void *dest, size_t destsz, int c, size_t n); +#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */ +#if !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY) +extern size_t strlcpy(char *dest, const char *src, size_t size); +#endif /* !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY) */ +extern size_t strlcat_s(char *dest, const char *src, size_t size); + +/* Remap xxx_s() APIs to use compiler builtin functions for C standard library functions. + * The intent is to identify buffer overflow at compile-time for the safe stdlib APIs when + * the user-specified destination buffer-size is incorrect. + * + * This is only intended as a compile-time test, and should be used by compile-only targets. + */ +#if defined(BCM_STDLIB_S_BUILTINS_TEST) +#define memmove_s(dest, destsz, src, n) ((void)(destsz), (int)__builtin_memmove((dest), (src), (n))) +#define memcpy_s(dest, destsz, src, n) ((void)(destsz), (int)__builtin_memcpy((dest), (src), (n))) +#define memset_s(dest, destsz, c, n) ((void)(destsz), (int)__builtin_memset((dest), (c), (n))) +#define strlcpy(dest, src, size) ((void)(size), (size_t)__builtin_strcpy((dest), (src))) +#define strlcat_s(dest, src, size) ((void)(size), (size_t)__builtin_strcat((dest), (src))) +#endif /* BCM_STDLIB_S_BUILTINS_TEST */ + +#endif /* !BWL_NO_INTERNAL_STDLIB_S_SUPPORT */ +#endif /* _bcmstdlib_s_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmtcp.h b/bcmdhd.101.10.361.x/include/bcmtcp.h new file mode 100755 index 0000000..3e580a2 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmtcp.h @@ -0,0 +1,86 @@ +/* + * Fundamental constants relating to TCP Protocol + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _bcmtcp_h_ +#define _bcmtcp_h_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h>
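+
+/*
+ * Editor's note: the sketch below is illustrative only and is not part of
+ * the original patch. The packed-section includes force byte-exact struct
+ * layout, so the offsets and macros defined in this header (TCP_FLAGS_OFFSET,
+ * TCP_FLAGS(), TCP_FLAG_SYN, below) can be applied directly to raw frame
+ * bytes. The helper name and the SYN test are made up for illustration.
+ */
+#if 0 /* example only; never compiled */
+static bool
+example_tcp_is_syn(const uint8 *tcp_hdr, uint tcp_hdr_len)
+{
+	if (tcp_hdr_len < TCP_MIN_HEADER_LEN)
+		return FALSE;
+	/* the flag bits live in the byte at TCP_FLAGS_OFFSET */
+	return (TCP_FLAGS(tcp_hdr[TCP_FLAGS_OFFSET]) & TCP_FLAG_SYN) != 0;
+}
+#endif /* example only */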
+ +#define TCP_SRC_PORT_OFFSET 0 /* TCP source port offset */ +#define TCP_DEST_PORT_OFFSET 2 /* TCP dest port offset */ +#define TCP_SEQ_NUM_OFFSET 4 /* TCP sequence number offset */ +#define TCP_ACK_NUM_OFFSET 8 /* TCP acknowledgement number offset */ +#define TCP_HLEN_OFFSET 12 /* HLEN and reserved bits offset */ +#define TCP_FLAGS_OFFSET 13 /* FLAGS and reserved bits offset */ +#define TCP_CHKSUM_OFFSET 16 /* TCP body checksum offset */ + +#define TCP_PORT_LEN 2 /* TCP port field length */ + +/* 8bit TCP flag field */ +#define TCP_FLAG_URG 0x20 +#define TCP_FLAG_ACK 0x10 +#define TCP_FLAG_PSH 0x08 +#define TCP_FLAG_RST 0x04 +#define TCP_FLAG_SYN 0x02 +#define TCP_FLAG_FIN 0x01 + +#define TCP_HLEN_MASK 0xf000 +#define TCP_HLEN_SHIFT 12 + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint32 seq_num; /* TCP Sequence Number */ + uint32 ack_num; /* TCP Acknowledgement Number */ + uint16 hdrlen_rsvd_flags; /* Header length, reserved bits and flags */ + uint16 tcpwin; /* TCP window */ + uint16 chksum; /* Segment checksum with pseudoheader */ + uint16 urg_ptr; /* Points to seq-num of byte following urg data */ +} BWL_POST_PACKED_STRUCT; + +#define TCP_MIN_HEADER_LEN 20 + +#define TCP_HDRLEN_MASK 0xf0 +#define TCP_HDRLEN_SHIFT 4 +#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT) + +#define TCP_FLAGS_MASK 0x1f +#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK) + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +/* Sequence number comparisons that tolerate 32-bit wraparound. */ +#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31)) /* a >= b */ +#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31)) /* a <= b */ +#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b) /* a > b */ +#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b) /* a < b */ + +#endif /* #ifndef _bcmtcp_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmtlv.h b/bcmdhd.101.10.361.x/include/bcmtlv.h new file mode 100755 index 0000000..78710b6 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmtlv.h @@ -0,0 +1,375 @@ +/* + * TLV and XTLV support + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _bcmtlv_h_ +#define _bcmtlv_h_ + +#include <typedefs.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* begin tlvs - used in 802.11 IEs etc. */ + +/* type(aka id)/length/value buffer triple */ +typedef struct bcm_tlv { + uint8 id; + uint8 len; + uint8 data[1]; +} bcm_tlv_t; + +/* size of tlv including data */ +#define BCM_TLV_SIZE(_tlv) ((_tlv) ?
(OFFSETOF(bcm_tlv_t, data) + (_tlv)->len) : 0u) + +/* get next tlv - no length checks */ +#define BCM_TLV_NEXT(_tlv) (bcm_tlv_t *)((uint8 *)(_tlv)+ BCM_TLV_SIZE(_tlv)) + +/* tlv length is restricted to 1 byte */ +#define BCM_TLV_MAX_DATA_SIZE (255) + +/* tlv header - two bytes */ +#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data)) + +/* Check that bcm_tlv_t fits into the given buffer len */ +#define bcm_valid_tlv(elt, buflen) \ + ((elt != NULL) && \ + ((buflen) >= (uint)BCM_TLV_HDR_SIZE) && \ + ((buflen) >= (uint)(BCM_TLV_HDR_SIZE + (elt)->len))) + +/* type(aka id)/length/ext/value buffer */ +typedef struct bcm_tlv_ext { + uint8 id; + uint8 len; + uint8 ext; + uint8 data[1]; +} bcm_tlv_ext_t; + +/* get next tlv_ext - no length checks */ +#define BCM_TLV_EXT_NEXT(_tlv_ext) \ + (bcm_tlv_ext_t *)((uint8 *)(_tlv_ext)+ BCM_TLV_EXT_SIZE(_tlv_ext)) + +/* tlv_ext length is restricted to 1 byte */ +#define BCM_TLV_EXT_MAX_DATA_SIZE (254u) + +/* tlv_ext header - three bytes */ +#define BCM_TLV_EXT_HDR_SIZE (OFFSETOF(bcm_tlv_ext_t, data)) + +/* size of tlv_ext including data */ +#define BCM_TLV_EXT_SIZE(_tlv_ext) (BCM_TLV_EXT_HDR_SIZE + (_tlv_ext)->len) + +/* find the next tlv */ +bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen); + +/* move buffer/buflen up to the given tlv, or set to NULL/0 on error */ +void bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen); + +/* move buffer/buflen past the given tlv, or set to NULL/0 on error */ +void bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen); + +/* find the tlv for a given id */ +bcm_tlv_t *bcm_parse_tlvs(const void *buf, uint buflen, uint key); + +/* advancement modes for bcm_parse_tlvs_advance() */ +typedef enum { + BCM_TLV_ADVANCE_NONE = 0, // do not adjust the buffer/buflen + BCM_TLV_ADVANCE_TO = 1, // advance to the found tlv + BCM_TLV_ADVANCE_PAST = 2 // advance past the found tlv +} bcm_tlv_advance_mode_t; + +/* Find an IE of a specific type from a buffer. + * tlvs: buffer to search for IE + * tlvs_len: buffer length + * tag: IE tag + * oui_len: length of the OUI + * oui: Specific OUI to match + * type: OUI type + * Return the matched IE, else return null. +*/ +extern bcm_tlv_t *bcm_find_ie(const uint8 *tlvs, uint tlvs_len, uint8 tag, + uint8 oui_len, const char *oui, uint8 type); + +/* search for a matching tlv id, and adjust the parse buffer pointer/length */ +const bcm_tlv_t *bcm_parse_tlvs_advance(const uint8 **buf, uint *buflen, uint key, + bcm_tlv_advance_mode_t advance); + +/* + * Traverse tlvs and return pointer to the first tlv that + * matches the key. Return NULL if not found or tlv len < min_bodylen + */ +bcm_tlv_t *bcm_parse_tlvs_min_bodylen(const void *buf, uint buflen, uint key, uint min_bodylen); + +/* + * Traverse tlvs and return pointer to the first tlv that + * matches the key. 
Return NULL if not found or tlv size > max_len or < min_len + */ +bcm_tlv_t *bcm_parse_tlvs_minmax_len(const void *buf, uint buflen, uint key, + uint min_len, uint max_len); + +/* parse tlvs for dot11 - same as parse_tlvs but supports 802.11 id extension */ +bcm_tlv_t *bcm_parse_tlvs_dot11(const void *buf, uint buflen, uint key, bool id_ext); + +/* same as parse_tlvs, but stops when found id > key */ +const bcm_tlv_t *bcm_parse_ordered_tlvs(const void *buf, uint buflen, uint key); + +/* find a tlv with DOT11_MNG_PROPR_ID as id, and the given oui and type */ + bcm_tlv_t *bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui, + uint8 *type, uint type_len); + +/* write tlv at dst and return next tlv ptr */ +uint8 *bcm_write_tlv(int type, const void *data, uint datalen, uint8 *dst); + +/* write tlv_ext at dst and return next tlv ptr */ +uint8 *bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst); + +/* write tlv at dst if space permits and return next tlv ptr */ +uint8 *bcm_write_tlv_safe(int type, const void *data, uint datalen, uint8 *dst, + uint dst_maxlen); + +/* copy a tlv and return next tlv ptr */ +uint8 *bcm_copy_tlv(const void *src, uint8 *dst); + +/* copy a tlv if space permits and return next tlv ptr */ +uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, uint dst_maxlen); + +/* end tlvs */ + +/* begin xtlv - used for iovars, nan attributes etc. */ + +/* bcm type(id), length, value w/ 16-bit id/len. The structure below + * is nominal, and is used to support variable length id and type. See + * xtlv options below. + */ +typedef struct bcm_xtlv { + uint16 id; + uint16 len; + uint8 data[1]; +} bcm_xtlv_t; + +/* xtlv options */ +#define BCM_XTLV_OPTION_NONE 0x0000u +#define BCM_XTLV_OPTION_ALIGN32 0x0001u /* 32bit alignment of type.len.data */ +#define BCM_XTLV_OPTION_IDU8 0x0002u /* shorter id */ +#define BCM_XTLV_OPTION_LENU8 0x0004u /* shorter length */ +#define BCM_XTLV_OPTION_IDBE 0x0008u /* big endian format id */ +#define BCM_XTLV_OPTION_LENBE 0x0010u /* big endian format length */ +typedef uint16 bcm_xtlv_opts_t; + +/* header size. depends on options. Macro names ending w/ _EX are where + * less common options are explicitly specified. The ones + * without use default values that correspond to ...OPTION_NONE + */ + +/* xtlv header size depends on options */ +#define BCM_XTLV_HDR_SIZE 4u +#define BCM_XTLV_HDR_SIZE_EX(_opts) bcm_xtlv_hdr_size(_opts) + +/* note: xtlv len only stores the value's length without padding */ +#define BCM_XTLV_LEN(_elt) ltoh16_ua(&(_elt)->len) +#define BCM_XTLV_LEN_EX(_elt, _opts) bcm_xtlv_len(_elt, _opts) + +#define BCM_XTLV_ID(_elt) ltoh16_ua(&(_elt)->id) +#define BCM_XTLV_ID_EX(_elt, _opts) bcm_xtlv_id(_elt, _opts) + +/* entire size of the XTLV including header, data, and optional padding */ +#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts) +#define BCM_XTLV_SIZE_EX(_elt, _opts) bcm_xtlv_size(_elt, _opts) + +/* max xtlv data size */ +#define BCM_XTLV_MAX_DATA_SIZE 65535u +#define BCM_XTLV_MAX_DATA_SIZE_EX(_opts) ((_opts & BCM_XTLV_OPTION_LENU8) ?
\ + 255u : 65535u) + +/* descriptor of xtlv data, packing(src) and unpacking(dst) support */ +typedef struct { + uint16 type; + uint16 len; + void *ptr; /* ptr to memory location */ +} xtlv_desc_t; + +/* xtlv buffer - packing/unpacking support */ +struct bcm_xtlvbuf { + bcm_xtlv_opts_t opts; + uint16 size; + uint8 *head; /* point to head of buffer */ + uint8 *buf; /* current position of buffer */ + /* allocated buffer may follow, but not necessarily */ +}; +typedef struct bcm_xtlvbuf bcm_xtlvbuf_t; + +/* valid xtlv ? */ +bool bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts); + +/* return the next xtlv element, and update buffer len (remaining). Buffer length + * updated includes padding as specified by options + */ +bcm_xtlv_t *bcm_next_xtlv(const bcm_xtlv_t *elt, int *buf_len, bcm_xtlv_opts_t opts); + +/* initialize an xtlv buffer. Use options specified for packing/unpacking using + * the buffer. Caller is responsible for allocating both buffers. + */ +int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, + bcm_xtlv_opts_t opts); + +/* length of data in the xtlv buffer */ +uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf); + +/* remaining space in the xtlv buffer */ +uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf); + +/* write ptr */ +uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf); + +/* head */ +uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf); + +/* put a data buffer into xtlv */ +int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n); + +/* put one or more u16 elts into xtlv */ +int bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n); + +/* put one or more u32 elts into xtlv */ +int bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n); + +/* put one or more u64 elts into xtlv */ +int bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n); + +/* note: there is no get equivalent for integer unpacking, because bcmendian.h + * can be used directly using pointers returned in the buffer being processed. + */ + +/* unpack a single xtlv entry, advances buffer and copies data to dst_data on match + * type and length match must be exact + */ +int bcm_unpack_xtlv_entry(const uint8 **buf, uint16 expected_type, uint16 expected_len, + uint8 *dst_data, bcm_xtlv_opts_t opts); + +/* packs an xtlv into buffer, and advances buffer, decrements buffer length. + * buffer length is checked and must be >= size of xtlv - otherwise BCME_BADLEN + */ +int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len, + const uint8 *src_data, bcm_xtlv_opts_t opts);
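+
+/*
+ * Editor's note: illustrative sketch added during review, not part of the
+ * original patch. It shows a hypothetical pack/unpack round trip through the
+ * buffer helpers declared above; error handling is elided and the id 0x10
+ * and payload are made up.
+ */
+#if 0 /* example only; never compiled */
+static void
+example_xtlv_roundtrip(void)
+{
+	uint8 storage[32], payload[4] = { 1, 2, 3, 4 }, out[4];
+	const uint8 *cursor = storage;
+	bcm_xtlvbuf_t tbuf;
+
+	/* pack: writes a 4-byte header (default options) followed by the data */
+	bcm_xtlv_buf_init(&tbuf, storage, sizeof(storage), BCM_XTLV_OPTION_NONE);
+	bcm_xtlv_put_data(&tbuf, 0x10u, payload, sizeof(payload));
+
+	/* unpack: exact type/len match required; advances 'cursor' on success */
+	bcm_unpack_xtlv_entry(&cursor, 0x10u, sizeof(out), out, BCM_XTLV_OPTION_NONE);
+}
+#endif /* example only */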
+ +/* accessors and lengths for element given options */ +int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); +int bcm_xtlv_hdr_size(bcm_xtlv_opts_t opts); +int bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); +int bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); +int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts); + +/* compute size needed for number of tlvs whose total data len is given */ +#define BCM_XTLV_SIZE_FOR_TLVS(_data_len, _num_tlvs, _opts) (\ + bcm_xtlv_size_for_data(_data_len, _opts) + (\ + (_num_tlvs) * BCM_XTLV_HDR_SIZE_EX(_opts))) + +/* unsafe copy xtlv */ +#define BCM_XTLV_BCOPY(_src, _dst, _opts) \ + bcm_xtlv_bcopy(_src, _dst, BCM_XTLV_MAX_DATA_SIZE_EX(_opts), \ + BCM_XTLV_MAX_DATA_SIZE_EX(_opts), _opts) + +/* copy xtlv - note: src->dst bcopy order - to be compatible w/ tlv version */ +bcm_xtlv_t* bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst, + int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts); + +/* callback for unpacking xtlv from a buffer into context. */ +typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, const uint8 *buf, + uint16 type, uint16 len); + +/* unpack a tlv buffer using buffer, options, and callback */ +int bcm_unpack_xtlv_buf(void *ctx, const uint8 *buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn); + +/* unpack a set of tlvs from the buffer using provided xtlv descriptors */ +int bcm_unpack_xtlv_buf_to_mem(const uint8 *buf, int *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* pack a set of tlvs into buffer using provided xtlv descriptors */ +int bcm_pack_xtlv_buf_from_mem(uint8 **buf, uint16 *buflen, + const xtlv_desc_t *items, bcm_xtlv_opts_t opts); + +/* return data pointer and data length of a given id from xtlv buffer + * data_len may be NULL + */ +const uint8* bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen, + uint16 id, uint16 *datalen, bcm_xtlv_opts_t opts); + +/* callback to return next tlv id and len to pack, if there are more tlvs to come and + * options e.g. alignment + */ +typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len); + +/* callback to pack the tlv into length validated buffer */ +typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx, + uint16 tlv_id, uint16 tlv_len, uint8* buf); + +/* pack a set of tlvs into buffer using get_next to iterate */ +int bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next, + bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen); + +/* pack an xtlv. does not do any error checking.
if data is not NULL + * data of given length is copied to buffer (xtlv) + */ +void bcm_xtlv_pack_xtlv(bcm_xtlv_t *xtlv, uint16 type, uint16 len, + const uint8 *data, bcm_xtlv_opts_t opts); + +/* unpack an xtlv and return ptr to data, and data length */ +void bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len, + const uint8 **data, bcm_xtlv_opts_t opts); + +/* end xtlvs */ + +/* length value pairs */ +struct bcm_xlv { + uint16 len; + uint8 data[1]; +}; +typedef struct bcm_xlv bcm_xlv_t; + +struct bcm_xlvp { + uint16 len; + uint8 *data; +}; +typedef struct bcm_xlvp bcm_xlvp_t; + +struct bcm_const_xlvp { + uint16 len; + const uint8 *data; +}; + +typedef struct bcm_const_xlvp bcm_const_xlvp_t; + +struct bcm_const_ulvp { + uint32 len; + const uint8 *data; +}; + +typedef struct bcm_const_ulvp bcm_const_ulvp_t; + +/* end length value pairs */ +#ifdef __cplusplus } +#endif /* __cplusplus */ + +#endif /* _bcmtlv_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmudp.h b/bcmdhd.101.10.361.x/include/bcmudp.h new file mode 100755 index 0000000..5a5113d --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmudp.h @@ -0,0 +1,54 @@ +/* + * Fundamental constants relating to UDP Protocol + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _bcmudp_h_ +#define _bcmudp_h_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + +/* UDP header */ +#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */ +#define UDP_LEN_OFFSET 4 /* UDP length offset */ +#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */ + +#define UDP_HDR_LEN 8 /* UDP header length */ +#define UDP_PORT_LEN 2 /* UDP port length */ + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmudp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint16 len; /* Number of bytes in datagram including header */ + uint16 chksum; /* entire datagram checksum with pseudoheader */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* #ifndef _bcmudp_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmutils.h b/bcmdhd.101.10.361.x/include/bcmutils.h new file mode 100755 index 0000000..2971d8b --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmutils.h @@ -0,0 +1,1639 @@ +/* + * Misc useful os-independent macros and functions. + * + * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _bcmutils_h_ +#define _bcmutils_h_ + +#include <bcmtlv.h> + +/* For now, protect the bcmerror.h */ +#ifdef BCMUTILS_ERR_CODES +#include <bcmerror.h> +#endif + +#ifdef __cplusplus extern "C" { #endif + +#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) +#ifdef FREEBSD +#define bcm_strncat_s(dst, noOfElements, src, count) strcat((dst), (src)) +#else +#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count)) +#endif /* FREEBSD */ +#define bcm_snprintf_s snprintf +#define bcm_sprintf_s snprintf + +/* + * #define bcm_strcpy_s(dst, count, src) strncpy((dst), (src), (count)) + * Use bcm_strcpy_s instead as it is a safer option + * bcm_strcat_s: Use bcm_strncat_s as a safer option + * + */ + +#define BCM_BIT(x) (1u << (x)) +/* clear the lowest set bit in x; useful when counting set bits */ +#define BCM_CLR_FISRT_BIT(x) ((x - 1) & x) +/* first bit set in x. Useful to iterate through a mask */ +#define BCM_FIRST_BIT(x) (BCM_CLR_FISRT_BIT(x)^(x)) + +/* Macro to iterate through the set bits in mask. + * NOTE: the argument "mask" will be cleared after + * the iteration. + */ + +#define FOREACH_BIT(c, mask)\ + for (c = BCM_FIRST_BIT(mask); mask != 0; \ + mask = BCM_CLR_FISRT_BIT(mask), c = BCM_FIRST_BIT(mask)) + +/* ctype replacement */ +#define _BCM_U 0x01 /* upper */ +#define _BCM_L 0x02 /* lower */ +#define _BCM_D 0x04 /* digit */ +#define _BCM_C 0x08 /* cntrl */ +#define _BCM_P 0x10 /* punct */ +#define _BCM_S 0x20 /* white space (space/lf/tab) */ +#define _BCM_X 0x40 /* hex digit */ +#define _BCM_SP 0x80 /* hard space (0x20) */ + +extern const unsigned char bcm_ctype[256]; +#define bcm_ismask(x) (bcm_ctype[(unsigned char)(x)]) + +#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0) +#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0) +#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0) +#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0) +#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0) +#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0) +#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0) +#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0) +#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0) +#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
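+
+/*
+ * Editor's note: illustrative sketch added during review, not part of the
+ * original patch. FOREACH_BIT (above) visits each set bit of 'mask',
+ * yielding the isolated bit value in 'c'; note that it consumes 'mask'.
+ */
+#if 0 /* example only; never compiled */
+static void
+example_foreach_bit(void)
+{
+	uint32 c, mask = 0x15; /* bits 0, 2 and 4 set */
+
+	FOREACH_BIT(c, mask) {
+		/* c takes the values 0x01, 0x04, 0x10; mask is 0 afterwards */
+	}
+}
+#endif /* example only */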
+ +#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx) + +#define KB(bytes) (((bytes) + 1023) / 1024) + +/* Buffer structure for collecting string-formatted data +* using bcm_bprintf() API. +* Use bcm_binit() to initialize before use +*/ + +struct bcmstrbuf { + char *buf; /* pointer to current position in origbuf */ + unsigned int size; /* current (residual) size in bytes */ + char *origbuf; /* unmodified pointer to original buffer */ + unsigned int origsize; /* unmodified original buffer size in bytes */ +}; + +#define BCMSTRBUF_LEN(b) (b->size) +#define BCMSTRBUF_BUF(b) (b->buf) + +struct ether_addr; +extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); +extern int bcm_ether_atoe(const char *p, struct ether_addr *ea); + +/* ** driver-only section ** */ +#ifdef BCMDRIVER + +#include +#include +#include + +#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */ + +/* + * Spin at most 'us' microseconds while 'exp' is true. + * Caller should explicitly test 'exp' when this completes + * and take appropriate error action if 'exp' is still true. + */ +#ifndef SPINWAIT_POLL_PERIOD +#define SPINWAIT_POLL_PERIOD 10U +#endif + +#ifdef BCMFUZZ +/* fake spinwait for fuzzing */ +#define SPINWAIT(exp, us) { \ + uint countdown = (exp) != 0 ? 1 : 0; \ + while (countdown > 0) { \ + countdown--; \ + } \ +} + +#elif defined(PHY_REG_TRACE_FRAMEWORK) +#include +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1U); \ + phy_utils_log_spinwait_start(); \ + while (((exp) != 0) && (uint)(countdown >= SPINWAIT_POLL_PERIOD)) { \ + OSL_DELAY(SPINWAIT_POLL_PERIOD); \ + countdown -= SPINWAIT_POLL_PERIOD; \ + } \ + phy_utils_log_spinwait_end(us, countdown); \ +} + +#else +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1U); \ + while (((exp) != 0) && (uint)(countdown >= SPINWAIT_POLL_PERIOD)) { \ + OSL_DELAY(SPINWAIT_POLL_PERIOD); \ + countdown -= SPINWAIT_POLL_PERIOD; \ + } \ +} +#endif /* BCMFUZZ */ + +/* forward definition of ether_addr structure used by some function prototypes */ + +extern int ether_isbcast(const void *ea); +extern int ether_isnulladdr(const void *ea); + +#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */ +#define CORE_SLAVE_PORT_0 0 +#define CORE_SLAVE_PORT_1 1 +#define CORE_BASE_ADDR_0 0 +#define CORE_BASE_ADDR_1 1 + +#ifdef DONGLEBUILD +/* TRIM Tail bytes from lfrag */ +extern void pktfrag_trim_tailbytes(osl_t * osh, void* p, uint16 len, uint8 type); +#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) pktfrag_trim_tailbytes(osh, p, len, type) +#else +#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len) +#endif /* DONGLEBUILD */ + +/* externs */ +/* packet */ +extern uint pktcopy(osl_t *osh, void *p, uint offset, uint len, uchar *buf); +extern uint pktfrombuf(osl_t *osh, void *p, uint offset, uint len, uchar *buf); +extern uint pkttotlen(osl_t *osh, void *p); +extern uint pkttotcnt(osl_t *osh, void *p); +extern void *pktlast(osl_t *osh, void *p); +extern uint pktsegcnt(osl_t *osh, void *p); +extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset); +extern void *pktoffset(osl_t *osh, void *p, uint offset); + +#ifdef WLCSO +extern uint pkttotlen_no_sfhtoe_hdr(osl_t *osh, void *p, uint toe_hdr_len); +#else +#define pkttotlen_no_sfhtoe_hdr(osh, p, hdrlen) pkttotlen(osh, p) +#endif /* WLCSO */ + +/* Get priority from a packet and pass it back in scb (or equiv) */ +#define PKTPRIO_VDSCP 0x100u /* DSCP
prio found after VLAN tag */ +#define PKTPRIO_VLAN 0x200u /* VLAN prio found */ +#define PKTPRIO_UPD 0x400u /* DSCP used to update VLAN prio */ +#define PKTPRIO_DSCP 0x800u /* DSCP prio found */ + +/* DSCP type definitions (RFC4594) */ +/* AF1x: High-Throughput Data (RFC2597) */ +#define DSCP_AF11 0x0Au +#define DSCP_AF12 0x0Cu +#define DSCP_AF13 0x0Eu +/* AF2x: Low-Latency Data (RFC2597) */ +#define DSCP_AF21 0x12u +#define DSCP_AF22 0x14u +#define DSCP_AF23 0x16u +/* CS2: OAM (RFC2474) */ +#define DSCP_CS2 0x10u +/* AF3x: Multimedia Streaming (RFC2597) */ +#define DSCP_AF31 0x1Au +#define DSCP_AF32 0x1Cu +#define DSCP_AF33 0x1Eu +/* CS3: Broadcast Video (RFC2474) */ +#define DSCP_CS3 0x18u +/* VA: VOICE-ADMIT (RFC5865) */ +#define DSCP_VA 0x2Cu +/* EF: Telephony (RFC3246) */ +#define DSCP_EF 0x2Eu +/* CS6: Network Control (RFC2474) */ +#define DSCP_CS6 0x30u +/* CS7: Network Control (RFC2474) */ +#define DSCP_CS7 0x38u + +extern uint pktsetprio(void *pkt, bool update_vtag); +extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag); +extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp); + +/* ethernet address */ +extern uint64 bcm_ether_ntou64(const struct ether_addr *ea) BCMCONSTFN; +extern int bcm_addrmask_set(int enable); +extern int bcm_addrmask_get(int *val); + +/* ip address */ +struct ipv4_addr; +extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf); +extern char *bcm_ipv6_ntoa(void *ipv6, char *buf); +extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip); + +/* delay */ +extern void bcm_mdelay(uint ms); +/* variable access */ +#if defined(BCM_RECLAIM) +extern bool _nvram_reclaim_enb; +#define NVRAM_RECLAIM_ENAB() (_nvram_reclaim_enb) +#ifdef BCMDBG +#define NVRAM_RECLAIM_CHECK(name) \ + if (NVRAM_RECLAIM_ENAB() && (bcm_attach_part_reclaimed == TRUE)) { \ + printf("NVRAM already reclaimed, %s\n", (name)); \ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF(); \ + *(char*) 0 = 0; /* TRAP */ \ + GCC_DIAGNOSTIC_POP(); \ + return NULL; \ + } +#else /* BCMDBG */ +#define NVRAM_RECLAIM_CHECK(name) \ + if (NVRAM_RECLAIM_ENAB() && (bcm_attach_part_reclaimed == TRUE)) { \ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF(); \ + *(char*) 0 = 0; /* TRAP */ \ + GCC_DIAGNOSTIC_POP(); \ + return NULL; \ + } +#endif /* BCMDBG */ +#else /* BCM_RECLAIM */ +#define NVRAM_RECLAIM_CHECK(name) +#endif /* BCM_RECLAIM */ + +#ifdef WL_FWSIGN +#define getvar(vars, name) (NULL) +#define getintvar(vars, name) (0) +#define getintvararray(vars, name, index) (0) +#define getintvararraysize(vars, name) (0) +#else /* WL_FWSIGN */ +extern char *getvar(char *vars, const char *name); +extern int getintvar(char *vars, const char *name); +extern int getintvararray(char *vars, const char *name, int index); +extern int getintvararraysize(char *vars, const char *name); +#endif /* WL_FWSIGN */ + +/* Read an array of values from a possibly slice-specific nvram string */ +extern int get_uint8_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor, + const char* name, uint8* dest_array, uint dest_size); +extern int get_int16_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor, + const char* name, int16* dest_array, uint dest_size); +/* Prepend a slice-specific accessor to an nvram string name */ +extern uint get_slicespecific_var_name(osl_t *osh, char *vars_table_accessor, + const char *name, char **name_out); + +extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); +#ifdef BCMDBG +extern void prpkt(const char *msg, osl_t *osh, void *p0); +#endif /*
BCMDBG */ +#ifdef BCMPERFSTATS +extern void bcm_perf_enable(void); +extern void bcmstats(char *fmt); +extern void bcmlog(char *fmt, uint a1, uint a2); +extern void bcmdumplog(char *buf, int size); +extern int bcmdumplogent(char *buf, uint idx); +#else +#define bcm_perf_enable() +#define bcmstats(fmt) +#define bcmlog(fmt, a1, a2) +#define bcmdumplog(buf, size) *buf = '\0' +#define bcmdumplogent(buf, idx) -1 +#endif /* BCMPERFSTATS */ + +#define TSF_TICKS_PER_MS 1000 +#define TS_ENTER 0xdeadbeef /* Timestamp profiling enter */ +#define TS_EXIT 0xbeefcafe /* Timestamp profiling exit */ + +#if defined(BCMTSTAMPEDLOGS) +/* Store a TSF timestamp and a log line in the log buffer */ +extern void bcmtslog(uint32 tstamp, const char *fmt, uint a1, uint a2); +/* Print out the log buffer with timestamps */ +extern void bcmprinttslogs(void); +/* Print out a microsecond timestamp as "sec.ms.us " */ +extern void bcmprinttstamp(uint32 us); +/* Dump to buffer a microsecond timestamp as "sec.ms.us " */ +extern void bcmdumptslog(struct bcmstrbuf *b); +#else +#define bcmtslog(tstamp, fmt, a1, a2) +#define bcmprinttslogs() +#define bcmprinttstamp(us) +#define bcmdumptslog(b) +#endif /* BCMTSTAMPEDLOGS */ + +bool bcm_match_buffers(const uint8 *b1, uint b1_len, const uint8 *b2, uint b2_len); + +/* Support for sharing code across in-driver iovar implementations. + * The intent is that a driver use this structure to map iovar names + * to its (private) iovar identifiers, and the lookup function to + * find the entry. Macros are provided to map ids and get/set actions + * into a single number space for a switch statement. + */ + +/* iovar structure */ +typedef struct bcm_iovar { + const char *name; /* name for lookup and display */ + uint16 varid; /* id for switch */ + uint16 flags; /* driver-specific flag bits */ + uint8 flags2; /* driver-specific flag bits */ + uint8 type; /* base type of argument */ + uint16 minlen; /* min length for buffer vars */ +} bcm_iovar_t; + +/* varid definitions are per-driver, may use these get/set bits */ + +/* IOVar action bits for id mapping */ +#define IOV_GET 0 /* Get an iovar */ +#define IOV_SET 1 /* Set an iovar */ + +/* Varid to actionid mapping */ +#define IOV_GVAL(id) ((id) * 2) +#define IOV_SVAL(id) ((id) * 2 + IOV_SET) +#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET) +#define IOV_ID(actionid) (actionid >> 1) + +/* flags are per-driver based on driver attributes */ + +extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); +extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, uint len, bool set); + +/* ioctl structure */ +typedef struct wlc_ioctl_cmd { + uint16 cmd; /**< IOCTL command */ + uint16 flags; /**< IOCTL command flags */ + uint16 min_len; /**< IOCTL command minimum argument len (in bytes) */ +} wlc_ioctl_cmd_t; + +#if defined(WLTINYDUMP) || defined(BCMDBG) || defined(WLMSG_INFORM) || \ + defined(WLMSG_ASSOC) || defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); +#endif /* WLTINYDUMP || BCMDBG || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT */ +#endif /* BCMDRIVER */ + +/* string */ +extern int bcm_atoi(const char *s); +extern ulong bcm_strtoul(const char *cp, char **endp, uint base); +extern uint64 bcm_strtoull(const char *cp, char **endp, uint base); +extern char *bcmstrstr(const char *haystack, const char *needle); +extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len); +extern char *bcmstrcat(char *dest, 
const char *src); +extern char *bcmstrncat(char *dest, const char *src, uint size); +extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); +char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); +int bcmstricmp(const char *s1, const char *s2); +int bcmstrnicmp(const char* s1, const char* s2, int cnt); +uint16 bcmhex2bin(const uint8* hex, uint hex_len, uint8 *buf, uint buf_len); + +/* Base type definitions */ +#define IOVT_VOID 0 /* no value (implicitly set only) */ +#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */ +#define IOVT_INT8 2 /* integer values are range-checked */ +#define IOVT_UINT8 3 /* unsigned int 8 bits */ +#define IOVT_INT16 4 /* int 16 bits */ +#define IOVT_UINT16 5 /* unsigned int 16 bits */ +#define IOVT_INT32 6 /* int 32 bits */ +#define IOVT_UINT32 7 /* unsigned int 32 bits */ +#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */ +#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER) + +/* Initializer for IOV type strings */ +#define BCM_IOV_TYPE_INIT { \ + "void", \ + "bool", \ + "int8", \ + "uint8", \ + "int16", \ + "uint16", \ + "int32", \ + "uint32", \ + "buffer", \ + "" } + +#define BCM_IOVT_IS_INT(type) (\ + (type == IOVT_BOOL) || \ + (type == IOVT_INT8) || \ + (type == IOVT_UINT8) || \ + (type == IOVT_INT16) || \ + (type == IOVT_UINT16) || \ + (type == IOVT_INT32) || \ + (type == IOVT_UINT32)) + +/* ** driver/apps-shared section ** */ + +#define BCME_STRLEN 64 /* Max string length for BCM errors */ +#define VALID_BCMERROR(e) valid_bcmerror(e) + +#ifdef DBG_BUS +/** tracks non typical execution paths, use gdb with arm sim + firmware dump to read counters */ +#define DBG_BUS_INC(s, cnt) ((s)->dbg_bus->cnt++) +#else +#define DBG_BUS_INC(s, cnt) +#endif /* DBG_BUS */ + +/* BCMUTILS_ERR_CODES is defined to use the error codes from bcmerror.h, + * otherwise use the ones defined in this file. + */
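+
+/*
+ * Editor's note: illustrative sketch added during review, not part of the
+ * original patch. The BCME_* codes below are small negative integers, and
+ * BCMERRSTRINGTABLE is ordered so that entry [-code] is the matching string.
+ * A hypothetical lookup in the style of bcmerrorstr() (declared later in
+ * this header):
+ */
+#if 0 /* example only; never compiled */
+static const char *
+example_bcmerrorstr(int bcmerror)
+{
+	static const char *errstrings[] = BCMERRSTRINGTABLE;
+
+	/* out-of-range codes fall back to the generic error string */
+	if (bcmerror > 0 || bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+	/* BCME_OK (0) -> "OK", BCME_BADARG (-2) -> "Bad Argument", ... */
+	return errstrings[-bcmerror];
+}
+#endif /* example only */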
+#ifndef BCMUTILS_ERR_CODES + +/* + * error codes could be added but the defined ones shouldn't be changed/deleted + * these error codes are exposed to the user code + * whenever a new error code is added to this list + * please update errorstring table with the related error string and + * update osl files with os specific errorcode map +*/ + +#define BCME_OK 0 /* Success */ +#define BCME_ERROR -1 /* Error generic */ +#define BCME_BADARG -2 /* Bad Argument */ +#define BCME_BADOPTION -3 /* Bad option */ +#define BCME_NOTUP -4 /* Not up */ +#define BCME_NOTDOWN -5 /* Not down */ +#define BCME_NOTAP -6 /* Not AP */ +#define BCME_NOTSTA -7 /* Not STA */ +#define BCME_BADKEYIDX -8 /* BAD Key Index */ +#define BCME_RADIOOFF -9 /* Radio Off */ +#define BCME_NOTBANDLOCKED -10 /* Not band locked */ +#define BCME_NOCLK -11 /* No Clock */ +#define BCME_BADRATESET -12 /* BAD Rate valueset */ +#define BCME_BADBAND -13 /* BAD Band */ +#define BCME_BUFTOOSHORT -14 /* Buffer too short */ +#define BCME_BUFTOOLONG -15 /* Buffer too long */ +#define BCME_BUSY -16 /* Busy */ +#define BCME_NOTASSOCIATED -17 /* Not Associated */ +#define BCME_BADSSIDLEN -18 /* Bad SSID len */ +#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */ +#define BCME_BADCHAN -20 /* Bad Channel */ +#define BCME_BADADDR -21 /* Bad Address */ +#define BCME_NORESOURCE -22 /* Not Enough Resources */ +#define BCME_UNSUPPORTED -23 /* Unsupported */ +#define BCME_BADLEN -24 /* Bad length */ +#define BCME_NOTREADY -25 /* Not Ready */ +#define BCME_EPERM -26 /* Not Permitted */ +#define BCME_NOMEM -27 /* No Memory */ +#define BCME_ASSOCIATED -28 /* Associated */ +#define BCME_RANGE -29 /* Not In Range */ +#define BCME_NOTFOUND -30 /* Not Found */ +#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */ +#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */ +#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */ +#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */ +#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */ +#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */ +#define BCME_VERSION -37 /* Incorrect version */ +#define BCME_TXFAIL -38 /* TX failure */ +#define BCME_RXFAIL -39 /* RX failure */ +#define BCME_NODEVICE -40 /* Device not present */ +#define BCME_NMODE_DISABLED -41 /* NMODE disabled */ +#define BCME_MSCH_DUP_REG -42 /* Duplicate slot registration */ +#define BCME_SCANREJECT -43 /* reject scan request */ +#define BCME_USAGE_ERROR -44 /* WLCMD usage error */ +#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */ +#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */ +#define BCME_DISABLED -47 /* Disabled in this build */ +#define BCME_DECERR -48 /* Decrypt error */ +#define BCME_ENCERR -49 /* Encrypt error */ +#define BCME_MICERR -50 /* Integrity/MIC error */ +#define BCME_REPLAY -51 /* Replay */ +#define BCME_IE_NOTFOUND -52 /* IE not found */ +#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */ +#define BCME_NOT_GC -54 /* expecting a group client */ +#define BCME_PRS_REQ_FAILED -55 /* GC presence req failed to send */ +#define BCME_NO_P2P_SE -56 /* Could not find P2P-Subelement */ +#define BCME_NOA_PND -57 /* NoA pending, CB should be NULL */ +#define BCME_FRAG_Q_FAILED -58 /* queueing 80211 frag failed */ +#define BCME_GET_AF_FAILED -59 /* Get p2p AF pkt failed */ +#define BCME_MSCH_NOTREADY -60 /* scheduler not ready */ +#define BCME_IOV_LAST_CMD -61 /* last batched iov sub-command */ +#define BCME_MINIPMU_CAL_FAIL -62 /* MiniPMU cal failed */ +#define BCME_RCAL_FAIL -63 /*
Rcal failed */ +#define BCME_LPF_RCCAL_FAIL -64 /* RCCAL failed */ +#define BCME_DACBUF_RCCAL_FAIL -65 /* RCCAL failed */ +#define BCME_VCOCAL_FAIL -66 /* VCOCAL failed */ +#define BCME_BANDLOCKED -67 /* interface is restricted to a band */ +#define BCME_BAD_IE_DATA -68 /* Received ie with invalid/bad data */ +#define BCME_REG_FAILED -69 /* Generic registration failed */ +#define BCME_NOCHAN -70 /* Registration with 0 chans in list */ +#define BCME_PKTTOSS -71 /* Pkt tossed */ +#define BCME_DNGL_DEVRESET -72 /* dongle re-attach during DEVRESET */ +#define BCME_ROAM -73 /* Roam related failures */ +#define BCME_NO_SIG_FILE -74 /* Signature file is missing */ + +#define BCME_LAST BCME_NO_SIG_FILE + +#define BCME_NOTENABLED BCME_DISABLED + +/* This error code is *internal* to the driver, and is not propagated to users. It should + * only be used by IOCTL patch handlers as an indication that it did not handle the IOCTL. + * (Since the error code is internal, an entry in 'BCMERRSTRINGTABLE' is not required, + * nor does it need to be part of any OSL driver-to-OS error code mapping). + */ +#define BCME_IOCTL_PATCH_UNSUPPORTED -9999 +#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED) + #error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED" +#endif + +/* This is a collection of BCME Error strings */ +#define BCMERRSTRINGTABLE { \ + "OK", \ + "Undefined error", \ + "Bad Argument", \ + "Bad Option", \ + "Not up", \ + "Not down", \ + "Not AP", \ + "Not STA", \ + "Bad Key Index", \ + "Radio Off", \ + "Not band locked", \ + "No clock", \ + "Bad Rate valueset", \ + "Bad Band", \ + "Buffer too short", \ + "Buffer too long", \ + "Busy", \ + "Not Associated", \ + "Bad SSID len", \ + "Out of Range Channel", \ + "Bad Channel", \ + "Bad Address", \ + "Not Enough Resources", \ + "Unsupported", \ + "Bad length", \ + "Not Ready", \ + "Not Permitted", \ + "No Memory", \ + "Associated", \ + "Not In Range", \ + "Not Found", \ + "WME Not Enabled", \ + "TSPEC Not Found", \ + "ACM Not Supported", \ + "Not WME Association", \ + "SDIO Bus Error", \ + "Dongle Not Accessible", \ + "Incorrect version", \ + "TX Failure", \ + "RX Failure", \ + "Device Not Present", \ + "NMODE Disabled", \ + "Host Offload in device", \ + "Scan Rejected", \ + "WLCMD usage error", \ + "WLCMD ioctl error", \ + "RWL serial port error", \ + "Disabled", \ + "Decrypt error", \ + "Encrypt error", \ + "MIC error", \ + "Replay", \ + "IE not found", \ + "Data not found", \ + "NOT GC", \ + "PRS REQ FAILED", \ + "NO P2P SubElement", \ + "NOA Pending", \ + "FRAG Q FAILED", \ + "GET ActionFrame failed", \ + "scheduler not ready", \ + "Last IOV batched sub-cmd", \ + "Mini PMU Cal failed", \ + "R-cal failed", \ + "LPF RC Cal failed", \ + "DAC buf RC Cal failed", \ + "VCO Cal failed", \ + "band locked", \ + "Received ie with invalid data", \ + "registration failed", \ + "Registration with zero channels", \ + "pkt toss", \ + "Dongle Devreset", \ + "Critical roam in progress", \ + "Signature file is missing", \ +} +#endif /* BCMUTILS_ERR_CODES */ + +#ifndef ABS +#define ABS(a) (((a) < 0) ? -(a) : (a)) +#endif /* ABS */ + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif /* MIN */ + +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif /* MAX */ + +/* limit to [min, max] */ +#ifndef LIMIT_TO_RANGE +#define LIMIT_TO_RANGE(x, min, max) \ + ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x))) +#endif /* LIMIT_TO_RANGE */ + +/* limit to max */ +#ifndef LIMIT_TO_MAX +#define LIMIT_TO_MAX(x, max) \ + (((x) > (max) ?
(max) : (x))) +#endif /* LIMIT_TO_MAX */ + +/* limit to min */ +#ifndef LIMIT_TO_MIN +#define LIMIT_TO_MIN(x, min) \ + (((x) < (min) ? (min) : (x))) +#endif /* LIMIT_TO_MIN */ + +#define SIZE_BITS(x) (sizeof(x) * NBBY) +#define SIZE_BITS32(x) ((uint)sizeof(x) * NBBY) + +#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \ + (0xffffffff - (prev) + (curr) + 1)) +#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) +#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define ROUNDDN(p, align) ((p) & ~((align) - 1)) +#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0) +#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \ + & ~((uintptr)(boundary) - 1)) +#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \ + & ~((boundary) - 1)) +#define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0) +#define VALID_MASK(mask) !((mask) & ((mask) + 1)) + +#ifndef OFFSETOF +#if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8)) + /* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */ + #define OFFSETOF(type, member) __builtin_offsetof(type, member) +#else +#ifdef BCMFUZZ + /* use 0x10 offset to avoid undefined behavior error due to NULL access */ + #define OFFSETOF(type, member) (((uint)(uintptr)&((type *)0x10)->member) - 0x10) +#else + #define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member) +#endif /* BCMFUZZ */ +#endif /* GCC 4.8 or newer */ +#endif /* OFFSETOF */ + +#ifndef CONTAINEROF +#define CONTAINEROF(ptr, type, member) ((type *)((char *)(ptr) - OFFSETOF(type, member))) +#endif /* CONTAINEROF */ + +/* substruct size up to and including a member of the struct */ +#ifndef STRUCT_SIZE_THROUGH +#define STRUCT_SIZE_THROUGH(sptr, fname) \ + (((uint8*)&((sptr)->fname) - (uint8*)(sptr)) + sizeof((sptr)->fname)) +#endif + +/* Extracting the size of element in a structure */ +#define SIZE_OF(type, field) sizeof(((type *)0)->field) + +/* Extracting the size of pointer element in a structure */ +#define SIZE_OF_PV(type, pfield) sizeof(*((type *)0)->pfield) + +#ifndef ARRAYSIZE +#define ARRAYSIZE(a) (uint32)(sizeof(a) / sizeof(a[0])) +#endif + +#ifndef ARRAYLAST /* returns pointer to last array element */ +#define ARRAYLAST(a) (&a[ARRAYSIZE(a)-1]) +#endif + +/* Calculates the required pad size. This is mainly used in register structures */ +#define PADSZ(start, end) ((((end) - (start)) / 4) + 1) + +/* Reference a function; used to prevent a static function from being optimized out */ +extern void *_bcmutils_dummy_fn; +#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f)) + +/* bit map related macros */ +#ifndef setbit +#ifndef NBBY /* the BSD family defines NBBY */ +#define NBBY 8 /* 8 bits per byte */ +#endif /* #ifndef NBBY */ +#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS +extern void setbit(void *array, uint bit); +extern void clrbit(void *array, uint bit); +extern bool isset(const void *array, uint bit); +extern bool isclr(const void *array, uint bit); +#else +#define setbit(a, i) (((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY)) +#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY))) +#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) +#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0) +#endif +#endif /* setbit */ + +/* read/write/clear a field of consecutive bits in an octet array.
+ * 'addr' is the octet array's start byte address + * 'size' is the octet array's byte size + * 'stbit' is the value's start bit offset + * 'nbits' is the value's bit size + * This set of utilities is for convenience. Don't use them + * in time critical/data path as there's a great overhead in them. + */ +void setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val); +uint32 getbits(const uint8 *addr, uint size, uint stbit, uint nbits); +#define clrbits(addr, size, stbit, nbits) setbits(addr, size, stbit, nbits, 0) + +extern void set_bitrange(void *array, uint start, uint end, uint maxbit); +extern void clr_bitrange(void *array, uint start, uint end, uint maxbit); +extern void set_bitrange_u32(void *array, uint start, uint end, uint maxbit); +extern void clr_bitrange_u32(void *array, uint start, uint end, uint maxbit); + +extern int bcm_find_fsb(uint32 num); + +#define isbitset(a, i) (((a) & (1 << (i))) != 0) + +#if defined DONGLEBUILD +#define NBITS(type) (sizeof(type) * 8) +#else +#define NBITS(type) ((uint32)(sizeof(type) * 8)) +#endif /* DONGLEBUILD */ +#define NBITVAL(nbits) (1 << (nbits)) +#define MAXBITVAL(nbits) ((1 << (nbits)) - 1) +#define NBITMASK(nbits) MAXBITVAL(nbits) +#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8) + +enum { + BCM_FMT_BASE32 +}; +typedef int bcm_format_t; + +/* encodes using specified format and returns length of output written on success + * or a status code BCME_XX on failure. Input and output buffers may overlap. + * input will be advanced to the position where the function stopped. + * out value of in_len will specify the number of processed input bytes. + * on input pad_off represents the number of bits (MSBs of the first output byte) + * to preserve and on output number of pad bits (LSBs) set to 0 in the output. + */ +int bcm_encode(uint8 **in, uint *in_len, bcm_format_t fmt, + uint *pad_off, uint8 *out, uint out_size); + +/* decodes input in specified format, returns length of output written on success + * or a status code BCME_XX on failure. Input and output buffers may overlap. + * input will be advanced to the position where the function stopped. + * out value of in_len will specify the number of processed input bytes. + * on input pad_off represents the number of bits (MSBs of the first output byte) + * to preserve and on output number of pad bits (LSBs) set to 0 in the output. + */ +int bcm_decode(const uint8 **in, uint *in_len, bcm_format_t fmt, + uint *pad_off, uint8 *out, uint out_size);
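+
+/*
+ * Editor's note: illustrative sketch added during review, not part of the
+ * original patch. It shows how the cursor-style in/in_len arguments of
+ * bcm_encode()/bcm_decode() (above) are meant to be consumed, per the
+ * comments; the buffer sizes and round-trip detail are assumptions.
+ */
+#if 0 /* example only; never compiled */
+static void
+example_base32_roundtrip(void)
+{
+	uint8 raw[5] = { 0xde, 0xad, 0xbe, 0xef, 0x00 };
+	uint8 enc[16], dec[8];
+	uint8 *in = raw;
+	const uint8 *enc_in = enc;
+	uint in_len = sizeof(raw), pad = 0;
+	int enc_len, dec_len;
+
+	/* on success 'in' is advanced and 'in_len' holds the bytes consumed */
+	enc_len = bcm_encode(&in, &in_len, BCM_FMT_BASE32, &pad, enc, sizeof(enc));
+	if (enc_len < 0)
+		return; /* BCME_xx status */
+
+	in_len = (uint)enc_len;
+	pad = 0;
+	dec_len = bcm_decode(&enc_in, &in_len, BCM_FMT_BASE32, &pad, dec, sizeof(dec));
+}
+#endif /* example only */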
+ +extern void bcm_bitprint32(const uint32 u32); + +/* + * ---------------------------------------------------------------------------- + * Multiword map of 2bits, nibbles + * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val) + * getbit2 getbit4 (void *ptr, uint32 ix) + * ---------------------------------------------------------------------------- + */ + +#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK) \ +static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val) \ +{ \ + uint32 *addr = (uint32 *)ptr; \ + uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */ \ + uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */ \ + uint32 mask = (MSK << pos); \ + uint32 tmp = *a & ~mask; \ + *a = tmp | (val << pos); \ +} \ +static INLINE uint32 getbit##NB(void *ptr, uint32 ix) \ +{ \ + uint32 *addr = (uint32 *)ptr; \ + uint32 *a = addr + (ix >> RSH); \ + uint32 pos = (ix & OFF) << LSH; \ + return ((*a >> pos) & MSK); \ +} + +DECLARE_MAP_API(2, 4, 1, 15u, 0x0003u) /* setbit2() and getbit2() */ +DECLARE_MAP_API(4, 3, 2, 7u, 0x000Fu) /* setbit4() and getbit4() */ +DECLARE_MAP_API(8, 2, 3, 3u, 0x00FFu) /* setbit8() and getbit8() */ + +/* basic mux operation - can be optimized on several architectures */ +#define MUX(pred, true, false) ((pred) ? (true) : (false)) + +/* modulo inc/dec - assumes x E [0, bound - 1] */ +#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1) +#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1) + +/* modulo inc/dec, bound = 2^k */ +#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1)) +#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1)) + +/* modulo add/sub - assumes x, y E [0, bound - 1] */ +#define MODADD(x, y, bound) \ + MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y)) +#define MODSUB(x, y, bound) \ + MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y)) + +/* modulo add/sub, bound = 2^k */ +#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1)) +#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1)) + +/* crc defines */ +#define CRC8_INIT_VALUE 0xffu /* Initial CRC8 checksum value */ +#define CRC8_GOOD_VALUE 0x9fu /* Good final CRC8 checksum value */ +#define CRC16_INIT_VALUE 0xffffu /* Initial CRC16 checksum value */ +#define CRC16_GOOD_VALUE 0xf0b8u /* Good final CRC16 checksum value */ +#define CRC32_INIT_VALUE 0xffffffffu /* Initial CRC32 checksum value */ +#define CRC32_GOOD_VALUE 0xdebb20e3u /* Good final CRC32 checksum value */ + +#ifdef DONGLEBUILD +#define MACF "MACADDR:%08x%04x" +#define ETHERP_TO_MACF(ea) (uint32)bcm_ether_ntou64(ea), \ + (uint32)(bcm_ether_ntou64(ea) >> 32) + +#define CONST_ETHERP_TO_MACF(ea) ETHERP_TO_MACF(ea) + +#define ETHER_TO_MACF(ea) ETHERP_TO_MACF(&ea) + +#else +/* use for direct output of MAC address in printf etc */ +#define MACF "%02x:%02x:%02x:%02x:%02x:%02x" +#define ETHERP_TO_MACF(ea) ((const struct ether_addr *) (ea))->octet[0], \ + ((const struct ether_addr *) (ea))->octet[1], \ + ((const struct ether_addr *) (ea))->octet[2], \ + ((const struct ether_addr *) (ea))->octet[3], \ + ((const struct ether_addr *) (ea))->octet[4], \ + ((const struct ether_addr *) (ea))->octet[5] + +#define CONST_ETHERP_TO_MACF(ea) ETHERP_TO_MACF(ea) + +#define ETHER_TO_MACF(ea) (ea).octet[0], \ + (ea).octet[1], \ + (ea).octet[2], \ + (ea).octet[3], \ + (ea).octet[4], \ + (ea).octet[5] +#endif /* DONGLEBUILD */ +/* use only for debug, the string length can be changed + * If
you want to use this macro in the actual logic, + * USE MACF instead + */ +#if !defined(SIMPLE_MAC_PRINT) +#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC2STRDBG(ea) ((const uint8*)(ea))[0], \ + ((const uint8*)(ea))[1], \ + ((const uint8*)(ea))[2], \ + ((const uint8*)(ea))[3], \ + ((const uint8*)(ea))[4], \ + ((const uint8*)(ea))[5] +#else +#define MACDBG "%02x:xx:xx:xx:x%x:%02x" +#define MAC2STRDBG(ea) ((const uint8*)(ea))[0], \ + (((const uint8*)(ea))[4] & 0xf), \ + ((const uint8*)(ea))[5] +#endif /* SIMPLE_MAC_PRINT */ + +#define MACOUIDBG "%02x:%x:%02x" +#define MACOUI2STRDBG(ea) ((const uint8*)(ea))[0], \ + ((const uint8*)(ea))[1] & 0xf, \ + ((const uint8*)(ea))[2] + +#define MACOUI "%02x:%02x:%02x" +#define MACOUI2STR(ea) (ea)[0], (ea)[1], (ea)[2] + +/* bcm_format_flags() bit description structure */ +typedef struct bcm_bit_desc { + uint32 bit; + const char* name; +} bcm_bit_desc_t; + +/* bcm_format_field */ +typedef struct bcm_bit_desc_ex { + uint32 mask; + const bcm_bit_desc_t *bitfield; +} bcm_bit_desc_ex_t; + +/* buffer length for ethernet address from bcm_ether_ntoa() */ +#define ETHER_ADDR_STR_LEN 18u /* 18-bytes of Ethernet address buffer length */ + +static INLINE uint32 /* 32bit word aligned xor-32 */ +bcm_compute_xor32(volatile uint32 *u32_val, int num_u32) +{ + int idx; + uint32 xor32 = 0; + for (idx = 0; idx < num_u32; idx++) + xor32 ^= *(u32_val + idx); + return xor32; +} + +/* crypto utility function */ +/* 128-bit xor: *dst = *src1 xor *src2. dst, src1 and src2 may have any alignment */ +static INLINE void +xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst) +{ + if ( +#ifdef __i386__ + 1 || +#endif + (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) { + /* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */ + /* x86 supports unaligned. This version runs 6x-9x faster on x86. */ + ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0]; + ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1]; + ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2]; + ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3]; + } else { + /* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */ + int k; + for (k = 0; k < 16; k++) + dst[k] = src1[k] ^ src2[k]; + } +} + +/* externs */ +/* crc */ +uint8 hndcrc8(const uint8 *p, uint nbytes, uint8 crc); +uint16 hndcrc16(const uint8 *p, uint nbytes, uint16 crc); +uint32 hndcrc32(const uint8 *p, uint nbytes, uint32 crc); + +/* format/print */ +/* print out the value a field has: fields may have 1-32 bits and may hold any value */ +extern uint bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, uint len); +/* print out which bits in flags are set */ +extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, uint len);
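+
+/*
+ * Editor's note: illustrative sketch added during review, not part of the
+ * original patch. bcm_format_flags() above takes a table of {bit, name}
+ * descriptors and prints the names of the bits set in 'flags'. The table,
+ * names, and the terminator convention here are made up for illustration.
+ */
+#if 0 /* example only; never compiled */
+static const bcm_bit_desc_t example_flag_names[] = {
+	{ 0x01, "up" },
+	{ 0x02, "promisc" },
+	{ 0x04, "loopback" },
+	{ 0, NULL } /* terminator */
+};
+/* char buf[64]; bcm_format_flags(example_flag_names, 0x05, buf, sizeof(buf)); */
+/* buf would name bits 0x01 and 0x04, e.g. "up loopback" */
+#endif /* example only */
+
+/* print out which bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset.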
*/ +int bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz, + const uint8 *addr, uint size, char *buf, uint len); + +extern int bcm_format_hex(char *str, const void *bytes, uint len); + +#ifdef BCMDBG +extern void deadbeef(void *p, uint len); +#endif +extern const char *bcm_crypto_algo_name(uint algo); +extern char *bcm_chipname(uint chipid, char *buf, uint len); +extern char *bcm_brev_str(uint32 brev, char *buf); +extern void printbig(char *buf); +extern void prhex(const char *msg, const uchar *buf, uint len); +extern void prhexstr(const char *prefix, const uint8 *buf, uint len, bool newline); + +/* bcmerror */ +extern const char *bcmerrorstr(int bcmerror); + +#if defined(BCMDBG) || defined(WLMSG_ASSOC) +/* get 802.11 frame name based on frame kind - see frame types FC_.. in 802.11.h */ +const char *bcm_80211_fk_name(uint fk); +#else +#define bcm_80211_fk_names(_x) "" +#endif + +extern int wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie); + +/* multi-bool data type: set of bools, mbool is true if any is set */ +typedef uint32 mbool; +#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */ +#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */ +#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* TRUE if one bool is set */ +#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val))) + +/* generic datastruct to help dump routines */ +struct fielddesc { + const char *nameandfmt; + uint32 offset; + uint32 len; +}; + +extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size); +#define bcm_bsize(b) ((b)->size) +#define bcm_breset(b) do {bcm_binit(b, (b)->origbuf, (b)->origsize);} while (0) +extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, + const uint8 *buf, uint len); + +extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount); +extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes); +extern void bcm_print_bytes(const char *name, const uchar *cdata, uint len); + +typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset); +extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str, + char *buf, uint32 bufsize); +extern uint bcm_bitcount(const uint8 *bitmap, uint bytelength); + +extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); + +/* power conversion */ +extern uint16 bcm_qdbm_to_mw(uint8 qdbm); +extern uint8 bcm_mw_to_qdbm(uint16 mw); +extern uint bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint len); + +#ifdef BCMDBG_PKT /* pkt logging for debugging */ +#define PKTLIST_SIZE 3000 + +#ifdef BCMDBG_PTRACE +#define PKTTRACE_MAX_BYTES 12 +#define PKTTRACE_MAX_BITS (PKTTRACE_MAX_BYTES * NBBY) + +enum pkttrace_info { + PKTLIST_PRECQ, /* Pkt in Prec Q */ + PKTLIST_FAIL_PRECQ, /* Pkt failed to Q in PRECQ */ + PKTLIST_DMAQ, /* Pkt in DMA Q */ + PKTLIST_MI_TFS_RCVD, /* Received TX status */ + PKTLIST_TXDONE, /* Pkt TX done */ + PKTLIST_TXFAIL, /* Pkt TX failed */ + PKTLIST_PKTFREE, /* pkt is freed */ + PKTLIST_PRECREQ, /* Pkt requeued in precq */ + PKTLIST_TXFIFO /* To trace in wlc_fifo */ +}; +#endif /* BCMDBG_PTRACE */ + +typedef struct pkt_dbginfo { + int line; + char *file; + void *pkt; +#ifdef BCMDBG_PTRACE + char pkt_trace[PKTTRACE_MAX_BYTES]; +#endif /* BCMDBG_PTRACE */ +} pkt_dbginfo_t; + +typedef struct { + pkt_dbginfo_t list[PKTLIST_SIZE]; /* List of pointers to packets */ + uint16 count; /* Total count of the packets */ +} pktlist_info_t; + +extern void pktlist_add(pktlist_info_t *pktlist, void 
*p, int len, char *file); +extern void pktlist_remove(pktlist_info_t *pktlist, void *p); +extern char* pktlist_dump(pktlist_info_t *pktlist, char *buf); +#ifdef BCMDBG_PTRACE +extern void pktlist_trace(pktlist_info_t *pktlist, void *pkt, uint16 bit); +#endif /* BCMDBG_PTRACE */ +#endif /* BCMDBG_PKT */ +unsigned int process_nvram_vars(char *varbuf, unsigned int len); +bool replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable, + unsigned int *datalen); + +/* trace any object allocation / free, with / without features (flags) set to the object */ +#if (defined(DONGLEBUILD) && defined(BCMDBG_MEM) && (!defined(BCM_OBJECT_TRACE))) +#define BCM_OBJECT_TRACE +#endif /* (defined(DONGLEBUILD) && defined(BCMDBG_MEM) && (!defined(BCM_OBJECT_TRACE))) */ + +#define BCM_OBJDBG_ADD 1 +#define BCM_OBJDBG_REMOVE 2 +#define BCM_OBJDBG_ADD_PKT 3 + +/* object feature: set or clear flags */ +#define BCM_OBJECT_FEATURE_FLAG 1 +#define BCM_OBJECT_FEATURE_PKT_STATE 2 +/* object feature: flag bits */ +#define BCM_OBJECT_FEATURE_0 (1 << 0) +#define BCM_OBJECT_FEATURE_1 (1 << 1) +#define BCM_OBJECT_FEATURE_2 (1 << 2) +/* object feature: clear flag bits field set with this flag */ +#define BCM_OBJECT_FEATURE_CLEAR (1 << 31) +#if defined(BCM_OBJECT_TRACE) && !defined(BINCMP) +#define bcm_pkt_validate_chk(obj, func) do { \ + void * pkttag; \ + bcm_object_trace_chk(obj, 0, 0, \ + func, __LINE__); \ + if ((pkttag = PKTTAG(obj))) { \ + bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \ + func, __LINE__); \ + } \ +} while (0) +extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line); +extern void bcm_object_trace_upd(void *obj, void *obj_new); +extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn, + const char *caller, int line); +extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value); +extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value); +extern void bcm_object_trace_init(void); +extern void bcm_object_trace_deinit(void); +#else +#define bcm_pkt_validate_chk(obj, func) +#define bcm_object_trace_opr(a, b, c, d) +#define bcm_object_trace_upd(a, b) +#define bcm_object_trace_chk(a, b, c, d, e) +#define bcm_object_feature_set(a, b, c) +#define bcm_object_feature_get(a, b, c) +#define bcm_object_trace_init() +#define bcm_object_trace_deinit() +#endif /* BCM_OBJECT_TRACE && !BINCMP */ + +/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */ + +/* Table driven count set bits. 
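+ * Example (illustrative): bcm_cntsetbits(0x0000000Fu) sums the four per-byte
+ * table lookups and returns 4; bcm_cntsetbits(0xFFFFFFFFu) returns 32.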
*/
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+ #define B2(n) n, n + 1, n + 1, n + 2
+ #define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+ #define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+ B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32arg)
+{
+ /* function local scope declaration of const _CSBTBL[] */
+ const uint8 * p = (const uint8 *)&u32arg;
+ /* uint32 cast to avoid uint8 being promoted to int for arithmetic operation */
+ return ((uint32)_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32arg)
+{
+ int shifts = 0;
+ while (u32arg) {
+ shifts++; u32arg >>= 1;
+ }
+ return (32 - shifts);
+}
+
+typedef struct bcm_rand_metadata {
+ uint32 count; /* number of random numbers in bytes */
+ uint32 signature; /* host fills it in, FW verifies before reading rand */
+} bcm_rand_metadata_t;
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz" : MIPS, ARM
+ * "cntlzw" : PowerPC
+ * "BSF" : x86
+ * "lzcnt" : AMD, SPARC
+ */
+
+#if defined(__arm__)
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32arg)
+{
+#if defined(__USE_ASM_CLZ__)
+ int zeros;
+ __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32arg));
+ return zeros;
+#else /* C equivalent */
+ return C_bcm_count_leading_zeros(u32arg);
+#endif /* C equivalent */
+}
+
+/*
+ * Macro to count leading zeroes
+ *
+ */
+#if defined(__GNUC__)
+#define CLZ(x) __builtin_clzl(x)
+#elif defined(__arm__)
+#define CLZ(x) __clz(x)
+#else
+#define CLZ(x) bcm_count_leading_zeros(x)
+#endif /* __GNUC__ */
+
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap; /* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL ((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX ((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is in use or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
+/* End - Multiword bitmap based small Id allocator.
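+ * Example usage (illustrative sketch; 'osh' is assumed to be a valid osl_t
+ * handle owned by the caller):
+ * struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 64u);
+ * uint32 idx = bcm_mwbmap_alloc(map);
+ * if (idx != BCM_MWBMAP_INVALID_IDX)
+ * bcm_mwbmap_free(map, idx);
+ * bcm_mwbmap_fini(osh, map);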
*/ + +/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */ + +#define ID8_INVALID 0xFFu +#define ID16_INVALID 0xFFFFu +#define ID32_INVALID 0xFFFFFFFFu +#define ID16_UNDEFINED ID16_INVALID + +/* + * Construct a 16bit id allocator, managing 16bit ids in the range: + * [start_val16 .. start_val16+total_ids) + * Note: start_val16 is inclusive. + * Returns an opaque handle to the 16bit id allocator. + */ +extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16); +extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl); +extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16); + +/* Allocate a unique 16bit id */ +extern uint16 id16_map_alloc(void * id16_map_hndl); + +/* Free a 16bit id value into the id16 allocator */ +extern void id16_map_free(void * id16_map_hndl, uint16 val16); + +/* Get the number of failures encountered during id allocation. */ +extern uint32 id16_map_failures(void * id16_map_hndl); + +/* Audit the 16bit id allocator state. */ +extern bool id16_map_audit(void * id16_map_hndl); +/* End - Simple 16bit Id Allocator. */ +#endif /* BCMDRIVER */ + +void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset); +void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset); + +#define MASK_32_BITS (~0) +#define MASK_8_BITS ((1 << 8) - 1) + +#define EXTRACT_LOW32(num) (uint32)(num & MASK_32_BITS) +#define EXTRACT_HIGH32(num) (uint32)(((uint64)num >> 32) & MASK_32_BITS) + +#define MAXIMUM(a, b) ((a > b) ? a : b) +#define MINIMUM(a, b) ((a < b) ? a : b) +#define LIMIT(x, min, max) ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x))) + +/* calculate checksum for ip header, tcp / udp header / data */ +uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum); + +#ifndef _dll_t_ +#define _dll_t_ +/* + * ----------------------------------------------------------------------------- + * Double Linked List Macros + * ----------------------------------------------------------------------------- + * + * All dll operations must be performed on a pre-initialized node. + * Inserting an uninitialized node into a list effectively initialized it. + * + * When a node is deleted from a list, you may initialize it to avoid corruption + * incurred by double deletion. You may skip initialization if the node is + * immediately inserted into another list. + * + * By placing a dll_t element at the start of a struct, you may cast a dll_t * + * to the struct or vice versa. + * + * Example of declaring an initializing someList and inserting nodeA, nodeB + * + * typedef struct item { + * dll_t node; + * int someData; + * } Item_t; + * Item_t nodeA, nodeB, nodeC; + * nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333; + * + * dll_t someList; + * dll_init(&someList); + * + * dll_append(&someList, (dll_t *) &nodeA); + * dll_prepend(&someList, &nodeB.node); + * dll_insert((dll_t *)&nodeC, &nodeA.node); + * + * dll_delete((dll_t *) &nodeB); + * + * Example of a for loop to walk someList of node_p + * + * extern void mydisplay(Item_t * item_p); + * + * dll_t * item_p, * next_p; + * for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p); + * item_p = next_p) + * { + * next_p = dll_next_p(item_p); + * ... use item_p at will, including removing it from list ... 
+ * mydisplay((PItem_t)item_p); + * } + * + * ----------------------------------------------------------------------------- + */ +typedef struct dll { + struct dll * next_p; + struct dll * prev_p; +} dll_t; + +static INLINE void +dll_init(dll_t *node_p) +{ + node_p->next_p = node_p; + node_p->prev_p = node_p; +} +/* dll macros returing a pointer to dll_t */ + +static INLINE dll_t * +BCMPOSTTRAPFN(dll_head_p)(dll_t *list_p) +{ + return list_p->next_p; +} + +static INLINE dll_t * +BCMPOSTTRAPFN(dll_tail_p)(dll_t *list_p) +{ + return (list_p)->prev_p; +} + +static INLINE dll_t * +BCMPOSTTRAPFN(dll_next_p)(dll_t *node_p) +{ + return (node_p)->next_p; +} + +static INLINE dll_t * +BCMPOSTTRAPFN(dll_prev_p)(dll_t *node_p) +{ + return (node_p)->prev_p; +} + +static INLINE bool +BCMPOSTTRAPFN(dll_empty)(dll_t *list_p) +{ + return ((list_p)->next_p == (list_p)); +} + +static INLINE bool +BCMPOSTTRAPFN(dll_end)(dll_t *list_p, dll_t * node_p) +{ + return (list_p == node_p); +} + +/* inserts the node new_p "after" the node at_p */ +static INLINE void +BCMPOSTTRAPFN(dll_insert)(dll_t *new_p, dll_t * at_p) +{ + new_p->next_p = at_p->next_p; + new_p->prev_p = at_p; + at_p->next_p = new_p; + (new_p->next_p)->prev_p = new_p; +} + +static INLINE void +BCMPOSTTRAPFN(dll_append)(dll_t *list_p, dll_t *node_p) +{ + dll_insert(node_p, dll_tail_p(list_p)); +} + +static INLINE void +BCMPOSTTRAPFN(dll_prepend)(dll_t *list_p, dll_t *node_p) +{ + dll_insert(node_p, list_p); +} + +/* deletes a node from any list that it "may" be in, if at all. */ +static INLINE void +BCMPOSTTRAPFN(dll_delete)(dll_t *node_p) +{ + node_p->prev_p->next_p = node_p->next_p; + node_p->next_p->prev_p = node_p->prev_p; +} +#endif /* ! defined(_dll_t_) */ + +/* Elements managed in a double linked list */ + +typedef struct dll_pool { + dll_t free_list; + uint16 free_count; + uint16 elems_max; + uint16 elem_size; + dll_t elements[1]; +} dll_pool_t; + +dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size); +void * dll_pool_alloc(dll_pool_t * dll_pool_p); +void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p); +void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p); +typedef void (* dll_elem_dump)(void * elem_p); +#ifdef BCMDBG +void dll_pool_dump(dll_pool_t * dll_pool_p, dll_elem_dump dump); +#endif +void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size); + +int valid_bcmerror(int e); +/* Stringify macro definition */ +#define BCM_STRINGIFY(s) #s +/* Used to pass in a macro variable that gets expanded and then stringified */ +#define BCM_EXTENDED_STRINGIFY(s) BCM_STRINGIFY(s) + +/* calculate IPv4 header checksum + * - input ip points to IP header in network order + * - output cksum is in network order + */ +uint16 ipv4_hdr_cksum(uint8 *ip, uint ip_len); + +/* calculate IPv4 TCP header checksum + * - input ip and tcp points to IP and TCP header in network order + * - output cksum is in network order + */ +uint16 ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len); + +/* calculate IPv6 TCP header checksum + * - input ipv6 and tcp points to IPv6 and TCP header in network order + * - output cksum is in network order + */ +uint16 ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len); + +#ifdef __cplusplus + } +#endif + +/* #define DEBUG_COUNTER */ +#ifdef DEBUG_COUNTER +#define CNTR_TBL_MAX 10 +typedef struct _counter_tbl_t { + char name[16]; /* name of this counter table */ + uint32 prev_log_print; /* Internal use. 
Timestamp of the previous log print */ + uint log_print_interval; /* Desired interval to print logs in ms */ + uint needed_cnt; /* How many counters need to be used */ + uint32 cnt[CNTR_TBL_MAX]; /* Counting entries to increase at desired places */ + bool enabled; /* Whether to enable printing log */ +} counter_tbl_t; + +/* How to use + Eg.: In dhd_linux.c + cnt[0]: How many times dhd_start_xmit() was called in every 1sec. + cnt[1]: How many bytes were requested to be sent in every 1sec. + +++ static counter_tbl_t xmit_tbl = {"xmit", 0, 1000, 2, {0,}, 1}; + + int + dhd_start_xmit(struct sk_buff *skb, struct net_device *net) + { + .......... +++ counter_printlog(&xmit_tbl); +++ xmit_tbl.cnt[0]++; + + ifp = dhd->iflist[ifidx]; + datalen = PKTLEN(dhdp->osh, skb); + +++ xmit_tbl.cnt[1] += datalen; + ............ + + ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf); + ........... + } +*/ + +void counter_printlog(counter_tbl_t *ctr_tbl); +#endif /* DEBUG_COUNTER */ + +#if defined(__GNUC__) +#define CALL_SITE __builtin_return_address(0) +#elif defined(_WIN32) +#define CALL_SITE _ReturnAddress() +#else +#define CALL_SITE ((void*) 0) +#endif +#ifdef SHOW_LOGTRACE +#define TRACE_LOG_BUF_MAX_SIZE 1700 +#define RTT_LOG_BUF_MAX_SIZE 1700 +#define BUF_NOT_AVAILABLE 0 +#define NEXT_BUF_NOT_AVAIL 1 +#define NEXT_BUF_AVAIL 2 + +typedef struct trace_buf_info { + int availability; + int size; + char buf[TRACE_LOG_BUF_MAX_SIZE]; +} trace_buf_info_t; +#endif /* SHOW_LOGTRACE */ + +enum dump_dongle_e { + DUMP_DONGLE_COREREG = 0, + DUMP_DONGLE_D11MEM +}; + +typedef struct { + uint32 type; /**< specifies e.g dump of d11 memory, use enum dump_dongle_e */ + uint32 index; /**< iterator1, specifies core index or d11 memory index */ + uint32 offset; /**< iterator2, byte offset within register set or memory */ +} dump_dongle_in_t; + +typedef struct { + uint32 address; /**< e.g. backplane address of register */ + uint32 id; /**< id, e.g. core id */ + uint32 rev; /**< rev, e.g. core rev */ + uint32 n_bytes; /**< nbytes in array val[] */ + uint32 val[1]; /**< out: values that were read out of registers or memory */ +} dump_dongle_out_t; + +extern uint32 sqrt_int(uint32 value); + +extern uint8 bcm_get_ceil_pow_2(uint val); + +#ifdef BCMDRIVER +/* structures and routines to process variable sized data */ +typedef struct var_len_data { + uint32 vlen; + uint8 *vdata; +} var_len_data_t; + +int bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size); +int bcm_vdata_free(osl_t *osh, var_len_data_t *vld); +#if defined(PRIVACY_MASK) +void bcm_ether_privacy_mask(struct ether_addr *addr); +#else +#define bcm_ether_privacy_mask(addr) +#endif /* PRIVACY_MASK */ +#endif /* BCMDRIVER */ + +/* Count the number of elements in an array that do not match the given value */ +extern int array_value_mismatch_count(uint8 value, uint8 *array, int array_size); +/* Count the number of non-zero elements in an uint8 array */ +extern int array_nonzero_count(uint8 *array, int array_size); +/* Count the number of non-zero elements in an int16 array */ +extern int array_nonzero_count_int16(int16 *array, int array_size); +/* Count the number of zero elements in an uint8 array */ +extern int array_zero_count(uint8 *array, int array_size); +/* Validate a uint8 ordered array. Assert if invalid. */ +extern int verify_ordered_array_uint8(uint8 *array, int array_size, uint8 range_lo, uint8 range_hi); +/* Validate a int16 configuration array that need not be zero-terminated. Assert if invalid. 
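+ * Example (illustrative): an ordered table such as { -10, 0, 10, 20 } with
+ * range_lo = -20 and range_hi = 30 is accepted by the check.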
*/ +extern int verify_ordered_array_int16(int16 *array, int array_size, int16 range_lo, int16 range_hi); +/* Validate all values in an array are in range */ +extern int verify_array_values(uint8 *array, int array_size, + int range_lo, int range_hi, bool zero_terminated); + +/* To unwind from the trap_handler. */ +extern void (*const print_btrace_int_fn)(int depth, uint32 pc, uint32 lr, uint32 sp); +extern void (*const print_btrace_fn)(int depth); +#define PRINT_BACKTRACE(depth) if (print_btrace_fn) print_btrace_fn(depth) +#define PRINT_BACKTRACE_INT(depth, pc, lr, sp) \ + if (print_btrace_int_fn) print_btrace_int_fn(depth, pc, lr, sp) + +/* FW Signing - only in bootloader builds, never in dongle FW builds */ +#ifdef WL_FWSIGN + #define FWSIGN_ENAB() (1) +#else + #define FWSIGN_ENAB() (0) +#endif /* WL_FWSIGN */ + +/* Utilities for reading SROM/SFlash vars */ + +typedef struct varbuf { + char *base; /* pointer to buffer base */ + char *buf; /* pointer to current position */ + unsigned int size; /* current (residual) size in bytes */ +} varbuf_t; + +/** Initialization of varbuf structure */ +void varbuf_init(varbuf_t *b, char *buf, uint size); +/** append a null terminated var=value string */ +int varbuf_append(varbuf_t *b, const char *fmt, ...); +#if defined(BCMDRIVER) +int initvars_table(osl_t *osh, char *start, char *end, char **vars, uint *count); +#endif + +/* Count the number of trailing zeros in uint32 val + * Applying unary minus to unsigned value is intentional, + * and doesn't influence counting of trailing zeros + */ +static INLINE uint32 +count_trailing_zeros(uint32 val) +{ +#ifdef BCMDRIVER + uint32 c = (uint32)CLZ(val & ((uint32)(-(int)val))); +#else + uint32 c = (uint32)C_bcm_count_leading_zeros(val & ((uint32)(-(int)val))); +#endif /* BCMDRIVER */ + return val ? 31u - c : c; +} + +/** Size in bytes of data block, defined by struct with last field, declared as + * one/zero element vector - such as wl_uint32_list_t or bcm_xtlv_cbuf_s. + * Arguments: + * list - address of data block (value is ignored, only type is important) + * last_var_len_field - name of last field (usually declared as ...[] or ...[1]) + * num_elems - number of elements in data block + * Example: + * wl_uint32_list_t *list; + * WL_VAR_LEN_STRUCT_SIZE(list, element, 10); // Size in bytes of 10-element list + */ +#define WL_VAR_LEN_STRUCT_SIZE(list, last_var_len_field, num_elems) \ + ((size_t)((const char *)&((list)->last_var_len_field) - (const char *)(list)) + \ + (sizeof((list)->last_var_len_field[0]) * (size_t)(num_elems))) + +int buf_shift_right(uint8 *buf, uint16 len, uint8 bits); +#endif /* _bcmutils_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_channels.h b/bcmdhd.101.10.361.x/include/bcmwifi_channels.h new file mode 100755 index 0000000..d3744de --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmwifi_channels.h @@ -0,0 +1,888 @@ +/* + * Misc utility routines for WL and Apps + * This header file housing the define and function prototype use by + * both the wl driver, tools & Apps. + * + * Copyright (C) 2020, Broadcom. 
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmwifi_channels_h_
+#define _bcmwifi_channels_h_
+
+/* A chanspec holds the channel number, band, bandwidth and primary 20MHz sub-band */
+typedef uint16 chanspec_t;
+typedef uint16 chanspec_band_t;
+typedef uint16 chanspec_bw_t;
+typedef uint16 chanspec_subband_t;
+
+/* channel defines */
+#define CH_80MHZ_APART 16u
+#define CH_40MHZ_APART 8u
+#define CH_20MHZ_APART 4u
+#define CH_10MHZ_APART 2u
+#define CH_5MHZ_APART 1u /* 2G band channels are 5 MHz apart */
+#define CH_160MHZ_APART (32u * CH_5MHZ_APART) /* 32 5MHz-spaces */
+
+#define CH_MIN_2G_CHANNEL 1u /* Min channel in 2G band */
+#define CH_MAX_2G_CHANNEL 14u /* Max channel in 2G band */
+#define CH_MIN_2G_40M_CHANNEL 3u /* Min 40MHz center channel in 2G band */
+#define CH_MAX_2G_40M_CHANNEL 11u /* Max 40MHz center channel in 2G band */
+
+#define CH_MIN_6G_CHANNEL 1u /* Min 20MHz channel in 6G band */
+#define CH_MAX_6G_CHANNEL 253u /* Max 20MHz channel in 6G band */
+#define CH_MIN_6G_40M_CHANNEL 3u /* Min 40MHz center channel in 6G band */
+#define CH_MAX_6G_40M_CHANNEL 227u /* Max 40MHz center channel in 6G band */
+#define CH_MIN_6G_80M_CHANNEL 7u /* Min 80MHz center channel in 6G band */
+#define CH_MAX_6G_80M_CHANNEL 215u /* Max 80MHz center channel in 6G band */
+#define CH_MIN_6G_160M_CHANNEL 15u /* Min 160MHz center channel in 6G band */
+#define CH_MAX_6G_160M_CHANNEL 207u /* Max 160MHz center channel in 6G band */
+#define CH_MIN_6G_240M_CHANNEL 23u /* Min 240MHz center channel in 6G band */
+#define CH_MAX_6G_240M_CHANNEL 167u /* Max 240MHz center channel in 6G band */
+#define CH_MIN_6G_320M_CHANNEL 31u /* Min 320MHz center channel in 6G band */
+#define CH_MAX_6G_320M_CHANNEL 199u /* Max 320MHz center channel in 6G band */
+
+/* maximum # channels the s/w supports */
+#define MAXCHANNEL 254u /* max # supported channels.
+ * DO NOT MAKE > 255: channels are uint8's all over
+ */
+#define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */
+
+#define INVCHANNEL 255u /* error value for a bad channel */
+
+/* length of channel vector bitmap is the MAXCHANNEL we want to handle rounded up to a byte */
+/* The actual CHANVEC_LEN fix leads to a high static memory impact
+* in all projects wherein the previous CHANVEC_LEN definition is used.
+*
+* Retaining the previous definition under the MAXCHNL_ROM_COMPAT flag.
+* All those chip programs where memory impact is observed need to define the same.
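+* For example, with MAXCHANNEL = 254u the expression ((MAXCHANNEL + (8 - 1)) / 8)
+* yields a 32-byte vector, while (MAXCHANNEL + (8 - 1) / 8) evaluates to 254,
+* since (8 - 1) / 8 is 0 in integer arithmetic.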
+*/ +#ifdef MAXCHNL_ROM_COMPAT +#define CHANVEC_LEN (MAXCHANNEL + (8 - 1) / 8) +#else +#define CHANVEC_LEN ((MAXCHANNEL + (8 - 1)) / 8) +#endif + +/* channel bitvec */ +typedef struct { + uint8 vec[CHANVEC_LEN]; /* bitvec of channels */ +} chanvec_t; + +/* make sure channel num is within valid range */ +#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM) + +#define CHSPEC_CTLOVLP(sp1, sp2, sep) \ + ((uint)ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (uint)(sep)) + +/* All builds use the new 11ac ratespec/chanspec */ +#undef D11AC_IOTYPES +#define D11AC_IOTYPES + +/* For contiguous channel bandwidth other than 240MHz/320Mhz */ +#define WL_CHANSPEC_CHAN_MASK 0x00ffu +#define WL_CHANSPEC_CHAN_SHIFT 0u + +/* For contiguous channel bandwidth >= 240MHz */ +#define WL_CHANSPEC_GE240_CHAN_MASK 0x0003u +#define WL_CHANSPEC_GE240_CHAN_SHIFT 0u + +/* For discontiguous channel bandwidth */ +#define WL_CHANSPEC_CHAN0_MASK 0x000fu +#define WL_CHANSPEC_CHAN0_SHIFT 0u +#define WL_CHANSPEC_CHAN1_MASK 0x00f0u +#define WL_CHANSPEC_CHAN1_SHIFT 4u + +/* Non-320/Non-240 Mhz channel sideband indication */ +#define WL_CHANSPEC_CTL_SB_MASK 0x0700u +#define WL_CHANSPEC_CTL_SB_SHIFT 8u +#define WL_CHANSPEC_CTL_SB_LLL 0x0000u +#define WL_CHANSPEC_CTL_SB_LLU 0x0100u +#define WL_CHANSPEC_CTL_SB_LUL 0x0200u +#define WL_CHANSPEC_CTL_SB_LUU 0x0300u +#define WL_CHANSPEC_CTL_SB_ULL 0x0400u +#define WL_CHANSPEC_CTL_SB_ULU 0x0500u +#define WL_CHANSPEC_CTL_SB_UUL 0x0600u +#define WL_CHANSPEC_CTL_SB_UUU 0x0700u +#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL +#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU +#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL + +/* channel sideband indication for frequency >= 240MHz */ +#define WL_CHANSPEC_GE240_SB_MASK 0x0780u +#define WL_CHANSPEC_GE240_SB_SHIFT 7u + +/* Bandwidth field */ +#define WL_CHANSPEC_BW_MASK 0x3800u +#define WL_CHANSPEC_BW_SHIFT 11u +#define WL_CHANSPEC_BW_320 0x0000u +#define WL_CHANSPEC_BW_160160 0x0800u +#define WL_CHANSPEC_BW_20 0x1000u +#define WL_CHANSPEC_BW_40 0x1800u +#define WL_CHANSPEC_BW_80 0x2000u +#define WL_CHANSPEC_BW_160 0x2800u +#define WL_CHANSPEC_BW_8080 0x3000u +#define WL_CHANSPEC_BW_240 0x3800u + +/* Band field */ +#define WL_CHANSPEC_BAND_MASK 0xc000u +#define WL_CHANSPEC_BAND_SHIFT 14u +#define WL_CHANSPEC_BAND_2G 0x0000u +#define WL_CHANSPEC_BAND_6G 0x4000u +#define WL_CHANSPEC_BAND_4G 0x8000u +#define WL_CHANSPEC_BAND_5G 0xc000u + +#define INVCHANSPEC 255u +#define MAX_CHANSPEC 0xFFFFu + +#define WL_CHSPEC_BW(chspec) ((chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT) +#define MAX_BW_NUM (uint8)(((WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT)) + +#define WL_CHANNEL_BAND(ch) (((uint)(ch) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G) + +/* channel defines */ +#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \ + ((channel) - CH_10MHZ_APART) : 0) +#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \ + ((channel) + CH_10MHZ_APART) : 0) + +/* pass a 80MHz channel number (uint8) to get respective LL, UU, LU, UL */ +#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? 
((channel) - 3 * CH_10MHZ_APART) : 0) +#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \ + ((channel) + 3 * CH_10MHZ_APART) : 0) +#define LU_20_SB(channel) LOWER_20_SB(channel) +#define UL_20_SB(channel) UPPER_20_SB(channel) + +#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART) +#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART) + +#ifndef CHSPEC_WLCBANDUNIT +#define CHSPEC_WLCBANDUNIT(chspec) \ + ((CHSPEC_IS5G(chspec) || CHSPEC_IS6G(chspec)) ? BAND_5G_INDEX : BAND_2G_INDEX) +#endif +#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ + WL_CHANNEL_BAND(channel)) +#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ + ((channel) + CH_20MHZ_APART) : 0) +#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ + WL_CHANNEL_BAND(channel)) +#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G) +#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G) + +/* simple MACROs to get different fields of chanspec */ +#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)) +#define CHSPEC_CHAN0(chspec) (((chspec) & WL_CHANSPEC_CHAN0_MASK) >> WL_CHANSPEC_CHAN0_SHIFT) +#define CHSPEC_CHAN1(chspec) (((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT) +#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) +#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK) +#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) +#define CHSPEC_GE240_CHAN(chspec) (((chspec) & WL_CHANSPEC_GE240_CHAN_MASK) >> \ + WL_CHANSPEC_GE240_CHAN_SHIFT) +#define CHSPEC_GE240_SB(chspec) ((chspec) & WL_CHANSPEC_GE240_SB_MASK) + +#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) +#define CHSPEC_IS20_5G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \ + CHSPEC_IS5G(chspec)) +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) +#endif +#ifndef CHSPEC_IS80 +#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80) +#endif +#ifndef CHSPEC_IS160 +#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160) +#endif +#define CHSPEC_IS8080(chspec) (FALSE) +#ifndef CHSPEC_IS320 +#ifdef WL11BE +#define CHSPEC_IS320(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_320) +#else +#define CHSPEC_IS320(chspec) (FALSE) +#endif +#endif /* CHSPEC_IS320 */ +#ifndef CHSPEC_IS240 +#ifdef WL11BE +#define CHSPEC_IS240(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_240) +#else +#define CHSPEC_IS240(chspec) (FALSE) +#endif +#endif /* CHSPEC_IS240 */ + +/* pass a center channel and get channel offset from it by 10MHz */ +#define CH_OFF_10MHZ_MULTIPLES(channel, offset) \ +((uint8) (((offset) < 0) ? \ + (((channel) > (WL_CHANSPEC_CHAN_MASK & ((uint16)((-(offset)) * CH_10MHZ_APART)))) ? \ + ((channel) + (offset) * CH_10MHZ_APART) : 0) : \ + ((((uint16)(channel) + (uint16)(offset) * CH_10MHZ_APART) < (uint16)MAXCHANNEL) ? 
\ + ((channel) + (offset) * CH_10MHZ_APART) : 0))) + +uint wf_chspec_first_20_sb(chanspec_t chspec); + +#if defined(WL_BW160MHZ) +/* pass a 160MHz center channel to get 20MHz subband channel numbers */ +#define LLL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -7) +#define LLU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -5) +#define LUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -3) +#define LUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -1) +#define ULL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 1) +#define ULU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 3) +#define UUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 5) +#define UUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 7) + +/* get lowest 20MHz sideband of a given chspec + * (works with 20, 40, 80, 160) + */ +#define CH_FIRST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS160(chspec) ? LLL_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec))))) + +/* get upper most 20MHz sideband of a given chspec + * (works with 20, 40, 80, 160) + */ +#define CH_LAST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS160(chspec) ? UUU_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec))))) + +/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband + * (works with 20, 40, 80, 160) + * resolves to 0 if called with upper most channel + */ +#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\ + ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \ + ((channel) + CH_20MHZ_APART)))) + +#else /* WL_BW160MHZ */ + +#define LLL_20_SB_160(channel) 0 +#define LLU_20_SB_160(channel) 0 +#define LUL_20_SB_160(channel) 0 +#define LUU_20_SB_160(channel) 0 +#define ULL_20_SB_160(channel) 0 +#define ULU_20_SB_160(channel) 0 +#define UUL_20_SB_160(channel) 0 +#define UUU_20_SB_160(channel) 0 + +/* get lowest 20MHz sideband of a given chspec + * (works with 20, 40, 80) + */ +#define CH_FIRST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec)))) +/* get upper most 20MHz sideband of a given chspec + * (works with 20, 40, 80, 160) + */ +#define CH_LAST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec)))) + +/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband + * (works with 20, 40, 80, 160) + * resolves to 0 if called with upper most channel + */ +#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\ + ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \ + ((channel) + CH_20MHZ_APART)))) + +#endif /* WL_BW160MHZ */ + +/* Iterator for 20MHz side bands of a chanspec: (chanspec_t chspec, uint8 channel) + * 'chspec' chanspec_t of interest (used in loop, better to pass a resolved value than a macro) + * 'channel' must be a variable (not an expression). + */ +#define FOREACH_20_SB(chspec, channel) \ + for (channel = (uint8)wf_chspec_first_20_sb(chspec); channel; \ + channel = CH_NEXT_20_SB((chspec), channel)) + +/* Uses iterator to populate array with all side bands involved (sorted lower to upper). 
+ * 'chspec' chanspec_t of interest
+ * 'psb' pointer to a uint8 array large enough to hold all side bands of the given chspec
+ */
+#define GET_ALL_SB(chspec, psb) do { \
+ uint8 channel, idx = 0; \
+ chanspec_t chspec_local = chspec; \
+ FOREACH_20_SB(chspec_local, channel) \
+ (psb)[idx++] = channel; \
+} while (0)
+
+/* given a chanspec of any bw, tests if primary20 SB is in lower 20, 40, 80 respectively */
+#define IS_CTL_IN_L20(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_U) /* CTL SB is in low 20 of any 40 */
+#define IS_CTL_IN_L40(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_UL) /* in low 40 of any 80 */
+#define IS_CTL_IN_L80(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_ULL) /* in low 80 of 160 */
+
+#define BW_LE40(bw) ((bw) == WL_CHANSPEC_BW_20 || ((bw) == WL_CHANSPEC_BW_40))
+#define BW_LE80(bw) (BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80))
+#define BW_LE160(bw) (BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160))
+
+#define CHSPEC_IS6G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_6G)
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+
+#ifdef WL_BAND6G
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS2G(chspec) ? WLC_BAND_2G : CHSPEC_IS5G(chspec) ? \
+ WLC_BAND_5G : WLC_BAND_6G)
+#else
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS2G(chspec) ? WLC_BAND_2G : WLC_BAND_5G)
+#endif
+
+#define CHSPEC_BW_CHANGED(prev_chspec, curr_chspec) \
+ (((prev_chspec) & WL_CHANSPEC_BW_MASK) != ((curr_chspec) & WL_CHANSPEC_BW_MASK))
+
+#if (defined(WL_BAND6G) && !defined(WL_BAND6G_DISABLED))
+#define CHSPEC_IS_5G_6G(chspec) (CHSPEC_IS5G(chspec) || CHSPEC_IS6G(chspec))
+#define CHSPEC_IS20_5G_6G(chspec) ((((chspec) & \
+ WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \
+ (CHSPEC_IS5G(chspec) || CHSPEC_IS6G(chspec)))
+#else
+#define CHSPEC_IS_5G_6G(chspec) (CHSPEC_IS5G(chspec))
+#define CHSPEC_IS20_5G_6G(chspec) (CHSPEC_IS20_5G(chspec))
+#endif
+
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#ifdef WL11BE
+#define CHANSPEC_STR_LEN 22
+#else
+#define CHANSPEC_STR_LEN 20
+#endif
+
+/*
+ * This function returns TRUE if both chanspecs can co-exist in PHY.
+ * In addition to the primary20 channel, the function checks the side band
+ * for 2G 40MHz channels.
+ */
+extern bool wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2);
+
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+ CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, GE (>=), GT (>) */
+
+#define CHSPEC_BW_GE(chspec, bw) (CHSPEC_BW(chspec) >= (bw))
+
+#define CHSPEC_BW_GT(chspec, bw) (CHSPEC_BW(chspec) > (bw))
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
+#define WL_LCHANSPEC_CHAN_MASK 0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT 0
+
+#define WL_LCHANSPEC_CTL_SB_MASK 0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT 8
+#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_LCHANSPEC_BW_MASK 0x0C00
+#define WL_LCHANSPEC_BW_SHIFT 10
+#define WL_LCHANSPEC_BW_10 0x0400
+#define WL_LCHANSPEC_BW_20 0x0800
+#define WL_LCHANSPEC_BW_40 0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK 0xf000
+#define WL_LCHANSPEC_BAND_SHIFT 12
+#define WL_LCHANSPEC_BAND_5G 0x1000
+#define WL_LCHANSPEC_BAND_2G 0x2000
+
+#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+ (chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+ WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
+#define GET_ALL_EXT wf_get_all_ext
+
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq (MHz) = chan_factor * 500 kHz + chan_number * 5 MHz
+ */
+
+/**
+ * Channel Factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G 4814u /* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G 8000u /* 4.9 GHz band for Japan */
+
+/**
+ * Channel Factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G 10000u /* 5 GHz band, 5000 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 6 GHz channels.
+ * The value corresponds to 5950 MHz.
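+ * For example, 6 GHz channel 5 maps to 11900 * 500 kHz + 5 * 5 MHz =
+ * 5950 MHz + 25 MHz = 5975 MHz.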
+ */ +#define WF_CHAN_FACTOR_6_G 11900u /* 6 GHz band, 5950 MHz */ + +#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */ + +/** + * No of sub-band value of the specified Mhz chanspec + */ +#define WF_NUM_SIDEBANDS_40MHZ 2u +#define WF_NUM_SIDEBANDS_80MHZ 4u +#define WF_NUM_SIDEBANDS_160MHZ 8u + +/** + * Return the chanspec bandwidth in MHz + */ +uint wf_bw_chspec_to_mhz(chanspec_t chspec); + +/** + * Return the bandwidth string for a given chanspec + */ +const char *wf_chspec_to_bw_str(chanspec_t chspec); + +/** + * Convert chanspec to ascii string, or formats hex of an invalid chanspec. + */ +char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf); + +/** + * Convert chanspec to ascii string, or returns NULL on error. + */ +char * wf_chspec_ntoa(chanspec_t chspec, char *buf); + +/** + * Convert ascii string to chanspec + */ +chanspec_t wf_chspec_aton(const char *a); + +/** + * Verify the chanspec fields are valid for a chanspec_t + */ +bool wf_chspec_malformed(chanspec_t chanspec); + +/** + * Verify the chanspec specifies a valid channel according to 802.11. + */ +bool wf_chspec_valid(chanspec_t chanspec); + +/** + * Verify that the channel is a valid 20MHz channel according to 802.11. + */ +bool wf_valid_20MHz_chan(uint channel, chanspec_band_t band); + +/** + * Verify that the center channel is a valid 40MHz center channel according to 802.11. + */ +bool wf_valid_40MHz_center_chan(uint center_channel, chanspec_band_t band); + +/** + * Verify that the center channel is a valid 80MHz center channel according to 802.11. + */ +bool wf_valid_80MHz_center_chan(uint center_channel, chanspec_band_t band); + +/** + * Verify that the center channel is a valid 160MHz center channel according to 802.11. + */ +bool wf_valid_160MHz_center_chan(uint center_channel, chanspec_band_t band); + +/** + * Verify that the center channel is a valid 240MHz center channel according to 802.11. + */ +bool wf_valid_240MHz_center_chan(uint center_channel, chanspec_band_t band); + +/** + * Verify that the center channel is a valid 320MHz center channel according to 802.11. + */ +bool wf_valid_320MHz_center_chan(uint center_channel, chanspec_band_t band); + +/** + * Create a 20MHz chanspec for the given band. + */ +chanspec_t wf_create_20MHz_chspec(uint channel, chanspec_band_t band); + +/** + * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number, + * the center channel number, and the band. + */ +chanspec_t wf_create_40MHz_chspec(uint primary_channel, uint center_channel, + chanspec_band_t band); + +/** + * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number, + * the sub-band for the primary 20MHz channel, and the band. + */ +chanspec_t wf_create_40MHz_chspec_primary_sb(uint primary_channel, + chanspec_subband_t primary_subband, + chanspec_band_t band); +/** + * Returns the chanspec for an 80MHz channel given the primary 20MHz channel number, + * the center channel number, and the band. + */ +chanspec_t wf_create_80MHz_chspec(uint primary_channel, uint center_channel, + chanspec_band_t band); + +/** + * Returns the chanspec for an 160MHz channel given the primary 20MHz channel number, + * the center channel number, and the band. + */ +chanspec_t wf_create_160MHz_chspec(uint primary_channel, uint center_channel, + chanspec_band_t band); + +/** + * Returns the chanspec for an 240MHz channel given the primary 20MHz channel number, + * the center channel number, and the band. 
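+ * Example of the shared parameter order (illustrative, using the 80MHz
+ * variant): wf_create_80MHz_chspec(36, 42, WL_CHANSPEC_BAND_5G) builds the
+ * 5 GHz 80MHz chanspec centered on channel 42 with primary 20MHz channel 36.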
+ */ +chanspec_t wf_create_240MHz_chspec(uint primary_channel, uint center_channel, + chanspec_band_t band); + +/** + * Returns the chanspec for an 320MHz channel given the primary 20MHz channel number, + * the center channel number, and the band. + */ +chanspec_t wf_create_320MHz_chspec(uint primary_channel, uint center_channel, + chanspec_band_t band); + +/** + * Returns the chanspec for an 80+80MHz channel given the primary 20MHz channel number, + * the center channel numbers for each frequency segment, and the band. + */ +chanspec_t wf_create_8080MHz_chspec(uint primary_channel, uint chan0, uint chan1, + chanspec_band_t band); + +/** + * Returns the chanspec for an 160+160MHz channel given the primary 20MHz channel number, + * the center channel numbers for each frequency segment, and the band. + */ +chanspec_t wf_create_160160MHz_chspec(uint primary_channel, uint chan0, uint chan1, + chanspec_band_t band); +/** + * Returns the chanspec given the primary 20MHz channel number, + * the center channel number, channel width, and the band. + * + * The channel width must be 20, 40, 80, or 160 MHz. + */ +chanspec_t wf_create_chspec(uint primary_channel, uint center_channel, + chanspec_bw_t bw, chanspec_band_t band); + +/** + * Returns the chanspec given the primary 20MHz channel number, + * channel width, and the band. + */ +chanspec_t wf_create_chspec_from_primary(uint primary_channel, chanspec_bw_t bw, + chanspec_band_t band); + +/** + * Returns the chanspec given the index of primary 20MHz channel within whole + * channel, the center channel number, channel width, and the band. + * + * The channel width must be 20, 40, 80, or 160 MHz. + */ +chanspec_t wf_create_chspec_sb(uint sb, uint center_channel, chanspec_bw_t bw, + chanspec_band_t band); + +/** + * Returns the chanspec for an 160+160MHz channel given the index of primary 20MHz + * channel within whole channel pair (0-3 if within chan0, 4-7 if within chan1), + * the center channel numbers for each frequency segment, and the band. + */ +chanspec_t wf_create_160160MHz_chspec_sb(uint sb, uint chan0, uint chan1, + chanspec_band_t band); + +/** + * Return the primary 20MHz channel. + */ +uint8 wf_chspec_primary20_chan(chanspec_t chspec); + +/* alias for old function name */ +#define wf_chspec_ctlchan(c) wf_chspec_primary20_chan(c) + +/** + * Return the primary 20MHz chanspec of a given chanspec + */ +chanspec_t wf_chspec_primary20_chspec(chanspec_t chspec); + +/* alias for old function name */ +#define wf_chspec_ctlchspec(c) wf_chspec_primary20_chspec(c) + +/** + * Return the primary 40MHz chanspec for a 40MHz or wider channel + */ +chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec); + +/** + * Return the channel number for a given frequency and base frequency + */ +int wf_mhz2channel(uint freq, uint start_factor); + +/** + * Return the center frequency in MHz of the given channel and base frequency. 
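+ * For example, wf_channel2mhz(36, WF_CHAN_FACTOR_5_G) returns 5180 (MHz).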
+ */ +int wf_channel2mhz(uint channel, uint start_factor); + +/** + * Returns the chanspec 80Mhz channel corresponding to the following input + * parameters + * + * primary_channel - primary 20Mhz channel + * center_channel - center frequecny of the 80Mhz channel + * + * The center_channel can be one of {42, 58, 106, 122, 138, 155} + * + * returns INVCHANSPEC in case of error + */ +extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel); + +/** + * Convert ctl chan and bw to chanspec + * + * @param ctl_ch channel + * @param bw bandwidth + * + * @return > 0 if successful or 0 otherwise + * + */ +extern uint16 wf_channel2chspec(uint ctl_ch, uint bw); + +/* + * Returns the 80+80 MHz chanspec corresponding to the following input parameters + * + * primary_20mhz - Primary 20 MHz channel + * chan0_80MHz - center channel number of one frequency segment + * chan1_80MHz - center channel number of the other frequency segment + * + * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}. + * The primary channel must be contained in one of the 80MHz channels. This routine + * will determine which frequency segment is the primary 80 MHz segment. + * + * Returns INVCHANSPEC in case of error. + * + * Refer to 802.11-2016 section 22.3.14 "Channelization". + */ +extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz, + uint8 chan0_80Mhz, uint8 chan1_80Mhz); + +/** + * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec + * + * @param chspec input chanspec + * + * @return center channel number of the primary 80MHz sub-band of the input. + * Will return the center channel of an input 80MHz chspec. + * Will return INVCHANNEL if the chspec is malformed or less than 80MHz bw. + */ +extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec); + +/** + * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec + * + * @param chspec input chanspec + * + * @return center channel number of the secondary 80MHz sub-band of the input. + * Will return INVCHANNEL if the chspec is malformed or bw is not greater than 80MHz. + */ +extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec); + +/** + * Returns the chanspec for the primary 80MHz sub-band of an 160MHz or 80+80 channel + * + * @param chspec input chanspec + * + * @return An 80MHz chanspec describing the primary 80MHz sub-band of the input. + * Will return an input 80MHz chspec as is. + * Will return INVCHANSPEC if the chspec is malformed or less than 80MHz bw. + */ +extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec); + +/** + * Returns the chanspec for the secondary 80MHz sub-band of an 160MHz or 80+80 channel + * The sideband in the chanspec is always set to WL_CHANSPEC_CTL_SB_LL since this sub-band + * does not contain the primary 20MHz channel. + * + * @param chspec input chanspec + * + * @return An 80MHz chanspec describing the secondary 80MHz sub-band of the input. + * Will return INVCHANSPEC if the chspec is malformed or bw is not greater than 80MHz. + */ +extern chanspec_t wf_chspec_secondary80_chspec(chanspec_t chspec); + +/** + * Returns the center channel of the primary 160MHz sub-band of the provided chanspec + * + * @param chspec input chanspec + * + * @return center channel number of the primary 160MHz sub-band of the input. + * Will return the center channel of an input 160MHz chspec. + * Will return INVCHANNEL if the chspec is malformed or less than 160MHz bw. 
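+ * For example, a 160MHz input chanspec centered on channel 50 returns 50.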
+ */ +extern uint8 wf_chspec_primary160_channel(chanspec_t chanspec); + +/** + * Returns the chanspec for the primary 160MHz sub-band of an 320MHz channel + * + * @param chspec input chanspec + * + * @return An 160MHz chanspec describing the primary 160MHz sub-band of the input. + * Will return an input 160MHz chspec as is. + * Will return INVCHANSPEC if the chspec is malformed or less than 160MHz bw. + */ +extern chanspec_t wf_chspec_primary160_chspec(chanspec_t chspec); + +/* + * For 160MHz or 80P80 chanspec, set ch[0]/ch[1] to be the low/high 80 Mhz channels + * + * For 20/40/80MHz chanspec, set ch[0] to be the center freq, and chan[1]=-1 + */ +extern void wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch); + +/* wf_chanspec_iter_... iterator API is deprecated. Use wlc_clm_chanspec_iter_... API instead */ + +struct wf_iter_range { + uint8 start; + uint8 end; +}; + +/* Internal structure for wf_chanspec_iter_* functions. + * Do not directly access the members. Only use the related + * functions to query and manipulate the structure. + */ +typedef struct chanspec_iter { + uint8 state; + chanspec_t chanspec; + chanspec_band_t band; + chanspec_bw_t bw; + struct wf_iter_range range; + union { + uint8 range_id; + struct { + uint8 ch0; + uint8 ch1; + }; + }; +} wf_chanspec_iter_t; + +/** + * Initialize a chanspec iteration structure. + * The parameters define the set of chanspecs to generate in the iteration. + * After initialization wf_chanspec_iter_current() will return the first chanspec + * in the set. A call to wf_chanspec_iter_next() will advance the interation + * to the next chanspec in the set. + * + * Example use: + * wf_chanspec_iter_t iter; + * chanspec_t chanspec; + * + * wf_chanspec_iter_init(&iter, band, bw); + * + * while (wf_chanspec_iter_next(&iter, &chanspec)) { + * ... do some work ... + * } + * + * @param iter pointer to a wf_chanspec_iter_t structure to initialize + * @param band chanspec_band_t value specifying the band of interest + * @param bw chanspec_bw_t value specifying the bandwidth of interest, + * or INVCHANSPEC to specify all bandwidths + * + * @return a success value, FALSE on error, or TRUE if OK + */ +bool wf_chanspec_iter_init(wf_chanspec_iter_t *iter, chanspec_band_t band, chanspec_bw_t bw); + +/** + * Advance the iteration to the next chanspec in the set. + * + * @param iter pointer to a wf_chanspec_iter_t structure + * @param chspec pointer to storage for the next chanspec. Return value will be INVCHANSPEC + * if the iteration ended. Pass in NULL if return value is not desired. + * + * @return a success value, TRUE if there was another chanspec in the iteration, FALSE if not + */ +bool wf_chanspec_iter_next(wf_chanspec_iter_t *iter, chanspec_t *chspec); + +/** + * Return the current chanspec of the iteration. + * + * @param iter pointer to a wf_chanspec_iter_t structure + * + * @return the current chanspec_t + */ +chanspec_t wf_chanspec_iter_current(wf_chanspec_iter_t *iter); + +/* Populates array with all 20MHz side bands of a given chanspec_t in the following order: + * primary20, ext20, two ext40s, four ext80s. + * 'chspec' is the chanspec of interest + * 'pext' must point to an uint8 array of long enough to hold all side bands of the given chspec + * + * Works with 20, 40, 80 and 160MHz chspec + */ + +extern void wf_get_all_ext(chanspec_t chspec, uint8 *chan_ptr); + +/* + * Given two chanspecs, returns true if they overlap. 
+ * (Overlap: At least one 20MHz subband is common between the two chanspecs provided) + */ +extern bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1); + +extern uint8 channel_bw_to_width(chanspec_t chspec); + +uint8 wf_chspec_320_id2cch(chanspec_t chanspec); + +uint8 wf_chspec_240_id2cch(chanspec_t chanspec); + +#endif /* _bcmwifi_channels_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_monitor.h b/bcmdhd.101.10.361.x/include/bcmwifi_monitor.h new file mode 100755 index 0000000..41f1f94 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmwifi_monitor.h @@ -0,0 +1,98 @@ +/* + * Monitor Mode routines. + * This header file housing the define and function use by DHD + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. + * + * + * <> + */ +#ifndef _BCMWIFI_MONITOR_H_ +#define _BCMWIFI_MONITOR_H_ + +#include + +typedef struct monitor_info monitor_info_t; + +typedef struct monitor_pkt_ts { + union { + uint32 ts_low; /* time stamp low 32 bits */ + uint32 reserved; /* If timestamp not used */ + }; + union { + uint32 ts_high; /* time stamp high 28 bits */ + union { + uint32 ts_high_ext :28; /* time stamp high 28 bits */ + uint32 clk_id_ext :3; /* clock ID source */ + uint32 phase :1; /* Phase bit */ + uint32 marker_ext; + }; + }; +} monitor_pkt_ts_t; + +typedef struct monitor_pkt_info { + uint32 marker; + /* timestamp */ + monitor_pkt_ts_t ts; +} monitor_pkt_info_t; + +typedef struct monitor_pkt_rssi { + int8 dBm; /* number of full dBms */ + /* sub-dbm resolution */ + int8 decidBm; /* sub dBms : value after the decimal point */ +} monitor_pkt_rssi_t; + +/* structure to add specific information to rxsts structure + * otherwise non available to all modules like core RSSI and qdbm resolution +*/ + +typedef struct monitor_pkt_rxsts { + wl_rxsts_t *rxsts; + uint8 corenum; /* number of cores/antennas */ + monitor_pkt_rssi_t rxpwr[4]; +} monitor_pkt_rxsts_t; + +#define HE_EXTRACT_FROM_PLCP(plcp, ppdu_type, field) \ + (getbits(plcp, D11_PHY_HDR_LEN, \ + HE_ ## ppdu_type ## _PPDU_ ## field ## _IDX, \ + HE_ ## ppdu_type ## _PPDU_ ## field ## _FSZ)) + +#define HE_PACK_RTAP_FROM_PLCP(plcp, ppdu_type, field) \ + (HE_EXTRACT_FROM_PLCP(plcp, ppdu_type, field) << \ + HE_RADIOTAP_ ## field ## _SHIFT) + +#define HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, ppdu_type, field, member) \ + ((he_plcp2ltf_gi[HE_EXTRACT_FROM_PLCP(plcp, ppdu_type, field)].member) << \ + HE_RADIOTAP_ ## field ## _SHIFT) + +#define HE_PACK_RTAP_FROM_VAL(val, field) \ + ((val) << HE_RADIOTAP_ ## field ## _SHIFT) + +#define HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, field) \ + (HE_PACK_RTAP_FROM_VAL(D11PPDU_ ## field(rxh, corerev, corerev_minor), field)) + +/* channel bandwidth */ +#define WLC_20_MHZ 20 /**< 20Mhz channel bandwidth */ +#define WLC_40_MHZ 40 /**< 40Mhz channel bandwidth */ +#define WLC_80_MHZ 80 /**< 80Mhz channel bandwidth */ +#define WLC_160_MHZ 160 /**< 160Mhz channel bandwidth */ +#define WLC_240_MHZ 240 /**< 240Mhz channel bandwidth */ +#define WLC_320_MHZ 320 /**< 320Mhz channel bandwidth */ + +extern uint16 bcmwifi_monitor_create(monitor_info_t**); +extern void bcmwifi_set_corerev_major(monitor_info_t* info, int8 corerev); +extern void bcmwifi_set_corerev_minor(monitor_info_t* info, int8 corerev); +extern void 
bcmwifi_monitor_delete(monitor_info_t* info); +extern uint16 bcmwifi_monitor(monitor_info_t* info, + monitor_pkt_info_t* pkt_info, void *pdata, uint16 len, void* pout, + uint16* offset, uint16 pad_req, void *wrxh_in, void *wrxh_last); +extern uint16 wl_rxsts_to_rtap(monitor_pkt_rxsts_t* pkt_rxsts, void *pdata, + uint16 len, void* pout, uint16 pad_req); + +#endif /* _BCMWIFI_MONITOR_H_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h b/bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h new file mode 100755 index 0000000..77fc0e7 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h @@ -0,0 +1,382 @@ +/* + * RadioTap utility routines for WL and Apps + * This header file housing the define and function prototype use by + * both the wl driver, tools & Apps. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _BCMWIFI_RADIOTAP_H_ +#define _BCMWIFI_RADIOTAP_H_ + +#include +#include +#include +#include <802.11.h> +#include <802.11ax.h> +#include "bcmwifi_monitor.h" +#include +#include + +/* This marks the start of a packed structure section. */ +#include +/* + * RadioTap header specific implementation. Used by MacOS implementation only. 
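+ * Per the radiotap convention (https://www.radiotap.org/), a fixed
+ * ieee80211_radiotap_header is followed by the fields flagged in its
+ * it_present bitmap; the packed structs below lay out common field sets.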
+ */ +BWL_PRE_PACKED_STRUCT struct wl_radiotap_hdr { + struct ieee80211_radiotap_header ieee_radiotap; + uint64 tsft; + uint8 flags; + union { + uint8 rate; + uint8 pad; + } u; + uint16 channel_freq; + uint16 channel_flags; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct wl_radiotap_sna { + uint8 signal; + uint8 noise; + uint8 antenna; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct wl_radiotap_xchan { + uint32 xchannel_flags; + uint16 xchannel_freq; + uint8 xchannel_channel; + uint8 xchannel_maxpower; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct wl_radiotap_ampdu { + uint32 ref_num; + uint16 flags; + uint8 delimiter_crc; + uint8 reserved; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct wl_htmcs { + uint8 mcs_known; + uint8 mcs_flags; + uint8 mcs_index; + uint8 pad; /* pad to 32 bit aligned */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct wl_vhtmcs { + uint16 vht_known; /* IEEE80211_RADIOTAP_VHT */ + uint8 vht_flags; + uint8 vht_bw; + uint8 vht_mcs_nss[4]; + uint8 vht_coding; + uint8 vht_group_id; + uint16 vht_partial_aid; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct wl_radiotap_ht_tail { + struct wl_radiotap_xchan xc; + struct wl_radiotap_ampdu ampdu; + union { + struct wl_htmcs ht; + struct wl_vhtmcs vht; + } u; +} BWL_POST_PACKED_STRUCT; + +typedef struct bsd_header_rx { + struct wl_radiotap_hdr hdr; + /* + * include extra space beyond wl_radiotap_ht size + * (larger of two structs in union): + * signal/noise/ant plus max of 3 pad for xchannel + * tail struct (xchannel and MCS info) + */ + uint8 pad[3]; + uint8 ht[sizeof(struct wl_radiotap_ht_tail)]; +} bsd_header_rx_t; + +typedef struct radiotap_parse { + struct ieee80211_radiotap_header *hdr; + void *fields; + uint fields_len; + uint idx; + uint offset; +} radiotap_parse_t; + +struct rtap_field { + uint len; + uint align; +}; + +/* he radiotap - https://www.radiotap.org/fields/HE.html */ +#define HE_RADIOTAP_BSS_COLOR_SHIFT 0u +#define HE_RADIOTAP_BEAM_CHANGE_SHIFT 6u +#define HE_RADIOTAP_DL_UL_SHIFT 7u +#define HE_RADIOTAP_MCS_SHIFT 8u +#define HE_RADIOTAP_DCM_SHIFT 12u +#define HE_RADIOTAP_CODING_SHIFT 13u +#define HE_RADIOTAP_LDPC_SHIFT 14u +#define HE_RADIOTAP_STBC_SHIFT 15u +#define HE_RADIOTAP_SR_SHIFT 0u +#define HE_RADIOTAP_STAID_SHIFT 4u +#define HE_RADIOTAP_SR1_SHIFT 0u +#define HE_RADIOTAP_SR2_SHIFT 4u +#define HE_RADIOTAP_SR3_SHIFT 8u +#define HE_RADIOTAP_SR4_SHIFT 12u +#define HE_RADIOTAP_BW_SHIFT 0u +#define HE_RADIOTAP_RU_ALLOC_SHIFT 0u +#define HE_RADIOTAP_GI_SHIFT 4u +#define HE_RADIOTAP_LTF_SIZE_SHIFT 6u +#define HE_RADIOTAP_NUM_LTF_SHIFT 8u +#define HE_RADIOTAP_PADDING_SHIFT 12u +#define HE_RADIOTAP_TXBF_SHIFT 14u +#define HE_RADIOTAP_PE_SHIFT 15u +#define HE_RADIOTAP_NSTS_SHIFT 0u +#define HE_RADIOTAP_DOPPLER_SHIFT 4u +#define HE_RADIOTAP_TXOP_SHIFT 8u +#define HE_RADIOTAP_MIDAMBLE_SHIFT 15u +#define HE_RADIOTAP_DOPPLER_SET_NSTS_SHIFT 0u +#define HE_RADIOTAP_DOPPLER_NOTSET_NSTS_SHIFT 0u + +/* he mu radiotap - https://www.radiotap.org/fields/HE-MU.html */ +#define HE_RADIOTAP_SIGB_MCS_SHIFT 0u +#define HE_RADIOTAP_SIGB_MCS_KNOWN_SHIFT 4u +#define HE_RADIOTAP_SIGB_DCM_SHIFT 5u +#define HE_RADIOTAP_SIGB_DCM_KNOWN_SHIFT 6u +#define HE_RADIOTAP_SIGB_COMP_KNOWN_SHIFT 14u +#define HE_RADIOTAP_SIGB_COMP_SHIFT 3u +#define HE_RADIOTAP_SIGB_SYMB_SHIFT 18u +#define HE_RADIOTAP_BW_SIGA_SHIFT 0u +#define HE_RADIOTAP_BW_SIGA_KNOWN_SHIFT 2u +#define HE_RADIOTAP_SIGB_SYM_MU_MIMO_USER_SHIFT 4u +#define HE_RADIOTAP_PRE_PUNCR_SIGA_SHIFT 8u +#define 
HE_RADIOTAP_PRE_PUNCR_SIGA_KNOWN_SHIFT	10u
+
+#define WL_RADIOTAP_BRCM_SNS		0x01
+#define WL_RADIOTAP_BRCM_MCS		0x00000001
+#define WL_RADIOTAP_LEGACY_SNS		0x02
+#define WL_RADIOTAP_LEGACY_VHT		0x00000001
+#define WL_RADIOTAP_BRCM_PAD_SNS	0x3
+
+#define IEEE80211_RADIOTAP_HTMOD_40		0x01
+#define IEEE80211_RADIOTAP_HTMOD_SGI		0x02
+#define IEEE80211_RADIOTAP_HTMOD_GF		0x04
+#define IEEE80211_RADIOTAP_HTMOD_LDPC		0x08
+#define IEEE80211_RADIOTAP_HTMOD_STBC_MASK	0x30
+#define IEEE80211_RADIOTAP_HTMOD_STBC_SHIFT	4
+
+/* Dynamic bandwidth for VHT signaled in NONHT */
+#define WL_RADIOTAP_F_NONHT_VHT_DYN_BW	0x01
+/* VHT BW is valid in NONHT */
+#define WL_RADIOTAP_F_NONHT_VHT_BW	0x02
+
+typedef struct ieee80211_radiotap_header ieee80211_radiotap_header_t;
+
+/* VHT information in non-HT frames; primarily VHT b/w signaling
+ * in frames received at legacy rates.
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_nonht_vht {
+	uint8 len;	/* length of the field excluding 'len' field */
+	uint8 flags;
+	uint8 bw;
+	uint8 PAD;	/* Add a pad so the next vendor entry, if any, will be 16 bit aligned */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_nonht_vht wl_radiotap_nonht_vht_t;
+
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_basic {
+	uint32 tsft_l;
+	uint32 tsft_h;
+	uint8 flags;
+	uint8 rate;	/* this field acts as a pad for non-legacy packets */
+	uint16 channel_freq;
+	uint16 channel_flags;
+	uint8 signal;
+	uint8 noise;
+	int8 antenna;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_basic wl_radiotap_basic_t;
+
+/* radiotap standard - non-HT, non-VHT information with Broadcom vendor namespace extension
+ * that includes VHT information.
+ * Used with monitor type 3 when received by HT/Legacy PHY and received rate is legacy.
+ * Struct ieee80211_radiotap_header is of variable length due to possible
+ * extra it_present bitmap fields.
+ * It should not be included as a static length field here
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_legacy {
+	wl_radiotap_basic_t basic;
+	uint8 PAD;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_legacy wl_radiotap_legacy_t;
+
+#define WL_RADIOTAP_LEGACY_SKIP_LEN	htol16(sizeof(struct wl_radiotap_legacy) - \
+	OFFSETOF(struct wl_radiotap_legacy, nonht_vht))
+
+#define WL_RADIOTAP_NONHT_VHT_LEN	(sizeof(wl_radiotap_nonht_vht_t) - 1)
+
+/* Radiotap standard that includes HT information. This is for use with monitor type 3
+ * whenever frame is received by HT-PHY, and received rate is non-VHT.
+ * Struct ieee80211_radiotap_header is of variable length due to possible
+ * extra it_present bitmap fields.
+ * It should not be included as a static length field here
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_ht {
+	wl_radiotap_basic_t basic;
+	uint8 PAD[3];
+	uint32 xchannel_flags;
+	uint16 xchannel_freq;
+	uint8 xchannel_channel;
+	uint8 xchannel_maxpower;
+	uint8 mcs_known;
+	uint8 mcs_flags;
+	uint8 mcs_index;
+	uint8 PAD;
+	uint32 ampdu_ref_num;	/* A-MPDU ID */
+	uint16 ampdu_flags;	/* A-MPDU flags */
+	uint8 ampdu_delim_crc;	/* Delimiter CRC if present in flags */
+	uint8 ampdu_reserved;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_ht wl_radiotap_ht_t;
+
+/* Radiotap standard that includes VHT information.
+ * This is for use with monitor type 3 whenever frame is
+ * received by HT-PHY (VHT-PHY), and received rate is VHT.
+ * Struct ieee80211_radiotap_header is of variable length due to possible
+ * extra it_present bitmap fields.
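+ * (a set IEEE80211_RADIOTAP_EXT bit, bit 31, in any it_present word means
+ * another 32-bit presence word follows, which is how the prefix grows).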
+ * It should not be included as a static length field here + */ +BWL_PRE_PACKED_STRUCT struct wl_radiotap_vht { + wl_radiotap_basic_t basic; + uint8 PAD[3]; + uint32 ampdu_ref_num; /* A-MPDU ID */ + uint16 ampdu_flags; /* A-MPDU flags */ + uint8 ampdu_delim_crc; /* Delimiter CRC if present in flags */ + uint8 ampdu_reserved; + uint16 vht_known; /* IEEE80211_RADIOTAP_VHT */ + uint8 vht_flags; /* IEEE80211_RADIOTAP_VHT */ + uint8 vht_bw; /* IEEE80211_RADIOTAP_VHT */ + uint8 vht_mcs_nss[4]; /* IEEE80211_RADIOTAP_VHT */ + uint8 vht_coding; /* IEEE80211_RADIOTAP_VHT */ + uint8 vht_group_id; /* IEEE80211_RADIOTAP_VHT */ + uint16 vht_partial_aid; /* IEEE80211_RADIOTAP_VHT */ +} BWL_POST_PACKED_STRUCT; + +typedef struct wl_radiotap_vht wl_radiotap_vht_t; + +/* Radiotap standard that includes HE information. */ +BWL_PRE_PACKED_STRUCT struct wl_radiotap_he { + wl_radiotap_basic_t basic; + uint8 PAD[3]; + uint32 ampdu_ref_num; /* A-MPDU ID */ + uint16 ampdu_flags; /* A-MPDU flags */ + uint8 ampdu_delim_crc; /* Delimiter CRC if present in flags */ + uint8 ampdu_reserved; + uint16 data1; + uint16 data2; + uint16 data3; + uint16 data4; + uint16 data5; + uint16 data6; +} BWL_POST_PACKED_STRUCT; + +typedef struct wl_radiotap_he wl_radiotap_he_t; + +BWL_PRE_PACKED_STRUCT struct radiotap_vendor_ns { + uint8 vend_oui[3]; + uint8 sns; + uint16 skip_len; +} BWL_POST_PACKED_STRUCT; + +typedef struct radiotap_vendor_ns radiotap_vendor_ns_t; + +#define WL_RADIOTAP_PRESENT_BASIC \ + ((1 << IEEE80211_RADIOTAP_TSFT) | \ + (1 << IEEE80211_RADIOTAP_FLAGS) | \ + (1 << IEEE80211_RADIOTAP_CHANNEL) | \ + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \ + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \ + (1 << IEEE80211_RADIOTAP_ANTENNA)) + +#define WL_RADIOTAP_PRESENT_LEGACY \ + WL_RADIOTAP_PRESENT_BASIC | \ + (1 << IEEE80211_RADIOTAP_RATE) + +#define WL_RADIOTAP_PRESENT_HT \ + WL_RADIOTAP_PRESENT_BASIC | \ + ((1 << IEEE80211_RADIOTAP_XCHANNEL) | \ + (1 << IEEE80211_RADIOTAP_MCS) | \ + (1 << IEEE80211_RADIOTAP_AMPDU)) + +#define WL_RADIOTAP_PRESENT_VHT \ + WL_RADIOTAP_PRESENT_BASIC | \ + ((1 << IEEE80211_RADIOTAP_AMPDU) | \ + (1 << IEEE80211_RADIOTAP_VHT)) + +#define WL_RADIOTAP_PRESENT_HE \ + WL_RADIOTAP_PRESENT_BASIC | \ + ((1 << IEEE80211_RADIOTAP_AMPDU) | \ + (1 << IEEE80211_RADIOTAP_HE)) + +/* include/linux/if_arp.h + * #define ARPHRD_IEEE80211_PRISM 802 IEEE 802.11 + Prism2 header + * #define ARPHRD_IEEE80211_RADIOTAP 803 IEEE 802.11 + radiotap header + * include/net/ieee80211_radiotap.h + * radiotap structure + */ + +#ifndef ARPHRD_IEEE80211_RADIOTAP +#define ARPHRD_IEEE80211_RADIOTAP 803 +#endif + +/* This marks the end of a packed structure section. 
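+ * Everything between the start and end markers is byte-packed, so the
+ * structures above can be copied straight into the radiotap output
+ * buffer with no compiler-inserted padding.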
*/ +#include + +extern void wl_rtapParseInit(radiotap_parse_t *rtap, uint8 *rtap_header); +extern ratespec_t wl_calcRspecFromRTap(uint8 *rtap_header); +extern bool wl_rtapFlags(uint8 *rtap_header, uint8* flags); +extern uint wl_radiotap_rx(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + bsd_header_rx_t *bsd_header); +extern uint wl_radiotap_rx_legacy(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + ieee80211_radiotap_header_t* rtap_hdr); +extern uint wl_radiotap_rx_ht(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + ieee80211_radiotap_header_t* rtap_hdr); +extern uint wl_radiotap_rx_vht(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + ieee80211_radiotap_header_t* rtap_hdr); +extern uint wl_radiotap_rx_he(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + ieee80211_radiotap_header_t* rtap_hdr); +extern uint wl_radiotap_rx_eht(struct dot11_header *mac_header, wl_rxsts_t *rxsts, + ieee80211_radiotap_header_t *rtap_hdr); + +/* Legacy phy radiotap header may include VHT bw signaling VS element */ +#define MAX_RADIOTAP_LEGACY_SIZE (sizeof(wl_radiotap_legacy_t) + \ + sizeof(radiotap_vendor_ns_t) + sizeof(wl_radiotap_nonht_vht_t)) + +/* RadioTap header starts with a fixed struct ieee80211_radiotap_header, + * followed by variable fields for the 4 encodings supported, HE, VHT, HT, and Legacy + */ +#define MAX_RADIOTAP_SIZE (sizeof(struct ieee80211_radiotap_header) + \ + MAX(sizeof(wl_radiotap_he_t), \ + MAX(sizeof(wl_radiotap_vht_t), \ + MAX(sizeof(wl_radiotap_ht_t), MAX_RADIOTAP_LEGACY_SIZE)))) +#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE) + +#endif /* _BCMWIFI_RADIOTAP_H_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_rates.h b/bcmdhd.101.10.361.x/include/bcmwifi_rates.h new file mode 100755 index 0000000..9be50eb --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmwifi_rates.h @@ -0,0 +1,1262 @@ +/* + * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#ifndef _bcmwifi_rates_h_ +#define _bcmwifi_rates_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define WL_RATESET_SZ_DSSS 4 +#define WL_RATESET_SZ_OFDM 8 +#if defined(WLPROPRIETARY_11N_RATES) +#define WL_RATESET_SZ_HT_MCS 10 +#else +#define WL_RATESET_SZ_HT_MCS 8 +#endif +#define WL_RATESET_SZ_VHT_MCS 10 +#define WL_RATESET_SZ_VHT_MCS_P 12 /* 10 VHT rates + 2 proprietary rates */ +#define WL_RATESET_SZ_HE_MCS 12 /* 12 HE rates (mcs 0-11) */ +#define WL_RATESET_SZ_EHT_MCS 14u /* 14 EHT rates (mcs 0-13) */ + +#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */ + +#define WL_TX_CHAINS_MAX 4 + +#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */ + +/* Transmit channel bandwidths */ +typedef enum wl_tx_bw { + WL_TX_BW_20, + WL_TX_BW_40, + WL_TX_BW_80, + WL_TX_BW_20IN40, + WL_TX_BW_20IN80, + WL_TX_BW_40IN80, + WL_TX_BW_160, + WL_TX_BW_20IN160, + WL_TX_BW_40IN160, + WL_TX_BW_80IN160, + WL_TX_BW_240, + WL_TX_BW_20IN240, + WL_TX_BW_40IN240, + WL_TX_BW_80IN240, + WL_TX_BW_160IN240, + WL_TX_BW_320, + WL_TX_BW_20IN320, + WL_TX_BW_40IN320, + WL_TX_BW_80IN320, + WL_TX_BW_160IN320, + WL_TX_BW_ALL +} wl_tx_bw_t; + +/* + * Transmit modes. + * Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed + */ +typedef enum wl_tx_mode { + WL_TX_MODE_NONE, + WL_TX_MODE_STBC, + WL_TX_MODE_CDD, + WL_TX_MODE_TXBF, + WL_NUM_TX_MODES +} wl_tx_mode_t; + +/* Number of transmit chains */ +typedef enum wl_tx_chains { + WL_TX_CHAINS_1 = 1, + WL_TX_CHAINS_2, + WL_TX_CHAINS_3, + WL_TX_CHAINS_4 +} wl_tx_chains_t; + +/* Number of transmit streams */ +typedef enum wl_tx_nss { + WL_TX_NSS_1 = 1, + WL_TX_NSS_2, + WL_TX_NSS_3, + WL_TX_NSS_4 +} wl_tx_nss_t; + +/* 802.11ax rate types */ +typedef enum wl_he_rate_type { + WL_HE_RT_SU = 0, + WL_HE_RT_RU26 = 1, + WL_HE_RT_RU52 = 2, + WL_HE_RT_RU106 = 3, + WL_HE_RT_UB = 4, + WL_HE_RT_LUB = 5, + WL_HE_RT_RU242 = 6, + WL_HE_RT_RU484 = 7, + WL_HE_RT_RU996 = 8 +} wl_he_rate_type_t; + +#define WL_NUM_HE_RT 9u + +/* This enum maps each rate to a CLM index + * 802.11ax OFDMA (RU) rates are in separate enum + */ + +typedef enum clm_rates { + /************ + * 1 chain * + ************ + */ + + /* 1 Stream */ + WL_RATE_1X1_DSSS_1 = 0, + WL_RATE_1X1_DSSS_2 = 1, + WL_RATE_1X1_DSSS_5_5 = 2, + WL_RATE_1X1_DSSS_11 = 3, + + WL_RATE_1X1_OFDM_6 = 4, + WL_RATE_1X1_OFDM_9 = 5, + WL_RATE_1X1_OFDM_12 = 6, + WL_RATE_1X1_OFDM_18 = 7, + WL_RATE_1X1_OFDM_24 = 8, + WL_RATE_1X1_OFDM_36 = 9, + WL_RATE_1X1_OFDM_48 = 10, + WL_RATE_1X1_OFDM_54 = 11, + + WL_RATE_1X1_MCS0 = 12, + WL_RATE_1X1_MCS1 = 13, + WL_RATE_1X1_MCS2 = 14, + WL_RATE_1X1_MCS3 = 15, + WL_RATE_1X1_MCS4 = 16, + WL_RATE_1X1_MCS5 = 17, + WL_RATE_1X1_MCS6 = 18, + WL_RATE_1X1_MCS7 = 19, + WL_RATE_P_1X1_MCS87 = 20, + WL_RATE_P_1X1_MCS88 = 21, + + WL_RATE_1X1_VHT0SS1 = 12, + WL_RATE_1X1_VHT1SS1 = 13, + WL_RATE_1X1_VHT2SS1 = 14, + WL_RATE_1X1_VHT3SS1 = 15, + WL_RATE_1X1_VHT4SS1 = 16, + WL_RATE_1X1_VHT5SS1 = 17, + WL_RATE_1X1_VHT6SS1 = 18, + WL_RATE_1X1_VHT7SS1 = 19, + WL_RATE_1X1_VHT8SS1 = 20, + WL_RATE_1X1_VHT9SS1 = 21, + WL_RATE_P_1X1_VHT10SS1 = 22, + WL_RATE_P_1X1_VHT11SS1 = 23, + + WL_RATE_1X1_HE0SS1 = 24, + WL_RATE_1X1_HE1SS1 = 25, + WL_RATE_1X1_HE2SS1 = 26, + WL_RATE_1X1_HE3SS1 = 27, + WL_RATE_1X1_HE4SS1 = 28, + WL_RATE_1X1_HE5SS1 = 29, + WL_RATE_1X1_HE6SS1 = 30, + WL_RATE_1X1_HE7SS1 = 31, + WL_RATE_1X1_HE8SS1 = 32, + WL_RATE_1X1_HE9SS1 = 33, + WL_RATE_1X1_HE10SS1 = 34, + WL_RATE_1X1_HE11SS1 = 35, 
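+
+	/* Note: the HT MCS and VHT SS1 enumerators above deliberately share
+	 * CLM index values (e.g. WL_RATE_1X1_MCS0 == WL_RATE_1X1_VHT0SS1 == 12):
+	 * a single CLM power-target slot serves both encodings of the same rate.
+	 * A hypothetical compile-time guard for the aliasing could look like:
+	 *
+	 *   typedef char clm_alias_chk[(WL_RATE_1X1_MCS0 == WL_RATE_1X1_VHT0SS1) ? 1 : -1];
+	 */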
+ + /************ + * 2 chains * + ************ + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_DSSS_1 = 36, + WL_RATE_1X2_DSSS_2 = 37, + WL_RATE_1X2_DSSS_5_5 = 38, + WL_RATE_1X2_DSSS_11 = 39, + + WL_RATE_1X2_CDD_OFDM_6 = 40, + WL_RATE_1X2_CDD_OFDM_9 = 41, + WL_RATE_1X2_CDD_OFDM_12 = 42, + WL_RATE_1X2_CDD_OFDM_18 = 43, + WL_RATE_1X2_CDD_OFDM_24 = 44, + WL_RATE_1X2_CDD_OFDM_36 = 45, + WL_RATE_1X2_CDD_OFDM_48 = 46, + WL_RATE_1X2_CDD_OFDM_54 = 47, + + WL_RATE_1X2_CDD_MCS0 = 48, + WL_RATE_1X2_CDD_MCS1 = 49, + WL_RATE_1X2_CDD_MCS2 = 50, + WL_RATE_1X2_CDD_MCS3 = 51, + WL_RATE_1X2_CDD_MCS4 = 52, + WL_RATE_1X2_CDD_MCS5 = 53, + WL_RATE_1X2_CDD_MCS6 = 54, + WL_RATE_1X2_CDD_MCS7 = 55, + WL_RATE_P_1X2_CDD_MCS87 = 56, + WL_RATE_P_1X2_CDD_MCS88 = 57, + + WL_RATE_1X2_VHT0SS1 = 48, + WL_RATE_1X2_VHT1SS1 = 49, + WL_RATE_1X2_VHT2SS1 = 50, + WL_RATE_1X2_VHT3SS1 = 51, + WL_RATE_1X2_VHT4SS1 = 52, + WL_RATE_1X2_VHT5SS1 = 53, + WL_RATE_1X2_VHT6SS1 = 54, + WL_RATE_1X2_VHT7SS1 = 55, + WL_RATE_1X2_VHT8SS1 = 56, + WL_RATE_1X2_VHT9SS1 = 57, + WL_RATE_P_1X2_VHT10SS1 = 58, + WL_RATE_P_1X2_VHT11SS1 = 59, + + WL_RATE_1X2_HE0SS1 = 60, + WL_RATE_1X2_HE1SS1 = 61, + WL_RATE_1X2_HE2SS1 = 62, + WL_RATE_1X2_HE3SS1 = 63, + WL_RATE_1X2_HE4SS1 = 64, + WL_RATE_1X2_HE5SS1 = 65, + WL_RATE_1X2_HE6SS1 = 66, + WL_RATE_1X2_HE7SS1 = 67, + WL_RATE_1X2_HE8SS1 = 68, + WL_RATE_1X2_HE9SS1 = 69, + WL_RATE_1X2_HE10SS1 = 70, + WL_RATE_1X2_HE11SS1 = 71, + + /* 2 Streams */ + WL_RATE_2X2_STBC_MCS0 = 72, + WL_RATE_2X2_STBC_MCS1 = 73, + WL_RATE_2X2_STBC_MCS2 = 74, + WL_RATE_2X2_STBC_MCS3 = 75, + WL_RATE_2X2_STBC_MCS4 = 76, + WL_RATE_2X2_STBC_MCS5 = 77, + WL_RATE_2X2_STBC_MCS6 = 78, + WL_RATE_2X2_STBC_MCS7 = 79, + WL_RATE_P_2X2_STBC_MCS87 = 80, + WL_RATE_P_2X2_STBC_MCS88 = 81, + + WL_RATE_2X2_STBC_VHT0SS1 = 72, + WL_RATE_2X2_STBC_VHT1SS1 = 73, + WL_RATE_2X2_STBC_VHT2SS1 = 74, + WL_RATE_2X2_STBC_VHT3SS1 = 75, + WL_RATE_2X2_STBC_VHT4SS1 = 76, + WL_RATE_2X2_STBC_VHT5SS1 = 77, + WL_RATE_2X2_STBC_VHT6SS1 = 78, + WL_RATE_2X2_STBC_VHT7SS1 = 79, + WL_RATE_2X2_STBC_VHT8SS1 = 80, + WL_RATE_2X2_STBC_VHT9SS1 = 81, + WL_RATE_P_2X2_STBC_VHT10SS1 = 82, + WL_RATE_P_2X2_STBC_VHT11SS1 = 83, + + WL_RATE_2X2_SDM_MCS8 = 84, + WL_RATE_2X2_SDM_MCS9 = 85, + WL_RATE_2X2_SDM_MCS10 = 86, + WL_RATE_2X2_SDM_MCS11 = 87, + WL_RATE_2X2_SDM_MCS12 = 88, + WL_RATE_2X2_SDM_MCS13 = 89, + WL_RATE_2X2_SDM_MCS14 = 90, + WL_RATE_2X2_SDM_MCS15 = 91, + WL_RATE_P_2X2_SDM_MCS99 = 92, + WL_RATE_P_2X2_SDM_MCS100 = 93, + + WL_RATE_2X2_VHT0SS2 = 84, + WL_RATE_2X2_VHT1SS2 = 85, + WL_RATE_2X2_VHT2SS2 = 86, + WL_RATE_2X2_VHT3SS2 = 87, + WL_RATE_2X2_VHT4SS2 = 88, + WL_RATE_2X2_VHT5SS2 = 89, + WL_RATE_2X2_VHT6SS2 = 90, + WL_RATE_2X2_VHT7SS2 = 91, + WL_RATE_2X2_VHT8SS2 = 92, + WL_RATE_2X2_VHT9SS2 = 93, + WL_RATE_P_2X2_VHT10SS2 = 94, + WL_RATE_P_2X2_VHT11SS2 = 95, + + WL_RATE_2X2_HE0SS2 = 96, + WL_RATE_2X2_HE1SS2 = 97, + WL_RATE_2X2_HE2SS2 = 98, + WL_RATE_2X2_HE3SS2 = 99, + WL_RATE_2X2_HE4SS2 = 100, + WL_RATE_2X2_HE5SS2 = 101, + WL_RATE_2X2_HE6SS2 = 102, + WL_RATE_2X2_HE7SS2 = 103, + WL_RATE_2X2_HE8SS2 = 104, + WL_RATE_2X2_HE9SS2 = 105, + WL_RATE_2X2_HE10SS2 = 106, + WL_RATE_2X2_HE11SS2 = 107, + + /**************************** + * TX Beamforming, 2 chains * + **************************** + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_TXBF_OFDM_6 = 108, + WL_RATE_1X2_TXBF_OFDM_9 = 109, + WL_RATE_1X2_TXBF_OFDM_12 = 110, + WL_RATE_1X2_TXBF_OFDM_18 = 111, + WL_RATE_1X2_TXBF_OFDM_24 = 112, + WL_RATE_1X2_TXBF_OFDM_36 = 113, + WL_RATE_1X2_TXBF_OFDM_48 = 114, + WL_RATE_1X2_TXBF_OFDM_54 = 115, + + 
WL_RATE_1X2_TXBF_MCS0 = 116, + WL_RATE_1X2_TXBF_MCS1 = 117, + WL_RATE_1X2_TXBF_MCS2 = 118, + WL_RATE_1X2_TXBF_MCS3 = 119, + WL_RATE_1X2_TXBF_MCS4 = 120, + WL_RATE_1X2_TXBF_MCS5 = 121, + WL_RATE_1X2_TXBF_MCS6 = 122, + WL_RATE_1X2_TXBF_MCS7 = 123, + WL_RATE_P_1X2_TXBF_MCS87 = 124, + WL_RATE_P_1X2_TXBF_MCS88 = 125, + + WL_RATE_1X2_TXBF_VHT0SS1 = 116, + WL_RATE_1X2_TXBF_VHT1SS1 = 117, + WL_RATE_1X2_TXBF_VHT2SS1 = 118, + WL_RATE_1X2_TXBF_VHT3SS1 = 119, + WL_RATE_1X2_TXBF_VHT4SS1 = 120, + WL_RATE_1X2_TXBF_VHT5SS1 = 121, + WL_RATE_1X2_TXBF_VHT6SS1 = 122, + WL_RATE_1X2_TXBF_VHT7SS1 = 123, + WL_RATE_1X2_TXBF_VHT8SS1 = 124, + WL_RATE_1X2_TXBF_VHT9SS1 = 125, + WL_RATE_P_1X2_TXBF_VHT10SS1 = 126, + WL_RATE_P_1X2_TXBF_VHT11SS1 = 127, + + WL_RATE_1X2_TXBF_HE0SS1 = 128, + WL_RATE_1X2_TXBF_HE1SS1 = 129, + WL_RATE_1X2_TXBF_HE2SS1 = 130, + WL_RATE_1X2_TXBF_HE3SS1 = 131, + WL_RATE_1X2_TXBF_HE4SS1 = 132, + WL_RATE_1X2_TXBF_HE5SS1 = 133, + WL_RATE_1X2_TXBF_HE6SS1 = 134, + WL_RATE_1X2_TXBF_HE7SS1 = 135, + WL_RATE_1X2_TXBF_HE8SS1 = 136, + WL_RATE_1X2_TXBF_HE9SS1 = 137, + WL_RATE_1X2_TXBF_HE10SS1 = 138, + WL_RATE_1X2_TXBF_HE11SS1 = 139, + + /* 2 Streams */ + WL_RATE_2X2_TXBF_SDM_MCS8 = 140, + WL_RATE_2X2_TXBF_SDM_MCS9 = 141, + WL_RATE_2X2_TXBF_SDM_MCS10 = 142, + WL_RATE_2X2_TXBF_SDM_MCS11 = 143, + WL_RATE_2X2_TXBF_SDM_MCS12 = 144, + WL_RATE_2X2_TXBF_SDM_MCS13 = 145, + WL_RATE_2X2_TXBF_SDM_MCS14 = 146, + WL_RATE_2X2_TXBF_SDM_MCS15 = 147, + WL_RATE_P_2X2_TXBF_SDM_MCS99 = 148, + WL_RATE_P_2X2_TXBF_SDM_MCS100 = 149, + + WL_RATE_2X2_TXBF_VHT0SS2 = 140, + WL_RATE_2X2_TXBF_VHT1SS2 = 141, + WL_RATE_2X2_TXBF_VHT2SS2 = 142, + WL_RATE_2X2_TXBF_VHT3SS2 = 143, + WL_RATE_2X2_TXBF_VHT4SS2 = 144, + WL_RATE_2X2_TXBF_VHT5SS2 = 145, + WL_RATE_2X2_TXBF_VHT6SS2 = 146, + WL_RATE_2X2_TXBF_VHT7SS2 = 147, + WL_RATE_2X2_TXBF_VHT8SS2 = 148, + WL_RATE_2X2_TXBF_VHT9SS2 = 149, + WL_RATE_P_2X2_TXBF_VHT10SS2 = 150, + WL_RATE_P_2X2_TXBF_VHT11SS2 = 151, + + WL_RATE_2X2_TXBF_HE0SS2 = 152, + WL_RATE_2X2_TXBF_HE1SS2 = 153, + WL_RATE_2X2_TXBF_HE2SS2 = 154, + WL_RATE_2X2_TXBF_HE3SS2 = 155, + WL_RATE_2X2_TXBF_HE4SS2 = 156, + WL_RATE_2X2_TXBF_HE5SS2 = 157, + WL_RATE_2X2_TXBF_HE6SS2 = 158, + WL_RATE_2X2_TXBF_HE7SS2 = 159, + WL_RATE_2X2_TXBF_HE8SS2 = 160, + WL_RATE_2X2_TXBF_HE9SS2 = 161, + WL_RATE_2X2_TXBF_HE10SS2 = 162, + WL_RATE_2X2_TXBF_HE11SS2 = 163, + + /************ + * 3 chains * + ************ + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_DSSS_1 = 164, + WL_RATE_1X3_DSSS_2 = 165, + WL_RATE_1X3_DSSS_5_5 = 166, + WL_RATE_1X3_DSSS_11 = 167, + + WL_RATE_1X3_CDD_OFDM_6 = 168, + WL_RATE_1X3_CDD_OFDM_9 = 169, + WL_RATE_1X3_CDD_OFDM_12 = 170, + WL_RATE_1X3_CDD_OFDM_18 = 171, + WL_RATE_1X3_CDD_OFDM_24 = 172, + WL_RATE_1X3_CDD_OFDM_36 = 173, + WL_RATE_1X3_CDD_OFDM_48 = 174, + WL_RATE_1X3_CDD_OFDM_54 = 175, + + WL_RATE_1X3_CDD_MCS0 = 176, + WL_RATE_1X3_CDD_MCS1 = 177, + WL_RATE_1X3_CDD_MCS2 = 178, + WL_RATE_1X3_CDD_MCS3 = 179, + WL_RATE_1X3_CDD_MCS4 = 180, + WL_RATE_1X3_CDD_MCS5 = 181, + WL_RATE_1X3_CDD_MCS6 = 182, + WL_RATE_1X3_CDD_MCS7 = 183, + WL_RATE_P_1X3_CDD_MCS87 = 184, + WL_RATE_P_1X3_CDD_MCS88 = 185, + + WL_RATE_1X3_VHT0SS1 = 176, + WL_RATE_1X3_VHT1SS1 = 177, + WL_RATE_1X3_VHT2SS1 = 178, + WL_RATE_1X3_VHT3SS1 = 179, + WL_RATE_1X3_VHT4SS1 = 180, + WL_RATE_1X3_VHT5SS1 = 181, + WL_RATE_1X3_VHT6SS1 = 182, + WL_RATE_1X3_VHT7SS1 = 183, + WL_RATE_1X3_VHT8SS1 = 184, + WL_RATE_1X3_VHT9SS1 = 185, + WL_RATE_P_1X3_VHT10SS1 = 186, + WL_RATE_P_1X3_VHT11SS1 = 187, + + WL_RATE_1X3_HE0SS1 = 188, + WL_RATE_1X3_HE1SS1 = 189, + WL_RATE_1X3_HE2SS1 = 190, + 
WL_RATE_1X3_HE3SS1 = 191, + WL_RATE_1X3_HE4SS1 = 192, + WL_RATE_1X3_HE5SS1 = 193, + WL_RATE_1X3_HE6SS1 = 194, + WL_RATE_1X3_HE7SS1 = 195, + WL_RATE_1X3_HE8SS1 = 196, + WL_RATE_1X3_HE9SS1 = 197, + WL_RATE_1X3_HE10SS1 = 198, + WL_RATE_1X3_HE11SS1 = 199, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_STBC_MCS0 = 200, + WL_RATE_2X3_STBC_MCS1 = 201, + WL_RATE_2X3_STBC_MCS2 = 202, + WL_RATE_2X3_STBC_MCS3 = 203, + WL_RATE_2X3_STBC_MCS4 = 204, + WL_RATE_2X3_STBC_MCS5 = 205, + WL_RATE_2X3_STBC_MCS6 = 206, + WL_RATE_2X3_STBC_MCS7 = 207, + WL_RATE_P_2X3_STBC_MCS87 = 208, + WL_RATE_P_2X3_STBC_MCS88 = 209, + + WL_RATE_2X3_STBC_VHT0SS1 = 200, + WL_RATE_2X3_STBC_VHT1SS1 = 201, + WL_RATE_2X3_STBC_VHT2SS1 = 202, + WL_RATE_2X3_STBC_VHT3SS1 = 203, + WL_RATE_2X3_STBC_VHT4SS1 = 204, + WL_RATE_2X3_STBC_VHT5SS1 = 205, + WL_RATE_2X3_STBC_VHT6SS1 = 206, + WL_RATE_2X3_STBC_VHT7SS1 = 207, + WL_RATE_2X3_STBC_VHT8SS1 = 208, + WL_RATE_2X3_STBC_VHT9SS1 = 209, + WL_RATE_P_2X3_STBC_VHT10SS1 = 210, + WL_RATE_P_2X3_STBC_VHT11SS1 = 211, + + WL_RATE_2X3_SDM_MCS8 = 212, + WL_RATE_2X3_SDM_MCS9 = 213, + WL_RATE_2X3_SDM_MCS10 = 214, + WL_RATE_2X3_SDM_MCS11 = 215, + WL_RATE_2X3_SDM_MCS12 = 216, + WL_RATE_2X3_SDM_MCS13 = 217, + WL_RATE_2X3_SDM_MCS14 = 218, + WL_RATE_2X3_SDM_MCS15 = 219, + WL_RATE_P_2X3_SDM_MCS99 = 220, + WL_RATE_P_2X3_SDM_MCS100 = 221, + + WL_RATE_2X3_VHT0SS2 = 212, + WL_RATE_2X3_VHT1SS2 = 213, + WL_RATE_2X3_VHT2SS2 = 214, + WL_RATE_2X3_VHT3SS2 = 215, + WL_RATE_2X3_VHT4SS2 = 216, + WL_RATE_2X3_VHT5SS2 = 217, + WL_RATE_2X3_VHT6SS2 = 218, + WL_RATE_2X3_VHT7SS2 = 219, + WL_RATE_2X3_VHT8SS2 = 220, + WL_RATE_2X3_VHT9SS2 = 221, + WL_RATE_P_2X3_VHT10SS2 = 222, + WL_RATE_P_2X3_VHT11SS2 = 223, + + WL_RATE_2X3_HE0SS2 = 224, + WL_RATE_2X3_HE1SS2 = 225, + WL_RATE_2X3_HE2SS2 = 226, + WL_RATE_2X3_HE3SS2 = 227, + WL_RATE_2X3_HE4SS2 = 228, + WL_RATE_2X3_HE5SS2 = 229, + WL_RATE_2X3_HE6SS2 = 230, + WL_RATE_2X3_HE7SS2 = 231, + WL_RATE_2X3_HE8SS2 = 232, + WL_RATE_2X3_HE9SS2 = 233, + WL_RATE_2X3_HE10SS2 = 234, + WL_RATE_2X3_HE11SS2 = 235, + + /* 3 Streams */ + WL_RATE_3X3_SDM_MCS16 = 236, + WL_RATE_3X3_SDM_MCS17 = 237, + WL_RATE_3X3_SDM_MCS18 = 238, + WL_RATE_3X3_SDM_MCS19 = 239, + WL_RATE_3X3_SDM_MCS20 = 240, + WL_RATE_3X3_SDM_MCS21 = 241, + WL_RATE_3X3_SDM_MCS22 = 242, + WL_RATE_3X3_SDM_MCS23 = 243, + WL_RATE_P_3X3_SDM_MCS101 = 244, + WL_RATE_P_3X3_SDM_MCS102 = 245, + + WL_RATE_3X3_VHT0SS3 = 236, + WL_RATE_3X3_VHT1SS3 = 237, + WL_RATE_3X3_VHT2SS3 = 238, + WL_RATE_3X3_VHT3SS3 = 239, + WL_RATE_3X3_VHT4SS3 = 240, + WL_RATE_3X3_VHT5SS3 = 241, + WL_RATE_3X3_VHT6SS3 = 242, + WL_RATE_3X3_VHT7SS3 = 243, + WL_RATE_3X3_VHT8SS3 = 244, + WL_RATE_3X3_VHT9SS3 = 245, + WL_RATE_P_3X3_VHT10SS3 = 246, + WL_RATE_P_3X3_VHT11SS3 = 247, + + WL_RATE_3X3_HE0SS3 = 248, + WL_RATE_3X3_HE1SS3 = 249, + WL_RATE_3X3_HE2SS3 = 250, + WL_RATE_3X3_HE3SS3 = 251, + WL_RATE_3X3_HE4SS3 = 252, + WL_RATE_3X3_HE5SS3 = 253, + WL_RATE_3X3_HE6SS3 = 254, + WL_RATE_3X3_HE7SS3 = 255, + WL_RATE_3X3_HE8SS3 = 256, + WL_RATE_3X3_HE9SS3 = 257, + WL_RATE_3X3_HE10SS3 = 258, + WL_RATE_3X3_HE11SS3 = 259, + + /**************************** + * TX Beamforming, 3 chains * + **************************** + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_TXBF_OFDM_6 = 260, + WL_RATE_1X3_TXBF_OFDM_9 = 261, + WL_RATE_1X3_TXBF_OFDM_12 = 262, + WL_RATE_1X3_TXBF_OFDM_18 = 263, + WL_RATE_1X3_TXBF_OFDM_24 = 264, + WL_RATE_1X3_TXBF_OFDM_36 = 265, + WL_RATE_1X3_TXBF_OFDM_48 = 266, + WL_RATE_1X3_TXBF_OFDM_54 = 267, + + WL_RATE_1X3_TXBF_MCS0 = 268, + WL_RATE_1X3_TXBF_MCS1 = 269, + WL_RATE_1X3_TXBF_MCS2 = 
270, + WL_RATE_1X3_TXBF_MCS3 = 271, + WL_RATE_1X3_TXBF_MCS4 = 272, + WL_RATE_1X3_TXBF_MCS5 = 273, + WL_RATE_1X3_TXBF_MCS6 = 274, + WL_RATE_1X3_TXBF_MCS7 = 275, + WL_RATE_P_1X3_TXBF_MCS87 = 276, + WL_RATE_P_1X3_TXBF_MCS88 = 277, + + WL_RATE_1X3_TXBF_VHT0SS1 = 268, + WL_RATE_1X3_TXBF_VHT1SS1 = 269, + WL_RATE_1X3_TXBF_VHT2SS1 = 270, + WL_RATE_1X3_TXBF_VHT3SS1 = 271, + WL_RATE_1X3_TXBF_VHT4SS1 = 272, + WL_RATE_1X3_TXBF_VHT5SS1 = 273, + WL_RATE_1X3_TXBF_VHT6SS1 = 274, + WL_RATE_1X3_TXBF_VHT7SS1 = 275, + WL_RATE_1X3_TXBF_VHT8SS1 = 276, + WL_RATE_1X3_TXBF_VHT9SS1 = 277, + WL_RATE_P_1X3_TXBF_VHT10SS1 = 278, + WL_RATE_P_1X3_TXBF_VHT11SS1 = 279, + + WL_RATE_1X3_TXBF_HE0SS1 = 280, + WL_RATE_1X3_TXBF_HE1SS1 = 281, + WL_RATE_1X3_TXBF_HE2SS1 = 282, + WL_RATE_1X3_TXBF_HE3SS1 = 283, + WL_RATE_1X3_TXBF_HE4SS1 = 284, + WL_RATE_1X3_TXBF_HE5SS1 = 285, + WL_RATE_1X3_TXBF_HE6SS1 = 286, + WL_RATE_1X3_TXBF_HE7SS1 = 287, + WL_RATE_1X3_TXBF_HE8SS1 = 288, + WL_RATE_1X3_TXBF_HE9SS1 = 289, + WL_RATE_1X3_TXBF_HE10SS1 = 290, + WL_RATE_1X3_TXBF_HE11SS1 = 291, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_TXBF_SDM_MCS8 = 292, + WL_RATE_2X3_TXBF_SDM_MCS9 = 293, + WL_RATE_2X3_TXBF_SDM_MCS10 = 294, + WL_RATE_2X3_TXBF_SDM_MCS11 = 295, + WL_RATE_2X3_TXBF_SDM_MCS12 = 296, + WL_RATE_2X3_TXBF_SDM_MCS13 = 297, + WL_RATE_2X3_TXBF_SDM_MCS14 = 298, + WL_RATE_2X3_TXBF_SDM_MCS15 = 299, + WL_RATE_P_2X3_TXBF_SDM_MCS99 = 300, + WL_RATE_P_2X3_TXBF_SDM_MCS100 = 301, + + WL_RATE_2X3_TXBF_VHT0SS2 = 292, + WL_RATE_2X3_TXBF_VHT1SS2 = 293, + WL_RATE_2X3_TXBF_VHT2SS2 = 294, + WL_RATE_2X3_TXBF_VHT3SS2 = 295, + WL_RATE_2X3_TXBF_VHT4SS2 = 296, + WL_RATE_2X3_TXBF_VHT5SS2 = 297, + WL_RATE_2X3_TXBF_VHT6SS2 = 298, + WL_RATE_2X3_TXBF_VHT7SS2 = 299, + WL_RATE_2X3_TXBF_VHT8SS2 = 300, + WL_RATE_2X3_TXBF_VHT9SS2 = 301, + WL_RATE_P_2X3_TXBF_VHT10SS2 = 302, + WL_RATE_P_2X3_TXBF_VHT11SS2 = 303, + + WL_RATE_2X3_TXBF_HE0SS2 = 304, + WL_RATE_2X3_TXBF_HE1SS2 = 305, + WL_RATE_2X3_TXBF_HE2SS2 = 306, + WL_RATE_2X3_TXBF_HE3SS2 = 307, + WL_RATE_2X3_TXBF_HE4SS2 = 308, + WL_RATE_2X3_TXBF_HE5SS2 = 309, + WL_RATE_2X3_TXBF_HE6SS2 = 310, + WL_RATE_2X3_TXBF_HE7SS2 = 311, + WL_RATE_2X3_TXBF_HE8SS2 = 312, + WL_RATE_2X3_TXBF_HE9SS2 = 313, + WL_RATE_2X3_TXBF_HE10SS2 = 314, + WL_RATE_2X3_TXBF_HE11SS2 = 315, + + /* 3 Streams */ + WL_RATE_3X3_TXBF_SDM_MCS16 = 316, + WL_RATE_3X3_TXBF_SDM_MCS17 = 317, + WL_RATE_3X3_TXBF_SDM_MCS18 = 318, + WL_RATE_3X3_TXBF_SDM_MCS19 = 319, + WL_RATE_3X3_TXBF_SDM_MCS20 = 320, + WL_RATE_3X3_TXBF_SDM_MCS21 = 321, + WL_RATE_3X3_TXBF_SDM_MCS22 = 322, + WL_RATE_3X3_TXBF_SDM_MCS23 = 323, + WL_RATE_P_3X3_TXBF_SDM_MCS101 = 324, + WL_RATE_P_3X3_TXBF_SDM_MCS102 = 325, + + WL_RATE_3X3_TXBF_VHT0SS3 = 316, + WL_RATE_3X3_TXBF_VHT1SS3 = 317, + WL_RATE_3X3_TXBF_VHT2SS3 = 318, + WL_RATE_3X3_TXBF_VHT3SS3 = 319, + WL_RATE_3X3_TXBF_VHT4SS3 = 320, + WL_RATE_3X3_TXBF_VHT5SS3 = 321, + WL_RATE_3X3_TXBF_VHT6SS3 = 322, + WL_RATE_3X3_TXBF_VHT7SS3 = 323, + WL_RATE_3X3_TXBF_VHT8SS3 = 324, + WL_RATE_3X3_TXBF_VHT9SS3 = 325, + WL_RATE_P_3X3_TXBF_VHT10SS3 = 326, + WL_RATE_P_3X3_TXBF_VHT11SS3 = 327, + + WL_RATE_3X3_TXBF_HE0SS3 = 328, + WL_RATE_3X3_TXBF_HE1SS3 = 329, + WL_RATE_3X3_TXBF_HE2SS3 = 330, + WL_RATE_3X3_TXBF_HE3SS3 = 331, + WL_RATE_3X3_TXBF_HE4SS3 = 332, + WL_RATE_3X3_TXBF_HE5SS3 = 333, + WL_RATE_3X3_TXBF_HE6SS3 = 334, + WL_RATE_3X3_TXBF_HE7SS3 = 335, + WL_RATE_3X3_TXBF_HE8SS3 = 336, + WL_RATE_3X3_TXBF_HE9SS3 = 337, + WL_RATE_3X3_TXBF_HE10SS3 = 338, + WL_RATE_3X3_TXBF_HE11SS3 = 339, + + /************ + * 4 chains * + ************ + */ + + /* 1 Stream expanded + 3 */ + 
WL_RATE_1X4_DSSS_1 = 340, + WL_RATE_1X4_DSSS_2 = 341, + WL_RATE_1X4_DSSS_5_5 = 342, + WL_RATE_1X4_DSSS_11 = 343, + + WL_RATE_1X4_CDD_OFDM_6 = 344, + WL_RATE_1X4_CDD_OFDM_9 = 345, + WL_RATE_1X4_CDD_OFDM_12 = 346, + WL_RATE_1X4_CDD_OFDM_18 = 347, + WL_RATE_1X4_CDD_OFDM_24 = 348, + WL_RATE_1X4_CDD_OFDM_36 = 349, + WL_RATE_1X4_CDD_OFDM_48 = 350, + WL_RATE_1X4_CDD_OFDM_54 = 351, + + WL_RATE_1X4_CDD_MCS0 = 352, + WL_RATE_1X4_CDD_MCS1 = 353, + WL_RATE_1X4_CDD_MCS2 = 354, + WL_RATE_1X4_CDD_MCS3 = 355, + WL_RATE_1X4_CDD_MCS4 = 356, + WL_RATE_1X4_CDD_MCS5 = 357, + WL_RATE_1X4_CDD_MCS6 = 358, + WL_RATE_1X4_CDD_MCS7 = 359, + WL_RATE_P_1X4_CDD_MCS87 = 360, + WL_RATE_P_1X4_CDD_MCS88 = 361, + + WL_RATE_1X4_VHT0SS1 = 352, + WL_RATE_1X4_VHT1SS1 = 353, + WL_RATE_1X4_VHT2SS1 = 354, + WL_RATE_1X4_VHT3SS1 = 355, + WL_RATE_1X4_VHT4SS1 = 356, + WL_RATE_1X4_VHT5SS1 = 357, + WL_RATE_1X4_VHT6SS1 = 358, + WL_RATE_1X4_VHT7SS1 = 359, + WL_RATE_1X4_VHT8SS1 = 360, + WL_RATE_1X4_VHT9SS1 = 361, + WL_RATE_P_1X4_VHT10SS1 = 362, + WL_RATE_P_1X4_VHT11SS1 = 363, + + WL_RATE_1X4_HE0SS1 = 364, + WL_RATE_1X4_HE1SS1 = 365, + WL_RATE_1X4_HE2SS1 = 366, + WL_RATE_1X4_HE3SS1 = 367, + WL_RATE_1X4_HE4SS1 = 368, + WL_RATE_1X4_HE5SS1 = 369, + WL_RATE_1X4_HE6SS1 = 370, + WL_RATE_1X4_HE7SS1 = 371, + WL_RATE_1X4_HE8SS1 = 372, + WL_RATE_1X4_HE9SS1 = 373, + WL_RATE_1X4_HE10SS1 = 374, + WL_RATE_1X4_HE11SS1 = 375, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_STBC_MCS0 = 376, + WL_RATE_2X4_STBC_MCS1 = 377, + WL_RATE_2X4_STBC_MCS2 = 378, + WL_RATE_2X4_STBC_MCS3 = 379, + WL_RATE_2X4_STBC_MCS4 = 380, + WL_RATE_2X4_STBC_MCS5 = 381, + WL_RATE_2X4_STBC_MCS6 = 382, + WL_RATE_2X4_STBC_MCS7 = 383, + WL_RATE_P_2X4_STBC_MCS87 = 384, + WL_RATE_P_2X4_STBC_MCS88 = 385, + + WL_RATE_2X4_STBC_VHT0SS1 = 376, + WL_RATE_2X4_STBC_VHT1SS1 = 377, + WL_RATE_2X4_STBC_VHT2SS1 = 378, + WL_RATE_2X4_STBC_VHT3SS1 = 379, + WL_RATE_2X4_STBC_VHT4SS1 = 380, + WL_RATE_2X4_STBC_VHT5SS1 = 381, + WL_RATE_2X4_STBC_VHT6SS1 = 382, + WL_RATE_2X4_STBC_VHT7SS1 = 383, + WL_RATE_2X4_STBC_VHT8SS1 = 384, + WL_RATE_2X4_STBC_VHT9SS1 = 385, + WL_RATE_P_2X4_STBC_VHT10SS1 = 386, + WL_RATE_P_2X4_STBC_VHT11SS1 = 387, + + WL_RATE_2X4_SDM_MCS8 = 388, + WL_RATE_2X4_SDM_MCS9 = 389, + WL_RATE_2X4_SDM_MCS10 = 390, + WL_RATE_2X4_SDM_MCS11 = 391, + WL_RATE_2X4_SDM_MCS12 = 392, + WL_RATE_2X4_SDM_MCS13 = 393, + WL_RATE_2X4_SDM_MCS14 = 394, + WL_RATE_2X4_SDM_MCS15 = 395, + WL_RATE_P_2X4_SDM_MCS99 = 396, + WL_RATE_P_2X4_SDM_MCS100 = 397, + + WL_RATE_2X4_VHT0SS2 = 388, + WL_RATE_2X4_VHT1SS2 = 389, + WL_RATE_2X4_VHT2SS2 = 390, + WL_RATE_2X4_VHT3SS2 = 391, + WL_RATE_2X4_VHT4SS2 = 392, + WL_RATE_2X4_VHT5SS2 = 393, + WL_RATE_2X4_VHT6SS2 = 394, + WL_RATE_2X4_VHT7SS2 = 395, + WL_RATE_2X4_VHT8SS2 = 396, + WL_RATE_2X4_VHT9SS2 = 397, + WL_RATE_P_2X4_VHT10SS2 = 398, + WL_RATE_P_2X4_VHT11SS2 = 399, + + WL_RATE_2X4_HE0SS2 = 400, + WL_RATE_2X4_HE1SS2 = 401, + WL_RATE_2X4_HE2SS2 = 402, + WL_RATE_2X4_HE3SS2 = 403, + WL_RATE_2X4_HE4SS2 = 404, + WL_RATE_2X4_HE5SS2 = 405, + WL_RATE_2X4_HE6SS2 = 406, + WL_RATE_2X4_HE7SS2 = 407, + WL_RATE_2X4_HE8SS2 = 408, + WL_RATE_2X4_HE9SS2 = 409, + WL_RATE_2X4_HE10SS2 = 410, + WL_RATE_2X4_HE11SS2 = 411, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_SDM_MCS16 = 412, + WL_RATE_3X4_SDM_MCS17 = 413, + WL_RATE_3X4_SDM_MCS18 = 414, + WL_RATE_3X4_SDM_MCS19 = 415, + WL_RATE_3X4_SDM_MCS20 = 416, + WL_RATE_3X4_SDM_MCS21 = 417, + WL_RATE_3X4_SDM_MCS22 = 418, + WL_RATE_3X4_SDM_MCS23 = 419, + WL_RATE_P_3X4_SDM_MCS101 = 420, + WL_RATE_P_3X4_SDM_MCS102 = 421, + + WL_RATE_3X4_VHT0SS3 = 412, + 
WL_RATE_3X4_VHT1SS3 = 413, + WL_RATE_3X4_VHT2SS3 = 414, + WL_RATE_3X4_VHT3SS3 = 415, + WL_RATE_3X4_VHT4SS3 = 416, + WL_RATE_3X4_VHT5SS3 = 417, + WL_RATE_3X4_VHT6SS3 = 418, + WL_RATE_3X4_VHT7SS3 = 419, + WL_RATE_3X4_VHT8SS3 = 420, + WL_RATE_3X4_VHT9SS3 = 421, + WL_RATE_P_3X4_VHT10SS3 = 422, + WL_RATE_P_3X4_VHT11SS3 = 423, + + WL_RATE_3X4_HE0SS3 = 424, + WL_RATE_3X4_HE1SS3 = 425, + WL_RATE_3X4_HE2SS3 = 426, + WL_RATE_3X4_HE3SS3 = 427, + WL_RATE_3X4_HE4SS3 = 428, + WL_RATE_3X4_HE5SS3 = 429, + WL_RATE_3X4_HE6SS3 = 430, + WL_RATE_3X4_HE7SS3 = 431, + WL_RATE_3X4_HE8SS3 = 432, + WL_RATE_3X4_HE9SS3 = 433, + WL_RATE_3X4_HE10SS3 = 434, + WL_RATE_3X4_HE11SS3 = 435, + + /* 4 Streams */ + WL_RATE_4X4_SDM_MCS24 = 436, + WL_RATE_4X4_SDM_MCS25 = 437, + WL_RATE_4X4_SDM_MCS26 = 438, + WL_RATE_4X4_SDM_MCS27 = 439, + WL_RATE_4X4_SDM_MCS28 = 440, + WL_RATE_4X4_SDM_MCS29 = 441, + WL_RATE_4X4_SDM_MCS30 = 442, + WL_RATE_4X4_SDM_MCS31 = 443, + WL_RATE_P_4X4_SDM_MCS103 = 444, + WL_RATE_P_4X4_SDM_MCS104 = 445, + + WL_RATE_4X4_VHT0SS4 = 436, + WL_RATE_4X4_VHT1SS4 = 437, + WL_RATE_4X4_VHT2SS4 = 438, + WL_RATE_4X4_VHT3SS4 = 439, + WL_RATE_4X4_VHT4SS4 = 440, + WL_RATE_4X4_VHT5SS4 = 441, + WL_RATE_4X4_VHT6SS4 = 442, + WL_RATE_4X4_VHT7SS4 = 443, + WL_RATE_4X4_VHT8SS4 = 444, + WL_RATE_4X4_VHT9SS4 = 445, + WL_RATE_P_4X4_VHT10SS4 = 446, + WL_RATE_P_4X4_VHT11SS4 = 447, + + WL_RATE_4X4_HE0SS4 = 448, + WL_RATE_4X4_HE1SS4 = 449, + WL_RATE_4X4_HE2SS4 = 450, + WL_RATE_4X4_HE3SS4 = 451, + WL_RATE_4X4_HE4SS4 = 452, + WL_RATE_4X4_HE5SS4 = 453, + WL_RATE_4X4_HE6SS4 = 454, + WL_RATE_4X4_HE7SS4 = 455, + WL_RATE_4X4_HE8SS4 = 456, + WL_RATE_4X4_HE9SS4 = 457, + WL_RATE_4X4_HE10SS4 = 458, + WL_RATE_4X4_HE11SS4 = 459, + + /**************************** + * TX Beamforming, 4 chains * + **************************** + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_TXBF_OFDM_6 = 460, + WL_RATE_1X4_TXBF_OFDM_9 = 461, + WL_RATE_1X4_TXBF_OFDM_12 = 462, + WL_RATE_1X4_TXBF_OFDM_18 = 463, + WL_RATE_1X4_TXBF_OFDM_24 = 464, + WL_RATE_1X4_TXBF_OFDM_36 = 465, + WL_RATE_1X4_TXBF_OFDM_48 = 466, + WL_RATE_1X4_TXBF_OFDM_54 = 467, + + WL_RATE_1X4_TXBF_MCS0 = 468, + WL_RATE_1X4_TXBF_MCS1 = 469, + WL_RATE_1X4_TXBF_MCS2 = 470, + WL_RATE_1X4_TXBF_MCS3 = 471, + WL_RATE_1X4_TXBF_MCS4 = 472, + WL_RATE_1X4_TXBF_MCS5 = 473, + WL_RATE_1X4_TXBF_MCS6 = 474, + WL_RATE_1X4_TXBF_MCS7 = 475, + WL_RATE_P_1X4_TXBF_MCS87 = 476, + WL_RATE_P_1X4_TXBF_MCS88 = 477, + + WL_RATE_1X4_TXBF_VHT0SS1 = 468, + WL_RATE_1X4_TXBF_VHT1SS1 = 469, + WL_RATE_1X4_TXBF_VHT2SS1 = 470, + WL_RATE_1X4_TXBF_VHT3SS1 = 471, + WL_RATE_1X4_TXBF_VHT4SS1 = 472, + WL_RATE_1X4_TXBF_VHT5SS1 = 473, + WL_RATE_1X4_TXBF_VHT6SS1 = 474, + WL_RATE_1X4_TXBF_VHT7SS1 = 475, + WL_RATE_1X4_TXBF_VHT8SS1 = 476, + WL_RATE_1X4_TXBF_VHT9SS1 = 477, + WL_RATE_P_1X4_TXBF_VHT10SS1 = 478, + WL_RATE_P_1X4_TXBF_VHT11SS1 = 479, + + WL_RATE_1X4_TXBF_HE0SS1 = 480, + WL_RATE_1X4_TXBF_HE1SS1 = 481, + WL_RATE_1X4_TXBF_HE2SS1 = 482, + WL_RATE_1X4_TXBF_HE3SS1 = 483, + WL_RATE_1X4_TXBF_HE4SS1 = 484, + WL_RATE_1X4_TXBF_HE5SS1 = 485, + WL_RATE_1X4_TXBF_HE6SS1 = 486, + WL_RATE_1X4_TXBF_HE7SS1 = 487, + WL_RATE_1X4_TXBF_HE8SS1 = 488, + WL_RATE_1X4_TXBF_HE9SS1 = 489, + WL_RATE_1X4_TXBF_HE10SS1 = 490, + WL_RATE_1X4_TXBF_HE11SS1 = 491, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_TXBF_SDM_MCS8 = 492, + WL_RATE_2X4_TXBF_SDM_MCS9 = 493, + WL_RATE_2X4_TXBF_SDM_MCS10 = 494, + WL_RATE_2X4_TXBF_SDM_MCS11 = 495, + WL_RATE_2X4_TXBF_SDM_MCS12 = 496, + WL_RATE_2X4_TXBF_SDM_MCS13 = 497, + WL_RATE_2X4_TXBF_SDM_MCS14 = 498, + WL_RATE_2X4_TXBF_SDM_MCS15 = 499, + 
WL_RATE_P_2X4_TXBF_SDM_MCS99 = 500, + WL_RATE_P_2X4_TXBF_SDM_MCS100 = 501, + + WL_RATE_2X4_TXBF_VHT0SS2 = 492, + WL_RATE_2X4_TXBF_VHT1SS2 = 493, + WL_RATE_2X4_TXBF_VHT2SS2 = 494, + WL_RATE_2X4_TXBF_VHT3SS2 = 495, + WL_RATE_2X4_TXBF_VHT4SS2 = 496, + WL_RATE_2X4_TXBF_VHT5SS2 = 497, + WL_RATE_2X4_TXBF_VHT6SS2 = 498, + WL_RATE_2X4_TXBF_VHT7SS2 = 499, + WL_RATE_2X4_TXBF_VHT8SS2 = 500, + WL_RATE_2X4_TXBF_VHT9SS2 = 501, + WL_RATE_P_2X4_TXBF_VHT10SS2 = 502, + WL_RATE_P_2X4_TXBF_VHT11SS2 = 503, + + WL_RATE_2X4_TXBF_HE0SS2 = 504, + WL_RATE_2X4_TXBF_HE1SS2 = 505, + WL_RATE_2X4_TXBF_HE2SS2 = 506, + WL_RATE_2X4_TXBF_HE3SS2 = 507, + WL_RATE_2X4_TXBF_HE4SS2 = 508, + WL_RATE_2X4_TXBF_HE5SS2 = 509, + WL_RATE_2X4_TXBF_HE6SS2 = 510, + WL_RATE_2X4_TXBF_HE7SS2 = 511, + WL_RATE_2X4_TXBF_HE8SS2 = 512, + WL_RATE_2X4_TXBF_HE9SS2 = 513, + WL_RATE_2X4_TXBF_HE10SS2 = 514, + WL_RATE_2X4_TXBF_HE11SS2 = 515, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_TXBF_SDM_MCS16 = 516, + WL_RATE_3X4_TXBF_SDM_MCS17 = 517, + WL_RATE_3X4_TXBF_SDM_MCS18 = 518, + WL_RATE_3X4_TXBF_SDM_MCS19 = 519, + WL_RATE_3X4_TXBF_SDM_MCS20 = 520, + WL_RATE_3X4_TXBF_SDM_MCS21 = 521, + WL_RATE_3X4_TXBF_SDM_MCS22 = 522, + WL_RATE_3X4_TXBF_SDM_MCS23 = 523, + WL_RATE_P_3X4_TXBF_SDM_MCS101 = 524, + WL_RATE_P_3X4_TXBF_SDM_MCS102 = 525, + + WL_RATE_3X4_TXBF_VHT0SS3 = 516, + WL_RATE_3X4_TXBF_VHT1SS3 = 517, + WL_RATE_3X4_TXBF_VHT2SS3 = 518, + WL_RATE_3X4_TXBF_VHT3SS3 = 519, + WL_RATE_3X4_TXBF_VHT4SS3 = 520, + WL_RATE_3X4_TXBF_VHT5SS3 = 521, + WL_RATE_3X4_TXBF_VHT6SS3 = 522, + WL_RATE_3X4_TXBF_VHT7SS3 = 523, + WL_RATE_P_3X4_TXBF_VHT8SS3 = 524, + WL_RATE_P_3X4_TXBF_VHT9SS3 = 525, + WL_RATE_P_3X4_TXBF_VHT10SS3 = 526, + WL_RATE_P_3X4_TXBF_VHT11SS3 = 527, + + WL_RATE_3X4_TXBF_HE0SS3 = 528, + WL_RATE_3X4_TXBF_HE1SS3 = 529, + WL_RATE_3X4_TXBF_HE2SS3 = 530, + WL_RATE_3X4_TXBF_HE3SS3 = 531, + WL_RATE_3X4_TXBF_HE4SS3 = 532, + WL_RATE_3X4_TXBF_HE5SS3 = 533, + WL_RATE_3X4_TXBF_HE6SS3 = 534, + WL_RATE_3X4_TXBF_HE7SS3 = 535, + WL_RATE_3X4_TXBF_HE8SS3 = 536, + WL_RATE_3X4_TXBF_HE9SS3 = 537, + WL_RATE_3X4_TXBF_HE10SS3 = 538, + WL_RATE_3X4_TXBF_HE11SS3 = 539, + + /* 4 Streams */ + WL_RATE_4X4_TXBF_SDM_MCS24 = 540, + WL_RATE_4X4_TXBF_SDM_MCS25 = 541, + WL_RATE_4X4_TXBF_SDM_MCS26 = 542, + WL_RATE_4X4_TXBF_SDM_MCS27 = 543, + WL_RATE_4X4_TXBF_SDM_MCS28 = 544, + WL_RATE_4X4_TXBF_SDM_MCS29 = 545, + WL_RATE_4X4_TXBF_SDM_MCS30 = 546, + WL_RATE_4X4_TXBF_SDM_MCS31 = 547, + WL_RATE_P_4X4_TXBF_SDM_MCS103 = 548, + WL_RATE_P_4X4_TXBF_SDM_MCS104 = 549, + + WL_RATE_4X4_TXBF_VHT0SS4 = 540, + WL_RATE_4X4_TXBF_VHT1SS4 = 541, + WL_RATE_4X4_TXBF_VHT2SS4 = 542, + WL_RATE_4X4_TXBF_VHT3SS4 = 543, + WL_RATE_4X4_TXBF_VHT4SS4 = 544, + WL_RATE_4X4_TXBF_VHT5SS4 = 545, + WL_RATE_4X4_TXBF_VHT6SS4 = 546, + WL_RATE_4X4_TXBF_VHT7SS4 = 547, + WL_RATE_P_4X4_TXBF_VHT8SS4 = 548, + WL_RATE_P_4X4_TXBF_VHT9SS4 = 549, + WL_RATE_P_4X4_TXBF_VHT10SS4 = 550, + WL_RATE_P_4X4_TXBF_VHT11SS4 = 551, + + WL_RATE_4X4_TXBF_HE0SS4 = 552, + WL_RATE_4X4_TXBF_HE1SS4 = 553, + WL_RATE_4X4_TXBF_HE2SS4 = 554, + WL_RATE_4X4_TXBF_HE3SS4 = 555, + WL_RATE_4X4_TXBF_HE4SS4 = 556, + WL_RATE_4X4_TXBF_HE5SS4 = 557, + WL_RATE_4X4_TXBF_HE6SS4 = 558, + WL_RATE_4X4_TXBF_HE7SS4 = 559, + WL_RATE_4X4_TXBF_HE8SS4 = 560, + WL_RATE_4X4_TXBF_HE9SS4 = 561, + WL_RATE_4X4_TXBF_HE10SS4 = 562, + WL_RATE_4X4_TXBF_HE11SS4 = 563 +} clm_rates_t; + +/* Number of rate codes */ +#define WL_NUMRATES 564 + +/* This enum maps 802.11ax OFDMA (RU) 'rates' to a CLM index */ + +typedef enum clm_ru_rates { + /* RU26 OFDMA UL rates */ + WL_RU_RATE_1X1_26SS1 = 0, + 
WL_RU_RATE_1X2_26SS1 = 1, + WL_RU_RATE_2X2_26SS2 = 2, + WL_RU_RATE_1X2_TXBF_26SS1 = 3, + WL_RU_RATE_2X2_TXBF_26SS2 = 4, + WL_RU_RATE_1X3_26SS1 = 5, + WL_RU_RATE_2X3_26SS2 = 6, + WL_RU_RATE_3X3_26SS3 = 7, + WL_RU_RATE_1X3_TXBF_26SS1 = 8, + WL_RU_RATE_2X3_TXBF_26SS2 = 9, + WL_RU_RATE_3X3_TXBF_26SS3 = 10, + WL_RU_RATE_1X4_26SS1 = 11, + WL_RU_RATE_2X4_26SS2 = 12, + WL_RU_RATE_3X4_26SS3 = 13, + WL_RU_RATE_4X4_26SS4 = 14, + WL_RU_RATE_1X4_TXBF_26SS1 = 15, + WL_RU_RATE_2X4_TXBF_26SS2 = 16, + WL_RU_RATE_3X4_TXBF_26SS3 = 17, + WL_RU_RATE_4X4_TXBF_26SS4 = 18, + + /* RU52 OFDMA UL rates */ + WL_RU_RATE_1X1_52SS1 = 19, + WL_RU_RATE_1X2_52SS1 = 20, + WL_RU_RATE_2X2_52SS2 = 21, + WL_RU_RATE_1X2_TXBF_52SS1 = 22, + WL_RU_RATE_2X2_TXBF_52SS2 = 23, + WL_RU_RATE_1X3_52SS1 = 24, + WL_RU_RATE_2X3_52SS2 = 25, + WL_RU_RATE_3X3_52SS3 = 26, + WL_RU_RATE_1X3_TXBF_52SS1 = 27, + WL_RU_RATE_2X3_TXBF_52SS2 = 28, + WL_RU_RATE_3X3_TXBF_52SS3 = 29, + WL_RU_RATE_1X4_52SS1 = 30, + WL_RU_RATE_2X4_52SS2 = 31, + WL_RU_RATE_3X4_52SS3 = 32, + WL_RU_RATE_4X4_52SS4 = 33, + WL_RU_RATE_1X4_TXBF_52SS1 = 34, + WL_RU_RATE_2X4_TXBF_52SS2 = 35, + WL_RU_RATE_3X4_TXBF_52SS3 = 36, + WL_RU_RATE_4X4_TXBF_52SS4 = 37, + + /* RU106 OFDMA UL rates */ + WL_RU_RATE_1X1_106SS1 = 38, + WL_RU_RATE_1X2_106SS1 = 39, + WL_RU_RATE_2X2_106SS2 = 40, + WL_RU_RATE_1X2_TXBF_106SS1 = 41, + WL_RU_RATE_2X2_TXBF_106SS2 = 42, + WL_RU_RATE_1X3_106SS1 = 43, + WL_RU_RATE_2X3_106SS2 = 44, + WL_RU_RATE_3X3_106SS3 = 45, + WL_RU_RATE_1X3_TXBF_106SS1 = 46, + WL_RU_RATE_2X3_TXBF_106SS2 = 47, + WL_RU_RATE_3X3_TXBF_106SS3 = 48, + WL_RU_RATE_1X4_106SS1 = 49, + WL_RU_RATE_2X4_106SS2 = 50, + WL_RU_RATE_3X4_106SS3 = 51, + WL_RU_RATE_4X4_106SS4 = 52, + WL_RU_RATE_1X4_TXBF_106SS1 = 53, + WL_RU_RATE_2X4_TXBF_106SS2 = 54, + WL_RU_RATE_3X4_TXBF_106SS3 = 55, + WL_RU_RATE_4X4_TXBF_106SS4 = 56, + + /* Upper Bound OFDMA DL 'rates' */ + WL_RU_RATE_1X1_UBSS1 = 57, + WL_RU_RATE_1X2_UBSS1 = 58, + WL_RU_RATE_2X2_UBSS2 = 59, + WL_RU_RATE_1X2_TXBF_UBSS1 = 60, + WL_RU_RATE_2X2_TXBF_UBSS2 = 61, + WL_RU_RATE_1X3_UBSS1 = 62, + WL_RU_RATE_2X3_UBSS2 = 63, + WL_RU_RATE_3X3_UBSS3 = 64, + WL_RU_RATE_1X3_TXBF_UBSS1 = 65, + WL_RU_RATE_2X3_TXBF_UBSS2 = 66, + WL_RU_RATE_3X3_TXBF_UBSS3 = 67, + WL_RU_RATE_1X4_UBSS1 = 68, + WL_RU_RATE_2X4_UBSS2 = 69, + WL_RU_RATE_3X4_UBSS3 = 70, + WL_RU_RATE_4X4_UBSS4 = 71, + WL_RU_RATE_1X4_TXBF_UBSS1 = 72, + WL_RU_RATE_2X4_TXBF_UBSS2 = 73, + WL_RU_RATE_3X4_TXBF_UBSS3 = 74, + WL_RU_RATE_4X4_TXBF_UBSS4 = 75, + + /* Less Upper Bound OFDMA DL 'rates' */ + WL_RU_RATE_1X1_LUBSS1 = 76, + WL_RU_RATE_1X2_LUBSS1 = 77, + WL_RU_RATE_2X2_LUBSS2 = 78, + WL_RU_RATE_1X2_TXBF_LUBSS1 = 79, + WL_RU_RATE_2X2_TXBF_LUBSS2 = 80, + WL_RU_RATE_1X3_LUBSS1 = 81, + WL_RU_RATE_2X3_LUBSS2 = 82, + WL_RU_RATE_3X3_LUBSS3 = 83, + WL_RU_RATE_1X3_TXBF_LUBSS1 = 84, + WL_RU_RATE_2X3_TXBF_LUBSS2 = 85, + WL_RU_RATE_3X3_TXBF_LUBSS3 = 86, + WL_RU_RATE_1X4_LUBSS1 = 87, + WL_RU_RATE_2X4_LUBSS2 = 88, + WL_RU_RATE_3X4_LUBSS3 = 89, + WL_RU_RATE_4X4_LUBSS4 = 90, + WL_RU_RATE_1X4_TXBF_LUBSS1 = 91, + WL_RU_RATE_2X4_TXBF_LUBSS2 = 92, + WL_RU_RATE_3X4_TXBF_LUBSS3 = 93, + WL_RU_RATE_4X4_TXBF_LUBSS4 = 94, + + /* RU242 OFDMA UL rates */ + WL_RU_RATE_1X1_242SS1 = 95, + WL_RU_RATE_1X2_242SS1 = 96, + WL_RU_RATE_2X2_242SS2 = 97, + WL_RU_RATE_1X2_TXBF_242SS1 = 98, + WL_RU_RATE_2X2_TXBF_242SS2 = 99, + WL_RU_RATE_1X3_242SS1 = 100, + WL_RU_RATE_2X3_242SS2 = 101, + WL_RU_RATE_3X3_242SS3 = 102, + WL_RU_RATE_1X3_TXBF_242SS1 = 103, + WL_RU_RATE_2X3_TXBF_242SS2 = 104, + WL_RU_RATE_3X3_TXBF_242SS3 = 105, + WL_RU_RATE_1X4_242SS1 = 106, + 
WL_RU_RATE_2X4_242SS2 = 107,
+	WL_RU_RATE_3X4_242SS3 = 108,
+	WL_RU_RATE_4X4_242SS4 = 109,
+	WL_RU_RATE_1X4_TXBF_242SS1 = 110,
+	WL_RU_RATE_2X4_TXBF_242SS2 = 111,
+	WL_RU_RATE_3X4_TXBF_242SS3 = 112,
+	WL_RU_RATE_4X4_TXBF_242SS4 = 113,
+
+	/* RU484 OFDMA UL rates */
+	WL_RU_RATE_1X1_484SS1 = 114,
+	WL_RU_RATE_1X2_484SS1 = 115,
+	WL_RU_RATE_2X2_484SS2 = 116,
+	WL_RU_RATE_1X2_TXBF_484SS1 = 117,
+	WL_RU_RATE_2X2_TXBF_484SS2 = 118,
+	WL_RU_RATE_1X3_484SS1 = 119,
+	WL_RU_RATE_2X3_484SS2 = 120,
+	WL_RU_RATE_3X3_484SS3 = 121,
+	WL_RU_RATE_1X3_TXBF_484SS1 = 122,
+	WL_RU_RATE_2X3_TXBF_484SS2 = 123,
+	WL_RU_RATE_3X3_TXBF_484SS3 = 124,
+	WL_RU_RATE_1X4_484SS1 = 125,
+	WL_RU_RATE_2X4_484SS2 = 126,
+	WL_RU_RATE_3X4_484SS3 = 127,
+	WL_RU_RATE_4X4_484SS4 = 128,
+	WL_RU_RATE_1X4_TXBF_484SS1 = 129,
+	WL_RU_RATE_2X4_TXBF_484SS2 = 130,
+	WL_RU_RATE_3X4_TXBF_484SS3 = 131,
+	WL_RU_RATE_4X4_TXBF_484SS4 = 132,
+
+	/* RU996 OFDMA UL rates */
+	WL_RU_RATE_1X1_996SS1 = 133,
+	WL_RU_RATE_1X2_996SS1 = 134,
+	WL_RU_RATE_2X2_996SS2 = 135,
+	WL_RU_RATE_1X2_TXBF_996SS1 = 136,
+	WL_RU_RATE_2X2_TXBF_996SS2 = 137,
+	WL_RU_RATE_1X3_996SS1 = 138,
+	WL_RU_RATE_2X3_996SS2 = 139,
+	WL_RU_RATE_3X3_996SS3 = 140,
+	WL_RU_RATE_1X3_TXBF_996SS1 = 141,
+	WL_RU_RATE_2X3_TXBF_996SS2 = 142,
+	WL_RU_RATE_3X3_TXBF_996SS3 = 143,
+	WL_RU_RATE_1X4_996SS1 = 144,
+	WL_RU_RATE_2X4_996SS2 = 145,
+	WL_RU_RATE_3X4_996SS3 = 146,
+	WL_RU_RATE_4X4_996SS4 = 147,
+	WL_RU_RATE_1X4_TXBF_996SS1 = 148,
+	WL_RU_RATE_2X4_TXBF_996SS2 = 149,
+	WL_RU_RATE_3X4_TXBF_996SS3 = 150,
+	WL_RU_RATE_4X4_TXBF_996SS4 = 151
+} clm_ru_rates_t;
+
+/* Number of OFDMA rate codes */
+#define WL_RU_NUMRATES	152
+
+/* MCS rates */
+#define WLC_MAX_VHT_MCS	11	/**< Std VHT MCS 0-9 plus prop VHT MCS 10-11 */
+#define WLC_MAX_HE_MCS	11	/**< Std HE MCS 0-11 */
+#define WLC_MAX_EHT_MCS	13	/**< Std EHT MCS 0-13 */
+
+/* Convert encoded rate value in plcp header to numerical rates in 500 kbps increments */
+#define OFDM_PHY2MAC_RATE(rlpt)		plcp_ofdm_rate_tbl[(rlpt) & 0x7]
+#define CCK_PHY2MAC_RATE(signal)	((signal)/5)
+
+/* 'proprietary' string should not exist in open source (OEM_ANDROID) */
+/* given a proprietary MCS, get number of spatial streams */
+#define GET_PROPRIETARY_11N_MCS_NSS(mcs)	(1 + ((mcs) - 85) / 8)
+
+#define GET_11N_MCS_NSS(mcs)	((mcs) < 32 ? (1 + ((mcs) / 8)) : \
+	((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
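+
+/* Worked examples for the stream-count arithmetic above (integer division):
+ * standard HT MCS 0..31: nss = 1 + mcs/8, e.g. MCS 13 -> 1 + 1 = 2 streams;
+ * proprietary MCS: nss = 1 + (mcs - 85)/8, e.g. MCS 87 -> 1 stream,
+ * MCS 99 -> 1 + 14/8 = 2 streams, MCS 102 -> 1 + 17/8 = 3 streams.
+ */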
+
+#if defined(WLPROPRIETARY_11N_RATES) /* Broadcom proprietary rate support for 11n */
+#define IS_PROPRIETARY_11N_MCS(mcs) \
+	((mcs) == 87 || (mcs) == 88 || (mcs) == 99 || (mcs) == 100 || (mcs) == 101 || (mcs) == 102)
+#define IS_PROPRIETARY_11N_SS_MCS(mcs) \
+	((mcs) == 87 || (mcs) == 88)
+#else
+#define IS_PROPRIETARY_11N_MCS(mcs)	FALSE
+#define IS_PROPRIETARY_11N_SS_MCS(mcs)	FALSE	/**< is proprietary HT single stream MCS */
+#endif /* WLPROPRIETARY_11N_RATES */
+
+extern const uint8 plcp_ofdm_rate_tbl[];
+
+uint8 wf_get_single_stream_mcs(uint mcs);
+
+/* extract NSS:MCS portions of the rspec */
+#define WF_NON_HT_MCS	0x80
+uint8 wf_vht_plcp_to_rate(uint8 *plcp);
+uint8 wf_he_plcp_to_rate(uint8 *plcp, bool is_mu);
+uint8 wf_eht_plcp_to_rate(uint8 *plcp, bool is_mu);
+
+/* convert rate from mcs to Kbps */
+uint wf_mcs_to_rate(uint mcs, uint nss, uint bw, int sgi);
+uint wf_he_mcs_to_rate(uint mcs, uint nss, uint bw, uint gi, bool dcm);
+
+uint wf_mcs_to_Ndbps(uint mcs, uint nss, uint bw);
+uint wf_he_mcs_to_Ndbps(uint mcs, uint nss, uint bw, bool dcm);
+uint32 wf_he_mcs_ru_to_ndbps(uint8 mcs, uint8 nss, bool dcm, uint8 ru_index);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _bcmwifi_rates_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_rspec.h b/bcmdhd.101.10.361.x/include/bcmwifi_rspec.h
new file mode 100755
index 0000000..a90f3a3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmwifi_rspec.h
@@ -0,0 +1,286 @@
+/*
+ * Common OS-independent driver header for rate management.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _bcmwifi_rspec_h_
+#define _bcmwifi_rspec_h_
+
+#include
+
+/**
+ * ===================================================================================
+ * ratespec: holds the rate and mode-specific information required to generate a tx frame.
+ * Legacy CCK and OFDM information is held in the same manner as in the past
+ * (in the lower byte); the upper 3 bytes primarily hold MIMO-specific information.
+ * ===================================================================================
+ */
+typedef uint32	ratespec_t;
+
+/* Rate spec. definitions */
+/* for WL_RSPEC_ENCODING field >= WL_RSPEC_ENCODING_HE, backward compatible */
+#define WL_RSPEC_RATE_MASK	0x000000FFu	/**< Legacy rate or MCS or MCS + NSS */
+#define WL_RSPEC_TXEXP_MASK	0x00000300u	/**< Tx chain expansion beyond Nsts */
+#define WL_RSPEC_TXEXP_SHIFT	8u
+#define WL_RSPEC_HE_GI_MASK	0x00000C00u	/* HE GI indices */
+#define WL_RSPEC_HE_GI_SHIFT	10u
+#define WL_RSPEC_ER_MASK	0x0000C000u	/**< Range extension mask */
+#define WL_RSPEC_ER_SHIFT	14u
+#define WL_RSPEC_ER_TONE_MASK	0x00004000u	/**< Range extension tone config */
+#define WL_RSPEC_ER_TONE_SHIFT	14u
+#define WL_RSPEC_ER_ENAB_MASK	0x00008000u	/**< Range extension enable */
+#define WL_RSPEC_ER_ENAB_SHIFT	15u
+#define WL_RSPEC_BW_MASK	0x00070000u	/**< Bandwidth */
+#define WL_RSPEC_BW_SHIFT	16u
+#define WL_RSPEC_DCM		0x00080000u	/**< Dual Carrier Modulation */
+#define WL_RSPEC_DCM_SHIFT	19u
+#define WL_RSPEC_STBC		0x00100000u	/**< STBC expansion, Nsts = 2 * Nss */
+#define WL_RSPEC_TXBF		0x00200000u
+#define WL_RSPEC_LDPC		0x00400000u
+#define WL_RSPEC_SGI		0x00800000u
+#define WL_RSPEC_SHORT_PREAMBLE	0x00800000u	/**< DSSS short preamble - Encoding 0 */
+#ifdef WL11BE
+#define WL_RSPEC_ENCODING_MASK	0x07000000u	/**< Encoding of RSPEC_RATE field */
+#else
+#define WL_RSPEC_ENCODING_MASK	0x03000000u	/**< Encoding of RSPEC_RATE field */
+#endif
+#define WL_RSPEC_ENCODING_SHIFT	24u
+#define WL_RSPEC_OVERRIDE_RATE	0x40000000u	/**< override rate only */
+#define WL_RSPEC_OVERRIDE_MODE	0x80000000u	/**< override both rate & mode */
+
+/* ======== RSPEC_HE_GI|RSPEC_SGI fields for HE ======== */
+
+/* GI for HE */
+#define RSPEC_HE_LTF_GI(rspec)	(((rspec) & WL_RSPEC_HE_GI_MASK) >> WL_RSPEC_HE_GI_SHIFT)
+#define WL_RSPEC_HE_1x_LTF_GI_0_8us	(0x0u)
+#define WL_RSPEC_HE_2x_LTF_GI_0_8us	(0x1u)
+#define WL_RSPEC_HE_2x_LTF_GI_1_6us	(0x2u)
+#define WL_RSPEC_HE_4x_LTF_GI_3_2us	(0x3u)
+#define RSPEC_ISHEGI(rspec)	(RSPEC_HE_LTF_GI(rspec) > WL_RSPEC_HE_1x_LTF_GI_0_8us)
+#define HE_GI_TO_RSPEC(gi)	(((ratespec_t)(gi) << WL_RSPEC_HE_GI_SHIFT) & WL_RSPEC_HE_GI_MASK)
+#define HE_GI_TO_RSPEC_SET(rspec, gi)	((rspec & (~WL_RSPEC_HE_GI_MASK)) | \
+	HE_GI_TO_RSPEC(gi))
+
+/* Macros for HE LTF and GI */
+#define HE_IS_1X_LTF(gi)	((gi) == WL_RSPEC_HE_1x_LTF_GI_0_8us)
+#define HE_IS_2X_LTF(gi)	(((gi) == WL_RSPEC_HE_2x_LTF_GI_0_8us) || \
+	((gi) == WL_RSPEC_HE_2x_LTF_GI_1_6us))
+#define HE_IS_4X_LTF(gi)	((gi) == WL_RSPEC_HE_4x_LTF_GI_3_2us)
+
+#define HE_IS_GI_0_8us(gi)	(((gi) == WL_RSPEC_HE_1x_LTF_GI_0_8us) || \
+	((gi) == WL_RSPEC_HE_2x_LTF_GI_0_8us))
+#define HE_IS_GI_1_6us(gi)	((gi) == WL_RSPEC_HE_2x_LTF_GI_1_6us)
+#define HE_IS_GI_3_2us(gi)	((gi) == WL_RSPEC_HE_4x_LTF_GI_3_2us)
+
+/* RSPEC Macros for extracting and using HE-ER and DCM */
+#define RSPEC_HE_DCM(rspec)	(((rspec) & WL_RSPEC_DCM) >> WL_RSPEC_DCM_SHIFT)
+#define RSPEC_HE_ER(rspec)	(((rspec) & WL_RSPEC_ER_MASK) >> WL_RSPEC_ER_SHIFT)
+#ifdef WL11AX
+#define RSPEC_HE_ER_ENAB(rspec)	(((rspec) & WL_RSPEC_ER_ENAB_MASK) >> \
+	WL_RSPEC_ER_ENAB_SHIFT)
+#else
+#define RSPEC_HE_ER_ENAB(rspec)	FALSE
+#endif
+#define RSPEC_HE_ER_TONE(rspec)	(((rspec) & WL_RSPEC_ER_TONE_MASK) >> \
+	WL_RSPEC_ER_TONE_SHIFT)
+/* ======== RSPEC_RATE field ======== */
+
+/* Encoding 0 - legacy rate */
+/* DSSS, CCK, and OFDM rates in [500kbps] units */
+#define WL_RSPEC_LEGACY_RATE_MASK	0x0000007F
+#define WLC_RATE_1M	2
+#define WLC_RATE_2M	4
+#define WLC_RATE_5M5	11
+#define WLC_RATE_11M	22
+#define WLC_RATE_6M	12
+#define WLC_RATE_9M	18
+#define WLC_RATE_12M	24
+#define WLC_RATE_18M	36
+#define WLC_RATE_24M	48
+#define WLC_RATE_36M	72
+#define WLC_RATE_48M	96
+#define WLC_RATE_54M	108
+
+/* Encoding 1 - HT MCS */
+#define WL_RSPEC_HT_MCS_MASK	0x0000007F	/**< HT MCS value mask in rspec */
+
+/* Encoding >= 2 */
+#define WL_RSPEC_NSS_MCS_MASK	0x000000FF	/* NSS & MCS values mask in rspec */
+#define WL_RSPEC_MCS_MASK	0x0000000F	/* mimo MCS value mask in rspec */
+#define WL_RSPEC_NSS_MASK	0x000000F0	/* mimo NSS value mask in rspec */
+#define WL_RSPEC_NSS_SHIFT	4	/* mimo NSS value shift in rspec */
+
+/* Encoding 2 - VHT MCS + NSS */
+#define WL_RSPEC_VHT_MCS_MASK	WL_RSPEC_MCS_MASK	/**< VHT MCS value mask in rspec */
+#define WL_RSPEC_VHT_NSS_MASK	WL_RSPEC_NSS_MASK	/**< VHT Nss value mask in rspec */
+#define WL_RSPEC_VHT_NSS_SHIFT	WL_RSPEC_NSS_SHIFT	/**< VHT Nss value shift in rspec */
+
+/* Encoding 3 - HE MCS + NSS */
+#define WL_RSPEC_HE_MCS_MASK	WL_RSPEC_MCS_MASK	/**< HE MCS value mask in rspec */
+#define WL_RSPEC_HE_NSS_MASK	WL_RSPEC_NSS_MASK	/**< HE Nss value mask in rspec */
+#define WL_RSPEC_HE_NSS_SHIFT	WL_RSPEC_NSS_SHIFT	/**< HE Nss value shift in rspec */
+
+/* Encoding 4 - EHT MCS + NSS */
+#define WL_RSPEC_EHT_MCS_MASK	WL_RSPEC_MCS_MASK	/**< EHT MCS value mask in rspec */
+#define WL_RSPEC_EHT_NSS_MASK	WL_RSPEC_NSS_MASK	/**< EHT Nss value mask in rspec */
+#define WL_RSPEC_EHT_NSS_SHIFT	WL_RSPEC_NSS_SHIFT	/**< EHT Nss value shift in rspec */
+
+/* ======== RSPEC_BW field ======== */
+
+#define WL_RSPEC_BW_UNSPECIFIED	0u
+#define WL_RSPEC_BW_20MHZ	0x00010000u
+#define WL_RSPEC_BW_40MHZ	0x00020000u
+#define WL_RSPEC_BW_80MHZ	0x00030000u
+#define WL_RSPEC_BW_160MHZ	0x00040000u
+#define WL_RSPEC_BW_240MHZ	0x00050000u
+#define WL_RSPEC_BW_320MHZ	0x00060000u
+
+/* ======== RSPEC_ENCODING field ======== */
+
+/* NOTE: Assuming the rate field is always NSS+MCS starting from VHT encoding!
+ * Modify/fix the RSPEC_ISNSSMCS() macro if the above condition changes at any time.
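+ *
+ * Worked example (a sketch; VHT_RSPEC() and the parse macros used here are
+ * defined further below in this file):
+ *
+ *   ratespec_t rspec = VHT_RSPEC(7, 2) | WL_RSPEC_BW_80MHZ;
+ *   RSPEC_ISVHT(rspec)                                          -> TRUE
+ *   (rspec & WL_RSPEC_VHT_MCS_MASK)                             -> 7 (MCS)
+ *   ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) -> 2 (NSS)
+ *   RSPEC_IS80MHZ(rspec)                                        -> TRUE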
+ */ +#define WL_RSPEC_ENCODE_RATE 0x00000000u /**< Legacy rate is stored in RSPEC_RATE */ +#define WL_RSPEC_ENCODE_HT 0x01000000u /**< HT MCS is stored in RSPEC_RATE */ +#define WL_RSPEC_ENCODE_VHT 0x02000000u /**< VHT MCS and NSS are stored in RSPEC_RATE */ +#define WL_RSPEC_ENCODE_HE 0x03000000u /**< HE MCS and NSS are stored in RSPEC_RATE */ +#define WL_RSPEC_ENCODE_EHT 0x04000000u /**< EHT MCS and NSS are stored in RSPEC_RATE */ + +/** + * =============================== + * Handy macros to parse rate spec + * =============================== + */ +#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK) +#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ) +#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ) +#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ) +#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ) +#ifdef WL11BE +#define RSPEC_IS240MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_240MHZ) +#define RSPEC_IS320MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_320MHZ) +#else +#define RSPEC_IS320MHZ(rspec) (FALSE) +#define RSPEC_IS240MHZ(rspec) (FALSE) +#endif /* WL11BE */ + +#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0) +#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0) +#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0) +#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0) + +#define RSPEC_TXEXP(rspec) (((rspec) & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT) + +#define RSPEC_ENCODE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >> WL_RSPEC_ENCODING_SHIFT) +#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE) + +#define RSPEC_ISCCK(rspec) (RSPEC_ISLEGACY(rspec) && \ + (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] > 0) +#define RSPEC_ISOFDM(rspec) (RSPEC_ISLEGACY(rspec) && \ + (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] < 0) + +#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) +#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) +#ifdef WL11AX +#define RSPEC_ISHE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HE) +#else /* WL11AX */ +#define RSPEC_ISHE(rspec) 0 +#endif /* WL11AX */ +#ifdef WL11BE +#define RSPEC_ISEHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_EHT) +#else /* WL11BE */ +#define RSPEC_ISEHT(rspec) 0 +#endif /* WL11BE */ + +/* fast check if rate field is NSS+MCS format (starting from VHT ratespec) */ +#define RSPEC_ISVHTEXT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >= WL_RSPEC_ENCODE_VHT) +/* fast check if rate field is NSS+MCS format (starting from HE ratespec) */ +#define RSPEC_ISHEEXT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >= WL_RSPEC_ENCODE_HE) + +/** + * ================================ + * Handy macros to create rate spec + * ================================ + */ +/* create ratespecs */ +#define LEGACY_RSPEC(rate) (WL_RSPEC_ENCODE_RATE | WL_RSPEC_BW_20MHZ | \ + ((rate) & WL_RSPEC_LEGACY_RATE_MASK)) +#define CCK_RSPEC(cck) LEGACY_RSPEC(cck) +#define OFDM_RSPEC(ofdm) LEGACY_RSPEC(ofdm) +#define HT_RSPEC(mcs) (WL_RSPEC_ENCODE_HT | ((mcs) & WL_RSPEC_HT_MCS_MASK)) +#define VHT_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_VHT | \ + (((nss) << WL_RSPEC_VHT_NSS_SHIFT) & WL_RSPEC_VHT_NSS_MASK) | \ + ((mcs) & WL_RSPEC_VHT_MCS_MASK)) +#define HE_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_HE | \ + (((nss) << WL_RSPEC_HE_NSS_SHIFT) & WL_RSPEC_HE_NSS_MASK) | \ + ((mcs) & WL_RSPEC_HE_MCS_MASK)) +#define EHT_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_EHT | \ 
+ (((nss) << WL_RSPEC_EHT_NSS_SHIFT) & WL_RSPEC_EHT_NSS_MASK) | \ + ((mcs) & WL_RSPEC_EHT_MCS_MASK)) + +/** + * ================== + * Other handy macros + * ================== + */ +/* return rate in unit of Kbps */ +#define RSPEC2KBPS(rspec) wf_rspec_to_rate(rspec) + +/* return rate in unit of 500Kbps */ +/* works only for legacy rate */ +#ifdef BCMDBG +#define RSPEC2RATE(rspec) wf_rspec_to_rate_legacy(rspec) +#else +#define RSPEC2RATE(rspec) ((rspec) & WL_RSPEC_LEGACY_RATE_MASK) +#endif + +/** + * ================================= + * Macros to use the rate_info table + * ================================= + */ +/* phy_rate table index is in [500kbps] units */ +#define WLC_MAXRATE 108 /**< in 500kbps units */ +extern const uint8 rate_info[]; +/* phy_rate table value is encoded */ +#define RATE_INFO_OFDM_MASK 0x80 /* ofdm mask */ +#define RATE_INFO_RATE_MASK 0x7f /* rate signal index mask */ +#define RATE_INFO_M_RATE_MASK 0x0f /* M_RATE_TABLE index mask */ +#define RATE_INFO_RATE_ISCCK(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] > 0) +#define RATE_INFO_RATE_ISOFDM(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] < 0) + +/** + * =================== + * function prototypes + * =================== + */ +ratespec_t wf_vht_plcp_to_rspec(uint8 *plcp); +ratespec_t wf_he_plcp_to_rspec(uint8 *plcp); +ratespec_t wf_eht_plcp_to_rspec(uint8 *plcp); +ratespec_t wf_ht_plcp_to_rspec(uint8 *plcp); + +#ifdef BCMDBG +uint wf_rspec_to_rate_legacy(ratespec_t rspec); +#endif +uint wf_rspec_to_rate(ratespec_t rspec); +uint wf_rspec_to_rate_rsel(ratespec_t rspec); + +#endif /* _bcmwifi_rspec_h_ */ diff --git a/bcmdhd.101.10.361.x/include/bcmwpa.h b/bcmdhd.101.10.361.x/include/bcmwpa.h new file mode 100755 index 0000000..45d4a9f --- /dev/null +++ b/bcmdhd.101.10.361.x/include/bcmwpa.h @@ -0,0 +1,634 @@ +/* + * bcmwpa.h - interface definitions of shared WPA-related functions + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. + * + * + * <> + */ + +#ifndef _BCMWPA_H_ +#define _BCMWPA_H_ +#ifdef BCM_EXTERNAL_APP +typedef int osl_t; +#endif +#include +#if defined(BCMSUP_PSK) || defined(BCMSUPPL) || \ + defined(MFP) || defined(BCMAUTH_PSK) || defined(WLFBT) || \ + defined(WL_OKC) || defined(GTKOE) || defined(WL_FILS) +#include +#endif +#include <802.11.h> +#ifdef WLP2P +#include +#endif +#include +#include +#include +#include +#ifdef WL_OCV +#include +#endif /* WL_OCV */ + +/* Field sizes for WPA key hierarchy */ +#define WPA_TEMP_TX_KEY_LEN 8u +#define WPA_TEMP_RX_KEY_LEN 8u + +#define PMK_LEN 32u +#define TKIP_PTK_LEN 64u +#define TKIP_TK_LEN 32u +#define AES_PTK_LEN 48u +#define AES_TK_LEN 16u +#define AES_GCM_PTK_LEN 48u +#define AES_GCM_TK_LEN 16u +#define AES_GCM256_PTK_LEN 64u +#define AES_GCM256_TK_LEN 32u + +/* limits for pre-shared key lengths */ +#define WPA_MIN_PSK_LEN 8u +#define WPA_MAX_PSK_LEN 64u + +#define WPA_KEY_DATA_LEN_256 256u /* allocation size of 256 for temp data pointer. */ +#define WPA_KEY_DATA_LEN_128 128u /* allocation size of 128 for temp data pointer. 
*/ + +/* Minimum length of WPA2 GTK encapsulation in EAPOL */ +#define EAPOL_WPA2_GTK_ENCAP_MIN_LEN (EAPOL_WPA2_ENCAP_DATA_HDR_LEN - \ + TLV_HDR_LEN + EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN) + +/* Minimum length of WPA2 IGTK encapsulation in EAPOL */ +#define EAPOL_WPA2_IGTK_ENCAP_MIN_LEN (EAPOL_WPA2_ENCAP_DATA_HDR_LEN - \ + TLV_HDR_LEN + EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN) + +/* Minimum length of BIGTK encapsulation in EAPOL */ +#define EAPOL_WPA2_BIGTK_ENCAP_MIN_LEN (EAPOL_WPA2_ENCAP_DATA_HDR_LEN - \ + TLV_HDR_LEN + EAPOL_WPA2_KEY_BIGTK_ENCAP_HDR_LEN) + +#ifdef WL_OCV +/* Size of the OCI element */ +#define WPA_OCV_OCI_IE_SIZE \ + (bcm_ocv_get_oci_len() + BCM_TLV_EXT_HDR_SIZE) + +/* Size of the OCI KDE */ +#define WPA_OCV_OCI_KDE_SIZE \ + (bcm_ocv_get_oci_len() + EAPOL_WPA2_ENCAP_DATA_HDR_LEN) + +/* Size of the OCI subelement */ +#define WPA_OCV_OCI_SUBELEM_SIZE \ + (bcm_ocv_get_oci_len() + TLV_HDR_LEN) + +/* Minimum length of WPA2 OCI encapsulation in EAPOL */ +#define EAPOL_WPA2_OCI_ENCAP_MIN_LEN \ + (WPA_OCV_OCI_KDE_SIZE - TLV_HDR_LEN) +#endif /* WL_OCV */ + +#ifdef WLFIPS +#define WLC_SW_KEYS(wlc, bsscfg) ((((wlc)->wsec_swkeys) || \ + ((bsscfg)->wsec & (WSEC_SWFLAG | FIPS_ENABLED)))) +#else +#define WLC_SW_KEYS(wlc, bsscfg) ((((wlc)->wsec_swkeys) || \ + ((bsscfg)->wsec & WSEC_SWFLAG))) +#endif /* WLFIPS */ + +/* This doesn't really belong here, but neither does WSEC_CKIP* */ +/* per-packet encryption exemption policy */ +/* no exemption...follow whatever standard rules apply */ +#define WSEC_EXEMPT_NO 0 +/* send unencrypted */ +#define WSEC_EXEMPT_ALWAYS 1 +/* send unencrypted if no pairwise key */ +#define WSEC_EXEMPT_NO_PAIRWISE 2 + +#define WPA_CIPHER_UNSPECIFIED 0xff +#define WPA_P_CIPHERS_UNSPECIFIED 0x80000000 + +#ifdef RSN_IE_INFO_STRUCT_RELOCATED +#define WPA_AKMS_UNSPECIFIED 0x80000000 +#else +#define WPA_AKMS_UNSPECIFIED 0 +#endif + +#ifdef BCMWAPI_WAI +#define IS_WAPI_AUTH(auth) ((auth) == WAPI_AUTH_UNSPECIFIED || \ + (auth) == WAPI_AUTH_PSK) +#define INCLUDES_WAPI_AUTH(auth) \ + ((auth) & (WAPI_AUTH_UNSPECIFIED | \ + WAPI_AUTH_PSK)) +#endif /* BCMWAPI_WAI */ + +#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \ + (akm) == RSN_AKM_UNSPECIFIED || \ + (akm) == RSN_AKM_PSK) + +#define IS_WPA2_AKM(akm) ((akm) == RSN_AKM_UNSPECIFIED || \ + (akm) == RSN_AKM_PSK || \ + (akm) == RSN_AKM_FILS_SHA256 || \ + (akm) == RSN_AKM_FILS_SHA384) + +/* this doesn't mean much. A WPA (not RSN) akm type would match this */ +#define RSN_AKM_MASK (\ + BCM_BIT(RSN_AKM_UNSPECIFIED) | \ + BCM_BIT(RSN_AKM_PSK) | \ + BCM_BIT(RSN_AKM_SAE_PSK) | \ + BCM_BIT(RSN_AKM_FILS_SHA256) | \ + BCM_BIT(RSN_AKM_FILS_SHA384) | \ + BCM_BIT(RSN_AKM_OWE) | \ + BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \ + BCM_BIT(RSN_AKM_SUITEB_SHA384_1X)) + +/* verify less than 32 before shifting bits */ +#define VALID_AKM_BIT(akm) ((akm) < 32u ? 
+ BCM_BIT((akm)) : 0u)
+
+#define IS_RSN_AKM(akm) (VALID_AKM_BIT((akm)) & RSN_AKM_MASK)
+
+#define FBT_AKM_MASK (BCM_BIT(RSN_AKM_FBT_1X) | \
+ BCM_BIT(RSN_AKM_FBT_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_FBT) | \
+ BCM_BIT(RSN_AKM_FBT_SHA256_FILS) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_FILS) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_PSK))
+
+#define IS_FBT_AKM(akm) (VALID_AKM_BIT((akm)) & FBT_AKM_MASK)
+
+#define FILS_AKM_MASK (\
+ BCM_BIT(RSN_AKM_FILS_SHA256) | \
+ BCM_BIT(RSN_AKM_FILS_SHA384))
+
+#define IS_FILS_AKM(akm) (VALID_AKM_BIT((akm)) & FILS_AKM_MASK)
+
+#define MFP_AKM_MASK (\
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK))
+
+#define IS_MFP_AKM(akm) (MFP_AKM_MASK & VALID_AKM_BIT((akm)))
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK)
+#endif /* BCMWAPI_WAI */
+
+#define IS_TDLS_AKM(akm) ((akm) == RSN_AKM_TPK)
+
+/* Broadcom (OUI) authenticated key management suite */
+#define BRCM_AKM_NONE 0
+#define BRCM_AKM_PSK 1u /* Proprietary PSK AKM */
+
+#define IS_BRCM_AKM(akm) ((akm) == BRCM_AKM_PSK)
+
+#define ONE_X_AKM_MASK (BCM_BIT(RSN_AKM_FBT_1X) | \
+ BCM_BIT(RSN_AKM_MFP_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_UNSPECIFIED))
+
+#define IS_1X_AKM(akm) (VALID_AKM_BIT((akm)) & ONE_X_AKM_MASK)
+
+#define SUITEB_AKM_MASK (BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X))
+#define IS_1X_SUITEB_AKM(akm) (VALID_AKM_BIT((akm)) & SUITEB_AKM_MASK)
+
+#define SAE_AKM_MASK (BCM_BIT(RSN_AKM_SAE_PSK) | BCM_BIT(RSN_AKM_SAE_FBT))
+#define IS_SAE_AKM(akm) (VALID_AKM_BIT((akm)) & SAE_AKM_MASK)
+
+#define SHA256_AKM_MASK (BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_FBT) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA256) | \
+ BCM_BIT(RSN_AKM_FBT_SHA256_FILS) | \
+ BCM_BIT(RSN_AKM_OWE))
+#define IS_SHA256_AKM(akm) (VALID_AKM_BIT((akm)) & SHA256_AKM_MASK)
+
+#define SHA384_AKM_MASK (BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA384) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_FILS) | \
+ BCM_BIT(RSN_AKM_PSK_SHA384))
+#define IS_SHA384_AKM(akm) (VALID_AKM_BIT((akm)) & SHA384_AKM_MASK)
+
+#define OPEN_AUTH_AKM_MASK (\
+ BCM_BIT(RSN_AKM_UNSPECIFIED) | \
+ BCM_BIT(RSN_AKM_PSK) | \
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_PSK_SHA384))
+#define IS_OPEN_AUTH_AKM(akm) (VALID_AKM_BIT((akm)) & OPEN_AUTH_AKM_MASK)
+
+typedef enum akm_type {
+ WPA_AUTH_IE = 0x01,
+ RSN_AUTH_IE = 0x02,
+ OSEN_AUTH_IE = 0x04
+} akm_type_t;
+
+#define MAX_ARRAY 1
+#define MIN_ARRAY 0
+
+#define WPS_ATID_SEL_REGISTRAR 0x1041
+
+/* move these to appropriate file(s) */
+#define WPS_IE_FIXED_LEN 6
+
+/* GTK indices we use - 0-3 valid per IEEE/802.11 2012 */
+#define GTK_INDEX_1 1
+#define GTK_INDEX_2 2
+
+/* IGTK indices we use - 4-5 are valid per IEEE 802.11 2012 */
+#define IGTK_INDEX_1 4
+#define IGTK_INDEX_2 5
+
+/* following needed for compatibility for router code because it automerges */
+#define IGTK_ID_TO_WSEC_INDEX(_id) (_id)
+#define WPA_AES_CMAC_CALC aes_cmac_calc
+
+#define IS_IGTK_INDEX(x) ((x) == IGTK_INDEX_1 || (x) == IGTK_INDEX_2)
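+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * classifying a single RSN AKM value with the mask helpers above. The
+ * RSN_AKM_* enumerators are assumed to come from the 802.11 definitions
+ * included at the top of this file.
+ *
+ *	rsn_akm_t akm = RSN_AKM_SAE_PSK;
+ *
+ *	if (IS_SAE_AKM(akm)) {
+ *		// SAE: the MFP capable bit is also required, see RSN_MFPC_AKM_MASK
+ *	} else if (IS_FBT_AKM(akm)) {
+ *		// fast BSS transition key hierarchy
+ *	} else if (IS_1X_AKM(akm)) {
+ *		// 802.1X based key management
+ *	}
+ */
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED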
+typedef struct rsn_ie_info {
+ uint8 version;
+ int parse_status;
+ device_type_t dev_type; /* AP or STA */
+ auth_ie_type_mask_t auth_ie_type; /* bit field of WPA, WPA2 and (not yet) WAPI */
+ rsn_cipher_t g_cipher;
+ rsn_akm_t sta_akm; /* single STA akm */
+ uint16 caps;
+ rsn_ciphers_t rsn_p_ciphers;
+ rsn_ciphers_t wpa_p_ciphers;
+ rsn_akm_mask_t rsn_akms;
+ rsn_akm_mask_t wpa_akms;
+ uint8 pmkid_count;
+ uint8 pmkids_offset; /* offset into the IE */
+ rsn_cipher_t g_mgmt_cipher;
+ rsn_cipher_t sta_cipher; /* single STA cipher */
+ uint16 key_desc; /* key descriptor version as STA */
+ uint16 mic_len; /* unused. keep for ROM compatibility. */
+ uint8 pmk_len; /* EAPOL PMK */
+ uint8 kck_mic_len; /* EAPOL MIC (by KCK) */
+ uint8 kck_len; /* EAPOL KCK */
+ uint8 kek_len; /* EAPOL KEK */
+ uint8 tk_len; /* EAPOL TK */
+ uint8 ptk_len; /* EAPOL PTK */
+ uint8 kck2_len; /* EAPOL KCK2 */
+ uint8 kek2_len; /* EAPOL KEK2 */
+ uint8* rsn_ie; /* RSN IE from beacon or assoc request */
+ uint16 rsn_ie_len; /* RSN IE length */
+ uint8* wpa_ie; /* WPA IE */
+ uint16 wpa_ie_len; /* WPA IE length (is it fixed?) */
+ /* The following are helpers in the AP rsn info to be filled in by the STA
+ * after determination of which IE is being used, in wsec_filter.
+ */
+ uint32 p_ciphers; /* current ciphers for the chosen auth IE */
+ uint32 akms; /* current AKMs for the chosen auth IE */
+ uint8 *auth_ie; /* pointer to current chosen auth IE */
+ uint16 auth_ie_len;
+ uint8 ref_count; /* external reference count to decide if structure must be freed */
+ uint8 rsnxe_len; /* RSNXE IE length */
+ uint8 PAD[3];
+ uint8* rsnxe; /* RSNXE IE TLV buffer */
+ uint32 rsnxe_cap; /* RSNXE IE cap flag, refer to 802.11.h */
+} rsn_ie_info_t;
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
+
+/* WiFi WPS Attribute fixed portion */
+typedef struct wps_at_fixed {
+ uint8 at[2];
+ uint8 len[2];
+ uint8 data[1];
+} wps_at_fixed_t;
+
+typedef const struct oui_akm_wpa_tbl {
+ const char *oui; /* WPA auth category */
+ uint16 rsn_akm;
+ uint32 wpa_auth;
+} oui_akm_wpa_tbl_t;
+
+#define WPS_AT_FIXED_LEN 4
+
+#define wps_ie_fixed_t wpa_ie_fixed_t
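+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * typical parse flow that fills an rsn_ie_info_t from a raw RSN IE. Here
+ * 'ie' is assumed to point at a validated bcm_tlv_t, and DEV_STA is a
+ * hypothetical device_type_t value; bcmwpa_parse_rsnie() is declared
+ * further down in this header.
+ *
+ *	rsn_ie_info_t info;
+ *	memset(&info, 0, sizeof(info));
+ *	if (bcmwpa_parse_rsnie(ie, &info, DEV_STA) == BCME_OK) {
+ *		if (info.rsn_akms & SAE_AKM_MASK) {
+ *			// peer offers SAE; check MFP capability in info.caps
+ *		}
+ *	}
+ */
+/* What should be the multicast mask for AES ? */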
+#define WPA_UNICAST_AES_MASK (\
+ BCM_BIT(WPA_CIPHER_AES_CCM) | \
+ BCM_BIT(WPA_CIPHER_AES_GCM) | \
+ BCM_BIT(WPA_CIPHER_AES_GCM256))
+
+#define WPA_CIPHER_WEP_MASK (\
+ BCM_BIT(WPA_CIPHER_WEP_104) | \
+ BCM_BIT(WPA_CIPHER_WEP_40))
+
+/* temporary to pass pre-commit */
+#ifdef TMP_USE_RSN_INFO
+/* wsec macros */
+#ifdef EXT_STA
+#define UCAST_NONE(rsn_info) (((rsn_info)->p_ciphers == (1 << WPA_CIPHER_NONE)) && \
+ (!WLEXTSTA_ENAB(wlc->pub) || wlc->use_group_enabled))
+#else
+#define UCAST_NONE(rsn_info) (rsn_info->p_ciphers == (1 << WPA_CIPHER_NONE))
+#endif /* EXT_STA */
+
+#define UCAST_AES(rsn_info) (rsn_info->p_ciphers & WPA_UNICAST_AES_MASK)
+#define UCAST_TKIP(rsn_info) (rsn_info->p_ciphers & (1 << WPA_CIPHER_TKIP))
+#define UCAST_WEP(rsn_info) (rsn_info->p_ciphers & WPA_CIPHER_WEP_MASK)
+
+#define MCAST_NONE(rsn_info) ((rsn_info)->g_cipher == WPA_CIPHER_NONE)
+#define MCAST_AES(rsn_info) ((1 << rsn_info->g_cipher) & WPA_UNICAST_AES_MASK)
+#define MCAST_TKIP(rsn_info) (rsn_info->g_cipher == WPA_CIPHER_TKIP)
+#define MCAST_WEP(rsn_info) ((1 << rsn_info->g_cipher) & WPA_CIPHER_WEP_MASK)
+
+#endif /* TMP_USE_RSN_INFO */
+
+#define AKM_SHA256_MASK (\
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_PSK) | \
+ BCM_BIT(RSN_AKM_OWE) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA256) | \
+ BCM_BIT(RSN_AKM_FBT_SHA256_FILS) | \
+ BCM_BIT(RSN_AKM_SAE_FBT))
+
+#define AKM_SHA384_MASK (\
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA384) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_FILS) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_PSK) | \
+ BCM_BIT(RSN_AKM_PSK_SHA384))
+
+/* these AKMs require the MFP capable bit set in their IE */
+#define RSN_MFPC_AKM_MASK (\
+ BCM_BIT(RSN_AKM_SAE_PSK) | \
+ BCM_BIT(RSN_AKM_OWE) | \
+ BCM_BIT(RSN_AKM_SAE_FBT))
+
+/* AKMs that are supported by the in-driver supplicant.
+ * TODO: have to redesign this to include 1x and other PSK AKMs.
+ */
+#define IS_BCMSUP_AKM(akm) \
+ ((akm == RSN_AKM_PSK) | \
+ (akm == RSN_AKM_SAE_PSK) | \
+ (akm == RSN_AKM_OWE) | \
+ (akm == RSN_AKM_FBT_PSK) | \
+ (akm == RSN_AKM_SAE_FBT) | \
+ (akm == RSN_AKM_FBT_SHA384_1X) | \
+ (akm == RSN_AKM_FBT_SHA384_PSK))
+
+/* AKMs that use a common PSK, identified by the broadcast address */
+#define IS_SHARED_PMK_AKM(akm) \
+ ((akm == RSN_AKM_PSK) | \
+ (akm == RSN_AKM_FBT_PSK) | \
+ (akm == RSN_AKM_SHA256_PSK) | \
+ (akm == RSN_AKM_FBT_SHA384_PSK) | \
+ (akm == RSN_AKM_PSK_SHA384))
+
+#define RSN_AKM_USE_KDF(akm) (akm >= RSN_AKM_FBT_1X ? 1u : 0)
+
+/* Macro to abstract access to the rsn_ie_info structure in case
+ * we want to move it to a cubby or something else.
+ * Gives the rsn_info pointer
+ */
+
+#define RSN_INFO_GET(s) (s->rsn_info)
+/* where the rsn_info resides */
+#define RSN_INFO_GET_PTR(s) (&s->rsn_info)
+
+#define AUTH_AKM_INCLUDED(s) (s->rsn_info != NULL && s->rsn_info->parse_status == BCME_OK && \
+ s->rsn_info->akms != WPA_AKMS_UNSPECIFIED)
+
+#define AKM_IS_MEMBER(akm, mask) ((mask) & VALID_AKM_BIT((akm)) || ((akm) == 0 && (mask) == 0))
+
+typedef enum eapol_key_type {
+ EAPOL_KEY_NONE = 0,
+ EAPOL_KEY_PMK = 1,
+ EAPOL_KEY_KCK_MIC = 2,
+ EAPOL_KEY_KEK = 3,
+ EAPOL_KEY_TK = 4,
+ EAPOL_KEY_PTK = 5,
+ EAPOL_KEY_KCK = 6,
+ EAPOL_KEY_KCK2 = 7,
+ EAPOL_KEY_KEK2 = 8
+} eapol_key_type_t;
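+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * deriving a PTK with wpa_calc_ptk(), declared further down. The ether_addr
+ * variables, nonce buffers and NONCE_LEN (a stand-in for the 32-byte EAPOL
+ * nonce size) are assumptions of this sketch; AKMs at or above
+ * RSN_AKM_FBT_1X select the KDF path per RSN_AKM_USE_KDF().
+ *
+ *	uint8 ptk[TKIP_PTK_LEN];
+ *	int err = wpa_calc_ptk(RSN_AKM_PSK, &auth_ea, &sta_ea,
+ *	                       anonce, NONCE_LEN, snonce, NONCE_LEN,
+ *	                       pmk, PMK_LEN, ptk, sizeof(ptk));
+ *	if (err != BCME_OK) {
+ *		// handle derivation failure
+ *	}
+ */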
+/* Return address of max or min array depending on the first argument.
+ * Return NULL in case of a draw.
+ */
+extern const uint8 *wpa_array_cmp(int max_array, const uint8 *x, const uint8 *y, uint len);
+
+/* Increment the array argument */
+extern void wpa_incr_array(uint8 *array, uint len);
+
+/* Convert WPA IE cipher suite to locally used value */
+extern bool wpa_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok);
+
+/* Look for a WPA IE; return its address if found, NULL otherwise */
+extern wpa_ie_fixed_t *bcm_find_wpaie(uint8 *parse, uint len);
+extern bcm_tlv_t *bcm_find_wmeie(uint8 *parse, uint len, uint8 subtype, uint8 subtype_len);
+/* Look for a WPS IE; return its address if found, NULL otherwise */
+extern wps_ie_fixed_t *bcm_find_wpsie(const uint8 *parse, uint len);
+extern wps_at_fixed_t *bcm_wps_find_at(wps_at_fixed_t *at, uint len, uint16 id);
+int bcm_find_security_ies(uint8 *buf, uint buflen, void **wpa_ie,
+ void **rsn_ie);
+
+#ifdef WLP2P
+/* Look for a WiFi P2P IE; return its address if found, NULL otherwise */
+extern wifi_p2p_ie_t *bcm_find_p2pie(const uint8 *parse, uint len);
+#endif
+/* Look for a hotspot2.0 IE; return its address if found, NULL otherwise */
+bcm_tlv_t *bcm_find_hs20ie(uint8 *parse, uint len);
+/* Look for an OSEN IE; return its address if found, NULL otherwise */
+bcm_tlv_t *bcm_find_osenie(uint8 *parse, uint len);
+
+/* Check whether the given IE has the specific OUI and the specific type. */
+extern bool bcm_has_ie(uint8 *ie, uint8 **tlvs, uint *tlvs_len,
+ const uint8 *oui, uint oui_len, uint8 type);
+
+/* Check whether pointed-to IE looks like WPA. */
+#define bcm_is_wpa_ie(ie, tlvs, len) bcm_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPA_OUI, WPA_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define bcm_is_wps_ie(ie, tlvs, len) bcm_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+#ifdef WLP2P
+/* Check whether the given IE looks like a WFA P2P IE. */
+#define bcm_is_p2p_ie(ie, tlvs, len) bcm_has_ie(ie, tlvs, len, \
+ (const uint8 *)P2P_OUI, P2P_OUI_LEN, P2P_OUI_TYPE)
+#endif
+
+/* Convert WPA2 IE cipher suite to locally used value */
+extern bool wpa2_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok);
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS)
+/* Look for an encapsulated GTK; return its address if found, NULL otherwise */
+extern eapol_wpa2_encap_data_t *wpa_find_gtk_encap(uint8 *parse, uint len);
+
+/* Check whether pointed-to IE looks like an encapsulated GTK. */
+extern bool wpa_is_gtk_encap(uint8 *ie, uint8 **tlvs, uint *tlvs_len);
+
+/* Look for encapsulated key data; return its address if found, NULL otherwise */
+extern eapol_wpa2_encap_data_t *wpa_find_kde(const uint8 *parse, uint len, uint8 type);
+
+/* Find kde data given eapol header. */
+extern int wpa_find_eapol_kde_data(eapol_header_t *eapol, uint8 eapol_mic_len,
+ uint8 subtype, eapol_wpa2_encap_data_t **out_data);
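+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * scanning the tagged parameters of a beacon for WPA/WPS IEs. 'tlvs' and
+ * 'tlvs_len' are assumed to cover the IE section of the frame body.
+ *
+ *	wpa_ie_fixed_t *wpa = bcm_find_wpaie(tlvs, tlvs_len);
+ *	wps_ie_fixed_t *wps = bcm_find_wpsie(tlvs, tlvs_len);
+ *	if (wpa != NULL) {
+ *		// parse WPA (WPA1) parameters from the vendor IE
+ *	}
+ *	if (wps != NULL) {
+ *		// WPS attributes follow the 6 fixed bytes (WPS_IE_FIXED_LEN)
+ *	}
+ */
+/* Look for kde data in key data.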
*/ +extern int wpa_find_kde_data(const uint8 *kde_buf, uint16 buf_len, + uint8 subtype, eapol_wpa2_encap_data_t **out_data); + +#ifdef WL_OCV +/* Check if both local and remote are OCV capable */ +extern bool wpa_check_ocv_caps(uint16 local_caps, uint16 peer_caps); + +/* Write OCI KDE into the buffer */ +extern int wpa_add_oci_encap(chanspec_t chspec, uint8* buf, uint buf_len); + +/* Validate OCI KDE */ +extern int wpa_validate_oci_encap(chanspec_t chspec, const uint8* buf, uint buf_len); + +/* Write OCI IE into the buffer */ +extern int wpa_add_oci_ie(chanspec_t chspec, uint8* buf, uint buf_len); + +/* Validate OCI IE */ +extern int wpa_validate_oci_ie(chanspec_t chspec, const uint8* buf, uint buf_len); + +/* Write OCI subelement into the FTE buffer */ +extern int wpa_add_oci_ft_subelem(chanspec_t chspec, uint8* buf, uint buf_len); + +/* Validate OCI FTE subelement */ +extern int wpa_validate_oci_ft_subelem(chanspec_t chspec, + const uint8* buf, uint buf_len); +#endif /* WL_OCV */ +#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS) */ + +#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK)|| \ + defined(WL_OKC) || defined(GTKOE) +/* Calculate a pair-wise transient key */ +extern int wpa_calc_ptk(rsn_akm_t akm, const struct ether_addr *auth_ea, + const struct ether_addr *sta_ea, const uint8 *anonce, uint8 anonce_len, + const uint8* snonce, uint8 snonce_len, const uint8 *pmk, + uint pmk_len, uint8 *ptk, uint ptk_len); + +/* Compute Message Integrity Code (MIC) over EAPOL message */ +extern int wpa_make_mic(eapol_header_t *eapol, uint key_desc, uint8 *mic_key, + rsn_ie_info_t *rsn_info, uchar *mic, uint mic_len); + +/* Check MIC of EAPOL message */ +extern bool wpa_check_mic(eapol_header_t *eapol, + uint key_desc, uint8 *mic_key, rsn_ie_info_t *rsn_info); + +/* Calculate PMKID */ +extern void wpa_calc_pmkid(const struct ether_addr *auth_ea, + const struct ether_addr *sta_ea, const uint8 *pmk, uint pmk_len, uint8 *pmkid); + +/* Encrypt key data for a WPA key message */ +extern bool wpa_encr_key_data(eapol_wpa_key_header_t *body, uint16 key_info, + uint8 *ekey, uint8 *gtk, uint8 *data, uint8 *encrkey, rc4_ks_t *rc4key, + const rsn_ie_info_t *rsn_info); + +typedef uint8 wpa_rc4_ivkbuf_t[EAPOL_WPA_KEY_IV_LEN + EAPOL_WPA_ENCR_KEY_MAX_LEN]; +/* Decrypt key data from a WPA key message */ +extern int wpa_decr_key_data(eapol_wpa_key_header_t *body, uint16 key_info, + uint8 *ekey, wpa_rc4_ivkbuf_t ivk, rc4_ks_t *rc4key, const rsn_ie_info_t *rsn_info, + uint16 *dec_len); +#endif /* BCMSUP_PSK || WLFBT || BCMAUTH_PSK || defined(GTKOE) */ + +#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK)|| \ + defined(WL_OKC) || defined(GTKOE) || defined(WLHOSTFBT) + +/* Calculate PMKR0 for FT association */ +extern void wpa_calc_pmkR0(sha2_hash_type_t hash_type, const uint8 *ssid, uint ssid_len, + uint16 mdid, const uint8 *r0kh, uint r0kh_len, const struct ether_addr *sta_ea, + const uint8 *pmk, uint pmk_len, uint8 *pmkr0, uint8 *pmkr0name); + +/* Calculate PMKR1 for FT association */ +extern void wpa_calc_pmkR1(sha2_hash_type_t hash_type, const struct ether_addr *r1kh, + const struct ether_addr *sta_ea, const uint8 *pmk, uint pmk_len, + const uint8 *pmkr0name, uint8 *pmkr1, uint8 *pmkr1name); + +/* Calculate PTK for FT association */ +extern void wpa_calc_ft_ptk(sha2_hash_type_t hash_type, const struct ether_addr *bssid, + const struct ether_addr *sta_ea, const uint8 *anonce, const uint8* snonce, + const uint8 *pmk, uint pmk_len, uint8 *ptk, uint 
ptk_len); + +extern void wpa_derive_pmkR1_name(sha2_hash_type_t hash_type, struct ether_addr *r1kh, + struct ether_addr *sta_ea, uint8 *pmkr0name, uint8 *pmkr1name); + +#endif /* defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) || + * defined(WL_OKC) || defined(WLTDLS) || defined(GTKOE) || defined(WLHOSTFBT) + */ + +#if defined(BCMSUP_PSK) || defined(BCMSUPPL) + +/* Translate RSNE group mgmt cipher to CRYPTO_ALGO_XXX */ +extern uint8 bcmwpa_find_group_mgmt_algo(rsn_cipher_t g_mgmt_cipher); + +#endif /* BCMSUP_PSK || BCMSUPPL */ + +extern bool bcmwpa_akm2WPAauth(uint8 *akm, uint32 *auth, bool sta_iswpa); + +extern bool bcmwpa_cipher2wsec(uint8 *cipher, uint32 *wsec); + +#ifdef RSN_IE_INFO_STRUCT_RELOCATED +extern uint32 bcmwpa_wpaciphers2wsec(uint32 unicast); +extern int bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info, + uint32 *remaining, uint8 *type); + +/* to be removed after merge to NEWT (changed into bcmwpa_rsn_ie_info_reset) */ +void rsn_ie_info_reset(rsn_ie_info_t *rsn_info, osl_t *osh); +uint32 wlc_convert_rsn_to_wsec_bitmap(uint32 ap_cipher_mask); +#else +uint32 bcmwpa_wpaciphers2wsec(uint8 wpacipher); +int bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info, uint32 *remaining); +#endif /* RSN_IE_INFO_STRUCT_RELOCATED */ + +extern int bcmwpa_parse_rsnie(const bcm_tlv_t *ie, rsn_ie_info_t *info, device_type_t dev_type); + +/* Calculate PMKID */ +extern void kdf_calc_pmkid(const struct ether_addr *auth_ea, + const struct ether_addr *sta_ea, const uint8 *key, uint key_len, uint8 *pmkid, + rsn_ie_info_t *rsn_info); + +extern void kdf_calc_ptk(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea, + const uint8 *anonce, const uint8 *snonce, const uint8 *pmk, uint pmk_len, + uint8 *ptk, uint ptk_len); + +#ifdef WLTDLS +/* Calculate TPK for TDLS association */ +extern void wpa_calc_tpk(const struct ether_addr *init_ea, + const struct ether_addr *resp_ea, const struct ether_addr *bssid, + const uint8 *anonce, const uint8* snonce, uint8 *tpk, uint tpk_len); +#endif +extern bool bcmwpa_is_wpa_auth(uint32 wpa_auth); +extern bool bcmwpa_includes_wpa_auth(uint32 wpa_auth); +extern bool bcmwpa_is_rsn_auth(uint32 wpa_auth); +extern bool bcmwpa_includes_rsn_auth(uint32 wpa_auth); +extern int bcmwpa_get_algo_key_len(uint8 algo, uint16 *key_len); + +/* macro to pass precommit on ndis builds */ +#define bcmwpa_is_wpa2_auth(wpa_auth) bcmwpa_is_rsn_auth(wpa_auth) +extern uint8 bcmwpa_eapol_key_length(eapol_key_type_t key, rsn_akm_t akm, rsn_cipher_t cipher); + +/* rsn info allocation utilities. 
*/ +void bcmwpa_rsn_ie_info_reset(rsn_ie_info_t *rsn_info, osl_t *osh); +void bcmwpa_rsn_ie_info_rel_ref(rsn_ie_info_t **rsn_info, osl_t *osh); +int bcmwpa_rsn_ie_info_add_ref(rsn_ie_info_t *rsn_info); +int bcmwpa_rsn_akm_cipher_match(rsn_ie_info_t *rsn_info); +int bcmwpa_rsnie_eapol_key_len(rsn_ie_info_t *info); +#if defined(WL_BAND6G) +/* Return TRUE if any of the akm in akms_bmp is invalid in 6Ghz */ +bool bcmwpa_is_invalid_6g_akm(const rsn_akm_mask_t akms_bmp); +/* Return TRUE if any of the cipher in ciphers_bmp is invalid in 6Ghz */ +bool bcmwpa_is_invalid_6g_cipher(const rsn_ciphers_t ciphers_bmp); +#endif /* WL_BAND6G */ +#endif /* _BCMWPA_H_ */ diff --git a/bcmdhd.101.10.361.x/include/brcm_nl80211.h b/bcmdhd.101.10.361.x/include/brcm_nl80211.h new file mode 100755 index 0000000..29a4281 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/brcm_nl80211.h @@ -0,0 +1,77 @@ +/* + * Definitions for nl80211 vendor command/event access to host driver + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + * + */ + +#ifndef _brcm_nl80211_h_ +#define _brcm_nl80211_h_ + +//#ifdef OEM_ANDROID Need proper #ifdef in the referencing code as well +#define OUI_BRCM 0x001018 +#define OUI_GOOGLE 0x001A11 + +enum wl_vendor_subcmd { + BRCM_VENDOR_SCMD_UNSPEC = 0, + BRCM_VENDOR_SCMD_PRIV_STR = 1, + BRCM_VENDOR_SCMD_BCM_STR = 2, + BRCM_VENDOR_SCMD_BCM_PSK = 3, + BRCM_VENDOR_SCMD_SET_PMK = 4, + BRCM_VENDOR_SCMD_GET_FEATURES = 5, + BRCM_VENDOR_SCMD_SET_MAC = 6, + BRCM_VENDOR_SCMD_SET_CONNECT_PARAMS = 7, + BRCM_VENDOR_SCMD_SET_START_AP_PARAMS = 8, + BRCM_VENDOR_SCMD_MAX = 9 +}; + +struct bcm_nlmsg_hdr { + uint cmd; /* common ioctl definition */ + int len; /* expected return buffer length */ + uint offset; /* user buffer offset */ + uint set; /* get or set request optional */ + uint magic; /* magic number for verification */ +}; + +enum bcmnl_attrs { + BCM_NLATTR_UNSPEC, + + BCM_NLATTR_LEN, + BCM_NLATTR_DATA, + + __BCM_NLATTR_AFTER_LAST, + BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1 +}; + +struct nl_prv_data { + int err; /* return result */ + void *data; /* ioctl return buffer pointer */ + uint len; /* ioctl return buffer length */ + struct bcm_nlmsg_hdr *nlioc; /* bcm_nlmsg_hdr header pointer */ +}; +//#endif /* OEM_ANDROID */ + +/* Keep common BCM netlink macros here */ +#define BCM_NL_USER 31 +#define BCM_NL_OXYGEN 30 +#define BCM_NL_TS 29 +/* ====== !! ADD NEW NL socket related defines here !! 
====== */
+#endif /* _brcm_nl80211_h_ */
diff --git a/bcmdhd.101.10.361.x/include/d11.h b/bcmdhd.101.10.361.x/include/d11.h
new file mode 100755
index 0000000..5e8e7b2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11.h
@@ -0,0 +1,6055 @@
+/*
+ * Chip-specific hardware definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <>
+ */
+
+#ifndef _D11_H
+#define _D11_H
+
+/*
+ * Notes:
+ * 1. pre40/pre rev40: corerev < 40
+ * 2. pre80/pre rev80: 40 <= corerev < 80
+ * 3. rev40/D11AC: 80 > corerev >= 40
+ * 4. rev80: corerev >= 80
+ */
+
+#include
+#include
+#include
+#include <802.11.h>
+
+#if defined(BCMDONGLEHOST) || defined(WL_UNITTEST)
+typedef struct {
+ uint32 pad;
+} shmdefs_t;
+#else /* defined(BCMDONGLEHOST)|| defined(WL_UNITTEST) */
+#include
+#ifdef USE_BCMCONF_H
+#include
+#else
+#include
+#endif
+#endif /* !defined(BCMDONGLEHOST)|| !defined(WL_UNITTEST) */
+
+#include
+
+/* This marks the start of a packed structure section. */
+#include
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+#define D11AC_BCN_TMPL_LEN 640 /**< length of the BCN template area for 11AC */
+
+#define LPRS_TMPL_LEN 512 /**< length of the legacy PRS template area */
+
+/* RX FIFO numbers */
+#define RX_FIFO 0 /**< data and ctl frames */
+#define RX_FIFO1 1 /**< ctl frames */
+#define RX_FIFO2 2 /**< ctl frames */
+#define RX_FIFO_NUMBER 3
+
+/* TX FIFO numbers using WME Access Classes */
+#define TX_AC_BK_FIFO 0 /**< Access Category Background TX FIFO */
+#define TX_AC_BE_FIFO 1 /**< Access Category Best-Effort TX FIFO */
+#define TX_AC_VI_FIFO 2 /**< Access Class Video TX FIFO */
+#define TX_AC_VO_FIFO 3 /**< Access Class Voice TX FIFO */
+#define TX_BCMC_FIFO 4 /**< Broadcast/Multicast TX FIFO */
+#define TX_ATIM_FIFO 5 /**< TX fifo for ATIM window info */
+#define TX_AC_N_DATA_FIFO 4 /**< Number of legacy Data Fifos (BK, BE, VI, VO) */
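+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * the usual access-category-to-FIFO mapping implied by the defines above.
+ * The 802.1D priority-to-AC mapping itself lives elsewhere in the driver.
+ *
+ *	static const uint8 ac2fifo[] = {
+ *		TX_AC_BK_FIFO,	// AC_BK
+ *		TX_AC_BE_FIFO,	// AC_BE
+ *		TX_AC_VI_FIFO,	// AC_VI
+ *		TX_AC_VO_FIFO	// AC_VO
+ *	};
+ *	uint fifo = ac2fifo[ac];	// 'ac' in 0..TX_AC_N_DATA_FIFO-1
+ */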
+/* TX FIFO numbers for trigger queues, for HE STA-only chips (i.e. this is
+ * valid only for 4369 or similar STA chips that support a single HE STA
+ * connection).
+ */
+#define TX_TRIG_BK_FIFO 6 /**< Access Category Background TX FIFO */
+#define TX_TRIG_BE_FIFO 7 /**< Access Category Best-Effort TX FIFO */
+#define TX_TRIG_VI_FIFO 8 /**< Access Class Video TX FIFO */
+#define TX_TRIG_VO_FIFO 9 /**< Access Class Voice TX FIFO */
+#define TX_TRIG_HP_FIFO 10 /**< Access High Priority TX FIFO */
+#define TX_TRIG_N_DATA_FIFO 4 /**< Number of Trigger Data Fifos (BK, BE, VI, VO) */
+
+#if defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED)
+#define IS_TRIG_FIFO(fifo) \
+ (((fifo) >= TX_TRIG_BK_FIFO) && ((fifo) < (TX_TRIG_BK_FIFO + TX_TRIG_N_DATA_FIFO)))
+#else
+#define IS_TRIG_FIFO(fifo) FALSE
+#endif /* defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED) */
+
+#define IS_AC_FIFO(fifo) \
+ ((fifo) < (TX_AC_BK_FIFO + TX_AC_N_DATA_FIFO))
+
+/** Legacy TX FIFO numbers */
+#define TX_DATA_FIFO TX_AC_BE_FIFO
+#define TX_CTL_FIFO TX_AC_VO_FIFO
+
+/** Trig TX FIFO numbers */
+#define TX_TRIG_DATA_FIFO TX_TRIG_BE_FIFO
+#define TX_TRIG_CTL_FIFO TX_TRIG_VO_FIFO
+
+/* Extended FIFOs for corerev >= 64 */
+#define TX_FIFO_6 6
+#define TX_FIFO_7 7
+#define TX_FIFO_16 16
+#define TX_FIFO_23 23
+#define TX_FIFO_25 25
+
+#define TX_FIFO_EXT_START TX_FIFO_6 /* Starting index of extended HW TX FIFOs */
+#define TX_FIFO_MU_START 8 /* index at which MU TX FIFOs start */
+
+#define D11REG_IHR_WBASE 0x200
+#define D11REG_IHR_BASE (D11REG_IHR_WBASE << 1)
+
+#define PIHR_BASE 0x0400 /**< byte address of packed IHR region */
+
+/* biststatus */
+#define BT_DONE (1U << 31) /**< bist done */
+#define BT_B2S (1 << 30) /**< bist2 ram summary bit */
+
+/* DMA intstatus and intmask */
+#define I_PC (1 << 10) /**< pci descriptor error */
+#define I_PD (1 << 11) /**< pci data error */
+#define I_DE (1 << 12) /**< descriptor protocol error */
+#define I_RU (1 << 13) /**< receive descriptor underflow */
+#define I_RO (1 << 14) /**< receive fifo overflow */
+#define I_XU (1 << 15) /**< transmit fifo underflow */
+#define I_RI (1 << 16) /**< receive interrupt */
+#define I_XI (1 << 24) /**< transmit interrupt */
+
+/* interrupt receive lazy */
+#define IRL_TO_MASK 0x00ffffff /**< timeout */
+#define IRL_FC_MASK 0xff000000 /**< frame count */
+#define IRL_FC_SHIFT 24 /**< frame count */
+#define IRL_DISABLE 0x01000000 /**< Disabled value: int on 1 frame, zero time */
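+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * composing an interrupt receive lazy value: interrupt after 'nframes'
+ * received frames or after 'timeout' expires (both variables, and the
+ * timeout unit, are assumptions of this sketch).
+ *
+ *	uint32 irl = ((nframes << IRL_FC_SHIFT) & IRL_FC_MASK) |
+ *	             (timeout & IRL_TO_MASK);
+ *	// IRL_DISABLE is the special value: interrupt on 1 frame, zero time
+ */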
+/** for corerev >= 80. prev rev uses bit 21 */
+#define MCTL_BCNS_PROMISC_SHIFT 21
+/** for corerev < 80. prev rev uses bit 20 */
+#define MCTL_BCNS_PROMISC_SHIFT_LT80 20
+
+/* maccontrol register */
+#define MCTL_GMODE (1U << 31)
+#define MCTL_DISCARD_PMQ (1 << 30)
+#define MCTL_DISCARD_TXSTATUS (1 << 29)
+#define MCTL_TBTT_HOLD (1 << 28)
+#define MCTL_CLOSED_NETWORK (1 << 27)
+#define MCTL_WAKE (1 << 26)
+#define MCTL_HPS (1 << 25)
+#define MCTL_PROMISC (1 << 24)
+#define MCTL_KEEPBADFCS (1 << 23)
+#define MCTL_KEEPCONTROL (1 << 22)
+#define MCTL_BCNS_PROMISC (1 << MCTL_BCNS_PROMISC_SHIFT)
+#define MCTL_BCNS_PROMISC_LT80 (1 << MCTL_BCNS_PROMISC_SHIFT_LT80)
+#define MCTL_NO_TXDMA_LAST_PTR (1 << 20) /** for corerev >= 85 */
+#define MCTL_LOCK_RADIO (1 << 19)
+#define MCTL_AP (1 << 18)
+#define MCTL_INFRA (1 << 17)
+#define MCTL_BIGEND (1 << 16)
+#define MCTL_DISABLE_CT (1 << 14) /** for corerev >= 83.1 */
+#define MCTL_GPOUT_SEL_MASK (3 << 14)
+#define MCTL_GPOUT_SEL_SHIFT 14
+#define MCTL_EN_PSMDBG (1 << 13)
+#define MCTL_IHR_EN (1 << 10)
+#define MCTL_SHM_UPPER (1 << 9)
+#define MCTL_SHM_EN (1 << 8)
+#define MCTL_PSM_JMP_0 (1 << 2)
+#define MCTL_PSM_RUN (1 << 1)
+#define MCTL_EN_MAC (1 << 0)
+
+/* maccontrol1 register */
+#define MCTL1_GCPS (1u << 0u)
+#define MCTL1_EGS_MASK 0x0000c000
+#define MCTL1_EGS_SHIFT 14u
+#define MCTL1_AVB_ENABLE (1u << 1u)
+#define MCTL1_GPIOSEL_SHIFT 8u
+#define MCTL1_GPIOSEL (0x3F)
+#define MCTL1_GPIOSEL_MASK (MCTL1_GPIOSEL << MCTL1_GPIOSEL_SHIFT)
+/* Select MAC_SMPL_CPTR debug data that is placed in pc<7:1> & ifs_gpio_out<8:0> GPIOs */
+#define MCTL1_GPIOSEL_TSF_PC_IFS(_corerev) (D11REV_GE(_corerev, 85) ? 0x3b : 0x36)
+#define MCTL1_AVB_TRIGGER (1u << 2u)
+#define MCTL1_THIRD_AXI1_FOR_PSM (1u << 3u)
+#define MCTL1_AXI1_FOR_RX (1u << 4u)
+#define MCTL1_TXDMA_ENABLE_PASS (1u << 5u)
+/* SampleCollectPlayCtrl */
+#define SC_PLAYCTRL_MASK_ENABLE (1u << 8u)
+#define SC_PLAYCTRL_TRANS_MODE (1u << 6u)
+#define SC_PLAYCTRL_SRC_SHIFT 3u
+#define SC_PLAYCTRL_SRC_MASK (3u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_PHY_DBG (3u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_GPIO_OUT (2u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_GPIO_IN (1u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_PHY_SMPL (0u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_STOP (1u << 2u)
+#define SC_PLAYCTRL_PAUSE (1u << 1u)
+#define SC_PLAYCTRL_START (1u << 0u)
+/* SCPortalSel fields */
+#define SC_PORTAL_SEL_AUTO_INCR (1u << 15u) /* Autoincr */
+#define SC_PORTAL_SEL_STORE_MASK (0u << 5u) /* Bits 14:5 SCStoreMask15to0 */
+#define SC_PORTAL_SEL_MATCH_MASK (4u << 5u) /* Bits 14:5 SCMatchMask15to0 */
+#define SC_PORTAL_SEL_MATCH_VALUE (8u << 5u) /* Bits 14:5 SCMatchValue15to0 */
+#define SC_PORTAL_SEL_TRIGGER_MASK (12u << 0u) /* Bits 4:0 SCTriggerMask15to0 */
+#define SC_PORTAL_SEL_TRIGGER_VALUE (16u << 0u) /* Bits 4:0 SCTriggerValue15to0 */
+#define SC_PORTAL_SEL_TRANS_MASK (20u << 0u) /* Bits 4:0 SCTransMask15to0 */
+
+/* GpioOut register */
+#define MGPIO_OUT_RXQ1_IFIFO_CNT_MASK 0x1fc0u
+#define MGPIO_OUT_RXQ1_IFIFO_CNT_SHIFT 6u
+
+#define MAC_RXQ1_IFIFO_CNT_ADDR 0x26u
+#define MAC_RXQ1_IFIFO_MAXLEN 3u
+
+/* maccommand register */
+#define MCMD_BCN0VLD (1 << 0)
+#define MCMD_BCN1VLD (1 << 1)
+#define MCMD_DIRFRMQVAL (1 << 2)
+#define MCMD_CCA (1 << 3)
+#define MCMD_BG_NOISE (1 << 4)
+#define MCMD_SKIP_SHMINIT (1 << 5) /**< only used for simulation */
+#define MCMD_SLOWCAL (1 << 6)
+#define MCMD_SAMPLECOLL MCMD_SKIP_SHMINIT /**< reuse for sample collect */
+#define MCMD_IF_DOWN (1 << 8) /**< indicate interface is going down */
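+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * driver-to-ucode commands are posted by writing an MCMD_* bit to the
+ * maccommand register; W_REG() and the 'regs' layout follow the OSL/d11regs
+ * conventions used elsewhere in this driver and are assumptions here.
+ *
+ *	W_REG(osh, &regs->maccommand, MCMD_BCN0VLD);	// beacon template 0 valid
+ */
+#define MCMD_TOF (1 << 9) /**< wifi ranging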
processing in ucode for rxd frames */ +#define MCMD_TSYNC (1 << 10) /**< start timestamp sync process in ucode */ +#define MCMD_RADIO_DOWN (1 << 11) /**< radio down by ucode */ +#define MCMD_RADIO_UP (1 << 12) /**< radio up by ucode */ +#define MCMD_TXPU (1 << 13) /**< txpu control by ucode */ + +/* macintstatus/macintmask */ +#define MI_MACSSPNDD (1 << 0) /**< MAC has gracefully suspended */ +#define MI_BCNTPL (1 << 1) /**< beacon template available */ +#define MI_TBTT (1 << 2) /**< TBTT indication */ +#define MI_BCNSUCCESS (1 << 3) /**< beacon successfully tx'd */ +#define MI_BCNCANCLD (1 << 4) /**< beacon canceled (IBSS) */ +#define MI_ATIMWINEND (1 << 5) /**< end of ATIM-window (IBSS) */ +#define MI_PMQ (1 << 6) /**< PMQ entries available */ +#define MI_ALTTFS (1 << 7) /**< TX status interrupt for ARM offloads */ +#define MI_NSPECGEN_1 (1 << 8) /**< non-specific gen-stat bits that are set by PSM */ +#define MI_MACTXERR (1 << 9) /**< MAC level Tx error */ +#define MI_PMQERR (1 << 10) +#define MI_PHYTXERR (1 << 11) /**< PHY Tx error */ +#define MI_PME (1 << 12) /**< Power Management Event */ +#define MI_GP0 (1 << 13) /**< General-purpose timer0 */ +#define MI_GP1 (1 << 14) /**< General-purpose timer1 */ +#define MI_DMAINT (1 << 15) /**< (ORed) DMA-interrupts */ +#define MI_TXSTOP (1 << 16) /**< MAC has completed a TX FIFO Suspend/Flush */ +#define MI_CCA (1 << 17) /**< MAC has completed a CCA measurement */ +#define MI_BG_NOISE (1 << 18) /**< MAC has collected background noise samples */ +#define MI_DTIM_TBTT (1 << 19) /**< MBSS DTIM TBTT indication */ +#define MI_PRQ (1 << 20) /**< Probe response queue needs attention */ +#define MI_HEB (1 << 21) /**< HEB (Hardware Event Block) interrupt - 11ax cores */ +#define MI_BT_RFACT_STUCK (1 << 22) /**< MAC has detected invalid BT_RFACT pin, + * valid when rev < 15 + */ +#define MI_TTTT (1 << 22) /**< Target TIM Transmission Time, + * valid in rev = 26/29, or rev >= 42 + */ +#define MI_BT_PRED_REQ (1 << 23) /**< MAC requested driver BTCX predictor calc */ +#define MI_BCNTRIM_RX (1 << 24) /**< PSM received a partial beacon */ +#define MI_P2P (1 << 25) /**< WiFi P2P interrupt */ +#define MI_DMATX (1 << 26) /**< MAC new frame ready */ +#define MI_TSSI_LIMIT (1 << 27) /**< Tssi Limit Reach, TxIdx=0/127 Interrupt */ +#define MI_HWACI_NOTIFY (1 << 27) /**< HWACI detects ACI, Apply Mitigation settings */ +#define MI_RFDISABLE (1 << 28) /**< MAC detected a change on RF Disable input + * (corerev >= 10) + */ +#define MI_TFS (1 << 29) /**< MAC has completed a TX (corerev >= 5) */ +#define MI_LEGACY_BUS_ERROR (1 << 30) /**< uCode indicated bus error */ +#define MI_TO (1U << 31) /**< general purpose timeout (corerev >= 3) */ + +#define MI_RXOV MI_NSPECGEN_1 /**< rxfifo overflow interrupt */ + +/* macintstatus_ext/macintmask_ext */ +#define MI_BUS_ERROR (1U << 0u) /**< uCode indicated bus error */ +#define MI_VCOPLL (1U << 1u) /**< uCode indicated PLL lock issue */ +#define MI_EXT_PS_CHG (1U << 2u) /**< Power state is changing (PS 0 <-> 1) */ +#define MI_DIS_ULOFDMA (1U << 3u) /**< ucode indicated disabling ULOFDMA request */ +#define MI_EXT_PM_OFFLOAD (1U << 4u) /**< PM offload */ +#define MI_OBSS_INTR (1U << 5u) /**< OBSS detection interrupt */ +#define MI_SENSORC_CX_REQ (1U << 6u) /**< SensorC Mitigation Request interrupt */ +#define MI_RLL_NAV_HOF (1U << 7u) /**< RLLW Switch */ + +#define MI_EXT_TXE_SHARED_ERR (1U << 28u) /* Error event in blocks inside TXE shared + * (BMC/AQM/AQM-DMA/MIF) + */ + +/* Mac capabilities registers */ +#define MCAP_TKIPMIC 
0x80000000 /**< TKIP MIC hardware present */ +#define MCAP_TKIPPH2KEY 0x40000000 /**< TKIP phase 2 key hardware present */ +#define MCAP_BTCX 0x20000000 /**< BT coexistence hardware and pins present */ +#define MCAP_MBSS 0x10000000 /**< Multi-BSS hardware present */ +#define MCAP_RXFSZ_MASK 0x0ff80000 /**< Rx fifo size in blocks (revid >= 16) */ +#define MCAP_RXFSZ_SHIFT 19 +#define MCAP_NRXQ_MASK 0x00070000 /**< Max Rx queues supported - 1 */ +#define MCAP_NRXQ_SHIFT 16 +#define MCAP_UCMSZ_MASK 0x0000e000 /**< Ucode memory size */ +#define MCAP_UCMSZ_3K3 0 /**< 3328 Words Ucode memory, in unit of 50-bit */ +#define MCAP_UCMSZ_4K 1 /**< 4096 Words Ucode memory */ +#define MCAP_UCMSZ_5K 2 /**< 5120 Words Ucode memory */ +#define MCAP_UCMSZ_6K 3 /**< 6144 Words Ucode memory */ +#define MCAP_UCMSZ_8K 4 /**< 8192 Words Ucode memory */ +#define MCAP_UCMSZ_SHIFT 13 +#define MCAP_TXFSZ_MASK 0x00000ff8 /**< Tx fifo size (* 512 bytes) */ +#define MCAP_TXFSZ_SHIFT 3 +#define MCAP_NTXQ_MASK 0x00000007 /**< Max Tx queues supported - 1 */ +#define MCAP_NTXQ_SHIFT 0 + +#define MCAP_BTCX_SUP(corerev) (MCAP_BTCX) + +#define MCAP_UCMSZ_TYPES 8 /**< different Ucode memory size types */ + +/* machwcap1 */ +#define MCAP1_ERC_MASK 0x00000001 /**< external radio coexistence */ +#define MCAP1_ERC_SHIFT 0 +#define MCAP1_SHMSZ_MASK 0x0000000e /**< shm size (corerev >= 16) */ +#define MCAP1_SHMSZ_SHIFT 1 +#define MCAP1_SHMSZ_1K 0 /**< 1024 words in unit of 32-bit */ +#define MCAP1_SHMSZ_2K 1 /**< 1536 words in unit of 32-bit */ +#define MCAP1_NUMMACCHAINS 0x00003000 /**< Indicates one less than the + number of MAC Chains in the MAC. + */ +#define MCAP1_NUMMACCHAINS_SHIFT 12 +#define MCAP1_RXBLMAX_MASK 0x1800000u +#define MCAP1_RXBLMAX_SHIFT 23u +#define MCAP1_NUM_HEB_MASK 0xE0000000u +#define MCAP1_NUM_HEB_SHIFT 29u +#define MCAP1_NUM_HEB_FACTOR 3u +#define MCAP1_CT_CAPABLE_SHIFT 17 + +/* BTCX control */ +#define BTCX_CTRL_EN 0x0001 /**< Enable BTCX module */ +#define BTCX_CTRL_SW 0x0002 /**< Enable software override */ +#define BTCX_CTRL_DSBLBTCXOUT 0x8000 /* Disable txconf/prisel signal output from btcx module */ + +#define BTCX_CTRL_PRI_POL 0x0080 /* Invert prisel polarity */ +#define BTCX_CTRL_TXC_POL 0x0020 /* Invert txconf polarity */ + +#define SW_PRI_ON 1 /* switch prisel polarity */ +#define SW_TXC_ON 2 /* switch txconf polarity */ + +/* BTCX status */ +#define BTCX_STAT_RA 0x0001 /**< RF_ACTIVE state */ + +/* BTCX transaction control */ +#define BTCX_TRANS_ANTSEL 0x0040 /**< ANTSEL output */ +#define BTCX_TRANS_TXCONF 0x0080 /**< TX_CONF output */ + +/* pmqhost data */ +#define PMQH_DATA_MASK 0xffff0000 /**< data entry of head pmq entry */ +#define PMQH_BSSCFG 0x00100000 /**< PM entry for BSS config */ +#define PMQH_PMOFF 0x00010000 /**< PM Mode OFF: power save off */ +#define PMQH_PMON 0x00020000 /**< PM Mode ON: power save on */ +#define PMQH_PMPS 0x00200000 /**< PM Mode PRETEND */ +#define PMQH_DASAT 0x00040000 /**< Dis-associated or De-authenticated */ +#define PMQH_ATIMFAIL 0x00080000 /**< ATIM not acknowledged */ +#define PMQH_DEL_ENTRY 0x00000001 /**< delete head entry */ +#define PMQH_DEL_MULT 0x00000002 /**< delete head entry to cur read pointer -1 */ +#define PMQH_OFLO 0x00000004 /**< pmq overflow indication */ +#define PMQH_NOT_EMPTY 0x00000008 /**< entries are present in pmq */ + +/* phydebug (corerev >= 3) */ +#define PDBG_CRS (1 << 0) /**< phy is asserting carrier sense */ +#define PDBG_TXA (1 << 1) /**< phy is taking xmit byte from mac this cycle */ +#define PDBG_TXF (1 << 2) /**< mac is 
instructing the phy to transmit a frame */
+#define PDBG_TXE (1 << 3) /**< phy is signaling a transmit Error to the mac */
+#define PDBG_RXF (1 << 4) /**< phy detected the end of a valid frame preamble */
+#define PDBG_RXS (1 << 5) /**< phy detected the end of a valid PLCP header */
+#define PDBG_RXFRG (1 << 6) /**< rx start not asserted */
+#define PDBG_RXV (1 << 7) /**< mac is taking receive byte from phy this cycle */
+#define PDBG_RFD (1 << 16) /**< RF portion of the radio is disabled */
+
+/* objaddr register */
+#define OBJADDR_UCM_SEL 0x00000000
+#define OBJADDR_SHM_SEL 0x00010000
+#define OBJADDR_SCR_SEL 0x00020000
+#define OBJADDR_IHR_SEL 0x00030000
+#define OBJADDR_RCMTA_SEL 0x00040000
+#define OBJADDR_AMT_SEL 0x00040000
+#define OBJADDR_SRCHM_SEL 0x00060000
+#define OBJADDR_KEYTBL_SEL 0x000c0000
+#define OBJADDR_HEB_SEL 0x00120000
+#define OBJADDR_TXDC_TBL_SEL 0x00140000
+#define OBJADDR_TXDC_RIB_SEL 0x00150000
+#define OBJADDR_FCBS_SEL 0x00160000
+#define OBJADDR_LIT_SEL 0x00170000
+#define OBJADDR_LIB_SEL 0x00180000
+#define OBJADDR_WINC 0x01000000
+#define OBJADDR_RINC 0x02000000
+#define OBJADDR_AUTO_INC 0x03000000
+/* SHM/SCR/IHR/SHMX/SCRX/IHRX allow 2 bytes read/write, else only 4 bytes */
+#define OBJADDR_2BYTES_ACCESS(sel) \
+ (((sel & 0x70000) == OBJADDR_SHM_SEL) || \
+ ((sel & 0x70000) == OBJADDR_SCR_SEL) || \
+ ((sel & 0x70000) == OBJADDR_IHR_SEL))
+
+/* objdata register */
+#define OBJDATA_WR_COMPLT 0x00000001
+
+/* frmtxstatus */
+#define TXS_V (1 << 0) /**< valid bit */
+
+#define TXS_STATUS_MASK 0xffff
+/* sw mask to map txstatus for corerevs <= 4 to be the same as for corerev > 4 */
+#define TXS_COMPAT_MASK 0x3
+#define TXS_COMPAT_SHIFT 1
+#define TXS_FID_MASK 0xffff0000
+#define TXS_FID_SHIFT 16
+
+/* frmtxstatus2 */
+#define TXS_SEQ_MASK 0xffff
+#define TXS_PTX_MASK 0xff0000
+#define TXS_PTX_SHIFT 16
+#define TXS_MU_MASK 0x01000000
+#define TXS_MU_SHIFT 24
+
+/* clk_ctl_st, corerev >= 17 */
+#define CCS_ERSRC_REQ_D11PLL 0x00000100 /**< d11 core pll request */
+#define CCS_ERSRC_REQ_PHYPLL 0x00000200 /**< PHY pll request */
+#define CCS_ERSRC_REQ_PTMPLL 0x00001000 /* PTM clock request */
+#define CCS_ERSRC_AVAIL_D11PLL 0x01000000 /**< d11 core pll available */
+#define CCS_ERSRC_AVAIL_PHYPLL 0x02000000 /**< PHY pll available */
+#define CCS_ERSRC_AVAIL_PTMPLL 0x10000000 /**< PTM pll available */
+
+/* tsf_cfprep register */
+#define CFPREP_CBI_MASK 0xffffffc0
+#define CFPREP_CBI_SHIFT 6
+#define CFPREP_CFPP 0x00000001
+
+/* receive fifo control */
+#define RFC_FR (1 << 0) /**< frame ready */
+#define RFC_DR (1 << 1) /**< data ready */
+
+/* tx fifo sizes for corerev >= 9 */
+/* tx fifo size values are in terms of 256 byte blocks */
+#define TXFIFOCMD_RESET_MASK (1 << 15) /**< reset */
+#define TXFIFOCMD_FIFOSEL_SHIFT 8 /**< fifo */
+#define TXFIFOCMD_FIFOSEL_SET(val) ((val & 0x7) << TXFIFOCMD_FIFOSEL_SHIFT) /* fifo */
+#define TXFIFOCMD_FIFOSEL_GET(val) ((val >> TXFIFOCMD_FIFOSEL_SHIFT) & 0x7) /* fifo */
+#define TXFIFO_FIFOTOP_SHIFT 8 /**< fifo start */
+
+#define TXFIFO_FIFO_START(def, def1) ((def & 0xFF) | ((def1 & 0xFF) << 8))
+#define TXFIFO_FIFO_END(def, def1) (((def & 0xFF00) >> 8) | (def1 & 0xFF00))
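+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * 'def' and 'def1' are assumed to hold the two per-FIFO definition words
+ * read back after a TXFIFOCMD_FIFOSEL_SET() selection; the FIFO bounds
+ * come out in 256-byte blocks.
+ *
+ *	uint start = TXFIFO_FIFO_START(def, def1);
+ *	uint end = TXFIFO_FIFO_END(def, def1);
+ *	uint blocks = end - start + 1;	// FIFO size in 256-byte blocks
+ */
+/* Must redefine to 65 for 16 MBSS */
+#ifdef WLLPRS
+#define TXFIFO_START_BLK16 (65+16) /**< Base address + 32 * 512 B/P + 8 * 512 11g P */
+#else /* WLLPRS */
+#define TXFIFO_START_BLK16 65 /**< Base address + 32 * 512 B/P */
+#endif /* WLLPRS */
+#define TXFIFO_START_BLK 6 /**< Base address + 6 * 256 B */
+#define TXFIFO_START_BLK_NIN 7 /**< Base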
address + 6 * 256 B */ + +#define TXFIFO_AC_SIZE_PER_UNIT 512 /**< one unit corresponds to 512 bytes */ + +#define MBSS16_TEMPLMEM_MINBLKS 65 /**< one unit corresponds to 256 bytes */ + +/* phy versions, PhyVersion:Revision field */ +#define PV_AV_MASK 0xf000 /**< analog block version */ +#define PV_AV_SHIFT 12 /**< analog block version bitfield offset */ +#define PV_PT_MASK 0x0f00 /**< phy type */ +#define PV_PT_SHIFT 8 /**< phy type bitfield offset */ +#define PV_PV_MASK 0x00ff /**< phy version */ +#define PHY_TYPE(v) ((v & PV_PT_MASK) >> PV_PT_SHIFT) + +/* phy types, PhyVersion:PhyType field */ +#ifndef USE_BCMCONF_H +#define PHY_TYPE_A 0 /**< A-Phy value */ +#define PHY_TYPE_B 1 /**< B-Phy value */ +#define PHY_TYPE_G 2 /**< G-Phy value */ +#define PHY_TYPE_N 4 /**< N-Phy value */ +/* #define PHY_TYPE_LP 5 */ /**< LP-Phy value */ +/* #define PHY_TYPE_SSN 6 */ /**< SSLPN-Phy value */ +#define PHY_TYPE_HT 7 /**< 3x3 HTPhy value */ +#define PHY_TYPE_LCN 8 /**< LCN-Phy value */ +#define PHY_TYPE_LCNXN 9 /**< LCNXN-Phy value */ +#define PHY_TYPE_LCN40 10 /**< LCN40-Phy value */ +#define PHY_TYPE_AC 11 /**< AC-Phy value */ +#define PHY_TYPE_LCN20 12 /**< LCN20-Phy value */ +#define PHY_TYPE_HE 13 /**< HE-Phy value */ +#define PHY_TYPE_NULL 0xf /**< Invalid Phy value */ +#endif /* USE_BCMCONF_H */ + +/* analog types, PhyVersion:AnalogType field */ +#define ANA_11G_018 1 +#define ANA_11G_018_ALL 2 +#define ANA_11G_018_ALLI 3 +#define ANA_11G_013 4 +#define ANA_11N_013 5 +#define ANA_11LP_013 6 + +/** 802.11a PLCP header def */ +typedef struct ofdm_phy_hdr ofdm_phy_hdr_t; +BWL_PRE_PACKED_STRUCT struct ofdm_phy_hdr { + uint8 rlpt[3]; /**< rate, length, parity, tail */ + uint16 service; + uint8 pad; +} BWL_POST_PACKED_STRUCT; + +#define D11A_PHY_HDR_GRATE(phdr) ((phdr)->rlpt[0] & 0x0f) +#define D11A_PHY_HDR_GRES(phdr) (((phdr)->rlpt[0] >> 4) & 0x01) +#define D11A_PHY_HDR_GLENGTH(phdr) (((*((uint32 *)((phdr)->rlpt))) >> 5) & 0x0fff) +#define D11A_PHY_HDR_GPARITY(phdr) (((phdr)->rlpt[3] >> 1) & 0x01) +#define D11A_PHY_HDR_GTAIL(phdr) (((phdr)->rlpt[3] >> 2) & 0x3f) + +/** rate encoded per 802.11a-1999 sec 17.3.4.1 */ +#define D11A_PHY_HDR_SRATE(phdr, rate) \ + ((phdr)->rlpt[0] = ((phdr)->rlpt[0] & 0xf0) | ((rate) & 0xf)) +/** set reserved field to zero */ +#define D11A_PHY_HDR_SRES(phdr) ((phdr)->rlpt[0] &= 0xef) +/** length is number of octets in PSDU */ +#define D11A_PHY_HDR_SLENGTH(phdr, length) \ + (*(uint32 *)((phdr)->rlpt) = *(uint32 *)((phdr)->rlpt) | \ + (((length) & 0x0fff) << 5)) +/** set the tail to all zeros */ +#define D11A_PHY_HDR_STAIL(phdr) ((phdr)->rlpt[3] &= 0x03) + +#define D11A_PHY_HDR_LEN_L 3 /**< low-rate part of PLCP header */ +#define D11A_PHY_HDR_LEN_R 2 /**< high-rate part of PLCP header */ + +#define D11A_PHY_TX_DELAY (2) /**< 2.1 usec */ + +#define D11A_PHY_HDR_TIME (4) /**< low-rate part of PLCP header */ +#define D11A_PHY_PRE_TIME (16) +#define D11A_PHY_PREHDR_TIME (D11A_PHY_PRE_TIME + D11A_PHY_HDR_TIME) + +/** 802.11b PLCP header def */ +typedef struct cck_phy_hdr cck_phy_hdr_t; +BWL_PRE_PACKED_STRUCT struct cck_phy_hdr { + uint8 signal; + uint8 service; + uint16 length; + uint16 crc; +} BWL_POST_PACKED_STRUCT; + +#define D11B_PHY_HDR_LEN 6 + +#define D11B_PHY_TX_DELAY (3) /**< 3.4 usec */ + +#define D11B_PHY_LHDR_TIME (D11B_PHY_HDR_LEN << 3) +#define D11B_PHY_LPRE_TIME (144) +#define D11B_PHY_LPREHDR_TIME (D11B_PHY_LPRE_TIME + D11B_PHY_LHDR_TIME) + +#define D11B_PHY_SHDR_TIME (D11B_PHY_LHDR_TIME >> 1) +#define D11B_PHY_SPRE_TIME (D11B_PHY_LPRE_TIME >> 1) +#define 
D11B_PHY_SPREHDR_TIME (D11B_PHY_SPRE_TIME + D11B_PHY_SHDR_TIME)
+
+#define D11B_PLCP_SIGNAL_LOCKED (1 << 2)
+#define D11B_PLCP_SIGNAL_LE (1 << 7)
+
+/* AMPDUXXX: move to ht header file once it is ready: Mimo PLCP */
+#define MIMO_PLCP_MCS_MASK 0x7f /**< mcs index */
+#define MIMO_PLCP_40MHZ 0x80 /**< 40 MHz frame */
+#define MIMO_PLCP_AMPDU 0x08 /**< ampdu */
+
+#define WLC_GET_CCK_PLCP_LEN(plcp) (plcp[4] + (plcp[5] << 8))
+#define WLC_GET_MIMO_PLCP_LEN(plcp) (plcp[1] + (plcp[2] << 8))
+#define WLC_SET_MIMO_PLCP_LEN(plcp, len) \
+ plcp[1] = len & 0xff; plcp[2] = ((len >> 8) & 0xff);
+
+#define WLC_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
+#define WLC_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
+#define WLC_IS_MIMO_PLCP_AMPDU(plcp) (plcp[3] & MIMO_PLCP_AMPDU)
+
+/**
+ * The dot11a PLCP header is 5 bytes. To simplify the software (so that we don't need e.g.
+ * different tx DMA headers for 11a and 11b), the PLCP header has padding added in the ucode.
+ */
+#define D11_PHY_HDR_LEN 6u
+
+/** For the AC phy PLCP is 12 bytes and not all bytes are used for all the modulations */
+#define D11AC_PHY_HDR_LEN 12
+#define D11AC_PHY_VHT_PLCP_OFFSET 0
+#define D11AC_PHY_HTMM_PLCP_OFFSET 0
+#define D11AC_PHY_HTGF_PLCP_OFFSET 3
+#define D11AC_PHY_OFDM_PLCP_OFFSET 3
+#define D11AC_PHY_CCK_PLCP_OFFSET 6
+#define D11AC_PHY_BEACON_PLCP_OFFSET 0
+
+#define D11_PHY_RXPLCP_LEN(rev) (D11_PHY_HDR_LEN)
+#define D11_PHY_RXPLCP_OFF(rev) (0)
+
+/** TX descriptor - pre40 */
+typedef struct d11txh_pre40 d11txh_pre40_t;
+BWL_PRE_PACKED_STRUCT struct d11txh_pre40 {
+ uint16 MacTxControlLow; /* 0x0 */
+ uint16 MacTxControlHigh; /* 0x1 */
+ uint16 MacFrameControl; /* 0x2 */
+ uint16 TxFesTimeNormal; /* 0x3 */
+ uint16 PhyTxControlWord; /* 0x4 */
+ uint16 PhyTxControlWord_1; /* 0x5 */
+ uint16 PhyTxControlWord_1_Fbr; /* 0x6 */
+ uint16 PhyTxControlWord_1_Rts; /* 0x7 */
+ uint16 PhyTxControlWord_1_FbrRts; /* 0x8 */
+ uint16 MainRates; /* 0x9 */
+ uint16 XtraFrameTypes; /* 0xa */
+ uint8 IV[16]; /* 0x0b - 0x12 */
+ uint8 TxFrameRA[6]; /* 0x13 - 0x15 */
+ uint16 TxFesTimeFallback; /* 0x16 */
+ uint8 RTSPLCPFallback[6]; /* 0x17 - 0x19 */
+ uint16 RTSDurFallback; /* 0x1a */
+ uint8 FragPLCPFallback[6]; /* 0x1b - 1d */
+ uint16 FragDurFallback; /* 0x1e */
+ uint16 MModeLen; /* 0x1f */
+ uint16 MModeFbrLen; /* 0x20 */
+ uint16 TstampLow; /* 0x21 */
+ uint16 TstampHigh; /* 0x22 */
+ uint16 ABI_MimoAntSel; /* 0x23 */
+ uint16 PreloadSize; /* 0x24 */
+ uint16 AmpduSeqCtl; /* 0x25 */
+ uint16 TxFrameID; /* 0x26 */
+ uint16 TxStatus; /* 0x27 */
+ uint16 MaxNMpdus; /* 0x28 corerev >=16 */
+ BWL_PRE_PACKED_STRUCT union {
+ uint16 MaxAggDur; /* 0x29 corerev >=16 */
+ uint16 MaxAggLen;
+ } BWL_POST_PACKED_STRUCT u1;
+ BWL_PRE_PACKED_STRUCT union {
+ BWL_PRE_PACKED_STRUCT struct { /* 0x29 corerev >=16 */
+ uint8 MaxRNum;
+ uint8 MaxAggBytes; /* Max Agg Bytes in power of 2 */
+ } BWL_POST_PACKED_STRUCT s1;
+ uint16 MaxAggLen_FBR;
+ } BWL_POST_PACKED_STRUCT u2;
+ uint16 MinMBytes; /* 0x2b corerev >=16 */
+ uint8 RTSPhyHeader[D11_PHY_HDR_LEN]; /* 0x2c - 0x2e */
+ struct dot11_rts_frame rts_frame; /* 0x2f - 0x36 */
+ uint16 pad; /* 0x37 */
+} BWL_POST_PACKED_STRUCT;
+
+#define D11_TXH_LEN 112 /**< bytes */
+
+/* Frame Types */
+#define FT_LEGACY (-1)
+#define FT_CCK 0
+#define FT_OFDM 1
+#define FT_HT 2
+#define FT_VHT 3
+#define FT_HE 4
+#define FT_EHT 6
+
+/* HE PPDU type */
+#define HE_SU_PPDU 0
+#define HE_SU_RE_PPDU 1
+#define HE_MU_PPDU 2
+#define HE_TRIG_PPDU 3
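+/*
+ * Example (editor's illustrative sketch, not part of the original header):
+ * locating the modulation-specific part of an AC-phy PLCP from the frame
+ * type, using the offsets defined above. 'ft' and 'plcp' are assumed
+ * locals; 'plcp' spans D11AC_PHY_HDR_LEN bytes.
+ *
+ *	uint off;
+ *	switch (ft) {
+ *	case FT_VHT:	off = D11AC_PHY_VHT_PLCP_OFFSET; break;
+ *	case FT_OFDM:	off = D11AC_PHY_OFDM_PLCP_OFFSET; break;
+ *	case FT_CCK:	off = D11AC_PHY_CCK_PLCP_OFFSET; break;
+ *	default:	off = 0; break;
+ *	}
+ *	uint8 *mod_plcp = plcp + off;
+ */
+/* Position of MPDU inside an A-MPDU; indicated with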
bits 10:9 of MacTxControlLow */ +#define TXC_AMPDU_SHIFT 9 /**< shift for ampdu settings */ +#define TXC_AMPDU_NONE 0 /**< Regular MPDU, not an A-MPDU */ +#define TXC_AMPDU_FIRST 1 /**< first MPDU of an A-MPDU */ +#define TXC_AMPDU_MIDDLE 2 /**< intermediate MPDU of an A-MPDU */ +#define TXC_AMPDU_LAST 3 /**< last (or single) MPDU of an A-MPDU */ + +/* MacTxControlLow */ +#define TXC_AMIC 0x8000 +#define TXC_USERIFS 0x4000 +#define TXC_LIFETIME 0x2000 +#define TXC_FRAMEBURST 0x1000 +#define TXC_SENDCTS 0x0800 +#define TXC_AMPDU_MASK 0x0600 +#define TXC_BW_40 0x0100 +#define TXC_FREQBAND_5G 0x0080 +#define TXC_DFCS 0x0040 +#define TXC_IGNOREPMQ 0x0020 +#define TXC_HWSEQ 0x0010 +#define TXC_STARTMSDU 0x0008 +#define TXC_SENDRTS 0x0004 +#define TXC_LONGFRAME 0x0002 +#define TXC_IMMEDACK 0x0001 + +/* MacTxControlHigh */ +#define TXC_PREAMBLE_RTS_FB_SHORT 0x8000 /* RTS fallback preamble type 1 = SHORT 0 = LONG */ +#define TXC_PREAMBLE_RTS_MAIN_SHORT 0x4000 /* RTS main rate preamble type 1 = SHORT 0 = LONG */ +#define TXC_PREAMBLE_DATA_FB_SHORT 0x2000 /**< Main fallback rate preamble type + * 1 = SHORT for OFDM/GF for MIMO + * 0 = LONG for CCK/MM for MIMO + */ +/* TXC_PREAMBLE_DATA_MAIN is in PhyTxControl bit 5 */ +#define TXC_AMPDU_FBR 0x1000 /**< use fallback rate for this AMPDU */ +#define TXC_SECKEY_MASK 0x0FF0 +#define TXC_SECKEY_SHIFT 4 +#define TXC_ALT_TXPWR 0x0008 /**< Use alternate txpwr defined at loc. M_ALT_TXPWR_IDX */ +#define TXC_SECTYPE_MASK 0x0007 +#define TXC_SECTYPE_SHIFT 0 + +/* Null delimiter for Fallback rate */ +#define AMPDU_FBR_NULL_DELIM 5 /**< Location of Null delimiter count for AMPDU */ + +/* PhyTxControl for Mimophy */ +#define PHY_TXC_PWR_MASK 0xFC00 +#define PHY_TXC_PWR_SHIFT 10 +#define PHY_TXC_ANT_MASK 0x03C0 /**< bit 6, 7, 8, 9 */ +#define PHY_TXC_ANT_SHIFT 6 +#define PHY_TXC_ANT_0_1 0x00C0 /**< auto, last rx */ +#define PHY_TXC_LPPHY_ANT_LAST 0x0000 +#define PHY_TXC_ANT_3 0x0200 /**< virtual antenna 3 */ +#define PHY_TXC_ANT_2 0x0100 /**< virtual antenna 2 */ +#define PHY_TXC_ANT_1 0x0080 /**< virtual antenna 1 */ +#define PHY_TXC_ANT_0 0x0040 /**< virtual antenna 0 */ + +#define PHY_TXC_SHORT_HDR 0x0010 +#define PHY_TXC_FT_MASK 0x0003 + +#define PHY_TXC_FT_CCK 0x0000 +#define PHY_TXC_FT_OFDM 0x0001 +#define PHY_TXC_FT_HT 0x0002 +#define PHY_TXC_FT_VHT 0x0003 +#define PHY_TXC_FT_HE 0x0004 +#define PHY_TXC_FT_EHT 0x0006 + +#define PHY_TXC_OLD_ANT_0 0x0000 +#define PHY_TXC_OLD_ANT_1 0x0100 +#define PHY_TXC_OLD_ANT_LAST 0x0300 + +/** PhyTxControl_1 for Mimophy */ +#define PHY_TXC1_BW_MASK 0x0007 +#define PHY_TXC1_BW_10MHZ 0 +#define PHY_TXC1_BW_10MHZ_UP 1 +#define PHY_TXC1_BW_20MHZ 2 +#define PHY_TXC1_BW_20MHZ_UP 3 +#define PHY_TXC1_BW_40MHZ 4 +#define PHY_TXC1_BW_40MHZ_DUP 5 +#define PHY_TXC1_MODE_SHIFT 3 +#define PHY_TXC1_MODE_MASK 0x0038 +#define PHY_TXC1_MODE_SISO 0 +#define PHY_TXC1_MODE_CDD 1 +#define PHY_TXC1_MODE_STBC 2 +#define PHY_TXC1_MODE_SDM 3 +#define PHY_TXC1_CODE_RATE_SHIFT 8 +#define PHY_TXC1_CODE_RATE_MASK 0x0700 +#define PHY_TXC1_CODE_RATE_1_2 0 +#define PHY_TXC1_CODE_RATE_2_3 1 +#define PHY_TXC1_CODE_RATE_3_4 2 +#define PHY_TXC1_CODE_RATE_4_5 3 +#define PHY_TXC1_CODE_RATE_5_6 4 +#define PHY_TXC1_CODE_RATE_7_8 6 +#define PHY_TXC1_MOD_SCHEME_SHIFT 11 +#define PHY_TXC1_MOD_SCHEME_MASK 0x3800 +#define PHY_TXC1_MOD_SCHEME_BPSK 0 +#define PHY_TXC1_MOD_SCHEME_QPSK 1 +#define PHY_TXC1_MOD_SCHEME_QAM16 2 +#define PHY_TXC1_MOD_SCHEME_QAM64 3 +#define PHY_TXC1_MOD_SCHEME_QAM256 4 + +/* PhyTxControl for HTphy that are different from Mimophy */ +#define 
PHY_TXC_HTANT_MASK 0x3fC0 /**< bit 6, 7, 8, 9, 10, 11, 12, 13 */ +#define PHY_TXC_HTCORE_MASK 0x03C0 /**< core enable core3:core0, 1=enable, 0=disable */ +#define PHY_TXC_HTCORE_SHIFT 6 /**< bit 6, 7, 8, 9 */ +#define PHY_TXC_HTANT_IDX_MASK 0x3C00 /**< 4-bit, 16 possible antenna configuration */ +#define PHY_TXC_HTANT_IDX_SHIFT 10 +#define PHY_TXC_HTANT_IDX0 0 +#define PHY_TXC_HTANT_IDX1 1 +#define PHY_TXC_HTANT_IDX2 2 +#define PHY_TXC_HTANT_IDX3 3 + +/* PhyTxControl_1 for HTphy that are different from Mimophy */ +#define PHY_TXC1_HTSPARTIAL_MAP_MASK 0x7C00 /**< bit 14:10 */ +#define PHY_TXC1_HTSPARTIAL_MAP_SHIFT 10 +#define PHY_TXC1_HTTXPWR_OFFSET_MASK 0x01f8 /**< bit 8:3 */ +#define PHY_TXC1_HTTXPWR_OFFSET_SHIFT 3 + +/* TxControl word follows new interface for AX */ +/* PhyTxControl_6 for AXphy */ +#define PHY_TXC5_AXTXPWR_OFFSET_C0_MASK 0xff00 /**< bit 15:8 */ +#define PHY_TXC5_AXTXPWR_OFFSET_C0_SHIFT 8 +#define PHY_TXC6_AXTXPWR_OFFSET_C1_MASK 0x00ff /**< bit 7:0 */ +#define PHY_TXC6_AXTXPWR_OFFSET_C1_SHIFT 0 +#define PHY_TXC5_AXTXPWR_OFFSET_C2_MASK 0x00ff /**< bit 7:0 */ +#define PHY_TXC5_AXTXPWR_OFFSET_C2_SHIFT 0 + +/* XtraFrameTypes */ +#define XFTS_RTS_FT_SHIFT 2 +#define XFTS_FBRRTS_FT_SHIFT 4 +#define XFTS_CHANNEL_SHIFT 8 + +/** Antenna diversity bit in ant_wr_settle */ +#define PHY_AWS_ANTDIV 0x2000 + +/* IFS ctl */ +#define IFS_USEEDCF (1 << 2) + +/* IFS ctl1 */ +#define IFS_CTL1_EDCRS (1 << 3) +#define IFS_CTL1_EDCRS_20L (1 << 4) +#define IFS_CTL1_EDCRS_40 (1 << 5) +#define IFS_EDCRS_MASK (IFS_CTL1_EDCRS | IFS_CTL1_EDCRS_20L | IFS_CTL1_EDCRS_40) +#define IFS_EDCRS_SHIFT 3 + +/* IFS ctl sel pricrs */ +#define IFS_CTL_CRS_SEL_20LL 1 +#define IFS_CTL_CRS_SEL_20LU 2 +#define IFS_CTL_CRS_SEL_20UL 4 +#define IFS_CTL_CRS_SEL_20UU 8 +#define IFS_CTL_CRS_SEL_MASK (IFS_CTL_CRS_SEL_20LL | IFS_CTL_CRS_SEL_20LU | \ + IFS_CTL_CRS_SEL_20UL | IFS_CTL_CRS_SEL_20UU) +#define IFS_CTL_ED_SEL_20LL (1 << 8) +#define IFS_CTL_ED_SEL_20LU (1 << 9) +#define IFS_CTL_ED_SEL_20UL (1 << 10) +#define IFS_CTL_ED_SEL_20UU (1 << 11) +#define IFS_CTL_ED_SEL_MASK (IFS_CTL_ED_SEL_20LL | IFS_CTL_ED_SEL_20LU | \ + IFS_CTL_ED_SEL_20UL | IFS_CTL_ED_SEL_20UU) + +/* ABI_MimoAntSel */ +#define ABI_MAS_ADDR_BMP_IDX_MASK 0x0f00 +#define ABI_MAS_ADDR_BMP_IDX_SHIFT 8 +#define ABI_MAS_FBR_ANT_PTN_MASK 0x00f0 +#define ABI_MAS_FBR_ANT_PTN_SHIFT 4 +#define ABI_MAS_MRT_ANT_PTN_MASK 0x000f + +#ifdef WLAWDL +#define ABI_MAS_AWDL_TS_INSERT 0x1000 /**< bit 12 */ +#endif + +#define ABI_MAS_TIMBC_TSF 0x2000 /**< Enable TIMBC tsf field present */ + +/* MinMBytes */ +#define MINMBYTES_PKT_LEN_MASK 0x0300 +#define MINMBYTES_FBRATE_PWROFFSET_MASK 0xFC00 +#define MINMBYTES_FBRATE_PWROFFSET_SHIFT 10 + +/* Rev40 template constants */ + +/** templates include a longer PLCP header that matches the MAC / PHY interface */ +#define D11_VHT_PLCP_LEN 12 + +/* 11AC TX DMA buffer header */ + +#define D11AC_TXH_NUM_RATES 4 + +/** per rate info - rev40 */ +typedef struct d11actxh_rate d11actxh_rate_t; +BWL_PRE_PACKED_STRUCT struct d11actxh_rate { + uint16 PhyTxControlWord_0; /* 0 - 1 */ + uint16 PhyTxControlWord_1; /* 2 - 3 */ + uint16 PhyTxControlWord_2; /* 4 - 5 */ + uint8 plcp[D11_PHY_HDR_LEN]; /* 6 - 11 */ + uint16 FbwInfo; /* 12 -13, fall back bandwidth info */ + uint16 TxRate; /* 14 */ + uint16 RtsCtsControl; /* 16 */ + uint16 Bfm0; /* 18 */ +} BWL_POST_PACKED_STRUCT; + +/* Bit definition for FbwInfo field */ +#define FBW_BW_MASK 3 +#define FBW_BW_SHIFT 0 +#define FBW_TXBF 4 +#define FBW_TXBF_SHIFT 2 +/* this needs to be re-visited if we want to 
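+
+/*
+ * The HTphy fields above pack a 4-bit core-enable bitmap and a 4-bit antenna
+ * configuration index into one PhyTxControl word. A minimal packing sketch,
+ * using only the masks defined above (ex_ name illustrative only):
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static uint16
+ex_htphy_pack_ant(uint16 phytxctl, uint16 core_bmp, uint16 ant_idx)
+{
+	phytxctl &= ~PHY_TXC_HTANT_MASK;	/* clear bits 13:6 */
+	phytxctl |= (core_bmp << PHY_TXC_HTCORE_SHIFT) & PHY_TXC_HTCORE_MASK;
+	phytxctl |= (ant_idx << PHY_TXC_HTANT_IDX_SHIFT) & PHY_TXC_HTANT_IDX_MASK;
+	return phytxctl;
+}
+#endif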
use this feature */ +#define FBW_BFM0_TXPWR_MASK 0x1F8 +#define FBW_BFM0_TXPWR_SHIFT 3 +#define FBW_BFM_TXPWR_MASK 0x7E00 +#define FBW_BFM_TXPWR_SHIFT 9 + +/* Bit definition for Bfm0 field */ +#define BFM0_TXPWR_MASK 0x3f +#define BFM0_STBC_SHIFT 6 +#define BFM0_STBC (1 << BFM0_STBC_SHIFT) +/* should find a chance to converge the two */ +#define D11AC2_BFM0_TXPWR_MASK 0x7f +#define D11AC2_BFM0_STBC_SHIFT 7 +#define D11AC2_BFM0_STBC (1 << D11AC2_BFM0_STBC_SHIFT) + +/* per packet info */ +typedef struct d11pktinfo_common d11pktinfo_common_t; +typedef struct d11pktinfo_common d11actxh_pkt_t; +BWL_PRE_PACKED_STRUCT struct d11pktinfo_common { + /* Per pkt info */ + uint16 TSOInfo; /* 0 */ + uint16 MacTxControlLow; /* 2 */ + uint16 MacTxControlHigh; /* 4 */ + uint16 Chanspec; /* 6 */ + uint8 IVOffset; /* 8 */ + uint8 PktCacheLen; /* 9 */ + uint16 FrameLen; /* 10. In [bytes] units. */ + uint16 TxFrameID; /* 12 */ + uint16 Seq; /* 14 */ + uint16 Tstamp; /* 16 */ + uint16 TxStatus; /* 18 */ +} BWL_POST_PACKED_STRUCT; + +/* common cache info between rev40 and rev80 formats */ +typedef struct d11txh_cache_common d11txh_cache_common_t; +BWL_PRE_PACKED_STRUCT struct d11txh_cache_common { + uint8 BssIdEncAlg; /* 0 */ + uint8 KeyIdx; /* 1 */ + uint8 PrimeMpduMax; /* 2 */ + uint8 FallbackMpduMax; /* 3 */ + uint16 AmpduDur; /* 4 - 5 */ + uint8 BAWin; /* 6 */ + uint8 MaxAggLen; /* 7 */ +} BWL_POST_PACKED_STRUCT; + +/** Per cache info - rev40 */ +typedef struct d11actxh_cache d11actxh_cache_t; +BWL_PRE_PACKED_STRUCT struct d11actxh_cache { + d11txh_cache_common_t common; /* 0 - 7 */ + uint8 TkipPH1Key[10]; /* 8 - 17 */ + uint8 TSCPN[6]; /* 18 - 23 */ +} BWL_POST_PACKED_STRUCT; + +/** Long format tx descriptor - rev40 */ +typedef struct d11actxh d11actxh_t; +BWL_PRE_PACKED_STRUCT struct d11actxh { + /* Per pkt info */ + d11actxh_pkt_t PktInfo; /* 0 - 19 */ + + union { + + /** Rev 40 to rev 63 layout */ + struct { + /** Per rate info */ + d11actxh_rate_t RateInfo[D11AC_TXH_NUM_RATES]; /* 20 - 99 */ + + /** Per cache info */ + d11actxh_cache_t CacheInfo; /* 100 - 123 */ + } rev40; + + /** Rev >= 64 layout */ + struct { + /** Per cache info */ + d11actxh_cache_t CacheInfo; /* 20 - 43 */ + + /** Per rate info */ + d11actxh_rate_t RateInfo[D11AC_TXH_NUM_RATES]; /* 44 - 123 */ + } rev64; + + }; +} BWL_POST_PACKED_STRUCT; + +#define D11AC_TXH_LEN sizeof(d11actxh_t) /* 124 bytes */ + +/* Short format tx descriptor only has per packet info */ +#define D11AC_TXH_SHORT_LEN sizeof(d11actxh_pkt_t) /* 20 bytes */ + +/* -TXDC-TxH Excluding Rate Info 41 bytes (Note 1 byte of RATEINFO is removed */ +#define D11AC_TXH_SHORT_EXT_LEN (sizeof(d11txh_rev80_t) - 1) + +/* Retry limit regs */ +/* Current retries for the fallback rates are hardcoded */ +#define D11AC_TXDC_SRL_FB (3u) /* Short Retry Limit - Fallback */ +#define D11AC_TXDC_LRL_FB (2u) /* Long Retry Limit - Fallback */ + +#define D11AC_TXDC_RET_LIM_MASK (0x000Fu) +#define D11AC_TXDC_SRL_SHIFT (0u) /* Short Retry Limit */ +#define D11AC_TXDC_SRL_FB_SHIFT (4u) /* Short Retry Limit - Fallback */ +#define D11AC_TXDC_LRL_SHIFT (8u) /* Long Retry Limit */ +#define D11AC_TXDC_LRL_FB_SHIFT (12u) /* Long Retry Limit - Fallback */ + +/* MacTxControlLow */ +#define D11AC_TXC_HDR_FMT_SHORT 0x0001 /**< 0: long format, 1: short format */ +#define D11AC_TXC_UPD_CACHE 0x0002 +#define D11AC_TXC_CACHE_IDX_MASK 0x003C /**< Cache index 0 .. 
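+
+/*
+ * The rev40 and rev64 long-format layouts above carry the same CacheInfo and
+ * RateInfo blocks in opposite order, so accessors normally dispatch on core
+ * revision. A minimal sketch, assuming D11REV_GE() from this header (ex_
+ * name illustrative only):
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static d11actxh_rate_t *
+ex_txh_rate_info(d11actxh_t *txh, uint corerev)
+{
+	/* same 124-byte descriptor; only the member order differs */
+	return D11REV_GE(corerev, 64) ? txh->rev64.RateInfo : txh->rev40.RateInfo;
+}
+#endif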
15 */ +#define D11AC_TXC_CACHE_IDX_SHIFT 2 + +#define D11AC_TXDC_IDX_SHIFT 1 +#define D11AC_TXDC_CPG_SHIFT 5 +#define D11REV80_TXDC_RIB_CPG 0x0020 /**< Cache Index CPG (Bit 5) -TXDC- */ +#define D11REV80_TXDC_RIB_DEL_MASK 0x001E /**< Cache index CIPX 0 .. 15 (Bit 1-4 -TXDC- */ +#define D11REV80_TXDC_RIB_IMM_MASK 0x003E /**< Cache index CIPX 0 .. 31 (Bit 1-5) -TXDC- */ +#define D11AC_TXC_AMPDU 0x0040 /**< Is aggregate-able */ +#define D11AC_TXC_IACK 0x0080 /**< Expect immediate ACK */ +#define D11AC_TXC_LFRM 0x0100 /**< Use long/short retry frame count/limit */ +#define D11AC_TXC_IPMQ 0x0200 /**< Ignore PMQ */ +#define D11AC_TXC_MBURST 0x0400 /**< Burst mode */ +#define D11AC_TXC_ASEQ 0x0800 /**< Add ucode generated seq num */ +#define D11AC_TXC_AGING 0x1000 /**< Use lifetime */ +#define D11AC_TXC_AMIC 0x2000 /**< Compute and add TKIP MIC */ +#define D11AC_TXC_STMSDU 0x4000 /**< First MSDU */ +#define D11AC_TXC_URIFS 0x8000 /**< Use RIFS */ + +/* MacTxControlHigh */ +#define D11AC_TXC_DISFCS 0x0001 /**< Discard FCS */ +#define D11AC_TXC_FIX_RATE 0x0002 /**< Use primary rate only */ +#define D11AC_TXC_SVHT 0x0004 /**< Single VHT mpdu ampdu */ +#define D11AC_TXC_PPS 0x0008 /**< Enable PS Pretend feature */ +#define D11AC_TXC_UCODE_SEQ 0x0010 /* Sequence counter for BK traffic, for offloads */ +#define D11AC_TXC_TIMBC_TSF 0x0020 /**< Enable TIMBC tsf field present */ +#define D11AC_TXC_TCPACK 0x0040 +#define D11AC_TXC_AWDL_PHYTT 0x0080 /**< Fill in PHY Transmission Time for AWDL action frames */ +#define D11AC_TXC_TOF 0x0100 /**< Enable wifi ranging processing for rxd frames */ +#define D11AC_TXC_MU 0x0200 /**< MU Tx data */ +#define D11AC_TXC_BFIX 0x0800 /**< BFI from SHMx */ +#define D11AC_TXC_NORETRY 0x0800 /**< Disable retry for tsync frames */ +#define D11AC_TXC_UFP 0x1000 /**< UFP */ +#define D11AC_TXC_OVERRIDE_NAV 0x1000 /**< if set, ucode will tx without honoring NAV */ +#define D11AC_TXC_DYNBW 0x2000 /**< Dynamic BW */ +#define D11AC_TXC_TXPROF_EN 0x8000 /**< TxProfile Enable TODO: support multiple idx */ +#define D11AC_TXC_SLTF 0x8000 /**< 11az Secure Ranging frame */ + +#define D11AC_TSTAMP_SHIFT 8 /**< Tstamp in 256us units */ + +/* PhyTxControlWord_0 */ +#define D11AC_PHY_TXC_FT_MASK 0x0003 + +/* vht txctl0 */ +#define D11AC_PHY_TXC_NON_SOUNDING 0x0004 +#define D11AC_PHY_TXC_BFM 0x0008 +#define D11AC_PHY_TXC_SHORT_PREAMBLE 0x0010 +#define D11AC2_PHY_TXC_STBC 0x0020 +#define D11AC_PHY_TXC_ANT_MASK 0x3FC0 +#define D11AC_PHY_TXC_CORE_MASK 0x03C0 +#define D11AC_PHY_TXC_CORE_SHIFT 6 +#define D11AC_PHY_TXC_ANT_IDX_MASK 0x3C00 +#define D11AC_PHY_TXC_ANT_IDX_SHIFT 10 +#define D11AC_PHY_TXC_BW_MASK 0xC000 +#define D11AC_PHY_TXC_BW_SHIFT 14 +#define D11AC_PHY_TXC_BW_20MHZ 0x0000 +#define D11AC_PHY_TXC_BW_40MHZ 0x4000 +#define D11AC_PHY_TXC_BW_80MHZ 0x8000 +#define D11AC_PHY_TXC_BW_160MHZ 0xC000 + +/* PhyTxControlWord_1 */ +#define D11AC_PHY_TXC_PRIM_SUBBAND_MASK 0x0007 +#define D11AC_PHY_TXC_PRIM_SUBBAND_LLL 0x0000 +#define D11AC_PHY_TXC_PRIM_SUBBAND_LLU 0x0001 +#define D11AC_PHY_TXC_PRIM_SUBBAND_LUL 0x0002 +#define D11AC_PHY_TXC_PRIM_SUBBAND_LUU 0x0003 +#define D11AC_PHY_TXC_PRIM_SUBBAND_ULL 0x0004 +#define D11AC_PHY_TXC_PRIM_SUBBAND_ULU 0x0005 +#define D11AC_PHY_TXC_PRIM_SUBBAND_UUL 0x0006 +#define D11AC_PHY_TXC_PRIM_SUBBAND_UUU 0x0007 +#define D11AC_PHY_TXC_TXPWR_OFFSET_MASK 0x01F8 +#define D11AC_PHY_TXC_TXPWR_OFFSET_SHIFT 3 +#define D11AC2_PHY_TXC_TXPWR_OFFSET_MASK 0x03F8 +#define D11AC2_PHY_TXC_TXPWR_OFFSET_SHIFT 3 +#define D11AC_PHY_TXC_TXBF_USER_IDX_MASK 0x7C00 +#define 
D11AC_PHY_TXC_TXBF_USER_IDX_SHIFT 10 +#define D11AC2_PHY_TXC_DELTA_TXPWR_OFFSET_MASK 0x7C00 +#define D11AC2_PHY_TXC_DELTA_TXPWR_OFFSET_SHIFT 10 +/* Rather awkward bit mapping to keep pctl1 word same as legacy, for proprietary 11n rate support */ +#define D11AC_PHY_TXC_11N_PROP_MCS 0x8000 /* this represents bit mcs[6] */ +#define D11AC2_PHY_TXC_MU 0x8000 + +/* PhyTxControlWord_2 phy rate */ +#define D11AC_PHY_TXC_PHY_RATE_MASK 0x003F +#define D11AC2_PHY_TXC_PHY_RATE_MASK 0x007F + +/* 11b phy rate */ +#define D11AC_PHY_TXC_11B_PHY_RATE_MASK 0x0003 +#define D11AC_PHY_TXC_11B_PHY_RATE_1 0x0000 +#define D11AC_PHY_TXC_11B_PHY_RATE_2 0x0001 +#define D11AC_PHY_TXC_11B_PHY_RATE_5_5 0x0002 +#define D11AC_PHY_TXC_11B_PHY_RATE_11 0x0003 + +/* 11a/g phy rate */ +#define D11AC_PHY_TXC_11AG_PHY_RATE_MASK 0x0007 +#define D11AC_PHY_TXC_11AG_PHY_RATE_6 0x0000 +#define D11AC_PHY_TXC_11AG_PHY_RATE_9 0x0001 +#define D11AC_PHY_TXC_11AG_PHY_RATE_12 0x0002 +#define D11AC_PHY_TXC_11AG_PHY_RATE_18 0x0003 +#define D11AC_PHY_TXC_11AG_PHY_RATE_24 0x0004 +#define D11AC_PHY_TXC_11AG_PHY_RATE_36 0x0005 +#define D11AC_PHY_TXC_11AG_PHY_RATE_48 0x0006 +#define D11AC_PHY_TXC_11AG_PHY_RATE_54 0x0007 + +/* 11ac phy rate */ +#define D11AC_PHY_TXC_11AC_MCS_MASK 0x000F +#define D11AC_PHY_TXC_11AC_NSS_MASK 0x0030 +#define D11AC_PHY_TXC_11AC_NSS_SHIFT 4 + +/* 11n phy rate */ +#define D11AC_PHY_TXC_11N_MCS_MASK 0x003F +#define D11AC2_PHY_TXC_11N_MCS_MASK 0x007F +#define D11AC2_PHY_TXC_11N_PROP_MCS 0x0040 /* this represents bit mcs[6] */ + +/* PhyTxControlWord_2 rest */ +#define D11AC_PHY_TXC_STBC 0x0040 +#define D11AC_PHY_TXC_DYN_BW_IN_NON_HT_PRESENT 0x0080 +#define D11AC_PHY_TXC_DYN_BW_IN_NON_HT_DYNAMIC 0x0100 +#define D11AC2_PHY_TXC_TXBF_USER_IDX_MASK 0xFE00 +#define D11AC2_PHY_TXC_TXBF_USER_IDX_SHIFT 9 + +/* RtsCtsControl */ +#define D11AC_RTSCTS_FRM_TYPE_MASK 0x0001 /**< frame type */ +#define D11AC_RTSCTS_FRM_TYPE_11B 0x0000 /**< 11b */ +#define D11AC_RTSCTS_FRM_TYPE_11AG 0x0001 /**< 11a/g */ +#define D11AC_RTSCTS_USE_RTS 0x0004 /**< Use RTS */ +#define D11AC_RTSCTS_USE_CTS 0x0008 /**< Use CTS */ +#define D11AC_RTSCTS_SHORT_PREAMBLE 0x0010 /**< Long/short preamble: 0 - long, 1 - short? 
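+
+/*
+ * Sketch of decoding the 11ac rate fields of PhyTxControlWord_2 with the
+ * masks above. The 2-bit NSS field covers four spatial streams and is
+ * commonly encoded as Nss - 1; confirm against the target ucode before
+ * relying on that (ex_ name illustrative only).
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static void
+ex_txc2_vht_rate(uint16 txc2, uint8 *mcs, uint8 *nss_field)
+{
+	*mcs = (uint8)(txc2 & D11AC_PHY_TXC_11AC_MCS_MASK);
+	*nss_field = (uint8)((txc2 & D11AC_PHY_TXC_11AC_NSS_MASK) >>
+	        D11AC_PHY_TXC_11AC_NSS_SHIFT);
+}
+#endif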
*/ +#define D11AC_RTSCTS_LAST_RATE 0x0020 /**< this is last rate */ +#define D11AC_RTSCTS_IMBF 0x0040 /**< Implicit TxBF */ +#define D11AC_RTSCTS_MIMOPS_RTS 0x8000 /**< Use RTS for mimops */ +#define D11AC_RTSCTS_DPCU_VALID 0x0080 /**< DPCU Valid : Same bitfield as above */ +#define D11AC_RTSCTS_BF_IDX_MASK 0xF000 /**< 4-bit index to the beamforming block */ +#define D11AC_RTSCTS_BF_IDX_SHIFT 12 +#define D11AC_RTSCTS_RATE_MASK 0x0F00 /**< Rate table offset: bit 3-0 of PLCP byte 0 */ +#define D11AC_RTSCTS_USE_RATE_SHIFT 8 + +/* BssIdEncAlg */ +#define D11AC_BSSID_MASK 0x000F /**< BSS index */ +#define D11AC_BSSID_SHIFT 0 +#define D11AC_ENCRYPT_ALG_MASK 0x00F0 /**< Encryption algorithm */ +#define D11AC_ENCRYPT_ALG_SHIFT 4 +#define D11AC_ENCRYPT_ALG_NOSEC 0x0000 /**< No security */ +#define D11AC_ENCRYPT_ALG_WEP 0x0010 /**< WEP */ +#define D11AC_ENCRYPT_ALG_TKIP 0x0020 /**< TKIP */ +#define D11AC_ENCRYPT_ALG_AES 0x0030 /**< AES */ +#define D11AC_ENCRYPT_ALG_WEP128 0x0040 /**< WEP128 */ +#define D11AC_ENCRYPT_ALG_NA 0x0050 /**< N/A */ +#define D11AC_ENCRYPT_ALG_WAPI 0x0060 /**< WAPI */ + +/* AmpduDur */ +#define D11AC_AMPDU_MIN_DUR_IDX_MASK 0x000F /**< AMPDU minimum duration index */ +#define D11AC_AMPDU_MIN_DUR_IDX_SHIFT 0 +#define D11AC_AMPDU_MAX_DUR_MASK 0xFFF0 /**< AMPDU maximum duration in unit 16 usec */ +#define D11AC_AMPDU_MAX_DUR_SHIFT 4 + +/** + * TX Descriptor definitions for supporting rev80 (HE) + */ +/* Maximum number of TX fallback rates per packet */ +#define D11_REV80_TXH_NUM_RATES 4 +#define D11_REV80_TXH_PHYTXCTL_MIN_LENGTH 1 + +/** per rate info - fixed portion - rev80 */ +typedef struct d11txh_rev80_rate_fixed d11txh_rev80_rate_fixed_t; +BWL_PRE_PACKED_STRUCT struct d11txh_rev80_rate_fixed { + uint16 TxRate; /* rate in 500Kbps */ + uint16 RtsCtsControl; /* RTS - CTS control */ + uint8 plcp[D11_PHY_HDR_LEN]; /* 6 bytes */ +} BWL_POST_PACKED_STRUCT; + +/* rev80 specific per packet info fields */ +typedef struct d11pktinfo_rev80 d11pktinfo_rev80_t; +BWL_PRE_PACKED_STRUCT struct d11pktinfo_rev80 { + uint16 HEModeControl; /* 20 */ + uint16 length; /* 22 - length of txd in bytes */ +} BWL_POST_PACKED_STRUCT; + +#define D11_REV80_TXH_TX_MODE_SHIFT 0 /* Bits 2:0 of HeModeControl */ +#define D11_REV80_TXH_TX_MODE_MASK 0x3 +#define D11_REV80_TXH_HTC_OFFSET_SHIFT 4 /* Bits 8:4 of HeModeControl */ +#define D11_REV80_TXH_HTC_OFFSET_MASK 0x01F0u +#define D11_REV80_TXH_TWT_EOSP 0x0200u /* bit 9 indicates TWT EOSP */ +#define D11_REV80_TXH_QSZ_QOS_CTL_IND_SHIFT 10 /* Bit 10 of HeModeControl */ +#define D11_REV80_TXH_QSZ_QOS_CTL_IND_MASK (1 << D11_REV80_TXH_QSZ_QOS_CTL_IND_SHIFT) +#define D11_REV80_TXH_USE_BSSCOLOR_SHM_SHIFT 15 /* Bit 15 of HEModeControl */ +#define D11_REV80_TXH_USE_BSSCOLOR_SHM_MASK (1 << D11_REV80_TXH_USE_BSSCOLOR_SHM_SHIFT) + +/* Calculate Length for short format TXD */ +#define D11_TXH_SHORT_LEN(__corerev__) (D11REV_GE(__corerev__, 80) ? \ + D11_REV80_TXH_SHORT_LEN : \ + D11AC_TXH_SHORT_LEN) + +/* Calculate Length for short format TXD (TXDC and/or FMF) */ +#define D11_TXH_SHORT_EX_LEN(__corerev__) (D11REV_GE(__corerev__, 80) ?
\ + D11_REV80_TXH_SHORT_EX_LEN : \ + D11AC_TXH_SHORT_LEN) + +#define D11_REV80_TXH_IS_HE_AMPDU_SHIFT 11 /* Bit 11 of HeModeControl */ +#define D11_REV80_TXH_IS_HE_AMPDU_MASK (1 << D11_REV80_TXH_IS_HE_AMPDU_SHIFT) + +#define D11_REV80_PHY_TXC_EDCA 0x00 +#define D11_REV80_PHY_TXC_OFDMA_RA 0x01 /* Use Random Access Trigger for Tx */ +#define D11_REV80_PHY_TXC_OFDMA_DT 0x02 /* Use Directed Trigger for Tx */ +#define D11_REV80_PHY_TXC_OFDMA_ET 0x03 /* Use earliest Trigger Opportunity */ + +/** Per cache info - rev80 */ +typedef struct d11txh_rev80_cache d11txh_rev80_cache_t; +BWL_PRE_PACKED_STRUCT struct d11txh_rev80_cache { + d11txh_cache_common_t common; /* 0 - 7 */ + uint16 ampdu_mpdu_all; /* 8 - 9 */ + uint16 aggid; /* 10 - 11 */ + uint8 tkipph1_index; /* 12 */ + uint8 pktext; /* 13 */ + uint16 hebid_map; /* 14 - 15: HEB ID bitmap */ +} BWL_POST_PACKED_STRUCT; + +/** Fixed size portion of TX descriptor - rev80 */ +typedef struct d11txh_rev80 d11txh_rev80_t; +BWL_PRE_PACKED_STRUCT struct d11txh_rev80 { + /** + * Per pkt info fields (common + rev80 specific) + * + * Note : Ensure that PktInfo field is always the first member + * of the d11txh_rev80 struct (that is at OFFSET - 0) + */ + d11pktinfo_common_t PktInfo; /* 0 - 19 */ + d11pktinfo_rev80_t PktInfoExt; /* 20 - 23 */ + + /** Per cache info */ + d11txh_rev80_cache_t CacheInfo; /* 24 - 39 */ + + /** + * D11_REV80_TXH_NUM_RATES number of Rate Info blocks + * contribute to the variable size portion of the TXD. + * Each Rate Info element (block) is a function of + * (N_PwrOffset, N_RU, N_User). + */ + uint8 RateInfoBlock[1]; +} BWL_POST_PACKED_STRUCT; + +/* Size of fixed portion in TX descriptor (without CacheInfo(Link info) and RateInfoBlock); + * this portion never changes regardless of TXDC/FMF support. + */ +/* OFFSETOF() is available in bcmutils.h but including it will cause + * recursive inclusion of d11.h specifically on NDIS platforms. + */ +#ifdef BCMFUZZ + /* use 0x10 offset to avoid undefined behavior error due to NULL access */ +#define D11_REV80_TXH_FIXED_LEN (((uint)(uintptr)&((d11txh_rev80_t *)0x10)->CacheInfo) - 0x10) +#else +#define D11_REV80_TXH_FIXED_LEN ((uint)(uintptr)&((d11txh_rev80_t *)0)->CacheInfo) +#endif /* BCMFUZZ */ + +/* Short format tx descriptor only has per packet info (24 bytes) */ +#define D11_REV80_TXH_SHORT_LEN (sizeof(d11pktinfo_common_t) + sizeof(d11pktinfo_rev80_t)) + +/* Size of CacheInfo(Link info) in TX descriptor */ +#define D11_REV80_TXH_LINK_INFO_LEN (sizeof(d11txh_rev80_cache_t)) + +/* Size of Short format TX descriptor + * with TXDC - Short TXD(40 bytes) shall include PktInfo and Cache info without Rate info + * with TXDC+FMF - Short TXD(24 bytes) shall include PktInfo only without Link info and Rate info + * do NOT use D11_REV80_TXH_SHORT_EX_LEN to calculate long TXD length, value depends on FMF feature + */ +#if defined(FMF_LIT) && !defined(FMF_LIT_DISABLED) +#define D11_REV80_TXH_SHORT_EX_LEN D11_REV80_TXH_FIXED_LEN +#else +#define D11_REV80_TXH_SHORT_EX_LEN (D11_REV80_TXH_FIXED_LEN + D11_REV80_TXH_LINK_INFO_LEN) +#endif /* FMF_LIT && !FMF_LIT_DISABLED */ + +/* Length of BFM0 field in RateInfo Blk */ +#define D11_REV80_TXH_BFM0_FIXED_LEN(pwr_offs) 2u + +/** + * Length of FBWInfo field in RateInfo Blk + * + * Note : for now return fixed length of 1 word + */ +#define D11_REV80_TXH_FBWINFO_FIXED_LEN(pwr_offs) 2 + +#define D11_REV80_TXH_FIXED_RATEINFO_LEN sizeof(d11txh_rev80_rate_fixed_t)
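+
+/*
+ * Sketch of picking the short TXD length for a given core revision with the
+ * selectors above; the txdc flag models whether the TXDC/FMF short-extended
+ * format is in use (ex_ name illustrative only).
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static uint
+ex_short_txd_len(uint corerev, bool txdc)
+{
+	return txdc ? D11_TXH_SHORT_EX_LEN(corerev) : D11_TXH_SHORT_LEN(corerev);
+}
+#endif
+
+/**
+ * Macros to find size of N-RUs field in the PhyTxCtlWord.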
+ */ +#define D11_REV80_TXH_TXC_N_RUs_FIELD_SIZE 1 +#define D11_REV80_TXH_TXC_PER_RU_INFO_SIZE 4 +#define D11_REV80_TXH_TXC_PER_RU_MIN_SIZE 2 + +#define D11_REV80_TXH_TXC_RU_FIELD_SIZE(n_rus) ((n_rus == 1) ? \ + (D11_REV80_TXH_TXC_PER_RU_MIN_SIZE) : \ + ((D11_REV80_TXH_TXC_N_RUs_FIELD_SIZE) + \ + ((n_rus) * D11_REV80_TXH_TXC_PER_RU_INFO_SIZE))) + +/** + * Macros to find size of N-Users field in the TXCTL_EXT + */ +#define D11_REV80_TXH_TXC_EXT_N_USERs_FIELD_SIZE 1 +#define D11_REV80_TXH_TXC_EXT_PER_USER_INFO_SIZE 4 + +#define D11_REV80_TXH_TXC_N_USERs_FIELD_SIZE(n_users) \ + ((n_users) ? \ + (((n_users) * \ + (D11_REV80_TXH_TXC_EXT_PER_USER_INFO_SIZE)) + \ + (D11_REV80_TXH_TXC_EXT_N_USERs_FIELD_SIZE)) : \ + (n_users)) + +/** + * Size of each Tx Power Offset field in PhyTxCtlWord. + */ +#define D11_REV80_TXH_TXC_PWR_OFFSET_SIZE 1u + +/** + * Size of fixed / static fields in PhyTxCtlWord (all fields except N-RUs, N-Users and Pwr offsets) + */ +#define D11_REV80_TXH_TXC_CONST_FIELDS_SIZE 6u + +/** + * Macros used for filling PhyTxCtlWord + */ + +/* PhyTxCtl Byte 0 */ +#define D11_REV80_PHY_TXC_FT_MASK 0x0007u +#define D11_REV80_PHY_TXC_HE_FMT_MASK 0x0018u +#define D11_REV80_PHY_TXC_SOFT_AP_MODE 0x0020u +#define D11_REV80_PHY_TXC_NON_SOUNDING 0x0040u +#define D11_REV80_PHY_TXC_SHORT_PREAMBLE 0x0080u +#define D11_REV80_PHY_TXC_FRAME_TYPE_VHT 0X0003u +#define D11_REV80_PHY_TXC_FRAME_TYPE_HT 0X0002u +#define D11_REV80_PHY_TXC_FRAME_TYPE_LEG 0X0001u + +#define D11_REV80_PHY_TXC_HE_FMT_SHIFT 3u + +/* PhyTxCtl Byte 1 */ +#define D11_REV80_PHY_TXC_STBC 0x0080u + +/* PhyTxCtl Word 1 (Bytes 2 - 3) */ +#define D11_REV80_PHY_TXC_DPCU_SUBBAND_SHIFT 5u +#define D11_REV80_PHY_TXC_DYNBW_PRESENT 0x2000u +#define D11_REV80_PHY_TXC_DYNBW_MODE 0x4000u +#define D11_REV80_PHY_TXC_MU 0x8000u +#define D11_REV80_PHY_TXC_BW_MASK 0x0003u +#define D11_REV80_PHY_TXC_BW_20MHZ 0x0000u +#define D11_REV80_PHY_TXC_BW_40MHZ 0x0001u +#define D11_REV80_PHY_TXC_BW_80MHZ 0x0002u +#define D11_REV80_PHY_TXC_BW_160MHZ 0x0003u +/* PhyTxCtl Word 2 (Bytes 4 -5) */ +/* Though the width antennacfg, coremask fields are 8-bits, + * only 4 bits is valid for 4369a0, hence masking only 4 bits + */ +#define D11_REV80_PHY_TXC_ANT_CONFIG_MASK 0x00F0u +#define D11_REV80_PHY_TXC_CORE_MASK 0x000Fu +#define D11_REV80_PHY_TXC_ANT_CONFIG_SHIFT 4u +/* upper byte- Ant. cfg, lower byte - Core */ +#define D11_REV80_PHY_TXC_ANT_CORE_MASK 0x0F0Fu + +/* PhyTxCtl BFM field */ +#define D11_REV80_PHY_TXC_BFM 0x80u + +/* PhyTxCtl power offsets */ +#define D11_REV80_PHY_TXC_PWROFS0_BYTE_POS 6u + +/* Phytx Ctl Sub band location */ +#define D11_REV80_PHY_TXC_SB_SHIFT 2u +#define D11_REV80_PHY_TXC_SB_MASK 0x001Cu + +/* 11n phy rate */ +#define D11_REV80_PHY_TXC_11N_MCS_MASK 0x003Fu +#define D11_REV80_PHY_TXC_11N_PROP_MCS 0x0040u /* this represents bit mcs[6] */ + +/* 11ac phy rate */ +#define D11_REV80_PHY_TXC_11AC_NSS_SHIFT 4u + +/* PhyTxCtl Word0 */ +#define D11_REV80_PHY_TXC_MCS_NSS_MASK 0x7F00u +#define D11_REV80_PHY_TXC_MCS_MASK 0xF00u +#define D11_REV80_PHY_TXC_MCS_NSS_SHIFT 8u + +/* 11ax phy rate */ +#define D11_REV80_PHY_TXC_11AX_NSS_SHIFT 4u + +#define D11_PHY_TXC_FT_MASK(corerev) ((D11REV_GE(corerev, 80)) ? 
D11_REV80_PHY_TXC_FT_MASK : \ + D11AC_PHY_TXC_FT_MASK) + +/* PhyTxCtl Word 4 */ +#define D11_REV80_PHY_TXC_HEHL_ENABLE 0x2000u + +/* PhyTxCtl Word 5 */ +#define D11_REV80_PHY_TXC_CORE0_PWR_OFFSET_SHIFT 8u +#define D11_REV80_PHY_TXC_CORE0_PWR_OFFSET_MASK 0xFF00u +/* PhyTxCtl Word 6 */ +#define D11_REV80_PHY_TXC_CORE1_PWR_OFFSET_MASK 0x00FFu +/* Number of RU assigned */ +#define D11_REV80_PHY_TXC_NRU 0x0100u + +/* A wrapper structure for all versions of TxD/d11txh structures */ +typedef union d11txhdr { + d11txh_pre40_t pre40; + d11actxh_t rev40; + d11txh_rev80_t rev80; +} d11txhdr_t; + +/** + * Generic tx status packet for software use. This is independent of hardware + * structure for a particular core. Hardware structure should be read and converted + * to this structure before being sent for software consumption. + */ +typedef struct tx_status tx_status_t; +typedef struct tx_status_macinfo tx_status_macinfo_t; + +BWL_PRE_PACKED_STRUCT struct tx_status_macinfo { + int8 pad0; + int8 is_intermediate; + int8 pm_indicated; + int8 pad1; + uint8 suppr_ind; + int8 was_acked; + uint16 rts_tx_cnt; + uint16 frag_tx_cnt; + uint16 cts_rx_cnt; + uint16 raw_bits; + uint32 s3; + uint32 s4; + uint32 s5; + uint32 s8; + uint32 s9; + uint32 s10; + uint32 s11; + uint32 s12; + uint32 s13; + uint32 s14; + /* 128BA support */ + uint16 ncons_ext; + uint16 s15; + uint32 ack_map[8]; + /* pktlat */ + uint16 pkt_fetch_ts; /* PSM Packet Fetch Time */ + uint16 med_acc_dly; /* Medium Access Delay */ + uint16 rx_dur; /* Rx duration */ + uint16 mac_susp_dur; /* Mac Suspend Duration */ + uint16 txstatus_ts; /* TxStatus Time */ + uint16 tx_en_cnt; /* Number of times Tx was enabled */ + uint16 oac_txs_cnt; /* Other AC TxStatus count */ + uint16 data_retx_cnt; /* DataRetry count */ + uint16 pktlat_rsvd; /* reserved */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct tx_status { + uint16 framelen; + uint16 frameid; + uint16 sequence; + uint16 phyerr; + uint32 lasttxtime; + uint16 ackphyrxsh; + uint16 procflags; /* tx status processing flags */ + uint32 dequeuetime; + tx_status_macinfo_t status; +} BWL_POST_PACKED_STRUCT; + +/* Bits in struct tx_status procflags */ +#define TXS_PROCFLAG_AMPDU_BA_PKG2_READ_REQD 0x1 /* AMPDU BA txs pkg2 read required */ + +/* status field bit definitions */ +#define TX_STATUS_FRM_RTX_MASK 0xF000 +#define TX_STATUS_FRM_RTX_SHIFT 12 +#define TX_STATUS_RTS_RTX_MASK 0x0F00 +#define TX_STATUS_RTS_RTX_SHIFT 8 +#define TX_STATUS_MASK 0x00FE +#define TX_STATUS_PMINDCTD (1 << 7) /**< PM mode indicated to AP */ +#define TX_STATUS_INTERMEDIATE (1 << 6) /**< intermediate or 1st ampdu pkg */ +#define TX_STATUS_AMPDU (1 << 5) /**< AMPDU status */ +#define TX_STATUS_SUPR_MASK 0x1C /**< suppress status bits (4:2) */ +#define TX_STATUS_SUPR_SHIFT 2 +#define TX_STATUS_ACK_RCV (1 << 1) /**< ACK received */ +#define TX_STATUS_VALID (1 << 0) /**< Tx status valid (corerev >= 5) */ +#define TX_STATUS_NO_ACK 0 +#define TX_STATUS_BE (TX_STATUS_ACK_RCV | TX_STATUS_PMINDCTD) + +/* TX_STATUS for fw initiated pktfree event */ +#define TX_STATUS_SW_Q_FLUSH 0x10000 + +/* status field bit definitions phy rev > 40 */ +#define TX_STATUS40_FIRST 0x0002 +#define TX_STATUS40_INTERMEDIATE 0x0004 +#define TX_STATUS40_PMINDCTD 0x0008 + +#define TX_STATUS40_SUPR 0x00f0 +#define TX_STATUS40_SUPR_SHIFT 4 + +#define TX_STATUS40_NCONS 0x7f00 + +#define TX_STATUS40_NCONS_SHIFT 8 + +#define TX_STATUS40_ACK_RCV 0x8000 + +/* tx status bytes 8-16 */ +#define TX_STATUS40_TXCNT_RATE0_MASK 0x000000ff +#define
TX_STATUS40_TXCNT_RATE0_SHIFT 0 + +#define TX_STATUS40_TXCNT_RATE1_MASK 0x00ff0000 +#define TX_STATUS40_TXCNT_RATE1_SHIFT 16 + +#define TX_STATUS40_MEDIUM_DELAY_MASK 0xFFFF + +#define TX_STATUS40_TXCNT(s3, s4) \ + (((s3 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT) + \ + ((s3 & TX_STATUS40_TXCNT_RATE1_MASK) >> TX_STATUS40_TXCNT_RATE1_SHIFT) + \ + ((s4 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT) + \ + ((s4 & TX_STATUS40_TXCNT_RATE1_MASK) >> TX_STATUS40_TXCNT_RATE1_SHIFT)) + +#define TX_STATUS40_TXCNT_RT0(s3) \ + ((s3 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT) + +#define TX_STATUS_EXTBA_TXCNT_BITS 0x3u +#define TX_STATUS_EXTBA_TXSUCCNT_BITS 0x1u +#define TX_STATUS_EXTBA_TXSIZE_RT 0x4u + +#define TX_STATUS_EXTBA_TXCNT_RATE_MASK 0x7u +#define TX_STATUS_EXTBA_TXSUCCNT_RATE_MASK 0x8u + +#define TX_STATUS_EXTBA_TXCNT_RATE_SHIFT 0x8u +#define TX_STATUS_EXTBA_TXSUCCNT_RATE_SHIFT 0x8u + +#define TX_STATUS_EXTBA_TXCNT_RT(s15, rt) \ + ((((s15) & (TX_STATUS_EXTBA_TXCNT_RATE_MASK << ((rt) * TX_STATUS_EXTBA_TXSIZE_RT))) >> \ + ((rt) * TX_STATUS_EXTBA_TXSIZE_RT)) << TX_STATUS_EXTBA_TXCNT_RATE_SHIFT) + +#define TX_STATUS_EXTBA_TXSUCCNT_RT(s15, rt) \ + ((((s15) & (TX_STATUS_EXTBA_TXSUCCNT_RATE_MASK << ((rt) * TX_STATUS_EXTBA_TXSIZE_RT))) >> \ + (((rt) * TX_STATUS_EXTBA_TXSIZE_RT))) << TX_STATUS_EXTBA_TXSUCCNT_RATE_SHIFT) + +#define TX_STATUS40_TX_MEDIUM_DELAY(txs) ((txs)->status.s8 & TX_STATUS40_MEDIUM_DELAY_MASK) + +/* chip rev 40 pkg 2 fields */ +#define TX_STATUS40_IMPBF_MASK 0x0000000Cu /* implicit bf applied */ +#define TX_STATUS40_IMPBF_BAD_MASK 0x00000010u /* impl bf applied but ack frm has no bfm */ +#define TX_STATUS40_IMPBF_LOW_MASK 0x00000020u /* ack received with low rssi */ +#define TX_STATUS40_BFTX 0x00000040u /* Beamformed pkt TXed */ +/* pkt two status field bit definitions mac rev > 64 */ +#define TX_STATUS64_MUTX 0x00000080u /* Not used in STA-dongle chips */ + +/* pkt two status field bit definitions mac rev > 80 */ + +/* TXS rate cookie contains + * mac rev 81/82 : RIT idx in bit[4:0] of RIB CtrlStat[0] + * mac rev >= 83 : RIB version in bit[4:0] of RIB CtrlStat[1] + */ +#define TX_STATUS80_RATE_COOKIE_MASK 0x00003E00u +#define TX_STATUS80_RATE_COOKIE_SHIFT 9u +#define TX_STATUS80_NAV_HDR 0x00004000u /* NAV Overridden */ + +#define TX_STATUS80_TBPPDU_MASK 0x00000040u /* Indicates TBPPDU TX */ +#define TX_STATUS80_TBPPDU_SHIFT 6u +#define TX_STATUS40_RTS_RTX_MASK 0x00ff0000u +#define TX_STATUS40_RTS_RTX_SHIFT 16u +#define TX_STATUS40_CTS_RRX_MASK 0xff000000u +#define TX_STATUS40_CTS_RRX_SHIFT 24u + +/* + * Intermediate status for TBPPDU (for stats purposes) + * First uint16 word (word0 - status): VALID, !FIRST, INTERMEDIATE + * Remaining word0 bits (3 - 15) are unassigned + */ +#define TX_ITBSTATUS(status) \ + (((status) & (TX_STATUS40_FIRST | TX_STATUS40_INTERMEDIATE)) == TX_STATUS40_INTERMEDIATE) +/* Remainder of first uint32 (words 0 and 1) */ +#define TX_ITBSTATUS_LSIG_MASK 0x0000fff0u +#define TX_ITBSTATUS_LSIG_SHIFT 4u +#define TX_ITBSTATUS_TXPOWER_MASK 0xffff0000u +#define TX_ITBSTATUS_TXPOWER_SHIFT 16u +/* Second uint32 (words 2 and 3) */ +#define TX_ITBSTATUS_NULL_DELIMS_MASK 0x0007ffffu /* 19 bits * 4B => ~2M bytes */ +#define TX_ITBSTATUS_NULL_DELIMS_SHIFT 0u +#define TX_ITBSTATUS_ACKED_MPDUS_MASK 0x3ff80000u /* 11 bits: 0-2047 */ +#define TX_ITBSTATUS_ACKED_MPDUS_SHIFT 19u +/* Third uint32 (words 4 and 5) */ +#define TX_ITBSTATUS_SENT_MPDUS_MASK 0x0000ffe0u /* 11 bits: 0-2047 */ +#define
TX_ITBSTATUS_SENT_MPDUS_SHIFT 5u +#define TX_ITBSTATUS_APTXPWR_MASK 0x003f0000u /* 0-60 => -20 - 40 */ +#define TX_ITBSTATUS_APTXPWR_SHIFT 16u +#define TX_ITBSTATUS_ULPKTEXT_MASK 0x01c00000u +#define TX_ITBSTATUS_ULPKTEXT_SHIFT 22u +#define TX_ITBSTATUS_MORETF_MASK 0x02000000u +#define TX_ITBSTATUS_MORETF_SHIFT 25u +#define TX_ITBSTATUS_CSREQ_MASK 0x04000000u +#define TX_ITBSTATUS_CSREQ_SHIFT 26u +#define TX_ITBSTATUS_ULBW_MASK 0x18000000u +#define TX_ITBSTATUS_ULBW_SHIFT 27u +#define TX_ITBSTATUS_GI_LTF_MASK 0x60000000u +#define TX_ITBSTATUS_GI_LTF_SHIFT 29u +#define TX_ITBSTATUS_MUMIMO_LTF_MASK 0x80000000u +#define TX_ITBSTATUS_MUMIMO_LTF_SHIFT 30u +/* Fourth uint32 (words 6 and 7) */ +#define TX_ITBSTATUS_CODING_TYPE_MASK 0x00000001u +#define TX_ITBSTATUS_CODING_TYPE_SHIFT 0u +#define TX_ITBSTATUS_MCS_MASK 0x0000001eu +#define TX_ITBSTATUS_MCS_SHIFT 1u +#define TX_ITBSTATUS_DCM_MASK 0x00000020u +#define TX_ITBSTATUS_DCM_SHIFT 5u +#define TX_ITBSTATUS_RU_ALLOC_MASK 0x00003fc0u +#define TX_ITBSTATUS_RU_ALLOC_SHIFT 6u +/* Bits 14 and 15 unassigned */ +#define TX_ITBSTATUS_NSS_MASK 0x00030000u +#define TX_ITBSTATUS_NSS_SHIFT 16u +#define TX_ITBSTATUS_TARGET_RSSI_MASK 0x03fc0000u +#define TX_ITBSTATUS_TARGET_RSSI_SHIFT 18u +#define TX_ITBSTATUS_RA_RU_MASK 0x04000000u +#define TX_ITBSTATUS_RA_RU_SHIFT 26u +/* Bits 27 through 31 unassigned */ +/* End of intermediate TBPPDU txstatus definitions */ + +/* MU group info txstatus field (s3 b[31:16]) */ +#define TX_STATUS64_MU_GID_MASK 0x003f0000u +#define TX_STATUS64_MU_GID_SHIFT 16u +#define TX_STATUS64_MU_BW_MASK 0x00c00000u +#define TX_STATUS64_MU_BW_SHIFT 22u +#define TX_STATUS64_MU_TXPWR_MASK 0x7f000000u +#define TX_STATUS64_MU_TXPWR_SHIFT 24u +#define TX_STATUS64_MU_SGI_MASK 0x80000080u +#define TX_STATUS64_MU_SGI_SHIFT 31u +#define TX_STATUS64_INTERM_MUTXCNT(s3) \ + ((s3 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT) + +#define TX_STATUS64_MU_GID(s3) ((s3 & TX_STATUS64_MU_GID_MASK) >> TX_STATUS64_MU_GID_SHIFT) +#define TX_STATUS64_MU_BW(s3) ((s3 & TX_STATUS64_MU_BW_MASK) >> TX_STATUS64_MU_BW_SHIFT) +#define TX_STATUS64_MU_TXPWR(s3) ((s3 & TX_STATUS64_MU_TXPWR_MASK) >> TX_STATUS64_MU_TXPWR_SHIFT) +#define TX_STATUS64_MU_SGI(s3) ((s3 & TX_STATUS64_MU_SGI_MASK) >> TX_STATUS64_MU_SGI_SHIFT) + +/* MU user info0 txstatus field (s4 b[15:0]) */ +#define TX_STATUS64_MU_MCS_MASK 0x0000000f +#define TX_STATUS64_MU_MCS_SHIFT 0 +#define TX_STATUS64_MU_NSS_MASK 0x00000070 +#define TX_STATUS64_MU_NSS_SHIFT 4 +#define TX_STATUS64_MU_SNR_MASK 0x0000ff00 +#define TX_STATUS64_MU_SNR_SHIFT 8 + +#define TX_STATUS64_MU_MCS(s4) ((s4 & TX_STATUS64_MU_MCS_MASK) >> TX_STATUS64_MU_MCS_SHIFT) +#define TX_STATUS64_MU_NSS(s4) ((s4 & TX_STATUS64_MU_NSS_MASK) >> TX_STATUS64_MU_NSS_SHIFT) +#define TX_STATUS64_MU_SNR(s4) ((s4 & TX_STATUS64_MU_SNR_MASK) >> TX_STATUS64_MU_SNR_SHIFT) + +/* MU txstatus rspec field (NSS | MCS) */ +#define TX_STATUS64_MU_RSPEC_MASK (TX_STATUS64_MU_NSS_MASK | TX_STATUS64_MU_MCS_MASK) +#define TX_STATUS64_MU_RSPEC_SHIFT 0 + +#define TX_STATUS64_MU_RSPEC(s4) ((s4 & TX_STATUS64_MU_RSPEC_MASK) >> TX_STATUS64_MU_RSPEC_SHIFT) + +/* MU user info0 txstatus field (s4 b[31:16]) */ +#define TX_STATUS64_MU_GBMP_MASK 0x000f0000 +#define TX_STATUS64_MU_GBMP_SHIFT 16 +#define TX_STATUS64_MU_GPOS_MASK 0x00300000 +#define TX_STATUS64_MU_GPOS_SHIFT 20 +#define TX_STATUS64_MU_TXCNT_MASK 0x0fc00000 +#define TX_STATUS64_MU_TXCNT_SHIFT 22 + +#define TX_STATUS64_MU_GBMP(s4) ((s4 & TX_STATUS64_MU_GBMP_MASK) >> TX_STATUS64_MU_GBMP_SHIFT) +#define 
TX_STATUS64_MU_GPOS(s4) ((s4 & TX_STATUS64_MU_GPOS_MASK) >> TX_STATUS64_MU_GPOS_SHIFT) +#define TX_STATUS64_MU_TXCNT(s4) ((s4 & TX_STATUS64_MU_TXCNT_MASK) >> TX_STATUS64_MU_TXCNT_SHIFT) + +#define HE_MU_APTX_PWR_MAX 60u +#define HE_TXS_MU_APTX_PWR_DBM(aptx_pwr) ((aptx_pwr) - 20u) + +#define HE_TXS_MU_TARGET_RSSI_RANG 90 +#define HE_TXS_MU_TARGET_RSSI_MAX_PWR 127 +#define HE_TXS_MU_TARGET_RSSI_DBM(rssi) ((rssi) - 110) + +#define HE_TXS_W4_MU_GET_RU_INDEX(index) ((index <= HE_MAX_26_TONE_RU_INDX) ? 0u : \ + ((index) <= HE_MAX_52_TONE_RU_INDX) ? 1u : \ + ((index) <= HE_MAX_106_TONE_RU_INDX) ? 2u : \ + ((index) <= HE_MAX_242_TONE_RU_INDX) ? 3u : \ + ((index) <= HE_MAX_484_TONE_RU_INDX) ? 4u :\ + ((index) <= HE_MAX_996_TONE_RU_INDX) ? 5u : 6u) + +/* Bit 8 indicates upper 80 MHz */ +#define HE_TXS_W4_MU_RU_INDEX_RU_INDEX_MASK 0x7Fu +#define HE_TXS_W4_MU_RU_INDEX_TONE(index) HE_TXS_W4_MU_GET_RU_INDEX(((index) & \ + HE_TXS_W4_MU_RU_INDEX_RU_INDEX_MASK)) + +#define HE_TXS_W3_MU_APTX_PWR_MASK 0x003F0000u +#define HE_TXS_W3_MU_APTX_PWR_SHIFT 16u +#define HE_TXS_W3_MU_PKT_EXT_MASK 0x01C00000u +#define HE_TXS_W3_MU_PKT_EXT_SHIFT 22u +#define HE_TXS_W3_MU_MORE_TF_MASK 0x02000000u +#define HE_TXS_W3_MU_MORE_TF_SHIFT 25u +#define HE_TXS_W3_MU_CS_REQ_MASK 0x04000000u +#define HE_TXS_W3_MU_CS_REQ_SHIFT 26u +#define HE_TXS_W3_MU_UL_BW_MASK 0x18000000u +#define HE_TXS_W3_MU_UL_BW_SHIFT 27u +#define HE_TXS_W3_MU_GI_LTF_MASK 0x60000000u +#define HE_TXS_W3_MU_GI_LTF_SHIFT 29u +#define HE_TXS_W3_MU_MIMO_LTF_MASK 0x80000000u +#define HE_TXS_W3_MU_MIMO_LTF_SHIFT 31u + +#define HE_TXS_W3_MU_APTX_PWR(s3) (((s3) & HE_TXS_W3_MU_APTX_PWR_MASK) >> \ + HE_TXS_W3_MU_APTX_PWR_SHIFT) +#define HE_TXS_W3_MU_PKT_EXT(s3) (((s3) & HE_TXS_W3_MU_PKT_EXT_MASK) >> \ + HE_TXS_W3_MU_PKT_EXT_SHIFT) +#define HE_TXS_W3_MU_MORE_TF(s3) (((s3) & HE_TXS_W3_MU_MORE_TF_MASK) >> \ + HE_TXS_W3_MU_MORE_TF_SHIFT) +#define HE_TXS_W3_MU_CS_REQ(s3) (((s3) & HE_TXS_W3_MU_CS_REQ_MASK) >> \ + HE_TXS_W3_MU_CS_REQ_SHIFT) +#define HE_TXS_W3_MU_UL_BW(s3) (((s3) & HE_TXS_W3_MU_UL_BW_MASK) >> \ + HE_TXS_W3_MU_UL_BW_SHIFT) +#define HE_TXS_W3_MU_GI_LTF(s3) (((s3) & HE_TXS_W3_MU_GI_LTF_MASK) >> \ + HE_TXS_W3_MU_GI_LTF_SHIFT) +#define HE_TXS_W3_MU_MIMO_LT(s3) (((s3) & HE_TXS_W3_MU_MIMO_LTF_MASK) >> \ + HE_TXS_W3_MU_MIMO_LTF_SHIFT) + +#define HE_TXS_W4_MU_CODINF_TYPE_MASK 0x00000001u +#define HE_TXS_W4_MU_CODINF_TYPE_SHIFT 0u +#define HE_TXS_W4_MU_MCS_MASK 0x0000001Eu +#define HE_TXS_W4_MU_MCS_SHIFT 1u +#define HE_TXS_W4_MU_DCM_MASK 0x00000020u +#define HE_TXS_W4_MU_DCM_SHIFT 5u +#define HE_TXS_W4_RU_ALLOCATION_MASK 0x00003FC0u +#define HE_TXS_W4_RU_ALLOCATION_SHIFT 6u + +#define HE_TXS_W4_MU_CODINF_TYPE(s4) (((s4) & HE_TXS_W4_MU_CODINF_TYPE_MASK) >> \ + HE_TXS_W4_MU_CODINF_TYPE_SHIFT) +#define HE_TXS_W4_MU_MCS(s4) (((s4) & HE_TXS_W4_MU_MCS_MASK) >> \ + HE_TXS_W4_MU_MCS_SHIFT) +#define HE_TXS_W4_MU_DCM(s4) (((s4) & HE_TXS_W4_MU_DCM_MASK) >> \ + HE_TXS_W4_MU_DCM_SHIFT) +#define HE_TXS_W4_RU_ALLOCATION(s4) (((s4) & HE_TXS_W4_RU_ALLOCATION_MASK) >> \ + HE_TXS_W4_RU_ALLOCATION_SHIFT) + +#define HE_TXS_W4_MU_NSS_MASK 0x00030000u +#define HE_TXS_W4_MU_NSS_SHIFT 16u +#define HE_TXS_W4_MU_TARGET_RSSI_MASK 0x03FC0000u +#define HE_TXS_W4_MU_TARGET_RSSI_SHIFT 18u + +#define HE_TXS_W4_MU_NSS(s4) (((s4) & HE_TXS_W4_MU_NSS_MASK) >> \ + HE_TXS_W4_MU_NSS_SHIFT) +#define HE_TXS_W4_MU_TARGET_RSSI(s4) (((s4) & HE_TXS_W4_MU_TARGET_RSSI_MASK) >> \ + HE_TXS_W4_MU_TARGET_RSSI_SHIFT) + +/* WARNING: Modifying suppress reason codes? 
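+
+/*
+ * The trigger-frame power fields above are biased, not raw dBm: AP TX power
+ * is carried as 0..60 for -20..40 dBm, and target RSSI as 0..90 for
+ * -110..-20 dBm, with 127 requesting max power. A minimal conversion sketch
+ * (ex_ name illustrative; 0 is used here only as a placeholder for the
+ * max-power request):
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static void
+ex_he_trig_pwr(uint32 s3, uint32 s4, int *ap_pwr_dbm, int *tgt_rssi_dbm)
+{
+	uint32 rssi = HE_TXS_W4_MU_TARGET_RSSI(s4);
+
+	*ap_pwr_dbm = (int)HE_TXS_W3_MU_APTX_PWR(s3) - 20; /* HE_TXS_MU_APTX_PWR_DBM() */
+	/* HE_TXS_MU_TARGET_RSSI_MAX_PWR (127) requests max power, no dBm value */
+	*tgt_rssi_dbm = (rssi == HE_TXS_MU_TARGET_RSSI_MAX_PWR) ?
+	        0 : ((int)rssi - 110); /* HE_TXS_MU_TARGET_RSSI_DBM() */
+}
+#endif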
+ * Update wlc_tx_status_t and TX_STS_REASON_STRINGS and + * wlc_tx_status_map_hw_to_sw_supr_code() also + */ +/* status field bit definitions */ +/** suppress status reason codes */ +enum { + TX_STATUS_SUPR_NONE = 0, + TX_STATUS_SUPR_PMQ = 1, /**< PMQ entry */ + TX_STATUS_SUPR_FLUSH = 2, /**< flush request */ + TX_STATUS_SUPR_FRAG = 3, /**< previous frag failure */ + TX_STATUS_SUPR_TBTT = 3, /**< SHARED: Probe response supr for TBTT */ + TX_STATUS_SUPR_BADCH = 4, /**< channel mismatch */ + TX_STATUS_SUPR_EXPTIME = 5, /**< lifetime expiry */ + TX_STATUS_SUPR_UF = 6, /**< underflow */ +#ifdef WLP2P_UCODE + TX_STATUS_SUPR_NACK_ABS = 7, /**< BSS entered ABSENCE period */ +#endif + TX_STATUS_SUPR_PPS = 8, /**< Pretend PS */ + TX_STATUS_SUPR_PHASE1_KEY = 9, /**< Request new TKIP phase-1 key */ + TX_STATUS_UNUSED = 10, /**< Unused in trunk */ + TX_STATUS_INT_XFER_ERR = 11, /**< Internal DMA xfer error */ + TX_STATUS_SUPR_TWT_SP_OUT = 12, /**< Suppress Tx outside TWTSP */ + NUM_TX_STATUS_SUPR +}; + +/** Unexpected tx status for rate update */ +#define TX_STATUS_UNEXP(status) \ + ((((status.is_intermediate))) && \ + TX_STATUS_UNEXP_AMPDU(status)) + +/** Unexpected tx status for A-MPDU rate update */ +#ifdef WLP2P_UCODE +#define TX_STATUS_UNEXP_AMPDU(status) \ + ((((status.suppr_ind)) != TX_STATUS_SUPR_NONE) && \ + (((status.suppr_ind)) != TX_STATUS_SUPR_EXPTIME) && \ + (((status.suppr_ind)) != TX_STATUS_SUPR_NACK_ABS)) +#else +#define TX_STATUS_UNEXP_AMPDU(status) \ + ((((status.suppr_ind)) != TX_STATUS_SUPR_NONE) && \ + (((status.suppr_ind)) != TX_STATUS_SUPR_EXPTIME)) +#endif + +/** + * This defines the collection of supp reasons (including none) + * for which mac has done its (re-)transmission in any of ucode retx schemes + * which include ucode/hw/aqm agg + */ +#define TXS_SUPR_MAGG_DONE_MASK ((1 << TX_STATUS_SUPR_NONE) | \ + (1 << TX_STATUS_SUPR_UF) | \ + (1 << TX_STATUS_SUPR_FRAG) | \ + (1 << TX_STATUS_SUPR_EXPTIME)) +#define TXS_SUPR_MAGG_DONE(suppr_ind) \ + ((1 << (suppr_ind)) & TXS_SUPR_MAGG_DONE_MASK) + +#define TX_STATUS_BA_BMAP03_MASK 0xF000 /**< ba bitmap 0:3 in 1st pkg */ +#define TX_STATUS_BA_BMAP03_SHIFT 12 /**< ba bitmap 0:3 in 1st pkg */ +#define TX_STATUS_BA_BMAP47_MASK 0x001E /**< ba bitmap 4:7 in 2nd pkg */ +#define TX_STATUS_BA_BMAP47_SHIFT 3 /**< ba bitmap 4:7 in 2nd pkg */ + +/* RXE (Receive Engine) */ + +/* RCM_CTL */ +#define RCM_INC_MASK_H 0x0080 +#define RCM_INC_MASK_L 0x0040 +#define RCM_INC_DATA 0x0020 +#define RCM_INDEX_MASK 0x001F +#define RCM_SIZE 15 + +#define RCM_MAC_OFFSET 0 /**< current MAC address */ +#define RCM_BSSID_OFFSET 3 /**< current BSSID address */ +#define RCM_F_BSSID_0_OFFSET 6 /**< foreign BSS CFP tracking */ +#define RCM_F_BSSID_1_OFFSET 9 /**< foreign BSS CFP tracking */ +#define RCM_F_BSSID_2_OFFSET 12 /**< foreign BSS CFP tracking */ + +#define RCM_WEP_TA0_OFFSET 16 +#define RCM_WEP_TA1_OFFSET 19 +#define RCM_WEP_TA2_OFFSET 22 +#define RCM_WEP_TA3_OFFSET 25 + +/* AMT - Address Match Table */ + +/* AMT Attribute bits */ +#define AMT_ATTR_VALID 0x8000 /**< Mark the table entry valid */ +#define AMT_ATTR_A1 0x0008 /**< Match for A1 */ +#define AMT_ATTR_A2 0x0004 /**< Match for A2 */ +#define AMT_ATTR_A3 0x0002 /**< Match for A3 */ + +/* AMT Index defines */ +#define AMT_SIZE_64 64 /* number of AMT entries */ +#define AMT_SIZE_128 128 /* number of AMT entries for corerev >= 64 */ +#define AMT_IDX_MAC 63 /**< device MAC */ +#define AMT_IDX_BSSID 62 /**< BSSID match */ +#define AMT_IDX_TRANSMITTED_BSSID 60 /**< transmitted BSSID in multiple BSSID set 
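+
+/*
+ * Sketch of the intended use of the suppress-reason helpers above: a status
+ * counts as "done" for the ucode/hw/aqm aggregation retx schemes only when
+ * its reason is in TXS_SUPR_MAGG_DONE_MASK (ex_ name illustrative only).
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static bool
+ex_txs_retx_done(const tx_status_t *txs)
+{
+	return TXS_SUPR_MAGG_DONE(txs->status.suppr_ind) != 0;
+}
+#endif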
*/ +#define AMT_WORD_CNT 2 /* Number of words per AMT entry */ + +#define AMT_SIZE(_corerev) (D11REV_GE(_corerev, 64) ? \ + (D11REV_GE(_corerev, 80) ? AMT_SIZE_64 : AMT_SIZE_128) : \ + AMT_SIZE_64) + +/* RMC entries */ +#define AMT_IDX_MCAST_ADDR 61 /**< MCAST address for Reliable Mcast feature */ +#define AMT_IDX_MCAST_ADDR1 59 /**< MCAST address for Reliable Mcast feature */ +#define AMT_IDX_MCAST_ADDR2 58 /**< MCAST address for Reliable Mcast feature */ +#define AMT_IDX_MCAST_ADDR3 57 /**< MCAST address for Reliable Mcast feature */ + +#ifdef WLMESH +/* note: this is the max supported by ucode. But the ARM driver can + * only support mesh_info->mesh_max_peers, which should be <= this value. + */ + +#define AMT_MAX_MESH_PEER 10 +#define AMT_MAXIDX_MESH_PEER 60 +#define AMT_MAXIDX_P2P_USE \ + (AMT_MAXIDX_MESH_PEER - AMT_MAX_MESH_PEER) +#else +#define AMT_MAXIDX_P2P_USE 60 /**< Max P2P entry to use */ +#endif /* WLMESH */ +#define AMT_MAX_TXBF_ENTRIES 7 /**< Max tx beamforming entry */ +/* PSTA AWARE AP: Max PSTA Tx beamforming entry */ +#define AMT_MAX_TXBF_PSTA_ENTRIES 20 + +/* M_AMT_INFO SHM bit field definition */ +#define AMTINFO_BMP_IBSS (1u << 0u) /* IBSS Station */ +#define AMTINFO_BMP_MESH (1u << 1u) /* MESH Station */ +#define AMTINFO_BMP_BSSID (1u << 2u) /* BSSID-only */ +#define AMTINFO_BMP_IS_WAPI (1u << 3u) /* For WAPI keyid extraction */ +#define AMTINFO_BMP_IS_HE (1u << 13u) /* For HE peer indication */ + +#define AUXPMQ_ENTRIES 64 /* number of AUX PMQ entries */ +#define AUXPMQ_ENTRY_SIZE 8 + +/* PSM Block */ + +/* psm_phy_hdr_param bits */ +#define MAC_PHY_RESET 1 +#define MAC_PHY_CLOCK_EN 2 +#define MAC_PHY_FORCE_CLK 4 +#define MAC_IHRP_CLOCK_EN 15 + +/* PSMCoreControlStatus (IHR Address 0x078) bit definitions */ +#define PSM_CORE_CTL_AR (1 << 0) +#define PSM_CORE_CTL_HR (1 << 1) +#define PSM_CORE_CTL_IR (1 << 2) +#define PSM_CORE_CTL_AAR (1 << 3) +#define PSM_CORE_CTL_HAR (1 << 4) +#define PSM_CORE_CTL_PPAR (1 << 5) +#define PSM_CORE_CTL_SS (1 << 6) +#define PSM_CORE_CTL_REHE (1 << 7) +#define PSM_CORE_CTL_PPAS (1 << 13) +#define PSM_CORE_CTL_AAS (1 << 14) +#define PSM_CORE_CTL_HAS (1 << 15) + +#define PSM_CORE_CTL_LTR_BIT 9 +#define PSM_CORE_CTL_LTR_MASK 0x3 + +#define PSM_SBACCESS_FIFO_MODE (1 << 1) +#define PSM_SBACCESS_EXT_ERR (1 << 11) + +/* WEP Block */ + +/* WEP_WKEY */ +#define WKEY_START (1 << 8) +#define WKEY_SEL_MASK 0x1F + +/* WEP data formats */ + +/* the number of RCMTA entries */ +#define RCMTA_SIZE 50 + +/* max keys in M_TKMICKEYS_BLK - 96 * sizeof(uint16) */ +#define WSEC_MAX_TKMIC_ENGINE_KEYS(_corerev) ((D11REV_GE(_corerev, 64)) ? \ + AMT_SIZE(_corerev) : 12) /* 8 + 4 default - 2 mic keys 8 bytes each */ + +/* max keys in M_WAPIMICKEYS_BLK - 64 * sizeof(uint16) */ +#define WSEC_MAX_SMS4MIC_ENGINE_KEYS(_corerev) ((D11REV_GE(_corerev, 64)) ? \ + AMT_SIZE(_corerev) : 8) /* 4 + 4 default - 16 bytes each */ + +/* max RXE match registers */ +#define WSEC_MAX_RXE_KEYS 4 + +/* SECKINDXALGO (Security Key Index & Algorithm Block) word format */ +/* SKL (Security Key Lookup) */ +#define SKL_POST80_ALGO_MASK 0x000F +#define SKL_PRE80_ALGO_MASK 0x0007 +#define SKL_ALGO_SHIFT 0 + +#define SKL_ALGO_MASK(_corerev) (D11REV_GE(_corerev, 80) ? SKL_POST80_ALGO_MASK : \ + SKL_PRE80_ALGO_MASK) + +#define SKL_WAPI_KEYID_MASK 0x8000 +#define SKL_WAPI_KEYID_SHIFT 15 +#define SKL_INDEX_SHIFT 4 + +#define SKL_PRE80_WAPI_KEYID_MASK 0x0008 +#define SKL_PRE80_WAPI_KEYID_SHIFT 3 + +#define SKL_INDEX_MASK(_corerev) ((D11REV_GE(_corerev, 64)) ?
\ + (0x0FF0) : (0x03F0)) +#define SKL_GRP_ALGO_MASK(_corerev) ((D11REV_GE(_corerev, 64)) ? \ + ((D11REV_GE(_corerev, 80)) ? (0xE000) : (0x7000)) : (0x1c00)) +#define SKL_GRP_ALGO_SHIFT(_corerev) ((D11REV_GE(_corerev, 64)) ? \ + ((D11REV_GE(_corerev, 80)) ? (13) : (12)) : (10)) + +#define SKL_STAMON_NBIT 0x8000 /* STA monitor bit */ + +/* additional bits defined for IBSS group key support */ +#define SKL_IBSS_INDEX_MASK 0x01F0 +#define SKL_IBSS_INDEX_SHIFT 4 +#define SKL_IBSS_KEYID1_MASK 0x0600 +#define SKL_IBSS_KEYID1_SHIFT 9 +#define SKL_IBSS_KEYID2_MASK 0x1800 +#define SKL_IBSS_KEYID2_SHIFT 11 +#define SKL_IBSS_KEYALGO_MASK 0xE000 +#define SKL_IBSS_KEYALGO_SHIFT 13 + +#define WSEC_MODE_OFF 0 +#define WSEC_MODE_HW 1 +#define WSEC_MODE_SW 2 + +/* Mapped as per HW_ALGO */ +#define WSEC_ALGO_OFF 0 +#define WSEC_ALGO_WEP1 1 +#define WSEC_ALGO_TKIP 2 +#define WSEC_ALGO_WEP128 3 +#define WSEC_ALGO_AES_LEGACY 4 +#define WSEC_ALGO_AES 5 +#define WSEC_ALGO_SMS4 6 +#define WSEC_ALGO_SMS4_DFT_2005_09_07 7 /**< Not used right now */ +#define WSEC_ALGO_NALG 8 + +/* For CORE_REV 80 */ +#define WSEC_ALGO_AES_GCM 8 +#define WSEC_ALGO_AES_GCM256 9 + +/* For CORE_REV Less than 80 and */ +#define WSEC_ALGO_AES_PRE80_GCM 6 +#define WSEC_ALGO_AES_PRE80_GCM256 8 + +/* D11 MAX TTAK INDEX */ +#define TSC_TTAK_PRE80_MAX_INDEX 50 +#define TSC_TTAK_MAX_INDEX 8 +/* D11 COREREV 80 TTAK KEY INDEX SHIFT */ +#define SKL_TTAK_INDEX_SHIFT 13 +#define SKL_TTAK_INDEX_MASK 0xE000 + +/* D11 PRECOREREV 40 Hw algos...changed from corerev 40 */ +#define D11_PRE40_WSEC_ALGO_AES 3 +#define D11_PRE40_WSEC_ALGO_WEP128 4 +#define D11_PRE40_WSEC_ALGO_AES_LEGACY 5 +#define D11_PRE40_WSEC_ALGO_SMS4 6 +#define D11_PRE40_WSEC_ALGO_NALG 7 + +#define D11_WSEC_ALGO_AES(_corerev) WSEC_ALGO_AES + +#define AES_MODE_NONE 0 +#define AES_MODE_CCM 1 +#define AES_MODE_OCB_MSDU 2 +#define AES_MODE_OCB_MPDU 3 +#define AES_MODE_CMAC 4 +#define AES_MODE_GCM 5 +#define AES_MODE_GMAC 6 + +/* WEP_CTL (Rev 0) */ +#define WECR0_KEYREG_SHIFT 0 +#define WECR0_KEYREG_MASK 0x7 +#define WECR0_DECRYPT (1 << 3) +#define WECR0_IVINLINE (1 << 4) +#define WECR0_WEPALG_SHIFT 5 +#define WECR0_WEPALG_MASK (0x7 << 5) +#define WECR0_WKEYSEL_SHIFT 8 +#define WECR0_WKEYSEL_MASK (0x7 << 8) +#define WECR0_WKEYSTART (1 << 11) +#define WECR0_WEPINIT (1 << 14) +#define WECR0_ICVERR (1 << 15) + +/* Frame template map byte offsets */ +#define T_ACTS_TPL_BASE (0) +#define T_NULL_TPL_BASE (0xc * 2) +#define T_QNULL_TPL_BASE (0x1c * 2) +#define T_RR_TPL_BASE (0x2c * 2) +#define T_BCN0_TPL_BASE (0x34 * 2) +#define T_PRS_TPL_BASE (0x134 * 2) +#define T_BCN1_TPL_BASE (0x234 * 2) +#define T_P2P_NULL_TPL_BASE (0x340 * 2) +#define T_P2P_NULL_TPL_SIZE (32) +#define T_TRIG_TPL_BASE (0x90 * 2) + +/* FCBS base addresses and sizes in BM */ + +#define FCBS_DS0_BM_CMD_SZ_CORE0 0x0200 /* 512 bytes */ +#define FCBS_DS0_BM_DAT_SZ_CORE0 0x0200 /* 512 bytes */ + +#ifndef FCBS_DS0_BM_CMDPTR_BASE_CORE0 +#define FCBS_DS0_BM_CMDPTR_BASE_CORE0 0x3000 +#endif +#define FCBS_DS0_BM_DATPTR_BASE_CORE0 (FCBS_DS0_BM_CMDPTR_BASE_CORE0 + FCBS_DS0_BM_CMD_SZ_CORE0) + +#define FCBS_DS0_BM_CMD_SZ_CORE1 0x0200 /* 512 bytes */ +#define FCBS_DS0_BM_DAT_SZ_CORE1 0x0200 /* 512 bytes */ + +#ifndef FCBS_DS0_BM_CMDPTR_BASE_CORE1 +#define FCBS_DS0_BM_CMDPTR_BASE_CORE1 0x2400 +#endif +#define FCBS_DS0_BM_DATPTR_BASE_CORE1 (FCBS_DS0_BM_CMDPTR_BASE_CORE1 + FCBS_DS0_BM_CMD_SZ_CORE1) + +#define FCBS_DS0_BM_CMD_SZ_CORE2 0x0200 /* 512 bytes */ +#define FCBS_DS0_BM_DAT_SZ_CORE2 0x0200 /* 512 bytes */ + +#define FCBS_DS1_BM_CMD_SZ_CORE0 
0x2000 /* Not used */ +#define FCBS_DS1_BM_DAT_SZ_CORE0 0x2000 /* Not used */ + +#define FCBS_DS1_BM_CMDPTR_BASE_CORE0 0x17B4 +#define FCBS_DS1_BM_DATPTR_BASE_CORE0 (FCBS_DS1_BM_CMDPTR_BASE_CORE0 + FCBS_DS1_BM_CMD_SZ_CORE0) + +#define FCBS_DS1_BM_CMD_SZ_CORE1 0x2000 /* Not used */ +#define FCBS_DS1_BM_DAT_SZ_CORE1 0x2000 /* Not used */ + +#define FCBS_DS1_BM_CMDPTR_BASE_CORE1 0x17B4 +#define FCBS_DS1_BM_DATPTR_BASE_CORE1 (FCBS_DS1_BM_CMDPTR_BASE_CORE1 + FCBS_DS1_BM_CMD_SZ_CORE1) + +#define T_BA_TPL_BASE T_QNULL_TPL_BASE /**< template area for BA */ + +#define T_RAM_ACCESS_SZ 4 /**< template ram is 4 byte access only */ + +#define TPLBLKS_PER_BCN_NUM 2 +#define TPLBLKS_AC_PER_BCN_NUM 1 + +#if defined(WLLPRS) && defined(MBSS) +#define TPLBLKS_PER_PRS_NUM 4 +#define TPLBLKS_AC_PER_PRS_NUM 2 +#else +#define TPLBLKS_PER_PRS_NUM 2 +#define TPLBLKS_AC_PER_PRS_NUM 1 +#endif /* WLLPRS && MBSS */ + +/* MAC Sample Collect Params */ + +/* SampleCapture set-up options in + * different registers based on CoreRev + */ +/* CoreRev >= 50, use SMP_CTRL in TXE_IHR */ +#define SC_SRC_MAC 2 /* MAC as Sample Collect Src */ +#define SC_SRC_SHIFT 3 /* SC_SRC bits [3:4] */ +#define SC_TRIG_SHIFT 5 +#define SC_TRANS_SHIFT 6 +#define SC_MATCH_SHIFT 7 +#define SC_STORE_SHIFT 8 + +#define SC_STRT 1 +#define SC_TRIG_EN (1 << SC_TRIG_SHIFT) +#define SC_TRANS_EN (1 << SC_TRANS_SHIFT) +#define SC_MATCH_EN (1 << SC_MATCH_SHIFT) +#define SC_STORE_EN (1 << SC_STORE_SHIFT) + +/* CoreRev < 50, use PHY_CTL in PSM_IHR */ +#define PHYCTL_PHYCLKEN (1 << 1) +#define PHYCTL_FORCE_GATED_CLK_ON (1 << 2) +#define PHYCTL_SC_STRT (1 << 4) +#define PHYCTL_SC_SRC_LB (1 << 7) +#define PHYCTL_SC_TRIG_EN (1 << 8) +#define PHYCTL_SC_TRANS_EN (1 << 9) +#define PHYCTL_SC_STR_EN (1 << 10) +#define PHYCTL_IHRP_CLK_EN (1 << 15) +/* End MAC Sample Collect Params */ + +#define ANTSEL_CLKDIV_4MHZ 6 +#define MIMO_ANTSEL_BUSY 0x4000 /**< bit 14 (busy) */ +#define MIMO_ANTSEL_SEL 0x8000 /**< bit 15 write the value */ +#define MIMO_ANTSEL_WAIT 50 /**< 50us wait */ +#define MIMO_ANTSEL_OVERRIDE 0x8000 /**< flag */ + +typedef struct shm_acparams shm_acparams_t; +BWL_PRE_PACKED_STRUCT struct shm_acparams { + uint16 txop; + uint16 cwmin; + uint16 cwmax; + uint16 cwcur; + uint16 aifs; + uint16 bslots; + uint16 reggap; + uint16 status; + uint16 txcnt; + uint16 rsvd[7]; +} BWL_POST_PACKED_STRUCT; + +#define WME_STATUS_NEWAC (1 << 8) + +/* M_HOST_FLAGS */ +#define MHFMAX 5 /* Number of valid hostflag half-word (uint16) */ +#define MHF1 0 /* Hostflag 1 index */ +#define MHF2 1 /* Hostflag 2 index */ +#define MHF3 2 /* Hostflag 3 index */ +#define MHF4 3 /* Hostflag 4 index */ +#define MHF5 4 /* Hostflag 5 index */ + +#define MXHFMAX 1 /* Number of valid PSMx hostflag half-word (uint16) */ +#define MXHF0 64 /* PSMx Hostflag 0 index */ + +/* Flags in M_HOST_FLAGS */ +#define MHF1_D11AC_DYNBW 0x0001 /**< dynamic bw */ +#define MHF1_WLAN_CRITICAL 0x0002 /**< WLAN is in critical state */ +#define MHF1_MBSS_EN 0x0004 /**< Enable MBSS: RXPUWAR deprecated for rev >= 9 */ +#define MHF1_BTCOEXIST 0x0010 /**< Enable Bluetooth / WLAN coexistence */ +#define MHF1_P2P_SKIP_TIME_UPD 0x0020 /**< Skip P2P SHM updates and P2P event generations */ +#define MHF1_TXMUTE_WAR 0x0040 /**< ucode based Tx mute */ +#define MHF1_RXFIFO1 0x0080 /**< Switch data reception from RX fifo 0 to fifo 1 */ +#define MHF1_EDCF 0x0100 /**< Enable EDCF access control */ +#define MHF1_ULP 0x0200 /**< Force Ucode to put chip in low power state */ +#define MHF1_FORCE_SEND_BCN 0x0800 /**< Force send bcn, 
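+
+/*
+ * Host flags are kept per half-word (MHF1..MHF5); drivers typically maintain
+ * a MHFMAX-entry shadow and write the changed word to SHM. A minimal
+ * read-modify-write sketch (ex_ name illustrative; the SHM write itself is
+ * chip-specific and omitted):
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static void
+ex_mhf_upd(uint16 mhfs[MHFMAX], uint8 idx, uint16 mask, uint16 val)
+{
+	/* idx is one of MHF1..MHF5; val must be a subset of mask */
+	mhfs[idx] = (mhfs[idx] & ~mask) | (val & mask);
+}
+#endif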
even if rcvd from peer STA (IBSS) */ +#define MHF1_TIMBC_EN 0x1000 /**< Enable Target TIM Transmission Time function */ +#define MHF1_RADARWAR 0x2000 /**< Enable Radar Detect WAR PR 16559 */ +#define MHF1_DEFKEYVALID 0x4000 /**< Enable use of the default keys */ +#define MHF1_CTS2SELF 0x8000 /**< Enable CTS to self full phy bw protection */ + +/* Flags in M_HOST_FLAGS2 */ +#define MHF2_DISABLE_PRB_RESP 0x0001 /**< disable Probe Response in ucode */ +#define MHF2_HIB_FEATURE_ENABLE 0x0008 /* Enable HIB feature in ucode (60<=rev<80) */ +#define MHF2_SKIP_ADJTSF 0x0010 /**< skip TSF update when receiving bcn/probeRsp */ +#define MHF2_RSPBW20 0x0020 /**< Uses bw20 for response frames ack/ba/cts */ +#define MHF2_TXBCMC_NOW 0x0040 /**< Flush BCMC FIFO immediately */ +#define MHF2_PPR_HWPWRCTL 0x0080 /**< TSSI_DIV WAR (rev<80) */ +#define MHF2_BTC2WIRE_ALTGPIO 0x0100 /**< BTC 2wire in alternate pins */ +#define MHF2_BTCPREMPT 0x0200 /**< BTC enable bluetooth check during tx */ +#define MHF2_SKIP_CFP_UPDATE 0x0400 /**< Skip CFP update ; for d11 rev <= 80 */ +#define MHF2_TX_TMSTMP 0x0800 /**< Enable passing tx-timestamps in tx-status */ +#define MHF2_UFC_GE84 0x2000 /**< Enable UFC in CT mode */ +#define MHF2_NAV_NORST_WAR 0x4000 /**< WAR to use rogue NAV duration */ +#define MHF2_BTCANTMODE 0x4000 // OBSOLETE (TO BE REMOVED) + +/* Flags in M_HOST_FLAGS3 */ +#define MHF3_ANTSEL_EN 0x0001 /**< enabled mimo antenna selection (REV<80) */ +#define MHF3_TKIP_FRAG_WAR 0x0001 /**< TKIP fragment corrupt WAR (REV>=80) */ +#define MHF3_TXSHAPER_EN 0x0002 /** enable tx shaper for non-OFDM-A frames */ +#define MHF3_ANTSEL_MODE 0x0002 /**< antenna selection mode: 0: 2x3, 1: 2x4 (REV<80) */ +#define MHF3_BTCX_DEF_BT 0x0004 /**< corerev >= 13 BT Coex. */ +#define MHF3_BTCX_ACTIVE_PROT 0x0008 /**< corerev >= 13 BT Coex. 
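+
+/*
+ * Note that some MHF3 bits are overloaded by core revision, e.g. bit 0x0001
+ * reads as MHF3_ANTSEL_EN before rev 80 and as MHF3_TKIP_FRAG_WAR from
+ * rev 80 on, so callers should dispatch on corerev. A minimal sketch (ex_
+ * name illustrative only):
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static uint16
+ex_mhf3_bit0(uint corerev)
+{
+	/* same bit position, different meaning per revision */
+	return D11REV_GE(corerev, 80) ? MHF3_TKIP_FRAG_WAR : MHF3_ANTSEL_EN;
+}
+#endif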
*/ +#define MHF3_PKTENG_PROMISC 0x0010 /**< pass frames to driver in packet engine Rx mode */ +#define MHF3_SCANCORE_PM_EN 0x0040 /**< enable ScanCore PM from ucode */ +#define MHF3_PM_BCNRX 0x0080 /**< PM single core beacon RX for power reduction */ +#define MHF3_BTCX_SIM_RSP 0x0100 /**< allow limited low power tx when BT is active */ +#define MHF3_BTCX_PS_PROTECT 0x0200 /**< use PS mode to protect BT activity */ +#define MHF3_BTCX_SIM_TX_LP 0x0400 /**< use low power for simultaneous tx responses */ +#define MHF3_SELECT_RXF1 0x0800 /**< enable frame classification in pcie FD */ +#define MHF3_BTCX_ECI 0x1000 /**< Enable BTCX ECI interface */ +#define MHF3_NOISECAL_ENHANCE 0x2000 + +/* Flags in M_HOST_FLAGS4 */ +#define MHF4_RCMTA_BSSID_EN 0x0002 /**< BTAMP: multiSta BSSIDs matching in RCMTA area */ +#define MHF4_SC_MIX_EN 0x0002 /**< set to enable 4389a0 specific changes */ +#define MHF4_BCN_ROT_RR 0x0004 /**< MBSSID: beacon rotate in round-robin fashion */ +#define MHF4_OPT_SLEEP 0x0008 /**< enable opportunistic sleep (REV<80) */ +#define MHF4_PM_OFFLOAD 0x0008 /**< enable PM offload */ +#define MHF4_PROXY_STA 0x0010 /**< enable proxy-STA feature */ +#define MHF4_AGING 0x0020 /**< Enable aging threshold for RF awareness */ +#define MHF4_STOP_BA_ON_NDP 0x0080 /**< Stop BlockAck to AP to get chance to send NULL data */ +#define MHF4_NOPHYHANGWAR 0x0100 /**< disable ucode WAR for idletssi cal (rev=61) */ +#define MHF4_WMAC_ACKTMOUT 0x0200 /**< reserved for WMAC testing */ +#define MHF4_NAPPING_ENABLE 0x0400 /**< Napping enable (REV<80) */ +#define MHF4_IBSS_SEC 0x0800 /**< IBSS WPA2-PSK operating mode */ +#define MHF4_SISO_BCMC_RX 0x1000 /* Disable switch to MIMO on receiving multicast TIM */ +#define MHF4_RSDB_CR1_MINIPMU_CAL_EN 0x8000 /* for 4349B0. JIRA:SW4349-1469 */ + +/* Flags in M_HOST_FLAGS5 */ +#define MHF5_BTCX_LIGHT 0x0002 /**< light coex mode, off txpu only for critical BT */ +#define MHF5_BTCX_PARALLEL 0x0004 /**< BT and WLAN run in parallel.
*/ +#define MHF5_BTCX_DEFANT 0x0008 /**< default position for shared antenna */ +#define MHF5_P2P_MODE 0x0010 /**< Enable P2P mode */ +#define MHF5_LEGACY_PRS 0x0020 /**< Enable legacy probe resp support */ +#define MHF5_HWRSSI_EN 0x0800 /**< Enable HW RSSI (ac) */ +#define MHF5_HIBERNATE 0x1000 /**< Force ucode to power save until wake-bit */ +#define MHF5_BTCX_GPIO_DEBUG 0x4000 /**< Enable gpio pins for btcoex ECI signals */ +#define MHF5_SUPPRESS_PRB_REQ 0x8000 /**< Suppress probe requests at ucode level */ + +/* Flags in M_HOST_FLAGS6 */ +#define MHF6_TXPWRCAP_RST_EN 0x0001 /** < Ucode clear phyreg after each tx */ +#define MHF6_TXPWRCAP_EN 0x0002 /** < Enable TX power capping in ucode */ +#define MHF6_TSYNC_AVB 0x0004 /** Enable AVB for timestamping */ +#define MHF6_TSYNC_3PKG 0x0020 /** < Enable 3rd txstatus package */ +#define MHF6_TDMTX 0x0040 /** < Enable SDB TDM in ucode */ +#define MHF6_TSYNC_NODEEPSLP 0x0080 /** < Disable deep sleep to keep AVB clock */ +#define MHF6_TSYNC_CAL 0x0100 /** < Enable Tsync cal in ucode */ +#define MHF6_TXPWRCAP_IOS_NBIT 0x0200 /** < Enable IOS mode of operation for Txpwrcap (REV>=80) */ +#define MHF6_MULBSSID_NBIT 0x0400 /** < associated to AP belonging to a multiple BSSID set */ +#define MHF6_HEBCN_TX_NBIT 0x0800 /** < HE BCN-TX */ +#define MHF6_LATENCY_EN 0x2000 /** < Enable Latency instrumentation in ucode */ +#define MHF6_PTMSTS_EN 0x4000 /** < Enable PTM Status */ + +/* MX_HOST_FLAGS */ +/* Flags for MX_HOST_FLAGS0 */ +#define MXHF0_RSV0 0x0001 /* ucode internal, not exposed yet */ +#define MXHF0_TXDRATE 0x0002 /* mu txrate to use rate from txd */ +#define MXHF0_CHKFID 0x0004 /* check if frameid->fifo matches hw txfifo idx */ +#define MXHF0_DISWAR 0x0008 /* disable some WAR. */ + +/* M_AXC_HOST_FLAGS0 */ +#define MAXCHF0_WAIT_TRIG 0x0001 /* Hold frames till trigger frame is rxed */ +#define MAXCHF0_HTC_SUPPORT 0x0002 /* 11AX HTC field support */ +#define MAXCHF0_AX_ASSOC_SHIFT 0x0003 /* 11AX association indicator */ +#define MAXCHF0_HEB_CONFIG 0x0004 /* HEB configuration */ +#define MAXCHF0_ACI_DET 0x0008 /* ACI detect soft enable */ +#define MAXCHF0_TRIGRES_LP 0x0010 /* Lite-Point testing */ +#define MAXCHF0_HDRCONV_SHIFT 5u /* Enable header conversion */ +#define MAXCHF0_HDRCONV (1 << MAXCHF0_HDRCONV_SHIFT) +#define MAXCHF0_FORCE_ZERO_PPR_SHIFT 6u /* Force PPR value to 0 for ULTPC */ +#define MAXCHF0_FORCE_ZERO_PPR (1 << MAXCHF0_FORCE_ZERO_PPR_SHIFT) +#define MAXCHF0_DISABLE_PYLDECWAR_SHIFT 7u /* Disable WAR for Paydecode issue */ +#define MAXCHF0_DISABLE_PYLDECWAR (1 << MAXCHF0_DISABLE_PYLDECWAR_SHIFT) +#define MAXCHF0_BSR_SUPPORT_SHIFT 8u /* BSR is supported */ +#define MAXCHF0_BSR_SUPPORT (1 << MAXCHF0_BSR_SUPPORT_SHIFT) +#define MAXCHF0_MUEDCA_VALID_SHIFT 9u /* MUEDCA information is valid */ +#define MAXCHF0_MUEDCA_VALID (1 << MAXCHF0_MUEDCA_VALID_SHIFT) +/* Bit 10 definition missing? 
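+
+/*
+ * Sketch of composing an M_AXC_HOST_FLAGS0 value; note the block above mixes
+ * plain mask macros (e.g. MAXCHF0_HTC_SUPPORT) with shift-derived ones
+ * (e.g. MAXCHF0_HDRCONV) (ex_ name illustrative only).
+ */
+#if 0 /* illustrative sketch; not part of the build */
+static uint16
+ex_axc_hf0(bool htc, bool hdrconv, bool bsr)
+{
+	uint16 hf0 = 0;
+
+	if (htc)
+		hf0 |= MAXCHF0_HTC_SUPPORT;
+	if (hdrconv)
+		hf0 |= MAXCHF0_HDRCONV;
+	if (bsr)
+		hf0 |= MAXCHF0_BSR_SUPPORT;
+	return hf0;
+}
+#endif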
*/ +#define MAXCHF0_TWT_PKTSUPP_SHIFT 11u /* Enable pkt suppress outside TWT SP */ +#define MAXCHF0_TWT_PKTSUPP_EN (1 << MAXCHF0_TWT_PKTSUPP_SHIFT) +#define MAXCHF0_TBPPDU_STATUS_SHIFT 12u +#define MAXCHF0_TBPPDU_STATUS_EN (1 << MAXCHF0_TBPPDU_STATUS_SHIFT) +#define MAXCHF0_11AX_TXSTATUS_EXT_SHIFT 13u /* Enable 128 BA pkg in TX status */ +#define MAXCHF0_11AX_TXSTATUS_EXT_EN (1u << MAXCHF0_11AX_TXSTATUS_EXT_SHIFT) +#define MAXCHF1_11AX_TXSTATUS_EXT_SHIFT 0u /* Enable 256 BA pkg in TX status */ +#define MAXCHF1_11AX_TXSTATUS_EXT_EN (1u << MAXCHF1_11AX_TXSTATUS_EXT_SHIFT) +/* Bit 14 for UORA_EN */ +#define MAXCHF0_11AX_UORA_SHIFT 14u /* Enable UORA support */ +#define MAXCHF0_11AX_UORA_EN (1u << MAXCHF0_11AX_UORA_SHIFT) + +/* M_AXC_HOST_FLAGS1 */ +#define MAXCHF1_ITXSTATUS_EN 0x0004u /* Enable intermediate txs for TB PPDU */ +#define MAXCHF1_OBSSHWSTATS_EN 0x0008u /* Enable ucode OBSS stats monitoring */ + +/* M_SC_HOST_FLAGS */ +#define C_SCCX_STATS_EN 0x0001u /* Enable SC stats */ +#define C_SC_BTMC_COEX_EN 0x0002u /* Enable WLSC-BTMC coex */ + +/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - rev61.1 */ +typedef struct d11rxhdrshort_rev61_1 d11rxhdrshort_rev61_1_t; +BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_rev61_1 { + uint16 RxFrameSize; /**< Actual byte length of the frame data received */ + + /* These two 8-bit fields remain in the same order regardless of + * processor byte order. + */ + uint8 dma_flags; /**< bit 0 indicates short or long rx status. 1 == short. */ + uint8 fifo; /**< rx fifo number */ + uint16 mrxs; /**< MAC Rx Status */ + uint16 RxFrameSize0; /**< rxframesize for fifo-0 (in bytes). */ + uint16 HdrConvSt; /**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */ + uint16 RxTSFTimeL; /**< RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */ + uint16 RxTSFTimeH; /**< RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */ + uint16 aux_status; /**< DMA writes into this field. ucode treats as reserved. */ +} BWL_POST_PACKED_STRUCT; + +/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - pre80 */ +typedef struct d11rxhdrshort_lt80 d11rxhdrshort_lt80_t; +BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_lt80 { + uint16 RxFrameSize; /**< Actual byte length of the frame data received */ + + /* These two 8-bit fields remain in the same order regardless of + * processor byte order. + */ + uint8 dma_flags; /**< bit 0 indicates short or long rx status. 1 == short. */ + uint8 fifo; /**< rx fifo number */ + uint16 mrxs; /**< MAC Rx Status */ + uint16 RxTSFTime; /**< RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */ + uint16 HdrConvSt; /**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */ + uint16 aux_status; /**< DMA writes into this field. ucode treats as reserved. 
*/
+} BWL_POST_PACKED_STRUCT;
+
+/* Errflag bits for ge80 */
+#define ERRFLAGS_ERR_STATE	0x0003u
+#define ERRFLAGS_GREATER_MSDU_LEN	0x0001u
+#define ERRFLAGS_AMSDU_TRUNCATED	0x0002u
+#define ERRFLAGS_HDRCONV_MASK	0x00F0u
+#define ERRFLAGS_HDRCONV_SHIFT	4u
+#define ERRFLAGS_CSI_LEN_64K	0x0100u
+#define ERRFLAGS_MESH_FMT_ERR	0x0200u
+
+/* Register 'D11_RXE_ERRVAL' bits for ge80 */
+#define RXEERR_GREATER_MSDU_LEN	(1u << 6)
+
+/* 128 BA configuration */
+/* Register D11_TXBA_DataSel bits for ge80 */
+#define TXBA_DATASEL_WSIZE_BITMAP_LEN_ENC_SEL	(1u << 0u)
+
+/* Register D11_TXBA_Data bits (ge80) */
+#define TXBA_DATA_WSIZE_256	(0x100u)
+#define TXBA_DATA_WSIZE_128	(0x80u)
+#define TXBA_DATA_WSIZE_64	(0x40u)
+
+/* HW optimisation to generate bitmap based on start SSN & max SSN */
+#define TXBA_DATA_HW_CONST	(0xfu << 12)
+
+/* Register D11_RXE_BA_LEN bits (ge80) */
+#define RXE_BA_LEN_RXBA_64	(0x0u)
+#define RXE_BA_LEN_RXBA_128	(0x1u)
+#define RXE_BA_LEN_RXBA_256	(0x2u)
+#define RXE_BA_LEN_TID0_SHIFT	(0u)
+#define RXE_BA_LEN_TID1_SHIFT	(2u)
+#define RXE_BA_LEN_TID2_SHIFT	(4u)
+#define RXE_BA_LEN_TID3_SHIFT	(6u)
+#define RXE_BA_LEN_TID4_SHIFT	(8u)
+#define RXE_BA_LEN_TID5_SHIFT	(10u)
+#define RXE_BA_LEN_TID6_SHIFT	(12u)
+#define RXE_BA_LEN_TID7_SHIFT	(14u)
+
+/* Register D11_RXE_BA_LEN_ENC bits (ge80) */
+#define RXE_BA_LEN_ENC_BA32_VAL	(0x3u << 0u)
+#define RXE_BA_LEN_ENC_BA64_VAL	(0x0u << 2u)
+#define RXE_BA_LEN_ENC_BA128_VAL	(0x1u << 4u)
+#define RXE_BA_LEN_ENC_BA256_VAL	(0x2u << 6u)
+
+/* Register D11_RXE_TXBA_CTL2 (ge80) */
+#define RXE_TXBA_CTL2_CONIG_SINGLE_TID	(0x0u << 0u)
+#define RXE_TXBA_CTL2_CONIG_ALL_TID	(0x1u << 0u)
+#define RXE_TXBA_CTL2_SEL_TID0	(0x0u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID1	(0x1u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID2	(0x2u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID3	(0x3u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID4	(0x4u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID5	(0x5u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID6	(0x6u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID7	(0x7u << 12u)
+
+/**
+ * Special Notes
+ * #1: dma_flags, fifo
+ *	These two 8-bit fields remain in the same order regardless of
+ *	processor byte order.
+ * #2: pktclass
+ *	16-bit bitmap holding the result of Packet (or Flow) Classification.
+ *
+ *	0	: Flow ID Different
+ *	1,2,3	: A1, A2, A3 Different
+ *	4	: TID Different
+ *	5, 6	: DA, SA from AMSDU SubFrame Different
+ *	7	: FC Different
+ *	8	: AMPDU boundary
+ *	9 - 15	: Reserved
+ * #3: errflags
+ *	These bits indicate specific errors detected by the HW on the Rx Path.
+ *	However, these are relevant for the Last MSDU Status only.
+ *
+ *	Whenever there is an error at any MSDU, HW treats it as the last
+ *	MSDU and sends out the last MSDU status.
+ */
+
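The pktclass bitmap described in note #2 is designed for cheap bit tests on the rx fast path. A small self-contained sketch; the PKTCLASS_* names are hypothetical, derived from the bit table above rather than defined by this header:

    #include <stdint.h>

    /* Hypothetical names for the pktclass bits listed in special note #2. */
    #define PKTCLASS_FLOWID_DIFF  (1u << 0)  /* Flow ID Different */
    #define PKTCLASS_TID_DIFF     (1u << 4)  /* TID Different */
    #define PKTCLASS_AMPDU_BOUND  (1u << 8)  /* AMPDU boundary */

    /* Nonzero when cached per-flow rx state must not be reused for this
     * frame, i.e. the classifier saw a new flow, TID or AMPDU boundary.
     */
    static int rx_needs_reclassify(uint16_t pktclass)
    {
        return (pktclass & (PKTCLASS_FLOWID_DIFF |
                            PKTCLASS_TID_DIFF |
                            PKTCLASS_AMPDU_BOUND)) != 0;
    }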
+#define D11RXHDR_HW_STATUS_GE80 \
+	uint16 RxFrameSize;	/**< Actual byte length of the frame data received */ \
+	/* For comments see special note #1 above */ \
+	uint8 dma_flags;	/**< bit 0 indicates short or long rx status. 1 == short. */ \
+	uint8 fifo;		/**< rx fifo number */ \
+	\
+	uint16 mrxs;		/**< MAC Rx Status */ \
+	uint16 RxFrameSize0;	/**< rxframesize for fifo-0 (in bytes). */ \
+	uint16 HdrConvSt;	/**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */ \
+	uint16 pktclass; \
+	uint32 filtermap;	/**< 32 bit bitmap indicates which "Filters" have matched. */ \
+	/* For comments see special note #2 above */ \
+	uint16 flowid;		/**< result of Flow ID Look Up performed by the HW. */ \
+	/* For comments see special note #3 above */ \
+	uint16 errflags;
+
+#define D11RXHDR_UCODE_STATUS_GE80 \
+	/**< Ucode Generated Status (16 Bytes) */ \
+	uint16 RxStatus1;	/**< MAC Rx Status */ \
+	uint16 RxStatus2;	/**< extended MAC Rx status */ \
+	uint16 RxChan;		/**< Rx channel info or chanspec */ \
+	uint16 AvbRxTimeL;	/**< AVB RX timestamp low16 */ \
+	uint16 AvbRxTimeH;	/**< AVB RX timestamp high16 */ \
+	uint16 RxTSFTime;	/**< Lower 16 bits of Rx timestamp */ \
+	uint16 RxTsfTimeH;	/**< Higher 16 bits of Rx timestamp */ \
+	uint16 MuRate;		/**< MU rate info (bit3:0 MCS, bit6:4 NSTS) */
+
+#define D11RXHDR_HW_STATUS_GE87_1 /**< HW Generated 24 bytes RX Status */ \
+	D11RXHDR_HW_STATUS_GE80 /**< First 20 bytes are same as mac rev >= 80 */ \
+	uint16 roe_hw_sts;	/**< ROE HW status */ \
+	uint16 roe_err_flags;	/**< ROE error flags */
+
+#define D11RXHDR_UCODE_STATUS_GE87_1 /**< Ucode Generated Status (24 Bytes) */ \
+	uint16 RxStatus1;	/**< MAC Rx Status */ \
+	uint16 RxStatus2;	/**< extended MAC Rx status */ \
+	uint16 RxChan;		/**< Rx channel info or chanspec */ \
+	uint16 MuRate;		/**< MU rate info (bit3:0 MCS, bit6:4 NSTS) */ \
+	uint32 AVBRxTime;	/**< 32 bit AVB timestamp */ \
+	uint32 TSFRxTime;	/**< 32 bit TSF timestamp */ \
+	uint64 PTMRxTime;	/**< 64 bit PTM timestamp */
+
+/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - rev87.1 */
+typedef struct d11rxhdrshort_ge87_1 d11rxhdrshort_ge87_1_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_ge87_1 {
+
+	D11RXHDR_HW_STATUS_GE87_1
+
+} BWL_POST_PACKED_STRUCT;
+
+/** Mid version of receive frame status. Only used for MPDU of AMPDU - rev87.1 */
+typedef struct d11rxhdrmid_ge87_1 d11rxhdrmid_ge87_1_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrmid_ge87_1 {
+
+	D11RXHDR_HW_STATUS_GE87_1
+	D11RXHDR_UCODE_STATUS_GE87_1
+} BWL_POST_PACKED_STRUCT;
+
+/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - rev80 */
+typedef struct d11rxhdrshort_ge80 d11rxhdrshort_ge80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_ge80 {
+
+	D11RXHDR_HW_STATUS_GE80
+
+} BWL_POST_PACKED_STRUCT;
+
+/** Mid version of receive frame status. Only used for MPDU of AMPDU - rev80 */
+typedef struct d11rxhdrmid_ge80 d11rxhdrmid_ge80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrmid_ge80 {
+
+	D11RXHDR_HW_STATUS_GE80
+	D11RXHDR_UCODE_STATUS_GE80
+
+} BWL_POST_PACKED_STRUCT;
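These packed layouts are DMA-facing, so their sizes must stay in lockstep with the byte counts quoted by the HW_RXHDR_LEN_* and MID_RXHDR_LEN_* macros further down (20/36 bytes for rev80, 24/48 bytes for rev87.1). A sketch of checking that invariant at compile time, assuming C11 static_assert in place of the driver's own STATIC_ASSERT machinery:

    #include <assert.h>  /* C11 static_assert */

    /* Illustrative only: pin the packed rx-status sizes to the
     * documented byte counts for the struct types defined above.
     */
    static_assert(sizeof(d11rxhdrshort_ge80_t) == 20,
                  "HW rx status (ge80) must be 20 bytes");
    static_assert(sizeof(d11rxhdrmid_ge80_t) == 36,
                  "HW + ucode rx status (ge80) must be 36 bytes");
    static_assert(sizeof(d11rxhdrshort_ge87_1_t) == 24,
                  "HW rx status (ge87.1) must be 24 bytes");
    static_assert(sizeof(d11rxhdrmid_ge87_1_t) == 48,
                  "HW + ucode rx status (ge87.1) must be 48 bytes");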
+
+/** Receive Frame Data Header - pre80 */
+typedef struct d11rxhdr_lt80 d11rxhdr_lt80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_lt80 {
+	uint16 RxFrameSize;	/**< Actual byte length of the frame data received */
+
+	/**
+	 * These two 8-bit fields remain in the same order regardless of
+	 * processor byte order.
+	 */
+	uint8 dma_flags;	/* bit 0 indicates short or long rx status. 1 == short. */
+	uint8 fifo;		/* rx fifo number */
+
+	uint16 PhyRxStatus_0;	/**< PhyRxStatus 15:0 */
+	uint16 PhyRxStatus_1;	/**< PhyRxStatus 31:16 */
+	uint16 PhyRxStatus_2;	/**< PhyRxStatus 47:32 */
+	uint16 PhyRxStatus_3;	/**< PhyRxStatus 63:48 */
+	uint16 PhyRxStatus_4;	/**< PhyRxStatus 79:64 */
+	uint16 PhyRxStatus_5;	/**< PhyRxStatus 95:80 */
+	uint16 RxStatus1;	/**< MAC Rx Status */
+	uint16 RxStatus2;	/**< extended MAC Rx status */
+
+	/**
+	 * - RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY
+	 */
+	uint16 RxTSFTime;
+
+	uint16 RxChan;		/**< Rx channel info or chanspec */
+	uint16 RxFrameSize0;	/**< size of rx-frame in fifo-0 in case frame is copied to fifo-1 */
+	uint16 HdrConvSt;	/**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */
+	uint16 AvbRxTimeL;	/**< AVB RX timestamp low16 */
+	uint16 AvbRxTimeH;	/**< AVB RX timestamp high16 */
+	uint16 MuRate;		/**< MU rate info (bit3:0 MCS, bit6:4 NSTS) */
+	/**
+	 * These bits indicate specific errors detected by the HW on the Rx Path.
+	 * However, these are relevant for the Last MSDU Status only.
+	 *
+	 * Whenever there is an error at any MSDU, HW treats it as the last
+	 * MSDU and sends out the last MSDU status.
+	 */
+	uint16 errflags;
+} BWL_POST_PACKED_STRUCT;
+
+#define N_PRXS_GE80	16	/* Total number of PhyRx status words for corerev >= 80 */
+#define N_PRXS_LT80	6	/* Total number of PhyRx status words for corerev < 80 */
+
+/* number of PhyRx status words newly added for (corerev >= 80) */
+#define N_PRXS_REM_GE80	(N_PRXS_GE80 - N_PRXS_LT80)
+
+/** RX Hdr definition - rev80 */
+typedef struct d11rxhdr_ge80 d11rxhdr_ge80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_ge80 {
+	/**
+	 * Even though rxhdr can be in short or long format, always declare it here
+	 * to be in long format. So the offsets for the other fields are always the same.
+	 */
+
+	/**< HW Generated Status (20 Bytes) */
+	D11RXHDR_HW_STATUS_GE80
+	D11RXHDR_UCODE_STATUS_GE80
+
+	/**< PHY Generated Status (32 Bytes) */
+	uint16 PhyRxStatus_0;	/**< PhyRxStatus 15:0 */
+	uint16 PhyRxStatus_1;	/**< PhyRxStatus 31:16 */
+	uint16 PhyRxStatus_2;	/**< PhyRxStatus 47:32 */
+	uint16 PhyRxStatus_3;	/**< PhyRxStatus 63:48 */
+	uint16 PhyRxStatus_4;	/**< PhyRxStatus 79:64 */
+	uint16 PhyRxStatus_5;	/**< PhyRxStatus 95:80 */
+	uint16 phyrxs_rem[N_PRXS_REM_GE80];	/**< 20 bytes of remaining prxs (corerev >= 80) */
+	/* Currently only 6 words are being pushed out of uCode: 6, 9, 16, 17, 21, 23 */
+} BWL_POST_PACKED_STRUCT;
+
+#define N_PRXS_GE85	32u	// total number of PhyRxStatus BYTEs for rev >= 85
+
+typedef struct d11rxhdr_ge87_1 d11rxhdr_ge87_1_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_ge87_1 {
+	/**
+	 * Even though rxhdr can be in short or long format, always declare it here
+	 * to be in long format. So the offsets for the other fields are always the same.
+	 */
+
+	D11RXHDR_HW_STATUS_GE87_1	/**< HW Generated Status (24 Bytes) */
+	D11RXHDR_UCODE_STATUS_GE87_1	/**< uCode Generated Status (24 Bytes) */
+	uint8 PHYRXSTATUS[N_PRXS_GE85];	/**< PHY Generated Status (32 Bytes) */
+} BWL_POST_PACKED_STRUCT;
+
+/** RX Hdr definition - rev85 */
+typedef struct d11rxhdr_ge85 d11rxhdr_ge85_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_ge85 {
+	/**
+	 * Even though rxhdr can be in short or long format, always declare it here
+	 * to be in long format. So the offsets for the other fields are always the same.
+ */ + + /**< HW Generated Status (20 Bytes) */ + D11RXHDR_HW_STATUS_GE80 + D11RXHDR_UCODE_STATUS_GE80 + + /**< PHY Generated Status (32 Bytes) */ + uint8 PHYRXSTATUS[N_PRXS_GE85]; +} BWL_POST_PACKED_STRUCT; + +/* A wrapper structure for all versions of d11rxh short structures */ +typedef union d11rxhdrshort { + d11rxhdrshort_rev61_1_t rev61_1; + d11rxhdrshort_lt80_t lt80; + d11rxhdrshort_ge80_t ge80; + d11rxhdrshort_ge87_1_t ge87_1; +} d11rxhdrshort_t; + +/* A wrapper structure for all versions of d11rxh mid structures */ +typedef union d11rxhdrmid { + d11rxhdrmid_ge80_t ge80; + d11rxhdrmid_ge87_1_t ge87_1; +} d11rxhdrmid_t; + +/* A wrapper structure for all versions of d11rxh structures */ +typedef union d11rxhdr { + d11rxhdr_lt80_t lt80; + d11rxhdr_ge80_t ge80; + d11rxhdr_ge85_t ge85; + d11rxhdr_ge87_1_t ge87_1; +} d11rxhdr_t; + +#define D11RXHDRSHORT_GE87_1_ACCESS_REF(srxh, member) \ + (&((((d11rxhdrshort_t *)(srxh))->ge87_1).member)) + +#define D11RXHDRMID_GE87_1_ACCESS_REF(mrxh, member) \ + (&((((d11rxhdrmid_t *)(mrxh))->ge87_1).member)) + +#define D11RXHDRSHORT_GE87_1_ACCESS_VAL(srxh, member) \ + ((((d11rxhdrshort_t *)(srxh))->ge87_1).member) + +#define D11RXHDRMID_GE87_1_ACCESS_VAL(mrxh, member) \ + ((((d11rxhdrmid_t *)(mrxh))->ge87_1).member) + +#define D11RXHDR_GE87_1_ACCESS_REF(rxh, member) \ + (&((rxh)->ge87_1).member) + +#define D11RXHDR_GE87_1_ACCESS_VAL(rxh, member) \ + (((rxh)->ge87_1).member) + +#define D11RXHDR_GE87_1_SET_VAL(rxh, member, value) \ + (((rxh)->ge87_1).member = value) + +#define D11RXHDRSHORT_GE80_ACCESS_REF(srxh, member) \ + (&((((d11rxhdrshort_t *)(srxh))->ge80).member)) + +#define D11RXHDRMID_GE80_ACCESS_REF(mrxh, member) \ + (&((((d11rxhdrmid_t *)(mrxh))->ge80).member)) + +#define D11RXHDRSHORT_LT80_ACCESS_REF(srxh, member) \ + (&((((d11rxhdrshort_t *)(srxh))->lt80).member)) + +#define D11RXHDRSHORT_GE80_ACCESS_VAL(srxh, member) \ + ((((d11rxhdrshort_t *)(srxh))->ge80).member) + +#define D11RXHDRMID_GE80_ACCESS_VAL(mrxh, member) \ + ((((d11rxhdrmid_t *)(mrxh))->ge80).member) + +#define D11RXHDRSHORT_LT80_ACCESS_VAL(srxh, member) \ + ((((d11rxhdrshort_t *)(srxh))->lt80).member) + +#define D11RXHDR_GE80_ACCESS_REF(rxh, member) \ + (&((rxh)->ge80).member) + +#define D11RXHDR_LT80_ACCESS_REF(rxh, member) \ + (&((rxh)->lt80).member) + +#define D11RXHDR_GE80_ACCESS_VAL(rxh, member) \ + (((rxh)->ge80).member) + +#define D11RXHDR_GE80_SET_VAL(rxh, member, value) \ + (((rxh)->ge80).member = value) + +#define D11RXHDR_LT80_ACCESS_VAL(rxh, member) \ + (((rxh)->lt80).member) + +#define D11RXHDR_LT80_SET_VAL(rxh, member, value) \ + (((rxh)->lt80).member = value) + +/** For accessing members of d11rxhdrshort_t by reference (address of members) */ +#define D11RXHDRSHORT_ACCESS_REF(srxh, corerev, corerev_minor, member) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDRSHORT_GE87_1_ACCESS_REF(srxh, member) : \ + D11REV_GE(corerev, 80) ? D11RXHDRSHORT_GE80_ACCESS_REF(srxh, member) : \ + D11RXHDRSHORT_LT80_ACCESS_REF(srxh, member)) + +/** For accessing members of d11rxhdrshort_t by value (only value stored inside members accessed) */ +#define D11RXHDRSHORT_ACCESS_VAL(srxh, corerev, corerev_minor, member) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDRSHORT_GE87_1_ACCESS_VAL(srxh, member) : \ + D11REV_GE(corerev, 80) ? 
D11RXHDRSHORT_GE80_ACCESS_VAL(srxh, member) : \ + D11RXHDRSHORT_LT80_ACCESS_VAL(srxh, member)) + +/** For accessing members of d11rxhdrmid_t by reference (address of members) */ +#define D11RXHDRMID_ACCESS_REF(mrxh, corerev, corerev_minor, member) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDRMID_GE87_1_ACCESS_REF(mrxh, member) : \ + D11REV_GE(corerev, 80) ? D11RXHDRMID_GE80_ACCESS_REF(mrxh, member) : NULL) + +/** For accessing members of d11rxhdrmid_t by value (only value stored inside members accessed) */ +#define D11RXHDRMID_ACCESS_VAL(mrxh, corerev, corerev_minor, member) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDRMID_GE87_1_ACCESS_VAL(mrxh, member) : \ + D11REV_GE(corerev, 80) ? D11RXHDRMID_GE80_ACCESS_VAL(mrxh, member) : NULL) + +/** For accessing members of d11rxhdr_t by reference (address of members) */ +#define D11RXHDR_ACCESS_REF(rxh, corerev, corerev_minor, member) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_REF(rxh, member) : \ + D11REV_GE(corerev, 80) ? D11RXHDR_GE80_ACCESS_REF(rxh, member) : \ + D11RXHDR_LT80_ACCESS_REF(rxh, member)) + +/** For accessing members of d11rxhdr_t by value (only value stored inside members accessed) */ +#define D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, member) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, member) : \ + D11REV_GE(corerev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, member) : \ + D11RXHDR_LT80_ACCESS_VAL(rxh, member)) + +/** For accessing members of d11rxhdr_t by value (only value stored inside members accessed) */ +#define D11RXHDR_SET_VAL(rxh, corerev, corerev_minor, member, value) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_SET_VAL(rxh, member, value) : \ + D11REV_GE(corerev, 80) ? D11RXHDR_GE80_SET_VAL(rxh, member, value) : \ + D11RXHDR_LT80_SET_VAL(rxh, member, value)) + +#define D11RXHDR_PTM(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, PTMRxTime) : 0) + +#define D11RXHDR_AVB(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + (uint32)D11RXHDR_GE87_1_ACCESS_VAL(rxh, AVBRxTime) : \ + D11REV_GE(corerev, 80) ? ((uint32)D11RXHDR_GE80_ACCESS_VAL(rxh, AvbRxTimeL) | \ + ((uint32)D11RXHDR_GE80_ACCESS_VAL(rxh, AvbRxTimeH) << 16u)) : \ + ((uint32)D11RXHDR_LT80_ACCESS_VAL(rxh, AvbRxTimeL) | \ + ((uint32)D11RXHDR_LT80_ACCESS_VAL(rxh, AvbRxTimeH) << 16u))) + +#define D11RXHDR_TSF_REF(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_REF(rxh, TSFRxTime) : \ + D11REV_GE(corerev, 80) ? (uint32*)D11RXHDR_GE80_ACCESS_REF(rxh, RxTSFTime) : \ + (uint32*)D11RXHDR_LT80_ACCESS_REF(rxh, RxTSFTime)) + +#define D11RXHDR_TSF(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, TSFRxTime) : \ + D11REV_GE(corerev, 80) ? 
D11RXHDR_GE80_ACCESS_VAL(rxh, RxTSFTime) : \ + D11RXHDR_LT80_ACCESS_VAL(rxh, RxTSFTime)) + +#define RXS_SHORT_ENAB(rev) (D11REV_GE(rev, 64) || \ + D11REV_IS(rev, 60) || \ + D11REV_IS(rev, 62)) + +#define RXS_MID_ENAB(rev) (D11REV_GE(rev, 80)) +#define RXS_LONG_ENAB(rev) (D11REV_GE(rev, 80)) + +#define IS_D11RXHDRSHORT(rxh, rev, rev_min) ((RXS_SHORT_ENAB(rev) && \ + ((D11RXHDR_ACCESS_VAL((rxh), (rev), (rev_min), dma_flags)) & RXS_SHORT_MASK)) != 0) + +#define IS_D11RXHDRMID(rxh, rev, rev_min) ((RXS_MID_ENAB(rev) && \ + ((D11RXHDR_ACCESS_VAL((rxh), (rev), (rev_min), dma_flags)) == 0))) + +#define IS_D11RXHDRLONG(rxh, rev, rev_min) \ + ((!(IS_D11RXHDRSHORT((rxh), (rev), (rev_min)))) && \ + (!(IS_D11RXHDRMID((rxh), (rev), (rev_min))))) + +#define D11RXHDR_HAS_UCODE_STATUS(rxhdr, corerev, corerev_minor) \ + ((!IS_D11RXHDRSHORT((rxhdr), (corerev), (corerev_minor))) || \ + (IS_D11RXHDRMID((rxhdr), (corerev), (corerev_minor)))) + +#define IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + (D11RXHDR_GE87_1_ACCESS_VAL(rxh, dma_flags) == RXS_PHYRXST_VALID_REV_GE80) : \ + D11REV_GE(corerev, 80) ? \ + (D11RXHDR_GE80_ACCESS_VAL(rxh, dma_flags) == RXS_PHYRXST_VALID_REV_GE80) : \ + (D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus2) & RXS_PHYRXST_VALID)) + +#define RXHDR_GET_PAD_LEN(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \ + ((((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \ + D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_PBPRES) != 0) ? HDRCONV_PAD : 0) : \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \ + (((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs) & \ + RXSS_PBPRES) != 0) ? HDRCONV_PAD : 0) : \ + (((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus1) & RXS_PBPRES) != 0) ? HDRCONV_PAD : 0))) + +#define RXHDR_GET_PAD_PRES(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \ + (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \ + D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_PBPRES) != 0) : \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \ + ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs) & \ + RXSS_PBPRES) != 0) : \ + (((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus1) & RXS_PBPRES) != 0)))) + +#define RXHDR_GET_CONV_TYPE(rxh, corerev, corerev_minor) \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \ + ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, \ + HdrConvSt) & HDRCONV_ETH_FRAME) != 0) : ((D11RXHDR_ACCESS_VAL(rxh, \ + corerev, corerev_minor, HdrConvSt) & HDRCONV_ETH_FRAME) != 0)) + +#define RXHDR_GET_ROE_ERR_STS(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_err_flags))) : 0) + +#define RXHDR_GET_ROE_L3_TYPE(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L3_PROT_TYPE_MASK) : 0) + +#define RXHDR_GET_ROE_L4_TYPE(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L4_PROT_TYPE_MASK) : 0) + +#define RXHDR_GET_ROE_L3_STATUS(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L3_CHKSUM_STATUS_MASK) : 0) + +#define RXHDR_GET_ROE_L4_STATUS(rxh, corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? 
\ + ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L4_CHKSUM_STATUS_MASK) : 0) + +#define RXHDR_GET_AGG_TYPE(rxh, corerev, corerev_minor) \ + (D11REV_GE(corerev, 80) ? \ + (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \ + D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_AGGTYPE_MASK) >> RXSS_AGGTYPE_SHIFT) : \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \ + ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs) \ + & RXSS_AGGTYPE_MASK) >> RXSS_AGGTYPE_SHIFT) : \ + ((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus2) & RXS_AGGTYPE_MASK) >> RXS_AGGTYPE_SHIFT))) + +#define RXHDR_GET_PBPRS_REF(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_REF(rxh, mrxs) : \ + D11RXHDR_GE80_ACCESS_REF(rxh, mrxs)) : \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \ + ((D11RXHDRSHORT_ACCESS_REF(rxh, corerev, corerev_minor, mrxs))) : \ + (D11RXHDR_LT80_ACCESS_REF(rxh, RxStatus1)))) + +#define RXHDR_GET_IS_DEFRAG(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \ + (D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxStatus1) & RXS_IS_DEFRAG) : 0) + +#define SET_RXHDR_PBPRS_REF_VAL(rxh, corerev, corerev_minor, val) \ + (D11REV_GE(corerev, 80) ? \ + (*val |= RXSS_PBPRES) : \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? (*val |= RXSS_PBPRES) : \ + (*val |= RXS_PBPRES))) + +#define CLEAR_RXHDR_PBPRS_REF_VAL(rxh, corerev, corerev_minor, val) \ + (D11REV_GE(corerev, 80) ? \ + (*val &= ~RXSS_PBPRES) : \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? (*val &= ~RXSS_PBPRES) : \ + (*val &= ~RXS_PBPRES))) + +#define RXHDR_GET_AMSDU(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \ + (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \ + D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_AMSDU_MASK) != 0) : \ + (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \ + ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, \ + mrxs) & RXSS_AMSDU_MASK) != 0) : \ + ((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus2) & RXS_AMSDU_MASK) != 0))) + +#ifdef BCMDBG +#define RXHDR_GET_MSDU_COUNT(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \ + (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \ + D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \ + D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_MSDU_CNT_MASK) >> RXSS_MSDU_CNT_SHIFT) : \ + IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? 
\
+	(((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs)) & \
+	RXSS_MSDU_CNT_MASK) >> RXSS_MSDU_CNT_SHIFT) : 0)
+
+#endif /* BCMDBG */
+
+/** Length of HW RX status in RxStatus */
+#define HW_RXHDR_LEN_REV_GE87_1	(sizeof(d11rxhdrshort_ge87_1_t))	/* 24 bytes */
+#define HW_RXHDR_LEN_REV_GE80	(sizeof(d11rxhdrshort_ge80_t))		/* 20 bytes */
+#define HW_RXHDR_LEN_REV_LT80	(sizeof(d11rxhdrshort_lt80_t))		/* 12 bytes */
+#define HW_RXHDR_LEN_REV_61_1	(sizeof(d11rxhdrshort_rev61_1_t))	/* 16 bytes */
+
+/** Length of HW RX status + ucode Rx status in RxStatus */
+#define MID_RXHDR_LEN_REV_GE87_1	(sizeof(d11rxhdrmid_ge87_1_t))	/* 48 bytes */
+#define MID_RXHDR_LEN_REV_GE80	(sizeof(d11rxhdrmid_ge80_t))	/* 36 bytes */
+
+/** Length of HW RX status + ucode RX status + PHY RX status + padding (if alignment is needed) */
+#define D11_RXHDR_LEN_REV_GE87_1	(sizeof(d11rxhdr_ge87_1_t))	/* 80 bytes */
+#define D11_RXHDR_LEN_REV_GE80	(sizeof(d11rxhdr_ge80_t))	/* 68 bytes */
+#define D11_RXHDR_LEN_REV_LT80	(sizeof(d11rxhdr_lt80_t))	/* 36 bytes */
+
+#define HW_RXHDR_LEN(corerev, corerev_minor) \
+	(D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? HW_RXHDR_LEN_REV_GE87_1 : \
+	D11REV_GE(corerev, 80) ? HW_RXHDR_LEN_REV_GE80 : HW_RXHDR_LEN_REV_LT80)
+
+#define MID_RXHDR_LEN(corerev, corerev_minor) \
+	(D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? MID_RXHDR_LEN_REV_GE87_1 : \
+	D11REV_GE(corerev, 80) ? \
+	MID_RXHDR_LEN_REV_GE80 : NULL)
+
+#define D11_RXHDR_LEN(corerev, corerev_minor) \
+	(D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? D11_RXHDR_LEN_REV_GE87_1 : \
+	D11REV_GE(corerev, 80) ? D11_RXHDR_LEN_REV_GE80 : \
+	D11_RXHDR_LEN_REV_LT80)
+
+#define FRAMELEN(corerev, corerev_minor, rxh) \
+	D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxFrameSize)
+
+#define RXS_SHORT_MASK	0x01	/**< Short vs full rx status in dma_flags field of d11rxhdr */
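Together these macros give an rx path everything it needs to strip the status prefix: classify the header from dma_flags, then skip the matching length. A minimal sketch built only from the macros defined above (rx_status_len() itself is a hypothetical helper name):

    /* Return the number of status bytes in front of the 802.11 frame. */
    static uint rx_status_len(d11rxhdr_t *rxh, uint corerev, uint corerev_minor)
    {
        if (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor))
            return HW_RXHDR_LEN(corerev, corerev_minor);   /* HW status only */
        if (IS_D11RXHDRMID(rxh, corerev, corerev_minor))
            return MID_RXHDR_LEN(corerev, corerev_minor);  /* HW + ucode */
        return D11_RXHDR_LEN(corerev, corerev_minor);      /* HW + ucode + PHY */
    }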
+/** Validate chip-specific phychain info for MCSSQ SNR.
+ * Should sync with uCode reporting.
+ * Please add conditions in descending order to avoid any wrong skip.
+ * Note: this macro can be removed once NEWT no longer needs 4368a0.
+ */
+#define IS_MCSSQ_ANT3_VALID_GE80(corerev, corerev_minor) \
+		(D11REV_IS(corerev, 83) && (D11MINORREV_IS(corerev_minor, 1)))
+
+/* Header conversion status register bit fields */
+#define HDRCONV_USR_ENAB	0x0001
+#define HDRCONV_ENAB		0x0100
+#define HDRCONV_ETH_FRAME	0x0200
+#define HDRCONV_STATUS_VALID	0x8000
+
+#define ROE_L3_PROT_TYPE_IPV4	(0x10u)
+#define ROE_L3_PROT_TYPE_IPV6	(0x20u)
+#define ROE_L3_PROT_TYPE_MASK	(0x30u)
+#define ROE_L3_PROT_TYPE_SHIFT	(4u)
+
+#define ROE_L4_PROT_TYPE_TCP	(0x40u)
+#define ROE_L4_PROT_TYPE_UDP	(0x80u)
+#define ROE_L4_PROT_TYPE_MASK	(0xC0u)
+#define ROE_L4_PROT_TYPE_SHIFT	(6u)
+
+#define ROE_L3_CHKSUM_STATUS_FAIL	(0x100u)
+#define ROE_L3_CHKSUM_STATUS_SUCCESS	(0x200u)
+#define ROE_L3_CHKSUM_STATUS_MASK	(0x300u)
+#define ROE_L3_CHKSUM_STATUS_SHIFT	(8u)
+
+#define ROE_L4_CHKSUM_STATUS_FAIL	(0x400u)
+#define ROE_L4_CHKSUM_STATUS_SUCCESS	(0x800u)
+#define ROE_L4_CHKSUM_STATUS_MASK	(0xC00u)
+#define ROE_L4_CHKSUM_STATUS_SHIFT	(10u)
+
+/** NOTE: Due to a precommit issue, _d11_autophyrxsts_ will be moved
+ * to a separate file when the 4387 trunk build is stable
+ */
+#ifndef _d11_autophyrxsts_
+#define _d11_autophyrxsts_
+
+#define APRXS_WD0_L_EN_GE85	1u
+#define APRXS_WD0_H_EN_GE85	1u
+#define APRXS_WD1_L_EN_GE85	1u
+#define APRXS_WD1_H_EN_GE85	1u
+#define APRXS_WD2_L_EN_GE85	1u
+#define APRXS_WD2_H_EN_GE85	1u
+#define APRXS_WD3_L_EN_GE85	1u
+#define APRXS_WD3_H_EN_GE85	0u	// DO NOT ENABLE WD3_H
+#define APRXS_WD4_L_EN_GE85	1u
+#define APRXS_WD4_H_EN_GE85	1u
+#define APRXS_WD5_L_EN_GE85	1u
+#define APRXS_WD5_H_EN_GE85	1u
+#define APRXS_WD6_L_EN_GE85	0u
+#define APRXS_WD6_H_EN_GE85	0u
+#define APRXS_WD7_L_EN_GE85	0u
+#define APRXS_WD7_H_EN_GE85	0u
+#define APRXS_WD8_L_EN_GE85	0u
+#define APRXS_WD8_H_EN_GE85	1u
+#define APRXS_WD9_L_EN_GE85	0u
+#define APRXS_WD9_H_EN_GE85	0u
+#define APRXS_WD10_L_EN_GE85	0u
+#define APRXS_WD10_H_EN_GE85	0u
+#define APRXS_WD11_L_EN_GE85	0u
+#define APRXS_WD11_H_EN_GE85	0u
+#define APRXS_WD12_L_EN_GE85	0u
+#define APRXS_WD12_H_EN_GE85	0u
+#define APRXS_WD13_L_EN_GE85	0u
+#define APRXS_WD13_H_EN_GE85	0u
+#define APRXS_WD14_L_EN_GE85	0u
+#define APRXS_WD14_H_EN_GE85	0u
+#define APRXS_WD15_L_EN_GE85	0u
+#define APRXS_WD15_H_EN_GE85	0u
+#define APRXS_WD16_L_EN_GE85	1u
+#define APRXS_WD16_H_EN_GE85	0u
+#define APRXS_WD17_L_EN_GE85	0u
+#define APRXS_WD17_H_EN_GE85	0u
+#define APRXS_WD18_L_EN_GE85	1u
+#define APRXS_WD18_H_EN_GE85	0u
+#define APRXS_WD19_L_EN_GE85	0u
+#define APRXS_WD19_H_EN_GE85	0u
+#define APRXS_WD20_L_EN_GE85	1u
+#define APRXS_WD20_H_EN_GE85	1u
+#define APRXS_WD21_L_EN_GE85	0u
+#define APRXS_WD21_H_EN_GE85	1u
+#define APRXS_WD22_L_EN_GE85	1u
+#define APRXS_WD22_H_EN_GE85	1u
+#define APRXS_WD23_L_EN_GE85	1u
+#define APRXS_WD23_H_EN_GE85	1u
+#define APRXS_WD24_L_EN_GE85	0u
+#define APRXS_WD24_H_EN_GE85	0u
+#define APRXS_WD25_L_EN_GE85	0u
+#define APRXS_WD25_H_EN_GE85	0u
+
+enum {
+	APRXS_WD0_L_SHIFT = 0,	// frameType, unsupportedRate, band, lostCRS, shortPreamble
+	APRXS_WD0_H_SHIFT,	// PLCPViolation, MFCRSFired, ACCRSFired, MUPPDU, OBSSStat
+	APRXS_WD1_L_SHIFT,	// coremask, antcfg,
+	APRXS_WD1_H_SHIFT,	// BWclassification
+	APRXS_WD2_L_SHIFT,	// RxPwrAnt0
+	APRXS_WD2_H_SHIFT,	// RxPwrAnt1
+	APRXS_WD3_L_SHIFT,	// RxPwrAnt2
+	APRXS_WD3_H_SHIFT,	// RxPwrAnt3, OCL
+	APRXS_WD4_L_SHIFT,	// RSSI fractional bits
+	APRXS_WD4_H_SHIFT,	// AGC type, ACI mitigation state, ClipCount, DynBWInNonHT
+	APRXS_WD5_L_SHIFT,	// MCSSQSNRCore0
+	APRXS_WD5_H_SHIFT,	// MCSSQSNRCore1
+	APRXS_WD6_L_SHIFT,	// MCSSQSNRCore2
+	APRXS_WD6_H_SHIFT,	//
MCSSQSNRCore3, OCL 1 + APRXS_WD7_L_SHIFT, // MUIntProcessType, + APRXS_WD7_H_SHIFT, // coarse freq_offset, packet abort + APRXS_WD8_L_SHIFT = 0, // fine freq offset + APRXS_WD8_H_SHIFT, // ChBWInNonHT, MLUsed, SINRBasedACIDet + APRXS_WD9_L_SHIFT, // SpatialSQCnt + APRXS_WD9_H_SHIFT, // packet gain + APRXS_WD10_L_SHIFT, // RxPwrAntExt + APRXS_WD10_H_SHIFT, // coarse freq_offset of 2nd 80mhz + APRXS_WD11_L_SHIFT, // fine freq_offset of 2nd 80mhz + APRXS_WD11_H_SHIFT, + APRXS_WD12_L_SHIFT, + APRXS_WD12_H_SHIFT, + APRXS_WD13_L_SHIFT, + APRXS_WD13_H_SHIFT, + APRXS_WD14_L_SHIFT, + APRXS_WD14_H_SHIFT, + APRXS_WD15_L_SHIFT, + APRXS_WD15_H_SHIFT, + APRXS_WD16_L_SHIFT = 0, + APRXS_WD16_H_SHIFT, + APRXS_WD17_L_SHIFT, + APRXS_WD17_H_SHIFT, + APRXS_WD18_L_SHIFT, + APRXS_WD18_H_SHIFT, + APRXS_WD19_L_SHIFT, + APRXS_WD19_H_SHIFT, + APRXS_WD20_L_SHIFT, + APRXS_WD20_H_SHIFT, + APRXS_WD21_L_SHIFT, + APRXS_WD21_H_SHIFT, + APRXS_WD22_L_SHIFT, // STA ID + APRXS_WD22_H_SHIFT, // STA ID, NSTS, TXBF, DCM + APRXS_WD23_L_SHIFT, + APRXS_WD23_H_SHIFT, + APRXS_WD24_L_SHIFT = 0, + APRXS_WD24_H_SHIFT, + APRXS_WD25_L_SHIFT, + APRXS_WD25_H_SHIFT +}; + +#define APRXS_WD0_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD0_L_EN_GE85 : 0) +#define APRXS_WD0_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD0_H_EN_GE85 : 0) +#define APRXS_WD1_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD1_L_EN_GE85 : 0) +#define APRXS_WD1_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD1_H_EN_GE85 : 0) +#define APRXS_WD2_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD2_L_EN_GE85 : 0) +#define APRXS_WD2_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD2_H_EN_GE85 : 0) +#define APRXS_WD3_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD3_L_EN_GE85 : 0) +#define APRXS_WD3_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD3_H_EN_GE85 : 0) +#define APRXS_WD4_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD4_L_EN_GE85 : 0) +#define APRXS_WD4_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD4_H_EN_GE85 : 0) +#define APRXS_WD5_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD5_L_EN_GE85 : 0) +#define APRXS_WD5_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD5_H_EN_GE85 : 0) +#define APRXS_WD6_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD6_L_EN_GE85 : 0) +#define APRXS_WD6_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD6_H_EN_GE85 : 0) +#define APRXS_WD7_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD7_L_EN_GE85 : 0) +#define APRXS_WD7_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD7_H_EN_GE85 : 0) +#define APRXS_WD8_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD8_L_EN_GE85 : 0) +#define APRXS_WD8_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD8_H_EN_GE85 : 0) +#define APRXS_WD9_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD9_L_EN_GE85 : 0) +#define APRXS_WD9_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD9_H_EN_GE85 : 0) +#define APRXS_WD10_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD10_L_EN_GE85 : 0) +#define APRXS_WD10_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD10_H_EN_GE85 : 0) +#define APRXS_WD11_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD11_L_EN_GE85 : 0) +#define APRXS_WD11_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD11_H_EN_GE85 : 0) +#define APRXS_WD12_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD12_L_EN_GE85 : 0) +#define APRXS_WD12_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD12_H_EN_GE85 : 0) +#define APRXS_WD13_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD13_L_EN_GE85 : 0) +#define APRXS_WD13_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD13_H_EN_GE85 : 0) +#define APRXS_WD14_L_EN(rev) ((D11REV_GE(rev, 85)) ? 
\ + APRXS_WD14_L_EN_GE85 : 0) +#define APRXS_WD14_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD14_H_EN_GE85 : 0) +#define APRXS_WD15_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD15_L_EN_GE85 : 0) +#define APRXS_WD15_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD15_H_EN_GE85 : 0) +#define APRXS_WD16_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD16_L_EN_GE85 : 0) +#define APRXS_WD16_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD16_H_EN_GE85 : 0) +#define APRXS_WD17_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD17_L_EN_GE85 : 0) +#define APRXS_WD17_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD17_H_EN_GE85 : 0) +#define APRXS_WD18_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD18_L_EN_GE85 : 0) +#define APRXS_WD18_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD18_H_EN_GE85 : 0) +#define APRXS_WD19_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD19_L_EN_GE85 : 0) +#define APRXS_WD19_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD19_H_EN_GE85 : 0) +#define APRXS_WD20_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD20_L_EN_GE85 : 0) +#define APRXS_WD20_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD20_H_EN_GE85 : 0) +#define APRXS_WD21_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD21_L_EN_GE85 : 0) +#define APRXS_WD21_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD21_H_EN_GE85 : 0) +#define APRXS_WD22_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD22_L_EN_GE85 : 0) +#define APRXS_WD22_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD22_H_EN_GE85 : 0) +#define APRXS_WD23_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD23_L_EN_GE85 : 0) +#define APRXS_WD23_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD23_H_EN_GE85 : 0) +#define APRXS_WD24_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD24_L_EN_GE85 : 0) +#define APRXS_WD24_H_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD24_H_EN_GE85 : 0) +#define APRXS_WD25_L_EN(rev) ((D11REV_GE(rev, 85)) ? \ + APRXS_WD25_L_EN_GE85 : 0) +#define APRXS_WD25_H_EN(rev) ((D11REV_GE(rev, 85)) ? 
\ + APRXS_WD25_H_EN_GE85 : 0) + +#define APRXS_BMAP0(rev) ((APRXS_WD0_L_EN(rev) << APRXS_WD0_L_SHIFT) | \ + (APRXS_WD0_H_EN(rev) << APRXS_WD0_H_SHIFT) |\ + (APRXS_WD1_L_EN(rev) << APRXS_WD1_L_SHIFT) |\ + (APRXS_WD1_H_EN(rev) << APRXS_WD1_H_SHIFT) |\ + (APRXS_WD2_L_EN(rev) << APRXS_WD2_L_SHIFT) |\ + (APRXS_WD2_H_EN(rev) << APRXS_WD2_H_SHIFT) |\ + (APRXS_WD3_L_EN(rev) << APRXS_WD3_L_SHIFT) |\ + (APRXS_WD3_H_EN(rev) << APRXS_WD3_H_SHIFT) |\ + (APRXS_WD4_L_EN(rev) << APRXS_WD4_L_SHIFT) |\ + (APRXS_WD4_H_EN(rev) << APRXS_WD4_H_SHIFT) |\ + (APRXS_WD5_L_EN(rev) << APRXS_WD5_L_SHIFT) |\ + (APRXS_WD5_H_EN(rev) << APRXS_WD5_H_SHIFT) |\ + (APRXS_WD6_L_EN(rev) << APRXS_WD6_L_SHIFT) |\ + (APRXS_WD6_H_EN(rev) << APRXS_WD6_H_SHIFT) |\ + (APRXS_WD7_L_EN(rev) << APRXS_WD7_L_SHIFT) |\ + (APRXS_WD7_H_EN(rev) << APRXS_WD7_H_SHIFT)) + +#define APRXS_BMAP1(rev) ((APRXS_WD8_L_EN(rev) << APRXS_WD8_L_SHIFT) | \ + (APRXS_WD8_H_EN(rev) << APRXS_WD8_H_SHIFT) |\ + (APRXS_WD9_L_EN(rev) << APRXS_WD9_L_SHIFT) |\ + (APRXS_WD9_H_EN(rev) << APRXS_WD9_H_SHIFT) |\ + (APRXS_WD10_L_EN(rev) << APRXS_WD10_L_SHIFT) |\ + (APRXS_WD10_H_EN(rev) << APRXS_WD10_H_SHIFT) |\ + (APRXS_WD11_L_EN(rev) << APRXS_WD11_L_SHIFT) |\ + (APRXS_WD11_H_EN(rev) << APRXS_WD11_H_SHIFT) |\ + (APRXS_WD12_L_EN(rev) << APRXS_WD12_L_SHIFT) |\ + (APRXS_WD12_H_EN(rev) << APRXS_WD12_H_SHIFT) |\ + (APRXS_WD13_L_EN(rev) << APRXS_WD13_L_SHIFT) |\ + (APRXS_WD13_H_EN(rev) << APRXS_WD13_H_SHIFT) |\ + (APRXS_WD14_L_EN(rev) << APRXS_WD14_L_SHIFT) |\ + (APRXS_WD14_H_EN(rev) << APRXS_WD14_H_SHIFT) |\ + (APRXS_WD15_L_EN(rev) << APRXS_WD15_L_SHIFT) |\ + (APRXS_WD15_H_EN(rev) << APRXS_WD15_H_SHIFT)) + +#define APRXS_BMAP2(rev) ((APRXS_WD16_L_EN(rev) << APRXS_WD16_L_SHIFT) | \ + (APRXS_WD16_H_EN(rev) << APRXS_WD16_H_SHIFT) |\ + (APRXS_WD17_L_EN(rev) << APRXS_WD17_L_SHIFT) |\ + (APRXS_WD17_H_EN(rev) << APRXS_WD17_H_SHIFT) |\ + (APRXS_WD18_L_EN(rev) << APRXS_WD18_L_SHIFT) |\ + (APRXS_WD18_H_EN(rev) << APRXS_WD18_H_SHIFT) |\ + (APRXS_WD19_L_EN(rev) << APRXS_WD19_L_SHIFT) |\ + (APRXS_WD19_H_EN(rev) << APRXS_WD19_H_SHIFT) |\ + (APRXS_WD20_L_EN(rev) << APRXS_WD20_L_SHIFT) |\ + (APRXS_WD20_H_EN(rev) << APRXS_WD20_H_SHIFT) |\ + (APRXS_WD21_L_EN(rev) << APRXS_WD21_L_SHIFT) |\ + (APRXS_WD21_H_EN(rev) << APRXS_WD21_H_SHIFT) |\ + (APRXS_WD22_L_EN(rev) << APRXS_WD22_L_SHIFT) |\ + (APRXS_WD22_H_EN(rev) << APRXS_WD22_H_SHIFT) |\ + (APRXS_WD23_L_EN(rev) << APRXS_WD23_L_SHIFT) |\ + (APRXS_WD23_H_EN(rev) << APRXS_WD23_H_SHIFT)) + +#define APRXS_BMAP3(rev) ((APRXS_WD24_L_EN(rev) << APRXS_WD24_L_SHIFT) | \ + (APRXS_WD24_H_EN(rev) << APRXS_WD24_H_SHIFT) |\ + (APRXS_WD25_L_EN(rev) << APRXS_WD25_L_SHIFT) |\ + (APRXS_WD25_H_EN(rev) << APRXS_WD25_H_SHIFT)) +/* byte position */ +#define APRXS_WD0_L_POS(rev) 0u +#define APRXS_WD0_H_POS(rev) (APRXS_WD0_L_POS(rev) + APRXS_WD0_L_EN(rev)) /* 1 */ +#define APRXS_WD1_L_POS(rev) (APRXS_WD0_H_POS(rev) + APRXS_WD0_H_EN(rev)) /* 2 */ +#define APRXS_WD1_H_POS(rev) (APRXS_WD1_L_POS(rev) + APRXS_WD1_L_EN(rev)) /* 3 */ +#define APRXS_WD2_L_POS(rev) (APRXS_WD1_H_POS(rev) + APRXS_WD1_H_EN(rev)) /* 4 */ +#define APRXS_WD2_H_POS(rev) (APRXS_WD2_L_POS(rev) + APRXS_WD2_L_EN(rev)) /* 5 */ +#define APRXS_WD3_L_POS(rev) (APRXS_WD2_H_POS(rev) + APRXS_WD2_H_EN(rev)) /* 6 */ +#define APRXS_WD3_H_POS(rev) (APRXS_WD3_L_POS(rev) + APRXS_WD3_L_EN(rev)) /* 7 */ +#define APRXS_WD4_L_POS(rev) (APRXS_WD3_H_POS(rev) + APRXS_WD3_H_EN(rev)) /* 7 */ +#define APRXS_WD4_H_POS(rev) (APRXS_WD4_L_POS(rev) + APRXS_WD4_L_EN(rev)) /* 8 */ +#define APRXS_WD5_L_POS(rev) (APRXS_WD4_H_POS(rev) + 
APRXS_WD4_H_EN(rev)) /* 9 */ +#define APRXS_WD5_H_POS(rev) (APRXS_WD5_L_POS(rev) + APRXS_WD5_L_EN(rev)) /* 10 */ +#define APRXS_WD6_L_POS(rev) (APRXS_WD5_H_POS(rev) + APRXS_WD5_H_EN(rev)) /* 11 */ +#define APRXS_WD6_H_POS(rev) (APRXS_WD6_L_POS(rev) + APRXS_WD6_L_EN(rev)) /* 11 */ +#define APRXS_WD7_L_POS(rev) (APRXS_WD6_H_POS(rev) + APRXS_WD6_H_EN(rev)) /* 11 */ +#define APRXS_WD7_H_POS(rev) (APRXS_WD7_L_POS(rev) + APRXS_WD7_L_EN(rev)) /* 11 */ +#define APRXS_WD8_L_POS(rev) (APRXS_WD7_H_POS(rev) + APRXS_WD7_H_EN(rev)) /* 11 */ +#define APRXS_WD8_H_POS(rev) (APRXS_WD8_L_POS(rev) + APRXS_WD8_L_EN(rev)) /* 11 */ +#define APRXS_WD9_L_POS(rev) (APRXS_WD8_H_POS(rev) + APRXS_WD8_H_EN(rev)) /* 12 */ +#define APRXS_WD9_H_POS(rev) (APRXS_WD9_L_POS(rev) + APRXS_WD9_L_EN(rev)) /* 12 */ +#define APRXS_WD10_L_POS(rev) (APRXS_WD9_H_POS(rev) + APRXS_WD9_H_EN(rev)) /* 12 */ +#define APRXS_WD10_H_POS(rev) (APRXS_WD10_L_POS(rev) + APRXS_WD10_L_EN(rev)) /* 12 */ +#define APRXS_WD11_L_POS(rev) (APRXS_WD10_H_POS(rev) + APRXS_WD10_H_EN(rev)) /* 12 */ +#define APRXS_WD11_H_POS(rev) (APRXS_WD11_L_POS(rev) + APRXS_WD11_L_EN(rev)) /* 12 */ +#define APRXS_WD12_L_POS(rev) (APRXS_WD11_H_POS(rev) + APRXS_WD11_H_EN(rev)) /* 12 */ +#define APRXS_WD12_H_POS(rev) (APRXS_WD12_L_POS(rev) + APRXS_WD12_L_EN(rev)) /* 12 */ +#define APRXS_WD13_L_POS(rev) (APRXS_WD12_H_POS(rev) + APRXS_WD12_H_EN(rev)) /* 12 */ +#define APRXS_WD13_H_POS(rev) (APRXS_WD13_L_POS(rev) + APRXS_WD13_L_EN(rev)) /* 12 */ +#define APRXS_WD14_L_POS(rev) (APRXS_WD13_H_POS(rev) + APRXS_WD13_H_EN(rev)) /* 12 */ +#define APRXS_WD14_H_POS(rev) (APRXS_WD14_L_POS(rev) + APRXS_WD14_L_EN(rev)) /* 12 */ +#define APRXS_WD15_L_POS(rev) (APRXS_WD14_H_POS(rev) + APRXS_WD14_H_EN(rev)) /* 12 */ +#define APRXS_WD15_H_POS(rev) (APRXS_WD15_L_POS(rev) + APRXS_WD15_L_EN(rev)) /* 12 */ +#define APRXS_WD16_L_POS(rev) (APRXS_WD15_H_POS(rev) + APRXS_WD15_H_EN(rev)) /* 12 */ +#define APRXS_WD16_H_POS(rev) (APRXS_WD16_L_POS(rev) + APRXS_WD16_L_EN(rev)) /* 13 */ +#define APRXS_WD17_L_POS(rev) (APRXS_WD16_H_POS(rev) + APRXS_WD16_H_EN(rev)) /* 13 */ +#define APRXS_WD17_H_POS(rev) (APRXS_WD17_L_POS(rev) + APRXS_WD17_L_EN(rev)) /* 13 */ +#define APRXS_WD18_L_POS(rev) (APRXS_WD17_H_POS(rev) + APRXS_WD17_H_EN(rev)) /* 13 */ +#define APRXS_WD18_H_POS(rev) (APRXS_WD18_L_POS(rev) + APRXS_WD18_L_EN(rev)) /* 14 */ +#define APRXS_WD19_L_POS(rev) (APRXS_WD18_H_POS(rev) + APRXS_WD18_H_EN(rev)) /* 14 */ +#define APRXS_WD19_H_POS(rev) (APRXS_WD19_L_POS(rev) + APRXS_WD19_L_EN(rev)) /* 14 */ +#define APRXS_WD20_L_POS(rev) (APRXS_WD19_H_POS(rev) + APRXS_WD19_H_EN(rev)) /* 14 */ +#define APRXS_WD20_H_POS(rev) (APRXS_WD20_L_POS(rev) + APRXS_WD20_L_EN(rev)) /* 15 */ +#define APRXS_WD21_L_POS(rev) (APRXS_WD20_H_POS(rev) + APRXS_WD20_H_EN(rev)) /* 16 */ +#define APRXS_WD21_H_POS(rev) (APRXS_WD21_L_POS(rev) + APRXS_WD21_L_EN(rev)) /* 16 */ +#define APRXS_WD22_L_POS(rev) (APRXS_WD21_H_POS(rev) + APRXS_WD21_H_EN(rev)) /* 17 */ +#define APRXS_WD22_H_POS(rev) (APRXS_WD22_L_POS(rev) + APRXS_WD22_L_EN(rev)) /* 18 */ +#define APRXS_WD23_L_POS(rev) (APRXS_WD22_H_POS(rev) + APRXS_WD22_H_EN(rev)) /* 19 */ +#define APRXS_WD23_H_POS(rev) (APRXS_WD23_L_POS(rev) + APRXS_WD23_L_EN(rev)) /* 20 */ +#define APRXS_WD24_L_POS(rev) (APRXS_WD23_H_POS(rev) + APRXS_WD23_H_EN(rev)) /* 21 */ +#define APRXS_WD24_H_POS(rev) (APRXS_WD24_L_POS(rev) + APRXS_WD24_L_EN(rev)) /* 21 */ +#define APRXS_WD25_L_POS(rev) (APRXS_WD24_H_POS(rev) + APRXS_WD24_H_EN(rev)) /* 22 */ +#define APRXS_WD25_H_POS(rev) (APRXS_WD25_L_POS(rev) + 
APRXS_WD25_L_EN(rev)) /* 23 */ + +#define APRXS_NBYTES(rev) (APRXS_WD25_H_POS(rev)) // total number of bytes enabled + +// frame type +#define APRXS_FT_POS(rev) APRXS_WD0_L_POS(rev) +#define APRXS_FT_MASK 0xFu +#define APRXS_FT(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_FT_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_FT_POS(rev)]) & \ + APRXS_FT_MASK) + +// unsupported rate +#define APRXS_UNSRATE_POS(rev) APRXS_WD0_L_POS(rev) +#define APRXS_UNSRATE_MASK 0x10u +#define APRXS_UNSRATE_SHIFT 4u +#define APRXS_UNSRATE(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_UNSRATE_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_UNSRATE_POS(rev)]) & \ + APRXS_UNSRATE_MASK) >> APRXS_UNSRATE_SHIFT) + +// band +#define APRXS_BAND_POS(rev) APRXS_WD0_L_POS(rev) +#define APRXS_BAND_MASK 0x20u +#define APRXS_BAND_SHIFT 5u +#define APRXS_BAND(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_BAND_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_BAND_POS(rev)]) & \ + APRXS_BAND_MASK) >> APRXS_BAND_SHIFT) + +// lost CRS +#define APRXS_LOSTCRS_POS(rev) APRXS_WD0_L_POS(rev) +#define APRXS_LOSTCRS_MASK 0x40u +#define APRXS_LOSTCRS_SHIFT 6u +#define APRXS_LOSTCRS(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_LOSTCRS_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_LOSTCRS_POS(rev)]) & \ + APRXS_LOSTCRS_MASK) >> APRXS_LOSTCRS_SHIFT) + +// short preamble +#define APRXS_SHORTH_POS(rev) APRXS_WD0_L_POS(rev) +#define APRXS_SHORTH_MASK 0x80u +#define APRXS_SHORTH_SHIFT 7u +#define APRXS_SHORTH(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_SHORTH_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_SHORTH_POS(rev)]) & \ + APRXS_SHORTH_MASK) >> APRXS_SHORTH_SHIFT) + +// plcp format violation +#define APRXS_PLCPFV_POS(rev) APRXS_WD0_H_POS(rev) +#define APRXS_PLCPFV_MASK 0x1u +#define APRXS_PLCPFV(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_PLCPFV_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_PLCPFV_POS(rev)]) & \ + APRXS_PLCPFV_MASK) + +// plcp header CRC failed +#define APRXS_PLCPHCF_POS(rev) APRXS_WD0_H_POS(rev) +#define APRXS_PLCPHCF_MASK 0x2u +#define APRXS_PLCPHCF_SHIFT 1u +#define APRXS_PLCPHCF(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_PLCPHCF_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_PLCPHCF_POS(rev)]) & \ + APRXS_PLCPHCF_MASK) >> APRXS_PLCPHCF_SHIFT) + +// MFCRS fired +#define APRXS_MFCRS_FIRED_POS(rev) APRXS_WD0_H_POS(rev) +#define APRXS_MFCRS_FIRED_MASK 0x4u +#define APRXS_MFCRS_FIRED_SHIFT 2u +#define APRXS_MFCRS_FIRED(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_MFCRS_FIRED_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_MFCRS_FIRED_POS(rev)]) & \ + APRXS_MFCRS_FIRED_MASK) >> APRXS_MFCRS_FIRED_SHIFT) + +// ACCRS fired +#define APRXS_ACCRS_FIRED_POS(rev) APRXS_WD0_H_POS(rev) +#define APRXS_ACCRS_FIRED_MASK 0x8u +#define APRXS_ACCRS_FIRED_SHIFT 3u +#define APRXS_ACCRS_FIRED(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? 
\ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_ACCRS_FIRED_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_ACCRS_FIRED_POS(rev)]) & \ + APRXS_ACCRS_FIRED_MASK) >> APRXS_ACCRS_FIRED_SHIFT) + +// MU PPDU +#define APRXS_MUPPDU_POS(rev) APRXS_WD0_H_POS(rev) +#define APRXS_MUPPDU_MASK 0x10u +#define APRXS_MUPPDU_SHIFT 4u +#define APRXS_MUPPDU(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_MUPPDU_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_MUPPDU_POS(rev)]) & \ + APRXS_MUPPDU_MASK) >> APRXS_MUPPDU_SHIFT) + +// OBSS status +#define APRXS_OBSS_STS_POS(rev) APRXS_WD0_H_POS(rev) +#define APRXS_OBSS_STS_MASK 0xE0u +#define APRXS_OBSS_STS_SHIFT 5u +#define APRXS_OBSS_STS(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_OBSS_STS_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_OBSS_STS_POS(rev)]) & \ + APRXS_OBSS_STS_MASK) >> APRXS_OBSS_STS_SHIFT) + +// coremask +#define APRXS_COREMASK_POS(rev) APRXS_WD1_L_POS(rev) +#define APRXS_COREMASK_MASK 0xFu +#define APRXS_COREMASK(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_COREMASK_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_COREMASK_POS(rev)]) & \ + APRXS_COREMASK_MASK) + +// antcfg +#define APRXS_ANTCFG_POS(rev) APRXS_WD1_L_POS(rev) +#define APRXS_ANTCFG_MASK 0xF0u +#define APRXS_ANTCFG_SHIFT 4u +#define APRXS_ANTCFG(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_ANTCFG_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_ANTCFG_POS(rev)]) & \ + APRXS_ANTCFG_MASK) >> APRXS_ANTCFG_SHIFT) + +// final BW classification +#define APRXS_SUBBAND_POS(rev) APRXS_WD1_H_POS(rev) +#define APRXS_SUBBAND_MASK 0xFFu +#define APRXS_SUBBAND(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_SUBBAND_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_SUBBAND_POS(rev)]) & \ + APRXS_SUBBAND_MASK) + +// Rx power Antenna0 +#define APRXS_RXPWR_ANT0_POS(rev) APRXS_WD2_L_POS(rev) +#define APRXS_RXPWR_ANT0_MASK 0xFFu +#define APRXS_RXPWR_ANT0(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT0_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT0_POS(rev)]) & \ + APRXS_RXPWR_ANT0_MASK) + +// Rx power Antenna1 +#define APRXS_RXPWR_ANT1_POS(rev) APRXS_WD2_H_POS(rev) +#define APRXS_RXPWR_ANT1_MASK 0xFFu +#define APRXS_RXPWR_ANT1(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT1_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT1_POS(rev)]) & \ + APRXS_RXPWR_ANT1_MASK) + +// Rx power Antenna2 +#define APRXS_RXPWR_ANT2_POS(rev) APRXS_WD3_L_POS(rev) +#define APRXS_RXPWR_ANT2_MASK 0xFFu +#define APRXS_RXPWR_ANT2(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT2_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT2_POS(rev)]) & \ + APRXS_RXPWR_ANT2_MASK) + +// Rx power Antenna3 +#define APRXS_RXPWR_ANT3_POS(rev) APRXS_WD3_H_POS(rev) +#define APRXS_RXPWR_ANT3_MASK 0xFFu +#define APRXS_RXPWR_ANT3(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? 
\ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT3_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT3_POS(rev)]) & \ + APRXS_RXPWR_ANT3_MASK) + +// RX ELNA INDEX ANT0 +#define APRXS_ELNA_IDX_ANT0_POS(rev) APRXS_WD20_L_POS(rev) +#define APRXS_ELNA_IDX_ANT0_MASK 0x2u +#define APRXS_ELNA_IDX_ANT0_SHIFT 1u +#define APRXS_ELNA_IDX_ANT0(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_ELNA_IDX_ANT0_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_ELNA_IDX_ANT0_POS(rev)]) & \ + APRXS_ELNA_IDX_ANT0_MASK) >> APRXS_ELNA_IDX_ANT0_SHIFT) + +// RX ELNA INDEX ANT1 +#define APRXS_ELNA_IDX_ANT1_POS(rev) APRXS_WD20_L_POS(rev) +#define APRXS_ELNA_IDX_ANT1_MASK 0x20u +#define APRXS_ELNA_IDX_ANT1_SHIFT 5u +#define APRXS_ELNA_IDX_ANT1(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_ELNA_IDX_ANT1_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_ELNA_IDX_ANT1_POS(rev)]) & \ + APRXS_ELNA_IDX_ANT1_MASK) >> APRXS_ELNA_IDX_ANT1_SHIFT) + +// RX TIA INDEX ANT0 LO +#define APRXS_TIA_IDX_ANT0_POS(rev) APRXS_WD16_L_POS(rev) +#define APRXS_TIA_IDX_ANT0_MASK 0x1Cu +#define APRXS_TIA_IDX_ANT0_SHIFT 2u +#define APRXS_TIA_IDX_ANT0(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_TIA_IDX_ANT0_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_TIA_IDX_ANT0_POS(rev)]) & \ + APRXS_TIA_IDX_ANT0_MASK) >> APRXS_TIA_IDX_ANT0_SHIFT) + +// RX TIA INDEX ANT1 LO +#define APRXS_TIA_IDX_ANT1_POS(rev) APRXS_WD18_L_POS(rev) +#define APRXS_TIA_IDX_ANT1_MASK 0x1Cu +#define APRXS_TIA_IDX_ANT1_SHIFT 2u +#define APRXS_TIA_IDX_ANT1(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_TIA_IDX_ANT1_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_TIA_IDX_ANT1_POS(rev)]) & \ + APRXS_TIA_IDX_ANT1_MASK) >> APRXS_TIA_IDX_ANT1_SHIFT) + +// RX VSW INDEX ANT0 +#define APRXS_VSW_IDX_ANT0_POS(rev) APRXS_WD20_L_POS(rev) +#define APRXS_VSW_IDX_ANT0_MASK 0x8u +#define APRXS_VSW_IDX_ANT0_SHIFT 3u +#define APRXS_VSW_IDX_ANT0(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_VSW_IDX_ANT0_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_VSW_IDX_ANT0_POS(rev)]) & \ + APRXS_VSW_IDX_ANT0_MASK) >> APRXS_VSW_IDX_ANT0_SHIFT) + +// RX VSW INDEX ANT1 +#define APRXS_VSW_IDX_ANT1_POS(rev) APRXS_WD20_L_POS(rev) +#define APRXS_VSW_IDX_ANT1_MASK 0x80u +#define APRXS_VSW_IDX_ANT1_SHIFT 7u +#define APRXS_VSW_IDX_ANT1(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_VSW_IDX_ANT1_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_VSW_IDX_ANT1_POS(rev)]) & \ + APRXS_VSW_IDX_ANT1_MASK) >> APRXS_VSW_IDX_ANT1_SHIFT) + +// RSSI fractional bits +#define APRXS_RXPWR_FRAC_POS(rev) APRXS_WD4_L_POS(rev) +#define APRXS_RXPWR_FRAC_MASK 0xFFu +#define APRXS_RXPWR_FRAC(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_FRAC_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_FRAC_POS(rev)]) & \ + APRXS_RXPWR_FRAC_MASK) + +// Ucode overwrites ClipCount with GILTF +#define APRXS_GILTF_POS(rev) APRXS_WD4_H_POS(rev) +#define APRXS_GILTF_MASK 0x18u +#define APRXS_GILTF_SHIFT 3u +#define APRXS_GILTF(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? 
\ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_GILTF_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_GILTF_POS(rev)]) & \ + APRXS_GILTF_MASK) >> APRXS_GILTF_SHIFT) + +#define APRXS_DYNBWINNONHT_POS(rev) APRXS_WD4_H_POS(rev) +#define APRXS_DYNBWINNONHT_MASK 0x20u +#define APRXS_DYNBWINNONHT_SHIFT 5u +#define APRXS_DYNBWINNONHT(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_DYNBWINNONHT_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_DYNBWINNONHT_POS(rev)]) & \ + APRXS_DYNBWINNONHT_MASK) >> APRXS_DYNBWINNONHT_SHIFT) + +#define APRXS_MCSSQSNR0_POS(rev) APRXS_WD5_L_POS(rev) +#define APRXS_MCSSQSNR0_MASK 0xFFu +#define APRXS_MCSSQSNR0(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_MCSSQSNR0_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_MCSSQSNR0_POS(rev)]) & \ + APRXS_MCSSQSNR0_MASK) + +#define APRXS_MCSSQSNR1_POS(rev) APRXS_WD5_H_POS(rev) +#define APRXS_MCSSQSNR1_MASK 0xFFu +#define APRXS_MCSSQSNR1(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_MCSSQSNR1_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_MCSSQSNR1_POS(rev)]) & \ + APRXS_MCSSQSNR1_MASK) + +#define APRXS_MCSSQSNR2_POS(rev) APRXS_WD6_L_POS(rev) +#define APRXS_MCSSQSNR2_MASK 0xFFu +#define APRXS_MCSSQSNR2(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_MCSSQSNR2_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_MCSSQSNR2_POS(rev)]) & \ + APRXS_MCSSQSNR2_MASK) + +#define APRXS_CHBWINNONHT_POS(rev) APRXS_WD8_H_POS(rev) +#define APRXS_CHBWINNONHT_MASK 0x3u +#define APRXS_CHBWINNONHT(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_CHBWINNONHT_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_CHBWINNONHT_POS(rev)]) & \ + APRXS_CHBWINNONHT_MASK) + +// User type +#define APRXS_USTY_POS(rev) APRXS_WD23_H_POS(rev) +#define APRXS_USTY_MASK 0xE0u +#define APRXS_USTY_SHIFT 0x5u +#define APRXS_USTY(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_USTY_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_USTY_POS(rev)]) & \ + APRXS_USTY_MASK) >> APRXS_USTY_SHIFT) + +// 11ax frame format +#define APRXS_AXFF_POS(rev) APRXS_WD20_H_POS(rev) +#define APRXS_AXFF_MASK 0x7u +#define APRXS_AXFF(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_AXFF_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_AXFF_POS(rev)]) & \ + APRXS_AXFF_MASK) + +// MCS +#define APRXS_AXMCS_POS(rev) APRXS_WD21_H_POS(rev) +#define APRXS_AXMCS_MASK 0xFu +#define APRXS_AXMCS(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_AXMCS_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_AXMCS_POS(rev)]) & \ + APRXS_AXMCS_MASK) + +// Coding +#define APRXS_CODING_POS(rev) APRXS_WD21_H_POS(rev) +#define APRXS_CODING_MASK 0x10u +#define APRXS_CODING_SHIFT 4u +#define APRXS_CODING(rxh, rev, min_rev) \ + (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \ + (rxh)->ge87_1.PHYRXSTATUS[APRXS_CODING_POS(rev)] : \ + (rxh)->ge85.PHYRXSTATUS[APRXS_CODING_POS(rev)]) & \ + APRXS_CODING_MASK) >> APRXS_CODING_SHIFT) + +// STAID +#define APRXS_AX_STAID_L_POS(rev) APRXS_WD22_L_POS(rev) +#define APRXS_AX_STAID_L_MASK 0xFFu +#define APRXS_AX_STAID_L(rxh, rev, min_rev) \ + ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? 
\
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_AX_STAID_L_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_AX_STAID_L_POS(rev)]) & \
+	APRXS_AX_STAID_L_MASK)
+
+#define APRXS_AX_STAID_H_POS(rev)	APRXS_WD22_H_POS(rev)
+#define APRXS_AX_STAID_H_MASK		0x03u
+#define APRXS_AX_STAID_H(rxh, rev, min_rev) \
+	((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_AX_STAID_H_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_AX_STAID_H_POS(rev)]) & \
+	APRXS_AX_STAID_H_MASK)
+
+#define APRXS_AX_STAID(rxh, rev, min_rev) ((APRXS_AX_STAID_H(rxh, rev, min_rev) << 8) |\
+	APRXS_AX_STAID_L(rxh, rev, min_rev))
+
+// NSTS
+#define APRXS_NSTS_POS(rev)	APRXS_WD22_H_POS(rev)
+#define APRXS_NSTS_MASK		0x38u
+#define APRXS_NSTS_SHIFT	3u
+#define APRXS_NSTS(rxh, rev, min_rev) \
+	(((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_NSTS_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_NSTS_POS(rev)]) & \
+	APRXS_NSTS_MASK) >> APRXS_NSTS_SHIFT)
+
+// TXBF
+#define APRXS_TXBF_POS(rev)	APRXS_WD22_H_POS(rev)
+#define APRXS_TXBF_MASK		0x40u
+#define APRXS_TXBF_SHIFT	6u
+#define APRXS_TXBF(rxh, rev, min_rev) \
+	(((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_TXBF_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_TXBF_POS(rev)]) & \
+	APRXS_TXBF_MASK) >> APRXS_TXBF_SHIFT)
+
+// DCM
+#define APRXS_DCM_POS(rev)	APRXS_WD22_H_POS(rev)
+#define APRXS_DCM_MASK		0x80u
+#define APRXS_DCM_SHIFT		7u
+#define APRXS_DCM(rxh, rev, min_rev) \
+	(((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_DCM_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_DCM_POS(rev)]) & \
+	APRXS_DCM_MASK) >> APRXS_DCM_SHIFT)
+
+// RU Offset
+#define APRXS_AX_RUALLOC_POS(rev)	APRXS_WD23_L_POS(rev)
+#define APRXS_AX_RUALLOC_MASK	0x7Fu
+#define APRXS_AX_RUALLOC_SHIFT	0u
+#define APRXS_AX_RUALLOC(rxh, rev, min_rev) \
+	(((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_AX_RUALLOC_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_AX_RUALLOC_POS(rev)]) & \
+	APRXS_AX_RUALLOC_MASK) >> APRXS_AX_RUALLOC_SHIFT)
+
+#define APRXS_PE_L_POS(rev)	APRXS_WD23_L_POS(rev)
+#define APRXS_PE_L_MASK		0x80u
+#define APRXS_PE_L_SHIFT	7u
+#define APRXS_PE_L(rxh, rev, min_rev) \
+	(((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_PE_L_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_PE_L_POS(rev)]) & \
+	APRXS_PE_L_MASK) >> APRXS_PE_L_SHIFT)
+
+#define APRXS_PE_H_POS(rev)	APRXS_WD23_H_POS(rev)
+#define APRXS_PE_H_MASK		0x3u
+#define APRXS_PE_H(rxh, rev, min_rev) \
+	((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+	(rxh)->ge87_1.PHYRXSTATUS[APRXS_PE_H_POS(rev)] : \
+	(rxh)->ge85.PHYRXSTATUS[APRXS_PE_H_POS(rev)]) & \
+	APRXS_PE_H_MASK)
+
+#define APRXS_PE(rxh, rev, rev_min) \
+	((APRXS_PE_H(rxh, rev, rev_min) << 1) | APRXS_PE_L(rxh, rev, rev_min))
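With the byte positions resolved per revision, extracting the HE per-user fields is just a chain of these accessors. A sketch, assuming a populated d11rxhdr_t and a known corerev pair (struct he_user_info and rx_fill_he_info() are hypothetical):

    /* Gather the HE SIG-B per-user fields using the APRXS_* accessors above. */
    struct he_user_info {
        uint16 staid;  /* STA ID from HE SIG-B */
        uint8 mcs;     /* HE MCS index */
        uint8 nsts;    /* number of space-time streams */
        uint8 dcm;     /* dual-carrier modulation in use */
    };

    static void rx_fill_he_info(d11rxhdr_t *rxh, uint rev, uint min_rev,
                                struct he_user_info *info)
    {
        info->staid = APRXS_AX_STAID(rxh, rev, min_rev);
        info->mcs   = APRXS_AXMCS(rxh, rev, min_rev);
        info->nsts  = APRXS_NSTS(rxh, rev, min_rev);
        info->dcm   = APRXS_DCM(rxh, rev, min_rev);
    }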
\
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_RU_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_RU_POS(rev)]) & \
+ APRXS_RU_MASK) >> APRXS_RU_SHIFT)
+
+#endif /* _d11_autophyrxsts_ */
+
+#if defined(AUTO_PHYRXSTS)
+#define AUTO_PHYRXSTS_ENAB() 1u
+#else
+#define AUTO_PHYRXSTS_ENAB() 0u
+#endif /* AUTO_PHYRXSTS */
+
+/* PhyRxStatus_0: */
+#define PRXS0_FT_MASK 0x0003u /**< [PRE-HE] NPHY only: CCK, OFDM, HT, VHT */
+#define PRXS0_CLIP_MASK 0x000Cu /**< NPHY only: clip count adjustment steps by AGC */
+#define PRXS0_CLIP_SHIFT 2u /**< SHIFT bits for clip count adjustment */
+#define PRXS0_UNSRATE 0x0010u /**< PHY received a frame with unsupported rate */
+#define PRXS0_UNSRATE_SHIFT 4u
+#define PRXS0_RXANT_UPSUBBAND 0x0020u /**< GPHY: rx ant, NPHY: upper sideband */
+#define PRXS0_LCRS 0x0040u /**< CCK frame only: lost crs during cck frame reception */
+#define PRXS0_SHORTH 0x0080u /**< Short Preamble */
+#define PRXS0_SHORTH_SHIFT 7u
+#define PRXS0_PLCPFV 0x0100u /**< PLCP violation */
+#define PRXS0_PLCPFV_SHIFT 8u
+#define PRXS0_PLCPHCF 0x0200u /**< PLCP header integrity check failed */
+#define PRXS0_PLCPHCF_SHIFT 9u
+#define PRXS0_GAIN_CTL 0x4000u /**< legacy PHY gain control */
+#define PRXS0_ANTSEL_MASK 0xF000u /**< NPHY: Antennas used for received frame, bitmask */
+#define PRXS0_ANTSEL_SHIFT 12u /**< SHIFT bits for Antennas used for received frame */
+#define PRXS0_PPDU_MASK 0x1000u /**< PPDU type SU/MU */
+
+/* subfield PRXS0_FT_MASK [PRXS0_PRE_HE_FT_MASK] */
+#define PRXS0_CCK 0x0000u
+#define PRXS0_OFDM 0x0001u /**< valid only for G phy, use rxh->RxChan for A phy */
+#define PRXS0_PREN 0x0002u
+#define PRXS0_STDN 0x0003u
+
+/* subfield PRXS0_ANTSEL_MASK */
+#define PRXS0_ANTSEL_0 0x0u /**< antenna 0 is used */
+#define PRXS0_ANTSEL_1 0x2u /**< antenna 1 is used */
+#define PRXS0_ANTSEL_2 0x4u /**< antenna 2 is used */
+#define PRXS0_ANTSEL_3 0x8u /**< antenna 3 is used */
+
+/* PhyRxStatus_1: */
+#define PRXS1_JSSI_MASK 0x00FFu
+#define PRXS1_JSSI_SHIFT 0u
+#define PRXS1_SQ_MASK 0xFF00u
+#define PRXS1_SQ_SHIFT 8u
+#define PRXS1_COREMAP 0x000Fu /**< core enable bits for core 0/1/2/3 */
+#define PRXS1_ANTCFG 0x00F0u /**< antenna configuration bits */
+
+#define PHY_COREMAP_LT85(rxh, rev) \
+ ((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_1) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_1)) & \
+ PRXS1_COREMAP)
+#define PHY_COREMAP(rev, rev_min, rxh) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_COREMASK(rxh, rev, rev_min) : PHY_COREMAP_LT85(rxh, rev))
+
+#define PHY_ANTMAP_LT85(rxh, corerev) \
+ (((D11REV_GE(corerev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_1) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_1)) & \
+ PRXS1_ANTCFG) >> 4)
+#define PHY_ANTMAP(rev, rev_min, rxh) (AUTO_PHYRXSTS_ENAB() ? 
\
+ APRXS_ANTCFG(rxh, rev, rev_min) : PHY_ANTMAP_LT85(rxh, rev))
+
+/* nphy PhyRxStatus_1: */
+#define PRXS1_nphy_PWR0_MASK 0x00FF
+#define PRXS1_nphy_PWR1_MASK 0xFF00
+
+/* PhyRxStatus_2: */
+#define PRXS2_LNAGN_MASK 0xC000
+#define PRXS2_LNAGN_SHIFT 14
+#define PRXS2_PGAGN_MASK 0x3C00
+#define PRXS2_PGAGN_SHIFT 10
+#define PRXS2_FOFF_MASK 0x03FF
+
+/* nphy PhyRxStatus_2: */
+#define PRXS2_nphy_SQ_ANT0 0x000F /**< nphy overall signal quality for antenna 0 */
+#define PRXS2_nphy_SQ_ANT1 0x00F0 /**< nphy overall signal quality for antenna 1 */
+#define PRXS2_nphy_cck_SQ 0x00FF /**< bphy signal quality(when FT field is 0) */
+#define PRXS3_nphy_SSQ_MASK 0xFF00 /**< spatial conditioning of the two receive channels */
+#define PRXS3_nphy_SSQ_SHIFT 8
+
+/* PhyRxStatus_3: */
+#define PRXS3_DIGGN_MASK 0x1800
+#define PRXS3_DIGGN_SHIFT 11
+#define PRXS3_TRSTATE 0x0400
+
+/* nphy PhyRxStatus_3: */
+#define PRXS3_nphy_MMPLCPLen_MASK 0x0FFF /**< Mixed-mode preamble PLCP length */
+#define PRXS3_nphy_MMPLCP_RATE_MASK 0xF000 /**< Mixed-mode preamble rate field */
+#define PRXS3_nphy_MMPLCP_RATE_SHIFT 12
+
+/* HTPHY Rx Status defines */
+/* htphy PhyRxStatus_0: these bits overlap with PhyRxStatus_0 */
+#define PRXS0_BAND 0x0400 /**< 0 = 2.4G, 1 = 5G */
+#define PRXS0_RSVD 0x0800 /**< reserved; set to 0 */
+#define PRXS0_UNUSED 0xF000 /**< unused and not defined; set to 0 */
+
+/* htphy PhyRxStatus_1: */
+#define PRXS1_HTPHY_MMPLCPLenL_MASK 0xFF00 /**< Mixmode PLCP Length low byte mask */
+
+/* htphy PhyRxStatus_2: */
+#define PRXS2_HTPHY_MMPLCPLenH_MASK 0x000F /**< Mixmode PLCP Length high byte mask */
+#define PRXS2_HTPHY_MMPLCH_RATE_MASK 0x00F0 /**< Mixmode PLCP rate mask */
+#define PRXS2_HTPHY_RXPWR_ANT0 0xFF00 /**< Rx power on core 0 */
+
+/* htphy PhyRxStatus_3: */
+#define PRXS3_HTPHY_RXPWR_ANT1 0x00FF /**< Rx power on core 1 */
+#define PRXS3_HTPHY_RXPWR_ANT2 0xFF00 /**< Rx power on core 2 */
+
+/* htphy PhyRxStatus_4: */
+#define PRXS4_HTPHY_RXPWR_ANT3 0x00FF /**< Rx power on core 3 */
+#define PRXS4_HTPHY_CFO 0xFF00 /**< Coarse frequency offset */
+
+/* htphy PhyRxStatus_5: */
+#define PRXS5_HTPHY_FFO 0x00FF /**< Fine frequency offset */
+#define PRXS5_HTPHY_AR 0xFF00 /**< Advance Retard */
+
+/* ACPHY RxStatus defs */
+
+/* ACPHY PhyRxStatus_0: */
+#define PRXS0_ACPHY_FT_MASK 0x0003 /**< CCK, OFDM, HT, VHT */
+#define PRXS0_ACPHY_CLIP_MASK 0x000C /**< clip count adjustment steps by AGC */
+#define PRXS0_ACPHY_CLIP_SHIFT 2
+#define PRXS0_ACPHY_UNSRATE 0x0010 /**< PHY received a frame with unsupported rate */
+#define PRXS0_ACPHY_BAND5G 0x0020 /**< Rx Band indication: 0 -> 2G, 1 -> 5G */
+#define PRXS0_ACPHY_LCRS 0x0040 /**< CCK frame only: lost crs during cck frame reception */
+#define PRXS0_ACPHY_SHORTH 0x0080 /**< Short Preamble (CCK), GF preamble (HT) */
+#define PRXS0_ACPHY_PLCPFV 0x0100 /**< PLCP violation */
+#define PRXS0_ACPHY_PLCPHCF 0x0200 /**< PLCP header integrity check failed */
+#define PRXS0_ACPHY_MFCRS 0x0400 /**< Matched Filter CRS fired */
+#define PRXS0_ACPHY_ACCRS 0x0800 /**< Autocorrelation CRS fired */
+#define PRXS0_ACPHY_SUBBAND_MASK 0xF000 /**< FinalBWClassification:
+ * lower nibble Bitfield of sub-bands occupied by Rx frame
+ */
+/* ACPHY PhyRxStatus_1: */
+#define PRXS1_ACPHY_ANT_CORE0 0x0001 /* Antenna Config for core 0 */
+#define PRXS1_ACPHY_SUBBAND_MASK_GEN2 0xFF00 /**< FinalBWClassification:
+ * lower byte Bitfield of sub-bands occupied by Rx frame
+ */
+#define PRXS0_ACPHY_SUBBAND_SHIFT 12
+#define PRXS1_ACPHY_SUBBAND_SHIFT_GEN2 8
+
+/* acphy 
PhyRxStatus_2 & 3: */
+#define PRXS2_ACPHY_RXPWR_ANT0 0xFF00 /**< Rx power on core 0 */
+#define PRXS3_ACPHY_RXPWR_ANT1 0x00FF /**< Rx power on core 1 */
+#define PRXS3_ACPHY_RXPWR_ANT2 0xFF00 /**< Rx power on core 2 */
+#define PRXS3_ACPHY_SNR_ANT0 0xFF00 /* SNR on core 0 */
+
+/* acphy PhyRxStatus_4: */
+/** FinalBWClassification:upper nibble of sub-bands occupied by Rx frame */
+#define PRXS4_ACPHY_SUBBAND_MASK 0x000F
+#define PRXS4_ACPHY_RXPWR_ANT3 0x00FF /**< Rx power on core 3 */
+#define PRXS4_ACPHY_SNR_ANT1 0xFF00 /* SNR on core 1 */
+
+#define PRXS5_ACPHY_CHBWINNONHT_MASK 0x0003
+#define PRXS5_ACPHY_CHBWINNONHT_20MHZ 0
+#define PRXS5_ACPHY_CHBWINNONHT_40MHZ 1
+#define PRXS5_ACPHY_CHBWINNONHT_80MHZ 2
+#define PRXS5_ACPHY_CHBWINNONHT_160MHZ 3 /* includes 80+80 */
+#define PRXS5_ACPHY_DYNBWINNONHT_MASK 0x0004
+
+/** Get Rx power on core 0 */
+#define ACPHY_RXPWR_ANT0(rxs) (((rxs)->lt80.PhyRxStatus_2 & PRXS2_ACPHY_RXPWR_ANT0) >> 8)
+/** Get Rx power on core 1 */
+#define ACPHY_RXPWR_ANT1(rxs) ((rxs)->lt80.PhyRxStatus_3 & PRXS3_ACPHY_RXPWR_ANT1)
+/** Get Rx power on core 2 */
+#define ACPHY_RXPWR_ANT2(rxs) (((rxs)->lt80.PhyRxStatus_3 & PRXS3_ACPHY_RXPWR_ANT2) >> 8)
+/** Get Rx power on core 3 */
+#define ACPHY_RXPWR_ANT3(rxs) ((rxs)->lt80.PhyRxStatus_4 & PRXS4_ACPHY_RXPWR_ANT3)
+
+/** MCSSQSNR location access. MCSSQ usage is limited by chip-specific
+ * implementations, and there is no way to commonize these status locations yet.
+ * TODO: when the storage locations are settled, revisit these defs.
+ */
+
+/* exception handling */
+#ifdef PHY_CORE_MAX
+#if PHY_CORE_MAX > 4
+#error "PHY_CORE_MAX exceeds the number of MCSSQSNR defs (4)"
+#endif
+#endif /* PHY_CORE_MAX */
+
+/* rev 48/55/59 are obsolete for SNR in trunk */
+#define D11_PRXS_MCSSQ_SNR_SUPPORT(corerev) (D11REV_GE((corerev), 80))
+
+#define ACPHY_SNR_MASK (0xFF)
+#define ACPHY_SNR_SHIFT (8)
+
+#define PRXS5_ACPHY_DYNBWINNONHT(rxs) ((rxs)->lt80.PhyRxStatus_5 & PRXS5_ACPHY_DYNBWINNONHT_MASK)
+#define PRXS5_ACPHY_CHBWINNONHT(rxs) ((rxs)->lt80.PhyRxStatus_5 & PRXS5_ACPHY_CHBWINNONHT_MASK)
+
+#define D11N_MMPLCPLen(rxs) ((rxs)->lt80.PhyRxStatus_3 & PRXS3_nphy_MMPLCPLen_MASK)
+#define D11HT_MMPLCPLen(rxs) ((((rxs)->lt80.PhyRxStatus_1 & PRXS1_HTPHY_MMPLCPLenL_MASK) >> 8) | \
+ (((rxs)->lt80.PhyRxStatus_2 & PRXS2_HTPHY_MMPLCPLenH_MASK) << 8))
+
+/* REV80 Definitions (corerev >= 80) */
+
+/** Dma_flags Masks */
+#define RXS_PHYRXST_VALID_REV_GE80 0x02
+
+/** Get RxStatus1 */
+#define RXSTATUS1_REV_GE87_1(rxs) ((rxs)->ge87_1.RxStatus1)
+#define RXSTATUS1_REV_GE80(rxs) ((rxs)->ge80.RxStatus1)
+#define RXSTATUS1_REV_LT80(rxs) ((rxs)->lt80.RxStatus1)
+
+#define PHY_RXSTATUS1(corerev, corerev_minor, rxs) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? RXSTATUS1_REV_GE87_1(rxs) : \
+ D11REV_GE(corerev, 80) ? RXSTATUS1_REV_GE80(rxs) : \
+ RXSTATUS1_REV_LT80(rxs))
+
+/* (FT Mask) PhyRxStatus_0: */
+#define PRXS0_FT_MASK_REV_LT80 PRXS0_FT_MASK /**< (corerev < 80) frame type field mask */
+
+#define PRXS0_FT_SHIFT_REV_GE80 8
+#define PRXS0_FT_MASK_REV_GE80 0x0700 /**
+ * (corerev >= 80) frame type field mask.
+ *
+ * 0 = CCK, 1 = 11a/g legacy OFDM,
+ * 2 = HT, 3 = VHT, 4 = 11ah, 5 = HE,
+ * 6-15 Rsvd.
+ */
+
+/**
+* Macro to find Frame type from RX Hdr based on corerev.
+*
+* Note: From rev80 onwards frame type is indicated only
+* in the phyrxstatus, which is valid only for the last
+* MPDU of an AMPDU. 
Since FT is required for every MPDU,
+* frametype for core-revs >= 80 shall be
+* provided in bits (8:10) of MuRate field in RXH.
+*/
+#define D11PPDU_FT(rxh, rev) (\
+ (D11REV_GE(rev, 80) ? \
+ ((D11RXHDR_ACCESS_VAL(rxh, rev, 0, MuRate) & PRXS_FT_MASK(rev)) >> \
+ (PRXS0_FT_SHIFT_REV_GE80)) : \
+ (D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0) & PRXS_FT_MASK(rev))))
+
+#define PRXS_UNSRATE_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_UNSRATE) >> PRXS0_UNSRATE_SHIFT)
+
+#define PRXS_UNSRATE(rxh, rev, min_rev) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_UNSRATE(rxh, rev, min_rev) : PRXS_UNSRATE_LT85(rxh, rev))
+
+// 1: short (or GF) preamble, 0: long (or MM) preamble
+#define PRXS_SHORTH_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_SHORTH) >> PRXS0_SHORTH_SHIFT)
+#define PRXS_SHORTH(rxh, rev, min_rev) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_SHORTH(rxh, rev, min_rev) : \
+ PRXS_SHORTH_LT85(rxh, rev))
+
+#define PRXS_PLCPFV_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_PLCPFV) >> PRXS0_PLCPFV_SHIFT)
+#define PRXS_PLCPFV(rxh, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_PLCPFV(rxh, rev, rev_min) : PRXS_PLCPFV_LT85(rxh, rev))
+
+#define PRXS_PLCPHCF_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_PLCPHCF) >> PRXS0_PLCPHCF_SHIFT)
+#define PRXS_PLCPHCF(rxh, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_PLCPHCF(rxh, rev, rev_min) : PRXS_PLCPHCF_LT85(rxh, rev))
+
+// final BW classification
+#define PRXS_SUBBAND_ACPHY(rxh, rev, rev_min) \
+ (((D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0) & \
+ PRXS0_ACPHY_SUBBAND_MASK) >> PRXS0_ACPHY_SUBBAND_SHIFT) | \
+ ((D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_4) & \
+ PRXS4_ACPHY_SUBBAND_MASK) << 4))
+#define PRXS_SUBBAND_ACPHY2(rxh, rev, rev_min) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_1) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_1)) & PRXS1_ACPHY2_SUBBAND_MASK) >> \
+ PRXS1_ACPHY2_SUBBAND_SHIFT)
+
+#define PRXS_SUBBAND(rxh, rev, rev_min, phyrev) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_SUBBAND(rxh, rev, rev_min) : (ACREV_GE(phyrev, 32) ? 
\
+ PRXS_SUBBAND_ACPHY2(rxh, rev, rev_min) : \
+ PRXS_SUBBAND_ACPHY(rxh, rev, rev_min)))
+
+/* Macros to access MCS, NSTS and MU validity from MuRate field in corerev >= 80 RXH */
+#define RXS_MU_VALID_MASK_REV80 0x0080
+#define RXS_MU_VALID_SHIFT_REV80 7
+#define RXS_MCS_MASK_REV80 0x000F
+#define RXS_MCS_SHIFT_REV80 0
+#define RXS_NSTS_MASK_REV80 0x0070
+#define RXS_NSTS_SHIFT_REV80 4
+
+#define D11PPDU_ISMU_REV80(rxh, corerev, corerev_minor) \
+ ((D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, MuRate) & \
+ (RXS_MU_VALID_MASK_REV80)) >> RXS_MU_VALID_SHIFT_REV80)
+#define D11RXHDR_GE80_GET_MCS(rxh, corerev, corerev_minor) \
+ ((D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, MuRate) & \
+ (RXS_MCS_MASK_REV80)) >> RXS_MCS_SHIFT_REV80)
+#define D11RXHDR_GE80_GET_NSTS(rxh, corerev, corerev_minor) \
+ ((D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, MuRate) & \
+ (RXS_NSTS_MASK_REV80)) >> RXS_NSTS_SHIFT_REV80)
+
+/* subfield PRXS0_FT_MASK_REV_GE80 */
+#define PRXS0_HE 0x0004 /**< HE frame type */
+
+/* (Corerev >= 80) PhyRxStatus_2: */
+#define PRXS2_RXPWR_ANT0_REV_GE80 0x00FF /**< (corerev >= 80) Rx power on first antenna */
+#define PRXS2_RXPWR_ANT1_REV_GE80 0xFF00 /**< (corerev >= 80) Rx power on second antenna */
+
+/* (Corerev >= 80) PhyRxStatus_3: */
+#define PRXS3_RXPWR_ANT2_REV_GE80 0x00FF /**< (corerev >= 80) Rx power on third antenna */
+#define PRXS3_RXPWR_ANT3_REV_GE80 0xFF00 /**
+ * (corerev >= 80) Rx power on fourth antenna.
+ *
+ * Note: For PHY revs 3 and > 4, OCL Status
+ * byte 0 will be reported if PHY register
+ * OCL_RxStatus_Ctrl is set to 0x2 or 0x6.
+ */
+#define PRXS3_RXPWR_FRAC_REV_GE80 0xFFu
+
+/** Get Rx power on ANT 0 */
+#define RXPWR_ANT0_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_2 & \
+ (PRXS2_RXPWR_ANT0_REV_GE80))
+
+#define PHY_RXPWR_ANT0(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT0(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_ANT0_REV_GE80(rxs) : ACPHY_RXPWR_ANT0(rxs)))
+
+/** Get Rx power on ANT 1 */
+#define RXPWR_ANT1_REV_GE80(rxs) (((rxs)->ge80.PhyRxStatus_2 & \
+ (PRXS2_RXPWR_ANT1_REV_GE80)) >> 8)
+
+#define PHY_RXPWR_ANT1(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT1(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_ANT1_REV_GE80(rxs) : ACPHY_RXPWR_ANT1(rxs)))
+
+/** Get Rx power on ANT 2 */
+#define RXPWR_ANT2_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_3 & \
+ (PRXS3_RXPWR_ANT2_REV_GE80))
+
+#define PHY_RXPWR_ANT2(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT2(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_ANT2_REV_GE80(rxs) : ACPHY_RXPWR_ANT2(rxs)))
+
+/** Get Rx power on ANT 3 */
+#define RXPWR_ANT3_REV_GE80(rxs) (((rxs)->ge80.PhyRxStatus_3 & \
+ (PRXS3_RXPWR_ANT3_REV_GE80)) >> 8)
+
+#define PHY_RXPWR_ANT3(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT3(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? 
\ + RXPWR_ANT3_REV_GE80(rxs) : ACPHY_RXPWR_ANT3(rxs))) + +/* Get the following entries from RXStatus bytes +* for RSSI compensation +* based on factory calibration +* TIA Index +* eLNA Index +* V_path Switch +*/ +#define PHY_ELNA_IDX_ANT0_REV_GE85(corerev, corerev_min, rxs) \ + APRXS_ELNA_IDX_ANT0(rxs, corerev, corerev_min) +#define PHY_ELNA_IDX_ANT1_REV_GE85(corerev, corerev_min, rxs) \ + APRXS_ELNA_IDX_ANT1(rxs, corerev, corerev_min) +#define PHY_TIA_IDX_ANT0_REV_GE85(corerev, corerev_min, rxs) \ + APRXS_TIA_IDX_ANT0(rxs, corerev, corerev_min) +#define PHY_TIA_IDX_ANT1_REV_GE85(corerev, corerev_min, rxs) \ + APRXS_TIA_IDX_ANT1(rxs, corerev, corerev_min) +#define PHY_VSW_IDX_ANT0_REV_GE85(corerev, corerev_min, rxs) \ + APRXS_VSW_IDX_ANT0(rxs, corerev, corerev_min) +#define PHY_VSW_IDX_ANT1_REV_GE85(corerev, corerev_min, rxs) \ + APRXS_VSW_IDX_ANT1(rxs, corerev, corerev_min) + +/** Get RSSI fractional bits */ +#define RXPWR_FRAC_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_4 & \ + (PRXS3_RXPWR_FRAC_REV_GE80)) + +#define RXPWR_FRAC(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \ + APRXS_RXPWR_FRAC(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \ + RXPWR_FRAC_REV_GE80(rxs) : 0)) + +/* HECAPPHY PhyRxStatus_4: */ +#define PRXS4_DYNBWINNONHT_MASK_REV_GE80 0x1000 +#define PRXS4_DYNBWINNONHT_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_4 & \ + PRXS4_DYNBWINNONHT_MASK_REV_GE80) + +#define PRXS_PHY_DYNBWINNONHT(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \ + APRXS_DYNBWINNONHT(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \ + PRXS4_DYNBWINNONHT_REV_GE80(rxs) : PRXS5_ACPHY_DYNBWINNONHT(rxs))) + +/** (corerev >= 80) PhyRxStatus_5: MCSSQ SNR for core 0 and 1 */ +#define PRXS5_MCSSQ_SHIFT (8u) +#define PRXS5_MCSSQ_CORE0_REV_GE80 (0x00FF) +#define PRXS5_MCSSQ_CORE1_REV_GE80 (0xFF00) + +#define MCSSQ_SNR_ANT0_GE80(rxs) ((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE0_REV_GE80) +#define MCSSQ_SNR_ANT0(rxs, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \ + APRXS_MCSSQSNR0(rxs, rev, rev_min) : \ + ((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE0_REV_GE80)) + +#define MCSSQ_SNR_ANT1_GE80(rxs) (((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE1_REV_GE80) \ + >> PRXS5_MCSSQ_SHIFT) +#define MCSSQ_SNR_ANT1(rxs, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \ + APRXS_MCSSQSNR1(rxs, rev, rev_min) : \ + (((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE1_REV_GE80) \ + >> PRXS5_MCSSQ_SHIFT)) + +/** (corerev >= 80) PhyRxStatus_6: MCSSQ SNR for core 2 and 3 */ +#define PRXS6_MCSSQ_SHIFT (8u) +#define PRXS6_MCSSQ_CORE2_REV_GE80 (0x00FF) +#define PRXS6_MCSSQ_CORE3_REV_GE80 (0xFF00) + +#define MCSSQ_SNR_ANT2_GE80(rxs) (((rxs)->ge80.phyrxs_rem[0] & \ + PRXS6_MCSSQ_CORE2_REV_GE80)) +#define MCSSQ_SNR_ANT2(rxs, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \ + APRXS_MCSSQSNR2(rxs, rev, rev_min) : \ + (((rxs)->ge80.phyrxs_rem[0] & PRXS6_MCSSQ_CORE2_REV_GE80))) + +/* HECAPPHY PhyRxStatus_8 (part of phyrxs_rem[2]) : */ +#define PRXS8_CHBWINNONHT_MASK_REV_GE80 0x0100 +#define PRXS8_CHBWINNONHT_REV_GE80(rxs) ((rxs)->ge80.phyrxs_rem[2] & \ + PRXS8_CHBWINNONHT_MASK_REV_GE80) + +#define PRXS_PHY_CHBWINNONHT(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \ + APRXS_CHBWINNONHT(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? 
\
+ PRXS8_CHBWINNONHT_REV_GE80(rxs) : PRXS5_ACPHY_CHBWINNONHT(rxs)))
+
+/* HE phyrxs_rem[4] */
+#define PRXS_REM4_PE_MASK_REV80 0x0380
+#define PRXS_REM4_PE_SHIFT_REV80 7u
+#define PRXS_REM4_RU_TYPE_MASK_REV80 0x1c00
+#define PRXS_REM4_RU_TYPE_SHIFT_REV80 10u
+#define PRXS_REM4_NUM_USER_SHIFT_REV80 13u
+#define PRXS_REM4_NUM_USER_BIT_MASK_REV80 0xe000
+
+/* HE phyrxs_rem[5] */
+#define PRXS_REM5_GI_LTF_MASK_REV80 0x0003
+#define PRXS_REM5_GI_LTF_SHIFT_REV80 0u
+#define PRXS_REM5_11AX_FF_MASK_REV80 0x0700
+#define PRXS_REM5_11AX_FF_SHIFT_REV80 8u
+
+/* HE phyrxs_rem[6] */
+#define PRXS_REM6_MCS_MASK_REV80 0x0f00
+#define PRXS_REM6_MCS_SHIFT_REV80 8u
+#define PRXS_REM6_CODING_MASK_REV80 0x1000
+#define PRXS_REM6_CODING_SHIFT_REV80 12u
+
+/* HE phyrxs_rem[7] */
+#define PRXS_REM7_DCM_MASK_REV80 0x8000
+#define PRXS_REM7_DCM_SHIFT_REV80 15u
+#define PRXS_REM7_TXBF_MASK_REV80 0x4000
+#define PRXS_REM7_TXBF_SHIFT_REV80 14u
+#define PRXS_REM7_NSTS_MASK_REV80 0x3800
+#define PRXS_REM7_NSTS_SHIFT_REV80 11u
+#define PRXS_REM7_RU_ALLOC_MASK_REV80 0x007f
+#define PRXS_REM7_RU_ALLOC_SHIFT_REV80 0u
+
+#define PRXS_STAID_MASK 0x07ff
+#define PRXS_STAID_SHIFT 0u
+
+enum {
+ HE_RU_TYPE_26T = 0, /* 26 tone RU, 0 - 36 */
+ HE_RU_TYPE_52T = 1, /* 52 tone RU, 37 - 52 */
+ HE_RU_TYPE_106T = 2, /* 106 tone RU, 53 - 60 */
+ HE_RU_TYPE_242T = 3, /* 242 tone RU, 61 - 64 */
+ HE_RU_TYPE_484T = 4, /* 484 tone RU, 65 - 66 */
+ HE_RU_TYPE_996T = 5, /* 996 tone RU, 67 - 68 */
+ HE_RU_TYPE_2x996T = 6, /* 2x996 tone RU, 69 */
+ HE_RU_TYPE_LAST = 7 /* Reserved, Invalid */
+};
+
+#define HE_RU_TYPE_MAX 6
+
+/* received PE duration is present in phyrxs_rem[4] bit position [7-9] */
+#define D11PPDU_PE_GE80(rxh, corerev) ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[4]) & \
+ (PRXS_REM4_PE_MASK_REV80)) >> PRXS_REM4_PE_SHIFT_REV80)
+
+#define D11PPDU_PE(rxh, corerev, corerev_minor) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_PE(rxh, corerev, corerev_minor) : D11PPDU_PE_GE80(rxh, corerev))
+
+/* received RU type is present in phyrxs_rem[4] bit position [10-12] */
+#define D11PPDU_RU_TYPE(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_RU(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[4]) & \
+ (PRXS_REM4_RU_TYPE_MASK_REV80)) >> PRXS_REM4_RU_TYPE_SHIFT_REV80) : 0))
+
+/* received HE number-of-users field is present in phyrxs_rem[4] bit position [13-15] */
+#define D11PPDU_HE_NUM_USER_TYPE(rxh, corerev, corerev_min) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_USTY(rxh, corerev, corerev_min) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[4]) & \
+ (PRXS_REM4_NUM_USER_BIT_MASK_REV80)) >> PRXS_REM4_NUM_USER_SHIFT_REV80) : 0))
+
+#define D11PPDU_FF_TYPE(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AXFF(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[5]) & \
+ (PRXS_REM5_11AX_FF_MASK_REV80)) >> PRXS_REM5_11AX_FF_SHIFT_REV80) : 0))
+
+/* DCM is present in phyrxs_rem[7] byte 27, bit position [7] */
+#define D11PPDU_DCM(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_DCM(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_DCM_MASK_REV80)) >> PRXS_REM7_DCM_SHIFT_REV80) : 0))
+
+/* coding used is present in phyrxs_rem[6] byte:25, bit position [12] */
+#define D11PPDU_CODING(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_CODING(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? 
((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[6]) & \
+ (PRXS_REM6_CODING_MASK_REV80)) >> PRXS_REM6_CODING_SHIFT_REV80) : 0))
+
+/* spatial reuse 2 / STA-ID */
+#define D11PPDU_STAID(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AX_STAID(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_STAID_MASK)) >> PRXS_STAID_SHIFT) : 0))
+
+#define D11PPDU_TXBF(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_TXBF(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_TXBF_MASK_REV80)) >> PRXS_REM7_TXBF_SHIFT_REV80) : 0))
+
+/* GI_LTF is present in phyrxs_rem[5] bit position [0-1] */
+#define D11PPDU_GI_LTF(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_GILTF(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[5]) & \
+ (PRXS_REM5_GI_LTF_MASK_REV80)) >> PRXS_REM5_GI_LTF_SHIFT_REV80) : 0))
+
+/* MCS is present in phyrxs_rem[6] - byte 25, bit position [8-11] */
+#define D11PPDU_MCS(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AXMCS(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[6]) & \
+ (PRXS_REM6_MCS_MASK_REV80)) >> PRXS_REM6_MCS_SHIFT_REV80) : 0))
+
+/* NSTS present in phyrxs_rem[7] bit position [11-13] */
+#define D11PPDU_NSTS(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_NSTS(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_NSTS_MASK_REV80)) >> PRXS_REM7_NSTS_SHIFT_REV80) : 0))
+
+/* RU ALLOC present in phyrxs_rem[7] - byte 26, bit position [6:0] */
+#define D11PPDU_RU_ALLOC(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AX_RUALLOC(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_RU_ALLOC_MASK_REV80)) >> PRXS_REM7_RU_ALLOC_SHIFT_REV80) : 0))
+
+/* PHY RX status "Frame Type" field mask. */
+#define PRXS_FT_MASK(corerev) \
+ (D11REV_GE(corerev, 80) ? (PRXS0_FT_MASK_REV_GE80) : \
+ (PRXS0_FT_MASK_REV_LT80))
+
+/**
+ * ACPHY PhyRxStatus0 SubBand (FinalBWClassification) bit defs
+ * FinalBWClassification is a 4 bit field, each bit representing one 20MHz sub-band
+ * of a channel. 
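+ * Example (values from the enum below): a 20MHz frame in the lowest
+ * sub-band of an 80MHz channel reports PRXS_SUBBAND_20LL (0x1), while a
+ * frame occupying the full 80MHz reports PRXS_SUBBAND_80 (0xF).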
+ */
+enum prxs_subband {
+ PRXS_SUBBAND_20LL = 0x0001,
+ PRXS_SUBBAND_20LU = 0x0002,
+ PRXS_SUBBAND_20UL = 0x0004,
+ PRXS_SUBBAND_20UU = 0x0008,
+ PRXS_SUBBAND_40L = 0x0003,
+ PRXS_SUBBAND_40U = 0x000C,
+ PRXS_SUBBAND_80 = 0x000F,
+ PRXS_SUBBAND_20LLL = 0x0001,
+ PRXS_SUBBAND_20LLU = 0x0002,
+ PRXS_SUBBAND_20LUL = 0x0004,
+ PRXS_SUBBAND_20LUU = 0x0008,
+ PRXS_SUBBAND_20ULL = 0x0010,
+ PRXS_SUBBAND_20ULU = 0x0020,
+ PRXS_SUBBAND_20UUL = 0x0040,
+ PRXS_SUBBAND_20UUU = 0x0080,
+ PRXS_SUBBAND_40LL = 0x0003,
+ PRXS_SUBBAND_40LU = 0x000c,
+ PRXS_SUBBAND_40UL = 0x0030,
+ PRXS_SUBBAND_40UU = 0x00c0,
+ PRXS_SUBBAND_80L = 0x000f,
+ PRXS_SUBBAND_80U = 0x00f0,
+ PRXS_SUBBAND_160 = 0x00ff
+};
+
+enum prxs_subband_bphy {
+ PRXS_SUBBAND_BPHY_20L = 0x0000,
+ PRXS_SUBBAND_BPHY_20U = 0x0001
+};
+
+/* ACPHY Gen2 RxStatus defs */
+
+/* ACPHY Gen2 PhyRxStatus_0: */
+#define PRXS0_ACPHY2_MUPPDU 0x1000 /**< 0: SU PPDU; 1: MU PPDU */
+#define PRXS0_ACPHY2_OBSS 0xE000 /**< OBSS mitigation state */
+
+/* ACPHY Gen2 PhyRxStatus_1: */
+#define PRXS1_ACPHY2_SUBBAND_MASK 0xFF00 /**< FinalBWClassification:
+ * 8-bit bitfield of sub-bands occupied by Rx frame
+ */
+#define PRXS1_ACPHY2_SUBBAND_SHIFT 8
+
+/* ACPHY Gen2 PhyRxStatus_2: */
+#define PRXS2_ACPHY2_MU_INT 0x003F /**< MU interference processing type */
+
+/* ACPHY Gen2 PhyRxStatus_5: */
+#define PRXS5_ACPHY2_RSSI_FRAC 0xFF00 /**< RSSI fractional bits */
+
+/* ucode RxStatus1: */
+#define RXS_BCNSENT 0x8000
+#define RXS_TOFINFO 0x4000 /**< Rxed measurement frame processed by ucode */
+#define RXS_GRANTBT 0x2000 /* Indicate medium given to BT */
+#define RXS_SECKINDX_MASK_GE64 0x1fe0
+#define RXS_SECKINDX_MASK 0x07e0
+#define RXS_IS_DEFRAG 0x4
+#define RXS_DEFRAG_SHIFT 2
+#define RXS_SECKINDX_SHIFT 5
+#define RXS_DECERR (1 << 4)
+#define RXS_DECATMPT (1 << 3)
+#define RXS_PBPRES (1 << 2) /**< PAD bytes to make IP data 4 bytes aligned */
+#define RXS_RESPFRAMETX (1 << 1)
+#define RXS_FCSERR (1 << 0)
+
+/* ucode RxStatus2: */
+#define RXS_AMSDU_MASK 1
+#define RXS_AGGTYPE_MASK 0x6
+#define RXS_AGGTYPE_SHIFT 1
+#define RXS_AMSDU_FIRST 1
+#define RXS_AMSDU_INTERMEDIATE 0
+#define RXS_AMSDU_LAST 2
+#define RXS_AMSDU_N_ONE 3
+#define RXS_TKMICATMPT (1 << 3)
+#define RXS_TKMICERR (1 << 4)
+#define RXS_PHYRXST_PRISEL_CLR (1 << 5) /**< PR113291: when '1', indicates that the Rx */
+ /* packet was received while the antenna */
+ /* (prisel) had been granted to BT. */
+#define RXS_PHYRXST_VALID (1 << 8)
+#define RXS_BCNCLSG (1 << 9) /**< Coalesced beacon packet */
+#define RXS_RXANT_MASK 0x3
+#define RXS_RXANT_SHIFT_LT80 12
+#define RXS_RXANT_SHIFT_GE80 5
+#define RXS_LOOPBACK_MODE 4
+
+/* Bit definitions for MRXS word for short rx status. */
+/* RXSS = RX Status Short */
+#define RXSS_AMSDU_MASK 1 /**< 1: AMSDU */
+#define RXSS_AGGTYPE_MASK 0x6 /**< 0 intermed, 1 first, 2 last, 3 single/non-AMSDU */
+#define RXSS_AGGTYPE_SHIFT 1
+#define RXSS_PBPRES (1 << 3) /**< two-byte PAD prior to plcp */
+#define RXSS_HDRSTS (1 << 4) /**< header conversion status. 
1 enabled, 0 disabled */
+#define RXSS_RES_MASK 0xE0 /**< reserved */
+#define RXSS_MSDU_CNT_MASK 0xFF00 /**< index of this AMSDU sub-frame in the AMSDU */
+#define RXSS_MSDU_CNT_SHIFT 8
+
+/* RX signal control definitions */
+/** PHYRXSTATUS validity checker; in-between ampdu, or rxs status isn't valid */
+#define PRXS_IS_VALID(rxh, rev, rev_min) \
+ ((D11REV_GE(rev, 80) && \
+ (D11RXHDR_ACCESS_VAL(rxh, rev, rev_min, dma_flags) & \
+ RXS_PHYRXST_VALID_REV_GE80)) || \
+ (D11REV_GE(rev, 64) && !(D11RXHDR_ACCESS_VAL(rxh, \
+ rev, rev_min, dma_flags) & RXS_SHORT_MASK)) || \
+ (D11RXHDR_ACCESS_VAL(rxh, rev, rev_min, RxStatus2) & RXS_PHYRXST_VALID))
+
+/* RxChan */
+#define RXS_CHAN_40 0x1000
+#define RXS_CHAN_5G 0x0800
+#define RXS_CHAN_ID_MASK 0x07f8
+#define RXS_CHAN_ID_SHIFT 3
+
+#define C_BTCX_AGGOFF_BLE (1 << 0)
+#define C_BTCX_AGGOFF_A2DP (1 << 1)
+#define C_BTCX_AGGOFF_PER (1 << 2)
+#define C_BTCX_AGGOFF_MULTIHID (1 << 3)
+#define C_BTCX_AGG_LMT_SET_HIGH (1 << 4)
+#define C_BTCX_AGGOFF_ESCO_SLAVE (1 << 5)
+
+#define BTCX_HFLG_NO_A2DP_BFR (1 << 0) /**< no check a2dp buffer */
+#define BTCX_HFLG_NO_CCK (1 << 1) /**< no cck rate for null or cts2self */
+#define BTCX_HFLG_NO_OFDM_FBR (1 << 2) /**< no ofdm fbr for null or cts2self */
+#define BTCX_HFLG_NO_INQ_DEF (1 << 3) /**< no inquiry defer */
+#define BTCX_HFLG_GRANT_BT (1 << 4) /**< always grant bt */
+#define BTCX_HFLG_ANT2WL (1 << 5) /**< force prisel to wl */
+#define BTCX_HFLG_PS4ACL (1 << 7) /**< use ps null for unsniff acl */
+#define BTCX_HFLG_DYAGG (1 << 8) /**< dynamic tx aggregation */
+#define BTCX_HFLG_SKIPLMP (1 << 10) /**< no LMP check for 4331 (w 20702 A1/A3) */
+#define BTCX_HFLG_ACL_BSD_BLE_SCAN_GRNT (1 << 14) /**< ACL based grant for BLE scan */
+ /* indication to ucode */
+#define BTCX_HFLG2_TRAP_RFACTIVE (1 << 0) /* trap when RfActive too long */
+#define BTCX_HFLG2_TRAP_TXCONF (1 << 1) /* trap when coex grants txconf late */
+#define BTCX_HFLG2_TRAP_ANTDLY (1 << 2) /* trap when coex grants antdly late */
+#define BTCX_HFLG2_TRAP_BTTYPE (1 << 3) /* trap when an illegal BT tasktype is received */
+/* Bit definitions for M_BTCX_CONFIG */
+#define BTCX_CONFIG_FORCE_TRAP (1 << 13) /* Force a specific BTCoex TRAP when set */
+
+/* BTCX_CONFIG bits */
+#define C_BTCX_CONFIG_SLOTTED_STATE_1 (1 << 3)
+#define C_BTCX_CONFIG_SLOTTED_STATE_2 (1 << 4)
+#define C_BTCX_CONFIG_SLOTTED_STATE_3 (1 << 5)
+#define C_BTCX_CONFIG_LOW_RSSI (1 << 7)
+#define C_BTCX_CONFIG_BT_STROBE (1 << 9)
+#define C_BTCX_CONFIG_SCO_PROT (1 << 10)
+#define C_BTCX_CFG_CMN_CTS2SELF (1 << 11)
+#define C_BTCX_CONFIG_HPP_STATE (1 << 15)
+
+#define BTC_PARAMS_FW_START_IDX 1000 /**< starting index of FW only btc params */
+/** BTC_PARAMS_FW definitions */
+typedef enum
+{
+ // allow rx-agg to be re-enabled after SCO session completes
+ BTC_FW_RX_REAGG_AFTER_SCO = BTC_PARAMS_FW_START_IDX,
+ // RSSI threshold at which SCO grant/deny limits are changed dynamically
+ BTC_FW_RSSI_THRESH_SCO = BTC_PARAMS_FW_START_IDX + 1,
+ // Enable the dynamic LE scan priority
+ BTC_FW_ENABLE_DYN_LESCAN_PRI = BTC_PARAMS_FW_START_IDX + 2,
+ // If Tput(mbps) is above this, then share antenna with BT's LE_SCAN packet type.
+ BTC_FW_LESCAN_LO_TPUT_THRESH = BTC_PARAMS_FW_START_IDX + 3,
+ // If Tput(mbps) is below this, then share antenna with BT's LE_SCAN packet type.
+ // sampled once a second. 
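+ // (Editor's note, an assumption from the two comments above: both thresholds
+ // are in mbps and are checked against that once-a-second tput sample, so the
+ // antenna is presumably shared with LE_SCAN only while LO < tput < HI.)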
+ BTC_FW_LESCAN_HI_TPUT_THRESH = BTC_PARAMS_FW_START_IDX + 4,
+ // Number of denials before granting LE scans
+ BTC_FW_LESCAN_GRANT_INT = BTC_PARAMS_FW_START_IDX + 5,
+ // number of times the algorithm changes lescan pri
+ BTC_FW_LESCAN_ALG_CNT = BTC_PARAMS_FW_START_IDX + 6,
+ // RSSI threshold at which aggregation will be disabled during frequent BLE activity
+ BTC_FW_RSSI_THRESH_BLE = BTC_PARAMS_FW_START_IDX + 7,
+ // AMPDU Aggregation state requested by BTC
+ BTC_FW_AGG_STATE_REQ = BTC_PARAMS_FW_START_IDX + 8,
+ // Reserving space for parameters used in other projects
+ BTC_FW_RSVD_1 = BTC_PARAMS_FW_START_IDX + 9,
+ BTC_FW_HOLDSCO_LIMIT = BTC_PARAMS_FW_START_IDX + 10, // Lower Limit
+ BTC_FW_HOLDSCO_LIMIT_HI = BTC_PARAMS_FW_START_IDX + 11, // Higher Limit
+ BTC_FW_SCO_GRANT_HOLD_RATIO = BTC_PARAMS_FW_START_IDX + 12, // Low Ratio
+ BTC_FW_SCO_GRANT_HOLD_RATIO_HI = BTC_PARAMS_FW_START_IDX + 13, // High Ratio
+ BTC_FW_HOLDSCO_HI_THRESH = BTC_PARAMS_FW_START_IDX + 14, // BT Period Threshold
+ BTC_FW_MOD_RXAGG_PKT_SZ_FOR_SCO = BTC_PARAMS_FW_START_IDX + 15,
+ /* Modify Rx Aggregation size when SCO/eSCO detected */
+ BTC_FW_AGG_SIZE_LOW = BTC_PARAMS_FW_START_IDX + 16,
+ /* Agg size when BT period < 7500 ms */
+ BTC_FW_AGG_SIZE_HIGH = BTC_PARAMS_FW_START_IDX + 17,
+ /* Agg size when BT period >= 7500 ms */
+ BTC_FW_MOD_RXAGG_PKT_SZ_FOR_A2DP = BTC_PARAMS_FW_START_IDX + 18,
+ /* Enable COEX constraints for TWT scheduling */
+ BTC_FW_TWT_COEX_CONSTRAINTS_EN = BTC_PARAMS_FW_START_IDX + 19,
+ /* Enable Rx Aggregation for P2P_GO and SOFTAP when ACL/A2DP detected */
+ BTC_FW_MOD_RXAGG_PKT_SZ_FOR_APMODE_ACL_A2DP = BTC_PARAMS_FW_START_IDX + 20,
+ /* Disable amsdu dynamically during Rx limited aggregation */
+ BTC_FW_DISABLE_AMSDU_DURING_LIM_AGG = BTC_PARAMS_FW_START_IDX + 21,
+ /* Enable acl based grant for ble scan based on number of 2G slots */
+ BTC_FW_ENABLE_ACL_GRNT_FOR_BLE_SCAN = BTC_PARAMS_FW_START_IDX + 22,
+ /* Threshold slot count for 2g band to Enable acl based grant for ble scan during NAN */
+ BTC_FW_NAN_THRESHOLD_SLOTS_FOR_2G = BTC_PARAMS_FW_START_IDX + 23,
+ /* BT task bm override for critical chansw slots */
+ BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_L = BTC_PARAMS_FW_START_IDX + 24,
+ BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_H = BTC_PARAMS_FW_START_IDX + 25,
+ /* Limited Aggr AP check grace period, # of BTC watchdog timeout */
+ BTC_FW_AGG_AP_GRACE_PERIOD = BTC_PARAMS_FW_START_IDX + 26,
+ /* Limited Aggr AP check buffer limit, sample interval, # of BTC watchdog timeout */
+ BTC_FW_AGG_AP_BUFLIM_SMPLINTV = BTC_PARAMS_FW_START_IDX + 27,
+ /* Limited Aggr AP check excessive DELBA, sample interval, # of BTC watchdog timeout */
+ BTC_FW_AGG_AP_DELBA_SMPLINTV = BTC_PARAMS_FW_START_IDX + 28,
+ /* Limited Aggr AP check excessive DELBA, threshold, # of DELBA */
+ BTC_FW_AGG_AP_DELBA_THRESHOLD = BTC_PARAMS_FW_START_IDX + 29,
+ BTC_FW_MAX_INDICES // Maximum number of btc_fw sw registers
+} btcParamsFirmwareDefinitions;
+
+#define BTC_FW_NUM_INDICES (BTC_FW_MAX_INDICES - BTC_PARAMS_FW_START_IDX)
+
+// 1: Re-enable aggregation after SCO
+#define BTC_FW_RX_REAGG_AFTER_SCO_INIT_VAL 1
+
+// 1: Enable limited aggregation for SCO
+#define BTC_FW_MOD_RXAGG_PKT_SZ_FOR_SCO_INIT_VAL 0
+
+/* Enable Limited aggregation for HI interval BT periodic task only (>=7.5ms) */
+#ifdef WL_BTC_LIMAGG_HI_INT
+/* RX aggregation packet size when SCO */
+#define BTC_FW_AGG_SIZE_LOW_INIT_VAL 0
+#else
+/* RX aggregation packet size when SCO */
+#define BTC_FW_AGG_SIZE_LOW_INIT_VAL 1
+#endif
+
+/* aggregation size when BT period < 
BT_AMPDU_RESIZE_THRESH */
+#define BTC_FW_AGG_SIZE_HIGH_INIT_VAL 2
+/* aggregation size when BT period > BT_AMPDU_RESIZE_THRESH */
+// 0: disable weak-rssi SCO coex feature. If > 0, adjust SCO COEX algorithm for weak RSSI scenario.
+#define BTC_FW_RSSI_THRESH_SCO_INIT_VAL 0
+
+// 1: Enable limited aggregation for A2DP
+#define BTC_FW_MOD_RXAGG_PKT_SZ_FOR_A2DP_INIT_VAL 0
+
+// Enable LE Scan Priority Algorithm 0: Disable, 1: Enable
+#define BTC_FW_ENABLE_DYN_LESCAN_PRI_INIT_VAL 0
+// If WL Tput below 7 mbps, don't grant background LE Scans
+#define BTC_FW_LESCAN_LO_TPUT_THRESH_INIT_VAL 7
+// If WL Tput above 30 mbps, don't grant background LE Scans
+#define BTC_FW_LESCAN_HI_TPUT_THRESH_INIT_VAL 30
+// If LE Priority algorithm is triggered, grant one out of 2 LE_SCAN requests
+#define BTC_FW_LESCAN_GRANT_INT_INIT_VAL 2
+// If RSSI is weaker than -70 dBm and BLE activity is frequent, then disable
+// RX aggregation, and clamp TX aggregation.
+#ifdef WL_BTCX_UDM
+#define BTC_FW_RSSI_THRESH_BLE_INIT_VAL 100
+#else
+#define BTC_FW_RSSI_THRESH_BLE_INIT_VAL 70
+#endif
+#define BTC_FW_HOLDSCO_LIMIT_INIT_VAL 100
+#define BTC_FW_HOLDSCO_LIMIT_HI_INIT_VAL 10
+#define BTC_FW_SCO_GRANT_HOLD_RATIO_INIT_VAL 1500
+#define BTC_FW_SCO_GRANT_HOLD_RATIO_HI_INIT_VAL 1000
+#define BTC_FW_HOLDSCO_HI_THRESH_INIT_VAL 7400
+#define BTC_FW_TWT_COEX_CONSTRAINTS_EN_INIT_VAL 1
+/* Aggregation in AP mode (P2P_GO and SOFTAP) when ACL and A2DP */
+#define BTC_FW_MOD_RXAGG_PKT_SZ_FOR_APMODE_ACL_A2DP_INIT_VAL 16
+/* Disable amsdu dynamically during Rx limited aggregation */
+#define BTC_FW_DISABLE_AMSDU_DURING_LIM_AGG_INIT_VAL 1
+/* Enable acl based grant for ble scan based on number of 2G slots during NAN */
+#define BTC_FW_ENABLE_ACL_GRNT_FOR_BLE_SCAN_INIT_VAL 0
+/* Threshold slot count for 2g band to Enable acl based grant for ble
+ * scan during NAN. 
Setting current value to 8, considering the time line is 512ms.
+ * Threshold changes dynamically based on different time line
+ */
+#define BTC_FW_NAN_THRESHOLD_SLOTS_FOR_2G_INIT_VAL 8
+/* BT task bm override for critical chansw slots -initval */
+#define BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_L_INIT_VAL 0x0000
+#define BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_H_INIT_VAL 0x0020
+#define BTC_FW_AGG_AP_GRACE_PERIOD_VAL 1
+#define BTC_FW_AGG_AP_BUFLIM_SMPLINTV_VAL 1
+#define BTC_FW_AGG_AP_DELBA_SMPLINTV_VAL 5
+#define BTC_FW_AGG_AP_DELBA_THRESHOLD_VAL 3
+
+/* NR Coex Params Set/Get via wl btc_params, starting index */
+#define NR5GCX_PARAMS_FW_START_IDX 1200
+
+typedef enum NR5GCX_Params {
+ // Min # of PPDU to be tracked for hysteresis
+ NR5GCX_FW_MIN_NUM_PPDU = NR5GCX_PARAMS_FW_START_IDX,
+ // Threshold for data stall detection, percentage
+ NR5GCX_FW_DATA_STALL_TH = NR5GCX_PARAMS_FW_START_IDX + 1,
+ // max number of rate recovery attempts
+ NR5GCX_FW_MAX_NUM_ATTEMPTS = NR5GCX_PARAMS_FW_START_IDX + 2,
+ // Rate recovery rate check duration
+ NR5GCX_FW_RR_RATE_CHK_DUR = NR5GCX_PARAMS_FW_START_IDX + 3,
+ // Rate recovery attempt duration
+ NR5GCX_FW_RR_ATTEMPT_DUR = NR5GCX_PARAMS_FW_START_IDX + 4,
+ // NR grant duration after an unsuccessful rate recovery
+ NR5GCX_FW_RR_UNSC_DUR = NR5GCX_PARAMS_FW_START_IDX + 5,
+ // Threshold for rate recovery, percentage
+ NR5GCX_FW_RECOVERY_TH = NR5GCX_PARAMS_FW_START_IDX + 6,
+ // Threshold for low RSSI
+ NR5GCX_FW_LOWRSSI_TH = NR5GCX_PARAMS_FW_START_IDX + 7,
+ // Maximum number of nr5gcx fw params
+ NR5GCX_FW_MAX_INDICES
+} NR5GCXParamsFirmwareDefinitions;
+
+#define NR5GCX_FW_NUM_INDICES (NR5GCX_FW_MAX_INDICES - NR5GCX_PARAMS_FW_START_IDX)
+
+#define NR5GCX_FW_MIN_NUM_PPDU_INIT 10u
+#define NR5GCX_FW_DATA_STALL_TH_INIT 75u
+#define NR5GCX_FW_MAX_NUM_ATTEMPTS_INIT 5u
+#define NR5GCX_FW_RR_RATE_CHK_DUR_INIT_MS 60u /* ms */
+#define NR5GCX_FW_RR_ATTEMPT_DUR_INIT_MS 60u /* ms */
+#define NR5GCX_FW_RR_UNSC_DUR_INIT_MS 10000u /* ms */
+#define NR5GCX_FW_RECOVERY_TH_INIT 50u
+#define NR5GCX_FW_LOWRSSI_TH_INIT 85u /* dBm */
+
+/* RC1 Coex Params Set/Get via wl btc_params, starting index */
+#define RC1CX_PARAMS_FW_START_IDX 1200
+
+typedef enum RC1CX_Params {
+ // Min # of PPDU to be tracked for hysteresis
+ RC1CX_FW_MIN_NUM_PPDU = RC1CX_PARAMS_FW_START_IDX,
+ // Threshold for data stall detection, percentage
+ RC1CX_FW_DATA_STALL_TH = RC1CX_PARAMS_FW_START_IDX + 1,
+ // max number of rate recovery attempts
+ RC1CX_FW_MAX_NUM_ATTEMPTS = RC1CX_PARAMS_FW_START_IDX + 2,
+ // Rate recovery rate check duration
+ RC1CX_FW_RR_RATE_CHK_DUR = RC1CX_PARAMS_FW_START_IDX + 3,
+ // Rate recovery attempt duration
+ RC1CX_FW_RR_ATTEMPT_DUR = RC1CX_PARAMS_FW_START_IDX + 4,
+ // NR grant duration after an unsuccessful rate recovery
+ RC1CX_FW_RR_UNSC_DUR = RC1CX_PARAMS_FW_START_IDX + 5,
+ // Threshold for rate recovery, percentage
+ RC1CX_FW_RECOVERY_TH = RC1CX_PARAMS_FW_START_IDX + 6,
+ // Threshold for low RSSI
+ RC1CX_FW_LOWRSSI_TH = RC1CX_PARAMS_FW_START_IDX + 7,
+ // Maximum number of rc1cx fw params
+ RC1CX_FW_MAX_INDICES
+} RC1CXParamsFirmwareDefinitions;
+
+#define RC1CX_FW_NUM_INDICES (RC1CX_FW_MAX_INDICES - RC1CX_PARAMS_FW_START_IDX)
+
+#define RC1CX_FW_MIN_NUM_PPDU_INIT 10u
+#define RC1CX_FW_DATA_STALL_TH_INIT 75u
+#define RC1CX_FW_MAX_NUM_ATTEMPTS_INIT 5u
+#define RC1CX_FW_RR_RATE_CHK_DUR_INIT_MS 60u /* ms */
+#define RC1CX_FW_RR_ATTEMPT_DUR_INIT_MS 60u /* ms */
+#define RC1CX_FW_RR_UNSC_DUR_INIT_MS 10000u /* ms */
+#define RC1CX_FW_RECOVERY_TH_INIT 50u
+#define RC1CX_FW_LOWRSSI_TH_INIT 
85u /* dBm */
+
+#ifdef GPIO_TXINHIBIT
+/* GPIO based TX_INHIBIT:SWWLAN-109270 */
+typedef enum shm_macintstatus_ext_e {
+ C_MISE_GPIO_TXINHIBIT_VAL_NBIT = 0,
+ C_MISE_GPIO_TXINHIBIT_INT_NBIT = 1
+} shm_macintstatus_ext_t;
+#define C_MISE_GPIO_TXINHIBIT_VAL_MASK (1 << C_MISE_GPIO_TXINHIBIT_VAL_NBIT)
+#define C_MISE_GPIO_TXINHIBIT_INT_MASK (1 << C_MISE_GPIO_TXINHIBIT_INT_NBIT)
+#endif
+#define M_PSM_SOFT_REGS 0x0
+
+/** Scratch Reg defs */
+typedef enum
+{
+ S_RSV0 = 0,
+ S_RSV1,
+ S_RSV2,
+
+ /* scratch registers for Dot11-constants */
+ S_DOT11_CWMIN, /**< CW-minimum 0x03 */
+ S_DOT11_CWMAX, /**< CW-maximum 0x04 */
+ S_DOT11_CWCUR, /**< CW-current 0x05 */
+ S_DOT11_SRC_LMT, /**< short retry count limit 0x06 */
+ S_DOT11_LRC_LMT, /**< long retry count limit 0x07 */
+ S_DOT11_DTIMCOUNT, /**< DTIM-count 0x08 */
+
+ /* Tx-side scratch registers */
+ S_SEQ_NUM, /**< hardware sequence number reg 0x09 */
+ S_SEQ_NUM_FRAG, /**< seq-num for frags (set at the start of MSDU) 0x0A */
+ S_FRMRETX_CNT, /**< frame retx count 0x0B */
+ S_SSRC, /**< Station short retry count 0x0C */
+ S_SLRC, /**< Station long retry count 0x0D */
+ S_EXP_RSP, /**< Expected response frame 0x0E */
+ S_OLD_BREM, /**< Remaining backoff ctr 0x0F */
+ S_OLD_CWWIN, /**< saved-off CW-cur 0x10 */
+ S_TXECTL, /**< TXE-Ctl word constructed in scr-pad 0x11 */
+ S_CTXTST, /**< frm type-subtype as read from Tx-descr 0x12 */
+
+ /* Rx-side scratch registers */
+ S_RXTST, /**< Type and subtype in Rxframe 0x13 */
+
+ /* Global state register */
+ S_STREG, /**< state storage actual bit maps below 0x14 */
+
+ S_TXPWR_SUM, /**< Tx power control: accumulator 0x15 */
+ S_TXPWR_ITER, /**< Tx power control: iteration 0x16 */
+ S_RX_FRMTYPE, /**< Rate and PHY type for frames 0x17 */
+ S_THIS_AGG, /**< Size of this AGG (A-MSDU) 0x18 */
+
+ S_KEYINDX, /* 0x19 */
+ S_RXFRMLEN, /**< Receive MPDU length in bytes 0x1A */
+
+ /* Receive TSF time stored in SCR */
+ S_RXTSFTMRVAL_WD3, /**< TSF value at the start of rx 0x1B */
+ S_RXTSFTMRVAL_WD2, /**< TSF value at the start of rx 0x1C */
+ S_RXTSFTMRVAL_WD1, /**< TSF value at the start of rx 0x1D */
+ S_RXTSFTMRVAL_WD0, /**< TSF value at the start of rx 0x1E */
+ S_RXSSN, /**< Received start seq number for A-MPDU BA 0x1F */
+ S_RXQOSFLD, /**< Rx-QoS field (if present) 0x20 */
+
+ /* Scratch pad regs used in microcode as temp storage */
+ S_TMP0, /**< stmp0 0x21 */
+ S_TMP1, /**< stmp1 0x22 */
+ S_TMP2, /**< stmp2 0x23 */
+ S_TMP3, /**< stmp3 0x24 */
+ S_TMP4, /**< stmp4 0x25 */
+ S_TMP5, /**< stmp5 0x26 */
+ S_PRQPENALTY_CTR, /**< Probe response queue penalty counter 0x27 */
+ S_ANTCNT, /**< unsuccessful attempts on current ant. 0x28 */
+ S_SYMBOL, /**< flag for possible symbol ctl frames 0x29 */
+ S_RXTP, /**< rx frame type 0x2A */
+ S_STREG2, /**< extra state storage 0x2B */
+ S_STREG3, /**< even more extra state storage 0x2C */
+ S_STREG4, /**< ... 
0x2D */ + S_STREG5, /**< remember to initialize it to zero 0x2E */ + + S_UNUSED_0X2F, /**< No longer used 0x2F */ + S_UPTR, /* Use this to initialize utrace 0x30 */ + S_ADJPWR_IDX, /**< PR 37101 WAR, adj_pwr_idx 0x31 */ + S_CUR_PTR, /**< Temp pointer for A-MPDU re-Tx SHM table 0x32 */ + S_REVID4, /**< 0x33 */ + S_INDX, /**< 0x34 */ + S_ADDR0, /**< 0x35 */ + S_ADDR1, /**< 0x36 */ + S_ADDR2, /**< 0x37 */ + S_ADDR3, /**< 0x38 */ + S_ADDR4, /**< 0x39 */ + S_ADDR5, /**< 0x3A */ + S_TMP6, /**< 0x3B */ + S_KEYINDX_BU, /**< Backup for Key index 0x3C */ + S_MFGTEST_TMP0, /**< Temp register used for RX test calculations 0x3D */ + S_RXESN, /**< Received end sequence number for A-MPDU BA 0x3E */ + S_STREG6, /**< 0x3F */ +} ePsmScratchPadRegDefinitions; + +#define C_STREG_SLOWCAL_PD_NBIT 0x00000004 /* BIT 2 slow clock cal is pending */ +#define C_STREG_SLOWCAL_DN_NBIT 0x00000008 /* BIT 3 slow clock cal is done */ + +#define S_BEACON_INDX S_OLD_BREM +#define S_PRS_INDX S_OLD_CWWIN +#define S_BTCX_BT_DUR S_REVID4 +#define S_PHYTYPE S_SSRC +#define S_PHYVER S_SLRC + +/* IHR GPT_2 is corerev >= 3 */ +#define TSF_GPT_2_STAT 0x133 +#define TSF_GPT_2_CTR_L 0x134 +#define TSF_GPT_2_CTR_H 0x135 +#define TSF_GPT_2_VAL_L 0x136 +#define TSF_GPT_2_VAL_H 0x137 + +/* IHR TSF_GPT STAT values */ +#define TSF_GPT_PERIODIC (1 << 12) +#define TSF_GPT_ADJTSF (1 << 13) +#define TSF_GPT_USETSF (1 << 14) +#define TSF_GPT_ENABLE (1 << 15) + +/** ucode mac statistic counters in shared memory */ +#define MACSTAT_OFFSET_SZ 64 +#define MACSTAT_REV80_OFFSET_SZ 118 + +/* ucode macstat txfunflw offset */ +#define UCODEMSTAT_TXFUNFL_BLK ((0x70 * 2) + (0x76 * 2)) + +/* MACSTAT offset to SHM address */ +#define MACSTAT_ADDR(x, offset) (M_PSM2HOST_STATS(x) + (offset)) + +/** ucode mac statistic counters in shared memory, base addr defined in M_UCODE_MACSTAT1 */ +typedef struct macstat1 { + uint16 txndpa; /* + 0 (0x0) */ + uint16 txndp; /* + 1*2 (0x2) */ + uint16 txsf; /* + 2*2 (0x4) */ + uint16 txcwrts; /* + 3*2 (0x6) */ + uint16 txcwcts; /* + 4*2 (0x8) */ + uint16 txbfm; /* + 5*2 (0xa) */ + uint16 rxndpaucast; /* + 6*2 (0xc) */ + uint16 bferptrdy; /* + 7*2 (0xe) */ + uint16 rxsfucast; /* + 8*2 (0x10) */ + uint16 rxcwrtsucast; /* + 9*2 (0x12) */ + uint16 rxcwctsucast; /* +10*2 (0x14) */ + uint16 rx20s; /* +11*2 (0x16) */ + uint16 bcntrim; /* +12*2 (0x18) */ + uint16 btc_rfact_l; /* +13*2 (0x1a) */ + uint16 btc_rfact_h; /* +14*2 (0x1c) */ + uint16 btc_txconf_l; /* +15*2 (0x1e) : cnt */ + uint16 btc_txconf_h; /* +16*2 (0x20) : cnt */ + uint16 btc_txconf_durl; /* +17*2 (0x22) : dur */ + uint16 btc_txconf_durh; /* +18*2 (0x24) : dur */ + uint16 rxsecrssi0; /* +19*2 (0x26) : high bin */ + uint16 rxsecrssi1; /* +20*2 (0x28) : med bin */ + uint16 rxsecrssi2; /* +21*2 (0x2a) : low bin */ + uint16 rxpri_durl; /* +22*2 (0x2c) : dur */ + uint16 rxpri_durh; /* +23*2 (0x2e) : dur */ + uint16 rxsec20_durl; /* +24*2 (0x30) : dur */ + uint16 rxsec20_durh; /* +25*2 (0x32) : dur */ + uint16 rxsec40_durl; /* +26*2 (0x34) : dur */ + uint16 rxsec40_durh; /* +27*2 (0x36) : dur */ +} macstat1_t; + +#define MX_UCODEX_MACSTAT (0x40 * 2) +/* ucodex mac statistic counters in shared memory */ +#define MACXSTAT_OFFSET_SZ 6 + +/* psm2 statistic counters in shared memory, base addr defined in MX_PSM2HOST_STATS */ +typedef enum { + MCXSTOFF_MACXSUSP = 0, + MCXSTOFF_M2VMSG = 1, + MCXSTOFF_V2MMSG = 2, + MCXSTOFF_MBOXOUT = 3, + MCXSTOFF_MUSND = 4, + MCXSTOFF_SFB2V = 5 +} macxstat_offset_t; + +/* dot11 core-specific control flags */ +#define SICF_MCLKE 0x0001 /* Mac core 
clock Enable */
+#define SICF_FCLKON 0x0002 /* Force clocks On */
+#define SICF_PCLKE 0x0004 /**< PHY clock enable */
+#define SICF_PRST 0x0008 /**< PHY reset */
+#define SICF_MPCLKE 0x0010 /**< MAC PHY clockcontrol enable */
+#define SICF_FREF 0x0020 /**< PLL FreqRefSelect (corerev >= 5) */
+/* NOTE: the following bw bits only apply when the core is attached
+ * to a NPHY (and corerev >= 11 which it will always be for NPHYs).
+ */
+#ifdef SICF_160M_BWMASK_DEF
+#define SICF_BWMASK(macrev) (D11REV_GE(macrev, 86) ? 0x00e0 : 0x00c0) /**< phy clkmsk */
+#define SICF_BW160(macrev) (D11REV_GE(macrev, 86) ? 0x0080 : 0x00c0) /**< 160MHz BW */
+#define SICF_BW80(macrev) (D11REV_GE(macrev, 86) ? 0x0060 : 0x00c0) /**< 80MHz BW */
+#define SICF_BW40(macrev) (D11REV_GE(macrev, 86) ? 0x0040 : 0x0080) /**< 40MHz BW */
+#define SICF_BW20(macrev) (D11REV_GE(macrev, 86) ? 0x0020 : 0x0040) /**< 20MHz BW */
+#define SICF_BW10(macrev) (D11REV_GE(macrev, 86) ? 0x0000 : 0x0000) /**< 10MHz BW */
+#else
+#define SICF_BWMASK 0x00c0 /**< phy clock mask (b6 & b7) */
+#define SICF_BW160 0x00c0 /**< 160MHz BW */
+#define SICF_BW80 0x00c0 /**< 80MHz BW */
+#define SICF_BW40 0x0080 /**< 40MHz BW (160MHz phyclk) */
+#define SICF_BW20 0x0040 /**< 20MHz BW (80MHz phyclk) */
+#define SICF_BW10 0x0000 /**< 10MHz BW (40MHz phyclk) */
+#endif
+#define SICF_DAC 0x0300 /**< Highspeed DAC mode control field */
+#define SICF_GMODE 0x2000 /**< gmode enable */
+
+/* Macmode / Phymode / Opmode are used interchangeably sometimes
+ * even though they all mean the same thing. Going ahead with the HW
+ * signal name - using phymode from here on (even though we know it's
+ * a misnomer). Applicable to d11 corerev >= 50 ---- ACPHY only
+ */
+#define SICF_PHYMODE_SHIFT 16
+#define SICF_PHYMODE 0xf0000 /**< mask */
+
+#define SICF_160CLKSEL 0x100000u /* main phy clock speed selection */
+
+/* dot11 core-specific status flags */
+#define SISF_2G_PHY 0x0001 /**< 2.4G capable phy (corerev >= 5) */
+#define SISF_5G_PHY 0x0002 /**< 5G capable phy (corerev >= 5) */
+#define SISF_FCLKA 0x0004 /**< FastClkAvailable (corerev >= 5) */
+#define SISF_DB_PHY 0x0008 /**< Dualband phy (corerev >= 11) */
+
+/* === End of MAC reg, Beginning of PHY(b/a/g/n) reg, radio and LPPHY regs are separated === */
+
+/* Bits in phytest(0x0a): */
+#define TST_DDFS 0x2000
+#define TST_TXFILT1 0x0800
+#define TST_UNSCRAM 0x0400
+#define TST_CARR_SUPP 0x0200
+#define TST_DC_COMP_LOOP 0x0100
+#define TST_LOOPBACK 0x0080
+#define TST_TXFILT0 0x0040
+#define TST_TXTEST_ENABLE 0x0020
+#define TST_TXTEST_RATE 0x0018
+#define TST_TXTEST_PHASE 0x0007
+
+/* phytest txTestRate values */
+#define TST_TXTEST_RATE_1MBPS 0
+#define TST_TXTEST_RATE_2MBPS 1
+#define TST_TXTEST_RATE_5_5MBPS 2
+#define TST_TXTEST_RATE_11MBPS 3
+#define TST_TXTEST_RATE_SHIFT 3
+
+typedef struct shm_mbss_prq_entry_s shm_mbss_prq_entry_t;
+BWL_PRE_PACKED_STRUCT struct shm_mbss_prq_entry_s {
+ struct ether_addr ta;
+ uint8 prq_info[2];
+ uint8 time_stamp;
+ uint8 flags; /**< bit 0 HT STA Indication, bit 7:1 Reserved */
+} BWL_POST_PACKED_STRUCT;
+
+typedef enum shm_mbss_prq_ft_e {
+ SHM_MBSS_PRQ_FT_CCK,
+ SHM_MBSS_PRQ_FT_OFDM,
+ SHM_MBSS_PRQ_FT_MIMO,
+ SHM_MBSS_PRQ_FT_RESERVED
+} shm_mbss_prq_ft_t;
+
+#define SHM_MBSS_PRQ_FT_COUNT SHM_MBSS_PRQ_FT_RESERVED
+
+#define SHM_MBSS_PRQ_ENT_FRAMETYPE(entry) ((entry)->prq_info[0] & 0x3)
+#define SHM_MBSS_PRQ_ENT_UPBAND(entry) ((((entry)->prq_info[0] >> 2) & 0x1) != 0)
+
+/** What was the index matched? 
*/ +#define SHM_MBSS_PRQ_ENT_UC_BSS_IDX(entry) (((entry)->prq_info[0] >> 2) & 0x3) +#define SHM_MBSS_PRQ_ENT_PLCP0(entry) ((entry)->prq_info[1]) + +/** Was this directed to a specific SSID or BSSID? If bit clear, quantity known */ +#define SHM_MBSS_PRQ_ENT_DIR_SSID(entry) \ + ((((entry)->prq_info[0] >> 6) == 0) || ((entry)->prq_info[0] >> 6) == 1) +#define SHM_MBSS_PRQ_ENT_DIR_BSSID(entry) \ + ((((entry)->prq_info[0] >> 6) == 0) || ((entry)->prq_info[0] >> 6) == 2) + +#define SHM_MBSS_PRQ_ENT_TIMESTAMP(entry) ((entry)->time_stamp) +/** Was the probe request from a ht STA or a legacy STA */ +#define SHM_MBSS_PRQ_ENT_HTSTA(entry) ((entry)->flags & 0x1) + +typedef struct d11ac_tso_s d11ac_tso_t; + +BWL_PRE_PACKED_STRUCT struct d11ac_tso_s { + uint8 flag[3]; + uint8 sfh_hdr_offset; + uint16 tso_mss; /**< tso segment size */ + uint16 msdu_siz; /**< msdu size */ + uint32 tso_payload_siz; /**< total byte cnt in tcp payload */ + uint16 ip_hdr_offset; /**< relative to the start of txd header */ + uint16 tcp_hdr_offset; /**< relative to start of txd header */ +} BWL_POST_PACKED_STRUCT; + +/* toe_ctl TCP offload engine register definitions */ +#define TOE_CTL_DISAB (1u << 0) +#define TOE_CTL_MASK (1u << 0) +#define TOE_CTL_ENAB (0xFFFEu) +#define TOE_CLK_GATING_DISAB (1u << 1) + +#define TSO_HDR_TOE_FLAG_OFFSET (0u) + +#define TOE_F0_HDRSIZ_NORMAL (1u << 0) +#define TOE_F0_PASSTHROUGH (1u << 1) +#define TOE_F0_TCPSEG_EN (1u << 3) +#define TOE_F0_IPV4 (1u << 4) +#define TOE_F0_IPV6 (1u << 5) +#define TOE_F0_TCP (1u << 6) +#define TOE_F0_UDP (1u << 7) + +#define TOE_F1_IPV4_CSUM_EN (1u << 0) +#define TOE_F1_TCPUDP_CSUM_EN (1u << 1) +#define TOE_F1_PSEUDO_CSUM_EN (1u << 2) +#define TOE_F1_FRAG_ALLOW (1u << 5) +#define TOE_F1_FRAMETYPE_1 (1u << 6) +#define TOE_F1_FRAMETYPE_2 (1u << 7) +#define TOE_F1_FT_MASK (TOE_F1_FRAMETYPE_1 | TOE_F1_FRAMETYPE_2) +#define TOE_F1_FT_SHIFT (6u) + +#define TOE_F2_TXD_HEAD_SHORT (1u << 0) +#define TOE_F2_EPOCH_SHIFT (1u) +#define TOE_F2_EPOCH (1u << TOE_F2_EPOCH_SHIFT) +#define TOE_F2_EPOCH_EXT (1u << 2) +#define TOE_F2_EPOCH_EXT_MASK (TOE_F2_EPOCH | TOE_F2_EPOCH_EXT) +#define TOE_F2_AMSDU_AGGR_EN (1u << 4) +#define TOE_F2_AMSDU_CSUM_EN (1u << 5) +#define TOE_F2_AMSDU_FS_MID (1u << 6) +#define TOE_F2_AMSDU_FS_LAST (1u << 7) + +#define TOE_TXDMA_FLAGS_AMSDU_FIRST (0x14u) +#define TOE_TXDMA_FLAGS_AMSDU_MID (0x24u) +#define TOE_TXDMA_FLAGS_AMSDU_LAST (0x34u) + +/* This marks the end of a packed structure section. 
*/
+#include <packed_section_end.h>
+
+#define SHM_BYT_CNT 0x2 /**< IHR location */
+#define MAX_BYT_CNT 0x600 /**< Maximum frame len */
+
+/* WOWL Template Regions */
+#define WOWL_NS_CHKSUM (0x57 * 2)
+#define WOWL_PSP_TPL_BASE (0x334 * 2)
+#define WOWL_GTK_MSG2 (0x434 * 2)
+#define WOWL_NS_OFFLOAD (0x634 * 2)
+#define T_KEEPALIVE_0 (0x6b4 * 2)
+#define T_KEEPALIVE_1 ((0x6b4 + 0x40) * 2)
+#define WOWL_ARP_OFFLOAD (0x734 * 2)
+#define WOWL_TX_FIFO_TXRAM_BASE (0x774 * 2) /**< conservative, leave 1KB for GTKM2 */
+
+/* template regions for 11ac */
+#define D11AC_WOWL_PSP_TPL_BASE (0x4c0 * 2)
+#define D11AC_WOWL_GTK_MSG2 (0x5c0 * 2) /**< for core rev >= 42 */
+#define WOWL_NS_OFFLOAD_GE42 (0x7c0 * 2)
+#define T_KEEPALIVE_0_GE42 (0x840 * 2)
+#define T_KEEPALIVE_1_GE42 ((0x840 + 0x40) * 2)
+#define WOWL_ARP_OFFLOAD_GE42 (0x8c0 * 2)
+#define D11AC_WOWL_TX_FIFO_TXRAM_BASE (0x900 * 2) /**< GTKM2 for core rev >= 42 */
+
+/* Event definitions */
+#define WOWL_MAGIC (1 << 0) /**< Wakeup on Magic packet */
+#define WOWL_NET (1 << 1) /**< Wakeup on Netpattern */
+#define WOWL_DIS (1 << 2) /**< Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WOWL_RETR (1 << 3) /**< Wakeup on retrograde TSF */
+#define WOWL_BCN (1 << 4) /**< Wakeup on loss of beacon */
+#define WOWL_TST (1 << 5) /**< Wakeup after test */
+#define WOWL_M1 (1 << 6) /**< Wakeup after PTK refresh */
+#define WOWL_EAPID (1 << 7) /**< Wakeup after receipt of EAP-Identity Req */
+#define WOWL_PME_GPIO (1 << 8) /**< Wakeind via PME(0) or GPIO(1) */
+#define WOWL_NEEDTKIP1 (1 << 9) /**< need tkip phase 1 key to be updated by the driver */
+#define WOWL_GTK_FAILURE (1 << 10) /**< enable wakeup if GTK fails */
+#define WOWL_EXTMAGPAT (1 << 11) /**< support extended magic packets */
+#define WOWL_ARPOFFLOAD (1 << 12) /**< support ARP/NS offloading */
+#define WOWL_WPA2 (1 << 13) /**< read protocol version for EAPOL frames */
+#define WOWL_KEYROT (1 << 14) /**< If the bit is set, use key rotation */
+#define WOWL_BCAST (1 << 15) /**< If the bit is set, frm received was bcast frame */
+
+#define MAXBCNLOSS ((1 << 13) - 1) /**< max 13-bit value for bcn loss */
+
+/* UCODE shm view:
+ * typedef struct {
+ * uint16 offset; // byte offset
+ * uint16 patternsize; // the length of value[.] in bytes
+ * uchar bitmask[MAXPATTERNSIZE/8]; // 16 bytes, the effective length is (patternsize+7)/8
+ * uchar value[MAXPATTERNSIZE]; // 128 bytes, the effective length is patternsize.
+ * } netpattern_t;
+ */
+#define NETPATTERNSIZE (148) /* 128 value + 16 mask + 2 offset + 2 patternsize */
+#define MAXPATTERNSIZE 128
+#define MAXMASKSIZE MAXPATTERNSIZE/8
+
+/** Security Algorithm defines */
+#define WOWL_TSCPN_SIZE 6
+#define WOWL_TSCPN_COUNT 4 /**< 4 ACs */
+#define WOWL_TSCPN_BLK_SIZE (WOWL_TSCPN_SIZE * WOWL_TSCPN_COUNT)
+
+#define WOWL_SECSUITE_GRP_ALGO_MASK 0x0007
+#define WOWL_SECSUITE_GRP_ALGO_SHIFT 0
+#define WOWL_SECSUITE_ALGO_MASK 0x0700
+#define WOWL_SECSUITE_ALGO_SHIFT 8
+
+#define EXPANDED_KEY_RNDS 10
+#define EXPANDED_KEY_LEN 176 /* the expanded key from KEK (4*11*4, 16-byte state, 11 rounds) */
+
+/* Organization of Template RAM is as follows
+ * typedef struct {
+ * uint8 AES_XTIME9DBE[1024];
+ * uint8 AES_INVSBOX[256];
+ * uint8 AES_KEYW[176];
+ * } AES_TABLES_t;
+ */
+/* See dot11_firmware/diag/wmac_tcl/wmac_762_wowl_gtk_aes: proc write_aes_tables,
+ * for an example of writing those tables into the tx fifo buffer.
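+ * Note (derived from the sizes above): the three tables occupy
+ * 1024 + 256 + 176 = 1456 bytes of template RAM, and the 176-byte
+ * AES_KEYW region matches EXPANDED_KEY_LEN (4 * 11 * 4).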
+ */ + +typedef struct { + uint16 MacTxControlLow; /**< mac-tx-ctl-low word */ + uint16 MacTxControlHigh; /**< mac-tx-ctl-high word */ + uint16 PhyTxControlWord; /**< phy control word */ + uint16 PhyTxControlWord_1; /**< extra phy control word for mimophy */ + union { + uint16 XtraFrameTypes; /**< frame type for RTS/FRAG fallback (used only for AES) */ + uint16 bssenc_pos; /**< BssEnc includes key ID , for corerev >= 42 */ + } u1; + uint8 plcp[6]; /**< plcp of template */ + + uint16 mac_frmtype; /**< MAC frame type for GTK MSG2, can be + * dot11_data frame (0x20) or dot11_QoS_Data frame (0x22). + */ + uint16 frm_bytesize; /**< number of bytes in the template, it includes: + * PLCP, MAC header, IV/EIV, the data payload + * (eth-hdr and EAPOL-Key), TKIP MIC + */ + uint16 payload_wordoffset; /**< the word offset of the data payload */ + + /* ALIGN */ + uint16 seqnum; /**< Sequence number for this frame */ + uint8 seciv[18]; /**< 10-byte TTAK used for TKIP, 8-byte IV/EIV. + * See in the general tx descriptor. + */ +} wowl_templ_ctxt_t; + +#define WOWL_TEMPL_CTXT_LEN 42 /**< For making sure that no PADs are needed */ +#define WOWL_TEMPL_CTXT_FRMTYPE_DATA 0x2 +#define WOWL_TEMPL_CTXT_FRMTYPE_QOS 0x22 + +/** constant tables required for AES key unwrapping for key rotation */ +extern uint16 aes_invsbox[128]; +extern uint16 aes_xtime9dbe[512]; + +#define MAX_MPDU_SPACE (D11_TXH_LEN + 1538) + +/* Bits in TXE_BMCCTL */ +#define BMCCTL_INITREQ_SHIFT 0 +#define BMC_CTL_DONE (1 << BMCCTL_INITREQ_SHIFT) +#define BMCCTL_RESETSTATS_SHIFT 1 +#define BMCCTL_TXBUFSIZE_SHIFT 2 +#define BMCCTL_LOOPBACK_SHIFT 5 +#define BMCCTL_TXBUFSZ_MASK ((1 << BMCCTL_LOOPBACK_SHIFT) - (1 << BMCCTL_TXBUFSIZE_SHIFT)) +#define BMCCTL_CLKGATEEN_SHIFT 8 + +/* Bits in TXE_BMCConfig */ +#define BMCCONFIG_BUFCNT_SHIFT 0 +#define BMCCONFIG_DISCLKGATE_SHIFT 13 +#define BMCCONFIG_BUFCNT_MASK ((1 << BMCCONFIG_DISCLKGATE_SHIFT) - (1 << BMCCONFIG_BUFCNT_SHIFT)) + +/* Bits in TXE_BMCStartAddr */ +#define BMCSTARTADDR_STRTADDR_MASK 0x3ff + +/* Bits in TXE_BMCDescrLen */ +#define BMCDescrLen_ShortLen_SHIFT 0 +#define BMCDescrLen_LongLen_SHIFT 8 + +/* Bits in TXE_BMCAllocCtl */ +#define BMCAllocCtl_AllocCount_SHIFT 0 +/* Rev==50 || Rev>52 +* BMCAllocCtl.AllocCount [0:10] +* BMCAllocCtl.AllocThreshold [11:14] +* !Rev50 +* BMCAllocCtl.AllocCount [0:7] +* BMCAllocCtl.AllocThreshold [8:15] +*/ +#define BMCAllocCtl_AllocThreshold_SHIFT_Rev50 11 +#define BMCAllocCtl_AllocThreshold_SHIFT 8 + +/* Bits in TXE_BMCCmd1 */ +#define BMCCMD1_TIDSEL_SHIFT 1 +#define BMCCMD1_RDSRC_SHIFT 6 +#define BMCCmd1_RXMapPassThru_SHIFT 12 +#define BMCCMD1_BQSelNum_SHIFT 1u +#define BMCCMD1_BQSelType_SHIFT 7u +#define BMCCMD1_RDSRC_Group0 0u /* register itself */ +#define BMCCMD1_RDSRC_Group1 1u /* staged max/min */ +#define BMCCMD1_RDSRC_Group2 2u /* staged max/previous min */ +#define BMCCMD1_RDSRC_Group3 3u /* active max/min */ +#define BMCCMD1_RDSRC_SHIFT_rev80 10u +#define BMCCMD1_CoreSel_SHIFT 13u +#define BMCCMD1_CoreSel_SHIFT_rev80 15u + +/* Bits in TXE_BMCCmd */ +#define BMCCmd_TIDSel_SHIFT 0 +#define BMCCmd_Enable_SHIFT 4 +#define BMCCmd_ReleasePreAlloc_SHIFT 5 +#define BMCCmd_ReleasePreAllocAll_SHIFT 6 +#define BMCCmd_UpdateBA_SHIFT 7 +#define BMCCmd_Consume_SHIFT 8 +#define BMCCmd_Aggregate_SHIFT 9 +#define BMCCmd_UpdateRetryCount_SHIFT 10 +#define BMCCmd_DisableTID_SHIFT 11 + +#define BMCCmd_BQSelType_TX 0 +#define BMCCmd_BQSelType_RX 1 +#define BMCCmd_BQSelType_Templ 2 + +/* Bits in TXE_BMCCMD for rev >= 80 */ +#define BMCCmd_BQSelType_MASK_Rev80 0x00c0 
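+/* Illustrative sketch (not from the original source; `bmccmd` is a
+ * hypothetical register value): the rev >= 80 BQSelType field is read with
+ * the mask above and the shift defined next, i.e.
+ *   bqseltype = (bmccmd & BMCCmd_BQSelType_MASK_Rev80) >>
+ *               BMCCmd_BQSelType_SHIFT_Rev80;
+ */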
+#define BMCCmd_BQSelType_SHIFT_Rev80 6 +#define BMCCmd_Enable_SHIFT_rev80 8 +#define BMCCmd_ReleasePreAllocAll_SHIFT_rev80 10 + +/* Bits in TXE_BMCCmd1 */ +#define BMCCmd1_Minmaxappall_SHIFT 0 +#define BMCCmd1_Minmaxlden_SHIFT 5 +#define BMCCmd1_Minmaxffszlden_SHIFT 8 +#define BMCCmd_Core1_Sel_MASK 0x2000 + +/* Bits in TXE_BMCStatCtl */ +#define BMCStatCtl_TIDSel_SHIFT 0u +#define BMCStatCtl_STATSel_SHIFT 4u +#define BMCStatCtl_BQSelNum_SHIFT 0u +#define BMCStatCtl_BQSelType_SHIFT 6u +#define BMCStatCtl_STATSel_SHIFT_rev80 8u + +/* Bits in BMVpConfig */ +#define BMCVPConfig_SingleVpModePortA_SHIFT 4 + +/* Bits in TXE_PsmMSDUAccess */ +#define PsmMSDUAccess_TIDSel_SHIFT 0 +#define PsmMSDUAccess_MSDUIdx_SHIFT 4 +#define PsmMSDUAccess_ReadBusy_SHIFT 14 +#define PsmMSDUAccess_WriteBusy_SHIFT 15 + +/* Bits in TXE_PsmMSDUAccess for rev >= 80 */ +#define PsmMSDUAccess_BQSelType_SHIFT 5 +#define PsmMSDUAccess_MSDUIdx_SHIFT_rev80 7 +#define PsmMSDUAccess_BQSelType_Templ 2 +#define PsmMSDUAccess_BQSelType_TX 0 + +#ifdef WLRSDB +#define MAX_RSDB_MAC_NUM 2 +#else +#define MAX_RSDB_MAC_NUM 1 +#endif +#define MAX_MIMO_MAC_NUM 1 + +#ifdef WL_SCAN_CORE +#define MAX_MAC_CORE_NUM (MAX_RSDB_MAC_NUM + 1) +#else +#define MAX_MAC_CORE_NUM (MAX_RSDB_MAC_NUM) +#endif /* WL_SCAN_CORE */ + +#define MAC_CORE_UNIT_0 0x0u /**< First mac core unit */ +#define MAC_CORE_UNIT_1 0x1u /**< Second mac core unit */ + +/* HW unit of scan core. + * This is used to overwrite the tunables specific to scan core + */ +#define SCAN_CORE_UNIT 0x2u + +/* Supported phymodes / macmodes / opmodes */ +#define SINGLE_MAC_MODE 0x0 /**< only single mac is enabled */ +#define DUAL_MAC_MODE 0x1 /**< enables dual mac */ +/* (JIRA: CRDOT11ACPHY-652) Following two #defines support + * exclusive reg access to core 0/1 in MIMO mode + */ +#define SUPPORT_EXCLUSIVE_REG_ACCESS_CORE0 0x2 +#define SUPPORT_EXCLUSIVE_REG_ACCESS_CORE1 0x4 /**< not functional in 4349A0 */ +#define SUPPORT_CHANNEL_BONDING 0x8 /**< enables channel bonding, + * supported in single mac mode only + */ +#define SCAN_CORE_ACTIVE 0x10 /* scan core enabled for background DFS */ + +#define PHYMODE_MIMO (SINGLE_MAC_MODE) +#define PHYMODE_80P80 (SINGLE_MAC_MODE | SUPPORT_CHANNEL_BONDING) +#define PHYMODE_RSDB_SISO_0 (DUAL_MAC_MODE | SUPPORT_EXCLUSIVE_REG_ACCESS_CORE0) +#define PHYMODE_RSDB_SISO_1 (DUAL_MAC_MODE | SUPPORT_EXCLUSIVE_REG_ACCESS_CORE1) +#define PHYMODE_RSDB (PHYMODE_RSDB_SISO_0 | PHYMODE_RSDB_SISO_1) +#define PHYMODE_BGDFS 31 +#define PHYMODE_3x3_1x1 31 + +#define RX_INTR_FIFO_0 0x1 /**< FIFO-0 interrupt */ +#define RX_INTR_FIFO_1 0x2 /**< FIFO-1 interrupt */ +#define RX_INTR_FIFO_2 0x4 /**< FIFO-2 interrupt */ + +#define MAX_RX_FIFO 3 + +#define RX_CTL_FIFOSEL_SHIFT 8 +#define RX_CTL_FIFOSEL_MASK (0x3 << RX_CTL_FIFOSEL_SHIFT) + +#define RCO_EN (0x1u) /**< Receive checksum offload */ + +/* MAC_PTM_CTRL1 bit definitions */ +#define PTM_RX_TMSTMP_CAPTURE_EN 0x0001u +#define PTM_TX_TMSTMP_CAPTURE_EN 0x0001u +#define PTM_TMSTMP_OVERRIDE_EN 0x1000u + +/* For corerev >= 64 + * Additional DMA descriptor flags for AQM Descriptor. 
These are used in
+ * conjunction with the descriptor control flags defined in sbhnddma.h
+ */
+/* AQM DMA Descriptor control flags 1 */
+#define D64_AQM_CTRL1_SOFPTR 0x0000FFFF /* index of the descr which
+ * is SOF descriptor in DMA table
+ */
+#define D64_AQM_CTRL1_EPOCH 0x00010000 /* Epoch bit for the frame */
+#define D64_AQM_CTRL1_NUMD_MASK 0x00F00000 /* NumberofDescriptors(NUMD) */
+#define D64_AQM_CTRL1_NUMD_SHIFT 20
+#define D64_AQM_CTRL1_AC_MASK 0x0F000000 /* AC of the current frame */
+#define D64_AQM_CTRL1_AC_SHIFT 24
+
+/* AQM DMA Descriptor control flags 2 */
+#define D64_AQM_CTRL2_MPDULEN_MASK 0x00003FFF /* Length of the entire MPDU */
+#define D64_AQM_CTRL2_TXDTYPE 0x00080000 /* When set to 1 the long form of the
+ * TXD is used for the frame.
+ */
+/* For corerev >= 83
+ * DMA descriptor flags for AQM Descriptor. These are used in
+ * conjunction with the descriptor control flags defined in sbhnddma.h
+ */
+/* AQM DMA Descriptor control flags 1 */
+#define D11_REV83_AQM_DESC_CTRL1_SOFPTR 0x0000FFFFu /* index of the descr which
+ * is SOF descriptor in DMA table
+ */
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_SHIFT 16u
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH (1u << D11_REV83_AQM_DESC_CTRL1_EPOCH_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT_SHIFT 17u
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT (1u << \
+ D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_MASK (D11_REV83_AQM_DESC_CTRL1_EPOCH | \
+ D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT)
+#define D11_REV83_AQM_DESC_CTRL1_RESV1 0x00040000u /* RESERVED */
+#define D11_REV83_AQM_DESC_CTRL1_FRAGALLOW_SHIFT 19u /* Fragmentation allowance flag
+ * shift.
+ */
+#define D11_REV83_AQM_DESC_CTRL1_FRAGALLOW (1u << D11_REV83_AQM_DESC_CTRL1_FRAGALLOW_SHIFT)
+ /* Fragmentation allowance flag
+ * of the frame
+ */
+#define D11_REV83_AQM_DESC_CTRL1_NUMD_SHIFT 20u /* NumberofDescriptors(NUMD) */
+#define D11_REV83_AQM_DESC_CTRL1_NUMD_MASK (0xFu << D11_REV83_AQM_DESC_CTRL1_NUMD_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_AC_SHIFT 24u /* AC of the current frame */
+#define D11_REV83_AQM_DESC_CTRL1_AC_MASK (0xFu << D11_REV83_AQM_DESC_CTRL1_AC_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_ET 0x10000000u /* End of table */
+#define D11_REV83_AQM_DESC_CTRL1_IC 0x20000000u /* Interrupt on Completion */
+#define D11_REV83_AQM_DESC_CTRL1_RESV2 0x40000000u /* Used to be EF: End of frame,
+ * and would have been set to 1.
+ */
+#define D11_REV83_AQM_DESC_CTRL1_RESV3 0x80000000u /* Used to be SF: Start of Frame,
+ * and would have been set to 1
+ */
+
+/* AQM DMA Descriptor control flags 2 */
+#define D11_REV83_AQM_DESC_CTRL2_MPDULEN_MASK 0x00003FFFu /* Length of the entire MPDU */
+#define D11_REV83_AQM_DESC_CTRL2_FTYPE_SHIFT 14u /* Frame Type. Indicates whether the
+ * frame is Data, Management or
+ * Control Frame. 2 bits:
+ * 2'b00=Data, 2'b01=Management,
+ * 2'b10=Control, 2'b11=Invalid
+ * value
+ */
+#define D11_REV83_AQM_DESC_CTRL2_FTYPE_MASK (0x3u << D11_REV83_AQM_DESC_CTRL2_FTYPE_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL2_PTXDLENIDX_SHIFT 16u /* pTxD length index in 4-deep table */
+#define D11_REV83_AQM_DESC_CTRL2_PTXDLENIDX_MASK (0x3u << \
+ D11_REV83_AQM_DESC_CTRL2_PTXDLENIDX_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL2_PT 0x00040000u /* Parity bit. Choose a
+ * value such that the entire
+ * descriptor has even parity
+ */
+#define D11_REV83_AQM_DESC_CTRL2_USERIT 0x00080000u /* If set, the Rate Table Index and
+ * RIT entry are fetched into SHM by
+ * hardware. Otherwise, software
+ * uses pTxD to convey this
+ * information to ucode
+ */
+#define D11_REV83_AQM_DESC_CTRL2_USELIT 0x00100000u /* If set, the Link Info Table Index
+ * and LIT entry are fetched into
+ * SHM by hardware. Otherwise,
+ * software uses pTxD to convey this
+ * information to ucode
+ */
+#define D11_REV83_AQM_DESC_CTRL2_LIT_SHIFT 21u /* LIT (Link info Table Index) */
+#define D11_REV83_AQM_DESC_CTRL2_LIT_MASK (0x3Fu << D11_REV83_AQM_DESC_CTRL2_LIT_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL2_RIT_SHIFT 27u /* bit[4:0] of RTI (Rate info Table Index) */
+#define D11_REV83_AQM_DESC_CTRL2_RIT_MASK (0x1Fu << D11_REV83_AQM_DESC_CTRL2_RIT_SHIFT)
+
+/* AQM DMA Descriptor control flags 3 */
+#define D11_REV86_AQM_DESC_CTRL3_RTI_BIT5 0x00000001u /* bit[5] of RTI (cont'd from ctrl2) */
+#define D11_REV86_AQM_DESC_CTRL3_RTI_BIT5_MASK 1u /* bit[5] of RTI (cont'd from ctrl2) */
+#define D11_REV86_AQM_DESC_CTRL3_RTI_BIT5_SHIFT 0u
+#define D11_REV83_AQM_DESC_CTRL3_AGGR_ID 0x0000000Eu /* Aggregation ID */
+#define D11_REV83_AQM_DESC_CTRL3_CO 0x00000010u /* Coherency */
+#define D11_REV84_AQM_DESC_CTRL3_TXDPTR_SHIFT 5u /* TxD ptr */
+#define D11_REV84_AQM_DESC_CTRL3_TXDPTR_MASK 0xFFFFFFu /* bit[23:0] of TxD addr */
+#define D11_REV86_AQM_DESC_CTRL3_TID_SHIFT 29u /* TID for BSR */
+#define D11_REV86_AQM_DESC_CTRL3_TID_MASK (0x7u << D11_REV86_AQM_DESC_CTRL3_TID_SHIFT)
+
+/* values for psm_patchcopy_ctrl (0x1AC) post corerev 60 */
+#define PSM_PATCHCC_PMODE_MASK (0x3)
+#define PSM_PATCHCC_PMODE_RAM (0) /* default */
+#define PSM_PATCHCC_PMODE_ROM_RO (1)
+#define PSM_PATCHCC_PMODE_ROM_PATCH (2)
+
+#define PSM_PATCHCC_PENG_TRIGGER_SHIFT (2)
+#define PSM_PATCHCC_PENG_TRIGGER_MASK (1 << PSM_PATCHCC_PENG_TRIGGER_SHIFT)
+#define PSM_PATCHCC_PENG_TRIGGER (1 << PSM_PATCHCC_PENG_TRIGGER_SHIFT)
+
+#define PSM_PATCHCC_PCTRL_RST_SHIFT (3)
+#define PSM_PATCHCC_PCTRL_RST_MASK (0x3 << PSM_PATCHCC_PCTRL_RST_SHIFT)
+#define PSM_PATCHCC_PCTRL_RST_RESET (0x0 << PSM_PATCHCC_PCTRL_RST_SHIFT)
+#define PSM_PATCHCC_PCTRL_RST_HW (0x1 << PSM_PATCHCC_PCTRL_RST_SHIFT)
+
+#define PSM_PATCHCC_COPYEN_SHIFT (5)
+#define PSM_PATCHCC_COPYEN_MASK (1 << PSM_PATCHCC_COPYEN_SHIFT)
+#define PSM_PATCHCC_COPYEN (1 << PSM_PATCHCC_COPYEN_SHIFT)
+
+#define PSM_PATCHCC_UCIMGSEL_SHIFT (16)
+#define PSM_PATCHCC_UCIMGSEL_MASK (0x30000)
+#define PSM_PATCHCC_UCIMGSEL_DS0 (0x00000) /* default image */
+#define PSM_PATCHCC_UCIMGSEL_DS1 (0x10000) /* image 1 */
+
+/* patch copy delay for psm: 2 milliseconds */
+#define PSM_PATCHCOPY_DELAY (2000)
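As an illustration of how the PSM_PATCHCC_* fields above are meant to be driven, a hedged sketch follows. R_REG/W_REG/OSL_DELAY are the usual Broadcom OSL accessors; the register pointer 'psm_patchcopy_ctrl' stands in for however the driver resolves IHR 0x1AC and is not defined in this header.

uint32 ctl = R_REG(osh, psm_patchcopy_ctrl);

ctl = (ctl & ~PSM_PATCHCC_PMODE_MASK) | PSM_PATCHCC_PMODE_ROM_PATCH; /* select patch mode */
ctl |= PSM_PATCHCC_COPYEN;                      /* enable the patch-copy engine */
W_REG(osh, psm_patchcopy_ctrl, ctl);
W_REG(osh, psm_patchcopy_ctrl, ctl | PSM_PATCHCC_PENG_TRIGGER); /* kick the copy */
OSL_DELAY(PSM_PATCHCOPY_DELAY);                 /* allow the documented 2 ms to complete */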
+
+/* START - the following block WAS in d11_if_shm.h and could move to auto shm.
+ * Some of these are offsets, but some are not given by ucode [possibly legacy],
+ * so they are not covered by autoshm.
+ */
+
+/* Addr is byte address used by SW; offset is word offset used by uCode */
+
+/** Per AC TX limit settings */
+#define M_AC_TXLMT_ADDR(x, _ac) (M_AC_TXLMT_BLK(x) + (2 * (_ac)))
+
+/** delay from end of PLCP reception to RxTSFTime */
+#define M_APHY_PLCPRX_DLY 3
+#define M_BPHY_PLCPRX_DLY 4
+
+/* btcx debug shmem size */
+#define C_BTCX_DBGBLK_SZ 6 /**< Number of 16bit words */
+#define C_BTCX_DBGBLK2_SZ 11 /* size of statistics at 2nd SHM segment */
+
+#define C_BTCX_STATS_DBGBLK_SZ 18 /* total size of statistics at A2DP stats */
+#define C_BTCX_A2DP_PRI_SZ 6 /* size of a2dp priority counters stats */
+#define C_BTCX_A2DP_BUFCNT_SZ 8 /* size of a2dp buffer counters stats */
+#define C_BTCX_ANT_GRANT_SZ 4 /* size of ant granted duration to BT */
+#define C_BTCX_STATS_ECNTR_BLK_SZ C_BTCX_STATS_DBGBLK_SZ /* blk size for btcx ecounters */
+
+#define D11_DMA_CHANNELS 6
+
+/* WME shared memory */
+#define M_EDCF_STATUS_OFF(x) (0x007 * 2)
+
+/* Beacon-related parameters */
+#define M_BCN_LI(x) M_PS_MORE_DTIM_TBTT(x) /**< beacon listen interval */
+
+/* prerev 40 defines */
+#define D11_PRE40_M_SECKINDXALGO_BLK(x) (0x2ea * 2)
+
+/* corerev 40 defines */
+/* BLK SIZE needs to change for GE64 */
+#define D11_POST80_MAX_KEY_SIZE 32
+#define D11_PRE80_MAX_KEY_SIZE 16
+
+#define D11_MAX_KEY_SIZE(_corerev) ((D11REV_GE(_corerev, 80)) ? \
+ D11_POST80_MAX_KEY_SIZE : D11_PRE80_MAX_KEY_SIZE)
+
+#define M_SECKINDXALGO_BLK_SZ(_corerev) (AMT_SIZE(_corerev) + 4 /* default keys */)
+
+#define C_CTX_PCTLWD_POS (0x4 * 2)
+
+#define D11_MAX_TX_FRMS 32 /**< max frames allowed in tx fifo */
+
+/* Current channel number plus upper bits */
+#define D11_CURCHANNEL_5G 0x0100
+#define D11_CURCHANNEL_40 0x0200
+#define D11_CURCHANNEL_MAX 0x00FF
+
+#define INVALIDFID 0xffff
+
+#define D11_RT_DIRMAP_SIZE 16
+
+/** Rate table entry offsets */
+#define M_RT_PRS_PLCP_POS(x) 10
+#define M_RT_PRS_DUR_POS(x) 16
+#define M_RT_OFDM_PCTL1_POS(x) 18
+#define M_RT_TXPWROFF_POS(x) 20
+#define M_REV40_RT_TXPWROFF_POS(x) 14
+
+#define MIMO_MAXSYM_DEF 0x8000 /* 32k */
+#define MIMO_MAXSYM_MAX 0xffff /* 64k */
+
+#define WATCHDOG_8TU_DEF_LT42 5
+#define WATCHDOG_8TU_MAX_LT42 10
+#define WATCHDOG_8TU_DEF 3
+#define WATCHDOG_8TU_MAX 4
+
+#define M_PKTENG_RXAVGPWR_ANT(x, w) (M_MFGTEST_RXAVGPWR_ANT0(x) + (w) * 2)
+
+/* M_MFGTEST_NUM (pkt eng) bit definitions */
+#define MFGTEST_TXMODE 0x0001 /* TX frames indefinitely */
+#define MFGTEST_RXMODE 0x0002 /* RX frames */
+#define MFGTEST_RXMODE_ACK 0x0402 /* RX frames with sending ACKs back */
+#define MFGTEST_RXMODE_FWD2FW 0x8000 /* RX frames - forward packet to the fw */
+#define MFGTEST_TXMODE_FRMCNT 0x0101 /* TX frames by frmcnt */
+#define MFGTEST_RU_TXMODE 0x0011 /* RU frames TX indefinitely */
+#define MFGTEST_RU_TXMODE_FRMCNT 0x0111 /* RU TX frames by frmcnt */
+
+/* UOTA interface bit definitions */
+enum {
+ C_UOTA_CNTSRT_NBIT = 0, /* 0 OTA rx frame count start bit (14 LSB's) */
+ C_UOTA_RXFST_NBIT = 14, /* 14 indicating first frame */
+ C_UOTA_RSSION_NBIT = 15, /* 15 OTA rx ON bit position */
+};
+
+#define M_EDCF_QLEN(x) (M_EDCF_QINFO1_OFFSET(x))
+#define M_PWRIND_MAP(x, core) (M_PWRIND_BLKS(x) + ((core)<<1))
+
+#define M_BTCX_MAX_INDEX 320u
+#define M_BTCX_BACKUP_SIZE 130
+#define BTCX_AMPDU_MAX_DUR 2500
+
+#define ADDR_STAMON_NBIT (1 << 10) /* STA monitor bit in AMT_INFO_BLK entity */
+
+#ifdef WLP2P_UCODE
+
+/** The number of scheduling blocks */
+#ifdef BCMFUZZ /* need more for fuzzing */
+#define M_P2P_BSS_MAX 8
+#else
+#define M_P2P_BSS_MAX 4
+#endif /* BCMFUZZ */
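A small sketch of how the D11_CURCHANNEL_* values defined earlier in this block combine, now that they are usable as expression values; 'channel', 'is_5g' and 'is_40mhz' are illustrative locals, not names from this header:

uint16 curchannel = (uint16)(channel & D11_CURCHANNEL_MAX); /* low byte: channel number */
if (is_5g)
	curchannel |= D11_CURCHANNEL_5G;  /* band flag */
if (is_40mhz)
	curchannel |= D11_CURCHANNEL_40;  /* bandwidth flag */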
+
+/** WiFi P2P interrupt block positions */
+#define M_P2P_I_BLK_SZ 4
+#define M_P2P_I_BLK_OFFSET(x) (M_P2P_INTR_BLK(x) - M_P2P_INTF_BLK(x))
+#define M_P2P_I_BLK(x, b) (M_P2P_I_BLK_OFFSET(x) + (M_P2P_I_BLK_SZ * (b) * 2))
+#define M_P2P_I(x, b, i) (M_P2P_I_BLK(x, b) + ((i) * 2))
+
+#define M_P2P_I_PRE_TBTT 0 /**< pretbtt, wake up just before beacon reception */
+#define M_P2P_I_CTW_END 1 /**< CTWindow ends */
+#define M_P2P_I_ABS 2 /**< absence period start, trigger for switching channels */
+#define M_P2P_I_PRS 3 /**< presence period starts */
+
+/** P2P hps flags */
+#define M_P2P_HPS_CTW(b) (1 << (b))
+#define M_P2P_HPS_NOA(b) (1 << ((b) + M_P2P_BSS_MAX))
+
+/** WiFi P2P address attribute block */
+#define M_ADDR_BMP_BLK_SZ 12
+#define M_ADDR_RANDMAC_BMP_BLK_SZ 40u
+
+#define M_ADDR_BMP_BLK(x, b) (M_ADDR_BMP_BLK_OFFSET(x) + ((b) * 2))
+
+#define ADDR_BMP_RA (1 << 0) /**< Receiver Address (RA) */
+#define ADDR_BMP_TA (1 << 1) /**< Transmitter Address (TA) */
+#define ADDR_BMP_BSSID (1 << 2) /**< BSSID */
+#define ADDR_BMP_AP (1 << 3) /**< Infra-BSS Access Point (AP) */
+#define ADDR_BMP_STA (1 << 4) /**< Infra-BSS Station (STA) */
+#define ADDR_BMP_P2P_DISC (1 << 5) /**< P2P Device */
+#define ADDR_BMP_P2P_GO (1 << 6) /**< P2P Group Owner */
+#define ADDR_BMP_P2P_GC (1 << 7) /**< P2P Client */
+#define ADDR_BMP_BSS_IDX_MASK (3 << 8) /**< BSS control block index */
+#define ADDR_BMP_BSS_IDX_SHIFT 8
+
+/** WiFi P2P address starts from this entry in RCMTA */
+#define P2P_ADDR_STRT_INDX (RCMTA_SIZE - M_ADDR_BMP_BLK_SZ)
+
+/* WiFi P2P per BSS control block positions.
+ * all time related fields are in units of (1 << time-unit shift) TU
+ */
+
+/* corerev > 29 && corerev < 40 */
+#define M_COREMASK_BLK_WOWL (0x7e8 * 2)
+
+/* corerev >= 42 */
+#define D11AC_M_COREMASK_BLK_WOWL (0x1b0 * 2)
+
+#define M_EXTLNA_PWRSAVE(x) M_RADIO_PWR(x) /**< External LNA power control support */
+
+/* D11AC shm location changes */
+#define D11AC_T_NULL_TPL_BASE (0x16 * 2)
+#define D11AC_T_NULL_TPL_SIZE_BYTES (24)
+#define D11_T_BCN0_TPL_BASE T_BCN0_TPL_BASE
+#define D11AC_T_BCN0_TPL_BASE (0x100 * 2)
+#define D11_T_BCN1_TPL_BASE T_BCN1_TPL_BASE
+#define D11AC_T_BCN1_TPL_BASE (0x240 * 2)
+#define D11AC_T_GACT_TWT_INFO_TPL_BASE (0xB0 * 2)
+#define D11AC_T_GACT_TWT_INFO_TPL_SIZE_BYTES (36)
+
+/* The response (ACK/BA) phyctrl words */
+#define D11AC_RSP_TXPCTL0 (0x4c * 2)
+#define D11AC_RSP_TXPCTL1 (0x4d * 2)
+
+#define D11AC_T_PRS_TPL_BASE (0x380 * 2)
+
+#define D11_M_RT_PRS_PLCP_POS(x) M_RT_PRS_PLCP_POS(x)
+#define D11_M_RT_PRS_DUR_POS(x) M_RT_PRS_DUR_POS(x)
+#define D11AC_M_RT_PRS_PLCP_POS 8
+#define D11AC_M_RT_PRS_DUR_POS 12
+
+/* Field definitions for M_REV40_RT_TXPWROFF_POS */
+#define M_REV40_RT_HTTXPWR_OFFSET_MASK 0x01f8 /**< bit 8:3 */
+#define M_REV40_RT_HTTXPWR_OFFSET_SHIFT 3
+
+/* for axphy */
+#define M_REV80_RT_TXPWR_OFFSET_MASK 0xff00 /* bit 15:8 */
+#define M_REV80_RT_TXPWR_OFFSET_SHIFT 9 /* 8 (byte align) + 1 (convert from S5.1 to S5.2) */
+
+/* shmem locations for Beamforming */
+/* shmem defined with prefix M_ are in shmem */
+#define shm_addr(base, offset) (((base) + (offset)) * 2)
+
+#define C_BFI_REFRESH_THR_OFFSET (1u)
+#define C_BFI_NDPA_TXLMT_OFFSET (2u)
+#define C_BFI_NRXC_OFFSET (3u)
+#define C_BFI_MLBF_LUT_OFFSET (4u) // for corerev < 64 only
+
+#define C_BFI_BLK_SIZE(corerev) ((D11REV_GE(corerev, 86) ?
18u: 16u)) + +/* BFI block definitions (Beamforming) */ +#define C_BFI_BFRIDX_POS (0) +#define C_BFI_NDPA_TST_POS (1) +#define C_BFI_NDPA_TXCNT_POS (2) +#define C_BFI_NDPA_SEQ_POS (3) +#define C_BFI_NDPA_FCTST_POS (4) +#define C_BFI_BFRCTL_POS (5) +#define C_BFI_BFR_CONFIG0_POS (6) +#define C_BFI_BFE_CONFIG0_POS (7) +#define C_BFI_BFE_MIMOCTL_POS (8) +#define C_BFI_BSSID0_POS (9) +#define C_BFI_BSSID1_POS (10) +#define C_BFI_BSSID2_POS (11) +#define C_BFI_STAINFO_POS (12) +#define C_BFI_STAINFO1_POS (13) +#define C_BFI_BFE_MYAID_POS (13) /* stainfo1 is mutually exclusive */ +#define C_BFI_BFMSTAT_POS (14) +#define C_BFI_BFE_MIMOCTL_EXT_POS (15) +/* below SHMs for rev >= 86 */ +#define C_BFI_BFE_11AXMIMOCTL_POS (16) /* phyreg bfeMimoCtlReg for 11AX */ +#define C_BFI_BFE_NDPNR_POS (17) +/* used by BFR */ +#define C_BFI_STA_ADDR_POS C_BFI_BSSID0_POS + +/* to be removed -start */ +#define M_BFI_BLK_SIZE (16u) +#define BFI_BLK_SIZE 18 +/* to be removed -end */ + +/* Phy cache index Bit<8> indicates the validity. Cleared during TxBf link Init + * to trigger a new sounding sequence. + */ +#define C_BFRIDX_VLD_NBIT 8 /* valid */ +#define C_BFRIDX_EN_NBIT 7 /* BFI block is enabled (has valid info), + * applicable only for MU BFI block in shmemx + */ +#define C_BFRIDX_BW_NBIT 12 + +#define C_STAINFO_FBT_NBIT 12 /* 0: SU; 1: MU */ +#define C_STAINFO_NCIDX_NBIT 13 /* Bits13-15: NC IDX; Reserved if Feedback Type is SU */ + +/* NDP control blk */ +#define C_BFI_BFRCTL_POS_NDP_TYPE_SHIFT (0) /* 0: HT NDP; 1: VHT NDP; HE no need */ +#define C_BFI_BFRCTL_POS_NSTS_SHIFT (1) /* 0: 2ss; 1: 3ss; 2: 4ss */ +#define C_BFI_BFRCTL_POS_MLBF_SHIFT (4) /* 1 enable MLBF(used for corerev < 64) */ +#define C_BFI_BFRCTL_POS_BFM_SHIFT (8) /* Bits15-8: BFM mask for BFM frame tx */ + +/** dynamic rflo ucode WAR defines */ +#define UCODE_WAR_EN 1 +#define UCODE_WAR_DIS 0 + +/** LTE coex definitions */ +#define LTECX_FLAGS_LPBK_OFF 0 + +/** LTECX shares BTCX shmem block */ +#define M_LTECX_BLK_PTR(x) M_BTCX_BLK_PTR(x) + +/** NR5GCX shares BTCX shmem block */ +#define M_NR5GCX_BLK_PTR(x) M_BTCX_BLK_PTR(x) + +/** RC1CX shares BTCX shmem block */ +#define M_RC1CX_BLK_PTR(x) M_BTCX_BLK_PTR(x) + +/** RC2CX shares BTCX shmem block */ +#define M_RC2CX_BLK_PTR(x) M_BTCX_BLK_PTR(x) + +/* CORE0 MODE */ +#define CORE0_MODE_RSDB 0x0 +#define CORE0_MODE_MIMO 0x1 +#define CORE0_MODE_80P80 0x2 + +#define CORE1_MODE_RSDB 0x100 + +#define HWACI_HOST_FLAG_ADDR (0x186) +#define HWACI_SET_SW_MITIGATION_MODE (0x0008) + +/* split RX war shm locations */ +#define RXFIFO_0_OFFSET 0x1A0 +#define RXFIFO_1_OFFSET 0x19E +#define HDRCONV_FIFO0_STSLEN 0x4 /* status length in header conversion mode */ + +/* GE80: + * [15:8]: Phy status length + * [7:0]: Ucode status length + */ +#define DEFAULT_FIFO0_STSLEN(corerev, corerev_minor) \ + (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? 0x2018 : \ + D11REV_GE(corerev, 80) ? 
0x2010: 0x24) + +/* M_ULP_WAKEIND bits */ +#define C_WATCHDOG_EXPIRY (1 << 0) +#define C_FCBS_ERROR (1 << 1) +#define C_RETX_FAILURE (1 << 2) +#define C_HOST_WAKEUP (1 << 3) +#define C_INVALID_FCBS_BLOCK (1 << 4) +#define C_HUDI_DS1_EXIT (1 << 5) +#define C_LOB_SLEEP (1 << 6) + +/* values for M_ULP_FEATURES */ +#define C_P2P_NOA (0x0001) +#define C_INFINITE_NOA (0x0002) +#define C_P2P_CTWIN (0x0004) +#define C_P2P_GC (0x0008) +#define C_BCN_TRIM (0x0010) +#define C_BT_COEX (0x0020) +#define C_LTE_COEX (0x0040) +#define C_ADS1 (0x0080) +#define C_LTECX_PSPOLL_PRIO_EN (0x0100) +#define C_ULP_SLOWCAL_SKIP (0x0200) +#define C_HUDI_ENABLE (0x0400) + +#define M_WOWL_ULP_SW_DAT_BLK (0xBFF * 2) /* (0xFFF * 2) - 1024 */ +#define M_WOWL_ULP_SW_DAT_BLK_MAX_SZ (0x400) /* 1024 bytes */ + +#define RX_INTR_FIFO_0 0x1 /* FIFO-0 interrupt */ +#define RX_INTR_FIFO_1 0x2 /* FIFO-1 interrupt */ +#define RX_INTR_FIFO_2 0x4 /* FIFO-2 interrupt */ + +/* M_TOF_FLAG bits */ +typedef enum { + TOF_RX_FTM_NBIT = 0, + TOF_SEQ_DISRXENTX_RFCTL = 1, + TOF_IS_TARGET = 2, + TOF_TPC_FREEZE = 3 +} eTOFFlags; + +/* TOF feature flags */ +#define M_UCODE_F2_TOF_BIT 7 /* part of features_2 shm */ +#define M_UCODE_F3_AVB_BIT 2 /* part of features_3 shm */ +#define M_UCODE_F3_SEQ_BIT 3 /* part of features_3 shm */ + +/* New SHM definitions required for tsync based time stamping of FTM frames. +* More details in below conf +* http://confluence.broadcom.com/display/WLAN/NewUcodeInterfaceForProxdFeature +*/ +#define FTM_TIMESTAMP_SHIFT 16 +#define TXS_ACK_INDEX_SHIFT 3 +#define FTM_ACK_TS_BLOCK_SIZE 3 +#define RXH_ACK_SHIFT(corerev) (D11REV_GE((corerev), 80) ? 12u:8u) +#define FTM_INVALID_SHM_INDEX(corerev) (D11REV_GE((corerev), 80) ? 0x04u:0x0Fu) +#define FTM_ACK_INDEX_MASK 0x0F +#define NUM_UCODE_ACK_TS_BLKS 4 + +#define FTM_TXSTATUS_ACK_RSPEC_BLOCK_MASK 0xFF +#define FTM_TXSTATUS_ACK_RSPEC_BW_MASK 0x3 +#define FTM_TXSTATUS_ACK_RSPEC_BW_SHIFT 2 +#define FTM_TXSTATUS_ACK_RSPEC_BW_20 0 +#define FTM_TXSTATUS_ACK_RSPEC_BW_40 1 +#define FTM_TXSTATUS_ACK_RSPEC_BW_80 2 +#define FTM_TXSTATUS_ACK_RSPEC_BW_160 3 +#define FTM_TXSTATUS_ACK_RSPEC_TYPE_SHIFT 4 +#define FTM_TXSTATUS_ACK_RSPEC_TYPE_MASK 0x7 +#define FTM_TXSTATUS_ACK_RSPEC_TYPE_CCK 0 +#define FTM_TXSTATUS_ACK_RSPEC_TYPE_LEG 1 /* Legacy */ +#define FTM_TXSTATUS_ACK_RSPEC_TYPE_HT 2 +#define FTM_TXSTATUS_ACK_RSPEC_TYPE_VHT 3 +#define FTM_TXSTATUS_ACK_RSPEC_TYPE_HE 4 +#define FTM_TXSTATUS_ACK_RSPEC_RATE_6M(ackword) (ackword >> 7) +/* Following are the offsets in M_DRVR_UCODE_IF_PTR block. Start address of + * M_DRVR_UCODE_IF_PTR block is present in M_DRVR_UCODE_IF_PTR. 
+ */ +#define M_ULP_FEATURES (0x0 * 2) + +/* M_HOST_FLAGS5 offset changed in ULP ucode */ +#define M_ULP_HOST_FLAGS5 (0x3d * 2) + +#define M_RADAR_REG_TMP (0x033 * 2) + +/* Bit masks for ClkGateUcodeReq2: Ucode MAC Clock Request2 (IHR Address 0x375) register */ +#define D11_FUNC16_MAC_CLOCKREQ_MASK (0x3) + +/* + * Clock gating registers + */ +#define CLKREQ_BLOCK 0 +#define CLKREQ_MAC_ILP 1 +#define CLKREQ_MAC_ALP 2 +#define CLKREQ_MAC_HT 3 + +/* ClkGateSts */ +#define CLKGTE_FORCE_MAC_CLK_REQ_SHIFT 0 +#define CLKGTE_MAC_PHY_CLK_REQ_SHIFT 4 + +/* ClkGateReqCtrl0 */ +#define CLKGTE_PSM_PATCHCOPY_CLK_REQ_SHIFT 0 +#define CLKGTE_RXKEEP_OCP_CLK_REQ_SHIFT 2 +#define CLKGTE_PSM_MAC_CLK_REQ_SHIFT 4 +#define CLKGTE_TSF_CLK_REQ_SHIFT 6 +#define CLKGTE_AQM_CLK_REQ_SHIFT 8 +#define CLKGTE_SERIAL_CLK_REQ_SHIFT 10 +#define CLKGTE_TX_CLK_REQ_SHIFT 12 +#define CLKGTE_POSTTX_CLK_REQ_SHIFT 14 + +/* ClkGateReqCtrl1 */ +#define CLKGTE_RX_CLK_REQ_SHIFT 0 +#define CLKGTE_TXKEEP_OCP_CLK_REQ_SHIFT 2 +#define CLKGTE_HOST_RW_CLK_REQ_SHIFT 4 +#define CLKGTE_IHR_WR_CLK_REQ_SHIFT 6 +#define CLKGTE_TKIP_KEY_CLK_REQ_SHIFT 8 +#define CLKGTE_TKIP_MISC_CLK_REQ_SHIFT 10 +#define CLKGTE_AES_CLK_REQ_SHIFT 12 +#define CLKGTE_WAPI_CLK_REQ_SHIFT 14 + +/* ClkGateReqCtrl2 */ +#define CLKGTE_WEP_CLK_REQ_SHIFT 0 +#define CLKGTE_PSM_CLK_REQ_SHIFT 2 +#define CLKGTE_MACPHY_CLK_REQ_BY_PHY_SHIFT 4 +#define CLKGTE_FCBS_CLK_REQ_SHIFT 6 +#define CLKGTE_HIN_AXI_MAC_CLK_REQ_SHIFT 8 + +/* ClkGateStretch0 */ +#define CLKGTE_MAC_HT_CLOCK_STRETCH_SHIFT 0 +#define CLKGTE_MAC_ALP_CLOCK_STRETCH_SHIFT 8 +#define CLKGTE_MAC_HT_CLOCK_STRETCH_VAL 0x4 + +/* ClkGateStretch1 */ +#define CLKGTE_MAC_PHY_CLOCK_STRETCH_SHIFT 13 + +/* ClkGateMisc */ +#define CLKGTE_TPF_CLK_REQTHRESH 0xF +#define CLKGTE_AQM_CLK_REQEXT 0x70 + +/* ClkGateDivCtrl */ +#define CLKGTE_MAC_ILP_OFF_COUNT_MASK 0x0007 +#define CLKGTE_MAC_ILP_OFF_COUNT_SHIFT 0 +#define CLKGTE_MAC_ILP_ON_COUNT_MASK 0x0020 +#define CLKGTE_MAC_ILP_ON_COUNT_MASK_GE_REV80 0x0030 +#define CLKGTE_MAC_ALP_OFF_COUNT_MASK 0x03C0 +#define CLKGTE_MAC_ALP_OFF_COUNT_SHIFT 6 + +/* ClkGatePhyClkCtrl */ +#define CLKGTE_PHY_MAC_PHY_CLK_REQ_EN_SHIFT 0 +#define CLKGTE_O2C_HIN_PHY_CLK_EN_SHIFT 1 +#define CLKGTE_HIN_PHY_CLK_EN_SHIFT 2 +#define CLKGTE_IHRP_PHY_CLK_EN_SHIFT 3 +#define CLKGTE_CCA_MAC_PHY_CLK_REQ_EN_SHIFT 4 +#define CLKGTE_TX_MAC_PHY_CLK_REQ_EN_SHIFT 5 +#define CLKGTE_HRP_MAC_PHY_CLK_REQ_EN_SHIFT 6 +#define CLKGTE_SYNC_MAC_PHY_CLK_REQ_EN_SHIFT 7 +#define CLKGTE_RX_FRAME_MAC_PHY_CLK_REQ_EN_SHIFT 8 +#define CLKGTE_RX_START_MAC_PHY_CLK_REQ_EN_SHIFT 9 +#define CLKGTE_FCBS_MAC_PHY_CLK_REQ_SHIFT 10 +#define CLKGTE_POSTRX_MAC_PHY_CLK_REQ_EN_SHIFT 11 +#define CLKGTE_DOT11_MAC_PHY_RXVALID_SHIFT 12 +#define CLKGTE_NOT_PHY_FIFO_EMPTY_SHIFT 13 +#define CLKGTE_DOT11_MAC_PHY_BFE_REPORT_DATA_READY 14 +#define CLKGTE_DOT11_MAC_PHY_CLK_BIT15 15 + +/* ClkGateExtReq0 */ +#define CLKGTE_TOE_SYNC_MAC_CLK_REQ_SHIFT 0 +#define CLKGTE_TXBF_SYNC_MAC_CLK_REQ_SHIFT 2 +#define CLKGTE_HIN_SYNC_MAC_CLK_REQ_SHIFT 4 +#define CLKGTE_SLOW_SYNC_CLK_REQ_SHIFT 6 +#define CLKGTE_ERCX_SYNC_CLK_REQ_SHIFT 8 +#define CLKGTE_BTCX_SYNC_CLK_REQ_SHIFT 10 +#define CLKGTE_IFS_CRS_SYNC_CLK_REQ_SHIFT 12 +#define CLKGTE_IFS_GCI_SYNC_CLK_REQ_SHIFT 14 + +#define CLKGTE_TOE_SYNC_MAC_CLK_REQ_80_SHIFT 2 +#define CLKGTE_TXBF_SYNC_MAC_CLK_REQ_80_SHIFT 4 +#define CLKGTE_HIN_SYNC_MAC_CLK_REQ_80_SHIFT 6 +#define CLKGTE_SLOW_SYNC_CLK_REQ_80_SHIFT 8 +#define CLKGTE_ERCX_SYNC_CLK_REQ_80_SHIFT 10 +#define CLKGTE_BTCX_SYNC_CLK_REQ_80_SHIFT 12 +#define 
CLKGTE_IFS_CRS_SYNC_CLK_REQ_80_SHIFT 14 + +#define CLKGTE_TOE_SYNC_MAC_CLK_REQ_83_SHIFT 2 +#define CLKGTE_TXBF_SYNC_MAC_CLK_REQ_83_SHIFT 4 +#define CLKGTE_HIN_SYNC_MAC_CLK_REQ_83_SHIFT 6 +#define CLKGTE_SLOW_SYNC_CLK_REQ_83_SHIFT 8 +#define CLKGTE_ERCX_SYNC_CLK_REQ_83_SHIFT 10 +#define CLKGTE_BTCX2_SYNC_CLK_REQ_83_SHIFT 12 +#define CLKGTE_BTCX_SYNC_CLK_REQ_83_SHIFT 14 + +/* ClkGateExtReq1 */ +#define CLKGTE_PHY_FIFO_SYNC_CLK_REQ_SHIFT 0 +#define CLKGTE_RXE_CHAN_SYNC_CLK_REQ_SHIFT 2 +#define CLKGTE_PMU_MDIS_SYNC_MAC_CLK_REQ_SHIFT 4 +#define CLKGTE_PSM_IPC_SYNC_CLK_REQ_SHIFT 6 + +#define CLKGTE_IFS_GCI_SYNC_CLK_REQ_80_SHIFT 0 +#define CLKGTE_PHY_FIFO_SYNC_CLK_REQ_80_SHIFT 2 +#define CLKGTE_RXE_CHAN_SYNC_CLK_REQ_80_SHIFT 4 +#define CLKGTE_PMU_MDIS_SYNC_MAC_CLK_REQ_80_SHIFT 6 +#define CLKGTE_PSM_IPC_SYNC_CLK_REQ_80_SHIFT 8 + +#define CLKGTE_IFS_CRS_SYNC_CLK_REQ_83_SHIFT 0 +#define CLKGTE_IFS_GCI_SYNC_CLK_REQ_83_SHIFT 2 +#define CLKGTE_PHY_FIFO_SYNC_CLK_REQ_83_SHIFT 4 +#define CLKGTE_RXE_CHAN_SYNC_CLK_REQ_83_SHIFT 6 +#define CLKGTE_PMU_MDIS_SYNC_MAC_CLK_REQ_83_SHIFT 8 +#define CLKGTE_PSM_IPC_SYNC_CLK_REQ_83_SHIFT 10 + +/* PFE CtlStat1 register */ +#define PFE_CTLSTAT1_ROUTE_PFE_TO_BMSTAT (1u << 15u) +#define PFE_CTLSTAT1_PFE_ENABLE (1u << 0u) + +/* PPR Ctrl1 register */ +#define PPR_CTMODE_SHIFT 8u +#define PPR_CTMODE_MASK (3u << PPR_CTMODE_SHIFT) + +#define PPR_CTMODE_A (0u << PPR_CTMODE_SHIFT) +#define PPR_CTMODE_B (1u << PPR_CTMODE_SHIFT) +#define PPR_CTMODE_C (2u << PPR_CTMODE_SHIFT) + +/* Ptxd Len */ +#define PTXD_LEN0_SHIFT (0u) +#define PTXD_LEN1_SHIFT (8u) +#define PTXD_LEN2_SHIFT (0u) +#define PTXD_LEN3_SHIFT (8u) +/* =========== LHL regs =========== */ +/* WL ARM Timer0 Interrupt Status (lhl_wl_armtim0_st_adr) */ +#define LHL_WL_ARMTIM0_ST_WL_ARMTIM_INT_ST 0x00000001 + +#define D11_AUTO_MEM_STBY_RET_SHIFT (4u) +#define D11_AUTO_MEM_STBY_RET_83_SHIFT (5u) +#define D11_AUTO_MEM_STBY_NON_RET_SHIFT (6u) +#define D11_AUTO_MEM_STBY_BM_SHIFT (9u) + +#define D11_AUTO_MEM_STBY_RET_SHIFT_REV(d11rev) \ + (((d11rev) >= 83) ? 
D11_AUTO_MEM_STBY_RET_83_SHIFT : D11_AUTO_MEM_STBY_RET_SHIFT) + +/* WiFi P2P TX stop timestamp block (only applicable with AC ucode) */ +#define P2P_TXSTOP_SHMPERBSS 2u /* 2 shmems per BSS */ +#define M_P2P_TXSTOP_TS(x, b, w) (M_P2P_TXSTOP_T_BLK(x) +\ + (P2P_TXSTOP_SHMPERBSS * (b) + (w)) * 2) + +#define D11TXHDR_RATEINFO_ACCESS_VAL(txh, corerev, member) \ + ((((txh)->corerev).RateInfo[3]).member) + +/* QoS + BSR information */ +#define D11_QOS_BSR_TIDQS_SHIFT 0u +#define D11_QOS_BSR_TIDQS_SZ 8u +#define D11_QOS_BSR_TIDQS_MASK (((1 << D11_QOS_BSR_TIDQS_SZ) - 1) << D11_QOS_BSR_TIDQS_SHIFT) + +#define D11_QOS_BSR_UV_SHIFT 8u +#define D11_QOS_BSR_UV_SZ 6u +#define D11_QOS_BSR_UV_MASK (((1 << D11_QOS_BSR_UV_SZ) - 1) << D11_QOS_BSR_UV_SHIFT) + +#define D11_QOS_BSR_SF_SHIFT 14u +#define D11_QOS_BSR_SF_SZ 2u +#define D11_QOS_BSR_SF_MASK (((1 << D11_QOS_BSR_SF_SZ) - 1) << D11_QOS_BSR_SF_SHIFT) + +/* Queue size in QoS control */ +#define D11_QOS_BSR_SF_0 0u +#define D11_QOS_BSR_SF_1 1u +#define D11_QOS_BSR_SF_2 2u +#define D11_QOS_BSR_SF_3 3u + +#define D11_QS_OFFSET_SF_0 0u +#define D11_QS_OFFSET_SF_1 1024u +#define D11_QS_OFFSET_SF_2 17408u +#define D11_QS_OFFSET_SF_3 148480u + +#define D11_QOS_BSR_SF_0_SHIFT 4u /* Scale: 16 bytes */ +#define D11_QOS_BSR_SF_1_SHIFT 8u /* Scale: 256 bytes */ +#define D11_QOS_BSR_SF_2_SHIFT 11u /* Scale: 2048 bytes */ +#define D11_QOS_BSR_SF_3_SHIFT 15u /* Scale: 32768 bytes */ + +#define D11_MIN_QS_UV 0u +#define D11_MAX_QS_UV 63u +#define D11_MAX_QS_UV_SF3 ((D11_MAX_QS_UV) - 1) + +/* 1008: 16 * UV when the Scaling Factor subfield is 0 */ +#define D11_MAX_QS_SF_0 (D11_QS_OFFSET_SF_0 + (D11_MAX_QS_UV << D11_QOS_BSR_SF_0_SHIFT)) +/* 17152: 1024 + 256 * UV when the Scaling Factor subfield is 1 */ +#define D11_MAX_QS_SF_1 (D11_QS_OFFSET_SF_1 + (D11_MAX_QS_UV << D11_QOS_BSR_SF_1_SHIFT)) +/* 146432: 17408 + 2048 * UV when the Scaling Factor subfield is 2 */ +#define D11_MAX_QS_SF_2 (D11_QS_OFFSET_SF_2 + (D11_MAX_QS_UV << D11_QOS_BSR_SF_2_SHIFT)) +/* 2147328: 148480 + 32768 * UV when the Scaling Factor subfield is 3 */ +#define D11_MAX_QS_SF_3 (D11_QS_OFFSET_SF_3 + ((D11_MAX_QS_UV_SF3-1) << D11_QOS_BSR_SF_3_SHIFT)) + +/* 2 bits for HE signature and 4 bits for control ID */ +#define D11_BSR_HE_SIG_SHIFT 6u +/* HE Variant with BSR control ID */ +#define D11_BSR_HE_SIG (0xf) +#define D11_BSR_ACI_BMAP_SHIFT (0 + D11_BSR_HE_SIG_SHIFT) +#define D11_BSR_DELTA_TID_SHIFT (4 + D11_BSR_HE_SIG_SHIFT) +#define D11_BSR_SF_SHIFT (8 + D11_BSR_HE_SIG_SHIFT) +#define D11_BSR_QUEUE_SIZE_HIGH_SHIFT (10 + D11_BSR_HE_SIG_SHIFT) +#define D11_BSR_QUEUE_SIZE_ALL_SHIFT (18 + D11_BSR_HE_SIG_SHIFT) + +#define D11_BSR_DELTA_TID_ALLTID_SIGNATURE 3u + +#define D11_BSR_QUEUE_SIZE_WIDTH 8u +#define D11_BSR_QUEUE_SIZE_WIDTH_VAL ((1 << D11_BSR_QUEUE_SIZE_WIDTH) - 1) +#define D11_BSR_QUEUE_SIZE_UNKNOWN (255u) +#define D11_BSR_QUEUE_SIZE_MAX (254u) +#define D11_BSR_QUEUE_SIZE_HIGH_MASK (D11_BSR_QUEUE_SIZE_WIDTH_VAL <<\ + D11_BSR_QUEUE_SIZE_HIGH_SHIFT) +#define D11_BSR_QUEUE_SIZE_ALL_MASK (D11_BSR_QUEUE_SIZE_WIDTH_VAL <<\ + D11_BSR_QUEUE_SIZE_ALL_SHIFT) + +#define D11_BSR_WD1_SHIFT 16u + +enum { + D11_BSR_SF_ID_16 = 0, /* 0 */ + D11_BSR_SF_ID_256 = 1, /* 1 */ + D11_BSR_SF_ID_2048 = 2, /* 2 */ + D11_BSR_SF_ID_32768 = 3 /* 3 */ +}; + +enum { + D11_PING_BLOCK_VALID = 0, /* 0 */ + D11_PONG_BLOCK_VALID = 1, /* 1 */ + D11_UC_READING_PING_BLOCK = 2, /* 2 */ + D11_UC_READING_PONG_BLOCK = 3 /* 3 */ +}; + +enum { + D11_BSR_TID0_POS = 0, /* 0 */ + D11_BSR_TID1_POS = 1, /* 1 */ + D11_BSR_TID2_POS = 2, /* 2 */ + 
D11_BSR_TID3_POS = 3, /* 3 */
+ D11_BSR_TID4_POS = 4, /* 4 */
+ D11_BSR_TID5_POS = 5, /* 5 */
+ D11_BSR_TID6_POS = 6, /* 6 */
+ D11_BSR_TID7_POS = 7, /* 7 */
+ D11_BSR_WD0_POS = 8, /* 8 */
+ D11_BSR_WD1_POS = 9, /* 9 */
+};
+
+#define D11_IS_PING_PONG_IN_RESET(i) (((i) & ((1 << D11_PING_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PING_BLOCK) | (1 << D11_PONG_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PONG_BLOCK))) == 0)
+#define D11_PING_BLOCK_VALID_MASK ((1 << D11_PONG_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PING_BLOCK))
+#define D11_PONG_BLOCK_VALID_MASK ((1 << D11_PING_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PONG_BLOCK))
+#define D11_PING_PONG_UPDATE_MASK ((1 << D11_PING_BLOCK_VALID) |\
+ (1 << D11_PONG_BLOCK_VALID))
+#define D11_IS_PING_BLOCK_WRITABLE(i) (((i) & D11_PING_BLOCK_VALID_MASK) == \
+ (1 << D11_PONG_BLOCK_VALID))
+#define D11_IS_PONG_BLOCK_WRITABLE(i) (((i) & D11_PONG_BLOCK_VALID_MASK) == \
+ (1 << D11_PING_BLOCK_VALID))
+#define D11_SET_PING_BLOCK_VALID(i) (((i) & ~(1 << D11_PONG_BLOCK_VALID)) |\
+ (1 << D11_PING_BLOCK_VALID))
+#define D11_SET_PONG_BLOCK_VALID(i) (((i) & ~(1 << D11_PING_BLOCK_VALID)) |\
+ (1 << D11_PONG_BLOCK_VALID))
+#define D11_SET_PING_PONG_INVALID(i) ((i) & ~((1 << D11_PING_BLOCK_VALID) |\
+ (1 << D11_PONG_BLOCK_VALID)))
+
+/* valid rx plcp check */
+#define PLCP_VALID(plcp) (((plcp)[0] | (plcp)[1] | (plcp)[2]) != 0)
+enum {
+ D11_TXTRIG_EN = 0, /* 0 */
+ D11_TXTRIG_PROG = 1, /* 1 */
+ D11_TXTRIG_DONE = 2, /* 2 */
+ D11_TXTRIG_TYPE = 4, /* 4 */
+};
+
+#define D11_SET_TXTRIG_EN (1 << D11_TXTRIG_EN)
+#define D11_TXTRIG_TYPE_MASK ((1 << D11_TXTRIG_TYPE) | (1 << (D11_TXTRIG_TYPE+1)))
+#define D11_SET_TXTRIG_TYPE(i) (((i) << D11_TXTRIG_TYPE) & D11_TXTRIG_TYPE_MASK)
+
+enum {
+ D11_MUEDCA_AIFSN = 0, /* 0 */
+ D11_MUEDCA_CWMIN = 1, /* 1 */
+ D11_MUEDCA_CWMAX = 2, /* 2 */
+ D11_MUEDCA_TIMER = 3, /* 3 */
+ D11_MUEDCA_SU_AIFSN = 4, /* 4 */
+ D11_MUEDCA_SU_CWMIN = 5, /* 5 */
+ D11_MUEDCA_SU_CWMAX = 6, /* 6 */
+ D11_MUEDCA_EXPIRY_TSF = 7, /* 7 */
+ D11_MUEDCA_QINFO = 8, /* 8 */
+ D11_MUEDCA_STAT = 9, /* 9 */
+ D11_MUEDCA_BLK_SIZE = 10 /* 10 */
+};
+#define D11_MUEDCA_BLK(x, idx, offset) (M_MUEDCA_BLK((x)) +\
+ ((idx) * (D11_MUEDCA_BLK_SIZE << 1)) + ((offset) << 1))
+
+#define D11_BSSCOLOR_VALID_SHIFT 15u
+#define D11_BSSCOLOR_VALID_MASK (1 << D11_BSSCOLOR_VALID_SHIFT)
+
+#ifdef BCMPCIE_HP2P
+/* HP2P (High Priority P2P) shared memory EDCA parameters */
+typedef struct shm_hp2p_edca_params {
+ uint16 txop;
+ uint16 cwmin;
+ uint16 cwmax;
+ uint16 cwcur;
+ uint16 aifs;
+ uint16 bslots;
+ uint16 reggap;
+ uint16 status;
+} shm_hp2p_edca_params_t;
+
+#define HP2P_STATUS_NEWPARAMS (1u << 8u)
+#endif /* BCMPCIE_HP2P */
+
+#define MAX_D11_GPIOS 16
+
+/* Workaround register */
+#define WAR_TXDMA_NONMODIFIABLE_EN 0x00000010 /* For TxDMA initiated AXI reads */
+#define WAR_AQMDMA_NONMODIFIABLE_EN 0x00000020 /* For AQMDMA initiated AXI reads */
+
+/* noise cal timeout when NAN is enabled.
+ * 54 * 256 us = ~14 ms. The smallest NAN CRB possible is 16 ms, so choose
+ * 14 ms as the timeout to ensure noise cal happens within that window.
+ */
+#define M_NOISE_CALTIMEOUT_FOR_NAN 54u
+
+#define TXPU_CMD_SET 1u /**< txpu set command */
+
+#endif /* _D11_H */
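The ping/pong flag bits above encode a simple producer/consumer handshake between driver and ucode. A hedged sketch of the driver side follows; the shm accessors are hypothetical placeholders, not names from this header:

uint16 flags = read_pingpong_flags();            /* hypothetical shm read */

if (D11_IS_PING_BLOCK_WRITABLE(flags)) {
	write_ping_block(params);                /* hypothetical block update */
	flags = D11_SET_PING_BLOCK_VALID(flags);
} else if (D11_IS_PONG_BLOCK_WRITABLE(flags)) {
	write_pong_block(params);
	flags = D11_SET_PONG_BLOCK_VALID(flags);
}
write_pingpong_flags(flags);                     /* hypothetical shm write */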
diff --git a/bcmdhd.101.10.361.x/include/d11_cfg.h b/bcmdhd.101.10.361.x/include/d11_cfg.h
new file mode 100755
index 0000000..b45c951
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11_cfg.h
@@ -0,0 +1,115 @@
+/*
+ * Header file for splitrx mode definitions
+ * Explains different splitrx modes, macros for classify, conversion.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <>
+ */
+
+#ifndef _d11_cfg_h_
+#define _d11_cfg_h_
+
+#ifdef USE_BCMCONF_H
+#include
+#else
+#if defined(BCMDONGLEHOST) && !defined(WINNT)
+#define D11REV_IS(var, val) ((var) == (val))
+#define D11REV_GE(var, val) ((var) >= (val))
+#define D11REV_GT(var, val) ((var) > (val))
+#define D11REV_LT(var, val) ((var) < (val))
+#define D11REV_LE(var, val) ((var) <= (val))
+
+#define D11MINORREV_IS(var, val) ((var) == (val))
+#define D11MINORREV_GE(var, val) ((var) >= (val))
+#define D11MINORREV_GT(var, val) ((var) > (val))
+#define D11MINORREV_LT(var, val) ((var) < (val))
+#define D11MINORREV_LE(var, val) ((var) <= (val))
+
+#define D11REV_MAJ_MIN_GE(corerev, corerev_minor, maj, min) \
+ ((D11REV_IS((corerev), (maj)) && D11MINORREV_GE((corerev_minor), (min))) || \
+ D11REV_GT(corerev, (maj)))
+
+#endif /* BCMDONGLEHOST */
+#endif /* USE_BCMCONF_H */
+
+#define RXMODE0 0 /* no split */
+#define RXMODE1 1 /* descriptor split */
+#define RXMODE2 2 /* descriptor split + classification */
+#define RXMODE3 3 /* fifo split + classification */
+#define RXMODE4 4 /* fifo split + classification + hdr conversion */
+
+#ifdef BCMSPLITRX
+ extern bool _bcmsplitrx;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSPLITRX_ENAB() (_bcmsplitrx)
+#elif defined(BCMSPLITRX_DISABLED)
+ #define BCMSPLITRX_ENAB() (0)
+#else
+ #define BCMSPLITRX_ENAB() (1)
+#endif
+
+ extern uint8 _bcmsplitrx_mode;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
+#elif defined(BCMSPLITRX_DISABLED)
+ #define BCMSPLITRX_MODE() (0)
+#else
+ #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
+#endif
+#else
+ #define BCMSPLITRX_ENAB() (0)
+ #define BCMSPLITRX_MODE() (0)
+#endif /* BCMSPLITRX */
+
+#define SPLIT_RXMODE1() ((BCMSPLITRX_MODE() == RXMODE1))
+#define SPLIT_RXMODE2() ((BCMSPLITRX_MODE() == RXMODE2))
+#define SPLIT_RXMODE3() ((BCMSPLITRX_MODE() == RXMODE3))
+#define SPLIT_RXMODE4() ((BCMSPLITRX_MODE() == RXMODE4))
+
+#define PKT_CLASSIFY() (SPLIT_RXMODE2() || SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define RXFIFO_SPLIT() (SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define HDR_CONV() (SPLIT_RXMODE4())
+#define HDRCONV_PAD 2
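To make the mode predicates concrete, here is a minimal sketch of rx-path branching on them; the helper function is illustrative and not part of this header:

static int rx_status_pad(void)
{
	/* mode 4 (RXMODE4) converts the receive header and inserts a 2-byte pad */
	return HDR_CONV() ? HDRCONV_PAD : 0;
}

/* typical guards elsewhere in the rx path:
 *   if (RXFIFO_SPLIT()) { ... payload arrives split across fifos ... }
 *   if (PKT_CLASSIFY()) { ... hardware classification is active ... }
 */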
+
+#define FRAG_CMN_MSG_HDROOM (16u) /* Common msg headroom required by PCIe to push txstatus */
+
+#if defined(FMF_LIT) && !defined(FMF_LIT_DISABLED)
+/* (188-4*24-16) required HEADROOM - 4 Rate info Block - CacheInfo */
+#define FRAG_HEADROOM_D11REV_GE83 76u
+#else
+#if (defined(WLC_TXDC) && !defined(WLC_TXDC_DISABLED)) || \
+ (defined(FMF_RIT) && !defined(FMF_RIT_DISABLED))
+#define FRAG_HEADROOM_D11REV_GE83 92u /* (188-4*24) required HEADROOM - 4 Rate info Block */
+#else
+/* required HEADROOM = PTXD (24) + LIT (16) + RIT (96) +
+ * max dot11hdr (44)::
+ * "FC+DUR+SEQ+A1+A2+A3"(24) + QOS(2) + max("HTC(4) + AES IV(8)", WAPI IV(18)) +
+ * MSDU data size (22):: SFH (14) + LLC (8)
+ * - ETHER_HDR_LEN
+ */
+#define FRAG_HEADROOM_D11REV_GE83 188u
+#endif /* (WLC_TXDC && !WLC_TXDC_DISABLED) || (FMF_RIT && !FMF_RIT_DISABLED) */
+#endif /* defined(FMF_LIT) && !defined(FMF_LIT_DISABLED) */
+#define FRAG_HEADROOM_D11REV_LT80 226u /* TXOFF + amsdu header */
+#define FRAG_HEADROOM_D11REV_GE80 \
+ (FRAG_HEADROOM_D11REV_GE83 + 4u) /* + TSO_HEADER_PASSTHROUGH_LENGTH(4) */
+
+#ifdef USE_NEW_COREREV_API
+#define FRAG_HEAD_ROOM(corerev) (D11REV_GE(corerev, 83) ? \
+ FRAG_HEADROOM_D11REV_GE83 : D11REV_GE(corerev, 80) ? \
+ FRAG_HEADROOM_D11REV_GE80 : FRAG_HEADROOM_D11REV_LT80)
+#else
+#define FRAG_HEAD_ROOM(sih, coreid) ((si_get_corerev(sih, coreid) >= 83) ? \
+ FRAG_HEADROOM_D11REV_GE83 : ((si_get_corerev(sih, coreid) >= 80) ? \
+ FRAG_HEADROOM_D11REV_GE80 : FRAG_HEADROOM_D11REV_LT80))
+#endif
+
+#endif /* _d11_cfg_h_ */
diff --git a/bcmdhd.101.10.361.x/include/d11reglist_proto.h b/bcmdhd.101.10.361.x/include/d11reglist_proto.h
new file mode 100755
index 0000000..a7a0004
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11reglist_proto.h
@@ -0,0 +1,66 @@
+/* D11reglist prototype for Broadcom 802.11abgn
+ * Networking Adapter Device Drivers.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <>
+ */
+#ifndef _d11reglist_proto_h_
+#define _d11reglist_proto_h_
+
+/* this is for dump_mac */
+enum {
+ D11REG_TYPE_IHR16 = 0,
+ D11REG_TYPE_IHR32 = 1,
+ D11REG_TYPE_SCR = 2,
+ D11REG_TYPE_SHM = 3,
+ D11REG_TYPE_TPL = 4,
+ D11REG_TYPE_GE64 = 5,
+ D11REG_TYPE_KEYTB = D11REG_TYPE_GE64,
+ D11REG_TYPE_IHRX16 = 6,
+ D11REG_TYPE_SCRX = 7,
+ D11REG_TYPE_SHMX = 8,
+ D11REG_TYPE_MAX = 9
+};
+
+#define D11REGTYPENAME { \
+ "ihr", "ihr", "scr", "shm", \
+ "tpl", "keytb", "ihrx", "scrx", \
+ "shmx" \
+}
+
+typedef struct _d11regs_bmp_list {
+ uint8 type;
+ uint16 addr;
+ uint32 bitmap;
+ uint8 step;
+ uint16 cnt; /* can be used together with bitmap or by itself */
+} d11regs_list_t;
+
+#define D11REG_BLK_SIZE 32
+typedef struct _d11regs_addr_list {
+ uint8 type;
+ uint16 cnt;
+ uint16 addr[D11REG_BLK_SIZE]; /* allow up to 32 per list */
+} d11regs_addr_t;
+
+typedef struct _d11obj_cache_t {
+ uint32 sel;
+ uint32 val;
+ uint16 addr32;
+ bool cache_valid;
+} d11obj_cache_t;
+
+typedef struct _svmp_list {
+ uint32 addr;
+ uint16 cnt;
+} svmp_list_t;
+
+#endif /* _d11reglist_proto_h_ */
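A sketch of how a dump utility might walk d11regs_list_t tables; the table contents here are made-up values for illustration only:

static const d11regs_list_t example_list[] = {
	/* type, addr, bitmap, step, cnt */
	{ D11REG_TYPE_IHR16, 0x400, 0x0, 2, 16 },
	{ D11REG_TYPE_SHM, 0x000, 0x0, 2, 64 },
};

static void print_reg_lists(void)
{
	static const char *names[D11REG_TYPE_MAX] = D11REGTYPENAME;
	uint i;

	for (i = 0; i < sizeof(example_list) / sizeof(example_list[0]); i++) {
		printf("%s block @ 0x%04x: %u entries, step %u\n",
		       names[example_list[i].type], example_list[i].addr,
		       example_list[i].cnt, example_list[i].step);
	}
}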
diff --git a/bcmdhd.101.10.361.x/include/d11regs.h b/bcmdhd.101.10.361.x/include/d11regs.h
new file mode 100755
index 0000000..95b726f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11regs.h
@@ -0,0 +1,180 @@
+/*
+ * Chip-specific hardware definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <>
+ */
+
+#ifndef _D11REGS_H
+#define _D11REGS_H
+
+#include
+#include
+#include
+#include
+
+#if !defined(BCMDONGLEHOST)
+#include
+#include
+#endif
+
+#if defined(BCMDONGLEHOST) || defined(WL_UNITTEST)
+typedef struct {
+ uint32 pad;
+} d11regdefs_t;
+
+typedef volatile uint8 d11regs_t;
+typedef struct _d11regs_info {
+ uint32 pad;
+} d11regs_info_t;
+
+#else /* defined(BCMDONGLEHOST) || defined(WL_UNITTEST) */
+
+typedef volatile struct d11regs d11regs_t;
+
+typedef struct _d11regs_info {
+ d11regs_t *regs;
+} d11regs_info_t;
+
+#endif /* !defined(BCMDONGLEHOST) || !defined(WL_UNITTEST) */
+
+typedef volatile struct {
+ uint32 intstatus;
+ uint32 intmask;
+} intctrlregs_t;
+
+/**
+ * read: 32-bit register that can be read as 32-bit or as two 16-bit halves
+ * write: only the low 16-bit half can be written
+ */
+typedef volatile union {
+ uint32 pmqhostdata; /**< read only! */
+ struct {
+ uint16 pmqctrlstatus; /**< read/write */
+ uint16 PAD;
+ } w;
+} pmqreg_t;
+
+/** dma corerev >= 11 */
+typedef volatile struct {
+ dma64regs_t dmaxmt; /* dma tx */
+ pio4regs_t piotx; /* pio tx */
+ dma64regs_t dmarcv; /* dma rx */
+ pio4regs_t piorx; /* pio rx */
+} fifo64_t;
+
+/** indirect dma corerev >= 64 */
+typedef volatile struct {
+ dma64regs_t dma; /**< dma tx */
+ uint32 indintstatus;
+ uint32 indintmask;
+} ind_dma_t;
+
+/** indirect dma corerev 80, 81, 82 */
+typedef volatile struct {
+ uint32 indintstatus;
+ uint32 indintmask;
+ dma64regs_t dma; /**< dma tx */
+} ind_dma_axc_t;
+
+/* access to register offsets and fields defined in dot11mac_all_regs.h */
+
+#define D11_REG_OFF(regname) \
+ dot11mac_##regname##_ADDR
+#define D11_REG_FIELD_MASK(regname, regfield) \
+ dot11mac_##regname##__##regfield##_MASK
+#define D11_REG_FIELD_SHIFT(regname, regfield) \
+ dot11mac_##regname##__##regfield##_SHIFT
+
+/* convert register offset to backplane address */
+
+#ifndef D11_REG_ADDR_CHK
+// #define D11_REG_ADDR_CHK
+#endif
+
+#ifdef D11_REG_ADDR_CHK
+#define D11_REG_ADDR_EXEMPT(regname) \
+ (D11_REG_OFF(regname) == D11_REG_OFF(PHY_REG_ADDR) || \
+ D11_REG_OFF(regname) == D11_REG_OFF(radioregaddr) || \
+ D11_REG_OFF(regname) == D11_REG_OFF(radioregdata) || \
+ D11_REG_OFF(regname) == D11_REG_OFF(OBJ_DATA) || \
+ 0)
+#define D11_REG32_ADDR(regbase, regname) \
+ ({ \
+ STATIC_ASSERT(D11_REG_ADDR_EXEMPT(regname) || D11_REG_OFF(regname) < 0x3e0); \
+ (volatile uint32 *)((uintptr)(regbase) + D11_REG_OFF(regname)); \
+ })
+#define D11_REG16_ADDR(regbase, regname) \
+ ({ \
+ STATIC_ASSERT(D11_REG_ADDR_EXEMPT(regname) || D11_REG_OFF(regname) >= 0x3e0); \
+ (volatile uint16 *)((uintptr)(regbase) + D11_REG_OFF(regname)); \
+ })
+#else /* !D11_REG_ADDR_CHK */
+#define D11_REG32_ADDR(regbase, regname) \
+ (volatile uint32 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+#define D11_REG16_ADDR(regbase, regname) \
+ (volatile uint16 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+#endif /* !D11_REG_ADDR_CHK */
+
+/* used in table */
+#define D11_REG32_ADDR_ENTRY(regbase, regname) \
+ (volatile uint32 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+#define D11_REG16_ADDR_ENTRY(regbase, regname) \
+ (volatile uint16 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+
+#ifndef D11_NEW_ACCESS_MACROS
+/* MOVED TO src/wl/sys/wlc_hw_priv.h */
+#define GET_MACINTSTATUS(osh, hw) R_REG((osh), D11_MACINTSTATUS(hw))
+#define SET_MACINTSTATUS(osh, hw, val) W_REG((osh), D11_MACINTSTATUS(hw), (val))
+#define GET_MACINTMASK(osh, hw) R_REG((osh), D11_MACINTMASK(hw))
+#define SET_MACINTMASK(osh, hw, val) W_REG((osh), D11_MACINTMASK(hw),
(val)) + +#define GET_MACINTSTATUS_X(osh, hw) R_REG((osh), D11_MACINTSTATUS_psmx(hw)) +#define SET_MACINTSTATUS_X(osh, hw, val) W_REG((osh), D11_MACINTSTATUS_psmx(hw), (val)) +#define GET_MACINTMASK_X(osh, hw) R_REG((osh), D11_MACINTMASK_psmx(hw)) +#define SET_MACINTMASK_X(osh, hw, val) W_REG((osh), D11_MACINTMASK_psmx(hw), (val)) + +#define GET_MACINTSTATUS_EXT(osh, hw) R_REG((osh), D11_MACINTSTATUS_EXT(hw)) +#define SET_MACINTSTATUS_EXT(osh, hw, val) W_REG((osh), D11_MACINTSTATUS_EXT(hw), (val)) +#define GET_MACINTMASK_EXT(osh, hw) R_REG((osh), D11_MACINTMASK_EXT(hw)) +#define SET_MACINTMASK_EXT(osh, hw, val) W_REG((osh), D11_MACINTMASK_EXT(hw), (val)) + +#define GET_MACINTSTATUS_EXT_X(osh, hw) R_REG((osh), D11_MACINTSTATUS_EXT_psmx(hw)) +#define SET_MACINTSTATUS_EXT_X(osh, hw, val) W_REG((osh), D11_MACINTSTATUS_EXT_psmx(hw), (val)) +#define GET_MACINTMASK_EXT_X(osh, hw) R_REG((osh), D11_MACINTMASK_EXT_psmx(hw)) +#define SET_MACINTMASK_EXT_X(osh, hw, val) W_REG((osh), D11_MACINTMASK_EXT_psmx(hw), (val)) + +#define D11Reggrp_intctrlregs(hw, ix) ((intctrlregs_t*)(((volatile uint8*)D11_intstat0(hw)) + \ + (sizeof(intctrlregs_t)*ix))) +#define D11Reggrp_inddma(hw, ix) (D11REV_GE(hw->corerev, 86) ? \ + ((ind_dma_t*)(((volatile uint8*)D11_ind_xmt_control(hw)) + (sizeof(ind_dma_t)*ix))) : \ + ((ind_dma_t*)(((volatile uint8*)D11_inddma(hw)) + (sizeof(ind_dma_t)*ix)))) +#define D11Reggrp_inddma_axc(hw, ix) ((ind_dma_axc_t*)(((volatile uint8*)D11_inddma(hw)) + \ + (sizeof(ind_dma_axc_t)*ix))) +#define D11Reggrp_indaqm(hw, ix) (D11REV_GE(hw->corerev, 86) ? \ + ((ind_dma_t*)(((volatile uint8*)D11_IndAQMctl(hw)) + (sizeof(ind_dma_t)*ix))) : \ + ((ind_dma_t*)(((volatile uint8*)D11_indaqm(hw)) + (sizeof(ind_dma_t)*ix)))) +#define D11Reggrp_pmqreg(hw, ix) ((pmqreg_t*)(((volatile uint8*)D11_PMQHOSTDATA(hw)) + \ + (sizeof(pmqreg_t)*ix))) +#define D11Reggrp_f64regs(hw, ix) ((fifo64_t*)(((volatile uint8*)D11_xmt0ctl(hw)) + \ + (sizeof(fifo64_t)*ix))) +#define D11Reggrp_dmafifo(hw, ix) ((dma32diag_t*)(((volatile uint8*)D11_fifobase(hw)) + \ + (sizeof(dma32diag_t)*ix))) +#define D11Reggrp_intrcvlazy(hw, ix) ((volatile uint32*)(((volatile uint8*)D11_intrcvlzy0(hw)) + \ + (sizeof(uint32)*ix))) +#define D11Reggrp_altintmask(hw, ix) ((volatile uint32*)(((volatile uint8*)D11_alt_intmask0(hw)) + \ + (sizeof(uint32)*ix))) +#define D11REG_ISVALID(ptr, addr) ((volatile uint16 *)(addr) != \ + ((volatile uint16 *) &((ptr)->regs->INVALID_ID))) +#endif /* D11_NEW_ACCESS_MACROS */ + +#endif /* _D11REGS_H */ diff --git a/bcmdhd.101.10.361.x/include/dbus.h b/bcmdhd.101.10.361.x/include/dbus.h new file mode 100755 index 0000000..7761dff --- /dev/null +++ b/bcmdhd.101.10.361.x/include/dbus.h @@ -0,0 +1,627 @@ +/* + * Dongle BUS interface Abstraction layer + * target serial buses like USB, SDIO, SPI, etc. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+#include
+
+extern uint dbus_msglevel;
+#define DBUS_ERROR_VAL 0x0001
+#define DBUS_TRACE_VAL 0x0002
+#define DBUS_INFO_VAL 0x0004
+
+#if defined(DHD_DEBUG)
+#define DBUSERR(args) do {if (dbus_msglevel & DBUS_ERROR_VAL) printf args;} while (0)
+#define DBUSTRACE(args) do {if (dbus_msglevel & DBUS_TRACE_VAL) printf args;} while (0)
+#define DBUSINFO(args) do {if (dbus_msglevel & DBUS_INFO_VAL) printf args;} while (0)
+#else /* defined(DHD_DEBUG) */
+#define DBUSERR(args)
+#define DBUSTRACE(args)
+#define DBUSINFO(args)
+#endif
+
+enum {
+ DBUS_OK = 0,
+ DBUS_ERR = -200,
+ DBUS_ERR_TIMEOUT,
+ DBUS_ERR_DISCONNECT,
+ DBUS_ERR_NODEVICE,
+ DBUS_ERR_UNSUPPORTED,
+ DBUS_ERR_PENDING,
+ DBUS_ERR_NOMEM,
+ DBUS_ERR_TXFAIL,
+ DBUS_ERR_TXTIMEOUT,
+ DBUS_ERR_TXDROP,
+ DBUS_ERR_RXFAIL,
+ DBUS_ERR_RXDROP,
+ DBUS_ERR_TXCTLFAIL,
+ DBUS_ERR_RXCTLFAIL,
+ DBUS_ERR_REG_PARAM,
+ DBUS_STATUS_CANCELLED,
+ DBUS_ERR_NVRAM,
+ DBUS_JUMBO_NOMATCH,
+ DBUS_JUMBO_BAD_FORMAT,
+ DBUS_NVRAM_NONTXT,
+ DBUS_ERR_RXZLP
+};
+
+#define ERR_CBMASK_TXFAIL 0x00000001
+#define ERR_CBMASK_RXFAIL 0x00000002
+#define ERR_CBMASK_ALL 0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE 0
+#define DBUS_CBCTL_READ 1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL 2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */
+
+/*
+ * The max TCB/RCB data buffer size
+ * With USB RPC aggregation on, the rx buffer has to be a single big chunk of
+ * memory due to dongle->host aggregation. The upper layer has to do a byte
+ * copy to deaggregate the buffer to satisfy the WL driver's one-buffer-per-pkt
+ * requirement. Windows Vista may be able to use MDL to work around this
+ * requirement.
+ * The tx buffer has to be copied over from the RPC buffer since the two are
+ * managed in different domains. Without the copy, DBUS and RPC would have to
+ * break the encapsulation, which is not implemented. An RPC aggregated buffer
+ * arrives as chained buffers; the byte copy needs to traverse the chain to
+ * form one contiguous USB irb.
+ * These buffer sizes must accommodate the MAX rpc agg size in both directions:
+ * #define BCM_RPC_TP_DNGL_AGG_MAX_BYTE
+ * #define BCM_RPC_TP_HOST_AGG_MAX_BYTE
+ * Without USB RPC aggregation, these buffers can be smaller, like a normal 2K,
+ * to fit a max tcp pkt (ETH_MAX_DATA_SIZE) + d11/phy/rpc/overhead.
+ *
+ * The number of buffers needed is upper layer dependent, e.g.
rpc defines BCM_RPC_TP_DBUS_NTXQ
+ */
+#define DBUS_BUFFER_SIZE_TX 32000
+#define DBUS_BUFFER_SIZE_RX 24000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG 2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG 2048
+
+/** DBUS types */
+enum {
+ DBUS_USB,
+ DBUS_SDIO,
+ DBUS_SPI,
+ DBUS_UNKNOWN
+};
+
+enum dbus_state {
+ DBUS_STATE_DL_PENDING,
+ DBUS_STATE_DL_DONE,
+ DBUS_STATE_UP,
+ DBUS_STATE_DOWN,
+ DBUS_STATE_PNP_FWDL,
+ DBUS_STATE_DISCONNECT,
+ DBUS_STATE_SLEEP,
+ DBUS_STATE_DL_NEEDED
+};
+
+enum dbus_pnp_state {
+ DBUS_PNP_DISCONNECT,
+ DBUS_PNP_SLEEP,
+ DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+ DBUS_FIRMWARE,
+ DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+ INVALID_SPEED = -1,
+ LOW_SPEED = 1, /**< USB 1.1: 1.5 Mbps */
+ FULL_SPEED, /**< USB 1.1: 12 Mbps */
+ HIGH_SPEED, /**< USB 2.0: 480 Mbps */
+ SUPER_SPEED, /**< USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
+typedef struct {
+ int bustype;
+ int vid;
+ int pid;
+ int devid;
+ int chiprev; /**< chip revision number */
+ int mtu;
+ int nchan; /**< Data Channels */
+ int has_2nd_bulk_in_ep;
+} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for packets/bytes
+ */
+typedef struct {
+ uint32 rx_errors;
+ uint32 tx_errors;
+ uint32 rx_dropped;
+ uint32 tx_dropped;
+} dbus_stats_t;
+
+/**
+ * Configurable BUS parameters
+ */
+enum {
+ DBUS_CONFIG_ID_RXCTL_DEFERRES = 1,
+ DBUS_CONFIG_ID_AGGR_LIMIT,
+ DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET
+};
+
+typedef struct {
+ uint32 config_id;
+ union {
+ uint32 general_param;
+ bool rxctl_deferrespok;
+ struct {
+ int maxrxsf;
+ int maxrxsize;
+ int maxtxsf;
+ int maxtxsize;
+ } aggr_param;
+ };
+} dbus_config_t;
+
+/**
+ * External Download Info
+ */
+typedef struct dbus_extdl {
+ uint8 *fw;
+ int fwlen;
+ uint8 *vars;
+ int varslen;
+} dbus_extdl_t;
+
+struct dbus_callbacks;
+struct exec_parms;
+
+typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype,
+ uint16 bus_no, uint16 slot, uint32 hdrlen);
+typedef void (*disconnect_cb_t)(void *arg);
+typedef void *(*exec_cb_t)(struct exec_parms *args);
+
+/** Client callbacks registered during dbus_attach() */
+typedef struct dbus_callbacks {
+ void (*send_complete)(void *cbarg, void *info, int status);
+ void (*recv_buf)(void *cbarg, uint8 *buf, int len);
+ void (*recv_pkt)(void *cbarg, void *pkt);
+ void (*txflowcontrol)(void *cbarg, bool onoff);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+} dbus_callbacks_t;
+
+struct dbus_pub;
+struct bcmstrbuf;
+struct dbus_irb;
+struct dbus_irb_rx;
+struct dbus_irb_tx;
+struct dbus_intf_callbacks;
+
+typedef struct {
+ void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs);
+ void (*detach)(struct dbus_pub *pub, void *bus);
+
+ int (*up)(void *bus);
+ int (*down)(void *bus);
+ int (*send_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb);
+ int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*send_ctl)(void *bus, uint8 *buf, int len);
+ int (*recv_ctl)(void *bus, uint8 *buf, int len);
+ int (*get_stats)(void *bus, dbus_stats_t *stats);
+ int (*get_attrib)(void *bus, dbus_attrib_t *attrib);
+
+ int (*pnp)(void *bus, int evnt);
+ int (*remove)(void *bus);
+ int (*resume)(void *bus);
+ int (*suspend)(void *bus);
+ int (*stop)(void *bus);
+ int (*reset)(void *bus);
+
+ /* Access to bus buffers
directly */ + void *(*pktget)(void *bus, int len); + void (*pktfree)(void *bus, void *pkt); + + int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len, + bool set); + void (*dump)(void *bus, struct bcmstrbuf *strbuf); + int (*set_config)(void *bus, dbus_config_t *config); + int (*get_config)(void *bus, dbus_config_t *config); + + bool (*device_exists)(void *bus); + int (*dlneeded)(void *bus); + int (*dlstart)(void *bus, uint8 *fw, int len); + int (*dlrun)(void *bus); + bool (*recv_needed)(void *bus); + + void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args); + void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args); + + int (*tx_timer_init)(void *bus); + int (*tx_timer_start)(void *bus, uint timeout); + int (*tx_timer_stop)(void *bus); + + int (*sched_dpc)(void *bus); + int (*lock)(void *bus); + int (*unlock)(void *bus); + int (*sched_probe_cb)(void *bus); + + int (*shutdown)(void *bus); + + int (*recv_stop)(void *bus); + int (*recv_resume)(void *bus); + + int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx); + + int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value); + + /* Add from the bottom */ +} dbus_intf_t; + +typedef struct dbus_pub { + struct osl_info *osh; + dbus_stats_t stats; + dbus_attrib_t attrib; + enum dbus_state busstate; + DEVICE_SPEED device_speed; + int ntxq, nrxq, rxsize; + void *bus; + struct shared_info *sh; + void *dev_info; +} dbus_pub_t; + +#define BUS_INFO(bus, type) (((type *) bus)->pub->bus) + +#define ALIGNED_LOCAL_VARIABLE(var, align) \ + uint8 buffer[SDALIGN+64]; \ + uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align; + +/* + * Public Bus Function Interface + */ + +/* + * FIX: Is there better way to pass OS/Host handles to DBUS but still + * maintain common interface for all OS?? 
+ * Under NDIS, param1 needs to be MiniportHandle
+ * For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ void *param1, void *param2);
+extern int dbus_deregister(void);
+
+//extern int dbus_download_firmware(dbus_pub_t *pub);
+//extern int dbus_up(struct dhd_bus *pub);
+extern int dbus_down(dbus_pub_t *pub);
+//extern int dbus_stop(struct dhd_bus *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+//extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+//extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void *dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+//extern int dbus_iovar_op(dbus_pub_t *pub, const char *name,
+// void *params, int plen, void *arg, int len, bool set);
+#ifdef BCMDBG
+extern void dbus_hist_dump(dbus_pub_t *pub, struct bcmstrbuf *b);
+#endif /* BCMDBG */
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
+/*
+ * Private Common Bus Interface
+ */
+
+/** IO Request Block (IRB) */
+typedef struct dbus_irb {
+ struct dbus_irb *next; /**< it's cast from dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+ struct dbus_irb irb; /* Must be first */
+ uint8 *buf;
+ int buf_len;
+ int actual_len;
+ void *pkt;
+ void *info;
+ void *arg;
+} dbus_irb_rx_t;
+
+typedef struct dbus_irb_tx {
+ struct dbus_irb irb; /** Must be first */
+ uint8 *buf; /** mutually exclusive with struct member 'pkt' */
+ int len; /** length of field 'buf' */
+ void *pkt; /** mutually exclusive with struct member 'buf' */
+ int retry_count;
+ void *info;
+ void *arg;
+ void *send_buf; /**< linear buffer for LINUX when aggregation is enabled */
+} dbus_irb_tx_t;
+
+/**
+ * DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to upper layer
+ */
+typedef struct dbus_intf_callbacks {
+ void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+ void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+ void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ bool (*isr)(void *cbarg, bool *wantdpc);
+ bool (*dpc)(void *cbarg, bool bounded);
+ void (*watchdog)(void *cbarg);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+ struct dbus_irb* (*getirb)(void *cbarg, bool send);
+ void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
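A hedged sketch of how a client might populate dbus_callbacks_t; the handler bodies are illustrative, and dbus_attach() (declared elsewhere in this driver) is where such a table would be handed over:

static void example_send_complete(void *cbarg, void *info, int status)
{
	if (status != DBUS_OK)
		DBUSERR(("%s: tx completed with error %d\n", __FUNCTION__, status));
}

static void example_state_change(void *cbarg, int state)
{
	if (state == DBUS_STATE_DOWN)
		DBUSTRACE(("%s: bus is down\n", __FUNCTION__));
}

static dbus_callbacks_t example_cbs = {
	.send_complete = example_send_complete,
	.state_change = example_state_change,
	/* remaining members are left NULL in this sketch */
};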
+
+/**
+ * DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to upper layer
+ */
+typedef struct dbus_intf_callbacks {
+	void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+	void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+	void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+	void (*errhandler)(void *cbarg, int err);
+	void (*ctl_complete)(void *cbarg, int type, int status);
+	void (*state_change)(void *cbarg, int state);
+	bool (*isr)(void *cbarg, bool *wantdpc);
+	bool (*dpc)(void *cbarg, bool bounded);
+	void (*watchdog)(void *cbarg);
+	void *(*pktget)(void *cbarg, uint len, bool send);
+	void (*pktfree)(void *cbarg, void *p, bool send);
+	struct dbus_irb* (*getirb)(void *cbarg, bool send);
+	void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support a new bus, port the functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+	dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type,
+	uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif /* #if defined(BCM_REQUEST_FW) */
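Editorial note: per the porting comment above, a new bus backend supplies its handlers through a dbus_intf_t and returns it from its dbus_bus_register() implementation. A minimal hedged sketch using only members visible in this section (everything named 'my_*' is hypothetical; unsupported handlers would stay NULL):

static void *my_pktget(void *bus, int len) { (void)bus; (void)len; return NULL; }
static void my_pktfree(void *bus, void *pkt) { (void)bus; (void)pkt; }
static int my_shutdown(void *bus) { (void)bus; return 0; }

static const dbus_intf_t my_bus_intf = {
	.pktget = my_pktget,
	.pktfree = my_pktfree,
	.shutdown = my_shutdown,
	/* remaining handlers left NULL in this sketch */
};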
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+/*
+ * Include file for the EHCI fastpath optimized USB
+ * Practically all the lines below have an equivalent in some structures in other include (or even
+ * source) files. This violates all kinds of structure and layering, but cutting through layers is
+ * what the optimization is about. The definitions are NOT literally borrowed from any GPLd code;
+ * the file is intended to be GPL-clean
+ *
+ * Note that while some resemblance between this code and GPLd code in Linux might exist, it is
+ * due to the common sibling. See FreeBSD: head/sys/dev/usb/controller/ehci.h for the source of
+ * inspiration :-)
+ *
+ * The code assumes little endian throughout
+ */
+
+#if !defined(__linux__)
+#error "EHCI fastpath is for Linux only."
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	/* Backward compatibility */
+	typedef unsigned int gfp_t;
+
+	#define dma_pool pci_pool
+	#define dma_pool_create(name, dev, size, align, alloc) \
+		pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+	#define dma_pool_destroy(pool) pci_pool_destroy(pool)
+	#define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+	#define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+	#define dma_map_single(dev, addr, size, dir)	pci_map_single(dev, addr, size, dir)
+	#define dma_unmap_single(dev, hnd, size, dir)	pci_unmap_single(dev, hnd, size, dir)
+	#define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+	#define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+	#define hc32_to_cpu(x)	le32_to_cpu(x)
+	#define cpu_to_hc32(x)	cpu_to_le32(x)
+	typedef unsigned int __hc32;
+#else
+	#error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH		0x31
+#define EHCI_SET_EP_BYPASS	EHCI_FASTPATH
+#define EHCI_SET_BYPASS_CB	(EHCI_FASTPATH + 1)
+#define EHCI_SET_BYPASS_DEV	(EHCI_FASTPATH + 2)
+#define EHCI_DUMP_STATE		(EHCI_FASTPATH + 3)
+#define EHCI_SET_BYPASS_POOL	(EHCI_FASTPATH + 4)
+#define EHCI_CLR_EP_BYPASS	(EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that it does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS	5
+#define EHCI_QTD_ALIGN		32
+#define EHCI_BULK_PACKET_SIZE	512
+#define EHCI_QTD_XACTERR_MAX	32
+
+struct ehci_qtd {
+	/* Hardware map */
+	volatile uint32_t	qtd_next;
+	volatile uint32_t	qtd_altnext;
+	volatile uint32_t	qtd_status;
+#define	EHCI_QTD_GET_BYTES(x)	(((x)>>16) & 0x7fff)
+#define	EHCI_QTD_IOC		0x00008000
+#define	EHCI_QTD_GET_CERR(x)	(((x)>>10) & 0x3)
+#define	EHCI_QTD_SET_CERR(x)	((x) << 10)
+#define	EHCI_QTD_GET_PID(x)	(((x)>>8) & 0x3)
+#define	EHCI_QTD_SET_PID(x)	((x) << 8)
+#define	EHCI_QTD_ACTIVE		0x80
+#define	EHCI_QTD_HALTED		0x40
+#define	EHCI_QTD_BUFERR		0x20
+#define	EHCI_QTD_BABBLE		0x10
+#define	EHCI_QTD_XACTERR	0x08
+#define	EHCI_QTD_MISSEDMICRO	0x04
+	volatile uint32_t	qtd_buffer[EHCI_QTD_NBUFFERS];
+	volatile uint32_t	qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+
+	/* Implementation extension */
+	dma_addr_t		qtd_self;	/**< own hardware address */
+	struct ehci_qtd		*obj_next;	/**< software link to the next QTD */
+	void			*rpc;		/**< pointer to the rpc buffer */
+	size_t			length;		/**< length of the data in the buffer */
+	void			*buff;		/**< pointer to the reassembly buffer */
+	int			xacterrs;	/**< retry counter for qtd xact error */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define	EHCI_NULL	__constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
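Editorial note: a hedged sketch of decoding a completed qTD status token with the accessor macros above ('token' and the helper are illustrative, not real hardware state). The EHCI "total bytes" field counts down as data moves, so the bytes actually transferred are the request size minus what remains:

static int example_qtd_token_decode(uint32_t token, size_t requested)
{
	if (token & EHCI_QTD_HALTED)
		return -1;				/* endpoint halted: error out */
	/* GET_BYTES holds the remaining byte count */
	size_t transferred = requested - EHCI_QTD_GET_BYTES(token);
	(void)transferred;
	return SHORT_READ_Q(token) ? 1 : 0;		/* 1 = short IN transfer */
}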
+
+/**
+ * Queue Head
+ * NOTE This structure is slightly different from the one in the kernel, but it needs to stay
+ * compatible.
+ */
+struct ehci_qh {
+	/* Hardware map */
+	volatile uint32_t	qh_link;
+	volatile uint32_t	qh_endp;
+	volatile uint32_t	qh_endphub;
+	volatile uint32_t	qh_curqtd;
+
+	/* QTD overlay */
+	volatile uint32_t	ow_next;
+	volatile uint32_t	ow_altnext;
+	volatile uint32_t	ow_status;
+	volatile uint32_t	ow_buffer [EHCI_QTD_NBUFFERS];
+	volatile uint32_t	ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+	/* Extension (should match the kernel layout) */
+	dma_addr_t		unused0;
+	void			*unused1;
+	struct list_head	unused2;
+	struct ehci_qtd		*dummy;
+	struct ehci_qh		*unused3;
+
+	struct ehci_hcd		*unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct kref		unused5;
+	unsigned		unused6;
+
+	uint8_t			unused7;
+
+	/* periodic schedule info */
+	uint8_t			unused8;
+	uint8_t			unused9;
+	uint8_t			unused10;
+	uint16_t		unused11;
+	uint16_t		unused12;
+	uint16_t		unused13;
+	struct usb_device	*unused14;
+#else
+	unsigned		unused5;
+
+	u8			unused6;
+
+	/* periodic schedule info */
+	u8			unused7;
+	u8			unused8;
+	u8			unused9;
+	unsigned short		unused10;
+	unsigned short		unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+	struct usb_device	*unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+	struct ehci_qtd		*first_qtd;
+		/* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+		/* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/** The corresponding structure in the kernel is used to get the QH */
+struct hcd_dev {	/* usb_device.hcpriv points to this */
+	struct list_head	unused0;
+	struct list_head	unused1;
+
+	/* array of QH pointers */
+	void			*ep[32];
+};
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc,
+	int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+	int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+inline void optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
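Editorial note: taken together, the public calls declared above follow a register, transfer, tear-down lifecycle. A hedged sketch of a transfer step, assuming the usual Broadcom convention that the DBUS_OK status code equals 0 (status macros are defined earlier in this header):

void example_dbus_session(dbus_pub_t *pub, uint8 *frame, int frame_len)
{
	/* unthrottle rx, then hand one flat buffer to the bus layer */
	dbus_flowctrl_rx(pub, FALSE);
	if (dbus_send_buf(pub, frame, frame_len, NULL) != 0 /* DBUS_OK assumed 0 */) {
		dbus_shutdown(pub);	/* sketch: give up on persistent send failure */
	}
}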
diff --git a/bcmdhd.101.10.361.x/include/dhd_daemon.h b/bcmdhd.101.10.361.x/include/dhd_daemon.h
new file mode 100755
index 0000000..d0cb12d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dhd_daemon.h
@@ -0,0 +1,55 @@
+/*
+ * Header file for DHD daemon to handle timeouts
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef __BCM_DHDD_H__
+#define __BCM_DHDD_H__
+
+#include
+/**
+ * To maintain compatibility when the dhd driver and dhd daemon are taken from different branches,
+ * make sure to keep this file the same across the dhd driver branch and the dhd apps branch.
+ * TODO: Make this file shared between apps and dhd.ko
+ */
+
+#define BCM_TO_MAGIC 0x600DB055
+#define NO_TRAP 0
+#define DO_TRAP	1
+
+typedef enum notify_dhd_daemon_reason {
+	REASON_COMMAND_TO,
+	REASON_OQS_TO,
+	REASON_SCAN_TO,
+	REASON_JOIN_TO,
+	REASON_DAEMON_STARTED,
+	REASON_DEVICE_TX_STUCK_WARNING,
+	REASON_DEVICE_TX_STUCK,
+	REASON_UNKOWN
+} notify_dhd_daemon_reason_t;
+
+typedef struct bcm_to_info {
+	int magic;
+	int reason;
+	int trap;
+} bcm_to_info_t;
+
+#endif /* __BCM_DHDD_H__ */
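Editorial note: a short sketch of how a timeout notification to the daemon is framed with the definitions above (the helper name is illustrative). The magic word lets the daemon reject stray messages:

static void example_fill_timeout_msg(bcm_to_info_t *to)
{
	to->magic = BCM_TO_MAGIC;	/* identifies a well-formed notification */
	to->reason = REASON_SCAN_TO;	/* e.g. a scan timed out */
	to->trap = NO_TRAP;		/* do not force a firmware trap */
}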
diff --git a/bcmdhd.101.10.361.x/include/dhdioctl.h b/bcmdhd.101.10.361.x/include/dhdioctl.h
new file mode 100755
index 0000000..6404b2c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dhdioctl.h
@@ -0,0 +1,478 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _dhdioctl_h_
+#define	_dhdioctl_h_
+
+#include <typedefs.h>
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+	uint32 cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint32 len;	/* length of user buffer */
+	uint32 set;	/* get or set request boolean (optional) */
+	uint32 used;	/* bytes read or written (optional) */
+	uint32 needed;	/* bytes needed (optional) */
+	uint32 driver;	/* to identify target driver */
+} dhd_ioctl_t;
+
+/* Underlying BUS definition */
+enum {
+	BUS_TYPE_USB = 0, /* for USB dongles */
+	BUS_TYPE_SDIO, /* for SDIO dongles */
+	BUS_TYPE_PCIE /* for PCIE dongles */
+};
+
+typedef enum {
+	DMA_XFER_SUCCESS = 0,
+	DMA_XFER_IN_PROGRESS,
+	DMA_XFER_FAILED
+} dma_xfer_status_t;
+
+typedef enum d11_lpbk_type {
+	M2M_DMA_LPBK = 0,
+	D11_LPBK = 1,
+	BMC_LPBK = 2,
+	M2M_NON_DMA_LPBK = 3,
+	D11_HOST_MEM_LPBK = 4,
+	BMC_HOST_MEM_LPBK = 5,
+	M2M_WRITE_TO_RAM = 6,
+	M2M_READ_FROM_RAM = 7,
+	D11_WRITE_TO_RAM = 8,
+	D11_READ_FROM_RAM = 9,
+	MAX_LPBK = 10
+} dma_xfer_type_t;
+
+typedef struct dmaxfer_info {
+	uint16 version;
+	uint16 length;
+	dma_xfer_status_t status;
+	dma_xfer_type_t type;
+	uint src_delay;
+	uint dest_delay;
+	uint should_wait;
+	uint core_num;
+	int error_code;
+	uint32 num_bytes;
+	uint64 time_taken;
+	uint64 tput;
+} dma_xfer_info_t;
+
+#define DHD_DMAXFER_VERSION 0x1
+
+#define DHD_FILENAME_MAX 64
+#define DHD_PATHNAME_MAX 128
+
+#ifdef EFI
+struct control_signal_ops {
+	uint32 signal;
+	uint32 val;
+};
+enum {
+	WL_REG_ON = 0,
+	DEVICE_WAKE = 1,
+	TIME_SYNC = 2
+};
+
+typedef struct wifi_properties {
+	uint8 version;
+	uint32 vendor;
+	uint32 model;
+	uint8 mac_addr[6];
+	uint32 chip_revision;
+	uint8 silicon_revision;
+	uint8 is_powered;
+	uint8 is_sleeping;
+	char module_revision[16];	/* null terminated string */
+	uint8 is_fw_loaded;
+	char fw_filename[DHD_FILENAME_MAX];	/* null terminated string */
+	char nvram_filename[DHD_FILENAME_MAX];	/* null terminated string */
+	uint8 channel;
+	uint8 module_sn[6];
+} wifi_properties_t;
+
+#define DHD_WIFI_PROPERTIES_VERSION 0x1
+
+#define DHD_OTP_SIZE_WORDS 912
+
+typedef struct intr_poll_data {
+	uint16 version;
+	uint16 length;
+	uint32 type;
+	uint32 value;
+} intr_poll_t;
+
+typedef enum intr_poll_data_type {
+	INTR_POLL_DATA_PERIOD = 0,
+	INTR_POLL_DATA_NUM_PKTS_THRESH,
+	INTR_POLL_DATA_PKT_INTVL_THRESH
+} intr_poll_type_t;
+
+#define DHD_INTR_POLL_VERSION 0x1u
+#endif /* EFI */
+
+typedef struct tput_test {
+	uint16 version;
+	uint16 length;
+	uint8 direction;
+	uint8 tput_test_running;
+	uint8 mac_sta[6];
+	uint8 mac_ap[6];
+	uint8 PAD[2];
+	uint32 payload_size;
+	uint32 num_pkts;
+	uint32 timeout_ms;
+	uint32 flags;
+
+	uint32 pkts_good;
+	uint32 pkts_bad;
+	uint32 pkts_cmpl;
+	uint64 time_ms;
+	uint64 tput_bps;
+} tput_test_t;
+
+typedef enum {
+	TPUT_DIR_TX = 0,
+	TPUT_DIR_RX
+} tput_dir_t;
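Editorial note: a hedged sketch of shaping a GET_VAR request with dhd_ioctl_t. It assumes the usual dhd convention that 'buf' carries a NUL-terminated iovar name on input and the result in place on output, and that 'driver' must carry DHD_IOCTL_MAGIC (both DHD_GET_VAR and the magic are defined further down in this header):

static void example_fill_get_var(dhd_ioctl_t *ioc, char *iovar_buf, uint32 buf_len)
{
	ioc->cmd = DHD_GET_VAR;		/* common ioctl id, defined below */
	ioc->buf = iovar_buf;		/* e.g. "version"; reply returned in place */
	ioc->len = buf_len;
	ioc->set = FALSE;		/* this is a get */
	ioc->driver = DHD_IOCTL_MAGIC;	/* identifies the target driver */
}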
+
+/*
+ * Current supported roles considered for policy management are AP, P2P and NAN.
+ * Hence the max value is limited to 3.
+ */
+#define DHD_MAX_IFACE_PRIORITY	3u
+typedef enum dhd_iftype {
+	DHD_IF_TYPE_STA = 0,
+	DHD_IF_TYPE_AP = 1,
+
+#ifdef DHD_AWDL
+	DHD_IF_TYPE_AWDL = 2,
+#endif /* DHD_AWDL */
+
+	DHD_IF_TYPE_NAN_NMI = 3,
+	DHD_IF_TYPE_NAN = 4,
+	DHD_IF_TYPE_P2P_GO = 5,
+	DHD_IF_TYPE_P2P_GC = 6,
+	DHD_IF_TYPE_P2P_DISC = 7,
+	DHD_IF_TYPE_IBSS = 8,
+	DHD_IF_TYPE_MONITOR = 9,
+	DHD_IF_TYPE_AIBSS = 10,
+	DHD_IF_TYPE_MAX
+} dhd_iftype_t;
+
+typedef struct dhd_iface_mgmt_data {
+	uint8 policy;
+	uint8 priority[DHD_IF_TYPE_MAX];
+} dhd_iface_mgmt_data_t;
+
+typedef enum dhd_iface_mgmt_policy {
+	DHD_IF_POLICY_DEFAULT = 0,
+	DHD_IF_POLICY_FCFS = 1,
+	DHD_IF_POLICY_LP = 2,
+	DHD_IF_POLICY_ROLE_PRIORITY = 3,
+	DHD_IF_POLICY_CUSTOM = 4,
+	DHD_IF_POLICY_INVALID = 5
+} dhd_iface_mgmt_policy_t;
+
+#define TPUT_TEST_T_VER 1
+#define TPUT_TEST_T_LEN 68
+#define TPUT_TEST_MIN_PAYLOAD_SIZE 16
+#define TPUT_TEST_USE_ETHERNET_HDR 0x1
+#define TPUT_TEST_USE_802_11_HDR 0x2
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC		0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION	1
+
+/*
+ * Increase the DHD_IOCTL_MAXLEN to 16K for supporting download of NVRAM files of size
+ * > 8K. In the existing implementation when NVRAM is to be downloaded via the "vars"
+ * DHD IOVAR, the NVRAM is copied to the DHD Driver memory. Later on when "dwnldstate" is
+ * invoked with FALSE option, the NVRAM gets copied from the DHD driver to the Dongle
+ * memory. The simple way to support this feature without modifying the DHD application
+ * or driver logic is to increase the DHD_IOCTL_MAXLEN size. This macro defines the "size"
+ * of the buffer in which data is exchanged between the DHD App and DHD driver.
+ */
+#define	DHD_IOCTL_MAXLEN	(16384)	/* max length ioctl buffer required */
+#define	DHD_IOCTL_SMLEN		256	/* "small" length ioctl buffer required */
+
+/*
+ * For cases where a 16K buffer is not sufficient, e.g. when the DHD dump output buffer
+ * is more than 16K.
+ */
+#define DHD_IOCTL_MAXLEN_32K	(32768u)
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC				0
+#define DHD_GET_VERSION				1
+#define DHD_GET_VAR				2
+#define DHD_SET_VAR				3
+
+/* message levels */
+#define DHD_ERROR_VAL		0x0001
+#define DHD_TRACE_VAL		0x0002
+#define DHD_INFO_VAL		0x0004
+#define DHD_DATA_VAL		0x0008
+#define DHD_CTL_VAL		0x0010
+#define DHD_TIMER_VAL		0x0020
+#define DHD_HDRS_VAL		0x0040
+#define DHD_BYTES_VAL		0x0080
+#define DHD_INTR_VAL		0x0100
+#define DHD_LOG_VAL		0x0200
+#define DHD_GLOM_VAL		0x0400
+#define DHD_EVENT_VAL		0x0800
+#define DHD_BTA_VAL		0x1000
+#if defined(NDIS) && (NDISVER >= 0x0630) && defined(BCMDONGLEHOST)
+#define DHD_SCAN_VAL		0x2000
+#else
+#define DHD_ISCAN_VAL		0x2000
+#endif
+#define DHD_ARPOE_VAL		0x4000
+#define DHD_REORDER_VAL		0x8000
+#define DHD_NOCHECKDIED_VAL	0x20000 /* UTF WAR */
+#define DHD_PNO_VAL		0x80000
+#define DHD_RTT_VAL		0x100000
+#define DHD_MSGTRACE_VAL	0x200000
+#define DHD_FWLOG_VAL		0x400000
+#define DHD_DBGIF_VAL		0x800000
+#ifdef DHD_PCIE_RUNTIMEPM
+#define DHD_RPM_VAL		0x1000000
+#else
+#define DHD_RPM_VAL		DHD_ERROR_VAL
+#endif /* DHD_PCIE_RUNTIMEPM */
+#define DHD_PKT_MON_VAL		0x2000000
+#define DHD_PKT_MON_DUMP_VAL	0x4000000
+#define DHD_ERROR_MEM_VAL	0x8000000
+#define DHD_DNGL_IOVAR_SET_VAL	0x10000000 /**< logs the setting of dongle iovars */
+#define DHD_LPBKDTDUMP_VAL	0x20000000
+#define DHD_PRSRV_MEM_VAL	0x40000000
+#define DHD_IOVAR_MEM_VAL	0x80000000
+#define DHD_ANDROID_VAL	0x10000
+#define DHD_IW_VAL	0x20000
+#define DHD_CFG_VAL	0x40000
+#define DHD_CONFIG_VAL	0x80000
+#define DHD_DUMP_VAL	0x100000
+#define DUMP_EAPOL_VAL	0x0001
+#define DUMP_ARP_VAL	0x0002
+#define DUMP_DHCP_VAL	0x0004
+#define DUMP_ICMP_VAL	0x0008
+#define DUMP_DNS_VAL	0x0010
+#define DUMP_TRX_VAL	0x0080
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+	uint32 version;		/* To allow structure change tracking */
+	uint32 freq;		/* Max ticks between tx/rx attempts */
+	uint32 count;		/* Test packets to send/rcv each attempt */
+	uint32 print;		/* Print counts every <print> attempts */
+	uint32 total;		/* Total packets (or bursts) */
+	uint32 minlen;		/* Minimum length of packets to send */
+	uint32 maxlen;		/* Maximum length of packets to send */
+	uint32 numsent;		/* Count of test packets sent */
+	uint32 numrcvd;		/* Count of test packets received */
+	uint32 numfail;		/* Count of test send failures */
+	uint32 mode;		/* Test mode (type of test packets) */
+	uint32 stop;		/* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO		1 /* Send echo requests */
+#define DHD_PKTGEN_SEND		2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST	3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV		4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE	(-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE	0	/* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP	(-1)	/* Request SD clock be stopped (and use SD1 mode) */
+
+enum dhd_maclist_xtlv_type {
+	DHD_MACLIST_XTLV_R = 0x1,
+	DHD_MACLIST_XTLV_X = 0x2,
+	DHD_SVMPLIST_XTLV = 0x3
+};
+
+typedef struct _dhd_maclist_t {
+	uint16 version;		/* Version */
+	uint16 bytes_len;	/* Total bytes length of lists, XTLV headers and paddings */
+	uint8 plist[1];		/* Pointer to the first list */
+} dhd_maclist_t;
+
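Editorial note: the DHD_*_VAL message levels above form a bitmask, so several categories can be enabled at once. A one-line sketch (the variable name is illustrative; in the driver this mask is typically carried by a module parameter):

uint32 example_msg_level = DHD_ERROR_VAL | DHD_EVENT_VAL | DHD_FWLOG_VAL;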
+typedef struct _dhd_pd11regs_param { + uint16 start_idx; + uint8 verbose; + uint8 pad; + uint8 plist[1]; +} dhd_pd11regs_param; + +typedef struct _dhd_pd11regs_buf { + uint16 idx; + uint8 pad[2]; + uint8 pbuf[1]; +} dhd_pd11regs_buf; + +/* BT logging and memory dump */ + +#define BT_LOG_BUF_MAX_SIZE (DHD_IOCTL_MAXLEN - (2 * sizeof(int))) +#define BT_LOG_BUF_NOT_AVAILABLE 0 +#define BT_LOG_NEXT_BUF_NOT_AVAIL 1 +#define BT_LOG_NEXT_BUF_AVAIL 2 +#define BT_LOG_NOT_READY 3 + +typedef struct bt_log_buf_info { + int availability; + int size; + char buf[BT_LOG_BUF_MAX_SIZE]; +} bt_log_buf_info_t; + +/* request BT memory in chunks */ +typedef struct bt_mem_req { + int offset; /* offset from BT memory start */ + int buf_size; /* buffer size per chunk */ +} bt_mem_req_t; + +typedef struct fw_download_info { + uint32 fw_start_addr; + uint32 fw_size; + uint32 fw_entry_pt; + char fw_signature_fname[DHD_FILENAME_MAX]; + char bootloader_fname[DHD_FILENAME_MAX]; + uint32 bootloader_start_addr; + char fw_path[DHD_PATHNAME_MAX]; +} fw_download_info_t; + +/* max dest supported */ +#define DEBUG_BUF_DEST_MAX 4 + +/* debug buf dest stat */ +typedef struct debug_buf_dest_stat { + uint32 stat[DEBUG_BUF_DEST_MAX]; +} debug_buf_dest_stat_t; + +#ifdef DHD_PKTTS +/* max pktts flow config supported */ +#define PKTTS_CONFIG_MAX 8 + +#define PKTTS_OFFSET_INVALID ((uint32)(~0)) + +/* pktts flow configuration */ +typedef struct pktts_flow { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 src_ip; /**< source ip address */ + uint32 dst_ip; /**< destination ip address */ + uint32 src_port; /**< source port */ + uint32 dst_port; /**< destination port */ + uint32 proto; /**< protocol */ + uint32 ip_prec; /**< ip precedence */ + uint32 pkt_offset; /**< offset from data[0] (TCP/UDP payload) */ + uint32 chksum; /**< 5 tuple checksum */ +} pktts_flow_t; + +#define BCM_TS_MAGIC 0xB055B055 +#define BCM_TS_MAGIC_V2 0xB055B056 +#define BCM_TS_TX 1u +#define BCM_TS_RX 2u +#define BCM_TS_UTX 3u /* ucode tx timestamps */ + +#define PKTTS_MAX_FWTX 4u +#define PKTTS_MAX_UCTX 5u +#define PKTTS_MAX_UCCNT 8u +#define PKTTS_MAX_FWRX 2u + +/* Firmware timestamp header */ +typedef struct bcm_to_info_hdr { + uint magic; /**< magic word */ + uint type; /**< tx/rx type */ + uint flowid; /**< 5 tuple checksum */ + uint prec; /**< ip precedence (IP_PREC) */ + uint8 xbytes[16]; /**< 16bytes info from pkt offset */ +} bcm_to_info_hdr_t; + +/* Firmware tx timestamp payload structure */ +typedef struct bcm_to_info_tx_ts { + bcm_to_info_hdr_t hdr; + uint64 dhdt0; /**< system time - DHDT0 */ + uint64 dhdt5; /**< system time - DHDT5 */ + uint fwts[PKTTS_MAX_FWTX]; /**< fw timestamp - FWT0..FWT4 */ + uint ucts[PKTTS_MAX_UCTX]; /**< uc timestamp - UCT0..UCT4 */ + uint uccnt[PKTTS_MAX_UCCNT]; /**< uc counters */ +} bcm_to_info_tx_ts_t; + +/* Firmware rx timestamp payload structure */ +typedef struct bcm_to_info_rx_ts { + bcm_to_info_hdr_t hdr; + uint64 dhdr3; /**< system time - DHDR3 */ + uint fwts[PKTTS_MAX_FWRX]; /**< fw timestamp - FWT0, FWT1 */ +} bcm_to_info_rx_ts_t; +#endif /* DHD_PKTTS */ + +/* devreset */ +#define DHD_DEVRESET_VERSION 1 + +typedef struct devreset_info { + uint16 version; + uint16 length; + uint16 mode; + int16 status; +} devreset_info_t; + +#ifdef DHD_TX_PROFILE + +#define DHD_TX_PROFILE_VERSION 1 + +/* tx_profile structure for tagging */ +typedef struct dhd_tx_profile_protocol { + uint16 version; + uint8 profile_index; + uint8 layer; + uint32 protocol_number; + uint16 
src_port;
+	uint16 dest_port;
+} dhd_tx_profile_protocol_t;
+
+#define DHD_TX_PROFILE_DATA_LINK_LAYER	(2u)	/* data link layer protocols */
+#define DHD_TX_PROFILE_NETWORK_LAYER	(3u)	/* network layer protocols */
+
+#define DHD_MAX_PROFILE_INDEX	(7u)	/* three bits are available to encode
+					   the tx profile index in the rate
+					   field in host_txbuf_post_t
+					 */
+#define DHD_MAX_PROFILES	(1u)	/* ucode only supports 1 profile atm */
+
+#endif /* defined(DHD_TX_PROFILE) */
+#endif /* _dhdioctl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/dngl_rtlv.h b/bcmdhd.101.10.361.x/include/dngl_rtlv.h
new file mode 100755
index 0000000..450caa2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dngl_rtlv.h
@@ -0,0 +1,66 @@
+/*
+ * Interface definitions for reversed TLVs
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _dngl_rtlv_h_
+#define _dngl_rtlv_h_
+
+#include <typedefs.h>
+
+/* Types of reverse TLVs downloaded to the top of dongle RAM.
+ * A reverse TLV consists of: + * data + * len <4 bytes> + * type <4 bytes> + */ +enum { + DNGL_RTLV_TYPE_NONE = 0, + /* replaces bcmrand.h BCM_NVRAM_RNG_SIGNATURE */ + DNGL_RTLV_TYPE_RNG_SIGNATURE = 0xFEEDC0DEu, /* RNG random data */ + DNGL_RTLV_TYPE_FW_SIGNATURE = 0xFEEDFE51, /* FW signature */ + DNGL_RTLV_TYPE_NVRAM_SIGNATURE = 0xFEEDFE52, /* NVRAM signature */ + DNGL_RTLV_TYPE_FWSIGN_MEM_MAP = 0xFEEDFE53, /* FW signing memory map */ + DNGL_RTLV_TYPE_FWSIGN_STATUS = 0xFEEDFE54, /* signature verification status */ + DNGL_RTLV_TYPE_END_MARKER = 0xFEED0E2D, /* end of rTLVs marker */ +}; +typedef uint32 dngl_rtlv_type_t; +typedef uint32 dngl_rtlv_len_t; + +/* Search for a reversed TLV with the given type, starting at the given address */ +int dngl_rtlv_find(const uint8 *rtlv_ptr, const uint8 *addr_limit, dngl_rtlv_type_t type, + dngl_rtlv_len_t *out_len, const uint8 **out_data); + +/* Search for a reversed TLV with the given type, starting at the top of RAM */ +int dngl_rtlv_find_from_ramtop(dngl_rtlv_type_t type, dngl_rtlv_len_t *out_len, + const uint8 **out_data); + +/* Search for the end of the reversed TLVs at the top of RAM to return the next RAM address */ +const uint8* dngl_rtlv_skipall(void); + +#ifdef RTLV_DEBUG +void dbg_log_rtlv(const char* str, const void* p1, const void* p2, const void *p3, + const void* p4, const void *p5); +#else /* RTLV_DEBUG */ +#define dbg_log_rtlv(str, p1, p2, p3, p4, p5) +#endif /* RTLV_DEBUG */ + +#endif /* _dngl_rtlv_h_ */ diff --git a/bcmdhd.101.10.361.x/include/dngl_stats.h b/bcmdhd.101.10.361.x/include/dngl_stats.h new file mode 100755 index 0000000..12195f2 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/dngl_stats.h @@ -0,0 +1,388 @@ +/* + * Common stats definitions for clients of dongle + * ports + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ * <>
+ *
+ * $Id$
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+#include <typedefs.h>
+#include <802.11.h>
+#include <ethernet.h>
+
+/* XXX happens to mirror a section of linux's net_device_stats struct */
+typedef struct {
+	unsigned long	rx_packets;	/* total packets received */
+	unsigned long	tx_packets;	/* total packets transmitted */
+	unsigned long	rx_bytes;	/* total bytes received */
+	unsigned long	tx_bytes;	/* total bytes transmitted */
+	unsigned long	rx_errors;	/* bad packets received */
+	unsigned long	tx_errors;	/* packet transmit problems */
+	unsigned long	rx_dropped;	/* packets dropped by dongle */
+	unsigned long	tx_dropped;	/* packets dropped by dongle */
+	unsigned long	multicast;	/* multicast packets received */
+} dngl_stats_t;
+
+typedef int32 wifi_radio;
+typedef int32 wifi_channel;
+typedef int32 wifi_rssi;
+typedef struct { uint16 version; uint16 length; } ver_len;
+
+typedef enum wifi_channel_width {
+	WIFI_CHAN_WIDTH_20	= 0,
+	WIFI_CHAN_WIDTH_40	= 1,
+	WIFI_CHAN_WIDTH_80	= 2,
+	WIFI_CHAN_WIDTH_160	= 3,
+	WIFI_CHAN_WIDTH_80P80	= 4,
+	WIFI_CHAN_WIDTH_5	= 5,
+	WIFI_CHAN_WIDTH_10	= 6,
+	WIFI_CHAN_WIDTH_INVALID = -1
+} wifi_channel_width_t;
+
+typedef enum {
+	WIFI_DISCONNECTED = 0,
+	WIFI_AUTHENTICATING = 1,
+	WIFI_ASSOCIATING = 2,
+	WIFI_ASSOCIATED = 3,
+	WIFI_EAPOL_STARTED = 4,		/* if done by firmware/driver */
+	WIFI_EAPOL_COMPLETED = 5,	/* if done by firmware/driver */
+} wifi_connection_state;
+
+typedef enum {
+	WIFI_ROAMING_IDLE = 0,
+	WIFI_ROAMING_ACTIVE = 1
+} wifi_roam_state;
+
+typedef enum {
+	WIFI_INTERFACE_STA = 0,
+	WIFI_INTERFACE_SOFTAP = 1,
+	WIFI_INTERFACE_IBSS = 2,
+	WIFI_INTERFACE_P2P_CLIENT = 3,
+	WIFI_INTERFACE_P2P_GO = 4,
+	WIFI_INTERFACE_NAN = 5,
+	WIFI_INTERFACE_MESH = 6
+} wifi_interface_mode;
+
+#define WIFI_CAPABILITY_QOS          0x00000001	/* set for QOS association */
+#define WIFI_CAPABILITY_PROTECTED    0x00000002	/* set for protected association (802.11
+						 * beacon frame control protected bit set)
+						 */
+#define WIFI_CAPABILITY_INTERWORKING 0x00000004	/* set if 802.11 Extended Capabilities
+						 * element interworking bit is set
+						 */
+#define WIFI_CAPABILITY_HS20         0x00000008	/* set for HS20 association */
+#define WIFI_CAPABILITY_SSID_UTF8    0x00000010	/* set if 802.11 Extended Capabilities
+						 * element UTF-8 SSID bit is set
+						 */
+#define WIFI_CAPABILITY_COUNTRY      0x00000020	/* set if 802.11 Country Element is present */
+#ifdef LINUX
+#define PACK_ATTRIBUTE __attribute__ ((packed))
+#else
+#define PACK_ATTRIBUTE
+#endif
+typedef struct {
+	wifi_interface_mode mode;	/* interface mode */
+	uint8 mac_addr[6];		/* interface mac address (self) */
+	uint8 PAD[2];
+	wifi_connection_state state;	/* connection state (valid for STA, CLI only) */
+	wifi_roam_state roaming;	/* roaming state */
+	uint32 capabilities;		/* WIFI_CAPABILITY_XXX (self) */
+	uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */
+	uint8 bssid[ETHER_ADDR_LEN];	/* bssid */
+	uint8 PAD[1];
+	uint8 ap_country_str[3];	/* country string advertised by AP */
+	uint8 country_str[3];		/* country string for this association */
+	uint8 PAD[2];
+} wifi_interface_info;
+
+typedef wifi_interface_info *wifi_interface_handle;
+
+/* channel information */
+typedef struct {
+	wifi_channel_width_t width;	/* channel width (20, 40, 80, 80+80, 160) */
+	wifi_channel center_freq;	/* primary 20 MHz channel */
+	wifi_channel center_freq0;	/* center frequency (MHz) first segment */
+	wifi_channel center_freq1;	/* center frequency (MHz) second segment */
+} wifi_channel_info;
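Editorial note: a hedged sketch of an 80 MHz channel description with the struct above. For 80 MHz, center_freq carries the primary 20 MHz channel and center_freq0 the segment-0 center; the frequencies below are illustrative 5 GHz values:

static const wifi_channel_info example_chan = {
	.width = WIFI_CHAN_WIDTH_80,
	.center_freq = 5180,	/* primary 20 MHz channel (MHz) */
	.center_freq0 = 5210,	/* 80 MHz segment center (MHz) */
	.center_freq1 = 0,	/* unused unless 80+80 */
};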
+
+/* wifi rate */
+typedef struct {
+	uint32 preamble;	/* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+	uint32 nss;		/* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+	uint32 bw;		/* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+	uint32 rateMcsIdx;	/* OFDM/CCK rate code would be as per ieee std
+				 * in the units of 0.5mbps
+				 */
+				/* HT/VHT it would be mcs index */
+	uint32 reserved;	/* reserved */
+	uint32 bitrate;		/* units of 100 Kbps */
+} wifi_rate;
+
+typedef struct {
+	uint32 preamble   :3;	/* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+	uint32 nss        :2;	/* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+	uint32 bw         :3;	/* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+	uint32 rateMcsIdx :8;	/* OFDM/CCK rate code would be as per ieee std
+				 * in the units of 0.5mbps HT/VHT it would be
+				 * mcs index
+				 */
+	uint32 reserved  :16;	/* reserved */
+	uint32 bitrate;		/* units of 100 Kbps */
+} wifi_rate_v1;
+
+/* channel statistics */
+typedef struct {
+	wifi_channel_info channel;	/* channel */
+	uint32 on_time;			/* msecs the radio is awake (32 bits number
+					 * accruing over time)
+					 */
+	uint32 cca_busy_time;		/* msecs the CCA register is busy (32 bits number
+					 * accruing over time)
+					 */
+} wifi_channel_stat;
+
+/* radio statistics */
+typedef struct {
+	struct {
+		uint16 version;
+		uint16 length;
+	};
+	wifi_radio radio;	/* wifi radio (if multiple radio supported) */
+	uint32 on_time;		/* msecs the radio is awake (32 bits number
+				 * accruing over time)
+				 */
+	uint32 tx_time;		/* msecs the radio is transmitting (32 bits
+				 * number accruing over time)
+				 */
+	uint32 rx_time;		/* msecs the radio is in active receive (32 bits
+				 * number accruing over time)
+				 */
+	uint32 on_time_scan;	/* msecs the radio is awake due to all scan (32 bits
+				 * number accruing over time)
+				 */
+	uint32 on_time_nbd;	/* msecs the radio is awake due to NAN (32 bits
+				 * number accruing over time)
+				 */
+	uint32 on_time_gscan;	/* msecs the radio is awake due to GScan (32 bits
+				 * number accruing over time)
+				 */
+	uint32 on_time_roam_scan;	/* msecs the radio is awake due to roam scan (32 bits
+					 * number accruing over time)
+					 */
+	uint32 on_time_pno_scan;	/* msecs the radio is awake due to PNO scan (32 bits
+					 * number accruing over time)
+					 */
+	uint32 on_time_hs20;	/* msecs the radio is awake due to HS2.0 scans and
+				 * GAS exchange (32 bits number accruing over time)
+				 */
+	uint32 num_channels;	/* number of channels */
+	wifi_channel_stat channels[1];	/* channel statistics */
+} wifi_radio_stat;
+
+typedef struct {
+	wifi_radio radio;
+	uint32 on_time;
+	uint32 tx_time;
+	uint32 rx_time;
+	uint32 on_time_scan;
+	uint32 on_time_nbd;
+	uint32 on_time_gscan;
+	uint32 on_time_roam_scan;
+	uint32 on_time_pno_scan;
+	uint32 on_time_hs20;
+	uint32 num_channels;
+} wifi_radio_stat_h;
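Editorial note: wifi_radio_stat ends in a one-element channels[] array, so a report carrying n channels (n >= 1) needs the classic over-allocation. A userspace-style sketch (calloc stands in for whatever allocator the caller uses):

#include <stdlib.h>

static wifi_radio_stat *example_alloc_radio_stat(unsigned n)
{
	size_t sz = sizeof(wifi_radio_stat) + (n - 1) * sizeof(wifi_channel_stat);
	wifi_radio_stat *rs = (wifi_radio_stat *)calloc(1, sz);
	if (rs != NULL)
		rs->num_channels = n;	/* entries follow in rs->channels[] */
	return rs;
}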
+
+/* per rate statistics */
+typedef struct {
+	wifi_rate_v1 rate;	/* rate information */
+	uint32 tx_mpdu;		/* number of successfully transmitted data pkts (ACK rcvd) */
+	uint32 rx_mpdu;		/* number of received data pkts */
+	uint32 mpdu_lost;	/* number of data packet losses (no ACK) */
+	uint32 retries;		/* total number of data pkt retries */
+	uint32 retries_short;	/* number of short data pkt retries */
+	uint32 retries_long;	/* number of long data pkt retries */
+} wifi_rate_stat_v1;
+
+typedef struct {
+	uint16 version;
+	uint16 length;
+	uint32 tx_mpdu;		/* number of successfully transmitted data pkts (ACK rcvd) */
+	uint32 rx_mpdu;		/* number of received data pkts */
+	uint32 mpdu_lost;	/* number of data packet losses (no ACK) */
+	uint32 retries;		/* total number of data pkt retries */
+	uint32 retries_short;	/* number of short data pkt retries */
+	uint32 retries_long;	/* number of long data pkt retries */
+	wifi_rate rate;
+} wifi_rate_stat;
+
+/* access categories */
+typedef enum {
+	WIFI_AC_VO  = 0,
+	WIFI_AC_VI  = 1,
+	WIFI_AC_BE  = 2,
+	WIFI_AC_BK  = 3,
+	WIFI_AC_MAX = 4
+} wifi_traffic_ac;
+
+/* wifi peer type */
+typedef enum
+{
+	WIFI_PEER_STA,
+	WIFI_PEER_AP,
+	WIFI_PEER_P2P_GO,
+	WIFI_PEER_P2P_CLIENT,
+	WIFI_PEER_NAN,
+	WIFI_PEER_TDLS,
+	WIFI_PEER_INVALID
+} wifi_peer_type;
+
+/* per peer statistics */
+typedef struct {
+	wifi_peer_type type;		/* peer type (AP, TDLS, GO etc.) */
+	uint8 peer_mac_address[6];	/* mac address */
+	uint32 capabilities;		/* peer WIFI_CAPABILITY_XXX */
+	uint32 num_rate;		/* number of rates */
+	wifi_rate_stat rate_stats[1];	/* per rate statistics, number of entries = num_rate */
+} wifi_peer_info;
+
+/* per access category statistics */
+typedef struct {
+	wifi_traffic_ac ac;	/* access category (VI, VO, BE, BK) */
+	uint32 tx_mpdu;		/* number of successfully transmitted unicast data pkts
+				 * (ACK rcvd)
+				 */
+	uint32 rx_mpdu;		/* number of received unicast mpdus */
+	uint32 tx_mcast;	/* number of successfully transmitted multicast
+				 * data packets
+				 */
+				/* STA case: implies ACK received from AP for the
+				 * unicast packet in which mcast pkt was sent
+				 */
+	uint32 rx_mcast;	/* number of received multicast data packets */
+	uint32 rx_ampdu;	/* number of received unicast a-mpdus */
+	uint32 tx_ampdu;	/* number of transmitted unicast a-mpdus */
+	uint32 mpdu_lost;	/* number of data pkt losses (no ACK) */
+	uint32 retries;		/* total number of data pkt retries */
+	uint32 retries_short;	/* number of short data pkt retries */
+	uint32 retries_long;	/* number of long data pkt retries */
+	uint32 contention_time_min;	/* data pkt min contention time (usecs) */
+	uint32 contention_time_max;	/* data pkt max contention time (usecs) */
+	uint32 contention_time_avg;	/* data pkt avg contention time (usecs) */
+	uint32 contention_num_samples;	/* num of data pkts used for contention statistics */
+} wifi_wmm_ac_stat;
+
+/* interface statistics */
+typedef struct {
+	wifi_interface_handle iface;	/* wifi interface */
+	wifi_interface_info info;	/* current state of the interface */
+	uint32 beacon_rx;		/* access point beacon received count from
+					 * connected AP
+					 */
+	uint64 average_tsf_offset;	/* average beacon offset encountered (beacon_TSF - TBTT)
+					 * The average_tsf_offset field is used so as to calculate
+					 * the typical beacon contention time on the channel as well
+					 * may be used to debug beacon synchronization and related
+					 * power consumption issue
+					 */
+	uint32 leaky_ap_detected;	/* indicate that this AP
+					 * typically leaks packets beyond
+					 * the driver guard time.
+ */ + uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after + * frame with PM bit set was ACK'ed by AP + */ + uint32 leaky_ap_guard_time; /* guard time currently in force + * (when implementing IEEE power management + * based on frame control PM bit), How long + * driver waits before shutting down the radio and after + * receiving an ACK for a data frame with PM bit set) + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} wifi_iface_stat; + +#ifdef CONFIG_COMPAT +/* interface statistics */ +typedef struct { + compat_uptr_t iface; /* wifi interface */ + wifi_interface_info info; /* current state of the interface */ + uint32 beacon_rx; /* access point beacon received count from + * connected AP + */ + uint64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT) + * The average_tsf_offset field is used so as to calculate + * the typical beacon contention time on the channel as well + * may be used to debug beacon synchronization and related + * power consumption issue + */ + uint32 leaky_ap_detected; /* indicate that this AP + * typically leaks packets beyond + * the driver guard time. + */ + uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after + * frame with PM bit set was ACK'ed by AP + */ + uint32 leaky_ap_guard_time; /* guard time currently in force + * (when implementing IEEE power management + * based on frame control PM bit), How long + * driver waits before shutting down the radio and after + * receiving an ACK for a data frame with PM bit set) + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} compat_wifi_iface_stat; +#endif /* CONFIG_COMPAT */ + +#endif /* _dngl_stats_h_ */ diff --git a/bcmdhd.101.10.361.x/include/dngl_wlhdr.h b/bcmdhd.101.10.361.x/include/dngl_wlhdr.h new file mode 100755 index 0000000..6e1f74e --- /dev/null +++ b/bcmdhd.101.10.361.x/include/dngl_wlhdr.h @@ -0,0 +1,39 @@ +/* + * Dongle WL Header definitions + * + * Copyright (C) 2020, Broadcom. 
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ * -----------------------------------------------------------------------------
+ *
+ */
+
+/*
+ * Broadcom dngl Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _DNGLEVENT_H_
+#define _DNGLEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <bcmeth.h>
+#include <ethernet.h>
+#ifdef HEALTH_CHECK
+#include
+#endif /* HEALTH_CHECK */
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define BCM_DNGL_EVENT_MSG_VERSION		1
+#define DNGL_E_RSRVD_1				0x0
+#define DNGL_E_RSRVD_2				0x1
+#define DNGL_E_SOCRAM_IND			0x2
+#define DNGL_E_PROFILE_DATA_IND			0x3
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16  version;	/* Current version is 1 */
+	uint16  reserved;	/* reserved for any future extension */
+	uint16  event_type;	/* DNGL_E_SOCRAM_IND */
+	uint16  datalen;	/* Length of the event payload */
+} BWL_POST_PACKED_STRUCT bcm_dngl_event_msg_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_event {
+	struct ether_header eth;
+	bcmeth_hdr_t bcm_hdr;
+	bcm_dngl_event_msg_t dngl_event;
+	/* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_dngl_event_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_socramind {
+	uint16 tag;	/* data tag */
+	uint16 length;	/* data length */
+	uint8 value[1];	/* data value with variable length specified by length */
+} BWL_POST_PACKED_STRUCT bcm_dngl_socramind_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_profile_data_ind_t {
+	uint16 tag;
+	uint16 length;
+	uint8 value[];
+} BWL_POST_PACKED_STRUCT bcm_dngl_profile_data_ind_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_arm_event {
+	uint32 type;
+	uint32 value;
+} BWL_POST_PACKED_STRUCT bcm_dngl_arm_event_t;
+
+#define PROFILE_DATA_IND_INFO 0x1
+
+#define PROFILE_SUB_TYPE_ARM_STATS_INFO 0x1
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_arm_stats_ind {
+	uint16 tag;
+	uint16 length;
+	uint8 value[];
+} BWL_POST_PACKED_STRUCT bcm_dngl_arm_stats_ind_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_arm_stats {
+	uint32 cycles;
+	uint32 timestamp;
+	uint16 freq;
+	uint16 roh;
+	uint16 num_events;
+	uint16 seq_no;
+	uint8 value[];
+} BWL_POST_PACKED_STRUCT bcm_dngl_arm_stats_t;
+
+/* SOCRAM_IND type tags */
+typedef enum socram_ind_tag {
+	SOCRAM_IND_ASSERT_TAG = 1,
+	SOCRAM_IND_TAG_HEALTH_CHECK = 2
+} socram_ind_tag_t;
+
+/* Health check top level module tags */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_healthcheck {
+	uint16 top_module_tag;	/* top level module tag */
+	uint16 top_module_len;	/* length of the top level module data */
+	uint8 value[1];		/* data value with variable length specified by length */
+} BWL_POST_PACKED_STRUCT bcm_dngl_healthcheck_t;
+
+/* Health check top level module tags */
+#define HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE 1
+#define HEALTH_CHECK_PCIEDEV_VERSION_1	1
+#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT	0
+#define HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT	1
+#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT	2
+#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT	3
+#define HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT	4
+#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT	5
+#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3		(1 << HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_AER		(1 << HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN	(1 << HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT	(1 << HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_NODS		(1 << HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE	(1 << HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT)
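Editorial note: pcie_flag in bcm_dngl_pcie_hc_t (declared just below) is a bitmask of the HEALTH_CHECK_PCIEDEV_FLAG_* values above. A one-function sketch:

static int example_pcie_link_is_down(uint16 pcie_flag)
{
	return (pcie_flag & HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN) != 0;
}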
+/* PCIE Module TAGs */
+#define HEALTH_CHECK_PCIEDEV_INDUCED_IND	0x1
+#define HEALTH_CHECK_PCIEDEV_H2D_DMA_IND	0x2
+#define HEALTH_CHECK_PCIEDEV_D2H_DMA_IND	0x3
+#define HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND	0x4
+#define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND	0x5
+#define HEALTH_CHECK_PCIEDEV_NODS_IND		0x6
+#define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND	0x7
+#define HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND	0x8
+#define HEALTH_CHECK_PCIEDEV_FLOWRING_IND	0x9
+#define HEALTH_CHECK_PCIEDEV_HW_ASSERT_LONG_IND	0xA
+#define HEALTH_CHECK_PCIEDEV_RXPOST_LONG_IND	0xB
+
+#define HC_PCIEDEV_CONFIG_REGLIST_MAX	25
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc {
+	uint16 version;		/* HEALTH_CHECK_PCIEDEV_VERSION_1 */
+	uint16 reserved;
+	uint16 pcie_err_ind_type; /* PCIE Module TAGs */
+	uint16 pcie_flag;
+	uint32 pcie_control_reg;
+	uint32 pcie_config_regs[HC_PCIEDEV_CONFIG_REGLIST_MAX];
+} BWL_POST_PACKED_STRUCT bcm_dngl_pcie_hc_t;
+
+/* define to avoid compile issues in older branches which define hchk_sw_entity_t */
+#ifdef HCHK_COMMON_SW_EVENT
+/* Enumerating top level SW entities for use by health check */
+typedef enum {
+	HCHK_SW_ENTITY_UNDEFINED = 0,
+	HCHK_SW_ENTITY_PCIE = 1,
+	HCHK_SW_ENTITY_SDIO = 2,
+	HCHK_SW_ENTITY_USB = 3,
+	HCHK_SW_ENTITY_RTE = 4,
+	HCHK_SW_ENTITY_WL_PRIMARY = 5,	/* WL instance 0 */
+	HCHK_SW_ENTITY_WL_SECONDARY = 6, /* WL instance 1 */
+	HCHK_SW_ENTITY_MAX
+} hchk_sw_entity_t;
+#endif /* HCHK_COMMON_SW_EVENT */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _DNGLEVENT_H_ */
diff --git a/bcmdhd.101.10.361.x/include/dnglioctl.h b/bcmdhd.101.10.361.x/include/dnglioctl.h
new file mode 100755
index 0000000..a18c716
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dnglioctl.h
@@ -0,0 +1,177 @@
+/*
+ * HND Run Time Environment ioctl.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _dngl_ioctl_h_
+#define _dngl_ioctl_h_
+
+/* ==== Dongle IOCTLs i.e.
non-d11 IOCTLs ==== */ + +#ifndef _rte_ioctl_h_ +/* ================================================================ */ +/* These are the existing ioctls moved from src/include/rte_ioctl.h */ +/* ================================================================ */ + +/* RTE IOCTL definitions for generic ether devices */ +#define RTEIOCTLSTART 0x8901 +#define RTEGHWADDR 0x8901 +#define RTESHWADDR 0x8902 +#define RTEGMTU 0x8903 +#define RTEGSTATS 0x8904 +#define RTEGALLMULTI 0x8905 +#define RTESALLMULTI 0x8906 +#define RTEGPROMISC 0x8907 +#define RTESPROMISC 0x8908 +#define RTESMULTILIST 0x8909 +#define RTEGUP 0x890A +#define RTEGPERMADDR 0x890B +#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */ +#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */ +#define RTEDEVTIMESYNC 0x890E /* Device TimeSync */ +#define RTEDEVDSNOTIFY 0x890F /* Bus DS state notification */ +#define RTED11DMALPBK_INIT 0x8910 /* D11 DMA loopback init */ +#define RTED11DMALPBK_UNINIT 0x8911 /* D11 DMA loopback uninit */ +#define RTED11DMALPBK_RUN 0x8912 /* D11 DMA loopback run */ +#define RTEDEVTSBUFPOST 0x8913 /* Async interface for tsync buffer post */ +#define RTED11DMAHOSTLPBK_RUN 0x8914 /* D11 DMA host memory loopback run */ +#define RTEDEVGETTSF 0x8915 /* Get device TSF */ +#define RTEDURATIONUNIT 0x8916 /* Duration unit */ +#define RTEWRITE_WAR_REGS 0x8917 /* write workaround regs */ +#define RTEDEVRMPMK 0x8918 /* Remove PMK */ +#define RTEDEVDBGVAL 0x8919 /* Set debug val */ +/* Ensure last RTE IOCTL define val is assigned to RTEIOCTLEND */ +#define RTEIOCTLEND 0x8919 /* LAST RTE IOCTL value */ + +#define RTE_IOCTL_QUERY 0x00 +#define RTE_IOCTL_SET 0x01 +#define RTE_IOCTL_OVL_IDX_MASK 0x1e +#define RTE_IOCTL_OVL_RSV 0x20 +#define RTE_IOCTL_OVL 0x40 +#define RTE_IOCTL_OVL_IDX_SHIFT 1 + +enum hnd_ioctl_cmd { + HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */ + + /* PCIEDEV specific wl <--> bus ioctls */ + BUS_GET_VAR = 2, + BUS_SET_VAR = 3, + BUS_FLUSH_RXREORDER_Q = 4, + BUS_SET_LTR_STATE = 5, + BUS_FLUSH_CHAINED_PKTS = 6, + BUS_SET_COPY_COUNT = 7, + BUS_UPDATE_FLOW_PKTS_MAX = 8, + BUS_UPDATE_EXTRA_TXLFRAGS = 9, + BUS_UPDATE_FRWD_RESRV_BUFCNT = 10, + BUS_PCIE_CONFIG_ACCESS = 11, + BUS_HC_EVENT_MASK_UPDATE = 12, + BUS_SET_MAC_WAKE_STATE = 13, + BUS_FRWD_PKT_RXCMPLT = 14, + BUS_PCIE_LATENCY_ENAB = 15, /* to enable latency feature in pcie */ + BUS_GET_MAXITEMS = 16, + BUS_SET_BUS_CSO_CAP = 17, /* Update the CSO cap from wl layer to bus layer */ + BUS_DUMP_RX_DMA_STALL_RELATED_INFO = 18, + BUS_UPDATE_RESVPOOL_STATE = 19 /* Update resvpool state */ +}; + +#define SDPCMDEV_SET_MAXTXPKTGLOM 1 +#define RTE_MEMUSEINFO_VER 0x00 + +typedef struct memuse_info { + uint16 ver; /* version of this struct */ + uint16 len; /* length in bytes of this structure */ + uint32 tot; /* Total memory */ + uint32 text_len; /* Size of Text segment memory */ + uint32 data_len; /* Size of Data segment memory */ + uint32 bss_len; /* Size of BSS segment memory */ + + uint32 arena_size; /* Total Heap size */ + uint32 arena_free; /* Heap memory available or free */ + uint32 inuse_size; /* Heap memory currently in use */ + uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */ + uint32 inuse_overhead; /* tally of allocated mem_t blocks */ + uint32 inuse_total; /* Heap in-use + Heap overhead memory */ + uint32 free_lwm; /* Least free size since reclaim */ + uint32 mf_count; /* Malloc failure count */ +} memuse_info_t; + +/* Different DMA loopback modes */ +#define M2M_DMA_LOOPBACK 0 
/* PCIE M2M mode */ +#define D11_DMA_LOOPBACK 1 /* PCIE M2M and D11 mode without ucode */ +#define BMC_DMA_LOOPBACK 2 /* PCIE M2M and D11 mode with ucode */ +#define M2M_NON_DMA_LOOPBACK 3 /* Non DMA(indirect) mode */ +#define D11_DMA_HOST_MEM_LPBK 4 /* D11 mode */ +#define M2M_DMA_WRITE_TO_RAM 6 /* PCIE M2M write to specific memory mode */ +#define M2M_DMA_READ_FROM_RAM 7 /* PCIE M2M read from specific memory mode */ +#define D11_DMA_WRITE_TO_RAM 8 /* D11 write to specific memory mode */ +#define D11_DMA_READ_FROM_RAM 9 /* D11 read from specific memory mode */ + +/* For D11 DMA loopback test */ +typedef struct d11_dmalpbk_init_args { + uint8 core_num; + uint8 lpbk_mode; +} d11_dmalpbk_init_args_t; + +typedef struct d11_dmalpbk_args { + uint8 *buf; + int32 len; + void *p; + uint8 core_num; + uint8 pad[3]; +} d11_dmalpbk_args_t; + +typedef enum wl_config_var { + WL_VAR_TX_PKTFETCH_INDUCE = 1, + WL_VAR_LAST +} wl_config_var_t; + +typedef struct wl_config_buf { + wl_config_var_t var; + uint32 val; +} wl_config_buf_t; + +/* ================================================================ */ +/* These are the existing ioctls moved from src/include/rte_ioctl.h */ +/* ================================================================ */ +#endif /* _rte_ioctl_h_ */ + +/* MPU test iovar version */ +#define MPU_TEST_STRUCT_VER 0 + +/* MPU test OP */ +#define MPU_TEST_OP_READ 0 +#define MPU_TEST_OP_WRITE 1 +#define MPU_TEST_OP_EXECUTE 2 + +/* Debug iovar for MPU testing */ +typedef struct mpu_test_args { + /* version control */ + uint16 ver; + uint16 len; /* the length of this structure */ + /* data */ + uint32 addr; + uint8 op; /* see MPU_TEST_OP_XXXX */ + uint8 rsvd; + uint16 size; /* valid for read/write */ + uint8 val[]; +} mpu_test_args_t; + +#endif /* _dngl_ioctl_h_ */ diff --git a/bcmdhd.101.10.361.x/include/eap.h b/bcmdhd.101.10.361.x/include/eap.h new file mode 100755 index 0000000..cfd9e58 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/eap.h @@ -0,0 +1,121 @@ +/* + * Extensible Authentication Protocol (EAP) definitions + * + * See + * RFC 2284: PPP Extensible Authentication Protocol (EAP) + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _eap_h_ +#define _eap_h_ + +/* This marks the start of a packed structure section. 
*/
+#include <packed_section_start.h>
+
+/* EAP packet format */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char code;	/* EAP code */
+	unsigned char id;	/* Current request ID */
+	unsigned short length;	/* Length including header */
+	unsigned char type;	/* EAP type (optional) */
+	unsigned char data[1];	/* Type data (optional) */
+} BWL_POST_PACKED_STRUCT eap_header_t;
+
+#define EAP_HEADER_LEN			4u
+#define EAP_HEADER_LEN_WITH_TYPE	5u
+#define ERP_FLAGS_LEN			1u
+#define ERP_SEQ_LEN			2u
+#define ERP_KEYNAMENAI_HEADER_LEN	2u
+#define ERP_CRYPTOSUITE_LEN		1u
+
+/* EAP codes */
+#define EAP_REQUEST	1u
+#define EAP_RESPONSE	2u
+#define EAP_SUCCESS	3u
+#define EAP_FAILURE	4u
+#define EAP_INITIATE	5u
+#define EAP_FINISH	6u
+
+/* EAP types */
+#define EAP_IDENTITY		1
+#define EAP_NOTIFICATION	2
+#define EAP_NAK			3
+#define EAP_MD5			4
+#define EAP_OTP			5
+#define EAP_GTC			6
+#define EAP_TLS			13
+#define EAP_EXPANDED		254
+#define BCM_EAP_SES		10
+#define BCM_EAP_EXP_LEN		12 /* EAP_LEN 5 + 3 bytes for SMI ID + 4 bytes for ven type */
+#define BCM_SMI_ID		0x113d
+#define WFA_VENDOR_SMI	0x009F68
+
+/* ERP types */
+#define EAP_ERP_TYPE_REAUTH_START	1u
+#define EAP_ERP_TYPE_REAUTH		2u
+
+/* EAP FLAGS */
+#define ERP_R_FLAG	0x80 /* result flag, set = failure */
+#define ERP_B_FLAG	0x40 /* bootstrap flag, set = bootstrap */
+#define ERP_L_FLAG	0x20 /* rrk lifetime tlv is present */
+
+/* ERP TV/TLV types */
+#define EAP_ERP_TLV_KEYNAME_NAI		1u
+
+/* ERP Cryptosuite */
+#define EAP_ERP_CS_HMAC_SHA256_128	2u
+
+#ifdef BCMCCX
+#define EAP_LEAP	17
+
+#define LEAP_VERSION		1
+#define LEAP_CHALLENGE_LEN	8
+#define LEAP_RESPONSE_LEN	24
+
+/* LEAP challenge */
+typedef struct {
+	unsigned char version;		/* should be value of LEAP_VERSION */
+	unsigned char reserved;		/* not used */
+	unsigned char chall_len;	/* always value of LEAP_CHALLENGE_LEN */
+	unsigned char challenge[LEAP_CHALLENGE_LEN]; /* random */
+	unsigned char username[1];
+} leap_challenge_t;
+
+#define LEAP_CHALLENGE_HDR_LEN	12
+
+/* LEAP challenge response */
+typedef struct {
+	unsigned char version;		/* should be value of LEAP_VERSION */
+	unsigned char reserved;		/* not used */
+	unsigned char resp_len;		/* always value of LEAP_RESPONSE_LEN */
+	/* MS-CHAP hash of challenge and user's password */
+	unsigned char response[LEAP_RESPONSE_LEN];
+	unsigned char username[1];
+} leap_response_t;
+
+#define LEAP_RESPONSE_HDR_LEN	28
+
+#endif /* BCMCCX */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eap_h_ */
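Editorial note: a hedged sketch of walking an EAP packet with eap_header_t. The 'length' field is big-endian on the wire and unaligned, hence the explicit byte assembly (the helper is illustrative, not part of the header):

static int example_eap_is_identity_request(const unsigned char *pkt, int pkt_len)
{
	if (pkt_len < EAP_HEADER_LEN_WITH_TYPE)
		return 0;
	const eap_header_t *eap = (const eap_header_t *)pkt;
	/* length is network byte order; assemble it byte by byte */
	unsigned short len = (unsigned short)((pkt[2] << 8) | pkt[3]);
	return (eap->code == EAP_REQUEST) && (eap->type == EAP_IDENTITY) &&
		(len <= (unsigned short)pkt_len);
}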
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#if !defined(BCMCRYPTO_COMPONENT)
+#include <bcmcrypto/aeskeywrap.h>
+#endif /* !BCMCRYPTO_COMPONENT */
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+ unsigned char body[1]; /* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4u
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION 2u
+#define WPA_EAPOL_VERSION 1u
+#define LEAP_EAPOL_VERSION 1u
+#define SES_EAPOL_VERSION 1u
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1u
+#define EAPOL_LOGOFF 2u
+#define EAPOL_KEY 3u
+#define EAPOL_ASF 4u
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1u
+#define EAPOL_WPA2_KEY 2u /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254u /* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN 8u
+#define EAPOL_KEY_IV_LEN 16u
+#define EAPOL_KEY_SIG_LEN 16u
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short length; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */
+ unsigned char index; /* Key Flags & Index */
+ unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */
+ unsigned char key[1]; /* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 44u
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK 0x80u
+#define EAPOL_KEY_BROADCAST 0u
+#define EAPOL_KEY_UNICAST 0x80u
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK 0x7fu
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_AKW_BLOCK_LEN 8
+#define EAPOL_WPA_KEY_REPLAY_LEN 8u
+#define EAPOL_WPA_KEY_NONCE_LEN 32u
+#define EAPOL_WPA_KEY_IV_LEN 16u
+#define EAPOL_WPA_KEY_RSC_LEN 8u
+#define EAPOL_WPA_KEY_ID_LEN 8u
+#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + EAPOL_AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE 32u
+#define EAPOL_WPA_KEY_MAX_MIC_LEN 32u
+#define EAPOL_WPA_ENCR_KEY_MAX_LEN 64u
+#define EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN 32u
+
+#define EAPOL_WPA_PMK_MAX_LEN 64u
+#define EAPOL_WPA_PMK_SHA384_LEN 48u
+#define EAPOL_WPA_PMK_DEFAULT_LEN 32u
+#define EAPOL_WPA_KCK_DEFAULT_LEN 16u
+#define EAPOL_WPA_KCK_SHA384_LEN 24u
+#define EAPOL_WPA_KCK_MIC_DEFAULT_LEN 16u
+#define EAPOL_WPA_KCK_MIC_SHA384_LEN 24u
+#define EAPOL_WPA_ENCR_KEY_DEFAULT_LEN 16u
+
+#define EAPOL_WPA_KEK2_SHA256_LEN 16u
+#define EAPOL_WPA_KEK2_SHA384_LEN 32u
+#define EAPOL_WPA_KCK2_SHA256_LEN 16u
+#define EAPOL_WPA_KCK2_SHA384_LEN 24u
+
+#ifndef EAPOL_KEY_HDR_VER_V2
+#define EAPOL_WPA_KEY_MIC_LEN 16u /* deprecated */
+#define EAPOL_WPA_KEY_LEN 95u /* deprecated */
+#endif
+
+#define EAPOL_PTK_KEY_MAX_LEN (EAPOL_WPA_KEY_MAX_MIC_LEN +\
+ EAPOL_WPA_ENCR_KEY_MAX_LEN +\
+ EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN +\
+ EAPOL_WPA_KCK2_SHA384_LEN +\
+ EAPOL_WPA_KEK2_SHA384_LEN)
+
+#ifndef EAPOL_KEY_HDR_VER_V2
+
+/* WPA EAPOL-Key : deprecated */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */
+ unsigned short data_len; /* Key Data Length */
+ unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+#else
+/* WPA EAPOL-Key : new structure to support a dynamic MIC length */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_v2_t;
+
+typedef eapol_wpa_key_header_v2_t eapol_wpa_key_header_t;
+#endif /* EAPOL_KEY_HDR_VER_V2 */
+
+#define EAPOL_WPA_KEY_DATA_LEN_SIZE 2u
+
+#ifdef EAPOL_KEY_HDR_VER_V2
+#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) (sizeof(eapol_wpa_key_header_v2_t) \
+ + mic_len + EAPOL_WPA_KEY_DATA_LEN_SIZE)
+
+/* WPA EAPOL-Key header macros to access the mic/data_len/data fields */
+#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t))
+#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) \
+ ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t) + mic_len)
+#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) \
+ ((uint8 *)pos + EAPOL_WPA_KEY_HDR_SIZE(mic_len))
+#else
+#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) EAPOL_WPA_KEY_LEN
+#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)&pos->mic)
+#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) ((uint8 *)&pos->data_len)
+#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) ((uint8 *)&pos->data)
+#endif /* EAPOL_KEY_HDR_VER_V2 */
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_OSEN 0x0
+#define WPA_KEY_DESC_V0 0x0
+#define WPA_KEY_DESC_V1 0x01
+#define WPA_KEY_DESC_V2 0x02
+#define WPA_KEY_DESC_V3 0x03
+#define WPA_KEY_PAIRWISE 0x08
+#define WPA_KEY_INSTALL 0x40
+#define WPA_KEY_ACK 0x80
+#define WPA_KEY_MIC 0x100
+#define WPA_KEY_SECURE 0x200
+#define WPA_KEY_ERROR 0x400
+#define WPA_KEY_REQ 0x800
+#define WPA_KEY_ENC_KEY_DATA 0x01000 /* Encrypted Key Data */
+#define WPA_KEY_SMK_MESSAGE 0x02000 /* SMK Message */
+#define WPA_KEY_DESC_VER(_ki) ((_ki) & 0x03u)
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0 0x00
+#define WPA_KEY_INDEX_1 0x10
+#define WPA_KEY_INDEX_2 0x20
+#define WPA_KEY_INDEX_3 0x30
+#define WPA_KEY_INDEX_MASK 0x30
+#define WPA_KEY_INDEX_SHIFT 0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA 0x1000
+
+/* Key Data encapsulation */
+/* this is really just a vendor-specific info element.
should define + * this in 802.11.h + */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 type; + uint8 length; + uint8 oui[3]; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t; + +#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6 + +#define WPA2_KEY_DATA_SUBTYPE_GTK 1 +#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2 +#define WPA2_KEY_DATA_SUBTYPE_MAC 3 +#define WPA2_KEY_DATA_SUBTYPE_PMKID 4 +#define WPA2_KEY_DATA_SUBTYPE_IGTK 9 +#define WPA2_KEY_DATA_SUBTYPE_OCI 13 +#define WPA2_KEY_DATA_SUBTYPE_BIGTK 14 + +/* GTK encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 flags; + uint8 reserved; + uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t; + +#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2 + +#define WPA2_GTK_INDEX_MASK 0x03 +#define WPA2_GTK_INDEX_SHIFT 0x00 + +#define WPA2_GTK_TRANSMIT 0x04 + +/* IGTK encapsulation */ +#define EAPOL_RSN_IPN_SIZE 6u +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 key_id; + uint8 ipn[EAPOL_RSN_IPN_SIZE]; + uint8 key[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t; + +#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 8u + +/* BIGTK encapsulation */ +#define EAPOL_RSN_BIPN_SIZE 6u +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 key_id; + uint8 bipn[EAPOL_RSN_BIPN_SIZE]; + uint8 key[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_bigtk_encap_t; + +#define EAPOL_WPA2_KEY_BIGTK_ENCAP_HDR_LEN 8u + +/* STAKey encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 reserved[2]; + uint8 mac[ETHER_ADDR_LEN]; + uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t; + +#define WPA2_KEY_DATA_PAD 0xdd + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _eapol_h_ */ diff --git a/bcmdhd.101.10.361.x/include/epivers.h b/bcmdhd.101.10.361.x/include/epivers.h new file mode 100755 index 0000000..231cfeb --- /dev/null +++ b/bcmdhd.101.10.361.x/include/epivers.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define EPI_MAJOR_VERSION 101
+
+#define EPI_MINOR_VERSION 10
+
+#define EPI_RC_NUMBER 361
+
+#define EPI_INCREMENTAL_NUMBER 0
+
+#define EPI_BUILD_NUMBER 0
+
+#define EPI_VERSION 101, 10, 361, 0
+
+#define EPI_VERSION_NUM 0x650a1690
+
+#define EPI_VERSION_DEV 101.10.361
+
+/* Driver Version String, ASCII, 32 chars max */
+#if defined (WLTEST)
+#define EPI_VERSION_STR "101.10.361 (wlan=r892223 WLTEST)"
+#elif (defined (BCMDBG_ASSERT) && !defined (BCMDBG_ASSERT_DISABLED))
+#define EPI_VERSION_STR "101.10.361 (wlan=r892223 ASSRT)"
+#else
+#define EPI_VERSION_STR "101.10.361.17 (wlan=r892223-20220415-1)(20220426-1)"
+#endif /* WLTEST */
+
+#endif /* _epivers_h_ */
diff --git a/bcmdhd.101.10.361.x/include/etd.h b/bcmdhd.101.10.361.x/include/etd.h
new file mode 100755
index 0000000..013037b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/etd.h
@@ -0,0 +1,636 @@
+/*
+ * Extended Trap data component interface file.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _ETD_H_
+#define _ETD_H_
+
+#if defined(ETD) && !defined(WLETD)
+#include
+#endif
+#include
+/* Tags for structures being used by etd info iovar.
+ * Related structures are defined in wlioctl.h.
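+ * (Note: these tags occupy values 10 through 14, which is why the
+ * hnd_ext_tag_trap_t enum below jumps from TAG_TRAP_BACKPLANE (9)
+ * straight to TAG_TRAP_PCIE_Q (15).)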
+ */ +#define ETD_TAG_JOIN_CLASSIFICATION_INFO 10 /* general information about join request */ +#define ETD_TAG_JOIN_TARGET_CLASSIFICATION_INFO 11 /* per target (AP) join information */ +#define ETD_TAG_ASSOC_STATE 12 /* current state of the Device association state machine */ +#define ETD_TAG_CHANNEL 13 /* current channel on which the association was performed */ +#define ETD_TAG_TOTAL_NUM_OF_JOIN_ATTEMPTS 14 /* number of join attempts (bss_retries) */ + +#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1 3 +#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2 6 + +#ifndef _LANGUAGE_ASSEMBLY + +#define HND_EXTENDED_TRAP_VERSION 1 +#define HND_EXTENDED_TRAP_BUFLEN 512 + +typedef struct hnd_ext_trap_hdr { + uint8 version; /* Extended trap version info */ + uint8 reserved; /* currently unused */ + uint16 len; /* Length of data excluding this header */ + uint8 data[]; /* TLV data */ +} hnd_ext_trap_hdr_t; + +typedef enum { + TAG_TRAP_NONE = 0u, /* None trap type */ + TAG_TRAP_SIGNATURE = 1u, /* Processor register dumps */ + TAG_TRAP_STACK = 2u, /* Processor stack dump (possible code locations) */ + TAG_TRAP_MEMORY = 3u, /* Memory subsystem dump */ + TAG_TRAP_DEEPSLEEP = 4u, /* Deep sleep health check failures */ + TAG_TRAP_PSM_WD = 5u, /* PSM watchdog information */ + TAG_TRAP_PHY = 6u, /* Phy related issues */ + TAG_TRAP_BUS = 7u, /* Bus level issues */ + TAG_TRAP_MAC_SUSP = 8u, /* Mac level suspend issues */ + TAG_TRAP_BACKPLANE = 9u, /* Backplane related errors */ + /* Values 10 through 14 are in use by etd_data info iovar */ + TAG_TRAP_PCIE_Q = 15u, /* PCIE Queue state during memory trap */ + TAG_TRAP_WLC_STATE = 16u, /* WLAN state during memory trap */ + TAG_TRAP_MAC_WAKE = 17u, /* Mac level wake issues */ + TAG_TRAP_PHYTXERR_THRESH = 18u, /* Phy Tx Err */ + TAG_TRAP_HC_DATA = 19u, /* Data collected by HC module */ + TAG_TRAP_LOG_DATA = 20u, + TAG_TRAP_CODE = 21u, /* The trap type */ + TAG_TRAP_HMAP = 22u, /* HMAP violation Address and Info */ + TAG_TRAP_PCIE_ERR_ATTN = 23u, /* PCIE error attn log */ + TAG_TRAP_AXI_ERROR = 24u, /* AXI Error */ + TAG_TRAP_AXI_HOST_INFO = 25u, /* AXI Host log */ + TAG_TRAP_AXI_SR_ERROR = 26u, /* AXI SR error log */ + TAG_TRAP_MEM_BIT_FLIP = 27u, /* Memory 1-Bit Flip error */ + TAG_TRAP_LAST /* This must be the last entry */ +} hnd_ext_tag_trap_t; + +typedef struct hnd_ext_trap_bp_err +{ + uint32 error; + uint32 coreid; + uint32 baseaddr; + uint32 ioctrl; + uint32 iostatus; + uint32 resetctrl; + uint32 resetstatus; + uint32 resetreadid; + uint32 resetwriteid; + uint32 errlogctrl; + uint32 errlogdone; + uint32 errlogstatus; + uint32 errlogaddrlo; + uint32 errlogaddrhi; + uint32 errlogid; + uint32 errloguser; + uint32 errlogflags; + uint32 itipoobaout; + uint32 itipoobbout; + uint32 itipoobcout; + uint32 itipoobdout; +} hnd_ext_trap_bp_err_t; + +#define HND_EXT_TRAP_AXISR_INFO_VER_1 1 +typedef struct hnd_ext_trap_axi_sr_err_v1 +{ + uint8 version; + uint8 pad[3]; + uint32 error; + uint32 coreid; + uint32 baseaddr; + uint32 ioctrl; + uint32 iostatus; + uint32 resetctrl; + uint32 resetstatus; + uint32 resetreadid; + uint32 resetwriteid; + uint32 errlogctrl; + uint32 errlogdone; + uint32 errlogstatus; + uint32 errlogaddrlo; + uint32 errlogaddrhi; + uint32 errlogid; + uint32 errloguser; + uint32 errlogflags; + uint32 itipoobaout; + uint32 itipoobbout; + uint32 itipoobcout; + uint32 itipoobdout; + + /* axi_sr_issue_debug */ + uint32 sr_pwr_control; + uint32 sr_corereset_wrapper_main; + uint32 sr_corereset_wrapper_aux; + uint32 sr_main_gci_status_0; + uint32 sr_aux_gci_status_0; 
+ uint32 sr_dig_gci_status_0;
+} hnd_ext_trap_axi_sr_err_v1_t;
+
+#define HND_EXT_TRAP_PSMWD_INFO_VER 1
+typedef struct hnd_ext_trap_psmwd_v1 {
+ uint16 xtag;
+ uint16 version; /* version of the information following this */
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_phydebug;
+ uint32 i32_clk_ctl_st;
+ uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1];
+ uint16 i16_0x1a8; /* gated clock en */
+ uint16 i16_0x406; /* Rcv Fifo Ctrl */
+ uint16 i16_0x408; /* Rx ctrl 1 */
+ uint16 i16_0x41a; /* Rxe Status 1 */
+ uint16 i16_0x41c; /* Rxe Status 2 */
+ uint16 i16_0x424; /* rcv wrd count 0 */
+ uint16 i16_0x426; /* rcv wrd count 1 */
+ uint16 i16_0x456; /* RCV_LFIFO_STS */
+ uint16 i16_0x480; /* PSM_SLP_TMR */
+ uint16 i16_0x490; /* PSM BRC */
+ uint16 i16_0x500; /* TXE CTRL */
+ uint16 i16_0x50e; /* TXE Status */
+ uint16 i16_0x55e; /* TXE_xmtdmabusy */
+ uint16 i16_0x566; /* TXE_XMTfifosuspflush */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x694; /* IFS_TX_DUR */
+ uint16 i16_0x6a0; /* SLow_CTL */
+ uint16 i16_0x838; /* TXE_AQM fifo Ready */
+ uint16 i16_0x8c0; /* Dagg ctrl */
+ uint16 shm_prewds_cnt;
+ uint16 shm_txtplufl_cnt;
+ uint16 shm_txphyerr_cnt;
+ uint16 pad;
+} hnd_ext_trap_psmwd_v1_t;
+
+typedef struct hnd_ext_trap_psmwd {
+ uint16 xtag;
+ uint16 version; /* version of the information following this */
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_phydebug;
+ uint32 i32_clk_ctl_st;
+ uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2];
+ uint16 i16_0x4b8; /* psm_brwk_0 */
+ uint16 i16_0x4ba; /* psm_brwk_1 */
+ uint16 i16_0x4bc; /* psm_brwk_2 */
+ uint16 i16_0x4be; /* psm_brwk_3 */
+ uint16 i16_0x1a8; /* gated clock en */
+ uint16 i16_0x406; /* Rcv Fifo Ctrl */
+ uint16 i16_0x408; /* Rx ctrl 1 */
+ uint16 i16_0x41a; /* Rxe Status 1 */
+ uint16 i16_0x41c; /* Rxe Status 2 */
+ uint16 i16_0x424; /* rcv wrd count 0 */
+ uint16 i16_0x426; /* rcv wrd count 1 */
+ uint16 i16_0x456; /* RCV_LFIFO_STS */
+ uint16 i16_0x480; /* PSM_SLP_TMR */
+ uint16 i16_0x500; /* TXE CTRL */
+ uint16 i16_0x50e; /* TXE Status */
+ uint16 i16_0x55e; /* TXE_xmtdmabusy */
+ uint16 i16_0x566; /* TXE_XMTfifosuspflush */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x694; /* IFS_TX_DUR */
+ uint16 i16_0x6a0; /* SLow_CTL */
+ uint16 i16_0x490; /* psm_brc */
+ uint16 i16_0x4da; /* psm_brc_1 */
+ uint16 i16_0x838; /* TXE_AQM fifo Ready */
+ uint16 i16_0x8c0; /* Dagg ctrl */
+ uint16 shm_prewds_cnt;
+ uint16 shm_txtplufl_cnt;
+ uint16 shm_txphyerr_cnt;
+} hnd_ext_trap_psmwd_t;
+
+#define HEAP_HISTOGRAM_DUMP_LEN 6
+#define HEAP_MAX_SZ_BLKS_LEN 2
+
+/* Ignore chunks for which there are fewer than this many instances, irrespective of size */
+#define HEAP_HISTOGRAM_INSTANCE_MIN 4
+
+/*
+ * Use the last two length values for chunks larger than this, or when we run out of
+ * histogram entries (because we have too many different sized chunks) to store "other"
+ */
+#define HEAP_HISTOGRAM_SPECIAL 0xfffeu
+
+#define HEAP_HISTOGRAM_GRTR256K 0xffffu
+
+typedef struct hnd_ext_trap_heap_err {
+ uint32 arena_total;
+ uint32 heap_free;
+ uint32 heap_inuse;
+ uint32 mf_count;
+ uint32 stack_lwm;
+ uint16 heap_histogm[HEAP_HISTOGRAM_DUMP_LEN * 2]; /* size/number */
+ uint16 max_sz_free_blk[HEAP_MAX_SZ_BLKS_LEN];
+} hnd_ext_trap_heap_err_t;
+
+#define MEM_TRAP_NUM_WLC_TX_QUEUES 6
+#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V2 2
+
+/* There are already quite a few chips which are ROM'ed with this structure,
+ * so we will not be adding a version. This will be the V1 structure.
+ */
+typedef struct hnd_ext_trap_wlc_mem_err {
+ uint8 instance;
+ uint8 associated;
+ uint8 soft_ap_client_cnt;
+ uint8 peer_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_t;
+
+typedef struct hnd_ext_trap_wlc_mem_err_v2 {
+ uint16 version;
+ uint16 pad;
+ uint8 instance;
+ uint8 stas_associated;
+ uint8 aps_associated;
+ uint8 soft_ap_client_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_v2_t;
+
+#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V3 3
+
+typedef struct hnd_ext_trap_wlc_mem_err_v3 {
+ uint8 version;
+ uint8 instance;
+ uint8 stas_associated;
+ uint8 aps_associated;
+ uint8 soft_ap_client_cnt;
+ uint8 peer_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_v3_t;
+
+typedef struct hnd_ext_trap_pcie_mem_err {
+ uint16 d2h_queue_len;
+ uint16 d2h_req_queue_len;
+} hnd_ext_trap_pcie_mem_err_t;
+
+#define MAX_DMAFIFO_ENTRIES_V1 1
+#define MAX_DMAFIFO_DESC_ENTRIES_V1 2
+#define HND_EXT_TRAP_AXIERROR_SIGNATURE 0xbabebabe
+#define HND_EXT_TRAP_AXIERROR_VERSION_1 1
+
+/* Structure to collect debug info of descriptor entry for dma channel on encountering AXI Error */
+/* The three structures below are dependent; any change will bump the version of all three */
+
+typedef struct hnd_ext_trap_desc_entry_v1 {
+ uint32 ctrl1; /* descriptor entry at din < misc control bits > */
+ uint32 ctrl2; /* descriptor entry at din <buffer count and address extension> */
+ uint32 addrlo; /* descriptor entry at din <address of data buffer, bits 31:0> */
+ uint32 addrhi; /* descriptor entry at din <address of data buffer, bits 63:32> */
+} dma_dentry_v1_t;
+
+/* Structure to collect debug info about a dma channel on encountering AXI Error */
+typedef struct hnd_ext_trap_dma_fifo_v1 {
+ uint8 valid; /* no of valid desc entries filled, non zero = fifo entry valid */
+ uint8 direction; /* TX=1, RX=2, currently only using TX */
+ uint16 index; /* Index of the DMA channel in system */
+ uint32 dpa; /* Expected Address of Descriptor table from software state */
+ uint32 desc_lo; /* Low Address of Descriptor table programmed in DMA register */
+ uint32 desc_hi; /* High Address of Descriptor table programmed in DMA register */
+ uint16 din; /* rxin / txin */
+ uint16 dout; /* rxout / txout */
+ dma_dentry_v1_t dentry[MAX_DMAFIFO_DESC_ENTRIES_V1]; /* Descriptor Entries */
+} dma_fifo_v1_t;
+
+typedef struct hnd_ext_trap_axi_error_v1 {
+ uint8 version; /* version = 1 */
+ uint8 dma_fifo_valid_count; /* Number of valid dma_fifo entries */
+ uint16 length; /* length of whole structure */
+ uint32 signature; /* indicates that it is filled with AXI Error data */
+ uint32 axi_errorlog_status; /* errlog_status from slave wrapper */
+ uint32 axi_errorlog_core; /* errlog_core from slave wrapper */
+ uint32 axi_errorlog_lo; /* errlog_lo from slave wrapper */
+ uint32 axi_errorlog_hi; /* errlog_hi from slave wrapper */
+ uint32 axi_errorlog_id; /* errlog_id from slave wrapper */
+ dma_fifo_v1_t dma_fifo[MAX_DMAFIFO_ENTRIES_V1];
+} hnd_ext_trap_axi_error_v1_t;
+
+#define HND_EXT_TRAP_MACSUSP_INFO_VER 1
+typedef struct hnd_ext_trap_macsusp {
+ uint16 xtag;
+ uint8 version; /* version of the information following this */
+ uint8 trap_reason;
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_phydebug[4];
+ uint32 i32_psmdebug[8];
+ uint16 i16_0x41a; /* Rxe Status 1 */
+ uint16 i16_0x41c; /* Rxe Status 2 */
+ uint16 i16_0x490; /* PSM BRC */
+ uint16 i16_0x50e; /* TXE Status */
+ uint16 i16_0x55e; /* TXE_xmtdmabusy */
+ uint16 i16_0x566; /* TXE_XMTfifosuspflush */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x694; /* IFS_TX_DUR */
+ uint16 i16_0x7c0; /* WEP CTL */
+ uint16 i16_0x838; /* TXE_AQM fifo Ready */
+ uint16 i16_0x880; /* MHP_status */
+ uint16 shm_prewds_cnt;
+ uint16 shm_ucode_dbgst;
+} hnd_ext_trap_macsusp_t;
+
+#define HND_EXT_TRAP_MACENAB_INFO_VER 1
+typedef struct hnd_ext_trap_macenab {
+ uint16 xtag;
+ uint8 version; /* version of the information following this */
+ uint8 trap_reason;
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_psmdebug[8];
+ uint32 i32_clk_ctl_st;
+ uint32 i32_powerctl;
+ uint16 i16_0x1a8; /* gated clock en */
+ uint16 i16_0x480; /* PSM_SLP_TMR */
+ uint16 i16_0x490; /* PSM BRC */
+ uint16 i16_0x600; /* TSF CTL */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x6a0; /* SLow_CTL */
+ uint16 i16_0x6a6; /* SLow_FRAC */
+ uint16 i16_0x6a8; /* fast power up delay */
+ uint16 i16_0x6aa; /* SLow_PER */
+ uint16 shm_ucode_dbgst;
+ uint16 PAD;
+} hnd_ext_trap_macenab_t;
+
+#define HND_EXT_TRAP_PHY_INFO_VER_1 (1)
+typedef struct hnd_ext_trap_phydbg {
+ uint16 err;
+ uint16 RxFeStatus;
+ uint16 TxFIFOStatus0;
+ uint16 TxFIFOStatus1;
+ uint16 RfseqMode;
+ uint16 RfseqStatus0;
+ uint16 RfseqStatus1;
+ uint16 RfseqStatus_Ocl;
+ uint16 RfseqStatus_Ocl1;
+ uint16 OCLControl1;
+ uint16 TxError;
+ uint16 bphyTxError;
+ uint16 TxCCKError;
+ uint16 TxCtrlWrd0;
+ uint16 TxCtrlWrd1;
+ uint16 TxCtrlWrd2;
+ uint16 TxLsig0;
+ uint16 TxLsig1;
+ uint16 TxVhtSigA10;
+
uint16 TxVhtSigA11; + uint16 TxVhtSigA20; + uint16 TxVhtSigA21; + uint16 txPktLength; + uint16 txPsdulengthCtr; + uint16 gpioClkControl; + uint16 gpioSel; + uint16 pktprocdebug; + uint16 PAD; + uint32 gpioOut[3]; +} hnd_ext_trap_phydbg_t; + +/* unique IDs for separate cores in SI */ +#define REGDUMP_MASK_MAC0 BCM_BIT(1) +#define REGDUMP_MASK_ARM BCM_BIT(2) +#define REGDUMP_MASK_PCIE BCM_BIT(3) +#define REGDUMP_MASK_MAC1 BCM_BIT(4) +#define REGDUMP_MASK_PMU BCM_BIT(5) + +typedef struct { + uint16 reg_offset; + uint16 core_mask; +} reg_dump_config_t; + +#define HND_EXT_TRAP_PHY_INFO_VER 2 +typedef struct hnd_ext_trap_phydbg_v2 { + uint8 version; + uint8 len; + uint16 err; + uint16 RxFeStatus; + uint16 TxFIFOStatus0; + uint16 TxFIFOStatus1; + uint16 RfseqMode; + uint16 RfseqStatus0; + uint16 RfseqStatus1; + uint16 RfseqStatus_Ocl; + uint16 RfseqStatus_Ocl1; + uint16 OCLControl1; + uint16 TxError; + uint16 bphyTxError; + uint16 TxCCKError; + uint16 TxCtrlWrd0; + uint16 TxCtrlWrd1; + uint16 TxCtrlWrd2; + uint16 TxLsig0; + uint16 TxLsig1; + uint16 TxVhtSigA10; + uint16 TxVhtSigA11; + uint16 TxVhtSigA20; + uint16 TxVhtSigA21; + uint16 txPktLength; + uint16 txPsdulengthCtr; + uint16 gpioClkControl; + uint16 gpioSel; + uint16 pktprocdebug; + uint32 gpioOut[3]; + uint32 additional_regs[1]; +} hnd_ext_trap_phydbg_v2_t; + +#define HND_EXT_TRAP_PHY_INFO_VER_3 (3) +typedef struct hnd_ext_trap_phydbg_v3 { + uint8 version; + uint8 len; + uint16 err; + uint16 RxFeStatus; + uint16 TxFIFOStatus0; + uint16 TxFIFOStatus1; + uint16 RfseqMode; + uint16 RfseqStatus0; + uint16 RfseqStatus1; + uint16 RfseqStatus_Ocl; + uint16 RfseqStatus_Ocl1; + uint16 OCLControl1; + uint16 TxError; + uint16 bphyTxError; + uint16 TxCCKError; + uint16 TxCtrlWrd0; + uint16 TxCtrlWrd1; + uint16 TxCtrlWrd2; + uint16 TxLsig0; + uint16 TxLsig1; + uint16 TxVhtSigA10; + uint16 TxVhtSigA11; + uint16 TxVhtSigA20; + uint16 TxVhtSigA21; + uint16 txPktLength; + uint16 txPsdulengthCtr; + uint16 gpioClkControl; + uint16 gpioSel; + uint16 pktprocdebug; + uint32 gpioOut[3]; + uint16 HESigURateFlagStatus; + uint16 HESigUsRateFlagStatus; + uint32 additional_regs[1]; +} hnd_ext_trap_phydbg_v3_t; + +/* Phy TxErr Dump Structure */ +#define HND_EXT_TRAP_PHYTXERR_INFO_VER 1 +#define HND_EXT_TRAP_PHYTXERR_INFO_VER_V2 2 +typedef struct hnd_ext_trap_macphytxerr { + uint8 version; /* version of the information following this */ + uint8 trap_reason; + uint16 i16_0x63E; /* tsf_tmr_rx_ts */ + uint16 i16_0x640; /* tsf_tmr_tx_ts */ + uint16 i16_0x642; /* tsf_tmr_rx_end_ts */ + uint16 i16_0x846; /* TDC_FrmLen0 */ + uint16 i16_0x848; /* TDC_FrmLen1 */ + uint16 i16_0x84a; /* TDC_Txtime */ + uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */ + uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */ + uint16 i16_0x856; /* TDC_VhtPsduLen0 */ + uint16 i16_0x858; /* TDC_VhtPsduLen1 */ + uint16 i16_0x490; /* psm_brc */ + uint16 i16_0x4d8; /* psm_brc_1 */ + uint16 shm_txerr_reason; + uint16 shm_pctl0; + uint16 shm_pctl1; + uint16 shm_pctl2; + uint16 shm_lsig0; + uint16 shm_lsig1; + uint16 shm_plcp0; + uint16 shm_plcp1; + uint16 shm_plcp2; + uint16 shm_vht_sigb0; + uint16 shm_vht_sigb1; + uint16 shm_tx_tst; + uint16 shm_txerr_tm; + uint16 shm_curchannel; + uint16 shm_crx_rxtsf_pos; + uint16 shm_lasttx_tsf; + uint16 shm_s_rxtsftmrval; + uint16 i16_0x29; /* Phy indirect address */ + uint16 i16_0x2a; /* Phy indirect address */ +} hnd_ext_trap_macphytxerr_t; + +typedef struct hnd_ext_trap_macphytxerr_v2 { + uint8 version; /* version of the information following this */ + uint8 trap_reason; + 
uint16 i16_0x63E; /* tsf_tmr_rx_ts */ + uint16 i16_0x640; /* tsf_tmr_tx_ts */ + uint16 i16_0x642; /* tsf_tmr_rx_end_ts */ + uint16 i16_0x846; /* TDC_FrmLen0 */ + uint16 i16_0x848; /* TDC_FrmLen1 */ + uint16 i16_0x84a; /* TDC_Txtime */ + uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */ + uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */ + uint16 i16_0x856; /* TDC_VhtPsduLen0 */ + uint16 i16_0x858; /* TDC_VhtPsduLen1 */ + uint16 i16_0x490; /* psm_brc */ + uint16 i16_0x4d8; /* psm_brc_1 */ + uint16 shm_txerr_reason; + uint16 shm_pctl0; + uint16 shm_pctl1; + uint16 shm_pctl2; + uint16 shm_lsig0; + uint16 shm_lsig1; + uint16 shm_plcp0; + uint16 shm_plcp1; + uint16 shm_plcp2; + uint16 shm_vht_sigb0; + uint16 shm_vht_sigb1; + uint16 shm_tx_tst; + uint16 shm_txerr_tm; + uint16 shm_curchannel; + uint16 shm_crx_rxtsf_pos; + uint16 shm_lasttx_tsf; + uint16 shm_s_rxtsftmrval; + uint16 i16_0x29; /* Phy indirect address */ + uint16 i16_0x2a; /* Phy indirect address */ + uint8 phyerr_bmac_cnt; /* number of times bmac raised phy tx err */ + uint8 phyerr_bmac_rsn; /* bmac reason for phy tx error */ + uint16 pad; + uint32 recv_fifo_status[3][2]; /* Rcv Status0 & Rcv Status1 for 3 Rx fifos */ +} hnd_ext_trap_macphytxerr_v2_t; + +#define HND_EXT_TRAP_PCIE_ERR_ATTN_VER_1 (1u) +#define MAX_AER_HDR_LOG_REGS (4u) +typedef struct hnd_ext_trap_pcie_err_attn_v1 { + uint8 version; + uint8 pad[3]; + uint32 err_hdr_logreg1; + uint32 err_hdr_logreg2; + uint32 err_hdr_logreg3; + uint32 err_hdr_logreg4; + uint32 err_code_logreg; + uint32 err_type; + uint32 err_code_state; + uint32 last_err_attn_ts; + uint32 cfg_tlp_hdr[MAX_AER_HDR_LOG_REGS]; +} hnd_ext_trap_pcie_err_attn_v1_t; + +#define MAX_EVENTLOG_BUFFERS 48 +typedef struct eventlog_trapdata_info { + uint32 num_elements; + uint32 seq_num; + uint32 log_arr_addr; +} eventlog_trapdata_info_t; + +typedef struct eventlog_trap_buf_info { + uint32 len; + uint32 buf_addr; +} eventlog_trap_buf_info_t; + +#define HND_MEM_HC_FB_MEM_VER_1 (1u) +typedef struct hnd_ext_trap_fb_mem_err { + uint16 version; + uint16 reserved; + uint32 flip_bit_err_time; +} hnd_ext_trap_fb_mem_err_t; + +#if defined(ETD) && !defined(WLETD) +#define ETD_SW_FLAG_MEM 0x00000001 + +int etd_init(osl_t *osh); +int etd_register_trap_ext_callback(void *cb, void *arg); +int (etd_register_trap_ext_callback_late)(void *cb, void *arg); +uint32 *etd_get_trap_ext_data(void); +uint32 etd_get_trap_ext_swflags(void); +void etd_set_trap_ext_swflag(uint32 flag); +void etd_notify_trap_ext_callback(trap_t *tr); +reg_dump_config_t *etd_get_reg_dump_config_tbl(void); +uint etd_get_reg_dump_config_len(void); + +extern bool _etd_enab; + +#if defined(ROM_ENAB_RUNTIME_CHECK) + #define ETD_ENAB(pub) (_etd_enab) +#elif defined(ETD_DISABLED) + #define ETD_ENAB(pub) (0) +#else + #define ETD_ENAB(pub) (1) +#endif + +#else +#define ETD_ENAB(pub) (0) +#endif /* WLETD */ + +#endif /* !LANGUAGE_ASSEMBLY */ + +#endif /* _ETD_H_ */ diff --git a/bcmdhd.101.10.361.x/include/ethernet.h b/bcmdhd.101.10.361.x/include/ethernet.h new file mode 100755 index 0000000..f378dd2 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/ethernet.h @@ -0,0 +1,252 @@ +/* + * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. + * + * Copyright (C) 2020, Broadcom. 
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define ETHER_ADDR_LEN 6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define ETHER_TYPE_LEN 2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define ETHER_CRC_LEN 4
+
+/*
+ * The length of the combined header.
+ */
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define ETHER_MIN_LEN 64
+
+/*
+ * The minimum packet user data length.
+ */
+#define ETHER_MIN_DATA 46
+
+/*
+ * The maximum packet length.
+ */
+#define ETHER_MAX_LEN 1518
+
+/*
+ * The maximum packet user data length.
+ */
+#define ETHER_MAX_DATA 1500
+
+/* ether types */
+#define ETHER_TYPE_MIN 0x0600 /* Anything less than MIN is a length */
+#define ETHER_TYPE_IP 0x0800 /* IP */
+#define ETHER_TYPE_ARP 0x0806 /* ARP */
+#define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */
+#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */
+#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */
+#define ETHER_TYPE_802_1X 0x888e /* 802.1x */
+#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */
+#define ETHER_TYPE_WAI 0x88b4 /* WAI */
+#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */
+#define ETHER_TYPE_RRB ETHER_TYPE_89_0D /* RRB 802.11r 2008 */
+#define ETHER_TYPE_1905_1 0x893a /* IEEE 1905.1 MCDU */
+
+#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */
+
+#define ETHER_TYPE_IAPP_L2_UPDATE 0x6 /* IAPP L2 update frame */
+
+/* Broadcom subtype follows ethertype; First 2 bytes are reserved; Next 2 are subtype; */
+#define ETHER_BRCM_SUBTYPE_LEN 4 /* Broadcom 4 byte subtype */
+
+/* ether header */
+#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) /* dest address offset */
+#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) /* src address offset */
+#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) /* ether type offset */
+
+/*
+ * A macro to validate a length
+ */
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \
+ ((uint8 *)ea)[0] = 0x01; \
+ ((uint8 *)ea)[1] = 0x00; \
+ ((uint8 *)ea)[2] = 0x5e; \
+ ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \
+ ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \
+ ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \
+}
+
+#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */
+/*
+ * Structure of a 10Mb/s Ethernet header.
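+ * (Illustration: on the wire this is ether_dhost[6] | ether_shost[6] |
+ * ether_type[2], so ETHER_HDR_LEN == 2 * ETHER_ADDR_LEN + ETHER_TYPE_LEN
+ * == 14 bytes, matching the definition above.)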
+ */
+BWL_PRE_PACKED_STRUCT struct ether_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+BWL_PRE_PACKED_STRUCT struct ether_addr {
+ uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif /* __INCif_etherh */
+#ifdef __INCif_etherh
+#endif /* !__INCif_etherh Quick and ugly hack for VxWorks */
+
+/*
+ * Takes a pointer; set, test, clear, or toggle the locally administered
+ * address bit in the 48-bit Ethernet address.
+ */
+#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+/* Takes a pointer, marks unicast address bit in the MAC address */
+#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+/*
+ * Takes a pointer, returns true if a 48-bit multicast address
+ * (including broadcast, since it is all ones)
+ */
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
+#if defined(DONGLEBUILD) && defined(__ARM_ARCH_7A__) && !defined(BCMFUZZ)
+#define eacmp(a, b) (((*(const uint32 *)(a)) ^ (*(const uint32 *)(b))) || \
+ ((*(const uint16 *)(((const uint8 *)(a)) + 4)) ^ \
+ (*(const uint16 *)(((const uint8 *)(b)) + 4))))
+
+#define ehcmp(a, b) ((((const uint32 *)(a))[0] ^ ((const uint32 *)(b))[0]) || \
+ (((const uint32 *)(a))[1] ^ ((const uint32 *)(b))[1]) || \
+ (((const uint32 *)(a))[2] ^ ((const uint32 *)(b))[2]) || \
+ ((*(const uint16 *)(((const uint32 *)(a)) + 3)) ^ \
+ (*(const uint16 *)(((const uint32 *)(b)) + 3))))
+#else
+#define eacmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+ (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+ (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]))
+
+#define ehcmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+ (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+ (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]) | \
+ (((const uint16 *)(a))[3] ^ ((const uint16 *)(b))[3]) | \
+ (((const uint16 *)(a))[4] ^ ((const uint16 *)(b))[4]) | \
+ (((const uint16 *)(a))[5] ^ ((const uint16 *)(b))[5]) | \
+ (((const uint16 *)(a))[6] ^ ((const uint16 *)(b))[6]))
+#endif /* DONGLEBUILD && __ARM_ARCH_7A__ */
+
+#define ether_cmp(a, b) eacmp(a, b)
+
+/* copy an ethernet address - assumes the pointers can be referenced as shorts */
+#if defined(DONGLEBUILD) && defined(__ARM_ARCH_7A__) && !defined(BCMFUZZ)
+#define eacopy(s, d) \
+do { \
+ (*(uint32 *)(d)) = (*(const uint32 *)(s)); \
+ (*(uint16 *)(((uint8 *)(d)) + 4)) = (*(const uint16 *)(((const uint8 *)(s)) + 4)); \
+} while (0)
+#else
+#define eacopy(s, d) \
+do { \
+ ((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
+ ((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \
+ ((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \
+} while (0)
+#endif /* DONGLEBUILD && __ARM_ARCH_7A__ */
+
+#define ether_copy(s, d) eacopy(s, d)
+
+/* Copy an ethernet address in reverse order */
+#define ether_rcopy(s, d) \
+do { \
+ ((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \
+ ((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \
+ ((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \
+} while (0)
+
+/* Copy 14B ethernet header: 32-bit aligned source and destination.
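+ * (Worked size check: the three 32-bit stores below cover bytes 0-11 and
+ * the final 16-bit store covers bytes 12-13, i.e. 3*4 + 2 == 14 bytes,
+ * exactly ETHER_HDR_LEN.)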
+ */
+#define ehcopy32(s, d) \
+do { \
+ ((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \
+ ((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \
+ ((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \
+ ((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
+} while (0)
+
+/* Dongles use bcmutils functions instead of macros.
+ * Possibly slower but saves over 800 bytes off THUMB dongle image.
+ */
+
+extern const struct ether_addr ether_bcast;
+extern const struct ether_addr ether_null;
+extern const struct ether_addr ether_ipv6_mcast;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+#define ETHER_ISBCAST(ea) ether_isbcast(ea)
+
+#if defined(__ARM_ARCH_7A__) && !defined(BCMFUZZ)
+#define ETHER_ISNULLADDR(ea) (((*(const uint32 *)(ea)) | \
+ (*(const uint16 *)(((const uint8 *)(ea)) + 4))) == 0)
+#else
+#define ETHER_ISNULLADDR(ea) ether_isnulladdr(ea)
+#endif /* __ARM_ARCH_7A__ */
+
+#define ETHER_ISNULLDEST(da) ((((const uint16 *)(da))[0] | \
+ ((const uint16 *)(da))[1] | \
+ ((const uint16 *)(da))[2]) == 0)
+#define ETHER_ISNULLSRC(sa) ETHER_ISNULLDEST(sa)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+ struct ether_header t; \
+ t = *(struct ether_header *)(s); \
+ *(struct ether_header *)(d) = t; \
+} while (0)
+
+#define ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NET_ETHERNET_H_ */
diff --git a/bcmdhd.101.10.361.x/include/event_log.h b/bcmdhd.101.10.361.x/include/event_log.h
new file mode 100755
index 0000000..8ede661
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/event_log.h
@@ -0,0 +1,666 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _EVENT_LOG_H_
+#define _EVENT_LOG_H_
+
+#include <typedefs.h>
+#include <event_log_set.h>
+#include <event_log_tag.h>
+#include <event_log_payload.h>
+
+/* logstrs header */
+#define LOGSTRS_MAGIC 0x4C4F4753
+#define LOGSTRS_VERSION 0x1
+
+/* max log size */
+#define EVENT_LOG_MAX_SIZE (64u * 1024u)
+
+/* We make sure that the block size will fit in a single packet
+ * (allowing for a bit of overhead on each packet)
+ */
+#if defined(BCMPCIEDEV)
+#define EVENT_LOG_MAX_BLOCK_SIZE 1648
+#else
+#define EVENT_LOG_MAX_BLOCK_SIZE 1400
+#endif
+
+#define EVENT_LOG_BLOCK_SIZE_1K 0x400u
+#define EVENT_LOG_WL_BLOCK_SIZE 0x200
+#define EVENT_LOG_PSM_BLOCK_SIZE 0x200
+#define EVENT_LOG_MEM_API_BLOCK_SIZE 0x200
+#define EVENT_LOG_BUS_BLOCK_SIZE 0x200
+#define EVENT_LOG_ERROR_BLOCK_SIZE 0x400
+#define EVENT_LOG_MSCH_BLOCK_SIZE 0x400
+#define EVENT_LOG_WBUS_BLOCK_SIZE 0x100
+#define EVENT_LOG_PRSV_PERIODIC_BLOCK_SIZE (0x200u)
+
+#define EVENT_LOG_WL_BUF_SIZE (EVENT_LOG_WL_BLOCK_SIZE * 3u)
+
+#define EVENT_LOG_TOF_INLINE_BLOCK_SIZE 1300u
+#define EVENT_LOG_TOF_INLINE_BUF_SIZE (EVENT_LOG_TOF_INLINE_BLOCK_SIZE * 3u)
+
+#define EVENT_LOG_PRSRV_BUF_SIZE (EVENT_LOG_MAX_BLOCK_SIZE * 2)
+#define EVENT_LOG_BUS_PRSRV_BUF_SIZE (EVENT_LOG_BUS_BLOCK_SIZE * 2)
+#define EVENT_LOG_WBUS_PRSRV_BUF_SIZE (EVENT_LOG_WBUS_BLOCK_SIZE * 2)
+
+#define EVENT_LOG_BLOCK_SIZE_PRSRV_CHATTY (EVENT_LOG_MAX_BLOCK_SIZE * 1)
+#define EVENT_LOG_BLOCK_SIZE_BUS_PRSRV_CHATTY (EVENT_LOG_MAX_BLOCK_SIZE * 1)
+
+/* Maximum event log record payload size = 1016 bytes or 254 words. */
+#define EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE 254
+
+#define EVENT_LOG_EXT_HDR_IND (0x01)
+#define EVENT_LOG_EXT_HDR_BIN_DATA_IND (0x01 << 1)
+/* Format number to send binary data with extended event log header */
+#define EVENT_LOG_EXT_HDR_BIN_FMT_NUM (0x3FFE << 2)
+
+#define EVENT_LOGSET_ID_MASK 0x3F
+/* For event_log_get iovar, set values from 240 to 255 mean special commands for a group of sets */
+#define EVENT_LOG_GET_IOV_CMD_MASK (0xF0u)
+#define EVENT_LOG_GET_IOV_CMD_ID_MASK (0xFu)
+#define EVENT_LOG_GET_IOV_CMD_ID_FORCE_FLUSH_PRSRV (0xEu) /* 240 + 14 = 254 */
+#define EVENT_LOG_GET_IOV_CMD_ID_FORCE_FLUSH_ALL (0xFu) /* 240 + 15 = 255 */
+
+/*
+ * There are multiple levels of objects defined here:
+ * event_log_set - a set of buffers
+ * event log groups - every event log call is part of just one. All
+ * event log calls in a group are handled the
+ * same way. Each event log group is associated
+ * with an event log set or is off.
+ */
+
+#ifndef __ASSEMBLER__
+
+/* On the external system where the dumper is we need to make sure
+ * that these types are the same size as they are on the ARM that
+ * produced them
+ */
+#ifdef EVENT_LOG_DUMPER
+#define _EL_BLOCK_PTR uint32
+#define _EL_TYPE_PTR uint32
+#define _EL_SET_PTR uint32
+#define _EL_TOP_PTR uint32
+#else
+#define _EL_BLOCK_PTR struct event_log_block *
+#define _EL_TYPE_PTR uint32 *
+#define _EL_SET_PTR struct event_log_set **
+#define _EL_TOP_PTR struct event_log_top *
+#endif /* EVENT_LOG_DUMPER */
+
+/* Event log sets (a logical circular buffer) consist of one or more
+ * event_log_blocks. The blocks themselves form a logical circular
+ * list. The log entries are placed in each event_log_block until it
+ * is full. Logging continues with the next event_log_block in the
+ * event_set until the last event_log_block is reached and then
+ * logging starts over with the first event_log_block in the
+ * event_set.
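+ * (Illustration: with three blocks B0 -> B1 -> B2 forming the ring,
+ * entries fill B0 first; once B2 fills, logging wraps around and
+ * overwrites B0 again.)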
+ */
+typedef struct event_log_block {
+ _EL_BLOCK_PTR next_block;
+ _EL_BLOCK_PTR prev_block;
+ _EL_TYPE_PTR end_ptr;
+
+ /* Start of packet sent for log tracing */
+ uint16 pktlen; /* Size of rest of block */
+ uint16 count; /* Logtrace counter */
+ uint32 extra_hdr_info; /* LSB: 6 bits set id. MSB 24 bits reserved */
+ uint32 event_logs; /* Pointer to BEGINNING of event logs */
+ /* Event logs go here. Do not put extra fields below. */
+} event_log_block_t;
+
+/* Relative offset of extra_hdr_info field from pktlen field in log block */
+#define EVENT_LOG_BUF_EXTRA_HDR_INFO_REL_PKTLEN_OFFSET \
+ (OFFSETOF(event_log_block_t, extra_hdr_info) - OFFSETOF(event_log_block_t, pktlen))
+
+#define EVENT_LOG_SETID_MASK (0x3Fu)
+
+#define EVENT_LOG_BLOCK_HDRLEN (sizeof(((event_log_block_t *) 0)->pktlen) \
+ + sizeof(((event_log_block_t *) 0)->count) \
+ + sizeof(((event_log_block_t *) 0)->extra_hdr_info))
+#define EVENT_LOG_BLOCK_LEN (EVENT_LOG_BLOCK_HDRLEN + sizeof(event_log_hdr_t))
+
+#define EVENT_LOG_PRESERVE_BLOCK (1 << 0)
+#define EVENT_LOG_BLOCK_FLAG_MASK 0xff000000u
+#define EVENT_LOG_BLOCK_FLAG_SHIFT 24u
+
+#define EVENT_LOG_BLOCK_GET_PREV_BLOCK(block) ((_EL_BLOCK_PTR)(((uint32)((block)->prev_block)) & \
+ ~EVENT_LOG_BLOCK_FLAG_MASK))
+#define EVENT_LOG_BLOCK_SET_PREV_BLOCK(block, prev) ((block)->prev_block = \
+ ((_EL_BLOCK_PTR)((((uint32)(block)->prev_block) & EVENT_LOG_BLOCK_FLAG_MASK) | \
+ (((uint32)(prev)) & ~EVENT_LOG_BLOCK_FLAG_MASK))))
+#define EVENT_LOG_BLOCK_GET_FLAG(block) ((((uint32)(block)->prev_block) & \
+ EVENT_LOG_BLOCK_FLAG_MASK) >> EVENT_LOG_BLOCK_FLAG_SHIFT)
+#define EVENT_LOG_BLOCK_SET_FLAG(block, flag) ((block)->prev_block = \
+ (_EL_BLOCK_PTR)(((uint32)EVENT_LOG_BLOCK_GET_PREV_BLOCK(block)) | flag))
+#define EVENT_LOG_BLOCK_OR_FLAG(block, flag) EVENT_LOG_BLOCK_SET_FLAG(block, \
+ (EVENT_LOG_BLOCK_GET_FLAG(block) | flag) << EVENT_LOG_BLOCK_FLAG_SHIFT)
+
+typedef enum {
+ SET_DESTINATION_INVALID = -1,
+ SET_DESTINATION_HOST = 0, /* Eventlog buffer is sent out to host once filled. */
+ SET_DESTINATION_NONE = 1, /* The buffer is not sent out, and it will be overwritten
+ * with new messages.
+ */
+ SET_DESTINATION_FORCE_FLUSH_TO_HOST = 2, /* Buffers are sent to host once and then the
+ * value is reset back to SET_DESTINATION_NONE.
+ */
+ SET_DESTINATION_FLUSH_ON_WATERMARK = 3, /* Buffers are sent to host when the watermark is
+ * reached, defined by the feature /chip
+ */
+ SET_DESTINATION_MAX
+} event_log_set_destination_t;
+
+/* sub destination for routing at the host */
+typedef enum {
+ SET_SUB_DESTINATION_0 = 0,
+ SET_SUB_DESTINATION_1 = 1,
+ SET_SUB_DESTINATION_2 = 2,
+ SET_SUB_DESTINATION_3 = 3,
+ SET_SUB_DESTINATION_DEFAULT = SET_SUB_DESTINATION_0
+} event_log_set_sub_destination_t;
+
+/* There can be multiple event_sets with each logging a set of
+ * associated events (i.e., "fast" and "slow" events).
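+ * (Hypothetical example: a chatty "fast" set could use SET_DESTINATION_NONE
+ * and simply wrap in place until dumped post-mortem, while a "slow" error
+ * set uses SET_DESTINATION_HOST so each block is shipped up as it fills.)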
+ */
+typedef struct event_log_set {
+ _EL_BLOCK_PTR first_block; /* Pointer to first event_log block */
+ _EL_BLOCK_PTR last_block; /* Pointer to last event_log block */
+ _EL_BLOCK_PTR logtrace_block; /* next block traced */
+ _EL_BLOCK_PTR cur_block; /* Pointer to current event_log block */
+ _EL_TYPE_PTR cur_ptr; /* Current event_log pointer */
+ uint32 blockcount; /* Number of blocks */
+ uint16 logtrace_count; /* Last count for logtrace */
+ uint16 blockfill_count; /* Fill count for logtrace */
+ uint32 timestamp; /* Last timestamp event */
+ uint32 cyclecount; /* Cycles at last timestamp event */
+ event_log_set_destination_t destination;
+ uint16 size; /* same size for all buffers in one set */
+ uint16 flags;
+ uint16 num_preserve_blocks;
+ event_log_set_sub_destination_t sub_destination;
+ uint16 water_mark; /* not used yet: threshold to flush host in percent */
+ uint32 period; /* period to flush host in ms */
+ uint32 last_rpt_ts; /* last time to flush in ms */
+} event_log_set_t;
+
+/* Definition of flags in set */
+#define EVENT_LOG_SET_SHRINK_ACTIVE (1 << 0)
+#define EVENT_LOG_SET_CONFIG_PARTIAL_BLK_SEND (0x1 << 1)
+#define EVENT_LOG_SET_CHECK_LOG_RATE (1 << 2)
+#define EVENT_LOG_SET_PERIODIC (1 << 3)
+#define EVENT_LOG_SET_D3PRSV (1 << 4)
+
+/* Top data structure for access to everything else */
+typedef struct event_log_top {
+ uint32 magic;
+#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */
+ uint32 version;
+#define EVENT_LOG_VERSION 1
+ uint32 num_sets;
+ uint32 logstrs_size; /* Size of lognums + logstrs area */
+ uint32 timestamp; /* Last timestamp event */
+ uint32 cyclecount; /* Cycles at last timestamp event */
+ _EL_SET_PTR sets; /* Ptr to array of set ptrs */
+ uint16 log_count; /* Number of event logs from last flush */
+ uint16 rate_hc; /* Max number of prints per second */
+ uint32 hc_timestamp; /* Timestamp of last hc window starting */
+ bool cpu_freq_changed; /* Set to TRUE when CPU freq changed */
+ bool hostmem_access_enabled; /* Is host memory access enabled for log delivery */
+ bool event_trace_enabled; /* WLC_E_TRACE enabled/disabled */
+} event_log_top_t;
+
+/* structure of the trailing words at the end of logstrs.bin */
+typedef struct {
+ uint32 fw_id; /* FWID will be written by tool later */
+ uint32 flags; /* 0th bit indicates whether encrypted or not */
+ /* Keep version and magic last since "header" is appended to the end of logstrs file. */
+ uint32 version; /* Header version */
+ uint32 log_magic; /* MAGIC number for verification 'LOGS' */
+} logstr_trailer_t;
+
+/* Data structure keeping the header from logstrs.bin */
+typedef struct {
+ uint32 logstrs_size; /* Size of the file */
+ uint32 rom_lognums_offset; /* Offset to the ROM lognum */
+ uint32 ram_lognums_offset; /* Offset to the RAM lognum */
+ uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
+ uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
+ uint32 fw_id; /* FWID will be written by tool later */
+ uint32 flags; /* 0th bit indicates whether encrypted or not */
+ /* Keep version and magic last since "header" is appended to the end of logstrs file.
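+ * (Illustration: a host-side reader can locate this header by seeking to
+ * file_size - sizeof(logstr_header_t), verifying log_magic, and then using
+ * the stored offsets to find the lognum/logstr tables.)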
+ */
+ uint32 version; /* Header version */
+ uint32 log_magic; /* MAGIC number for verification 'LOGS' */
+} logstr_header_t;
+
+/* Data structure keeping the header from logstrs.bin */
+typedef struct {
+ uint32 logstrs_size; /* Size of the file */
+ uint32 rom_lognums_offset; /* Offset to the ROM lognum */
+ uint32 ram_lognums_offset; /* Offset to the RAM lognum */
+ uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
+ uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
+ /* Keep version and magic last since "header" is appended to the end of logstrs file. */
+ uint32 version; /* Header version */
+ uint32 log_magic; /* MAGIC number for verification 'LOGS' */
+} logstr_header_v1_t;
+
+/* Event log configuration table */
+typedef struct evt_log_tag_entry {
+ uint16 tag; /* Tag value. */
+ uint8 set; /* Set number. */
+ uint8 refcnt; /* Ref_count if sdc is used */
+} evt_log_tag_entry_t;
+
+#ifdef BCMDRIVER
+/* !!! The following section is for kernel mode code only !!! */
+#include <osl_decl.h>
+
+extern bool d3_preserve_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define D3_PRESERVE_ENAB() (d3_preserve_enab)
+#elif defined(EVENTLOG_D3_PRESERVE_DISABLED)
+ #define D3_PRESERVE_ENAB() (0)
+#else
+ #define D3_PRESERVE_ENAB() (1)
+#endif
+
+#if defined(EVENTLOG_PRSV_PERIODIC)
+extern bool prsv_periodic_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define PRSV_PRD_ENAB() (prsv_periodic_enab)
+#elif defined(EVENTLOG_PRSV_PERIODIC_DISABLED)
+ #define PRSV_PRD_ENAB() (0)
+#else
+ #define PRSV_PRD_ENAB() (1)
+#endif
+#endif /* EVENTLOG_PRSV_PERIODIC */
+
+/*
+ * Use the following macros for generating log events.
+ *
+ * The FAST versions check the enable of the tag before evaluating the arguments and calling the
+ * event_log function. This adds 5 instructions. The COMPACT versions evaluate the arguments
+ * and call the event_log function unconditionally. The event_log function will then skip logging
+ * if this tag is disabled.
+ *
+ * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are
+ * two variants of these macros to help.
+ *
+ * First there are the CAST versions. The event_log function normally logs uint32 values or else
+ * they have to be cast to uint32. The CAST versions blindly cast for you so you don't have to edit
+ * any existing code.
+ *
+ * Second there are the PAREN_ARGS versions. These expect the logging format string and arguments
+ * to be enclosed in parentheses. This allows us to make the following mapping of an existing
+ * msglevel macro:
+ * #define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
+ *
+ * The versions of the macros without FAST or COMPACT in their name are just synonyms for the
+ * COMPACT versions.
+ *
+ * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic
+ * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed.
+ * Use the FAST macro in performance sensitive paths. The key concept here is that you should be
+ * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely.
+ *
+ */
+
+#if !defined(EVENT_LOG_DUMPER) && !defined(DHD_EFI)
+
+#ifndef EVENT_LOG_COMPILE
+
+/* Null define if no tracing */
+#define EVENT_LOG(tag, fmt, ...)
+#define EVENT_LOG_FAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT(tag, fmt, ...)
+
+#define EVENT_LOG_CAST(tag, fmt, ...)
+#define EVENT_LOG_FAST_CAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...)
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs)
+
+#define EVENT_LOG_IF_READY(tag, fmt, ...)
+#define EVENT_LOG_IS_ON(tag) 0
+#define EVENT_LOG_IS_LOG_ON(tag) 0
+
+#define EVENT_LOG_BUFFER(tag, buf, size)
+#define EVENT_LOG_PRSRV_FLUSH()
+#define EVENT_LOG_FORCE_FLUSH_ALL()
+#define EVENT_LOG_FORCE_FLUSH_PRSRV_LOG_ALL()
+
+#else /* EVENT_LOG_COMPILE */
+
+/* The first few _EVENT_LOGX() macros are special because they can be done more
+ * efficiently this way and they are the common case. Once there are too many
+ * parameters the code size starts to be an issue and a loop is better.
+ * The trailing arguments to the _EVENT_LOGX() macros are the format string, 'fmt',
+ * followed by the variable parameters for the format. The format string is not
+ * needed in the event_logX() replacement text, so fmt is dropped in all cases.
+ */
+#define _EVENT_LOG0(tag, fmt_num, fmt) \
+ event_log0(tag, fmt_num)
+#define _EVENT_LOG1(tag, fmt_num, fmt, t1) \
+ event_log1(tag, fmt_num, t1)
+#define _EVENT_LOG2(tag, fmt_num, fmt, t1, t2) \
+ event_log2(tag, fmt_num, t1, t2)
+#define _EVENT_LOG3(tag, fmt_num, fmt, t1, t2, t3) \
+ event_log3(tag, fmt_num, t1, t2, t3)
+#define _EVENT_LOG4(tag, fmt_num, fmt, t1, t2, t3, t4) \
+ event_log4(tag, fmt_num, t1, t2, t3, t4)
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG5(tag, fmt_num, fmt, ...) event_logn(5, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG6(tag, fmt_num, fmt, ...) event_logn(6, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG7(tag, fmt_num, fmt, ...) event_logn(7, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG8(tag, fmt_num, fmt, ...) event_logn(8, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG9(tag, fmt_num, fmt, ...) event_logn(9, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGA(tag, fmt_num, fmt, ...) event_logn(10, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGB(tag, fmt_num, fmt, ...) event_logn(11, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGC(tag, fmt_num, fmt, ...) event_logn(12, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGD(tag, fmt_num, fmt, ...) event_logn(13, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGE(tag, fmt_num, fmt, ...) event_logn(14, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGF(tag, fmt_num, fmt, ...) event_logn(15, tag, fmt_num, __VA_ARGS__)
+
+/* Casting low level macros */
+#define _EVENT_LOG_CAST0(tag, fmt_num, fmt) \
+ event_log0(tag, fmt_num)
+#define _EVENT_LOG_CAST1(tag, fmt_num, fmt, t1) \
+ event_log1(tag, fmt_num, (uint32)(t1))
+#define _EVENT_LOG_CAST2(tag, fmt_num, fmt, t1, t2) \
+ event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2))
+#define _EVENT_LOG_CAST3(tag, fmt_num, fmt, t1, t2, t3) \
+ event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3))
+#define _EVENT_LOG_CAST4(tag, fmt_num, fmt, t1, t2, t3, t4) \
+ event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4))
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTA(tag, fmt_num, ...) _EVENT_LOGA(tag, fmt_num, __VA_ARGS__)
_EVENT_LOGA(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__)
+
+/* Hack to make the proper routine call when variadic macros get
+ * passed. Note the max of 15 arguments. More than that can't be
+ * handled by the event_log entries anyway, so it is best to catch it at
+ * compile time.
+ *
+ * Here is what happens with this macro: when _EVENT_LOG expands this macro,
+ * its __VA_ARGS__ argument is expanded. If __VA_ARGS__ contains only ONE
+ * argument, for example, then F maps to _1, E maps to _2, and so on, so that
+ * N maps to 0, and the macro expands to BASE ## N or BASE ## 0 which is
+ * _EVENT_LOG0. If __VA_ARGS__ contains two arguments, then everything is
+ * shifted down by one, because the second argument in __VA_ARGS__ now maps
+ * to _1, so F maps to _2, E maps to _3, and so on, and 1 (instead of 0) maps
+ * to N, and this macro expands to become _EVENT_LOG1. This continues all
+ * the way up until __VA_ARGS__ has 15 arguments, in which case the items in
+ * __VA_ARGS__ map to all of the values _1 through _F, which makes F (in
+ * the _EVENT_LOG macro) map to N, and this macro then expands to _EVENT_LOGF.
+ */
+
+#define _EVENT_LOG_VA_NUM_ARGS(BASE, _FMT, _1, _2, _3, _4, _5, _6, _7, _8, _9, \
+ _A, _B, _C, _D, _E, _F, N, ...) BASE ## N
+
+/* Take a variable number of args and replace with only the first */
+#define FIRST_ARG(a1, ...) a1
+
+/* base = _EVENT_LOG for no casting
+ * base = _EVENT_LOG_CAST for casting of fmt arguments to uint32.
+ * Only the first 4 arguments are cast to uint32; event_logn() is called
+ * if more than 4 arguments are present, and it internally assumes
+ * all arguments are uint32.
+ *
+ * The variable args in this call are the format string followed by the variable
+ * parameters for the format. E.g.
+ *
+ * __VA_ARGS__ = "answer: %d", 42
+ *
+ * This means __VA_ARGS__ always has one or more arguments. Guaranteeing a non-empty
+ * __VA_ARGS__ means the special use of " , ## __VA_ARGS__" is not required to deal
+ * with a dangling comma --- the comma will always be followed by at least the format
+ * string. The use of ## caused issues when the format args contained a function-like
+ * macro that expanded to more than one arg. The ## prevented macro expansion, so the
+ * _EVENT_LOG_VA_NUM_ARGS() calculation of the number of args was incorrect.
+ * Without the ##, the __VA_ARGS__ are macro-replaced, and the num args calculation is
+ * accurate.
+ *
+ * This macro is set up so that if __VA_ARGS__ is as short as possible, then the "0" will
+ * map to "N" in the _EVENT_LOG_VA_NUM_ARGS macro, and that macro then expands to become
+ * _EVENT_LOG0. As __VA_ARGS__ gets longer, the item that gets mapped to "N" gets
+ * pushed further and further up, so that by the time __VA_ARGS__ has 15 additional
+ * arguments, "F" maps to "N" in the _EVENT_LOG_VA_NUM_ARGS macro.
+ */
+#define _EVENT_LOG(base, tag, ...) \
+ static char logstr[] __attribute__ ((section(".logstrs"))) = FIRST_ARG(__VA_ARGS__); \
+ static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \
+ _EVENT_LOG_VA_NUM_ARGS(base, __VA_ARGS__, \
+ F, E, D, C, B, A, 9, 8, \
+ 7, 6, 5, 4, 3, 2, 1, 0) \
+ (tag, (int) &fmtnum, __VA_ARGS__)
+
+#define EVENT_LOG_FAST(tag, ...) \
+ do { \
+ if (event_log_tag_sets != NULL) { \
+ uint8 tag_flag = *(event_log_tag_sets + tag); \
+ if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \
+ _EVENT_LOG(_EVENT_LOG, tag, __VA_ARGS__); \
+ } \
+ } \
+ } while (0)
+
+#define EVENT_LOG_COMPACT(tag, ...) \
+ do { \
+ _EVENT_LOG(_EVENT_LOG, tag, __VA_ARGS__); \
+ } while (0)
+
+/* Event log macro with casting of arguments to uint32 */
+#define EVENT_LOG_FAST_CAST(tag, ...) \
+ do { \
+ if (event_log_tag_sets != NULL) { \
+ uint8 tag_flag = *(event_log_tag_sets + tag); \
+ if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \
+ _EVENT_LOG(_EVENT_LOG_CAST, tag, __VA_ARGS__); \
+ } \
+ } \
+ } while (0)
+
+#define EVENT_LOG_COMPACT_CAST(tag, ...) \
+ do { \
+ _EVENT_LOG(_EVENT_LOG_CAST, tag, __VA_ARGS__); \
+ } while (0)
+
+#define EVENT_LOG(tag, ...) EVENT_LOG_COMPACT(tag, __VA_ARGS__)
+
+#define EVENT_LOG_CAST(tag, ...) EVENT_LOG_COMPACT_CAST(tag, __VA_ARGS__)
+
+#define _EVENT_LOG_REMOVE_PAREN(...) __VA_ARGS__
+#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+/* Minimal event logging: logs only the return address of the caller for the
+ * given tag, via event_log_caller_return_address().
+ * Note that the if(0){..} below is to avoid compiler warnings
+ * due to unused variables caused by this macro.
+ */
+#define EVENT_LOG_RA(tag, args) \
+ do { \
+ if (0) { \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, args); \
+ } \
+ event_log_caller_return_address(tag); \
+ } while (0)
+
+#define EVENT_LOG_IF_READY(_tag, ...) \
+ do { \
+ if (event_log_is_ready()) { \
+ EVENT_LOG(_tag, __VA_ARGS__); \
+ } \
+ } \
+ while (0)
+
+#define EVENT_LOG_IS_ON(tag) (*(event_log_tag_sets + (tag)) & ~EVENT_LOG_TAG_FLAG_SET_MASK)
+#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG)
+
+#define EVENT_LOG_BUFFER(tag, buf, size) event_log_buffer(tag, buf, size)
+#define EVENT_DUMP event_log_buffer
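+
+/* Illustrative: dump a raw buffer only when the tag is enabled (tag name is an
+ * example; event_log_tag_sets must be non-NULL before EVENT_LOG_IS_ON is used):
+ *
+ * if (EVENT_LOG_IS_ON(EVENT_LOG_TAG_WL_ERROR)) {
+ *         EVENT_LOG_BUFFER(EVENT_LOG_TAG_WL_ERROR, pkt_data, pkt_len);
+ * }
+ */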
+/* EVENT_LOG_PRSRV_FLUSH() will be deprecated. Use EVENT_LOG_FORCE_FLUSH_ALL() instead */
+#define EVENT_LOG_PRSRV_FLUSH() event_log_force_flush_all()
+#define EVENT_LOG_FORCE_FLUSH_ALL() event_log_force_flush_all()
+
+#ifdef PRESERVE_LOG
+#define EVENT_LOG_FORCE_FLUSH_PRSRV_LOG_ALL() event_log_force_flush_preserve_all()
+#else
+#define EVENT_LOG_FORCE_FLUSH_PRSRV_LOG_ALL()
+#endif /* PRESERVE_LOG */
+
+extern uint8 *event_log_tag_sets;
+
+extern int event_log_init(osl_t *osh);
+extern int event_log_set_init(osl_t *osh, int set_num, int size);
+extern int event_log_set_expand(osl_t *osh, int set_num, int size);
+extern int event_log_set_shrink(osl_t *osh, int set_num, int size);
+
+extern int event_log_tag_start(int tag, int set_num, int flags);
+extern int event_log_tag_set_retrieve(int tag);
+extern int event_log_tag_flags_retrieve(int tag);
+extern int event_log_tag_stop(int tag);
+
+typedef void (*event_log_logtrace_trigger_fn_t)(void *ctx);
+void event_log_set_logtrace_trigger_fn(event_log_logtrace_trigger_fn_t fn, void *ctx);
+
+event_log_top_t *event_log_get_top(void);
+
+extern int event_log_get(int set_num, int buflen, void *buf);
+
+extern uint8 *event_log_next_logtrace(int set_num);
+extern uint32 event_log_logtrace_max_buf_count(int set_num);
+extern int event_log_set_type(int set_num, uint8 *type, int is_get);
+extern int event_log_flush_set(wl_el_set_flush_prsrv_t *flush, int is_set);
+
+extern void event_log0(int tag, int fmtNum);
+extern void event_log1(int tag, int fmtNum, uint32 t1);
+extern void event_log2(int tag, int fmtNum, uint32 t1, uint32 t2);
+extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3);
+extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4);
+extern void event_logn(int num_args, int tag, int fmtNum, ...);
+#ifdef ROM_COMPAT_MSCH_PROFILER
+/* For compatibility with ROM: old msch event log function passing parameters on the stack */
+extern void event_logv(int num_args, int tag, int fmtNum, va_list ap);
+#endif /* ROM_COMPAT_MSCH_PROFILER */
+
+extern void event_log_time_sync(uint32 ms);
+extern bool event_log_time_sync_required(void);
+extern void event_log_cpu_freq_changed(void);
+extern void event_log_buffer(int tag, const uint8 *buf, int size);
+extern void event_log_caller_return_address(int tag);
+extern int event_log_set_destination_set(int set, event_log_set_destination_t dest);
+extern event_log_set_destination_t event_log_set_destination_get(int set);
+extern int event_log_set_sub_destination_set(uint set, event_log_set_sub_destination_t dest);
+extern event_log_set_sub_destination_t event_log_set_sub_destination_get(uint set);
+extern int event_log_flush_log_buffer(int set);
+extern int event_log_force_flush_all(void);
+extern int event_log_force_flush(int set);
+
+extern uint16 event_log_get_available_space(int set);
+extern bool event_log_is_tag_valid(int tag);
+/* returns number of blocks available for writing */
+extern int event_log_free_blocks_get(int set);
+extern bool event_log_is_ready(void);
+extern bool event_log_is_preserve_active(uint set);
+extern uint event_log_get_percentage_available_space(uint set);
+extern bool event_log_set_watermark_reached(int set_num);
+
+extern void event_log_set_config(int set, uint32 period, uint16 watermark, uint32 config_flags);
+#ifdef EVENTLOG_D3_PRESERVE
+#define EVENT_LOG_PRESERVE_EXPAND_SIZE 5u
+extern int event_log_preserve_set_shrink(osl_t *osh, int set_num);
+extern void event_log_d3_preserve_active_set(osl_t* osh, int set, bool active);
+extern void event_log_d3_prsv_set_all(osl_t
*osh, bool active);
+#endif /* EVENTLOG_D3_PRESERVE */
+
+#ifdef EVENTLOG_PRSV_PERIODIC
+#define EVENT_LOG_SET_SIZE_INVALID 0xFFFFFFFFu
+#define EVENT_LOG_DEFAULT_PERIOD 3000u
+extern void event_log_prsv_periodic_wd_trigger(osl_t *osh);
+#endif /* EVENTLOG_PRSV_PERIODIC */
+
+/* Enable/disable rate health check for a set */
+#ifdef EVENT_LOG_RATE_HC
+extern int event_log_enable_hc_for_set(int set_num, bool enable);
+extern void event_log_set_hc_rate(uint16 num_prints);
+extern uint16 event_log_get_hc_rate(void);
+#endif /* EVENT_LOG_RATE_HC */
+
+/* Configure a set with the ability to send partial log blocks */
+extern int event_log_send_partial_block_set(int set_num);
+
+/* Get the number of log blocks associated with a log set */
+extern int event_log_num_blocks_get(int set, uint32 *num_blocks);
+
+/* Get a log buffer of a desired set */
+extern int event_log_block_get(int set, uint32 **buf, uint16 *len);
+extern uint32 event_log_get_maxsets(void);
+
+/* For all other non-logtrace consumers */
+extern int event_log_set_is_valid(int set);
+
+/* To be used by logtrace only */
+extern int event_log_get_num_sets(void);
+
+/* Given a buffer, return the set it belongs to */
+extern int event_log_get_set_for_buffer(const void *buf);
+
+extern int event_log_flush_multiple_sets(const int *sets, uint16 num_sets);
+extern int event_log_force_flush_preserve_all(void);
+extern int event_log_get_iovar_handler(int set);
+extern int event_log_enable_hostmem_access(bool hostmem_access_enabled);
+extern int event_log_enable_event_trace(bool event_trace_enabled);
+#endif /* EVENT_LOG_COMPILE */
+
+#endif /* !EVENT_LOG_DUMPER && !DHD_EFI */
+
+#endif /* BCMDRIVER */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _EVENT_LOG_H_ */
diff --git a/bcmdhd.101.10.361.x/include/event_log_payload.h b/bcmdhd.101.10.361.x/include/event_log_payload.h
new file mode 100755
index 0000000..4485fcc
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/event_log_payload.h
@@ -0,0 +1,1775 @@
+/*
+ * EVENT_LOG System Definitions
+ *
+ * This file describes the payloads of event log entries that are data buffers
+ * rather than formatted string entries. The contents are generally XTLVs.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _EVENT_LOG_PAYLOAD_H_
+#define _EVENT_LOG_PAYLOAD_H_
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <ethernet.h>
+#include <event_log.h>
+
+/**
+ * A (legacy) timestamp message
+ */
+typedef struct ts_message {
+ uint32 timestamp;
+ uint32 cyclecount;
+} ts_msg_t;
+
+/**
+ * Enhanced timestamp message
+ */
+typedef struct enhanced_ts_message {
+ uint32 version;
+ /* More data, depending on version */
+ uint8 data[];
+} ets_msg_t;
+
+#define ENHANCED_TS_MSG_VERSION_1 (1u)
+
+/**
+ * Enhanced timestamp message, version 1
+ */
+typedef struct enhanced_ts_message_v1 {
+ uint32 version;
+ uint32 timestamp; /* PMU time, in milliseconds */
+ uint32 cyclecount;
+ uint32 cpu_freq;
+} ets_msg_v1_t;
+
+#define EVENT_LOG_XTLV_ID_STR 0 /**< XTLV ID for a string */
+#define EVENT_LOG_XTLV_ID_TXQ_SUM 1 /**< XTLV ID for txq_summary_t */
+#define EVENT_LOG_XTLV_ID_SCBDATA_SUM 2 /**< XTLV ID for scb_subq_summary_t */
+#define EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM 3 /**< XTLV ID for scb_ampdu_tx_summary_t */
+#define EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM 4 /**< XTLV ID for bsscfg_q_summary_t */
+#define EVENT_LOG_XTLV_ID_UCTXSTATUS 5 /**< XTLV ID for ucode TxStatus array */
+#define EVENT_LOG_XTLV_ID_TXQ_SUM_V2 6 /**< XTLV ID for txq_summary_v2_t */
+#define EVENT_LOG_XTLV_ID_BUF 7 /**< XTLV ID for event_log_buffer_t */
+
+/**
+ * An XTLV holding a string
+ * The string is not NUL-terminated; its length is the XTLV len.
+ */
+typedef struct xtlv_string {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_STR */
+ uint16 len; /* XTLV Len (String length) */
+ char str[1]; /* var len array characters */
+} xtlv_string_t;
+
+#define XTLV_STRING_FULL_LEN(str_len) (BCM_XTLV_HDR_SIZE + (str_len) * sizeof(char))
+
+/**
+ * Summary for a single TxQ context
+ * Two of these will be used per TxQ context---one for the high TxQ, and one for
+ * the low txq that contains DMA-prepared pkts. The high TxQ is a full multi-precedence
+ * queue and also has a BSSCFG map to identify the BSSCFGS associated with the queue context.
+ * The low txq counterpart does not populate the BSSCFG map.
+ * The excursion queue will have no bsscfgs associated and is the first queue dumped.
+ */
+typedef struct txq_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM */
+ uint16 len; /* XTLV Len */
+ uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */
+ uint32 stopped; /* flow control bitmap */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint8 pad;
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} txq_summary_t;
+
+#define TXQ_SUMMARY_LEN (OFFSETOF(txq_summary_t, plen))
+#define TXQ_SUMMARY_FULL_LEN(num_q) (TXQ_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+typedef struct txq_summary_v2 {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM_V2 */
+ uint16 len; /* XTLV Len */
+ uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */
+ uint32 stopped; /* flow control bitmap */
+ uint32 hw_stopped; /* flow control bitmap */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint8 pad;
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} txq_summary_v2_t;
+
+#define TXQ_SUMMARY_V2_LEN (OFFSETOF(txq_summary_v2_t, plen))
+#define TXQ_SUMMARY_V2_FULL_LEN(num_q) (TXQ_SUMMARY_V2_LEN + (num_q) * sizeof(uint16))
+
+/**
+ * Summary for tx datapath of an SCB cubby
+ * This is a generic summary structure (one size fits all) with
+ * a cubby ID and sub-ID to differentiate SCB cubby types and possible sub-queues.
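+ * The record is variable length: a summary covering prec_count sub-queues
+ * occupies SCB_SUBQ_SUMMARY_FULL_LEN(prec_count) bytes (see the macros below).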
+ */
+typedef struct scb_subq_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_SUM */
+ uint16 len; /* XTLV Len */
+ uint32 flags; /* cubby specific flags */
+ uint8 cubby_id; /* ID registered for cubby */
+ uint8 sub_id; /* sub ID if a cubby has more than one queue */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint8 pad;
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} scb_subq_summary_t;
+
+#define SCB_SUBQ_SUMMARY_LEN (OFFSETOF(scb_subq_summary_t, plen))
+#define SCB_SUBQ_SUMMARY_FULL_LEN(num_q) (SCB_SUBQ_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+/* scb_subq_summary_t.flags for APPS */
+#define SCBDATA_APPS_F_PS 0x00000001
+#define SCBDATA_APPS_F_PSPEND 0x00000002
+#define SCBDATA_APPS_F_INPVB 0x00000004
+#define SCBDATA_APPS_F_APSD_USP 0x00000008
+#define SCBDATA_APPS_F_TXBLOCK 0x00000010
+#define SCBDATA_APPS_F_APSD_HPKT_TMR 0x00000020
+#define SCBDATA_APPS_F_APSD_TX_PEND 0x00000040
+#define SCBDATA_APPS_F_INTRANS 0x00000080
+#define SCBDATA_APPS_F_OFF_PEND 0x00000100
+#define SCBDATA_APPS_F_OFF_BLOCKED 0x00000200
+#define SCBDATA_APPS_F_OFF_IN_PROG 0x00000400
+
+/**
+ * Summary for tx datapath AMPDU SCB cubby
+ * This is a specific data structure describing the AMPDU datapath state for an SCB,
+ * used instead of scb_subq_summary_t.
+ * Info is for one TID, so one will be dumped per BA TID active for an SCB.
+ */
+typedef struct scb_ampdu_tx_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM */
+ uint16 len; /* XTLV Len */
+ uint32 flags; /* misc flags */
+ uint8 tid; /* initiator TID (priority) */
+ uint8 ba_state; /* internal BA state */
+ uint8 bar_cnt; /* number of bars sent with no progress */
+ uint8 retry_bar; /* reason code if bar to be retried at watchdog */
+ uint16 barpending_seq; /* seqnum for bar */
+ uint16 bar_ackpending_seq; /* seqnum of bar for which ack is pending */
+ uint16 start_seq; /* seqnum of the first unacknowledged packet */
+ uint16 max_seq; /* max unacknowledged seqnum sent */
+ uint32 released_bytes_inflight; /* number of released bytes still in flight */
+ uint32 released_bytes_target;
+} scb_ampdu_tx_summary_t;
+
+/* scb_ampdu_tx_summary.flags defs */
+#define SCBDATA_AMPDU_TX_F_BAR_ACKPEND 0x00000001 /* bar_ackpending */
+
+/** XTLV struct to summarize a BSSCFG's packet queue */
+typedef struct bsscfg_q_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM */
+ uint16 len; /* XTLV Len */
+ struct ether_addr BSSID; /* BSSID */
+ uint8 bsscfg_idx; /* bsscfg index */
+ uint8 type; /* bsscfg type enumeration: BSSCFG_TYPE_XXX */
+ uint8 subtype; /* bsscfg subtype enumeration: BSSCFG_SUBTYPE_XXX */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} bsscfg_q_summary_t;
+
+#define BSSCFG_Q_SUMMARY_LEN (OFFSETOF(bsscfg_q_summary_t, plen))
+#define BSSCFG_Q_SUMMARY_FULL_LEN(num_q) (BSSCFG_Q_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+/**
+ * An XTLV holding a TxStats array
+ * TxStatus entries are 8 or 16 bytes; their size in words (2 or 4) is given in
+ * the entry_size field.
+ * Array is uint32 words + */ +typedef struct xtlv_uc_txs { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_UCTXSTATUS */ + uint16 len; /* XTLV Len */ + uint8 entry_size; /* num uint32 words per entry */ + uint8 pad[3]; /* reserved, zero */ + uint32 w[1]; /* var len array of words */ +} xtlv_uc_txs_t; + +#define XTLV_UCTXSTATUS_LEN (OFFSETOF(xtlv_uc_txs_t, w)) +#define XTLV_UCTXSTATUS_FULL_LEN(words) (XTLV_UCTXSTATUS_LEN + (words) * sizeof(uint32)) + +#define SCAN_SUMMARY_VERSION_1 1u +#ifndef WLSCAN_SUMMARY_VERSION_ALIAS +#define SCAN_SUMMARY_VERSION SCAN_SUMMARY_VERSION_1 +#endif +/* Scan flags */ +#define SCAN_SUM_CHAN_INFO 0x1 +/* Scan_sum flags */ +#define BAND5G_SIB_ENAB 0x2 +#define BAND2G_SIB_ENAB 0x4 +#define PARALLEL_SCAN 0x8 +#define SCAN_ABORT 0x10 +/* Note: Definitions being reused in chan_info as SCAN_SUM_SCAN_CORE need clean up */ +#define SC_LOWSPAN_SCAN 0x20 +/* Note: Definitions being reused in scan summary info as WL_SSUM_CLIENT_MASK need clean up */ +#define SC_SCAN 0x40 + +#define WL_SSUM_CLIENT_MASK 0x1C0u /* bit 8 - 6 */ +#define WL_SSUM_CLIENT_SHIFT 6u /* shift client scan opereration */ + +#define WL_SSUM_MODE_MASK 0xE00u /* bit 11 - 9 */ +#define WL_SSUM_MODE_SHIFT 9u /* shift mode scan operation */ + +/* Common bits for channel and scan summary info */ +#define SCAN_SUM_CHAN_RESHED 0x1000 /* Bit 12 as resched scan for chaninfo and scan summary */ + +#define WL_SSUM_CLIENT_ASSOCSCAN 0x0u /* Log as scan requested client is assoc scan */ +#define WL_SSUM_CLIENT_ROAMSCAN 0x1u /* Log as scan requested client is roam scan */ +#define WL_SSUM_CLIENT_FWSCAN 0x2u /* Log as scan requested client is other fw scan */ +#define WL_SSUM_CLIENT_HOSTSCAN 0x3u /* Log as scan requested client is host scan */ + +#define WL_SSUM_SCANFLAG_INVALID 0x7u /* Log for invalid scan client or mode */ + +/* scan_channel_info flags */ +#define ACTIVE_SCAN_SCN_SUM 0x2 +#define SCAN_SUM_WLC_CORE0 0x4 +#define SCAN_SUM_WLC_CORE1 0x8 +#define HOME_CHAN 0x10 +#define SCAN_SUM_SCAN_CORE 0x20 + +typedef struct wl_scan_ssid_info +{ + uint8 ssid_len; /* the length of SSID */ + uint8 ssid[32]; /* SSID string */ +} wl_scan_ssid_info_t; + +typedef struct wl_scan_channel_info { + uint16 chanspec; /* chanspec scanned */ + uint16 reserv; + uint32 start_time; /* Scan start time in + * milliseconds for the chanspec + * or home_dwell time start + */ + uint32 end_time; /* Scan end time in + * milliseconds for the chanspec + * or home_dwell time end + */ + uint16 probe_count; /* No of probes sent out. For future use + */ + uint16 scn_res_count; /* Count of scan_results found per + * channel. For future use + */ +} wl_scan_channel_info_t; + +typedef struct wl_scan_summary_info { + uint32 total_chan_num; /* Total number of channels scanned */ + uint32 scan_start_time; /* Scan start time in milliseconds */ + uint32 scan_end_time; /* Scan end time in milliseconds */ + wl_scan_ssid_info_t ssid[1]; /* SSID being scanned in current + * channel. For future use + */ +} wl_scan_summary_info_t; + +struct wl_scan_summary { + uint8 version; /* Version */ + uint8 reserved; + uint16 len; /* Length of the data buffer including SSID + * list. 
+ */ + uint16 sync_id; /* Scan Sync ID */ + uint16 scan_flags; /* flags [0] or SCAN_SUM_CHAN_INFO = */ + /* channel_info, if not set */ + /* it is scan_summary_info */ + /* when channel_info is used, */ + /* the following flag bits are overridden: */ + /* flags[1] or ACTIVE_SCAN_SCN_SUM = active channel if set */ + /* passive if not set */ + /* flags[2] or WLC_CORE0 = if set, represents wlc_core0 */ + /* flags[3] or WLC_CORE1 = if set, represents wlc_core1 */ + /* flags[4] or HOME_CHAN = if set, represents home-channel */ + /* flags[5] or SCAN_SUM_SCAN_CORE = if set, + * represents chan_info from scan core. + */ + /* flags[12] SCAN_SUM_CHAN_RESHED indicate scan rescheduled */ + /* flags[6:11, 13:15] = reserved */ + /* when scan_summary_info is used, */ + /* the following flag bits are used: */ + /* flags[1] or BAND5G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 5G band */ + /* flags[2] or BAND2G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 2G band */ + /* flags[3] or PARALLEL_SCAN = Parallel scan enabled or not */ + /* flags[4] or SCAN_ABORT = SCAN_ABORTED scenario */ + /* flags[5] = reserved */ + /* flags[6:8] is used as count value to identify SCAN CLIENT + * WL_SSUM_CLIENT_ASSOCSCAN 0x0u, WL_SSUM_CLIENT_ROAMSCAN 0x1u, + * WL_SSUM_CLIENT_FWSCAN 0x2u, WL_SSUM_CLIENT_HOSTSCAN 0x3u + */ + /* flags[9:11] is used as count value to identify SCAN MODE + * WL_SCAN_MODE_HIGH_ACC 0u, WL_SCAN_MODE_LOW_SPAN 1u, + * WL_SCAN_MODE_LOW_POWER 2u + */ + /* flags[12] SCAN_SUM_CHAN_RESHED indicate scan rescheduled */ + /* flags[13:15] = reserved */ + union { + wl_scan_channel_info_t scan_chan_info; /* scan related information + * for each channel scanned + */ + wl_scan_summary_info_t scan_sum_info; /* Cumulative scan related + * information. + */ + } u; +}; + +#define SCAN_SUMMARY_VERSION_2 2u +struct wl_scan_summary_v2 { + uint8 version; /* Version */ + uint8 reserved; + uint16 len; /* Length of the data buffer including SSID + * list. + */ + uint16 sync_id; /* Scan Sync ID */ + uint16 scan_flags; /* flags [0] or SCAN_SUM_CHAN_INFO = */ + /* channel_info, if not set */ + /* it is scan_summary_info */ + /* when channel_info is used, */ + /* the following flag bits are overridden: */ + /* flags[1] or ACTIVE_SCAN_SCN_SUM = active channel if set */ + /* passive if not set */ + /* flags[2] or WLC_CORE0 = if set, represents wlc_core0 */ + /* flags[3] or WLC_CORE1 = if set, represents wlc_core1 */ + /* flags[4] or HOME_CHAN = if set, represents home-channel */ + /* flags[5] or SCAN_SUM_SCAN_CORE = if set, + * represents chan_info from scan core. 
+ */ + /* flags[12] SCAN_SUM_CHAN_RESHED indicate scan rescheduled */ + /* flags[6:11, 13:15] = reserved */ + /* when scan_summary_info is used, */ + /* the following flag bits are used: */ + /* flags[1] or BAND5G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 5G band */ + /* flags[2] or BAND2G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 2G band */ + /* flags[3] or PARALLEL_SCAN = Parallel scan enabled or not */ + /* flags[4] or SCAN_ABORT = SCAN_ABORTED scenario */ + /* flags[5] = reserved */ + /* flags[6:8] is used as count value to identify SCAN CLIENT + * WL_SSUM_CLIENT_ASSOCSCAN 0x0u, WL_SSUM_CLIENT_ROAMSCAN 0x1u, + * WL_SSUM_CLIENT_FWSCAN 0x2u, WL_SSUM_CLIENT_HOSTSCAN 0x3u + */ + /* flags[9:11] is used as count value to identify SCAN MODE + * WL_SCAN_MODE_HIGH_ACC 0u, WL_SCAN_MODE_LOW_SPAN 1u, + * WL_SCAN_MODE_LOW_POWER 2u + */ + /* flags[12] SCAN_SUM_CHAN_RESHED indicate scan rescheduled */ + /* flags[13:15] = reserved */ + /* scan_channel_ctx_t chan_cnt; */ + uint8 channel_cnt_aux; /* Number of channels to be scanned on Aux core */ + uint8 channel_cnt_main; /* Number of channels to be scanned on Main core */ + uint8 channel_cnt_sc; /* Number of channels to be scanned on Scan core */ + uint8 active_channel_cnt; + uint8 passive_channel_cnt; + char pad[3]; /* Pad to keep it 32 bit aligned */ + union { + wl_scan_channel_info_t scan_chan_info; /* scan related information + * for each channel scanned + */ + wl_scan_summary_info_t scan_sum_info; /* Cumulative scan related + * information. + */ + } u; +}; +/* Channel switch log record structure + * Host may map the following structure on channel switch event log record + * received from dongle. Note that all payload entries in event log record are + * uint32/int32. + */ +typedef struct wl_chansw_event_log_record { + uint32 time; /* Time in us */ + uint32 old_chanspec; /* Old channel spec */ + uint32 new_chanspec; /* New channel spec */ + uint32 chansw_reason; /* Reason for channel change */ + int32 dwell_time; +} wl_chansw_event_log_record_t; + +typedef struct wl_chansw_event_log_record_v2 { + uint32 time; /* Time in us */ + uint32 old_chanspec; /* Old channel spec */ + uint32 new_chanspec; /* New channel spec */ + uint32 chansw_reason; /* Reason for channel change */ + int32 dwell_time; + uint32 core; + int32 phychanswtime; /* channel switch time */ +} wl_chansw_event_log_record_v2_t; + +/* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */ +typedef enum { + WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_RXMCSx2 = 1, + WL_AMPDU_STATS_TYPE_RXMCSx3 = 2, + WL_AMPDU_STATS_TYPE_RXMCSx4 = 3, + WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_RXVHTx2 = 5, + WL_AMPDU_STATS_TYPE_RXVHTx3 = 6, + WL_AMPDU_STATS_TYPE_RXVHTx4 = 7, + WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_TXMCSx2 = 9, + WL_AMPDU_STATS_TYPE_TXMCSx3 = 10, + WL_AMPDU_STATS_TYPE_TXMCSx4 = 11, + WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_TXVHTx2 = 13, + WL_AMPDU_STATS_TYPE_TXVHTx3 = 14, + WL_AMPDU_STATS_TYPE_TXVHTx4 = 15, + WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER 
(for all MCS rates) */ + WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_RXDENS = 24, /* RX AMPDU density */ + WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */ + WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */ + WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */ + WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */ + WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */ + WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */ + WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */ + WL_AMPDU_STATS_TYPE_RX_HE_SUOK = 32, /* DL SU MPDU frame per MCS */ + WL_AMPDU_STATS_TYPE_RX_HE_SU_DENS = 33, /* DL SU AMPDU DENSITY */ + WL_AMPDU_STATS_TYPE_RX_HE_MUMIMOOK = 34, /* DL MUMIMO Frame per MCS */ + WL_AMPDU_STATS_TYPE_RX_HE_MUMIMO_DENS = 35, /* DL MUMIMO AMPDU Density */ + WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMAOK = 36, /* DL OFDMA Frame per MCS */ + WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_DENS = 37, /* DL OFDMA AMPDU Density */ + WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_HIST = 38, /* DL OFDMA frame RU histogram */ + WL_AMPDU_STATS_TYPE_TX_HE_MCSALL = 39, /* TX HE (SU+MU) frames, all rates */ + WL_AMPDU_STATS_TYPE_TX_HE_MCSOK = 40, /* TX HE (SU+MU) frames succeeded */ + WL_AMPDU_STATS_TYPE_TX_HE_MUALL = 41, /* TX MU (UL OFDMA) frames all rates */ + WL_AMPDU_STATS_TYPE_TX_HE_MUOK = 42, /* TX MU (UL OFDMA) frames succeeded */ + WL_AMPDU_STATS_TYPE_TX_HE_RUBW = 43, /* TX UL RU by BW histogram */ + WL_AMPDU_STATS_TYPE_TX_HE_PADDING = 44, /* TX padding total (single value) */ + WL_AMPDU_STATS_TYPE_RX_COUNTERS = 45, /* Additional AMPDU_RX module counters + * per-slice + */ + WL_AMPDU_STATS_MAX_CNTS = 64 +} wl_ampdu_stat_enum_t; +typedef struct { + uint16 type; /* AMPDU statistics sub-type */ + uint16 len; /* Number of 32-bit counters */ + uint32 counters[WL_AMPDU_STATS_MAX_CNTS]; +} wl_ampdu_stats_generic_t; + +typedef wl_ampdu_stats_generic_t wl_ampdu_stats_rx_t; +typedef wl_ampdu_stats_generic_t wl_ampdu_stats_tx_t; + +typedef struct { + uint16 type; /* AMPDU statistics sub-type */ + uint16 len; /* Number of 32-bit counters + 2 */ + uint32 total_ampdu; + uint32 total_mpdu; + uint32 aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1]; +} wl_ampdu_stats_aggrsz_t; + +/* AMPDU_RX module's per-slice counters. 
Sent by ecounters as subtype of + * WL_IFSTATS_XTLV_RX_AMPDU_STATS ecounters type + */ +#define WLC_AMPDU_RX_STATS_V1 (1u) +typedef struct wlc_ampdu_rx_stats { + uint16 version; + uint16 len; + /* responder side counters */ + uint32 rxampdu; /**< ampdus recd */ + uint32 rxmpdu; /**< mpdus recd in a ampdu */ + uint32 rxht; /**< mpdus recd at ht rate and not in a ampdu */ + uint32 rxlegacy; /**< mpdus recd at legacy rate */ + uint32 rxampdu_sgi; /**< ampdus recd with sgi */ + uint32 rxampdu_stbc; /**< ampdus recd with stbc */ + uint32 rxnobapol; /**< mpdus recd without a ba policy */ + uint32 rxholes; /**< missed seq numbers on rx side */ + uint32 rxqed; /**< pdus buffered before sending up */ + uint32 rxdup; /**< duplicate pdus */ + uint32 rxstuck; /**< watchdog bailout for stuck state */ + uint32 rxoow; /**< out of window pdus */ + uint32 rxoos; /**< out of seq pdus */ + uint32 rxaddbareq; /**< addba req recd */ + uint32 txaddbaresp; /**< addba resp sent */ + uint32 rxbar; /**< bar recd */ + uint32 txba; /**< ba sent */ + + /* general: both initiator and responder */ + uint32 rxunexp; /**< unexpected packets */ + uint32 txdelba; /**< delba sent */ + uint32 rxdelba; /**< delba recd */ +} wlc_ampdu_rx_stats_t; + +/* Sub-block type for WL_IFSTATS_XTLV_HE_TXMU_STATS */ +typedef enum { + /* Reserve 0 to avoid potential concerns */ + WL_HE_TXMU_STATS_TYPE_TIME = 1, /* per-dBm, total usecs transmitted */ + WL_HE_TXMU_STATS_TYPE_PAD_TIME = 2, /* per-dBm, padding usecs transmitted */ +} wl_he_txmu_stat_enum_t; +#define WL_IFSTATS_HE_TXMU_MAX 32u + +/* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */ +#define WL_MSCH_PROFILER_START 0 /* start event check */ +#define WL_MSCH_PROFILER_EXIT 1 /* exit event check */ +#define WL_MSCH_PROFILER_REQ 2 /* request event */ +#define WL_MSCH_PROFILER_CALLBACK 3 /* call back event */ +#define WL_MSCH_PROFILER_MESSAGE 4 /* message event */ +#define WL_MSCH_PROFILER_PROFILE_START 5 +#define WL_MSCH_PROFILER_PROFILE_END 6 +#define WL_MSCH_PROFILER_REQ_HANDLE 7 +#define WL_MSCH_PROFILER_REQ_ENTITY 8 +#define WL_MSCH_PROFILER_CHAN_CTXT 9 +#define WL_MSCH_PROFILER_EVENT_LOG 10 +#define WL_MSCH_PROFILER_REQ_TIMING 11 +#define WL_MSCH_PROFILER_TYPE_MASK 0x00ff +#define WL_MSCH_PROFILER_WLINDEX_SHIFT 8 +#define WL_MSCH_PROFILER_WLINDEX_MASK 0x0f00 +#define WL_MSCH_PROFILER_VER_SHIFT 12 +#define WL_MSCH_PROFILER_VER_MASK 0xf000 + +/* MSCH Event data current verion */ +#define WL_MSCH_PROFILER_VER 2 + +/* msch version history */ +#define WL_MSCH_PROFILER_RSDB_VER 1 +#define WL_MSCH_PROFILER_REPORT_VER 2 + +/* msch collect header size */ +#define WL_MSCH_PROFILE_HEAD_SIZE OFFSETOF(msch_collect_tlv_t, value) + +/* msch event log header size */ +#define WL_MSCH_EVENT_LOG_HEAD_SIZE OFFSETOF(msch_event_log_profiler_event_data_t, data) + +/* MSCH data buffer size */ +#define WL_MSCH_PROFILER_BUFFER_SIZE 512 + +/* request type used in wlc_msch_req_param_t struct */ +#define WL_MSCH_RT_BOTH_FIXED 0 /* both start and end time is fixed */ +#define WL_MSCH_RT_START_FLEX 1 /* start time is flexible and duration is fixed */ +#define WL_MSCH_RT_DUR_FLEX 2 /* start time is fixed and end time is flexible */ +#define WL_MSCH_RT_BOTH_FLEX 3 /* Both start and duration is flexible */ + +/* Flags used in wlc_msch_req_param_t struct */ +#define WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS (1 << 0) /* Don't break up channels in chanspec_list */ +#define WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS (1 << 1) /* No slot end if slots are continous */ +#define WL_MSCH_REQ_FLAGS_PREMTABLE (1 << 2) /* Req can be pre-empted 
by PREMT_CURTS req */ +#define WL_MSCH_REQ_FLAGS_PREMT_CURTS (1 << 3) /* Pre-empt request at the end of curts */ +#define WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE (1 << 4) /* Pre-empt cur_ts immediately */ + +/* Requested slot Callback states + * req->pend_slot/cur_slot->flags + */ +#define WL_MSCH_RC_FLAGS_ONCHAN_FIRE (1 << 0) +#define WL_MSCH_RC_FLAGS_START_FIRE_DONE (1 << 1) +#define WL_MSCH_RC_FLAGS_END_FIRE_DONE (1 << 2) +#define WL_MSCH_RC_FLAGS_ONFIRE_DONE (1 << 3) +#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_START (1 << 4) +#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_END (1 << 5) +#define WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE (1 << 6) + +/* Request entity flags */ +#define WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE (1 << 0) + +/* Request Handle flags */ +#define WL_MSCH_REQ_HDL_FLAGS_NEW_REQ (1 << 0) /* req_start callback */ + +/* MSCH state flags (msch_info->flags) */ +#define WL_MSCH_STATE_IN_TIEMR_CTXT 0x1 +#define WL_MSCH_STATE_SCHD_PENDING 0x2 + +/* MSCH callback type */ +#define WL_MSCH_CT_REQ_START 0x1 +#define WL_MSCH_CT_ON_CHAN 0x2 +#define WL_MSCH_CT_SLOT_START 0x4 +#define WL_MSCH_CT_SLOT_END 0x8 +#define WL_MSCH_CT_SLOT_SKIP 0x10 +#define WL_MSCH_CT_OFF_CHAN 0x20 +#define WL_MSCH_CT_OFF_CHAN_DONE 0x40 +#define WL_MSCH_CT_REQ_END 0x80 +#define WL_MSCH_CT_PARTIAL 0x100 +#define WL_MSCH_CT_PRE_ONCHAN 0x200 +#define WL_MSCH_CT_PRE_REQ_START 0x400 + +/* MSCH command bits */ +#define WL_MSCH_CMD_ENABLE_BIT 0x01 +#define WL_MSCH_CMD_PROFILE_BIT 0x02 +#define WL_MSCH_CMD_CALLBACK_BIT 0x04 +#define WL_MSCH_CMD_REGISTER_BIT 0x08 +#define WL_MSCH_CMD_ERROR_BIT 0x10 +#define WL_MSCH_CMD_DEBUG_BIT 0x20 +#define WL_MSCH_CMD_INFOM_BIT 0x40 +#define WL_MSCH_CMD_TRACE_BIT 0x80 +#define WL_MSCH_CMD_ALL_BITS 0xfe +#define WL_MSCH_CMD_SIZE_MASK 0x00ff0000 +#define WL_MSCH_CMD_SIZE_SHIFT 16 +#define WL_MSCH_CMD_VER_MASK 0xff000000 +#define WL_MSCH_CMD_VER_SHIFT 24 + +/* maximum channels returned by the get valid channels iovar */ +#define WL_MSCH_NUMCHANNELS 64 + +typedef struct msch_collect_tlv { + uint16 type; + uint16 size; + char value[1]; +} msch_collect_tlv_t; + +typedef struct msch_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; +} msch_profiler_event_data_t; + +typedef struct msch_start_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 status; +} msch_start_profiler_event_data_t; + +typedef struct msch_message_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + char message[1]; /* message */ +} msch_message_profiler_event_data_t; + +typedef struct msch_event_log_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + event_log_hdr_t hdr; /* event log header */ + uint32 data[9]; /* event data */ +} msch_event_log_profiler_event_data_t; + +typedef struct msch_req_param_profiler_event_data { + uint16 flags; /* Describe various request properties */ + uint8 req_type; /* Describe start and end time flexiblilty */ + uint8 priority; /* Define the request priority */ + uint32 start_time_l; /* Requested start time offset in us unit */ + uint32 start_time_h; + uint32 duration; /* Requested duration in us unit */ + uint32 interval; /* Requested periodic interval in us unit, + * 0 means non-periodic + */ + union { + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + struct { + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */ + uint32 hi_prio_time_l; + uint32 hi_prio_time_h; + uint32 hi_prio_interval; 
/* repeated high priority interval */ + } bf; + } flex; +} msch_req_param_profiler_event_data_t; + +typedef struct msch_req_timing_profiler_event_data { + uint32 p_req_timing; + uint32 p_prev; + uint32 p_next; + uint16 flags; + uint16 timeslot_ptr; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 start_time_l; + uint32 start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 p_timeslot; +} msch_req_timing_profiler_event_data_t; + +typedef struct msch_chan_ctxt_profiler_event_data { + uint32 p_chan_ctxt; + uint32 p_prev; + uint32 p_next; + uint16 chanspec; + uint16 bf_sch_pending; + uint32 bf_link_prev; + uint32 bf_link_next; + uint32 onchan_time_l; + uint32 onchan_time_h; + uint32 actual_onchan_dur_l; + uint32 actual_onchan_dur_h; + uint32 pend_onchan_dur_l; + uint32 pend_onchan_dur_h; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 bf_entity_list_cnt; + uint16 bf_entity_list_ptr; + uint32 bf_skipped_count; +} msch_chan_ctxt_profiler_event_data_t; + +typedef struct msch_req_entity_profiler_event_data { + uint32 p_req_entity; + uint32 req_hdl_link_prev; + uint32 req_hdl_link_next; + uint32 chan_ctxt_link_prev; + uint32 chan_ctxt_link_next; + uint32 rt_specific_link_prev; + uint32 rt_specific_link_next; + uint32 start_fixed_link_prev; + uint32 start_fixed_link_next; + uint32 both_flex_list_prev; + uint32 both_flex_list_next; + uint16 chanspec; + uint16 priority; + uint16 cur_slot_ptr; + uint16 pend_slot_ptr; + uint16 pad; + uint16 chan_ctxt_ptr; + uint32 p_chan_ctxt; + uint32 p_req_hdl; + uint32 bf_last_serv_time_l; + uint32 bf_last_serv_time_h; + uint16 onchan_chn_idx; + uint16 cur_chn_idx; + uint32 flags; + uint32 actual_start_time_l; + uint32 actual_start_time_h; + uint32 curts_fire_time_l; + uint32 curts_fire_time_h; +} msch_req_entity_profiler_event_data_t; + +typedef struct msch_req_handle_profiler_event_data { + uint32 p_req_handle; + uint32 p_prev; + uint32 p_next; + uint32 cb_func; + uint32 cb_ctxt; + uint16 req_param_ptr; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 chan_cnt; + uint32 flags; + uint16 chanspec_list; + uint16 chanspec_cnt; + uint16 chan_idx; + uint16 last_chan_idx; + uint32 req_time_l; + uint32 req_time_h; +} msch_req_handle_profiler_event_data_t; + +typedef struct msch_profiler_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 free_req_hdl_list; + uint32 free_req_entity_list; + uint32 free_chan_ctxt_list; + uint32 free_chanspec_list; + uint16 cur_msch_timeslot_ptr; + uint16 next_timeslot_ptr; + uint32 p_cur_msch_timeslot; + uint32 p_next_timeslot; + uint32 cur_armed_timeslot; + uint32 flags; + uint32 ts_id; + uint32 service_interval; + uint32 max_lo_prio_interval; + uint16 flex_list_cnt; + uint16 msch_chanspec_alloc_cnt; + uint16 msch_req_entity_alloc_cnt; + uint16 msch_req_hdl_alloc_cnt; + uint16 msch_chan_ctxt_alloc_cnt; + uint16 msch_timeslot_alloc_cnt; + uint16 msch_req_hdl_list_cnt; + uint16 msch_req_hdl_list_ptr; + uint16 msch_chan_ctxt_list_cnt; + uint16 msch_chan_ctxt_list_ptr; + uint16 msch_req_timing_list_cnt; + uint16 msch_req_timing_list_ptr; + uint16 msch_start_fixed_list_cnt; + uint16 msch_start_fixed_list_ptr; + uint16 msch_both_flex_req_entity_list_cnt; + uint16 msch_both_flex_req_entity_list_ptr; + uint16 msch_start_flex_list_cnt; + uint16 msch_start_flex_list_ptr; + uint16 msch_both_flex_list_cnt; + uint16 msch_both_flex_list_ptr; + uint32 slotskip_flag; +} msch_profiler_profiler_event_data_t; + +typedef 
struct msch_req_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 chanspec_cnt; + uint16 chanspec_ptr; + uint16 req_param_ptr; + uint16 pad; +} msch_req_profiler_event_data_t; + +typedef struct msch_callback_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 type; /* callback type */ + uint16 chanspec; /* actual chanspec, may different with requested one */ + uint32 start_time_l; /* time slot start time low 32bit */ + uint32 start_time_h; /* time slot start time high 32bit */ + uint32 end_time_l; /* time slot end time low 32 bit */ + uint32 end_time_h; /* time slot end time high 32 bit */ + uint32 timeslot_id; /* unique time slot id */ + uint32 p_req_hdl; + uint32 onchan_idx; /* Current channel index */ + uint32 cur_chan_seq_start_time_l; /* start time of current sequence */ + uint32 cur_chan_seq_start_time_h; +} msch_callback_profiler_event_data_t; + +typedef struct msch_timeslot_profiler_event_data { + uint32 p_timeslot; + uint32 timeslot_id; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 sch_dur_l; + uint32 sch_dur_h; + uint32 p_chan_ctxt; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 state; +} msch_timeslot_profiler_event_data_t; + +typedef struct msch_register_params { + uint16 wlc_index; /* Optional wlc index */ + uint16 flags; /* Describe various request properties */ + uint32 req_type; /* Describe start and end time flexiblilty */ + uint16 id; /* register id */ + uint16 priority; /* Define the request priority */ + uint32 start_time; /* Requested start time offset in ms unit */ + uint32 duration; /* Requested duration in ms unit */ + uint32 interval; /* Requested periodic interval in ms unit, + * 0 means non-periodic + */ + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */ + uint32 hi_prio_time; + uint32 hi_prio_interval; /* repeated high priority interval */ + uint32 chanspec_cnt; + uint16 chanspec_list[WL_MSCH_NUMCHANNELS]; +} msch_register_params_t; + +typedef struct { + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. 
*/ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */ + uint32 rxtoolate; /**< receive too late */ + uint32 goodfcs; /**< Good fcs counters */ + uint32 rxf0ovfl; /** < Rx FIFO0 overflow counters information */ + uint32 rxf1ovfl; /** < Rx FIFO1 overflow counters information */ +} phy_periodic_counters_v1_t; + +typedef struct { + + /* RX error related */ + uint32 rxrsptmout; /* number of response timeouts for transmitted frames + * expecting a response + */ + uint32 rxbadplcp; /* number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ + uint32 rxnodelim; /* number of no valid delimiter detected by ampdu parser */ + uint32 bphy_badplcp; /* number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ + uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ + uint32 rxtoolate; /* receive too late */ + uint32 rxf0ovfl; /* Rx FIFO0 overflow counters information */ + uint32 rxf1ovfl; /* Rx FIFO1 overflow counters information */ + uint32 rxanyerr; /* Any RX error that is not counted by other counters. */ + uint32 rxdropped; /* Frame dropped */ + uint32 rxnobuf; /* Rx error due to no buffer */ + uint32 rxrunt; /* Runt frame counter */ + uint32 rxfrmtoolong; /* Number of received frame that are too long */ + uint32 rxdrop20s; + + /* RX related */ + uint32 rxstrt; /* number of received frames with a good PLCP */ + uint32 rxbeaconmbss; /* beacons received from member of BSS */ + uint32 rxdtucastmbss; /* number of received DATA frames with good FCS and matching RA */ + uint32 rxdtocast; /* number of received DATA frames (good FCS and no matching RA) */ + uint32 goodfcs; /* Good fcs counters */ + uint32 rxctl; /* Number of control frames */ + uint32 rxaction; /* Number of action frames */ + uint32 rxback; /* Number of block ack frames rcvd */ + uint32 rxctlucast; /* Number of received unicast ctl frames */ + uint32 rxframe; /* Number of received frames */ + + /* TX related */ + uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txmpdu; /* Numer of transmitted mpdus */ + uint32 txackbackctsfrm; /* Number of ACK + BACK + CTS */ + + /* TX error related */ + uint32 txrtsfail; /* RTS TX failure count */ + uint32 txphyerr; /* PHY TX error count */ + + uint16 nav_cntr_l; /* The state of the NAV */ + uint16 nav_cntr_h; +} phy_periodic_counters_v3_t; + +typedef struct phy_periodic_counters_v4 { + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxf0ovfl; /** < Rx FIFO0 overflow counters information */ + uint32 rxf1ovfl; /** < Rx FIFO1 overflow counters information */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */ + uint32 rxtoolate; /**< receive too late */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxdropped; + uint32 rxcrc; + uint32 rxnobuf; + uint32 rxrunt; + uint32 rxgiant; + uint32 rxctl; + uint32 rxaction; + uint32 rxdrop20s; + uint32 rxctsucast; + uint32 rxrtsucast; + uint32 txctsfrm; + uint32 rxackucast; + uint32 rxback; + uint32 txphyerr; + uint32 txrtsfrm; + uint32 txackfrm; + uint32 txback; + uint32 rxnodelim; + uint32 rxfrmtoolong; + uint32 rxctlucast; + uint32 txbcnfrm; + uint32 txdnlfrm; + uint32 txampdu; + uint32 txmpdu; + uint32 txinrtstxop; + uint32 prs_timeout; +} phy_periodic_counters_v4_t; + +typedef struct phycal_log_cmn { + uint16 chanspec; /* Current phy chanspec */ + uint8 last_cal_reason; /* Last Cal Reason */ + uint8 pad1; /* Padding byte to align with word */ + uint32 last_cal_time; /* Last cal time in sec */ +} phycal_log_cmn_t; + +typedef struct phycal_log_cmn_v2 { + uint16 chanspec; /* current phy chanspec */ + uint8 reason; /* cal reason */ + uint8 phase; /* cal phase */ + uint32 time; /* time at which cal happened in sec */ + uint16 temp; /* temperature at the time of cal */ + uint16 dur; /* duration of cal in usec */ + + /* Misc general purpose debug counters (will be used for future debugging) */ + uint16 debug_01; + uint16 debug_02; + uint16 debug_03; + uint16 debug_04; +} phycal_log_cmn_v2_t; + +typedef struct phycal_log_core { + uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */ + uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */ + uint16 ofdm_txd; /* contain di & dq */ + uint16 bphy_txa; /* BPHY Tx IQ Cal a coeff */ + uint16 bphy_txb; /* BPHY Tx IQ Cal b coeff */ + uint16 bphy_txd; /* contain di & dq */ + + uint16 rxa; /* Rx IQ Cal A coeffecient */ + uint16 rxb; /* Rx IQ Cal B coeffecient */ + int32 rxs; /* FDIQ Slope coeffecient */ + + uint8 baseidx; /* TPC Base index */ + uint8 adc_coeff_cap0_adcI; /* ADC CAP Cal Cap0 I */ + uint8 adc_coeff_cap1_adcI; /* ADC CAP Cal Cap1 I */ + uint8 adc_coeff_cap2_adcI; /* ADC CAP Cal Cap2 I */ + uint8 adc_coeff_cap0_adcQ; /* ADC CAP Cal Cap0 Q */ + uint8 adc_coeff_cap1_adcQ; /* ADC CAP Cal Cap1 Q */ + uint8 adc_coeff_cap2_adcQ; /* ADC CAP Cal Cap2 Q */ + uint8 pad; /* Padding byte to align with word */ +} phycal_log_core_t; + +typedef struct phycal_log_core_v3 { + uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */ + uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */ + uint16 ofdm_txd; /* contain di & dq */ + uint16 bphy_txa; /* BPHY Tx IQ Cal a coeff */ + uint16 bphy_txb; /* BPHY Tx IQ Cal b coeff */ + uint16 bphy_txd; /* contain di & dq */ + + uint16 rxa; /* Rx IQ Cal A 
coeffecient */ + uint16 rxb; /* Rx IQ Cal B coeffecient */ + int32 rxs; /* FDIQ Slope coeffecient */ + + uint8 baseidx; /* TPC Base index */ + uint8 adc_coeff_cap0_adcI; /* ADC CAP Cal Cap0 I */ + uint8 adc_coeff_cap1_adcI; /* ADC CAP Cal Cap1 I */ + uint8 adc_coeff_cap2_adcI; /* ADC CAP Cal Cap2 I */ + uint8 adc_coeff_cap0_adcQ; /* ADC CAP Cal Cap0 Q */ + uint8 adc_coeff_cap1_adcQ; /* ADC CAP Cal Cap1 Q */ + uint8 adc_coeff_cap2_adcQ; /* ADC CAP Cal Cap2 Q */ + uint8 pad; /* Padding byte to align with word */ + + /* Gain index based txiq ceffiecients for 2G(3 gain indices) */ + uint16 txiqlo_2g_a0; /* 2G TXIQ Cal a coeff for high TX gain */ + uint16 txiqlo_2g_b0; /* 2G TXIQ Cal b coeff for high TX gain */ + uint16 txiqlo_2g_a1; /* 2G TXIQ Cal a coeff for mid TX gain */ + uint16 txiqlo_2g_b1; /* 2G TXIQ Cal b coeff for mid TX gain */ + uint16 txiqlo_2g_a2; /* 2G TXIQ Cal a coeff for low TX gain */ + uint16 txiqlo_2g_b2; /* 2G TXIQ Cal b coeff for low TX gain */ + + uint16 rxa_vpoff; /* Rx IQ Cal A coeff Vp off */ + uint16 rxb_vpoff; /* Rx IQ Cal B coeff Vp off */ + uint16 rxa_ipoff; /* Rx IQ Cal A coeff Ip off */ + uint16 rxb_ipoff; /* Rx IQ Cal B coeff Ip off */ + int32 rxs_vpoff; /* FDIQ Slope coeff Vp off */ + int32 rxs_ipoff; /* FDIQ Slope coeff Ip off */ +} phycal_log_core_v3_t; + +#define PHYCAL_LOG_VER1 (1u) + +typedef struct phycal_log_v1 { + uint8 version; /* Logging structure version */ + uint8 numcores; /* Numbe of cores for which core specific data present */ + uint16 length; /* Length of the entire structure */ + phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */ + /* This will be a variable length based on the numcores field defined above */ + phycal_log_core_t phycal_log_core[1]; +} phycal_log_v1_t; + +typedef struct phy_periodic_log_cmn { + uint16 chanspec; /* Current phy chanspec */ + uint16 vbatmeas; /* Measured VBAT sense value */ + uint16 featureflag; /* Currently active feature flags */ + int8 chiptemp; /* Chip temparature */ + int8 femtemp; /* Fem temparature */ + + uint32 nrate; /* Current Tx nrate */ + + uint8 cal_phase_id; /* Current Multi phase cal ID */ + uint8 rxchain; /* Rx Chain */ + uint8 txchain; /* Tx Chain */ + uint8 ofdm_desense; /* OFDM desense */ + + uint8 bphy_desense; /* BPHY desense */ + uint8 pll_lockstatus; /* PLL Lock status */ + uint8 pad1; /* Padding byte to align with word */ + uint8 pad2; /* Padding byte to align with word */ + + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss; /**< traffic not in our bss */ + uint32 interference; /**< millisecs detecting a non 802.11 interferer. 
*/ + +} phy_periodic_log_cmn_t; + +typedef struct phy_periodic_log_cmn_v2 { + uint16 chanspec; /* Current phy chanspec */ + uint16 vbatmeas; /* Measured VBAT sense value */ + uint16 featureflag; /* Currently active feature flags */ + int8 chiptemp; /* Chip temparature */ + int8 femtemp; /* Fem temparature */ + + uint32 nrate; /* Current Tx nrate */ + + uint8 cal_phase_id; /* Current Multi phase cal ID */ + uint8 rxchain; /* Rx Chain */ + uint8 txchain; /* Tx Chain */ + uint8 ofdm_desense; /* OFDM desense */ + + uint8 bphy_desense; /* BPHY desense */ + uint8 pll_lockstatus; /* PLL Lock status */ + + uint32 duration; /* millisecs spent sampling this channel */ + uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */ + /* move if cur bss moves channels) */ + uint32 congest_obss; /* traffic not in our bss */ + uint32 interference; /* millisecs detecting a non 802.11 interferer. */ + + uint8 slice; + uint8 version; /* version of fw/ucode for debug purposes */ + bool phycal_disable; /* Set if calibration is disabled */ + uint8 pad; + uint16 phy_log_counter; + uint16 noise_mmt_overdue; /* Count up if ucode noise mmt is overdue for 5 sec */ + uint16 chan_switch_tm; /* Channel switch time */ + + /* HP2P related params */ + uint16 shm_mpif_cnt_val; + uint16 shm_thld_cnt_val; + uint16 shm_nav_cnt_val; + uint16 shm_cts_cnt_val; + + uint16 shm_m_prewds_cnt; /* Count of pre-wds fired in the ucode */ + uint32 last_cal_time; /* Last cal execution time */ + uint16 deaf_count; /* Depth of stay_in_carrier_search function */ + uint32 ed20_crs0; /* ED-CRS status on core 0 */ + uint32 ed20_crs1; /* ED-CRS status on core 1 */ + uint32 noise_cal_req_ts; /* Time-stamp when noise cal was requested */ + uint32 noise_cal_intr_ts; /* Time-stamp when noise cal was completed */ + uint32 phywdg_ts; /* Time-stamp when wd was fired */ + uint32 phywd_dur; /* Duration of the watchdog */ + uint32 noise_mmt_abort_crs; /* Count of CRS during noise mmt */ + uint32 chanspec_set_ts; /* Time-stamp when chanspec was set */ + uint32 vcopll_failure_cnt; /* Number of VCO cal failures + * (including failures detected in ucode). + */ + uint32 dcc_fail_counter; /* Number of DC cal failures */ + uint32 log_ts; /* Time-stamp when this log was collected */ + + uint16 btcxovrd_dur; /* Cumulative btcx overide between WDGs */ + uint16 btcxovrd_err_cnt; /* BTCX override flagged errors */ + + uint16 femtemp_read_fail_counter; /* Fem temparature read fail counter */ + /* Misc general purpose debug counters (will be used for future debugging) */ + uint16 debug_01; + uint16 debug_02; +} phy_periodic_log_cmn_v2_t; + +typedef struct phy_periodic_log_cmn_v3 { + uint32 nrate; /* Current Tx nrate */ + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss; /**< traffic not in our bss */ + uint32 interference; /**< millisecs detecting a non 802.11 interferer. 
*/ + uint32 noise_cfg_exit1; + uint32 noise_cfg_exit2; + uint32 noise_cfg_exit3; + uint32 noise_cfg_exit4; + uint32 ed20_crs0; + uint32 ed20_crs1; + uint32 noise_cal_req_ts; + uint32 noise_cal_crs_ts; + uint32 log_ts; + uint32 last_cal_time; + uint32 phywdg_ts; + uint32 chanspec_set_ts; + uint32 noise_zero_inucode; + uint32 phy_crs_during_noisemmt; + uint32 wd_dur; + + int32 deaf_count; + + uint16 chanspec; /* Current phy chanspec */ + uint16 vbatmeas; /* Measured VBAT sense value */ + uint16 featureflag; /* Currently active feature flags */ + uint16 nav_cntr_l; + uint16 nav_cntr_h; + uint16 chanspec_set_last; + uint16 ucode_noise_fb_overdue; + uint16 phy_log_counter; + uint16 shm_mpif_cnt_val; + uint16 shm_thld_cnt_val; + uint16 shm_nav_cnt_val; + uint16 shm_dc_cnt_val; + uint16 shm_txff_cnt_val; + uint16 shm_cts_cnt_val; + uint16 shm_m_prewds_cnt; + + uint8 cal_phase_id; /* Current Multi phase cal ID */ + uint8 rxchain; /* Rx Chain */ + uint8 txchain; /* Tx Chain */ + uint8 ofdm_desense; /* OFDM desense */ + uint8 bphy_desense; /* BPHY desense */ + uint8 pll_lockstatus; /* PLL Lock status */ + int8 chiptemp; /* Chip temparature */ + int8 femtemp; /* Fem temparature */ + + bool phycal_disable; + uint8 pad; /* Padding byte to align with word */ +} phy_periodic_log_cmn_v3_t; + +typedef struct phy_periodic_log_cmn_v4 { + uint16 chanspec; /* Current phy chanspec */ + uint16 vbatmeas; /* Measured VBAT sense value */ + + uint16 featureflag; /* Currently active feature flags */ + int8 chiptemp; /* Chip temparature */ + int8 femtemp; /* Fem temparature */ + + uint32 nrate; /* Current Tx nrate */ + + uint8 cal_phase_id; /* Current Multi phase cal ID */ + uint8 rxchain; /* Rx Chain */ + uint8 txchain; /* Tx Chain */ + uint8 ofdm_desense; /* OFDM desense */ + + uint8 slice; + uint8 dbgfw_ver; /* version of fw/ucode for debug purposes */ + uint8 bphy_desense; /* BPHY desense */ + uint8 pll_lockstatus; /* PLL Lock status */ + + uint32 duration; /* millisecs spent sampling this channel */ + uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */ + /* move if cur bss moves channels) */ + uint32 congest_obss; /* traffic not in our bss */ + uint32 interference; /* millisecs detecting a non 802.11 interferer. */ + + /* HP2P related params */ + uint16 shm_mpif_cnt_val; + uint16 shm_thld_cnt_val; + uint16 shm_nav_cnt_val; + uint16 shm_cts_cnt_val; + + uint16 shm_m_prewds_cnt; /* Count of pre-wds fired in the ucode */ + uint16 deaf_count; /* Depth of stay_in_carrier_search function */ + uint32 last_cal_time; /* Last cal execution time */ + uint32 ed20_crs0; /* ED-CRS status on core 0 */ + uint32 ed20_crs1; /* ED-CRS status on core 1 */ + uint32 noise_cal_req_ts; /* Time-stamp when noise cal was requested */ + uint32 noise_cal_intr_ts; /* Time-stamp when noise cal was completed */ + uint32 phywdg_ts; /* Time-stamp when wd was fired */ + uint32 phywd_dur; /* Duration of the watchdog */ + uint32 noise_mmt_abort_crs; /* Count of CRS during noise mmt */ + uint32 chanspec_set_ts; /* Time-stamp when chanspec was set */ + uint32 vcopll_failure_cnt; /* Number of VCO cal failures + * (including failures detected in ucode). 
+ */ + uint16 dcc_attempt_counter; /* Number of DC cal attempts */ + uint16 dcc_fail_counter; /* Number of DC cal failures */ + uint32 log_ts; /* Time-stamp when this log was collected */ + + uint16 btcxovrd_dur; /* Cumulative btcx override between WDGs */ + uint16 btcxovrd_err_cnt; /* BTCX override flagged errors */ + + uint16 femtemp_read_fail_counter; /* Fem temperature read fail counter */ + uint16 phy_log_counter; + uint16 noise_mmt_overdue; /* Count up if ucode noise mmt is overdue for 5 sec */ + uint16 chan_switch_tm; /* Channel switch time */ + + bool phycal_disable; /* Set if calibration is disabled */ + + /* dccal dcoe & idacc */ + uint8 dcc_err; /* dccal health check error status */ + uint8 dcoe_num_tries; /* number of retries on dcoe cal */ + uint8 idacc_num_tries; /* number of retries on idac cal */ + + uint8 dccal_phyrxchain; /* phy rxchain during dc calibration */ + uint8 dccal_type; /* DC cal type: single/multi phase, chan change, etc. */ + uint16 dcc_hcfail; /* dcc health check failure count */ + uint16 dcc_calfail; /* dcc failure count */ + + /* Misc general purpose debug counters (will be used for future debugging) */ + uint16 debug_01; + uint16 debug_02; + uint16 debug_03; + uint16 debug_04; + uint16 debug_05; +} phy_periodic_log_cmn_v4_t; + +typedef struct phy_periodic_log_core { + uint8 baseindxval; /* TPC Base index */ + int8 tgt_pwr; /* Programmed Target power */ + int8 estpwradj; /* Current Est Power Adjust value */ + int8 crsmin_pwr; /* CRS Min/Noise power */ + int8 rssi_per_ant; /* RSSI Per antenna */ + int8 snr_per_ant; /* SNR Per antenna */ + int8 pad1; /* Padding byte to align with word */ + int8 pad2; /* Padding byte to align with word */ +} phy_periodic_log_core_t; + +typedef struct phy_periodic_log_core_v3 { + uint8 baseindxval; /* TPC Base index */ + int8 tgt_pwr; /* Programmed Target power */ + int8 estpwradj; /* Current Est Power Adjust value */ + int8 crsmin_pwr; /* CRS Min/Noise power */ + int8 rssi_per_ant; /* RSSI Per antenna */ + int8 snr_per_ant; /* SNR Per antenna */ + + /* dccal dcoe & idacc */ + uint16 dcoe_done_0; /* dccal control register 44 */ + uint16 dcoe_done_1; /* dccal control register 45 */ + uint16 dcoe_done_2; /* dccal control register 46 */ + uint16 idacc_done_0; /* dccal control register 21 */ + uint16 idacc_done_1; /* dccal control register 60 */ + uint16 idacc_done_2; /* dccal control register 61 */ + int16 psb; /* psb read during dccal health check */ + uint8 pktproc; /* pktproc read during dccal health check */ + + int8 pad1; /* Padding byte to align with word */ + int8 pad2; /* Padding byte to align with word */ + int8 pad3; /* Padding byte to align with word */ +} phy_periodic_log_core_v3_t; + +typedef struct phy_periodic_log_core_v2 { + int32 rxs; /* FDIQ Slope coefficient */ + + uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */ + uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */ + uint16 ofdm_txd; /* contain di & dq */ + uint16 rxa; /* Rx IQ Cal A coefficient */ + uint16 rxb; /* Rx IQ Cal B coefficient */ + uint16 baseidx; /* TPC Base index */ + + uint8 baseindxval; /* TPC Base index */ + + int8 tgt_pwr; /* Programmed Target power */ + int8 estpwradj; /* Current Est Power Adjust value */ + int8 crsmin_pwr; /* CRS Min/Noise power */ + int8 rssi_per_ant; /* RSSI Per antenna */ + int8 snr_per_ant; /* SNR Per antenna */ + int8 pad1; /* Padding byte to align with word */ + int8 pad2; /* Padding byte to align with word */ +} phy_periodic_log_core_v2_t; + +#define PHY_PERIODIC_LOG_VER1 (1u) + +typedef struct phy_periodic_log_v1 { + uint8
version; /* Logging structure version */ + uint8 numcores; /* Number of cores for which core-specific data is present */ + uint16 length; /* Length of the entire structure */ + phy_periodic_log_cmn_t phy_perilog_cmn; + phy_periodic_counters_v1_t counters_peri_log; + /* This will be a variable length based on the numcores field defined above */ + phy_periodic_log_core_t phy_perilog_core[1]; +} phy_periodic_log_v1_t; + +#define PHYCAL_LOG_VER3 (3u) +#define PHY_PERIODIC_LOG_VER3 (3u) + +/* 4387 onwards */ +typedef struct phy_periodic_log_v3 { + uint8 version; /* Logging structure version */ + uint8 numcores; /* Number of cores for which core-specific data is present */ + uint16 length; /* Length of the structure */ + + /* Logs general PHY parameters */ + phy_periodic_log_cmn_v2_t phy_perilog_cmn; + + /* Logs ucode counters and NAVs */ + phy_periodic_counters_v3_t counters_peri_log; + + /* Logs data pertaining to each core */ + phy_periodic_log_core_t phy_perilog_core[1]; +} phy_periodic_log_v3_t; + +#define PHY_PERIODIC_LOG_VER5 (5u) + +typedef struct phy_periodic_log_v5 { + uint8 version; /* Logging structure version */ + uint8 numcores; /* Number of cores for which core-specific data is present */ + uint16 length; /* Length of the structure */ + + /* Logs general PHY parameters */ + phy_periodic_log_cmn_v4_t phy_perilog_cmn; + + /* Logs ucode counters and NAVs */ + phy_periodic_counters_v3_t counters_peri_log; + + /* Logs data pertaining to each core */ + phy_periodic_log_core_v3_t phy_perilog_core[1]; +} phy_periodic_log_v5_t; + +typedef struct phycal_log_v3 { + uint8 version; /* Logging structure version */ + uint8 numcores; /* Number of cores for which core-specific data is present */ + uint16 length; /* Length of the entire structure */ + phycal_log_cmn_v2_t phycal_log_cmn; /* Logging common structure */ + /* This will be a variable length based on the numcores field defined above */ + phycal_log_core_v3_t phycal_log_core[1]; +} phycal_log_v3_t; + +/* Note: The version 2 is reserved for 4357 only. Future chips must not use this version. */ + +#define MAX_CORE_4357 (2u) +#define PHYCAL_LOG_VER2 (2u) +#define PHY_PERIODIC_LOG_VER2 (2u) + +typedef struct { + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of times the parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 bphy_badplcp; /**< number of bad PLCP receptions on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxf0ovfl; /**< Rx FIFO0 overflow counters information */ + uint32 rxf1ovfl; /**< Rx FIFO1 overflow counters information */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */ + uint32 rxtoolate; /**< receive too late */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ +} phy_periodic_counters_v2_t;
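The phy_periodic_log_v1/v3/v5 and phycal_log_v3 structures above end in a one-element per-core array that actually holds numcores entries, so records must be sized dynamically. A minimal host-side sketch of that computation (illustrative only, not part of the patch; it assumes numcores >= 1 and that the record is already in host byte order):

static uint32
phy_perilog_v5_expected_len(const phy_periodic_log_v5_t *log)
{
	/* sizeof() already accounts for the single declared per-core entry */
	return (uint32)sizeof(phy_periodic_log_v5_t) +
		((uint32)log->numcores - 1u) *
		(uint32)sizeof(phy_periodic_log_core_v3_t);
}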
/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */ + +typedef struct phycal_log_core_v2 { + uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */ + uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */ + uint16 ofdm_txd; /* contain di & dq */ + uint16 rxa; /* Rx IQ Cal A coefficient */ + uint16 rxb; /* Rx IQ Cal B coefficient */ + uint8 baseidx; /* TPC Base index */ + uint8 pad; + int32 rxs; /* FDIQ Slope coefficient */ +} phycal_log_core_v2_t; + +/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */ + +typedef struct phycal_log_v2 { + uint8 version; /* Logging structure version */ + uint16 length; /* Length of the entire structure */ + uint8 pad; + phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */ + phycal_log_core_v2_t phycal_log_core[MAX_CORE_4357]; +} phycal_log_v2_t; + +/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */ + +typedef struct phy_periodic_log_v2 { + uint8 version; /* Logging structure version */ + uint16 length; /* Length of the entire structure */ + uint8 pad; + phy_periodic_log_cmn_t phy_perilog_cmn; + phy_periodic_counters_v2_t counters_peri_log; + phy_periodic_log_core_t phy_perilog_core[MAX_CORE_4357]; +} phy_periodic_log_v2_t; + +#define PHY_PERIODIC_LOG_VER4 (4u) + +/* + * Note: The version 4 is reserved for 4357 Deafness Debug only. + * All future chips must not use this version. + */ +typedef struct phy_periodic_log_v4 { + uint8 version; /* Logging structure version */ + uint8 pad; + uint16 length; /* Length of the entire structure */ + phy_periodic_log_cmn_v3_t phy_perilog_cmn; + phy_periodic_counters_v4_t counters_peri_log; + phy_periodic_log_core_v2_t phy_perilog_core[MAX_CORE_4357]; +} phy_periodic_log_v4_t; + +/* Event log payload for enhanced roam log */ +typedef enum { + ROAM_LOG_SCANSTART = 1, /* EVT log for roam scan start */ + ROAM_LOG_SCAN_CMPLT = 2, /* EVT log for roam scan completed */ + ROAM_LOG_ROAM_CMPLT = 3, /* EVT log for roam done */ + ROAM_LOG_NBR_REQ = 4, /* EVT log for Neighbor REQ */ + ROAM_LOG_NBR_REP = 5, /* EVT log for Neighbor REP */ + ROAM_LOG_BCN_REQ = 6, /* EVT log for BCNRPT REQ */ + ROAM_LOG_BCN_REP = 7, /* EVT log for BCNRPT REP */ + ROAM_LOG_BTM_REP = 8, /* EVT log for BTM REP */ + ROAM_LOG_WIPS_EVENT = 9, /* EVT log for WIPS Event */ + PRSV_PERIODIC_ID_MAX +} prsv_periodic_id_enum_t; + +typedef struct prsv_periodic_log_hdr { + uint8 version; + uint8 id; + uint16 length; +} prsv_periodic_log_hdr_t; + +#define ROAM_LOG_VER_1 (1u) +#define ROAM_LOG_VER_2 (2u) +#define ROAM_LOG_VER_3 (3u) +#define ROAM_SSID_LEN (32u) +typedef struct roam_log_trig_v1 { + prsv_periodic_log_hdr_t hdr; + int8 rssi; + uint8 current_cu; + uint8 pad[2]; + uint reason; + int result; + union { + struct { + uint rcvd_reason; + } prt_roam; + struct { + uint8 req_mode; + uint8 token; + uint16 nbrlist_size; + uint32 disassoc_dur; + uint32 validity_dur; + uint32 bss_term_dur; + } bss_trans; + }; +} roam_log_trig_v1_t; + +typedef struct roam_log_trig_v2 { + prsv_periodic_log_hdr_t hdr; + int8 rssi; + uint8 current_cu; + uint8 full_scan; + uint8 pad; + uint reason; + int result; + union { + struct { + uint rcvd_reason; + } prt_roam; + struct { + uint8 req_mode; + uint8 token; + uint16 nbrlist_size; + uint32 disassoc_dur; + uint32 validity_dur; + uint32 bss_term_dur; + } bss_trans; + struct { + int rssi_threshold; + } low_rssi; + }; +} roam_log_trig_v2_t; + +#define ROAM_LOG_RPT_SCAN_LIST_SIZE 3 +#define ROAM_LOG_INVALID_TPUT 0xFFFFFFFFu +typedef struct roam_scan_ap_info { + int8 rssi; + uint8 cu; + uint8 pad[2]; + uint32
score; + uint16 chanspec; + struct ether_addr addr; + uint32 estm_tput; +} roam_scan_ap_info_t; + +typedef struct roam_log_scan_cmplt_v1 { + prsv_periodic_log_hdr_t hdr; + uint8 full_scan; + uint8 scan_count; + uint8 scan_list_size; + uint8 pad; + int32 score_delta; + roam_scan_ap_info_t cur_info; + roam_scan_ap_info_t scan_list[ROAM_LOG_RPT_SCAN_LIST_SIZE]; +} roam_log_scan_cmplt_v1_t; + +#define ROAM_CHN_UNI_2A 36u +#define ROAM_CHN_UNI_2A_MAX 64u +#define ROAM_CHN_UNI_2C 100u +#define ROAM_CHN_UNI_2C_MAX 144u +#define ROAM_CHN_UNI_3 149u +#define ROAM_CHN_UNI_3_MAX 165u +#define ROAM_CHN_SPACE 2u /* channel index space for 5G */ + +typedef struct roam_log_scan_cmplt_v2 { + prsv_periodic_log_hdr_t hdr; + uint8 scan_count; + uint8 scan_list_size; + uint8 chan_num; + uint8 pad; + uint16 band2g_chan_list; + uint16 uni2a_chan_list; + uint8 uni2c_chan_list[3]; + uint8 uni3_chan_list; + int32 score_delta; + roam_scan_ap_info_t cur_info; + roam_scan_ap_info_t scan_list[ROAM_LOG_RPT_SCAN_LIST_SIZE]; +} roam_log_scan_cmplt_v2_t; + +typedef struct roam_log_cmplt_v1 { + prsv_periodic_log_hdr_t hdr; + uint status; /* status code WLC_E STATUS */ + uint reason; /* roam trigger reason */ + uint16 chanspec; /* new bssid chanspec */ + struct ether_addr addr; /* ether addr */ + uint8 pad[3]; + uint8 retry; +} roam_log_cmplt_v1_t; + +typedef roam_log_cmplt_v1_t roam_log_cmplt_v2_t; + +typedef struct roam_log_nbrrep { + prsv_periodic_log_hdr_t hdr; + uint channel_num; +} roam_log_nbrrep_v1_t; + +typedef struct roam_log_nbrrep_v2 { + prsv_periodic_log_hdr_t hdr; + uint channel_num; + uint16 band2g_chan_list; /* channel bit map */ + uint16 uni2a_chan_list; + uint8 uni2c_chan_list[3]; + uint8 uni3_chan_list; +} roam_log_nbrrep_v2_t; + +typedef struct roam_log_nbrreq { + prsv_periodic_log_hdr_t hdr; + uint token; +} roam_log_nbrreq_v1_t; + +typedef roam_log_nbrreq_v1_t roam_log_nbrreq_v2_t; + +typedef struct roam_log_bcnrptreq { + prsv_periodic_log_hdr_t hdr; + int32 result; + uint8 reg; /* operating class */ + uint8 channel; /* number of requesting channel */ + uint8 mode; /* request mode d11 rmreq bcn */ + uint8 bssid_wild; /* is wild bssid */ + uint8 ssid_len; /* length of SSID */ + uint8 pad; + uint16 duration; /* duration */ + uint8 ssid[ROAM_SSID_LEN]; +} roam_log_bcnrpt_req_v1_t; + +typedef roam_log_bcnrpt_req_v1_t roam_log_bcnrpt_req_v2_t; + +typedef struct roam_log_bcnrptrep { + prsv_periodic_log_hdr_t hdr; + uint32 count; +} roam_log_bcnrpt_rep_v1_t; + +typedef struct roam_log_bcnrptrep_v2 { + prsv_periodic_log_hdr_t hdr; + uint8 scan_inprogress; /* if scan in progress TRUE */ + uint8 reason; /* report mode d11 RMREP mode */ + uint32 count; +} roam_log_bcnrpt_rep_v2_t; + +typedef struct roam_log_btmrep_v2 { + prsv_periodic_log_hdr_t hdr; + uint8 req_mode; /* d11 BSSTRANS req mode */ + uint8 status; /* d11 BSSTRANS response status code */ + uint16 pad[2]; + int result; +} roam_log_btm_rep_v2_t; + +/* ROAM_LOG_VER_3 specific structures */ +typedef struct roam_log_btmrep_v3 { + prsv_periodic_log_hdr_t hdr; + uint8 req_mode; /* d11 BSSTRANS req mode */ + uint8 status; /* d11 BSSTRANS response status code */ + uint16 pad[2]; + struct ether_addr target_addr; /* bssid to move */ + int result; +} roam_log_btm_rep_v3_t;
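Every enhanced roam log record above begins with prsv_periodic_log_hdr_t, so a host parser can dispatch on hdr->id (and then on hdr->version) before casting to the concrete payload. An illustrative sketch, not part of the patch:

static void
roam_log_dispatch(const prsv_periodic_log_hdr_t *hdr)
{
	switch (hdr->id) {
	case ROAM_LOG_SCANSTART:
		/* cast to roam_log_trig_v1_t or _v2_t based on hdr->version */
		break;
	case ROAM_LOG_SCAN_CMPLT:
		/* cast to roam_log_scan_cmplt_v1_t or _v2_t */
		break;
	case ROAM_LOG_BTM_REP:
		/* cast to roam_log_btm_rep_v2_t or _v3_t */
		break;
	default:
		/* unknown id: skip hdr->length bytes and continue */
		break;
	}
}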
+ +typedef struct roam_log_bcnrptreq_v3 { + prsv_periodic_log_hdr_t hdr; + int32 result; + uint8 reg; /* operating class */ + uint8 channel; /* number of requesting channel */ + uint8 mode; /* request mode d11 rmreq bcn */ + uint8 bssid_wild; /* is wild bssid */ + uint8 ssid_len; /* length of SSID */ + uint8 pad; + uint16 duration; /* duration */ + uint8 ssid[ROAM_SSID_LEN]; + uint channel_num; /* number of scan channel */ + uint16 band2g_chan_list; /* channel bit map */ + uint16 uni2a_chan_list; + uint8 uni2c_chan_list[3]; + uint8 uni3_chan_list; +} roam_log_bcnrpt_req_v3_t; + +#define BCNRPT_RSN_SUCCESS 0 +#define BCNRPT_RSN_BADARG 1 +#define BCNRPT_RSN_SCAN_ING 2 +#define BCNRPT_RSN_SCAN_FAIL 3 + +typedef struct roam_log_bcnrptrep_v3 { + prsv_periodic_log_hdr_t hdr; + uint8 scan_status; /* scan status */ + uint8 reason; /* report mode d11 RMREP mode */ + uint16 reason_detail; + uint32 count; + uint16 duration; /* duration */ + uint16 pad; +} roam_log_bcnrpt_rep_v3_t; + +typedef struct roam_log_wips_evt_v3 { + prsv_periodic_log_hdr_t hdr; + uint32 timestamp; + struct ether_addr bssid; /* ether addr */ + uint16 misdeauth; + int16 current_rssi; + int16 deauth_rssi; +} roam_log_wips_evt_v3_t; + +#define EVENT_LOG_BUFFER_ID_PMK 0 +#define EVENT_LOG_BUFFER_ID_ANONCE 1 +#define EVENT_LOG_BUFFER_ID_SNONCE 2 +#define EVENT_LOG_BUFFER_ID_WPA_M3_KEYDATA 3 +#define EVENT_LOG_BUFFER_ID_WPA_CACHED_KEYDATA 4 + +typedef struct event_log_buffer { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_BUF */ + uint16 len; /* XTLV Len */ + uint16 buf_id; /* One of the above EVENT_LOG_BUFFER_ID_XXXs */ + uint16 pad; /* for 4-byte start alignment of data */ + uint8 data[]; /* the payload of interest */ +} event_log_buffer_t; + +#define XTLV_EVENT_LOG_BUFFER_LEN (OFFSETOF(event_log_buffer_t, data)) +#define XTLV_EVENT_LOG_BUFFER_FULL_LEN(buf_len) ALIGN_SIZE((XTLV_EVENT_LOG_BUFFER_LEN + \ + (buf_len) * sizeof(uint8)), sizeof(uint32)) + +/* Structures for parsing FSM log data + * Only used by host to parse data coming in FSM log set + * Following log tags use this structured data: + * EVENT_LOG_TAG_ASSOC_SM + * EVENT_LOG_TAG_SUP_SM + * EVENT_LOG_TAG_AUTH_SM + * EVENT_LOG_TAG_SAE_SM + * EVENT_LOG_TAG_FTM_SM + * EVENT_LOG_TAG_NAN_SM + * More state machine log tags may also use this format + */ + +/* Generic FSM structure for logging. Must be wrapped into a proper structure. The wrapper + * structure can add more information but this needs to be one of the members of the wrapper + * structure. + */ +typedef struct event_log_generic_fsm_struct { + uint32 old_state; + uint32 new_state; + uint32 reason; + uint32 caller; +} event_log_generic_fsm_struct_t; + +typedef struct event_log_wl_fsm_struct { + uint32 unit; + uint32 bsscfg_idx; + event_log_generic_fsm_struct_t generic_fsm; + uint32 data[]; /* Any other information relevant to this state transition */ +} event_log_wl_fsm_struct_t; + +/* To be used by DVFS event log FSM logging */ +typedef struct event_log_rte_dvfs_fsm_struct { + event_log_generic_fsm_struct_t generic_fsm; + uint32 data[]; /* Any other information relevant to this state transition */ +} event_log_rte_dvfs_fsm_struct_t; + +#endif /* _EVENT_LOG_PAYLOAD_H_ */
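As the comment above says, event_log_generic_fsm_struct_t is always embedded in a wrapper. A hypothetical sketch of a producer filling the WL wrapper before emitting it (the emit call itself is driver-specific and omitted here):

static void
wl_fsm_log_fill(event_log_wl_fsm_struct_t *rec, uint32 unit, uint32 bsscfg_idx,
	uint32 old_state, uint32 new_state, uint32 reason, uint32 caller)
{
	rec->unit = unit;
	rec->bsscfg_idx = bsscfg_idx;
	rec->generic_fsm.old_state = old_state;
	rec->generic_fsm.new_state = new_state;
	rec->generic_fsm.reason = reason;
	rec->generic_fsm.caller = caller;
	/* then emit rec under e.g. EVENT_LOG_TAG_ASSOC_SM */
}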
diff --git a/bcmdhd.101.10.361.x/include/event_log_set.h b/bcmdhd.101.10.361.x/include/event_log_set.h new file mode 100755 index 0000000..5e098d8 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/event_log_set.h @@ -0,0 +1,142 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _EVENT_LOG_SET_H_ +#define _EVENT_LOG_SET_H_ + +/* Set assignments */ +#define EVENT_LOG_SET_BUS (0u) +#define EVENT_LOG_SET_WL (1u) +#define EVENT_LOG_SET_PSM (2u) +#define EVENT_LOG_SET_ERROR (3u) + +/* MSCH logging */ +#define EVENT_LOG_SET_MSCH_PROFILER (4u) + +/* A particular customer uses sets 5, 6, and 7. There is a request + * to not name these log sets, as that could limit their ability to + * use different log sets in the future. + * Sets 5, 6, and 7 are instantiated by the host. + * In such a case, ecounters could be mapped to any set that the host + * configures. They may or may not use set 5. + */ +#define EVENT_LOG_SET_5 (5u) +#define EVENT_LOG_SET_ECOUNTERS (EVENT_LOG_SET_5) +#define EVENT_LOG_SET_6 (6u) +#define EVENT_LOG_SET_7 (7u) + +/* Temporary change to satisfy compilation across branches + * Will be removed after checkin + */ +#define EVENT_LOG_SET_8 (8u) +#define EVENT_LOG_SET_PRSRV (EVENT_LOG_SET_8) + +#define EVENT_LOG_SET_9 (9u) +/* General purpose preserve chatty. + * EVENT_LOG_SET_PRSRV_CHATTY log set should not be used by FW as it is + * used by customer host. FW should use EVENT_LOG_SET_GP_PRSRV_CHATTY + * for general purpose preserve chatty logs. + */ +#define EVENT_LOG_SET_GP_PRSRV_CHATTY (EVENT_LOG_SET_9) +#define EVENT_LOG_SET_PRSRV_CHATTY (EVENT_LOG_SET_6) + +/* BUS preserve */ +#define EVENT_LOG_SET_PRSRV_BUS (10u) + +/* WL preserve */ +#define EVENT_LOG_SET_PRSRV_WL (11u) + +/* Slotted BSS set */ +#define EVENT_LOG_SET_WL_SLOTTED_BSS (12u) + +/* PHY entity logging */ +#define EVENT_LOG_SET_PHY (13u) + +/* PHY preserve */ +#define EVENT_LOG_SET_PRSRV_PHY (14u) + +/* RTE entity */ +#define EVENT_LOG_SET_RTE (15u) + +/* Malloc and free logging */ +#define EVENT_LOG_SET_MEM_API (16u) + +/* Console buffer */ +#define EVENT_LOG_SET_RTE_CONS_BUF (17u) + +/* three log sets for general debug purposes */ +#define EVENT_LOG_SET_GENERAL_DBG_1 (18u) +#define EVENT_LOG_SET_GENERAL_DBG_2 (19u) +#define EVENT_LOG_SET_GENERAL_DBG_3 (20u) + +/* Log sets for capturing power related logs. Note that these sets + * are to be used across the entire system and not just WL. + */ +#define EVENT_LOG_SET_POWER_1 (21u) +#define EVENT_LOG_SET_POWER_2 (22u) + +/* Used for timestamp plotting, TS_LOG() */ +#define EVENT_LOG_SET_TS_LOG (23u) + +/* BUS preserve chatty */ +#define EVENT_LOG_SET_PRSRV_BUS_CHATTY (24u) + +/* PRESERVE_PERIODIC_LOG_SET */ +/* flush if host is in D0 at every period */ +#define EVENT_LOG_SET_PRSV_PERIODIC (25u) + +/* AMT logging and other related information */ +#define EVENT_LOG_SET_AMT (26u)
+ +/* State machine logging. Part of preserve logs */ +#define EVENT_LOG_SET_FSM (27u) + +/* wbus related logging */ +#define EVENT_LOG_SET_WBUS (28u) + +/* bcm trace logging */ +#define EVENT_LOG_SET_BCM_TRACE (29u) + +/* For PM alert related logging */ +#define EVENT_LOG_SET_WL_PS_LOG (30u) + +#ifndef NUM_EVENT_LOG_SETS +/* Set a maximum number of sets here. It is not dynamic for + * efficiency of the EVENT_LOG calls. Old branches could define + * this to an appropriate number in their makefiles to reduce + * ROM invalidation + */ +#ifdef NUM_EVENT_LOG_SETS_V2 +/* for v2, everything has become unsigned */ +#define NUM_EVENT_LOG_SETS (31u) +#else /* NUM_EVENT_LOG_SETS_V2 */ +#define NUM_EVENT_LOG_SETS (31) +#endif /* NUM_EVENT_LOG_SETS_V2 */ +#endif /* NUM_EVENT_LOG_SETS */ + +/* send delayed logs when >= 50% of buffer is full */ +#ifndef ECOUNTERS_DELAYED_FLUSH_PERCENTAGE +#define ECOUNTERS_DELAYED_FLUSH_PERCENTAGE (50) +#endif + +#endif /* _EVENT_LOG_SET_H_ */ diff --git a/bcmdhd.101.10.361.x/include/event_log_tag.h b/bcmdhd.101.10.361.x/include/event_log_tag.h new file mode 100755 index 0000000..54d93c6 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/event_log_tag.h @@ -0,0 +1,617 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _EVENT_LOG_TAG_H_ +#define _EVENT_LOG_TAG_H_ + +#include <typedefs.h> + +/* Define new event log tags here */ +#define EVENT_LOG_TAG_NULL 0 /* Special null tag */ +#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */ + +/* HSIC Legacy support */ +/* Possible candidates for reuse */ +#define EVENT_LOG_TAG_BUS_OOB 2 +#define EVENT_LOG_TAG_BUS_STATE 3 +#define EVENT_LOG_TAG_BUS_PROTO 4 +#define EVENT_LOG_TAG_BUS_CTL 5 +#define EVENT_LOG_TAG_BUS_EVENT 6 +#define EVENT_LOG_TAG_BUS_PKT 7 +#define EVENT_LOG_TAG_BUS_FRAME 8 +#define EVENT_LOG_TAG_BUS_DESC 9 +#define EVENT_LOG_TAG_BUS_SETUP 10 +#define EVENT_LOG_TAG_BUS_MISC 11 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_AWDL_ERR 12 +#define EVENT_LOG_TAG_AWDL_WARN 13 +#define EVENT_LOG_TAG_AWDL_INFO 14 +#define EVENT_LOG_TAG_AWDL_DEBUG 15 +#define EVENT_LOG_TAG_AWDL_TRACE_TIMER 16 +#define EVENT_LOG_TAG_AWDL_TRACE_SYNC 17 +#define EVENT_LOG_TAG_AWDL_TRACE_CHAN 18 +#define EVENT_LOG_TAG_AWDL_TRACE_DP 19 +#define EVENT_LOG_TAG_AWDL_TRACE_MISC 20 +#define EVENT_LOG_TAG_AWDL_TEST 21 +#endif /* WLAWDL */ + +#define EVENT_LOG_TAG_SRSCAN 22 +#define EVENT_LOG_TAG_PWRSTATS_INFO 23 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_AWDL_TRACE_CHANSW 24 +#define EVENT_LOG_TAG_AWDL_TRACE_PEER_OPENCLOSE 25 +#endif /* WLAWDL */ + +/* Timestamp logging for plotting.
*/ +#define EVENT_LOG_TAG_TSLOG 26 + +/* Possible candidates for reuse */ +#define EVENT_LOG_TAG_UCODE_FIFO 27 + +#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28 +#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29 +#define EVENT_LOG_TAG_SCAN_ERROR 30 +#define EVENT_LOG_TAG_SCAN_WARN 31 +#define EVENT_LOG_TAG_MPF_ERR 32 +#define EVENT_LOG_TAG_MPF_WARN 33 +#define EVENT_LOG_TAG_MPF_INFO 34 +#define EVENT_LOG_TAG_MPF_DEBUG 35 +#define EVENT_LOG_TAG_EVENT_INFO 36 +#define EVENT_LOG_TAG_EVENT_ERR 37 +#define EVENT_LOG_TAG_PWRSTATS_ERROR 38 +#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39 +#define EVENT_LOG_TAG_IOCTL_LOG 40 +#define EVENT_LOG_TAG_PFN_ERR 41 +#define EVENT_LOG_TAG_PFN_WARN 42 +#define EVENT_LOG_TAG_PFN_INFO 43 +#define EVENT_LOG_TAG_PFN_DEBUG 44 +#define EVENT_LOG_TAG_BEACON_LOG 45 +#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46 +#define EVENT_LOG_TAG_TRACE_CHANSW 47 +#define EVENT_LOG_TAG_PCI_ERROR 48 +#define EVENT_LOG_TAG_PCI_TRACE 49 +#define EVENT_LOG_TAG_PCI_WARN 50 +#define EVENT_LOG_TAG_PCI_INFO 51 +#define EVENT_LOG_TAG_PCI_DBG 52 +#define EVENT_LOG_TAG_PCI_DATA 53 +#define EVENT_LOG_TAG_PCI_RING 54 + +#ifdef WLAWDL +/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after wlc_ranging merge from IGUANA + * keeping it here to avoid compilation error on trunk + */ +#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55 +#endif /* WLAWDL */ + +#define EVENT_LOG_TAG_RANGING_TRACE 55 +#define EVENT_LOG_TAG_WL_ERROR 56 +#define EVENT_LOG_TAG_PHY_ERROR 57 +#define EVENT_LOG_TAG_OTP_ERROR 58 +#define EVENT_LOG_TAG_NOTIF_ERROR 59 +#define EVENT_LOG_TAG_MPOOL_ERROR 60 +#define EVENT_LOG_TAG_OBJR_ERROR 61 +#define EVENT_LOG_TAG_DMA_ERROR 62 +#define EVENT_LOG_TAG_PMU_ERROR 63 +#define EVENT_LOG_TAG_BSROM_ERROR 64 +#define EVENT_LOG_TAG_SI_ERROR 65 +#define EVENT_LOG_TAG_ROM_PRINTF 66 +#define EVENT_LOG_TAG_RATE_CNT 67 +#define EVENT_LOG_TAG_CTL_MGT_CNT 68 +#define EVENT_LOG_TAG_AMPDU_DUMP 69 +#define EVENT_LOG_TAG_MEM_ALLOC_SUCC 70 +#define EVENT_LOG_TAG_MEM_ALLOC_FAIL 71 +#define EVENT_LOG_TAG_MEM_FREE 72 +#define EVENT_LOG_TAG_WL_ASSOC_LOG 73 +#define EVENT_LOG_TAG_WL_PS_LOG 74 +#define EVENT_LOG_TAG_WL_ROAM_LOG 75 +#define EVENT_LOG_TAG_WL_MPC_LOG 76 +#define EVENT_LOG_TAG_WL_WSEC_LOG 77 +#define EVENT_LOG_TAG_WL_WSEC_DUMP 78 +#define EVENT_LOG_TAG_WL_MCNX_LOG 79 +#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR 80 +#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR 81 +#define EVENT_LOG_TAG_ECOUNTERS_ERROR 82 +#define EVENT_LOG_TAG_WL_COUNTERS 83 +#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS 84 +#define EVENT_LOG_TAG_WL_P2P_LOG 85 +#define EVENT_LOG_TAG_SDIO_ERROR 86 +#define EVENT_LOG_TAG_SDIO_TRACE 87 +#define EVENT_LOG_TAG_SDIO_DBG 88 +#define EVENT_LOG_TAG_SDIO_PRHDRS 89 +#define EVENT_LOG_TAG_SDIO_PRPKT 90 +#define EVENT_LOG_TAG_SDIO_INFORM 91 +#define EVENT_LOG_TAG_MIMO_PS_ERROR 92 +#define EVENT_LOG_TAG_MIMO_PS_TRACE 93 +#define EVENT_LOG_TAG_MIMO_PS_INFO 94 +#define EVENT_LOG_TAG_BTCX_STATS 95 +#define EVENT_LOG_TAG_LEAKY_AP_STATS 96 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97 +#endif /* WLAWDL */ + +#define EVENT_LOG_TAG_MIMO_PS_STATS 98 +#define EVENT_LOG_TAG_PWRSTATS_PHY 99 +#define EVENT_LOG_TAG_PWRSTATS_SCAN 100 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_PWRSTATS_AWDL 101 +#endif /* WLAWDL */ + +#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102 +#define EVENT_LOG_TAG_LQM 103 +#define EVENT_LOG_TAG_TRACE_WL_INFO 104 +#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO 105 +#define EVENT_LOG_TAG_ECOUNTERS_TIME_DATA 106 +#define EVENT_LOG_TAG_NAN_ERROR 107 +#define EVENT_LOG_TAG_NAN_INFO 108 +#define EVENT_LOG_TAG_NAN_DBG 109 
+#define EVENT_LOG_TAG_STF_ARBITRATOR_ERROR 110 +#define EVENT_LOG_TAG_STF_ARBITRATOR_TRACE 111 +#define EVENT_LOG_TAG_STF_ARBITRATOR_WARN 112 +#define EVENT_LOG_TAG_SCAN_SUMMARY 113 +#define EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT 114 +#define EVENT_LOG_TAG_OCL_INFO 115 +#define EVENT_LOG_TAG_RSDB_PMGR_DEBUG 116 +#define EVENT_LOG_TAG_RSDB_PMGR_ERR 117 +#define EVENT_LOG_TAG_NAT_ERR 118 +#define EVENT_LOG_TAG_NAT_WARN 119 +#define EVENT_LOG_TAG_NAT_INFO 120 +#define EVENT_LOG_TAG_NAT_DEBUG 121 +#define EVENT_LOG_TAG_STA_INFO 122 +#define EVENT_LOG_TAG_PROXD_ERROR 123 +#define EVENT_LOG_TAG_PROXD_TRACE 124 +#define EVENT_LOG_TAG_PROXD_INFO 125 +#define EVENT_LOG_TAG_IE_ERROR 126 +#define EVENT_LOG_TAG_ASSOC_ERROR 127 +#define EVENT_LOG_TAG_SCAN_ERR 128 +#define EVENT_LOG_TAG_AMSDU_ERROR 129 +#define EVENT_LOG_TAG_AMPDU_ERROR 130 +#define EVENT_LOG_TAG_KM_ERROR 131 +#define EVENT_LOG_TAG_DFS 132 +#define EVENT_LOG_TAG_REGULATORY 133 +#define EVENT_LOG_TAG_CSA 134 +#define EVENT_LOG_TAG_WNM_BSSTRANS_ERR 135 +#define EVENT_LOG_TAG_SUP_INFO 136 +#define EVENT_LOG_TAG_SUP_ERROR 137 +#define EVENT_LOG_TAG_CHANCTXT_TRACE 138 +#define EVENT_LOG_TAG_CHANCTXT_INFO 139 +#define EVENT_LOG_TAG_CHANCTXT_ERROR 140 +#define EVENT_LOG_TAG_CHANCTXT_WARN 141 +#define EVENT_LOG_TAG_MSCHPROFILE 142 +#define EVENT_LOG_TAG_4WAYHANDSHAKE 143 +#define EVENT_LOG_TAG_MSCHPROFILE_TLV 144 +#define EVENT_LOG_TAG_ADPS 145 +#define EVENT_LOG_TAG_MBO_DBG 146 +#define EVENT_LOG_TAG_MBO_INFO 147 +#define EVENT_LOG_TAG_MBO_ERR 148 +#define EVENT_LOG_TAG_TXDELAY 149 +#define EVENT_LOG_TAG_BCNTRIM_INFO 150 +#define EVENT_LOG_TAG_BCNTRIM_TRACE 151 +#define EVENT_LOG_TAG_OPS_INFO 152 +#define EVENT_LOG_TAG_STATS 153 +#define EVENT_LOG_TAG_BAM 154 +#define EVENT_LOG_TAG_TXFAIL 155 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_AWDL_CONFIG_DBG 156 +#define EVENT_LOG_TAG_AWDL_SYNC_DBG 157 +#define EVENT_LOG_TAG_AWDL_PEER_DBG 158 +#endif /* WLAWDL */ + +#define EVENT_LOG_TAG_RANDMAC_INFO 159 +#define EVENT_LOG_TAG_RANDMAC_DBG 160 +#define EVENT_LOG_TAG_RANDMAC_ERR 161 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_AWDL_DFSP_DBG 162 +#endif /* WLAWDL */ + +#define EVENT_LOG_TAG_MSCH_CAL 163 +#define EVENT_LOG_TAG_MSCH_OPP_CAL 164 +#define EVENT_LOG_TAG_MSCH 165 +#define EVENT_LOG_TAG_NAN_SYNC 166 +#define EVENT_LOG_TAG_NAN_DPE 167 +#define EVENT_LOG_TAG_NAN_SCHED 168 +#define EVENT_LOG_TAG_NAN_RNG 169 +#define EVENT_LOG_TAG_NAN_DAM 170 +#define EVENT_LOG_TAG_NAN_NA 171 +#define EVENT_LOG_TAG_NAN_NDL 172 +#define EVENT_LOG_TAG_NAN_NDP 173 +#define EVENT_LOG_TAG_NAN_SEC 174 +#define EVENT_LOG_TAG_NAN_MAC 175 +#define EVENT_LOG_TAG_NAN_FSM 176 + +#define EVENT_LOG_TAG_TPA_ERR 192 +#define EVENT_LOG_TAG_TPA_INFO 193 +#define EVENT_LOG_TAG_OCE_DBG 194 +#define EVENT_LOG_TAG_OCE_INFO 195 +#define EVENT_LOG_TAG_OCE_ERR 196 +#define EVENT_LOG_TAG_WL_WARN 197 +#define EVENT_LOG_TAG_SB_ERR 198 +#define EVENT_LOG_TAG_SB_INFO 199 +#define EVENT_LOG_TAG_SB_SCHED 200 +#define EVENT_LOG_TAG_ADPS_INFO 201 +#define EVENT_LOG_TAG_SB_CMN_SYNC_INFO 202 +#define EVENT_LOG_TAG_PHY_CAL_INFO 203 /* PHY CALs scheduler info */ +#define EVENT_LOG_TAG_EVT_NOTIF_INFO 204 +#define EVENT_LOG_TAG_PHY_HC_ERROR 205 +#define EVENT_LOG_TAG_PHY_TXPWR_WARN 206 +#define EVENT_LOG_TAG_PHY_TXPWR_INFO 207 +#define EVENT_LOG_TAG_PHY_ACI_INFO 208 +#define EVENT_LOG_TAG_WL_COUNTERS_AUX 209 +#define EVENT_LOG_TAG_AMPDU_DUMP_AUX 210 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_PWRSTATS_AWDL_AUX 211 +#endif /* WLAWDL */ + +#define EVENT_LOG_TAG_PWRSTATS_PHY_AUX 212 +#define 
EVENT_LOG_TAG_PWRSTATS_SCAN_AUX 213 +#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2_AUX 214 +#define EVENT_LOG_TAG_SVT_TESTING 215 /* SVT testing/verification */ +#define EVENT_LOG_TAG_HND_SMD_ERROR 216 +#define EVENT_LOG_TAG_PSBW_INFO 217 +#define EVENT_LOG_TAG_PHY_CAL_DBG 218 +#define EVENT_LOG_TAG_FILS_DBG 219 +#define EVENT_LOG_TAG_FILS_INFO 220 +#define EVENT_LOG_TAG_FILS_ERROR 221 +#define EVENT_LOG_TAG_UNUSED1 222 +#define EVENT_LOG_TAG_UNUSED2 223 +#define EVENT_LOG_TAG_PPR_ERROR 224 + +/* Arbitrator callback log tags */ +#define EVENT_LOG_TAG_STF_ARB_CB_TRACE 224 +#define EVENT_LOG_TAG_STF_ARB_CB_ERROR 225 +#define EVENT_LOG_TAG_PHY_PERIODIC_SEC 226 +#define EVENT_LOG_TAG_RTE_ERROR 227 +#define EVENT_LOG_TAG_CPLT_ERROR 228 +#define EVENT_LOG_TAG_DNGL_ERROR 229 +#define EVENT_LOG_TAG_NVRAM_ERROR 230 +#define EVENT_LOG_TAG_NAC 231 +#define EVENT_LOG_TAG_HP2P_ERR 232 +#define EVENT_LOG_TAG_SB_SCHED_DBG_SYNC 233 +#define EVENT_LOG_TAG_ENHANCED_TS 234 + +/* Available space for new tags for Dingo, Iguana and branches + * prior to Koala only. From Koala onwards, new tags must be greater + * than 255. If a tag is required for Koala and legacy productization branches, + * add that tag here. Tags > 255 will generate extended header. Legacy code + * does not understand extended header. + */ + +/* Debug tags for making debug builds */ +#define EVENT_LOG_TAG_DBG1 251 +#define EVENT_LOG_TAG_DBG2 252 +#define EVENT_LOG_TAG_DBG3 253 +#define EVENT_LOG_TAG_DBG4 254 +#define EVENT_LOG_TAG_DBG5 255 + +/* Insert new tags here for Koala onwards */ + +/* NAN INFO/ERR event tags */ +#define EVENT_LOG_TAG_NAN_SYNC_INFO 256 +#define EVENT_LOG_TAG_NAN_DPE_INFO 257 +#define EVENT_LOG_TAG_NAN_SCHED_INFO 258 +#define EVENT_LOG_TAG_NAN_RNG_INFO 259 +#define EVENT_LOG_TAG_NAN_DAM_INFO 260 +#define EVENT_LOG_TAG_NAN_NA_INFO 261 +#define EVENT_LOG_TAG_NAN_NDL_INFO 262 +#define EVENT_LOG_TAG_NAN_NDP_INFO 263 +#define EVENT_LOG_TAG_NAN_SEC_INFO 264 +#define EVENT_LOG_TAG_NAN_MAC_INFO 265 +#define EVENT_LOG_TAG_NAN_FSM_INFO 266 +#define EVENT_LOG_TAG_NAN_PEER_INFO 267 +#define EVENT_LOG_TAG_NAN_AVAIL_INFO 268 +#define EVENT_LOG_TAG_NAN_CMN_INFO 269 +#define EVENT_LOG_TAG_NAN_SYNC_ERR 270 +#define EVENT_LOG_TAG_NAN_DPE_ERR 271 +#define EVENT_LOG_TAG_NAN_SCHED_ERR 272 +#define EVENT_LOG_TAG_NAN_RNG_ERR 273 +#define EVENT_LOG_TAG_NAN_DAM_ERR 274 +#define EVENT_LOG_TAG_NAN_NA_ERR 275 +#define EVENT_LOG_TAG_NAN_NDL_ERR 276 +#define EVENT_LOG_TAG_NAN_NDP_ERR 277 +#define EVENT_LOG_TAG_NAN_SEC_ERR 278 +#define EVENT_LOG_TAG_NAN_MAC_ERR 279 +#define EVENT_LOG_TAG_NAN_FSM_ERR 280 +#define EVENT_LOG_TAG_NAN_PEER_ERR 281 +#define EVENT_LOG_TAG_NAN_AVAIL_ERR 282 +#define EVENT_LOG_TAG_NAN_CMN_ERR 283 + +/* More NAN DBG event tags */ +#define EVENT_LOG_TAG_NAN_PEER 284 +#define EVENT_LOG_TAG_NAN_AVAIL 285 +#define EVENT_LOG_TAG_NAN_CMN 286 + +#define EVENT_LOG_TAG_SAE_ERROR 287 +#define EVENT_LOG_TAG_SAE_INFO 288 + +/* rxsig module logging */ +#define EVENT_LOG_TAG_RXSIG_ERROR 289 +#define EVENT_LOG_TAG_RXSIG_DEBUG 290 +#define EVENT_LOG_TAG_RXSIG_INFO 291 + +/* HE TWT HEB EVENT_LOG_TAG */ +#define EVENT_LOG_TAG_WL_HE_INFO 292 +#define EVENT_LOG_TAG_WL_HE_TRACE 293 +#define EVENT_LOG_TAG_WL_HE_WARN 294 +#define EVENT_LOG_TAG_WL_HE_ERROR 295 +#define EVENT_LOG_TAG_WL_TWT_INFO 296 +#define EVENT_LOG_TAG_WL_TWT_TRACE 297 +#define EVENT_LOG_TAG_WL_TWT_WARN 298 +#define EVENT_LOG_TAG_WL_TWT_ERROR 299 +#define EVENT_LOG_TAG_WL_HEB_ERROR 300 +#define EVENT_LOG_TAG_WL_HEB_TRACE 301 + +/* RRM EVENT_LOG_TAG */ +#define EVENT_LOG_TAG_RRM_DBG 302
+#define EVENT_LOG_TAG_RRM_INFO 303 +#define EVENT_LOG_TAG_RRM_ERR 304 + +/* scan core */ +#define EVENT_LOG_TAG_SC 305 + +#define EVENT_LOG_TAG_ESP_DBG 306 +#define EVENT_LOG_TAG_ESP_INFO 307 +#define EVENT_LOG_TAG_ESP_ERR 308 + +/* SDC */ +#define EVENT_LOG_TAG_SDC_DBG 309 +#define EVENT_LOG_TAG_SDC_INFO 310 +#define EVENT_LOG_TAG_SDC_ERR 311 + +/* RTE */ +#define EVENT_LOG_TAG_RTE_ERR 312 + +/* TX FIFO */ +#define EVENT_LOG_TAG_FIFO_INFO 313 + +/* PKTTS */ +#define EVENT_LOG_TAG_LATENCY_INFO 314 + +/* TDLS */ +#define EVENT_LOG_TAG_WL_TDLS_INFO 315 +#define EVENT_LOG_TAG_WL_TDLS_DBG 316 +#define EVENT_LOG_TAG_WL_TDLS_ERR 317 + +/* MSCH messages */ +#define EVENT_LOG_TAG_MSCH_DATASTRUCT 319 /* don't use, kept for backward compatibility */ +#define EVENT_LOG_TAG_MSCH_PROFILE 319 +#define EVENT_LOG_TAG_MSCH_REGISTER 320 +#define EVENT_LOG_TAG_MSCH_CALLBACK 321 +#define EVENT_LOG_TAG_MSCH_ERROR 322 +#define EVENT_LOG_TAG_MSCH_DEBUG 323 +#define EVENT_LOG_TAG_MSCH_INFORM 324 +#define EVENT_LOG_TAG_MSCH_TRACE 325 + +/* bus low power related info messages */ +#define EVENT_LOG_TAG_WL_BUS_LP_INFO 326 +#define EVENT_LOG_TAG_PCI_LP_INFO 327 + +/* SBSS BT-Coex */ +#define EVENT_LOG_TAG_SB_BTCX_INFO 328 + +/* wbus */ +#define EVENT_LOG_TAG_WBUS_ERR 329 +#define EVENT_LOG_TAG_WBUS_INFO 330 +#define EVENT_LOG_TAG_WBUS_SCHED 331 + +/* MODESW */ +#define EVENT_LOG_TAG_MODESW_ERR 332 + +/* LPHS */ +#define EVENT_LOG_TAG_LPHS_ERR 333 + +/* CPU statistics */ +#define EVENT_LOG_TAG_ARM_STAT 334 + +/* Event log tags for SOE */ +#define EVENT_LOG_TAG_SOE_ERROR 335 +#define EVENT_LOG_TAG_SOE_INFO 336 + +/* Event log tags for GCI Shared Memory */ +#define EVENT_LOG_TAG_GCISHM_ERR 337 +#define EVENT_LOG_TAG_GCISHM_INFO 338 + +/* Event log tags for Enhanced Roam Log */ +#define EVENT_LOG_TAG_ROAM_ENHANCED_LOG 339 + +/* WL BTCEC */ +#define EVENT_LOG_TAG_BTCEC_ERR 340 +#define EVENT_LOG_TAG_BTCEC_INFO 341 +#define EVENT_LOG_TAG_BTCEC_SCHED 342 + +#ifdef WLAWDL +#define EVENT_LOG_TAG_AWDL_HC 343 +#endif /* WLAWDL */ + +#ifdef SLOT_SCHED +#define EVENT_LOG_TAG_SBSS_HC 344 +#endif /* SLOT_SCHED */ + +/* wlc_chan_cal */ +#define EVENT_LOG_TAG_WCC_ERR 345 +#define EVENT_LOG_TAG_WCC_INFO 346 +#define EVENT_LOG_TAG_WCC_TRACE 347 + +/* AMT logging */ +#define EVENT_LOG_TAG_AMT_ERR 348 +#define EVENT_LOG_TAG_AMT_INFO 349 +#define EVENT_LOG_TAG_AMT_TRACE 350 + +/* OBSS hw logging */ +#define EVENT_LOG_TAG_WLC_OBSS_ERR 351 +#define EVENT_LOG_TAG_WLC_OBSS_TRACE 352 +#define EVENT_LOG_TAG_WLC_OBSS_INFO 353 + +#define EVENT_LOG_TAG_ALLOC_TRACE 354 + +/* ASSOC and SUP state machine log tags */ +#define EVENT_LOG_TAG_ASSOC_SM 355 +#define EVENT_LOG_TAG_SUP_SM 356 +/* Place holders for additional state machine logging */ +#define EVENT_LOG_TAG_AUTH_SM 357 +#define EVENT_LOG_TAG_SAE_SM 358 +#define EVENT_LOG_TAG_FTM_SM 359 +#define EVENT_LOG_TAG_NAN_SM 360 + +/* HP2P - RLLW logging */ +#define EVENT_LOG_TAG_RLLW_TRACE 361 + +#define EVENT_LOG_TAG_SDTC_INFO 362 +#define EVENT_LOG_TAG_SDTC_ERR 363 + +/* KEEPALIVE logging */ +#define EVENT_LOG_TAG_KEEPALIVE 364 +#define EVENT_LOG_TAG_DTIM_SCHED_LOG 365 + +/* For printing PHY init time in the event logs for both slices.
*/ +#define EVENT_LOG_TAG_PHY_INIT_TM 366 + +/* SensorC Coex logging */ +#define EVENT_LOG_TAG_SSCCX_ERR 367 +#define EVENT_LOG_TAG_SSCCX_INFO 368 +#define EVENT_LOG_TAG_SSCCX_TRACE 369 +/* TAG for channel info */ +#define EVENT_LOG_TAG_SCAN_CHANNEL_INFO 370 +/* Robust Audio Video (RAV) - Mirrored Stream Classification Service (MSCS) */ +#define EVENT_LOG_TAG_RAV_MSCS_ERROR 371 +#define EVENT_LOG_TAG_RAV_MSCS_INFO 372 + +/* DVFS state machine related tag */ +#define EVENT_LOG_TAG_DVFS_SM 373 + +/* IPL info */ +#define EVENT_LOG_TAG_IPL_INFO 374 + +/* bcmtrace */ +#define EVENT_LOG_TAG_BCM_TRACE 375 + +/* noise cal */ +#define EVENT_LOG_TAG_NOISE_CAL 376 + +/* FTM hw */ +#define EVENT_LOG_TAG_FTM_HW_ERR 377 +#define EVENT_LOG_TAG_FTM_HW_INFO 378 +#define EVENT_LOG_TAG_FTM_HW_TRACE 379 + +#define EVENT_LOG_TAG_NOISE_CAL_DBG 380 + +/* EHT EVENT_LOG_TAG */ +#define EVENT_LOG_TAG_WL_EHT_INFO 381 +#define EVENT_LOG_TAG_WL_EHT_TRACE 382 +#define EVENT_LOG_TAG_WL_EHT_WARN 383 +#define EVENT_LOG_TAG_WL_EHT_ERROR 384 + +#define EVENT_LOG_TAG_CHNCTX_INFO 385 +#define EVENT_LOG_TAG_CHNCTX_ERROR 386 +#define EVENT_LOG_TAG_ECOUNTERS_INFORM 387 +#define EVENT_LOG_TAG_STA_SC_OFLD_ERR 388 + +#define EVENT_LOG_TAG_PKTFLTR_INFO 389 +#define EVENT_LOG_TAG_PKTFLTR_TRACE 390 +#define EVENT_LOG_TAG_PKTFLTR_WARN 391 +#define EVENT_LOG_TAG_PKTFLTR_ERROR 392 +/* EVENT_LOG_TAG_MAX = set to the same value as the last tag, not last tag + 1 */ +#define EVENT_LOG_TAG_MAX 392 + +typedef enum wl_el_set_type_def { + EVENT_LOG_SET_TYPE_DEFAULT = 0, /* flush the log buffer when it is full - Default option */ + EVENT_LOG_SET_TYPE_PRSRV = 1, /* flush the log buffer based on fw or host trigger */ + EVENT_LOG_SET_TYPE_DFLUSH = 2 /* flush the log buffer once the watermark is reached */ +} wl_el_set_type_def_t; + +#define EVENT_LOG_TAG_FLUSH_NONE 0x00 /* No flush */ +#define EVENT_LOG_TAG_FLUSH_ALL 0x40 /* Flush all preserved sets */ +#define EVENT_LOG_TAG_FLUSH_SETNUM 0x80 /* Flush preserved set */ +#define EVENT_LOG_TAG_FLUSH_MASK 0x3f /* SetNum Mask */ + +typedef enum wl_el_flush_type { + EL_TAG_PRSRV_FLUSH_NONE = 0, /* No flush of preserve buf on this tag */ + EL_TAG_PRSRV_FLUSH_SETNUM, /* Flush the buffer set specified on this tag */ + EL_TAG_PRSRV_FLUSH_ALL /* Flush all preserved buffer sets on this tag */ +} wl_el_flush_type_t; + +#define EVENT_LOG_FLUSH_CURRENT_VERSION 0 +typedef struct wl_el_set_flush_prsrv_s { + uint16 version; + uint16 len; + uint16 tag; /* Tag for which preserve flush should be done */ + uint8 flush_type; /* Check wl_el_flush_type_t */ + uint8 set_num; /* Log set num to flush. Max is NUM_EVENT_LOG_SETS. Valid only when + * action is EVENT_LOG_TAG_FLUSH_SETNUM + */ +} wl_el_set_flush_prsrv_t; + +#define SD_PRHDRS(i, s, h, p, n, l) +#define SD_PRPKT(m, b, n) +#define SD_INFORM(args) + +/* Flags for tag control */ +#define EVENT_LOG_TAG_FLAG_NONE 0 +#define EVENT_LOG_TAG_FLAG_LOG 0x80 +#define EVENT_LOG_TAG_FLAG_PRINT 0x40 +#define EVENT_LOG_TAG_FLAG_SET_MASK 0x3f + +/* Each event log entry has a type. The type is the LAST word of the + * event log. The printing code walks the event entries in reverse + * order to find the first entry. + */ +typedef union event_log_hdr { + struct { + uint8 tag; /* Event_log entry tag */ + uint8 count; /* Count of 4-byte entries */ + uint16 fmt_num; /* Format number */ + }; + uint32 t; /* Type cheat */ +} event_log_hdr_t;
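Because the header is the last word of each entry, a printer walks a filled buffer back-to-front. A minimal sketch of one backward step (illustrative only; a real parser must also bounds-check the buffer and account for extended headers, described next):

static const uint32 *
event_log_prev_hdr(const uint32 *hdr_word)
{
	event_log_hdr_t h;

	h.t = *hdr_word;
	/* skip this entry's `count` payload words to land on the previous header */
	return hdr_word - h.count - 1;
}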
+ +/* for internal use - legacy max tag */ +#define EVENT_LOG_TAG_MAX_LEGACY_FORMAT 255 + +/* + * The position of the extended header in the event log stream will be as follows: + * + * Extended header could be due to count > 255 or tag > 255. + * + * Extended count: 6 bits long. 8 bits (existing) + 6 bits => + * 2^14 words = 65536 bytes payload max + * Extended count field is currently reserved + * Extended tag: 8 (existing) + 4 bits = 12 bits => 2^12 = 4096 tags + * bits[7..4] of extended tags are reserved. + * MSB 16 bits of the extended header are reserved for future use. + */ + +typedef union event_log_extended_hdr { + struct { + uint8 extended_tag; /* Extended tag, bits[7..4] are reserved */ + uint8 extended_count; /* Extended count. Reserved for now. */ + uint16 rsvd; /* Reserved */ + }; + + uint32 t; /* Type cheat */ +} event_log_extended_hdr_t; +#endif /* _EVENT_LOG_TAG_H_ */
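Under the layout described above, the effective 12-bit tag would be the 8-bit base tag widened by the low 4 bits of extended_tag. A sketch under that assumption (the exact bit placement is inferred from the comment, not from code in this patch):

static uint16
event_log_effective_tag(event_log_hdr_t h, event_log_extended_hdr_t x)
{
	/* assumed: extension nibble forms bits [11..8] of the full tag */
	return (uint16)((((uint16)x.extended_tag & 0x0Fu) << 8) | h.tag);
}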
diff --git a/bcmdhd.101.10.361.x/include/event_trace.h b/bcmdhd.101.10.361.x/include/event_trace.h new file mode 100755 index 0000000..3be93c6 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/event_trace.h @@ -0,0 +1,187 @@ +/* + * Trace log blocks sent over HBUS + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +/** + * @file + * @brief + * Define the trace event ID and tag ID + */ + +#ifndef _WL_DIAG_H +#define _WL_DIAG_H + +#include <typedefs.h> + +#define DIAG_MAJOR_VERSION 1 /* 4 bits */ +#define DIAG_MINOR_VERSION 0 /* 4 bits */ +#define DIAG_MICRO_VERSION 0 /* 4 bits */ + +#define DIAG_VERSION \ + ((DIAG_MICRO_VERSION&0xF) | (DIAG_MINOR_VERSION&0xF)<<4 | \ + (DIAG_MAJOR_VERSION&0xF)<<8) + /* bit[11:8] major ver */ + /* bit[7:4] minor ver */ + /* bit[3:0] micro ver */ + +/* event IDs for trace purposes only, to avoid conflict with future new +* WLC_E_ values; starting from 0x8000 +*/ +#define TRACE_FW_AUTH_STARTED 0x8000 +#define TRACE_FW_ASSOC_STARTED 0x8001 +#define TRACE_FW_RE_ASSOC_STARTED 0x8002 +#define TRACE_G_SCAN_STARTED 0x8003 +#define TRACE_ROAM_SCAN_STARTED 0x8004 +#define TRACE_ROAM_SCAN_COMPLETE 0x8005 +#define TRACE_FW_EAPOL_FRAME_TRANSMIT_START 0x8006 +#define TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP 0x8007 +#define TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE 0x8008 /* protocol status */ +#define TRACE_BT_COEX_BT_SCO_START 0x8009 +#define TRACE_BT_COEX_BT_SCO_STOP 0x800a +#define TRACE_BT_COEX_BT_SCAN_START 0x800b +#define TRACE_BT_COEX_BT_SCAN_STOP 0x800c +#define TRACE_BT_COEX_BT_HID_START 0x800d +#define TRACE_BT_COEX_BT_HID_STOP 0x800e +#define TRACE_ROAM_AUTH_STARTED 0x800f +/* Event ID for NAN, start from 0x9000 */ +#define TRACE_NAN_CLUSTER_STARTED 0x9000 +#define TRACE_NAN_CLUSTER_JOINED 0x9001 +#define TRACE_NAN_CLUSTER_MERGED 0x9002 +#define TRACE_NAN_ROLE_CHANGED 0x9003 +#define TRACE_NAN_SCAN_COMPLETE 0x9004 +#define TRACE_NAN_STATUS_CHNG 0x9005 + +/* Parameters of wifi logger events are TLVs */ +/* Event parameters tags are defined as: */ +#define TRACE_TAG_VENDOR_SPECIFIC 0 /* takes a byte stream as parameter */ +#define TRACE_TAG_BSSID 1 /* takes a 6-byte MAC address as parameter */ +#define TRACE_TAG_ADDR 2 /* takes a 6-byte MAC address as parameter */ +#define TRACE_TAG_SSID 3 /* takes a 32-byte SSID as parameter */ +#define TRACE_TAG_STATUS 4 /* takes an integer as parameter */ +#define TRACE_TAG_CHANNEL_SPEC 5 /* takes one or more wifi_channel_spec as */ + /* parameter */ +#define TRACE_TAG_WAKE_LOCK_EVENT 6 /* takes a wake_lock_event struct as parameter */ +#define TRACE_TAG_ADDR1 7 /* takes a 6-byte MAC address as parameter */ +#define TRACE_TAG_ADDR2 8 /* takes a 6-byte MAC address as parameter */ +#define TRACE_TAG_ADDR3 9 /* takes a 6-byte MAC address as parameter */ +#define TRACE_TAG_ADDR4 10 /* takes a 6-byte MAC address as parameter */ +#define TRACE_TAG_TSF 11 /* takes a 64-bit TSF value as parameter */ +#define TRACE_TAG_IE 12 /* takes one or more specific 802.11 IEs as */ + /* parameter; IEs are in turn indicated in */ + /* TLV format as per the 802.11 spec */ +#define TRACE_TAG_INTERFACE 13 /* takes interface name as parameter */ +#define TRACE_TAG_REASON_CODE 14 /* takes a reason code as per 802.11 */ + /* as parameter */ +#define TRACE_TAG_RATE_MBPS 15 /* takes a wifi rate in 0.5 Mbps units */ +#define TRACE_TAG_REQUEST_ID 16 /* takes an integer as parameter */ +#define TRACE_TAG_BUCKET_ID 17 /* takes an integer as parameter */ +#define TRACE_TAG_GSCAN_PARAMS 18 /* takes a wifi_scan_cmd_params struct as parameter */ +#define TRACE_TAG_GSCAN_CAPABILITIES 19 /* takes a wifi_gscan_capabilities struct as parameter */ +#define TRACE_TAG_SCAN_ID 20 /* takes an integer as parameter */ +#define TRACE_TAG_RSSI 21 /* takes an integer as parameter */ +#define TRACE_TAG_CHANNEL 22 /* takes an integer as parameter */ +#define TRACE_TAG_LINK_ID 23 /* takes an integer as parameter */
#define TRACE_TAG_LINK_ROLE 24 /* takes an integer as parameter */ +#define TRACE_TAG_LINK_STATE 25 /* takes an integer as parameter */ +#define TRACE_TAG_LINK_TYPE 26 /* takes an integer as parameter */ +#define TRACE_TAG_TSCO 27 /* takes an integer as parameter */ +#define TRACE_TAG_RSCO 28 /* takes an integer as parameter */ +#define TRACE_TAG_EAPOL_MESSAGE_TYPE 29 /* takes an integer as parameter */ + /* M1-1, M2-2, M3-3, M4-4 */ + +typedef union { + struct { + uint16 event: 16; + uint16 version: 16; + }; + uint32 t; +} wl_event_log_id_ver_t; + +#define ETHER_ADDR_PACK_LOW(addr) (((addr)->octet[3])<<24 | ((addr)->octet[2])<<16 | \ + ((addr)->octet[1])<<8 | ((addr)->octet[0])) +#define ETHER_ADDR_PACK_HI(addr) (((addr)->octet[5])<<8 | ((addr)->octet[4])) +#define SSID_PACK(addr) (((uint8)(addr)[0])<<24 | ((uint8)(addr)[1])<<16 | \ + ((uint8)(addr)[2])<<8 | ((uint8)(addr)[3])) + +/* for each event id with logging data, define its logging data structure */ + +typedef union { + struct { + uint16 status: 16; + uint16 paraset: 16; + }; + uint32 t; +} wl_event_log_blk_ack_t; + +typedef union { + struct { + uint8 mode: 8; + uint8 count: 8; + uint16 ch: 16; + }; + uint32 t; +} wl_event_log_csa_t; + +typedef union { + struct { + uint8 status: 1; + uint16 notused: 15; + uint16 frag_tx_cnt: 16; + }; + uint32 t; +} wl_event_log_eapol_tx_t; + +typedef union { + struct { + uint16 tag; + uint16 length; /* length of value in bytes */ + }; + uint32 t; +} wl_event_log_tlv_hdr_t; + +#ifdef WL_EVENT_LOG_COMPILE +#define _WL_EVENT_LOG(tag, event, ...) \ + do { \ + event_log_top_t * event_log_top = event_log_get_top(); \ + wl_event_log_id_ver_t entry = {{event, DIAG_VERSION}}; \ + event_log_top->timestamp = OSL_SYSUPTIME(); \ + EVENT_LOG(tag, "WL event", entry.t, ## __VA_ARGS__); \ + } while (0) +#define WL_EVENT_LOG(args) _WL_EVENT_LOG args +#else +#define WL_EVENT_LOG(args) +#endif /* WL_EVENT_LOG_COMPILE */ + +#ifdef NAN_EVENT_LOG_COMPILE +#define _NAN_EVENT_LOG(tag, event, ...) \ + do { \ + event_log_top_t * event_log_top = event_log_get_top(); \ + wl_event_log_id_ver_t hdr = {{event, DIAG_VERSION}}; \ + event_log_top->timestamp = OSL_SYSUPTIME(); \ + EVENT_LOG(tag, "NAN event", hdr.t, ## __VA_ARGS__); \ + } while (0) +#define NAN_EVENT_LOG(args) _NAN_EVENT_LOG args +#else +#define NAN_EVENT_LOG(args) +#endif /* NAN_EVENT_LOG_COMPILE */ + +#endif /* _WL_DIAG_H */
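WL_EVENT_LOG() takes its entire argument list as one parenthesized macro argument, which is why the wrapper expands `_WL_EVENT_LOG args`. A hypothetical call site (compiled to nothing unless WL_EVENT_LOG_COMPILE is defined; tag and event values are from the headers above):

/* log the start of a roam scan under the WL info trace tag */
WL_EVENT_LOG((EVENT_LOG_TAG_TRACE_WL_INFO, TRACE_ROAM_SCAN_STARTED));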
diff --git a/bcmdhd.101.10.361.x/include/fils.h b/bcmdhd.101.10.361.x/include/fils.h new file mode 100755 index 0000000..1797abf --- /dev/null +++ b/bcmdhd.101.10.361.x/include/fils.h @@ -0,0 +1,424 @@ +/* + * Fundamental types and constants relating to FILS AUTHENTICATION + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _FILSAUTH_H_ +#define _FILSAUTH_H_ + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + +/* 11ai D6.0 8.6.8.36 FILS Discovery frame format + category + action + fils_discovery_info_field_t + fils_rnr_element_t + fils_indication_element_t + fils_vendor_specific_element_t +*/ + +/* 11revmc D4.0 8.4.2.25 Vendor Specific element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_vendor_specific_element { + uint8 elementid; + uint8 length; + /* variable len info */ + uint8 orgid_vendorspecific_content[]; +} BWL_POST_PACKED_STRUCT fils_vendor_specific_element_t; + +#define FILS_VS_ELEM_HDR_LEN (sizeof(fils_vendor_specific_element_t)) + +/* 11ai D6.0 8.4.2.178 FILS Indication element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_indication_element { + uint8 elementid; + uint8 length; + uint16 fils_info; + /* variable len info */ + uint8 cache_domain_publickey_id[]; +} BWL_POST_PACKED_STRUCT fils_indication_element_t; + +#define FILS_INDICATION_ELEM_HDR_LEN (sizeof(fils_indication_element_t)) + +#define FILS_INDICATION_IE_TAG_FIXED_LEN 2 + +#define FI_INFO_CACHE_IND_SUBFIELD_SIZE 2 + +/* FILS Indication Information field */ +#define FI_INFO_PUB_KEY_IDENTS_MASK (0x0007) +#define FI_INFO_REALM_IDENTS_MASK (0x0038) +#define FI_INFO_IP_ADDR_CFG_MASK (0x0040) +#define FI_INFO_CACHE_IDENT_MASK (0x0080) +#define FI_INFO_HESSID_MASK (0x0100) +#define FI_INFO_SHRKEY_AUTH_WOPFS_MASK (0x0200) +#define FI_INFO_SHRKEY_AUTH_WPFS_MASK (0x0400) +#define FI_INFO_PUBKEY_AUTH_MASK (0x0800) + +#define FI_INFO_CACHE_IDENT(fc) (((fc) & FI_INFO_CACHE_IDENT_MASK) != 0) +#define FI_INFO_HESSID(fc) (((fc) & FI_INFO_HESSID_MASK) != 0) +#define FI_INFO_SHRKEY_AUTH_WOPFS(fc) (((fc) & FI_INFO_SHRKEY_AUTH_WOPFS_MASK) != 0) +#define FI_INFO_SHRKEY_AUTH_WPFS(fc) (((fc) & FI_INFO_SHRKEY_AUTH_WPFS_MASK) != 0) + +typedef struct ether_addr tbtt_bssid_t; + +/* As per D5.0 in 802.11ax, Table 9-281, TBTT Information field contents. */
+typedef BWL_PRE_PACKED_STRUCT union rnr_tbtt_info_field { + BWL_PRE_PACKED_STRUCT struct len2 { + uint8 tbtt_offset; + uint8 bss_params; + } BWL_POST_PACKED_STRUCT len2_t; + + BWL_PRE_PACKED_STRUCT struct len5 { + uint8 tbtt_offset; + uint32 short_ssid; + } BWL_POST_PACKED_STRUCT len5_t; + + BWL_PRE_PACKED_STRUCT struct len6 { + uint8 tbtt_offset; + uint32 short_ssid; + uint8 bss_params; + } BWL_POST_PACKED_STRUCT len6_t; + + BWL_PRE_PACKED_STRUCT struct len7 { + uint8 tbtt_offset; + tbtt_bssid_t bssid; + } BWL_POST_PACKED_STRUCT len7_t; + + BWL_PRE_PACKED_STRUCT struct len8 { + uint8 tbtt_offset; + tbtt_bssid_t bssid; + uint8 bss_params; + } BWL_POST_PACKED_STRUCT len8_t; + + BWL_PRE_PACKED_STRUCT struct len9 { + uint8 tbtt_offset; + tbtt_bssid_t bssid; + uint8 bss_params; + uint8 psd_20mhz; + } BWL_POST_PACKED_STRUCT len9_t; + + BWL_PRE_PACKED_STRUCT struct len11 { + uint8 tbtt_offset; + tbtt_bssid_t bssid; + uint32 short_ssid; + } BWL_POST_PACKED_STRUCT len11_t; + + BWL_PRE_PACKED_STRUCT struct len12 { + uint8 tbtt_offset; + tbtt_bssid_t bssid; + uint32 short_ssid; + uint8 bss_params; + } BWL_POST_PACKED_STRUCT len12_t; + + BWL_PRE_PACKED_STRUCT struct len13 { + uint8 tbtt_offset; + tbtt_bssid_t bssid; + uint32 short_ssid; + uint8 bss_params; + uint8 psd_20mhz; + } BWL_POST_PACKED_STRUCT len13_t; +} BWL_POST_PACKED_STRUCT rnr_tbtt_info_field_t; + +/* 11ai D11.0 9.4.2.171.1 TBTT Information field */ +typedef BWL_PRE_PACKED_STRUCT struct tbtt_info_field { + uint8 tbtt_offset; + struct ether_addr bssid; + uint32 short_ssid; + uint8 bss_params; +} BWL_POST_PACKED_STRUCT tbtt_info_field_t; +#define TBTT_INFO_FIELD_HDR_LEN (sizeof(tbtt_info_field_t)) + +/* 11ai D11.0 9.4.2.171.1 Neighbor AP Information field */ +typedef BWL_PRE_PACKED_STRUCT struct neighbor_ap_info_field { + uint16 tbtt_info_header; + uint8 op_class; + uint8 channel; + /* variable len info */ + uint8 tbtt_info_field[]; +} BWL_POST_PACKED_STRUCT neighbor_ap_info_field_t; + +#define NEIGHBOR_AP_INFO_FIELD_HDR_LEN (sizeof(neighbor_ap_info_field_t)) + +/* 11ai D11.0 9.4.2.171 Reduced Neighbor Report element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_rnr_element { + uint8 elementid; + uint8 length; + /* variable len info */ + uint8 neighbor_ap_info[]; +} BWL_POST_PACKED_STRUCT fils_rnr_element_t; + +#define FILS_RNR_ELEM_HDR_LEN (sizeof(fils_rnr_element_t)) + +/* TBTT Info Header macros */ +#define TBTT_INFO_HDR_FIELD_TYPE_MASK (0x0003u) +#define TBTT_INFO_HDR_FN_AP_MASK (0x0004u) +#define TBTT_INFO_HDR_COUNT_MASK (0x00f0u) +#define TBTT_INFO_HDR_LENGTH_MASK (0xff00u) + +#define TBTT_INFO_HDR_FIELD_TYPE(hdr)\ + ((hdr) & TBTT_INFO_HDR_FIELD_TYPE_MASK) +#define TBTT_INFO_HDR_FN_AP(hdr)\ + (((hdr) & TBTT_INFO_HDR_FN_AP_MASK) != 0) +#define TBTT_INFO_HDR_COUNT(hdr)\ + (((hdr) & TBTT_INFO_HDR_COUNT_MASK) >> 4u) +#define TBTT_INFO_HDR_LENGTH(hdr)\ + (((hdr) & TBTT_INFO_HDR_LENGTH_MASK) >> 8u)
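The TBTT Information Count field encodes the number of TBTT Information fields minus one (per 802.11), so one Neighbor AP Information entry can be sized directly from its header. An illustrative host-side sketch, assuming tbtt_info_header has already been converted to host byte order:

static uint32
nbr_ap_info_entry_len(const neighbor_ap_info_field_t *ni)
{
	uint16 hdr = ni->tbtt_info_header;

	/* fixed header plus (count + 1) TBTT Information fields of `length` bytes */
	return NEIGHBOR_AP_INFO_FIELD_HDR_LEN +
		((uint32)TBTT_INFO_HDR_COUNT(hdr) + 1u) * TBTT_INFO_HDR_LENGTH(hdr);
}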
/* BSS Params Macros */ +#define RNR_BSS_PARAMS_OCT_REC_MASK (0x01u) +#define RNR_BSS_PARAMS_SAME_SSID_MASK (0x02u) +#define RNR_BSS_PARAMS_MUTIPLE_BSSID_MASK (0x04u) +#define RNR_BSS_PARAMS_TRANSMITTED_BSSID_MASK (0x08u) +#define RNR_BSS_MEMBER_OF_ESS_MASK (0x10u) +#define RNR_BSS_20_TU_PRB_RSP_ACTIVE_MASK (0x20u) +#define RNR_BSS_COLOCATED_AP_MASK (0x40u) + +#define RNR_BSS_PARAMS_OCT_REC(bss)\ + (((bss) & RNR_BSS_PARAMS_OCT_REC_MASK) != 0) +#define RNR_BSS_PARAMS_SAME_SSID(bss)\ + (((bss) & RNR_BSS_PARAMS_SAME_SSID_MASK) != 0) +#define RNR_BSS_PARAMS_MUTIPLE_BSSID(bss)\ + (((bss) & RNR_BSS_PARAMS_MUTIPLE_BSSID_MASK) != 0) +#define RNR_BSS_PARAMS_TRANSMITTED_BSSID(bss)\ + (((bss) & RNR_BSS_PARAMS_TRANSMITTED_BSSID_MASK) != 0) +#define RNR_BSS_MEMBER_OF_ESS(bss)\ + (((bss) & RNR_BSS_MEMBER_OF_ESS_MASK) != 0) +#define RNR_BSS_20_TU_PRB_RSP_ACTIVE(bss)\ + (((bss) & RNR_BSS_20_TU_PRB_RSP_ACTIVE_MASK) != 0) +#define RNR_BSS_COLOCATED_AP(bss)\ + (((bss) & RNR_BSS_COLOCATED_AP_MASK) != 0) + +/* TBTT Information field Contents */ +/* NBR_AP TBTT OFFSET field (1 byte) */ +#define NBR_AP_TBTT_LEN 1U + +/* NBR_AP TBTT OFFSET field (1) + BSS PARAMS (1) = 2 bytes */ +#define NBR_AP_TBTT_BSS_LEN 2U + +/* NBR_AP TBTT OFFSET field (1) + SHORT SSID (4) = 5 bytes */ +#define NBR_AP_TBTT_SHORT_SSID_LEN 5U + +/* NBR_AP TBTT OFFSET field (1) + SHORT SSID (4) + BSS (1) = 6 bytes */ +#define NBR_AP_TBTT_BSS_SHORT_SSID_LEN 6U + +/* NBR_AP TBTT OFFSET field (1) + BSSID (6) = 7 bytes */ +#define NBR_AP_TBTT_BSSID_LEN 7U + +/* NBR_AP TBTT OFFSET field (1) + BSSID (6) + BSS (1) = 8 bytes */ +#define NBR_AP_TBTT_BSSID_BSS_LEN 8U + +/* NBR_AP TBTT OFFSET field (1) + BSSID (6) + BSS (1) + 20MHz PSD (1) = 9 bytes */ +#define NBR_AP_TBTT_BSSID_BSS_PSD_LEN 9U + +/* NBR_AP TBTT OFFSET field (1) + BSSID (6) + SHORT SSID (4) = 11 bytes */ +#define NBR_AP_TBTT_BSSID_SHORT_SSID_LEN 11U + +/* NBR_AP TBTT OFFSET field (1) + BSSID (6) + SHORT SSID (4) + BSS (1) = 12 bytes */ +#define NBR_AP_TBTT_BSSID_SHORT_SSID_BSS_LEN 12U + +/* NBR_AP TBTT OFFSET field (1) + BSSID (6) + + * SHORT SSID (4) + BSS (1) + 20MHz PSD (1) = 13 bytes + */ +#define NBR_AP_TBTT_BSSID_SHORT_SSID_BSS_PSD_LEN 13U + +/* FILS Nonce element */ +#define FILS_NONCE_LENGTH 16u + +typedef BWL_PRE_PACKED_STRUCT struct fils_nonce_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 fils_nonce[FILS_NONCE_LENGTH]; +} BWL_POST_PACKED_STRUCT fils_nonce_element_t; + +/* 11ai 9.4.2.186 FILS Key Delivery element */ +#define FILS_KEY_RSC_LENGTH 8u + +typedef BWL_PRE_PACKED_STRUCT struct fils_key_delivery_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 key_rsc[FILS_KEY_RSC_LENGTH]; + uint8 kde_list[]; /* Key Data Elements */ +} BWL_POST_PACKED_STRUCT fils_key_delivery_element_t; + +/* 8.4.2.175 FILS Session element */ +#define FILS_SESSION_LENGTH 8u + +typedef BWL_PRE_PACKED_STRUCT struct fils_session_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 fils_session[FILS_SESSION_LENGTH]; +} BWL_POST_PACKED_STRUCT fils_session_element_t; + +#define FILS_SESSION_ELEM_LEN (sizeof(fils_session_element_t)) + +/* 9.4.2.179 FILS key confirmation element */ +#define FILS_KEY_CONFIRMATION_HEADER_LEN 3u + +typedef BWL_PRE_PACKED_STRUCT struct fils_key_conf_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + /* variable len info */ + uint8 key_auth[]; +} BWL_POST_PACKED_STRUCT fils_key_conf_element_t; + +/* 8.4.2.174 FILS Key Confirmation element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_key_confirm_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + /* variable len info */ + uint8 keyauth[]; +} BWL_POST_PACKED_STRUCT fils_key_confirm_element_t; + +#define FILS_CONFIRM_ELEM_HDR_LEN (sizeof(fils_key_confirm_element_t)) + +/* 9.4.2.180 FILS Public Key element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_public_key_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 key_type; + /* variable len info */ + uint8 pub_key[]; +} BWL_POST_PACKED_STRUCT fils_public_key_element_t;
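These FILS elements use the Element ID Extension encoding, so the length byte covers the extension ID plus the body. A hypothetical builder for the fixed-size FILS Session element (the Element ID Extension value 255 and the FILS session extension ID come from 802.11.h, so they are treated as assumptions or parameters here):

static void
fils_session_elem_fill(fils_session_element_t *e, uint8 ext_id,
	const uint8 *session)
{
	uint32 i;

	e->elementid = 255u;	/* Element ID Extension (assumed, per 802.11) */
	e->length = (uint8)(1u + FILS_SESSION_LENGTH);	/* ext id byte + body */
	e->element_id_ext = ext_id;
	for (i = 0u; i < FILS_SESSION_LENGTH; i++) {
		e->fils_session[i] = session[i];
	}
}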
bcninterval; + /* variable len info */ + uint8 disc_info[]; +} BWL_POST_PACKED_STRUCT fils_discovery_info_field_t; + +#define FD_INFO_FIELD_HDR_LEN (sizeof(fils_discovery_info_field_t)) + +#define FD_INFO_LENGTH_FIELD_SIZE 1u +#define FD_INFO_CAP_SUBFIELD_SIZE 2u +#define FD_INFO_OPCLASS_SUBFIED_SIZE 1u +#define FD_INFO_PRIM_CHAN_SUBFIELD_SIZE 1u +#define FD_INFO_APCSN_SUBFIELD_SIZE 1u +#define FD_INFO_ANO_SUBFIELD_SIZE 1u +#define FD_INFO_RSN_INFO_SUBFIELD_SIZE 5u +#define FD_INFO_CH_CENTER_FR_SUBFIELD_SIZE 1u +#define FD_INFO_MD_SUBFIELD_SIZE 3u + +/* FILS Discovery Information field */ +#define FD_INFO_SSID_LENGTH_MASK (0x001f) +#define FD_INFO_CAP_IND_MASK (0x0020) +#define FD_INFO_SHORT_SSID_IND_MASK (0x0040) +#define FD_INFO_APCSN_IND_MASK (0x0080) +#define FD_INFO_ANO_IND_MASK (0x0100) +#define FD_INFO_CH_CENTER_FR_IND_MASK (0x0200) +#define FD_INFO_PRIMARY_CH_IND_MASK (0x0400) +#define FD_INFO_RSN_IND_MASK (0x0800) +#define FD_INFO_LENGTH_IND_MASK (0x1000) +#define FD_INFO_MD_IND_MASK (0x2000) + +#define FD_INFO_SET_SSID_LENGTH(fc, len) ((fc) |= ((uint16)(len) & FD_INFO_SSID_LENGTH_MASK)) +#define FD_INFO_SET_CAP_PRESENT(fc) ((fc) |= FD_INFO_CAP_IND_MASK) +#define FD_INFO_SET_SHORT_SSID_PRESENT(fc) ((fc) |= FD_INFO_SHORT_SSID_IND_MASK) +#define FD_INFO_SET_APCSN_PRESENT(fc) ((fc) |= FD_INFO_APCSN_IND_MASK) +#define FD_INFO_SET_ANO_PRESENT(fc) ((fc) |= FD_INFO_ANO_IND_MASK) +#define FD_INFO_SET_CH_CENTER_FR_PRESENT(fc) ((fc) |= FD_INFO_CH_CENTER_FR_IND_MASK) +#define FD_INFO_SET_PRIMARY_CH_PRESENT(fc) ((fc) |= FD_INFO_PRIMARY_CH_IND_MASK) +#define FD_INFO_SET_RSN_PRESENT(fc) ((fc) |= FD_INFO_RSN_IND_MASK) +#define FD_INFO_SET_LENGTH_PRESENT(fc) ((fc) |= FD_INFO_LENGTH_IND_MASK) +#define FD_INFO_SET_MD_PRESENT(fc) ((fc) |= FD_INFO_MD_IND_MASK) + +#define FD_INFO_SSID_LENGTH(fc) ((fc) & FD_INFO_SSID_LENGTH_MASK) +#define FD_INFO_IS_CAP_PRESENT(fc) (((fc) & FD_INFO_CAP_IND_MASK) != 0) +#define FD_INFO_IS_SHORT_SSID_PRESENT(fc) (((fc) & FD_INFO_SHORT_SSID_IND_MASK) != 0) +#define FD_INFO_IS_APCSN_PRESENT(fc) (((fc) & FD_INFO_APCSN_IND_MASK) != 0) +#define FD_INFO_IS_ANO_PRESENT(fc) (((fc) & FD_INFO_ANO_IND_MASK) != 0) +#define FD_INFO_IS_CH_CENTER_FR_PRESENT(fc) (((fc) & FD_INFO_CH_CENTER_FR_IND_MASK) != 0) +#define FD_INFO_IS_PRIMARY_CH_PRESENT(fc) (((fc) & FD_INFO_PRIMARY_CH_IND_MASK) != 0) +#define FD_INFO_IS_RSN_PRESENT(fc) (((fc) & FD_INFO_RSN_IND_MASK) != 0) +#define FD_INFO_IS_LENGTH_PRESENT(fc) (((fc) & FD_INFO_LENGTH_IND_MASK) != 0) +#define FD_INFO_IS_MD_PRESENT(fc) (((fc) & FD_INFO_MD_IND_MASK) != 0) + +/* FILS Discovery Capability subfield */ +#define FD_CAP_ESS_MASK (0x0001) +#define FD_CAP_PRIVACY_MASK (0x0002) +#define FD_CAP_BSS_CH_WIDTH_MASK (0x001c) +#define FD_CAP_MAX_NSS_MASK (0x00e0) +#define FD_CAP_MULTI_BSS_MASK (0x0200) +#define FD_CAP_PHY_INDEX_MASK (0x1c00) +#define FD_CAP_FILS_MIN_RATE_MASK (0xe000) + +#define FD_CAP_ESS(cap) (((cap) & FD_CAP_ESS_MASK) != 0) +#define FD_CAP_PRIVACY(cap) (((cap) & FD_CAP_PRIVACY_MASK) != 0) +#define FD_CAP_BSS_CH_WIDTH(cap) (((cap) & FD_CAP_BSS_CH_WIDTH_MASK) >> 2) +#define FD_CAP_MAX_NSS(cap) (((cap) & FD_CAP_MAX_NSS_MASK) >> 5) +#define FD_CAP_MULTI_BSS(cap) (((cap) & FD_CAP_MULTI_BSS_MASK) != 0) +#define FD_CAP_PHY_INDEX(cap) (((cap) & FD_CAP_PHY_INDEX_MASK) >> 10) +#define FD_CAP_FILS_MIN_RATE(cap) (((cap) & FD_CAP_FILS_MIN_RATE_MASK) >> 13) + +#define FD_CAP_SET_ESS(cap) (((cap) |= FD_CAP_ESS_MASK)) +#define FD_CAP_SET_PRIVACY(cap) (((cap) & FD_CAP_PRIVACY_MASK) >> 1) +#define FD_CAP_SET_BSS_CH_WIDTH(cap, w) ((cap) |= 
(((w) << 2) & FD_CAP_BSS_CH_WIDTH_MASK)) +#define FD_CAP_SET_MAX_NSS(cap) (((cap) & FD_CAP_MAX_NSS_MASK) >> 5) +#define FD_CAP_SET_MULTI_BSS(cap) (((cap) & FD_CAP_MULTI_BSS_MASK) >> 9) +#define FD_CAP_SET_PHY_INDEX(cap) (((cap) & FD_CAP_PHY_INDEX_MASK) >> 10) +#define FD_CAP_SET_FILS_MIN_RATE(cap) (((cap) & FD_CAP_FILS_MIN_RATE_MASK) >> 13) + +/* 11ai D6.0 8.4.2.173 FILS Request Parameters element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_request_parameters_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 params_bitmap; + /* variable len info */ + uint8 params_fields[]; +} BWL_POST_PACKED_STRUCT fils_request_parameters_element_t; + +#define FILS_PARAM_MAX_CHANNEL_TIME (1 << 2) + +/* 11ai 9.4.2.184 FILS HLP Container element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_hlp_container_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 dest_addr[ETHER_ADDR_LEN]; + uint8 src_addr[ETHER_ADDR_LEN]; + /* variable len hlp packet */ + uint8 hlp[]; +} BWL_POST_PACKED_STRUCT fils_hlp_container_element_t; + +/* 11ai 9.4.2.184 FILS Wrapped Data element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_wrapped_data_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + /* variable len wrapped data packet */ + uint8 wrapped_data[]; +} BWL_POST_PACKED_STRUCT fils_wrapped_data_element_t; + +#define FILS_HLP_CONTAINER_ELEM_LEN (sizeof(fils_hlp_container_element_t)) + +/* This marks the end of a packed structure section. */ +#include + +#endif /* __FILSAUTH_H__ */ diff --git a/bcmdhd.101.10.361.x/include/hnd_armtrap.h b/bcmdhd.101.10.361.x/include/hnd_armtrap.h new file mode 100755 index 0000000..ca41851 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hnd_armtrap.h @@ -0,0 +1,86 @@ +/* + * HND arm trap handling. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _hnd_armtrap_h_ +#define _hnd_armtrap_h_ + +/* ARM trap handling */ + +/* Trap types defined by ARM (see arminc.h) */ + +/* Trap locations in lo memory */ +#define TRAP_STRIDE 4 +#define FIRST_TRAP TR_RST +#define LAST_TRAP (TR_FIQ * TRAP_STRIDE) + +#if defined(__ARM_ARCH_7M__) +#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS) +#endif /* __ARM_ARCH_7M__ */ + +/* The trap structure is defined here as offsets for assembly */ +#define TR_TYPE 0x00 +#define TR_EPC 0x04 +#define TR_CPSR 0x08 +#define TR_SPSR 0x0c +#define TR_REGS 0x10 +#define TR_REG(n) (TR_REGS + (n) * 4) +#define TR_SP TR_REG(13) +#define TR_LR TR_REG(14) +#define TR_PC TR_REG(15) + +/* Number of core ARM registers. 
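+ * As an illustrative aside (not in the original header): dump tools read
+ * registers off a raw trap_t image via the offsets above, e.g. the faulting
+ * program counter of a trap snapshot 'tr' lives at the TR_PC offset:
+ *   uint32 fault_pc = *(uint32 *)((uint8 *)tr + TR_PC);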
*/ +#define TR_REGS_NUM 16u + +#define TRAP_T_SIZE 80 +#define ASSERT_TRAP_SVC_NUMBER 255 + +#ifndef _LANGUAGE_ASSEMBLY + +#include + +typedef struct _trap_struct { + uint32 type; + uint32 epc; + uint32 cpsr; + uint32 spsr; + uint32 r0; /* a1 */ + uint32 r1; /* a2 */ + uint32 r2; /* a3 */ + uint32 r3; /* a4 */ + uint32 r4; /* v1 */ + uint32 r5; /* v2 */ + uint32 r6; /* v3 */ + uint32 r7; /* v4 */ + uint32 r8; /* v5 */ + uint32 r9; /* sb/v6 */ + uint32 r10; /* sl/v7 */ + uint32 r11; /* fp/v8 */ + uint32 r12; /* ip */ + uint32 r13; /* sp */ + uint32 r14; /* lr */ + uint32 pc; /* r15 */ +} trap_t; + +#endif /* !_LANGUAGE_ASSEMBLY */ + +#endif /* _hnd_armtrap_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hnd_cons.h b/bcmdhd.101.10.361.x/include/hnd_cons.h new file mode 100755 index 0000000..d8ade3d --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hnd_cons.h @@ -0,0 +1,98 @@ +/* + * Console support for RTE - for host use only. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ +#ifndef _hnd_cons_h_ +#define _hnd_cons_h_ + +#include + +#if defined(RWL_DONGLE) || defined(UART_REFLECTOR) +/* For Dongle uart transport the max cmd len is 256 bytes + header length (16 bytes). + * In case of ASD commands we are not sure how large the command can be, so + * to be on the safe side, input buf len CBUF_LEN is increased to max (512) bytes. + */ +#define RWL_MAX_DATA_LEN (512 + 8) /* allow some extra bytes for '\n' termination */ +#define CBUF_LEN (RWL_MAX_DATA_LEN + 64) /* allow 64 bytes for header ("rwl...") */ +#else +#define CBUF_LEN (128) +#endif /* RWL_DONGLE || UART_REFLECTOR */ + +#ifndef LOG_BUF_LEN +#if defined(BCMDBG) || defined (BCM_BIG_LOG) +#define LOG_BUF_LEN (16 * 1024) +#elif defined(ATE_BUILD) +#define LOG_BUF_LEN (2 * 1024) +#elif defined(BCMQT) +#define LOG_BUF_LEN (16 * 1024) +#else +#define LOG_BUF_LEN 1024 +#endif +#endif /* LOG_BUF_LEN */ + +#ifdef BOOTLOADER_CONSOLE_OUTPUT +#undef RWL_MAX_DATA_LEN +#undef CBUF_LEN +#undef LOG_BUF_LEN +#define RWL_MAX_DATA_LEN (4 * 1024 + 8) +#define CBUF_LEN (RWL_MAX_DATA_LEN + 64) +#define LOG_BUF_LEN (16 * 1024) +#endif + +typedef struct { +#ifdef BCMDONGLEHOST + uint32 buf; /* Can't be pointer on (64-bit) hosts */ +#else + /* Physical buffer address, read by host code to dump console. */ + char* PHYS_ADDR_N(buf); +#endif + uint buf_size; + uint idx; + uint out_idx; /* output index */ + uint dump_idx; /* read idx for wl dump */ +} hnd_log_t; + +typedef struct { + /* Virtual UART + * When there is no UART (e.g. Quickturn), the host should write a complete + * input line directly into cbuf and then write the length into vcons_in.
+ * This may also be used when there is a real UART (at risk of conflicting with + * the real UART). vcons_out is currently unused. + */ + volatile uint vcons_in; + volatile uint vcons_out; + + /* Output (logging) buffer + * Console output is written to a ring buffer log_buf at index log_idx. + * The host may read the output when it sees log_idx advance. + * Output will be lost if the output wraps around faster than the host polls. + */ + hnd_log_t log; + + /* Console input line buffer + * Characters are read one at a time into cbuf until a carriage return is + * received, then the buffer is processed as a command line. Also used for + * virtual UART. + */ + uint cbuf_idx; + char cbuf[CBUF_LEN]; +} hnd_cons_t; + +#endif /* _hnd_cons_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hnd_debug.h b/bcmdhd.101.10.361.x/include/hnd_debug.h new file mode 100755 index 0000000..c7ffe2a --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hnd_debug.h @@ -0,0 +1,250 @@ +/* + * HND Run Time Environment debug info area + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _HND_DEBUG_H +#define _HND_DEBUG_H + +/* Magic number at a magic location to find HND_DEBUG pointers */ +#define HND_DEBUG_PTR_PTR_MAGIC 0x50504244u /* DBPP */ + +#ifndef _LANGUAGE_ASSEMBLY + +#include + +/* Includes only when building dongle code */ +#ifdef _RTE_ +#include +#include +#include +#endif + +/* We use explicit sizes here since this gets included from different + * systems. The sizes must be the size of the creating system + * (currently 32 bit ARM) since this is gleaned from dump.
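+ * A host-side guard in that spirit (an illustrative addition, not in the
+ * original source) would pin the expected 32-bit layout at compile time:
+ *   typedef char trap_t_size_check[(sizeof(trap_t) == TRAP_T_SIZE) ? 1 : -1];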
+ */ + +#ifdef FWID +extern uint32 gFWID; +#endif + +enum hnd_debug_reloc_entry_type { + HND_DEBUG_RELOC_ENTRY_TYPE_ROM = 0u, + HND_DEBUG_RELOC_ENTRY_TYPE_RAM = 1u, + HND_DEBUG_RELOC_ENTRY_TYPE_MTH_STACK = 2u, /* main thread stack */ +}; +typedef uint32 hnd_debug_reloc_entry_type_t; + +typedef struct hnd_debug_reloc_entry { + /* Identifies the type (hnd_debug_reloc_entry_type) of the data */ + hnd_debug_reloc_entry_type_t type; + uint32 phys_addr; /* Physical address */ + uint32 virt_addr; /* Virtual address */ + uint32 size; /* Specifies the size of the segment */ +} hnd_debug_reloc_entry_t; + +#ifdef _RTE_ +/* Define pointers for normal ARM use */ +#define _HD_EVLOG_P event_log_top_t * +#define _HD_CONS_P hnd_cons_t * +#define _HD_TRAP_P trap_t * +#define _HD_DEBUG_RELOC_ENTRY_P hnd_debug_reloc_entry_t * +#define _HD_DEBUG_RELOC_P hnd_debug_reloc_t * + +#else +/* Define pointers for use on other systems */ +#define _HD_EVLOG_P uint32 +#define _HD_CONS_P uint32 +#define _HD_TRAP_P uint32 +#define _HD_DEBUG_RELOC_ENTRY_P uint32 +#define _HD_DEBUG_RELOC_P uint32 + +#endif /* _RTE_ */ + +/* MMU relocation info in the debug area */ +typedef struct hnd_debug_reloc { + _HD_DEBUG_RELOC_ENTRY_P hnd_reloc_ptr; /* contains the pointer to the MMU reloc table */ + uint32 hnd_reloc_ptr_size; /* Specifies the size of the MMU reloc table */ +} hnd_debug_reloc_t; + +/* Number of MMU relocation entries supported in v2 */ +#define RELOC_NUM_ENTRIES 4u + +/* Total MMU relocation table size for v2 */ +#define HND_DEBUG_RELOC_PTR_SIZE (RELOC_NUM_ENTRIES * sizeof(hnd_debug_reloc_entry_t)) + +#define HND_DEBUG_VERSION_1 1u /* Legacy, version 1 */ +#define HND_DEBUG_VERSION_2 2u /* Version 2 contains the MMU information + * used for stack virtualization, etc. + */ + +/* Legacy debug version for older branches. */ +#define HND_DEBUG_VERSION HND_DEBUG_VERSION_1 + +/* This struct is placed at a well-defined location, and contains a pointer to hnd_debug. */ +typedef struct hnd_debug_ptr { + uint32 magic; + + /* RAM address of 'hnd_debug'. For legacy versions of this struct, it is a 0-indexed + * offset instead. + */ + uint32 hnd_debug_addr; + + /* Base address of RAM. This field does not exist for legacy versions of this struct. */ + uint32 ram_base_addr; + +} hnd_debug_ptr_t; +extern hnd_debug_ptr_t debug_info_ptr; + +#define HND_DEBUG_EPIVERS_MAX_STR_LEN 32u + +/* chip id string is 8 bytes long with null terminator. Example 43452a3 */ +#define HND_DEBUG_BUILD_SIGNATURE_CHIPID_LEN 13u + +#define HND_DEBUG_BUILD_SIGNATURE_FWID_LEN 17u + +/* ver=abc.abc.abc.abcdefgh size = 24bytes. 6 bytes extra for expansion */ +#define HND_DEBUG_BUILD_SIGNATURE_VER_LEN 30u + +typedef struct hnd_debug { + uint32 magic; +#define HND_DEBUG_MAGIC 0x47424544u /* 'DEBG' */ + +#ifndef HND_DEBUG_USE_V2 + uint32 version; /* Legacy, debug struct version */ +#else + /* Note: The original uint32 version is split into two fields: + * uint16 version and uint16 length to accommodate future expansion + * of the structure. + * + * The length field is not populated for version 1 of the structure. + */ + uint16 version; /* Debug struct version */ + uint16 length; /* Size of the whole structure in bytes */ +#endif /* HND_DEBUG_USE_V2 */ + + uint32 fwid; /* 4 bytes of fw info */ + char epivers[HND_DEBUG_EPIVERS_MAX_STR_LEN]; + + _HD_TRAP_P PHYS_ADDR_N(trap_ptr); /* trap_t data struct physical address. */ + _HD_CONS_P PHYS_ADDR_N(console); /* Console physical address.
*/ + + uint32 ram_base; + uint32 ram_size; + + uint32 rom_base; + uint32 rom_size; + + _HD_EVLOG_P event_log_top; /* EVENT_LOG address. */ + + /* To populate the fields below, + * INCLUDE_BUILD_SIGNATURE_IN_SOCRAM needs to be enabled + */ + char fwid_signature[HND_DEBUG_BUILD_SIGNATURE_FWID_LEN]; /* fwid= */ + /* ver=abc.abc.abc.abcdefgh size = 24bytes. 6 bytes extra for expansion */ + char ver_signature[HND_DEBUG_BUILD_SIGNATURE_VER_LEN]; + char chipid_signature[HND_DEBUG_BUILD_SIGNATURE_CHIPID_LEN]; /* chip=12345a3 */ + +#ifdef HND_DEBUG_USE_V2 + /* Version 2 fields */ + /* Specifies the hnd debug MMU info */ + _HD_DEBUG_RELOC_P hnd_debug_reloc_ptr; +#endif /* HND_DEBUG_USE_V2 */ +} hnd_debug_t; + +#ifdef HND_DEBUG_USE_V2 +#define HND_DEBUG_V1_SIZE (OFFSETOF(hnd_debug_t, chipid_signature) + \ + sizeof(((hnd_debug_t *)0)->chipid_signature)) + +#define HND_DEBUG_V2_BASE_SIZE (OFFSETOF(hnd_debug_t, hnd_debug_reloc_ptr) + \ + sizeof(((hnd_debug_t *)0)->hnd_debug_reloc_ptr)) +#endif /* HND_DEBUG_USE_V2 */ + +/* The following structure is used in populating build information */ +typedef struct hnd_build_info { + uint8 version; /* Same as HND_DEBUG_VERSION */ + uint8 rsvd[3]; /* Reserved fields for padding purposes */ + /* To populate the fields below, + * INCLUDE_BUILD_SIGNATURE_IN_SOCRAM needs to be enabled + */ + uint32 fwid; + uint32 ver[4]; + char chipid_signature[HND_DEBUG_BUILD_SIGNATURE_CHIPID_LEN]; /* chip=12345a3 */ +} hnd_build_info_t; + +/* + * timeval_t and prstatus_t are copies of the Linux structures. + * Included here because we need the definitions for the target processor + * (32 bits) and not the definition on the host this is running on + * (which could be 64 bits). + */ + +typedef struct { /* Time value with microsecond resolution */ + uint32 tv_sec; /* Seconds */ + uint32 tv_usec; /* Microseconds */ +} timeval_t; + +/* Linux/ARM 32 prstatus for notes section */ +typedef struct prstatus { + int32 si_signo; /* Signal number */ + int32 si_code; /* Extra code */ + int32 si_errno; /* Errno */ + uint16 pr_cursig; /* Current signal. */ + uint16 unused; + uint32 pr_sigpend; /* Set of pending signals. */ + uint32 pr_sighold; /* Set of held signals. */ + uint32 pr_pid; + uint32 pr_ppid; + uint32 pr_pgrp; + uint32 pr_sid; + timeval_t pr_utime; /* User time. */ + timeval_t pr_stime; /* System time. */ + timeval_t pr_cutime; /* Cumulative user time. */ + timeval_t pr_cstime; /* Cumulative system time. */ + uint32 uregs[18]; + int32 pr_fpvalid; /* True if math copro being used. */ +} prstatus_t; + +/* for mkcore and other utilities use */ +#define DUMP_INFO_PTR_PTR_0 0x74 +#define DUMP_INFO_PTR_PTR_1 0x78 +#define DUMP_INFO_PTR_PTR_2 0xf0 +#define DUMP_INFO_PTR_PTR_3 0xf8 +#define DUMP_INFO_PTR_PTR_4 0x874 +#define DUMP_INFO_PTR_PTR_5 0x878 +#define DUMP_INFO_PTR_PTR_END 0xffffffff +#define DUMP_INFO_PTR_PTR_LIST DUMP_INFO_PTR_PTR_0, \ + DUMP_INFO_PTR_PTR_1, \ + DUMP_INFO_PTR_PTR_2, \ + DUMP_INFO_PTR_PTR_3, \ + DUMP_INFO_PTR_PTR_4, \ + DUMP_INFO_PTR_PTR_5, \ + DUMP_INFO_PTR_PTR_END + +extern bool hnd_debug_info_in_trap_context(void); + +/* Get build information.
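+ * A minimal host-side calling sketch for the getter declared below; the
+ * context arguments are opaque here, and the buffer size and BCME_OK
+ * success code are assumptions:
+ *   uint32 buf[16];
+ *   uint16 len = sizeof(buf);
+ *   if (hnd_build_info_get(NULL, NULL, buf, &len) == BCME_OK) {
+ *       // buf now holds a hnd_build_info_t image, len its byte count
+ *   }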
*/ +extern int hnd_build_info_get(void *ctx, void *arg2, uint32 *buf, uint16 *len); + +#endif /* !LANGUAGE_ASSEMBLY */ + +#endif /* _HND_DEBUG_H */ diff --git a/bcmdhd.101.10.361.x/include/hnd_pktpool.h b/bcmdhd.101.10.361.x/include/hnd_pktpool.h new file mode 100755 index 0000000..ce241e8 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hnd_pktpool.h @@ -0,0 +1,288 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _hnd_pktpool_h_ +#define _hnd_pktpool_h_ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTPOOL_MUTEX_DECL(mutex) +#endif + +#ifdef BCMPKTPOOL +#define POOL_ENAB(pool) ((pool) && (pool)->inited) +#else /* BCMPKTPOOL */ +#define POOL_ENAB(bus) 0 +#endif /* BCMPKTPOOL */ + +#ifndef PKTPOOL_LEN_MAX +#define PKTPOOL_LEN_MAX 40 +#endif /* PKTPOOL_LEN_MAX */ +#define PKTPOOL_CB_MAX 3 +#define PKTPOOL_CB_MAX_AVL 4 + +/* REMOVE_RXCPLID is an arg for pktpool callback function for removing rxcplID + * and host addr associated with the rxfrag or shared pool buffer during pktpool_reclaim(). 
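+ * Usage sketch (the free count of 8 is arbitrary for illustration):
+ *   // hand back up to 8 buffers, stripping rxcplID/host address state
+ *   uint16 freed = pktpool_reclaim(osh, pktp, 8, REMOVE_RXCPLID);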
+ */ +#define REMOVE_RXCPLID 2 + +#define FREE_ALL_PKTS 0 +#define FREE_ALL_FRAG_PKTS 1 + +/* forward declaration */ +struct pktpool; + +typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg); +typedef struct { + pktpool_cb_t cb; + void *arg; + uint8 refcnt; +} pktpool_cbinfo_t; + +/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */ +typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, int arg2, + uint *pktcnt); +typedef struct { + pktpool_cb_extn_t cb; + void *arg; +} pktpool_cbextn_info_t; + +#ifdef BCMDBG_POOL +/* pkt pool debug states */ +#define POOL_IDLE 0 +#define POOL_RXFILL 1 +#define POOL_RXDH 2 +#define POOL_RXD11 3 +#define POOL_TXDH 4 +#define POOL_TXD11 5 +#define POOL_AMPDU 6 +#define POOL_TXENQ 7 + +typedef struct { + void *p; + uint32 cycles; + uint32 dur; +} pktpool_dbg_t; + +typedef struct { + uint8 txdh; /* tx to host */ + uint8 txd11; /* tx to d11 */ + uint8 enq; /* waiting in q */ + uint8 rxdh; /* rx from host */ + uint8 rxd11; /* rx from d11 */ + uint8 rxfill; /* dma_rxfill */ + uint8 idle; /* avail in pool */ +} pktpool_stats_t; +#endif /* BCMDBG_POOL */ + +typedef struct pktpool { + bool inited; /**< pktpool_init was successful */ + uint8 type; /**< type of lbuf: basic, frag, etc */ + uint8 id; /**< pktpool ID: index in registry */ + bool istx; /**< direction: transmit or receive data path */ + HND_PKTPOOL_MUTEX_DECL(mutex) /**< thread-safe mutex */ + + void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */ + uint16 avail; /**< number of packets in pool's free list */ + uint16 n_pkts; /**< number of packets managed by pool */ + uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */ + uint16 max_pkt_bytes; /**< size of pkt buffer in [bytes], excluding lbuf|lbuf_frag */ + + bool empty; + uint8 cbtoggle; + uint8 cbcnt; + uint8 ecbcnt; + uint8 emptycb_disable; /**< Value of type enum pktpool_empty_cb_state */ + pktpool_cbinfo_t *availcb_excl; + pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL]; + pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX]; + pktpool_cbextn_info_t cbext; /**< PCIe SPLITRX related */ + pktpool_cbextn_info_t rxcplidfn; + pktpool_cbinfo_t dmarxfill; + /* variables for pool_heap management */ + uint32 poolheap_flag; + uint16 poolheap_count; /* Number of allocation done from this pool */ + uint16 min_backup_buf; /* Minimum number of buffer that should be kept in pool */ + bool is_heap_pool; /* Whether this pool can be used as heap */ + bool release_active; + uint8 mem_handle; +#ifdef BCMDBG_POOL + uint8 dbg_cbcnt; + pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX]; + uint16 dbg_qlen; + pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1]; +#endif +} pktpool_t; + +pktpool_t *get_pktpools_registry(int id); +#define pktpool_get(pktp) (pktpool_get_ext((pktp), (pktp)->type, NULL)) + +/* Incarnate a pktpool registry. On success returns total_pools. 
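+ * A bring-up sketch under assumed sizes (real callers live elsewhere in the
+ * driver; the lbuf type argument is shown as 0 purely for illustration):
+ *   int n_pkts = 32;
+ *   (void)pktpool_attach(osh, 4);  // registry sized for up to 4 pools
+ *   (void)pktpool_init(osh, &pool, &n_pkts, 2048, FALSE, 0,  // type: basic
+ *                      FALSE, 0, 0);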
*/ +extern int pktpool_attach(osl_t *osh, uint32 total_pools); +extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */ + +extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type, + bool is_heap_pool, uint32 heap_pool_flag, uint16 min_backup_buf); +extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp); +extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal); +extern int pktpool_empty(osl_t *osh, pktpool_t *pktp); +extern uint16 pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt, uint8 action); +void pktpool_update_freelist(pktpool_t *pktp, void *p, uint pkts_consumed); +extern void* pktpool_get_ext(pktpool_t *pktp, uint8 type, uint *pktcnt); +extern void pktpool_free(pktpool_t *pktp, void *p); +void pktpool_nfree(pktpool_t *pktp, void *head, void *tail, uint count); +extern int pktpool_add(pktpool_t *pktp, void *p); +extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp); +extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb); +extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 max_pkts); +extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 max_pkts); +extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable); +extern bool pktpool_emptycb_disabled(pktpool_t *pktp); +extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1); +extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg); +extern void pktpool_invoke_dmarxfill(pktpool_t *pktp); +extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_avail(pktpool_t *pktpool); + +#define POOLPTR(pp) ((pktpool_t *)(pp)) +#define POOLID(pp) (POOLPTR(pp)->id) + +#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid)) + +#define pktpool_tot_pkts(pp) (POOLPTR(pp)->n_pkts) /**< n_pkts = avail + in_use <= max_pkts */ +#define pktpool_max_pkt_bytes(pp) (POOLPTR(pp)->max_pkt_bytes) +#define pktpool_max_pkts(pp) (POOLPTR(pp)->maxlen) + +/* + * ---------------------------------------------------------------------------- + * A pool ID is assigned with a pkt pool during pool initialization. This is + * done by maintaining a registry of all initialized pools, and the registry + * index at which the pool is registered is used as the pool's unique ID. + * ID 0 is reserved and is used to signify an invalid pool ID. + * All packets henceforth allocated from a pool will be tagged with the pool's + * unique ID. Packets allocated from the heap will use the reserved ID = 0. + * Packets with non-zero pool id signify that they were allocated from a pool. + * A maximum of 15 pools are supported, allowing a 4bit pool ID to be used + * in place of a 32bit pool pointer in each packet. 
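+ * A round-trip sketch with the converters defined below (assumes 'pktp' was
+ * registered at init time):
+ *   uint8 id = PKTPOOL_PTR2ID(pktp);      // 1..15; 0 is PKTPOOL_INVALID_ID
+ *   pktpool_t *same = PKTPOOL_ID2PTR(id); // registry lookup, same == pktp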
+ * ---------------------------------------------------------------------------- + */ +#define PKTPOOL_INVALID_ID (0) +#define PKTPOOL_MAXIMUM_ID (15) + +/* Registry of pktpool(s) */ +/* Pool ID to/from Pool Pointer converters */ +#define PKTPOOL_ID2PTR(id) (get_pktpools_registry(id)) +#define PKTPOOL_PTR2ID(pp) (POOLID(pp)) + +#ifndef PKTID_POOL +/* max pktids reserved for pktpool is updated properly in Makeconf */ +#define PKTID_POOL (PKT_MAXIMUM_ID - 32u) +#endif /* PKTID_POOL */ +extern uint32 total_pool_pktid_count; + +#ifdef BCMDBG_POOL +extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_start_trigger(pktpool_t *pktp, void *p); +extern int pktpool_dbg_dump(pktpool_t *pktp); +extern int pktpool_dbg_notify(pktpool_t *pktp); +extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats); +#endif /* BCMDBG_POOL */ + +#ifdef BCMPKTPOOL +#define SHARED_POOL (pktpool_shared) +extern pktpool_t *pktpool_shared; +#ifdef BCMFRAGPOOL +#define SHARED_FRAG_POOL (pktpool_shared_lfrag) +extern pktpool_t *pktpool_shared_lfrag; +#endif + +#ifdef BCMALFRAGPOOL +#define SHARED_ALFRAG_POOL (pktpool_shared_alfrag) +extern pktpool_t *pktpool_shared_alfrag; + +#define SHARED_ALFRAG_DATA_POOL (pktpool_shared_alfrag_data) +extern pktpool_t *pktpool_shared_alfrag_data; +#endif + +#ifdef BCMRESVFRAGPOOL +#define RESV_FRAG_POOL (pktpool_resv_lfrag) +#define RESV_POOL_INFO (resv_pool_info) +#else +#define RESV_FRAG_POOL ((struct pktpool *)NULL) +#define RESV_POOL_INFO (NULL) +#endif /* BCMRESVFRAGPOOL */ + +/** PCIe SPLITRX related */ +#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag) +extern pktpool_t *pktpool_shared_rxlfrag; + +#define SHARED_RXDATA_POOL (pktpool_shared_rxdata) +extern pktpool_t *pktpool_shared_rxdata; + +int hnd_pktpool_init(osl_t *osh); +void hnd_pktpool_deinit(osl_t *osh); +int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal); +void hnd_pktpool_refill(bool minimal); + +#ifdef BCMRESVFRAGPOOL +extern pktpool_t *pktpool_resv_lfrag; +extern struct resv_info *resv_pool_info; +#endif /* BCMRESVFRAGPOOL */ + +/* Current identified use case flags for pool heap manager */ +#define POOL_HEAP_FLAG_D3 (1 << 0) +#define POOL_HEAP_FLAG_RSRVPOOL (1 << 1) + +#ifdef POOL_HEAP_RECONFIG +typedef void (*pktpool_heap_cb_t)(void *arg, bool entry); + +extern void hnd_pktpool_heap_handle(osl_t *osh, uint32 flag, bool enable); +extern int hnd_pktpool_heap_register_cb(pktpool_heap_cb_t fn, void *ctxt, uint32 flag); +extern int hnd_pktpool_heap_deregister_cb(pktpool_heap_cb_t fn); +extern void *hnd_pktpool_freelist_alloc(uint size, uint alignbits, uint32 flag); +extern uint16 hnd_pktpool_get_min_bkup_buf(pktpool_t *pktp); +#endif /* POOL_HEAP_RECONFIG */ +extern uint32 hnd_pktpool_get_total_poolheap_count(void); + +#else /* BCMPKTPOOL */ +#define SHARED_POOL ((struct pktpool *)NULL) +#endif /* BCMPKTPOOL */ + +#ifdef __cplusplus + } +#endif + +#endif /* _hnd_pktpool_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hnd_pktq.h b/bcmdhd.101.10.361.x/include/hnd_pktq.h new file mode 100755 index 0000000..375ebd8 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hnd_pktq.h @@ -0,0 +1,330 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _hnd_pktq_h_ +#define _hnd_pktq_h_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTQ_MUTEX_DECL(mutex) +#endif + +/* osl multi-precedence packet queue */ +#define PKTQ_LEN_MAX 0xFFFFu /* Max uint16 65535 packets */ +#ifndef PKTQ_LEN_DEFAULT +#define PKTQ_LEN_DEFAULT 128u /* Max 128 packets */ +#endif +#ifndef PKTQ_MAX_PREC +#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */ +#endif + +/** Queue for a single precedence level */ +typedef struct pktq_prec { + void *head; /**< first packet to dequeue */ + void *tail; /**< last packet to dequeue */ + uint16 n_pkts; /**< number of queued packets */ + uint16 max_pkts; /**< maximum number of queued packets */ + uint16 stall_count; /**< # seconds since no packets are dequeued */ + uint16 dequeue_count; /**< # of packets dequeued in last 1 second */ +} pktq_prec_t; + +#ifdef PKTQ_LOG +typedef struct { + uint32 requested; /**< packets requested to be stored */ + uint32 stored; /**< packets stored */ + uint32 saved; /**< packets saved, + because a lowest priority queue has given away one packet + */ + uint32 selfsaved; /**< packets saved, + because an older packet from the same queue has been dropped + */ + uint32 full_dropped; /**< packets dropped, + because pktq is full with higher precedence packets + */ + uint32 dropped; /**< packets dropped because pktq per that precedence is full */ + uint32 sacrificed; /**< packets dropped, + in order to save one from a queue of a highest priority + */ + uint32 busy; /**< packets dropped because of hardware/transmission error */ + uint32 retry; /**< packets re-sent because they were not received */ + uint32 ps_retry; /**< packets retried again prior to moving to power save mode */ + uint32 suppress; /**< packets which were suppressed and not transmitted */ + uint32 retry_drop; /**< packets finally dropped after retry limit */ + uint32 max_avail; /**< the high-water mark of the queue capacity for packets - + goes to zero as queue fills + */ + uint32 max_used; /**< the high-water mark of the queue utilisation for packets - + increases with use ('inverse' of max_avail) + */ + uint32 queue_capacity; /**< the maximum capacity of the queue */ + uint32 rtsfail; /**< count of rts attempts that failed to receive cts */ + uint32 acked; /**< count of packets sent (acked) successfully */ + uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */ + uint32 txrate_main; /**< running total of primary phy rate of all packets */ + uint32 throughput; /**< actual data
transferred successfully */ + uint32 airtime; /**< cumulative total medium access delay in useconds */ + uint32 _logtime; /**< timestamp of last counter clear */ +} pktq_counters_t; + +#define PKTQ_LOG_COMMON \ + uint32 pps_time; /**< time spent in ps pretend state */ \ + uint32 _prec_log; + +typedef struct { + PKTQ_LOG_COMMON + pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */ +} pktq_log_t; +#else +typedef struct pktq_log pktq_log_t; +#endif /* PKTQ_LOG */ + +/** multi-priority packet queue */ +struct pktq { + HND_PKTQ_MUTEX_DECL(mutex) + pktq_log_t *pktqlog; + uint16 num_prec; /**< number of precedences in use */ + uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ + uint16 max_pkts; /**< max packets */ + uint16 n_pkts_tot; /**< total (cumulative over all precedences) number of packets */ + /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ + struct pktq_prec q[PKTQ_MAX_PREC]; +}; + +/** simple, non-priority packet queue */ +struct spktq { + HND_PKTQ_MUTEX_DECL(mutex) + struct pktq_prec q; +}; + +#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) + +/* fn(pkt, arg). return true if pkt belongs to bsscfg */ +typedef bool (*ifpkt_cb_t)(void*, int); + +/* + * pktq filter support + */ + +/** filter function return values */ +typedef enum { + PKT_FILTER_NOACTION = 0, /**< restore the pkt to its position in the queue */ + PKT_FILTER_DELETE = 1, /**< delete the pkt */ + PKT_FILTER_REMOVE = 2, /**< do not restore the pkt to the queue, + * filter fn has taken ownership of the pkt + */ +} pktq_filter_result_t; + +/** + * Caller supplied filter function to pktq_pfilter(), pktq_filter(). + * Function filter(ctx, pkt) is called with its ctx pointer on each pkt in the + * pktq. When the filter function is called, the supplied pkt will have been + * unlinked from the pktq. The filter function returns a pktq_filter_result_t + * result specifying the action pktq_filter()/pktq_pfilter() should take for + * the pkt. + * Here are the actions taken by pktq_filter/pfilter() based on the supplied + * filter function's return value: + * + * PKT_FILTER_NOACTION - The filter will re-link the pkt at its + * previous location. + * + * PKT_FILTER_DELETE - The filter will not relink the pkt and will + * call the user supplied defer_free_pkt fn on the packet. + * + * PKT_FILTER_REMOVE - The filter will not relink the pkt. The supplied + * filter fn took ownership (or deleted) the pkt. + * + * WARNING: pkts inserted by the user (in pkt_filter and/or flush callbacks + * and chains) in the prec queue will not be seen by the filter, and the prec + * queue will temporarily be removed from the queue; hence there are side + * effects, including that pktq_n_pkts_tot() on the queue won't reflect the + * correct number of packets in the queue. + */ + +typedef pktq_filter_result_t (*pktq_filter_t)(void* ctx, void* pkt); + +/** + * The defer_free_pkt callback is invoked when the pktq_filter callback + * returns the PKT_FILTER_DELETE decision, which allows the user to deposit + * the packet appropriately based on the situation (free the packet or + * save it in a temporary queue etc.). + */ +typedef void (*defer_free_pkt_fn_t)(void *ctx, void *pkt); + +/** + * The flush_free_pkt callback is invoked when all packets in the pktq + * are processed.
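+ * Putting the three callbacks together, a filter pass might look like this
+ * (callback names here are illustrative, not part of this header):
+ *   static pktq_filter_result_t drop_all(void *ctx, void *pkt)
+ *   {
+ *       return PKT_FILTER_DELETE; // every pkt goes to the defer callback
+ *   }
+ *   ...
+ *   pktq_filter(&q, drop_all, NULL, defer_free, osh, flush_done, osh);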
+ */ +typedef void (*flush_free_pkt_fn_t)(void *ctx); + +#if defined(PROP_TXSTATUS) +/* this callback will be invoked when in low_txq_scb flush() + * two back-to-back pkts has same epoch value. + */ +typedef void (*flip_epoch_t)(void *ctx, void *pkt, uint8 *flipEpoch, uint8 *lastEpoch); +#endif /* defined(PROP_TXSTATUS) */ + +/** filter a pktq, using the caller supplied filter/deposition/flush functions */ +extern void pktq_filter(struct pktq *pq, pktq_filter_t fn, void* arg, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); +/** filter a particular precedence in pktq, using the caller supplied filter function */ +extern void pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fn, void* arg, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); +/** filter a simple non-precedence in spktq, using the caller supplied filter function */ +extern void spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); + +/* operations on a specific precedence in packet queue */ +#define pktqprec_max_pkts(pq, prec) ((pq)->q[prec].max_pkts) +#define pktqprec_n_pkts(pq, prec) ((pq)->q[prec].n_pkts) +#define pktqprec_empty(pq, prec) ((pq)->q[prec].n_pkts == 0) +#define pktqprec_peek(pq, prec) ((pq)->q[prec].head) +#define pktqprec_peek_tail(pq, prec) ((pq)->q[prec].tail) +#define spktq_peek_tail(pq) ((pq)->q.tail) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktqprec_avail_pkts(struct pktq *pq, int prec); +extern bool pktqprec_full(struct pktq *pq, int prec); +#else +#define pktqprec_avail_pkts(pq, prec) ((pq)->q[prec].max_pkts - (pq)->q[prec].n_pkts) +#define pktqprec_full(pq, prec) ((pq)->q[prec].n_pkts >= (pq)->q[prec].max_pkts) +#endif /* HND_PKTQ_THREAD_SAFE */ + +extern void pktq_append(struct pktq *pq, int prec, struct spktq *list); +extern void spktq_append(struct spktq *spq, struct spktq *list); +extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list); +extern void spktq_prepend(struct spktq *spq, struct spktq *list); +extern void *pktq_penq(struct pktq *pq, int prec, void *p); +extern void *pktq_penq_head(struct pktq *pq, int prec, void *p); +extern void *pktq_pdeq(struct pktq *pq, int prec); +extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p); +extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg); +extern void *pktq_pdeq_tail(struct pktq *pq, int prec); +/** Remove a specified packet from its queue */ +extern bool pktq_pdel(struct pktq *pq, void *p, int prec); + +/* For single precedence queues */ +extern void *spktq_enq_chain(struct spktq *dspq, struct spktq *sspq); +extern void *spktq_enq(struct spktq *spq, void *p); +extern void *spktq_enq_head(struct spktq *spq, void *p); +extern void *spktq_deq(struct spktq *spq); +extern void *spktq_deq_virt(struct spktq *spq); +extern void *spktq_deq_tail(struct spktq *spq); + +/* operations on a set of precedences in packet queue */ + +extern int pktq_mlen(struct pktq *pq, uint prec_bmp); +extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); +extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out); + +/* operations on packet queue as a whole */ + +#define pktq_n_pkts_tot(pq) ((int)(pq)->n_pkts_tot) +#define pktq_max(pq) ((int)(pq)->max_pkts) +#define pktq_empty(pq) ((pq)->n_pkts_tot == 0) +#define spktq_n_pkts(spq) ((int)(spq)->q.n_pkts) +#define spktq_empty(spq) ((spq)->q.n_pkts == 0) + 
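+/* An illustrative whole-queue flow using the operations above and the
+ * enqueue/dequeue primitives declared earlier in this header (sizes arbitrary):
+ *   struct pktq q;
+ *   (void)pktq_init(&q, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT);
+ *   if (!pktq_full(&q))
+ *       (void)pktq_penq(&q, 0, pkt);  // tail-queue pkt at precedence 0
+ *   void *p = pktq_deq(&q, NULL);     // dequeue from highest non-empty prec
+ */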
+#define spktq_max(spq) ((int)(spq)->q.max_pkts) +#define spktq_empty(spq) ((spq)->q.n_pkts == 0) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_avail(struct pktq *pq); +extern bool pktq_full(struct pktq *pq); +extern int spktq_avail(struct spktq *spq); +extern bool spktq_full(struct spktq *spq); +#else +#define pktq_avail(pq) ((int)((pq)->max_pkts - (pq)->n_pkts_tot)) +#define pktq_full(pq) ((pq)->n_pkts_tot >= (pq)->max_pkts) +#define spktq_avail(spq) ((int)((spq)->q.max_pkts - (spq)->q.n_pkts)) +#define spktq_full(spq) ((spq)->q.n_pkts >= (spq)->q.max_pkts) +#endif /* HND_PKTQ_THREAD_SAFE */ + +/* operations for single precedence queues */ +#define pktenq(pq, p) pktq_penq((pq), 0, (p)) +#define pktenq_head(pq, p) pktq_penq_head((pq), 0, (p)) +#define pktdeq(pq) pktq_pdeq((pq), 0) +#define pktdeq_tail(pq) pktq_pdeq_tail((pq), 0) +#define pktqflush(osh, pq, dir) pktq_pflush(osh, (pq), 0, (dir)) +#define pktqinit(pq, max_pkts) pktq_init((pq), 1, (max_pkts)) +#define pktqdeinit(pq) pktq_deinit((pq)) +#define pktqavail(pq) pktq_avail((pq)) +#define pktqfull(pq) pktq_full((pq)) +#define pktqfilter(pq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \ + pktq_pfilter((pq), 0, (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx)) + +/* operations for simple non-precedence queues */ +#define spktenq(spq, p) spktq_enq((spq), (p)) +#define spktenq_head(spq, p) spktq_enq_head((spq), (p)) +#define spktdeq(spq) spktq_deq((spq)) +#define spktdeq_tail(spq) spktq_deq_tail((spq)) +#define spktqflush(osh, spq, dir) spktq_flush((osh), (spq), (dir)) +#define spktqinit(spq, max_pkts) spktq_init((spq), (max_pkts)) +#define spktqdeinit(spq) spktq_deinit((spq)) +#define spktqavail(spq) spktq_avail((spq)) +#define spktqfull(spq) spktq_full((spq)) + +#define spktqfilter(spq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \ + spktq_filter((spq), (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx)) +extern bool pktq_init(struct pktq *pq, int num_prec, uint max_pkts); +extern bool pktq_deinit(struct pktq *pq); +extern bool spktq_init(struct spktq *spq, uint max_pkts); +extern bool spktq_init_list(struct spktq *spq, uint max_pkts, + void *head, void *tail, uint16 n_pkts); +extern bool spktq_deinit(struct spktq *spq); + +extern void pktq_set_max_plen(struct pktq *pq, int prec, uint max_pkts); + +/* prec_out may be NULL if caller is not interested in return value */ +extern void *pktq_deq(struct pktq *pq, int *prec_out); +extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); +extern void *pktq_peek(struct pktq *pq, int *prec_out); +extern void *spktq_peek(struct spktq *spq); +extern void *pktq_peek_tail(struct pktq *pq, int *prec_out); + +/** flush pktq */ +extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir); +/* single precedence queue with callback before deleting a packet */ +extern void spktq_flush_ext(osl_t *osh, struct spktq *spq, bool dir, + void (*pktq_flush_cb)(void *ctx, void *pkt), void *pktq_flush_ctx); +/* single precedence queue */ +#define spktq_flush(osh, spq, dir) spktq_flush_ext(osh, spq, dir, NULL, NULL) +/** Empty the queue at particular precedence level */ +extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir); + +typedef void (*spktq_cb_t)(void *arg, struct spktq *spq); +extern void spktq_free_register(spktq_cb_t cb, void *arg); +extern void spktq_cb(void *spq); +#define SPKTQFREE spktq_cb + +#ifdef __cplusplus +} +#endif + +#endif /* _hnd_pktq_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hnd_trap.h 
b/bcmdhd.101.10.361.x/include/hnd_trap.h new file mode 100755 index 0000000..eded5da --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hnd_trap.h @@ -0,0 +1,33 @@ +/* + * HND Trap handling. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _hnd_trap_h_ +#define _hnd_trap_h_ + +#if defined(__arm__) || defined(__thumb__) || defined(__thumb2__) || defined(WLETD) +#include +#else +#error "unsupported CPU architecture" +#endif + +#endif /* _hnd_trap_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hndchipc.h b/bcmdhd.101.10.361.x/include/hndchipc.h new file mode 100755 index 0000000..26e53b3 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hndchipc.h @@ -0,0 +1,47 @@ +/* + * HND SiliconBackplane chipcommon support - OS independent. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#ifndef _hndchipc_h_ +#define _hndchipc_h_ + +#include +#include + +#ifdef RTE_UART +typedef void (*si_serial_init_fn)(si_t *sih, void *regs, uint irq, uint baud_base, uint reg_shift); +#else +typedef void (*si_serial_init_fn)(void *regs, uint irq, uint baud_base, uint reg_shift); +#endif +extern void si_serial_init(si_t *sih, si_serial_init_fn add); + +extern volatile void *hnd_jtagm_init(si_t *sih, uint clkd, bool exttap, uint32 *prev_jtagctrl); +extern void hnd_jtagm_disable(si_t *sih, volatile void *h, uint32 *prev_jtagctrl); +extern uint32 jtag_scan(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint32 ir1, + uint drsz, uint32 dr0, uint32 *dr1, bool rti); +extern uint32 jtag_read_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz, + uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3); +extern uint32 jtag_write_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz, + uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3); +extern int jtag_setbit_128(si_t *sih, uint32 jtagureg_addr, uint8 bit_pos, uint8 bit_val); + +#endif /* _hndchipc_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hndd11.h b/bcmdhd.101.10.361.x/include/hndd11.h new file mode 100755 index 0000000..bd6f1da --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hndd11.h @@ -0,0 +1,121 @@ +/* + * Generic functions for d11 access + * + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. + * + * + * <> + */ + +#ifndef _hndd11_h_ +#define _hndd11_h_ + +#include +#include +#include +#include +#include + +/* This marks the start of a packed structure section. */ +#include + +#ifndef WL_RSSI_ANT_MAX +#define WL_RSSI_ANT_MAX 4 /**< max possible rx antennas */ +#elif WL_RSSI_ANT_MAX != 4 +#error "WL_RSSI_ANT_MAX does not match" +#endif + +BWL_PRE_PACKED_STRUCT struct wl_d11rxrssi { + int8 dBm; /* number of full dBms */ + /* sub-dbm resolution */ + int8 decidBm; /* sub dBms : value after the decimal point */ +} BWL_POST_PACKED_STRUCT; + +typedef struct wl_d11rxrssi wlc_d11rxrssi_t; + +BWL_PRE_PACKED_STRUCT struct wlc_d11rxhdr { + /* SW header */ + uint32 tsf_l; /**< TSF_L reading */ + int8 rssi; /**< computed instantaneous rssi */ + int8 rssi_qdb; /**< qdB portion of the computed rssi */ + int16 snr; /**< computed instantaneous signal-to-noise ratio (snr) */ + int8 rxpwr[ROUNDUP(WL_RSSI_ANT_MAX,2)]; /**< rssi for supported antennas */ + /** + * Even though rxhdr can be in short or long format, always declare it here + * to be in long format. So the offsets for the other fields are always the same. + */ + d11rxhdr_t rxhdr; +} BWL_POST_PACKED_STRUCT; + +/* SW RXHDR + HW RXHDR */ +typedef struct wlc_d11rxhdr wlc_d11rxhdr_t; + +/* extension of wlc_d11rxhdr.. + * This extra block can be used to store extra internal information that cannot fit into + * wlc_d11rxhdr. + * At the moment, it is only used to store and possibly transmit the per-core quarter-dBm rssi + * information produced by the phy. + * NOTE: To avoid header overhead and amsdu handling complexities this usage is limited to + * only cases where the host needs to get the extra info, e.g., a monitoring mode packet.
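+ * A host-side rendering sketch for one antenna sample (the formatting is an
+ * assumed interpretation: whole dBm plus a separately carried fraction):
+ *   printf("rssi core%d: %d(.%d) dBm\n", i, rxpwr[i].dBm, rxpwr[i].decidBm);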
+ */ + +BWL_PRE_PACKED_STRUCT struct wlc_d11rxhdr_ext { +#ifdef BCM_MON_QDBM_RSSI + wlc_d11rxrssi_t rxpwr[WL_RSSI_ANT_MAX]; +#endif + wlc_d11rxhdr_t wlc_d11rx; +} BWL_POST_PACKED_STRUCT; + +typedef struct wlc_d11rxhdr_ext wlc_d11rxhdr_ext_t; + +/* Length of software rx header extension */ +#define WLC_SWRXHDR_EXT_LEN (OFFSETOF(wlc_d11rxhdr_ext_t, wlc_d11rx)) + +/* Length of SW header (12 bytes) */ +#define WLC_RXHDR_LEN (OFFSETOF(wlc_d11rxhdr_t, rxhdr)) +/* Length of RX headers - SW header + HW/ucode/PHY RX status */ +#define WL_RXHDR_LEN(corerev, corerev_minor) \ + (WLC_RXHDR_LEN + D11_RXHDR_LEN(corerev, corerev_minor)) +#define WL_RXHDR_LEN_TMP(corerev, corerev_minor) \ + (WLC_RXHDR_LEN + D11_RXHDR_LEN_TMP(corerev, corerev_minor)) + +/* This marks the end of a packed structure section. */ +#include + +/* Structure to hold d11 corerev information */ +typedef struct d11_info d11_info_t; +struct d11_info { + uint major_revid; + uint minor_revid; +}; + +/* ulp dbg macro */ +#define HNDD11_DBG(x) +#define HNDD11_ERR(x) printf x + +/* d11 slice index */ +#define DUALMAC_MAIN 0 +#define DUALMAC_AUX 1 +#define DUALMAC_SCAN 2 + +extern void hndd11_read_shm(si_t *sih, uint coreunit, uint offset, void* buf); +extern void hndd11_write_shm(si_t *sih, uint coreunit, uint offset, const void* buf); + +extern void hndd11_copyfrom_shm(si_t *sih, uint coreunit, uint offset, void* buf, int len); +extern void hndd11_copyto_shm(si_t *sih, uint coreunit, uint offset, const void* buf, int len); + +extern uint32 hndd11_bm_read(osl_t *osh, d11regs_info_t *regsinfo, uint32 offset, uint32 len, + uint32 *buf); +extern uint32 hndd11_bm_write(osl_t *osh, d11regs_info_t *regsinfo, uint32 offset, uint32 len, + const uint32 *buf); +extern void hndd11_bm_dump(osl_t *osh, d11regs_info_t *regsinfo, uint32 offset, uint32 len); + +extern int hndd11_get_reginfo(si_t *sih, d11regs_info_t *regsinfo, uint coreunit); + +#endif /* _hndd11_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hnddma.h b/bcmdhd.101.10.361.x/include/hnddma.h new file mode 100755 index 0000000..bbc8455 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hnddma.h @@ -0,0 +1,338 @@ +/* + * Generic Broadcom Home Networking Division (HND) DMA engine SW interface + * This supports the following chips: BCM42xx, 44xx, 47xx . + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#ifndef _hnddma_h_ +#define _hnddma_h_ + +#include +#include +#include +#include +#include +#include + +#ifndef _hnddma_pub_ +#define _hnddma_pub_ +typedef const struct hnddma_pub hnddma_t; +#endif /* _hnddma_pub_ */ + +/* range param for dma_getnexttxp() and dma_txreclaim */ +typedef enum txd_range { + HNDDMA_RANGE_ALL = 1, + HNDDMA_RANGE_TRANSMITTED, + HNDDMA_RANGE_TRANSFERED +} txd_range_t; + +/* dma parameters id */ +enum dma_param_id { + HNDDMA_PID_TX_MULTI_OUTSTD_RD = 0, + HNDDMA_PID_TX_PREFETCH_CTL, + HNDDMA_PID_TX_PREFETCH_THRESH, + HNDDMA_PID_TX_BURSTLEN, + HNDDMA_PID_TX_CHAN_SWITCH, + + HNDDMA_PID_RX_PREFETCH_CTL = 0x100, + HNDDMA_PID_RX_PREFETCH_THRESH, + HNDDMA_PID_RX_BURSTLEN, + HNDDMA_PID_BURSTLEN_CAP, + HNDDMA_PID_BURSTLEN_WAR, + HNDDMA_SEP_RX_HDR, /**< SPLITRX related */ + HNDDMA_SPLIT_FIFO, + HNDDMA_PID_D11RX_WAR, + HNDDMA_PID_RX_WAIT_CMPL, + HNDDMA_NRXPOST, + HNDDMA_NRXBUFSZ, + HNDDMA_PID_RXCTL_MOW, + HNDDMA_M2M_RXBUF_RAW /* rx buffers are raw buffers, not lbufs/lfrags */ +}; + +#define SPLIT_FIFO_0 1 +#define SPLIT_FIFO_1 2 + +typedef void (*setup_context_t)(void *ctx, void *p, uint8 **desc0, uint16 *len0, + uint8 **desc1, uint16 *len1); + +/** + * Exported data structure (read-only) + */ +/* export structure */ +struct hnddma_pub { + uint dmastflags; /* dma status flags */ + uint dmactrlflags; /**< dma control flags */ + + /* rx error counters */ + uint rxgiants; /**< rx giant frames */ + uint rxnobuf; /**< rx out of dma descriptors */ + /* tx error counters */ + uint txnobuf; /**< tx out of dma descriptors */ + uint txnodesc; /**< tx out of dma descriptors running count */ +}; + +/* DMA status flags */ +#define BCM_DMA_STF_RX (1u << 0u) /* the channel is RX DMA */ + +typedef struct dma_common dma_common_t; +typedef struct dma_dd_pool dma_dd_pool_t; + +/* Flags for dma_attach_ext function */ +#define BCM_DMA_IND_INTF_FLAG 0x00000001 /* set for using INDIRECT DMA INTERFACE */ +#define BCM_DMA_DESC_ONLY_FLAG 0x00000002 /* For DMA that posts descriptors only and + * no packets + */ +#define BCM_DMA_CHAN_SWITCH_EN 0x00000008 /* for d11 corerev 64+ to help arbitrate + * btw dma channels. 
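+ * (These BCM_DMA_* bits OR together into the 'flags' argument of
+ * dma_attach_ext() below; the combination in this sketch is illustrative:)
+ *   di = dma_attach_ext(dmac, osh, "d11tx", sih, txregs, NULL,
+ *                       BCM_DMA_IND_INTF_FLAG | BCM_DMA_CHAN_SWITCH_EN,
+ *                       0, ntxd, 0, 0, 0, 0, 0, &msg_level, 0);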
+ */ +#define BCM_DMA_ROEXT_SUPPORT 0x00000010 /* for d11 corerev 128+ to support receive + * frame offset >=128B and <= 255B + */ +#define BCM_DMA_RX_ALIGN_8BYTE 0x00000020 /* RXDMA address 8-byte aligned */ +#define BCM_DMA_DESC_SHARED_POOL 0x00000100 /* For TX DMA that uses shared desc pool */ +#define BCM_DMA_RXP_LIST 0x00000200 /* linked list for RXP instead of array */ + +typedef int (*rxpkt_error_check_t)(const void* ctx, void* pkt); + +extern dma_common_t * dma_common_attach(osl_t *osh, volatile uint32 *indqsel, + volatile uint32 *suspreq, volatile uint32 *flushreq, rxpkt_error_check_t cb, void *ctx); +extern void dma_common_detach(dma_common_t *dmacommon); +extern void dma_common_set_ddpool_ctx(dma_common_t *dmacommon, void *desc_pool); +extern void * dma_common_get_ddpool_ctx(dma_common_t *dmacommon, void **va); +extern bool dma_check_last_desc(hnddma_t *dmah); +extern void dma_txfrwd(hnddma_t *dmah); + +#ifdef BCM_DMA_INDIRECT +/* Use indirect registers for non-ctmode */ +#define DMA_INDQSEL_IA (1 << 31) +extern void dma_set_indqsel(hnddma_t *di, bool force); +extern bool dma_is_indirect(hnddma_t *dmah); +#else +#define dma_set_indqsel(a, b) +#define dma_is_indirect(a) FALSE +#endif /* #ifdef BCM_DMA_INDIRECT */ + +extern hnddma_t * dma_attach_ext(dma_common_t *dmac, osl_t *osh, const char *name, si_t *sih, + volatile void *dmaregstx, volatile void *dmaregsrx, uint32 flags, uint8 qnum, + uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset, + uint *msg_level, uint coreunit); + +extern hnddma_t * dma_attach(osl_t *osh, const char *name, si_t *sih, + volatile void *dmaregstx, volatile void *dmaregsrx, + uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, + uint rxoffset, uint *msg_level); + +void dma_rx_desc_init(hnddma_t *dmah, uint rxfifo); +void dma_detach(hnddma_t *dmah); +bool dma_txreset(hnddma_t *dmah); +bool dma_rxreset(hnddma_t *dmah); +bool dma_rxidle(hnddma_t *dmah); +void dma_txinit(hnddma_t *dmah); +bool dma_txenabled(hnddma_t *dmah); +void dma_rxinit(hnddma_t *dmah); +void dma_txsuspend(hnddma_t *dmah); +void dma_txresume(hnddma_t *dmah); +bool dma_txsuspended(hnddma_t *dmah); +bool dma_txsuspendedidle(hnddma_t *dmah); +void dma_txflush(hnddma_t *dmah); +void dma_txflush_clear(hnddma_t *dmah); +int dma_txfast_ext(hnddma_t *dmah, void *p0, bool commit, uint16 *pre_txout, uint16 *numd); +int dma_txfast_alfrag(hnddma_t *dmah, hnddma_t *aqm_dmah, void *p, bool commit, dma64dd_t *aqmdesc, + uint d11_txh_len, bool ptxd_hw_enab); +#define dma_txfast(dmah, p0, commit) \ + dma_txfast_ext((dmah), (p0), (commit), NULL, NULL) +void dma_txcommit(hnddma_t *dmah); +int dma_txunframed(hnddma_t *dmah, void *buf, uint len, bool commit); +void *dma_getpos(hnddma_t *dmah, bool direction); +void dma_fifoloopbackenable(hnddma_t *dmah); +void dma_fifoloopbackdisable(hnddma_t *dmah); +bool dma_txstopped(hnddma_t *dmah); +bool dma_rxstopped(hnddma_t *dmah); +void dma_rxenable(hnddma_t *dmah); +bool dma_rxenabled(hnddma_t *dmah); +void *dma_rx(hnddma_t *dmah); +#ifdef APP_RX +void dma_getnextrxp_app(hnddma_t *dmah, bool forceall, uint *pktcnt, + void **head, void **tail); +void dma_rxfill_haddr_getparams(hnddma_t *dmah, uint *nrxd, uint16 *rxout, + dma64dd_t **ddring, uint *rxextrahdrroom, uint32 **rxpktid); +void dma_rxfill_haddr_setparams(hnddma_t *dmah, uint16 rxout); +#endif /* APP_RX */ +uint dma_rx_get_rxoffset(hnddma_t *dmah); +bool dma_rxfill(hnddma_t *dmah); +bool dma_rxfill_required(hnddma_t *dmah); +void dma_txreclaim(hnddma_t 
*dmah, txd_range_t range); +void dma_rxreclaim(hnddma_t *dmah); +#define _DMA_GETUINTVARPTR_ +uint *dma_getuintvarptr(hnddma_t *dmah, const char *name); +uint8 dma_getuint8var(hnddma_t *dmah, const char *name); +uint16 dma_getuint16var(hnddma_t *dmah, const char *name); +uint32 dma_getuint32var(hnddma_t *dmah, const char *name); +void * dma_getnexttxp(hnddma_t *dmah, txd_range_t range); +void * dma_getnextp(hnddma_t *dmah); +void * dma_getnextrxp(hnddma_t *dmah, bool forceall); +void * dma_peeknexttxp(hnddma_t *dmah, txd_range_t range); +int dma_peekntxp(hnddma_t *dmah, int *len, void *txps[], txd_range_t range); +void * dma_peeknextrxp(hnddma_t *dmah); +void dma_rxparam_get(hnddma_t *dmah, uint16 *rxoffset, uint16 *rxbufsize); +bool dma_is_rxfill_suspend(hnddma_t *dmah); +void dma_txblock(hnddma_t *dmah); +void dma_txunblock(hnddma_t *dmah); +uint dma_txactive(hnddma_t *dmah); +uint dma_rxactive(hnddma_t *dmah); +void dma_txrotate(hnddma_t *dmah); +void dma_counterreset(hnddma_t *dmah); +uint dma_ctrlflags(hnddma_t *dmah, uint mask, uint flags); +uint dma_txpending(hnddma_t *dmah); +uint dma_txcommitted(hnddma_t *dmah); +int dma_pktpool_set(hnddma_t *dmah, pktpool_t *pool); +int dma_rxdatapool_set(hnddma_t *dmah, pktpool_t *pktpool); +pktpool_t *dma_rxdatapool_get(hnddma_t *dmah); + +void dma_dump_txdmaregs(hnddma_t *dmah, uint32 **buf); +void dma_dump_rxdmaregs(hnddma_t *dmah, uint32 **buf); +#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_DMA) +void dma_dump(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring); +void dma_dumptx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring); +void dma_dumprx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring); +#endif +bool dma_rxtxerror(hnddma_t *dmah, bool istx); +void dma_burstlen_set(hnddma_t *dmah, uint8 rxburstlen, uint8 txburstlen); +uint dma_avoidance_cnt(hnddma_t *dmah); +void dma_param_set(hnddma_t *dmah, uint16 paramid, uint16 paramval); +void dma_param_get(hnddma_t *dmah, uint16 paramid, uint *paramval); +void dma_context(hnddma_t *dmah, setup_context_t fn, void *ctx); + +bool dma_glom_enable(hnddma_t *dmah, uint32 val); +uint dma_activerxbuf(hnddma_t *dmah); +bool dma_rxidlestatus(hnddma_t *dmah); +uint dma_get_rxpost(hnddma_t *dmah); + +/* return addresswidth allowed + * This needs to be done after SB attach but before dma attach. 
+ * SB attach provides the ability to probe backplane and dma core capabilities.
+ * This info is needed by DMA_ALLOC_CONSISTENT in dma attach.
+ */
+extern uint dma_addrwidth(si_t *sih, void *dmaregs);
+
+/* count the number of tx packets that are queued to the dma ring */
+extern uint dma_txp(hnddma_t *di);
+
+extern void dma_txrewind(hnddma_t *di);
+
+/* pio helpers */
+extern int dma_msgbuf_txfast(hnddma_t *di, dma64addr_t p0, bool com, uint32 ln, bool fst, bool lst);
+extern int dma_ptrbuf_txfast(hnddma_t *dmah, dma64addr_t p0, void *p, bool commit,
+	uint32 len, bool first, bool last);
+
+extern int dma_rxfast(hnddma_t *di, dma64addr_t p, uint32 len);
+extern int dma_rxfill_suspend(hnddma_t *dmah, bool suspended);
+extern void dma_link_handle(hnddma_t *dmah1, hnddma_t *dmah2);
+extern void dma_unlink_handle(hnddma_t *dmah1, hnddma_t *dmah2);
+extern int dma_rxfill_unframed(hnddma_t *di, void *buf, uint len, bool commit);
+
+extern uint16 dma_get_next_txd_idx(hnddma_t *di, bool txout);
+extern uint16 dma_get_txd_count(hnddma_t *dmah, uint16 start, bool txout);
+extern uintptr dma_get_txd_addr(hnddma_t *di, uint16 idx);
+
+/* returns the memory address (hi and low) of the buffer associated with the dma descriptor
+ * having index idx.
+ */
+extern void dma_get_txd_memaddr(hnddma_t *dmah, uint32 *addrlo, uint32 *addrhi, uint idx);
+
+extern int dma_txdesc(hnddma_t *dmah, dma64dd_t *dd, bool commit);
+extern int dma_nexttxdd(hnddma_t *dmah, txd_range_t range, uint32 *flags1, uint32 *flags2,
+	bool advance);
+
+extern void dma_update_rxfill(hnddma_t *dmah);
+extern void dma_rxchan_reset(hnddma_t *di);
+extern void dma_txchan_reset(hnddma_t *di);
+extern void dma_chan_reset(hnddma_t *dmah);
+extern pktpool_t* dma_pktpool_get(hnddma_t *dmah);
+extern void dma_clearrxp(hnddma_t *dmah);
+extern void dma_cleartxp(hnddma_t *dmah);
+
+#define dma_getnexttxdd(dmah, range, flags1, flags2) \
+	dma_nexttxdd((dmah), (range), (flags1), (flags2), TRUE)
+
+#define dma_peeknexttxdd(dmah, range, flags1, flags2) \
+	dma_nexttxdd((dmah), (range), (flags1), (flags2), FALSE)
+
+#define NUM_VEC_PCIE		4
+
+#define XFER_FROM_LBUF		0x1
+#define XFER_TO_LBUF		0x2
+#define XFER_INJ_ERR		0x4
+
+typedef struct m2m_vec_s {
+	dma64addr_t	addr;
+	uint32		len;
+} m2m_vec_t;
+
+typedef struct m2m_desc_s {
+	uint8		num_rx_vec;
+	uint8		num_tx_vec;
+	uint8		flags;
+	bool		commit;
+	m2m_vec_t	vec[];
+} m2m_desc_t;
+
+#define INIT_M2M_DESC(desc) \
+{\
+	desc->num_rx_vec = 0; \
+	desc->num_tx_vec = 0; \
+	desc->flags = 0; \
+	desc->commit = TRUE; \
+}
+
+#define SETUP_RX_DESC(desc, rxaddr, rxlen) \
+{\
+	ASSERT(desc->num_tx_vec == 0); \
+	desc->vec[desc->num_rx_vec].addr = rxaddr; \
+	desc->vec[desc->num_rx_vec].len = rxlen; \
+	desc->num_rx_vec++; \
+}
+
+#define SETUP_TX_DESC(desc, txaddr, txlen) \
+{\
+	desc->vec[desc->num_tx_vec + desc->num_rx_vec].addr = txaddr; \
+	desc->vec[desc->num_tx_vec + desc->num_rx_vec].len = txlen; \
+	desc->num_tx_vec++; \
+}
+
+#define SETUP_XFER_FLAGS(desc, flag) \
+{\
+	desc->flags |= flag; \
+}
+
+#define DD_IS_SHARED_POOL(di)	((di)->dmactrlflags & DMA_CTRL_SHARED_POOL)
+
+extern int dma_m2m_submit(hnddma_t *dmah, m2m_desc_t *desc, bool implicit);
+extern void dma_chan_enable(hnddma_t *dmah, bool enable);
+
+extern bool dma_rxfill_p(hnddma_t *dmah, void *p);
+extern void dma_aqm_di_link(hnddma_t *dmah_aqm, hnddma_t *dmah_hw);
+extern void dma_dump_aqminfo(hnddma_t *dmah, struct bcmstrbuf *b, uint16 fifonum);
+
+/* To dump ntxd and nrxd from the DMA ring */
+void dma_dump_info(hnddma_t *dmah, uint16 fifonum, struct bcmstrbuf *b);
+
+#endif /* _hnddma_h_ */
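Editor's note: the m2m descriptor macros above pack all receive (destination) vectors into vec[] ahead of the transmit (source) vectors; SETUP_RX_DESC asserts that no tx vector has been added yet, which fixes the required call order. A minimal usage sketch for a one-in/one-out transfer follows. The allocation through the OSL MALLOCZ/MFREE helpers and the FALSE value for the 'implicit' argument are assumptions for illustration, not the driver's confirmed calling convention.

```c
/* Sketch: build a one-source/one-destination m2m descriptor and hand it
 * to dma_m2m_submit(). Allocation style and the 'implicit' argument are
 * illustrative assumptions.
 */
static int m2m_copy_example(osl_t *osh, hnddma_t *dmah,
	dma64addr_t src, dma64addr_t dst, uint32 len)
{
	uint alloc_sz = sizeof(m2m_desc_t) + 2 * sizeof(m2m_vec_t);
	m2m_desc_t *desc = (m2m_desc_t *)MALLOCZ(osh, alloc_sz);
	int ret;

	if (desc == NULL)
		return BCME_NOMEM;

	INIT_M2M_DESC(desc);		/* zero the vector counts, commit = TRUE */
	SETUP_RX_DESC(desc, dst, len);	/* rx (destination) vectors must come first */
	SETUP_TX_DESC(desc, src, len);	/* tx (source) vectors follow the rx ones */

	ret = dma_m2m_submit(dmah, desc, FALSE);
	MFREE(osh, desc, alloc_sz);
	return ret;
}
```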
diff --git a/bcmdhd.101.10.361.x/include/hndlhl.h b/bcmdhd.101.10.361.x/include/hndlhl.h
new file mode 100755
index 0000000..e2068a3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndlhl.h
@@ -0,0 +1,94 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _hndlhl_h_
+#define _hndlhl_h_
+
+enum {
+	LHL_MAC_TIMER = 0,
+	LHL_ARM_TIMER = 1
+};
+
+typedef struct {
+	uint16 offset;
+	uint32 mask;
+	uint32 val;
+} lhl_reg_set_t;
+
+#define LHL_REG_OFF(reg) OFFSETOF(gciregs_t, reg)
+
+extern void si_lhl_timer_config(si_t *sih, osl_t *osh, int timer_type);
+extern void si_lhl_timer_enable(si_t *sih);
+extern void si_lhl_timer_reset(si_t *sih, uint coreid, uint coreunit);
+
+extern void si_lhl_setup(si_t *sih, osl_t *osh);
+extern void si_lhl_enable(si_t *sih, osl_t *osh, bool enable);
+extern void si_lhl_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period);
+extern void si_lhl_enable_sdio_wakeup(si_t *sih, osl_t *osh);
+extern void si_lhl_disable_sdio_wakeup(si_t *sih);
+extern int si_lhl_set_lpoclk(si_t *sih, osl_t *osh, uint32 lpo_force);
+extern void si_set_lv_sleep_mode_lhl_config_4369(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4362(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4378(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4387(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4389(si_t *sih);
+
+#define HIB_EXT_WAKEUP_CAP(sih)	(PMUREV(sih->pmurev) >= 33)
+
+#ifdef WL_FWSIGN
+#define LHL_IS_PSMODE_0(sih)	(1)
+#define LHL_IS_PSMODE_1(sih)	(0)
+#else
+#define LHL_IS_PSMODE_0(sih)	(si_lhl_ps_mode(sih) == LHL_PS_MODE_0)
+#define LHL_IS_PSMODE_1(sih)	(si_lhl_ps_mode(sih) == LHL_PS_MODE_1)
+#endif /* WL_FWSIGN */
+
+/* LHL revid in capabilities register */
+#define LHL_CAP_REV_MASK	0x000000ff
+
+/* LHL rev 6 requires this bit to be set first */
+#define LHL_PWRSEQCTL_WL_FLLPU_EN	(1 << 7)
+
+#define LHL_CBUCK_VOLT_SLEEP_SHIFT	12u
+#define LHL_CBUCK_VOLT_SLEEP_MASK	0x0000F000
+
+#define LHL_ABUCK_VOLT_SLEEP_SHIFT	0u
+#define LHL_ABUCK_VOLT_SLEEP_MASK	0x0000000F
+
+extern void si_lhl_mactim0_set(si_t *sih, uint32 val);
+
+/* LHL Chip Control 1 Register */
+#define LHL_1MHZ_FLL_DAC_EXT_SHIFT	(9u)
+#define LHL_1MHZ_FLL_DAC_EXT_MASK	(0xffu << 9u)
+#define LHL_1MHZ_FLL_PRELOAD_MASK	(1u << 17u)
+
+/* LHL Top Level Power Sequence Control Register */
+#define LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK		(1u << 0)
+#define LHL_TOP_PWRSEQ_TOP_ISO_EN_MASK		(1u << 3u)
+#define LHL_TOP_PWRSEQ_TOP_SLB_EN_MASK		(1u << 4u)
+#define LHL_TOP_PWRSEQ_TOP_PWRSW_EN_MASK	(1u << 5u)
+#define LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK	(1u << 6u)
+#define LHL_TOP_PWRSEQ_SERDES_SLB_EN_MASK	(1u << 9u)
+#define LHL_TOP_PWRSEQ_SERDES_CLK_DIS_EN_MASK	(1u << 10u)
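Editor's note: the voltage-sleep and power-sequence definitions above are plain mask/shift field descriptors. A minimal sketch of the read-modify-write pattern they support, assuming hypothetical lhl_read32()/lhl_write32() accessors that stand in for the driver's real GCI/LHL register helpers:

```c
/* Sketch only: the mask/shift read-modify-write idiom the
 * LHL_*_VOLT_SLEEP_* pairs are designed for. lhl_read32/lhl_write32
 * are hypothetical accessors, not driver functions.
 */
extern uint32 lhl_read32(si_t *sih);			/* hypothetical */
extern void lhl_write32(si_t *sih, uint32 val);		/* hypothetical */

static void lhl_set_cbuck_sleep_volt(si_t *sih, uint32 volt)
{
	uint32 val = lhl_read32(sih);			/* read the current value */

	val &= ~LHL_CBUCK_VOLT_SLEEP_MASK;		/* clear the 4-bit field */
	val |= (volt << LHL_CBUCK_VOLT_SLEEP_SHIFT) &	/* place the new value */
		LHL_CBUCK_VOLT_SLEEP_MASK;
	lhl_write32(sih, val);				/* write it back */
}
```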
+
+#endif /* _hndlhl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hndmem.h b/bcmdhd.101.10.361.x/include/hndmem.h
new file mode 100755
index 0000000..b77b751
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndmem.h
@@ -0,0 +1,74 @@
+/*
+ * Utility routines for configuring different memories in Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _HNDMEM_H_
+#define _HNDMEM_H_
+
+typedef enum {
+	MEM_SOCRAM = 0,
+	MEM_BM = 1,
+	MEM_UCM = 2,
+	MEM_SHM = 3,
+	MEM_MAX = 4
+} hndmem_type_t;
+
+/* PDA (Power Down Array) configuration */
+typedef enum {
+	PDA_CONFIG_CLEAR = 0,		/* Clear PDA, i.e. Turns on the memory bank */
+	PDA_CONFIG_SET_FULL = 1,	/* Set PDA, i.e. Turns off the memory bank */
+	PDA_CONFIG_SET_PARTIAL = 2,	/* Set PDA partially, i.e. Turns off part of the memory bank */
+	PDA_CONFIG_MAX = 3
+} hndmem_config_t;
+
+/* Returns the number of banks in a given memory */
+extern int hndmem_num_banks(si_t *sih, int mem);
+
+/* Returns the size of a given bank in a given memory */
+extern int hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num);
+
+/* Returns the start address of a given memory */
+extern uint32 hndmem_mem_base(si_t *sih, hndmem_type_t mem);
+
+#ifdef BCMDEBUG
+/* Dumps the complete memory information */
+extern void hndmem_dump_meminfo_all(si_t *sih);
+#endif /* BCMDEBUG */
+
+/* Configures the Sleep PDA for a particular bank for a given memory type */
+extern int hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem,
+	int bank_num, hndmem_config_t config, uint32 pda);
+/* Configures the Active PDA for a particular bank for a given memory type */
+extern int hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem,
+	int bank_num, hndmem_config_t config, uint32 pda);
+
+/* Configures the Sleep PDA for all the banks for a given memory type */
+extern int hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem,
+	hndmem_config_t config);
+/* Configures the Active PDA for all the banks for a given memory type */
+extern int hndmem_activepda_config(si_t *sih, hndmem_type_t mem,
+	hndmem_config_t config);
+
+/* Turn off/on all the possible banks in a given memory range */
+extern int hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem,
+	uint32 mem_start, uint32 size, hndmem_config_t config);
+#endif /* _HNDMEM_H_ */
diff --git a/bcmdhd.101.10.361.x/include/hndoobr.h b/bcmdhd.101.10.361.x/include/hndoobr.h
new file mode 100755
index 0000000..c27070e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndoobr.h
@@ -0,0 +1,93 @@
+/*
+ * HND OOBR interface header
+ *
+ * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _hndoobr_h_ +#define _hndoobr_h_ + +#include +#include + +/* for 'srcpidx' of hnd_oobr_get_intr_config() */ +#define HND_CORE_MAIN_INTR 0 +#define HND_CORE_ALT_INTR 1 + +uint32 hnd_oobr_get_clkpwrreq(si_t *sih, uint coreid); +uint32 hnd_oobr_get_intstatus(si_t *sih); +int hnd_oobr_get_intr_config(si_t *sih, uint srccidx, uint srcpidx, uint dstcidx, uint *dstpidx); +int hnd_oobr_set_intr_src(si_t *sih, uint dstcidx, uint dstpidx, uint intrnum); +void hnd_oobr_init(si_t *sih); + +#ifdef BCMDBG +/* dump oobr registers values to console */ +void hnd_oobr_dump(si_t *sih); +#endif + +#define OOBR_INVALID_PORT 0xFFu + +/* per core source/dest sel reg */ +#define OOBR_INTR_PER_CONFREG 4u /* 4 interrupts per configure reg */ +#define OOBR_INTR_NUM_MASK 0x7Fu +#define OOBR_INTR_EN 0x80u +/* per core config reg */ +#define OOBR_CORECNF_OUTPUT_MASK 0x0000FF00u +#define OOBR_CORECNF_OUTPUT_SHIFT 8u +#define OOBR_CORECNF_INPUT_MASK 0x00FF0000u +#define OOBR_CORECNF_INPUT_SHIFT 16u + +#define OOBR_EXT_RSRC_REQ_PERCORE_OFFSET 0x34u +#define OOBR_EXT_RSRC_OFFSET 0x100u +#define OOBR_EXT_RSRC_SHIFT 7u +#define OOBR_EXT_RSRC_REQ_ADDR(oodr_base, core_idx) (uint32)((uintptr)(oodr_base) +\ + OOBR_EXT_RSRC_OFFSET + ((core_idx) << OOBR_EXT_RSRC_SHIFT) +\ + OOBR_EXT_RSRC_REQ_PERCORE_OFFSET) + +typedef volatile struct hndoobr_percore_reg { + uint32 sourcesel[OOBR_INTR_PER_CONFREG]; /* 0x00 - 0x0c */ + uint32 destsel[OOBR_INTR_PER_CONFREG]; /* 0x10 - 0x1c */ + uint32 reserved[4]; + uint32 clkpwrreq; /* 0x30 */ + uint32 extrsrcreq; /* 0x34 */ + uint32 config; /* 0x38 */ + uint32 reserved1[17]; /* 0x3c to 0x7c */ +} hndoobr_percore_reg_t; + +/* capability reg */ +#define OOBR_CAP_CORECNT_MASK 0x0000001Fu +#define OOBR_CAP_MAX_INT2CORE_MASK 0x00F00000u +#define OOBR_CAP_MAX_INT2CORE_SHIFT 20u + +#define OOBR_MAX_INT_PER_REG 4u + +/* CoreNConfig reg */ +#define OOBR_PERCORE_CORENCONFIG_INTOUTPUTS_MASK 0x0000FF00u +#define OOBR_PERCORE_CORENCONFIG_INTOUTPUTS_SHIFT 8u + +typedef volatile struct hndoobr_reg { + uint32 capability; /* 0x00 */ + uint32 reserved[3]; + uint32 intstatus[4]; /* 0x10 - 0x1c */ + uint32 reserved1[56]; /* 0x20 - 0xfc */ + hndoobr_percore_reg_t percore_reg[1]; /* 0x100 */ +} hndoobr_reg_t; + +#endif /* _hndoobr_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hndpmu.h b/bcmdhd.101.10.361.x/include/hndpmu.h new file mode 100755 index 0000000..63d14f9 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hndpmu.h @@ -0,0 +1,348 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _hndpmu_h_ +#define _hndpmu_h_ + +#include +#include +#include +#include +#if defined(BTOVERPCIE) || defined(BT_WLAN_REG_ON_WAR) +#include +#endif /* BTOVERPCIE || BT_WLAN_REG_ON_WAR */ + +#if !defined(BCMDONGLEHOST) + +#define SET_LDO_VOLTAGE_LDO1 1 +#define SET_LDO_VOLTAGE_LDO2 2 +#define SET_LDO_VOLTAGE_LDO3 3 +#define SET_LDO_VOLTAGE_PAREF 4 +#define SET_LDO_VOLTAGE_CLDO_PWM 5 +#define SET_LDO_VOLTAGE_CLDO_BURST 6 +#define SET_LDO_VOLTAGE_CBUCK_PWM 7 +#define SET_LDO_VOLTAGE_CBUCK_BURST 8 +#define SET_LDO_VOLTAGE_LNLDO1 9 +#define SET_LDO_VOLTAGE_LNLDO2_SEL 10 +#define SET_LNLDO_PWERUP_LATCH_CTRL 11 +#define SET_LDO_VOLTAGE_LDO3P3 12 + +#define BBPLL_NDIV_FRAC_BITS 24 +#define P1_DIV_SCALE_BITS 12 + +#define PMUREQTIMER (1 << 0) + +#define XTAL_FREQ_40MHZ 40000 +#define XTAL_FREQ_54MHZ 54000 + +/* selects core based on AOB_ENAB() */ +#define PMUREGADDR(sih, pmur, ccr, member) \ + (AOB_ENAB(sih) ? 
(&(pmur)->member) : (&(ccr)->member))
+
+/* prevents backplane stall caused by subsequent writes to 'ilp domain' PMU registers */
+#define HND_PMU_SYNC_WR(sih, pmur, ccr, osh, r, v) do { \
+	if ((sih) && (sih)->pmurev >= 22) { \
+		while (R_REG(osh, PMUREGADDR(sih, pmur, ccr, pmustatus)) & \
+		       PST_SLOW_WR_PENDING) { \
+			; /* empty */ \
+		} \
+	} \
+	W_REG(osh, r, v); \
+	(void)R_REG(osh, r); \
+} while (0)
+
+/* PMU Stat Timer */
+
+/* for count mode */
+enum {
+	PMU_STATS_LEVEL_HIGH = 0,
+	PMU_STATS_LEVEL_LOW,
+	PMU_STATS_EDGE_RISE,
+	PMU_STATS_EDGE_FALL
+};
+
+typedef struct {
+	uint8 src_num;		/* predefined source hw signal num to map timer */
+	bool enable;		/* timer enable/disable */
+	bool int_enable;	/* overflow interrupts enable/disable */
+	uint8 cnt_mode;
+} pmu_stats_timer_t;
+
+/* internal hw signal source number for Timer */
+#define SRC_PMU_RESRC_OFFSET	0x40
+
+#define SRC_LINK_IN_L12		0
+#define SRC_LINK_IN_L23		1
+#define SRC_PM_ST_IN_D0		2
+#define SRC_PM_ST_IN_D3		3
+
+#define SRC_XTAL_PU		(SRC_PMU_RESRC_OFFSET + RES4347_XTAL_PU)
+#define SRC_CORE_RDY_MAIN	(SRC_PMU_RESRC_OFFSET + RES4347_CORE_RDY_MAIN)
+#define SRC_CORE_RDY_AUX	(SRC_PMU_RESRC_OFFSET + RES4347_CORE_RDY_AUX)
+
+#ifdef BCMPMU_STATS
+extern bool _pmustatsenab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+	#define PMU_STATS_ENAB()	(_pmustatsenab)
+#elif defined(BCMPMU_STATS_DISABLED)
+	#define PMU_STATS_ENAB()	(0)
+#else
+	#define PMU_STATS_ENAB()	(1)
+#endif
+#else
+	#define PMU_STATS_ENAB()	(0)
+#endif /* BCMPMU_STATS */
+
+#define RES4369_HTAVAIL_VAL 0x00a80022
+
+#if defined(BTOVERPCIE) && defined(BT_WLAN_REG_ON_WAR)
+#error "'BT over PCIe' and 'WLAN/BT REG_ON WAR' are mutually exclusive as both share the same GCI semaphore - THREAD_0_GCI_SEM_3_ID"
+#endif /* BTOVERPCIE && BT_WLAN_REG_ON_WAR */
+
+#if defined(BTOVERPCIE)
+#define GCI_PLL_LOCK_SEM	THREAD_0_GCI_SEM_3_ID
+/* changed from msec to usec */
+#define GCI_PLL_LOCK_SEM_TIMEOUT	(GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+#endif /* BTOVERPCIE */
+
+#if defined(BT_WLAN_REG_ON_WAR)
+#define GCI_BT_WLAN_REG_ON_WAR_SEM	THREAD_0_GCI_SEM_3_ID
+#define GCI_BT_WLAN_REG_ON_WAR_SEM_TIMEOUT	(GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+#endif /* BT_WLAN_REG_ON_WAR */
+
+#define GCI_INDIRECT_ACCESS_SEM		THREAD_0_GCI_SEM_2_ID
+#define GCI_INDIRECT_ACCESS_SEM_TIMEOUT	(GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+
+#define GCI_TREFUP_DS_SEM	THREAD_0_GCI_SEM_5_ID
+#define GCI_TREFUP_DS_SEM_TIMEOUT	(GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+
+#define GCI_BT_BOOTSTAGE_MEMOFFSET	(0x570u)
+#define GCI_BT_BOOTSTAGE_FW_WAIT	0u	/* BT ROM code waiting on FW boot */
+#define GCI_BT_BOOTSTAGE_FW_BOOT	2u	/* upon FW boot/start */
+#define GCI_BT_BOOTSTAGE_FW_TRAP	3u	/* upon a trap */
+#define GCI_BT_BOOTSTAGE_FW_INVALID	0xFFu
+
+#define GCI_TREFUP_DS_MEMOFFSET	(0x57Cu)
+#define GCI_TREFUP_DS_WLAN	(1u << 0u)
+#define GCI_TREFUP_DS_BT	(1u << 1u)
+#define GCI_SHARED_SFLASH_RSVD	(1u << 2u)
+
+#define GCI_SHARED_SFLASH_SEM	THREAD_0_GCI_SEM_6_ID
+#define GCI_SHARED_SFLASH_SEM_TIMEOUT	(GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+#define GCI_SHARED_SFLASH_SEM_ERASE_RSVD_TIMEOUT	(50 + 30)	/* 50 us + headroom */
+
+#define SLEW_RATE_VALUE_REG_4369	(PMU_VREG_6)
+#define SLEW_RATE_SHIFT_4369(x)		(9u + (x * 8u))
+#define SLEW_RATE_SIZE_4369		(3u)
+#define SLEW_RATE_MASK_4369		((1u << SLEW_RATE_SIZE_4369) - 1u)
+#define SOFT_START_EN_REG_4369		(PMU_VREG_5)
+#define SOFT_START_EN_SHIFT_4369(x)	(4u + x)
+#define SOFT_START_EN_SIZE_4369		(1u)
+#define SOFT_START_EN_MASK_4369		((1u << SOFT_START_EN_SIZE_4369) - 1u)
+#define SOFT_START_EN_VALUE_4369	(1u)
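Editor's note: HND_PMU_SYNC_WR above guards 'ilp domain' PMU register writes: on PMU rev 22 and later it spins until PST_SLOW_WR_PENDING clears in pmustatus, then performs the write and a read-back to flush it. A hedged usage sketch; min_res_mask is only an example target register, and the register views are assumed to be the pmuregs_t/chipcregs_t pair PMUREGADDR selects between:

```c
/* Sketch: updating an ILP-domain PMU register without stalling the
 * backplane. 'min_res_mask' is an example pmuregs_t field; real callers
 * pass whichever register they own.
 */
static void pmu_set_min_res_mask_example(si_t *sih, pmuregs_t *pmu,
	chipcregs_t *cc, osl_t *osh, uint32 mask)
{
	/* waits for PST_SLOW_WR_PENDING to clear (pmurev >= 22), then
	 * writes the register and reads it back to flush the write
	 */
	HND_PMU_SYNC_WR(sih, pmu, cc, osh,
		PMUREGADDR(sih, pmu, cc, min_res_mask), mask);
}
```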
+
+#define SLEW_RATE_VALUE_REG_4378	(PMU_VREG_6)
+#define SLEW_RATE_SHIFT_4378(x)		(9u + (x * 8u))
+#define SLEW_RATE_SIZE_4378		(3u)
+#define SLEW_RATE_MASK_4378		((1u << SLEW_RATE_SIZE_4378) - 1u)
+#define SOFT_START_EN_REG_4378		(PMU_VREG_5)
+#define SOFT_START_EN_SHIFT_4378(x)	(4u + x)
+#define SOFT_START_EN_SIZE_4378		(1u)
+#define SOFT_START_EN_MASK_4378		((1u << SOFT_START_EN_SIZE_4378) - 1u)
+#define SOFT_START_EN_VALUE_4378	(1u)
+#define SOFT_START_EN_VALUE_4378_REV37	(0u)
+
+#define SLEW_RATE_VALUE_REG_4387	(PMU_VREG_6)
+#define SLEW_RATE_SHIFT_4387(x)		(18u)
+#define SLEW_RATE_SIZE_4387		(2u)
+#define SLEW_RATE_MASK_4387		((1u << SLEW_RATE_SIZE_4387) - 1u)
+#define SOFT_START_EN_REG_4387		(PMU_VREG_6)
+#define SOFT_START_EN_SHIFT_4387(x)	(17u)
+#define SOFT_START_EN_SIZE_4387		(1u)
+#define SOFT_START_EN_MASK_4387		((1u << SOFT_START_EN_SIZE_4387) - 1u)
+#define SOFT_START_EN_VALUE_4387	(0u)
+
+extern void si_pmu_init(si_t *sih, osl_t *osh);
+extern void si_pmu_chip_init(si_t *sih, osl_t *osh);
+extern void si_pmu_pll_init(si_t *sih, osl_t *osh, uint32 xtalfreq);
+extern void si_pmu_res_init(si_t *sih, osl_t *osh);
+extern void si_pmu_swreg_init(si_t *sih, osl_t *osh);
+extern void si_pmu_res_minmax_update(si_t *sih, osl_t *osh);
+extern void si_pmu_clear_intmask(si_t *sih);
+
+extern uint32 si_pmu_si_clock(si_t *sih, osl_t *osh);	/* returns [Hz] units */
+extern uint32 si_pmu_cpu_clock(si_t *sih, osl_t *osh);	/* returns [Hz] units */
+extern uint32 si_pmu_mem_clock(si_t *sih, osl_t *osh);	/* returns [Hz] units */
+extern uint32 si_pmu_alp_clock(si_t *sih, osl_t *osh);	/* returns [Hz] units */
+extern void si_pmu_ilp_clock_set(uint32 cycles);
+extern uint32 si_pmu_ilp_clock(si_t *sih, osl_t *osh);	/* returns [Hz] units */
+
+extern void si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, uint8 ldo, uint8 voltage);
+extern uint16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh);
+extern uint si_pmu_fast_pwrup_delay_dig(si_t *sih, osl_t *osh);
+extern void si_pmu_pllupd(si_t *sih);
+extern void si_pmu_spuravoid(si_t *sih, osl_t *osh, uint8 spuravoid);
+extern void si_pmu_pll_off_PARR(si_t *sih, osl_t *osh, uint32 *min_res_mask,
+	uint32 *max_res_mask, uint32 *clk_ctl_st);
+extern uint32 si_pmu_pll28nm_fvco(si_t *sih);
+/* the function below is only for BBPLL parallel purposes */
+extern void si_pmu_gband_spurwar(si_t *sih, osl_t *osh);
+
+extern bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh);
+extern uint32 si_pmu_measure_alpclk(si_t *sih, osl_t *osh);
+
+extern uint32 si_pmu_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+#if defined(SAVERESTORE)
+extern void si_set_abuck_mode_4362(si_t *sih, uint8 mode);
+#endif /* SAVERESTORE */
+
+#define si_pmu_regcontrol si_pmu_vreg_control	/* prevents build err because of usage in PHY */
+extern uint32 si_pmu_vreg_control(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_pmu_pllcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern void si_pmu_pllupd(si_t *sih);
+
+extern uint32 si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh, uint32 clk, uint32 delay);
+extern uint32 si_pmu_get_bb_vcofreq(si_t *sih, osl_t *osh, int xtalfreq);
+typedef void (*si_pmu_callback_t)(void* arg);
+
+extern uint32 si_mac_clk(si_t *sih, osl_t *osh);
+extern void si_pmu_switch_on_PARLDO(si_t *sih, osl_t *osh);
+extern void si_pmu_switch_off_PARLDO(si_t *sih, osl_t *osh);
+
+/* TODO: need a better fn name or better abstraction than the raw fvco
+ * and MAC clock channel divisor...
+ */ +extern int si_pmu_fvco_macdiv(si_t *sih, uint32 *fvco, uint32 *div); + +extern bool si_pmu_reset_ret_sleep_log(si_t *sih, osl_t *osh); +extern bool si_pmu_reset_chip_sleep_log(si_t *sih, osl_t *osh); +extern int si_pmu_openloop_cal(si_t *sih, uint16 currtemp); + +#ifdef LDO3P3_MIN_RES_MASK +extern int si_pmu_min_res_ldo3p3_set(si_t *sih, osl_t *osh, bool on); +extern int si_pmu_min_res_ldo3p3_get(si_t *sih, osl_t *osh, int *res); +#endif /* LDO3P3_MIN_RES_MASK */ + +void si_pmu_bt_ldo_pu(si_t *sih, bool up); + +int si_pmu_ldo3p3_soft_start_wl_get(si_t *sih, osl_t *osh, int *res); +int si_pmu_ldo3p3_soft_start_wl_set(si_t *sih, osl_t *osh, uint32 slew_rate); +int si_pmu_ldo3p3_soft_start_bt_get(si_t *sih, osl_t *osh, int *res); +int si_pmu_ldo3p3_soft_start_bt_set(si_t *sih, osl_t *osh, uint32 slew_rate); +extern int si_pmu_min_res_otp_pu_set(si_t *sih, osl_t *osh, bool on); +#endif /* !defined(BCMDONGLEHOST) */ + +#if defined(EDV) +extern uint32 si_pmu_get_backplaneclkspeed(si_t *sih); +extern void si_pmu_update_backplane_clock(si_t *sih, osl_t *osh, uint reg, uint32 mask, uint32 val); +#endif + +extern uint32 si_pmu_rsrc_macphy_clk_deps(si_t *sih, osl_t *osh, int maccore_index); +extern uint32 si_pmu_rsrc_ht_avail_clk_deps(si_t *sih, osl_t *osh); +extern uint32 si_pmu_rsrc_cb_ready_deps(si_t *sih, osl_t *osh); + +extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask); +extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); + +extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh); +extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag); +extern uint32 si_pmu_dump_pmucap_binary(si_t *sih, uchar *p); +extern uint32 si_pmu_dump_buf_size_pmucap(si_t *sih); +extern int si_pmu_wait_for_steady_state(si_t *sih, osl_t *osh, pmuregs_t *pmu); +#ifdef ATE_BUILD +extern void hnd_pmu_clr_int_sts_req_active(osl_t *hnd_osh, si_t *hnd_sih); +#endif +extern uint32 si_pmu_wake_bit_offset(si_t *sih); +extern uint32 si_pmu_get_pmutimer(si_t *sih); +extern void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask); +extern void si_pmu_set_mac_rsrc_req(si_t *sih, int macunit); +extern void si_pmu_set_mac_rsrc_req_sc(si_t *sih, osl_t *osh); +extern bool si_pmu_fast_lpo_enable_pcie(si_t *sih); +extern bool si_pmu_fast_lpo_enable_pmu(si_t *sih); +extern uint32 si_cur_pmu_time(si_t *sih); +extern bool si_pmu_cap_fast_lpo(si_t *sih); +extern int si_pmu_fast_lpo_disable(si_t *sih); +extern void si_pmu_dmn1_perst_wakeup(si_t *sih, bool set); +#ifdef BCMPMU_STATS +extern void si_pmustatstimer_init(si_t *sih); +extern void si_pmustatstimer_dump(si_t *sih); +extern void si_pmustatstimer_start(si_t *sih, uint8 timerid); +extern void si_pmustatstimer_stop(si_t *sih, uint8 timerid); +extern void si_pmustatstimer_clear(si_t *sih, uint8 timerid); +extern void si_pmustatstimer_clear_overflow(si_t *sih); +extern uint32 si_pmustatstimer_read(si_t *sih, uint8 timerid); +extern void si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid); +extern void si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid); +extern void si_pmustatstimer_int_enable(si_t *sih); +extern void si_pmustatstimer_int_disable(si_t *sih); +#endif /* BCMPMU_STATS */ +extern int si_pmu_min_res_set(si_t *sih, osl_t *osh, uint min_mask, bool set); +extern void si_pmu_disable_intr_pwrreq(si_t *sih); + +#ifdef DONGLEBUILD +/* Get PMU registers in rodata */ +extern int si_pmu_regs_in_rodata_dump(void *sih, void *arg2, uint32 *bufptr, 
uint16 *len); +#endif + +extern void si_pmu_fis_setup(si_t *sih); + +extern uint si_pmu_get_mac_rsrc_req_tmr_cnt(si_t *sih); +extern uint si_pmu_get_pmu_interrupt_rcv_cnt(si_t *sih); + +extern bool _bcm_pwr_opt_dis; +#define BCM_PWR_OPT_ENAB() (FALSE) + +extern int si_pmu_mem_pwr_off(si_t *sih, int core_idx); +extern int si_pmu_mem_pwr_on(si_t *sih); +extern int si_pmu_lvm_csr_update(si_t *sih, bool lvm); + +#if defined(BT_WLAN_REG_ON_WAR) +#define REG_ON_WAR_PMU_EXT_WAKE_REQ_MASK0_VAL 0x060000CDu + +extern void si_pmu_reg_on_war_ext_wake_perst_set(si_t *sih); +extern void si_pmu_reg_on_war_ext_wake_perst_clear(si_t *sih); +#endif /* BT_WLAN_REG_ON_WAR */ + +#if defined (BCMSRTOPOFF) + extern bool _srtopoff_enab; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMSRTOPOFF_ENAB() (_srtopoff_enab) +#elif defined(BCMSRTOPOFF_DISABLED) + #define BCMSRTOPOFF_ENAB() (0) +#else + #define BCMSRTOPOFF_ENAB() (_srtopoff_enab) +#endif +#else + #define BCMSRTOPOFF_ENAB() (0) +#endif /* BCMSRTOPOFF */ + +#ifdef BCM_PMU_FLL_PU_MANAGE +#define PMU_FLL_PU_ENAB() (TRUE) +#else +#define PMU_FLL_PU_ENAB() (FALSE) +#endif + +extern pmuregs_t *hnd_pmur; /* PMU core regs */ +extern void si_pmu_res_state_wait(si_t *sih, uint rsrc); +#endif /* _hndpmu_h_ */ diff --git a/bcmdhd.101.10.361.x/include/hndsoc.h b/bcmdhd.101.10.361.x/include/hndsoc.h new file mode 100755 index 0000000..7349586 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/hndsoc.h @@ -0,0 +1,353 @@ +/* + * Broadcom HND chip & on-chip-interconnect-related definitions. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _HNDSOC_H +#define _HNDSOC_H + +/* Include the soci specific files */ +#include +#include + +/* + * SOC Interconnect Address Map. + * All regions may not exist on all chips. 
+ */ +#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */ +#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI_MEM_SZ (64 * 1024 * 1024) +#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ +#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */ + +#ifndef SI_ENUM_BASE_DEFAULT +#define SI_ENUM_BASE_DEFAULT 0x18000000 /* Enumeration space base */ +#endif + +#ifndef SI_WRAP_BASE_DEFAULT +#define SI_WRAP_BASE_DEFAULT 0x18100000 /* Wrapper space base */ +#endif + +#define WL_BRIDGE1_S (0x18132000) +#define WL_BRIDGE2_S (0x18133000) + +/** new(er) chips started locating their chipc core at a different BP address than 0x1800_0000 */ +#ifdef DONGLEBUILD +// firmware is always compiled for a particular chip +#define SI_ENUM_BASE(sih) SI_ENUM_BASE_DEFAULT +#define SI_WRAP_BASE(sih) SI_WRAP_BASE_DEFAULT +#else +// NIC and DHD driver binaries should support both old(er) and new(er) chips at the same time +#define SI_ENUM_BASE(sih) ((sih)->enum_base) +#define SI_WRAP_BASE(sih) (SI_ENUM_BASE(sih) + 0x00100000) +#endif /* DONGLEBUILD */ + +#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ + +#define SI_NIC400_GPV_BASE 0x18200000 /* NIC-400 Global Programmers View (GPV) */ +#define SI_GPV_WR_CAP_ADDR 0x4008 /* WR-CAP offset */ +#define SI_GPV_RD_CAP_EN 0x1 /* issue read */ +#define SI_GPV_WR_CAP_EN 0x2 /* issue write */ + +#define SI_GPV_SL4_BM_ADDR 0x44024 /* NIC-400 Slave interface 4 Bypass merge */ +#define SI_GPV_SL6_BM_ADDR 0x46024 /* NIC-400 Slave interface 6 Bypass merge */ +#define SI_GPV_SL8_BM_ADDR 0x4a024 /* NIC-400 Slave interface 8 Bypass merge */ +#define SI_GPV_SL9_BM_ADDR 0x4b024 /* NIC-400 Slave interface 9 Bypass merge */ + +/* AXI Slave Interface Block (ASIB) offsets */ +#define ASIB_FN_MOD2 0x24 + +#ifndef SI_MAXCORES +#ifdef _RTE_ +#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + */ +#else +#define SI_MAXCORES 32 /* NorthStar has more cores */ +#endif /* _RTE_ */ +#endif /* SI_MAXCORES */ + +#define SI_MAXBR 4 /* Max bridges (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + */ + +#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ +#define SI_FASTRAM_SWAPPED 0x19800000 + +#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ +#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */ +#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define SI_FLASH_WINDOW 0x01000000 /* Flash XIP Window */ + +#define SI_NS_NANDFLASH 0x1c000000 /* NorthStar NAND flash base */ +#define SI_NS_NORFLASH 0x1e000000 /* NorthStar NOR flash base */ +#define SI_NS_ROM 0xfffd0000 /* NorthStar ROM */ +#define SI_NS_FLASH_WINDOW 0x02000000 /* Flash XIP Window */ + +#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */ +#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */ +#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ +#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ +#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */ +#ifndef SI_ARMCA7_RAM +#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */ +#endif +#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ +#define SI_ARM_FLASH1_SZ 
0x00010000 /* ARM Size of Flash Region 1 */ + +#define SI_SFLASH 0x14000000 +#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */ +#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +/* APB bridge code */ +#define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 1, etc. */ + +/* ADB bridge code */ +#define ADB_BRIDGE_ID 0x031 + +/* AXI-AHB bridge code */ +#define AXI2AHB_BRIDGE_ID 0x240 /* AXI_AHB Bridge */ + +/* core codes */ +#define NODEV_CORE_ID 0x700 /* Invalid coreid */ +#define CC_CORE_ID 0x800 /* chipcommon core */ +#define ILINE20_CORE_ID 0x801 /* iline20 core */ +#define SRAM_CORE_ID 0x802 /* sram core */ +#define SDRAM_CORE_ID 0x803 /* sdram core */ +#define PCI_CORE_ID 0x804 /* pci core */ +#define MIPS_CORE_ID 0x805 /* mips core */ +#define ENET_CORE_ID 0x806 /* enet mac core */ +#define CODEC_CORE_ID 0x807 /* v90 codec core */ +#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ +#define ADSL_CORE_ID 0x809 /* ADSL core */ +#define ILINE100_CORE_ID 0x80a /* iline100 core */ +#define IPSEC_CORE_ID 0x80b /* ipsec core */ +#define UTOPIA_CORE_ID 0x80c /* utopia core */ +#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ +#define SOCRAM_CORE_ID 0x80e /* internal memory core */ +#define MEMC_CORE_ID 0x80f /* memc sdram core */ +#define OFDM_CORE_ID 0x810 /* OFDM phy core */ +#define EXTIF_CORE_ID 0x811 /* external interface core */ +#define D11_CORE_ID 0x812 /* 802.11 MAC core */ +#define APHY_CORE_ID 0x813 /* 802.11a phy core */ +#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ +#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ +#define MIPS33_CORE_ID 0x816 /* mips3302 core */ +#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ +#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ +#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ +#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ +#define SDIOH_CORE_ID 0x81b /* sdio host core */ +#define ROBO_CORE_ID 0x81c /* roboswitch core */ +#define ATA100_CORE_ID 0x81d /* parallel ATA core */ +#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ +#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ +#define PCIE_CORE_ID 0x820 /* pci express core */ +#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ +#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ +#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ +#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ +#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ +#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ +#define PMU_CORE_ID 0x827 /* PMU core */ +#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ +#define SDIOD_CORE_ID 0x829 /* SDIO device core */ +#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ +#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ +#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ +#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ +#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ +#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ +#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ +#define SC_CORE_ID 0x831 /* shared common core */ +#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ +#define SPIH_CORE_ID 0x833 /* SPI host core */ +#define I2S_CORE_ID 
0x834 /* I2S core */ +#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ +#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ + +#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */ +#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */ +#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */ +#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */ +#define GCI_CORE_ID 0x840 /* GCI Core */ +#define SR_CORE_ID 0x841 /* SR_CORE ID */ +#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */ +#define CMEM_CORE_ID 0x846 /* CNDS DDR2/3 memory controller */ +#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */ +#define SYSMEM_CORE_ID 0x849 /* System memory core */ +#define HUB_CORE_ID 0x84b /* Hub core ID */ +#define HWA_CORE_ID 0x851 /* HWA Core ID */ +#define SPMI_SLAVE_CORE_ID 0x855 /* SPMI Slave Core ID */ +#define BT_CORE_ID 0x857 /* Bluetooth Core ID */ +#define HND_OOBR_CORE_ID 0x85c /* Hnd oob router core ID */ +#define SOE_CORE_ID 0x85d /* SOE core */ +#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */ +#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */ +#define EROM_CORE_ID 0x366 /* EROM core ID */ +#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ +#define CCI400_CORE_ID 0x420 /* CCI-400 (Cache Coherent Interconnect) core ID */ +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all + * unused address ranges + */ + +#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ +#define NS_DMA_CORE_ID 0x502 /* DMA core */ +#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ +#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ +#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ +#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ +#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ +#define NS_ROM_CORE_ID 0x508 /* ROM core */ +#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ +#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ +#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ +#define NS_SOCRAM_CORE_ID 0x50e /* internal memory core */ +#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ +#define NS_IHOST_CORE_ID ARMCA9_CORE_ID /* ARM Cortex A9 core (ihost) */ +#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */ +#define ALTA_CORE_ID 0x534 /* I2S core */ +#define DDR23_PHY_CORE_ID 0x5dd + +#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ +#define NS_PCIEG2_CORE_REV_B0 0x7 /* NS-B0 PCIE Gen 2 core rev */ + +/* There are TWO constants on all HND chips: SI_ENUM_BASE_DEFAULT above, + * and chipcommon being the first core: + */ +#define SI_CC_IDX 0 +/* SOC Interconnect types (aka chip types) */ +#define SOCI_SB 0u +#define SOCI_AI 1u +#define SOCI_UBUS 2u +#define SOCI_NAI 3u +#define SOCI_DVTBUS 4u /* BCM7XXX Digital Video Tech bus */ +#define SOCI_NCI 6u /* NCI (non coherent interconnect) i.e. 
BOOKER */
+
+/* Common core control flags */
+#define	SICF_BIST_EN		0x8000
+#define	SICF_PME_EN		0x4000
+#define	SICF_CORE_BITS		0x3ffc
+#define	SICF_PCEN		0x0004
+#define	SICF_FGC		0x0002
+#define	SICF_CLOCK_EN		0x0001
+
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+#define	SISF_CORE_BITS_SCAN	0x0010	/* SCAN core */
+
+/* NorthStar core status flags */
+#define SISF_NS_BOOTDEV_MASK	0x0003	/* ROM core */
+#define SISF_NS_BOOTDEV_NOR	0x0000	/* ROM core */
+#define SISF_NS_BOOTDEV_NAND	0x0001	/* ROM core */
+#define SISF_NS_BOOTDEV_ROM	0x0002	/* ROM core */
+#define SISF_NS_BOOTDEV_OFFLOAD	0x0003	/* ROM core */
+#define SISF_NS_SKUVEC_MASK	0x000c	/* ROM core */
+
+/* dot11 core-specific status flags */
+#define	SISF_MINORREV_D11_SHIFT	16
+#define	SISF_MINORREV_D11_MASK	0xF	/**< minor corerev (corerev == 61) */
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define	SI_CLK_CTL_ST		0x1e0	/* clock control and status */
+#define	SI_PWR_CTL_ST		0x1e8	/* For memory clock gating */
+
+/* clk_ctl_st register */
+#define	CCS_FORCEALP		0x00000001	/* force ALP request */
+#define	CCS_FORCEHT		0x00000002	/* force HT request */
+#define	CCS_FORCEILP		0x00000004	/* force ILP request */
+#define	CCS_ALPAREQ		0x00000008	/* ALP Avail Request */
+#define	CCS_HTAREQ		0x00000010	/* HT Avail Request */
+#define	CCS_FORCEHWREQOFF	0x00000020	/* Force HW Clock Request Off */
+#define CCS_HQCLKREQ		0x00000040	/* HQ Clock Required */
+#define CCS_USBCLKREQ		0x00000100	/* USB Clock Req */
+#define CCS_SECICLKREQ		0x00000100	/* SECI Clock Req */
+#define CCS_ARMFASTCLOCKREQ	0x00000100	/* ARM CR4/CA7 fast clock request */
+#define CCS_SFLASH_CLKREQ	0x00000200	/* Sflash clk request */
+#define CCS_AVBCLKREQ		0x00000400	/* AVB Clock enable request */
+#define CCS_ERSRC_REQ_MASK	0x00000700	/* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT	8
+#define	CCS_ALPAVAIL		0x00010000	/* ALP is available */
+#define	CCS_HTAVAIL		0x00020000	/* HT is available */
+#define CCS_BP_ON_APL		0x00040000	/* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT		0x00080000	/* RO: Backplane is running on HT clock */
+#define CCS_ARMFASTCLOCKSTATUS	0x01000000	/* Fast CPU clock is running */
+#define CCS_ERSRC_STS_MASK	0x07000000	/* external resource status */
+#define CCS_ERSRC_STS_SHIFT	24
+#define	CCS_SECI_AVAIL		0x01000000	/* RO: SECI is available */
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size */
+#define	BISZ_OFFSET		0x3e0		/* At this offset into the binary */
+#define	BISZ_MAGIC		0x4249535a	/* Marked with this value: 'BISZ' */
+#define	BISZ_MAGIC_IDX		0		/* Word 0: magic */
+#define	BISZ_TXTST_IDX		1		/* 1: text start */
+#define	BISZ_TXTEND_IDX		2		/* 2: text end */
+#define	BISZ_DATAST_IDX		3		/* 3: data start */
+#define	BISZ_DATAEND_IDX	4		/* 4: data end */
+#define	BISZ_BSSST_IDX		5		/* 5: bss start */
+#define	BISZ_BSSEND_IDX		6		/* 6: bss end */
+#define BISZ_SIZE		7		/* descriptor size in 32-bit integers */
+
+/* Boot/Kernel related definitions and functions */
+#define	SOC_BOOTDEV_ROM		0x00000001
+#define	SOC_BOOTDEV_PFLASH	0x00000002
+#define	SOC_BOOTDEV_SFLASH	0x00000004
+#define	SOC_BOOTDEV_NANDFLASH	0x00000008
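Editor's note: the BISZ block above lets a loader discover a binary's text/data/bss extents without a symbol table: seven 32-bit words at BISZ_OFFSET, valid only when word 0 equals BISZ_MAGIC. A minimal reader sketch under that assumption; bisz_get_bss is a hypothetical helper, not a driver function:

```c
/* Sketch: validate and read the embedded BISZ size descriptor.
 * 'base' is assumed to point at the start of the boot binary in memory.
 */
static bool bisz_get_bss(const uint8 *base, uint32 *bss_start, uint32 *bss_end)
{
	const uint32 *bisz = (const uint32 *)(base + BISZ_OFFSET);

	if (bisz[BISZ_MAGIC_IDX] != BISZ_MAGIC)
		return FALSE;	/* no size descriptor embedded */

	*bss_start = bisz[BISZ_BSSST_IDX];
	*bss_end = bisz[BISZ_BSSEND_IDX];
	return TRUE;
}
```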
+
+#define	SOC_KNLDEV_NORFLASH	0x00000002
+#define	SOC_KNLDEV_NANDFLASH	0x00000004
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+int soc_boot_dev(void *sih);
+int soc_knl_dev(void *sih);
+#endif	/* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */
+
+#define PMU_BASE_OFFSET	0x00012000	/* PMU offset is changed for ccrev >= 56 */
+#endif /* _HNDSOC_H */
diff --git a/bcmdhd.101.10.361.x/include/ieee80211_radiotap.h b/bcmdhd.101.10.361.x/include/ieee80211_radiotap.h
new file mode 100755
index 0000000..1f08faa
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/ieee80211_radiotap.h
@@ -0,0 +1,400 @@
+/* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.11 2007/12/13 01:23:40 sam Exp $ */
+/* $NetBSD: ieee80211_radiotap.h,v 1.16 2007/01/06 05:51:15 dyoung Exp $ */
+/* FILE-CSTYLED */
+
+/*
+ * Copyright (c) 2003, 2004 David Young. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID
+ * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+
+/*
+ * <>
+ */
+
+#ifndef _NET80211_IEEE80211_RADIOTAP_H_
+#define _NET80211_IEEE80211_RADIOTAP_H_
+
+/* A generic radio capture format is desirable. It must be
+ * rigidly defined (e.g., units for fields should be given),
+ * and easily extensible.
+ *
+ * The following is an extensible radio capture format. It is
+ * based on a bitmap indicating which fields are present.
+ *
+ * I am trying to describe precisely what the application programmer
+ * should expect in the following, and for that reason I tell the
+ * units and origin of each measurement (where it applies), or else I
+ * use sufficiently weaselly language ("is a monotonically nondecreasing
+ * function of...") that I cannot set false expectations for lawyerly
+ * readers.
+ */
+#if defined(__KERNEL__) || defined(_KERNEL)
+#ifndef DLT_IEEE802_11_RADIO
+#define	DLT_IEEE802_11_RADIO	127	/* 802.11 plus WLAN header */
+#endif
+#endif /* defined(__KERNEL__) || defined(_KERNEL) */
+
+#define	IEEE80211_RADIOTAP_HDRLEN	64	/* deprecated */
+
+/* This marks the start of a packed structure section. */
+#include
+
+/*
+ * The radio capture header precedes the 802.11 header.
+ *
+ * Note well: all radiotap fields are little-endian.
+ */
+BWL_PRE_PACKED_STRUCT struct ieee80211_radiotap_header {
+	uint8	it_version;	/* Version 0. Only increases
+				 * for drastic changes,
+				 * introduction of compatible
+				 * new fields does not count.
+				 */
+	uint8	it_pad;
+	uint16	it_len;		/* length of the whole
+				 * header in bytes, including
+				 * it_version, it_pad,
+				 * it_len, and data fields.
+				 */
+	uint32	it_present;	/* A bitmap telling which
+				 * fields are present. Set bit 31
+				 * (0x80000000) to extend the
+				 * bitmap by another 32 bits.
+				 * Additional extensions are made
+				 * by setting bit 31.
+				 */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include
+
+/*
+ * Name                                  Data type     Units
+ * ----                                  ---------     -----
+ *
+ * IEEE80211_RADIOTAP_TSFT               uint64_t      microseconds
+ *
+ *      Value in microseconds of the MAC's 64-bit 802.11 Time
+ *      Synchronization Function timer when the first bit of the
+ *      MPDU arrived at the MAC. For received frames only.
+ *
+ * IEEE80211_RADIOTAP_CHANNEL            2 x uint16_t  MHz, bitmap
+ *
+ *      Tx/Rx frequency in MHz, followed by flags (see below).
+ *
+ * IEEE80211_RADIOTAP_FHSS               uint16_t      see below
+ *
+ *      For frequency-hopping radios, the hop set (first byte)
+ *      and pattern (second byte).
+ *
+ * IEEE80211_RADIOTAP_RATE               uint8_t       500kb/s or index
+ *
+ *      Tx/Rx data rate. If bit 0x80 is set then it represents
+ *      an MCS index and not an IEEE rate.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTSIGNAL      int8_t        decibels from
+ *                                                     one milliwatt (dBm)
+ *
+ *      RF signal power at the antenna, decibel difference from
+ *      one milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTNOISE       int8_t        decibels from
+ *                                                     one milliwatt (dBm)
+ *
+ *      RF noise power at the antenna, decibel difference from one
+ *      milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTSIGNAL       uint8_t       decibel (dB)
+ *
+ *      RF signal power at the antenna, decibel difference from an
+ *      arbitrary, fixed reference.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTNOISE        uint8_t       decibel (dB)
+ *
+ *      RF noise power at the antenna, decibel difference from an
+ *      arbitrary, fixed reference point.
+ *
+ * IEEE80211_RADIOTAP_TXFLAGS            uint16_t      txflags
+ *      Properties of Transmitted frames
+ *
+ * IEEE80211_RADIOTAP_RETRIES            uint8_t       retries
+ *      Number of retries
+ *
+ * IEEE80211_RADIOTAP_LOCK_QUALITY       uint16_t      unitless
+ *
+ *      Quality of Barker code lock. Unitless. Monotonically
+ *      nondecreasing with "better" lock strength. Called "Signal
+ *      Quality" in datasheets. (Is there a standard way to measure
+ *      this?)
+ *
+ * IEEE80211_RADIOTAP_TX_ATTENUATION     uint16_t      unitless
+ *
+ *      Transmit power expressed as unitless distance from max
+ *      power set at factory calibration. 0 is max power.
+ *      Monotonically nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DB_TX_ATTENUATION  uint16_t      decibels (dB)
+ *
+ *      Transmit power expressed as decibel distance from max power
+ *      set at factory calibration. 0 is max power. Monotonically
+ *      nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DBM_TX_POWER       int8_t        decibels from
+ *                                                     one milliwatt (dBm)
+ *
+ *      Transmit power expressed as dBm (decibels from a 1 milliwatt
+ *      reference). This is the absolute power level measured at
+ *      the antenna port.
+ *
+ * IEEE80211_RADIOTAP_FLAGS              uint8_t       bitmap
+ *
+ *      Properties of transmitted and received frames. See flags
+ *      defined below.
+ *
+ * IEEE80211_RADIOTAP_ANTENNA            uint8_t       antenna index
+ *
+ *      Unitless indication of the Rx/Tx antenna for this packet.
+ *      The first antenna is antenna 0.
+ * + * IEEE80211_RADIOTAP_XCHANNEL uint32_t bitmap + * uint16_t MHz + * uint8_t channel number + * int8_t .5 dBm + * + * Extended channel specification: flags (see below) followed by + * frequency in MHz, the corresponding IEEE channel number, and + * finally the maximum regulatory transmit power cap in .5 dBm + * units. This property supersedes IEEE80211_RADIOTAP_CHANNEL + * and only one of the two should be present. + * + * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless + * + * Contains a bitmap of known fields/flags, the flags, and + * the MCS index. + * + */ +enum ieee80211_radiotap_type { + IEEE80211_RADIOTAP_TSFT = 0, + IEEE80211_RADIOTAP_FLAGS = 1, + IEEE80211_RADIOTAP_RATE = 2, + IEEE80211_RADIOTAP_CHANNEL = 3, + IEEE80211_RADIOTAP_FHSS = 4, + IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5, + IEEE80211_RADIOTAP_DBM_ANTNOISE = 6, + IEEE80211_RADIOTAP_LOCK_QUALITY = 7, + IEEE80211_RADIOTAP_TX_ATTENUATION = 8, + IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9, + IEEE80211_RADIOTAP_DBM_TX_POWER = 10, + IEEE80211_RADIOTAP_ANTENNA = 11, + IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12, + IEEE80211_RADIOTAP_DB_ANTNOISE = 13, + /* NB: gap for netbsd definitions */ + IEEE80211_RADIOTAP_TXFLAGS = 15, + IEEE80211_RADIOTAP_RETRIES = 17, + IEEE80211_RADIOTAP_XCHANNEL = 18, + IEEE80211_RADIOTAP_MCS = 19, + IEEE80211_RADIOTAP_AMPDU = 20, + IEEE80211_RADIOTAP_VHT = 21, + IEEE80211_RADIOTAP_HE = 23, + IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30, + IEEE80211_RADIOTAP_EXT = 31, + + }; + +#ifndef _KERNEL +/* channel attributes */ +#define IEEE80211_CHAN_TURBO 0x00000010 /* Turbo channel */ +#define IEEE80211_CHAN_CCK 0x00000020 /* CCK channel */ +#define IEEE80211_CHAN_OFDM 0x00000040 /* OFDM channel */ +#define IEEE80211_CHAN_2GHZ 0x00000080 /* 2 GHz spectrum channel. 
*/ +#define IEEE80211_CHAN_5GHZ 0x00000100 /* 5 GHz spectrum channel */ +#define IEEE80211_CHAN_PASSIVE 0x00000200 /* Only passive scan allowed */ +#define IEEE80211_CHAN_DYN 0x00000400 /* Dynamic CCK-OFDM channel */ +#define IEEE80211_CHAN_GFSK 0x00000800 /* GFSK channel (FHSS PHY) */ +#define IEEE80211_CHAN_GSM 0x00001000 /* 900 MHz spectrum channel */ +#define IEEE80211_CHAN_STURBO 0x00002000 /* 11a static turbo channel only */ +#define IEEE80211_CHAN_HALF 0x00004000 /* Half rate channel */ +#define IEEE80211_CHAN_QUARTER 0x00008000 /* Quarter rate channel */ +#define IEEE80211_CHAN_HT20 0x00010000 /* HT 20 channel */ +#define IEEE80211_CHAN_HT40U 0x00020000 /* HT 40 channel w/ ext above */ +#define IEEE80211_CHAN_HT40D 0x00040000 /* HT 40 channel w/ ext below */ +#endif /* !_KERNEL */ + +/* For IEEE80211_RADIOTAP_FLAGS */ +#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received + * during CFP + */ +#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received + * with short + * preamble + */ +#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received + * with WEP encryption + */ +#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received + * with fragmentation + */ +#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */ +#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between + * 802.11 header and payload + * (to 32-bit boundary) + */ +#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* does not pass FCS check */ + +/* For IEEE80211_RADIOTAP_MCS */ +#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01 +#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02 +#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04 +#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08 +#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10 + +#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03 +#define IEEE80211_RADIOTAP_MCS_BW_20 0 +#define IEEE80211_RADIOTAP_MCS_BW_40 1 +#define IEEE80211_RADIOTAP_MCS_BW_20L 2 +#define IEEE80211_RADIOTAP_MCS_BW_20U 3 +#define IEEE80211_RADIOTAP_MCS_SGI 0x04 +#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 +#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 + +/* remove, only used on macos */ +#define IEEE80211_RADIOTAP_MCS_BW_80 0x20 +#define IEEE80211_RADIOTAP_MCS_BW_20LL 0x40 +#define IEEE80211_RADIOTAP_MCS_BW_20LU 0x60 +#define IEEE80211_RADIOTAP_MCS_BW_20UL 0x80 +#define IEEE80211_RADIOTAP_MCS_BW_20UU 0xa0 +#define IEEE80211_RADIOTAP_MCS_BW_40L 0xc0 +#define IEEE80211_RADIOTAP_MCS_BW_40U 0xe0 + +/* For IEEE80211_RADIOTAP_VHT */ +#define IEEE80211_RADIOTAP_VHT_HAVE_STBC 0x0001 +#define IEEE80211_RADIOTAP_VHT_HAVE_TXOP_PS 0x0002 +#define IEEE80211_RADIOTAP_VHT_HAVE_GI 0x0004 +#define IEEE80211_RADIOTAP_VHT_HAVE_SGI_NSYM_DA 0x0008 +#define IEEE80211_RADIOTAP_VHT_HAVE_LDPC_EXTRA 0x0010 +#define IEEE80211_RADIOTAP_VHT_HAVE_BF 0x0020 +#define IEEE80211_RADIOTAP_VHT_HAVE_BW 0x0040 +#define IEEE80211_RADIOTAP_VHT_HAVE_GID 0x0080 +#define IEEE80211_RADIOTAP_VHT_HAVE_PAID 0x0100 + +#define IEEE80211_RADIOTAP_VHT_STBC 0x01 +#define IEEE80211_RADIOTAP_VHT_TXOP_PS 0x02 +#define IEEE80211_RADIOTAP_VHT_SGI 0x04 +#define IEEE80211_RADIOTAP_VHT_SGI_NSYM_DA 0x08 +#define IEEE80211_RADIOTAP_VHT_LDPC_EXTRA 0x10 +#define IEEE80211_RADIOTAP_VHT_BF 0x20 + +#define IEEE80211_RADIOTAP_VHT_NSS 0x0f +#define IEEE80211_RADIOTAP_VHT_MCS 0xf0 + +#define IEEE80211_RADIOTAP_VHT_CODING_LDPC 0x01 + +#define IEEE80211_RADIOTAP_VHT_BW_20 IEEE80211_RADIOTAP_MCS_BW_20 +#define IEEE80211_RADIOTAP_VHT_BW_40 IEEE80211_RADIOTAP_MCS_BW_40 +#define IEEE80211_RADIOTAP_VHT_BW_20L IEEE80211_RADIOTAP_MCS_BW_20L +#define IEEE80211_RADIOTAP_VHT_BW_20U 
IEEE80211_RADIOTAP_MCS_BW_20U +#define IEEE80211_RADIOTAP_VHT_BW_80 4 +#define IEEE80211_RADIOTAP_VHT_BW_40L 5 +#define IEEE80211_RADIOTAP_VHT_BW_40U 6 +#define IEEE80211_RADIOTAP_VHT_BW_20LL 7 +#define IEEE80211_RADIOTAP_VHT_BW_20LU 8 +#define IEEE80211_RADIOTAP_VHT_BW_20UL 9 +#define IEEE80211_RADIOTAP_VHT_BW_20UU 10 +#define IEEE80211_RADIOTAP_VHT_BW_160 11 +#define IEEE80211_RADIOTAP_VHT_BW_80L 12 +#define IEEE80211_RADIOTAP_VHT_BW_80U 13 +#define IEEE80211_RADIOTAP_VHT_BW_40LL 14 +#define IEEE80211_RADIOTAP_VHT_BW_40LU 15 +#define IEEE80211_RADIOTAP_VHT_BW_40UL 16 +#define IEEE80211_RADIOTAP_VHT_BW_40UU 17 +#define IEEE80211_RADIOTAP_VHT_BW_20LLL 18 +#define IEEE80211_RADIOTAP_VHT_BW_20LLU 19 +#define IEEE80211_RADIOTAP_VHT_BW_20LUL 20 +#define IEEE80211_RADIOTAP_VHT_BW_20LUU 21 +#define IEEE80211_RADIOTAP_VHT_BW_20ULL 22 +#define IEEE80211_RADIOTAP_VHT_BW_20ULU 23 +#define IEEE80211_RADIOTAP_VHT_BW_20UUL 24 +#define IEEE80211_RADIOTAP_VHT_BW_20UUU 25 + +/* For IEEE80211_RADIOTAP_HE */ +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_FORMAT 0x0003 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_BSS_COLOR 0x0004 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_BEAM_CHANGE 0x0008 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_DL_UL 0x0010 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_MCS 0x0020 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_DCM 0x0040 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_CODING 0x0080 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_LDPC 0x0100 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_STBC 0x0200 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_SR 0x0400 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_BW 0x4000 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_DOPPLER 0x8000 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_LTF 0x0004 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_TXBF 0x0010 +#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_TXOP 0x0040 + +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_FORMAT 0x0001 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_BEAM_CHANGE 0x0002 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_DL_UL 0x0004 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_MCS 0x0008 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_DCM 0x0010 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_BSS_COLOR 0x0020 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_B14 0x0040 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_SR 0x0080 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_BW 0x0100 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_LTF 0x0200 +#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_NSTS 0x0400 + +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_TXOP 0x0001 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_CODING 0x0002 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_LDPC 0x0004 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_STBC 0x0008 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_TXBF 0x0010 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_FEC 0x0020 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_PED 0x0040 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_DOPPLER 0x0100 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_CRC 0x0400 +#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_TAIL 0x0800 + +/* For IEEE80211_RADIOTAP_TXFLAGS */ +#define IEEE80211_RADIOTAP_TXF_FAIL 0x0001 /* TX failed due to excessive retries */ +#define IEEE80211_RADIOTAP_TXF_CTS 0x0002 /* TX used CTS-to-self protection */ +#define IEEE80211_RADIOTAP_TXF_RTSCTS 0x0004 /* TX used RTS/CTS */ +#define IEEE80211_RADIOTAP_TXF_NOACK 0x0008 /* For injected TX: don't expect ACK */ +#define IEEE80211_RADIOTAP_TXF_SEQOVR 0x0010 /* For injected TX: use pre-configured seq */ + +/* For IEEE80211_RADIOTAP_AMPDU_STATUS */ +#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 +#define 
IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020
+#define IEEE80211_RADIOTAP_AMPDU_MPDU_ONLY 0x8000
+
+#endif /* !_NET80211_IEEE80211_RADIOTAP_H_ */
diff --git a/bcmdhd.101.10.361.x/include/linux_osl.h b/bcmdhd.101.10.361.x/include/linux_osl.h
new file mode 100755
index 0000000..a0a0937
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/linux_osl.h
@@ -0,0 +1,868 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+#define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x)))
+
+/* Linux Kernel: File Operations: start */
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCMDRIVER
+
+/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+#endif /* SHARED_OSL_CMN */
+
+extern void osl_detach(osl_t *osh);
+extern int osl_static_mem_init(osl_t *osh, void *adapter);
+extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
+extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
+extern void* osl_get_bus_handle(osl_t *osh);
+#ifdef DHD_MAP_LOGGING
+extern void osl_dma_map_dump(osl_t *osh);
+#define OSL_DMA_MAP_DUMP(osh) osl_dma_map_dump(osh)
+#else
+#define OSL_DMA_MAP_DUMP(osh) do {} while (0)
+#endif /* DHD_MAP_LOGGING */
+
+/* Global ASSERT type */
+extern uint32 g_assert_type;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRI_FMT_x "llx"
+#define PRI_FMT_X "llX"
+#define PRI_FMT_o "llo"
+#define PRI_FMT_d "lld"
+#else
+#define PRI_FMT_x "x"
+#define PRI_FMT_X "X"
+#define PRI_FMT_o "o"
+#define PRI_FMT_d "d"
+#endif /* CONFIG_PHYS_ADDR_T_64BIT */
+/* ASSERT */
+#ifndef ASSERT
+#if (defined(BCMDBG_ASSERT) || defined(BCMASSERT_LOG)) && !defined(BINCMP)
+	#define ASSERT(exp) \
+	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+#ifdef __GNUC__
+	#define GCC_VERSION \
+		(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 30100
+	#define ASSERT(exp) do {} while (0)
+#else
+	/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
+	#define ASSERT(exp)
+#endif /* GCC_VERSION > 30100 */
+#endif /* __GNUC__ */
+#endif /* (BCMDBG_ASSERT || BCMASSERT_LOG) && !BINCMP */
+#endif /* ASSERT */
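A quick caller sketch may help here (illustrative only, not part of the patch; the helper and its arguments are hypothetical). With BCMDBG_ASSERT or BCMASSERT_LOG defined, a failed check lands in osl_assert() with the stringified expression and source location; in release builds the macro expands to nothing, so checked expressions must be free of side effects:

```c
/* Hypothetical helper, for illustration only. */
static void example_validate(const void *buf, uint len)
{
	/* On failure this becomes osl_assert("buf != NULL", __FILE__, __LINE__)
	 * in debug builds; in release builds the whole statement disappears.
	 */
	ASSERT(buf != NULL);
	ASSERT(len != 0);
}
```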
+
+#define ASSERT_FP(exp) ASSERT(exp)
+
+/* microsecond delay */
+#define OSL_DELAY(usec) osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_SLEEP(ms) osl_sleep(ms)
+extern void osl_sleep(uint ms);
+
+/* PCI configuration space access macros */
+#define OSL_PCI_READ_CONFIG(osh, offset, size) \
+	osl_pci_read_config((osh), (offset), (size))
+#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+	osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+#ifdef BCMPCIE
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+#define OSL_PCIE_DOMAIN(osh) osl_pcie_domain(osh)
+#define OSL_PCIE_BUS(osh) osl_pcie_bus(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern uint osl_pcie_domain(osl_t *osh);
+extern uint osl_pcie_bus(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+#endif
+
+/* precommit failed when this is removed */
+/* BLAZAR_BRANCH_101_10_DHD_003/build/dhd/linux-fc30/brix-brcm */
+/* TBD: Revisit later */
+#if defined(BCMINTERNAL)
+/* Flags that can be used to handle OSL specifics */
+#define OSL_PHYS_MEM_LESS_THAN_16MB (1<<0L)
+#endif /* BCMINTERNAL */
+
+#define OSL_ACP_COHERENCE (1<<1L)
+#define OSL_FWDERBUF (1<<2L)
+
+/* Pkttag flag should be part of public information */
+typedef struct {
+	bool pkttag;
+	bool mmbus;		/**< Bus supports memory-mapped register accesses */
+	pktfree_cb_fn_t tx_fn;	/**< Callback function for PKTFREE */
+	void *tx_ctx;		/**< Context to the callback function */
+#ifdef OSLREGOPS
+	osl_rreg_fn_t rreg_fn;	/**< Read Register function */
+	osl_wreg_fn_t wreg_fn;	/**< Write Register function */
+	void *reg_ctx;		/**< Context to the reg callback functions */
+#else
+	void *unused[3];	/**< temp fix for USBAP cftpool handle corruption */
+#endif
+	void (*rx_fn)(void *rx_ctx, void *p);
+	void *rx_ctx;
+} osl_pubinfo_t;
+
+extern void osl_flag_set(osl_t *osh, uint32 mask);
+extern void osl_flag_clr(osl_t *osh, uint32 mask);
+extern bool osl_is_flag_set(osl_t *osh, uint32 mask);
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
+	do { \
+		((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
+		((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
+	} while (0)
+
+#define PKTFREESETRXCB(osh, _rx_fn, _rx_ctx) \
+	do { \
+		((osl_pubinfo_t*)osh)->rx_fn = _rx_fn; \
+		((osl_pubinfo_t*)osh)->rx_ctx = _rx_ctx; \
+	} while (0)
+
+#ifdef OSLREGOPS
+#define REGOPSSET(osh, rreg, wreg, ctx) \
+	do { \
+		((osl_pubinfo_t*)osh)->rreg_fn = rreg; \
+		((osl_pubinfo_t*)osh)->wreg_fn = wreg; \
+		((osl_pubinfo_t*)osh)->reg_ctx = ctx; \
+	} while (0)
+#endif /* OSLREGOPS */
+
+/* host/bus architecture-specific byte swap */
+#define BUS_SWAP32(v) (v)
+
+#if defined(BCMDBG_MEM) && !defined(BINCMP)
+	#define MALLOC(osh, size) osl_debug_malloc((osh), (size), __LINE__, __FILE__)
+	#define MALLOCZ(osh, size) osl_debug_mallocz((osh), (size), __LINE__, __FILE__)
+	#define MFREE(osh, addr, size) \
+		({osl_debug_mfree((osh), ((void *)addr), (size), __LINE__, __FILE__);(addr) = NULL;})
+	#define VMALLOC(osh, size) osl_debug_vmalloc((osh), (size), __LINE__, __FILE__)
+	#define VMALLOCZ(osh, size) osl_debug_vmallocz((osh), (size), __LINE__, __FILE__)
+	#define VMFREE(osh, addr, size) osl_debug_vmfree((osh), (addr),
(size), __LINE__, __FILE__) + #define MALLOCED(osh) osl_malloced((osh)) + #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh) + #define MALLOC_DUMP(osh, b) osl_debug_memdump((osh), (b)) + extern void *osl_debug_malloc(osl_t *osh, uint size, int line, const char* file); + extern void *osl_debug_mallocz(osl_t *osh, uint size, int line, const char* file); + extern void osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, const char* file); + extern void *osl_debug_vmalloc(osl_t *osh, uint size, int line, const char* file); + extern void *osl_debug_vmallocz(osl_t *osh, uint size, int line, const char* file); + extern void osl_debug_vmfree(osl_t *osh, void *addr, uint size, int line, const char* file); + extern uint osl_malloced(osl_t *osh); + struct bcmstrbuf; + extern int osl_debug_memdump(osl_t *osh, struct bcmstrbuf *b); + extern uint osl_check_memleak(osl_t *osh); +#else /* BCMDBG_MEM && !BINCMP */ + #define MALLOC(osh, size) osl_malloc((osh), (size)) + #define MALLOCZ(osh, size) osl_mallocz((osh), (size)) + #define MALLOC_RA(osh, size, callsite) osl_mallocz((osh), (size)) + #define MFREE(osh, addr, size) ({osl_mfree((osh), ((void *)addr), (size));(addr) = NULL;}) + #define VMALLOC(osh, size) osl_vmalloc((osh), (size)) + #define VMALLOCZ(osh, size) osl_vmallocz((osh), (size)) + #define VMFREE(osh, addr, size) osl_vmfree((osh), (addr), (size)) + #define MALLOCED(osh) osl_malloced((osh)) + #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh) + extern void *osl_malloc(osl_t *osh, uint size); + extern void *osl_mallocz(osl_t *osh, uint size); + extern void osl_mfree(osl_t *osh, void *addr, uint size); + extern void *osl_vmalloc(osl_t *osh, uint size); + extern void *osl_vmallocz(osl_t *osh, uint size); + extern void osl_vmfree(osl_t *osh, void *addr, uint size); + extern uint osl_malloced(osl_t *osh); + extern uint osl_check_memleak(osl_t *osh); +#endif /* BCMDBG_MEM && !BINCMP */ + +extern int memcpy_s(void *dest, size_t destsz, const void *src, size_t n); +extern int memset_s(void *dest, size_t destsz, int c, size_t n); +#define MALLOC_FAILED(osh) osl_malloc_failed((osh)) +extern uint osl_malloc_failed(osl_t *osh); + +/* allocate/free shared (dma-able) consistent memory */ +#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align() +#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +#define DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +extern uint osl_dma_consistent_align(void); +extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, + uint *tot, dmaaddr_t *pap); +extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); + +/* map/unmap direction */ +#define DMA_NO 0 /* Used to skip cache op */ +#define DMA_TX 1 /* TX direction for DMA */ +#define DMA_RX 2 /* RX direction for DMA */ + +/* map/unmap shared (dma-able) memory */ +#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \ + osl_dma_unmap((osh), (pa), (size), (direction)) +extern void osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *txp_dmah); +extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + 
hnddma_seg_map_t *txp_dmah);
+extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction);
+
+#ifndef PHYS_TO_VIRT
+#define PHYS_TO_VIRT(pa) osl_phys_to_virt(pa)
+#endif
+#ifndef VIRT_TO_PHYS
+#define VIRT_TO_PHYS(va) osl_virt_to_phys(va)
+#endif
+extern void * osl_phys_to_virt(void * pa);
+extern void * osl_virt_to_phys(void * va);
+
+/* API for DMA addressing capability */
+#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
+
+#define OSL_SMP_WMB() smp_wmb()
+
+/* API for CPU relax */
+extern void osl_cpu_relax(void);
+#define OSL_CPU_RELAX() osl_cpu_relax()
+
+extern void osl_preempt_disable(osl_t *osh);
+extern void osl_preempt_enable(osl_t *osh);
+#define OSL_DISABLE_PREEMPTION(osh) osl_preempt_disable(osh)
+#define OSL_ENABLE_PREEMPTION(osh) osl_preempt_enable(osh)
+
+#if (defined(BCMPCIE) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__))
+	extern void osl_cache_flush(void *va, uint size);
+	extern void osl_cache_inv(void *va, uint size);
+	extern void osl_prefetch(const void *ptr);
+	#define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *)(va), len)
+	#define OSL_CACHE_INV(va, len) osl_cache_inv((void *)(va), len)
+	#define OSL_PREFETCH(ptr) osl_prefetch(ptr)
+#else /* !__ARM_ARCH_7A__ */
+	#define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va)
+	#define OSL_CACHE_INV(va, len) BCM_REFERENCE(va)
+	#define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr)
+#endif /* !__ARM_ARCH_7A__ */
+
+#ifdef AXI_TIMEOUTS_NIC
+extern void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx);
+extern void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size);
+#endif /* AXI_TIMEOUTS_NIC */
+
+/* register access macros */
+#if defined(BCMSDIO)
+	#include <bcmsdh.h>
+	#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r)), (v)))
+	#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r))))
+#elif defined(AXI_TIMEOUTS_NIC)
+#define OSL_READ_REG(osh, r) \
+	({\
+		__typeof(*(r)) __osl_v; \
+		osl_bpt_rreg(osh, (uintptr)(r), &__osl_v, sizeof(*(r))); \
+		__osl_v; \
+	})
+#endif
+
+#if defined(AXI_TIMEOUTS_NIC)
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
+#else /* !AXI_TIMEOUTS_NIC */
+#if defined(BCMSDIO)
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+		mmap_op else bus_op
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+		mmap_op : bus_op
+#else
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+#endif /* defined(BCMSDIO) */
+#endif /* AXI_TIMEOUTS_NIC */
+
+#define OSL_ERROR(bcmerror) osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+/* PKTBUFSZ is the largest reasonable packet buffer the driver uses for the ethernet MTU, in bytes */
+#define PKTBUFSZ 2048
+
+#define OSH_NULL NULL
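As a usage illustration for the allocation macros above (a hypothetical helper; the BCME_* codes are assumed from the driver's bcmerror.h): MALLOCZ() returns zeroed, per-osh accounted memory, and MFREE() both frees the block and NULLs the pointer as a side effect of the macro:

```c
/* Hypothetical helper, for illustration only. */
static int example_alloc_table(osl_t *osh)
{
	uint32 *tbl = MALLOCZ(osh, 64 * sizeof(*tbl));

	if (tbl == NULL) {
		return BCME_NOMEM; /* failures are counted; see MALLOC_FAILED(osh) */
	}
	/* ... use tbl; MALLOCED(osh) now reflects this allocation ... */
	MFREE(osh, tbl, 64 * sizeof(*tbl)); /* also sets tbl to NULL */
	return BCME_OK;
}
```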
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c .
+ */
+#ifndef BINOSL
+#include <linuxver.h>		/* use current 2.4.x calling conventions */
+#include <linux/kernel.h>	/* for vsn/printf's */
+#include <linux/string.h>	/* for mem*, str* */
+extern uint64 osl_sysuptime_us(void);
+#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies))
+#define OSL_SYSUPTIME_US() osl_sysuptime_us()
+extern uint64 osl_localtime_ns(void);
+extern void osl_get_localtime(uint64 *sec, uint64 *usec);
+extern uint64 osl_systztime_us(void);
+#define OSL_LOCALTIME_NS() osl_localtime_ns()
+#define OSL_GET_LOCALTIME(sec, usec) osl_get_localtime((sec), (usec))
+#define OSL_SYSTZTIME_US() osl_systztime_us()
+#define printf(fmt, args...) printk(PERCENT_S DHD_LOG_PREFIXS fmt, PRINTF_SYSTEM_TIME, ## args)
+#include <linux/kernel.h>	/* for vsn/printf's */
+#include <linux/string.h>	/* for mem*, str* */
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
+#define bcopy_hw(src, dst, len) memcpy((dst), (src), (len))
+#define bcopy_hw_async(src, dst, len) memcpy((dst), (src), (len))
+#define bcopy_hw_poll_for_completion()
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+	defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_GS101)
+extern int pcie_ch_num;
+extern int exynos_pcie_l1_exit(int ch_num);
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
+	* CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_GS101
+	*/
+
+/* register access macros */
+#if defined(OSLREGOPS)
+#define R_REG(osh, r) (\
+	sizeof(*(r)) == sizeof(uint8) ? osl_readb((osh), (volatile uint8*)(r)) : \
+	sizeof(*(r)) == sizeof(uint16) ? osl_readw((osh), (volatile uint16*)(r)) : \
+	sizeof(*(r)) == sizeof(uint32) ? osl_readl((osh), (volatile uint32*)(r)) : \
+	osl_readq((osh), (volatile uint64*)(r)) \
+)
+
+#define W_REG(osh, r, v) do { \
+	switch (sizeof(*(r))) { \
+	case sizeof(uint8): osl_writeb((osh), (volatile uint8*)(r), (uint8)(v)); break; \
+	case sizeof(uint16): osl_writew((osh), (volatile uint16*)(r), (uint16)(v)); break; \
+	case sizeof(uint32): osl_writel((osh), (volatile uint32*)(r), (uint32)(v)); break; \
+	case sizeof(uint64): osl_writeq((osh), (volatile uint64*)(r), (uint64)(v)); break; \
+	} \
+} while (0)
+
+extern uint8 osl_readb(osl_t *osh, volatile uint8 *r);
+extern uint16 osl_readw(osl_t *osh, volatile uint16 *r);
+extern uint32 osl_readl(osl_t *osh, volatile uint32 *r);
+extern uint64 osl_readq(osl_t *osh, volatile uint64 *r);
+extern void osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v);
+extern void osl_writew(osl_t *osh, volatile uint16 *r, uint16 v);
+extern void osl_writel(osl_t *osh, volatile uint32 *r, uint32 v);
+extern void osl_writeq(osl_t *osh, volatile uint64 *r, uint64 v);
+
+#else /* OSLREGOPS */
+
+#ifndef IL_BIGENDIAN
+#ifdef CONFIG_64BIT
+/* readq is defined only for 64 bit platform */
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+	defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_GS101)
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+	({ \
+		__typeof(*(r)) __osl_v = 0; \
+		exynos_pcie_l1_exit(pcie_ch_num); \
+		BCM_REFERENCE(osh); \
+		switch (sizeof(*(r))) { \
+		case sizeof(uint8): __osl_v = \
+			readb((volatile uint8*)(r)); break; \
+		case sizeof(uint16): __osl_v = \
+			readw((volatile uint16*)(r)); break; \
+		case sizeof(uint32): __osl_v = \
+			readl((volatile uint32*)(r)); break; \
+		case sizeof(uint64): __osl_v = \
+			readq((volatile uint64*)(r)); break; \
+		} \
+		__osl_v; \
+	}), \
+	OSL_READ_REG(osh, r)) \
+)
+#else
+#define
R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v = 0; \ + BCM_REFERENCE(osh); \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + case sizeof(uint64): __osl_v = \ + readq((volatile uint64*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 + * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_GS101 + */ +#else /* !CONFIG_64BIT */ +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v = 0; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#endif /* CONFIG_64BIT */ + +#ifdef CONFIG_64BIT +/* writeq is defined only for 64 bit platform */ +#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \ + defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_GS101) +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + ({ \ + exynos_pcie_l1_exit(pcie_ch_num); \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), \ + (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), \ + (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), \ + (volatile uint32*)(r)); break; \ + case sizeof(uint64): writeq((uint64)(v), \ + (volatile uint64*)(r)); break; \ + } \ + }), \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#else +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 + * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_GS101 + */ +#else /* !CONFIG_64BIT */ +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#endif /* CONFIG_64BIT */ + +#else /* IL_BIGENDIAN */ + +#ifdef CONFIG_64BIT +/* readq and writeq is defined only for 64 bit platform */ +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v = 0; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)((uintptr)(r)^3)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)((uintptr)(r)^2)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + case sizeof(uint64): __osl_v = \ + readq((volatile uint64*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), \ + (volatile uint8*)((uintptr)(r)^3)); break; \ + case 
sizeof(uint16): writew((uint16)(v), \
+		(volatile uint16*)((uintptr)(r)^2)); break; \
+	case sizeof(uint32): writel((uint32)(v), \
+		(volatile uint32*)(r)); break; \
+	case sizeof(uint64): writeq((uint64)(v), \
+		(volatile uint64*)(r)); break; \
+	}, \
+	(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+
+#else /* !CONFIG_64BIT */
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+	({ \
+		__typeof(*(r)) __osl_v = 0; \
+		switch (sizeof(*(r))) { \
+		case sizeof(uint8): __osl_v = \
+			readb((volatile uint8*)((uintptr)(r)^3)); break; \
+		case sizeof(uint16): __osl_v = \
+			readw((volatile uint16*)((uintptr)(r)^2)); break; \
+		case sizeof(uint32): __osl_v = \
+			readl((volatile uint32*)(r)); break; \
+		} \
+		__osl_v; \
+	}), \
+	OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh, \
+		switch (sizeof(*(r))) { \
+		case sizeof(uint8): writeb((uint8)(v), \
+			(volatile uint8*)((uintptr)(r)^3)); break; \
+		case sizeof(uint16): writew((uint16)(v), \
+			(volatile uint16*)((uintptr)(r)^2)); break; \
+		case sizeof(uint32): writel((uint32)(v), \
+			(volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+#endif /* CONFIG_64BIT */
+#endif /* IL_BIGENDIAN */
+
+#endif /* OSLREGOPS */
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+
+/* bcopy, bcmp, and bzero functions */
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va) ((void *)va)
+#define OSL_CACHED(va) ((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
+#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)
+
+/* get processor cycle count */
+#if defined(__i386__)
+#define OSL_GETCYCLES(x) rdtscl((x))
+#else
+#define OSL_GETCYCLES(x) ((x) = 0)
+#endif /* __i386__ */
+
+/* dereference an address that may cause a bus exception */
+#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
+
+/* map/unmap physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
+#define REG_MAP(pa, size) ioremap((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#endif
+#else
+#define REG_MAP(pa, size) (void *)(0)
+#endif /* !defined(CONFIG_MMC_MSM7X00A) */
+#define REG_UNMAP(va) iounmap((va))
+
+/* shared (dma-able) memory access macros */
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define OR_SM(r, v) (*(r) |= (v))
+#define BZERO_SM(r, len) memset((r), '\0', (len))
+
+/* Because the non-BINOSL implementation of the PKT OSL routines are macros (for
+ * performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h>	/* use current 2.4.x calling conventions */
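To make the dispatch above concrete, here is a hypothetical caller sketch (illustrative only; the register pointer and bit values are assumptions): R_REG()/W_REG() size the access from the pointee type, so a uint32 register becomes readl()/writel() on a memory-mapped bus, falls back to OSL_READ_REG()/OSL_WRITE_REG() on SDIO when mmbus is false, and OR_REG() composes the two into a read-modify-write:

```c
/* Hypothetical helper, for illustration only. */
static void example_reg_toggle(osl_t *osh, volatile uint32 *ctrl_reg)
{
	uint32 val;

	val = R_REG(osh, ctrl_reg);      /* sized read: readl() when memory-mapped */
	W_REG(osh, ctrl_reg, val | 0x1); /* write back with an assumed enable bit */
	OR_REG(osh, ctrl_reg, 0x2);      /* read-modify-write helper built on the two */
}
```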
+#else /* BINOSL */
+
+/* Where to get the declarations for mem, str, printf, bcopy's? Two basic approaches.
+ *
+ * First, use the Linux header files and the C standard library replacement versions
+ * built in to the kernel. Use this approach when compiling non-hybrid code or compiling
+ * the OS port files. The second approach is to use our own defines/prototypes and
+ * functions we have provided in the Linux OSL, i.e. linux_osl.c. Use this approach when
+ * compiling the files that make up the hybrid binary. We are ensuring we
+ * don't directly link to the kernel replacement routines from the hybrid binary.
+ *
+ * NOTE: The issue we are trying to avoid is any questioning of whether the
+ * hybrid binary is derived from Linux. The wireless common code (wlc) is designed
+ * to be OS independent through the use of the OSL API and thus the hybrid binary doesn't
+ * derive from the Linux kernel at all. But since we defined our OSL API to include
+ * a small collection of standard C library routines and these routines are provided in
+ * the kernel, we want to avoid even the appearance of deriving at all, even though clearly
+ * usage of a C standard library API doesn't represent a derivation from Linux. Lastly,
+ * note that at the time of this checkin 4 references to memcpy/memset could not be eliminated
+ * from the binary because they are created internally by GCC as part of things like
+ * structure assignment. I don't think the compiler should be doing this but there is
+ * no option to disable it on Intel architectures (there is for MIPS, so somebody must
+ * agree with me). I may even be able to remove these references eventually with
+ * a GNU binutils tool such as objcopy via a symbol rename (i.e. memcpy to osl_memcpy).
+ */
+	#define printf(fmt, args...) printk(fmt , ## args)
+	#include <linux/kernel.h>	/* for vsn/printf's */
+	#include <linux/string.h>	/* for mem*, str* */
+	/* bcopy's: Linux kernel doesn't provide these (anymore) */
+	#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+	#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+	#define bzero(b, len) memset((b), '\0', (len))
+
+	/* These are provided only because when compiling linux_osl.c there
+	 * must be an explicit prototype (separate from the definition) because
+	 * we are compiling with the GCC option -Wstrict-prototypes. Conversely
+	 * these could be placed directly in linux_osl.c.
+	 */
+	extern int osl_printf(const char *format, ...);
+	extern int osl_sprintf(char *buf, const char *format, ...);
+	extern int osl_snprintf(char *buf, size_t n, const char *format, ...);
+	extern int osl_vsprintf(char *buf, const char *format, va_list ap);
+	extern int osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap);
+	extern int osl_strcmp(const char *s1, const char *s2);
+	extern int osl_strncmp(const char *s1, const char *s2, uint n);
+	extern int osl_strlen(const char *s);
+	extern char* osl_strcpy(char *d, const char *s);
+	extern char* osl_strncpy(char *d, const char *s, uint n);
+	extern char* osl_strchr(const char *s, int c);
+	extern char* osl_strrchr(const char *s, int c);
+	extern void *osl_memset(void *d, int c, size_t n);
+	extern void *osl_memcpy(void *d, const void *s, size_t n);
+	extern void *osl_memmove(void *d, const void *s, size_t n);
+	extern int osl_memcmp(const void *s1, const void *s2, size_t n);
+
+/* register access macros */
+#if !defined(BCMSDIO)
+#define R_REG(osh, r) \
+	({ \
+	BCM_REFERENCE(osh); \
+	sizeof(*(r)) == sizeof(uint8) ? osl_readb((volatile uint8*)(r)) : \
+	sizeof(*(r)) == sizeof(uint16) ? osl_readw((volatile uint16*)(r)) : \
+	sizeof(*(r)) == sizeof(uint32) ? osl_readl((volatile uint32*)(r)) : \
+	osl_readq((volatile uint64*)(r)); \
+	})
+#define W_REG(osh, r, v) do { \
+	BCM_REFERENCE(osh); \
+	switch (sizeof(*(r))) { \
+	case sizeof(uint8): osl_writeb((uint8)(v), (volatile uint8*)(r)); break; \
+	case sizeof(uint16): osl_writew((uint16)(v), (volatile uint16*)(r)); break; \
+	case sizeof(uint32): osl_writel((uint32)(v), (volatile uint32*)(r)); break; \
+	case sizeof(uint64): osl_writeq((uint64)(v), (volatile uint64*)(r)); break; \
+	} \
+} while (0)
+
+#else
+#define R_REG(osh, r) OSL_READ_REG(osh, r)
+#define W_REG(osh, r, v) do { OSL_WRITE_REG(osh, r, v); } while (0)
+#endif /* !defined(BCMSDIO) */
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+extern uint8 osl_readb(volatile uint8 *r);
+extern uint16 osl_readw(volatile uint16 *r);
+extern uint32 osl_readl(volatile uint32 *r);
+extern uint64 osl_readq(volatile uint64 *r);
+extern void osl_writeb(uint8 v, volatile uint8 *r);
+extern void osl_writew(uint16 v, volatile uint16 *r);
+extern void osl_writel(uint32 v, volatile uint32 *r);
+extern void osl_writeq(uint64 v, volatile uint64 *r);
+
+/* system up time in ms */
+#define OSL_SYSUPTIME() osl_sysuptime()
+extern uint32 osl_sysuptime(void);
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va) osl_uncached((va))
+extern void *osl_uncached(void *va);
+#define OSL_CACHED(va) osl_cached((va))
+extern void *osl_cached(void *va);
+
+#define OSL_PREF_RANGE_LD(va, sz)
+#define OSL_PREF_RANGE_ST(va, sz)
+
+/* get processor cycle count */
+#define OSL_GETCYCLES(x) ((x) = osl_getcycles())
+extern uint osl_getcycles(void);
+
+/* dereference an address that may target abort */
+#define BUSPROBE(val, addr) osl_busprobe(&(val), (addr))
+extern int osl_busprobe(uint32 *val, uint32 addr);
+
+/* map/unmap physical to virtual */
+#define REG_MAP(pa, size) osl_reg_map((pa), (size))
+#define REG_UNMAP(va) osl_reg_unmap((va))
+extern void *osl_reg_map(uint32 pa, uint size);
+extern void osl_reg_unmap(void *va);
+
+/* shared (dma-able) memory access macros */
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define OR_SM(r, v) (*(r) |= (v))
+#define BZERO_SM(r, len) bzero((r), (len))
+
+#endif /* BINOSL */
+
+#define OSL_RAND() osl_rand()
+extern uint32 osl_rand(void);
+
+#define DMA_FLUSH(osh, va, size, direction, p, dmah) \
+	osl_dma_flush((osh), (va), (size), (direction), (p), (dmah))
+#define DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
+
+#else /* ! BCMDRIVER */
+
+/* ASSERT */
+#ifdef BCMDBG_ASSERT
+	#include <assert.h>
+	#define ASSERT assert
+#else /* BCMDBG_ASSERT */
+	#define ASSERT(exp) do {} while (0)
+#endif /* BCMDBG_ASSERT */
+
+#define ASSERT_FP(exp) ASSERT(exp)
+
+/* MALLOC and MFREE */
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+/* str* and mem* functions */
+#include <string.h>
+
+/* *printf functions */
+#include <stdio.h>
+
+/* bcopy, bcmp, and bzero */
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif /* !BCMDRIVER */
+
+typedef struct sk_buff_head PKT_LIST;
+#define PKTLIST_INIT(x) skb_queue_head_init((x))
+#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y))
+#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x))
+#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
+#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x))
+
+#ifndef _linuxver_h_
+typedef struct timer_list_compat timer_list_compat_t;
+#endif /* _linuxver_h_ */
+typedef struct osl_timer {
+	timer_list_compat_t *timer;
+	bool set;
+#ifdef BCMDBG
+	char *name; /* Description of the timer */
+#endif
+} osl_timer_t;
+
+typedef void (*linux_timer_fn)(ulong arg);
+
+extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg);
+extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
+extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
+extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
+
+#ifdef BCMDRIVER
+typedef atomic_t osl_atomic_t;
+#define OSL_ATOMIC_SET(osh, v, x) atomic_set(v, x)
+#define OSL_ATOMIC_INIT(osh, v) atomic_set(v, 0)
+#define OSL_ATOMIC_INC(osh, v) atomic_inc(v)
+#define OSL_ATOMIC_INC_RETURN(osh, v) atomic_inc_return(v)
+#define OSL_ATOMIC_DEC(osh, v) atomic_dec(v)
+#define OSL_ATOMIC_DEC_RETURN(osh, v) atomic_dec_return(v)
+#define OSL_ATOMIC_READ(osh, v) atomic_read(v)
+#define OSL_ATOMIC_ADD(osh, v, x) atomic_add(x, v)
+
+#ifndef atomic_set_mask
+#define OSL_ATOMIC_OR(osh, v, x) atomic_or(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_and(x, v)
+#else
+#define OSL_ATOMIC_OR(osh, v, x) atomic_set_mask(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_clear_mask(~x, v)
+#endif
+#endif /* BCMDRIVER */
+
+extern void *osl_spin_lock_init(osl_t *osh);
+extern void osl_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long osl_spin_lock(void *lock);
+extern void osl_spin_unlock(void *lock, unsigned long flags);
+extern unsigned long osl_spin_lock_irq(void *lock);
+extern void osl_spin_unlock_irq(void *lock, unsigned long flags);
+extern unsigned long osl_spin_lock_bh(void *lock);
+extern void osl_spin_unlock_bh(void *lock, unsigned long flags);
+
+extern void *osl_mutex_lock_init(osl_t *osh);
+extern void osl_mutex_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long osl_mutex_lock(void *lock);
+extern void osl_mutex_unlock(void *lock, unsigned long flags);
+
+typedef struct osl_timespec {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
+	__kernel_old_time_t tv_sec;	/* seconds */
+#else
+	__kernel_time_t tv_sec;		/* seconds */
+#endif
+	__kernel_suseconds_t tv_usec;	/* microseconds */
+	long tv_nsec;			/* nanoseconds */
+} osl_timespec_t;
+extern void osl_do_gettimeofday(struct osl_timespec *ts);
+extern void osl_get_monotonic_boottime(struct osl_timespec *ts);
+extern uint32 osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts);
+#endif /* _linux_osl_h_ */
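Before the file moves on, a brief usage sketch of the OSL timer API declared above may help (hypothetical names; the callback runs from Linux timer context, so it should not block): osl_timer_init() allocates the timer and binds the callback, osl_timer_add() arms it in milliseconds, and osl_timer_del() must be called before the owner goes away:

```c
/* Hypothetical helpers, for illustration only. */
static void example_watchdog_cb(void *arg)
{
	/* runs in timer (softirq) context; keep it short and non-blocking */
}

static osl_timer_t *example_start_watchdog(osl_t *osh)
{
	osl_timer_t *t = osl_timer_init(osh, "example_wd", example_watchdog_cb, NULL);

	if (t != NULL) {
		osl_timer_add(osh, t, 1000, FALSE); /* one-shot, fires after 1000 ms */
	}
	return t; /* caller must osl_timer_del(osh, t) before teardown */
}
```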
diff --git a/bcmdhd.101.10.361.x/include/linux_pkt.h b/bcmdhd.101.10.361.x/include/linux_pkt.h
new file mode 100755
index 0000000..f2fcf5f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/linux_pkt.h
@@ -0,0 +1,421 @@
+/*
+ * Linux Packet (skb) interface
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _linux_pkt_h_
+#define _linux_pkt_h_
+
+#include <typedefs.h>
+
+#ifdef __ARM_ARCH_7A__
+#define PKT_HEADROOM_DEFAULT NET_SKB_PAD /**< NET_SKB_PAD is defined in a linux kernel header */
+#else
+#define PKT_HEADROOM_DEFAULT 16
+#endif /* __ARM_ARCH_7A__ */
+
+#ifdef BCMDRIVER
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c .
+ */
+#ifndef BINOSL
+/* Because the non-BINOSL implementation of the PKT OSL routines are macros (for
+ * performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h>
+
+/* packet primitives */
+#ifndef BCMDBG_PKT
+#ifdef BCMDBG_CTRACE
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#else
+#ifdef BCM_OBJECT_TRACE
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FUNCTION__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
+#else
+#define PKTGET(osh, len, send) linux_pktget((osh), (len))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
+#else /* BCMDBG_PKT pkt logging for debugging */
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#define PKTLIST_DUMP(osh, buf) osl_pktlist_dump(osh, buf)
+#define BCMDBG_PTRACE
+#define PKTLIST_IDX(skb) ((uint16 *)((char *)PKTTAG(skb) + \
+	sizeof(((struct sk_buff*)(skb))->cb) - sizeof(uint16)))
+#define PKTDBG_TRACE(osh, pkt, bit) osl_pkttrace(osh, pkt, bit)
+#endif /* BCMDBG_PKT */
+#if defined(BCM_OBJECT_TRACE)
+#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
+#else
+#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send))
+#endif /* BCM_OBJECT_TRACE */
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
+#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
+#else
+#define PKTGET_STATIC PKTGET
+#define PKTFREE_STATIC PKTFREE
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
+#define PKTHEAD(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->head);})
+#define PKTSOCK(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->sk);})
+#define PKTSETHEAD(osh, skb, h)
({BCM_REFERENCE(osh); \
+	(((struct sk_buff *)(skb))->head = (h));})
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTEXPHEADROOM(osh, skb, b) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+	 })
+#define PKTTAILROOM(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_tailroom((struct sk_buff*)(skb)); \
+	 })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pad((struct sk_buff*)(skb), (padlen)); \
+	 })
+#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define PKTSETNEXT(osh, skb, x) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+	 })
+#define PKTSETLEN(osh, skb, len) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 __skb_trim((struct sk_buff*)(skb), (len)); \
+	 })
+#define PKTPUSH(osh, skb, bytes) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_push((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define PKTPULL(osh, skb, bytes) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pull((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh)
+#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb) PKTLINK(skb)
+#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x))
+#define PKTPTR(skb) (skb)
+#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTIDAVAIL() (0xFFFFFFFFu)
+#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
+#define PKTORPHAN(skb, tsq) osl_pkt_orphan_partial(skb, tsq)
+extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);
+#else
+#define PKTORPHAN(skb, tsq) ({BCM_REFERENCE(skb); 0;})
+#endif /* Linux Version >= 3.6 */
+
+#ifdef BCMDBG_CTRACE
+#define DEL_CTRACE(zosh, zskb) { \
+	unsigned long zflags; \
+	OSL_CTRACE_LOCK(&(zosh)->ctrace_lock, zflags); \
+	list_del(&(zskb)->ctrace_list); \
+	(zosh)->ctrace_num--; \
+	(zskb)->ctrace_start = 0; \
+	(zskb)->ctrace_count = 0; \
+	OSL_CTRACE_UNLOCK(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define UPDATE_CTRACE(zskb, zfile, zline) { \
+	struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
+	if (_zskb->ctrace_count < CTRACE_NUM) { \
+		_zskb->func[_zskb->ctrace_count] = zfile; \
+		_zskb->line[_zskb->ctrace_count] = zline; \
+		_zskb->ctrace_count++; \
+	} \
+	else { \
+		_zskb->func[_zskb->ctrace_start] = zfile; \
+		_zskb->line[_zskb->ctrace_start] = zline; \
+		_zskb->ctrace_start++; \
+		if (_zskb->ctrace_start >= CTRACE_NUM) \
+			_zskb->ctrace_start = 0; \
+	} \
+}
+
+#define ADD_CTRACE(zosh, zskb, zfile, zline) { \
+	unsigned long zflags; \
+	OSL_CTRACE_LOCK(&(zosh)->ctrace_lock, zflags); \
+	list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
+	(zosh)->ctrace_num++; \
+	UPDATE_CTRACE(zskb, zfile, zline); \
+	OSL_CTRACE_UNLOCK(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define PKTCALLER(zskb) UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
+#endif /* BCMDBG_CTRACE */
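As a usage illustration of the packet primitives defined so far (a hypothetical helper, not part of the patch): PKTGET() returns an skb-backed packet whose data length is already set to the requested size, so a common pattern is to over-allocate and PKTPULL() the headroom back out, leaving space for headers to be PKTPUSH()ed later:

```c
/* Hypothetical helper, for illustration only. */
static int example_pkt_roundtrip(osl_t *osh, uint payload_len)
{
	void *p = PKTGET(osh, PKT_HEADROOM_DEFAULT + payload_len, TRUE);

	if (p == NULL)
		return -1;
	PKTPULL(osh, p, PKT_HEADROOM_DEFAULT); /* reserve headroom for later PKTPUSH() */
	/* ... write payload_len bytes at PKTDATA(osh, p) ... */
	PKTFREE(osh, p, TRUE); /* TRUE: counted as a tx-direction free */
	return 0;
}
```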
+
+#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTLITIDX(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETLITIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#define PKTRESETLITIDX(skb) ({BCM_REFERENCE(skb);})
+#define PKTRITIDX(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETRITIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#define PKTRESETRITIDX(skb) ({BCM_REFERENCE(skb);})
+
+#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+
+#define PKTFRAGLEN(osh, lb, ix) (0)
+#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh)
+
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_HW_HASH */
+#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define AUX_TCP_FIN_RST (1 << 0)
+#define AUX_FREED (1 << 1)
+#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define PKTISFABRIDGED(skb) PKTISFAAUX(skb)
+#else
+#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;})
+
+#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb) BCM_REFERENCE(skb)
+#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
+#if defined(BCM_OBJECT_TRACE)
+extern void linux_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
+#else
+extern void linux_pktfree(osl_t *osh, void *skb, bool send);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void osl_pktclone(osl_t *osh, void **pkt);
+
+#ifdef BCMDBG_PKT /* pkt logging for debugging */
+extern void *linux_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+extern void osl_pktlist_add(osl_t *osh, void *p, int line, char *file);
+extern void osl_pktlist_remove(osl_t *osh, void *p);
+extern char *osl_pktlist_dump(osl_t *osh, char *buf);
+#ifdef BCMDBG_PTRACE
+extern void osl_pkttrace(osl_t *osh, void *pkt, uint16 bit);
+#endif /* BCMDBG_PTRACE */
+#else /* BCMDBG_PKT */
+#ifdef BCMDBG_CTRACE
+#define PKT_CTRACE_DUMP(osh, b) osl_ctrace_dump((osh), (b))
+extern void *linux_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+struct bcmstrbuf;
+extern void osl_ctrace_dump(osl_t *osh,
struct bcmstrbuf *b); +#else +#ifdef BCM_OBJECT_TRACE +extern void *linux_pktget(osl_t *osh, uint len, int line, const char *caller); +extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller); +#else +extern void *linux_pktget(osl_t *osh, uint len); +extern void *osl_pktdup(osl_t *osh, void *skb); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pkt_frmnative(osl_t *osh, void *skb); +#endif /* BCMDBG_CTRACE */ +#endif /* BCMDBG_PKT */ +extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); +#ifdef BCMDBG_PKT +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \ + (struct sk_buff*)(skb), __LINE__, __FILE__) +#else /* BCMDBG_PKT */ +#ifdef BCMDBG_CTRACE +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \ + (struct sk_buff*)(skb), __LINE__, __FILE__) +#define PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb)) +#else +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb)) +#endif /* BCMDBG_CTRACE */ +#endif /* BCMDBG_PKT */ +#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt)) + +#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev) +#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x)) +#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority) +#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x)) +#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW) +#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \ + ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE)) +/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */ +#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned) + +#ifdef CONFIG_NF_CONNTRACK_MARK +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define PKTMARK(p) (((struct sk_buff *)(p))->mark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m) +#else /* !2.6.0 */ +#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m) +#endif /* 2.6.0 */ +#else /* CONFIG_NF_CONNTRACK_MARK */ +#define PKTMARK(p) 0 +#define PKTSETMARK(p, m) +#endif /* CONFIG_NF_CONNTRACK_MARK */ + +#else /* BINOSL */ + +#define OSL_PREF_RANGE_LD(va, sz) +#define OSL_PREF_RANGE_ST(va, sz) + +/* packet primitives */ +#ifdef BCMDBG_PKT +#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__) +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb), __LINE__, __FILE__) +#define PKTLIST_DUMP(osh, buf) osl_pktlist_dump(osh, buf) +#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#else /* BCMDBG_PKT */ +#ifdef BCMDBG_CTRACE +#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__) +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb), __LINE__, __FILE__) +#else +#ifdef BCM_OBJECT_TRACE +#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FUNCTION__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__) +#else +#define PKTGET(osh, len, send) linux_pktget((osh), (len)) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) +#endif /* BCM_OBJECT_TRACE */ +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb)) +#endif /* BCMDBG_CTRACE */ +#define PKTLIST_DUMP(osh, buf) ({BCM_REFERENCE(osh); BCM_REFERENCE(buf);}) +#define PKTDBG_TRACE(osh, pkt, bit) 
({BCM_REFERENCE(osh); BCM_REFERENCE(pkt);}) +#endif /* BCMDBG_PKT */ +#if defined(BCM_OBJECT_TRACE) +#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__) +#else +#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send)) +#endif /* BCM_OBJECT_TRACE */ +#define PKTDATA(osh, skb) osl_pktdata((osh), (skb)) +#define PKTLEN(osh, skb) osl_pktlen((osh), (skb)) +#define PKTHEADROOM(osh, skb) osl_pktheadroom((osh), (skb)) +#define PKTTAILROOM(osh, skb) osl_pkttailroom((osh), (skb)) +#define PKTNEXT(osh, skb) osl_pktnext((osh), (skb)) +#define PKTSETNEXT(osh, skb, x) ({BCM_REFERENCE(osh); osl_pktsetnext((skb), (x));}) +#define PKTSETLEN(osh, skb, len) osl_pktsetlen((osh), (skb), (len)) +#define PKTPUSH(osh, skb, bytes) osl_pktpush((osh), (skb), (bytes)) +#define PKTPULL(osh, skb, bytes) osl_pktpull((osh), (skb), (bytes)) +#define PKTTAG(skb) osl_pkttag((skb)) +#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osh), (pkt)) +#define PKTLINK(skb) osl_pktlink((skb)) +#define PKTSETLINK(skb, x) osl_pktsetlink((skb), (x)) +#define PKTPRIO(skb) osl_pktprio((skb)) +#define PKTSETPRIO(skb, x) osl_pktsetprio((skb), (x)) +#define PKTSHARED(skb) osl_pktshared((skb)) +#define PKTSETPOOL(osh, skb, x, y) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) +#define PKTFREELIST(skb) PKTLINK(skb) +#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x)) +#define PKTPTR(skb) (skb) +#define PKTID(skb) ({BCM_REFERENCE(skb); 0;}) +#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);}) +#define PKTIDAVAIL() (0xFFFFFFFFu) + +#ifdef BCMDBG_PKT /* pkt logging for debugging */ +extern void *linux_pktget(osl_t *osh, uint len, int line, char *file); +extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file); +extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file); +#else /* BCMDBG_PKT */ +#ifdef BCM_OBJECT_TRACE +extern void *linux_pktget(osl_t *osh, uint len, int line, const char *caller); +extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller); +#else +extern void *linux_pktget(osl_t *osh, uint len); +extern void *osl_pktdup(osl_t *osh, void *skb); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pkt_frmnative(osl_t *osh, void *skb); +#endif /* BCMDBG_PKT */ +#if defined(BCM_OBJECT_TRACE) +extern void linux_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller); +#else +extern void linux_pktfree(osl_t *osh, void *skb, bool send); +#endif /* BCM_OBJECT_TRACE */ +extern uchar *osl_pktdata(osl_t *osh, void *skb); +extern uint osl_pktlen(osl_t *osh, void *skb); +extern uint osl_pktheadroom(osl_t *osh, void *skb); +extern uint osl_pkttailroom(osl_t *osh, void *skb); +extern void *osl_pktnext(osl_t *osh, void *skb); +extern void osl_pktsetnext(void *skb, void *x); +extern void osl_pktsetlen(osl_t *osh, void *skb, uint len); +extern uchar *osl_pktpush(osl_t *osh, void *skb, int bytes); +extern uchar *osl_pktpull(osl_t *osh, void *skb, int bytes); +extern void *osl_pkttag(void *skb); +extern void *osl_pktlink(void *skb); +extern void osl_pktsetlink(void *skb, void *x); +extern uint osl_pktprio(void *skb); +extern void osl_pktsetprio(void *skb, uint x); +extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); +extern bool osl_pktshared(void *skb); + +#ifdef BCMDBG_PKT /* pkt logging for debugging */ +extern char *osl_pktlist_dump(osl_t *osh, char *buf); +extern void osl_pktlist_add(osl_t *osh, void *p, int line, char *file); +extern 
void osl_pktlist_remove(osl_t *osh, void *p);
+#endif /* BCMDBG_PKT */
+
+#endif /* BINOSL */
+
+#define PKTALLOCED(osh) osl_pktalloced(osh)
+extern uint osl_pktalloced(osl_t *osh);
+
+#define PKTPOOLHEAPCOUNT() (0u)
+
+#endif /* BCMDRIVER */
+
+#endif /* _linux_pkt_h_ */
diff --git a/bcmdhd.101.10.361.x/include/linuxver.h b/bcmdhd.101.10.361.x/include/linuxver.h
new file mode 100755
index 0000000..44f32ce
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/linuxver.h
@@ -0,0 +1,945 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+/*
+ * The pragmas below are added as a workaround for errors caused by the update
+ * of the gcc version to 4.8.2. GCC 4.6 adds -Wunused-but-set-variable and
+ * -Wunused-but-set-parameter to -Wall, and for some configurations those
+ * warnings are produced in the linux kernel. So for now the pragmas below
+ * disable the offending warnings. The permanent solution is to use -isystem,
+ * but there is a performance problem with this change on RHEL5 servers.
+ */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
+#endif
+
+#include <typedefs.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#include <linux/kconfig.h>
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+	MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
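A one-line usage sketch of the shim above (the parameter is hypothetical): the same declaration builds against 2.4 kernels, where it degrades to MODULE_PARM, and against 2.6+ kernels, where it is the native module_param():

```c
/* Hypothetical module parameter, for illustration only. */
static int example_msg_level = 0;
module_param(example_msg_level, int, 0644); /* MODULE_PARM(example_msg_level, "i") on < 2.5.0 */
```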
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9)) +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#include +#else +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) +#undef IP_TOS +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */ +#include + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)) +#include +#else +#include +#ifndef work_struct +#define work_struct tq_struct +#endif +#ifndef INIT_WORK +#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data)) +#endif +#ifndef schedule_work +#define schedule_work(_work) schedule_task((_work)) +#endif +#ifndef flush_scheduled_work +#define flush_scheduled_work() flush_scheduled_tasks() +#endif +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */ + +/* + * TODO: + * daemonize() API is deprecated from kernel-3.8 onwards. More debugging + * has to be done whether this can cause any issue in case, if driver is + * loaded as a module from userspace. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define DAEMONIZE(a) do { \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); \ + } while (0) +#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func) +#else +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work) +#if (!(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && (RHEL_MAJOR == 5))) +/* Exclude RHEL 5 */ +typedef void (*work_func_t)(void *work); +#endif +#endif /* >= 2.6.20 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/* Some distributions have their own 2.6.x compatibility layers */ +#ifndef IRQ_NONE +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif +#else +typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#define IRQF_SHARED SA_SHIRQ +#endif /* < 2.6.18 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) +#ifdef CONFIG_NET_RADIO +#endif +#endif /* < 2.6.17 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) +#define MOD_INC_USE_COUNT +#define MOD_DEC_USE_COUNT +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif /* LINUX_VERS >= 4.11.0 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) +#include +#endif +#endif /* 
LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */ + +#ifndef __exit +#define __exit +#endif +#ifndef __devexit +#define __devexit +#endif +#ifndef __devinit +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) + #define __devinit __init +#else +/* All devices are hotpluggable since linux 3.8.0 */ + #define __devinit +#endif +#endif /* !__devinit */ +#ifndef __devinitdata +#define __devinitdata +#endif +#ifndef __devexit_p +#define __devexit_p(x) x +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)) + +#define pci_get_drvdata(dev) (dev)->sysdata +#define pci_set_drvdata(dev, value) (dev)->sysdata = (value) + +/* + * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration + */ + +struct pci_device_id { + unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */ + unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ + unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */ + unsigned long driver_data; /* Data private to the driver */ +}; + +struct pci_driver { + struct list_head node; + char *name; + const struct pci_device_id *id_table; /* NULL if wants all devices */ + int (*probe)(struct pci_dev *dev, + const struct pci_device_id *id); /* New device inserted */ + void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug + * capable driver) + */ + void (*suspend)(struct pci_dev *dev); /* Device suspended */ + void (*resume)(struct pci_dev *dev); /* Device woken up */ +}; + +#define MODULE_DEVICE_TABLE(type, name) +#define PCI_ANY_ID (~0) + +/* compatpci.c */ +#define pci_module_init pci_register_driver +extern int pci_register_driver(struct pci_driver *drv); +extern void pci_unregister_driver(struct pci_driver *drv); + +#endif /* PCI registration */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)) +#define pci_module_init pci_register_driver +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) +#ifdef MODULE +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#else +#define module_init(x) __initcall(x); +#define module_exit(x) __exitcall(x); +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) +#define WL_USE_NETDEV_OPS +#else +#undef WL_USE_NETDEV_OPS +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL) +#define WL_CONFIG_RFKILL +#else +#undef WL_CONFIG_RFKILL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48)) +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13)) +#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)]) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44)) +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23)) +#define pci_enable_device(dev) do { } while (0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)) +#define net_device device +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42)) + +/* + * DMA mapping + * + * See linux/Documentation/DMA-mapping.txt + */ + +#ifndef PCI_DMA_TODEVICE +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#endif + +typedef u32 dma_addr_t; + +/* Pure 2^n version of get_order */ +static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + 
return order; +} + +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC | GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + if (ret != NULL) { + bzero(ret, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} +#ifdef ILSIM +extern uint pci_map_single(void *dev, void *va, uint size, int direction); +extern void pci_unmap_single(void *dev, uint pa, uint size, int direction); +#else +#define pci_map_single(cookie, address, size, dir) virt_to_bus(address) +#define pci_unmap_single(cookie, address, size, dir) +#endif + +#endif /* DMA mapping */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + +typedef struct timer_list timer_list_compat_t; + +#define init_timer_compat(timer_compat, cb, priv) \ + init_timer(timer_compat); \ + (timer_compat)->data = (ulong)priv; \ + (timer_compat)->function = cb +#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv +#define timer_expires(timer_compat) (timer_compat)->expires + +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */ + +typedef struct timer_list_compat { + struct timer_list timer; + void *arg; + void (*callback)(ulong arg); +} timer_list_compat_t; + +extern void timer_cb_compat(struct timer_list *tl); + +#define init_timer_compat(timer_compat, cb, priv) \ + (timer_compat)->arg = priv; \ + (timer_compat)->callback = cb; \ + timer_setup(&(timer_compat)->timer, timer_cb_compat, 0); +#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv +#define timer_expires(timer_compat) (timer_compat)->timer.expires + +#define del_timer(t) del_timer(&((t)->timer)) +#ifndef del_timer_sync +#define del_timer_sync(t) del_timer_sync(&((t)->timer)) +#endif +#define timer_pending(t) timer_pending(&((t)->timer)) +#define add_timer(t) add_timer(&((t)->timer)) +#define mod_timer(t, j) mod_timer(&((t)->timer), j) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) +#define rtc_time_to_tm(a, b) rtc_time64_to_tm(a, b) +#else +#define rtc_time_to_tm(a, b) rtc_time_to_tm(a, b) +#endif /* LINUX_VER >= 3.19.0 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) +#define time_to_tm(a, b, c) time64_to_tm(a, b, c) +#else +#define time_to_tm(a, b, c) time_to_tm(a, b, c) +#endif /* LINUX_VER >= 4.20.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) + +#define dev_kfree_skb_any(a) dev_kfree_skb(a) +#define netif_down(dev) do { (dev)->start = 0; } while (0) + +/* pcmcia-cs provides its own netdevice compatibility layer */ +#ifndef _COMPAT_NETDEVICE_H + +/* + * SoftNet + * + * For pre-softnet kernels we need to tell the upper layer not to + * re-enter start_xmit() while we are in there. However softnet + * guarantees not to enter while we are in there so there is no need + * to do the netif_stop_queue() dance unless the transmit queue really + * gets stuck. This should also improve performance according to tests + * done by Aman Singla. 
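+ *
+ * Typical driver usage is unchanged by these shims: call netif_stop_queue()
+ * from the start_xmit() handler when the tx ring fills up, and
+ * netif_wake_queue() from the tx-reclaim path once descriptors are free.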
+ */ + +#define dev_kfree_skb_irq(a) dev_kfree_skb(a) +#define netif_wake_queue(dev) \ + do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0) +#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy) + +static inline void netif_start_queue(struct net_device *dev) +{ + dev->tbusy = 0; + dev->interrupt = 0; + dev->start = 1; +} + +#define netif_queue_stopped(dev) (dev)->tbusy +#define netif_running(dev) (dev)->start + +#endif /* _COMPAT_NETDEVICE_H */ + +#define netif_device_attach(dev) netif_start_queue(dev) +#define netif_device_detach(dev) netif_stop_queue(dev) + +/* 2.4.x renamed bottom halves to tasklets */ +#define tasklet_struct tq_struct +static inline void tasklet_schedule(struct tasklet_struct *tasklet) +{ + queue_task(tasklet, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} + +static inline void tasklet_init(struct tasklet_struct *tasklet, + void (*func)(unsigned long), + unsigned long data) +{ + tasklet->next = NULL; + tasklet->sync = 0; + tasklet->routine = (void (*)(void *))func; + tasklet->data = (void *)data; +} +#define tasklet_kill(tasklet) { do {} while (0); } + +/* 2.4.x introduced del_timer_sync() */ +#define del_timer_sync(timer) del_timer(timer) + +#else + +#define netif_down(dev) + +#endif /* SoftNet */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/* + * Emit code to initialise a tq_struct's routine and data pointers + */ +#define PREPARE_TQUEUE(_tq, _routine, _data) \ + do { \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) + +/* + * Emit code to initialise all of a tq_struct + */ +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + PREPARE_TQUEUE((_tq), (_routine), (_data)); \ + } while (0) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */ + +/* Power management related macro & routines */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9) +#define PCI_SAVE_STATE(a, b) pci_save_state(a) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a) +#else +#define PCI_SAVE_STATE(a, b) pci_save_state(a, b) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) +static inline int +pci_save_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + if (buffer) { + /* 100% dword access ok here? */ + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &buffer[i]); + } + return 0; +} + +static inline int +pci_restore_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + + if (buffer) { + for (i = 0; i < 16; i++) + pci_write_config_dword(dev, i * 4, buffer[i]); + } + /* + * otherwise, write the context information we know from bootup. + * This works around a problem where warm-booting from Windows + * combined with a D3(hot)->D0 transition causes PCI config + * header data to be forgotten. 
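+ * (Writing the six base address registers back from the resource tree,
+ * plus the interrupt line, is the minimum needed to make the device
+ * addressable again when no saved-state buffer was supplied.)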
+ */ + else { + for (i = 0; i < 6; i ++) + pci_write_config_dword(dev, + PCI_BASE_ADDRESS_0 + (i * 4), + pci_resource_start(dev, i)); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + } + return 0; +} +#endif /* PCI power management */ + +/* Old cp0 access macros deprecated in 2.4.19 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19)) +#define read_c0_count() read_32bit_cp0_register(CP0_COUNT) +#endif + +/* Module refcount handled internally in 2.6.x */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#else +#define OLD_MOD_INC_USE_COUNT do {} while (0) +#define OLD_MOD_DEC_USE_COUNT do {} while (0) +#endif +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#endif +#ifndef MOD_INC_USE_COUNT +#define MOD_INC_USE_COUNT do {} while (0) +#endif +#ifndef MOD_DEC_USE_COUNT +#define MOD_DEC_USE_COUNT do {} while (0) +#endif +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) do {} while (0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) +#ifndef HAVE_FREE_NETDEV +#define free_netdev(dev) kfree(dev) +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/* struct packet_type redefined in 2.6.x */ +#define af_packet_priv data +#endif + +/* suspend args */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) +#define DRV_SUSPEND_STATE_TYPE pm_message_t +#else +#define DRV_SUSPEND_STATE_TYPE uint32 +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define CHECKSUM_HW CHECKSUM_PARTIAL +#endif + +typedef struct { + void *parent; /* some external entity that the thread supposed to work for */ + char *proc_name; + struct task_struct *p_task; + long thr_pid; + int prio; /* priority */ + struct semaphore sema; + int terminated; + struct completion completed; + int flush_ind; + struct completion flushed; + spinlock_t spinlock; + int up_cnt; +} tsk_ctl_t; + +/* ANDREY: new MACROs to start stop threads(OLD kthread API STYLE) */ +/* requires tsk_ctl_t tsk argument, the caller's priv data is passed in owner ptr */ +/* note this macro assumes there may be only one context waiting on thread's completion */ +#ifdef KERNEL_TIMESTAMP +extern char *dhd_log_dump_get_timestamp(void); +#ifdef SYSTEM_TIMESTAMP +extern char* dhd_dbg_get_system_timestamp(void); +#define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp(), dhd_dbg_get_system_timestamp() +#define PERCENT_S "[%s][%s]" +#else +#define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp() +#define PERCENT_S "[%s]" +#endif +#else +#define PRINTF_SYSTEM_TIME "" +#define PERCENT_S "%s" +#endif +#ifndef DHD_LOG_PREFIX +#define DHD_LOG_PREFIX "[dhd]" +#endif +#define DHD_LOG_PREFIXS DHD_LOG_PREFIX" " +#ifdef DHD_DEBUG +#define printf_thr(fmt, args...) 
printk(PERCENT_S DHD_LOG_PREFIXS fmt, PRINTF_SYSTEM_TIME, ## args) +#define DBG_THR(args) do {printf_thr args;} while (0) +#else +#define DBG_THR(x) +#endif + +extern unsigned long osl_spin_lock(void *lock); +extern void osl_spin_unlock(void *lock, unsigned long flags); + +#define TSK_LOCK(lock, flags) (flags) = osl_spin_lock(lock) +#define TSK_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags)) + +static inline bool binary_sema_down(tsk_ctl_t *tsk) +{ + if (down_interruptible(&tsk->sema) == 0) { + unsigned long flags = 0; + TSK_LOCK(&tsk->spinlock, flags); + if (tsk->up_cnt == 1) + tsk->up_cnt--; + else { + DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt)); + } + TSK_UNLOCK(&tsk->spinlock, flags); + return false; + } else + return true; +} + +static inline bool binary_sema_up(tsk_ctl_t *tsk) +{ + bool sem_up = false; + unsigned long flags = 0; + + TSK_LOCK(&tsk->spinlock, flags); + if (tsk->up_cnt == 0) { + tsk->up_cnt++; + sem_up = true; + } else if (tsk->up_cnt == 1) { + /* dhd_sched_dpc: dpc is alread up! */ + } else + DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt)); + + TSK_UNLOCK(&tsk->spinlock, flags); + + if (sem_up) + up(&tsk->sema); + + return sem_up; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) +#define SMP_RD_BARRIER_DEPENDS(x) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x) +#else +#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x) +#endif + +#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \ +{ \ + sema_init(&((tsk_ctl)->sema), 0); \ + init_completion(&((tsk_ctl)->completed)); \ + init_completion(&((tsk_ctl)->flushed)); \ + (tsk_ctl)->parent = owner; \ + (tsk_ctl)->proc_name = name; \ + (tsk_ctl)->terminated = FALSE; \ + (tsk_ctl)->flush_ind = FALSE; \ + (tsk_ctl)->up_cnt = 0; \ + (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \ + if (IS_ERR((tsk_ctl)->p_task)) { \ + (tsk_ctl)->thr_pid = -1; \ + DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \ + (tsk_ctl)->proc_name)); \ + } else { \ + (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \ + spin_lock_init(&((tsk_ctl)->spinlock)); \ + DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + }; \ +} + +#define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */ + +#define PROC_STOP(tsk_ctl) \ +{ \ + uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + (tsk_ctl)->terminated = TRUE; \ + smp_wmb(); \ + up(&((tsk_ctl)->sema)); \ + DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ + if (timeout == 0) \ + DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + else \ + DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + (tsk_ctl)->parent = NULL; \ + (tsk_ctl)->proc_name = NULL; \ + (tsk_ctl)->thr_pid = -1; \ + (tsk_ctl)->up_cnt = 0; \ +} + +#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \ +{ \ + uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + (tsk_ctl)->terminated = TRUE; \ + smp_wmb(); \ + binary_sema_up(tsk_ctl); \ + DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ + if (timeout == 0) \ + 
DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + else \ + DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + (tsk_ctl)->parent = NULL; \ + (tsk_ctl)->proc_name = NULL; \ + (tsk_ctl)->thr_pid = -1; \ +} + +/* +* Flush is non-rentrant, so callers must make sure +* there is no race condition. +* For safer exit, added wait_for_completion_timeout +* with 1 sec timeout. +*/ +#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \ +{ \ + uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + (tsk_ctl)->flush_ind = TRUE; \ + smp_wmb(); \ + binary_sema_up(tsk_ctl); \ + DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \ + if (timeout == 0) \ + DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + else \ + DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ +} + +/* ----------------------- */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +#define KILL_PROC(nr, sig) \ +{ \ +struct task_struct *tsk; \ +struct pid *pid; \ +pid = find_get_pid((pid_t)nr); \ +tsk = pid_task(pid, PIDTYPE_PID); \ +if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 30)) +#define KILL_PROC(pid, sig) \ +{ \ + struct task_struct *tsk; \ + tsk = find_task_by_vpid(pid); \ + if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#define KILL_PROC(pid, sig) \ +{ \ + kill_proc(pid, sig, 1); \ +} +#endif +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#include +#else +#include + +#define __wait_event_interruptible_timeout(wq, condition, ret) \ +do { \ + wait_queue_t __wait; \ + init_waitqueue_entry(&__wait, current); \ + \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) + +#define wait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + +/* +For < 2.6.24, wl creates its own netdev but doesn't +align the priv area like the genuine alloc_netdev(). 
+Since netdev_priv() always gives us the aligned address, it will
+not match our unaligned address for < 2.6.24
+*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev)	(dev->priv)
+#else
+#define DEV_PRIV(dev)	netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p) wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
+#endif /* >= 2.6.20 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled()))
+#else
+#define CAN_SLEEP() (FALSE)
+#endif
+
+#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define RANDOM32 prandom_u32
+#define RANDOM_BYTES prandom_bytes
+#else
+#define RANDOM32 random32
+#define RANDOM_BYTES get_random_bytes
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define SRANDOM32(entropy) prandom_seed(entropy)
+#else
+#define SRANDOM32(entropy) srandom32(entropy)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+/*
+ * Override the latest kfifo functions with
+ * older versions to work on older kernels
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
+#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
+#define kfifo_esize(a) 1
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d)
+#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d)
+#define kfifo_esize(a) 1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic pop
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#include <linux/fs.h>
+static inline struct inode *file_inode(const struct file *f)
+{
+	return f->f_dentry->d_inode;
+}
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
+#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
+int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+#define netdev_tx_t int
+#endif
+
+#endif /* _linuxver_h_ */
diff --git a/bcmdhd.101.10.361.x/include/lpflags.h b/bcmdhd.101.10.361.x/include/lpflags.h
new file mode 100755
index 0000000..f284bbb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/lpflags.h
@@ -0,0 +1,39 @@
+/*
+ * Chip related low power flags
+ *
+ * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ +#ifndef _lpflags_h_ +#define _lpflags_h_ + +/* Chip related low power flags (lpflags) */ +#define LPFLAGS_SI_GLOBAL_DISABLE (1 << 0) +#define LPFLAGS_SI_MEM_STDBY_DISABLE (1 << 1) +#define LPFLAGS_SI_SFLASH_DISABLE (1 << 2) +#define LPFLAGS_SI_BTLDO3P3_DISABLE (1 << 3) +#define LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE (1 << 4) +#define LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON (1 << 5) +#define LPFLAGS_SI_DS0_SLEEP_PDA_DISABLE (1 << 6) +#define LPFLAGS_SI_DS1_SLEEP_PDA_DISABLE (1 << 7) +#define LPFLAGS_PHY_GLOBAL_DISABLE (1 << 16) +#define LPFLAGS_PHY_LP_DISABLE (1 << 17) +#define LPFLAGS_PSM_PHY_CTL (1 << 18) + +#endif /* _lpflags_h_ */ diff --git a/bcmdhd.101.10.361.x/include/mbo.h b/bcmdhd.101.10.361.x/include/mbo.h new file mode 100755 index 0000000..14bd92b --- /dev/null +++ b/bcmdhd.101.10.361.x/include/mbo.h @@ -0,0 +1,279 @@ +/* + * Fundamental types and constants relating to WFA MBO + * (Multiband Operation) + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _MBO_H_ +#define _MBO_H_ + +/* This marks the start of a packed structure section. */ +#include + +/* WiFi MBO OUI values */ +#define MBO_OUI WFA_OUI /* WiFi OUI 50:6F:9A */ +/* oui_type field identifying the type and version of the MBO IE. */ +#define MBO_OUI_TYPE WFA_OUI_TYPE_MBO /* OUI Type/Version */ +/* IEEE 802.11 vendor specific information element. 
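+ * (0xdd is the generic 802.11 vendor-specific element ID, 221 decimal; the
+ * P2P and NAN IEs use the same element ID and differ only in OUI/type.)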
*/
+#define MBO_IE_ID 0xdd
+
+/* MBO ATTR related macros */
+#define MBO_ATTR_ID_OFF 0
+#define MBO_ATTR_LEN_OFF 1
+#define MBO_ATTR_DATA_OFF 2
+
+#define MBO_ATTR_ID_LEN 1 /* Attr ID field length */
+#define MBO_ATTR_LEN_LEN 1 /* Attr Length field length */
+#define MBO_ATTR_HDR_LEN 2 /* ID + 1-byte length field */
+
+/* MBO subelements related */
+#define MBO_SUBELEM_ID 0xdd
+#define MBO_SUBELEM_OUI WFA_OUI
+
+#define MBO_SUBELEM_ID_LEN 1 /* SubElement ID field length */
+#define MBO_SUBELEM_LEN_LEN 1 /* SubElement length field length */
+#define MBO_SUBELEM_HDR_LEN 6 /* ID + length + OUI + OUI TYPE */
+
+#define MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L) (7 + (L)) /* value of length field */
+#define MBO_NON_PREF_CHAN_SUBELEM_TOT_LEN(L) \
+	(MBO_SUBELEM_ID_LEN + MBO_SUBELEM_LEN_LEN + MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L))
+/* MBO attributes as defined in the mbo spec */
+enum {
+	MBO_ATTR_MBO_AP_CAPABILITY = 1,
+	MBO_ATTR_NON_PREF_CHAN_REPORT = 2,
+	MBO_ATTR_CELL_DATA_CAP = 3,
+	MBO_ATTR_ASSOC_DISALLOWED = 4,
+	MBO_ATTR_CELL_DATA_CONN_PREF = 5,
+	MBO_ATTR_TRANS_REASON_CODE = 6,
+	MBO_ATTR_TRANS_REJ_REASON_CODE = 7,
+	MBO_ATTR_ASSOC_RETRY_DELAY = 8
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ie_s {
+	uint8 id;		/* IE ID: MBO_IE_ID 0xDD */
+	uint8 len;		/* IE length */
+	uint8 oui[WFA_OUI_LEN];	/* MBO_OUI 50:6F:9A */
+	uint8 oui_type;		/* MBO_OUI_TYPE 0x16 */
+	uint8 attr[1];		/* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_mbo_ie_t;
+
+#define MBO_IE_HDR_SIZE (OFFSETOF(wifi_mbo_ie_t, attr))
+/* oui:3 bytes + oui type:1 byte */
+#define MBO_IE_NO_ATTR_LEN 4
+
+/* MBO AP Capability Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ap_cap_ind_attr_s {
+	/* Attribute ID - 0x01. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint8 len;
+	/* AP capability bitmap */
+	uint8 cap_ind;
+} BWL_POST_PACKED_STRUCT wifi_mbo_ap_cap_ind_attr_t;
+
+/* MBO AP Capability Indication Field Values */
+#define MBO_AP_CAP_IND_CELLULAR_AWARE 0x40
+
+/* Non-preferred Channel Report Attribute */
+#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_OFF 2
+#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF 3
+#define MBO_NON_PREF_CHAN_ATTR_PREF_OFF(L) \
+	(MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF + (L))
+
+#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_LEN 1
+#define MBO_NON_PREF_CHAN_ATTR_PREF_LEN 1
+#define MBO_NON_PREF_CHAN_ATTR_REASON_LEN 1
+
+#define MBO_NON_PREF_CHAN_ATTR_LEN(L) ((L) + 3)
+#define MBO_NON_PREF_CHAN_ATTR_TOT_LEN(L) (MBO_ATTR_HDR_LEN + (L) + 3)
+
+/* attribute len - (opclass + Pref + Reason) */
+#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_LEN(L) ((L) - 3)
+
+/* MBO Non-preferred Channel Report: "Preference" field value */
+enum {
+	MBO_STA_NON_OPERABLE_BAND_CHAN = 0,
+	MBO_STA_NON_PREFERRED_BAND_CHAN = 1,
+	MBO_STA_PREFERRED_BAND_CHAN = 255
+};
+
+/* MBO Non-preferred Channel Report: "Reason Code" field value */
+enum {
+	MBO_NON_PREF_CHAN_RC_UNSPECIFIED = 0,
+	MBO_NON_PREF_CHAN_RC_BCN_STRENGTH = 1,
+	MBO_NON_PREF_CHAN_RC_CO_LOC_INTERFERENCE = 2,
+	MBO_NON_PREF_CHAN_RC_IN_DEV_INTERFERENCE = 3
+};
+
+/* Cellular Data Capability Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_cap_attr_s {
+	/* Attribute ID - 0x03.
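+	 * Matches MBO_ATTR_CELL_DATA_CAP in the attribute enum above.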
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* MBO STA's cellular capability */ + uint8 cell_conn; +} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_cap_attr_t; + +/* MBO Cellular Data Capability: "Cellular Connectivity" field value */ +enum { + MBO_CELL_DATA_CONN_AVAILABLE = 1, + MBO_CELL_DATA_CONN_NOT_AVAILABLE = 2, + MBO_CELL_DATA_CONN_NOT_CAPABLE = 3 +}; + +/* Association Disallowed attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_disallowed_attr_s { + /* Attribute ID - 0x04. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Reason of not accepting new association */ + uint8 reason_code; +} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_disallowed_attr_t; + +/* Association Disallowed attr Reason code field values */ +enum { + MBO_ASSOC_DISALLOWED_RC_UNSPECIFIED = 1, + MBO_ASSOC_DISALLOWED_RC_MAX_STA_REACHED = 2, + MBO_ASSOC_DISALLOWED_RC_AIR_IFACE_OVERLOADED = 3, + MBO_ASSOC_DISALLOWED_RC_AUTH_SRVR_OVERLOADED = 4, + MBO_ASSOC_DISALLOWED_RC_INSUFFIC_RSSI = 5 +}; + +/* Cellular Data Conn Pref attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_conn_pref_attr_s { + /* Attribute ID - 0x05. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Preference value of cellular connection */ + uint8 cell_pref; +} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_conn_pref_attr_t; + +/* Cellular Data Conn Pref attr: Cellular Pref field values */ +enum { + MBO_CELLULAR_DATA_CONN_EXCLUDED = 1, + MBO_CELLULAR_DATA_CONN_NOT_PREFERRED = 2, + MBO_CELLULAR_DATA_CONN_PREFERRED = 255 +}; + +/* Transition Reason Code Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_reason_code_attr_s { + /* Attribute ID - 0x06. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Reason of transition recommendation */ + uint8 trans_reason_code; +} BWL_POST_PACKED_STRUCT wifi_mbo_trans_reason_code_attr_t; + +/* Transition Reason Code Attr: trans reason code field values */ +enum { + MBO_TRANS_REASON_UNSPECIFIED = 0, + MBO_TRANS_REASON_EXCESSV_FRM_LOSS_RATE = 1, + MBO_TRANS_REASON_EXCESSV_TRAFFIC_DELAY = 2, + MBO_TRANS_REASON_INSUFF_BW = 3, + MBO_TRANS_REASON_LOAD_BALANCING = 4, + MBO_TRANS_REASON_LOW_RSSI = 5, + MBO_TRANS_REASON_EXCESSV_RETRANS_RCVD = 6, + MBO_TRANS_REASON_HIGH_INTERFERENCE = 7, + MBO_TRANS_REASON_GRAY_ZONE = 8, + MBO_TRANS_REASON_PREMIUM_AP_TRANS = 9 +}; + +/* Transition Rejection Reason Code Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_rej_reason_code_attr_s { + /* Attribute ID - 0x07. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Reason of transition rejection */ + uint8 trans_rej_reason_code; +} BWL_POST_PACKED_STRUCT wifi_mbo_trans_rej_reason_code_attr_t; + +/* Transition Rej Reason Code Attr: trans rej reason code field values */ +enum { + MBO_TRANS_REJ_REASON_UNSPECIFIED = 0, + MBO_TRANS_REJ_REASON_EXSSIV_FRM_LOSS_RATE = 1, + MBO_TRANS_REJ_REASON_EXSSIV_TRAFFIC_DELAY = 2, + MBO_TRANS_REJ_REASON_INSUFF_QOS_CAPACITY = 3, + MBO_TRANS_REJ_REASON_LOW_RSSI = 4, + MBO_TRANS_REJ_REASON_HIGH_INTERFERENCE = 5, + MBO_TRANS_REJ_REASON_SERVICE_UNAVAIL = 6 +}; + +/* Assoc Retry Delay Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_retry_delay_attr_s { + /* Attribute ID - 0x08. 
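+	 * Matches MBO_ATTR_ASSOC_RETRY_DELAY in the attribute enum above.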
*/
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint8 len;
+	/* Number of seconds before next assoc attempt */
+	uint16 reassoc_delay;
+} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_retry_delay_attr_t;
+
+#define MBO_ANQP_OUI_TYPE 0x12 /* OUI Type/Version */
+
+/* MBO ANQP Element */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_anqp_elem_s {
+	/* ID - 56797 */
+	uint16 info_id;
+	/* Length of the OUI + Vendor Specific content */
+	uint16 len;
+	/* WFA_OUI 50:6F:9A */
+	uint8 oui[WFA_OUI_LEN];
+	/* MBO_ANQP_OUI_TYPE 0x12 */
+	uint8 oui_type;
+	/* MBO ANQP element type */
+	uint8 sub_type;
+	/* variable len payload */
+	uint8 payload[1];
+} BWL_POST_PACKED_STRUCT wifi_mbo_anqp_elem_t;
+
+#define MBO_ANQP_ELEM_HDR_SIZE (OFFSETOF(wifi_mbo_anqp_elem_t, payload))
+
+/* oui:3 bytes + oui type:1 byte + sub type:1 byte */
+#define MBO_ANQP_ELEM_NO_PAYLOAD_LEN 5
+
+/* MBO ANQP Subtype Values */
+enum {
+	MBO_ANQP_ELEM_MBO_QUERY_LIST = 1,
+	MBO_ANQP_ELEM_CELL_DATA_CONN_PREF = 2
+};
+
+/* MBO sub-elements */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_cap_subelem_s {
+	/* 0xDD */
+	uint8 sub_elem_id;
+	/* Length of the following fields in sub-element */
+	uint8 len;
+	/* WFA_OUI 50:6F:9A */
+	uint8 oui[WFA_OUI_LEN];
+	/* OUI_TYPE 0x03 */
+	uint8 oui_type;
+	/* STA cellular capability */
+	uint8 cell_conn;
+} BWL_POST_PACKED_STRUCT wifi_mbo_cell_cap_subelem_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MBO_H_ */
diff --git a/bcmdhd.101.10.361.x/include/miniopt.h b/bcmdhd.101.10.361.x/include/miniopt.h
new file mode 100755
index 0000000..b486c07
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/miniopt.h
@@ -0,0 +1,73 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY 128 /* Max options */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;	/* name for prompt in error strings */
+	const char* flags;	/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif /* MINI_OPT_H */
diff --git a/bcmdhd.101.10.361.x/include/monitor.h b/bcmdhd.101.10.361.x/include/monitor.h
new file mode 100755
index 0000000..4b92cda
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/monitor.h
@@ -0,0 +1,230 @@
+/*
+ * Monitor Mode definitions.
+ * This header file houses the defines and function prototypes used by
+ * both the wl firmware and drivers.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ * + * + * <> + */ +#ifndef _MONITOR_H_ +#define _MONITOR_H_ + +#include + +#include +/* wl_monitor rx status per packet */ +typedef struct BWL_PRE_PACKED_STRUCT wl_rxsts { + uint pkterror; /* error flags per pkt */ + uint phytype; /* 802.11 A/B/G /N */ + chanspec_t chanspec; /* channel spec */ + uint16 datarate; /* rate in 500kbps */ + uint8 mcs; /* MCS for HT frame */ + uint8 htflags; /* HT modulation flags */ + uint antenna; /* antenna pkts received on */ + uint pktlength; /* pkt length minus bcm phy hdr */ + uint32 mactime; /* time stamp from mac, count per 1us */ + uint sq; /* signal quality */ + int32 signal; /* in dBm */ + int32 noise; /* in dBm */ + uint preamble; /* Unknown, short, long */ + uint encoding; /* Unknown, CCK, PBCC, OFDM, HT, VHT */ + uint nfrmtype; /* special 802.11n frames(AMPDU, AMSDU) */ + uint8 nss; /* Number of spatial streams for VHT frame */ + uint8 coding; + uint16 aid; /* Partial AID for VHT frame */ + uint8 gid; /* Group ID for VHT frame */ + uint8 bw; /* Bandwidth for VHT frame */ + uint16 vhtflags; /* VHT modulation flags */ + uint16 bw_nonht; /* non-HT bw advertised in rts/cts */ + uint32 ampdu_counter; /* AMPDU counter for sniffer */ + uint32 sig_a1; /* TODO: this unused field needs to be removed */ + uint32 sig_a2; /* TODO: this unused field needs to be removed */ + uint16 data1; + uint16 data2; + uint16 data3; + uint16 data4; + uint16 data5; + uint16 data6; + uint8 ru_channel1[4]; + uint8 ru_channel2[4]; + uint16 flag1; + uint16 flag2; +} BWL_POST_PACKED_STRUCT wl_rxsts_t, wl_mon_rxsts_t; +#include + +#define WLMONRXSTS_SIZE sizeof(wl_rxsts_t) + +/* phy type */ +#define WL_RXS_PHY_N 0x00000004 /* N phy type */ + +/* encoding */ +#define WL_RXS_ENCODING_UNKNOWN 0x00000000 +#define WL_RXS_ENCODING_DSSS_CCK 0x00000001 /* DSSS/CCK encoding (1, 2, 5.5, 11) */ +#define WL_RXS_ENCODING_OFDM 0x00000002 /* OFDM encoding */ +#define WL_RXS_ENCODING_HT 0x00000003 /* HT encoding */ +#define WL_RXS_ENCODING_VHT 0x00000004 /* VHT encoding */ +#define WL_RXS_ENCODING_HE 0x00000005 /* HE encoding */ +#define WL_RXS_ENCODING_EHT 0x00000006 /* EHT encoding */ + +/* status per error RX pkt */ +#define WL_RXS_CRC_ERROR 0x00000001 /* CRC Error in packet */ +#define WL_RXS_RUNT_ERROR 0x00000002 /* Runt packet */ +#define WL_RXS_ALIGN_ERROR 0x00000004 /* Misaligned packet */ +#define WL_RXS_OVERSIZE_ERROR 0x00000008 /* packet bigger than RX_LENGTH (usually 1518) */ +#define WL_RXS_WEP_ICV_ERROR 0x00000010 /* Integrity Check Value error */ +#define WL_RXS_WEP_ENCRYPTED 0x00000020 /* Encrypted with WEP */ +#define WL_RXS_PLCP_SHORT 0x00000040 /* Short PLCP error */ +#define WL_RXS_DECRYPT_ERR 0x00000080 /* Decryption error */ +#define WL_RXS_OTHER_ERR 0x80000000 /* Other errors */ + +/* preamble */ +#define WL_RXS_UNUSED_STUB 0x0 /**< stub to match with wlc_ethereal.h */ +#define WL_RXS_PREAMBLE_SHORT 0x00000001 /**< Short preamble */ +#define WL_RXS_PREAMBLE_LONG 0x00000002 /**< Long preamble */ +#define WL_RXS_PREAMBLE_HT_MM 0x00000003 /**< HT mixed mode preamble */ +#define WL_RXS_PREAMBLE_HT_GF 0x00000004 /**< HT green field preamble */ + +/* htflags */ +#define WL_RXS_HTF_BW_MASK 0x07 +#define WL_RXS_HTF_40 0x01 +#define WL_RXS_HTF_20L 0x02 +#define WL_RXS_HTF_20U 0x04 +#define WL_RXS_HTF_SGI 0x08 +#define WL_RXS_HTF_STBC_MASK 0x30 +#define WL_RXS_HTF_STBC_SHIFT 4 +#define WL_RXS_HTF_LDPC 0x40 + +#ifdef WLTXMONITOR +/* reuse bw-bits in ht for vht */ +#define WL_RXS_VHTF_BW_MASK 0x87 +#define WL_RXS_VHTF_40 0x01 +#define WL_RXS_VHTF_20L WL_RXS_VHTF_20LL +#define 
WL_RXS_VHTF_20U WL_RXS_VHTF_20LU +#define WL_RXS_VHTF_80 0x02 +#define WL_RXS_VHTF_20LL 0x03 +#define WL_RXS_VHTF_20LU 0x04 +#define WL_RXS_VHTF_20UL 0x05 +#define WL_RXS_VHTF_20UU 0x06 +#define WL_RXS_VHTF_40L 0x07 +#define WL_RXS_VHTF_40U 0x80 +#endif /* WLTXMONITOR */ + +/* vhtflags */ +#define WL_RXS_VHTF_STBC 0x01 +#define WL_RXS_VHTF_TXOP_PS 0x02 +#define WL_RXS_VHTF_SGI 0x04 +#define WL_RXS_VHTF_SGI_NSYM_DA 0x08 +#define WL_RXS_VHTF_LDPC_EXTRA 0x10 +#define WL_RXS_VHTF_BF 0x20 +#define WL_RXS_VHTF_DYN_BW_NONHT 0x40 +#define WL_RXS_VHTF_CODING_LDCP 0x01 + +#define WL_RXS_VHT_BW_20 0 +#define WL_RXS_VHT_BW_40 1 +#define WL_RXS_VHT_BW_20L 2 +#define WL_RXS_VHT_BW_20U 3 +#define WL_RXS_VHT_BW_80 4 +#define WL_RXS_VHT_BW_40L 5 +#define WL_RXS_VHT_BW_40U 6 +#define WL_RXS_VHT_BW_20LL 7 +#define WL_RXS_VHT_BW_20LU 8 +#define WL_RXS_VHT_BW_20UL 9 +#define WL_RXS_VHT_BW_20UU 10 +#define WL_RXS_VHT_BW_160 11 +#define WL_RXS_VHT_BW_80L 12 +#define WL_RXS_VHT_BW_80U 13 +#define WL_RXS_VHT_BW_40LL 14 +#define WL_RXS_VHT_BW_40LU 15 +#define WL_RXS_VHT_BW_40UL 16 +#define WL_RXS_VHT_BW_40UU 17 +#define WL_RXS_VHT_BW_20LLL 18 +#define WL_RXS_VHT_BW_20LLU 19 +#define WL_RXS_VHT_BW_20LUL 20 +#define WL_RXS_VHT_BW_20LUU 21 +#define WL_RXS_VHT_BW_20ULL 22 +#define WL_RXS_VHT_BW_20ULU 23 +#define WL_RXS_VHT_BW_20UUL 24 +#define WL_RXS_VHT_BW_20UUU 25 + +#define WL_RXS_NFRM_AMPDU_FIRST 0x00000001 /* first MPDU in A-MPDU */ +#define WL_RXS_NFRM_AMPDU_SUB 0x00000002 /* subsequent MPDU(s) in A-MPDU */ +#define WL_RXS_NFRM_AMSDU_FIRST 0x00000004 /* first MSDU in A-MSDU */ +#define WL_RXS_NFRM_AMSDU_SUB 0x00000008 /* subsequent MSDU(s) in A-MSDU */ + +/* HE flags */ +#define WL_RXS_HEF_SIGA_PPDU_SU 0x0000 +#define WL_RXS_HEF_SIGA_PPDU_EXT_SU 0x0001 +#define WL_RXS_HEF_SIGA_PPDU_MU 0x0002 +#define WL_RXS_HEF_SIGA_PPDU_TRIG 0x0003 +#define WL_RXS_HEF_SIGA_BSS_COLOR 0x0004 +#define WL_RXS_HEF_SIGA_BEAM_CHANGE 0x0008 +#define WL_RXS_HEF_SIGA_DL_UL 0x0010 +#define WL_RXS_HEF_SIGA_MCS 0x0020 +#define WL_RXS_HEF_SIGA_DCM 0x0040 +#define WL_RXS_HEF_SIGA_CODING 0x0080 +#define WL_RXS_HEF_SIGA_LDPC 0x0100 +#define WL_RXS_HEF_SIGA_STBC 0x0200 +#define WL_RXS_HEF_SIGA_SPATIAL_REUSE 0x0400 +#define WL_RXS_HEF_SIGA_STA_ID 0x0800 +#define WL_RXS_HEF_SIGA_SPATIAL_REUSE2 0x0800 +#define WL_RXS_HEF_SIGA_SPATIAL_REUSE3 0x1000 +#define WL_RXS_HEF_SIGA_SPATIAL_REUSE4 0x2000 +#define WL_RXS_HEF_SIGA_BW 0x4000 +#define WL_RXS_HEF_SIGA_RU_ALLOC 0x4000 +#define WL_RXS_HEF_SIGA_DOPPLER 0x8000 +#define WL_RXS_HEF_SIGA_GI 0x0002 +#define WL_RXS_HEF_SIGA_LTF_SIZE 0x0004 /* no explicit known field */ +#define WL_RXS_HEF_SIGA_NUM_LTF 0x0004 +#define WL_RXS_HEF_SIGA_PADDING 0x0008 +#define WL_RXS_HEF_SIGA_TXBF 0x0010 +#define WL_RXS_HEF_SIGA_PE 0x0020 +#define WL_RXS_HEF_SIGA_TXOP 0x0040 +#define WL_RXS_HEF_SIGA_MIDAMBLE 0x0080 + +/* https://www.radiotap.org/fields/HE-MU.html */ +#define WL_RXS_HEF_SIGB_MCS_KNOWN 0x0010 +#define WL_RXS_HEF_SIGB_DCM_KNOWN 0x0040 +#define WL_RXS_HEF_CH2_26TONE_RU_KNOWN 0x0080 +#define WL_RXS_HEF_CH1_RU_KNOWN 0x0100 +#define WL_RXS_HEF_CH2_RU_KNOWN 0x0200 +#define WL_RXS_HEF_CH1_26TONE_RU_KNOWN 0x1000 +#define WL_RXS_HEF_SIGB_COMP_KNOWN 0x4000 +#define WL_RXS_HEF_NUM_SIGB_SYMB_KNOWN 0x8000 +#define WL_RXS_HEF_BW_SIGA_KNOWN 0x0004 +#define WL_RXS_HEF_PREPUNCR_SIGA_KNOWN 0x0400 +#define WL_RXS_HEF_SIGB_SYMB_KNOWN 0x8000 +#define WL_RXS_HEF_PREPUNCR_KNOWN 0x0400 + +#include +typedef struct BWL_PRE_PACKED_STRUCT wl_txsts { + uint pkterror; /**< error flags per pkt */ + uint phytype; /**< 802.11 A/B/G /N */ + 
chanspec_t chanspec;	/**< channel spec */
+	uint16 datarate;	/**< rate in 500kbps */
+	uint8 mcs;		/**< MCS for HT frame */
+	uint8 htflags;		/**< HT modulation flags */
+	uint antenna;		/**< antenna pkt transmitted on */
+	uint pktlength;		/**< pkt length minus bcm phy hdr */
+	uint32 mactime;		/**< ? time stamp from mac, count per 1us */
+	uint preamble;		/**< Unknown, short, long */
+	uint encoding;		/**< Unknown, CCK, PBCC, OFDM, HT */
+	uint nfrmtype;		/**< special 802.11n frames(AMPDU, AMSDU) */
+	uint txflags;		/**< As defined in radiotap field 15 */
+	uint retries;		/**< Number of retries */
+	struct wl_if *wlif;	/**< wl interface */
+} BWL_POST_PACKED_STRUCT wl_txsts_t;
+#include <packed_section_end.h>
+
+#define WL_TXS_TXF_FAIL		0x01	/**< TX failed due to excessive retries */
+#define WL_TXS_TXF_CTS		0x02	/**< TX used CTS-to-self protection */
+#define WL_TXS_TXF_RTSCTS	0x04	/**< TX used RTS/CTS */
+
+#endif /* _MONITOR_H_ */
diff --git a/bcmdhd.101.10.361.x/include/msf.h b/bcmdhd.101.10.361.x/include/msf.h
new file mode 100755
index 0000000..1511ef1
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/msf.h
@@ -0,0 +1,60 @@
+/*
+ * Common interface to MSF (multi-segment format) definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _WLC_MSF_H_
+#define _WLC_MSF_H_
+
+struct wl_segment {
+	uint32 type;
+	uint32 offset;
+	uint32 length;
+	uint32 crc32;
+	uint32 flags;
+};
+typedef struct wl_segment wl_segment_t;
+
+struct wl_segment_info {
+	uint8 magic[4];
+	uint32 hdr_len;
+	uint32 crc32;
+	uint32 file_type;
+	uint32 num_segments;
+	wl_segment_t segments[1];
+};
+typedef struct wl_segment_info wl_segment_info_t;
+
+typedef struct wlc_blob_segment {
+	uint32 type;
+	uint8 *data;
+	uint32 length;
+} wlc_blob_segment_t;
+
+/** Segment types in Binary Eventlog Archive file */
+enum bea_seg_type_e {
+	MSF_SEG_TYP_RTECDC_BIN = 1,
+	MSF_SEG_TYP_LOGSTRS_BIN = 2,
+	MSF_SEG_TYP_FW_SYMBOLS = 3,
+	MSF_SEG_TYP_ROML_BIN = 4
+};
+
+#endif /* _WLC_MSF_H_ */
diff --git a/bcmdhd.101.10.361.x/include/msgtrace.h b/bcmdhd.101.10.361.x/include/msgtrace.h
new file mode 100755
index 0000000..f564999
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/msgtrace.h
@@ -0,0 +1,56 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _MSGTRACE_H
+#define _MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8 version;
+	uint8 trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+	uint16 len;	/* Len of the trace */
+	uint32 seqnum;	/* Sequence number of message. Useful if the message has been lost
+			 * because of DMA error or a bus reset (ex: SDIO Func2)
+			 */
+	/* Msgtrace type only */
+	uint32 discarded_bytes;  /* Number of discarded bytes because of trace overflow */
+	uint32 discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MSGTRACE_H */
diff --git a/bcmdhd.101.10.361.x/include/nan.h b/bcmdhd.101.10.361.x/include/nan.h
new file mode 100755
index 0000000..dbbb8ee
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/nan.h
@@ -0,0 +1,1562 @@
+/*
+ * Fundamental types and constants relating to WFA NAN
+ * (Neighbor Awareness Networking)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+#ifndef _NAN_H_
+#define _NAN_H_
+
+#include <typedefs.h>
+#include <802.11.h>
+
+/* Do we want to include p2p.h for constants like P2P_WFDS_HASH_LEN and
+ * maybe P2P_WFDS_MAX_SVC_NAME_LEN etc.?
+ */
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WiFi NAN OUI values */
+#define NAN_OUI "\x50\x6F\x9A" /* WFA OUI. WiFi-Alliance OUI */
+/* For oui_type field identifying the type and version of the NAN IE.
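+ * (Compare MBO_OUI_TYPE in mbo.h: both IEs carry the WFA OUI 50:6F:9A and
+ * are distinguished only by this type octet.)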
*/
+#define NAN_OUI_TYPE 0x13 /* Type/Version */
+#define NAN_AF_OUI_TYPE 0x18 /* Type/Version */
+/* IEEE 802.11 vendor specific information element. (Same as P2P_IE_ID.) */
+#define NAN_IE_ID 0xdd
+
+/* Same as P2P_PUB_AF_CATEGORY and DOT11_ACTION_CAT_PUBLIC */
+#define NAN_PUB_AF_CATEGORY DOT11_ACTION_CAT_PUBLIC
+/* Protected dual public action frame category */
+#define NAN_PROT_DUAL_PUB_AF_CATEGORY DOT11_ACTION_CAT_PDPA
+/* IEEE 802.11 Public Action Frame Vendor Specific. (Same as P2P_PUB_AF_ACTION.) */
+#define NAN_PUB_AF_ACTION DOT11_PUB_ACTION_VENDOR_SPEC
+/* Number of octets in hash of service name. (Same as P2P_WFDS_HASH_LEN.) */
+#define NAN_SVC_HASH_LEN 6
+/* Size of fixed length part of nan_pub_act_frame_t before attributes. */
+#define NAN_PUB_ACT_FRAME_FIXED_LEN 6
+/* Number of octets in master rank value. */
+#define NAN_MASTER_RANK_LEN 8
+/* NAN public action frame header size */
+#define NAN_PUB_ACT_FRAME_HDR_SIZE (OFFSETOF(nan_pub_act_frame_t, data))
+/* NAN network ID */
+#define NAN_NETWORK_ID "\x51\x6F\x9A\x01\x00\x00"
+/* Service Control Type length */
+#define NAN_SVC_CONTROL_TYPE_LEN 2
+/* Binding Bitmap length */
+#define NAN_BINDING_BITMAP_LEN 2
+/* Service Response Filter (SRF) control field masks */
+#define NAN_SRF_BLOOM_MASK 0x01
+#define NAN_SRF_INCLUDE_MASK 0x02
+#define NAN_SRF_INDEX_MASK 0x0C
+/* SRF Bloom Filter index shift */
+#define NAN_SRF_BLOOM_SHIFT 2
+#define NAN_SRF_INCLUDE_SHIFT 1
+/* Mask for CRC32 output, used in hash function for NAN bloom filter */
+#define NAN_BLOOM_CRC32_MASK 0xFFFF
+
+/* Attribute TLV header size */
+#define NAN_ATTR_ID_OFF 0
+#define NAN_ATTR_LEN_OFF 1
+#define NAN_ATTR_DATA_OFF 3
+
+#define NAN_ATTR_ID_LEN 1u /* ID field length */
+#define NAN_ATTR_LEN_LEN 2u /* Length field length */
+#define NAN_ATTR_HDR_LEN (NAN_ATTR_ID_LEN + NAN_ATTR_LEN_LEN)
+#define NAN_ENTRY_CTRL_LEN 1 /* Entry control field length from FAM attribute */
+#define NAN_MAP_ID_LEN 1 /* MAP ID length to signify band */
+#define NAN_OPERATING_CLASS_LEN 1 /* operating class field length from NAN FAM */
+#define NAN_CHANNEL_NUM_LEN 1 /* channel number field length 1 byte */
+
+/* generic nan attribute total length */
+#define NAN_ATTR_TOT_LEN(_nan_attr) (ltoh16_ua(((const uint8 *)(_nan_attr)) + \
+	NAN_ATTR_ID_LEN) + NAN_ATTR_HDR_LEN)
+
+/* NAN slot duration / period */
+#define NAN_MIN_TU 16
+#define NAN_TU_PER_DW 512
+#define NAN_MAX_DW 16
+#define NAN_MAX_TU (NAN_MAX_DW * NAN_TU_PER_DW)
+
+#define NAN_SLOT_DUR_0TU 0
+#define NAN_SLOT_DUR_16TU 16
+#define NAN_SLOT_DUR_32TU 32
+#define NAN_SLOT_DUR_64TU 64
+#define NAN_SLOT_DUR_128TU 128
+#define NAN_SLOT_DUR_256TU 256
+#define NAN_SLOT_DUR_512TU 512
+#define NAN_SLOT_DUR_1024TU 1024
+#define NAN_SLOT_DUR_2048TU 2048
+#define NAN_SLOT_DUR_4096TU 4096
+#define NAN_SLOT_DUR_8192TU 8192
+
+#define NAN_SOC_CHAN_2G 6 /* NAN 2.4G discovery channel */
+#define NAN_SOC_CHAN_5G_CH149 149 /* NAN 5G discovery channel if upper band allowed */
+#define NAN_SOC_CHAN_5G_CH44 44 /* NAN 5G discovery channel if only lower band allowed */
+
+/* size of ndc id */
+#define NAN_DATA_NDC_ID_SIZE 6
+
+#define NAN_AVAIL_ENTRY_LEN_RES0 7 /* Avail entry len in FAM attribute for resolution 16TU */
+#define NAN_AVAIL_ENTRY_LEN_RES1 5 /* Avail entry len in FAM attribute for resolution 32TU */
+#define NAN_AVAIL_ENTRY_LEN_RES2 4 /* Avail entry len in FAM attribute for resolution 64TU */
+
+/* map id field */
+#define NAN_MAPID_SPECIFIC_MAP_MASK 0x01 /* apply to specific map */
+#define NAN_MAPID_MAPID_MASK 0x1E
+#define 
NAN_MAPID_MAPID_SHIFT 1 +#define NAN_MAPID_SPECIFIC_MAP(_mapid) ((_mapid) & NAN_MAPID_SPECIFIC_MAP_MASK) +#define NAN_MAPID_ALL_MAPS(_mapid) (!NAN_MAPID_SPECIFIC_MAP(_mapid)) +#define NAN_MAPID_MAPID(_mapid) (((_mapid) & NAN_MAPID_MAPID_MASK) \ + >> NAN_MAPID_MAPID_SHIFT) +#define NAN_MAPID_SET_SPECIFIC_MAPID(map_id) ((((map_id) << NAN_MAPID_MAPID_SHIFT) \ + & NAN_MAPID_MAPID_MASK) | NAN_MAPID_SPECIFIC_MAP_MASK) + +/* Vendor-specific public action frame for NAN */ +typedef BWL_PRE_PACKED_STRUCT struct nan_pub_act_frame_s { + /* NAN_PUB_AF_CATEGORY 0x04 */ + uint8 category_id; + /* NAN_PUB_AF_ACTION 0x09 */ + uint8 action_field; + /* NAN_OUI 0x50-6F-9A */ + uint8 oui[DOT11_OUI_LEN]; + /* NAN_OUI_TYPE 0x13 */ + uint8 oui_type; + /* One or more NAN Attributes follow */ + uint8 data[]; +} BWL_POST_PACKED_STRUCT nan_pub_act_frame_t; + +/* NAN attributes as defined in the nan spec */ +enum { + NAN_ATTR_MASTER_IND = 0, + NAN_ATTR_CLUSTER = 1, + NAN_ATTR_SVC_ID_LIST = 2, + NAN_ATTR_SVC_DESCRIPTOR = 3, + NAN_ATTR_CONN_CAP = 4, + NAN_ATTR_INFRA = 5, + NAN_ATTR_P2P = 6, + NAN_ATTR_IBSS = 7, + NAN_ATTR_MESH = 8, + NAN_ATTR_FURTHER_NAN_SD = 9, + NAN_ATTR_FURTHER_AVAIL = 10, + NAN_ATTR_COUNTRY_CODE = 11, + NAN_ATTR_RANGING = 12, + NAN_ATTR_CLUSTER_DISC = 13, + /* nan 2.0 */ + NAN_ATTR_SVC_DESC_EXTENSION = 14, + NAN_ATTR_NAN_DEV_CAP = 15, + NAN_ATTR_NAN_NDP = 16, + NAN_ATTR_NAN_NMSG = 17, + NAN_ATTR_NAN_AVAIL = 18, + NAN_ATTR_NAN_NDC = 19, + NAN_ATTR_NAN_NDL = 20, + NAN_ATTR_NAN_NDL_QOS = 21, + NAN_ATTR_MCAST_SCHED = 22, + NAN_ATTR_UNALIGN_SCHED = 23, + NAN_ATTR_PAGING_UCAST = 24, + NAN_ATTR_PAGING_MCAST = 25, + NAN_ATTR_RANGING_INFO = 26, + NAN_ATTR_RANGING_SETUP = 27, + NAN_ATTR_FTM_RANGE_REPORT = 28, + NAN_ATTR_ELEMENT_CONTAINER = 29, + NAN_ATTR_WLAN_INFRA_EXT = 30, + NAN_ATTR_EXT_P2P_OPER = 31, + NAN_ATTR_EXT_IBSS = 32, + NAN_ATTR_EXT_MESH = 33, + NAN_ATTR_CIPHER_SUITE_INFO = 34, + NAN_ATTR_SEC_CTX_ID_INFO = 35, + NAN_ATTR_SHARED_KEY_DESC = 36, + NAN_ATTR_MCAST_SCHED_CHANGE = 37, + NAN_ATTR_MCAST_SCHED_OWNER_CHANGE = 38, + NAN_ATTR_PUBLIC_AVAILABILITY = 39, + NAN_ATTR_SUB_SVC_ID_LIST = 40, + NAN_ATTR_NDPE = 41, + /* change NAN_ATTR_MAX_ID to max ids + 1, excluding NAN_ATTR_VENDOR_SPECIFIC. 
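+
+/* Usage sketch (illustrative, not part of the original header): walking a
+ * buffer of NAN attribute TLVs with the offsets and NAN_ATTR_TOT_LEN() above.
+ * buf, buf_len and handle_attr() are hypothetical.
+ *
+ *	const uint8 *attr = buf;
+ *	uint16 rem = buf_len;
+ *	while (rem >= NAN_ATTR_HDR_LEN) {
+ *		uint16 tot = NAN_ATTR_TOT_LEN(attr);
+ *		if (tot > rem)
+ *			break;		// malformed length field
+ *		handle_attr(attr[NAN_ATTR_ID_OFF], attr + NAN_ATTR_DATA_OFF,
+ *			tot - NAN_ATTR_HDR_LEN);
+ *		attr += tot;
+ *		rem -= tot;
+ *	}
+ */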
+
+enum wifi_nan_avail_resolution {
+	NAN_AVAIL_RES_16_TU = 0,
+	NAN_AVAIL_RES_32_TU = 1,
+	NAN_AVAIL_RES_64_TU = 2,
+	NAN_AVAIL_RES_INVALID = 255
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ie_s {
+	uint8 id; /* IE ID: NAN_IE_ID 0xDD */
+	uint8 len; /* IE length */
+	uint8 oui[DOT11_OUI_LEN]; /* NAN_OUI 50:6F:9A */
+	uint8 oui_type; /* NAN_OUI_TYPE 0x13 */
+	uint8 attr[]; /* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_nan_ie_t;
+
+#define NAN_IE_HDR_SIZE (OFFSETOF(wifi_nan_ie_t, attr))
+
+/* master indication record */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_master_ind_attr_s {
+	uint8 id;
+	uint16 len;
+	uint8 master_preference;
+	uint8 random_factor;
+} BWL_POST_PACKED_STRUCT wifi_nan_master_ind_attr_t;
+
+/* cluster attr record */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_cluster_attr_s {
+	uint8 id;
+	uint16 len;
+	uint8 amr[NAN_MASTER_RANK_LEN];
+	uint8 hop_count;
+	/* Anchor Master Beacon Transmission Time */
+	uint32 ambtt;
+} BWL_POST_PACKED_STRUCT wifi_nan_cluster_attr_t;
+
+/* container for service ID records */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_id_attr_s {
+	uint8 id;
+	uint16 len;
+	uint8 svcid[0]; /* 6*len of srvc IDs */
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_id_attr_t;
+
+/* service_control bitmap for wifi_nan_svc_descriptor_attr_t below */
+#define NAN_SC_PUBLISH 0x0
+#define NAN_SC_SUBSCRIBE 0x1
+#define NAN_SC_FOLLOWUP 0x2
+/* Set to 1 if a Matching Filter field is included in descriptors. */
+#define NAN_SC_MATCHING_FILTER_PRESENT 0x4
+/* Set to 1 if a Service Response Filter field is included in descriptors. */
+#define NAN_SC_SR_FILTER_PRESENT 0x8
+/* Set to 1 if a Service Info field is included in descriptors. */
+#define NAN_SC_SVC_INFO_PRESENT 0x10
+/* range is close proximity only */
+#define NAN_SC_RANGE_LIMITED 0x20
+/* Set to 1 if binding bitmap is present in descriptors */
+#define NAN_SC_BINDING_BITMAP_PRESENT 0x40
+
+/* Service descriptor */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_descriptor_attr_s {
+	/* Attribute ID - 0x03. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* Hash of the Service Name */
+	uint8 svc_hash[NAN_SVC_HASH_LEN];
+	/* Publish or subscribe instance id */
+	uint8 instance_id;
+	/* Requestor Instance ID */
+	uint8 requestor_id;
+	/* Service Control Bitmask. Also determines what data follows. */
+	uint8 svc_control;
+	/* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_descriptor_attr_t;
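+
+/* Usage sketch (illustrative, not part of the original header): reading
+ * svc_control to learn which optional fields trail the fixed descriptor.
+ * sda is a hypothetical pointer at a received attribute.
+ *
+ *	uint8 sc = sda->svc_control;
+ *	bool is_subscribe = ((sc & 0x03) == NAN_SC_SUBSCRIBE);
+ *	bool has_filter = (sc & NAN_SC_MATCHING_FILTER_PRESENT) != 0;
+ *	bool has_srf = (sc & NAN_SC_SR_FILTER_PRESENT) != 0;
+ *	bool has_svc_info = (sc & NAN_SC_SVC_INFO_PRESENT) != 0;
+ */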
+
+/* IBSS attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ibss_attr_s {
+	/* Attribute ID - 0x07. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* BSSID of the ibss */
+	struct ether_addr bssid;
+	/*
+	 map control bits:
+	 [0-3]: Id for associated further avail map attribute
+	 [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	 [6] : repeat: 0 - applies to next DW only, 1 - repeats for the next 16 intervals
+	 [7] : reserved
+	 */
+	uint8 map_ctrl;
+	/* avail. intervals bitmap, var len */
+	uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t;
+
+/* Country code attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_country_code_attr_s {
+	/* Attribute ID - 0x0B. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* Condensed Country String first two octets */
+	uint8 country_str[2];
+} BWL_POST_PACKED_STRUCT wifi_nan_country_code_attr_t;
+
+/* Further Availability MAP attr */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s {
+	/* Attribute ID - 0x0A. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* MAP id: val [0..15], values[16-255] reserved */
+	uint8 map_id;
+	/* availability entry, var len */
+	uint8 avil_entry[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_favail_attr_t;
+
+/* Further Availability MAP entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_s {
+	/*
+	 entry control
+	 [0-1]: avail interval duration: 0:16ms; 1:32ms; 2:64ms;
+	 [2:7] reserved
+	 */
+	uint8 entry_ctrl;
+	/* operating class: freq band etc IEEE 802.11 */
+	uint8 opclass;
+	/* channel number */
+	uint8 chan;
+	/* avail bmp, var len */
+	uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_t;
+
+/* Map control Field */
+#define NAN_MAPCTRL_IDMASK 0x7
+#define NAN_MAPCTRL_DURSHIFT 4
+#define NAN_MAPCTRL_DURMASK 0x30
+#define NAN_MAPCTRL_REPEAT 0x40
+#define NAN_MAPCTRL_REPEATSHIFT 6
+
+#define NAN_VENDOR_TYPE_RTT 0
+#define NAN_VENDOR_TYPE_P2P 1
+
+/* Vendor Specific Attribute - old definition */
+/* TODO remove */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vendor_attr_s {
+	uint8 id; /* 0xDD */
+	uint16 len; /* IE length */
+	uint8 oui[DOT11_OUI_LEN]; /* 00-90-4C */
+	uint8 type; /* attribute type */
+	uint8 attr[1]; /* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_nan_vendor_attr_t;
+
+#define NAN_VENDOR_HDR_SIZE (OFFSETOF(wifi_nan_vendor_attr_t, attr))
+
+/* vendor specific attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vndr_attr_s {
+	uint8 id; /* 0xDD */
+	uint16 len; /* length of following fields */
+	uint8 oui[DOT11_OUI_LEN]; /* vendor specific OUI */
+	uint8 body[];
+} BWL_POST_PACKED_STRUCT wifi_nan_vndr_attr_t;
+
+/* p2p operation attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_p2p_op_attr_s {
+	/* Attribute ID - 0x06. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* P2P device role */
+	uint8 dev_role;
+	/* BSSID of the ibss */
+	struct ether_addr p2p_dev_addr;
+	/*
+	 map control bits:
+	 [0-3]: Id for associated further avail map attribute
+	 [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	 [6] : repeat: 0 - applies to next DW only, 1 - repeats for the next 16 intervals
+	 [7] : reserved
+	 */
+	uint8 map_ctrl;
+	/* avail. intervals bitmap */
+	uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_p2p_op_attr_t;
+
+/* ranging attribute */
+#define NAN_RANGING_MAP_CTRL_ID_SHIFT 0
+#define NAN_RANGING_MAP_CTRL_ID_MASK 0x0F
+#define NAN_RANGING_MAP_CTRL_DUR_SHIFT 4
+#define NAN_RANGING_MAP_CTRL_DUR_MASK 0x30
+#define NAN_RANGING_MAP_CTRL_REPEAT_SHIFT 6
+#define NAN_RANGING_MAP_CTRL_REPEAT_MASK 0x40
+#define NAN_RANGING_MAP_CTRL_REPEAT_DW(_ctrl) (((_ctrl) & \
+	NAN_RANGING_MAP_CTRL_DUR_MASK) ? 16 : 1)
+#define NAN_RANGING_MAP_CTRL(_id, _dur, _repeat) (\
+	(((_id) << NAN_RANGING_MAP_CTRL_ID_SHIFT) & \
+	NAN_RANGING_MAP_CTRL_ID_MASK) | \
+	(((_dur) << NAN_RANGING_MAP_CTRL_DUR_SHIFT) & \
+	NAN_RANGING_MAP_CTRL_DUR_MASK) | \
+	(((_repeat) << NAN_RANGING_MAP_CTRL_REPEAT_SHIFT) & \
+	NAN_RANGING_MAP_CTRL_REPEAT_MASK))
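+
+/* Usage sketch (illustrative, not part of the original header): composing and
+ * decoding a ranging map control octet with the macros above.
+ *
+ *	uint8 mc = NAN_RANGING_MAP_CTRL(2, 1, 1);	// map id 2, duration code 1, repeat
+ *	uint8 map_id = (mc & NAN_RANGING_MAP_CTRL_ID_MASK) >> NAN_RANGING_MAP_CTRL_ID_SHIFT;
+ *	uint8 ndw = NAN_RANGING_MAP_CTRL_REPEAT_DW(mc);	// 16 if duration bits set, else 1
+ */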
+
+enum {
+	NAN_RANGING_PROTO_FTM = 0
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_attr_s {
+	uint8 id; /* 0x0C */
+	uint16 len; /* length that follows */
+	struct ether_addr dev_addr; /* device mac address */
+
+	/*
+	 map control bits:
+	 [0-3]: Id for associated further avail map attribute
+	 [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	 [6] : repeat: 0 - applies to next DW only, 1 - repeats for the next 16 intervals
+	 [7] : reserved
+	 */
+	uint8 map_ctrl;
+
+	uint8 protocol; /* FTM = 0 */
+	uint32 avail_bmp; /* avail interval bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_attr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_info_attr_s {
+	uint8 id; /* 0x1A */
+	uint16 len; /* length that follows */
+	/*
+	 location info availability bit map
+	 0: LCI Local Coordinates
+	 1: Geospatial LCI WGS84
+	 2: Civic Location
+	 3: Last Movement Indication
+	 [4-7]: reserved
+	 */
+	uint8 lc_info_avail;
+	/*
+	 Last movement indication
+	 present if bit 3 is set in lc_info_avail
+	 cluster TSF[29:14] at the last detected platform movement
+	 */
+	uint16 last_movement;
+
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_info_attr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_hdr_s {
+	uint8 id; /* 0x1B */
+	uint16 len; /* length that follows */
+	uint8 dialog_token; /* Identify req and resp */
+	uint8 type_status; /* bits 0-3 type, 4-7 status */
+	/* reason code
+	 i. when frm type = response & status = reject
+	 ii. frm type = termination
+	 */
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_s {
+
+	wifi_nan_ranging_setup_attr_hdr_t setup_attr_hdr;
+	/* Below fields not required when frm type = termination */
+	uint8 ranging_ctrl; /* Bit 0: ranging report required or not */
+	uint8 ftm_params[3];
+	uint8 data[]; /* schedule entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_t;
+
+#define NAN_RANGE_SETUP_ATTR_OFFSET_TBM_INFO (OFFSETOF(wifi_nan_ranging_setup_attr_t, data))
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_report_attr_s {
+	uint8 id; /* 0x1C */
+	uint16 len; /* length that follows */
+	/* FTM report format in spec.
+	 See definition in 9.4.2.22.18 in 802.11mc D5.0
+	 */
+	uint8 entry_count;
+	uint8 data[]; /* Variable size range entry */
+	/*
+	 dot11_ftm_range_entry_t entries[entry_count];
+	 uint8 error_count;
+	 dot11_ftm_error_entry_t errors[error_count];
+	 */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_report_attr_t;
+
+/* Ranging control flags */
+#define NAN_RNG_REPORT_REQUIRED 0x01
+#define NAN_RNG_FTM_PARAMS_PRESENT 0x02
+#define NAN_RNG_SCHED_ENTRY_PRESENT 0x04
+
+/* Location info flags */
+#define NAN_RNG_LOCATION_FLAGS_LOCAL_CORD 0x1
+#define NAN_RNG_LOCATION_FLAGS_GEO_SPATIAL 0x2
+#define NAN_RNG_LOCATION_FLAGS_CIVIC 0x4
+#define NAN_RNG_LOCATION_FLAGS_LAST_MVMT 0x8
+
+/* Last movement mask and shift value */
+#define NAN_RNG_LOCATION_MASK_LAST_MVT_TSF 0x3FFFC000
+#define NAN_RNG_LOCATION_SHIFT_LAST_MVT_TSF 14
+
+/* FTM params shift values */
+#define NAN_FTM_MAX_BURST_DUR_SHIFT 0
+#define NAN_FTM_MIN_FTM_DELTA_SHIFT 4
+#define NAN_FTM_NUM_FTM_SHIFT 10
+#define NAN_FTM_FORMAT_BW_SHIFT 15
+
+/* FTM params mask */
+#define NAN_FTM_MAX_BURST_DUR_MASK 0x00000F
+#define NAN_FTM_MIN_FTM_DELTA_MASK 0x00003F
+#define NAN_FTM_NUM_FTM_MASK 0x00001F
+#define NAN_FTM_FORMAT_BW_MASK 0x00003F
+
+#define FTM_PARAMS_BURSTTMO_FACTOR 250
+
+/* set to value to uint32 */
+#define NAN_FTM_SET_BURST_DUR(ftm, dur) (ftm |= (((dur + 2) & NAN_FTM_MAX_BURST_DUR_MASK) <<\
+	NAN_FTM_MAX_BURST_DUR_SHIFT))
+#define NAN_FTM_SET_FTM_DELTA(ftm, delta) (ftm |= (((delta/100) & NAN_FTM_MIN_FTM_DELTA_MASK) <<\
+	NAN_FTM_MIN_FTM_DELTA_SHIFT))
+#define NAN_FTM_SET_NUM_FTM(ftm, delta) (ftm |= ((delta & NAN_FTM_NUM_FTM_MASK) <<\
+	NAN_FTM_NUM_FTM_SHIFT))
+#define NAN_FTM_SET_FORMAT_BW(ftm, delta) (ftm |= ((delta & NAN_FTM_FORMAT_BW_MASK) <<\
+	NAN_FTM_FORMAT_BW_SHIFT))
+/* set uint32 to attribute */
+#define NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm_u32, ftm_attr) {ftm_attr[0] = ftm_u32 & 0xFF; \
+	ftm_attr[1] = (ftm_u32 >> 8) & 0xFF; ftm_attr[2] = (ftm_u32 >> 16) & 0xFF;}
+
+/* get attribute to uint32 */
+#define NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_p, ftm_u32) (ftm_u32 = ftm_p[0] | ftm_p[1] << 8 | \
+	ftm_p[2] << 16)
+/* get param values from uint32 */
+#define NAN_FTM_GET_BURST_DUR(ftm) (((ftm >> NAN_FTM_MAX_BURST_DUR_SHIFT) &\
+	NAN_FTM_MAX_BURST_DUR_MASK))
+#define NAN_FTM_GET_BURST_DUR_USEC(_val) ((1 << ((_val)-2)) * FTM_PARAMS_BURSTTMO_FACTOR)
+#define NAN_FTM_GET_FTM_DELTA(ftm) (((ftm >> NAN_FTM_MIN_FTM_DELTA_SHIFT) &\
+	NAN_FTM_MIN_FTM_DELTA_MASK)*100)
+#define NAN_FTM_GET_NUM_FTM(ftm) ((ftm >> NAN_FTM_NUM_FTM_SHIFT) &\
+	NAN_FTM_NUM_FTM_MASK)
+#define NAN_FTM_GET_FORMAT_BW(ftm) ((ftm >> NAN_FTM_FORMAT_BW_SHIFT) &\
+	NAN_FTM_FORMAT_BW_MASK)
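+
+/* Usage sketch (illustrative, not part of the original header): packing FTM
+ * parameters into the 3-octet ftm_params field of the ranging setup
+ * attribute, then reading one back.
+ *
+ *	uint32 ftm = 0;
+ *	uint8 ftm_attr[3];
+ *	NAN_FTM_SET_BURST_DUR(ftm, 6);		// the macro stores (dur + 2)
+ *	NAN_FTM_SET_FTM_DELTA(ftm, 600);	// stored in units of 100 us
+ *	NAN_FTM_SET_NUM_FTM(ftm, 8);
+ *	NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm, ftm_attr);
+ *	// ...
+ *	uint32 rd;
+ *	NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_attr, rd);
+ *	uint32 num_ftm = NAN_FTM_GET_NUM_FTM(rd);	// 8
+ */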
+
+#define NAN_CONN_CAPABILITY_WFD 0x0001
+#define NAN_CONN_CAPABILITY_WFDS 0x0002
+#define NAN_CONN_CAPABILITY_TDLS 0x0004
+#define NAN_CONN_CAPABILITY_INFRA 0x0008
+#define NAN_CONN_CAPABILITY_IBSS 0x0010
+#define NAN_CONN_CAPABILITY_MESH 0x0020
+
+#define NAN_DEFAULT_MAP_ID 0 /* nan default map id */
+#define NAN_DEFAULT_MAP_CTRL 0 /* nan default map control */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_conn_cap_attr_s {
+	/* Attribute ID - 0x04. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	uint16 conn_cap_bmp; /* Connection capability bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_conn_cap_attr_t;
+
+/* NAN Element container Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_container_attr_s {
+	uint8 id; /* id - 0x1D */
+	uint16 len; /* Total length of following IEs */
+	uint8 map_id; /* map id */
+	uint8 data[1]; /* Data pointing to one or more IEs */
+} BWL_POST_PACKED_STRUCT wifi_nan_container_attr_t;
+
+/* NAN 2.0 NAN avail attribute */
+
+/* Availability Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_attr_s {
+	uint8 id; /* id - 0x12 */
+	uint16 len; /* total length */
+	uint8 seqid; /* sequence id */
+	uint16 ctrl; /* attribute control */
+	uint8 entry[1]; /* availability entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_attr_t;
+
+/* for processing/building time bitmap info in nan_avail_entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_time_bitmap_s {
+	uint16 ctrl; /* Time bitmap control */
+	uint8 len; /* Time bitmap length */
+	uint8 bitmap[]; /* Time bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_time_bitmap_t;
+
+/* Availability Entry format */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_attr_s {
+	uint16 len; /* Length */
+	uint16 entry_cntrl; /* Entry Control */
+	uint8 var[]; /* Time bitmap and channel entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_attr_t;
+
+/* FAC Channel Entry (section 10.7.19.1.5) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_chan_entry_s {
+	uint8 oper_class; /* Operating Class */
+	uint16 chan_bitmap; /* Channel Bitmap */
+	uint8 primary_chan_bmp; /* Primary Channel Bitmap */
+	uint8 aux_chan[0]; /* Auxiliary Channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_chan_entry_t;
+
+/* Channel entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_s {
+	uint8 opclass; /* Operating class */
+	uint16 chan_bitmap; /* Channel bitmap */
+	uint8 prim_bitmap; /* Primary channel bitmap */
+	uint16 aux_bitmap; /* Auxiliary channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_t;
+
+/* Type of Availability: committed */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL_MASK 0x1
+/* Type of Availability: potential */
+#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL_MASK 0x2
+/* Type of Availability: conditional */
+#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL_MASK 0x4
+
+#define NAN_AVAIL_CTRL_MAP_ID_MASK 0x000F
+#define NAN_AVAIL_CTRL_MAP_ID(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MAP_ID_MASK)
+#define NAN_AVAIL_CTRL_COMM_CHANGED_MASK 0x0010
+#define NAN_AVAIL_CTRL_COMM_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_COMM_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_POTEN_CHANGED_MASK 0x0020
+#define NAN_AVAIL_CTRL_POTEN_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_POTEN_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK 0x0040
+#define NAN_AVAIL_CTRL_PUBLIC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_NDC_CHANGED_MASK 0x0080
+#define NAN_AVAIL_CTRL_NDC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_NDC_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_MCAST_CHANGED_MASK 0x0100
+#define NAN_AVAIL_CTRL_MCAST_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK 0x0200
+#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_CHANGED_FLAGS_MASK 0x03f0
+
+#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK 0x07
+#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE(_flags) ((_flags) & NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK)
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_MASK 0x18
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT 3
+#define NAN_AVAIL_ENTRY_CTRL_USAGE(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_USAGE_MASK) \
+	>> NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_UTIL_MASK 0xE0
+#define NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT 5
+#define NAN_AVAIL_ENTRY_CTRL_UTIL(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_UTIL_MASK) \
+	>> NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK 0xF00
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT 8
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK) \
+	>> NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK 0x1000
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT 12
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT(_flags) (((_flags) & \
+	NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK) >> NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_TIME_BITMAP_PRESENT 1
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_PREFERENCE 0x3
+
+#define NAN_TIME_BMAP_CTRL_BITDUR_MASK 0x07
+#define NAN_TIME_BMAP_CTRL_BITDUR(_flags) ((_flags) & NAN_TIME_BMAP_CTRL_BITDUR_MASK)
+#define NAN_TIME_BMAP_CTRL_PERIOD_MASK 0x38
+#define NAN_TIME_BMAP_CTRL_PERIOD_SHIFT 3
+#define NAN_TIME_BMAP_CTRL_PERIOD(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_PERIOD_MASK) \
+	>> NAN_TIME_BMAP_CTRL_PERIOD_SHIFT)
+#define NAN_TIME_BMAP_CTRL_OFFSET_MASK 0x7FC0
+#define NAN_TIME_BMAP_CTRL_OFFSET_SHIFT 6
+#define NAN_TIME_BMAP_CTRL_OFFSET(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_OFFSET_MASK) \
+	>> NAN_TIME_BMAP_CTRL_OFFSET_SHIFT)
+#define NAN_TIME_BMAP_LEN(avail_entry) \
+	(*(uint8 *)(((wifi_nan_avail_entry_attr_t *)avail_entry)->var + 2))
+
+#define NAN_AVAIL_CHAN_LIST_HDR_LEN 1
+#define NAN_AVAIL_CHAN_LIST_TYPE_BAND 0x00
+#define NAN_AVAIL_CHAN_LIST_TYPE_CHANNEL 0x01
+#define NAN_AVAIL_CHAN_LIST_NON_CONTIG_BW 0x02
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK 0xF0
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT 4
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES(_ctrl) (((_ctrl) & NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK) \
+	>> NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT)
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_list_s {
+	uint8 chan_info;
+	uint8 var[0];
+} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_list_t;
+
+/* define for chan_info */
+#define NAN_CHAN_OP_CLASS_MASK 0x01
+#define NAN_CHAN_NON_CONT_BW_MASK 0x02
+#define NAN_CHAN_RSVD_MASK 0x03
+#define NAN_CHAN_NUM_ENTRIES_MASK 0xF0
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s {
+	uint8 band[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t;
+
+/* Type of Availability: committed */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL 0x1
+/* Type of Availability: potential */
+#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL 0x2
+/* Type of Availability: conditional */
+#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL 0x4
+/* Committed + Potential */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_POTEN \
+	(NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL)
+/* Conditional + Potential */
+#define NAN_ENTRY_CNTRL_TYPE_COND_POTEN \
+	(NAN_ENTRY_CNTRL_TYPE_COND_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL)
+
+/* Type of Availability */
+#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_MASK 0x07
+#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_SHIFT 0
+/* Usage Preference */
+#define NAN_ENTRY_CNTRL_USAGE_PREF_MASK 0x18
+#define NAN_ENTRY_CNTRL_USAGE_PREF_SHIFT 3
+/* Utilization */
+#define NAN_ENTRY_CNTRL_UTIL_MASK 0x1E0
+#define NAN_ENTRY_CNTRL_UTIL_SHIFT 5
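+
+/* Usage sketch (illustrative, not part of the original header): decoding the
+ * 16-bit entry_cntrl of an availability entry; entry is a hypothetical
+ * wifi_nan_avail_entry_attr_t pointer and ec is assumed host byte order.
+ *
+ *	uint16 ec = entry->entry_cntrl;
+ *	uint8 avail_type = NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE(ec);
+ *	bool committed = (avail_type & NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL_MASK) != 0;
+ *	uint8 usage = NAN_AVAIL_ENTRY_CTRL_USAGE(ec);
+ *	bool has_tbmp = NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT(ec) ==
+ *		NAN_AVAIL_ENTRY_CTRL_TIME_BITMAP_PRESENT;
+ */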
+
+/* Time Bitmap Control field (section 5.7.18.2.3) */
+
+/* Reserved */
+#define NAN_TIME_BMP_CNTRL_RSVD_MASK 0x01
+#define NAN_TIME_BMP_CNTRL_RSVD_SHIFT 0
+/* Bitmap Len */
+#define NAN_TIME_BMP_CNTRL_BMP_LEN_MASK 0x7E
+#define NAN_TIME_BMP_CNTRL_BMP_LEN_SHIFT 1
+/* Bit Duration */
+#define NAN_TIME_BMP_CNTRL_BIT_DUR_MASK 0x380
+#define NAN_TIME_BMP_CNTRL_BIT_DUR_SHIFT 7
+/* Period */
+#define NAN_TIME_BMP_CNTRL_PERIOD_MASK 0x1C00
+#define NAN_TIME_BMP_CNTRL_PERIOD_SHIFT 10
+/* Start Offset */
+#define NAN_TIME_BMP_CNTRL_START_OFFSET_MASK 0x3FE000
+#define NAN_TIME_BMP_CNTRL_START_OFFSET_SHIFT 13
+/* Reserved */
+#define NAN_TIME_BMP_CNTRL_RESERVED_MASK 0xC00000
+#define NAN_TIME_BMP_CNTRL_RESERVED_SHIFT 22
+
+/* Time Bitmap Control field: Period */
+typedef enum
+{
+	NAN_TIME_BMP_CTRL_PERIOD_128TU = 1,
+	NAN_TIME_BMP_CTRL_PERIOD_256TU = 2,
+	NAN_TIME_BMP_CTRL_PERIOD_512TU = 3,
+	NAN_TIME_BMP_CTRL_PERIOD_1024TU = 4,
+	NAN_TIME_BMP_CTRL_PERIOD_2048U = 5,
+	NAN_TIME_BMP_CTRL_PERIOD_4096U = 6,
+	NAN_TIME_BMP_CTRL_PERIOD_8192U = 7
+} nan_time_bmp_ctrl_repeat_interval_t;
+
+enum
+{
+	NAN_TIME_BMP_BIT_DUR_16TU_IDX = 0,
+	NAN_TIME_BMP_BIT_DUR_32TU_IDX = 1,
+	NAN_TIME_BMP_BIT_DUR_64TU_IDX = 2,
+	NAN_TIME_BMP_BIT_DUR_128TU_IDX = 3
+};
+
+enum
+{
+	NAN_TIME_BMP_BIT_DUR_IDX_0 = 16,
+	NAN_TIME_BMP_BIT_DUR_IDX_1 = 32,
+	NAN_TIME_BMP_BIT_DUR_IDX_2 = 64,
+	NAN_TIME_BMP_BIT_DUR_IDX_3 = 128
+};
+
+enum
+{
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_1 = 128,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_2 = 256,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_3 = 512,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_4 = 1024,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_5 = 2048,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_6 = 4096,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_7 = 8192
+};
+
+/* Channel Entries List field */
+
+/* Type */
+#define NAN_CHAN_ENTRY_TYPE_MASK 0x01
+#define NAN_CHAN_ENTRY_TYPE_SHIFT 0
+/* Channel Entry Length Indication */
+#define NAN_CHAN_ENTRY_LEN_IND_MASK 0x02
+#define NAN_CHAN_ENTRY_LEN_IND_SHIFT 1
+/* Reserved */
+#define NAN_CHAN_ENTRY_RESERVED_MASK 0x0C
+#define NAN_CHAN_ENTRY_RESERVED_SHIFT 2
+/* Number of FAC Band or Channel Entries */
+#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_MASK 0xF0
+#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_SHIFT 4
+
+#define NAN_CHAN_ENTRY_TYPE_BANDS 0
+#define NAN_CHAN_ENTRY_TYPE_OPCLASS_CHANS 1
+
+#define NAN_CHAN_ENTRY_BW_LT_80MHZ 0
+#define NAN_CHAN_ENTRY_BW_EQ_160MHZ 1
+
+/*
+ * NDL Attribute WFA Tech. Spec ver 1.0.r12 (section 10.7.19.2)
+ */
+#define NDL_ATTR_IM_MAP_ID_LEN 1
+#define NDL_ATTR_IM_TIME_BMP_CTRL_LEN 2
+#define NDL_ATTR_IM_TIME_BMP_LEN_LEN 1
+
+/*
+ * NDL Control field - Table xx
+ */
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT_MASK 0x01
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT 0
+#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_MASK 0x02
+#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT 1
+#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_MASK 0x04
+#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT 2
+#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_MASK 0x08
+#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT 3
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_MASK 0x10 /* max idle period */
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT 4
+#define NDL_ATTR_CTRL_NDL_TYPE_MASK 0x20 /* NDL type */
+#define NDL_ATTR_CTRL_NDL_TYPE_SHIFT 5
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_MASK 0xC0 /* NDL Setup Reason */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_SHIFT 6
+
+/* NDL Type */
+#define NDL_ATTR_CTRL_NDL_TYPE_S_NDL 0x0 /* S-NDL */
+#define NDL_ATTR_CTRL_NDL_TYPE_P_NDL 0x1 /* P-NDL */
+
+/* NDL setup Reason */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_NDP_RANG 0x0 /* NDP or Ranging */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_FSD_GAS 0x1 /* FSD using GAS */
+
+#define NAN_NDL_TYPE_MASK 0x0F
+#define NDL_ATTR_TYPE_STATUS_REQUEST 0x00
+#define NDL_ATTR_TYPE_STATUS_RESPONSE 0x01
+#define NDL_ATTR_TYPE_STATUS_CONFIRM 0x02
+#define NDL_ATTR_TYPE_STATUS_CONTINUED 0x00
+#define NDL_ATTR_TYPE_STATUS_ACCEPTED 0x10
+#define NDL_ATTR_TYPE_STATUS_REJECTED 0x20
+
+#define NAN_NDL_TYPE_CHECK(_ndl, x) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == (x))
+#define NAN_NDL_REQUEST(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+	NDL_ATTR_TYPE_STATUS_REQUEST)
+#define NAN_NDL_RESPONSE(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+	NDL_ATTR_TYPE_STATUS_RESPONSE)
+#define NAN_NDL_CONFIRM(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+	NDL_ATTR_TYPE_STATUS_CONFIRM)
+
+#define NAN_NDL_STATUS_SHIFT 4
+#define NAN_NDL_STATUS_MASK 0xF0
+#define NAN_NDL_CONT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+	NDL_ATTR_TYPE_STATUS_CONTINUED)
+#define NAN_NDL_ACCEPT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+	NDL_ATTR_TYPE_STATUS_ACCEPTED)
+#define NAN_NDL_REJECT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+	NDL_ATTR_TYPE_STATUS_REJECTED)
+#define NAN_NDL_FRM_STATUS(_ndl) \
+	(((_ndl)->type_status & NAN_NDL_STATUS_MASK) >> NAN_NDL_STATUS_SHIFT)
+
+#define NDL_ATTR_CTRL_NONE 0
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT (1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_IMSCHED_PRESENT (1 << NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_NDC_PRESENT (1 << NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_NDL_QOS_PRESENT (1 << NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT (1 << NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT)
+
+#define NA_NDL_IS_IMMUT_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_IMSCHED_PRESENT)
+#define NA_NDL_IS_PEER_ID_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_PEER_ID_PRESENT)
+#define NA_NDL_IS_MAX_IDLE_PER_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT)
+
+#define NDL_ATTR_PEERID_LEN 1
+#define NDL_ATTR_MAX_IDLE_PERIOD_LEN 2
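+
+/* Usage sketch (illustrative, not part of the original header): classifying a
+ * received NDL attribute (wifi_nan_ndl_attr_t, declared just below) by the
+ * type and status subfields packed into type_status.
+ *
+ *	if (NAN_NDL_REQUEST(ndl)) {
+ *		// build a response of type NDL_ATTR_TYPE_STATUS_RESPONSE
+ *	} else if (NAN_NDL_RESPONSE(ndl) && NAN_NDL_REJECT(ndl)) {
+ *		uint8 why = ndl->reason;
+ *	}
+ */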
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_attr_s {
+	uint8 id; /* NAN_ATTR_NAN_NDL = 0x14 */
+	uint16 len; /* Length of the fields in the attribute */
+	uint8 dialog_token; /* Identify req and resp */
+	uint8 type_status; /* Bits[3-0] type subfield, Bits[7-4] status subfield */
+	uint8 reason; /* Identifies reject reason */
+	uint8 ndl_ctrl; /* NDL control field */
+	uint8 var[]; /* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndl_attr_t;
+
+/*
+ * NDL QoS Attribute WFA Tech. Spec ver r26
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_qos_attr_s {
+	uint8 id; /* NAN_ATTR_NAN_NDL_QOS = 0x15 */
+	uint16 len; /* Length of the attribute field following */
+	uint8 min_slots; /* Min. number of FAW slots needed per DW interval */
+	uint16 max_latency; /* Max interval between non-cont FAW */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndl_qos_attr_t;
+
+/* no preference to min time slots */
+#define NAN_NDL_QOS_MIN_SLOT_NO_PREF 0
+/* no preference to no. of slots between two non-contiguous slots */
+#define NAN_NDL_QOS_MAX_LAT_NO_PREF 0xFFFF
+
+/* Device Capability Attribute */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_dev_cap_s {
+	uint8 id; /* 0x0F */
+	uint16 len; /* Length */
+	uint8 map_id; /* map id */
+	uint16 commit_dw_info; /* Committed DW Info */
+	uint8 bands_supported; /* Supported Bands */
+	uint8 op_mode; /* Operation Mode */
+	uint8 num_antennas; /* Bit 0-3 tx, 4-7 rx */
+	uint16 chan_switch_time; /* Max channel switch time in us */
+	uint8 capabilities; /* DFS Master, Extended key id etc */
+} BWL_POST_PACKED_STRUCT wifi_nan_dev_cap_t;
+
+/* map id related */
+
+/* all maps */
+#define NAN_DEV_CAP_ALL_MAPS_FLAG_MASK 0x1 /* nan default map control */
+#define NAN_DEV_CAP_ALL_MAPS_FLAG_SHIFT 0
+/* map id */
+#define NAN_DEV_CAP_MAPID_MASK 0x1E
+#define NAN_DEV_CAP_MAPID_SHIFT 1
+
+/* Awake DW Info field format */
+
+/* 2.4GHz DW */
+#define NAN_DEV_CAP_AWAKE_DW_2G_MASK 0x07
+/* 5GHz DW */
+#define NAN_DEV_CAP_AWAKE_DW_5G_MASK 0x38
+/* Reserved */
+#define NAN_DEV_CAP_AWAKE_DW_RSVD_MASK 0xC0
+
+/* bit shift for dev cap */
+#define NAN_DEV_CAP_AWAKE_DW_2G_SHIFT 0
+#define NAN_DEV_CAP_AWAKE_DW_5G_SHIFT 3
+
+/* Device Capability Attribute Format */
+
+/* Committed DW Info field format */
+/* 2.4GHz DW */
+#define NAN_DEV_CAP_COMMIT_DW_2G_MASK 0x07
+#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_MASK 0x3C0
+/* 5GHz DW */
+#define NAN_DEV_CAP_COMMIT_DW_5G_MASK 0x38
+#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_MASK 0x3C00
+/* Reserved */
+#define NAN_DEV_CAP_COMMIT_DW_RSVD_MASK 0xC000
+/* Committed DW bit shift for dev cap */
+#define NAN_DEV_CAP_COMMIT_DW_2G_SHIFT 0
+#define NAN_DEV_CAP_COMMIT_DW_5G_SHIFT 3
+#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_SHIFT 6
+#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_SHIFT 10
+/* Operation Mode */
+#define NAN_DEV_CAP_OP_PHY_MODE_HT_ONLY 0x00
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT 0x01
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT_8080 0x02
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT_160 0x04
+#define NAN_DEV_CAP_OP_PAGING_NDL 0x08
+
+#define NAN_DEV_CAP_OP_MODE_VHT_MASK 0x01
+#define NAN_DEV_CAP_OP_MODE_VHT_SHIFT 0
+#define NAN_DEV_CAP_OP_MODE_VHT8080_MASK 0x02
+#define NAN_DEV_CAP_OP_MODE_VHT8080_SHIFT 1
+#define NAN_DEV_CAP_OP_MODE_VHT160_MASK 0x04
+#define NAN_DEV_CAP_OP_MODE_VHT160_SHIFT 2
+#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_MASK 0x08
+#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_SHIFT 3
+
+#define NAN_DEV_CAP_RX_ANT_SHIFT 4
+#define NAN_DEV_CAP_TX_ANT_MASK 0x0F
+#define NAN_DEV_CAP_RX_ANT_MASK 0xF0
+#define NAN_DEV_CAP_TX_ANT(_ant) ((_ant) & NAN_DEV_CAP_TX_ANT_MASK)
+#define NAN_DEV_CAP_RX_ANT(_ant) (((_ant) & NAN_DEV_CAP_RX_ANT_MASK) \
+	>> NAN_DEV_CAP_RX_ANT_SHIFT)
+
+/* Device capabilities */
+
+/* DFS master capability */
+#define NAN_DEV_CAP_DFS_MASTER_MASK 0x01
+#define NAN_DEV_CAP_DFS_MASTER_SHIFT 0
+/* extended iv cap */
+#define NAN_DEV_CAP_EXT_KEYID_MASK 0x02
+#define NAN_DEV_CAP_EXT_KEYID_SHIFT 1
+/* NDPE attribute support */
+#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK 0x08
+#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT(_cap) ((_cap) & NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK)
+
+/* Band IDs */
+enum {
+	NAN_BAND_ID_TVWS = 0,
+	NAN_BAND_ID_SIG = 1, /* Sub 1 GHz */
+	NAN_BAND_ID_2G = 2, /* 2.4 GHz */
+	NAN_BAND_ID_3G = 3, /* 3.6 GHz */
+	NAN_BAND_ID_5G = 4, /* 4.9 & 5 GHz */
+	NAN_BAND_ID_60G = 5, /* 60 GHz */
+	NAN_BAND_ID_6G = 6 /* 6 GHz (proprietary) */
+};
+typedef uint8 nan_band_id_t;
+
+/* NAN supported band in device capability */
+#define NAN_DEV_CAP_SUPPORTED_BANDS_2G (1 << NAN_BAND_ID_2G)
+#define NAN_DEV_CAP_SUPPORTED_BANDS_5G (1 << NAN_BAND_ID_5G)
+
+/*
+ * Unaligned schedule attribute section 10.7.19.6 spec. ver r15
+ */
+#define NAN_ULW_ATTR_CTRL_SCHED_ID_MASK 0x000F
+#define NAN_ULW_ATTR_CTRL_SCHED_ID_SHIFT 0
+#define NAN_ULW_ATTR_CTRL_SEQ_ID_MASK 0xFF00
+#define NAN_ULW_ATTR_CTRL_SEQ_ID_SHIFT 8
+
+#define NAN_ULW_OVWR_ALL_MASK 0x01
+#define NAN_ULW_OVWR_ALL_SHIFT 0
+#define NAN_ULW_OVWR_MAP_ID_MASK 0x1E
+#define NAN_ULW_OVWR_MAP_ID_SHIFT 1
+
+#define NAN_ULW_CTRL_TYPE_MASK 0x03
+#define NAN_ULW_CTRL_TYPE_SHIFT 0
+#define NAN_ULW_CTRL_TYPE(ctrl) (ctrl & NAN_ULW_CTRL_TYPE_MASK)
+#define NAN_ULW_CTRL_CHAN_AVAIL_MASK 0x04
+#define NAN_ULW_CTRL_CHAN_AVAIL_SHIFT 2
+#define NAN_ULW_CTRL_CHAN_AVAIL(ctrl) ((ctrl & NAN_ULW_CTRL_CHAN_AVAIL_MASK) \
+	>> NAN_ULW_CTRL_CHAN_AVAIL_SHIFT)
+#define NAN_ULW_CTRL_RX_NSS_MASK 0x78
+#define NAN_ULW_CTRL_RX_NSS_SHIFT 3
+
+#define NAN_ULW_CTRL_TYPE_BAND 0
+#define NAN_ULW_CTRL_TYPE_CHAN_NOAUX 1
+#define NAN_ULW_CTRL_TYPE_CHAN_AUX 2
+
+#define NAN_ULW_CNT_DOWN_NO_EXPIRE 0xFF /* ULWs don't end until next sched update */
+#define NAN_ULW_CNT_DOWN_CANCEL 0x0 /* cancel remaining ulws */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ulw_attr_s {
+	uint8 id;
+	uint16 len;
+	uint16 ctrl;
+	uint32 start; /* low 32 bits of tsf */
+	uint32 dur;
+	uint32 period;
+	uint8 count_down;
+	uint8 overwrite;
+	/*
+	 * ulw[0] == optional field ULW control when present.
+	 * band ID or channel follows
+	 */
+	uint8 ulw_entry[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ulw_attr_t;
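+
+/* Usage sketch (illustrative, not part of the original header): pulling the
+ * antenna counts and supported bands out of a device capability attribute;
+ * cap is a hypothetical wifi_nan_dev_cap_t pointer.
+ *
+ *	uint8 tx_ant = NAN_DEV_CAP_TX_ANT(cap->num_antennas);
+ *	uint8 rx_ant = NAN_DEV_CAP_RX_ANT(cap->num_antennas);
+ *	bool dual_band = (cap->bands_supported & NAN_DEV_CAP_SUPPORTED_BANDS_2G) &&
+ *		(cap->bands_supported & NAN_DEV_CAP_SUPPORTED_BANDS_5G);
+ */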
+
+/* NAN2 Management Frame (section 5.6) */
+
+/* Public action frame for NAN2 */
+typedef BWL_PRE_PACKED_STRUCT struct nan2_pub_act_frame_s {
+	/* NAN_PUB_AF_CATEGORY 0x04 */
+	uint8 category_id;
+	/* NAN_PUB_AF_ACTION 0x09 */
+	uint8 action_field;
+	/* NAN_OUI 0x50-6F-9A */
+	uint8 oui[DOT11_OUI_LEN];
+	/* NAN_OUI_TYPE TBD */
+	uint8 oui_type;
+	/* NAN_OUI_SUB_TYPE TBD */
+	uint8 oui_sub_type;
+	/* One or more NAN Attributes follow */
+	uint8 data[];
+} BWL_POST_PACKED_STRUCT nan2_pub_act_frame_t;
+
+#define NAN2_PUB_ACT_FRM_SIZE (OFFSETOF(nan2_pub_act_frame_t, data))
+
+/* NAN Action Frame Subtypes */
+/* Subtype-0 is Reserved */
+#define NAN_MGMT_FRM_SUBTYPE_RESERVED 0
+#define NAN_MGMT_FRM_SUBTYPE_INVALID 0
+/* NAN Ranging Request */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_REQ 1
+/* NAN Ranging Response */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_RESP 2
+/* NAN Ranging Termination */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_TERM 3
+/* NAN Ranging Report */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_RPT 4
+/* NDP Request */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_REQ 5
+/* NDP Response */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_RESP 6
+/* NDP Confirm */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_CONFIRM 7
+/* NDP Key Installment */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_KEY_INST 8
+/* NDP Termination */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_END 9
+/* Schedule Request */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_REQ 10
+/* Schedule Response */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_RESP 11
+/* Schedule Confirm */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_CONF 12
+/* Schedule Update */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD 13
+
+/* Vendor specific NAN OOB AF subtype */
+#define NAN_MGMT_FRM_SUBTYPE_NAN_OOB_AF 0xDD
+
+#define NAN_SCHEDULE_AF(_naf_subtype) \
+	((_naf_subtype >= NAN_MGMT_FRM_SUBTYPE_SCHED_REQ) && \
+	(_naf_subtype <= NAN_MGMT_FRM_SUBTYPE_SCHED_UPD))
+
+/* Reason code defines */
+#define NAN_REASON_RESERVED 0x0
+#define NAN_REASON_UNSPECIFIED 0x1
+#define NAN_REASON_RESOURCE_LIMIT 0x2
+#define NAN_REASON_INVALID_PARAMS 0x3
+#define NAN_REASON_FTM_PARAM_INCAP 0x4
+#define NAN_REASON_NO_MOVEMENT 0x5
+#define NAN_REASON_INVALID_AVAIL 0x6
+#define NAN_REASON_IMMUT_UNACCEPT 0x7
+#define NAN_REASON_SEC_POLICY 0x8
+#define NAN_REASON_QOS_UNACCEPT 0x9
+#define NAN_REASON_NDP_REJECT 0xa
+#define NAN_REASON_NDL_UNACCEPTABLE 0xb
+
+/* nan 2.0 qos (not attribute) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_qos_s {
+	uint8 tid; /* traffic identifier */
+	uint16 pkt_size; /* service data pkt size */
+	uint8 data_rate; /* mean data rate */
+	uint8 svc_interval; /* max service interval */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndp_qos_t;
+
+/* NDP control bitmap defines */
+#define NAN_NDP_CTRL_CONFIRM_REQUIRED 0x01
+#define NAN_NDP_CTRL_SECURTIY_PRESENT 0x04
+#define NAN_NDP_CTRL_PUB_ID_PRESENT 0x08
+#define NAN_NDP_CTRL_RESP_NDI_PRESENT 0x10
+#define NAN_NDP_CTRL_SPEC_INFO_PRESENT 0x20
+#define NAN_NDP_CTRL_RESERVED 0xA0
+
+/* Used for both NDP Attribute and NDPE Attribute, since the structures are identical */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_attr_s {
+	uint8 id; /* NDP: 0x10, NDPE: 0x29 */
+	uint16 len; /* length */
+	uint8 dialog_token; /* dialog token */
+	uint8 type_status; /* bits 0-3 type, 4-7 status */
+	uint8 reason; /* reason code */
+	struct ether_addr init_ndi; /* ndp initiator's data interface address */
+	uint8 ndp_id; /* ndp identifier (created by initiator) */
+	uint8 control; /* ndp control field */
+	uint8 var[]; /* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndp_attr_t;
+/* NDP attribute type and status macros */
+#define NAN_NDP_TYPE_MASK 0x0F
+#define NAN_NDP_TYPE_REQUEST 0x0
+#define NAN_NDP_TYPE_RESPONSE 0x1
+#define NAN_NDP_TYPE_CONFIRM 0x2
+#define NAN_NDP_TYPE_SECURITY 0x3
+#define NAN_NDP_TYPE_TERMINATE 0x4
+#define NAN_NDP_REQUEST(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_REQUEST)
+#define NAN_NDP_RESPONSE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_RESPONSE)
+#define NAN_NDP_CONFIRM(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_CONFIRM)
+#define NAN_NDP_SECURITY_INST(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \
+	NAN_NDP_TYPE_SECURITY)
+#define NAN_NDP_TERMINATE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \
+	NAN_NDP_TYPE_TERMINATE)
+#define NAN_NDP_STATUS_SHIFT 4
+#define NAN_NDP_STATUS_MASK 0xF0
+#define NAN_NDP_STATUS_CONT (0 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_STATUS_ACCEPT (1 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_STATUS_REJECT (2 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_CONT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == NAN_NDP_STATUS_CONT)
+#define NAN_NDP_ACCEPT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
+	NAN_NDP_STATUS_ACCEPT)
+#define NAN_NDP_REJECT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
+	NAN_NDP_STATUS_REJECT)
+
+#define NAN_NDP_FRM_STATUS(_ndp) \
+	(((_ndp)->type_status & NAN_NDP_STATUS_MASK) >> NAN_NDP_STATUS_SHIFT)
+
+/* NDP Setup Status */
+#define NAN_NDP_SETUP_STATUS_OK 1
+#define NAN_NDP_SETUP_STATUS_FAIL 0
+#define NAN_NDP_SETUP_STATUS_REJECT 2
+
+/* NDPE TLV list */
+#define NDPE_TLV_TYPE_IPV6 0x00
+#define NDPE_TLV_TYPE_SVC_INFO 0x01
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndpe_tlv_s {
+	uint8 type; /* TLV type */
+	uint16 length; /* TLV length */
+	uint8 data[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ndpe_tlv_t;
+
+/* Rng setup attribute type and status macros */
+#define NAN_RNG_TYPE_MASK 0x0F
+#define NAN_RNG_TYPE_REQUEST 0x0
+#define NAN_RNG_TYPE_RESPONSE 0x1
+#define NAN_RNG_TYPE_TERMINATE 0x2
+
+#define NAN_RNG_STATUS_SHIFT 4
+#define NAN_RNG_STATUS_MASK 0xF0
+#define NAN_RNG_STATUS_ACCEPT (0 << NAN_RNG_STATUS_SHIFT)
+#define NAN_RNG_STATUS_REJECT (1 << NAN_RNG_STATUS_SHIFT)
+
+#define NAN_RNG_ACCEPT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \
+	NAN_RNG_STATUS_ACCEPT)
+#define NAN_RNG_REJECT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \
+	NAN_RNG_STATUS_REJECT)
+
+/* schedule entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sched_entry_s {
+	uint8 map_id; /* map id */
+	uint16 tbmp_ctrl; /* time bitmap control */
+	uint8 tbmp_len; /* time bitmap len */
+	uint8 tbmp[]; /* time bitmap - Optional */
+} BWL_POST_PACKED_STRUCT wifi_nan_sched_entry_t;
+
+#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F
+#define NAN_SCHED_ENTRY_MIN_SIZE OFFSETOF(wifi_nan_sched_entry_t, tbmp)
+#define NAN_SCHED_ENTRY_SIZE(_entry) (NAN_SCHED_ENTRY_MIN_SIZE + (_entry)->tbmp_len)
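+
+/* Usage sketch (illustrative, not part of the original header): iterating a
+ * schedule entry list (e.g. the data[] of a ranging setup attribute) with the
+ * size helpers above; list and rem are hypothetical.
+ *
+ *	const wifi_nan_sched_entry_t *e = (const wifi_nan_sched_entry_t *)list;
+ *	while (rem >= NAN_SCHED_ENTRY_MIN_SIZE && NAN_SCHED_ENTRY_SIZE(e) <= rem) {
+ *		uint8 sz = NAN_SCHED_ENTRY_SIZE(e);
+ *		// e->map_id, e->tbmp_ctrl and e->tbmp[0..tbmp_len-1] are valid here
+ *		rem -= sz;
+ *		e = (const wifi_nan_sched_entry_t *)((const uint8 *)e + sz);
+ *	}
+ */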
+
+/* for dev cap, element container etc. */
+#define NAN_DEV_ELE_MAPID_CTRL_MASK 0x1
+#define NAN_DEV_ELE_MAPID_CTRL_SHIFT 0
+#define NAN_DEV_ELE_MAPID_MASK 0x1E
+#define NAN_DEV_ELE_MAPID_SHIFT 1
+
+#define NAN_DEV_ELE_MAPID_CTRL_SET(_mapid_field, value) \
+	do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_CTRL_MASK; \
+		(_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_CTRL_SHIFT) & \
+		NAN_DEV_ELE_MAPID_CTRL_MASK); \
+	} while (0)
+
+#define NAN_DEV_ELE_MAPID_CTRL_GET(_mapid_field) \
+	(((_mapid_field) & NAN_DEV_ELE_MAPID_CTRL_MASK) >> \
+	NAN_DEV_ELE_MAPID_CTRL_SHIFT)
+
+#define NAN_DEV_ELE_MAPID_SET(_mapid_field, value) \
+	do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_MASK; \
+		(_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_SHIFT) & \
+		NAN_DEV_ELE_MAPID_MASK); \
+	} while (0)
+
+#define NAN_DEV_ELE_MAPID_GET(_mapid_field) \
+	(((_mapid_field) & NAN_DEV_ELE_MAPID_MASK) >> \
+	NAN_DEV_ELE_MAPID_SHIFT)
+
+/* schedule entry map id handling */
+#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F
+#define NAN_SCHED_ENTRY_MAPID_SHIFT 0
+
+#define NAN_SCHED_ENTRY_MAPID_SET(_mapid_field, value) \
+	do {(_mapid_field) &= ~NAN_SCHED_ENTRY_MAPID_MASK; \
+		(_mapid_field) |= ((value << NAN_SCHED_ENTRY_MAPID_SHIFT) & \
+		NAN_SCHED_ENTRY_MAPID_MASK); \
+	} while (0)
+
+#define NAN_SCHED_ENTRY_MAPID_GET(_mapid_field) \
+	(((_mapid_field) & NAN_SCHED_ENTRY_MAPID_MASK) >> \
+	NAN_SCHED_ENTRY_MAPID_SHIFT)
+
+/* NDC attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndc_attr_s {
+	uint8 id;
+	uint16 len;
+	uint8 ndc_id[NAN_DATA_NDC_ID_SIZE];
+	uint8 attr_cntrl;
+	uint8 var[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ndc_attr_t;
+
+/* Attribute control subfield of NDC attr */
+/* Proposed NDC */
+#define NAN_NDC_ATTR_PROPOSED_NDC_MASK 0x1
+#define NAN_NDC_ATTR_PROPOSED_NDC_SHIFT 0
+
+/* get & set */
+#define NAN_NDC_GET_PROPOSED_FLAG(_attr) \
+	(((_attr)->attr_cntrl & NAN_NDC_ATTR_PROPOSED_NDC_MASK) >> \
+	NAN_NDC_ATTR_PROPOSED_NDC_SHIFT)
+#define NAN_NDC_SET_PROPOSED_FLAG(_attr, value) \
+	do {((_attr)->attr_cntrl &= ~NAN_NDC_ATTR_PROPOSED_NDC_MASK); \
+	((_attr)->attr_cntrl |= \
+	(((value) << NAN_NDC_ATTR_PROPOSED_NDC_SHIFT) & NAN_NDC_ATTR_PROPOSED_NDC_MASK)); \
+	} while (0)
+
+/* Service descriptor extension attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_desc_ext_attr_s {
+	/* Attribute ID - 0x0E */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* Instance id of associated service descriptor attribute */
+	uint8 instance_id;
+	/* SDE control field */
+	uint16 control;
+	/* range limit, svc upd indicator etc. */
+	uint8 var[];
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_desc_ext_attr_t;
+
+#define NAN_SDE_ATTR_MIN_LEN OFFSETOF(wifi_nan_svc_desc_ext_attr_t, var)
+#define NAN_SDE_ATTR_RANGE_LEN 4
+#define NAN_SDE_ATTR_SUI_LEN 1
+#define NAN_SDE_ATTR_INFO_LEN_PARAM_LEN 2
+#define NAN_SDE_ATTR_RANGE_INGRESS_LEN 2
+#define NAN_SDE_ATTR_RANGE_EGRESS_LEN 2
+#define NAN_SDE_ATTR_CTRL_LEN 2
+/* max length of variable length field (matching filter, service response filter,
+ * or service info) in service descriptor attribute
+ */
+#define NAN_DISC_SDA_FIELD_MAX_LEN 255
+
+/* SDEA control field bit definitions and access macros */
+#define NAN_SDE_CF_FSD_REQUIRED (1 << 0)
+#define NAN_SDE_CF_FSD_GAS (1 << 1)
+#define NAN_SDE_CF_DP_REQUIRED (1 << 2)
+#define NAN_SDE_CF_DP_TYPE (1 << 3)
+#define NAN_SDE_CF_MULTICAST_TYPE (1 << 4)
+#define NAN_SDE_CF_QOS_REQUIRED (1 << 5)
+#define NAN_SDE_CF_SECURITY_REQUIRED (1 << 6)
+#define NAN_SDE_CF_RANGING_REQUIRED (1 << 7)
+#define NAN_SDE_CF_RANGE_PRESENT (1 << 8)
+#define NAN_SDE_CF_SVC_UPD_IND_PRESENT (1 << 9)
+/* Using Reserved Bits as per Spec */
+#define NAN_SDE_CF_LIFE_CNT_PUB_RX (1 << 15)
+#define NAN_SDE_FSD_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_FSD_REQUIRED)
+#define NAN_SDE_FSD_GAS(_sde) ((_sde)->control & NAN_SDE_CF_FSD_GAS)
+#define NAN_SDE_DP_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_DP_REQUIRED)
+#define NAN_SDE_DP_MULTICAST(_sde) ((_sde)->control & NAN_SDE_CF_DP_TYPE)
+#define NAN_SDE_MULTICAST_M_TO_M(_sde) ((_sde)->control & NAN_SDE_CF_MULTICAST_TYPE)
+#define NAN_SDE_QOS_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_QOS_REQUIRED)
+#define NAN_SDE_SECURITY_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_SECURITY_REQUIRED)
+#define NAN_SDE_RANGING_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_RANGING_REQUIRED)
+#define NAN_SDE_RANGE_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_RANGE_PRESENT)
+#define NAN_SDE_SVC_UPD_IND_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_SVC_UPD_IND_PRESENT)
+#define NAN_SDE_LIFE_COUNT_FOR_PUB_RX(_sde) (_sde & NAN_SDE_CF_LIFE_CNT_PUB_RX)
+
+/* nan2 security */
+
+/*
+ * Cipher suite information Attribute.
+ * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.2)
+ */
+#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_4 0
+#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_16 (1 << 0)
+
+/* enum security algo. */
+enum nan_sec_csid {
+	NAN_SEC_ALGO_NONE = 0,
+	NAN_SEC_ALGO_NCS_SK_CCM_128 = 1, /* CCMP 128 */
+	NAN_SEC_ALGO_NCS_SK_GCM_256 = 2, /* GCMP 256 */
+	NAN_SEC_ALGO_LAST = 3
+};
+typedef int8 nan_sec_csid_e;
+
+/* nan2 cipher suite attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_field_s {
+	uint8 cipher_suite_id;
+	uint8 inst_id; /* Instance Id */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_field_t;
+
+/* nan2 cipher suite information attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_info_attr_s {
+	uint8 attr_id; /* 0x22 - NAN_ATTR_CIPHER_SUITE_INFO */
+	uint16 len;
+	uint8 capabilities;
+	uint8 var[]; /* cipher suite list */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_info_attr_t;
+
+/*
+ * Security context identifier attribute
+ * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.4)
+ */
+
+#define NAN_SEC_CTX_ID_TYPE_PMKID (1 << 0)
+
+/* nan2 security context identifier attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_field_s {
+	uint16 sec_ctx_id_type_len; /* length of security ctx identifier */
+	uint8 sec_ctx_id_type;
+	uint8 inst_id; /* Instance Id */
+	uint8 var[]; /* security ctx identifier */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_field_t;
+
+/* nan2 security context identifier info attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_info_attr_s {
+	uint8 attr_id; /* 0x23 - NAN_ATTR_SEC_CTX_ID_INFO */
+	uint16 len;
+	uint8 var[]; /* security context identifier list */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_info_attr_t;
+
+/*
+ * Nan shared key descriptor attribute
+ * WFA Tech. Spec ver 23
+ */
+
+#define NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN 8
+#define NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN 32
+
+/* nan shared key descriptor attr field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ncssk_key_desc_attr_s {
+	uint8 attr_id; /* 0x24 - NAN_ATTR_SHARED_KEY_DESC */
+	uint16 len;
+	uint8 inst_id; /* Publish service instance ID */
+	uint8 desc_type;
+	uint16 key_info;
+	uint16 key_len;
+	uint8 key_replay_cntr[NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN];
+	uint8 key_nonce[NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN];
+	uint8 reserved[32]; /* EAPOL IV + Key RSC + Rsvd fields in EAPOL Key */
+	uint8 mic[]; /* mic + key data len + key data */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ncssk_key_desc_attr_t;
+
+/* Key Info fields */
+#define NAN_SEC_NCSSK_DESC_MASK 0x7
+#define NAN_SEC_NCSSK_DESC_SHIFT 0
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK 0x8
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT 3
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK 0x40
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT 6
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_MASK 0x80
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT 7
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_MASK 0x100
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT 8
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_MASK 0x200
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT 9
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_MASK 0x400
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT 10
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_MASK 0x800
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT 11
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK 0x1000
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT 12
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK 0x2000
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT 13
+
+/* Key Info get & set macros */
+#define NAN_SEC_NCSSK_KEY_DESC_VER_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_MASK) >> NAN_SEC_NCSSK_DESC_SHIFT)
+#define NAN_SEC_NCSSK_KEY_DESC_VER_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK) >> NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK) >> \
+	NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ACK_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ACK_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_ACK_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_MIC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_MIC_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_MIC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SEC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SEC_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_SEC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ERR_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ERR_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_ERR_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_REQ_MASK) >> NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_REQ_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_REQ_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK) >> \
+	NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_GET(_key_info) \
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK) >> \
+	NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SET(_val, _key_info) \
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK; \
+	(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT) & \
+	NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK);} while (0)
+
+#define NAN_SEC_NCSSK_IEEE80211_KDESC_TYPE 2 /* IEEE 802.11 Key Descriptor Type */
+#define NAN_SEC_NCSSK_KEY_DESC_VER 0 /* NCSSK-128/256 */
+#define NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE 1 /* Pairwise */
+#define NAN_SEC_NCSSK_LIFETIME_KDE 7 /* Lifetime KDE type */
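+
+/* Usage sketch (illustrative, not part of the original header): building the
+ * key_info field of the shared key descriptor with the set macros above.
+ *
+ *	uint16 key_info = 0;
+ *	NAN_SEC_NCSSK_KEY_DESC_VER_SET(NAN_SEC_NCSSK_KEY_DESC_VER, key_info);
+ *	NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE, key_info);
+ *	NAN_SEC_NCSSK_DESC_KEY_MIC_SET(1, key_info);
+ *	if (NAN_SEC_NCSSK_DESC_KEY_ACK_GET(key_info)) {
+ *		// peer must acknowledge this descriptor
+ *	}
+ */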
+
+/* TODO include MTK related attributes */
+
+/* NAN Multicast service group(NMSG) definitions */
+/* Length of NMSG_ID -- (NDI * 2^16 + pub_id * 2^8 + Random_factor) */
+#define NAN_NMSG_ID_LEN 8
+
+#define NAN_NMSG_TYPE_MASK 0x0F
+#define NMSG_ATTR_TYPE_STATUS_REQUEST 0x00
+#define NMSG_ATTR_TYPE_STATUS_RESPONSE 0x01
+#define NMSG_ATTR_TYPE_STATUS_CONFIRM 0x02
+#define NMSG_ATTR_TYPE_STATUS_SEC_INSTALL 0x03
+#define NMSG_ATTR_TYPE_STATUS_TERMINATE 0x04
+#define NMSG_ATTR_TYPE_STATUS_IMPLICIT_ENROL 0x05
+
+#define NMSG_ATTR_TYPE_STATUS_CONTINUED 0x00
+#define NMSG_ATTR_TYPE_STATUS_ACCEPTED 0x10
+#define NMSG_ATTR_TYPE_STATUS_REJECTED 0x20
+
+#define NMSG_CTRL_PUB_ID_PRESENT 0x0001
+#define NMSG_CTRL_NMSG_ID_PRESENT 0x0002
+#define NMSG_CTRL_SECURITY_PRESENT 0x0004
+#define NMSG_CTRL_MANY_TO_MANY_PRESENT 0x0008
+#define NMSG_CTRL_SVC_INFO_PRESENT 0x0010
+
+/* NMSG attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_nmsg_attr_s {
+	uint8 id; /* Attribute ID - 0x11 */
+	uint16 len; /* Length including pubid, NMSGID and svc info */
+	uint8 dialog_token;
+	uint8 type_status; /* Type and Status field byte */
+	uint8 reason_code;
+	uint8 mc_id; /* Multicast id similar to NDPID */
+	uint8 nmsg_ctrl; /* NMSG control field */
+	/* Optional publish id, NMSGID and svc info are included in var[] */
+	uint8 var[0];
+} BWL_POST_PACKED_STRUCT wifi_nan_nmsg_attr_t;
+
+#define NMSG_ATTR_MCAST_SCHED_MAP_ID_MASK 0x1E
+#define NMSG_ATTR_MCAST_SCHED_MAP_ID_SHIFT 1
+#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_MASK 0x20
+#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_SHIFT 5
+
+/* NAN Multicast Schedule attribute structure */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_mcast_sched_attr_s {
+	uint8 id; /* 0x16 */
+	uint16 len;
+	uint8 nmsg_id[NAN_NMSG_ID_LEN];
+	uint8 attr_cntrl;
+	uint8 sched_own[ETHER_ADDR_LEN];
+	uint8 var[]; /* multicast sched entry list (schedule_entry_list) */
+} BWL_POST_PACKED_STRUCT wifi_nan_mcast_sched_attr_t;
+
+/* FAC Channel Entry (section 10.7.19.1.5) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_fac_chan_entry_s {
+	uint8 oper_class; /* Operating Class */
+	uint16 chan_bitmap; /* Channel Bitmap */
+	uint8 primary_chan_bmp; /* Primary Channel Bitmap */
+	uint16 aux_chan; /* Auxiliary Channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_fac_chan_entry_t;
+
+/* TODO move this from nan.h */
+#define NAN_ALL_NAN_MGMT_FRAMES (NAN_FRM_SCHED_AF | \
+	NAN_FRM_NDP_AF | NAN_FRM_NDL_AF | \
+	NAN_FRM_DISC_BCN | NAN_FRM_SYNC_BCN | \
+	NAN_FRM_SVC_DISC | NAN_FRM_RNG_REQ_AF | \
+	NAN_FRM_RNG_RESP_AF | NAN_FRM_RNG_REPORT_AF | \
+	NAN_FRM_RNG_TERM_AF)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NAN_H_ */
diff --git a/bcmdhd.101.10.361.x/include/nci.h b/bcmdhd.101.10.361.x/include/nci.h
new file mode 100755
index 0000000..188016a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/nci.h
@@ -0,0 +1,96 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the BOOKER NCI (non coherent interconnect) based Broadcom chips.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ * + * + * <> + * + */ + +#ifndef _NCI_H +#define _NCI_H + +#include + +#ifdef SOCI_NCI_BUS +void nci_uninit(void *nci); +uint32 nci_scan(si_t *sih); +void nci_dump_erom(void *nci); +void* nci_init(si_t *sih, chipcregs_t *cc, uint bustype); +volatile void *nci_setcore(si_t *sih, uint coreid, uint coreunit); +volatile void *nci_setcoreidx(si_t *sih, uint coreidx); +uint nci_findcoreidx(const si_t *sih, uint coreid, uint coreunit); +volatile uint32 *nci_corereg_addr(si_t *sih, uint coreidx, uint regoff); +uint nci_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +uint nci_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +uint nci_corerev_minor(const si_t *sih); +uint nci_corerev(const si_t *sih); +uint nci_corevendor(const si_t *sih); +uint nci_get_wrap_reg(const si_t *sih, uint32 offset, uint32 mask, uint32 val); +void nci_core_reset(const si_t *sih, uint32 bits, uint32 resetbits); +void nci_core_disable(const si_t *sih, uint32 bits); +bool nci_iscoreup(const si_t *sih); +uint32 nci_coreid(const si_t *sih, uint coreidx); +uint nci_numcoreunits(const si_t *sih, uint coreid); +uint32 nci_addr_space(const si_t *sih, uint spidx, uint baidx); +uint32 nci_addr_space_size(const si_t *sih, uint spidx, uint baidx); +bool nci_iscoreup(const si_t *sih); +uint nci_intflag(si_t *sih); +uint nci_flag(si_t *sih); +uint nci_flag_alt(const si_t *sih); +void nci_setint(const si_t *sih, int siflag); +uint32 nci_oobr_baseaddr(const si_t *sih, bool second); +uint nci_coreunit(const si_t *sih); +uint nci_corelist(const si_t *sih, uint coreid[]); +int nci_numaddrspaces(const si_t *sih); +uint32 nci_addrspace(const si_t *sih, uint spidx, uint baidx); +uint32 nci_addrspacesize(const si_t *sih, uint spidx, uint baidx); +void nci_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size); +uint32 nci_core_cflags(const si_t *sih, uint32 mask, uint32 val); +void nci_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val); +uint32 nci_core_sflags(const si_t *sih, uint32 mask, uint32 val); +uint nci_wrapperreg(const si_t *sih, uint32 offset, uint32 mask, uint32 val); +void nci_invalidate_second_bar0win(si_t *sih); +int nci_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read); +int nci_backplane_access_64(si_t *sih, uint addr, uint size, uint64 *val, bool read); +uint nci_num_slaveports(const si_t *sih, uint coreid); +#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP) +void nci_dumpregs(const si_t *sih, struct bcmstrbuf *b); +#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */ +#ifdef BCMDBG +void nci_view(si_t *sih, bool verbose); +void nci_viewall(si_t *sih, bool verbose); +#endif /* BCMDBG */ +uint32 nci_get_nth_wrapper(const si_t *sih, int32 wrap_pos); +uint32 nci_get_axi_addr(const si_t *sih, uint32 *size); +uint32* nci_wrapper_dump_binary_one(const si_info_t *sii, uint32 *p32, uint32 wrap_ba); +uint32 nci_wrapper_dump_binary(const si_t *sih, uchar *p); +uint32 nci_wrapper_dump_last_timeout(const si_t *sih, uint32 *error, + uint32 *core, uint32 *ba, uchar *p); +bool nci_check_enable_backplane_log(const si_t *sih); +uint32 nci_get_core_baaddr(const si_t *sih, uint32 *size, int32 baidx); +uint32 nci_clear_backplane_to(si_t *sih); +uint32 nci_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap); +bool nci_ignore_errlog(const si_info_t *sii, const aidmp_t *ai, + uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts); +void nci_wrapper_get_last_error(const si_t *sih, uint32 
*error_status, uint32 *core, uint32 *lo, + uint32 *hi, uint32 *id); +uint32 nci_get_axi_timeout_reg(void); +uint32 nci_findcoreidx_by_axiid(const si_t *sih, uint32 axiid); +uint32* nci_wrapper_dump_binary_one(const si_info_t *sii, uint32 *p32, uint32 wrap_ba); +uint32 nci_wrapper_dump_binary(const si_t *sih, uchar *p); +uint32 nci_wrapper_dump_last_timeout(const si_t *sih, uint32 *error, + uint32 *core, uint32 *ba, uchar *p); +bool nci_check_enable_backplane_log(const si_t *sih); +uint32 ai_wrapper_dump_buf_size(const si_t *sih); +uint32 nci_wrapper_dump_buf_size(const si_t *sih); +#endif /* SOCI_NCI_BUS */ +#endif /* _NCI_H */ diff --git a/bcmdhd.101.10.361.x/include/osl.h b/bcmdhd.101.10.361.x/include/osl.h new file mode 100755 index 0000000..7ea182e --- /dev/null +++ b/bcmdhd.101.10.361.x/include/osl.h @@ -0,0 +1,482 @@ +/* + * OS Abstraction Layer + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _osl_h_ +#define _osl_h_ + +#include <typedefs.h> + +enum { + TAIL_BYTES_TYPE_FCS = 1, + TAIL_BYTES_TYPE_ICV = 2, + TAIL_BYTES_TYPE_MIC = 3 +}; + +#ifdef DHD_EFI +#define OSL_PKTTAG_SZ 40 /* Size of PktTag */ +#elif defined(MACOSX) +#define OSL_PKTTAG_SZ 56 +#elif defined(__linux__) +#define OSL_PKTTAG_SZ 48 /* standard linux pkttag size is 48 bytes */ +#else +#ifndef OSL_PKTTAG_SZ +#define OSL_PKTTAG_SZ 32 /* Size of PktTag */ +#endif /* !OSL_PKTTAG_SZ */ +#endif /* DHD_EFI */ + +/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */ +typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status); + +/* Drivers use REGOPSSET() to register register read/write functions */ +typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size); +typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size); + +#if defined(EFI) +#include +#elif defined(WL_UNITTEST) +#include +#elif defined(__linux__) +#include <linux_osl.h> +#include <linux_pkt.h> +#elif defined(NDIS) +#include +#elif defined(_RTE_) +#include +#include +#elif defined(MACOSX) +#include +#else +#error "Unsupported OSL requested" +#endif /* defined(EFI) */ + +#ifndef PKTDBG_TRACE +#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#endif + +#ifndef BCM_UPTIME_PROFILE +#define OSL_GETCYCLES_PROF(x) +#endif + +/* -------------------------------------------------------------------------- +** Register manipulation macros.
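Editor's sketch, not part of the patch: usage of the read-modify-write macros defined immediately below. R_REG/W_REG come from the per-OS osl header; `osh`, `regs`, and the CTRL_MODE_* field are hypothetical.

    #define CTRL_MODE_MASK  0x30u   /* hypothetical 2-bit field at bits 4-5 */
    #define CTRL_MODE_SHIFT 4

    /* write mode 2 into the field, preserving all other bits */
    SET_REG(osh, &regs->control, CTRL_MODE_MASK, (2u << CTRL_MODE_SHIFT));
    /* per the SET_REG definition below, this expands to:
     * W_REG(osh, &regs->control,
     *       (R_REG(osh, &regs->control) & ~CTRL_MODE_MASK) |
     *       (2u << CTRL_MODE_SHIFT));
     */

    OR_REG(osh, &regs->intmask, 0x1u);      /* set bit 0 */
    AND_REG(osh, &regs->intmask, ~0x1u);    /* clear bit 0 */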
+*/ + +#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val))) + +#ifndef AND_REG +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#endif /* !AND_REG */ + +#ifndef OR_REG +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) +#endif /* !OR_REG */ + +#if !defined(OSL_SYSUPTIME) +#define OSL_SYSUPTIME() (0) +#define OSL_SYSUPTIME_NOT_DEFINED 1 +#endif /* !defined(OSL_SYSUPTIME) */ + +#if !defined(OSL_SYSUPTIME_US) +#define OSL_SYSUPTIME_US() (0) +#define OSL_SYSUPTIME_US_NOT_DEFINED 1 +#endif /* !defined(OSL_SYSUPTIME) */ + +#if defined(OSL_SYSUPTIME_NOT_DEFINED) && defined(OSL_SYSUPTIME_US_NOT_DEFINED) +#define OSL_SYSUPTIME_SUPPORT FALSE +#else +#define OSL_SYSUPTIME_SUPPORT TRUE +#endif /* OSL_SYSUPTIME */ + +#ifndef OSL_GET_LOCALTIME +#define OSL_GET_LOCALTIME(sec, usec) \ + do { \ + BCM_REFERENCE(sec); \ + BCM_REFERENCE(usec); \ + } while (0) +#endif /* OSL_GET_LOCALTIME */ + +#ifndef OSL_LOCALTIME_NS +#define OSL_LOCALTIME_NS() (OSL_SYSUPTIME_US() * NSEC_PER_USEC) +#endif /* OSL_LOCALTIME_NS */ + +#ifndef OSL_SYSTZTIME_US +#define OSL_SYSTZTIME_US() OSL_SYSUPTIME_US() +#endif /* OSL_GET_SYSTZTIME */ + +#if !defined(OSL_CPU_COUNTS_PER_US) +#define OSL_CPU_COUNTS_PER_US() (0) +#define OSL_CPU_COUNTS_PER_US_NOT_DEFINED 1 +#endif /* !defined(OSL_CPU_COUNTS_PER_US) */ + +#ifndef OSL_SYS_HALT +#ifdef __COVERITY__ +/* + * For Coverity builds, provide a definition that allows Coverity + * to model the lack of return. This avoids Coverity False Positive + * defects associated with data inconsistency being detected after + * we otherwise would have halted. + */ +#define OSL_SYS_HALT() __coverity_panic__() +#else /* __COVERITY__ */ +#define OSL_SYS_HALT() do {} while (0) +#endif /* __COVERITY__ */ +#endif /* OSL_SYS_HALT */ + +#ifndef DMB +#define DMB() do {} while (0) +#endif /* DMB */ + +#ifndef OSL_MEM_AVAIL +#define OSL_MEM_AVAIL() (0xffffffff) +#endif + +#ifndef OSL_OBFUSCATE_BUF +#if defined (_RTE_) +#define OSL_OBFUSCATE_BUF(x) osl_obfuscate_ptr(x) +#else +#define OSL_OBFUSCATE_BUF(x) (x) +#endif /* _RTE_ */ +#endif /* OSL_OBFUSCATE_BUF */ + +#ifndef OSL_GET_HCAPISTIMESYNC +#if defined (_RTE_) +#define OSL_GET_HCAPISTIMESYNC() osl_get_hcapistimesync() +#else +#define OSL_GET_HCAPISTIMESYNC() +#endif /* _RTE_ */ +#endif /* OSL_GET_HCAPISTIMESYNC */ + +#ifndef OSL_GET_HCAPISPKTTXS +#if defined (_RTE_) +#define OSL_GET_HCAPISPKTTXS() osl_get_hcapispkttxs() +#else +#define OSL_GET_HCAPISPKTTXS() +#endif /* _RTE_ */ +#endif /* OSL_GET_HCAPISPKTTXS */ + +#if !defined(PKTC_DONGLE) +#define PKTCGETATTR(skb) (0) +#define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb) +#define PKTCCLRATTR(skb) BCM_REFERENCE(skb) +#define PKTCCNT(skb) (1) +#define PKTCLEN(skb) PKTLEN(NULL, skb) +#define PKTCGETFLAGS(skb) (0) +#define PKTCSETFLAGS(skb, f) BCM_REFERENCE(skb) +#define PKTCCLRFLAGS(skb) BCM_REFERENCE(skb) +#define PKTCFLAGS(skb) (0) +#define PKTCSETCNT(skb, c) BCM_REFERENCE(skb) +#define PKTCINCRCNT(skb) BCM_REFERENCE(skb) +#define PKTCADDCNT(skb, c) BCM_REFERENCE(skb) +#define PKTCSETLEN(skb, l) BCM_REFERENCE(skb) +#define PKTCADDLEN(skb, l) BCM_REFERENCE(skb) +#define PKTCSETFLAG(skb, fb) BCM_REFERENCE(skb) +#define PKTCCLRFLAG(skb, fb) BCM_REFERENCE(skb) +#define PKTCLINK(skb) NULL +#define PKTSETCLINK(skb, x) BCM_REFERENCE(skb) +#define FOREACH_CHAINED_PKT(skb, nskb) \ + for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb)) +#define PKTCFREE PKTFREE +#define PKTCENQTAIL(h, t, p) \ +do { \ + if ((t) == NULL) { \ + (h) = (t) = (p); \ + } \ +} 
while (0) +#endif /* !PKTC_DONGLE */ + +#ifndef PKTSETCHAINED +#define PKTSETCHAINED(osh, skb) BCM_REFERENCE(osh) +#endif +#ifndef PKTCLRCHAINED +#define PKTCLRCHAINED(osh, skb) BCM_REFERENCE(osh) +#endif +#ifndef PKTISCHAINED +#define PKTISCHAINED(skb) FALSE +#endif + +#ifndef PKTGETPROFILEIDX +#define PKTGETPROFILEIDX(p) (-1) +#endif + +#ifndef PKTCLRPROFILEIDX +#define PKTCLRPROFILEIDX(p) +#endif + +#ifndef PKTSETPROFILEIDX +#define PKTSETPROFILEIDX(p, idx) BCM_REFERENCE(idx) +#endif + +#ifndef _RTE_ +/* Lbuf with fraglist */ +#ifndef PKTFRAGPKTID +#define PKTFRAGPKTID(osh, lb) (0) +#endif +#ifndef PKTSETFRAGPKTID +#define PKTSETFRAGPKTID(osh, lb, id) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGTOTNUM +#define PKTFRAGTOTNUM(osh, lb) (0) +#endif +#ifndef PKTSETFRAGTOTNUM +#define PKTSETFRAGTOTNUM(osh, lb, tot) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGTOTLEN +#define PKTFRAGTOTLEN(osh, lb) (0) +#endif +#ifndef PKTSETFRAGTOTLEN +#define PKTSETFRAGTOTLEN(osh, lb, len) BCM_REFERENCE(osh) +#endif +#ifndef PKTIFINDEX +#define PKTIFINDEX(osh, lb) (0) +#endif +#ifndef PKTSETIFINDEX +#define PKTSETIFINDEX(osh, lb, idx) BCM_REFERENCE(osh) +#endif +#ifndef PKTGETLF +#define PKTGETLF(osh, len, send, lbuf_type) (0) +#endif + +/* in rx path, reuse totlen as used len */ +#ifndef PKTFRAGUSEDLEN +#define PKTFRAGUSEDLEN(osh, lb) (0) +#endif +#ifndef PKTSETFRAGUSEDLEN +#define PKTSETFRAGUSEDLEN(osh, lb, len) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGLEN +#define PKTFRAGLEN(osh, lb, ix) (0) +#endif +#ifndef PKTSETFRAGLEN +#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGDATA_LO +#define PKTFRAGDATA_LO(osh, lb, ix) (0) +#endif +#ifndef PKTSETFRAGDATA_LO +#define PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGDATA_HI +#define PKTFRAGDATA_HI(osh, lb, ix) (0) +#endif +#ifndef PKTSETFRAGDATA_HI +#define PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh) +#endif + +#ifndef PKTFRAGMOVE +#define PKTFRAGMOVE(osh, dst, src) (BCM_REFERENCE(osh), BCM_REFERENCE(dst), BCM_REFERENCE(src)) +#endif + +/* RX FRAG */ +#ifndef PKTISRXFRAG +#define PKTISRXFRAG(osh, lb) (0) +#endif +#ifndef PKTSETRXFRAG +#define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTRESETRXFRAG +#define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif + +/* TX FRAG */ +#ifndef PKTISTXFRAG +#define PKTISTXFRAG(osh, lb) (0) +#endif +#ifndef PKTSETTXFRAG +#define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif + +/* TX ALFRAG */ +#ifndef PKTISTXALFRAG +#define PKTISTXALFRAG(osh, lb) (0) +#endif +#ifndef PKTSETTXALFRAG +#define PKTSETTXALFRAG(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTRESETTXALFRAG +#define PKTRESETTXALFRAG(osh, lb) BCM_REFERENCE(osh) +#endif + +#ifndef PKTNUMMPDUS +#define PKTNUMMPDUS(osh, lb) (1) +#endif +#ifndef PKTNUMPKTS +#define PKTNUMPKTS(osh, lb) (1) +#endif + +#ifndef PKTISHWCSO +#define PKTISHWCSO(osh, lb) (FALSE) +#endif + +#ifndef PKTISSUBMSDUTOEHDR +#define PKTISSUBMSDUTOEHDR(osh, lb) (FALSE) +#endif + +#ifndef PKT_IS_HOST_SFHLLC +#define PKT_IS_HOST_SFHLLC(osh, lb) (FALSE) +#endif + +#ifndef PKT_SET_HOST_SFHLLC +#define PKT_SET_HOST_SFHLLC(osh, lb) BCM_REFERENCE(osh) +#endif + +#ifndef PKT_IS_HOST_SFHLLC_DONE +#define PKT_IS_HOST_SFHLLC_DONE(osh, lb) (FALSE) +#endif + +#ifndef PKT_SET_HOST_SFHLLC_DONE +#define PKT_SET_HOST_SFHLLC_DONE(osh, lb) BCM_REFERENCE(osh) +#endif + +/* Need Rx completion used for AMPDU reordering */ +#ifndef PKTNEEDRXCPL +#define PKTNEEDRXCPL(osh, lb) (TRUE) +#endif +#ifndef PKTSETNORXCPL +#define 
PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTRESETNORXCPL +#define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTISFRAG +#define PKTISFRAG(osh, lb) (0) +#endif +#ifndef PKTFRAGISCHAINED +#define PKTFRAGISCHAINED(osh, i) (0) +#endif +#ifndef PKTISHDRCONVTD +#define PKTISHDRCONVTD(osh, lb) (0) +#endif + +/* Forwarded pkt indication */ +#ifndef PKTISFRWDPKT +#define PKTISFRWDPKT(osh, lb) 0 +#endif +#ifndef PKTSETFRWDPKT +#define PKTSETFRWDPKT(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTRESETFRWDPKT +#define PKTRESETFRWDPKT(osh, lb) BCM_REFERENCE(osh) +#endif + +/* PKT consumed for totlen calculation */ +#ifndef PKTISUSEDTOTLEN +#define PKTISUSEDTOTLEN(osh, lb) 0 +#endif +#ifndef PKTSETUSEDTOTLEN +#define PKTSETUSEDTOTLEN(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTRESETUSEDTOTLEN +#define PKTRESETUSEDTOTLEN(osh, lb) BCM_REFERENCE(osh) +#endif + +/* UDR Packet Indication */ +#ifndef PKTISUDR +#define PKTISUDR(osh, lb) 0 +#endif + +#ifndef PKTSETUDR +#define PKTSETUDR(osh, lb) BCM_REFERENCE(osh) +#endif + +#ifndef PKTRESETUDR +#define PKTRESETUDR(osh, lb) BCM_REFERENCE(osh) +#endif +#endif /* _RTE_ */ + +#if !(defined(__linux__)) +#define PKTLIST_INIT(x) BCM_REFERENCE(x) +#define PKTLIST_ENQ(x, y) BCM_REFERENCE(x) +#define PKTLIST_DEQ(x) BCM_REFERENCE(x) +#define PKTLIST_UNLINK(x, y) BCM_REFERENCE(x) +#define PKTLIST_FINI(x) BCM_REFERENCE(x) +#endif + +#ifndef ROMMABLE_ASSERT +#define ROMMABLE_ASSERT(exp) ASSERT(exp) +#endif /* ROMMABLE_ASSERT */ + +#ifndef MALLOC_NOPERSIST + #define MALLOC_NOPERSIST MALLOC +#endif /* !MALLOC_NOPERSIST */ + +#ifndef MALLOC_PERSIST + #define MALLOC_PERSIST MALLOC +#endif /* !MALLOC_PERSIST */ + +#ifndef MALLOC_RA + #define MALLOC_RA(osh, size, callsite) MALLOCZ(osh, size) +#endif /* !MALLOC_RA */ + +#ifndef MALLOC_PERSIST_ATTACH + #define MALLOC_PERSIST_ATTACH MALLOC +#endif /* !MALLOC_PERSIST_ATTACH */ + +#ifndef MALLOCZ_PERSIST_ATTACH + #define MALLOCZ_PERSIST_ATTACH MALLOCZ +#endif /* !MALLOCZ_PERSIST_ATTACH */ + +#ifndef MALLOCZ_NOPERSIST + #define MALLOCZ_NOPERSIST MALLOCZ +#endif /* !MALLOCZ_NOPERSIST */ + +#ifndef MALLOCZ_PERSIST + #define MALLOCZ_PERSIST MALLOCZ +#endif /* !MALLOCZ_PERSIST */ + +#ifndef MFREE_PERSIST + #define MFREE_PERSIST MFREE +#endif /* !MFREE_PERSIST */ + +#ifndef MALLOC_SET_NOPERSIST + #define MALLOC_SET_NOPERSIST(osh) do { } while (0) +#endif /* !MALLOC_SET_NOPERSIST */ + +#ifndef MALLOC_CLEAR_NOPERSIST + #define MALLOC_CLEAR_NOPERSIST(osh) do { } while (0) +#endif /* !MALLOC_CLEAR_NOPERSIST */ + +#if defined(OSL_MEMCHECK) +#define MEMCHECK(f, l) osl_memcheck(f, l) +#else +#define MEMCHECK(f, l) +#endif /* OSL_MEMCHECK */ + +#ifndef BCMDBGPERF +#define PERF_TRACE_START(id) do {} while (0) +#define PERF_TRACE_END(id) do {} while (0) +#define PERF_TRACE_END2(id, mycounters) do {} while (0) +#define PERF_TRACE_END3(id, mycounters, coreunit) do {} while (0) +#define UPDATE_PERF_TRACE_COUNTER(counter, val) do {} while (0) +#define ADD_PERF_TRACE_COUNTER(counter, val) do {} while (0) +#endif /* !BCMDBGPERF */ + +/* Virtual/physical address translation.
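Editor's sketch, not part of the patch: all the PKT*/MALLOC_* blocks above follow one idiom. The per-OS header included earlier gets first claim on each macro; osl.h only supplies a harmless default for whatever the port left undefined. `my_port_rxfrag` is hypothetical.

    /* In a port header (e.g. linux_osl.h), included before these defaults: */
    #define PKTISRXFRAG(osh, lb)    my_port_rxfrag(lb)

    /* osl.h then leaves the port's definition alone and only fills gaps: */
    #ifndef PKTISRXFRAG
    #define PKTISRXFRAG(osh, lb)    (0)     /* default: never a hosted rx frag */
    #endif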
*/ +#if !defined(OSL_VIRT_TO_PHYS_ADDR) + #define OSL_VIRT_TO_PHYS_ADDR(va) ((void*)(uintptr)(va)) +#endif + +#if !defined(OSL_PHYS_TO_VIRT_ADDR) + #define OSL_PHYS_TO_VIRT_ADDR(pa) ((void*)(uintptr)(pa)) +#endif + +#endif /* _osl_h_ */ diff --git a/bcmdhd.101.10.361.x/include/osl_decl.h b/bcmdhd.101.10.361.x/include/osl_decl.h new file mode 100755 index 0000000..a86805a --- /dev/null +++ b/bcmdhd.101.10.361.x/include/osl_decl.h @@ -0,0 +1,31 @@ +/* + * osl forward declarations + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _osl_decl_h_ +#define _osl_decl_h_ + +/* osl handle type forward declaration */ +typedef struct osl_info osl_t; +typedef struct osl_dmainfo osldma_t; +extern unsigned int lmtest; /* low memory test */ +#endif diff --git a/bcmdhd.101.10.361.x/include/osl_ext.h b/bcmdhd.101.10.361.x/include/osl_ext.h new file mode 100755 index 0000000..460bc3a --- /dev/null +++ b/bcmdhd.101.10.361.x/include/osl_ext.h @@ -0,0 +1,759 @@ +/* + * OS Abstraction Layer Extension - the APIs defined by the "extension" API + * are only supported by a subset of all operating systems. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _osl_ext_h_ +#define _osl_ext_h_ + +/* ---- Include Files ---------------------------------------------------- */ + +#if defined(THREADX) + #include +#else + #define OSL_EXT_DISABLED +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* ---- Constants and Types ---------------------------------------------- */ + +/* ----------------------------------------------------------------------- + * Generic OS types. 
+ */ +typedef enum osl_ext_status_t +{ + OSL_EXT_SUCCESS, + OSL_EXT_ERROR, + OSL_EXT_TIMEOUT + +} osl_ext_status_t; + +#define OSL_EXT_STATUS_DECL(status) osl_ext_status_t status; + +#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1)) + +typedef unsigned int osl_ext_time_ms_t; +typedef unsigned int osl_ext_time_us_t; + +typedef unsigned int osl_ext_event_bits_t; + +typedef unsigned int osl_ext_interrupt_state_t; + +/* ----------------------------------------------------------------------- + * Timers. + */ +typedef enum +{ + /* One-shot timer. */ + OSL_EXT_TIMER_MODE_ONCE, + + /* Periodic timer. */ + OSL_EXT_TIMER_MODE_REPEAT + +} osl_ext_timer_mode_t; + +/* User registered callback and parameter to invoke when timer expires. */ +typedef void* osl_ext_timer_arg_t; +typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg); + +/* ----------------------------------------------------------------------- + * Tasks. + */ + +/* Task entry argument. */ +typedef void* osl_ext_task_arg_t; + +/* Task entry function. */ +typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg); + +/* Abstract task priority levels. */ +typedef enum +{ + OSL_EXT_TASK_IDLE_PRIORITY, + OSL_EXT_TASK_CPUUTIL_PRIORITY, + OSL_EXT_TASK_LOW_PRIORITY, + OSL_EXT_TASK_LOW_NORMAL_PRIORITY, + OSL_EXT_TASK_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGH_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGHEST_PRIORITY, + OSL_EXT_TASK_TIME_CRITICAL_PRIORITY, + + /* This must be last. */ + OSL_EXT_TASK_NUM_PRIORITES +} osl_ext_task_priority_t; + +#ifndef OSL_EXT_DISABLED + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +/* -------------------------------------------------------------------------- +** Semaphore +*/ + +/**************************************************************************** +* Function: osl_ext_sem_create +* +* Purpose: Creates a counting semaphore object, which can subsequently be +* used for thread notification. +* +* Parameters: name (in) Name to assign to the semaphore (must be unique). +* init_cnt (in) Initial count that the semaphore should have. +* sem (out) Newly created semaphore. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was created successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_delete +* +* Purpose: Destroys a previously created semaphore object. +* +* Parameters: sem (mod) Semaphore object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_give +* +* Purpose: Increments the count associated with the semaphore. This will +* cause one thread blocked on a take to wake up. +* +* Parameters: sem (mod) Semaphore object to give. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was given successfully, or an +* error code if the semaphore could not be created. 
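Editor's sketch, not part of the patch: typical producer/consumer use of the semaphore API documented above, assuming the THREADX branch where osl_ext_sem_t is a concrete type. osl_ext_sem_take() is documented just below; the name "wkq_sem" and the 100 ms bound are illustrative.

    static void sem_example(void)
    {
        osl_ext_sem_t sem;

        if (osl_ext_sem_create("wkq_sem", 0, &sem) != OSL_EXT_SUCCESS)
            return;

        (void)osl_ext_sem_give(&sem);           /* producer: post one unit of work */

        if (osl_ext_sem_take(&sem, 100) == OSL_EXT_TIMEOUT) {
            /* consumer: nothing arrived within 100 ms */
        }

        (void)osl_ext_sem_delete(&sem);
    }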
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_take +* +* Purpose: Decrements the count associated with the semaphore. If the count +* is less than zero, then the calling task will become blocked until +* another thread does a give on the semaphore. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. +* +* Parameters: sem (mod) Semaphore object to take. +* timeout_msec (in) Number of milliseconds to wait for the +* semaphore to enter a state where it can be +* taken. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was taken successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec); + +/* -------------------------------------------------------------------------- +** Mutex +*/ + +/**************************************************************************** +* Function: osl_ext_mutex_create +* +* Purpose: Creates a mutex object, which can subsequently be used to control +* mutually exclusion of resources. +* +* Parameters: name (in) Name to assign to the mutex (must be unique). +* mutex (out) Mutex object to initialize. +* +* Returns: OSL_EXT_SUCCESS if the mutex was created successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_delete +* +* Purpose: Destroys a previously created mutex object. +* +* Parameters: mutex (mod) Mutex object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the mutex was deleted successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_acquire +* +* Purpose: Acquires the indicated mutual exclusion object. If the object is +* currently acquired by another task, then this function will wait +* for timeout_msec milli-seconds before returning with OSL_EXT_TIMEOUT. +* +* Parameters: mutex (mod) Mutex object to acquire. +* timeout_msec (in) Number of milliseconds to wait for the mutex. +* +* Returns: OSL_EXT_SUCCESS if the mutex was acquired successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec); + +/**************************************************************************** +* Function: osl_ext_mutex_release +* +* Purpose: Releases the indicated mutual exclusion object. This makes it +* available for another task to acquire. +* +* Parameters: mutex (mod) Mutex object to release. +* +* Returns: OSL_EXT_SUCCESS if the mutex was released successfully, or an +* error code if the mutex could not be created. 
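Editor's sketch, not part of the patch: a bounded critical section using the mutex API above, assuming the mutex was created elsewhere with osl_ext_mutex_create().

    static void mutex_example(osl_ext_mutex_t *mutex)
    {
        /* bound the wait so a wedged owner cannot hang this task forever */
        if (osl_ext_mutex_acquire(mutex, 50) != OSL_EXT_SUCCESS)
            return;

        /* ... critical section: touch the shared state ... */

        (void)osl_ext_mutex_release(mutex);
    }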
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex); + +/* -------------------------------------------------------------------------- +** Timers +*/ + +/**************************************************************************** +* Function: osl_ext_timer_create +* +* Purpose: Creates a timer object. +* +* Parameters: name (in) Name of timer. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* func (in) Callback function to invoke on timer expiry. +* arg (in) Argument to callback function. +* timer (out) Timer object to create. +* +* Note: The function callback occurs in interrupt context. The application is +* required to provide context switch for the callback if required. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode, + osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_delete +* +* Purpose: Destroys a previously created timer object. +* +* Parameters: timer (mod) Timer object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the timer was deleted successfully, or an +* error code if the timer could not be deleted. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_start +* +* Purpose: Start a previously created timer object. +* +* Parameters: timer (in) Timer object. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* +* Returns: OSL_EXT_SUCCESS if the timer was started successfully, or an +* error code if the timer could not be started. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_start(osl_ext_timer_t *timer, + osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode); + +/**************************************************************************** +* Function: osl_ext_timer_start_us +* +* Purpose: Start a previously created timer object. +* +* Parameters: timer (in) Timer object. +* timeout_usec (in) Invoke callback after this number of microseconds. +* mode (in) One-shot or periodic timer. +* +* Returns: OSL_EXT_SUCCESS if the timer was started successfully, or an +* error code if the timer could not be started. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_start_us(osl_ext_timer_t *timer, + osl_ext_time_us_t timeout_usec, osl_ext_timer_mode_t mode); + +/**************************************************************************** +* Function: osl_ext_timer_stop +* +* Purpose: Stop a previously created timer object. +* +* Parameters: timer (in) Timer object. +* +* Returns: OSL_EXT_SUCCESS if the timer was stopped successfully, or an +* error code if the timer could not be stopped.
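Editor's sketch, not part of the patch: creating a 100 ms periodic timer with the API above. Per the note on osl_ext_timer_create(), the callback runs in interrupt context, so it should stay short and defer real work to a task. Names and periods are illustrative.

    static void beacon_poll(osl_ext_timer_arg_t arg)
    {
        /* interrupt context: set a flag / give a semaphore, nothing heavy */
    }

    static void timer_example(osl_ext_timer_t *timer)
    {
        if (osl_ext_timer_create("poll", 100, OSL_EXT_TIMER_MODE_REPEAT,
                                 beacon_poll, NULL, timer) != OSL_EXT_SUCCESS)
            return;

        /* later: re-arm as a 10 ms one-shot, then tear down */
        (void)osl_ext_timer_stop(timer);
        (void)osl_ext_timer_start(timer, 10, OSL_EXT_TIMER_MODE_ONCE);
        (void)osl_ext_timer_delete(timer);
    }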
+***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_stop(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_time_get +* +* Purpose: Returns incrementing time counter. +* +* Parameters: None. +* +* Returns: Returns incrementing time counter in msec. +***************************************************************************** +*/ +osl_ext_time_ms_t osl_ext_time_get(void); + +/* -------------------------------------------------------------------------- +** Tasks +*/ + +/**************************************************************************** +* Function: osl_ext_task_create +* +* Purpose: Create a task. +* +* Parameters: name (in) Pointer to task string descriptor. +* stack (in) Pointer to stack. NULL to allocate. +* stack_size (in) Stack size - in bytes. +* priority (in) Abstract task priority. +* func (in) A pointer to the task entry point function. +* arg (in) Value passed into task entry point function. +* task (out) Task to create. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. +***************************************************************************** +*/ + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \ + (arg), TRUE, (task)) + +/**************************************************************************** +* Function: osl_ext_task_create_ex +* +* Purpose: Create a task with autostart option. +* +* Parameters: name (in) Pointer to task string descriptor. +* stack (in) Pointer to stack. NULL to allocate. +* stack_size (in) Stack size - in bytes. +* priority (in) Abstract task priority. +* func (in) A pointer to the task entry point function. +* arg (in) Value passed into task entry point function. +* autostart (in) TRUE to start task after creation. +* task (out) Task to create. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. +***************************************************************************** +*/ + +osl_ext_status_t osl_ext_task_create_ex(char* name, + void *stack, unsigned int stack_size, osl_ext_task_priority_t priority, + osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg, + bool autostart, osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_change_priority +* +* Purpose: Change priority of a task. +* +* Parameters: task (mod) Task to modify. +* new_priority (in) New task priority. +* old_priority (out) Old task priority. +* +* Returns: OSL_EXT_SUCCESS if the priority was changed successfully, or an +* error code if the priority could not be changed. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_change_priority(osl_ext_task_t *task, + osl_ext_task_priority_t new_priority, osl_ext_task_priority_t *old_priority); + +/**************************************************************************** +* Function: osl_ext_task_delete +* +* Purpose: Destroy a task. +* +* Parameters: task (mod) Task to destroy. +* +* Returns: OSL_EXT_SUCCESS if the task was deleted successfully, or an +* error code if the task could not be deleted.
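Editor's sketch, not part of the patch: spawning an autostarted task through the osl_ext_task_create() wrapper above (timeslice 0, autostart TRUE via osl_ext_task_create_ex). The name, 4 KB stack size, and priority are illustrative.

    static void rx_task(osl_ext_task_arg_t arg)
    {
        /* task entry: loop on a queue or semaphore until told to exit */
    }

    static osl_ext_task_t rx_thread;

    static osl_ext_status_t task_example(void)
    {
        /* NULL stack asks the OS to allocate one */
        return osl_ext_task_create("rx", NULL, 4096,
                OSL_EXT_TASK_NORMAL_PRIORITY, rx_task, NULL, &rx_thread);
    }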
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_current +* +* Purpose: Returns current running task. +* +* Parameters: None. +* +* Returns: osl_ext_task_t of current running task. +***************************************************************************** +*/ +osl_ext_task_t *osl_ext_task_current(void); + +/**************************************************************************** +* Function: osl_ext_task_yield +* +* Purpose: Yield the CPU to other tasks of the same priority that are +* ready-to-run. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_yield(void); + +/**************************************************************************** +* Function: osl_ext_task_suspend +* +* Purpose: Suspend a task. +* +* Parameters: task (mod) Task to suspend. +* +* Returns: OSL_EXT_SUCCESS if the task was suspended successfully, or an +* error code if the task could not be suspended. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_suspend(osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_resume +* +* Purpose: Resume a task. +* +* Parameters: task (mod) Task to resume. +* +* Returns: OSL_EXT_SUCCESS if the task was resumed successfully, or an +* error code if the task could not be resumed. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_resume(osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_enable_stack_check +* +* Purpose: Enable task stack checking. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_enable_stack_check(void); + +/* -------------------------------------------------------------------------- +** Queue +*/ + +/**************************************************************************** +* Function: osl_ext_queue_create +* +* Purpose: Create a queue. +* +* Parameters: name (in) Name to assign to the queue (must be unique). +* buffer (in) Queue buffer. NULL to allocate. +* size (in) Size of the queue. +* queue (out) Newly created queue. +* +* Returns: OSL_EXT_SUCCESS if the queue was created successfully, or an +* error code if the queue could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_create(char *name, + void *queue_buffer, unsigned int queue_size, + osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_delete +* +* Purpose: Destroys a previously created queue object. +* +* Parameters: queue (mod) Queue object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the queue was deleted successfully, or an +* error code if the queue could not be deleted.
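Editor's sketch, not part of the patch: passing a pointer through the queue API above. osl_ext_queue_send() does not block on a full queue, while osl_ext_queue_receive() blocks up to the given timeout; the queue name, depth, and payload are illustrative.

    static int event_payload;

    static void queue_example(osl_ext_queue_t *queue)
    {
        void *msg;

        if (osl_ext_queue_create("evq", NULL, 16, queue) != OSL_EXT_SUCCESS)
            return;

        (void)osl_ext_queue_send(queue, &event_payload);    /* non-blocking */

        if (osl_ext_queue_receive(queue, 100, &msg) == OSL_EXT_SUCCESS) {
            /* msg now holds the queued pointer (&event_payload) */
        }

        (void)osl_ext_queue_delete(queue);
    }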
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_send +* +* Purpose: Send/add data to the queue. This function will not block the +* calling thread if the queue is full. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_send_synchronous +* +* Purpose: Send/add data to the queue. This function will block the +* calling thread until the data is dequeued. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_receive +* +* Purpose: Receive/remove data from the queue. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. +* +* Parameters: queue (mod) Queue object. +* timeout_msec (in) Number of milliseconds to wait for the +* data from the queue. +* data (out) Data pointer received/removed from the queue. +* +* Returns: OSL_EXT_SUCCESS if the data was dequeued successfully, or an +* error code if the data could not be dequeued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue, + osl_ext_time_ms_t timeout_msec, void **data); + +/**************************************************************************** +* Function: osl_ext_queue_count +* +* Purpose: Returns the number of items in the queue. +* +* Parameters: queue (mod) Queue object. +* count (out) Number of items in the queue. +* +* Returns: OSL_EXT_SUCCESS if the count was returned successfully, or an +* error code if the count is invalid. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count); + +/* -------------------------------------------------------------------------- +** Event +*/ + +/**************************************************************************** +* Function: osl_ext_event_create +* +* Purpose: Creates an event object, which can subsequently be used to +* notify and trigger tasks. +* +* Parameters: name (in) Name to assign to the event (must be unique). +* event (out) Event object to initialize. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created.
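Editor's sketch, not part of the patch: signalling and waiting on event bits with osl_ext_event_get()/osl_ext_event_set(), which are declared just below. The EVT_* bit names and the 200 ms timeout are illustrative.

    #define EVT_SCAN_DONE   0x1u
    #define EVT_ASSOC_DONE  0x2u

    static void event_example(osl_ext_event_t *event)
    {
        osl_ext_event_bits_t bits;

        if (osl_ext_event_create("wl_evt", event) != OSL_EXT_SUCCESS)
            return;

        (void)osl_ext_event_set(event, EVT_SCAN_DONE);  /* signalling side */

        /* wait up to 200 ms for either bit */
        if (osl_ext_event_get(event, EVT_SCAN_DONE | EVT_ASSOC_DONE,
                              200, &bits) == OSL_EXT_SUCCESS) {
            /* bits reports which of the requested events fired */
        }

        (void)osl_ext_event_delete(event);
    }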
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_delete +* +* Purpose: Destroys a previously created event object. +* +* Parameters: event (mod) Event object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_get +* +* Purpose: Get event from specified event object. +* +* Parameters: event (mod) Event object to get. +* requested (in) Requested event to get. +* timeout_msec (in) Number of milliseconds to wait for the event. +* event_bits (out) Event bits retrieved. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event, + osl_ext_event_bits_t requested, osl_ext_time_ms_t timeout_msec, + osl_ext_event_bits_t *event_bits); + +/**************************************************************************** +* Function: osl_ext_event_set +* +* Purpose: Set event of specified event object. +* +* Parameters: event (mod) Event object to set. +* event_bits (in) Event bits to set. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event, + osl_ext_event_bits_t event_bits); + +/* -------------------------------------------------------------------------- +** Interrupt +*/ + +/**************************************************************************** +* Function: osl_ext_interrupt_disable +* +* Purpose: Disable CPU interrupt. +* +* Parameters: None. +* +* Returns: The interrupt state before disable for restoring interrupt. +***************************************************************************** +*/ +osl_ext_interrupt_state_t osl_ext_interrupt_disable(void); + +/**************************************************************************** +* Function: osl_ext_interrupt_restore +* +* Purpose: Restore CPU interrupt state. +* +* Parameters: state (in) Interrupt state to restore returned from +* osl_ext_interrupt_disable(). +* +* Returns: None. +***************************************************************************** +*/ +void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state); + +#else + +/* ---- Constants and Types ---------------------------------------------- */ + +/* Interrupt control */ +#define OSL_INTERRUPT_SAVE_AREA +#define OSL_DISABLE +#define OSL_RESTORE + +/* Semaphore. */ +#define osl_ext_sem_t +#define OSL_EXT_SEM_DECL(sem) + +/* Mutex. */ +#define osl_ext_mutex_t +#define OSL_EXT_MUTEX_DECL(mutex) + +/* Timer. */ +#define osl_ext_timer_t +#define OSL_EXT_TIMER_DECL(timer) + +/* Task. */ +#define osl_ext_task_t void +#define OSL_EXT_TASK_DECL(task) + +/* Queue. */ +#define osl_ext_queue_t +#define OSL_EXT_QUEUE_DECL(queue) + +/* Event. 
*/ +#define osl_ext_event_t +#define OSL_EXT_EVENT_DECL(event) + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +#define osl_ext_sem_create(name, init_cnt, sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_delete(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_give(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_take(sem, timeout_msec) (OSL_EXT_SUCCESS) + +#define osl_ext_mutex_create(name, mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_delete(mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_acquire(mutex, timeout_msec) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_release(mutex) (OSL_EXT_SUCCESS) + +#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \ + (OSL_EXT_SUCCESS) +#define osl_ext_timer_delete(timer) (OSL_EXT_SUCCESS) +#define osl_ext_timer_start(timer, timeout_msec, mode) (OSL_EXT_SUCCESS) +#define osl_ext_timer_stop(timer) (OSL_EXT_SUCCESS) +#define osl_ext_time_get() (0) + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + (OSL_EXT_SUCCESS) +#define osl_ext_task_delete(task) (OSL_EXT_SUCCESS) +#define osl_ext_task_current() (NULL) +#define osl_ext_task_yield() (OSL_EXT_SUCCESS) +#define osl_ext_task_enable_stack_check() (OSL_EXT_SUCCESS) + +#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_delete(queue) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send_synchronous(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_receive(queue, timeout_msec, data) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_count(queue, count) (OSL_EXT_SUCCESS) + +#define osl_ext_event_create(name, event) (OSL_EXT_SUCCESS) +#define osl_ext_event_delete(event) (OSL_EXT_SUCCESS) +#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \ + (OSL_EXT_SUCCESS) +#define osl_ext_event_set(event, event_bits) (OSL_EXT_SUCCESS) + +#define osl_ext_interrupt_disable(void) (0) +#define osl_ext_interrupt_restore(state) + +#endif /* OSL_EXT_DISABLED */ + +#ifdef __cplusplus +} +#endif + +#endif /* _osl_ext_h_ */ diff --git a/bcmdhd.101.10.361.x/include/p2p.h b/bcmdhd.101.10.361.x/include/p2p.h new file mode 100755 index 0000000..727fe96 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/p2p.h @@ -0,0 +1,695 @@ +/* + * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _P2P_H_ +#define _P2P_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif +#include +#include <802.11.h> + +/* This marks the start of a packed structure section. 
*/ +#include <packed_section_start.h> + +/* WiFi P2P OUI values */ +#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */ + +#define P2P_IE_ID 0xdd /* P2P IE element ID */ + +/* WiFi P2P IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie { + uint8 id; /* IE ID: 0xDD */ + uint8 len; /* IE length */ + uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */ + uint8 oui_type; /* Identifies P2P version: P2P_VER */ + uint8 subelts[1]; /* variable length subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ie wifi_p2p_ie_t; + +#define P2P_IE_FIXED_LEN 6 + +#define P2P_ATTR_ID_OFF 0 +#define P2P_ATTR_LEN_OFF 1 +#define P2P_ATTR_DATA_OFF 3 + +#define P2P_ATTR_ID_LEN 1 /* ID field length */ +#define P2P_ATTR_LEN_LEN 2 /* length field length */ +#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field spec 1.02 */ + +#define P2P_WFDS_HASH_LEN 6 +#define P2P_WFDS_MAX_SVC_NAME_LEN 32 + +/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */ +#define P2P_SEID_STATUS 0 /* Status */ +#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */ +#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */ +#define P2P_SEID_DEV_ID 3 /* P2P Device ID */ +#define P2P_SEID_INTENT 4 /* Group Owner Intent */ +#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */ +#define P2P_SEID_CHANNEL 6 /* Listen channel */ +#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */ +#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */ +#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */ +#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */ +#define P2P_SEID_CHAN_LIST 11 /* Channel List */ +#define P2P_SEID_ABSENCE 12 /* Notice of Absence */ +#define P2P_SEID_DEV_INFO 13 /* Device Info */ +#define P2P_SEID_GROUP_INFO 14 /* Group Info */ +#define P2P_SEID_GROUP_ID 15 /* Group ID */ +#define P2P_SEID_P2P_IF 16 /* P2P Interface */ +#define P2P_SEID_OP_CHANNEL 17 /* Operating Channel */ +#define P2P_SEID_INVITE_FLAGS 18 /* Invitation Flags */ +#define P2P_SEID_SERVICE_HASH 21 /* Service hash */ +#define P2P_SEID_SESSION 22 /* Session information */ +#define P2P_SEID_CONNECT_CAP 23 /* Connection capability */ +#define P2P_SEID_ADVERTISE_ID 24 /* Advertisement ID */ +#define P2P_SEID_ADVERTISE_SERVICE 25 /* Advertised service */ +#define P2P_SEID_SESSION_ID 26 /* Session ID */ +#define P2P_SEID_FEATURE_CAP 27 /* Feature capability */ +#define P2P_SEID_PERSISTENT_GROUP 28 /* Persistent group */ +#define P2P_SEID_SESSION_INFO_RESP 29 /* Session Information Response */ +#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */ + +#define P2P_SE_VS_ID_SERVICES 0x1b /* BRCM proprietary subel: L2 Services */ + +/* WiFi P2P IE subelement: P2P Capability (capabilities info) */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 dev; /* Device Capability Bitmap */ + uint8 group; /* Group Capability Bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t; + +/* P2P Capability subelement's Device Capability Bitmap bit values */ +#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */ +#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */ +#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */ +#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */ +#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */ +#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */ + +/* P2P Capability subelement's Group Capability Bitmap bit
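Editor's sketch, not part of the patch: walking the subelements of a wifi_p2p_ie_t using the P2P_ATTR_* offsets above. Each subelement is a 1-byte ID plus a 2-byte length (P2P_ATTR_HDR_LEN), with the length little-endian per the P2P spec; p2p_find_subelt is the editor's helper, not a driver function.

    static const uint8 *
    p2p_find_subelt(const wifi_p2p_ie_t *ie, uint8 seid, uint16 *selen)
    {
        /* ie->len covers OUI[3] + oui_type + subelts, so subtract 4 */
        const uint8 *p = ie->subelts;
        int remain = (int)ie->len - 4;

        while (remain >= P2P_ATTR_HDR_LEN) {
            uint16 len = p[P2P_ATTR_LEN_OFF] |
                    (p[P2P_ATTR_LEN_OFF + 1] << 8);   /* LE per P2P spec */
            if (remain - P2P_ATTR_HDR_LEN < len)
                break;              /* malformed: length overruns the IE */
            if (p[P2P_ATTR_ID_OFF] == seid) {
                *selen = len;
                return p + P2P_ATTR_DATA_OFF;         /* subelement body */
            }
            p += P2P_ATTR_HDR_LEN + len;
            remain -= P2P_ATTR_HDR_LEN + len;
        }
        return NULL;
    }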
values */ +#define P2P_CAPSE_GRP_OWNER 0x1 /* P2P Group Owner */ +#define P2P_CAPSE_PERSIST_GRP 0x2 /* Persistent P2P Group */ +#define P2P_CAPSE_GRP_LIMIT 0x4 /* P2P Group Limit */ +#define P2P_CAPSE_GRP_INTRA_BSS 0x8 /* Intra-BSS Distribution */ +#define P2P_CAPSE_GRP_X_CONNECT 0x10 /* Cross Connection */ +#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */ +#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */ + +/* WiFi P2P IE subelement: Group Owner Intent */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTENT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 intent; /* Intent Value 0...15 (0=legacy 15=master only) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t; + +/* WiFi P2P IE subelement: Configuration Timeout */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CFG_TIMEOUT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 go_tmo; /* GO config timeout in units of 10 ms */ + uint8 client_tmo; /* Client config timeout in units of 10 ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t; + +/* WiFi P2P IE subelement: Listen Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 op_class; /* Operating Class */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t; + +/* WiFi P2P IE subelement: P2P Group BSSID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GRP_BSSID */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P group bssid */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t; + +/* WiFi P2P IE subelement: P2P Group ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GROUP_ID */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P device address */ + uint8 ssid[1]; /* ssid. device id. 
variable length */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t; + +/* WiFi P2P IE subelement: P2P Interface */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_IF */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P device address */ + uint8 ifaddrs; /* P2P Interface Address count */ + uint8 ifaddr[1][6]; /* P2P Interface Address list */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t; + +/* WiFi P2P IE subelement: Status */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 status; /* Status Code: P2P_STATSE_* */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t; + +/* Status subelement Status Code definitions */ +#define P2P_STATSE_SUCCESS 0 + /* Success */ +#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1 + /* Failed, information currently unavailable */ +#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL + /* Old name for above in P2P spec 1.08 and older */ +#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2 + /* Failed, incompatible parameters */ +#define P2P_STATSE_FAIL_LIMIT_REACHED 3 + /* Failed, limit reached */ +#define P2P_STATSE_FAIL_INVALID_PARAMS 4 + /* Failed, invalid parameters */ +#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5 + /* Failed, unable to accommodate request */ +#define P2P_STATSE_FAIL_PROTO_ERROR 6 + /* Failed, previous protocol error or disruptive behaviour */ +#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7 + /* Failed, no common channels */ +#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8 + /* Failed, unknown P2P Group */ +#define P2P_STATSE_FAIL_INTENT 9 + /* Failed, both peers indicated Intent 15 in GO Negotiation */ +#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10 + /* Failed, incompatible provisioning method */ +#define P2P_STATSE_FAIL_USER_REJECT 11 + /* Failed, rejected by user */ +#define P2P_STATSE_SUCCESS_USER_ACCEPT 12 + /* Success, accepted by user */ + +/* WiFi P2P IE attribute: Extended Listen Timing */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s { + uint8 eltId; /* ID: P2P_SEID_XT_TIMING */ + uint8 len[2]; /* length not including eltId, len fields */ + uint8 avail[2]; /* availability period */ + uint8 interval[2]; /* availability interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t; + +#define P2P_EXT_MIN 10 /* minimum 10ms */ + +/* WiFi P2P IE subelement: Intended P2P Interface Address */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTINTADDR */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* intended P2P interface MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t; + +/* WiFi P2P IE subelement: Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 band; /* Regulatory Class (band) */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t; + +/* Channel Entry structure within the Channel List SE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s { + uint8 band; /* Regulatory Class (band) */ + uint8 num_channels; /* # of channels in the channel list */ + uint8 channels[WL_NUMCHANNELS]; /* Channel List */ +}
BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t; +#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2 + +/* WiFi P2P IE subelement: Channel List */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHAN_LIST */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 num_entries; /* # of channel entries */ + wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES]; + /* Channel Entry List */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t; + +/* WiFi Primary Device Type structure */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s { + uint16 cat_id; /* Category ID */ + uint8 OUI[3]; /* WFA OUI: 0x0050F2 */ + uint8 oui_type; /* WPS_OUI_TYPE */ + uint16 sub_cat_id; /* Sub Category ID */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t; + +/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category + * maximum values for each category + */ +#define P2P_DISE_SUBCATEGORY_MINVAL 1 +#define P2P_DISE_CATEGORY_COMPUTER 1 +#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL 8 +#define P2P_DISE_CATEGORY_INPUT_DEVICE 2 +#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL 9 +#define P2P_DISE_CATEGORY_PRINTER 3 +#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL 5 +#define P2P_DISE_CATEGORY_CAMERA 4 +#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL 4 +#define P2P_DISE_CATEGORY_STORAGE 5 +#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL 1 +#define P2P_DISE_CATEGORY_NETWORK_INFRA 6 +#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL 4 +#define P2P_DISE_CATEGORY_DISPLAY 7 +#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL 4 +#define P2P_DISE_CATEGORY_MULTIMEDIA 8 +#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL 6 +#define P2P_DISE_CATEGORY_GAMING 9 +#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL 5 +#define P2P_DISE_CATEGORY_TELEPHONE 10 +#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL 5 +#define P2P_DISE_CATEGORY_AUDIO 11 +#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL 6 + +/* WiFi P2P IE's Device Info subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_DEVINFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P Device MAC address */ + uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pri_devtype[8]; /* Primary Device Type */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t; + +#define P2P_DEV_TYPE_LEN 8 + +/* WiFi P2P IE's Group Info subelement Client Info Descriptor */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s { + uint8 len; + uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */ + uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */ + uint8 devcap; /* Device Capability */ + uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */ + uint8 secdts; /* Number of Secondary Device Types */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t; + +/* WiFi P2P IE's Device ID subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s { + uint8 eltId; + uint8 len[2]; + struct ether_addr addr; /* P2P Device MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t; + +/* WiFi P2P IE subelement: P2P Manageability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */ + 
uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mg_bitmap; /* manageability bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t; +/* mg_bitmap field bit values */ +#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1 /* AP supports Managed P2P Device */ + +/* WiFi P2P IE subelement: Group Info */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GROUP_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t; + +/* WiFi IE subelement: Operating Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_OP_CHANNEL */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 op_class; /* Operating Class */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t; + +/* WiFi IE subelement: INVITATION FLAGS */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INVITE_FLAGS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 flags; /* Flags */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t; + +/* WiFi P2P IE subelement: Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SERVICE_HASH */ + uint8 len[2]; /* SE length not including eltId, len fields, + * in multiples of 6 bytes + */ + uint8 hash[1]; /* Variable length - SHA256 hash of + * service names (may contain more than one hash) + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t; + +/* WiFi P2P IE subelement: Service Instance Data */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SESSION */ + uint8 len[2]; /* SE length not including eltId, len */ + uint8 ssn_info[1]; /* Variable length - Session information as specified by + * the service layer, type matches serv.
name + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t; + +/* WiFi P2P IE subelement: Connection capability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CONNECT_CAP */ + uint8 len[2]; /* SE length not including eltId, len */ + uint8 conn_cap; /* 1byte capability as specified by the + * service layer, valid bitmask/values + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t; + +/* WiFi P2P IE subelement: Advertisement ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_ID */ + uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */ + uint8 advt_id[4]; /* 4byte Advertisement ID of the peer device sent in + * PROV Disc in Network byte order + */ + uint8 advt_mac[6]; /* P2P device address of the service advertiser */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t; + +/* WiFi P2P IE subelement: Advertise Service Info */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s { + uint8 advt_id[4]; /* SE Advertise ID for the service */ + uint16 nw_cfg_method; /* SE Network Config method for the service */ + uint8 serv_name_len; /* SE length of the service name */ + uint8 serv_name[1]; /* Variable length service name field */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t; + +/* WiFi P2P IE subelement: Advertise Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s { + uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_SERVICE */ + uint8 len[2]; /* SE length not including eltId, len fields; a multiple of + * the wifi_p2p_adv_serv_info_t entry lengths + */ + wifi_p2p_adv_serv_info_t p_advt_serv_info[1]; /* Variable length + of multiple instances + of the advertise service info + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t; + +/* WiFi P2P IE subelement: Session ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SESSION_ID */ + uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */ + uint8 ssn_id[4]; /* 4byte Session ID of the peer device sent in + * PROV Disc in Network byte order + */ + uint8 ssn_mac[6]; /* P2P device address of the seeker - session mac */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t; + +#define P2P_ADVT_SERV_SE_FIXED_LEN 3 /* Includes only the element ID and len */ +#define P2P_ADVT_SERV_INFO_FIXED_LEN 7 /* Per ADV Service Instance advt_id + + * nw_config_method + serv_name_len + */ + +/* WiFi P2P Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame { + uint8 category; /* P2P_AF_CATEGORY */ + uint8 OUI[3]; /* OUI - P2P_OUI */ + uint8 type; /* OUI Type - P2P_VER */ + uint8 subtype; /* OUI Subtype - P2P_AF_* */ + uint8 dialog_token; /* nonzero, identifies req/resp transaction */ + uint8 elts[1]; /* Variable length information elements.
Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t; +#define P2P_AF_CATEGORY 0x7f + +#define P2P_AF_FIXED_LEN 7 + +/* WiFi P2P Action Frame OUI Subtypes */ +#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */ +#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */ +#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */ +#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */ + +/* WiFi P2P Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame { + uint8 category; /* P2P_PUB_AF_CATEGORY */ + uint8 action; /* P2P_PUB_AF_ACTION */ + uint8 oui[3]; /* P2P_OUI */ + uint8 oui_type; /* OUI type - P2P_VER */ + uint8 subtype; /* OUI subtype - P2P_TYPE_* */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 elts[1]; /* Variable length information elements. Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t; +#define P2P_PUB_AF_FIXED_LEN 8 +#define P2P_PUB_AF_CATEGORY 0x04 +#define P2P_PUB_AF_ACTION 0x09 + +/* WiFi P2P Public Action Frame OUI Subtypes */ +#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */ +#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */ +#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */ +#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */ +#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */ +#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */ +#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */ +#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */ +#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */ +#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */ + +/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */ +#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ +#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP +#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF + +/* WiFi P2P IE subelement: Notice of Absence */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc { + uint8 cnt_type; /* Count/Type */ + uint32 duration; /* Duration */ + uint32 interval; /* Interval */ + uint32 start; /* Start Time */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t; + +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se { + uint8 eltId; /* Subelement ID */ + uint8 len[2]; /* Length */ + uint8 index; /* Index */ + uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */ + wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t; + +#define P2P_NOA_SE_FIXED_LEN 5 + +#define P2P_NOA_SE_MAX_DESC 2 /* max NoA descriptors in presence request */ + +/* cnt_type field values */ +#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should not be used */ +#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */ +#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */ +#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */ + +/* ops_ctw_parms field values */ +#define P2P_NOA_CTW_MASK 0x7f +#define P2P_NOA_OPS_MASK 0x80 +#define P2P_NOA_OPS_SHIFT 7 + +#define P2P_CTW_MIN 10 /* minimum 10TU */ + +/* + * P2P Service Discovery related + */ +#define P2PSD_ACTION_CATEGORY 0x04 + /* Public action frame */ +#define P2PSD_ACTION_ID_GAS_IREQ 0x0a + /* Action value for GAS Initial Request AF */ +#define P2PSD_ACTION_ID_GAS_IRESP 0x0b + /* Action value
for GAS Initial Response AF */ +#define P2PSD_ACTION_ID_GAS_CREQ 0x0c + /* Action value for GAS Comeback Request AF */ +#define P2PSD_ACTION_ID_GAS_CRESP 0x0d + /* Action value for GAS Comeback Response AF */ +#define P2PSD_AD_EID 0x6c + /* Advertisement Protocol IE ID */ +#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00 + /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */ +#define P2PSD_ADP_PROTO_ID 0x00 + /* Advertisement Protocol ID. Always 0 for P2P SD */ +#define P2PSD_GAS_OUI P2P_OUI + /* WFA OUI */ +#define P2PSD_GAS_OUI_SUBTYPE P2P_VER + /* OUI Subtype for GAS IE */ +#define P2PSD_GAS_NQP_INFOID 0xDDDD + /* NQP Query Info ID: 56797 */ +#define P2PSD_GAS_COMEBACKDEALY 0x00 + /* Not used in the Native GAS protocol */ + +/* Service Protocol Type */ +typedef enum p2psd_svc_protype { + SVC_RPOTYPE_ALL = 0, + SVC_RPOTYPE_BONJOUR = 1, + SVC_RPOTYPE_UPNP = 2, + SVC_RPOTYPE_WSD = 3, + SVC_RPOTYPE_WFDS = 11, + SVC_RPOTYPE_VENDOR = 255 +} p2psd_svc_protype_t; + +/* Service Discovery response status code */ +typedef enum { + P2PSD_RESP_STATUS_SUCCESS = 0, + P2PSD_RESP_STATUS_PROTYPE_NA = 1, + P2PSD_RESP_STATUS_DATA_NA = 2, + P2PSD_RESP_STATUS_BAD_REQUEST = 3 +} p2psd_resp_status_t; + +/* Advertisement Protocol IE tuple field */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl { + uint8 llm_pamebi; /* Query Response Length Limit bit 0-6, set to 0 plus + * Pre-Associated Message Exchange BSSID Independent bit 7, set to 0 + */ + uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t; + +/* Advertisement Protocol IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie { + uint8 id; /* IE ID: 0x6c - 108 */ + uint8 len; /* IE length */ + wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. 
Only one + * tuple is defined for P2P Service Discovery + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t; + +/* NQP Vendor-specific Content */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc { + uint8 oui_subtype; /* OUI Subtype: 0x09 */ + uint16 svc_updi; /* Service Update Indicator */ + uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request, + * wifi_p2psd_qresp_tlv_t type for service response + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t; + +/* Service Request TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t; + +/* Query Request Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Length of service request TLV, 5 plus the size of request data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t; + +/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame { + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qreq_len; /* Query Request Length */ + uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t; + +/* Service Response TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 status; /* Value defined in Table 57 of P2P spec. 
*/ + uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t; + +/* Query Response Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Length of service response TLV, 6 plus the size of resp data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t; + +/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t; + +/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint8 fragment_id; /* Fragmentation ID */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t; + +/* Wi-Fi GAS Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame { + uint8 category; /* 0x04 Public Action Frame */ + uint8 action; /* 0x6c Advertisement Protocol */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t + * or wifi_p2psd_gas_iresp_frame_t format + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t; + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _P2P_H_ */ diff --git a/bcmdhd.101.10.361.x/include/packed_section_end.h b/bcmdhd.101.10.361.x/include/packed_section_end.h new file mode 100755 index 0000000..fcdad85 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/packed_section_end.h @@ -0,0 +1,62 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. In order to handle all + * cases, packed structures should be declared as: + * + * #include <packed_section_start.h> + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include <packed_section_end.h> + * + * + * Copyright (C) 2020, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is NOT defined at this + * point, then there is a missing include of packed_section_start.h. + */ +#ifdef BWL_PACKED_SECTION + #undef BWL_PACKED_SECTION +#else + #error "BWL_PACKED_SECTION is NOT defined!" +#endif + +#if defined(_MSC_VER) +#pragma warning(disable:4103) +#pragma pack(pop) +#endif + +#if defined(__GNUC__) && defined(EFI) +#pragma pack(pop) +#endif + +/* Compiler-specific directives for structure packing are declared in + * packed_section_start.h. This marks the end of the structure packing section, + * so, undef them here. + */ +#undef BWL_PRE_PACKED_STRUCT +#undef BWL_POST_PACKED_STRUCT diff --git a/bcmdhd.101.10.361.x/include/packed_section_start.h b/bcmdhd.101.10.361.x/include/packed_section_start.h new file mode 100755 index 0000000..d6c35a2 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/packed_section_start.h @@ -0,0 +1,117 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. In order to handle all + * cases, packed structures should be declared as: + * + * #include <packed_section_start.h> + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include <packed_section_end.h> + * + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software.
+ * + * + * <> + */ + +/* EFI does not support STATIC_ASSERT */ +#if defined(EFI) +#define _alignment_test_ +#endif /* EFI */ + +#ifndef _alignment_test_ +#define _alignment_test_ + +/* ASSERT default packing */ +typedef struct T4 { + uint8 a; + uint32 b; + uint16 c; + uint8 d; +} T4_t; + +/* 4 byte alignment support */ +/* +* a . . . +* b b b b +* c c d . +*/ + +/* + * Below function is meant to verify that this file is compiled with the default alignment of 4. + * Function will fail to compile if the condition is not met. + */ +#ifdef __GNUC__ +#define VARIABLE_IS_NOT_USED __attribute__ ((unused)) +#else +#define VARIABLE_IS_NOT_USED +#endif +static void alignment_test(void); +static void +VARIABLE_IS_NOT_USED alignment_test(void) +{ + /* verify 4 byte alignment support */ + STATIC_ASSERT(sizeof(T4_t) == 12); +} +#endif /* _alignment_test_ */ + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is already defined at this + * point, then there is a missing include of packed_section_end.h. + */ +#ifdef BWL_PACKED_SECTION + #error "BWL_PACKED_SECTION is already defined!" +#else + #define BWL_PACKED_SECTION +#endif + +#if defined(BWL_DEFAULT_PACKING) + /* generate an error if BWL_DEFAULT_PACKING is defined */ + #error "BWL_DEFAULT_PACKING not supported any more." +#endif /* BWL_DEFAULT_PACKING */ + +#if defined(_MSC_VER) +#pragma warning(disable:4103) +#pragma pack(push) +#pragma pack(1) +#endif + +#if defined(__GNUC__) && defined(EFI) +#pragma pack(push) +#pragma pack(1) +#endif + +/* Declare compiler-specific directives for structure packing. */ +#if defined(_MSC_VER) + #define BWL_PRE_PACKED_STRUCT + #define BWL_POST_PACKED_STRUCT +#elif defined(__GNUC__) || defined(__lint) + #define BWL_PRE_PACKED_STRUCT + #define BWL_POST_PACKED_STRUCT __attribute__ ((packed)) +#elif defined(__CC_ARM) + #define BWL_PRE_PACKED_STRUCT __packed + #define BWL_POST_PACKED_STRUCT +#else + #error "Unknown compiler!" +#endif diff --git a/bcmdhd.101.10.361.x/include/pcicfg.h b/bcmdhd.101.10.361.x/include/pcicfg.h new file mode 100755 index 0000000..663be79 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/pcicfg.h @@ -0,0 +1,730 @@ +/* + * pcicfg.h: PCI configuration constants and structures. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software.
+ * + * + * <> + */ + +#ifndef _h_pcicfg_ +#define _h_pcicfg_ + +/* The following inside ifndef's so we don't collide with NTDDK.H */ +#ifndef PCI_MAX_BUS +#define PCI_MAX_BUS 0x100 +#endif +#ifndef PCI_MAX_DEVICES +#define PCI_MAX_DEVICES 0x20 +#endif +#ifndef PCI_MAX_FUNCTION +#define PCI_MAX_FUNCTION 0x8 +#endif + +#ifndef PCI_INVALID_VENDORID +#define PCI_INVALID_VENDORID 0xffff +#endif +#ifndef PCI_INVALID_DEVICEID +#define PCI_INVALID_DEVICEID 0xffff +#endif + +/* Convert between bus-slot-function-register and config addresses */ + +#define PCICFG_BUS_SHIFT 16 /* Bus shift */ +#define PCICFG_SLOT_SHIFT 11 /* Slot shift */ +#define PCICFG_FUN_SHIFT 8 /* Function shift */ +#define PCICFG_OFF_SHIFT 0 /* Register shift */ + +#define PCICFG_BUS_MASK 0xff /* Bus mask */ +#define PCICFG_SLOT_MASK 0x1f /* Slot mask */ +#define PCICFG_FUN_MASK 7 /* Function mask */ +#define PCICFG_OFF_MASK 0xff /* Register mask */ + +#define PCI_CONFIG_ADDR(b, s, f, o) \ + ((((b) & PCICFG_BUS_MASK) << PCICFG_BUS_SHIFT) \ + | (((s) & PCICFG_SLOT_MASK) << PCICFG_SLOT_SHIFT) \ + | (((f) & PCICFG_FUN_MASK) << PCICFG_FUN_SHIFT) \ + | (((o) & PCICFG_OFF_MASK) << PCICFG_OFF_SHIFT)) + +#define PCI_CONFIG_BUS(a) (((a) >> PCICFG_BUS_SHIFT) & PCICFG_BUS_MASK) +#define PCI_CONFIG_SLOT(a) (((a) >> PCICFG_SLOT_SHIFT) & PCICFG_SLOT_MASK) +#define PCI_CONFIG_FUN(a) (((a) >> PCICFG_FUN_SHIFT) & PCICFG_FUN_MASK) +#define PCI_CONFIG_OFF(a) (((a) >> PCICFG_OFF_SHIFT) & PCICFG_OFF_MASK) + +/* PCIE Config space accessing MACROS */ + +#define PCIECFG_BUS_SHIFT 24 /* Bus shift */ +#define PCIECFG_SLOT_SHIFT 19 /* Slot/Device shift */ +#define PCIECFG_FUN_SHIFT 16 /* Function shift */ +#define PCIECFG_OFF_SHIFT 0 /* Register shift */ + +#define PCIECFG_BUS_MASK 0xff /* Bus mask */ +#define PCIECFG_SLOT_MASK 0x1f /* Slot/Device mask */ +#define PCIECFG_FUN_MASK 7 /* Function mask */ +#define PCIECFG_OFF_MASK 0xfff /* Register mask */ + +#define PCIE_CONFIG_ADDR(b, s, f, o) \ + ((((b) & PCIECFG_BUS_MASK) << PCIECFG_BUS_SHIFT) \ + | (((s) & PCIECFG_SLOT_MASK) << PCIECFG_SLOT_SHIFT) \ + | (((f) & PCIECFG_FUN_MASK) << PCIECFG_FUN_SHIFT) \ + | (((o) & PCIECFG_OFF_MASK) << PCIECFG_OFF_SHIFT)) + +#define PCIE_CONFIG_BUS(a) (((a) >> PCIECFG_BUS_SHIFT) & PCIECFG_BUS_MASK) +#define PCIE_CONFIG_SLOT(a) (((a) >> PCIECFG_SLOT_SHIFT) & PCIECFG_SLOT_MASK) +#define PCIE_CONFIG_FUN(a) (((a) >> PCIECFG_FUN_SHIFT) & PCIECFG_FUN_MASK) +#define PCIE_CONFIG_OFF(a) (((a) >> PCIECFG_OFF_SHIFT) & PCIECFG_OFF_MASK) + +/* The actual config space */ + +#define PCI_BAR_MAX 6 + +#define PCI_ROM_BAR 8 + +#define PCR_RSVDA_MAX 2 + +/* Bits in PCI bars' flags */ + +#define PCIBAR_FLAGS 0xf +#define PCIBAR_IO 0x1 +#define PCIBAR_MEM1M 0x2 +#define PCIBAR_MEM64 0x4 +#define PCIBAR_PREFETCH 0x8 +#define PCIBAR_MEM32_MASK 0xFFFFFF80 + +typedef struct _pci_config_regs { + uint16 vendor; + uint16 device; + uint16 command; + uint16 status; + uint8 rev_id; + uint8 prog_if; + uint8 sub_class; + uint8 base_class; + uint8 cache_line_size; + uint8 latency_timer; + uint8 header_type; + uint8 bist; + uint32 base[PCI_BAR_MAX]; + uint32 cardbus_cis; + uint16 subsys_vendor; + uint16 subsys_id; + uint32 baserom; + uint32 rsvd_a[PCR_RSVDA_MAX]; + uint8 int_line; + uint8 int_pin; + uint8 min_gnt; + uint8 max_lat; + uint8 dev_dep[192]; +} pci_config_regs; + +#define SZPCR (sizeof (pci_config_regs)) +#define MINSZPCR 64 /* offsetof(dev_dep[0]) */ + +/* pci config status reg has a bit to indicate that capability ptr is present */ + +#define PCI_CAPPTR_PRESENT 0x0010
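+
+/* Example: a sketch of walking the capability chain with the byte accessor
+ * defined later in this file (assumes an 'osh' OSL handle in scope and that
+ * the PCI_CAPPTR_PRESENT status bit is set; illustrative only, not code
+ * from this patch):
+ *
+ *	uint8 ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+ *	while (ptr != 0) {
+ *		uint8 cap_id = read_pci_cfg_byte(ptr);
+ *		if (cap_id == PCI_CAP_PCIECAP_ID)
+ *			break;
+ *		ptr = read_pci_cfg_byte(ptr + 1);
+ *	}
+ */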
+/* A structure for the config registers is nice, but in most + * systems the config space is not memory mapped, so we need + * field offsets. :-( + */ +#define PCI_CFG_VID 0 +#define PCI_CFG_DID 2 +#define PCI_CFG_CMD 4 +#define PCI_CFG_STAT 6 +#define PCI_CFG_REV 8 +#define PCI_CFG_PROGIF 9 +#define PCI_CFG_SUBCL 0xa +#define PCI_CFG_BASECL 0xb +#define PCI_CFG_CLSZ 0xc +#define PCI_CFG_LATTIM 0xd +#define PCI_CFG_HDR 0xe +#define PCI_CFG_BIST 0xf +#define PCI_CFG_BAR0 0x10 +#define PCI_CFG_BAR1 0x18 +#define PCI_CFG_BAR2 0x20 +#define PCI_CFG_CIS 0x28 +#define PCI_CFG_SVID 0x2c +#define PCI_CFG_SSID 0x2e +#define PCI_CFG_ROMBAR 0x30 +#define PCI_CFG_CAPPTR 0x34 +#define PCI_CFG_INT 0x3c +#define PCI_CFG_PIN 0x3d +#define PCI_CFG_MINGNT 0x3e +#define PCI_CFG_MAXLAT 0x3f +#define PCI_CFG_DEVCTRL 0xd8 +#define PCI_CFG_TLCNTRL_5 0x814 +#define PCI_CFG_ERRATTN_MASK_FN0 0x8a0 +#define PCI_CFG_ERRATTN_STATUS_FN0 0x8a4 +#define PCI_CFG_ERRATTN_MASK_FN1 0x8a8 +#define PCI_CFG_ERRATTN_STATUS_FN1 0x8ac +#define PCI_CFG_ERRATTN_MASK_CMN 0x8b0 +#define PCI_CFG_ERRATTN_STATUS_CMN 0x8b4 + +#ifdef EFI +#undef PCI_CLASS_BRIDGE +#undef PCI_CLASS_OLD +#undef PCI_CLASS_DISPLAY +#undef PCI_CLASS_SERIAL +#undef PCI_CLASS_SATELLITE +#endif /* EFI */ + +/* Classes and subclasses */ + +typedef enum { + PCI_CLASS_OLD = 0, + PCI_CLASS_DASDI, + PCI_CLASS_NET, + PCI_CLASS_DISPLAY, + PCI_CLASS_MMEDIA, + PCI_CLASS_MEMORY, + PCI_CLASS_BRIDGE, + PCI_CLASS_COMM, + PCI_CLASS_BASE, + PCI_CLASS_INPUT, + PCI_CLASS_DOCK, + PCI_CLASS_CPU, + PCI_CLASS_SERIAL, + PCI_CLASS_INTELLIGENT = 0xe, + PCI_CLASS_SATELLITE, + PCI_CLASS_CRYPT, + PCI_CLASS_DSP, + PCI_CLASS_XOR = 0xfe +} pci_classes; + +typedef enum { + PCI_DASDI_SCSI, + PCI_DASDI_IDE, + PCI_DASDI_FLOPPY, + PCI_DASDI_IPI, + PCI_DASDI_RAID, + PCI_DASDI_OTHER = 0x80 +} pci_dasdi_subclasses; + +typedef enum { + PCI_NET_ETHER, + PCI_NET_TOKEN, + PCI_NET_FDDI, + PCI_NET_ATM, + PCI_NET_OTHER = 0x80 +} pci_net_subclasses; + +typedef enum { + PCI_DISPLAY_VGA, + PCI_DISPLAY_XGA, + PCI_DISPLAY_3D, + PCI_DISPLAY_OTHER = 0x80 +} pci_display_subclasses; + +typedef enum { + PCI_MMEDIA_VIDEO, + PCI_MMEDIA_AUDIO, + PCI_MMEDIA_PHONE, + PCI_MEDIA_OTHER = 0x80 +} pci_mmedia_subclasses; + +typedef enum { + PCI_MEMORY_RAM, + PCI_MEMORY_FLASH, + PCI_MEMORY_OTHER = 0x80 +} pci_memory_subclasses; + +typedef enum { + PCI_BRIDGE_HOST, + PCI_BRIDGE_ISA, + PCI_BRIDGE_EISA, + PCI_BRIDGE_MC, + PCI_BRIDGE_PCI, + PCI_BRIDGE_PCMCIA, + PCI_BRIDGE_NUBUS, + PCI_BRIDGE_CARDBUS, + PCI_BRIDGE_RACEWAY, + PCI_BRIDGE_OTHER = 0x80 +} pci_bridge_subclasses; + +typedef enum { + PCI_COMM_UART, + PCI_COMM_PARALLEL, + PCI_COMM_MULTIUART, + PCI_COMM_MODEM, + PCI_COMM_OTHER = 0x80 +} pci_comm_subclasses; + +typedef enum { + PCI_BASE_PIC, + PCI_BASE_DMA, + PCI_BASE_TIMER, + PCI_BASE_RTC, + PCI_BASE_PCI_HOTPLUG, + PCI_BASE_OTHER = 0x80 +} pci_base_subclasses; + +typedef enum { + PCI_INPUT_KBD, + PCI_INPUT_PEN, + PCI_INPUT_MOUSE, + PCI_INPUT_SCANNER, + PCI_INPUT_GAMEPORT, + PCI_INPUT_OTHER = 0x80 +} pci_input_subclasses; + +typedef enum { + PCI_DOCK_GENERIC, + PCI_DOCK_OTHER = 0x80 +} pci_dock_subclasses; + +typedef enum { + PCI_CPU_386, + PCI_CPU_486, + PCI_CPU_PENTIUM, + PCI_CPU_ALPHA = 0x10, + PCI_CPU_POWERPC = 0x20, + PCI_CPU_MIPS = 0x30, + PCI_CPU_COPROC = 0x40, + PCI_CPU_OTHER = 0x80 +} pci_cpu_subclasses; + +typedef enum { + PCI_SERIAL_IEEE1394, + PCI_SERIAL_ACCESS, + PCI_SERIAL_SSA, + PCI_SERIAL_USB, + PCI_SERIAL_FIBER, + PCI_SERIAL_SMBUS, + PCI_SERIAL_OTHER = 0x80 +} pci_serial_subclasses; + +typedef enum { + PCI_INTELLIGENT_I2O +}
pci_intelligent_subclasses; + +typedef enum { + PCI_SATELLITE_TV, + PCI_SATELLITE_AUDIO, + PCI_SATELLITE_VOICE, + PCI_SATELLITE_DATA, + PCI_SATELLITE_OTHER = 0x80 +} pci_satellite_subclasses; + +typedef enum { + PCI_CRYPT_NETWORK, + PCI_CRYPT_ENTERTAINMENT, + PCI_CRYPT_OTHER = 0x80 +} pci_crypt_subclasses; + +typedef enum { + PCI_DSP_DPIO, + PCI_DSP_OTHER = 0x80 +} pci_dsp_subclasses; + +typedef enum { + PCI_XOR_QDMA, + PCI_XOR_OTHER = 0x80 +} pci_xor_subclasses; + +/* Overlay for a PCI-to-PCI bridge */ + +#define PPB_RSVDA_MAX 2 +#define PPB_RSVDD_MAX 8 + +typedef struct _ppb_config_regs { + uint16 vendor; + uint16 device; + uint16 command; + uint16 status; + uint8 rev_id; + uint8 prog_if; + uint8 sub_class; + uint8 base_class; + uint8 cache_line_size; + uint8 latency_timer; + uint8 header_type; + uint8 bist; + uint32 rsvd_a[PPB_RSVDA_MAX]; + uint8 prim_bus; + uint8 sec_bus; + uint8 sub_bus; + uint8 sec_lat; + uint8 io_base; + uint8 io_lim; + uint16 sec_status; + uint16 mem_base; + uint16 mem_lim; + uint16 pf_mem_base; + uint16 pf_mem_lim; + uint32 pf_mem_base_hi; + uint32 pf_mem_lim_hi; + uint16 io_base_hi; + uint16 io_lim_hi; + uint16 subsys_vendor; + uint16 subsys_id; + uint32 rsvd_b; + uint8 rsvd_c; + uint8 int_pin; + uint16 bridge_ctrl; + uint8 chip_ctrl; + uint8 diag_ctrl; + uint16 arb_ctrl; + uint32 rsvd_d[PPB_RSVDD_MAX]; + uint8 dev_dep[192]; +} ppb_config_regs; + +/* Everything below is BRCM HND proprietary */ + +/* Brcm PCI configuration registers */ +#define cap_list rsvd_a[0] +#define bar0_window dev_dep[0x80 - 0x40] +#define bar1_window dev_dep[0x84 - 0x40] +#define sprom_control dev_dep[0x88 - 0x40] + +/* PCI CAPABILITY DEFINES */ +#define PCI_CAP_POWERMGMTCAP_ID 0x01 +#define PCI_CAP_MSICAP_ID 0x05 +#define PCI_CAP_VENDSPEC_ID 0x09 +#define PCI_CAP_PCIECAP_ID 0x10 +#define PCI_CAP_MSIXCAP_ID 0x11 + +/* Data structure to define the Message Signalled Interrupt facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_msi { + uint8 capID; + uint8 nextptr; + uint16 msgctrl; + uint32 msgaddr; +} pciconfig_cap_msi; +#define MSI_ENABLE 0x1 /* bit 0 of msgctrl */ + +/* Data structure to define the Power management facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_pwrmgmt { + uint8 capID; + uint8 nextptr; + uint16 pme_cap; + uint16 pme_sts_ctrl; + uint8 pme_bridge_ext; + uint8 data; +} pciconfig_cap_pwrmgmt; + +#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */ +#define PME_CSR_OFFSET 0x4 /* 4-bytes offset */ +#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */ +#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */ + +/* Data structure to define the PCIE capability */ +typedef struct _pciconfig_cap_pcie { + uint8 capID; + uint8 nextptr; + uint16 pcie_cap; + uint32 dev_cap; + uint16 dev_ctrl; + uint16 dev_status; + uint32 link_cap; + uint16 link_ctrl; + uint16 link_status; + uint32 slot_cap; + uint16 slot_ctrl; + uint16 slot_status; + uint16 root_ctrl; + uint16 root_cap; + uint32 root_status; +} pciconfig_cap_pcie; + +/* PCIE Enhanced CAPABILITY DEFINES */ +#define PCIE_EXTCFG_OFFSET 0x100 +#define PCIE_ADVERRREP_CAPID 0x0001 +#define PCIE_VC_CAPID 0x0002 +#define PCIE_DEVSNUM_CAPID 0x0003 +#define PCIE_PWRBUDGET_CAPID 0x0004 + +/* PCIE Extended configuration */ +#define PCIE_ADV_CORR_ERR_MASK 0x114 +#define PCIE_ADV_CORR_ERR_MASK_OFFSET 0x14 +#define CORR_ERR_RE (1 << 0) /* Receiver */ +#define CORR_ERR_BT (1 << 6) /* Bad TLP */ +#define CORR_ERR_BD (1 << 7) /* Bad DLLP */
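+
+/* Example: a sketch of masking all correctable AER sources through the
+ * extended config space (ALL_CORR_ERRORS is defined just below; assumes an
+ * 'osh' OSL handle; illustrative only, not code from this patch):
+ *
+ *	uint32 v = OSL_PCI_READ_CONFIG(osh, PCIE_ADV_CORR_ERR_MASK, 4);
+ *	v |= ALL_CORR_ERRORS;
+ *	OSL_PCI_WRITE_CONFIG(osh, PCIE_ADV_CORR_ERR_MASK, 4, v);
+ */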
#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */ +#define CORR_ERR_RT (1 << 12) /* Replay timer timeout */ +#define CORR_ERR_AE (1 << 13) /* Advisory Non-Fatal Error Mask */ +#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \ + CORR_ERR_RR | CORR_ERR_RT) + +/* PCIE Root Control Register bits (Host mode only) */ +#define PCIE_RC_CORR_SERR_EN 0x0001 +#define PCIE_RC_NONFATAL_SERR_EN 0x0002 +#define PCIE_RC_FATAL_SERR_EN 0x0004 +#define PCIE_RC_PME_INT_EN 0x0008 +#define PCIE_RC_CRS_EN 0x0010 + +/* PCIE Root Capability Register bits (Host mode only) */ +#define PCIE_RC_CRS_VISIBILITY 0x0001 + +/* PCIe PMCSR Register bits */ +#define PCIE_PMCSR_PMESTAT 0x8000 + +/* Header to define the PCIE specific capabilities in the extended config space */ +typedef struct _pcie_enhanced_caphdr { + uint16 capID; + uint16 cap_ver : 4; + uint16 next_ptr : 12; +} pcie_enhanced_caphdr; + +#define PCIE_CFG_PMCSR 0x4C +#define PCI_BAR0_WIN 0x80 /* backplane address space accessed by BAR0 */ +#define PCI_BAR1_WIN 0x84 /* backplane address space accessed by BAR1 */ +#define PCI_SPROM_CONTROL 0x88 /* sprom property control */ +#define PCIE_CFG_SUBSYSTEM_CONTROL 0x88 /* used as subsystem control in PCIE devices */ +#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */ +#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */ +#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */ +#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */ +#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */ +#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */ +#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */ +#define PCI_BAR0_WIN2 0xac /* backplane address space accessed by second 4KB of BAR0 */ +#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */ +#define PCIE_CFG_DEVICE_CAPABILITY 0xb0 /* used as device capability in PCIE devices */ +#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */ +#define PCIE_CFG_DEVICE_CONTROL 0xb4 /* 0xb4 is used as device control in PCIE devices */ +#define PCIE_DC_AER_CORR_EN (1u << 0u) +#define PCIE_DC_AER_NON_FATAL_EN (1u << 1u) +#define PCIE_DC_AER_FATAL_EN (1u << 2u) +#define PCIE_DC_AER_UNSUP_EN (1u << 3u) + +#define PCI_BAR0_WIN2_OFFSET 0x1000u +#define PCIE2_BAR0_CORE2_WIN2_OFFSET 0x5000u + +#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */ +#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */ + +/* Private Registers */ +#define PCI_STAT_CTRL 0xa80 +#define PCI_L0_EVENTCNT 0xa84 +#define PCI_L0_STATETMR 0xa88 +#define PCI_L1_EVENTCNT 0xa8c +#define PCI_L1_STATETMR 0xa90 +#define PCI_L1_1_EVENTCNT 0xa94 +#define PCI_L1_1_STATETMR 0xa98 +#define PCI_L1_2_EVENTCNT 0xa9c +#define PCI_L1_2_STATETMR 0xaa0 +#define PCI_L2_EVENTCNT 0xaa4 +#define PCI_L2_STATETMR 0xaa8 + +#define PCI_LINK_STATUS 0x4dc +#define PCI_LINK_SPEED_MASK (15u << 0u) +#define PCI_LINK_SPEED_SHIFT (0) +#define PCIE_LNK_SPEED_GEN1 0x1 +#define PCIE_LNK_SPEED_GEN2 0x2 +#define PCIE_LNK_SPEED_GEN3 0x3 + +#define PCI_PL_SPARE 0x1808 /* Config to Increase external clkreq deasserted minimum time */ +#define PCI_CONFIG_EXT_CLK_MIN_TIME_MASK (1u << 31u) +#define PCI_CONFIG_EXT_CLK_MIN_TIME_SHIFT (31) + +#define PCI_ADV_ERR_CAP 0x100 +#define PCI_UC_ERR_STATUS 0x104 +#define PCI_UNCORR_ERR_MASK 0x108 +#define PCI_UCORR_ERR_SEVR 0x10c +#define PCI_CORR_ERR_STATUS 0x110 +#define
PCI_CORR_ERR_MASK 0x114 +#define PCI_ERR_CAP_CTRL 0x118 +#define PCI_TLP_HDR_LOG1 0x11c +#define PCI_TLP_HDR_LOG2 0x120 +#define PCI_TLP_HDR_LOG3 0x124 +#define PCI_TLP_HDR_LOG4 0x128 +#define PCI_TL_CTRL_5 0x814 +#define PCI_TL_HDR_FC_ST 0x980 +#define PCI_TL_TGT_CRDT_ST 0x990 +#define PCI_TL_SMLOGIC_ST 0x998 +#define PCI_DL_ATTN_VEC 0x1040 +#define PCI_DL_STATUS 0x1048 + +#define PCI_PHY_CTL_0 0x1800 +#define PCI_SLOW_PMCLK_EXT_RLOCK (1 << 7) +#define PCI_REG_TX_DEEMPH_3_5_DB (1 << 21) + +#define PCI_LINK_STATE_DEBUG 0x1c24 +#define PCI_RECOVERY_HIST 0x1ce4 +#define PCI_PHY_LTSSM_HIST_0 0x1cec +#define PCI_PHY_LTSSM_HIST_1 0x1cf0 +#define PCI_PHY_LTSSM_HIST_2 0x1cf4 +#define PCI_PHY_LTSSM_HIST_3 0x1cf8 +#define PCI_PHY_DBG_CLKREG_0 0x1e10 +#define PCI_PHY_DBG_CLKREG_1 0x1e14 +#define PCI_PHY_DBG_CLKREG_2 0x1e18 +#define PCI_PHY_DBG_CLKREG_3 0x1e1c + +#define PCI_TL_CTRL_0 0x800u +#define PCI_BEACON_DIS (1u << 20u) /* Disable Beacon Generation */ + +/* Bit settings for PCIE_CFG_SUBSYSTEM_CONTROL register */ +#define PCIE_BAR1COHERENTACCEN_BIT 8 +#define PCIE_BAR2COHERENTACCEN_BIT 9 +#define PCIE_SSRESET_STATUS_BIT 13 +#define PCIE_SSRESET_DISABLE_BIT 14 +#define PCIE_SSRESET_DIS_ENUM_RST_BIT 15 + +#define PCIE_BARCOHERENTACCEN_MASK 0x300 + +/* Bit settings for PCI_UC_ERR_STATUS register */ +#define PCI_UC_ERR_URES (1 << 20) /* Unsupported Request Error Status */ +#define PCI_UC_ERR_ECRCS (1 << 19) /* ECRC Error Status */ +#define PCI_UC_ERR_MTLPS (1 << 18) /* Malformed TLP Status */ +#define PCI_UC_ERR_ROS (1 << 17) /* Receiver Overflow Status */ +#define PCI_UC_ERR_UCS (1 << 16) /* Unexpected Completion Status */ +#define PCI_UC_ERR_CAS (1 << 15) /* Completer Abort Status */ +#define PCI_UC_ERR_CTS (1 << 14) /* Completer Timeout Status */ +#define PCI_UC_ERR_FCPES (1 << 13) /* Flow Control Protocol Error Status */ +#define PCI_UC_ERR_PTLPS (1 << 12) /* Poisoned TLP Status */ +#define PCI_UC_ERR_DLPES (1 << 4) /* Data Link Protocol Error Status */ + +#define PCI_DL_STATUS_PHY_LINKUP (1 << 13) /* Status of LINK */ + +#define PCI_PMCR_REFUP 0x1814 /* Trefup time */ +#define PCI_PMCR_TREFUP_LO_MASK 0x3f +#define PCI_PMCR_TREFUP_LO_SHIFT 24 +#define PCI_PMCR_TREFUP_LO_BITS 6 +#define PCI_PMCR_TREFUP_HI_MASK 0xf +#define PCI_PMCR_TREFUP_HI_SHIFT 5 +#define PCI_PMCR_TREFUP_HI_BITS 4 +#define PCI_PMCR_TREFUP_MAX 0x400 +#define PCI_PMCR_TREFUP_MAX_SCALE 0x2000 + +#define PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */ +#define PCI_PMCR_TREFUP_EXT_SHIFT 22 +#define PCI_PMCR_TREFUP_EXT_SCALE 3 +#define PCI_PMCR_TREFUP_EXT_ON 1 +#define PCI_PMCR_TREFUP_EXT_OFF 0 + +#define PCI_TPOWER_SCALE_MASK 0x3 +#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */ + +#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */ +#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */ +#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */ +#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the + * 8KB window, so their address is the "regular" + * address plus 4K + */ +/* + * PCIE GEN2 changed some of the above locations for + * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase + * BAR0 maps 32K of register space +*/ +#define PCIE2_BAR0_WIN2 0x70 /* config register to map 2nd 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN 0x74 /* config register to map 5th 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN2 0x78 /* config register to map 6th 4KB of BAR0 */ + +/* PCIE 
GEN2 BAR0 window size */ +#define PCIE2_BAR0_WINSZ 0x8000 + +#define PCI_BAR0_WIN2_OFFSET 0x1000u +#define PCI_CORE_ENUM_OFFSET 0x2000u +#define PCI_CC_CORE_ENUM_OFFSET 0x3000u +#define PCI_SEC_BAR0_WIN_OFFSET 0x4000u +#define PCI_SEC_BAR0_WRAP_OFFSET 0x5000u +#define PCI_CORE_ENUM2_OFFSET 0x6000u +#define PCI_CC_CORE_ENUM2_OFFSET 0x7000u +#define PCI_TER_BAR0_WIN_OFFSET 0x9000u +#define PCI_TER_BAR0_WRAP_OFFSET 0xa000u + +#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size, matches corerev 13 */ +/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */ +#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */ +#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */ +#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */ +#define PCI_SECOND_BAR0_OFFSET (16 * 1024) /* secondary bar 0 window */ + +/* On AI chips we have a second window where DMP regs are mapped: */ +#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */ + +/* PCI_INT_STATUS */ +#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */ + +/* PCI_INT_MASK */ +#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */ +#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */ +#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */ +#define PCI_CTO_INT_SHIFT 16 /* completion timeout (CTO) interrupt shift */ +#define PCI_CTO_INT_MASK (1 << PCI_CTO_INT_SHIFT) /* completion timeout (CTO) interrupt mask */ + +/* PCI_SPROM_CONTROL */ +#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */ +#define SPROM_LOCKED 0x08 /* SPROM Locked */ +#define SPROM_BLANK 0x04 /* indicating a blank SPROM */ +#define SPROM_WRITEEN 0x10 /* SPROM write enable */ +#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */ +#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */ +#define SPROM_OTPIN_USE 0x80 /* device OTP In use */ +#define SPROM_BAR1_COHERENT_ACC_EN 0x100 /* PCIe accesses through BAR1 are coherent */ +#define SPROM_BAR2_COHERENT_ACC_EN 0x200 /* PCIe accesses through BAR2 are coherent */ +#define SPROM_CFG_TO_SB_RST 0x400 /* backplane reset */ + +/* Bits in PCI command and status regs */ +#define PCI_CMD_IO 0x00000001 /* I/O enable */ +#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */ +#define PCI_CMD_MASTER 0x00000004 /* Master enable */ +#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */
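+
+/* Example: enabling memory decode and bus mastering with the word accessors
+ * defined below (assumes an 'osh' OSL handle; illustrative only, not code
+ * from this patch):
+ *
+ *	uint16 cmd = read_pci_cfg_word(PCI_CFG_CMD);
+ *	cmd |= (uint16)(PCI_CMD_MEMORY | PCI_CMD_MASTER);
+ *	write_pci_cfg_word(PCI_CFG_CMD, cmd);
+ */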
#define PCI_CMD_INVALIDATE 0x00000010 /* Invalidate? */ +#define PCI_CMD_VGA_PAL 0x00000040 /* VGA Palette */ +#define PCI_STAT_TA 0x08000000 /* target abort status */ + +/* Header types */ +#define PCI_HEADER_MULTI 0x80 +#define PCI_HEADER_MASK 0x7f +typedef enum { + PCI_HEADER_NORMAL, + PCI_HEADER_BRIDGE, + PCI_HEADER_CARDBUS +} pci_header_types; + +#define PCI_CONFIG_SPACE_SIZE 256 + +#define DWORD_ALIGN(x) ((x) & ~(0x03)) +#define BYTE_POS(x) ((x) & 0x3) +#define WORD_POS(x) ((x) & 0x1) + +#define BYTE_SHIFT(x) (8 * BYTE_POS(x)) +#define WORD_SHIFT(x) (16 * WORD_POS(x)) + +#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF) +#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF) + +#define read_pci_cfg_byte(a) \ + BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) + +#define read_pci_cfg_word(a) \ + WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) + +#define write_pci_cfg_byte(a, val) do { \ + uint32 tmpval; \ + tmpval = OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4); \ + tmpval &= ~(0xFF << BYTE_SHIFT(a)); \ + tmpval |= ((uint8)(val)) << BYTE_SHIFT(a); \ + OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#define write_pci_cfg_word(a, val) do { \ + uint32 tmpval; \ + tmpval = OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4); \ + tmpval &= ~(0xFFFF << WORD_SHIFT(a)); \ + tmpval |= ((uint16)(val)) << WORD_SHIFT(a); \ + OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#endif /* _h_pcicfg_ */ diff --git a/bcmdhd.101.10.361.x/include/pcie_core.h b/bcmdhd.101.10.361.x/include/pcie_core.h new file mode 100755 index 0000000..80bc4c2 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/pcie_core.h @@ -0,0 +1,1485 @@ +/* + * BCM43XX PCIE core hardware definitions. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ +#ifndef _PCIE_CORE_H +#define _PCIE_CORE_H + +#include +#include + +#define REV_GE_73(rev) (PCIECOREREV((rev)) >= 73) +#define REV_GE_69(rev) (PCIECOREREV((rev)) >= 69) +#define REV_GE_68(rev) (PCIECOREREV((rev)) >= 68) +#define REV_GE_64(rev) (PCIECOREREV((rev)) >= 64) +#define REV_GE_15(rev) (PCIECOREREV((rev)) >= 15) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +/* PCIE Enumeration space offsets */ +#define PCIE_CORE_CONFIG_OFFSET 0x0 +#define PCIE_FUNC0_CONFIG_OFFSET 0x400 +#define PCIE_FUNC1_CONFIG_OFFSET 0x500 +#define PCIE_FUNC2_CONFIG_OFFSET 0x600 +#define PCIE_FUNC3_CONFIG_OFFSET 0x700 +#define PCIE_SPROM_SHADOW_OFFSET 0x800 +#define PCIE_SBCONFIG_OFFSET 0xE00 + +#define PCIEDEV_MAX_DMAS 4
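+
+/* Note: the PAD macro above expands to a unique member name per source line
+ * (pad<line>), so "uint32 PAD[2];" on, say, line 57 becomes
+ * "uint32 pad57[2];". This is how the register structs below reserve
+ * unnamed gaps without colliding member names.
+ */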
+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */ +#define PCIE_DEV_BAR0_SIZE 0x4000 +#define PCIE_BAR0_WINMAPCORE_OFFSET 0x0 +#define PCIE_BAR0_EXTSPROM_OFFSET 0x1000 +#define PCIE_BAR0_PCIECORE_OFFSET 0x2000 +#define PCIE_BAR0_CCCOREREG_OFFSET 0x3000 + +/* different register spaces to access through pcie indirect access */ +#define PCIE_CONFIGREGS 1 /* Access to config space */ +#define PCIE_PCIEREGS 2 /* Access to pcie registers */ + +#define PCIEDEV_HOSTADDR_MAP_BASE 0x8000000 +#define PCIEDEV_HOSTADDR_MAP_WIN_MASK 0xFE000000 + +#define PCIEDEV_TR0_WINDOW_START 0x08000000 +#define PCIEDEV_TR0_WINDOW_END 0x09FFFFFF + +#define PCIEDEV_TR1_WINDOW_START 0x0A000000 +#define PCIEDEV_TR1_WINDOW_END 0x0BFFFFFF + +#define PCIEDEV_TR2_WINDOW_START 0x0C000000 +#define PCIEDEV_TR2_WINDOW_END 0x0DFFFFFF + +#define PCIEDEV_TR3_WINDOW_START 0x0E000000 +#define PCIEDEV_TR3_WINDOW_END 0x0FFFFFFF + +#define PCIEDEV_TRANS_WIN_LEN 0x2000000 +#define PCIEDEV_ARM_ADDR_SPACE 0x0FFFFFFF + +/* PCIe translation windows */ +#define PCIEDEV_TRANS_WIN_0 0 +#define PCIEDEV_TRANS_WIN_1 1 +#define PCIEDEV_TRANS_WIN_2 2 +#define PCIEDEV_TRANS_WIN_3 3 + +#define PCIEDEV_ARM_ADDR(host_addr, win) \ + (((host_addr) & 0x1FFFFFF) | ((win) << 25) | PCIEDEV_HOSTADDR_MAP_BASE) + +/* Current mapping of PCIe translation windows to SW features */ + +#define PCIEDEV_TRANS_WIN_TRAP_HANDLER PCIEDEV_TRANS_WIN_0 +#define PCIEDEV_TRANS_WIN_HOSTMEM PCIEDEV_TRANS_WIN_1 +#define PCIEDEV_TRANS_WIN_SWPAGING PCIEDEV_TRANS_WIN_1 +#define PCIEDEV_TRANS_WIN_BT PCIEDEV_TRANS_WIN_2 +#define PCIEDEV_TRANS_WIN_FWTRACE PCIEDEV_TRANS_WIN_3 + +/* dma regs to control the flow between host2dev and dev2host */ +typedef volatile struct pcie_devdmaregs { + dma64regs_t tx; + uint32 PAD[2]; + dma64regs_t rx; + uint32 PAD[2]; +} pcie_devdmaregs_t; + +#define PCIE_DB_HOST2DEV_0 0x1 +#define PCIE_DB_HOST2DEV_1 0x2 +#define PCIE_DB_DEV2HOST_0 0x3 +#define PCIE_DB_DEV2HOST_1 0x4 +#define PCIE_DB_DEV2HOST1_0 0x5 + +/* door bell register sets */ +typedef struct pcie_doorbell { + uint32 host2dev_0; + uint32 host2dev_1; + uint32 dev2host_0; + uint32 dev2host_1; +} pcie_doorbell_t; + +/* Flow Ring Manager */ +#define IFRM_FR_IDX_MAX 256 +#define IFRM_FR_CONFIG_GID 2 +#define IFRM_FR_GID_MAX 4 +#define IFRM_FR_DEV_MAX 8 +#define IFRM_FR_TID_MAX 8 +#define IFRM_FR_DEV_VALID 2 + +#define IFRM_VEC_REG_BITS 32 + +#define IFRM_FR_PER_VECREG 4 +#define IFRM_FR_PER_VECREG_SHIFT 2 +#define IFRM_FR_PER_VECREG_MASK ((0x1 << IFRM_FR_PER_VECREG_SHIFT) - 1) + +#define IFRM_VEC_BITS_PER_FR (IFRM_VEC_REG_BITS/IFRM_FR_PER_VECREG) + +/* IFRM_DEV_0 : d11AC, IFRM_DEV_1 : d11AD */ +#define IFRM_DEV_0 0 +#define IFRM_DEV_1 1 +#define IHRM_FR_SW_MASK (1u << IFRM_DEV_0) +#define IHRM_FR_HW_MASK (1u << IFRM_DEV_1) + +#define IFRM_FR_GID_0 0 +#define IFRM_FR_GID_1 1 +#define IFRM_FR_GID_2 2 +#define IFRM_FR_GID_3 3 + +#define IFRM_TIDMASK 0xffffffff + +/* ifrm_ctrlst register */ +#define IFRM_EN (1<<0) +#define IFRM_BUFF_INIT_DONE (1<<1) +#define IFRM_COMPARE_EN0 (1<<4) +#define IFRM_COMPARE_EN1 (1<<5) +#define IFRM_COMPARE_EN2 (1<<6) +#define IFRM_COMPARE_EN3 (1<<7) +#define IFRM_INIT_DV0 (1<<8) +#define IFRM_INIT_DV1 (1<<9) +#define IFRM_INIT_DV2 (1<<10) +#define IFRM_INIT_DV3 (1<<11) + +/* ifrm_msk_arr.addr, ifrm_tid_arr.addr register */ +#define IFRM_ADDR_SHIFT 0 +#define IFRM_FRG_ID_SHIFT 8 + +/* ifrm_vec.diff_lat register */ +#define IFRM_DV_LAT (1<<0) +#define IFRM_DV_LAT_DONE (1<<1) +#define IFRM_SDV_OFFSET_SHIFT 4 +#define IFRM_SDV_FRGID_SHIFT 8 +#define IFRM_VECSTAT_MASK 0x3 +#define IFRM_VEC_MASK 0xff
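+
+/* Example: a worked expansion of the PCIEDEV_ARM_ADDR() macro defined above
+ * (values illustrative only):
+ *
+ *	PCIEDEV_ARM_ADDR(0x12345678, PCIEDEV_TRANS_WIN_1)
+ *	  = (0x12345678 & 0x1FFFFFF) | (1 << 25) | PCIEDEV_HOSTADDR_MAP_BASE
+ *	  = 0x0345678 | 0x2000000 | 0x8000000
+ *	  = 0x0A345678, i.e. inside PCIEDEV_TR1_WINDOW_START..END.
+ */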
+ +/* HMAP Windows */ +#define HMAP_MAX_WINDOWS 8 + +/* idma frm array */ +typedef struct pcie_ifrm_array { + uint32 addr; + uint32 data; +} pcie_ifrm_array_t; + +/* idma frm vector */ +typedef struct pcie_ifrm_vector { + uint32 diff_lat; + uint32 sav_tid; + uint32 sav_diff; + uint32 PAD[1]; +} pcie_ifrm_vector_t; + +/* idma frm interrupt */ +typedef struct pcie_ifrm_intr { + uint32 intstat; + uint32 intmask; +} pcie_ifrm_intr_t; + +/* HMAP window register set */ +typedef volatile struct pcie_hmapwindow { + uint32 baseaddr_lo; /* BaseAddrLower */ + uint32 baseaddr_hi; /* BaseAddrUpper */ + uint32 windowlength; /* Window Length */ + uint32 PAD[1]; +} pcie_hmapwindow_t; + +typedef struct pcie_hmapviolation { + uint32 hmap_violationaddr_lo; /* violating address lo */ + uint32 hmap_violationaddr_hi; /* violating addr hi */ + uint32 hmap_violation_info; /* violation info */ + uint32 PAD[1]; +} pcie_hmapviolation_t; + +#if !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) || \ + defined(ATE_BUILD) || defined(BCMDVFS) +/* SB side: PCIE core and host control registers */ +typedef volatile struct sbpcieregs { + uint32 control; /* host mode only */ + uint32 iocstatus; /* PCIE2: iostatus */ + uint32 PAD[1]; + uint32 biststatus; /* bist Status: 0x00C */ + uint32 gpiosel; /* PCIE gpio sel: 0x010 */ + uint32 gpioouten; /* PCIE gpio outen: 0x14 */ + uint32 gpioout; /* PCIE gpio out: 0x18 */ + uint32 PAD; + uint32 intstatus; /* Interrupt status: 0x20 */ + uint32 intmask; /* Interrupt mask: 0x24 */ + uint32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */ + uint32 obffcontrol; /* PCIE2: 0x2C */ + uint32 obffintstatus; /* PCIE2: 0x30 */ + uint32 obffdatastatus; /* PCIE2: 0x34 */ + uint32 PAD[1]; + uint32 ctoctrl; /* PCIE2: 0x3C */ + uint32 errlog; /* PCIE2: 0x40 */ + uint32 errlogaddr; /* PCIE2: 0x44 */ + uint32 mailboxint; /* PCIE2: 0x48 */ + uint32 mailboxintmsk; /* PCIE2: 0x4c */ + uint32 ltrspacing; /* PCIE2: 0x50 */ + uint32 ltrhysteresiscnt; /* PCIE2: 0x54 */ + uint32 msivectorassign; /* PCIE2: 0x58 */ + uint32 intmask2; /* PCIE2: 0x5C */ + uint32 PAD[40]; + uint32 sbtopcie0; /* sb to pcie translation 0: 0x100 */ + uint32 sbtopcie1; /* sb to pcie translation 1: 0x104 */ + uint32 sbtopcie2; /* sb to pcie translation 2: 0x108 */ + uint32 sbtopcie0upper; /* sb to pcie translation 0: 0x10C */ + uint32 sbtopcie1upper; /* sb to pcie translation 1: 0x110 */ + uint32 PAD[3]; + + /* pcie core supports indirect access to config space */ + uint32 configaddr; /* pcie config space access: Address field: 0x120 */ + uint32 configdata; /* pcie config space access: Data field: 0x124 */ + union { + struct { + /* mdio access to serdes */ + uint32 mdiocontrol; /* controls the mdio access: 0x128 */ + uint32 mdiodata; /* Data to the mdio access: 0x12c */ + /* pcie protocol phy/dllp/tlp register indirect access mechanism */ + uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */ + uint32 pcieinddata; /* Data to/from the internal register: 0x134 */ + uint32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */ + uint32 PAD[177]; /* last 0x3FC */ + /* 0x400 - 0x7FF, PCIE Cfg Space, note: not used anymore in PcieGen2 */ + uint32 pciecfg[4][64]; + } pcie1; + struct { + /* mdio access to serdes */ + uint32 mdiocontrol; /* controls the mdio access: 0x128 */ + uint32 mdiowrdata; /* write data to mdio 0x12C */ + uint32 mdiorddata; /* read data to mdio 0x130 */ + uint32 PAD[3]; /* 0x134-0x138-0x13c */ + /* door bell registers available from gen2 rev5 onwards */ + pcie_doorbell_t
dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */ + uint32 dataintf; /* 0x180 */ + uint32 PAD[1]; /* 0x184 */ + uint32 d2h_intrlazy_0; /* 0x188 */ + uint32 h2d_intrlazy_0; /* 0x18c */ + uint32 h2d_intstat_0; /* 0x190 */ + uint32 h2d_intmask_0; /* 0x194 */ + uint32 d2h_intstat_0; /* 0x198 */ + uint32 d2h_intmask_0; /* 0x19c */ + uint32 ltr_state; /* 0x1A0 */ + uint32 pwr_int_status; /* 0x1A4 */ + uint32 pwr_int_mask; /* 0x1A8 */ + uint32 pme_source; /* 0x1AC */ + uint32 err_hdr_logreg1; /* 0x1B0 */ + uint32 err_hdr_logreg2; /* 0x1B4 */ + uint32 err_hdr_logreg3; /* 0x1B8 */ + uint32 err_hdr_logreg4; /* 0x1BC */ + uint32 err_code_logreg; /* 0x1C0 */ + uint32 axi_dbg_ctl; /* 0x1C4 */ + uint32 axi_dbg_data0; /* 0x1C8 */ + uint32 axi_dbg_data1; /* 0x1CC */ + uint32 PAD[4]; /* 0x1D0 - 0x1DF */ + uint32 clk_ctl_st; /* 0x1E0 */ + uint32 PAD[1]; /* 0x1E4 */ + uint32 powerctl; /* 0x1E8 */ + uint32 powerctl2; /* 0x1EC */ + uint32 PAD[4]; /* 0x1F0 - 0x1FF */ + pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */ + pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */ + pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */ + pcie_devdmaregs_t d2h1_dmaregs; /* 0x2c0 - 0x2fc */ + pcie_devdmaregs_t h2d2_dmaregs; /* 0x300 - 0x33c */ + pcie_devdmaregs_t d2h2_dmaregs; /* 0x340 - 0x37c */ + pcie_devdmaregs_t h2d3_dmaregs; /* 0x380 - 0x3bc */ + pcie_devdmaregs_t d2h3_dmaregs; /* 0x3c0 - 0x3fc */ + uint32 d2h_intrlazy_1; /* 0x400 */ + uint32 h2d_intrlazy_1; /* 0x404 */ + uint32 h2d_intstat_1; /* 0x408 */ + uint32 h2d_intmask_1; /* 0x40c */ + uint32 d2h_intstat_1; /* 0x410 */ + uint32 d2h_intmask_1; /* 0x414 */ + uint32 PAD[2]; /* 0x418 - 0x41C */ + uint32 d2h_intrlazy_2; /* 0x420 */ + uint32 h2d_intrlazy_2; /* 0x424 */ + uint32 h2d_intstat_2; /* 0x428 */ + uint32 h2d_intmask_2; /* 0x42c */ + uint32 d2h_intstat_2; /* 0x430 */ + uint32 d2h_intmask_2; /* 0x434 */ + uint32 PAD[10]; /* 0x438 - 0x45F */ + uint32 ifrm_ctrlst; /* 0x460 */ + uint32 PAD[1]; /* 0x464 */ + pcie_ifrm_array_t ifrm_msk_arr; /* 0x468 - 0x46F */ + pcie_ifrm_array_t ifrm_tid_arr[IFRM_FR_DEV_VALID]; + /* 0x470 - 0x47F */ + pcie_ifrm_vector_t ifrm_vec[IFRM_FR_DEV_MAX]; + /* 0x480 - 0x4FF */ + pcie_ifrm_intr_t ifrm_intr[IFRM_FR_DEV_MAX]; + /* 0x500 - 0x53F */ + /* HMAP regs for PCIE corerev >= 24 [0x540 - 0x5DF] */ + pcie_hmapwindow_t hmapwindow[HMAP_MAX_WINDOWS]; /* 0x540 - 0x5BF */ + pcie_hmapviolation_t hmapviolation; /* 0x5C0 - 0x5CF */ + uint32 hmap_window_config; /* 0x5D0 */ + uint32 PAD[3]; /* 0x5D4 - 0x5DF */ + uint32 idma_hwa_status; /* 0x5E0 */ + uint32 PAD[7]; /* 0x5E4 - 0x5FF */ + uint32 PAD[2][64]; /* 0x600 - 0x7FF */ + } pcie2; + } u; + uint16 sprom[64]; /* SPROM shadow Area : 0x800 - 0x880 */ + uint32 PAD[96]; /* 0x880 - 0x9FF */ + /* direct memory access (pcie2 rev19 and after) : 0xA00 - 0xAFF */ + union { + /* corerev < 64 */ + struct { + uint32 dar_ctrl; /* 0xA00 */ + uint32 PAD[7]; /* 0xA04-0xA1F */ + uint32 intstatus; /* 0xA20 */ + uint32 PAD[1]; /* 0xA24 */ + uint32 h2d_db_0_0; /* 0xA28 */ + uint32 h2d_db_0_1; /* 0xA2C */ + uint32 h2d_db_1_0; /* 0xA30 */ + uint32 h2d_db_1_1; /* 0xA34 */ + uint32 h2d_db_2_0; /* 0xA38 */ + uint32 h2d_db_2_1; /* 0xA3C */ + uint32 errlog; /* 0xA40 */ + uint32 erraddr; /* 0xA44 */ + uint32 mbox_int; /* 0xA48 */ + uint32 fis_ctrl; /* 0xA4C */ + uint32 PAD[36]; /* 0xA50 - 0xADC */ + uint32 clk_ctl_st; /* 0xAE0 */ + uint32 PAD[1]; /* 0xAE4 */ + uint32 powerctl; /* 0xAE8 */ + uint32 PAD[5]; /* 0xAEC-0xAFF */ + } dar; + /* corerev > = 64 */ + struct { + uint32 dar_ctrl; /* 0xA00 */ + uint32 dar_cap; /* 0xA04 */ + 
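+ /* Example: the dar vs. dar_64 layout is selected by core revision,
+ * matching the REV_GE_64() checks used elsewhere in this file. A read
+ * sketch with hypothetical 'rev', 'osh' and 'regs' variables (illustrative
+ * only, not code from this patch):
+ *
+ *	uint32 ccs = REV_GE_64(rev) ?
+ *		R_REG(osh, &regs->u1.dar_64.clk_ctl_st) :
+ *		R_REG(osh, &regs->u1.dar.clk_ctl_st);
+ */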
uint32 clk_ctl_st; /* 0xA08 */ + uint32 powerctl; /* 0xA0C */ + uint32 intstatus; /* 0xA10 */ + uint32 PAD[3]; /* 0xA14-0xA1F */ + uint32 h2d_db_0_0; /* 0xA20 */ + uint32 h2d_db_0_1; /* 0xA24 */ + uint32 h2d_db_1_0; /* 0xA28 */ + uint32 h2d_db_1_1; /* 0xA2C */ + uint32 h2d_db_2_0; /* 0xA30 */ + uint32 h2d_db_2_1; /* 0xA34 */ + uint32 h2d_db_3_0; /* 0xA38 */ + uint32 h2d_db_3_1; /* 0xA3C */ + uint32 h2d_db_4_0; /* 0xA40 */ + uint32 h2d_db_4_1; /* 0xA44 */ + uint32 h2d_db_5_0; /* 0xA48 */ + uint32 h2d_db_5_1; /* 0xA4C */ + uint32 h2d_db_6_0; /* 0xA50 */ + uint32 h2d_db_6_1; /* 0xA54 */ + uint32 h2d_db_7_0; /* 0xA58 */ + uint32 h2d_db_7_1; /* 0xA5C */ + uint32 errlog; /* 0xA60 */ + uint32 erraddr; /* 0xA64 */ + uint32 mbox_int; /* 0xA68 */ + uint32 fis_ctrl; /* 0xA6C */ + uint32 PAD[36]; /* 0xA70-0xAFF */ + } dar_64; + } u1; + uint32 PAD[64]; /* 0xB00-0xBFF */ + /* Function Control/Status Registers for corerev >= 64 */ + /* 0xC00 - 0xCFF */ + struct { + uint32 control; /* 0xC00 */ + uint32 iostatus; /* 0xC04 */ + uint32 capability; /* 0xC08 */ + uint32 PAD[1]; /* 0xC0C */ + uint32 intstatus; /* 0xC10 */ + uint32 intmask; /* 0xC14 */ + uint32 pwr_intstatus; /* 0xC18 */ + uint32 pwr_intmask; /* 0xC1C */ + uint32 msi_vector; /* 0xC20 */ + uint32 msi_intmask; /* 0xC24 */ + uint32 msi_intstatus; /* 0xC28 */ + uint32 msi_pend_cnt; /* 0xC2C */ + uint32 mbox_intstatus; /* 0xC30 */ + uint32 mbox_intmask; /* 0xC34 */ + uint32 ltr_state; /* 0xC38 */ + uint32 PAD[1]; /* 0xC3C */ + uint32 intr_vector; /* 0xC40 */ + uint32 intr_addrlow; /* 0xC44 */ + uint32 intr_addrhigh; /* 0xC48 */ + uint32 PAD[45]; /* 0xC4C-0xCFF */ + } ftn_ctrl; +} sbpcieregs_t; +#endif /* !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) || */ + /* defined(ATE_BUILD) || defined(BCMDVFS) */ + +#define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */ + +/* 10th and 11th 4KB BAR0 windows */ +#define PCIE_TER_BAR0_WIN 0xc50 +#define PCIE_TER_BAR0_WRAPPER 0xc54 + +/* PCI control */ +#define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */ +#define PCIE_RST 0x02 /* Value driven out to pin */ +#define PCIE_SPERST 0x04 /* SurvivePeRst */ +#define PCIE_FORCECFGCLKON_ALP 0x08 +#define PCIE_DISABLE_L1CLK_GATING 0x10 +#define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */ +#define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */ +#define PCIE_WakeModeL2 0x1000 /* Wake on L2 */ +#define PCIE_MULTIMSI_EN 0x2000 /* enable multi-vector MSI messages */ +#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */ +#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */ +#define PCIE_EN_MDIO_IN_PERST 0x20000 /* enable access to internal registers when PERST */ +#define PCIE_HWDisableL1EntryEnable 0x40000 /* when set, Hw requests can do entry/exit from L1 ASPM */ +#define PCIE_MSI_B2B_EN 0x100000 /* enable back-to-back MSI messages */ +#define PCIE_MSI_FIFO_CLEAR 0x200000 /* reset MSI FIFO */ +#define PCIE_IDMA_MODE_EN(rev) (REV_GE_64(rev) ?
0x1 : 0x800000) /* implicit M2M DMA mode */ +#define PCIE_TL_CLK_DETCT 0x4000000 /* enable TL clk detection */ +#define PCIE_REQ_PEND_DIS_L1 0x1000000 /* prevents entering L1 on pending requests from host */ +#define PCIE_DIS_L23CLK_GATE 0x10000000 /* disable clk gating in L23(pcie_tl_clk) */ + +/* Function control (corerev > 64) */ +#define PCIE_CPLCA_ENABLE 0x01 +/* 1: send CPL with CA on BP error, 0: send CPLD with SC and data is FFFF */ +#define PCIE_DLY_PERST_TO_COE 0x02 +/* when set, PERST is holding asserted until sprom-related register updates has completed */ + +#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */ +#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */ +#define PCIE_SWPME_FN0 0x10000 +#define PCIE_SWPME_FN0_SHF 16 + +/* Interrupt status/mask */ +#define PCIE_INTA 0x01 /* PCIE INTA message is received */ +#define PCIE_INTB 0x02 /* PCIE INTB message is received */ +#define PCIE_INTFATAL 0x04 /* PCIE INTFATAL message is received */ +#define PCIE_INTNFATAL 0x08 /* PCIE INTNONFATAL message is received */ +#define PCIE_INTCORR 0x10 /* PCIE INTCORR message is received */ +#define PCIE_INTPME 0x20 /* PCIE INTPME message is received */ +#define PCIE_PERST 0x40 /* PCIE Reset Interrupt */ + +#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */ +#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */ +#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */ +#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */ +#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */ +#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */ +#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */ +#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */ + +/* PCIE MSI Vector Assignment register */ +#define MSIVEC_MB_0 (0x1 << 1) /* MSI Vector offset for mailbox0 is 2 */ +#define MSIVEC_MB_1 (0x1 << 2) /* MSI Vector offset for mailbox1 is 3 */ +#define MSIVEC_D2H0_DB0 (0x1 << 3) /* MSI Vector offset for interface0 door bell 0 is 4 */ +#define MSIVEC_D2H0_DB1 (0x1 << 4) /* MSI Vector offset for interface0 door bell 1 is 5 */ + +/* PCIE MailboxInt/MailboxIntMask register */ +#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */ +#define PCIE_MB_TOSB_FN0_1 0x0002 +#define PCIE_MB_TOSB_FN1_0 0x0004 +#define PCIE_MB_TOSB_FN1_1 0x0008 +#define PCIE_MB_TOSB_FN2_0 0x0010 +#define PCIE_MB_TOSB_FN2_1 0x0020 +#define PCIE_MB_TOSB_FN3_0 0x0040 +#define PCIE_MB_TOSB_FN3_1 0x0080 +#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */ +#define PCIE_MB_TOPCIE_FN0_1 0x0200 +#define PCIE_MB_TOPCIE_FN1_0 0x0400 +#define PCIE_MB_TOPCIE_FN1_1 0x0800 +#define PCIE_MB_TOPCIE_FN2_0 0x1000 +#define PCIE_MB_TOPCIE_FN2_1 0x2000 +#define PCIE_MB_TOPCIE_FN3_0 0x4000 +#define PCIE_MB_TOPCIE_FN3_1 0x8000 + +#define PCIE_MB_TOPCIE_DB0_D2H0(rev) (REV_GE_64(rev) ? 0x0001 : 0x010000) +#define PCIE_MB_TOPCIE_DB0_D2H1(rev) (REV_GE_64(rev) ? 0x0002 : 0x020000) +#define PCIE_MB_TOPCIE_DB1_D2H0(rev) (REV_GE_64(rev) ? 0x0004 : 0x040000) +#define PCIE_MB_TOPCIE_DB1_D2H1(rev) (REV_GE_64(rev) ? 0x0008 : 0x080000) +#define PCIE_MB_TOPCIE_DB2_D2H0(rev) (REV_GE_64(rev) ? 0x0010 : 0x100000) +#define PCIE_MB_TOPCIE_DB2_D2H1(rev) (REV_GE_64(rev) ? 0x0020 : 0x200000) +#define PCIE_MB_TOPCIE_DB3_D2H0(rev) (REV_GE_64(rev) ? 
0x0040 : 0x400000)
+#define PCIE_MB_TOPCIE_DB3_D2H1(rev) (REV_GE_64(rev) ? 0x0080 : 0x800000)
+#define PCIE_MB_TOPCIE_DB4_D2H0(rev) (REV_GE_64(rev) ? 0x0100 : 0x0)
+#define PCIE_MB_TOPCIE_DB4_D2H1(rev) (REV_GE_64(rev) ? 0x0200 : 0x0)
+#define PCIE_MB_TOPCIE_DB5_D2H0(rev) (REV_GE_64(rev) ? 0x0400 : 0x0)
+#define PCIE_MB_TOPCIE_DB5_D2H1(rev) (REV_GE_64(rev) ? 0x0800 : 0x0)
+#define PCIE_MB_TOPCIE_DB6_D2H0(rev) (REV_GE_64(rev) ? 0x1000 : 0x0)
+#define PCIE_MB_TOPCIE_DB6_D2H1(rev) (REV_GE_64(rev) ? 0x2000 : 0x0)
+#define PCIE_MB_TOPCIE_DB7_D2H0(rev) (REV_GE_64(rev) ? 0x4000 : 0x0)
+#define PCIE_MB_TOPCIE_DB7_D2H1(rev) (REV_GE_64(rev) ? 0x8000 : 0x0)
+
+#define PCIE_MB_D2H_MB_MASK(rev) \
+ (PCIE_MB_TOPCIE_DB0_D2H0(rev) | PCIE_MB_TOPCIE_DB0_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB1_D2H0(rev) | PCIE_MB_TOPCIE_DB1_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB2_D2H0(rev) | PCIE_MB_TOPCIE_DB2_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB3_D2H0(rev) | PCIE_MB_TOPCIE_DB3_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB4_D2H0(rev) | PCIE_MB_TOPCIE_DB4_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB5_D2H0(rev) | PCIE_MB_TOPCIE_DB5_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB6_D2H0(rev) | PCIE_MB_TOPCIE_DB6_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB7_D2H0(rev) | PCIE_MB_TOPCIE_DB7_D2H1(rev))
+
+#define SBTOPCIE0_BASE 0x08000000
+#define SBTOPCIE1_BASE 0x0c000000
+
+/* Protection Control register */
+#define PROTECT_CFG (1 << 0)
+#define PROTECT_DMABADDR (1 << 1)
+
+#define PROTECT_FN_CFG_WRITE (1 << 0)
+#define PROTECT_FN_CFG_READ (1 << 1)
+#define PROTECT_FN_ENUM_WRITE (1 << 2)
+#define PROTECT_FN_ENUM_READ (1 << 3)
+#define PROTECT_FN_DMABADDR (1 << 4)
+
+/* On chips with CCI-400, the small pcie 128 MB region base has shifted */
+#define CCI400_SBTOPCIE0_BASE 0x20000000
+#define CCI400_SBTOPCIE1_BASE 0x24000000
+
+/* SB to PCIE translation masks */
+#define SBTOPCIE0_MASK 0xfc000000
+#define SBTOPCIE1_MASK 0xfc000000
+#define SBTOPCIE2_MASK 0xc0000000
+
+/* Access type bits (0:1) */
+#define SBTOPCIE_MEM 0
+#define SBTOPCIE_IO 1
+#define SBTOPCIE_CFG0 2
+#define SBTOPCIE_CFG1 3
+
+/* Prefetch enable bit 2 */
+#define SBTOPCIE_PF 4
+
+/* Write Burst enable for memory write bit 3 */
+#define SBTOPCIE_WR_BURST 8
+
+/* config access */
+#define CONFIGADDR_FUNC_MASK 0x7000
+#define CONFIGADDR_FUNC_SHF 12
+#define CONFIGADDR_REG_MASK 0x0FFF
+#define CONFIGADDR_REG_SHF 0
+
+#define PCIE_CONFIG_INDADDR(f, r) ((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \
+ (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF))
+
+/* PCIE protocol regs Indirect Address */
+#define PCIEADDR_PROT_MASK 0x300
+#define PCIEADDR_PROT_SHF 8
+#define PCIEADDR_PL_TLP 0
+#define PCIEADDR_PL_DLLP 1
+#define PCIEADDR_PL_PLP 2
+
+#define PCIE_CORE_REG_CONTROL 0x00u /* Control */
+#define PCIE_CORE_REG_IOSTATUS 0x04u /* IO status */
+#define PCIE_CORE_REG_BITSTATUS 0x0Cu /* bitstatus */
+#define PCIE_CORE_REG_GPIO_SEL 0x10u /* gpio sel */
+#define PCIE_CORE_REG_GPIO_OUT_EN 0x14u /* gpio out en */
+#define PCIE_CORE_REG_INT_STATUS 0x20u /* int status */
+#define PCIE_CORE_REG_INT_MASK 0x24u /* int mask */
+#define PCIE_CORE_REG_SB_PCIE_MB 0x28u /* sbpcie mb */
+#define PCIE_CORE_REG_ERRLOG 0x40u /* errlog */
+#define PCIE_CORE_REG_ERR_ADDR 0x44u /* errlog addr */
+#define PCIE_CORE_REG_MB_INTR 0x48u /* MB intr */
+#define PCIE_CORE_REG_SB_PCIE_0 0x100u /* sbpcie0 map */
+#define PCIE_CORE_REG_SB_PCIE_1 0x104u /* sbpcie1 map */
+#define PCIE_CORE_REG_SB_PCIE_2 0x108u /* sbpcie2 map */
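+
+/*
+ * Editor's sketch (not part of the original header): the usual pattern for
+ * the indirect config-space access pair above. PCIE_CFGADDR/PCIE_CFGDATA
+ * (0x120/0x124) select and transfer one dword of a function's config space,
+ * with the address composed by PCIE_CONFIG_INDADDR(). W_REG/R_REG are the
+ * driver's osl.h accessors; 'regs' is assumed to point at the mapped core.
+ *
+ *	W_REG(osh, (volatile uint32 *)((volatile uint8 *)regs + PCIE_CFGADDR),
+ *	      PCIE_CONFIG_INDADDR(func, offset));
+ *	val = R_REG(osh, (volatile uint32 *)((volatile uint8 *)regs + PCIE_CFGDATA));
+ */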
+
+/* PCIE Config registers */
+#define PCIE_CFG_DEV_STS_CTRL_2 0x0d4u /* dev_sts_control_2 */
+#define PCIE_CFG_ADV_ERR_CAP 0x100u /* adv_err_cap */
+#define PCIE_CFG_UC_ERR_STS 0x104u /* uc_err_status */
+#define PCIE_CFG_UC_ERR_MASK 0x108u /* ucorr_err_mask */
+#define PCIE_CFG_UNCOR_ERR_SERV 0x10cu /* ucorr_err_sevr */
+#define PCIE_CFG_CORR_ERR_STS 0x110u /* corr_err_status */
+#define PCIE_CFG_CORR_ERR_MASK 0x114u /* corr_err_mask */
+#define PCIE_CFG_ADV_ERR_CTRL 0x118u /* adv_err_cap_control */
+#define PCIE_CFG_HDR_LOG1 0x11Cu /* header_log1 */
+#define PCIE_CFG_HDR_LOG2 0x120u /* header_log2 */
+#define PCIE_CFG_HDR_LOG3 0x124u /* header_log3 */
+#define PCIE_CFG_HDR_LOG4 0x128u /* header_log4 */
+#define PCIE_CFG_PML1_SUB_CAP_ID 0x240u /* PML1sub_capID */
+#define PCIE_CFG_PML1_SUB_CAP_REG 0x244u /* PML1_sub_Cap_reg */
+#define PCIE_CFG_PML1_SUB_CTRL1 0x248u /* PML1_sub_control1 */
+#define PCIE_CFG_PML1_SUB_CTRL3 0x24Cu /* PML1_sub_control2 */
+#define PCIE_CFG_TL_CTRL_5 0x814u /* tl_control_5 */
+#define PCIE_CFG_PHY_ERR_ATT_VEC 0x1820u /* phy_err_attn_vec */
+#define PCIE_CFG_PHY_ERR_ATT_MASK 0x1824u /* phy_err_attn_mask */
+
+/* PCIE protocol PHY diagnostic registers */
+#define PCIE_PLP_MODEREG 0x200u /* Mode */
+#define PCIE_PLP_STATUSREG 0x204u /* Status */
+#define PCIE_PLP_LTSSMCTRLREG 0x208u /* LTSSM control */
+#define PCIE_PLP_LTLINKNUMREG 0x20cu /* Link Training Link number */
+#define PCIE_PLP_LTLANENUMREG 0x210u /* Link Training Lane number */
+#define PCIE_PLP_LTNFTSREG 0x214u /* Link Training N_FTS */
+#define PCIE_PLP_ATTNREG 0x218u /* Attention */
+#define PCIE_PLP_ATTNMASKREG 0x21Cu /* Attention Mask */
+#define PCIE_PLP_RXERRCTR 0x220u /* Rx Error */
+#define PCIE_PLP_RXFRMERRCTR 0x224u /* Rx Framing Error */
+#define PCIE_PLP_RXERRTHRESHREG 0x228u /* Rx Error threshold */
+#define PCIE_PLP_TESTCTRLREG 0x22Cu /* Test Control reg */
+#define PCIE_PLP_SERDESCTRLOVRDREG 0x230u /* SERDES Control Override */
+#define PCIE_PLP_TIMINGOVRDREG 0x234u /* Timing param override */
+#define PCIE_PLP_RXTXSMDIAGREG 0x238u /* RXTX State Machine Diag */
+#define PCIE_PLP_LTSSMDIAGREG 0x23Cu /* LTSSM State Machine Diag */
+
+/* PCIE protocol DLLP diagnostic registers */
+#define PCIE_DLLP_LCREG 0x100u /* Link Control */
+#define PCIE_DLLP_LSREG 0x104u /* Link Status */
+#define PCIE_DLLP_LAREG 0x108u /* Link Attention */
+#define PCIE_DLLP_LAMASKREG 0x10Cu /* Link Attention Mask */
+#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110u /* Next Tx Seq Num */
+#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114u /* Acked Tx Seq Num */
+#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118u /* Purged Tx Seq Num */
+#define PCIE_DLLP_RXSEQNUMREG 0x11Cu /* Rx Sequence Number */
+#define PCIE_DLLP_LRREG 0x120u /* Link Replay */
+#define PCIE_DLLP_LACKTOREG 0x124u /* Link Ack Timeout */
+#define PCIE_DLLP_PMTHRESHREG 0x128u /* Power Management Threshold */
+#define PCIE_DLLP_RTRYWPREG 0x12Cu /* Retry buffer write ptr */
+#define PCIE_DLLP_RTRYRPREG 0x130u /* Retry buffer Read ptr */
+#define PCIE_DLLP_RTRYPPREG 0x134u /* Retry buffer Purged ptr */
+#define PCIE_DLLP_RTRRWREG 0x138u /* Retry buffer Read/Write */
+#define PCIE_DLLP_ECTHRESHREG 0x13Cu /* Error Count Threshold */
+#define PCIE_DLLP_TLPERRCTRREG 0x140u /* TLP Error Counter */
+#define PCIE_DLLP_ERRCTRREG 0x144u /* Error Counter */
+#define PCIE_DLLP_NAKRXCTRREG 0x148u /* NAK Received Counter */
+#define PCIE_DLLP_TESTREG 0x14Cu /* Test */
+#define PCIE_DLLP_PKTBIST 0x150u /* Packet BIST */
+#define PCIE_DLLP_PCIE11 0x154u /* DLLP PCIE 1.1 reg */
+
+#define PCIE_DLLP_LSREG_LINKUP (1u << 16u)
+
+/* PCIE protocol TLP diagnostic registers */
+#define PCIE_TLP_CONFIGREG 0x000u /* Configuration */
+#define PCIE_TLP_WORKAROUNDSREG 0x004u /* TLP Workarounds */
+#define PCIE_TLP_WRDMAUPPER 0x010u /* Write DMA Upper Address */
+#define PCIE_TLP_WRDMALOWER 0x014u /* Write DMA Lower Address */
+#define PCIE_TLP_WRDMAREQ_LBEREG 0x018u /* Write DMA Len/ByteEn Req */
+#define PCIE_TLP_RDDMAUPPER 0x01Cu /* Read DMA Upper Address */
+#define PCIE_TLP_RDDMALOWER 0x020u /* Read DMA Lower Address */
+#define PCIE_TLP_RDDMALENREG 0x024u /* Read DMA Len Req */
+#define PCIE_TLP_MSIDMAUPPER 0x028u /* MSI DMA Upper Address */
+#define PCIE_TLP_MSIDMALOWER 0x02Cu /* MSI DMA Lower Address */
+#define PCIE_TLP_MSIDMALENREG 0x030u /* MSI DMA Len Req */
+#define PCIE_TLP_SLVREQLENREG 0x034u /* Slave Request Len */
+#define PCIE_TLP_FCINPUTSREQ 0x038u /* Flow Control Inputs */
+#define PCIE_TLP_TXSMGRSREQ 0x03Cu /* Tx StateMachine and Gated Req */
+#define PCIE_TLP_ADRACKCNTARBLEN 0x040u /* Address Ack XferCnt and ARB Len */
+#define PCIE_TLP_DMACPLHDR0 0x044u /* DMA Completion Hdr 0 */
+#define PCIE_TLP_DMACPLHDR1 0x048u /* DMA Completion Hdr 1 */
+#define PCIE_TLP_DMACPLHDR2 0x04Cu /* DMA Completion Hdr 2 */
+#define PCIE_TLP_DMACPLMISC0 0x050u /* DMA Completion Misc0 */
+#define PCIE_TLP_DMACPLMISC1 0x054u /* DMA Completion Misc1 */
+#define PCIE_TLP_DMACPLMISC2 0x058u /* DMA Completion Misc2 */
+#define PCIE_TLP_SPTCTRLLEN 0x05Cu /* Split Controller Req len */
+#define PCIE_TLP_SPTCTRLMSIC0 0x060u /* Split Controller Misc 0 */
+#define PCIE_TLP_SPTCTRLMSIC1 0x064u /* Split Controller Misc 1 */
+#define PCIE_TLP_BUSDEVFUNC 0x068u /* Bus/Device/Func */
+#define PCIE_TLP_RESETCTR 0x06Cu /* Reset Counter */
+#define PCIE_TLP_RTRYBUF 0x070u /* Retry Buffer value */
+#define PCIE_TLP_TGTDEBUG1 0x074u /* Target Debug Reg1 */
+#define PCIE_TLP_TGTDEBUG2 0x078u /* Target Debug Reg2 */
+#define PCIE_TLP_TGTDEBUG3 0x07Cu /* Target Debug Reg3 */
+#define PCIE_TLP_TGTDEBUG4 0x080u /* Target Debug Reg4 */
+
+/* PCIE2 MDIO register offsets */
+#define PCIE2_MDIO_CONTROL 0x128
+#define PCIE2_MDIO_WR_DATA 0x12C
+#define PCIE2_MDIO_RD_DATA 0x130
+
+/* MDIO control */
+#define MDIOCTL_DIVISOR_MASK 0x7fu /* clock to be used on MDIO */
+#define MDIOCTL_DIVISOR_VAL 0x2u
+#define MDIOCTL_PREAM_EN 0x80u /* Enable preamble sequence */
+#define MDIOCTL_ACCESS_DONE 0x100u /* Transaction complete */
+
+/* MDIO Data */
+#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
+#define MDIODATA_TA 0x00020000 /* Turnaround */
+#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
+#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
+#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
+#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
+#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
+#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
+#define MDIODATA_WRITE 0x10000000 /* write Transaction */
+#define MDIODATA_READ 0x20000000 /* Read Transaction */
+#define MDIODATA_START 0x40000000 /* start of Transaction */
+
+#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
+#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
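+
+/*
+ * Editor's sketch (not part of the original header): composing one Gen1
+ * MDIO word from the MDIODATA_* fields above. The hypothetical mdio_xfer()
+ * stands in for the driver's real MDIO state machine, which writes the word
+ * and then polls MDIOCTL_ACCESS_DONE.
+ *
+ *	uint32 w = MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
+ *	        ((dev << MDIODATA_DEVADDR_SHF) & MDIODATA_DEVADDR_MASK) |
+ *	        ((reg << MDIODATA_REGADDR_SHF) & MDIODATA_REGADDR_MASK) |
+ *	        (val & MDIODATA_MASK);
+ *	mdio_xfer(w);
+ *
+ * Serdes registers additionally need the two-level block mapping described
+ * in the MDIO devices note below: first write the block index to the
+ * block-address register (MDIODATA_BLK_ADDR), then address the register
+ * within the selected block.
+ */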
+
+/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */
+#define MDIOCTL2_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
+#define MDIOCTL2_DIVISOR_VAL 0x2
+#define MDIOCTL2_REGADDR_SHF 8 /* Regaddr shift */
+#define MDIOCTL2_REGADDR_MASK 0x00FFFF00 /* Regaddr Mask */
+#define MDIOCTL2_DEVADDR_SHF 24 /* Physmedia devaddr shift */
+#define MDIOCTL2_DEVADDR_MASK 0x0f000000 /* Physmedia devaddr Mask */
+#define MDIOCTL2_SLAVE_BYPASS 0x10000000 /* IP slave bypass */
+#define MDIOCTL2_READ 0x20000000 /* read transaction */
+
+#define MDIODATA2_DONE 0x80000000u /* rd/wr transaction done */
+#define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */
+#define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */
+
+/* MDIO devices (SERDES modules)
+ * Unlike the old pcie cores (rev < 10), the rev 10 pcie serdes organizes its
+ * registers into a few blocks; a two-level mapping (block idx, register offset)
+ * is required.
+ */
+#define MDIO_DEV_IEEE0 0x000
+#define MDIO_DEV_IEEE1 0x001
+#define MDIO_DEV_BLK0 0x800
+#define MDIO_DEV_BLK1 0x801
+#define MDIO_DEV_BLK2 0x802
+#define MDIO_DEV_BLK3 0x803
+#define MDIO_DEV_BLK4 0x804
+#define MDIO_DEV_TXPLL 0x808 /* TXPLL register block idx */
+#define MDIO_DEV_TXCTRL0 0x820
+#define MDIO_DEV_SERDESID 0x831
+#define MDIO_DEV_RXCTRL0 0x840
+
+/* XgxsBlk1_A Register Offsets */
+#define BLK1_PWR_MGMT0 0x16
+#define BLK1_PWR_MGMT1 0x17
+#define BLK1_PWR_MGMT2 0x18
+#define BLK1_PWR_MGMT3 0x19
+#define BLK1_PWR_MGMT4 0x1A
+
+/* serdes regs (rev < 10) */
+#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
+#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
+#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
+ /* SERDES RX registers */
+#define SERDES_RX_CTRL 1 /* Rx cntrl */
+#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
+#define SERDES_RX_CDR 6 /* CDR */
+#define SERDES_RX_CDRBW 7 /* CDR BW */
+
+ /* SERDES RX control register */
+#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
+#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
+
+ /* SERDES PLL registers */
+#define SERDES_PLL_CTRL 1 /* PLL control reg */
+#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
+
+/* Power management threshold */
+#define PCIE_L0THRESHOLDTIME_MASK 0xFF00u /* bits 0 - 7 */
+#define PCIE_L1THRESHOLDTIME_MASK 0xFF00u /* bits 8 - 15 */
+#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
+#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
+#define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */
+
+/* SPROM offsets */
+#define SRSH_ASPM_OFFSET 4 /* word 4 */
+#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
+#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
+#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */
+#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
+#define SRSH_L23READY_EXIT_NOPERST 0x8000u /* bit 15 */
+#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
+#define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */
+#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
+#define SRSH_BD_OFFSET 6 /* word 6 */
+#define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */
+
+/* PCI Capability ID's
+ * Reference include/linux/pci_regs.h
+ * #define PCI_CAP_LIST_ID 0 // Capability ID
+ * #define PCI_CAP_ID_PM 0x01 // Power Management
+ * #define PCI_CAP_ID_AGP 0x02 // Accelerated Graphics Port
+ * #define PCI_CAP_ID_VPD 0x03 // Vital Product Data
+ * #define PCI_CAP_ID_SLOTID 0x04 // Slot Identification
+ * #define PCI_CAP_ID_MSI 0x05 // Message Signalled Interrupts
+ * #define PCI_CAP_ID_CHSWP 0x06 // CompactPCI HotSwap
+ * #define PCI_CAP_ID_PCIX 0x07 // PCI-X
+ * #define PCI_CAP_ID_HT 0x08 // HyperTransport
+ * #define PCI_CAP_ID_VNDR 0x09 // Vendor-Specific
+ * #define PCI_CAP_ID_DBG 0x0A // Debug port
+ * #define PCI_CAP_ID_CCRC 0x0B // CompactPCI Central Resource Control
+ *
#define PCI_CAP_ID_SHPC 0x0C // PCI Standard Hot-Plug Controller + * #define PCI_CAP_ID_SSVID 0x0D // Bridge subsystem vendor/device ID + * #define PCI_CAP_ID_AGP3 0x0E // AGP Target PCI-PCI bridge + * #define PCI_CAP_ID_SECDEV 0x0F // Secure Device + * #define PCI_CAP_ID_MSIX 0x11 // MSI-X + * #define PCI_CAP_ID_SATA 0x12 // SATA Data/Index Conf. + * #define PCI_CAP_ID_AF 0x13 // PCI Advanced Features + * #define PCI_CAP_ID_EA 0x14 // PCI Enhanced Allocation + * #define PCI_CAP_ID_MAX PCI_CAP_ID_EA + */ + +#define PCIE_CAP_ID_EXP 0x10 // PCI Express + +/* PCIe Capabilities Offsets + * Reference include/linux/pci_regs.h + * #define PCIE_CAP_FLAGS 2 // Capabilities register + * #define PCIE_CAP_DEVCAP 4 // Device capabilities + * #define PCIE_CAP_DEVCTL 8 // Device Control + * #define PCIE_CAP_DEVSTA 10 // Device Status + * #define PCIE_CAP_LNKCAP 12 // Link Capabilities + * #define PCIE_CAP_LNKCTL 16 // Link Control + * #define PCIE_CAP_LNKSTA 18 // Link Status + * #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 // v1 endpoints end here + * #define PCIE_CAP_SLTCAP 20 // Slot Capabilities + * #define PCIE_CAP_SLTCTL 24 // Slot Control + * #define PCIE_CAP_SLTSTA 26 // Slot Status + * #define PCIE_CAP_RTCTL 28 // Root Control + * #define PCIE_CAP_RTCAP 30 // Root Capabilities + * #define PCIE_CAP_RTSTA 32 // Root Status + */ + +/* Linkcapability reg offset in PCIE Cap */ +#define PCIE_CAP_LINKCAP_OFFSET 12 /* linkcap offset in pcie cap */ +#define PCIE_CAP_LINKCAP_LNKSPEED_MASK 0xf /* Supported Link Speeds */ +#define PCIE_CAP_LINKCAP_GEN2 0x2 /* Value for GEN2 */ + +/* Uc_Err reg offset in AER Cap */ +#define PCIE_EXTCAP_ID_ERR 0x01 /* Advanced Error Reporting */ +#define PCIE_EXTCAP_AER_UCERR_OFFSET 4 /* Uc_Err reg offset in AER Cap */ +#define PCIE_EXTCAP_ERR_HEADER_LOG_0 28 +#define PCIE_EXTCAP_ERR_HEADER_LOG_1 32 +#define PCIE_EXTCAP_ERR_HEADER_LOG_2 36 +#define PCIE_EXTCAP_ERR_HEADER_LOG_3 40 + +/* L1SS reg offset in L1SS Ext Cap */ +#define PCIE_EXTCAP_ID_L1SS 0x1e /* PCI Express L1 PM Substates Capability */ +#define PCIE_EXTCAP_L1SS_CAP_OFFSET 4 /* L1SSCap reg offset in L1SS Cap */ +#define PCIE_EXTCAP_L1SS_CONTROL_OFFSET 8 /* L1SSControl reg offset in L1SS Cap */ +#define PCIE_EXTCAP_L1SS_CONTROL2_OFFSET 0xc /* L1SSControl reg offset in L1SS Cap */ + +/* Linkcontrol reg offset in PCIE Cap */ +#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */ +#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */ +#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */ +#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */ +#define PCIE_LINKSPEED_MASK 0xF0000u /* bits 0 - 3 of high word */ +#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */ +#define PCIE_LINK_STS_LINKSPEED_5Gbps (0x2 << PCIE_LINKSPEED_SHIFT) /* PCIE_LINKSPEED 5Gbps */ + +/* Devcontrol reg offset in PCIE Cap */ +#define PCIE_CAP_DEVCTRL_OFFSET 8 /* devctrl offset in pcie cap */ +#define PCIE_CAP_DEVCTRL_MRRS_MASK 0x7000 /* Max read request size mask */ +#define PCIE_CAP_DEVCTRL_MRRS_SHIFT 12 /* Max read request size shift */ +#define PCIE_CAP_DEVCTRL_MRRS_128B 0 /* 128 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_256B 1 /* 256 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_512B 2 /* 512 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_1024B 3 /* 1024 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_MASK 0x00e0 /* Max payload size mask */ +#define PCIE_CAP_DEVCTRL_MPS_SHIFT 5 /* Max payload size shift */ +#define PCIE_CAP_DEVCTRL_MPS_128B 0 /* 128 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_256B 1 /* 256 Byte */ +#define 
PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */
+
+#define PCIE_ASPM_CTRL_MASK 3 /* bit 0 and 1 */
+#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */
+
+#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */
+#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */
+
+#define PCIE_EXT_L1SS_MASK 0xf /* Bits [3:0] of L1SSControl 0x248 */
+#define PCIE_EXT_L1SS_ENAB 0xf /* Bits [3:0] of L1SSControl 0x248 */
+
+/* NumMsg and NumMsgEn in PCIE MSI Cap */
+#define MSICAP_NUM_MSG_SHF 17
+#define MSICAP_NUM_MSG_MASK (0x7 << MSICAP_NUM_MSG_SHF)
+#define MSICAP_NUM_MSG_EN_SHF 20
+#define MSICAP_NUM_MSG_EN_MASK (0x7 << MSICAP_NUM_MSG_EN_SHF)
+
+/* Devcontrol2 reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL2_OFFSET 0x28 /* devctrl2 offset in pcie cap */
+#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK 0x400 /* Latency Tolerance Reporting Enable */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13 /* Enable OBFF mechanism, select signaling method */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000 /* Enable OBFF mechanism, select signaling method */
+
+/* LTR registers in PCIE Cap */
+#define PCIE_LTR0_REG_OFFSET 0x844u /* ltr0_reg offset in pcie cap */
+#define PCIE_LTR1_REG_OFFSET 0x848u /* ltr1_reg offset in pcie cap */
+#define PCIE_LTR2_REG_OFFSET 0x84cu /* ltr2_reg offset in pcie cap */
+#define PCIE_LTR0_REG_DEFAULT_60 0x883c883cu /* active latency default to 60usec */
+#define PCIE_LTR0_REG_DEFAULT_150 0x88968896u /* active latency default to 150usec */
+#define PCIE_LTR1_REG_DEFAULT 0x88648864u /* idle latency default to 100usec */
+#define PCIE_LTR2_REG_DEFAULT 0x90039003u /* sleep latency default to 3msec */
+#define PCIE_LTR_LAT_VALUE_MASK 0x3FFu /* LTR Latency mask */
+#define PCIE_LTR_LAT_SCALE_SHIFT 10u /* LTR Scale shift */
+#define PCIE_LTR_LAT_SCALE_MASK 0x1C00u /* LTR Scale mask */
+#define PCIE_LTR_SNOOP_REQ_SHIFT 15u /* LTR SNOOP REQ shift */
+#define PCIE_LTR_SNOOP_REQ_MASK 0x8000u /* LTR SNOOP REQ mask */
+
+/* Status reg PCIE_PLP_STATUSREG */
+#define PCIE_PLP_POLARITYINV_STAT 0x10u
+
+/* PCIE BRCM Vendor CAP REVID reg bits */
+#define BRCMCAP_PCIEREV_CT_MASK 0xF00u
+#define BRCMCAP_PCIEREV_CT_SHIFT 8u
+#define BRCMCAP_PCIEREV_REVID_MASK 0xFFu
+#define BRCMCAP_PCIEREV_REVID_SHIFT 0
+
+#define PCIE_REVREG_CT_PCIE1 0
+#define PCIE_REVREG_CT_PCIE2 1
+
+/* PCIE GEN2 specific defines */
+/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */
+#define PCIE2R0_BRCMCAP_REVID_OFFSET 4
+#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET 8
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET 12
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET 16
+#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET 20
+#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET 24
+#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET 28
+#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET 32
+#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET 36
+#define PCIE2R0_BRCMCAP_INTMASK_OFFSET 40
+#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET 44
+#define PCIE2R0_BRCMCAP_BPADDR_OFFSET 48
+#define PCIE2R0_BRCMCAP_BPDATA_OFFSET 52
+#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET 56
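+
+/*
+ * Editor's note (sketch, not part of the original header): decoding one
+ * 16-bit half of an LTR register with the PCIE_LTR_* masks above. An LTR
+ * entry is a 10-bit latency value plus a 3-bit scale (multiplier
+ * 1 ns << (5 * scale)). Sanity check against PCIE_LTR0_REG_DEFAULT_60
+ * (low half 0x883c): value = 0x3c = 60, scale = 2, so 60 * 1024 ns ~= 61 us,
+ * matching the "60usec" comment; bit 15 (snoop required) is also set.
+ *
+ *	uint32 lat = ltr & PCIE_LTR_LAT_VALUE_MASK;
+ *	uint32 scale = (ltr & PCIE_LTR_LAT_SCALE_MASK) >> PCIE_LTR_LAT_SCALE_SHIFT;
+ *	bool snoop = (ltr & PCIE_LTR_SNOOP_REQ_MASK) != 0;
+ */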
+
+/*
+ * definition of configuration space registers of PCIe gen2
+ */
+#define PCIECFGREG_STATUS_CMD 0x4
+#define PCIECFGREG_PM_CSR 0x4C
+#define PCIECFGREG_MSI_CAP 0x58
+#define PCIECFGREG_MSI_ADDR_L 0x5C
+#define PCIECFGREG_MSI_ADDR_H 0x60
+#define PCIECFGREG_MSI_DATA 0x64
+#define PCIECFGREG_SPROM_CTRL 0x88
+#define PCIECFGREG_LINK_STATUS_CTRL 0xBCu
+#define PCIECFGREG_LINK_STATUS_CTRL2 0xDCu
+#define PCIECFGREG_DEV_STATUS_CTRL 0xB4u
+#define PCIECFGGEN_DEV_STATUS_CTRL2 0xD4
+#define PCIECFGREG_RBAR_CTRL 0x228
+#define PCIECFGREG_PML1_SUB_CTRL1 0x248
+#define PCIECFGREG_PML1_SUB_CTRL2 0x24C
+#define PCIECFGREG_REG_BAR2_CONFIG 0x4E0
+#define PCIECFGREG_REG_BAR3_CONFIG 0x4F4
+#define PCIECFGREG_PDL_CTRL1 0x1004
+#define PCIECFGREG_PDL_CTRL5 (0x1014u)
+#define PCIECFGREG_PDL_IDDQ 0x1814
+#define PCIECFGREG_REG_PHY_CTL7 0x181c
+#define PCIECFGREG_PHY_DBG_CLKREQ0 0x1E10
+#define PCIECFGREG_PHY_DBG_CLKREQ1 0x1E14
+#define PCIECFGREG_PHY_DBG_CLKREQ2 0x1E18
+#define PCIECFGREG_PHY_DBG_CLKREQ3 0x1E1C
+#define PCIECFGREG_PHY_LTSSM_HIST_0 0x1CEC
+#define PCIECFGREG_PHY_LTSSM_HIST_1 0x1CF0
+#define PCIECFGREG_PHY_LTSSM_HIST_2 0x1CF4
+#define PCIECFGREG_PHY_LTSSM_HIST_3 0x1CF8
+#define PCIECFGREG_TREFUP 0x1814
+#define PCIECFGREG_TREFUP_EXT 0x1818
+
+/* PCIECFGREG_STATUS_CMD reg bit definitions */
+#define PCIECFG_STS_CMD_MEM_SPACE_SHIFT (1u)
+#define PCIECFG_STS_CMD_BUS_MASTER_SHIFT (2u)
+/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
+#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */
+#define PCI_PM_L1_1_ENA_MASK 0x00000002 /* PCI-PM L1.1 Enabled */
+#define ASPM_L1_2_ENA_MASK 0x00000004 /* ASPM L1.2 Enabled */
+#define ASPM_L1_1_ENA_MASK 0x00000008 /* ASPM L1.1 Enabled */
+
+/* PCIECFGREG_PDL_CTRL1 reg bit definitions */
+#define PCIECFG_PDL_CTRL1_RETRAIN_REQ_MASK (0x4000u)
+#define PCIECFG_PDL_CTRL1_RETRAIN_REQ_SHIFT (14u)
+#define PCIECFG_PDL_CTRL1_MAX_DLP_L1_ENTER_MASK (0x7Fu)
+#define PCIECFG_PDL_CTRL1_MAX_DLP_L1_ENTER_SHIFT (16u)
+#define PCIECFG_PDL_CTRL1_MAX_DLP_L1_ENTER_VAL (0x6Fu)
+
+/* PCIECFGREG_PDL_CTRL5 reg bit definitions */
+#define PCIECFG_PDL_CTRL5_DOWNSTREAM_PORT_SHIFT (8u)
+#define PCIECFG_PDL_CTRL5_GLOOPBACK_SHIFT (9u)
+
+/* PCIe gen2 mailbox interrupt masks */
+#define I_MB 0x3
+#define I_BIT0 0x1
+#define I_BIT1 0x2
+
+/* PCIE gen2 config regs */
+#define PCIIntstatus 0x090
+#define PCIIntmask 0x094
+#define PCISBMbx 0x98
+
+/* enumeration Core regs */
+#define PCIH2D_MailBox 0x140
+#define PCIH2D_DB1 0x144
+#define PCID2H_MailBox 0x148
+#define PCIH2D_MailBox_1 0x150 /* for dma channel1 */
+#define PCIH2D_DB1_1 0x154
+#define PCID2H_MailBox_1 0x158
+#define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */
+#define PCIH2D_DB1_2 0x164
+#define PCID2H_MailBox_2 0x168
+#define PCIH2D_DB1_3 0x174
+#define PCIE_CLK_CTRL 0x1E0
+#define PCIE_PWR_CTRL 0x1E8
+
+#define PCIControl(rev) (REV_GE_64(rev) ? 0xC00 : 0x00)
+/* for corerev < 64 idma_en is in the PCIControl register */
+#define IDMAControl(rev) (REV_GE_64(rev) ? 0x480 : 0x00)
+#define PCIMailBoxInt(rev) (REV_GE_64(rev) ? 0xC30 : 0x48)
+#define PCIMailBoxMask(rev) (REV_GE_64(rev) ? 0xC34 : 0x4C)
+#define PCIFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xC10 : 0x20)
+#define PCIFunctionIntmask(rev) (REV_GE_64(rev) ? 0xC14 : 0x24)
+#define PCIPowerIntstatus(rev) (REV_GE_64(rev) ? 0xC18 : 0x1A4)
+#define PCIPowerIntmask(rev) (REV_GE_64(rev) ? 0xC1C : 0x1A8)
+#define PCIDARClkCtl(rev) (REV_GE_64(rev) ? 0xA08 : 0xAE0)
+#define PCIDARPwrCtl(rev) (REV_GE_64(rev) ? 0xA0C : 0xAE8)
+#define PCIDARFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xA10 : 0xA20)
+#define PCIDARH2D_DB0(rev) (REV_GE_64(rev) ? 0xA20 : 0xA28)
+#define PCIDARErrlog(rev) (REV_GE_64(rev) ? 0xA60 : 0xA40)
+#define PCIDARErrlog_Addr(rev) (REV_GE_64(rev) ? 0xA64 : 0xA44)
+#define PCIDARMailboxint(rev) (REV_GE_64(rev) ? 0xA68 : 0xA48)
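+
+/*
+ * Editor's sketch (not part of the original header): the (rev)-parameterized
+ * macros above resolve to the pre- or post-corerev-64 fixed offset, so
+ * callers can stay revision-agnostic, e.g. reading the mailbox interrupt
+ * status through the siutils helper:
+ *
+ *	uint32 mbintstatus = si_corereg(sih, sih->buscoreidx,
+ *	                                PCIMailBoxInt(corerev), 0, 0);
+ */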
+
+#define PCIMSIVecAssign 0x58
+
+/* base of all HMAP window registers */
+#define PCI_HMAP_WINDOW_BASE(rev) (REV_GE_64(rev) ? 0x580u : 0x540u)
+#define PCI_HMAP_VIOLATION_ADDR_L(rev) (REV_GE_64(rev) ? 0x600u : 0x5C0u)
+#define PCI_HMAP_VIOLATION_ADDR_U(rev) (REV_GE_64(rev) ? 0x604u : 0x5C4u)
+#define PCI_HMAP_VIOLATION_INFO(rev) (REV_GE_64(rev) ? 0x608u : 0x5C8u)
+#define PCI_HMAP_WINDOW_CONFIG(rev) (REV_GE_64(rev) ? 0x610u : 0x5D0u)
+
+/* HMAP Register related offsets */
+#define PCI_HMAP_NWINDOWS_SHIFT 8U
+#define PCI_HMAP_NWINDOWS_MASK 0x0000ff00U /* bits 8:15 */
+#define PCI_HMAP_VIO_ID_MASK 0x0000007fU /* 0:14 */
+#define PCI_HMAP_VIO_ID_SHIFT 0U
+#define PCI_HMAP_VIO_SRC_MASK 0x00008000U /* bit 15 */
+#define PCI_HMAP_VIO_SRC_SHIFT 15U
+#define PCI_HMAP_VIO_TYPE_MASK 0x00010000U /* bit 16 */
+#define PCI_HMAP_VIO_TYPE_SHIFT 16U
+#define PCI_HMAP_VIO_ERR_MASK 0x00060000U /* bit 17:18 */
+#define PCI_HMAP_VIO_ERR_SHIFT 17U
+
+#define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */
+#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
+
+#define PCIECFGREG_DEVCONTROL 0xB4
+#define PCIECFGREG_BASEADDR0 0x10
+#define PCIECFGREG_BASEADDR1 0x18
+#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12
+#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT)
+#define PCIECFGREG_DEVCTRL_MPS_SHFT 5
+#define PCIECFGREG_DEVCTRL_MPS_MASK (0x7 << PCIECFGREG_DEVCTRL_MPS_SHFT)
+#define PCIECFGREG_PM_CSR_STATE_MASK 0x00000003
+#define PCIECFGREG_PM_CSR_STATE_D0 0
+#define PCIECFGREG_PM_CSR_STATE_D1 1
+#define PCIECFGREG_PM_CSR_STATE_D2 2
+#define PCIECFGREG_PM_CSR_STATE_D3_HOT 3
+#define PCIECFGREG_PM_CSR_STATE_D3_COLD 4
+
+/* Direct Access regs */
+#define DAR_ERRLOG(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.errlog) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.errlog))
+#define DAR_ERRADDR(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.erraddr) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.erraddr))
+#define DAR_CLK_CTRL(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.clk_ctl_st) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.clk_ctl_st))
+#define DAR_INTSTAT(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.intstatus) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.intstatus))
+#define DAR_PCIH2D_DB0_0(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_0) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_0))
+#define DAR_PCIH2D_DB0_1(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_1) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_1))
+#define DAR_PCIH2D_DB1_0(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_0) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_0))
+#define DAR_PCIH2D_DB1_1(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_1) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_1))
+#define DAR_PCIH2D_DB2_0(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_0) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_0))
+#define DAR_PCIH2D_DB2_1(rev) (REV_GE_64(rev) ?
\ + OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_1) : \ + OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_1)) +#define DAR_PCIH2D_DB3_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_0) +#define DAR_PCIH2D_DB3_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_1) +#define DAR_PCIH2D_DB4_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_0) +#define DAR_PCIH2D_DB4_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_1) +#define DAR_PCIH2D_DB5_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_0) +#define DAR_PCIH2D_DB5_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_1) +#define DAR_PCIH2D_DB6_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_0) +#define DAR_PCIH2D_DB6_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_1) +#define DAR_PCIH2D_DB7_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_0) +#define DAR_PCIH2D_DB7_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_1) + +#if !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) +#define DAR_PCIMailBoxInt(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.mbox_int) : \ + OFFSETOF(sbpcieregs_t, u1.dar.mbox_int)) +#define DAR_PCIE_PWR_CTRL(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.powerctl) : \ + OFFSETOF(sbpcieregs_t, u1.dar.powerctl)) +#define DAR_PCIE_DAR_CTRL(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.dar_ctrl) : \ + OFFSETOF(sbpcieregs_t, u1.dar.dar_ctrl)) +#else +#define DAR_PCIMailBoxInt(rev) PCIE_dar_mailboxint_OFFSET(rev) +#define DAR_PCIE_PWR_CTRL(rev) PCIE_dar_power_control_OFFSET(rev) +#define DAR_PCIE_DAR_CTRL(rev) PCIE_dar_control_OFFSET(rev) +#endif + +#define DAR_FIS_CTRL(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.fis_ctrl) + +#define DAR_FIS_START_SHIFT 0u +#define DAR_FIS_START_MASK (1u << DAR_FIS_START_SHIFT) + +#define PCIE_PWR_REQ_PCIE (0x1 << 8) + +/* SROM hardware region */ +#define SROM_OFFSET_BAR1_CTRL 52 + +#define BAR1_ENC_SIZE_MASK 0x000e +#define BAR1_ENC_SIZE_SHIFT 1 + +#define BAR1_ENC_SIZE_1M 0 +#define BAR1_ENC_SIZE_2M 1 +#define BAR1_ENC_SIZE_4M 2 + +#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET 0xD4 +#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB 0x400 + +/* + * Latency Tolerance Reporting (LTR) states + * Active has the least tolerant latency requirement + * Sleep is most tolerant + */ +#define LTR_ACTIVE 2 +#define LTR_ACTIVE_IDLE 1 +#define LTR_SLEEP 0 +#define LTR_FINAL_MASK 0x300 +#define LTR_FINAL_SHIFT 8 + +/* pwrinstatus, pwrintmask regs */ +#define PCIEGEN2_PWRINT_D0_STATE_SHIFT 0 +#define PCIEGEN2_PWRINT_D1_STATE_SHIFT 1 +#define PCIEGEN2_PWRINT_D2_STATE_SHIFT 2 +#define PCIEGEN2_PWRINT_D3_STATE_SHIFT 3 +#define PCIEGEN2_PWRINT_L0_LINK_SHIFT 4 +#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT 5 +#define PCIEGEN2_PWRINT_L1_LINK_SHIFT 6 +#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT 7 +#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT 8 + +#define PCIEGEN2_PWRINT_D0_STATE_MASK (1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D1_STATE_MASK (1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D2_STATE_MASK (1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D3_STATE_MASK (1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT) +#define PCIEGEN2_PWRINT_L0_LINK_MASK (1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L0s_LINK_MASK (1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L1_LINK_MASK (1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK (1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT) +#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK (1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT) + +/* sbtopcie mail box */ +#define SBTOPCIE_MB_FUNC0_SHIFT 8 +#define SBTOPCIE_MB_FUNC1_SHIFT 10 
+#define SBTOPCIE_MB_FUNC2_SHIFT 12
+#define SBTOPCIE_MB_FUNC3_SHIFT 14
+
+#define SBTOPCIE_MB1_FUNC0_SHIFT 9
+#define SBTOPCIE_MB1_FUNC1_SHIFT 11
+#define SBTOPCIE_MB1_FUNC2_SHIFT 13
+#define SBTOPCIE_MB1_FUNC3_SHIFT 15
+
+/* pcieiocstatus */
+#define PCIEGEN2_IOC_D0_STATE_SHIFT 8
+#define PCIEGEN2_IOC_D1_STATE_SHIFT 9
+#define PCIEGEN2_IOC_D2_STATE_SHIFT 10
+#define PCIEGEN2_IOC_D3_STATE_SHIFT 11
+#define PCIEGEN2_IOC_L0_LINK_SHIFT 12
+#define PCIEGEN2_IOC_L1_LINK_SHIFT 13
+#define PCIEGEN2_IOC_L1L2_LINK_SHIFT 14
+#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15
+#define PCIEGEN2_IOC_BME_SHIFT 20
+
+#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
+#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT)
+#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT)
+#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT)
+#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
+#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
+#define PCIEGEN2_IOC_BME_MASK (1 << PCIEGEN2_IOC_BME_SHIFT)
+
+/* stat_ctrl */
+#define PCIE_STAT_CTRL_RESET 0x1
+#define PCIE_STAT_CTRL_ENABLE 0x2
+#define PCIE_STAT_CTRL_INTENABLE 0x4
+#define PCIE_STAT_CTRL_INTSTATUS 0x8
+
+/* cpl_timeout_ctrl_reg */
+#define PCIE_CTO_TO_THRESHOLD_SHIFT 0
+#define PCIE_CTO_TO_THRESHHOLD_MASK (0xfffff << PCIE_CTO_TO_THRESHOLD_SHIFT)
+
+#define PCIE_CTO_CLKCHKCNT_SHIFT 24
+#define PCIE_CTO_CLKCHKCNT_MASK (0xf << PCIE_CTO_CLKCHKCNT_SHIFT)
+
+#define PCIE_CTO_ENAB_SHIFT 31
+#define PCIE_CTO_ENAB_MASK (0x1 << PCIE_CTO_ENAB_SHIFT)
+
+/*
+ * For corerev >= 69, core_fref is always 29.9MHz instead of 37.4MHz.
+ * Use a different default threshold value to get a 10ms timeout (0x49FB6 * 33ns).
+ */
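+
+/*
+ * Editor's note (worked numbers, not part of the original): 0x49FB6 =
+ * 302,006 ticks; at core_fref = 29.9 MHz one tick is ~33.4 ns, so
+ * 302,006 * 33.4 ns ~= 10 ms. The pre-rev-69 default below, 0x58000 =
+ * 360,448 ticks at 37.4 MHz (~26.7 ns/tick), likewise gives ~9.6 ms.
+ */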
+#define PCIE_CTO_TO_THRESH_DEFAULT 0x58000
+#define PCIE_CTO_TO_THRESH_DEFAULT_REV69 0x49FB6
+
+#define PCIE_CTO_CLKCHKCNT_VAL 0xA
+
+/* ErrLog */
+#define PCIE_SROMRD_ERR_SHIFT 5
+#define PCIE_SROMRD_ERR_MASK (0x1 << PCIE_SROMRD_ERR_SHIFT)
+
+#define PCIE_CTO_ERR_SHIFT 8
+#define PCIE_CTO_ERR_MASK (0x1 << PCIE_CTO_ERR_SHIFT)
+
+#define PCIE_CTO_ERR_CODE_SHIFT 9
+#define PCIE_CTO_ERR_CODE_MASK (0x3 << PCIE_CTO_ERR_CODE_SHIFT)
+
+#define PCIE_BP_CLK_OFF_ERR_SHIFT 12
+#define PCIE_BP_CLK_OFF_ERR_MASK (0x1 << PCIE_BP_CLK_OFF_ERR_SHIFT)
+
+#define PCIE_BP_IN_RESET_ERR_SHIFT 13
+#define PCIE_BP_IN_RESET_ERR_MASK (0x1 << PCIE_BP_IN_RESET_ERR_SHIFT)
+
+/* PCIE control per Function */
+#define PCIE_FTN_DLYPERST_SHIFT 1
+#define PCIE_FTN_DLYPERST_MASK (1 << PCIE_FTN_DLYPERST_SHIFT)
+
+#define PCIE_FTN_WakeModeL2_SHIFT 3
+#define PCIE_FTN_WakeModeL2_MASK (1 << PCIE_FTN_WakeModeL2_SHIFT)
+
+#define PCIE_FTN_MSI_B2B_EN_SHIFT 4
+#define PCIE_FTN_MSI_B2B_EN_MASK (1 << PCIE_FTN_MSI_B2B_EN_SHIFT)
+
+#define PCIE_FTN_MSI_FIFO_CLEAR_SHIFT 5
+#define PCIE_FTN_MSI_FIFO_CLEAR_MASK (1 << PCIE_FTN_MSI_FIFO_CLEAR_SHIFT)
+
+#define PCIE_FTN_SWPME_SHIFT 6
+#define PCIE_FTN_SWPME_MASK (1 << PCIE_FTN_SWPME_SHIFT)
+
+#ifdef BCMDRIVER
+#if !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST)
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val);
+void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+void pcie_set_trefup_time_100us(si_t *sih);
+uint32 pcie_cto_to_thresh_default(uint corerev);
+uint32 pcie_corereg(osl_t *osh, volatile void *regs, uint32 offset, uint32 mask, uint32 val);
+#endif /* !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) */
+#if defined(DONGLEBUILD)
+void pcie_coherent_accenable(osl_t *osh, si_t *sih);
+#endif /* DONGLEBUILD */
+#endif /* BCMDRIVER */
+
+/* DMA intstatus and intmask */
+#define I_PC (1 << 10) /* pci descriptor error */
+#define I_PD (1 << 11) /* pci data error */
+#define I_DE (1 << 12) /* descriptor protocol error */
+#define I_RU (1 << 13) /* receive descriptor underflow */
+#define I_RO (1 << 14) /* receive fifo overflow */
+#define I_XU (1 << 15) /* transmit fifo underflow */
+#define I_RI (1 << 16) /* receive interrupt */
+#define I_XI (1 << 24) /* transmit interrupt */
+
+#define PD_DMA_INT_MASK_H2D 0x1DC00
+#define PD_DMA_INT_MASK_D2H 0x1DC00
+#define PD_DB_INT_MASK 0xFF0000
+
+#if defined(DONGLEBUILD)
+#if REV_GE_64(BCMPCIEREV)
+#define PD_DEV0_DB_INTSHIFT 8u
+#define PD_DEV1_DB_INTSHIFT 10u
+#define PD_DEV2_DB_INTSHIFT 12u
+#define PD_DEV3_DB_INTSHIFT 14u
+#else
+#define PD_DEV0_DB_INTSHIFT 16u
+#define PD_DEV1_DB_INTSHIFT 18u
+#define PD_DEV2_DB_INTSHIFT 20u
+#define PD_DEV3_DB_INTSHIFT 22u
+#endif /* BCMPCIEREV */
+#endif /* DONGLEBUILD */
+
+#define PCIE_INVALID_OFFSET 0x18003ffc /* Invalid Register Offset for Induce Error */
+#define PCIE_INVALID_DATA 0x55555555 /* Invalid Data for Induce Error */
+
+#define PD_DEV0_DB0_INTMASK (0x1 << PD_DEV0_DB_INTSHIFT)
+#define PD_DEV0_DB1_INTMASK (0x2 << PD_DEV0_DB_INTSHIFT)
+#define PD_DEV0_DB_INTMASK ((PD_DEV0_DB0_INTMASK) | (PD_DEV0_DB1_INTMASK))
+
+#define PD_DEV1_DB0_INTMASK (0x1 << PD_DEV1_DB_INTSHIFT)
+#define PD_DEV1_DB1_INTMASK (0x2 << PD_DEV1_DB_INTSHIFT)
+#define PD_DEV1_DB_INTMASK ((PD_DEV1_DB0_INTMASK) | (PD_DEV1_DB1_INTMASK))
+
+#define PD_DEV2_DB0_INTMASK (0x1 << PD_DEV2_DB_INTSHIFT)
+#define PD_DEV2_DB1_INTMASK (0x2 << PD_DEV2_DB_INTSHIFT)
+#define PD_DEV2_DB_INTMASK ((PD_DEV2_DB0_INTMASK) | (PD_DEV2_DB1_INTMASK))
+
+#define PD_DEV3_DB0_INTMASK (0x1 <<
PD_DEV3_DB_INTSHIFT) +#define PD_DEV3_DB1_INTMASK (0x2 << PD_DEV3_DB_INTSHIFT) +#define PD_DEV3_DB_INTMASK ((PD_DEV3_DB0_INTMASK) | (PD_DEV3_DB1_INTMASK)) + +#define PD_DEV0_DMA_INTMASK 0x80 + +#define PD_FUNC0_MB_INTSHIFT 8u +#define PD_FUNC0_MB_INTMASK (0x3 << PD_FUNC0_MB_INTSHIFT) + +#define PD_FUNC0_PCIE_SB_INTSHIFT 0u +#define PD_FUNC0_PCIE_SB__INTMASK (0x3 << PD_FUNC0_PCIE_SB_INTSHIFT) + +#define PD_DEV0_PWRSTATE_INTSHIFT 24u +#define PD_DEV0_PWRSTATE_INTMASK (0x1 << PD_DEV0_PWRSTATE_INTSHIFT) + +#define PD_DEV0_PERST_INTSHIFT 6u +#define PD_DEV0_PERST_INTMASK (0x1 << PD_DEV0_PERST_INTSHIFT) + +#define PD_MSI_FIFO_OVERFLOW_INTSHIFT 28u +#define PD_MSI_FIFO_OVERFLOW_INTMASK (0x1 << PD_MSI_FIFO_OVERFLOW_INTSHIFT) + +#if defined(BCMPCIE_IFRM) +#define PD_IFRM_INTSHIFT 5u +#define PD_IFRM_INTMASK (0x1 << PD_IFRM_INTSHIFT) +#endif /* BCMPCIE_IFRM */ + +/* HMAP related constants */ +#define PD_HMAP_VIO_INTSHIFT 3u +#define PD_HMAP_VIO_INTMASK (0x1 << PD_HMAP_VIO_INTSHIFT) +#define PD_HMAP_VIO_CLR_VAL 0x3 /* write 0b11 to clear HMAP violation */ +#define PD_HMAP_VIO_SHIFT_VAL 17u /* bits 17:18 clear HMAP violation */ + +#define PD_FLR0_IN_PROG_INTSHIFT 0u +#define PD_FLR0_IN_PROG_INTMASK (0x1 << PD_FLR0_IN_PROG_INTSHIFT) +#define PD_FLR1_IN_PROG_INTSHIFT 1u +#define PD_FLR1_IN_PROG_INTMASK (0x1 << PD_FLR1_IN_PROG_INTSHIFT) + +/* DMA channel 2 datapath use case + * Implicit DMA uses DMA channel 2 (outbound only) + */ +#if defined(BCMPCIE_IDMA) && !defined(BCMPCIE_IDMA_DISABLED) +#define PD_DEV2_INTMASK PD_DEV2_DB0_INTMASK +#elif defined(BCMPCIE_IFRM) && !defined(BCMPCIE_IFRM_DISABLED) +#define PD_DEV2_INTMASK PD_DEV2_DB0_INTMASK +#elif defined(BCMPCIE_DMA_CH2) +#define PD_DEV2_INTMASK PD_DEV2_DB0_INTMASK +#else +#define PD_DEV2_INTMASK 0u +#endif /* BCMPCIE_IDMA || BCMPCIE_DMA_CH2 || BCMPCIE_IFRM */ +/* DMA channel 1 datapath use case */ +#ifdef BCMPCIE_DMA_CH1 +#define PD_DEV1_INTMASK PD_DEV1_DB0_INTMASK +#else +#define PD_DEV1_INTMASK 0u +#endif /* BCMPCIE_DMA_CH1 */ +#if defined(BCMPCIE_IDMA) || defined(BCMPCIE_IFRM) +#define PD_DEV1_IDMA_DW_INTMASK PD_DEV1_DB1_INTMASK +#else +#define PD_DEV1_IDMA_DW_INTMASK 0u +#endif /* BCMPCIE_IDMA || BCMPCIE_IFRM */ + +#define PD_DEV0_INTMASK \ + (PD_DEV0_DMA_INTMASK | PD_DEV0_DB0_INTMASK | PD_DEV0_PWRSTATE_INTMASK | \ + PD_DEV0_PERST_INTMASK | PD_DEV1_INTMASK | PD_DEV2_INTMASK | PD_DEV0_DB1_INTMASK | \ + PD_DEV1_IDMA_DW_INTMASK) + +/* implicit DMA index */ +#define PD_IDMA_COMP 0xf /* implicit dma complete */ +#define PD_IDMA_IDX0_COMP ((uint32)1 << 0) /* implicit dma index0 complete */ +#define PD_IDMA_IDX1_COMP ((uint32)1 << 1) /* implicit dma index1 complete */ +#define PD_IDMA_IDX2_COMP ((uint32)1 << 2) /* implicit dma index2 complete */ +#define PD_IDMA_IDX3_COMP ((uint32)1 << 3) /* implicit dma index3 complete */ + +#define PCIE_D2H_DB0_VAL (0x12345678) + +#define PD_ERR_ATTN_INTMASK (1u << 29) +#define PD_LINK_DOWN_INTMASK (1u << 27) + +#define PD_ERR_TTX_REQ_DURING_D3 (1u << 31) /* Tx mem req on iface when in non-D0 */ +#define PD_PRI_SIG_TARGET_ABORT_F1 (1u << 19) /* Rcvd target Abort Err Status (CA) F1 */ +#define PD_ERR_UNSPPORT_F1 (1u << 18) /* Unsupported Request Error Status. F1 */ +#define PD_ERR_ECRC_F1 (1u << 17) /* ECRC Error TLP Status. F1 */ +#define PD_ERR_MALF_TLP_F1 (1u << 16) /* Malformed TLP Status. F1 */ +#define PD_ERR_RX_OFLOW_F1 (1u << 15) /* Receiver Overflow Status. */ +#define PD_ERR_UNEXP_CPL_F1 (1u << 14) /* Unexpected Completion Status. F1 */ +#define PD_ERR_MASTER_ABRT_F1 (1u << 13) /* Receive UR Completion Status. 
F1 */ +#define PD_ERR_CPL_TIMEOUT_F1 (1u << 12) /* Completer Timeout Status F1 */ +#define PD_ERR_FC_PRTL_F1 (1u << 11) /* Flow Control Protocol Error Status F1 */ +#define PD_ERR_PSND_TLP_F1 (1u << 10) /* Poisoned Error Status F1 */ +#define PD_PRI_SIG_TARGET_ABORT (1u << 9) /* Received target Abort Error Status(CA) */ +#define PD_ERR_UNSPPORT (1u << 8) /* Unsupported Request Error Status. */ +#define PD_ERR_ECRC (1u << 7) /* ECRC Error TLP Status. */ +#define PD_ERR_MALF_TLP (1u << 6) /* Malformed TLP Status. */ +#define PD_ERR_RX_OFLOW (1u << 5) /* Receiver Overflow Status. */ +#define PD_ERR_UNEXP_CPL (1u << 4) /* Unexpected Completion Status. */ +#define PD_ERR_MASTER_ABRT (1u << 3) /* Receive UR Completion Status. */ +#define PD_ERR_CPL_TIMEOUT (1u << 2) /* Completer Timeout Status */ +#define PD_ERR_FC_PRTL (1u << 1) /* Flow Control Protocol Error Status */ +#define PD_ERR_PSND_TLP (1u << 0) /* Poisoned Error Status */ + +/* All ERR_ATTN of F1 */ +#define PD_ERR_FUNCTION1 \ + (PD_ERR_PSND_TLP_F1 | PD_ERR_FC_PRTL_F1 | PD_ERR_CPL_TIMEOUT_F1 | PD_ERR_MASTER_ABRT_F1 | \ + PD_ERR_UNEXP_CPL_F1 | PD_ERR_RX_OFLOW_F1 | PD_ERR_MALF_TLP_F1 | PD_ERR_ECRC_F1 | \ + PD_ERR_UNSPPORT_F1 | PD_PRI_SIG_TARGET_ABORT_F1) + +#define PD_ERR_TTX_REQ_DURING_D3_FN0 (1u << 10) /* Tx mem req on iface when in non-D0 */ + +/* H2D Doorbell Fields for IDMA / PWI */ +#define PD_DB_FRG_ID_SHIFT (0u) +#define PD_DB_FRG_ID_MASK (0xFu) /* bits 3:0 */ +#define PD_DB_DMA_TYPE_SHIFT (4u) +#define PD_DB_DMA_TYPE_MASK (0xFu) /* bits 7:4 */ +#define PD_DB_RINGIDX_NUM_SHIFT (8u) +#define PD_DB_RINGIDX_NUM_MASK (0xFFu) /* bits 15:8 */ +#define PD_DB_INDEX_VAL_SHIFT (16u) +#define PD_DB_INDEX_VAL_MASK (0xFFFFu) /* bits 31:16 */ + +/* PWI LUT entry fields */ +#define PWI_FLOW_VALID_MASK (0x1u) +#define PWI_FLOW_VALID_SHIFT (22u) +#define PWI_FLOW_RING_GROUP_ID_MASK (0x3u) +#define PWI_FLOW_RING_GROUP_ID_SHIFT (20u) +#define PWI_HOST_RINGIDX_MASK (0xFFu) /* Host Ring Index Number[19:12] */ +#define PWI_HOST_RINGIDX_SHIFT (12u) + +/* DMA_TYPE Values */ +#define PD_DB_DMA_TYPE_NO_IDMA (0u) +#define PD_DB_DMA_TYPE_IDMA (1u) +#define PD_DB_DMA_TYPE_PWI (2u) +#define PD_DB_DMA_TYPE_RXPOST(rev) (REV_GE_73((rev)) ? (1u) : (5u)) +#define PD_DB_DMA_TYPE_TXCPL(rev) (REV_GE_73((rev)) ? (2u) : (6u)) +#define PD_DB_DMA_TYPE_RXCPL(rev) (REV_GE_73((rev)) ? (3u) : (7u)) + +/* All ERR_ATTN of F0 */ +#define PD_ERR_FUNCTION0 \ + (PD_ERR_PSND_TLP | PD_ERR_FC_PRTL | PD_ERR_CPL_TIMEOUT | PD_ERR_MASTER_ABRT | \ + PD_ERR_UNEXP_CPL | PD_ERR_RX_OFLOW | PD_ERR_MALF_TLP | PD_ERR_ECRC | \ + PD_ERR_UNSPPORT | PD_PRI_SIG_TARGET_ABORT) +/* Shift of F1 bits */ +#define PD_ERR_FUNCTION1_SHIFT 10u + +#endif /* _PCIE_CORE_H */ diff --git a/bcmdhd.101.10.361.x/include/sbchipc.h b/bcmdhd.101.10.361.x/include/sbchipc.h new file mode 100755 index 0000000..94f3c70 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbchipc.h @@ -0,0 +1,5282 @@ +/* + * SiliconBackplane Chipcommon core hardware definitions. + * + * The chipcommon core provides chip identification, SB control, + * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, + * GPIO interface, extbus, and support for serial and parallel flashes. + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _SBCHIPC_H +#define _SBCHIPC_H + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +#define BCM_MASK32(msb, lsb) ((~0u >> (32u - (msb) - 1u)) & (~0u << (lsb))) +#include +#ifdef WL_INITVALS +#include +#endif + +/** + * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the + * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to + * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead + * be assigned their respective chipc-specific address space and connected to the Always On + * Backplane via the APB interface. + */ +typedef volatile struct { + uint32 PAD[384]; + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; /* 0x604 */ + uint32 pmustatus; /* 0x608 */ + uint32 res_state; /* 0x60C */ + uint32 res_pending; /* 0x610 */ + uint32 pmutimer; /* 0x614 */ + uint32 min_res_mask; /* 0x618 */ + uint32 max_res_mask; /* 0x61C */ + uint32 res_table_sel; /* 0x620 */ + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; /* 0x640 */ + uint32 res_req_timer; /* 0x644 */ + uint32 res_req_mask; /* 0x648 */ + uint32 core_cap_ext; /* 0x64C */ + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 ILPPeriod; /* 0x674 */ + uint32 PAD[2]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 mac_res_req_timer; /* 0x688 */ + uint32 mac_res_req_mask; /* 0x68c */ + uint32 spm_ctrl; /* 0x690 */ + uint32 spm_cap; /* 0x694 */ + uint32 spm_clk_ctrl; /* 0x698 */ + uint32 int_hi_status; /* 0x69c */ + uint32 int_lo_status; /* 0x6a0 */ + uint32 mon_table_addr; /* 0x6a4 */ + uint32 mon_ctrl_n; /* 0x6a8 */ + uint32 mon_status_n; /* 0x6ac */ + uint32 int_treshold_n; /* 0x6b0 */ + uint32 watermarks_n; /* 0x6b4 */ + uint32 spm_debug; /* 0x6b8 */ + uint32 PAD[1]; + uint32 vtrim_ctrl; /* 0x6c0 */ + uint32 vtrim_status; /* 0x6c4 */ + uint32 usec_timer; /* 0x6c8 */ + uint32 usec_timer_frac; /* 0x6cc */ + uint32 pcie_tpower_on; /* 0x6d0 */ + uint32 pcie_tport_cnt; /* 0x6d4 */ + 
uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 pmu_statstimer_addr; /* 0x6e0 */ + uint32 pmu_statstimer_ctrl; /* 0x6e4 */ + uint32 pmu_statstimer_N; /* 0x6e8 */ + uint32 PAD[1]; + uint32 mac_res_req_timer1; /* 0x6f0 */ + uint32 mac_res_req_mask1; /* 0x6f4 */ + uint32 PAD[2]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[2]; + uint32 fis_start_min_res_mask; /* 0x710 */ + uint32 PAD[3]; + uint32 rsrc_event0; /* 0x720 */ + uint32 PAD[3]; + uint32 slowtimer2; /* 0x730 */ + uint32 slowtimerfrac2; /* 0x734 */ + uint32 mac_res_req_timer2; /* 0x738 */ + uint32 mac_res_req_mask2; /* 0x73c */ + uint32 pmuintstatus; /* 0x740 */ + uint32 extwakeupstatus; /* 0x744 */ + uint32 watchdog_res_mask; /* 0x748 */ + uint32 PAD[1]; /* 0x74C */ + uint32 swscratch; /* 0x750 */ + uint32 PAD[3]; /* 0x754-0x75C */ + uint32 extwakemask0; /* 0x760 */ + uint32 extwakemask1; /* 0x764 */ + uint32 PAD[2]; /* 0x768-0x76C */ + uint32 extwakereqmask[2]; /* 0x770-0x774 */ + uint32 PAD[2]; /* 0x778-0x77C */ + uint32 pmuintctrl0; /* 0x780 */ + uint32 pmuintctrl1; /* 0x784 */ + uint32 PAD[2]; + uint32 extwakectrl[2]; /* 0x790 */ + uint32 PAD[7]; + uint32 fis_ctrl_status; /* 0x7b4 */ + uint32 fis_min_res_mask; /* 0x7b8 */ + uint32 PAD[1]; + uint32 precision_tmr_ctrl_status; /* 0x7c0 */ + uint32 precision_tmr_capture_low; /* 0x7c4 */ + uint32 precision_tmr_capture_high; /* 0x7c8 */ + uint32 precision_tmr_capture_frac; /* 0x7cc */ + uint32 precision_tmr_running_low; /* 0x7d0 */ + uint32 precision_tmr_running_high; /* 0x7d4 */ + uint32 precision_tmr_running_frac; /* 0x7d8 */ + uint32 PAD[3]; + uint32 core_cap_ext1; /* 0x7e8 */ + uint32 PAD[5]; + uint32 rsrc_substate_ctl_sts; /* 0x800 */ + uint32 rsrc_substate_trans_tmr; /* 0x804 */ + uint32 PAD[2]; + uint32 dvfs_ctrl1; /* 0x810 */ + uint32 dvfs_ctrl2; /* 0x814 */ + uint32 dvfs_voltage; /* 0x818 */ + uint32 dvfs_status; /* 0x81c */ + uint32 dvfs_core_table_address; /* 0x820 */ + uint32 dvfs_core_ctrl; /* 0x824 */ +} pmuregs_t; + +typedef struct eci_prerev35 { + uint32 eci_output; + uint32 eci_control; + uint32 eci_inputlo; + uint32 eci_inputmi; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolaritymi; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskmi; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventmi; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskmi; + uint32 eci_eventmaskhi; + uint32 PAD[3]; +} eci_prerev35_t; + +typedef struct eci_rev35 { + uint32 eci_outputlo; + uint32 eci_outputhi; + uint32 eci_controllo; + uint32 eci_controlhi; + uint32 eci_inputlo; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskhi; + uint32 eci_auxtx; + uint32 eci_auxrx; + uint32 eci_datatag; + uint32 eci_uartescvalue; + uint32 eci_autobaudctr; + uint32 eci_uartfifolevel; +} eci_rev35_t; + +typedef struct flash_config { + uint32 PAD[19]; + /* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */ + uint32 flashstrconfig; +} flash_config_t; + +typedef volatile struct { + uint32 chipid; /* 0x0 */ + uint32 capabilities; + uint32 corecontrol; /* corerev >= 1 */ + uint32 bist; + + /* OTP */ + uint32 otpstatus; /* 0x10, corerev >= 10 */ + uint32 otpcontrol; + uint32 otpprog; + uint32 otplayout; /* corerev >= 23 */ + + /* Interrupt control */ + uint32 
intstatus; /* 0x20 */ + uint32 intmask; + + /* Chip specific regs */ + uint32 chipcontrol; /* 0x28, rev >= 11 */ + uint32 chipstatus; /* 0x2c, rev >= 11 */ + + /* Jtag Master */ + uint32 jtagcmd; /* 0x30, rev >= 10 */ + uint32 jtagir; + uint32 jtagdr; + uint32 jtagctrl; + + /* serial flash interface registers */ + uint32 flashcontrol; /* 0x40 */ + uint32 flashaddress; + uint32 flashdata; + uint32 otplayoutextension; /* rev >= 35 */ + + /* Silicon backplane configuration broadcast control */ + uint32 broadcastaddress; /* 0x50 */ + uint32 broadcastdata; + + /* gpio - cleared only by power-on-reset */ + uint32 gpiopullup; /* 0x58, corerev >= 20 */ + uint32 gpiopulldown; /* 0x5c, corerev >= 20 */ + uint32 gpioin; /* 0x60 */ + uint32 gpioout; /* 0x64 */ + uint32 gpioouten; /* 0x68 */ + uint32 gpiocontrol; /* 0x6C */ + uint32 gpiointpolarity; /* 0x70 */ + uint32 gpiointmask; /* 0x74 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioevent; + uint32 gpioeventintmask; + + /* Watchdog timer */ + uint32 watchdog; /* 0x80 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioeventintpolarity; + + /* GPIO based LED powersave regs corerev >= 16 */ + uint32 gpiotimerval; /* 0x88 */ /* Obsolete and unused now */ + uint32 gpiotimeroutmask; /* Obsolete and unused now */ + + /* clock control */ + uint32 clockcontrol_n; /* 0x90 */ + uint32 clockcontrol_sb; /* aka m0 */ + uint32 clockcontrol_pci; /* aka m1 */ + uint32 clockcontrol_m2; /* mii/uart/mipsref */ + uint32 clockcontrol_m3; /* cpu */ + uint32 clkdiv; /* corerev >= 3 */ + uint32 gpiodebugsel; /* corerev >= 28 */ + uint32 capabilities_ext; /* 0xac */ + + /* pll delay registers (corerev >= 4) */ + uint32 pll_on_delay; /* 0xb0 */ + uint32 fref_sel_delay; + uint32 slow_clk_ctl; /* 5 < corerev < 10 */ + uint32 PAD; + + /* Instaclock registers (corerev >= 10) */ + uint32 system_clk_ctl; /* 0xc0 */ + uint32 clkstatestretch; + uint32 PAD[2]; + + /* Indirect backplane access (corerev >= 22) */ + uint32 bp_addrlow; /* 0xd0 */ + uint32 bp_addrhigh; + uint32 bp_data; + uint32 PAD; + uint32 bp_indaccess; + /* SPI registers, corerev >= 37 */ + uint32 gsioctrl; + uint32 gsioaddress; + uint32 gsiodata; + + /* More clock dividers (corerev >= 32) */ + uint32 clkdiv2; + /* FAB ID (corerev >= 40) */ + uint32 otpcontrol1; + uint32 fabid; /* 0xf8 */ + + /* In AI chips, pointer to erom */ + uint32 eromptr; /* 0xfc */ + + /* ExtBus control registers (corerev >= 3) */ + uint32 pcmcia_config; /* 0x100 */ + uint32 pcmcia_memwait; + uint32 pcmcia_attrwait; + uint32 pcmcia_iowait; + uint32 ide_config; + uint32 ide_memwait; + uint32 ide_attrwait; + uint32 ide_iowait; + uint32 prog_config; + uint32 prog_waitcount; + uint32 flash_config; + uint32 flash_waitcount; + uint32 SECI_config; /* 0x130 SECI configuration */ + uint32 SECI_status; + uint32 SECI_statusmask; + uint32 SECI_rxnibchanged; + +#if !defined(BCMDONGLEHOST) + union { /* 0x140 */ + /* Enhanced Coexistence Interface (ECI) registers (corerev >= 21) */ + struct eci_prerev35 lt35; + struct eci_rev35 ge35; + /* Other interfaces */ + struct flash_config flashconf; + uint32 PAD[20]; + } eci; +#else + uint32 PAD[20]; +#endif /* !defined(BCMDONGLEHOST) */ + + /* SROM interface (corerev >= 32) */ + uint32 sromcontrol; /* 0x190 */ + uint32 sromaddress; + uint32 sromdata; + uint32 PAD[1]; /* 0x19C */ + /* NAND flash registers for BCM4706 (corerev = 31) */ + uint32 nflashctrl; /* 0x1a0 */ + uint32 nflashconf; + uint32 nflashcoladdr; + uint32 nflashrowaddr; + uint32 nflashdata; + uint32 nflashwaitcnt0; /* 0x1b4 */ + uint32 PAD[2]; + + 
uint32 seci_uart_data; /* 0x1C0 */ + uint32 seci_uart_bauddiv; + uint32 seci_uart_fcr; + uint32 seci_uart_lcr; + uint32 seci_uart_mcr; + uint32 seci_uart_lsr; + uint32 seci_uart_msr; + uint32 seci_uart_baudadj; + /* Clock control and hardware workarounds (corerev >= 20) */ + uint32 clk_ctl_st; /* 0x1e0 */ + uint32 hw_war; + uint32 powerctl; /* 0x1e8 */ + uint32 powerctl2; /* 0x1ec */ + uint32 PAD[68]; + + /* UARTs */ + uint8 uart0data; /* 0x300 */ + uint8 uart0imr; + uint8 uart0fcr; + uint8 uart0lcr; + uint8 uart0mcr; + uint8 uart0lsr; + uint8 uart0msr; + uint8 uart0scratch; + uint8 PAD[184]; /* corerev >= 65 */ + uint32 rng_ctrl_0; /* 0x3c0 */ + uint32 rng_rng_soft_reset; /* 0x3c4 */ + uint32 rng_rbg_soft_reset; /* 0x3c8 */ + uint32 rng_total_bit_cnt; /* 0x3cc */ + uint32 rng_total_bit_thrshld; /* 0x3d0 */ + uint32 rng_rev_id; /* 0x3d4 */ + uint32 rng_int_status_0; /* 0x3d8 */ + uint32 rng_int_enable_0; /* 0x3dc */ + uint32 rng_fifo_data; /* 0x3e0 */ + uint32 rng_fifo_cnt; /* 0x3e4 */ + uint8 PAD[24]; /* corerev >= 65 */ + + uint8 uart1data; /* 0x400 */ + uint8 uart1imr; + uint8 uart1fcr; + uint8 uart1lcr; + uint8 uart1mcr; + uint8 uart1lsr; + uint8 uart1msr; + uint8 uart1scratch; /* 0x407 */ + uint32 PAD[50]; + uint32 sr_memrw_addr; /* 0x4d0 */ + uint32 sr_memrw_data; /* 0x4d4 */ + uint32 etbmemctrl; /* 0x4d8 */ + uint32 PAD[9]; + + /* save/restore, corerev >= 48 */ + uint32 sr_capability; /* 0x500 */ + uint32 sr_control0; /* 0x504 */ + uint32 sr_control1; /* 0x508 */ + uint32 gpio_control; /* 0x50C */ + uint32 PAD[29]; + /* 2 SR engines case */ + uint32 sr1_control0; /* 0x584 */ + uint32 sr1_control1; /* 0x588 */ + uint32 PAD[29]; + /* PMU registers (corerev >= 20) */ + /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP. + * The CPU must read them twice, compare, and retry if different. 
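+ * A minimal read-twice sketch (illustrative only; assumes the driver's
+ * usual R_REG() accessor and a mapped chipcregs_t *cc):
+ *	uint32 t1, t2;
+ *	do {
+ *		t1 = R_REG(osh, &cc->pmutimer);
+ *		t2 = R_REG(osh, &cc->pmutimer);
+ *	} while (t1 != t2);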
+ */ + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 core_cap_ext; /* 0x64c */ + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 ILPPeriod; /* 0x674 */ + uint32 PAD[2]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 mac_res_req_timer; /* 0x688 */ + uint32 mac_res_req_mask; /* 0x68c */ + uint32 PAD[18]; + uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 pmu_statstimer_addr; /* 0x6e0 */ + uint32 pmu_statstimer_ctrl; /* 0x6e4 */ + uint32 pmu_statstimer_N; /* 0x6e8 */ + uint32 PAD[1]; + uint32 mac_res_req_timer1; /* 0x6f0 */ + uint32 mac_res_req_mask1; /* 0x6f4 */ + uint32 PAD[2]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[14]; + uint32 pmuintstatus; /* 0x740 */ + uint32 extwakeupstatus; /* 0x744 */ + uint32 PAD[6]; + uint32 extwakemask0; /* 0x760 */ + uint32 extwakemask1; /* 0x764 */ + uint32 PAD[2]; /* 0x768-0x76C */ + uint32 extwakereqmask[2]; /* 0x770-0x774 */ + uint32 PAD[2]; /* 0x778-0x77C */ + uint32 pmuintctrl0; /* 0x780 */ + uint32 PAD[3]; /* 0x784 - 0x78c */ + uint32 extwakectrl[1]; /* 0x790 */ + uint32 PAD[PADSZ(0x794u, 0x7b0u)]; /* 0x794 - 0x7b0 */ + uint32 fis_ctrl_status; /* 0x7b4 */ + uint32 fis_min_res_mask; /* 0x7b8 */ + uint32 PAD[PADSZ(0x7bcu, 0x7bcu)]; /* 0x7bc */ + uint32 precision_tmr_ctrl_status; /* 0x7c0 */ + uint32 precision_tmr_capture_low; /* 0x7c4 */ + uint32 precision_tmr_capture_high; /* 0x7c8 */ + uint32 precision_tmr_capture_frac; /* 0x7cc */ + uint32 precision_tmr_running_low; /* 0x7d0 */ + uint32 precision_tmr_running_high; /* 0x7d4 */ + uint32 precision_tmr_running_frac; /* 0x7d8 */ + uint32 PAD[PADSZ(0x7dcu, 0x7e4u)]; /* 0x7dc - 0x7e4 */ + uint32 core_cap_ext1; /* 0x7e8 */ + uint32 PAD[PADSZ(0x7ecu, 0x7fcu)]; /* 0x7ec - 0x7fc */ + + uint16 sromotp[512]; /* 0x800 */ +#ifdef CCNFLASH_SUPPORT + /* Nand flash MLC controller registers (corerev >= 38) */ + uint32 nand_revision; /* 0xC00 */ + uint32 nand_cmd_start; + uint32 nand_cmd_addr_x; + uint32 nand_cmd_addr; + uint32 nand_cmd_end_addr; + uint32 nand_cs_nand_select; + uint32 nand_cs_nand_xor; + uint32 PAD; + uint32 nand_spare_rd0; + uint32 nand_spare_rd4; + uint32 nand_spare_rd8; + uint32 nand_spare_rd12; + uint32 nand_spare_wr0; + uint32 nand_spare_wr4; + uint32 nand_spare_wr8; + uint32 nand_spare_wr12; + uint32 nand_acc_control; + uint32 PAD; + uint32 nand_config; + uint32 PAD; + uint32 nand_timing_1; + uint32 nand_timing_2; + uint32 nand_semaphore; + uint32 PAD; + uint32 nand_devid; + uint32 nand_devid_x; + uint32 nand_block_lock_status; + uint32 nand_intfc_status; + uint32 nand_ecc_corr_addr_x; + uint32 nand_ecc_corr_addr; + uint32 nand_ecc_unc_addr_x; + uint32 nand_ecc_unc_addr; + uint32 nand_read_error_count; + uint32 nand_corr_stat_threshold; + uint32 PAD[2]; + uint32 nand_read_addr_x; + uint32 
nand_read_addr;
+ uint32 nand_page_program_addr_x;
+ uint32 nand_page_program_addr;
+ uint32 nand_copy_back_addr_x;
+ uint32 nand_copy_back_addr;
+ uint32 nand_block_erase_addr_x;
+ uint32 nand_block_erase_addr;
+ uint32 nand_inv_read_addr_x;
+ uint32 nand_inv_read_addr;
+ uint32 PAD[2];
+ uint32 nand_blk_wr_protect;
+ uint32 PAD[3];
+ uint32 nand_acc_control_cs1;
+ uint32 nand_config_cs1;
+ uint32 nand_timing_1_cs1;
+ uint32 nand_timing_2_cs1;
+ uint32 PAD[20];
+ uint32 nand_spare_rd16;
+ uint32 nand_spare_rd20;
+ uint32 nand_spare_rd24;
+ uint32 nand_spare_rd28;
+ uint32 nand_cache_addr;
+ uint32 nand_cache_data;
+ uint32 nand_ctrl_config;
+ uint32 nand_ctrl_status;
+#endif /* CCNFLASH_SUPPORT */
+ /* Note: the GCI register space below clashes with NFLASH. The functions
+ * accessing the following registers must therefore be guarded with
+ * NFLASH_SUPPORT; they will assert if the clash occurs.
+ */
+ uint32 gci_corecaps0; /* GCI starting at 0xC00 */
+ uint32 gci_corecaps1;
+ uint32 gci_corecaps2;
+ uint32 gci_corectrl;
+ uint32 gci_corestat; /* 0xC10 */
+ uint32 gci_intstat; /* 0xC14 */
+ uint32 gci_intmask; /* 0xC18 */
+ uint32 gci_wakemask; /* 0xC1C */
+ uint32 gci_levelintstat; /* 0xC20 */
+ uint32 gci_eventintstat; /* 0xC24 */
+ uint32 PAD[6];
+ uint32 gci_indirect_addr; /* 0xC40 */
+ uint32 gci_gpioctl; /* 0xC44 */
+ uint32 gci_gpiostatus;
+ uint32 gci_gpiomask; /* 0xC4C */
+ uint32 gci_eventsummary; /* 0xC50 */
+ uint32 gci_miscctl; /* 0xC54 */
+ uint32 gci_gpiointmask;
+ uint32 gci_gpiowakemask;
+ uint32 gci_input[32]; /* C60 */
+ uint32 gci_event[32]; /* CE0 */
+ uint32 gci_output[4]; /* D60 */
+ uint32 gci_control_0; /* 0xD70 */
+ uint32 gci_control_1; /* 0xD74 */
+ uint32 gci_intpolreg; /* 0xD78 */
+ uint32 gci_levelintmask; /* 0xD7C */
+ uint32 gci_eventintmask; /* 0xD80 */
+ uint32 PAD[3];
+ uint32 gci_inbandlevelintmask; /* 0xD90 */
+ uint32 gci_inbandeventintmask; /* 0xD94 */
+ uint32 PAD[2];
+ uint32 gci_seciauxtx; /* 0xDA0 */
+ uint32 gci_seciauxrx; /* 0xDA4 */
+ uint32 gci_secitx_datatag; /* 0xDA8 */
+ uint32 gci_secirx_datatag; /* 0xDAC */
+ uint32 gci_secitx_datamask; /* 0xDB0 */
+ uint32 gci_seciusef0tx_reg; /* 0xDB4 */
+ uint32 gci_secif0tx_offset; /* 0xDB8 */
+ uint32 gci_secif0rx_offset; /* 0xDBC */
+ uint32 gci_secif1tx_offset; /* 0xDC0 */
+ uint32 gci_rxfifo_common_ctrl; /* 0xDC4 */
+ uint32 gci_rxfifoctrl; /* 0xDC8 */
+ uint32 gci_uartreadid; /* DCC */
+ uint32 gci_seciuartescval; /* DD0 */
+ uint32 PAD;
+ uint32 gci_secififolevel; /* DD8 */
+ uint32 gci_seciuartdata; /* DDC */
+ uint32 gci_secibauddiv; /* DE0 */
+ uint32 gci_secifcr; /* DE4 */
+ uint32 gci_secilcr; /* DE8 */
+ uint32 gci_secimcr; /* DEC */
+ uint32 gci_secilsr; /* DF0 */
+ uint32 gci_secimsr; /* DF4 */
+ uint32 gci_baudadj; /* DF8 */
+ uint32 PAD;
+ uint32 gci_chipctrl; /* 0xE00 */
+ uint32 gci_chipsts; /* 0xE04 */
+ uint32 gci_gpioout; /* 0xE08 */
+ uint32 gci_gpioout_read; /* 0xE0C */
+ uint32 gci_mpwaketx; /* 0xE10 */
+ uint32 gci_mpwakedetect; /* 0xE14 */
+ uint32 gci_seciin_ctrl; /* 0xE18 */
+ uint32 gci_seciout_ctrl; /* 0xE1C */
+ uint32 gci_seciin_auxfifo_en; /* 0xE20 */
+ uint32 gci_seciout_txen_txbr; /* 0xE24 */
+ uint32 gci_seciin_rxbrstatus; /* 0xE28 */
+ uint32 gci_seciin_rxerrstatus; /* 0xE2C */
+ uint32 gci_seciin_fcstatus; /* 0xE30 */
+ uint32 gci_seciout_txstatus; /* 0xE34 */
+ uint32 gci_seciout_txbrstatus; /* 0xE38 */
+
+} chipcregs_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+#if !defined(IL_BIGENDIAN)
+#define CC_CHIPID 0
+#define
CC_CAPABILITIES 4 +#define CC_CHIPST 0x2c +#define CC_EROMPTR 0xfc +#endif /* IL_BIGENDIAN */ + +#define CC_OTPST 0x10 +#define CC_INTSTATUS 0x20 +#define CC_INTMASK 0x24 +#define CC_JTAGCMD 0x30 +#define CC_JTAGIR 0x34 +#define CC_JTAGDR 0x38 +#define CC_JTAGCTRL 0x3c +#define CC_GPIOPU 0x58 +#define CC_GPIOPD 0x5c +#define CC_GPIOIN 0x60 +#define CC_GPIOOUT 0x64 +#define CC_GPIOOUTEN 0x68 +#define CC_GPIOCTRL 0x6c +#define CC_GPIOPOL 0x70 +#define CC_GPIOINTM 0x74 +#define CC_GPIOEVENT 0x78 +#define CC_GPIOEVENTMASK 0x7c +#define CC_WATCHDOG 0x80 +#define CC_GPIOEVENTPOL 0x84 +#define CC_CLKC_N 0x90 +#define CC_CLKC_M0 0x94 +#define CC_CLKC_M1 0x98 +#define CC_CLKC_M2 0x9c +#define CC_CLKC_M3 0xa0 +#define CC_CLKDIV 0xa4 +#define CC_CAP_EXT 0xac +#define CC_SYS_CLK_CTL 0xc0 +#define CC_BP_ADRLOW 0xd0 +#define CC_BP_ADRHI 0xd4 +#define CC_BP_DATA 0xd8 +#define CC_SCR_DHD_TO_BL CC_BP_ADRHI +#define CC_SCR_BL_TO_DHD CC_BP_ADRLOW +#define CC_CLKDIV2 0xf0 +#define CC_CLK_CTL_ST SI_CLK_CTL_ST +#define PMU_CTL 0x600 +#define PMU_CAP 0x604 +#define PMU_ST 0x608 +#define PMU_RES_STATE 0x60c +#define PMU_RES_PENDING 0x610 +#define PMU_TIMER 0x614 +#define PMU_MIN_RES_MASK 0x618 +#define PMU_MAX_RES_MASK 0x61c +#define CC_CHIPCTL_ADDR 0x650 +#define CC_CHIPCTL_DATA 0x654 +#define PMU_REG_CONTROL_ADDR 0x658 +#define PMU_REG_CONTROL_DATA 0x65C +#define PMU_PLL_CONTROL_ADDR 0x660 +#define PMU_PLL_CONTROL_DATA 0x664 +#define PMU_RSRC_CONTROL_MASK 0x7B0 + +#define CC_SROM_CTRL 0x190 +#define CC_SROM_ADDRESS 0x194u +#define CC_SROM_DATA 0x198u +#define CC_SROM_OTP 0x0800 +#define CC_GCI_INDIRECT_ADDR_REG 0xC40 +#define CC_GCI_CHIP_CTRL_REG 0xE00 +#define CC_GCI_CC_OFFSET_2 2 +#define CC_GCI_CC_OFFSET_5 5 +#define CC_SWD_CTRL 0x380 +#define CC_SWD_REQACK 0x384 +#define CC_SWD_DATA 0x388 +#define GPIO_SEL_0 0x00001111 +#define GPIO_SEL_1 0x11110000 +#define GPIO_SEL_8 0x00001111 +#define GPIO_SEL_9 0x11110000 + +#define CHIPCTRLREG0 0x0 +#define CHIPCTRLREG1 0x1 +#define CHIPCTRLREG2 0x2 +#define CHIPCTRLREG3 0x3 +#define CHIPCTRLREG4 0x4 +#define CHIPCTRLREG5 0x5 +#define CHIPCTRLREG6 0x6 +#define CHIPCTRLREG13 0xd +#define CHIPCTRLREG16 0x10 +#define REGCTRLREG4 0x4 +#define REGCTRLREG5 0x5 +#define REGCTRLREG6 0x6 +#define MINRESMASKREG 0x618 +#define MAXRESMASKREG 0x61c +#define CHIPCTRLADDR 0x650 +#define CHIPCTRLDATA 0x654 +#define RSRCTABLEADDR 0x620 +#define PMU_RES_DEP_MASK 0x624 +#define RSRCUPDWNTIME 0x628 +#define PMUREG_RESREQ_MASK 0x68c +#define PMUREG_RESREQ_TIMER 0x688 +#define PMUREG_RESREQ_MASK1 0x6f4 +#define PMUREG_RESREQ_TIMER1 0x6f0 +#define EXT_LPO_AVAIL 0x100 +#define LPO_SEL (1 << 0) +#define CC_EXT_LPO_PU 0x200000 +#define GC_EXT_LPO_PU 0x2 +#define CC_INT_LPO_PU 0x100000 +#define GC_INT_LPO_PU 0x1 +#define EXT_LPO_SEL 0x8 +#define INT_LPO_SEL 0x4 +#define ENABLE_FINE_CBUCK_CTRL (1 << 30) +#define REGCTRL5_PWM_AUTO_CTRL_MASK 0x007e0000 +#define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17 +#define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000 +#define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16 +#define CC_BP_IND_ACCESS_START_SHIFT 9 +#define CC_BP_IND_ACCESS_START_MASK (1 << CC_BP_IND_ACCESS_START_SHIFT) +#define CC_BP_IND_ACCESS_RDWR_SHIFT 8 +#define CC_BP_IND_ACCESS_RDWR_MASK (1 << CC_BP_IND_ACCESS_RDWR_SHIFT) +#define CC_BP_IND_ACCESS_ERROR_SHIFT 10 +#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT) +#define GC_BT_CTRL_UARTPADS_OVRD_EN (1u << 1) + +#define LPO_SEL_TIMEOUT 1000 + +#define LPO_FINAL_SEL_SHIFT 18 + +#define LHL_LPO1_SEL 0 +#define LHL_LPO2_SEL 0x1 +#define LHL_32k_SEL 0x2 
+#define LHL_EXT_SEL 0x3 + +#define EXTLPO_BUF_PD 0x40 +#define LPO1_PD_EN 0x1 +#define LPO1_PD_SEL 0x6 +#define LPO1_PD_SEL_VAL 0x4 +#define LPO2_PD_EN 0x8 +#define LPO2_PD_SEL 0x30 +#define LPO2_PD_SEL_VAL 0x20 +#define OSC_32k_PD 0x80 + +#define LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL 0x3 + +#define LHL_LPO_AUTO 0x0 +#define LHL_LPO1_ENAB 0x1 +#define LHL_LPO2_ENAB 0x2 +#define LHL_OSC_32k_ENAB 0x3 +#define LHL_EXT_LPO_ENAB 0x4 +#define RADIO_LPO_ENAB 0x5 + +#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN 0x4 +#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR 0x8 +#define LHL_CLK_DET_CNT 0xF0 +#define LHL_CLK_DET_CNT_SHIFT 4 +#define LPO_SEL_SHIFT 9 + +#define LHL_MAIN_CTL_ADR_FINAL_CLK_SEL 0x3C0000 +#define LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL 0x600 + +#define CLK_DET_CNT_THRESH 8 + +#ifdef SR_DEBUG +#define SUBCORE_POWER_ON 0x0001 +#define PHY_POWER_ON 0x0010 +#define VDDM_POWER_ON 0x0100 +#define MEMLPLDO_POWER_ON 0x1000 +#define SUBCORE_POWER_ON_CHK 0x00040000 +#define PHY_POWER_ON_CHK 0x00080000 +#define VDDM_POWER_ON_CHK 0x00100000 +#define MEMLPLDO_POWER_ON_CHK 0x00200000 +#endif /* SR_DEBUG */ + +#ifdef CCNFLASH_SUPPORT +/* NAND flash support */ +#define CC_NAND_REVISION 0xC00 +#define CC_NAND_CMD_START 0xC04 +#define CC_NAND_CMD_ADDR 0xC0C +#define CC_NAND_SPARE_RD_0 0xC20 +#define CC_NAND_SPARE_RD_4 0xC24 +#define CC_NAND_SPARE_RD_8 0xC28 +#define CC_NAND_SPARE_RD_C 0xC2C +#define CC_NAND_CONFIG 0xC48 +#define CC_NAND_DEVID 0xC60 +#define CC_NAND_DEVID_EXT 0xC64 +#define CC_NAND_INTFC_STATUS 0xC6C +#endif /* CCNFLASH_SUPPORT */ + +/* chipid */ +#define CID_ID_MASK 0x0000ffff /**< Chip Id mask */ +#define CID_REV_MASK 0x000f0000 /**< Chip Revision mask */ +#define CID_REV_SHIFT 16 /**< Chip Revision shift */ +#define CID_PKG_MASK 0x00f00000 /**< Package Option mask */ +#define CID_PKG_SHIFT 20 /**< Package Option shift */ +#define CID_CC_MASK 0x0f000000 /**< CoreCount (corerev >= 4) */ +#define CID_CC_SHIFT 24 +#define CID_TYPE_MASK 0xf0000000 /**< Chip Type */ +#define CID_TYPE_SHIFT 28 + +/* capabilities */ +#define CC_CAP_UARTS_MASK 0x00000003u /**< Number of UARTs */ +#define CC_CAP_MIPSEB 0x00000004u /**< MIPS is in big-endian mode */ +#define CC_CAP_UCLKSEL 0x00000018u /**< UARTs clock select */ +#define CC_CAP_UINTCLK 0x00000008u /**< UARTs are driven by internal divided clock */ +#define CC_CAP_UARTGPIO 0x00000020u /**< UARTs own GPIOs 15:12 */ +#define CC_CAP_EXTBUS_MASK 0x000000c0u /**< External bus mask */ +#define CC_CAP_EXTBUS_NONE 0x00000000u /**< No ExtBus present */ +#define CC_CAP_EXTBUS_FULL 0x00000040u /**< ExtBus: PCMCIA, IDE & Prog */ +#define CC_CAP_EXTBUS_PROG 0x00000080u /**< ExtBus: ProgIf only */ +#define CC_CAP_FLASH_MASK 0x00000700u /**< Type of flash */ +#define CC_CAP_PLL_MASK 0x00038000u /**< Type of PLL */ +#define CC_CAP_PWR_CTL 0x00040000u /**< Power control */ +#define CC_CAP_OTPSIZE 0x00380000u /**< OTP Size (0 = none) */ +#define CC_CAP_OTPSIZE_SHIFT 19 /**< OTP Size shift */ +#define CC_CAP_OTPSIZE_BASE 5 /**< OTP Size base */ +#define CC_CAP_JTAGP 0x00400000u /**< JTAG Master Present */ +#define CC_CAP_ROM 0x00800000u /**< Internal boot rom active */ +#define CC_CAP_BKPLN64 0x08000000u /**< 64-bit backplane */ +#define CC_CAP_PMU 0x10000000u /**< PMU Present, rev >= 20 */ +#define CC_CAP_ECI 0x20000000u /**< ECI Present, rev >= 21 */ +#define CC_CAP_SROM 0x40000000u /**< Srom Present, rev >= 32 */ +#define CC_CAP_NFLASH 0x80000000u /**< Nand flash present, rev >= 35 */ + +#define CC_CAP2_SECI 0x00000001u /**< SECI Present, rev >= 36 */ +#define CC_CAP2_GSIO 0x00000002u 
/**< GSIO (spi/i2c) present, rev >= 37 */
+
+/* capabilities extension */
+#define CC_CAP_EXT_SECI_PRESENT 0x00000001u /**< SECI present */
+#define CC_CAP_EXT_GSIO_PRESENT 0x00000002u /**< GSIO present */
+#define CC_CAP_EXT_GCI_PRESENT 0x00000004u /**< GCI present */
+#define CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008u /**< UART present */
+#define CC_CAP_EXT_AOB_PRESENT 0x00000040u /**< AOB present */
+#define CC_CAP_EXT_SWD_PRESENT 0x00000400u /**< SWD present */
+#define CC_CAP_SR_AON_PRESENT 0x0001E000u /**< SR AON present */
+#define CC_CAP_EXT1_DVFS_PRESENT 0x00001000u /**< DVFS present */
+
+/* DVFS core count */
+#define CC_CAP_EXT1_CORE_CNT_SHIFT (7u)
+#define CC_CAP_EXT1_CORE_CNT_MASK ((0x1Fu) << CC_CAP_EXT1_CORE_CNT_SHIFT)
+
+/* SpmCtrl (Chipcommon Offset 0x690)
+ * Bits 27:16 AlpDiv
+ * Clock divider control for dividing ALP or TCK clock
+ * (bit 8 determines ALP vs TCK)
+ * Bits 8 UseDivTck
+ * See UseDivAlp (bit 1) for more details
+ * Bits 7:6 DebugMuxSel
+ * Controls the debug mux for SpmDebug register
+ * Bits 5 IntPending
+ * This field is set to 1 when any of the bits in IntHiStatus or IntLoStatus
+ * is set. It is automatically cleared after reading and clearing the
+ * IntHiStatus and IntLoStatus registers. This bit is Read only.
+ * Bits 4 SpmIdle
+ * Indicates whether the spm controller is running (SpmIdle=0) or in idle
+ * state (SpmIdle=1); Note that after setting Spmen=1 (or 0), it takes a
+ * few clock cycles (ILP or divided ALP) for SpmIdle to go to 0 (or 1).
+ * This bit is Read only.
+ * Bits 3 RoDisOutput
+ * Debug register - gate off all the SPM ring oscillator clock outputs
+ * Bits 2 RstSpm
+ * Reset spm controller.
+ * Put spm in reset before changing UseDivAlp and AlpDiv
+ * Bits 1 UseDivAlp
+ * This field, along with UseDivTck, selects the clock as the reference clock
+ * Bits [UseDivTck,UseDivAlp]:
+ * 00 - Use ILP clock as reference clock
+ * 01 - Use divided ALP clock
+ * 10 - Use divided jtag TCK
+ * Bits 0 Spmen
+ * 0 - SPM disabled
+ * 1 - SPM enabled
+ * Program all the SPM controls before enabling spm. For one-shot operation,
+ * SpmIdle indicates when the one-shot run has completed. After one-shot
+ * completion, spmen needs to be disabled first before enabling again.
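+ *
+ * An illustrative bring-up sketch of the order implied above (not a
+ * definitive sequence; spmctrl stands for the mapped 0x690 register,
+ * R_REG/W_REG for the driver's usual accessors, and 0x10u is an arbitrary
+ * example divider):
+ *	uint32 v = R_REG(osh, spmctrl) | SPMCTRL_RSTSPM_MASK;
+ *	W_REG(osh, spmctrl, v);
+ *	v = (v & ~SPMCTRL_ALPDIV_MASK) |
+ *	    (0x10u << SPMCTRL_ALPDIV_SHIFT) | SPMCTRL_USEDIVALP_MASK;
+ *	W_REG(osh, spmctrl, v);
+ *	W_REG(osh, spmctrl, v & ~SPMCTRL_RSTSPM_MASK);
+ *	W_REG(osh, spmctrl, (v & ~SPMCTRL_RSTSPM_MASK) | SPMCTRL_SPMEN_MASK);
+ * i.e. hold SPM in reset, program AlpDiv/UseDivAlp while in reset, release
+ * reset, and set Spmen only after everything else is programmed.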
+ */
+#define SPMCTRL_ALPDIV_FUNC 0x1ffu
+#define SPMCTRL_ALPDIV_RO 0xfffu
+#define SPMCTRL_ALPDIV_SHIFT 16u
+#define SPMCTRL_ALPDIV_MASK (0xfffu << SPMCTRL_ALPDIV_SHIFT)
+#define SPMCTRL_RSTSPM 0x1u
+#define SPMCTRL_RSTSPM_SHIFT 2u
+#define SPMCTRL_RSTSPM_MASK (0x1u << SPMCTRL_RSTSPM_SHIFT)
+#define SPMCTRL_USEDIVALP 0x1u
+#define SPMCTRL_USEDIVALP_SHIFT 1u
+#define SPMCTRL_USEDIVALP_MASK (0x1u << SPMCTRL_USEDIVALP_SHIFT)
+#define SPMCTRL_SPMEN 0x1u
+#define SPMCTRL_SPMEN_SHIFT 0u
+#define SPMCTRL_SPMEN_MASK (0x1u << SPMCTRL_SPMEN_SHIFT)
+
+/* SpmClkCtrl (Chipcommon Offset 0x698)
+ * Bits 31 OneShot
+ * 0 means Take periodic measurements based on IntervalValue
+ * 1 means Take a one shot measurement
+ * when OneShot=1 IntervalValue determines the amount of time to wait before
+ * taking the measurement
+ * Bits 30:28 ROClkprediv1
+ * ROClkprediv1 and ROClkprediv2 control the clock dividers of the RO clk
+ * before it goes to the monitor
+ * The RO clk goes through prediv1, followed by prediv2
+ * prediv1:
+ * 0 - no divide
+ * 1 - divide by 2
+ * 2 - divide by 4
+ * 3 - divide by 8
+ * 4 - divide by 16
+ * 5 - divide by 32
+ * prediv2:
+ * 0 - no divide
+ * 1 to 15 - divide by (prediv2+1)
+ */
+#define SPMCLKCTRL_SAMPLETIME 0x2u
+#define SPMCLKCTRL_SAMPLETIME_SHIFT 24u
+#define SPMCLKCTRL_SAMPLETIME_MASK (0xfu << SPMCLKCTRL_SAMPLETIME_SHIFT)
+#define SPMCLKCTRL_ONESHOT 0x1u
+#define SPMCLKCTRL_ONESHOT_SHIFT 31u
+#define SPMCLKCTRL_ONESHOT_MASK (0x1u << SPMCLKCTRL_ONESHOT_SHIFT)
+
+/* MonCtrlN (Chipcommon Offset 0x6a8)
+ * Bits 15:8 TargetRo
+ * The target ring oscillator to observe
+ * Bits 7:6 TargetRoExt
+ * Extended select option to choose the target clock to monitor;
+ * 00 - selects ring oscillator clock;
+ * 10 - selects functional clock;
+ * 11 - selects DFT clocks;
+ * Bits 15:8 (TargetRO) is used to select the specific RO or functional or
+ * DFT clock
+ * Bits 3 intHiEn
+ * Interrupt hi enable (MonEn should be 1)
+ * Bits 2 intLoEn
+ * Interrupt lo enable (MonEn should be 1)
+ * Bits 1 HwEnable
+ * TBD
+ * Bits 0 MonEn
+ * Enable monitor, interrupt and watermark functions
+ */
+#define MONCTRLN_TARGETRO_PMU_ALP_CLK 0u
+#define MONCTRLN_TARGETRO_PCIE_ALP_CLK 1u
+#define MONCTRLN_TARGETRO_CB_BP_CLK 2u
+#define MONCTRLN_TARGETRO_ARMCR4_CLK_4387B0 3u
+#define MONCTRLN_TARGETRO_ARMCR4_CLK_4387C0 20u
+#define MONCTRLN_TARGETRO_SHIFT 8u
+#define MONCTRLN_TARGETRO_MASK (0xffu << MONCTRLN_TARGETRO_SHIFT)
+#define MONCTRLN_TARGETROMAX 64u
+#define MONCTRLN_TARGETROHI 32u
+#define MONCTRLN_TARGETROEXT_RO 0x0u
+#define MONCTRLN_TARGETROEXT_FUNC 0x2u
+#define MONCTRLN_TARGETROEXT_DFT 0x3u
+#define MONCTRLN_TARGETROEXT_SHIFT 6u
+#define MONCTRLN_TARGETROEXT_MASK (0x3u << MONCTRLN_TARGETROEXT_SHIFT)
+#define MONCTRLN_MONEN 0x1u
+#define MONCTRLN_MONEN_SHIFT 0u
+#define MONCTRLN_MONEN_MASK (0x1u << MONCTRLN_MONEN_SHIFT)
+
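+/* Illustrative MonCtrlN encoding using the masks above (a sketch, not part
+ * of the original register description): select the backplane clock as a
+ * functional-clock target and enable the monitor.
+ *
+ *	uint32 mon = ((MONCTRLN_TARGETRO_CB_BP_CLK << MONCTRLN_TARGETRO_SHIFT) &
+ *	              MONCTRLN_TARGETRO_MASK) |
+ *	             ((MONCTRLN_TARGETROEXT_FUNC << MONCTRLN_TARGETROEXT_SHIFT) &
+ *	              MONCTRLN_TARGETROEXT_MASK) |
+ *	             MONCTRLN_MONEN_MASK;
+ */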
+/* DvfsCoreCtrlN
+ * Bits 10 Request_override_PDn
+ * When set, the dvfs_request logic for this core is overridden with the
+ * content in Request_val_PDn. This field is ignored when
+ * DVFSCtrl1.dvfs_req_override is set.
+ * Bits 9:8 Request_val_PDn
+ * see Request_override_PDn description
+ * Bits 4:0 DVFS_RsrcTrig_PDn
+ * Specifies the pmu resource that is used to trigger the DVFS request for
+ * this core. The current plan is to use the appropriate PWRSW_* pmu
+ * resource for each power domain / core.
+ */
+#define CTRLN_REQUEST_OVERRIDE_SHIFT 10u
+#define CTRLN_REQUEST_OVERRIDE_MASK (0x1u << CTRLN_REQUEST_OVERRIDE_SHIFT)
+#define CTRLN_REQUEST_VAL_SHIFT 8u
+#define CTRLN_REQUEST_VAL_MASK (0x3u << CTRLN_REQUEST_VAL_SHIFT)
+#define CTRLN_RSRC_TRIG_SHIFT 0u
+#define CTRLN_RSRC_TRIG_MASK (0x1Fu << CTRLN_RSRC_TRIG_SHIFT)
+#define CTRLN_RSRC_TRIG_CHIPC 0x1Au
+#define CTRLN_RSRC_TRIG_PCIE 0x1Au
+#define CTRLN_RSRC_TRIG_ARM 0x8u
+#define CTRLN_RSRC_TRIG_D11_MAIN 0xEu
+#define CTRLN_RSRC_TRIG_D11_AUX 0xBu
+#define CTRLN_RSRC_TRIG_D11_SCAN 0xCu
+#define CTRLN_RSRC_TRIG_HWA 0x8u
+#define CTRLN_RSRC_TRIG_BT_MAIN 0x9u
+#define CTRLN_RSRC_TRIG_BT_SCAN 0xAu
+
+/* DVFS core FW index */
+#define DVFS_CORE_CHIPC 0u
+#define DVFS_CORE_PCIE 1u
+#define DVFS_CORE_ARM 2u
+#define DVFS_CORE_D11_MAIN 3u
+#define DVFS_CORE_D11_AUX 4u
+#define DVFS_CORE_D11_SCAN 5u
+#define DVFS_CORE_BT_MAIN 6u
+#define DVFS_CORE_BT_SCAN 7u
+#define DVFS_CORE_HWA 8u
+#define DVFS_CORE_SYSMEM ((PMUREV((sih)->pmurev) < 43u) ? \
+ 9u : 8u)
+#define DVFS_CORE_MASK 0xFu
+
+#define DVFS_CORE_INVALID_IDX 0xFFu
+
+/* DVFS_Ctrl2 (PMU_BASE + 0x814)
+ * Bits 31:28 Voltage ramp down step
+ * Voltage increment amount during ramp down (10mv units)
+ * Bits 27:24 Voltage ramp up step
+ * Voltage increment amount during ramp up (10mv units)
+ * Bits 23:16 Voltage ramp down interval
+ * Number of clocks to wait during each voltage decrement
+ * Bits 15:8 Voltage ramp up interval
+ * Number of clocks to wait during each voltage increment
+ * Bits 7:0 Clock stable time
+ * Number of clocks to wait after dvfs_clk_sel is asserted
+ */
+#define DVFS_VOLTAGE_RAMP_DOWN_STEP 1u
+#define DVFS_VOLTAGE_RAMP_DOWN_STEP_SHIFT 28u
+#define DVFS_VOLTAGE_RAMP_DOWN_STEP_MASK (0xFu << DVFS_VOLTAGE_RAMP_DOWN_STEP_SHIFT)
+#define DVFS_VOLTAGE_RAMP_UP_STEP 1u
+#define DVFS_VOLTAGE_RAMP_UP_STEP_SHIFT 24u
+#define DVFS_VOLTAGE_RAMP_UP_STEP_MASK (0xFu << DVFS_VOLTAGE_RAMP_UP_STEP_SHIFT)
+#define DVFS_VOLTAGE_RAMP_DOWN_INTERVAL 1u
+#define DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_SHIFT 16u
+#define DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_MASK (0xFFu << DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_SHIFT)
+#define DVFS_VOLTAGE_RAMP_UP_INTERVAL 1u
+#define DVFS_VOLTAGE_RAMP_UP_INTERVAL_SHIFT 8u
+#define DVFS_VOLTAGE_RAMP_UP_INTERVAL_MASK (0xFFu << DVFS_VOLTAGE_RAMP_UP_INTERVAL_SHIFT)
+#define DVFS_CLOCK_STABLE_TIME 3u
+#define DVFS_CLOCK_STABLE_TIME_SHIFT 0
+#define DVFS_CLOCK_STABLE_TIME_MASK (0xFFu << DVFS_CLOCK_STABLE_TIME_SHIFT)
+
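+/* Illustrative DVFS_Ctrl2 composition from the per-field defaults above
+ * (a sketch only):
+ *
+ *	uint32 ctrl2 =
+ *	    (DVFS_VOLTAGE_RAMP_DOWN_STEP << DVFS_VOLTAGE_RAMP_DOWN_STEP_SHIFT) |
+ *	    (DVFS_VOLTAGE_RAMP_UP_STEP << DVFS_VOLTAGE_RAMP_UP_STEP_SHIFT) |
+ *	    (DVFS_VOLTAGE_RAMP_DOWN_INTERVAL << DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_SHIFT) |
+ *	    (DVFS_VOLTAGE_RAMP_UP_INTERVAL << DVFS_VOLTAGE_RAMP_UP_INTERVAL_SHIFT) |
+ *	    (DVFS_CLOCK_STABLE_TIME << DVFS_CLOCK_STABLE_TIME_SHIFT);
+ */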
+/* DVFS_Voltage (PMU_BASE + 0x818)
+ * Bits 22:16 HDV Voltage
+ * Specifies the target HDV voltage in 10mv units
+ * Bits 14:8 NDV Voltage
+ * Specifies the target NDV voltage in 10mv units
+ * Bits 6:0 LDV Voltage
+ * Specifies the target LDV voltage in 10mv units
+ */
+#define DVFS_VOLTAGE_XDV 0u /* Reserved */
+#ifdef WL_INITVALS
+#define DVFS_VOLTAGE_HDV (wliv_pmu_dvfs_voltage_hdv) /* 0.72V */
+#define DVFS_VOLTAGE_HDV_MAX (wliv_pmu_dvfs_voltage_hdv_max) /* 0.80V */
+#else
+#define DVFS_VOLTAGE_HDV 72u /* 0.72V */
+#define DVFS_VOLTAGE_HDV_MAX 80u /* 0.80V */
+#endif
+#define DVFS_VOLTAGE_HDV_PWR_OPT 68u /* 0.68V */
+#define DVFS_VOLTAGE_HDV_SHIFT 16u
+#define DVFS_VOLTAGE_HDV_MASK (0x7Fu << DVFS_VOLTAGE_HDV_SHIFT)
+#ifdef WL_INITVALS
+#define DVFS_VOLTAGE_NDV (wliv_pmu_dvfs_voltage_ndv) /* 0.72V */
+#define DVFS_VOLTAGE_NDV_NON_LVM (wliv_pmu_dvfs_voltage_ndv_non_lvm) /* 0.76V */
+#define DVFS_VOLTAGE_NDV_MAX (wliv_pmu_dvfs_voltage_ndv_max) /* 0.80V */
+#else
+#define DVFS_VOLTAGE_NDV 72u /* 0.72V */
+#define DVFS_VOLTAGE_NDV_NON_LVM 76u /* 0.76V */
+#define DVFS_VOLTAGE_NDV_MAX 80u /* 0.80V */
+#endif
+#define DVFS_VOLTAGE_NDV_PWR_OPT 68u /* 0.68V */
+#define DVFS_VOLTAGE_NDV_SHIFT 8u
+#define DVFS_VOLTAGE_NDV_MASK (0x7Fu << DVFS_VOLTAGE_NDV_SHIFT)
+#ifdef WL_INITVALS
+#define DVFS_VOLTAGE_LDV (wliv_pmu_dvfs_voltage_ldv) /* 0.65V */
+#else
+#define DVFS_VOLTAGE_LDV 65u /* 0.65V */
+#endif
+#define DVFS_VOLTAGE_LDV_PWR_OPT 65u /* 0.65V */
+#define DVFS_VOLTAGE_LDV_SHIFT 0u
+#define DVFS_VOLTAGE_LDV_MASK (0x7Fu << DVFS_VOLTAGE_LDV_SHIFT)
+
+/* DVFS_Status (PMU_BASE + 0x81C)
+ * Bits 27:26 Raw_Core_Reqn
+ * Bits 25:24 Active_Core_Reqn
+ * Bits 12:11 Core_dvfs_status
+ * Bits 9:8 Dvfs_clk_sel
+ * 00 - LDV
+ * 01 - NDV
+ * Bits 6:0 Dvfs Voltage
+ * The real time voltage that is being output from the dvfs controller
+ */
+#define DVFS_RAW_CORE_REQ_SHIFT 26u
+#define DVFS_RAW_CORE_REQ_MASK (0x3u << DVFS_RAW_CORE_REQ_SHIFT)
+#define DVFS_ACT_CORE_REQ_SHIFT 24u
+#define DVFS_ACT_CORE_REQ_MASK (0x3u << DVFS_ACT_CORE_REQ_SHIFT)
+#define DVFS_CORE_STATUS_SHIFT 11u
+#define DVFS_CORE_STATUS_MASK (0x3u << DVFS_CORE_STATUS_SHIFT)
+#define DVFS_CLK_SEL_SHIFT 8u
+#define DVFS_CLK_SEL_MASK (0x3u << DVFS_CLK_SEL_SHIFT)
+#define DVFS_VOLTAGE_SHIFT 0u
+#define DVFS_VOLTAGE_MASK (0x7Fu << DVFS_VOLTAGE_SHIFT)
+
+/* DVFS_Ctrl1 (PMU_BASE + 0x810)
+ * Bits 0 Enable DVFS
+ * This bit will enable DVFS operation. When cleared, the complete DVFS
+ * controller is bypassed and DVFS_voltage output will be the contents of
+ * NDV voltage register
+ */
+#define DVFS_DISABLE_DVFS 0u
+#define DVFS_ENABLE_DVFS 1u
+#define DVFS_ENABLE_DVFS_SHIFT 0u
+#define DVFS_ENABLE_DVFS_MASK (1u << DVFS_ENABLE_DVFS_SHIFT)
+
+#define DVFS_LPO_DELAY 40u /* usec (1 LPO clock + margin) */
+#define DVFS_FASTLPO_DELAY 2u /* usec (1 FAST_LPO clock + margin) */
+#define DVFS_NDV_LPO_DELAY 1500u
+#define DVFS_NDV_FASTLPO_DELAY 50u
+
+#if defined(BCM_FASTLPO) && !defined(BCM_FASTLPO_DISABLED)
+#define DVFS_DELAY DVFS_FASTLPO_DELAY
+#define DVFS_NDV_DELAY DVFS_NDV_FASTLPO_DELAY
+#else
+#define DVFS_DELAY DVFS_LPO_DELAY
+#define DVFS_NDV_DELAY DVFS_NDV_LPO_DELAY
+#endif /* BCM_FASTLPO && !BCM_FASTLPO_DISABLED */
+
+#define DVFS_LDV 0u
+#define DVFS_NDV 1u
+#define DVFS_HDV 2u
+
+/* PowerControl2 (Core Offset 0x1EC)
+ * Bits 17:16 DVFSStatus
+ * This 2-bit field is the DVFS voltage status mapped as
+ * 00 - LDV
+ * 01 - NDV
+ * 10 - HDV
+ * Bits 1:0 DVFSRequest
+ * This 2-bit field is used to request DVFS voltage mapped as shown above
+ */
+#define DVFS_REQ_LDV DVFS_LDV
+#define DVFS_REQ_NDV DVFS_NDV
+#define DVFS_REQ_HDV DVFS_HDV
+#define DVFS_REQ_SHIFT 0u
+#define DVFS_REQ_MASK (0x3u << DVFS_REQ_SHIFT)
+#define DVFS_STATUS_SHIFT 16u
+#define DVFS_STATUS_MASK (0x3u << DVFS_STATUS_SHIFT)
+
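+/* Illustrative NDV request through PowerControl2 using the fields above
+ * (a sketch; assumes the driver's usual R_REG/W_REG/OSL_DELAY helpers and a
+ * mapped chipcregs_t *cc whose powerctl2 member sits at core offset 0x1EC):
+ *
+ *	uint32 v = (R_REG(osh, &cc->powerctl2) & ~DVFS_REQ_MASK) |
+ *	           (DVFS_REQ_NDV << DVFS_REQ_SHIFT);
+ *	W_REG(osh, &cc->powerctl2, v);
+ *	while (((R_REG(osh, &cc->powerctl2) & DVFS_STATUS_MASK) >>
+ *	        DVFS_STATUS_SHIFT) != DVFS_REQ_NDV)
+ *		OSL_DELAY(DVFS_DELAY);
+ */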
+/* GCI Chip Control 16 Register
+ * Bits 0 CB Clock sel
+ * 0 - 160MHz
+ * 1 - 80MHz - BT can force the CB backplane clock to 80MHz when wl is down
+ */
+#define GCI_CC16_CB_CLOCK_SEL_160 0u
+#define GCI_CC16_CB_CLOCK_SEL_80 1u
+#define GCI_CC16_CB_CLOCK_SEL_SHIFT 0u
+#define GCI_CC16_CB_CLOCK_SEL_MASK (0x1u << GCI_CC16_CB_CLOCK_SEL_SHIFT)
+#define GCI_CHIPCTRL_16_PRISEL_ANT_MASK_PSM_OVR (1 << 8)
+
+/* WL Channel Info to BT via GCI - bits 40 - 47 */
+#define GCI_WL_CHN_INFO_MASK (0xFF00)
+/* WL indication of MCHAN enabled/disabled to BT - bit 36 */
+#define GCI_WL_MCHAN_BIT_MASK (0x0010)
+
+#ifdef WLC_SW_DIVERSITY
+/* WL indication of SWDIV enabled/disabled to BT - bit 33 */
+#define GCI_WL_SWDIV_ANT_VALID_BIT_MASK (0x0002)
+#define GCI_SWDIV_ANT_VALID_SHIFT 0x1
+#define GCI_SWDIV_ANT_VALID_DISABLE 0x0
+#endif
+
+/* Indicate to BT that WL is scheduling ACL based ble scan grant */
+#define GCI_WL2BT_ACL_BSD_BLE_SCAN_GRNT_MASK 0x8000000
+/* Indicate to BT that WLAN is awake */
+#define GCI_WL2BT_2G_AWAKE_MASK (1u << 28u)
+
+/* WL indication of Aux Core 2G hibernate status - bit 50 */
+#define GCI_WL2BT_2G_HIB_STATE_MASK (0x0040000u)
+
+/* WL Traffic Indication to BT */
+#define GCI_WL2BT_TRAFFIC_IND_SHIFT (12)
+#define GCI_WL2BT_TRAFFIC_IND_MASK (0x3 << GCI_WL2BT_TRAFFIC_IND_SHIFT)
+
+/* WL Strobe to BT */
+#define GCI_WL_STROBE_BIT_MASK (0x0020)
+/* bits [51:48] - reserved for wlan TX pwr index */
+/* bits [55:52] btc mode indication */
+#define GCI_WL_BTC_MODE_SHIFT (20)
+#define GCI_WL_BTC_MODE_MASK (0xF << GCI_WL_BTC_MODE_SHIFT)
+#define GCI_WL_ANT_BIT_MASK (0x00c0)
+#define GCI_WL_ANT_SHIFT_BITS (6)
+
+/* bit [40] - to indicate RC2CX mode to BT */
+#define GCI_WL_RC2CX_PERCTS_MASK 0x00000100u
+
+/* PLL type */
+#define PLL_NONE 0x00000000
+#define PLL_TYPE1 0x00010000 /**< 48MHz base, 3 dividers */
+#define PLL_TYPE2 0x00020000 /**< 48MHz, 4 dividers */
+#define PLL_TYPE3 0x00030000 /**< 25MHz, 2 dividers */
+#define PLL_TYPE4 0x00008000 /**< 48MHz, 4 dividers */
+#define PLL_TYPE5 0x00018000 /**< 25MHz, 4 dividers */
+#define PLL_TYPE6 0x00028000 /**< 100/200 or 120/240 only */
+#define PLL_TYPE7 0x00038000 /**< 25MHz, 4 dividers */
+
+/* ILP clock */
+#define ILP_CLOCK 32000
+
+/* ALP clock on pre-PMU chips */
+#define ALP_CLOCK 20000000
+
+#ifdef CFG_SIM
+#define NS_ALP_CLOCK 84922
+#define NS_SLOW_ALP_CLOCK 84922
+#define NS_CPU_CLOCK 534500
+#define NS_SLOW_CPU_CLOCK 534500
+#define NS_SI_CLOCK 271750
+#define NS_SLOW_SI_CLOCK 271750
+#define NS_FAST_MEM_CLOCK 271750
+#define NS_MEM_CLOCK 271750
+#define NS_SLOW_MEM_CLOCK 271750
+#else
+#define NS_ALP_CLOCK 125000000
+#define NS_SLOW_ALP_CLOCK 100000000
+#define NS_CPU_CLOCK 1000000000
+#define NS_SLOW_CPU_CLOCK 800000000
+#define NS_SI_CLOCK 250000000
+#define NS_SLOW_SI_CLOCK 200000000
+#define NS_FAST_MEM_CLOCK 800000000
+#define NS_MEM_CLOCK 533000000
+#define NS_SLOW_MEM_CLOCK 400000000
+#endif /* CFG_SIM */
+
+/* HT clock */
+#define HT_CLOCK 80000000
+
+/* corecontrol */
+#define CC_UARTCLKO 0x00000001 /**< Drive UART with internal clock */
+#define CC_SE 0x00000002 /**< sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO 0x00000004 /**< 1=generate GPIO interrupt without backplane clock */
+#define CC_UARTCLKEN 0x00000008 /**< enable UART Clock (corerev >= 21) */
+#define CC_RBG_RESET 0x00000040 /**< Reset RBG block (corerev >= 65) */
+
+/* retention_ctl */
+#define RCTL_MEM_RET_SLEEP_LOG_SHIFT 29
+#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT)
+
+/* 4321 chipcontrol */
+#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */
+
+/* Fields in the otpstatus register in rev >= 21 */
+#define OTPS_OL_MASK 0x000000ff
+#define OTPS_OL_MFG 0x00000001 /**< manuf row is locked */
+#define OTPS_OL_OR1 0x00000002 /**< otp redundancy row 1 is locked */
+#define OTPS_OL_OR2 0x00000004 /**< otp redundancy row 2 is locked */
+#define OTPS_OL_GU 0x00000008 /**< general use region is locked */
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+#define OTPS_GUP_HW 0x00000100 /**< h/w subregion is programmed */
+#define OTPS_GUP_SW 0x00000200 /**< s/w subregion is programmed */
+#define OTPS_GUP_CI 0x00000400 /**< chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE 0x00000800 /**< fuse subregion is
programmed */ +#define OTPS_READY 0x00001000 +#define OTPS_RV(x) (1 << (16 + (x))) /**< redundancy entry valid */ +#define OTPS_RV_MASK 0x0fff0000 +#define OTPS_PROGOK 0x40000000 + +/* Fields in the otpcontrol register in rev >= 21 */ +#define OTPC_PROGSEL 0x00000001 +#define OTPC_PCOUNT_MASK 0x0000000e +#define OTPC_PCOUNT_SHIFT 1 +#define OTPC_VSEL_MASK 0x000000f0 +#define OTPC_VSEL_SHIFT 4 +#define OTPC_TMM_MASK 0x00000700 +#define OTPC_TMM_SHIFT 8 +#define OTPC_ODM 0x00000800 +#define OTPC_PROGEN 0x80000000 + +/* Fields in the 40nm otpcontrol register in rev >= 40 */ +#define OTPC_40NM_PROGSEL_SHIFT 0 +#define OTPC_40NM_PCOUNT_SHIFT 1 +#define OTPC_40NM_PCOUNT_WR 0xA +#define OTPC_40NM_PCOUNT_V1X 0xB +#define OTPC_40NM_REGCSEL_SHIFT 5 +#define OTPC_40NM_REGCSEL_DEF 0x4 +#define OTPC_40NM_PROGIN_SHIFT 8 +#define OTPC_40NM_R2X_SHIFT 10 +#define OTPC_40NM_ODM_SHIFT 11 +#define OTPC_40NM_DF_SHIFT 15 +#define OTPC_40NM_VSEL_SHIFT 16 +#define OTPC_40NM_VSEL_WR 0xA +#define OTPC_40NM_VSEL_V1X 0xA +#define OTPC_40NM_VSEL_R1X 0x5 +#define OTPC_40NM_COFAIL_SHIFT 30 + +#define OTPC1_CPCSEL_SHIFT 0 +#define OTPC1_CPCSEL_DEF 6 +#define OTPC1_TM_SHIFT 8 +#define OTPC1_TM_WR 0x84 +#define OTPC1_TM_V1X 0x84 +#define OTPC1_TM_R1X 0x4 +#define OTPC1_CLK_EN_MASK 0x00020000 +#define OTPC1_CLK_DIV_MASK 0x00FC0000 + +/* Fields in otpprog in rev >= 21 and HND OTP */ +#define OTPP_COL_MASK 0x000000ff +#define OTPP_COL_SHIFT 0 +#define OTPP_ROW_MASK 0x0000ff00 +#define OTPP_ROW_MASK9 0x0001ff00 /* for ccrev >= 49 */ +#define OTPP_ROW_SHIFT 8 +#define OTPP_OC_MASK 0x0f000000 +#define OTPP_OC_SHIFT 24 +#define OTPP_READERR 0x10000000 +#define OTPP_VALUE_MASK 0x20000000 +#define OTPP_VALUE_SHIFT 29 +#define OTPP_START_BUSY 0x80000000 +#define OTPP_READ 0x40000000 /* HND OTP */ + +/* Fields in otplayout register */ +#define OTPL_HWRGN_OFF_MASK 0x00000FFF +#define OTPL_HWRGN_OFF_SHIFT 0 +#define OTPL_WRAP_REVID_MASK 0x00F80000 +#define OTPL_WRAP_REVID_SHIFT 19 +#define OTPL_WRAP_TYPE_MASK 0x00070000 +#define OTPL_WRAP_TYPE_SHIFT 16 +#define OTPL_WRAP_TYPE_65NM 0 +#define OTPL_WRAP_TYPE_40NM 1 +#define OTPL_WRAP_TYPE_28NM 2 +#define OTPL_WRAP_TYPE_16NM 3 +#define OTPL_WRAP_TYPE_7NM 4 +#define OTPL_ROW_SIZE_MASK 0x0000F000 +#define OTPL_ROW_SIZE_SHIFT 12 + +/* otplayout reg corerev >= 36 */ +#define OTP_CISFORMAT_NEW 0x80000000 + +/* Opcodes for OTPP_OC field */ +#define OTPPOC_READ 0 +#define OTPPOC_BIT_PROG 1 +#define OTPPOC_VERIFY 3 +#define OTPPOC_INIT 4 +#define OTPPOC_SET 5 +#define OTPPOC_RESET 6 +#define OTPPOC_OCST 7 +#define OTPPOC_ROW_LOCK 8 +#define OTPPOC_PRESCN_TEST 9 + +/* Opcodes for OTPP_OC field (40NM) */ +#define OTPPOC_READ_40NM 0 +#define OTPPOC_PROG_ENABLE_40NM 1 +#define OTPPOC_PROG_DISABLE_40NM 2 +#define OTPPOC_VERIFY_40NM 3 +#define OTPPOC_WORD_VERIFY_1_40NM 4 +#define OTPPOC_ROW_LOCK_40NM 5 +#define OTPPOC_STBY_40NM 6 +#define OTPPOC_WAKEUP_40NM 7 +#define OTPPOC_WORD_VERIFY_0_40NM 8 +#define OTPPOC_PRESCN_TEST_40NM 9 +#define OTPPOC_BIT_PROG_40NM 10 +#define OTPPOC_WORDPROG_40NM 11 +#define OTPPOC_BURNIN_40NM 12 +#define OTPPOC_AUTORELOAD_40NM 13 +#define OTPPOC_OVST_READ_40NM 14 +#define OTPPOC_OVST_PROG_40NM 15 + +/* Opcodes for OTPP_OC field (28NM) */ +#define OTPPOC_READ_28NM 0 +#define OTPPOC_READBURST_28NM 1 +#define OTPPOC_PROG_ENABLE_28NM 2 +#define OTPPOC_PROG_DISABLE_28NM 3 +#define OTPPOC_PRESCREEN_28NM 4 +#define OTPPOC_PRESCREEN_RP_28NM 5 +#define OTPPOC_FLUSH_28NM 6 +#define OTPPOC_NOP_28NM 7 +#define OTPPOC_PROG_ECC_28NM 8 +#define OTPPOC_PROG_ECC_READ_28NM 9 +#define 
OTPPOC_PROG_28NM 10 +#define OTPPOC_PROGRAM_RP_28NM 11 +#define OTPPOC_PROGRAM_OVST_28NM 12 +#define OTPPOC_RELOAD_28NM 13 +#define OTPPOC_ERASE_28NM 14 +#define OTPPOC_LOAD_RF_28NM 15 +#define OTPPOC_CTRL_WR_28NM 16 +#define OTPPOC_CTRL_RD_28NM 17 +#define OTPPOC_READ_HP_28NM 18 +#define OTPPOC_READ_OVST_28NM 19 +#define OTPPOC_READ_VERIFY0_28NM 20 +#define OTPPOC_READ_VERIFY1_28NM 21 +#define OTPPOC_READ_FORCE0_28NM 22 +#define OTPPOC_READ_FORCE1_28NM 23 +#define OTPPOC_BURNIN_28NM 24 +#define OTPPOC_PROGRAM_LOCK_28NM 25 +#define OTPPOC_PROGRAM_TESTCOL_28NM 26 +#define OTPPOC_READ_TESTCOL_28NM 27 +#define OTPPOC_READ_FOUT_28NM 28 +#define OTPPOC_SFT_RESET_28NM 29 + +#define OTPP_OC_MASK_28NM 0x0f800000 +#define OTPP_OC_SHIFT_28NM 23 + +/* OTPControl bitmap for GCI rev >= 7 */ +#define OTPC_PROGEN_28NM 0x8 +#define OTPC_DBLERRCLR 0x20 +#define OTPC_CLK_EN_MASK 0x00000040 +#define OTPC_CLK_DIV_MASK 0x00000F80 +#define OTPC_FORCE_OTP_PWR_DIS 0x00008000 + +/* Fields in otplayoutextension */ +#define OTPLAYOUTEXT_FUSE_MASK 0x3FF + +/* Jtagm characteristics that appeared at a given corerev */ +#define JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */ +#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */ +#define JTAGM_CREV_RTI 28 /**< Able to do return-to-idle */ + +/* jtagcmd */ +#define JCMD_START 0x80000000 +#define JCMD_BUSY 0x80000000 +#define JCMD_STATE_MASK 0x60000000 +#define JCMD_STATE_TLR 0x00000000 /**< Test-logic-reset */ +#define JCMD_STATE_PIR 0x20000000 /**< Pause IR */ +#define JCMD_STATE_PDR 0x40000000 /**< Pause DR */ +#define JCMD_STATE_RTI 0x60000000 /**< Run-test-idle */ +#define JCMD0_ACC_MASK 0x0000f000 +#define JCMD0_ACC_IRDR 0x00000000 +#define JCMD0_ACC_DR 0x00001000 +#define JCMD0_ACC_IR 0x00002000 +#define JCMD0_ACC_RESET 0x00003000 +#define JCMD0_ACC_IRPDR 0x00004000 +#define JCMD0_ACC_PDR 0x00005000 +#define JCMD0_IRW_MASK 0x00000f00 +#define JCMD_ACC_MASK 0x000f0000 /**< Changes for corerev 11 */ +#define JCMD_ACC_IRDR 0x00000000 +#define JCMD_ACC_DR 0x00010000 +#define JCMD_ACC_IR 0x00020000 +#define JCMD_ACC_RESET 0x00030000 +#define JCMD_ACC_IRPDR 0x00040000 +#define JCMD_ACC_PDR 0x00050000 +#define JCMD_ACC_PIR 0x00060000 +#define JCMD_ACC_IRDR_I 0x00070000 /**< rev 28: return to run-test-idle */ +#define JCMD_ACC_DR_I 0x00080000 /**< rev 28: return to run-test-idle */ +#define JCMD_IRW_MASK 0x00001f00 +#define JCMD_IRW_SHIFT 8 +#define JCMD_DRW_MASK 0x0000003f + +/* jtagctrl */ +#define JCTRL_FORCE_CLK 4 /**< Force clock */ +#define JCTRL_EXT_EN 2 /**< Enable external targets */ +#define JCTRL_EN 1 /**< Enable Jtag master */ +#define JCTRL_TAPSEL_BIT 0x00000008 /**< JtagMasterCtrl tap_sel bit */ + +/* swdmasterctrl */ +#define SWDCTRL_INT_EN 8 /**< Enable internal targets */ +#define SWDCTRL_FORCE_CLK 4 /**< Force clock */ +#define SWDCTRL_OVJTAG 2 /**< Enable shared SWD/JTAG pins */ +#define SWDCTRL_EN 1 /**< Enable Jtag master */ + +/* Fields in clkdiv */ +#define CLKD_SFLASH 0x1f000000 +#define CLKD_SFLASH_SHIFT 24 +#define CLKD_OTP 0x000f0000 +#define CLKD_OTP_SHIFT 16 +#define CLKD_JTAG 0x00000f00 +#define CLKD_JTAG_SHIFT 8 +#define CLKD_UART 0x000000ff + +#define CLKD2_SROM 0x00000007 +#define CLKD2_SROMDIV_32 0 +#define CLKD2_SROMDIV_64 1 +#define CLKD2_SROMDIV_96 2 +#define CLKD2_SROMDIV_128 3 +#define CLKD2_SROMDIV_192 4 +#define CLKD2_SROMDIV_256 5 +#define CLKD2_SROMDIV_384 6 +#define CLKD2_SROMDIV_512 7 +#define CLKD2_SWD 0xf8000000 +#define CLKD2_SWD_SHIFT 27 + +/* intstatus/intmask */ +#define CI_GPIO 0x00000001 /**< gpio intr */ 
+#define CI_EI 0x00000002 /**< extif intr (corerev >= 3) */
+#define CI_TEMP 0x00000004 /**< temp. ctrl intr (corerev >= 15) */
+#define CI_SIRQ 0x00000008 /**< serial IRQ intr (corerev >= 15) */
+#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */
+#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */
+#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */
+#define CI_WECI 0x00000080 /* eci wakeup intr (corerev >= 21) */
+#define CI_SPMI 0x00100000 /* SPMI (corerev >= 65) */
+#define CI_RNG 0x00200000 /**< rng intr (corerev >= 65) */
+#define CI_SSRESET_F0 0x10000000 /**< ss reset occurred */
+#define CI_SSRESET_F1 0x20000000 /**< ss reset occurred */
+#define CI_SSRESET_F2 0x40000000 /**< ss reset occurred */
+#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK 0x00000007 /**< slow clock source mask */
+#define SCC_SS_LPO 0x00000000 /**< source of slow clock is LPO */
+#define SCC_SS_XTAL 0x00000001 /**< source of slow clock is crystal */
+#define SCC_SS_PCI 0x00000002 /**< source of slow clock is PCI */
+#define SCC_LF 0x00000200 /**< LPOFreqSel, 1: 160kHz, 0: 32kHz */
+#define SCC_LP 0x00000400 /**< LPOPowerDown, 1: LPO is disabled,
+ * 0: LPO is enabled
+ */
+#define SCC_FS 0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock,
+ * 0: power logic control
+ */
+#define SCC_IP 0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors
+ * PLL clock disable requests from core
+ */
+#define SCC_XC 0x00002000 /**< XtalControlEn, 1/0: power logic does/doesn't
+ * disable crystal when appropriate
+ */
+#define SCC_XP 0x00004000 /**< XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK 0xffff0000 /**< ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_SHIFT 16
+
+/* system_clk_ctl */
+#define SYCC_IE 0x00000001 /**< ILPen: Enable Idle Low Power */
+#define SYCC_AE 0x00000002 /**< ALPen: Enable Active Low Power */
+#define SYCC_FP 0x00000004 /**< ForcePLLOn */
+#define SYCC_AR 0x00000008 /**< Force ALP (or HT if ALPen is not set) */
+#define SYCC_HR 0x00000010 /**< Force HT */
+#define SYCC_CD_MASK 0xffff0000 /**< ClkDiv (ILP = 1/(4 * (divisor + 1))) */
+#define SYCC_CD_SHIFT 16
+
+/* watchdogcounter */
+/* WL sub-system reset */
+#define WD_SSRESET_PCIE_F0_EN 0x10000000
+/* BT sub-system reset */
+#define WD_SSRESET_PCIE_F1_EN 0x20000000
+#define WD_SSRESET_PCIE_F2_EN 0x40000000
+/* Both WL and BT sub-system reset */
+#define WD_SSRESET_PCIE_ALL_FN_EN 0x80000000
+#define WD_COUNTER_MASK 0x0fffffff
+#define WD_ENABLE_MASK \
+ (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_F1_EN | \
+ WD_SSRESET_PCIE_F2_EN | WD_SSRESET_PCIE_ALL_FN_EN)
+
+/* Indirect backplane access */
+#define BPIA_BYTEEN 0x0000000f
+#define BPIA_SZ1 0x00000001
+#define BPIA_SZ2 0x00000003
+#define BPIA_SZ4 0x00000007
+#define BPIA_SZ8 0x0000000f
+#define BPIA_WRITE 0x00000100
+#define BPIA_START 0x00000200
+#define BPIA_BUSY 0x00000200
+#define BPIA_ERROR 0x00000400
+
+/* pcmcia/prog/flash_config */
+#define CF_EN 0x00000001 /**< enable */
+#define CF_EM_MASK 0x0000000e /**< mode */
+#define CF_EM_SHIFT 1
+#define CF_EM_FLASH 0 /**< flash/asynchronous mode */
+#define CF_EM_SYNC 2 /**< synchronous mode */
+#define CF_EM_PCMCIA 4 /**< pcmcia mode */
+#define CF_DS 0x00000010 /**< destsize: 0=8bit, 1=16bit */
+#define CF_BS 0x00000020 /**< byteswap */
+#define CF_CD_MASK 0x000000c0 /**< clock divider */
+#define CF_CD_SHIFT 6
+#define CF_CD_DIV2 0x00000000 /**< backplane/2 */
+#define CF_CD_DIV3 0x00000040 /**< backplane/3 */
+#define CF_CD_DIV4 0x00000080 /**< backplane/4 */ +#define CF_CE 0x00000100 /**< clock enable */ +#define CF_SB 0x00000200 /**< size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define PM_W0_MASK 0x0000003f /**< waitcount0 */ +#define PM_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PM_W1_SHIFT 8 +#define PM_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PM_W2_SHIFT 16 +#define PM_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PM_W3_SHIFT 24 + +/* pcmcia_attrwait */ +#define PA_W0_MASK 0x0000003f /**< waitcount0 */ +#define PA_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PA_W1_SHIFT 8 +#define PA_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PA_W2_SHIFT 16 +#define PA_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PA_W3_SHIFT 24 + +/* pcmcia_iowait */ +#define PI_W0_MASK 0x0000003f /**< waitcount0 */ +#define PI_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PI_W1_SHIFT 8 +#define PI_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PI_W2_SHIFT 16 +#define PI_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PI_W3_SHIFT 24 + +/* prog_waitcount */ +#define PW_W0_MASK 0x0000001f /**< waitcount0 */ +#define PW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PW_W1_SHIFT 8 +#define PW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PW_W2_SHIFT 16 +#define PW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PW_W3_SHIFT 24 + +#define PW_W0 0x0000000c +#define PW_W1 0x00000a00 +#define PW_W2 0x00020000 +#define PW_W3 0x01000000 + +/* flash_waitcount */ +#define FW_W0_MASK 0x0000003f /**< waitcount0 */ +#define FW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define FW_W1_SHIFT 8 +#define FW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define FW_W2_SHIFT 16 +#define FW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define FW_W3_SHIFT 24 + +/* When Srom support present, fields in sromcontrol */ +#define SRC_START 0x80000000 +#define SRC_BUSY 0x80000000 +#define SRC_OPCODE 0x60000000 +#define SRC_OP_READ 0x00000000 +#define SRC_OP_WRITE 0x20000000 +#define SRC_OP_WRDIS 0x40000000 +#define SRC_OP_WREN 0x60000000 +#define SRC_OTPSEL 0x00000010 +#define SRC_OTPPRESENT 0x00000020 +#define SRC_LOCK 0x00000008 +#define SRC_SIZE_MASK 0x00000006 +#define SRC_SIZE_1K 0x00000000 +#define SRC_SIZE_4K 0x00000002 +#define SRC_SIZE_16K 0x00000004 +#define SRC_SIZE_SHIFT 1 +#define SRC_PRESENT 0x00000001 + +/* Fields in pmucontrol */ +#define PCTL_ILP_DIV_MASK 0xffff0000 +#define PCTL_ILP_DIV_SHIFT 16 +#define PCTL_LQ_REQ_EN 0x00008000 +#define PCTL_PLL_PLLCTL_UPD 0x00000400 /**< rev 2 */ +#define PCTL_NOILP_ON_WAIT 0x00000200 /**< rev 1 */ +#define PCTL_HT_REQ_EN 0x00000100 +#define PCTL_ALP_REQ_EN 0x00000080 +#define PCTL_XTALFREQ_MASK 0x0000007c +#define PCTL_XTALFREQ_SHIFT 2 +#define PCTL_ILP_DIV_EN 0x00000002 +#define PCTL_LPO_SEL 0x00000001 + +/* Fields in pmucontrol_ext */ +#define PCTL_EXT_FAST_TRANS_ENAB 0x00000001u +#define PCTL_EXT_USE_LHL_TIMER 0x00000010u +#define PCTL_EXT_FASTLPO_ENAB 0x00000080u +#define PCTL_EXT_FASTLPO_SWENAB 0x00000200u +#define PCTL_EXT_FASTSEQ_ENAB 0x00001000u +#define PCTL_EXT_FASTLPO_PCIE_SWENAB 0x00004000u /**< rev33 for FLL1M */ +#define PCTL_EXT_FASTLPO_SB_SWENAB 0x00008000u /**< rev36 for FLL1M */ +#define PCTL_EXT_REQ_MIRROR_ENAB 0x00010000u /**< rev36 for ReqMirrorEn */ + +#define DEFAULT_43012_MIN_RES_MASK 0x0f8bfe77 + +/* Retention Control */ +#define PMU_RCTL_CLK_DIV_SHIFT 0 +#define PMU_RCTL_CHAIN_LEN_SHIFT 12 +#define PMU_RCTL_MACPHY_DISABLE_SHIFT 26 +#define PMU_RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define PMU_RCTL_LOGIC_DISABLE_SHIFT 27 +#define PMU_RCTL_LOGIC_DISABLE_MASK (1 << 
27) +#define PMU_RCTL_MEMSLP_LOG_SHIFT 28 +#define PMU_RCTL_MEMSLP_LOG_MASK (1 << 28) +#define PMU_RCTL_MEMRETSLP_LOG_SHIFT 29 +#define PMU_RCTL_MEMRETSLP_LOG_MASK (1 << 29) + +/* Retention Group Control */ +#define PMU_RCTLGRP_CHAIN_LEN_SHIFT 0 +#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT 14 +#define PMU_RCTLGRP_RMODE_ENABLE_MASK (1 << 14) +#define PMU_RCTLGRP_DFT_ENABLE_SHIFT 15 +#define PMU_RCTLGRP_DFT_ENABLE_MASK (1 << 15) +#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT 16 +#define PMU_RCTLGRP_NSRST_DISABLE_MASK (1 << 16) + +/* Fields in clkstretch */ +#define CSTRETCH_HT 0xffff0000 +#define CSTRETCH_ALP 0x0000ffff +#define CSTRETCH_REDUCE_8 0x00080008 + +/* gpiotimerval */ +#define GPIO_ONTIME_SHIFT 16 + +/* clockcontrol_n */ +/* Some pll types use less than the number of bits in some of these (n or m) masks */ +#define CN_N1_MASK 0x3f /**< n1 control */ +#define CN_N2_MASK 0x3f00 /**< n2 control */ +#define CN_N2_SHIFT 8 +#define CN_PLLC_MASK 0xf0000 /**< pll control */ +#define CN_PLLC_SHIFT 16 + +/* clockcontrol_sb/pci/uart */ +#define CC_M1_MASK 0x3f /**< m1 control */ +#define CC_M2_MASK 0x3f00 /**< m2 control */ +#define CC_M2_SHIFT 8 +#define CC_M3_MASK 0x3f0000 /**< m3 control */ +#define CC_M3_SHIFT 16 +#define CC_MC_MASK 0x1f000000 /**< mux control */ +#define CC_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define CC_F6_2 0x02 /**< A factor of 2 in */ +#define CC_F6_3 0x03 /**< 6-bit fields like */ +#define CC_F6_4 0x05 /**< N1, M1 or M3 */ +#define CC_F6_5 0x09 +#define CC_F6_6 0x11 +#define CC_F6_7 0x21 + +#define CC_F5_BIAS 5 /**< 5-bit fields get this added */ + +#define CC_MC_BYPASS 0x08 +#define CC_MC_M1 0x04 +#define CC_MC_M1M2 0x02 +#define CC_MC_M1M2M3 0x01 +#define CC_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define CC_T2_BIAS 2 /**< n1, n2, m1 & m3 bias */ +#define CC_T2M2_BIAS 3 /**< m2 bias */ + +#define CC_T2MC_M1BYP 1 +#define CC_T2MC_M2BYP 2 +#define CC_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define CC_T6_MMASK 1 /**< bits of interest in m */ +#define CC_T6_M0 120000000 /**< sb clock for m = 0 */ +#define CC_T6_M1 100000000 /**< sb clock for m = 1 */ +#define SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define CC_CLOCK_BASE1 24000000 /**< Half the clock freq */ +#define CC_CLOCK_BASE2 12500000 /**< Alternate crystal on some PLLs */ + +/* Flash types in the chipcommon capabilities register */ +#define FLASH_NONE 0x000 /**< No flash */ +#define SFLASH_ST 0x100 /**< ST serial flash */ +#define SFLASH_AT 0x200 /**< Atmel serial flash */ +#define NFLASH 0x300 /**< NAND flash */ +#define PFLASH 0x700 /**< Parallel flash */ +#define QSPIFLASH_ST 0x800 +#define QSPIFLASH_AT 0x900 + +/* Bits in the ExtBus config registers */ +#define CC_CFG_EN 0x0001 /**< Enable */ +#define CC_CFG_EM_MASK 0x000e /**< Extif Mode */ +#define CC_CFG_EM_ASYNC 0x0000 /**< Async/Parallel flash */ +#define CC_CFG_EM_SYNC 0x0002 /**< Synchronous */ +#define CC_CFG_EM_PCMCIA 0x0004 /**< PCMCIA */ +#define CC_CFG_EM_IDE 0x0006 /**< IDE */ +#define CC_CFG_DS 0x0010 /**< Data size, 0=8bit, 1=16bit */ +#define CC_CFG_CD_MASK 0x00e0 /**< Sync: Clock divisor, rev >= 20 */ +#define CC_CFG_CE 0x0100 /**< Sync: Clock enable, rev >= 20 */ +#define CC_CFG_SB 0x0200 /**< Sync: Size/Bytestrobe, rev >= 20 */ +#define CC_CFG_IS 0x0400 /**< Extif Sync Clk Select, rev >= 20 */ + +/* ExtBus address space */ +#define CC_EB_BASE 0x1a000000 /**< Chipc ExtBus base address */ +#define CC_EB_PCMCIA_MEM 0x1a000000 /**< PCMCIA 0 memory base address */ +#define 
CC_EB_PCMCIA_IO 0x1a200000 /**< PCMCIA 0 I/O base address */ +#define CC_EB_PCMCIA_CFG 0x1a400000 /**< PCMCIA 0 config base address */ +#define CC_EB_IDE 0x1a800000 /**< IDE memory base */ +#define CC_EB_PCMCIA1_MEM 0x1a800000 /**< PCMCIA 1 memory base address */ +#define CC_EB_PCMCIA1_IO 0x1aa00000 /**< PCMCIA 1 I/O base address */ +#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */ +#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */ + +/* Start/busy bit in flashcontrol */ +#define SFLASH_OPCODE 0x000000ff +#define SFLASH_ACTION 0x00000700 +#define SFLASH_CS_ACTIVE 0x00001000 /**< Chip Select Active, rev >= 20 */ +#define SFLASH_START 0x80000000 +#define SFLASH_BUSY SFLASH_START + +/* flashcontrol action codes */ +#define SFLASH_ACT_OPONLY 0x0000 /**< Issue opcode only */ +#define SFLASH_ACT_OP1D 0x0100 /**< opcode + 1 data byte */ +#define SFLASH_ACT_OP3A 0x0200 /**< opcode + 3 addr bytes */ +#define SFLASH_ACT_OP3A1D 0x0300 /**< opcode + 3 addr & 1 data bytes */ +#define SFLASH_ACT_OP3A4D 0x0400 /**< opcode + 3 addr & 4 data bytes */ +#define SFLASH_ACT_OP3A4X4D 0x0500 /**< opcode + 3 addr, 4 don't care & 4 data bytes */ +#define SFLASH_ACT_OP3A1X4D 0x0700 /**< opcode + 3 addr, 1 don't care & 4 data bytes */ + +/* flashcontrol action+opcodes for ST flashes */ +#define SFLASH_ST_WREN 0x0006 /**< Write Enable */ +#define SFLASH_ST_WRDIS 0x0004 /**< Write Disable */ +#define SFLASH_ST_RDSR 0x0105 /**< Read Status Register */ +#define SFLASH_ST_WRSR 0x0101 /**< Write Status Register */ +#define SFLASH_ST_READ 0x0303 /**< Read Data Bytes */ +#define SFLASH_ST_PP 0x0302 /**< Page Program */ +#define SFLASH_ST_SE 0x02d8 /**< Sector Erase */ +#define SFLASH_ST_BE 0x00c7 /**< Bulk Erase */ +#define SFLASH_ST_DP 0x00b9 /**< Deep Power-down */ +#define SFLASH_ST_RES 0x03ab /**< Read Electronic Signature */ +#define SFLASH_ST_CSA 0x1000 /**< Keep chip select asserted */ +#define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */ + +#define SFLASH_ST_READ4B 0x6313 /* Read Data Bytes in 4Byte address */ +#define SFLASH_ST_PP4B 0x6312 /* Page Program in 4Byte address */ +#define SFLASH_ST_SE4B 0x62dc /* Sector Erase in 4Byte address */ +#define SFLASH_ST_SSE4B 0x6221 /* Sub-sector Erase */ + +#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacture ID */ +#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacture ID */ + +#define SFLASH_WINBOND_RDID 0x0390 /* Read Manufacture ID */ +#define SFLASH_WINBOND_MFID 0xef /* Winbond Manufacture ID */ + +/* Status register bits for ST flashes */ +#define SFLASH_ST_WIP 0x01 /**< Write In Progress */ +#define SFLASH_ST_WEL 0x02 /**< Write Enable Latch */ +#define SFLASH_ST_BP_MASK 0x1c /**< Block Protect */ +#define SFLASH_ST_BP_SHIFT 2 +#define SFLASH_ST_SRWD 0x80 /**< Status Register Write Disable */ + +/* flashcontrol action+opcodes for Atmel flashes */ +#define SFLASH_AT_READ 0x07e8 +#define SFLASH_AT_PAGE_READ 0x07d2 +/* PR9631: impossible to specify Atmel Buffer Read command */ +#define SFLASH_AT_BUF1_READ +#define SFLASH_AT_BUF2_READ +#define SFLASH_AT_STATUS 0x01d7 +#define SFLASH_AT_BUF1_WRITE 0x0384 +#define SFLASH_AT_BUF2_WRITE 0x0387 +#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283 +#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286 +#define SFLASH_AT_BUF1_PROGRAM 0x0288 +#define SFLASH_AT_BUF2_PROGRAM 0x0289 +#define SFLASH_AT_PAGE_ERASE 0x0281 +#define SFLASH_AT_BLOCK_ERASE 0x0250 +#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define SFLASH_AT_BUF1_LOAD 0x0253 +#define 
SFLASH_AT_BUF2_LOAD 0x0255 +#define SFLASH_AT_BUF1_COMPARE 0x0260 +#define SFLASH_AT_BUF2_COMPARE 0x0261 +#define SFLASH_AT_BUF1_REPROGRAM 0x0258 +#define SFLASH_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SFLASH_AT_READY 0x80 +#define SFLASH_AT_MISMATCH 0x40 +#define SFLASH_AT_ID_MASK 0x38 +#define SFLASH_AT_ID_SHIFT 3 + +/* SPI register bits, corerev >= 37 */ +#define GSIO_START 0x80000000u +#define GSIO_BUSY GSIO_START + +/* UART Function sel related */ +#define MUXENAB_DEF_UART_MASK 0x0000000fu +#define MUXENAB_DEF_UART_SHIFT 0 + +/* HOST_WAKE Function sel related */ +#define MUXENAB_DEF_HOSTWAKE_MASK 0x000000f0u /**< configure GPIO for host_wake */ +#define MUXENAB_DEF_HOSTWAKE_SHIFT 4u + +/* GCI UART Function sel related */ +#define MUXENAB_GCI_UART_MASK 0x00000f00u +#define MUXENAB_GCI_UART_SHIFT 8u +#define MUXENAB_GCI_UART_FNSEL_MASK 0x00003000u +#define MUXENAB_GCI_UART_FNSEL_SHIFT 12u + +/* Mask used to decide whether MUX to be performed or not */ +#define MUXENAB_DEF_GETIX(val, name) \ + ((((val) & MUXENAB_DEF_ ## name ## _MASK) >> MUXENAB_DEF_ ## name ## _SHIFT) - 1) + +/* + * These are the UART port assignments, expressed as offsets from the base + * register. These assignments should hold for any serial port based on + * a 8250, 16450, or 16550(A). + */ + +#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */ +#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */ +#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */ +#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */ +#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */ +#define UART_IIR 2 /**< In: Interrupt Identity Register */ +#define UART_FCR 2 /**< Out: FIFO Control Register */ +#define UART_LCR 3 /**< Out: Line Control Register */ +#define UART_MCR 4 /**< Out: Modem Control Register */ +#define UART_LSR 5 /**< In: Line Status Register */ +#define UART_MSR 6 /**< In: Modem Status Register */ +#define UART_SCR 7 /**< I/O: Scratch Register */ +#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */ +#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */ +#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */ +#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */ +#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */ +#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */ +#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */ +#define UART_LSR_BREAK 0x10 /**< Break interrupt */ +#define UART_LSR_FRAMING 0x08 /**< Framing error */ +#define UART_LSR_PARITY 0x04 /**< Parity error */ +#define UART_LSR_OVERRUN 0x02 /**< Overrun error */ +#define UART_LSR_RXRDY 0x01 /**< Receiver ready */ +#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */ + +/* Interrupt Identity Register (IIR) bits */ +#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */ +#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */ +#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */ +#define UART_IIR_NOINT 0x1 /**< No interrupt pending */ +#define UART_IIR_THRE 0x2 /**< THR empty */ +#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */ +#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */ +#define UART_IIR_CHAR_TIME 0xc /**< Character time */ + +/* Interrupt Enable Register (IER) bits */ +#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */ +#define UART_IER_EDSSI 8 /**< enable modem status interrupt */ +#define UART_IER_ELSI 4 /**< enable receiver line status 
+/*
+ * These are the UART port assignments, expressed as offsets from the base
+ * register. These assignments should hold for any serial port based on
+ * an 8250, 16450, or 16550(A).
+ */
+
+#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */
+#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */
+#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR 2 /**< In: Interrupt Identity Register */
+#define UART_FCR 2 /**< Out: FIFO Control Register */
+#define UART_LCR 3 /**< Out: Line Control Register */
+#define UART_MCR 4 /**< Out: Modem Control Register */
+#define UART_LSR 5 /**< In: Line Status Register */
+#define UART_MSR 6 /**< In: Modem Status Register */
+#define UART_SCR 7 /**< I/O: Scratch Register */
+#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */
+#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */
+#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */
+#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */
+#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */
+#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */
+#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */
+#define UART_LSR_BREAK 0x10 /**< Break interrupt */
+#define UART_LSR_FRAMING 0x08 /**< Framing error */
+#define UART_LSR_PARITY 0x04 /**< Parity error */
+#define UART_LSR_OVERRUN 0x02 /**< Overrun error */
+#define UART_LSR_RXRDY 0x01 /**< Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */
+
+/* Interrupt Identity Register (IIR) bits */
+#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */
+#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */
+#define UART_IIR_NOINT 0x1 /**< No interrupt pending */
+#define UART_IIR_THRE 0x2 /**< THR empty */
+#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */
+#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */
+#define UART_IIR_CHAR_TIME 0xc /**< Character time */
+
+/* Interrupt Enable Register (IER) bits */
+#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI 8 /**< enable modem status interrupt */
+#define UART_IER_ELSI 4 /**< enable receiver line status interrupt */
+#define UART_IER_ETBEI 2 /**< enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI 1 /**< enable data available interrupt */
+
+/* pmustatus */
+#define PST_SLOW_WR_PENDING 0x0400
+#define PST_EXTLPOAVAIL 0x0100
+#define PST_WDRESET 0x0080
+#define PST_INTPEND 0x0040
+#define PST_SBCLKST 0x0030
+#define PST_SBCLKST_ILP 0x0010
+#define PST_SBCLKST_ALP 0x0020
+#define PST_SBCLKST_HT 0x0030
+#define PST_ALPAVAIL 0x0008
+#define PST_HTAVAIL 0x0004
+#define PST_RESINIT 0x0003
+#define PST_ILPFASTLPO 0x00010000
+
+/* pmucapabilities */
+#define PCAP_REV_MASK 0x000000ff
+#define PCAP_RC_MASK 0x00001f00
+#define PCAP_RC_SHIFT 8
+#define PCAP_TC_MASK 0x0001e000
+#define PCAP_TC_SHIFT 13
+#define PCAP_PC_MASK 0x001e0000
+#define PCAP_PC_SHIFT 17
+#define PCAP_VC_MASK 0x01e00000
+#define PCAP_VC_SHIFT 21
+#define PCAP_CC_MASK 0x1e000000
+#define PCAP_CC_SHIFT 25
+#define PCAP5_PC_MASK 0x003e0000 /**< PMU corerev >= 5 */
+#define PCAP5_PC_SHIFT 17
+#define PCAP5_VC_MASK 0x07c00000
+#define PCAP5_VC_SHIFT 22
+#define PCAP5_CC_MASK 0xf8000000
+#define PCAP5_CC_SHIFT 27
+
+/* pmucapabilities ext */
+#define PCAP_EXT_ST_NUM_SHIFT (8) /* stat timer number */
+#define PCAP_EXT_ST_NUM_MASK (0xf << PCAP_EXT_ST_NUM_SHIFT)
+#define PCAP_EXT_ST_SRC_NUM_SHIFT (12) /* stat timer source number */
+#define PCAP_EXT_ST_SRC_NUM_MASK (0xf << PCAP_EXT_ST_SRC_NUM_SHIFT)
+#define PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_SHIFT (20u) /* # of MAC rsrc req timers */
+#define PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_MASK (7u << PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_SHIFT)
+#define PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT (23u) /* pmu int rcvr cnt */
+#define PCAP_EXT_PMU_INTR_RCVR_CNT_MASK (7u << PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT)
+
+/* pmustattimer ctrl */
+#define PMU_ST_SRC_SHIFT (0) /* stat timer source number */
+#define PMU_ST_SRC_MASK (0xff << PMU_ST_SRC_SHIFT)
+#define PMU_ST_CNT_MODE_SHIFT (10) /* stat timer count mode */
+#define PMU_ST_CNT_MODE_MASK (0x3 << PMU_ST_CNT_MODE_SHIFT)
+#define PMU_ST_EN_SHIFT (8) /* stat timer enable */
+#define PMU_ST_EN_MASK (0x1 << PMU_ST_EN_SHIFT)
+#define PMU_ST_ENAB 1
+#define PMU_ST_DISAB 0
+#define PMU_ST_INT_EN_SHIFT (9) /* stat timer interrupt enable */
+#define PMU_ST_INT_EN_MASK (0x1 << PMU_ST_INT_EN_SHIFT)
+#define PMU_ST_INT_ENAB 1
+#define PMU_ST_INT_DISAB 0
+
+/* CoreCapabilitiesExtension */
+#define PCAP_EXT_USE_MUXED_ILP_CLK_MASK 0x04000000
+
+/* PMU Resource Request Timer registers */
+/* This is based on PmuRev0 */
+#define PRRT_TIME_MASK 0x03ff
+#define PRRT_INTEN 0x0400
+/* ReqActive 25
+ * The hardware sets this field to 1 when the timer expires.
+ * Software writes this field to 1 to make immediate resource requests.
+ */
+#define PRRT_REQ_ACTIVE 0x0800 /* To check h/w status */
+#define PRRT_IMMEDIATE_RES_REQ 0x0800 /* macro for sw immediate res req */
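+
+/*
+ * Editorial usage sketch (not part of the original header): software raises an
+ * immediate resource request by writing the wanted request bits together with
+ * PRRT_IMMEDIATE_RES_REQ into the PMU res_req_timer register, e.g. (assuming
+ * pmu points at the PMU core register block and W_REG() is the usual OSL
+ * register accessor):
+ *
+ *	W_REG(osh, &pmu->res_req_timer,
+ *		PRRT_IMMEDIATE_RES_REQ | PRRT_HT_REQ | PRRT_HQ_REQ);
+ */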
+#define PRRT_ALP_REQ 0x1000
+#define PRRT_HT_REQ 0x2000
+#define PRRT_HQ_REQ 0x4000
+
+/* PMU Int Control register bits */
+#define PMU_INTC_ALP_REQ 0x1
+#define PMU_INTC_HT_REQ 0x2
+#define PMU_INTC_HQ_REQ 0x4
+
+/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
+#define RSRC_INTR_MASK_TIMER_INT_0 1
+#define PMU_INTR_MASK_EXTWAKE_REQ_ACTIVE_0 (1 << 20)
+
+#define PMU_INT_STAT_RSRC_EVENT_INT0_SHIFT (8u)
+#define PMU_INT_STAT_RSRC_EVENT_INT0_MASK (1u << PMU_INT_STAT_RSRC_EVENT_INT0_SHIFT)
+
+/* bit 16 of the PMU interrupt vector - Stats Timer Interrupt */
+#define PMU_INT_STAT_TIMER_INT_SHIFT (16u)
+#define PMU_INT_STAT_TIMER_INT_MASK (1u << PMU_INT_STAT_TIMER_INT_SHIFT)
+
+/*
+ * bit 18 of the PMU interrupt vector - S/R self test fails
+ */
+#define PMU_INT_STAT_SR_ERR_SHIFT (18u)
+#define PMU_INT_STAT_SR_ERR_MASK (1u << PMU_INT_STAT_SR_ERR_SHIFT)
+
+/* PMU resource bit position */
+#define PMURES_BIT(bit) (1u << (bit))
+
+/* PMU resource number limit */
+#define PMURES_MAX_RESNUM 30
+
+/* PMU chip control0 register */
+#define PMU_CHIPCTL0 0
+
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0)
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0)
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0xF << 6)
+#define PMU_CC0_4369B0_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1A << 6)
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_START_VAL (0 << 12)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_START_MASK (0x7 << 12)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_VAL (0x1 << 15)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15)
+
+// This is not used,
so retains reset value +#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20u << 0u) + +#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3Fu << 0u) +#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1Au << 6u) +#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3Fu << 6u) +#define PMU_CC0_4362_XTAL_RES_BYPASS_START_VAL (0x00u << 12u) +#define PMU_CC0_4362_XTAL_RES_BYPASS_START_MASK (0x07u << 12u) +#define PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_VAL (0x02u << 15u) +#define PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_MASK (0x07u << 15u) + +#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0) +#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0) +#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1A << 6) +#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6) +#define PMU_CC0_4378_XTAL_RES_BYPASS_START_VAL (0 << 12) +#define PMU_CC0_4378_XTAL_RES_BYPASS_START_MASK (0x7 << 12) +#define PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_VAL (0x2 << 15) +#define PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15) + +#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0) +#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0) +#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1A << 6) +#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6) +#define PMU_CC0_4387_XTAL_RES_BYPASS_START_VAL (0 << 12) +#define PMU_CC0_4387_XTAL_RES_BYPASS_START_MASK (0x7 << 12) +#define PMU_CC0_4387_XTAL_RES_BYPASS_NORMAL_VAL (0x2 << 15) +#define PMU_CC0_4387_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15) +#define PMU_CC0_4387_BT_PU_WAKE_MASK (0x3u << 30u) + +/* clock req types */ +#define PMU_CC1_CLKREQ_TYPE_SHIFT 19 +#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT) + +#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0 +#define CLKREQ_TYPE_CONFIG_PUSHPULL 1 + +/* Power Control */ +#define PWRCTL_ENAB_MEM_CLK_GATE_SHIFT 5 +#define PWRCTL_FORCE_HW_PWR_REQ_OFF_SHIFT 6 +#define PWRCTL_AUTO_MEM_STBYRET 28 + +/* PMU chip control1 register */ +#define PMU_CHIPCTL1 1 +#define PMU_CC1_RXC_DLL_BYPASS 0x00010000 +#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN 0x00000010 + +#define PMU_CC1_IF_TYPE_MASK 0x00000030 +#define PMU_CC1_IF_TYPE_RMII 0x00000000 +#define PMU_CC1_IF_TYPE_MII 0x00000010 +#define PMU_CC1_IF_TYPE_RGMII 0x00000020 + +#define PMU_CC1_SW_TYPE_MASK 0x000000c0 +#define PMU_CC1_SW_TYPE_EPHY 0x00000000 +#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040 +#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 +#define PMU_CC1_SW_TYPE_RGMII 0x000000c0 + +#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080 +#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000 + +#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY_MASK 0x00003F00u +#ifdef BCM_FASTLPO_PMU +#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY 0x00002000u +#else +#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY 0x00000400u +#endif /* BCM_FASTLPO_PMU */ + +/* PMU chip control2 register */ +#define PMU_CC2_CB2WL_INTR_PWRREQ_EN (1u << 13u) +#define PMU_CC2_RFLDO3P3_PU_FORCE_ON (1u << 15u) +#define PMU_CC2_RFLDO3P3_PU_CLEAR 0x00000000u + +#define PMU_CC2_WL2CDIG_I_PMU_SLEEP (1u << 16u) +#define PMU_CHIPCTL2 2u +#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1u << 18u) +#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1u << 19u) +#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1u << 20u) +#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1u << 21u) +#define PMU_CC2_MASK_WL_DEV_WAKE (1u << 22u) +#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1u << 25u) +#define PMU_CC2_GCI2_WAKE (1u << 31u) + +#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u) +#define 
PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u) +#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u) +#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u) + +#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u) +#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u) +#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u) +#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u) + +#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u) +#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u) +#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u) +#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u) + +#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u) +#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u) +#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u) +#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u) + +/* PMU chip control3 register */ +#define PMU_CHIPCTL3 3u +#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19u +#define PMU_CC3_ENABLE_RF_SHIFT 22u +#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23u + +#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_VAL (0x3Fu << 0u) +#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_MASK (0x3Fu << 0u) +#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_VAL (0x3Fu << 15u) +#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_MASK (0x3Fu << 15u) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_VAL (0x3Fu << 6u) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_MASK (0x3Fu << 6u) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_VAL (0x3Fu << 21) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_MASK (0x3Fu << 21) +#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_VAL (0x2u << 12u) +#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_MASK (0x7u << 12u) +#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_VAL (0x2u << 27u) +#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_MASK (0x7u << 27u) + +#define PMU_CC3_4362_XTALCORESIZE_PMOS_START_VAL (0x3Fu << 0u) +#define PMU_CC3_4362_XTALCORESIZE_PMOS_START_MASK (0x3Fu << 0u) +#define PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_VAL (0x3Fu << 15u) +#define PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_MASK (0x3Fu << 15u) +#define PMU_CC3_4362_XTALCORESIZE_NMOS_START_VAL (0x3Fu << 6u) +#define PMU_CC3_4362_XTALCORESIZE_NMOS_START_MASK (0x3Fu << 6u) +#define PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_VAL (0x3Fu << 21u) +#define PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_MASK (0x3Fu << 21u) +#define PMU_CC3_4362_XTALSEL_BIAS_RES_START_VAL (0x02u << 12u) +#define PMU_CC3_4362_XTALSEL_BIAS_RES_START_MASK (0x07u << 12u) +/* Changed from 6 to 4 for wlan PHN and to 2 for BT PER issues */ +#define PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_VAL (0x02u << 27u) +#define PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_MASK (0x07u << 27u) + +#define PMU_CC3_4378_XTALCORESIZE_PMOS_START_VAL (0x3F << 0) +#define PMU_CC3_4378_XTALCORESIZE_PMOS_START_MASK (0x3F << 0) +#define PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_VAL (0x3F << 15) +#define PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_MASK (0x3F << 15) +#define PMU_CC3_4378_XTALCORESIZE_NMOS_START_VAL (0x3F << 6) +#define PMU_CC3_4378_XTALCORESIZE_NMOS_START_MASK (0x3F << 6) +#define PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_VAL (0x3F << 21) +#define PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_MASK (0x3F << 21) +#define PMU_CC3_4378_XTALSEL_BIAS_RES_START_VAL (0x2 << 12) +#define PMU_CC3_4378_XTALSEL_BIAS_RES_START_MASK (0x7 << 12) +#define PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_VAL (0x2 << 27) 
+#define PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_MASK (0x7 << 27) + +#define PMU_CC3_4387_XTALCORESIZE_PMOS_START_VAL (0x3F << 0) +#define PMU_CC3_4387_XTALCORESIZE_PMOS_START_MASK (0x3F << 0) +#define PMU_CC3_4387_XTALCORESIZE_PMOS_NORMAL_VAL (0x3F << 15) +#define PMU_CC3_4387_XTALCORESIZE_PMOS_NORMAL_MASK (0x3F << 15) +#define PMU_CC3_4387_XTALCORESIZE_NMOS_START_VAL (0x3F << 6) +#define PMU_CC3_4387_XTALCORESIZE_NMOS_START_MASK (0x3F << 6) +#define PMU_CC3_4387_XTALCORESIZE_NMOS_NORMAL_VAL (0x3F << 21) +#define PMU_CC3_4387_XTALCORESIZE_NMOS_NORMAL_MASK (0x3F << 21) +#define PMU_CC3_4387_XTALSEL_BIAS_RES_START_VAL (0x2 << 12) +#define PMU_CC3_4387_XTALSEL_BIAS_RES_START_MASK (0x7 << 12) +#define PMU_CC3_4387_XTALSEL_BIAS_RES_NORMAL_VAL (0x5 << 27) +#define PMU_CC3_4387_XTALSEL_BIAS_RES_NORMAL_MASK (0x7 << 27) + +/* PMU chip control4 register */ +#define PMU_CHIPCTL4 4 + +/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */ +#define PMU_CC4_IF_TYPE_MASK 0x00003000 +#define PMU_CC4_IF_TYPE_RMII 0x00000000 +#define PMU_CC4_IF_TYPE_MII 0x00001000 +#define PMU_CC4_IF_TYPE_RGMII 0x00002000 + +#define PMU_CC4_SW_TYPE_MASK 0x0000c000 +#define PMU_CC4_SW_TYPE_EPHY 0x00000000 +#define PMU_CC4_SW_TYPE_EPHYMII 0x00004000 +#define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000 +#define PMU_CC4_SW_TYPE_RGMII 0x0000c000 +#define PMU_CC4_DISABLE_LQ_AVAIL (1<<27) + +#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON (1u << 15u) +#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u) +#define PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u) +#define PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u) + +#define PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON (1u << 21u) +#define PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON (1u << 22u) +#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u) +#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u) + +#define PMU_CC4_4362_PD_CBUCK2VDDB_ON (1u << 15u) +#define PMU_CC4_4362_PD_CBUCK2VDDRET_ON (1u << 16u) +#define PMU_CC4_4362_PD_MEMLPLDO2VDDB_ON (1u << 17u) +#define PMU_CC4_4362_PD_MEMLPDLO2VDDRET_ON (1u << 18u) + +#define PMU_CC4_4378_MAIN_PD_CBUCK2VDDB_ON (1u << 15u) +#define PMU_CC4_4378_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u) +#define PMU_CC4_4378_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u) +#define PMU_CC4_4378_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u) + +#define PMU_CC4_4378_AUX_PD_CBUCK2VDDB_ON (1u << 21u) +#define PMU_CC4_4378_AUX_PD_CBUCK2VDDRET_ON (1u << 22u) +#define PMU_CC4_4378_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u) +#define PMU_CC4_4378_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u) + +#define PMU_CC4_4387_MAIN_PD_CBUCK2VDDB_ON (1u << 15u) +#define PMU_CC4_4387_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u) +#define PMU_CC4_4387_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u) +#define PMU_CC4_4387_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u) + +#define PMU_CC4_4387_AUX_PD_CBUCK2VDDB_ON (1u << 21u) +#define PMU_CC4_4387_AUX_PD_CBUCK2VDDRET_ON (1u << 22u) +#define PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u) +#define PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u) + +/* PMU chip control5 register */ +#define PMU_CHIPCTL5 5 + +#define PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON (1u << 9u) +#define PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON (1u << 10u) +#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u) +#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u) + +#define PMU_CC5_4362_SUBCORE_CBUCK2VDDB_ON (1u << 9u) +#define PMU_CC5_4362_SUBCORE_CBUCK2VDDRET_ON (1u << 10u) +#define PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u) +#define PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u) + +#define 
PMU_CC5_4378_SUBCORE_CBUCK2VDDB_ON (1u << 9u)
+#define PMU_CC5_4378_SUBCORE_CBUCK2VDDRET_ON (1u << 10u)
+#define PMU_CC5_4378_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u)
+#define PMU_CC5_4378_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u)
+
+#define PMU_CC5_4387_SUBCORE_CBUCK2VDDB_ON (1u << 9u)
+#define PMU_CC5_4387_SUBCORE_CBUCK2VDDRET_ON (1u << 10u)
+#define PMU_CC5_4387_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u)
+#define PMU_CC5_4387_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u)
+
+#define PMU_CC5_4388_SUBCORE_SDTCCLK0_ON (1u << 3u)
+#define PMU_CC5_4388_SUBCORE_SDTCCLK1_ON (1u << 4u)
+
+#define PMU_CC5_4389_SUBCORE_SDTCCLK0_ON (1u << 3u)
+#define PMU_CC5_4389_SUBCORE_SDTCCLK1_ON (1u << 4u)
+
+/* PMU chip control6 register */
+#define PMU_CHIPCTL6 6
+#define PMU_CC6_RX4_CLK_SEQ_SELECT_MASK BCM_MASK32(1u, 0u)
+#define PMU_CC6_ENABLE_DMN1_WAKEUP (1 << 3)
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP (1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP (1 << 6)
+#define PMU_CC6_ENABLE_PCIE_RETENTION (1 << 12)
+#define PMU_CC6_ENABLE_PMU_EXT_PERST (1 << 13)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_PERST (1 << 14)
+#define PMU_CC6_ENABLE_LEGACY_WAKEUP (1 << 16)
+
+/* PMU chip control7 register */
+#define PMU_CHIPCTL7 7
+#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25)
+#define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27)
+/* 53537 series have gmac1 gmac_if_type in cc7 [7:6] (default 0b01) */
+#define PMU_CC7_IF_TYPE_MASK 0x000000c0
+#define PMU_CC7_IF_TYPE_RMII 0x00000000
+#define PMU_CC7_IF_TYPE_MII 0x00000040
+#define PMU_CC7_IF_TYPE_RGMII 0x00000080
+
+#define PMU_CHIPCTL8 8
+#define PMU_CHIPCTL9 9
+
+#define PMU_CHIPCTL10 10
+#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_SHIFT 0
+#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_MASK 0x000000ff
+#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_SHIFT 8
+#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_MASK 0x0000ff00
+#define PMU_CC10_PCIE_PWRSW_UP_DLY_SHIFT 16
+#define PMU_CC10_PCIE_PWRSW_UP_DLY_MASK 0x000f0000
+#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_SHIFT 20
+#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_MASK 0x00f00000
+#define PMU_CC10_FORCE_PCIE_ON (1 << 24)
+#define PMU_CC10_FORCE_PCIE_SW_ON (1 << 25)
+#define PMU_CC10_FORCE_PCIE_RETNT_ON (1 << 26)
+
+#define PMU_CC10_PCIE_PWRSW_RESET_CNT_4US 1
+#define PMU_CC10_PCIE_PWRSW_RESET_CNT_8US 2
+
+#define PMU_CC10_PCIE_PWRSW_UP_DLY_0US 0
+
+#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_4US 1
+#define PMU_CC10_PCIE_RESET0_CNT_SLOW_MASK (0xFu << 4u)
+#define PMU_CC10_PCIE_RESET1_CNT_SLOW_MASK (0xFu << 12u)
+
+#define PMU_CHIPCTL11 11
+
+/* PMU chip control12 register */
+#define PMU_CHIPCTL12 12
+#define PMU_CC12_DISABLE_LQ_CLK_ON (1u << 31u) /* HW4387-254 */
+
+/* PMU chip control13 register */
+#define PMU_CHIPCTL13 13
+
+#define PMU_CC13_SUBCORE_CBUCK2VDDB_OFF (1u << 0u)
+#define PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF (1u << 1u)
+#define PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF (1u << 2u)
+#define PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF (1u << 3u)
+
+#define PMU_CC13_MAIN_CBUCK2VDDB_OFF (1u << 4u)
+#define PMU_CC13_MAIN_CBUCK2VDDRET_OFF (1u << 5u)
+#define PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF (1u << 6u)
+#define PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF (1u << 7u)
+
+#define PMU_CC13_AUX_CBUCK2VDDB_OFF (1u << 8u)
+#define PMU_CC13_AUX_MEMLPLDO2VDDB_OFF (1u << 10u)
+#define PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF (1u << 11u)
+#define PMU_CC13_AUX_CBUCK2VDDRET_OFF (1u << 12u)
+#define PMU_CC13_CMN_MEMLPLDO2VDDRET_ON (1u << 18u)
+
+/* HW4368-331 */
+#define PMU_CC13_MAIN_ALWAYS_USE_COHERENT_IF0 (1u << 13u)
+#define PMU_CC13_MAIN_ALWAYS_USE_COHERENT_IF1 (1u << 14u)
+#define PMU_CC13_AUX_ALWAYS_USE_COHERENT_IF0 (1u << 15u)
+#define PMU_CC13_AUX_ALWAYS_USE_COHERENT_IF1 (1u << 19u)
+
+#define PMU_CC13_LHL_TIMER_SELECT (1u << 23u)
+
+#define PMU_CC13_4369_LHL_TIMER_SELECT (1u << 23u)
+#define PMU_CC13_4378_LHL_TIMER_SELECT (1u << 23u)
+
+#define PMU_CC13_4387_ENAB_RADIO_REG_CLK (1u << 9u)
+#define PMU_CC13_4387_LHL_TIMER_SELECT (1u << 23u)
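+
+/*
+ * Editorial usage sketch (not part of the original header): chip-control bits
+ * such as the ones above are applied with a read-modify-write through the PMU
+ * indirect-access path, e.g. (assuming the si_pmu_chipcontrol(sih, reg, mask,
+ * val) helper provided elsewhere in this driver):
+ *
+ *	si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ *		PMU_CC13_4387_LHL_TIMER_SELECT, PMU_CC13_4387_LHL_TIMER_SELECT);
+ */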
+
+#define PMU_CHIPCTL14 14
+#define PMU_CHIPCTL15 15
+#define PMU_CHIPCTL16 16
+#define PMU_CC16_CLK4M_DIS (1 << 4)
+#define PMU_CC16_FF_ZERO_ADJ (4 << 5)
+
+/* PMU chip control17 register */
+#define PMU_CHIPCTL17 17u
+
+#define PMU_CC17_SCAN_DIG_SR_CLK_SHIFT (2u)
+#define PMU_CC17_SCAN_DIG_SR_CLK_MASK (3u << 2u)
+#define PMU_CC17_SCAN_CBUCK2VDDB_OFF (1u << 8u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF (1u << 10u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF (1u << 11u)
+#define PMU_CC17_SCAN_CBUCK2VDDB_ON (1u << 24u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDB_ON (1u << 26u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON (1u << 27u)
+
+#define SCAN_DIG_SR_CLK_80_MHZ (0) /* 80 MHz */
+#define SCAN_DIG_SR_CLK_53P35_MHZ (1u) /* 53.35 MHz */
+#define SCAN_DIG_SR_CLK_40_MHZ (2u) /* 40 MHz */
+
+/* PMU chip control18 register */
+#define PMU_CHIPCTL18 18u
+
+/* Expiry time for wl_SSReset if the P channel sleep handshake does not complete */
+#define PMU_CC18_WL_P_CHAN_TIMER_SEL_OFF (1u << 1u)
+#define PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK (7u << 1u)
+
+#define PMU_CC18_WL_P_CHAN_TIMER_SEL_8ms 7u /* (2^(7+1))*32us = 8ms */
+
+/* Enable wl booker to force a P channel sleep handshake upon assertion of wl_SSReset */
+#define PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN (1u << 4u)
+
+/* PMU chip control 19 register */
+#define PMU_CHIPCTL19 19u
+
+#define PMU_CC19_ASYNC_ATRESETMN (1u << 9u)
+
+#define PMU_CHIPCTL23 23
+#define PMU_CC23_MACPHYCLK_MASK (1u << 31u)
+
+#define PMU_CC23_AT_CLK0_ON (1u << 14u)
+#define PMU_CC23_AT_CLK1_ON (1u << 15u)
+
+/* PMU chip control14 register */
+#define PMU_CC14_MAIN_VDDB2VDDRET_UP_DLY_MASK (0xF)
+#define PMU_CC14_MAIN_VDDB2VDD_UP_DLY_MASK (0xF << 4)
+#define PMU_CC14_AUX_VDDB2VDDRET_UP_DLY_MASK (0xF << 8)
+#define PMU_CC14_AUX_VDDB2VDD_UP_DLY_MASK (0xF << 12)
+#define PMU_CC14_PCIE_VDDB2VDDRET_UP_DLY_MASK (0xF << 16)
+#define PMU_CC14_PCIE_VDDB2VDD_UP_DLY_MASK (0xF << 20)
+
+/* PMU chip control15 register */
+#define PMU_CC15_PCIE_VDDB_CURRENT_LIMIT_DELAY_MASK (0xFu << 4u)
+#define PMU_CC15_PCIE_VDDB_FORCE_RPS_PWROK_DELAY_MASK (0xFu << 8u)
+
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX, where <rev> is the PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */ +/* pllcontrol registers */ +/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */ +#define PMU0_PLL0_PLLCTL0 0 +#define PMU0_PLL0_PC0_PDIV_MASK 1 +#define PMU0_PLL0_PC0_PDIV_FREQ 25000 +#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 +#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 +#define PMU0_PLL0_PC0_DIV_ARM_BASE 8 + +/* PC0_DIV_ARM for PLLOUT_ARM */ +#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 +#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1 +#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 +#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */ +#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 +#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 +#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 +#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 + +/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */ +#define PMU0_PLL0_PLLCTL1 1 +#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 +#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28 +#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 +#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 +#define PMU0_PLL0_PC1_STOP_MOD 0x00000040 + +/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */ +#define PMU0_PLL0_PLLCTL2 2 +#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf +#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4 + +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU1_PLL0_PLLCTL0 0 +#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000 +#define PMU1_PLL0_PC0_P1DIV_SHIFT 20 +#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000 +#define PMU1_PLL0_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU1_PLL0_PLLCTL1 1 +#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff +#define PMU1_PLL0_PC1_M1DIV_SHIFT 0 +#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC1_M2DIV_SHIFT 8 +#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000 +#define PMU1_PLL0_PC1_M3DIV_SHIFT 16 +#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000 +#define PMU1_PLL0_PC1_M4DIV_SHIFT 24 +#define PMU1_PLL0_PC1_M4DIV_BY_9 9 +#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12 +#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24 +#define PMU1_PLL0_PC1_M4DIV_BY_60 0x3C +#define PMU1_PLL0_PC1_M2_M4DIV_MASK 0xff00ff00 +#define PMU1_PLL0_PC1_HOLD_LOAD_CH 0x28 + +#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 +#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) +#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU1_PLL0_PLLCTL2 2 +#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff +#define PMU1_PLL0_PC2_M5DIV_SHIFT 0 +#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc +#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M5DIV_BY_31 0x1f +#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_M5DIV_BY_42 0x2a +#define PMU1_PLL0_PC2_M5DIV_BY_60 0x3c +#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC2_M6DIV_SHIFT 8 +#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 +#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1 +#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 +#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU1_PLL0_PLLCTL3 3 +#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU1_PLL0_PLLCTL4 4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU1_PLL0_PLLCTL5 5 +#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 +#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 +#define 
PMU1_PLL0_PC5_ASSERT_CH_MASK 0x3f000000 +#define PMU1_PLL0_PC5_ASSERT_CH_SHIFT 24 +#define PMU1_PLL0_PC5_DEASSERT_CH_MASK 0xff000000 + +#define PMU1_PLL0_PLLCTL6 6 +#define PMU1_PLL0_PLLCTL7 7 +#define PMU1_PLL0_PLLCTL8 8 + +#define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1) + +#define PMU1_PLL0_PLLCTL9 9 + +#define PMU1_PLL0_PLLCTL10 10 + +/* PMU rev 2 control words */ +#define PMU2_PHY_PLL_PLLCTL 4 +#define PMU2_SI_PLL_PLLCTL 10 + +/* PMU rev 2 */ +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU2_PLL_PLLCTL0 0 +#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000 +#define PMU2_PLL_PC0_P1DIV_SHIFT 20 +#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000 +#define PMU2_PLL_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU2_PLL_PLLCTL1 1 +#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff +#define PMU2_PLL_PC1_M1DIV_SHIFT 0 +#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC1_M2DIV_SHIFT 8 +#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000 +#define PMU2_PLL_PC1_M3DIV_SHIFT 16 +#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000 +#define PMU2_PLL_PC1_M4DIV_SHIFT 24 + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU2_PLL_PLLCTL2 2 +#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff +#define PMU2_PLL_PC2_M5DIV_SHIFT 0 +#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC2_M6DIV_SHIFT 8 +#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17 +#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU2_PLL_PLLCTL3 3 +#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU2_PLL_PLLCTL4 4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU2_PLL_PLLCTL5 5 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28 + +/* PMU rev 5 (& 6) */ +#define PMU5_PLL_P1P2_OFF 0 +#define PMU5_PLL_P1_MASK 0x0f000000 +#define PMU5_PLL_P1_SHIFT 24 +#define PMU5_PLL_P2_MASK 0x00f00000 +#define PMU5_PLL_P2_SHIFT 20 +#define PMU5_PLL_M14_OFF 1 +#define PMU5_PLL_MDIV_MASK 0x000000ff +#define PMU5_PLL_MDIV_WIDTH 8 +#define PMU5_PLL_NM5_OFF 2 +#define PMU5_PLL_NDIV_MASK 0xfff00000 +#define PMU5_PLL_NDIV_SHIFT 20 +#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000 +#define PMU5_PLL_NDIV_MODE_SHIFT 17 +#define PMU5_PLL_FMAB_OFF 3 +#define PMU5_PLL_MRAT_MASK 0xf0000000 +#define PMU5_PLL_MRAT_SHIFT 28 +#define PMU5_PLL_ABRAT_MASK 0x08000000 +#define PMU5_PLL_ABRAT_SHIFT 27 +#define PMU5_PLL_FDIV_MASK 0x07ffffff +#define PMU5_PLL_PLLCTL_OFF 4 +#define PMU5_PLL_PCHI_OFF 5 +#define PMU5_PLL_PCHI_MASK 0x0000003f + +/* pmu XtalFreqRatio */ +#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF +#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000 +#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31 + +/* Divider allocation in 5357 */ +#define PMU5_MAINPLL_CPU 1 +#define PMU5_MAINPLL_MEM 2 +#define PMU5_MAINPLL_SI 3 + +#define PMU7_PLL_PLLCTL7 7 +#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000 +#define PMU7_PLL_CTL7_M4DIV_SHIFT 24 +#define PMU7_PLL_CTL7_M4DIV_BY_6 
6 +#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc +#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL8 8 +#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff +#define PMU7_PLL_CTL8_M5DIV_SHIFT 0 +#define PMU7_PLL_CTL8_M5DIV_BY_8 8 +#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18 +#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00 +#define PMU7_PLL_CTL8_M6DIV_SHIFT 8 +#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL11 11 +#define PMU7_PLL_PLLCTL11_MASK 0xffffff00 +#define PMU7_PLL_PLLCTL11_VAL 0x22222200 + +/* PMU rev 15 */ +#define PMU15_PLL_PLLCTL0 0 +#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + +#define PMU15_PLL_PLLCTL1 1 +#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060 +#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17 +#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000 +#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28 +#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000 +#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30 + +#define PMU15_PLL_PLLCTL2 2 +#define PMU15_PLL_PC2_CTEN_MASK 0x00000001 +#define PMU15_PLL_PC2_CTEN_SHIFT 0 + +#define PMU15_PLL_PLLCTL3 3 +#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001 +#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0 +#define PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000 +#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3 + +#define PMU15_PLL_PLLCTL4 4 +#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007 +#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0 +#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038 +#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3 +#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0 +#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6 +#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00 +#define PMU15_PLL_PC4_DBGMODE_SHIFT 9 +#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000 +#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12 +#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000 +#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13 +#define 
PMU15_PLL_PC4_DINPOL_MASK 0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT 20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25
+#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT 26
+
+#define PMU15_PLL_PLLCTL5 5
+#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT 27
+
+#define PMU15_PLL_PLLCTL6 6
+#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT 27
+
+#define PMU15_FREQTGT_480_DEFAULT 0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5
+#define PMU15_ARM_96MHZ 96000000 /**< 96 MHz */
+#define PMU15_ARM_98MHZ 98400000 /**< 98.4 MHz */
+#define PMU15_ARM_97MHZ 97000000 /**< 97 MHz */
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT 0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN 0
+#define PMU17_PLLCTL0_BBPLL_DRST 3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8
+
+/* PLL usage in 4716/47162 */
+#define PMU4716_MAINPLL_PLL0 12
+
+/* PLL Usages for 4368 */
+#define PMU4368_P1DIV_LO_SHIFT 0
+#define PMU4368_P1DIV_HI_SHIFT 2
+
+#define PMU4368_PLL1_PC4_P1DIV_MASK 0xC0000000
+#define PMU4368_PLL1_PC4_P1DIV_SHIFT 30
+#define PMU4368_PLL1_PC5_P1DIV_MASK 0x00000003
+#define PMU4368_PLL1_PC5_P1DIV_SHIFT 0
+#define PMU4368_PLL1_PC5_NDIV_INT_MASK 0x00000ffc
+#define PMU4368_PLL1_PC5_NDIV_INT_SHIFT 2
+#define PMU4368_PLL1_PC5_NDIV_FRAC_MASK 0xfffff000
+#define PMU4368_PLL1_PC5_NDIV_FRAC_SHIFT 12
+
+/* PLL usage in 4369 */
+#define PMU4369_PLL0_PC2_PDIV_MASK 0x000f0000
+#define PMU4369_PLL0_PC2_PDIV_SHIFT 16
+#define PMU4369_PLL0_PC2_NDIV_INT_MASK 0x3ff00000
+#define PMU4369_PLL0_PC2_NDIV_INT_SHIFT 20
+#define PMU4369_PLL0_PC3_NDIV_FRAC_MASK 0x000fffff
+#define PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT 0
+#define PMU4369_PLL1_PC5_P1DIV_MASK 0xc0000000
+#define PMU4369_PLL1_PC5_P1DIV_SHIFT 30
+#define PMU4369_PLL1_PC6_P1DIV_MASK 0x00000003
+#define PMU4369_PLL1_PC6_P1DIV_SHIFT 0
+#define PMU4369_PLL1_PC6_NDIV_INT_MASK 0x00000ffc
+#define PMU4369_PLL1_PC6_NDIV_INT_SHIFT 2
+#define PMU4369_PLL1_PC6_NDIV_FRAC_MASK 0xfffff000
+#define PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT 12
+
+#define PMU4369_P1DIV_LO_SHIFT 0
+#define PMU4369_P1DIV_HI_SHIFT 2
+
+#define PMU4369_PLL6VAL_P1DIV 4
+#define PMU4369_PLL6VAL_P1DIV_BIT3_2 1
+#define PMU4369_PLL6VAL_PRE_SCALE (1 << 17)
+#define PMU4369_PLL6VAL_POST_SCALE (1 << 3)
+
+/* PLL usage in 4378
+* Temporary setting, update is needed.
+*/ +#define PMU4378_PLL0_PC2_P1DIV_MASK 0x000f0000 +#define PMU4378_PLL0_PC2_P1DIV_SHIFT 16 +#define PMU4378_PLL0_PC2_NDIV_INT_MASK 0x3ff00000 +#define PMU4378_PLL0_PC2_NDIV_INT_SHIFT 20 + +/* PLL usage in 4387 */ +#define PMU4387_PLL0_PC1_ICH2_MDIV_SHIFT 18 +#define PMU4387_PLL0_PC1_ICH2_MDIV_MASK 0x07FC0000 +#define PMU4387_PLL0_PC2_ICH3_MDIV_MASK 0x000001ff + +/* PLL usage in 4388 */ +#define PMU4388_APLL_NDIV_P 0x154u +#define PMU4388_APLL_NDIV_Q 0x1ffu +#define PMU4388_APLL_PDIV 0x3u +#define PMU4388_ARMPLL_I_NDIV_INT_MASK 0x01ff8000u +#define PMU4388_ARMPLL_I_NDIV_INT_SHIFT 15u + +/* PLL usage in 4389 */ +#define PMU4389_APLL_NDIV_P 0x154u +#define PMU4389_APLL_NDIV_Q 0x1ffu +#define PMU4389_APLL_PDIV 0x3u +#define PMU4389_ARMPLL_I_NDIV_INT_MASK 0x01ff8000u +#define PMU4389_ARMPLL_I_NDIV_INT_SHIFT 15u + +/* 5357 Chip specific ChipControl register bits */ +#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */ +#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */ +#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */ +/* 43217 Chip specific ChipControl register bits */ +#define CCTRL43217_EXTPA_C0 (1<<13) /* core0 extPA in ChipControl 1, bit 13 */ +#define CCTRL43217_EXTPA_C1 (1<<8) /* core1 extPA in ChipControl 1, bit 8 */ + +#define PMU1_PLL0_CHIPCTL0 0 +#define PMU1_PLL0_CHIPCTL1 1 +#define PMU1_PLL0_CHIPCTL2 2 + +#define SOCDEVRAM_BP_ADDR 0x1E000000 +#define SOCDEVRAM_ARM_ADDR 0x00800000 + +#define PMU_VREG0_I_SR_CNTL_EN_SHIFT 0 +#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2 +#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3 +#define PMU_VREG0_CBUCKFSW_ADJ_SHIFT 7 +#define PMU_VREG0_CBUCKFSW_ADJ_MASK 0x1F +#define PMU_VREG0_RAMP_SEL_SHIFT 13 +#define PMU_VREG0_RAMP_SEL_MASK 0x7 +#define PMU_VREG0_VFB_RSEL_SHIFT 17 +#define PMU_VREG0_VFB_RSEL_MASK 3 + +#define PMU_VREG4_ADDR 4 + +#define PMU_VREG4_CLDO_PWM_SHIFT 4 +#define PMU_VREG4_CLDO_PWM_MASK 0x7 + +#define PMU_VREG4_LPLDO1_SHIFT 15 +#define PMU_VREG4_LPLDO1_MASK 0x7 +#define PMU_VREG4_LPLDO1_1p20V 0 +#define PMU_VREG4_LPLDO1_1p15V 1 +#define PMU_VREG4_LPLDO1_1p10V 2 +#define PMU_VREG4_LPLDO1_1p25V 3 +#define PMU_VREG4_LPLDO1_1p05V 4 +#define PMU_VREG4_LPLDO1_1p00V 5 +#define PMU_VREG4_LPLDO1_0p95V 6 +#define PMU_VREG4_LPLDO1_0p90V 7 + +#define PMU_VREG4_LPLDO2_LVM_SHIFT 18 +#define PMU_VREG4_LPLDO2_LVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_HVM_SHIFT 21 +#define PMU_VREG4_LPLDO2_HVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_LVM_HVM_MASK 0x3f +#define PMU_VREG4_LPLDO2_1p00V 0 +#define PMU_VREG4_LPLDO2_1p15V 1 +#define PMU_VREG4_LPLDO2_1p20V 2 +#define PMU_VREG4_LPLDO2_1p10V 3 +#define PMU_VREG4_LPLDO2_0p90V 4 /**< 4 - 7 is 0.90V */ + +#define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27 +#define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1 + +#define PMU_VREG5_ADDR 5 +#define PMU_VREG5_HSICAVDD_PD_SHIFT 6 +#define PMU_VREG5_HSICAVDD_PD_MASK 0x1 +#define PMU_VREG5_HSICDVDD_PD_SHIFT 11 +#define PMU_VREG5_HSICDVDD_PD_MASK 0x1 + +/* 43228 chipstatus reg bits */ +#define CST43228_OTP_PRESENT 0x2 + +/* 4360 Chip specific ChipControl register bits */ +/* 43602 uses these ChipControl definitions as well */ +#define CCTRL4360_I2C_MODE (1 << 0) +#define CCTRL4360_UART_MODE (1 << 1) +#define CCTRL4360_SECI_MODE (1 << 2) +#define CCTRL4360_BTSWCTRL_MODE (1 << 3) +#define CCTRL4360_DISCRETE_FEMCTRL_MODE (1 << 4) +#define CCTRL4360_DIGITAL_PACTRL_MODE (1 << 5) +#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT (1 << 6) +#define CCTRL4360_EXTRA_GPIO_MODE (1 << 7) +#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8) +#define 
CCTRL4360_BT_LGCY_MODE (1 << 9) +#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21) +#define CCTRL4360_SECI_ON_GPIO01 (1 << 24) + +/* 4360 Chip specific Regulator Control register bits */ +#define RCTRL4360_RFLDO_PWR_DOWN (1 << 1) + +/* 4360 PMU resources and chip status bits */ +#define RES4360_REGULATOR 0 +#define RES4360_ILP_AVAIL 1 +#define RES4360_ILP_REQ 2 +#define RES4360_XTAL_LDO_PU 3 +#define RES4360_XTAL_PU 4 +#define RES4360_ALP_AVAIL 5 +#define RES4360_BBPLLPWRSW_PU 6 +#define RES4360_HT_AVAIL 7 +#define RES4360_OTP_PU 8 +#define RES4360_AVB_PLL_PWRSW_PU 9 +#define RES4360_PCIE_TL_CLK_AVAIL 10 + +#define CST4360_XTAL_40MZ 0x00000001 +#define CST4360_SFLASH 0x00000002 +#define CST4360_SPROM_PRESENT 0x00000004 +#define CST4360_SFLASH_TYPE 0x00000004 +#define CST4360_OTP_ENABLED 0x00000008 +#define CST4360_REMAP_ROM 0x00000010 +#define CST4360_RSRC_INIT_MODE_MASK 0x00000060 +#define CST4360_RSRC_INIT_MODE_SHIFT 5 +#define CST4360_ILP_DIVEN 0x00000080 +#define CST4360_MODE_USB 0x00000100 +#define CST4360_SPROM_SIZE_MASK 0x00000600 +#define CST4360_SPROM_SIZE_SHIFT 9 +#define CST4360_BBPLL_LOCK 0x00000800 +#define CST4360_AVBBPLL_LOCK 0x00001000 +#define CST4360_USBBBPLL_LOCK 0x00002000 +#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \ + CST4360_RSRC_INIT_MODE_SHIFT) + +#define CCTRL_4360_UART_SEL 0x2 + +#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \ + CST4360_RSRC_INIT_MODE_SHIFT) + +#define PMU4360_CC1_GPIO7_OVRD (1<<23) /* GPIO7 override */ + +/* 43602 PMU resources based on pmu_params.xls version v0.95 */ +#define RES43602_LPLDO_PU 0 +#define RES43602_REGULATOR 1 +#define RES43602_PMU_SLEEP 2 +#define RES43602_RSVD_3 3 +#define RES43602_XTALLDO_PU 4 +#define RES43602_SERDES_PU 5 +#define RES43602_BBPLL_PWRSW_PU 6 +#define RES43602_SR_CLK_START 7 +#define RES43602_SR_PHY_PWRSW 8 +#define RES43602_SR_SUBCORE_PWRSW 9 +#define RES43602_XTAL_PU 10 +#define RES43602_PERST_OVR 11 +#define RES43602_SR_CLK_STABLE 12 +#define RES43602_SR_SAVE_RESTORE 13 +#define RES43602_SR_SLEEP 14 +#define RES43602_LQ_START 15 +#define RES43602_LQ_AVAIL 16 +#define RES43602_WL_CORE_RDY 17 +#define RES43602_ILP_REQ 18 +#define RES43602_ALP_AVAIL 19 +#define RES43602_RADIO_PU 20 +#define RES43602_RFLDO_PU 21 +#define RES43602_HT_START 22 +#define RES43602_HT_AVAIL 23 +#define RES43602_MACPHY_CLKAVAIL 24 +#define RES43602_PARLDO_PU 25 +#define RES43602_RSVD_26 26 + +/* 43602 chip status bits */ +#define CST43602_SPROM_PRESENT (1<<1) +#define CST43602_SPROM_SIZE (1<<10) /* 0 = 16K, 1 = 4K */ +#define CST43602_BBPLL_LOCK (1<<11) +#define CST43602_RF_LDO_OUT_OK (1<<15) /* RF LDO output OK */ + +#define PMU43602_CC1_GPIO12_OVRD (1<<28) /* GPIO12 override */ + +#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1) /* creates gated_pcie_wake, pmu_wakeup logic */ +#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN (1<<2) /* creates gated_pcie_wake, pmu_wakeup logic */ +#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3) +#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5) /* enable pmu_wakeup to request for ALP_AVAIL */ +#define PMU43602_CC2_PERST_L_EXTEND_EN (1<<9) /* extend perst_l until rsc PERST_OVR comes up */ +#define PMU43602_CC2_FORCE_EXT_LPO (1<<19) /* 1=ext LPO clock is the final LPO clock */ +#define PMU43602_CC2_XTAL32_SEL (1<<30) /* 0=ext_clock, 1=xtal */ + +#define CC_SR1_43602_SR_ASM_ADDR (0x0) + +/* PLL CTL register values for open loop, used during S/R operation */ +#define PMU43602_PLL_CTL6_VAL 0x68000528 +#define PMU43602_PLL_CTL7_VAL 0x6 + +#define 
PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29) + +#define CC_SR0_43602_SR_ENG_EN_MASK 0x1 +#define CC_SR0_43602_SR_ENG_EN_SHIFT 0 + +/* GCI function sel values */ +#define CC_FNSEL_HWDEF (0u) +#define CC_FNSEL_SAMEASPIN (1u) +#define CC_FNSEL_GPIO0 (2u) +#define CC_FNSEL_GPIO1 (3u) +#define CC_FNSEL_GCI0 (4u) +#define CC_FNSEL_GCI1 (5u) +#define CC_FNSEL_UART (6u) +#define CC_FNSEL_SFLASH (7u) +#define CC_FNSEL_SPROM (8u) +#define CC_FNSEL_MISC0 (9u) +#define CC_FNSEL_MISC1 (10u) +#define CC_FNSEL_MISC2 (11u) +#define CC_FNSEL_IND (12u) +#define CC_FNSEL_PDN (13u) +#define CC_FNSEL_PUP (14u) +#define CC_FNSEL_TRI (15u) + +/* 4387 GCI function sel values */ +#define CC4387_FNSEL_FUART (3u) +#define CC4387_FNSEL_DBG_UART (6u) +#define CC4387_FNSEL_SPI (7u) + +/* Indices of PMU voltage regulator registers */ +#define PMU_VREG_0 (0u) +#define PMU_VREG_1 (1u) +#define PMU_VREG_2 (2u) +#define PMU_VREG_3 (3u) +#define PMU_VREG_4 (4u) +#define PMU_VREG_5 (5u) +#define PMU_VREG_6 (6u) +#define PMU_VREG_7 (7u) +#define PMU_VREG_8 (8u) +#define PMU_VREG_9 (9u) +#define PMU_VREG_10 (10u) +#define PMU_VREG_11 (11u) +#define PMU_VREG_12 (12u) +#define PMU_VREG_13 (13u) +#define PMU_VREG_14 (14u) +#define PMU_VREG_15 (15u) +#define PMU_VREG_16 (16u) + +/* 43012 Chipcommon ChipStatus bits */ +#define CST43012_FLL_LOCK (1 << 13) +/* 43012 resources - End */ + +/* 43012 related Cbuck modes */ +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0 0x00001c03 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0 0x00492490 +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1 0x00001c03 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1 0x00490410 + +/* 43012 related dynamic cbuck mode mask */ +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFC07 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFFFF + +/* 4369 related VREG masks */ +#define PMU_4369_VREG_5_MISCLDO_POWER_UP_MASK (1u << 11u) +#define PMU_4369_VREG_5_MISCLDO_POWER_UP_SHIFT 11u +#define PMU_4369_VREG_5_LPLDO_POWER_UP_MASK (1u << 27u) +#define PMU_4369_VREG_5_LPLDO_POWER_UP_SHIFT 27u +#define PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(23u, 20u) +#define PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_SHIFT 20u +#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(31, 28) +#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT 28u + +#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK (1u << 3u) +#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT 3u + +#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK (1u << 27u) +#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_SHIFT 27u +#define PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK (1u << 28u) +#define PMU_4369_VREG_7_WL_PMU_LP_MODE_SHIFT 28u +#define PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK (1u << 29u) +#define PMU_4369_VREG_7_WL_PMU_LV_MODE_SHIFT 29u + +#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK BCM_MASK32(4, 0) +#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT 0u + +#define PMU_4369_VREG13_RSRC_EN0_ASR_MASK BCM_MASK32(9, 9) +#define PMU_4369_VREG13_RSRC_EN0_ASR_SHIFT 9u +#define PMU_4369_VREG13_RSRC_EN1_ASR_MASK BCM_MASK32(10, 10) +#define PMU_4369_VREG13_RSRC_EN1_ASR_SHIFT 10u +#define PMU_4369_VREG13_RSRC_EN2_ASR_MASK BCM_MASK32(11, 11) +#define PMU_4369_VREG13_RSRC_EN2_ASR_SHIFT 11u + +#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK (1u << 23u) +#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT 23u + +#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_MASK BCM_MASK32(2, 0) +#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_SHIFT 0u +#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_MASK BCM_MASK32(17, 15) +#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_SHIFT 15u +#define 
PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK BCM_MASK32(20, 18) +#define PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT 18u +#define PMU_4369_VREG16_RSRC2_ABUCK_MODE_MASK BCM_MASK32(23, 21) +#define PMU_4369_VREG16_RSRC2_ABUCK_MODE_SHIFT 21u + +/* 4362 related VREG masks */ +#define PMU_4362_VREG_5_MISCLDO_POWER_UP_MASK (1u << 11u) +#define PMU_4362_VREG_5_MISCLDO_POWER_UP_SHIFT (11u) +#define PMU_4362_VREG_5_LPLDO_POWER_UP_MASK (1u << 27u) +#define PMU_4362_VREG_5_LPLDO_POWER_UP_SHIFT (27u) +#define PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(31, 28) +#define PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT (28u) +#define PMU_4362_VREG_6_MEMLPLDO_POWER_UP_MASK (1u << 3u) +#define PMU_4362_VREG_6_MEMLPLDO_POWER_UP_SHIFT (3u) + +#define PMU_4362_VREG_7_PMU_FORCE_HP_MODE_MASK (1u << 27u) +#define PMU_4362_VREG_7_PMU_FORCE_HP_MODE_SHIFT (27u) +#define PMU_4362_VREG_7_WL_PMU_LP_MODE_MASK (1u << 28u) +#define PMU_4362_VREG_7_WL_PMU_LP_MODE_SHIFT (28u) +#define PMU_4362_VREG_7_WL_PMU_LV_MODE_MASK (1u << 29u) +#define PMU_4362_VREG_7_WL_PMU_LV_MODE_SHIFT (29u) + +#define PMU_4362_VREG8_ASR_OVADJ_LPPFM_MASK BCM_MASK32(4, 0) +#define PMU_4362_VREG8_ASR_OVADJ_LPPFM_SHIFT (0u) + +#define PMU_4362_VREG8_ASR_OVADJ_PFM_MASK BCM_MASK32(9, 5) +#define PMU_4362_VREG8_ASR_OVADJ_PFM_SHIFT (5u) + +#define PMU_4362_VREG8_ASR_OVADJ_PWM_MASK BCM_MASK32(14, 10) +#define PMU_4362_VREG8_ASR_OVADJ_PWM_SHIFT (10u) + +#define PMU_4362_VREG13_RSRC_EN0_ASR_MASK BCM_MASK32(9, 9) +#define PMU_4362_VREG13_RSRC_EN0_ASR_SHIFT 9u +#define PMU_4362_VREG13_RSRC_EN1_ASR_MASK BCM_MASK32(10, 10) +#define PMU_4362_VREG13_RSRC_EN1_ASR_SHIFT 10u +#define PMU_4362_VREG13_RSRC_EN2_ASR_MASK BCM_MASK32(11, 11) +#define PMU_4362_VREG13_RSRC_EN2_ASR_SHIFT 11u + +#define PMU_4362_VREG14_RSRC_EN_CSR_MASK0_MASK (1u << 23u) +#define PMU_4362_VREG14_RSRC_EN_CSR_MASK0_SHIFT (23u) + +#define PMU_4362_VREG16_RSRC0_CBUCK_MODE_MASK BCM_MASK32(2, 0) +#define PMU_4362_VREG16_RSRC0_CBUCK_MODE_SHIFT (0u) +#define PMU_4362_VREG16_RSRC0_ABUCK_MODE_MASK BCM_MASK32(17, 15) +#define PMU_4362_VREG16_RSRC0_ABUCK_MODE_SHIFT (15u) +#define PMU_4362_VREG16_RSRC1_ABUCK_MODE_MASK BCM_MASK32(20, 18) +#define PMU_4362_VREG16_RSRC1_ABUCK_MODE_SHIFT (18u) +#define PMU_4362_VREG16_RSRC2_ABUCK_MODE_MASK BCM_MASK32(23, 21) +#define PMU_4362_VREG16_RSRC2_ABUCK_MODE_SHIFT 21u + +#define VREG0_4378_CSR_VOLT_ADJ_PWM_MASK 0x00001F00u +#define VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT 8u +#define VREG0_4378_CSR_VOLT_ADJ_PFM_MASK 0x0003E000u +#define VREG0_4378_CSR_VOLT_ADJ_PFM_SHIFT 13u +#define VREG0_4378_CSR_VOLT_ADJ_LP_PFM_MASK 0x007C0000u +#define VREG0_4378_CSR_VOLT_ADJ_LP_PFM_SHIFT 18u +#define VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_MASK 0x07800000u +#define VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_SHIFT 23u + +#define PMU_4387_VREG1_CSR_OVERI_DIS_MASK (1u << 22u) +#define PMU_4387_VREG6_WL_PMU_LV_MODE_MASK (0x00000002u) +#define PMU_4387_VREG6_MEMLDO_PU_MASK (0x00000008u) +#define PMU_4387_VREG8_ASR_OVERI_DIS_MASK (1u << 7u) + +#define PMU_4388_VREG6_WL_PMU_LV_MODE_SHIFT (1u) +#define PMU_4388_VREG6_WL_PMU_LV_MODE_MASK (1u << PMU_4388_VREG6_WL_PMU_LV_MODE_SHIFT) +#define PMU_4388_VREG6_MEMLDO_PU_SHIFT (3u) +#define PMU_4388_VREG6_MEMLDO_PU_MASK (1u << PMU_4388_VREG6_MEMLDO_PU_SHIFT) + +#define PMU_4389_VREG6_WL_PMU_LV_MODE_SHIFT (1u) +#define PMU_4389_VREG6_WL_PMU_LV_MODE_MASK (1u << PMU_4389_VREG6_WL_PMU_LV_MODE_SHIFT) +#define PMU_4389_VREG6_MEMLDO_PU_SHIFT (3u) +#define PMU_4389_VREG6_MEMLDO_PU_MASK (1u << PMU_4389_VREG6_MEMLDO_PU_SHIFT) + +#define PMU_VREG13_ASR_OVADJ_PWM_MASK (0x001F0000u) 
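+
+/*
+ * Editorial usage sketch (not part of the original header): BCM_MASK32(msb, lsb)
+ * builds the 32-bit mask covering bits msb..lsb, so a VREG field such as
+ * RSRC0_ABUCK_MODE is read and updated with the usual mask/shift idiom, e.g.
+ * (assuming the si_pmu_vreg_control(sih, reg, mask, val) helper from hndpmu.c):
+ *
+ *	si_pmu_vreg_control(sih, PMU_VREG_16,
+ *		PMU_4369_VREG16_RSRC0_ABUCK_MODE_MASK,
+ *		mode << PMU_4369_VREG16_RSRC0_ABUCK_MODE_SHIFT);
+ */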
+#define PMU_VREG13_ASR_OVADJ_PWM_SHIFT (16u) + +#define PMU_VREG14_RSRC_EN_ASR_PWM_PFM_MASK (1u << 18u) +#define PMU_VREG14_RSRC_EN_ASR_PWM_PFM_SHIFT (18u) + +#define CSR_VOLT_ADJ_PWM_4378 (0x17u) +#define CSR_VOLT_ADJ_PFM_4378 (0x17u) +#define CSR_VOLT_ADJ_LP_PFM_4378 (0x17u) +#define CSR_OUT_VOLT_TRIM_ADJ_4378 (0xEu) + +#ifdef WL_INITVALS +#define ABUCK_VOLT_SW_DEFAULT_4387 (wliv_pmu_abuck_volt) /* 1.00V */ +#define CBUCK_VOLT_SW_DEFAULT_4387 (wliv_pmu_cbuck_volt) /* 0.68V */ +#define CBUCK_VOLT_NON_LVM (wliv_pmu_cbuck_volt_non_lvm) /* 0.76V */ +#else +#define ABUCK_VOLT_SW_DEFAULT_4387 (0x1Fu) /* 1.00V */ +#define CBUCK_VOLT_SW_DEFAULT_4387 (0xFu) /* 0.68V */ +#define CBUCK_VOLT_NON_LVM (0x13u) /* 0.76V */ +#endif + +#define CC_GCI1_REG (0x1) + +#define FORCE_CLK_ON 1 +#define FORCE_CLK_OFF 0 + +#define PMU1_PLL0_SWITCH_MACCLOCK_120MHZ (0) +#define PMU1_PLL0_SWITCH_MACCLOCK_160MHZ (1) +#define PMU1_PLL0_PC1_M2DIV_VALUE_120MHZ 8 +#define PMU1_PLL0_PC1_M2DIV_VALUE_160MHZ 6 + +/* 4369 Related */ + +/* + * PMU VREG Definitions: + * http://confluence.broadcom.com/display/WLAN/BCM4369+PMU+Vreg+Control+Register + */ +/* PMU VREG4 */ +#define PMU_28NM_VREG4_WL_LDO_CNTL_EN (0x1 << 10) + +/* PMU VREG6 */ +#define PMU_28NM_VREG6_BTLDO3P3_PU (0x1 << 12) +#define PMU_4387_VREG6_BTLDO3P3_PU (0x1 << 8) + +/* PMU resources */ +#define RES4347_XTAL_PU 6 +#define RES4347_CORE_RDY_DIG 17 +#define RES4347_CORE_RDY_AUX 18 +#define RES4347_CORE_RDY_MAIN 22 + +/* 4369 PMU Resources */ +#define RES4369_DUMMY 0 +#define RES4369_ABUCK 1 +#define RES4369_PMU_SLEEP 2 +#define RES4369_MISCLDO 3 +#define RES4369_LDO3P3 4 +#define RES4369_FAST_LPO_AVAIL 5 +#define RES4369_XTAL_PU 6 +#define RES4369_XTAL_STABLE 7 +#define RES4369_PWRSW_DIG 8 +#define RES4369_SR_DIG 9 +#define RES4369_SLEEP_DIG 10 +#define RES4369_PWRSW_AUX 11 +#define RES4369_SR_AUX 12 +#define RES4369_SLEEP_AUX 13 +#define RES4369_PWRSW_MAIN 14 +#define RES4369_SR_MAIN 15 +#define RES4369_SLEEP_MAIN 16 +#define RES4369_DIG_CORE_RDY 17 +#define RES4369_CORE_RDY_AUX 18 +#define RES4369_ALP_AVAIL 19 +#define RES4369_RADIO_AUX_PU 20 +#define RES4369_MINIPMU_AUX_PU 21 +#define RES4369_CORE_RDY_MAIN 22 +#define RES4369_RADIO_MAIN_PU 23 +#define RES4369_MINIPMU_MAIN_PU 24 +#define RES4369_PCIE_EP_PU 25 +#define RES4369_COLD_START_WAIT 26 +#define RES4369_ARMHTAVAIL 27 +#define RES4369_HT_AVAIL 28 +#define RES4369_MACPHY_AUX_CLK_AVAIL 29 +#define RES4369_MACPHY_MAIN_CLK_AVAIL 30 + +/* +* 4378 PMU Resources +*/ +#define RES4378_DUMMY 0 +#define RES4378_ABUCK 1 +#define RES4378_PMU_SLEEP 2 +#define RES4378_MISC_LDO 3 +#define RES4378_LDO3P3_PU 4 +#define RES4378_FAST_LPO_AVAIL 5 +#define RES4378_XTAL_PU 6 +#define RES4378_XTAL_STABLE 7 +#define RES4378_PWRSW_DIG 8 +#define RES4378_SR_DIG 9 +#define RES4378_SLEEP_DIG 10 +#define RES4378_PWRSW_AUX 11 +#define RES4378_SR_AUX 12 +#define RES4378_SLEEP_AUX 13 +#define RES4378_PWRSW_MAIN 14 +#define RES4378_SR_MAIN 15 +#define RES4378_SLEEP_MAIN 16 +#define RES4378_CORE_RDY_DIG 17 +#define RES4378_CORE_RDY_AUX 18 +#define RES4378_ALP_AVAIL 19 +#define RES4378_RADIO_AUX_PU 20 +#define RES4378_MINIPMU_AUX_PU 21 +#define RES4378_CORE_RDY_MAIN 22 +#define RES4378_RADIO_MAIN_PU 23 +#define RES4378_MINIPMU_MAIN_PU 24 +#define RES4378_CORE_RDY_CB 25 +#define RES4378_PWRSW_CB 26 +#define RES4378_ARMHTAVAIL 27 +#define RES4378_HT_AVAIL 28 +#define RES4378_MACPHY_AUX_CLK_AVAIL 29 +#define RES4378_MACPHY_MAIN_CLK_AVAIL 30 +#define RES4378_RESERVED_31 31 + +/* +* 4387 PMU Resources +*/ +#define RES4387_DUMMY 0 +#define 
RES4387_RESERVED_1 1 +#define RES4387_FAST_LPO_AVAIL 1 /* C0 */ +#define RES4387_PMU_SLEEP 2 +#define RES4387_PMU_LP 2 /* C0 */ +#define RES4387_MISC_LDO 3 +#define RES4387_RESERVED_4 4 +#define RES4387_SERDES_AFE_RET 4 /* C0 */ +#define RES4387_XTAL_HQ 5 +#define RES4387_XTAL_PU 6 +#define RES4387_XTAL_STABLE 7 +#define RES4387_PWRSW_DIG 8 +#define RES4387_CORE_RDY_BTMAIN 9 +#define RES4387_CORE_RDY_BTSC 10 +#define RES4387_PWRSW_AUX 11 +#define RES4387_PWRSW_SCAN 12 +#define RES4387_CORE_RDY_SCAN 13 +#define RES4387_PWRSW_MAIN 14 +#define RES4387_RESERVED_15 15 +#define RES4387_XTAL_PM_CLK 15 /* C0 */ +#define RES4387_RESERVED_16 16 +#define RES4387_CORE_RDY_DIG 17 +#define RES4387_CORE_RDY_AUX 18 +#define RES4387_ALP_AVAIL 19 +#define RES4387_RADIO_PU_AUX 20 +#define RES4387_RADIO_PU_SCAN 21 +#define RES4387_CORE_RDY_MAIN 22 +#define RES4387_RADIO_PU_MAIN 23 +#define RES4387_MACPHY_CLK_SCAN 24 +#define RES4387_CORE_RDY_CB 25 +#define RES4387_PWRSW_CB 26 +#define RES4387_ARMCLK_AVAIL 27 +#define RES4387_HT_AVAIL 28 +#define RES4387_MACPHY_CLK_AUX 29 +#define RES4387_MACPHY_CLK_MAIN 30 +#define RES4387_RESERVED_31 31 + +/* 4388 PMU Resources */ +#define RES4388_DUMMY 0u +#define RES4388_FAST_LPO_AVAIL 1u +#define RES4388_PMU_LP 2u +#define RES4388_MISC_LDO 3u +#define RES4388_SERDES_AFE_RET 4u +#define RES4388_XTAL_HQ 5u +#define RES4388_XTAL_PU 6u +#define RES4388_XTAL_STABLE 7u +#define RES4388_PWRSW_DIG 8u +#define RES4388_BTMC_TOP_RDY 9u +#define RES4388_BTSC_TOP_RDY 10u +#define RES4388_PWRSW_AUX 11u +#define RES4388_PWRSW_SCAN 12u +#define RES4388_CORE_RDY_SCAN 13u +#define RES4388_PWRSW_MAIN 14u +#define RES4388_RESERVED_15 15u +#define RES4388_RESERVED_16 16u +#define RES4388_CORE_RDY_DIG 17u +#define RES4388_CORE_RDY_AUX 18u +#define RES4388_ALP_AVAIL 19u +#define RES4388_RADIO_PU_AUX 20u +#define RES4388_RADIO_PU_SCAN 21u +#define RES4388_CORE_RDY_MAIN 22u +#define RES4388_RADIO_PU_MAIN 23u +#define RES4388_MACPHY_CLK_SCAN 24u +#define RES4388_CORE_RDY_CB 25u +#define RES4388_PWRSW_CB 26u +#define RES4388_ARMCLKAVAIL 27u +#define RES4388_HT_AVAIL 28u +#define RES4388_MACPHY_CLK_AUX 29u +#define RES4388_MACPHY_CLK_MAIN 30u +#define RES4388_RESERVED_31 31u + +/* 4389 PMU Resources */ +#define RES4389_DUMMY 0u +#define RES4389_FAST_LPO_AVAIL 1u +#define RES4389_PMU_LP 2u +#define RES4389_MISC_LDO 3u +#define RES4389_SERDES_AFE_RET 4u +#define RES4389_XTAL_HQ 5u +#define RES4389_XTAL_PU 6u +#define RES4389_XTAL_STABLE 7u +#define RES4389_PWRSW_DIG 8u +#define RES4389_BTMC_TOP_RDY 9u +#define RES4389_BTSC_TOP_RDY 10u +#define RES4389_PWRSW_AUX 11u +#define RES4389_PWRSW_SCAN 12u +#define RES4389_CORE_RDY_SCAN 13u +#define RES4389_PWRSW_MAIN 14u +#define RES4389_RESERVED_15 15u +#define RES4389_RESERVED_16 16u +#define RES4389_CORE_RDY_DIG 17u +#define RES4389_CORE_RDY_AUX 18u +#define RES4389_ALP_AVAIL 19u +#define RES4389_RADIO_PU_AUX 20u +#define RES4389_RADIO_PU_SCAN 21u +#define RES4389_CORE_RDY_MAIN 22u +#define RES4389_RADIO_PU_MAIN 23u +#define RES4389_MACPHY_CLK_SCAN 24u +#define RES4389_CORE_RDY_CB 25u +#define RES4389_PWRSW_CB 26u +#define RES4389_ARMCLKAVAIL 27u +#define RES4389_HT_AVAIL 28u +#define RES4389_MACPHY_CLK_AUX 29u +#define RES4389_MACPHY_CLK_MAIN 30u +#define RES4389_RESERVED_31 31u + +/* 4397 PMU Resources */ +#define RES4397_DUMMY 0u +#define RES4397_FAST_LPO_AVAIL 1u +#define RES4397_PMU_LP 2u +#define RES4397_MISC_LDO 3u +#define RES4397_SERDES_AFE_RET 4u +#define RES4397_XTAL_HQ 5u +#define RES4397_XTAL_PU 6u +#define RES4397_XTAL_STABLE 7u +#define 
RES4397_PWRSW_DIG 8u +#define RES4397_BTMC_TOP_RDY 9u +#define RES4397_BTSC_TOP_RDY 10u +#define RES4397_PWRSW_AUX 11u +#define RES4397_PWRSW_SCAN 12u +#define RES4397_CORE_RDY_SCAN 13u +#define RES4397_PWRSW_MAIN 14u +#define RES4397_XTAL_PM_CLK 15u +#define RES4397_PWRSW_DRR2 16u +#define RES4397_CORE_RDY_DIG 17u +#define RES4397_CORE_RDY_AUX 18u +#define RES4397_ALP_AVAIL 19u +#define RES4397_RADIO_PU_AUX 20u +#define RES4397_RADIO_PU_SCAN 21u +#define RES4397_CORE_RDY_MAIN 22u +#define RES4397_RADIO_PU_MAIN 23u +#define RES4397_MACPHY_CLK_SCAN 24u +#define RES4397_CORE_RDY_CB 25u +#define RES4397_PWRSW_CB 26u +#define RES4397_ARMCLKAVAIL 27u +#define RES4397_HT_AVAIL 28u +#define RES4397_MACPHY_CLK_AUX 29u +#define RES4397_MACPHY_CLK_MAIN 30u +#define RES4397_RESERVED_31 31u + +/* 0: BToverPCIe, 1: BToverUART */ +#define CST4378_CHIPMODE_BTOU(cs) (((cs) & (1 << 6)) != 0) +#define CST4378_CHIPMODE_BTOP(cs) (((cs) & (1 << 6)) == 0) +#define CST4378_SPROM_PRESENT 0x00000010 + +#define CST4387_SFLASH_PRESENT 0x00000010U + +#define CST4387_CHIPMODE_BTOU(cs) (((cs) & (1 << 6)) != 0) +#define CST4387_CHIPMODE_BTOP(cs) (((cs) & (1 << 6)) == 0) +#define CST4387_SPROM_PRESENT 0x00000010 + +/* GCI chip status */ +#define GCI_CS_4369_FLL1MHZ_LOCK_MASK (1 << 1) +#define GCI_CS_4387_FLL1MHZ_LOCK_MASK (1 << 1) + +#define GCI_CS_4387_FLL1MHZ_DAC_OUT_SHIFT (16u) +#define GCI_CS_4387_FLL1MHZ_DAC_OUT_MASK (0x00ff0000u) +#define GCI_CS_4389_FLL1MHZ_DAC_OUT_SHIFT (16u) +#define GCI_CS_4389_FLL1MHZ_DAC_OUT_MASK (0x00ff0000u) + +/* GCI chip control registers */ +#define GCI_CC7_AAON_BYPASS_PWRSW_SEL 13 +#define GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON 14 + +/* 4368 GCI chip control registers */ +#define GCI_CC7_PRISEL_MASK (1 << 8 | 1 << 9) +#define GCI_CC12_PRISEL_MASK (1 << 0 | 1 << 1) +#define GCI_CC12_PRISEL_SHIFT 0 +#define GCI_CC12_DMASK_MASK (0x3ff << 10) +#define GCI_CC16_ANT_SHARE_MASK (1 << 16 | 1 << 17) + +#define CC2_4362_SDIO_AOS_WAKEUP_MASK (1u << 24u) +#define CC2_4362_SDIO_AOS_WAKEUP_SHIFT 24u + +#define CC2_4378_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u) +#define CC2_4378_MAIN_MEMLPLDO_VDDB_OFF_SHIFT 12u +#define CC2_4378_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u) +#define CC2_4378_AUX_MEMLPLDO_VDDB_OFF_SHIFT 13u +#define CC2_4378_MAIN_VDDRET_ON_MASK (1u << 15u) +#define CC2_4378_MAIN_VDDRET_ON_SHIFT 15u +#define CC2_4378_AUX_VDDRET_ON_MASK (1u << 16u) +#define CC2_4378_AUX_VDDRET_ON_SHIFT 16u +#define CC2_4378_GCI2WAKE_MASK (1u << 31u) +#define CC2_4378_GCI2WAKE_SHIFT 31u +#define CC2_4378_SDIO_AOS_WAKEUP_MASK (1u << 24u) +#define CC2_4378_SDIO_AOS_WAKEUP_SHIFT 24u +#define CC4_4378_LHL_TIMER_SELECT (1u << 0u) +#define CC6_4378_PWROK_WDT_EN_IN_MASK (1u << 6u) +#define CC6_4378_PWROK_WDT_EN_IN_SHIFT 6u +#define CC6_4378_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u) +#define CC6_4378_SDIO_AOS_CHIP_WAKEUP_SHIFT 24u + +#define CC2_4378_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u) +#define CC2_4378_USE_WLAN_BP_CLK_ON_REQ_SHIFT 15u +#define CC2_4378_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u) +#define CC2_4378_USE_CMN_BP_CLK_ON_REQ_SHIFT 16u + +#define CC2_4387_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u) +#define CC2_4387_MAIN_MEMLPLDO_VDDB_OFF_SHIFT 12u +#define CC2_4387_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u) +#define CC2_4387_AUX_MEMLPLDO_VDDB_OFF_SHIFT 13u +#define CC2_4387_MAIN_VDDRET_ON_MASK (1u << 15u) +#define CC2_4387_MAIN_VDDRET_ON_SHIFT 15u +#define CC2_4387_AUX_VDDRET_ON_MASK (1u << 16u) +#define CC2_4387_AUX_VDDRET_ON_SHIFT 16u +#define CC2_4387_GCI2WAKE_MASK (1u << 31u) +#define CC2_4387_GCI2WAKE_SHIFT 31u +#define 
CC2_4387_SDIO_AOS_WAKEUP_MASK (1u << 24u) +#define CC2_4387_SDIO_AOS_WAKEUP_SHIFT 24u +#define CC4_4387_LHL_TIMER_SELECT (1u << 0u) +#define CC6_4387_PWROK_WDT_EN_IN_MASK (1u << 6u) +#define CC6_4387_PWROK_WDT_EN_IN_SHIFT 6u +#define CC6_4387_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u) +#define CC6_4387_SDIO_AOS_CHIP_WAKEUP_SHIFT 24u + +#define CC2_4387_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u) +#define CC2_4387_USE_WLAN_BP_CLK_ON_REQ_SHIFT 15u +#define CC2_4387_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u) +#define CC2_4387_USE_CMN_BP_CLK_ON_REQ_SHIFT 16u + +#define CC2_4388_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u) +#define CC2_4388_MAIN_MEMLPLDO_VDDB_OFF_SHIFT (12u) +#define CC2_4388_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u) +#define CC2_4388_AUX_MEMLPLDO_VDDB_OFF_SHIFT (13u) +#define CC2_4388_MAIN_VDDRET_ON_MASK (1u << 15u) +#define CC2_4388_MAIN_VDDRET_ON_SHIFT (15u) +#define CC2_4388_AUX_VDDRET_ON_MASK (1u << 16u) +#define CC2_4388_AUX_VDDRET_ON_SHIFT (16u) +#define CC2_4388_GCI2WAKE_MASK (1u << 31u) +#define CC2_4388_GCI2WAKE_SHIFT (31u) +#define CC2_4388_SDIO_AOS_WAKEUP_MASK (1u << 24u) +#define CC2_4388_SDIO_AOS_WAKEUP_SHIFT (24u) +#define CC4_4388_LHL_TIMER_SELECT (1u << 0u) +#define CC6_4388_PWROK_WDT_EN_IN_MASK (1u << 6u) +#define CC6_4388_PWROK_WDT_EN_IN_SHIFT (6u) +#define CC6_4388_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u) +#define CC6_4388_SDIO_AOS_CHIP_WAKEUP_SHIFT (24u) + +#define CC2_4388_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u) +#define CC2_4388_USE_WLAN_BP_CLK_ON_REQ_SHIFT (15u) +#define CC2_4388_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u) +#define CC2_4388_USE_CMN_BP_CLK_ON_REQ_SHIFT (16u) + +#define CC2_4389_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u) +#define CC2_4389_MAIN_MEMLPLDO_VDDB_OFF_SHIFT (12u) +#define CC2_4389_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u) +#define CC2_4389_AUX_MEMLPLDO_VDDB_OFF_SHIFT (13u) +#define CC2_4389_MAIN_VDDRET_ON_MASK (1u << 15u) +#define CC2_4389_MAIN_VDDRET_ON_SHIFT (15u) +#define CC2_4389_AUX_VDDRET_ON_MASK (1u << 16u) +#define CC2_4389_AUX_VDDRET_ON_SHIFT (16u) +#define CC2_4389_GCI2WAKE_MASK (1u << 31u) +#define CC2_4389_GCI2WAKE_SHIFT (31u) +#define CC2_4389_SDIO_AOS_WAKEUP_MASK (1u << 24u) +#define CC2_4389_SDIO_AOS_WAKEUP_SHIFT (24u) +#define CC4_4389_LHL_TIMER_SELECT (1u << 0u) +#define CC6_4389_PWROK_WDT_EN_IN_MASK (1u << 6u) +#define CC6_4389_PWROK_WDT_EN_IN_SHIFT (6u) +#define CC6_4389_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u) +#define CC6_4389_SDIO_AOS_CHIP_WAKEUP_SHIFT (24u) + +#define CC2_4389_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u) +#define CC2_4389_USE_WLAN_BP_CLK_ON_REQ_SHIFT (15u) +#define CC2_4389_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u) +#define CC2_4389_USE_CMN_BP_CLK_ON_REQ_SHIFT (16u) + +#define PCIE_GPIO1_GPIO_PIN CC_GCI_GPIO_0 +#define PCIE_PERST_GPIO_PIN CC_GCI_GPIO_1 +#define PCIE_CLKREQ_GPIO_PIN CC_GCI_GPIO_2 + +#define VREG5_4378_MEMLPLDO_ADJ_MASK 0xF0000000 +#define VREG5_4378_MEMLPLDO_ADJ_SHIFT 28 +#define VREG5_4378_LPLDO_ADJ_MASK 0x00F00000 +#define VREG5_4378_LPLDO_ADJ_SHIFT 20 + +#define VREG5_4387_MISCLDO_PU_MASK (0x00000800u) +#define VREG5_4387_MISCLDO_PU_SHIFT (11u) + +#define VREG5_4387_MEMLPLDO_ADJ_MASK 0xF0000000 +#define VREG5_4387_MEMLPLDO_ADJ_SHIFT 28 +#define VREG5_4387_LPLDO_ADJ_MASK 0x00F00000 +#define VREG5_4387_LPLDO_ADJ_SHIFT 20 +#define VREG5_4387_MISC_LDO_ADJ_MASK (0xfu) +#define VREG5_4387_MISC_LDO_ADJ_SHIFT (0) + +/* misc ldo voltage + * https://drive.google.com/file/d/1JjvNhp-RIXJBtw99M4w5ww4MmDsBJbpD + */ +#define PMU_VREG5_MISC_LDO_VOLT_0p931 (0x7u) /* 0.93125 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p912 (0x6u) /* 
0.91250 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p893 (0x5u) /* 0.89375 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p875 (0x4u) /* 0.87500 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p856 (0x3u) /* 0.85625 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p837 (0x2u) /* 0.83750 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p818 (0x1u) /* 0.81875 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p800 (0) /* 0.80000 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p781 (0xfu) /* 0.78125 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p762 (0xeu) /* 0.76250 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p743 (0xdu) /* 0.74375 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p725 (0xcu) /* 0.72500 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p706 (0xbu) /* 0.70625 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p687 (0xau) /* 0.68750 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p668 (0x9u) /* 0.66875 v */ +#define PMU_VREG5_MISC_LDO_VOLT_0p650 (0x8u) /* 0.65000 v */ + +/* lpldo/memlpldo voltage */ +#define PMU_VREG5_LPLDO_VOLT_0_88 0xf /* 0.88v */ +#define PMU_VREG5_LPLDO_VOLT_0_86 0xe /* 0.86v */ +#define PMU_VREG5_LPLDO_VOLT_0_84 0xd /* 0.84v */ +#define PMU_VREG5_LPLDO_VOLT_0_82 0xc /* 0.82v */ +#define PMU_VREG5_LPLDO_VOLT_0_80 0xb /* 0.80v */ +#define PMU_VREG5_LPLDO_VOLT_0_78 0xa /* 0.78v */ +#define PMU_VREG5_LPLDO_VOLT_0_76 0x9 /* 0.76v */ +#define PMU_VREG5_LPLDO_VOLT_0_74 0x8 /* 0.74v */ +#define PMU_VREG5_LPLDO_VOLT_0_72 0x7 /* 0.72v */ +#define PMU_VREG5_LPLDO_VOLT_1_10 0x6 /* 1.10v */ +#define PMU_VREG5_LPLDO_VOLT_1_00 0x5 /* 1.00v */ +#define PMU_VREG5_LPLDO_VOLT_0_98 0x4 /* 0.98v */ +#define PMU_VREG5_LPLDO_VOLT_0_96 0x3 /* 0.96v */ +#define PMU_VREG5_LPLDO_VOLT_0_94 0x2 /* 0.94v */ +#define PMU_VREG5_LPLDO_VOLT_0_92 0x1 /* 0.92v */ +#define PMU_VREG5_LPLDO_VOLT_0_90 0x0 /* 0.90v */ + +/* Save/Restore engine */ + +/* 512 bytes block */ +#define SR_ASM_ADDR_BLK_SIZE_SHIFT (9u) + +#define BM_ADDR_TO_SR_ADDR(bmaddr) ((bmaddr) >> SR_ASM_ADDR_BLK_SIZE_SHIFT) +#define SR_ADDR_TO_BM_ADDR(sraddr) ((sraddr) << SR_ASM_ADDR_BLK_SIZE_SHIFT) + +/* Txfifo is 512KB for main core and 128KB for aux core + * We use first 12kB (0x3000) in BMC buffer for template in main core and + * 6.5kB (0x1A00) in aux core, followed by ASM code + */ +#define SR_ASM_ADDR_MAIN_4369 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_AUX_4369 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_DIG_4369 (0x0) + +#define SR_ASM_ADDR_MAIN_4362 BM_ADDR_TO_SR_ADDR(0xc00u) +#define SR_ASM_ADDR_DIG_4362 (0x0u) + +#define SR_ASM_ADDR_MAIN_4378 (0x18) +#define SR_ASM_ADDR_AUX_4378 (0xd) +/* backplane address, use last 16k of BTCM for s/r */ +#define SR_ASM_ADDR_DIG_4378A0 (0x51c000) + +/* backplane address, use last 32k of BTCM for s/r */ +#define SR_ASM_ADDR_DIG_4378B0 (0x518000) + +#define SR_ASM_ADDR_MAIN_4387 (0x18) +#define SR_ASM_ADDR_AUX_4387 (0xd) +#define SR_ASM_ADDR_SCAN_4387 (0) +/* backplane address */ +#define SR_ASM_ADDR_DIG_4387 (0x800000) + +#define SR_ASM_ADDR_MAIN_4387C0 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_AUX_4387C0 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_DIG_4387C0 (0x931000) +#define SR_ASM_ADDR_DIG_4387_C0 (0x931000) + +#define SR_ASM_ADDR_MAIN_4388 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_AUX_4388 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_SCAN_4388 BM_ADDR_TO_SR_ADDR(0) +#define SR_ASM_ADDR_DIG_4388 (0x18520000) +#define SR_ASM_SIZE_DIG_4388 (65536u) +#define FIS_CMN_SUBCORE_ADDR_4388 (0x1640u) + +#define SR_ASM_ADDR_MAIN_4389C0 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_AUX_4389C0 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_SCAN_4389C0 BM_ADDR_TO_SR_ADDR(0x000) +#define 
SR_ASM_ADDR_DIG_4389C0 (0x18520000) +#define SR_ASM_SIZE_DIG_4389C0 (8192u * 8u) + +#define SR_ASM_ADDR_MAIN_4389 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_AUX_4389 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_SCAN_4389 BM_ADDR_TO_SR_ADDR(0x000) +#define SR_ASM_ADDR_DIG_4389 (0x18520000) +#define SR_ASM_SIZE_DIG_4389 (8192u * 8u) +#define FIS_CMN_SUBCORE_ADDR_4389 (0x1640u) + +#define SR_ASM_ADDR_DIG_4397 (0x18520000) + +/* SR Control0 bits */ +#define SR0_SR_ENG_EN_MASK 0x1 +#define SR0_SR_ENG_EN_SHIFT 0 +#define SR0_SR_ENG_CLK_EN (1 << 1) +#define SR0_RSRC_TRIGGER (0xC << 2) +#define SR0_WD_MEM_MIN_DIV (0x3 << 6) +#define SR0_INVERT_SR_CLK (1 << 11) +#define SR0_MEM_STBY_ALLOW (1 << 16) +#define SR0_ENABLE_SR_ILP (1 << 17) +#define SR0_ENABLE_SR_ALP (1 << 18) +#define SR0_ENABLE_SR_HT (1 << 19) +#define SR0_ALLOW_PIC (3 << 20) +#define SR0_ENB_PMU_MEM_DISABLE (1 << 30) + +/* SR Control0 bits for 4369 */ +#define SR0_4369_SR_ENG_EN_MASK 0x1 +#define SR0_4369_SR_ENG_EN_SHIFT 0 +#define SR0_4369_SR_ENG_CLK_EN (1 << 1) +#define SR0_4369_RSRC_TRIGGER (0xC << 2) +#define SR0_4369_WD_MEM_MIN_DIV (0x2 << 6) +#define SR0_4369_INVERT_SR_CLK (1 << 11) +#define SR0_4369_MEM_STBY_ALLOW (1 << 16) +#define SR0_4369_ENABLE_SR_ILP (1 << 17) +#define SR0_4369_ENABLE_SR_ALP (1 << 18) +#define SR0_4369_ENABLE_SR_HT (1 << 19) +#define SR0_4369_ALLOW_PIC (3 << 20) +#define SR0_4369_ENB_PMU_MEM_DISABLE (1 << 30) + +/* SR Control0 bits for 4378 */ +#define SR0_4378_SR_ENG_EN_MASK 0x1 +#define SR0_4378_SR_ENG_EN_SHIFT 0 +#define SR0_4378_SR_ENG_CLK_EN (1 << 1) +#define SR0_4378_RSRC_TRIGGER (0xC << 2) +#define SR0_4378_WD_MEM_MIN_DIV (0x2 << 6) +#define SR0_4378_INVERT_SR_CLK (1 << 11) +#define SR0_4378_MEM_STBY_ALLOW (1 << 16) +#define SR0_4378_ENABLE_SR_ILP (1 << 17) +#define SR0_4378_ENABLE_SR_ALP (1 << 18) +#define SR0_4378_ENABLE_SR_HT (1 << 19) +#define SR0_4378_ALLOW_PIC (3 << 20) +#define SR0_4378_ENB_PMU_MEM_DISABLE (1 << 30) + +/* SR Control0 bits for 4387 */ +#define SR0_4387_SR_ENG_EN_MASK 0x1 +#define SR0_4387_SR_ENG_EN_SHIFT 0 +#define SR0_4387_SR_ENG_CLK_EN (1 << 1) +#define SR0_4387_RSRC_TRIGGER (0xC << 2) +#define SR0_4387_WD_MEM_MIN_DIV (0x2 << 6) +#define SR0_4387_WD_MEM_MIN_DIV_AUX (0x4 << 6) +#define SR0_4387_INVERT_SR_CLK (1 << 11) +#define SR0_4387_MEM_STBY_ALLOW (1 << 16) +#define SR0_4387_ENABLE_SR_ILP (1 << 17) +#define SR0_4387_ENABLE_SR_ALP (1 << 18) +#define SR0_4387_ENABLE_SR_HT (1 << 19) +#define SR0_4387_ALLOW_PIC (3 << 20) +#define SR0_4387_ENB_PMU_MEM_DISABLE (1 << 30) + +/* SR Control0 bits for 4388 */ +#define SR0_4388_SR_ENG_EN_MASK 0x1u +#define SR0_4388_SR_ENG_EN_SHIFT 0 +#define SR0_4388_SR_ENG_CLK_EN (1u << 1u) +#define SR0_4388_RSRC_TRIGGER (0xCu << 2u) +#define SR0_4388_WD_MEM_MIN_DIV (0x2u << 6u) +#define SR0_4388_INVERT_SR_CLK (1u << 11u) +#define SR0_4388_MEM_STBY_ALLOW (1u << 16u) +#define SR0_4388_ENABLE_SR_ILP (1u << 17u) +#define SR0_4388_ENABLE_SR_ALP (1u << 18u) +#define SR0_4388_ENABLE_SR_HT (1u << 19u) +#define SR0_4388_ALLOW_PIC (3u << 20u) +#define SR0_4388_ENB_PMU_MEM_DISABLE (1u << 30u) + +/* SR Control0 bits for 4389 */ +#define SR0_4389_SR_ENG_EN_MASK 0x1 +#define SR0_4389_SR_ENG_EN_SHIFT 0 +#define SR0_4389_SR_ENG_CLK_EN (1 << 1) +#define SR0_4389_RSRC_TRIGGER (0xC << 2) +#define SR0_4389_WD_MEM_MIN_DIV (0x2 << 6) +#define SR0_4389_INVERT_SR_CLK (1 << 11) +#define SR0_4389_MEM_STBY_ALLOW (1 << 16) +#define SR0_4389_ENABLE_SR_ILP (1 << 17) +#define SR0_4389_ENABLE_SR_ALP (1 << 18) +#define SR0_4389_ENABLE_SR_HT (1 << 19) +#define 
SR0_4389_ALLOW_PIC (3 << 20) +#define SR0_4389_ENB_PMU_MEM_DISABLE (1 << 30) + +/* SR Control1 bits */ +#define SR1_INIT_ADDR_MASK (0x000003FFu) +#define SR1_SELFTEST_ENB_MASK (0x00004000u) +#define SR1_SELFTEST_ERR_INJCT_ENB_MASK (0x00008000u) +#define SR1_SELFTEST_ERR_INJCT_PRD_MASK (0xFFFF0000u) +#define SR1_SELFTEST_ERR_INJCT_PRD_SHIFT (16u) + +/* SR Control2 bits */ +#define SR2_INIT_ADDR_LONG_MASK (0x00003FFFu) + +#define SR_SELFTEST_ERR_INJCT_PRD (0x10u) + +/* SR Status1 bits */ +#define SR_STS1_SR_ERR_MASK (0x00000001u) + +/* =========== LHL regs =========== */ +/* 4369 LHL register settings */ +#define LHL4369_UP_CNT 0 +#define LHL4369_DN_CNT 2 +#define LHL4369_PWRSW_EN_DWN_CNT (LHL4369_DN_CNT + 2) +#define LHL4369_ISO_EN_DWN_CNT (LHL4369_PWRSW_EN_DWN_CNT + 3) +#define LHL4369_SLB_EN_DWN_CNT (LHL4369_ISO_EN_DWN_CNT + 1) +#define LHL4369_ASR_CLK4M_DIS_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_LPPFM_MODE_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_MODE_SEL_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_MANUAL_MODE_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_ADJ_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_OVERI_DIS_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_TRIM_ADJ_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_VDDC_SW_DIS_DWN_CNT (LHL4369_SLB_EN_DWN_CNT + 1) +#define LHL4369_VMUX_ASR_SEL_DWN_CNT (LHL4369_VDDC_SW_DIS_DWN_CNT + 1) +#define LHL4369_CSR_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_MODE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_OVERI_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_HPBG_CHOP_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_SRBG_REF_SEL_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_PFM_PWR_SLICE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_TRIM_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_VOLTAGE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_HPBG_PU_EN_DWN_CNT (LHL4369_CSR_MODE_DWN_CNT + 1) + +#define LHL4369_HPBG_PU_EN_UP_CNT (LHL4369_UP_CNT + 1) +#define LHL4369_CSR_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_MODE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_OVERI_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_HPBG_CHOP_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_SRBG_REF_SEL_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_PFM_PWR_SLICE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_TRIM_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_VOLTAGE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_VMUX_ASR_SEL_UP_CNT (LHL4369_CSR_MODE_UP_CNT + 1) +#define LHL4369_VDDC_SW_DIS_UP_CNT (LHL4369_VMUX_ASR_SEL_UP_CNT + 1) +#define LHL4369_SLB_EN_UP_CNT (LHL4369_VDDC_SW_DIS_UP_CNT + 8) +#define LHL4369_ISO_EN_UP_CNT (LHL4369_SLB_EN_UP_CNT + 1) +#define LHL4369_PWRSW_EN_UP_CNT (LHL4369_ISO_EN_UP_CNT + 3) +#define LHL4369_ASR_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_CLK4M_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_LPPFM_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_MODE_SEL_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_MANUAL_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_OVERI_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_TRIM_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) + +/* 4362 LHL register settings */ +#define LHL4362_UP_CNT (0u) +#define LHL4362_DN_CNT (2u) +#define LHL4362_PWRSW_EN_DWN_CNT (LHL4362_DN_CNT + 2) 
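The LHL4369_*/LHL4362_* up/down counts above are chained offsets rather than absolute register values: each step in the power-down (and power-up) sequence is expressed relative to the stage that must precede it. A minimal stand-alone sketch that resolves the 4369 down-count chain by hand (plain C; the values are copied from the defines above, and the program itself is illustrative, not part of the driver):

#include <stdio.h>

int main(void)
{
	const int dn       = 2;             /* LHL4369_DN_CNT */
	const int pwrsw_en = dn + 2;        /* LHL4369_PWRSW_EN_DWN_CNT     -> 4  */
	const int iso_en   = pwrsw_en + 3;  /* LHL4369_ISO_EN_DWN_CNT       -> 7  */
	const int slb_en   = iso_en + 1;    /* LHL4369_SLB_EN_DWN_CNT       -> 8  */
	const int vddc_dis = slb_en + 1;    /* LHL4369_VDDC_SW_DIS_DWN_CNT  -> 9  */
	const int vmux_sel = vddc_dis + 1;  /* LHL4369_VMUX_ASR_SEL_DWN_CNT -> 10 */
	const int csr_mode = vmux_sel + 1;  /* LHL4369_CSR_MODE_DWN_CNT     -> 11 */
	const int hpbg_pu  = csr_mode + 1;  /* LHL4369_HPBG_PU_EN_DWN_CNT   -> 12 */

	/* the chain must resolve to a strictly increasing sequence */
	printf("%d %d %d %d %d %d %d\n", pwrsw_en, iso_en, slb_en,
	       vddc_dis, vmux_sel, csr_mode, hpbg_pu);
	return 0;
}

The LHL4362_* chain is built the same way and resolves to identical values.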
+#define LHL4362_ISO_EN_DWN_CNT (LHL4362_PWRSW_EN_DWN_CNT + 3) +#define LHL4362_SLB_EN_DWN_CNT (LHL4362_ISO_EN_DWN_CNT + 1) +#define LHL4362_ASR_CLK4M_DIS_DWN_CNT (LHL4362_DN_CNT) +#define LHL4362_ASR_LPPFM_MODE_DWN_CNT (LHL4362_DN_CNT) +#define LHL4362_ASR_MODE_SEL_DWN_CNT (LHL4362_DN_CNT) +#define LHL4362_ASR_MANUAL_MODE_DWN_CNT (LHL4362_DN_CNT) +#define LHL4362_ASR_ADJ_DWN_CNT (LHL4362_DN_CNT) +#define LHL4362_ASR_OVERI_DIS_DWN_CNT (LHL4362_DN_CNT) +#define LHL4362_ASR_TRIM_ADJ_DWN_CNT (LHL4362_DN_CNT) +#define LHL4362_VDDC_SW_DIS_DWN_CNT (LHL4362_SLB_EN_DWN_CNT + 1) +#define LHL4362_VMUX_ASR_SEL_DWN_CNT (LHL4362_VDDC_SW_DIS_DWN_CNT + 1) +#define LHL4362_CSR_ADJ_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_CSR_MODE_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_CSR_OVERI_DIS_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_HPBG_CHOP_DIS_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_SRBG_REF_SEL_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_PFM_PWR_SLICE_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_CSR_TRIM_ADJ_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_CSR_VOLTAGE_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4362_HPBG_PU_EN_DWN_CNT (LHL4362_CSR_MODE_DWN_CNT + 1) + +#define LHL4362_HPBG_PU_EN_UP_CNT (LHL4362_UP_CNT + 1) +#define LHL4362_CSR_ADJ_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_CSR_MODE_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_CSR_OVERI_DIS_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_HPBG_CHOP_DIS_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_SRBG_REF_SEL_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_PFM_PWR_SLICE_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_CSR_TRIM_ADJ_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_CSR_VOLTAGE_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1) +#define LHL4362_VMUX_ASR_SEL_UP_CNT (LHL4362_CSR_MODE_UP_CNT + 1) +#define LHL4362_VDDC_SW_DIS_UP_CNT (LHL4362_VMUX_ASR_SEL_UP_CNT + 1) +#define LHL4362_SLB_EN_UP_CNT (LHL4362_VDDC_SW_DIS_UP_CNT + 8) +#define LHL4362_ISO_EN_UP_CNT (LHL4362_SLB_EN_UP_CNT + 1) +#define LHL4362_PWRSW_EN_UP_CNT (LHL4362_ISO_EN_UP_CNT + 3) +#define LHL4362_ASR_ADJ_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1) +#define LHL4362_ASR_CLK4M_DIS_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1) +#define LHL4362_ASR_LPPFM_MODE_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1) +#define LHL4362_ASR_MODE_SEL_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1) +#define LHL4362_ASR_MANUAL_MODE_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1) +#define LHL4362_ASR_OVERI_DIS_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1) +#define LHL4362_ASR_TRIM_ADJ_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1) + +/* 4378 LHL register settings */ +#define LHL4378_CSR_OVERI_DIS_DWN_CNT 5u +#define LHL4378_CSR_MODE_DWN_CNT 5u +#define LHL4378_CSR_ADJ_DWN_CNT 5u + +#define LHL4378_CSR_OVERI_DIS_UP_CNT 1u +#define LHL4378_CSR_MODE_UP_CNT 1u +#define LHL4378_CSR_ADJ_UP_CNT 1u + +#define LHL4378_VDDC_SW_DIS_DWN_CNT 3u +#define LHL4378_ASR_ADJ_DWN_CNT 3u +#define LHL4378_HPBG_CHOP_DIS_DWN_CNT 0 + +#define LHL4378_VDDC_SW_DIS_UP_CNT 3u +#define LHL4378_ASR_ADJ_UP_CNT 1u +#define LHL4378_HPBG_CHOP_DIS_UP_CNT 0 + +#define LHL4378_ASR_MANUAL_MODE_DWN_CNT 5u +#define LHL4378_ASR_MODE_SEL_DWN_CNT 5u +#define LHL4378_ASR_LPPFM_MODE_DWN_CNT 5u +#define LHL4378_ASR_CLK4M_DIS_DWN_CNT 0 + +#define LHL4378_ASR_MANUAL_MODE_UP_CNT 1u +#define LHL4378_ASR_MODE_SEL_UP_CNT 1u +#define LHL4378_ASR_LPPFM_MODE_UP_CNT 1u +#define LHL4378_ASR_CLK4M_DIS_UP_CNT 0 + +#define 
LHL4378_PFM_PWR_SLICE_DWN_CNT 5u +#define LHL4378_ASR_OVERI_DIS_DWN_CNT 5u +#define LHL4378_SRBG_REF_SEL_DWN_CNT 5u +#define LHL4378_HPBG_PU_EN_DWN_CNT 6u + +#define LHL4378_PFM_PWR_SLICE_UP_CNT 1u +#define LHL4378_ASR_OVERI_DIS_UP_CNT 1u +#define LHL4378_SRBG_REF_SEL_UP_CNT 1u +#define LHL4378_HPBG_PU_EN_UP_CNT 0 + +#define LHL4378_CSR_TRIM_ADJ_CNT_SHIFT (16u) +#define LHL4378_CSR_TRIM_ADJ_CNT_MASK (0x3Fu << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT) +#define LHL4378_CSR_TRIM_ADJ_DWN_CNT 0 +#define LHL4378_CSR_TRIM_ADJ_UP_CNT 0 + +#define LHL4378_ASR_TRIM_ADJ_CNT_SHIFT (0u) +#define LHL4378_ASR_TRIM_ADJ_CNT_MASK (0x3Fu << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT) +#define LHL4378_ASR_TRIM_ADJ_UP_CNT 0 +#define LHL4378_ASR_TRIM_ADJ_DWN_CNT 0 + +#define LHL4378_PWRSW_EN_DWN_CNT 0 +#define LHL4378_SLB_EN_DWN_CNT 2u +#define LHL4378_ISO_EN_DWN_CNT 1u + +#define LHL4378_VMUX_ASR_SEL_DWN_CNT 4u + +#define LHL4378_PWRSW_EN_UP_CNT 6u +#define LHL4378_SLB_EN_UP_CNT 4u +#define LHL4378_ISO_EN_UP_CNT 5u + +#define LHL4378_VMUX_ASR_SEL_UP_CNT 2u + +#define LHL4387_VMUX_ASR_SEL_DWN_CNT (8u) +#define LHL4387_VMUX_ASR_SEL_UP_CNT (0x14u) + +/* 4387 LHL register settings for top off mode */ +#define LHL4387_TO_CSR_OVERI_DIS_DWN_CNT 3u +#define LHL4387_TO_CSR_MODE_DWN_CNT 3u +#define LHL4387_TO_CSR_ADJ_DWN_CNT 0 + +#define LHL4387_TO_CSR_OVERI_DIS_UP_CNT 1u +#define LHL4387_TO_CSR_MODE_UP_CNT 1u +#define LHL4387_TO_CSR_ADJ_UP_CNT 0 + +#define LHL4387_TO_VDDC_SW_DIS_DWN_CNT 4u +#define LHL4387_TO_ASR_ADJ_DWN_CNT 3u +#define LHL4387_TO_LP_MODE_DWN_CNT 6u +#define LHL4387_TO_HPBG_CHOP_DIS_DWN_CNT 3u + +#define LHL4387_TO_VDDC_SW_DIS_UP_CNT 0 +#define LHL4387_TO_ASR_ADJ_UP_CNT 1u +#define LHL4387_TO_LP_MODE_UP_CNT 0 +#define LHL4387_TO_HPBG_CHOP_DIS_UP_CNT 1u + +#define LHL4387_TO_ASR_MANUAL_MODE_DWN_CNT 3u +#define LHL4387_TO_ASR_MODE_SEL_DWN_CNT 3u +#define LHL4387_TO_ASR_LPPFM_MODE_DWN_CNT 3u +#define LHL4387_TO_ASR_CLK4M_DIS_DWN_CNT 3u + +#define LHL4387_TO_ASR_MANUAL_MODE_UP_CNT 1u +#define LHL4387_TO_ASR_MODE_SEL_UP_CNT 1u +#define LHL4387_TO_ASR_LPPFM_MODE_UP_CNT 1u +#define LHL4387_TO_ASR_CLK4M_DIS_UP_CNT 1u + +#define LHL4387_TO_PFM_PWR_SLICE_DWN_CNT 3u +#define LHL4387_TO_ASR_OVERI_DIS_DWN_CNT 3u +#define LHL4387_TO_SRBG_REF_SEL_DWN_CNT 3u +#define LHL4387_TO_HPBG_PU_EN_DWN_CNT 4u + +#define LHL4387_TO_PFM_PWR_SLICE_UP_CNT 1u +#define LHL4387_TO_ASR_OVERI_DIS_UP_CNT 1u +#define LHL4387_TO_SRBG_REF_SEL_UP_CNT 1u +#define LHL4387_TO_HPBG_PU_EN_UP_CNT 1u + +#define LHL4387_TO_PWRSW_EN_DWN_CNT 0 +#define LHL4387_TO_SLB_EN_DWN_CNT 4u +#define LHL4387_TO_ISO_EN_DWN_CNT 2u +#define LHL4387_TO_TOP_SLP_EN_DWN_CNT 0 + +#define LHL4387_TO_PWRSW_EN_UP_CNT 0x16u +#define LHL4387_TO_SLB_EN_UP_CNT 0xeu +#define LHL4387_TO_ISO_EN_UP_CNT 0x10u +#define LHL4387_TO_TOP_SLP_EN_UP_CNT 2u + +/* MacResourceReqTimer0/1 */ +#define MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT 24 +#define MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT 26 +#define MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT 27 +#define MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT 28 +#define MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT 29 + +/* for pmu rev32 and higher */ +#define PMU32_MAC_MAIN_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \ + (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT)) + +#define PMU32_MAC_AUX_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \ + (1 << 
MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \ + (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT)) + +/* for pmu rev38 and higher */ +#define PMU32_MAC_SCAN_RSRC_REQ_TIMER ((1u << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \ + (1u << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \ + (1u << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \ + (1u << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \ + (0u << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT)) + +/* 4369 related: 4369 parameters + * http://www.sj.broadcom.com/projects/BCM4369/gallery_backend.RC6.0/design/backplane/pmu_params.xls + */ +#define RES4369_DUMMY 0 +#define RES4369_ABUCK 1 +#define RES4369_PMU_SLEEP 2 +#define RES4369_MISCLDO_PU 3 +#define RES4369_LDO3P3_PU 4 +#define RES4369_FAST_LPO_AVAIL 5 +#define RES4369_XTAL_PU 6 +#define RES4369_XTAL_STABLE 7 +#define RES4369_PWRSW_DIG 8 +#define RES4369_SR_DIG 9 +#define RES4369_SLEEP_DIG 10 +#define RES4369_PWRSW_AUX 11 +#define RES4369_SR_AUX 12 +#define RES4369_SLEEP_AUX 13 +#define RES4369_PWRSW_MAIN 14 +#define RES4369_SR_MAIN 15 +#define RES4369_SLEEP_MAIN 16 +#define RES4369_DIG_CORE_RDY 17 +#define RES4369_CORE_RDY_AUX 18 +#define RES4369_ALP_AVAIL 19 +#define RES4369_RADIO_AUX_PU 20 +#define RES4369_MINIPMU_AUX_PU 21 +#define RES4369_CORE_RDY_MAIN 22 +#define RES4369_RADIO_MAIN_PU 23 +#define RES4369_MINIPMU_MAIN_PU 24 +#define RES4369_PCIE_EP_PU 25 +#define RES4369_COLD_START_WAIT 26 +#define RES4369_ARMHTAVAIL 27 +#define RES4369_HT_AVAIL 28 +#define RES4369_MACPHY_AUX_CLK_AVAIL 29 +#define RES4369_MACPHY_MAIN_CLK_AVAIL 30 +#define RES4369_RESERVED_31 31 + +#define CST4369_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4369_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4369_SPROM_PRESENT 0x00000010 + +#define PMU_4369_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF +#define PMU_4369_MACCORE_1_RES_REQ_MASK 0x7FFB3647 + +/* 4362 related */ +/* 4362 resource_table + * http://www.sj.broadcom.com/projects/BCM4362/gallery_backend.RC1.mar_15_2017/design/backplane/ + * pmu_params.xls + */ +#define RES4362_DUMMY (0u) +#define RES4362_ABUCK (1u) +#define RES4362_PMU_SLEEP (2u) +#define RES4362_MISCLDO_PU (3u) +#define RES4362_LDO3P3_PU (4u) +#define RES4362_FAST_LPO_AVAIL (5u) +#define RES4362_XTAL_PU (6u) +#define RES4362_XTAL_STABLE (7u) +#define RES4362_PWRSW_DIG (8u) +#define RES4362_SR_DIG (9u) +#define RES4362_SLEEP_DIG (10u) +#define RES4362_PWRSW_AUX (11u) +#define RES4362_SR_AUX (12u) +#define RES4362_SLEEP_AUX (13u) +#define RES4362_PWRSW_MAIN (14u) +#define RES4362_SR_MAIN (15u) +#define RES4362_SLEEP_MAIN (16u) +#define RES4362_DIG_CORE_RDY (17u) +#define RES4362_CORE_RDY_AUX (18u) +#define RES4362_ALP_AVAIL (19u) +#define RES4362_RADIO_AUX_PU (20u) +#define RES4362_MINIPMU_AUX_PU (21u) +#define RES4362_CORE_RDY_MAIN (22u) +#define RES4362_RADIO_MAIN_PU (23u) +#define RES4362_MINIPMU_MAIN_PU (24u) +#define RES4362_PCIE_EP_PU (25u) +#define RES4362_COLD_START_WAIT (26u) +#define RES4362_ARMHTAVAIL (27u) +#define RES4362_HT_AVAIL (28u) +#define RES4362_MACPHY_AUX_CLK_AVAIL (29u) +#define RES4362_MACPHY_MAIN_CLK_AVAIL (30u) +#define RES4362_RESERVED_31 (31u) + +#define CST4362_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4362_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4362_SPROM_PRESENT (0x00000010u) + +#define PMU_4362_MACCORE_0_RES_REQ_MASK (0x3FCBF7FFu) +#define PMU_4362_MACCORE_1_RES_REQ_MASK (0x7FFB3647u) + +#define PMU_MACCORE_0_RES_REQ_TIMER 0x1d000000 +#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F + +#define PMU43012_MAC_RES_REQ_TIMER 
0x1D000000 +#define PMU43012_MAC_RES_REQ_MASK 0x3FBBF7FF + +#define PMU_MACCORE_1_RES_REQ_TIMER 0x1d000000 +#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F + +/* defines to detect active host interface in use */ +#define CHIP_HOSTIF_PCIEMODE 0x1 +#define CHIP_HOSTIF_USBMODE 0x2 +#define CHIP_HOSTIF_SDIOMODE 0x4 +#define CHIP_HOSTIF_PCIE(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE) +#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE) +#define CHIP_HOSTIF_SDIO(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE) + +#define PATCHTBL_SIZE (0x800) +#define CR4_4335_RAM_BASE (0x180000) +#define CR4_4345_LT_C0_RAM_BASE (0x1b0000) +#define CR4_4345_GE_C0_RAM_BASE (0x198000) +#define CR4_4349_RAM_BASE (0x180000) +#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000) +#define CR4_4350_RAM_BASE (0x180000) +#define CR4_4360_RAM_BASE (0x0) +#define CR4_43602_RAM_BASE (0x180000) + +#define CR4_4347_RAM_BASE (0x170000) +#define CR4_4362_RAM_BASE (0x170000) +#define CR4_4364_RAM_BASE (0x160000) +#define CR4_4369_RAM_BASE (0x170000) +#define CR4_4377_RAM_BASE (0x170000) +#define CR4_43751_RAM_BASE (0x170000) +#define CR4_43752_RAM_BASE (0x170000) +#define CR4_4376_RAM_BASE (0x352000) +#define CR4_4378_RAM_BASE (0x352000) +#define CR4_4387_RAM_BASE (0x740000) +#define CR4_4385_RAM_BASE (0x740000) +#define CA7_4388_RAM_BASE (0x200000) +#define CA7_4389_RAM_BASE (0x200000) +#define CA7_4385_RAM_BASE (0x200000) + +/* Physical memory in 4388a0 HWA is 64KB (8192 x 64 bits) even though + * the memory space allows 192KB (0x1850_0000 - 0x1852_FFFF) + */ +#define HWA_MEM_BASE_4388 (0x18520000u) +#define HWA_MEM_SIZE_4388 (0x10000u) + +/* 43012 PMU resources based on pmu_params.xls - Start */ +#define RES43012_MEMLPLDO_PU 0 +#define RES43012_PMU_SLEEP 1 +#define RES43012_FAST_LPO 2 +#define RES43012_BTLPO_3P3 3 +#define RES43012_SR_POK 4 +#define RES43012_DUMMY_PWRSW 5 +#define RES43012_DUMMY_LDO3P3 6 +#define RES43012_DUMMY_BT_LDO3P3 7 +#define RES43012_DUMMY_RADIO 8 +#define RES43012_VDDB_VDDRET 9 +#define RES43012_HV_LDO3P3 10 +#define RES43012_OTP_PU 11 +#define RES43012_XTAL_PU 12 +#define RES43012_SR_CLK_START 13 +#define RES43012_XTAL_STABLE 14 +#define RES43012_FCBS 15 +#define RES43012_CBUCK_MODE 16 +#define RES43012_CORE_READY 17 +#define RES43012_ILP_REQ 18 +#define RES43012_ALP_AVAIL 19 +#define RES43012_RADIOLDO_1P8 20 +#define RES43012_MINI_PMU 21 +#define RES43012_UNUSED 22 +#define RES43012_SR_SAVE_RESTORE 23 +#define RES43012_PHY_PWRSW 24 +#define RES43012_VDDB_CLDO 25 +#define RES43012_SUBCORE_PWRSW 26 +#define RES43012_SR_SLEEP 27 +#define RES43012_HT_START 28 +#define RES43012_HT_AVAIL 29 +#define RES43012_MACPHY_CLK_AVAIL 30 +#define CST43012_SPROM_PRESENT 0x00000010 + +/* SR Control0 bits */ +#define SR0_43012_SR_ENG_EN_MASK 0x1u +#define SR0_43012_SR_ENG_EN_SHIFT 0u +#define SR0_43012_SR_ENG_CLK_EN (1u << 1u) +#define SR0_43012_SR_RSRC_TRIGGER (0xCu << 2u) +#define SR0_43012_SR_WD_MEM_MIN_DIV (0x3u << 6u) +#define SR0_43012_SR_MEM_STBY_ALLOW_MSK (1u << 16u) +#define SR0_43012_SR_MEM_STBY_ALLOW_SHIFT 16u +#define SR0_43012_SR_ENABLE_ILP (1u << 17u) +#define SR0_43012_SR_ENABLE_ALP (1u << 18u) +#define SR0_43012_SR_ENABLE_HT (1u << 19u) +#define SR0_43012_SR_ALLOW_PIC (3u << 20u) +#define SR0_43012_SR_PMU_MEM_DISABLE (1u << 30u) +#define CC_43012_VDDM_PWRSW_EN_MASK (1u << 20u) +#define CC_43012_VDDM_PWRSW_EN_SHIFT (20u) +#define CC_43012_SDIO_AOS_WAKEUP_MASK (1u << 24u) +#define CC_43012_SDIO_AOS_WAKEUP_SHIFT (24u) + +/* 43012 - offset at 5K */ +#define 
SR1_43012_SR_INIT_ADDR_MASK 0x3ffu +#define SR1_43012_SR_ASM_ADDR 0xAu + +/* PLL usage in 43012 */ +#define PMU43012_PLL0_PC0_NDIV_INT_MASK 0x0000003fu +#define PMU43012_PLL0_PC0_NDIV_INT_SHIFT 0u +#define PMU43012_PLL0_PC0_NDIV_FRAC_MASK 0xfffffc00u +#define PMU43012_PLL0_PC0_NDIV_FRAC_SHIFT 10u +#define PMU43012_PLL0_PC3_PDIV_MASK 0x00003c00u +#define PMU43012_PLL0_PC3_PDIV_SHIFT 10u +#define PMU43012_PLL_NDIV_FRAC_BITS 20u +#define PMU43012_PLL_P_DIV_SCALE_BITS 10u + +#define CCTL_43012_ARM_OFFCOUNT_MASK 0x00000003u +#define CCTL_43012_ARM_OFFCOUNT_SHIFT 0u +#define CCTL_43012_ARM_ONCOUNT_MASK 0x0000000cu +#define CCTL_43012_ARM_ONCOUNT_SHIFT 2u + +/* PMU Rev >= 30 */ +#define PMU30_ALPCLK_ONEMHZ_ENAB 0x80000000u + +/* 43012 PMU Chip Control Registers */ +#define PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON 0x00000010u +#define PMUCCTL02_43012_PHY_PWRSW_FORCE_ON 0x00000040u +#define PMUCCTL02_43012_LHL_TIMER_SELECT 0x00000800u +#define PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON 0x00008000u +#define PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB 0x00010000u +#define PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF (1u << 12u) + +#define PMUCCTL04_43012_BBPLL_ENABLE_PWRDN 0x00100000u +#define PMUCCTL04_43012_BBPLL_ENABLE_PWROFF 0x00200000u +#define PMUCCTL04_43012_FORCE_BBPLL_ARESET 0x00400000u +#define PMUCCTL04_43012_FORCE_BBPLL_DRESET 0x00800000u +#define PMUCCTL04_43012_FORCE_BBPLL_PWRDN 0x01000000u +#define PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH 0x02000000u +#define PMUCCTL04_43012_FORCE_BBPLL_PWROFF 0x04000000u +#define PMUCCTL04_43012_DISABLE_LQ_AVAIL 0x08000000u +#define PMUCCTL04_43012_DISABLE_HT_AVAIL 0x10000000u +#define PMUCCTL04_43012_USE_LOCK 0x20000000u +#define PMUCCTL04_43012_OPEN_LOOP_ENABLE 0x40000000u +#define PMUCCTL04_43012_FORCE_OPEN_LOOP 0x80000000u +#define PMUCCTL05_43012_DISABLE_SPM_CLK (1u << 8u) +#define PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN (1u << 14u) +#define PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB (1u << 31u) +#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_MASK 0x00000FC0u +#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_SHIFT 6u +#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_MASK 0x00FC0000u +#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_SHIFT 18u +#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x07000000u +#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 24u +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x0003F000u +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 12u +#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_MASK 0x00000038u +#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_SHIFT 3u + +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_MASK 0x00000FC0u +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_SHIFT 6u +/* during normal operation normal value is reduced for optimized power */ +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_VAL 0x1Fu + +#define PMUCCTL13_43012_FCBS_UP_TRIG_EN 0x00000400 + +#define PMUCCTL14_43012_ARMCM3_RESET_INITVAL 0x00000001 +#define PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL 0x00000020 +#define PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL 0x00000080 +#define PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL 0x00000200 +#define PMUCCTL14_43012_SDIOD_RESET_INIVAL 0x00000400 +#define PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL 0x00001000 +#define PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL 0x00004000 +#define PMUCCTL14_43012_M2MDMA_RESET_INITVAL 0x00008000 +#define PMUCCTL14_43012_DISABLE_LQ_AVAIL 0x08000000 + +#define VREG6_43012_MEMLPLDO_ADJ_MASK 0x0000F000 +#define 
VREG6_43012_MEMLPLDO_ADJ_SHIFT 12 + +#define VREG6_43012_LPLDO_ADJ_MASK 0x000000F0 +#define VREG6_43012_LPLDO_ADJ_SHIFT 4 + +#define VREG7_43012_PWRSW_1P8_PU_MASK 0x00400000 +#define VREG7_43012_PWRSW_1P8_PU_SHIFT 22 + +/* 4378 PMU Chip Control Registers */ +#define PMUCCTL03_4378_XTAL_CORESIZE_PMOS_NORMAL_MASK 0x001F8000 +#define PMUCCTL03_4378_XTAL_CORESIZE_PMOS_NORMAL_SHIFT 15 +#define PMUCCTL03_4378_XTAL_CORESIZE_PMOS_NORMAL_VAL 0x3F + +#define PMUCCTL03_4378_XTAL_CORESIZE_NMOS_NORMAL_MASK 0x07E00000 +#define PMUCCTL03_4378_XTAL_CORESIZE_NMOS_NORMAL_SHIFT 21 +#define PMUCCTL03_4378_XTAL_CORESIZE_NMOS_NORMAL_VAL 0x3F + +#define PMUCCTL03_4378_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x38000000 +#define PMUCCTL03_4378_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 27 +#define PMUCCTL03_4378_XTAL_SEL_BIAS_RES_NORMAL_VAL 0x0 + +#define PMUCCTL00_4378_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x00000FC0 +#define PMUCCTL00_4378_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 6 +#define PMUCCTL00_4378_XTAL_CORESIZE_BIAS_ADJ_NORMAL_VAL 0x5 + +#define PMUCCTL00_4378_XTAL_RES_BYPASS_NORMAL_MASK 0x00038000 +#define PMUCCTL00_4378_XTAL_RES_BYPASS_NORMAL_SHIFT 15 +#define PMUCCTL00_4378_XTAL_RES_BYPASS_NORMAL_VAL 0x7 + +/* 4387 PMU Chip Control Registers */ +#define PMUCCTL03_4387_XTAL_CORESIZE_PMOS_NORMAL_MASK 0x001F8000 +#define PMUCCTL03_4387_XTAL_CORESIZE_PMOS_NORMAL_SHIFT 15 +#define PMUCCTL03_4387_XTAL_CORESIZE_PMOS_NORMAL_VAL 0x3F + +#define PMUCCTL03_4387_XTAL_CORESIZE_NMOS_NORMAL_MASK 0x07E00000 +#define PMUCCTL03_4387_XTAL_CORESIZE_NMOS_NORMAL_SHIFT 21 +#define PMUCCTL03_4387_XTAL_CORESIZE_NMOS_NORMAL_VAL 0x3F + +#define PMUCCTL03_4387_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x38000000 +#define PMUCCTL03_4387_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 27 +#define PMUCCTL03_4387_XTAL_SEL_BIAS_RES_NORMAL_VAL 0x0 + +#define PMUCCTL00_4387_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x00000FC0 +#define PMUCCTL00_4387_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 6 +#define PMUCCTL00_4387_XTAL_CORESIZE_BIAS_ADJ_NORMAL_VAL 0x5 + +#define PMUCCTL00_4387_XTAL_RES_BYPASS_NORMAL_MASK 0x00038000 +#define PMUCCTL00_4387_XTAL_RES_BYPASS_NORMAL_SHIFT 15 +#define PMUCCTL00_4387_XTAL_RES_BYPASS_NORMAL_VAL 0x7 + +/* GPIO pins */ +#define CC_PIN_GPIO_00 (0u) +#define CC_PIN_GPIO_01 (1u) +#define CC_PIN_GPIO_02 (2u) +#define CC_PIN_GPIO_03 (3u) +#define CC_PIN_GPIO_04 (4u) +#define CC_PIN_GPIO_05 (5u) +#define CC_PIN_GPIO_06 (6u) +#define CC_PIN_GPIO_07 (7u) +#define CC_PIN_GPIO_08 (8u) +#define CC_PIN_GPIO_09 (9u) +#define CC_PIN_GPIO_10 (10u) +#define CC_PIN_GPIO_11 (11u) +#define CC_PIN_GPIO_12 (12u) +#define CC_PIN_GPIO_13 (13u) +#define CC_PIN_GPIO_14 (14u) +#define CC_PIN_GPIO_15 (15u) +#define CC_PIN_GPIO_16 (16u) +#define CC_PIN_GPIO_17 (17u) +#define CC_PIN_GPIO_18 (18u) +#define CC_PIN_GPIO_19 (19u) +#define CC_PIN_GPIO_20 (20u) +#define CC_PIN_GPIO_21 (21u) +#define CC_PIN_GPIO_22 (22u) +#define CC_PIN_GPIO_23 (23u) +#define CC_PIN_GPIO_24 (24u) +#define CC_PIN_GPIO_25 (25u) +#define CC_PIN_GPIO_26 (26u) +#define CC_PIN_GPIO_27 (27u) +#define CC_PIN_GPIO_28 (28u) +#define CC_PIN_GPIO_29 (29u) +#define CC_PIN_GPIO_30 (30u) +#define CC_PIN_GPIO_31 (31u) + +/* Last GPIO Pad */ +#define CC_PIN_GPIO_LAST CC_PIN_GPIO_31 + +/* GCI chipcontrol register indices */ +#define CC_GCI_CHIPCTRL_00 (0) +#define CC_GCI_CHIPCTRL_01 (1) +#define CC_GCI_CHIPCTRL_02 (2) +#define CC_GCI_CHIPCTRL_03 (3) +#define CC_GCI_CHIPCTRL_04 (4) +#define CC_GCI_CHIPCTRL_05 (5) +#define CC_GCI_CHIPCTRL_06 (6) +#define CC_GCI_CHIPCTRL_07 (7) +#define CC_GCI_CHIPCTRL_08 (8) +#define CC_GCI_CHIPCTRL_09 (9) +#define 
CC_GCI_CHIPCTRL_10 (10) +#define CC_GCI_CHIPCTRL_11 (11) +#define CC_GCI_CHIPCTRL_12 (12) +#define CC_GCI_CHIPCTRL_13 (13) +#define CC_GCI_CHIPCTRL_14 (14) +#define CC_GCI_CHIPCTRL_15 (15) +#define CC_GCI_CHIPCTRL_16 (16) +#define CC_GCI_CHIPCTRL_17 (17) +#define CC_GCI_CHIPCTRL_18 (18) +#define CC_GCI_CHIPCTRL_19 (19) +#define CC_GCI_CHIPCTRL_20 (20) +#define CC_GCI_CHIPCTRL_21 (21) +#define CC_GCI_CHIPCTRL_22 (22) +#define CC_GCI_CHIPCTRL_23 (23) +#define CC_GCI_CHIPCTRL_24 (24) +#define CC_GCI_CHIPCTRL_25 (25) +#define CC_GCI_CHIPCTRL_26 (26) +#define CC_GCI_CHIPCTRL_27 (27) +#define CC_GCI_CHIPCTRL_28 (28) + +/* GCI chip ctrl SDTC Soft reset */ +#define GCI_CHIP_CTRL_SDTC_SOFT_RESET (1 << 31) + +#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12) + +#define CC_GCI_04_SDIO_DRVSTR_SHIFT 15 +#define CC_GCI_04_SDIO_DRVSTR_MASK (0x0f << CC_GCI_04_SDIO_DRVSTR_SHIFT) /* 0x00078000 */ +#define CC_GCI_04_SDIO_DRVSTR_OVERRIDE_BIT (1 << 18) +#define CC_GCI_04_SDIO_DRVSTR_DEFAULT_MA 14 +#define CC_GCI_04_SDIO_DRVSTR_MIN_MA 2 +#define CC_GCI_04_SDIO_DRVSTR_MAX_MA 16 + +#define CC_GCI_04_4387C0_XTAL_PM_CLK (1u << 20u) + +#define CC_GCI_05_4387C0_AFE_RET_ENB_MASK (1u << 7u) + +#define CC_GCI_CHIPCTRL_07_BTDEFLO_ANT0_NBIT 2u +#define CC_GCI_CHIPCTRL_07_BTDEFLO_ANT0_MASK 0xFu +#define CC_GCI_CHIPCTRL_07_BTDEFHI_ANT0_NBIT 11u +#define CC_GCI_CHIPCTRL_07_BTDEFHI_ANT0_MASK 1u + +#define CC_GCI_CHIPCTRL_18_BTDEF_ANT0_NBIT 10u +#define CC_GCI_CHIPCTRL_18_BTDEF_ANT0_MASK 0x1Fu +#define CC_GCI_CHIPCTRL_18_BTDEFLO_ANT1_NBIT 15u +#define CC_GCI_CHIPCTRL_18_BTDEFLO_ANT1_MASK 1u +#define CC_GCI_CHIPCTRL_18_BTDEFHI_ANT1_NBIT 26u +#define CC_GCI_CHIPCTRL_18_BTDEFHI_ANT1_MASK 0x3Fu + +#define CC_GCI_CHIPCTRL_19_BTDEF_ANT1_NBIT 10u +#define CC_GCI_CHIPCTRL_19_BTDEF_ANT1_MASK 0x7u + +#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_FORCE_NBIT 16u +#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_NBIT 17u +#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_FORCE_NBIT 18u +#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_VAL_NBIT 19u +#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_NBIT 20u +#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_NBIT 21u +#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_FORCE_NBIT 22u +#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_VAL_NBIT 23u +#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_FORCE_NBIT 24u +#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_VAL_NBIT 25u +#define CC_GCI_CHIPCTRL_23_LVM_MODE_DISABLE_NBIT 26u + +#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_FORCE_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_FORCE_NBIT) +#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_NBIT) +#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_FORCE_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_FORCE_NBIT) +#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_VAL_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_VAL_NBIT) +#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_NBIT) +#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_NBIT) +#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_FORCE_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_FORCE_NBIT) +#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_VAL_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_VAL_NBIT) +#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_FORCE_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_FORCE_NBIT) +#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_VAL_MASK (1u <<\ + 
CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_VAL_NBIT) +#define CC_GCI_CHIPCTRL_23_LVM_MODE_DISABLE_MASK (1u <<\ + CC_GCI_CHIPCTRL_23_LVM_MODE_DISABLE_NBIT) + +/* 2G core0/core1 Pulse width register (offset : 0x47C) +* wl_rx_long_pulse_width_2g_core0 [4:0]; +* wl_rx_short_pulse_width_2g_core0 [9:5]; +* wl_rx_long_pulse_width_2g_core1 [20:16]; +* wl_rx_short_pulse_width_2g_core1 [25:21]; +*/ +#define CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE1_NBIT (16u) +#define CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE0_MASK (0x1Fu) +#define CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE1_MASK (0x1Fu <<\ + CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE1_NBIT) + +#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE0_NBIT (5u) +#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE1_NBIT (16u) +#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE1_NBIT (21u) + +#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE0_MASK (0x1Fu) +#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE1_MASK (0x1Fu <<\ + CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE1_NBIT) +#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE0_MASK (0x1Fu <<\ + CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE0_NBIT) +#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE1_MASK (0x1Fu <<\ + CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE1_NBIT) + +/* 5G core0/Core1 (offset : 0x480) +* wl_rx_long_pulse_width_5g[4:0]; +* wl_rx_short_pulse_width_5g[9:5] +*/ + +#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_5G_NBIT (5u) + +#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_5G_MASK (0x1Fu) +#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_5G_MASK (0x1Fu <<\ + CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_5G_NBIT) + +#define CC_GCI_CNCB_GLITCH_FILTER_WIDTH_MASK (0xFFu) + +#define CC_GCI_RESET_OVERRIDE_NBIT 0x1u +#define CC_GCI_RESET_OVERRIDE_MASK (0x1u << \ + CC_GCI_RESET_OVERRIDE_NBIT) + +#define CC_GCI_06_JTAG_SEL_SHIFT 4u +#define CC_GCI_06_JTAG_SEL_MASK (1u << 4u) + +#define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00u) >> 8u) + +#define CC_GCI_03_LPFLAGS_SFLASH_MASK (0xFFFFFFu << 8u) +#define CC_GCI_03_LPFLAGS_SFLASH_VAL (0xCCCCCCu << 8u) + +#define CC_GCI_13_INSUFF_TREFUP_FIX_SHIFT 31u +/* Note: For 4368 B0 onwards, the shift offset remains the same, +* but the Chip Common Ctrl GCI register is 16 +*/ +#define CC_GCI_16_INSUFF_TREFUP_FIX_SHIFT 31u + +#define GPIO_CTRL_REG_DISABLE_INTERRUPT (3u << 9u) +#define GPIO_CTRL_REG_COUNT 40 + +#ifdef WL_INITVALS +#define XTAL_HQ_SETTING_4387 (wliv_pmu_xtal_HQ) +#define XTAL_LQ_SETTING_4387 (wliv_pmu_xtal_LQ) +#else +#define XTAL_HQ_SETTING_4387 (0xFFF94D30u) +#define XTAL_LQ_SETTING_4387 (0xFFF94380u) +#endif + +#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_MASK (0x00000200u) +#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_SHIFT (9u) +#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_MASK (0xFFFFFC00u) +#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_SHIFT (10u) + +#define CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_MASK (0x0000FC00u) +#define CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_SHIFT (10u) +#define CC_GCI_17_BBPLL_CH_CTRL_EN_MASK (0x04000000u) + +#define CC_GCI_20_BBPLL_CH_CTRL_GRP_MASK (0xFC000000u) +#define CC_GCI_20_BBPLL_CH_CTRL_GRP_SHIFT (26u) + +/* GCI Chip Ctrl Regs */ +#define GCI_CC28_IHRP_SEL_MASK (7 << 24) +#define GCI_CC28_IHRP_SEL_SHIFT (24u) + +/* 30=MACPHY_CLK_MAIN, 29=MACPHY_CLK_AUX, 23=RADIO_PU_MAIN, 22=CORE_RDY_MAIN + * 20=RADIO_PU_AUX, 18=CORE_RDY_AUX, 14=PWRSW_MAIN, 11=PWRSW_AUX + */ +#define GRP_PD_TRIGGER_MASK_4387 (0x60d44800u) + +/* power down ch0=MAIN/AUX PHY_clk, ch2=MAIN/AUX MAC_clk, ch5=RFFE_clk */ +#define GRP_PD_MASK_4387 (0x25u) + +#define 
CC_GCI_CHIPCTRL_11_2x2_ANT_MASK 0x03 +#define CC_GCI_CHIPCTRL_11_SHIFT_ANT_MASK 26 + +/* GCI chipstatus register indices */ +#define GCI_CHIPSTATUS_00 (0) +#define GCI_CHIPSTATUS_01 (1) +#define GCI_CHIPSTATUS_02 (2) +#define GCI_CHIPSTATUS_03 (3) +#define GCI_CHIPSTATUS_04 (4) +#define GCI_CHIPSTATUS_05 (5) +#define GCI_CHIPSTATUS_06 (6) +#define GCI_CHIPSTATUS_07 (7) +#define GCI_CHIPSTATUS_08 (8) +#define GCI_CHIPSTATUS_09 (9) +#define GCI_CHIPSTATUS_10 (10) +#define GCI_CHIPSTATUS_11 (11) +#define GCI_CHIPSTATUS_12 (12) +#define GCI_CHIPSTATUS_13 (13) +#define GCI_CHIPSTATUS_15 (15) + +/* 43012 GCI chipstatus registers */ +#define GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK (1 << 3) + +/* GCI Core Control Reg */ +#define GCI_CORECTRL_SR_MASK (1 << 0) /**< SECI block Reset */ +#define GCI_CORECTRL_RSL_MASK (1 << 1) /**< ResetSECILogic */ +#define GCI_CORECTRL_ES_MASK (1 << 2) /**< EnableSECI */ +#define GCI_CORECTRL_FSL_MASK (1 << 3) /**< Force SECI Out Low */ +#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */ +#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */ +#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */ +#define GCI_CORECTRL_FORCEREGCLK_MASK (1 << 18) /* ForceRegClk */ + +/* 4378 & 4387 GCI AVS function */ +#define GCI6_AVS_ENAB 1u +#define GCI6_AVS_ENAB_SHIFT 31u +#define GCI6_AVS_ENAB_MASK (1u << GCI6_AVS_ENAB_SHIFT) +#define GCI6_AVS_CBUCK_VOLT_SHIFT 25u +#define GCI6_AVS_CBUCK_VOLT_MASK (0x1Fu << GCI6_AVS_CBUCK_VOLT_SHIFT) + +/* GCI GPIO for function sel GCI-0/GCI-1 */ +#define CC_GCI_GPIO_0 (0) +#define CC_GCI_GPIO_1 (1) +#define CC_GCI_GPIO_2 (2) +#define CC_GCI_GPIO_3 (3) +#define CC_GCI_GPIO_4 (4) +#define CC_GCI_GPIO_5 (5) +#define CC_GCI_GPIO_6 (6) +#define CC_GCI_GPIO_7 (7) +#define CC_GCI_GPIO_8 (8) +#define CC_GCI_GPIO_9 (9) +#define CC_GCI_GPIO_10 (10) +#define CC_GCI_GPIO_11 (11) +#define CC_GCI_GPIO_12 (12) +#define CC_GCI_GPIO_13 (13) +#define CC_GCI_GPIO_14 (14) +#define CC_GCI_GPIO_15 (15) + +/* indicates Invalid GPIO, e.g. 
when PAD GPIO doesn't map to GCI GPIO */ +#define CC_GCI_GPIO_INVALID 0xFF + +/* 4378 LHL GPIO configuration */ +#define LHL_IOCFG_P_ADDR_LHL_GPIO_DOUT_SEL_SHIFT (3u) +#define LHL_IOCFG_P_ADDR_LHL_GPIO_DOUT_SEL_MASK (1u << LHL_IOCFG_P_ADDR_LHL_GPIO_DOUT_SEL_SHIFT) + +/* 4378 LHL SPMI bit definitions */ +#define LHL_LP_CTL5_SPMI_DATA_SEL_SHIFT (8u) +#define LHL_LP_CTL5_SPMI_DATA_SEL_MASK (0x3u << LHL_LP_CTL5_SPMI_DATA_SEL_SHIFT) +#define LHL_LP_CTL5_SPMI_CLK_SEL_SHIFT (6u) +#define LHL_LP_CTL5_SPMI_CLK_SEL_MASK (0x3u << LHL_LP_CTL5_SPMI_CLK_SEL_SHIFT) +#define LHL_LP_CTL5_SPMI_CLK_DATA_GPIO0 (0u) +#define LHL_LP_CTL5_SPMI_CLK_DATA_GPIO1 (1u) +#define LHL_LP_CTL5_SPMI_CLK_DATA_GPIO2 (2u) + +/* Please do not use the following defines */ +/* find the 4 bit mask given the bit position */ +#define GCIMASK(pos) (((uint32)0xF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL(val, pos) ((((uint32)val) << pos) & GCIMASK(pos)) +/* Extract nibble from a given position */ +#define GCIGETNBL(val, pos) ((val >> pos) & 0xF) + +/* find the 8 bit mask given the bit position */ +#define GCIMASK_8B(pos) (((uint32)0xFF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL_8B(val, pos) ((((uint32)val) << pos) & GCIMASK_8B(pos)) +/* Extract byte from a given position */ +#define GCIGETNBL_8B(val, pos) ((val >> pos) & 0xFF) + +/* find the 4 bit mask given the bit position */ +#define GCIMASK_4B(pos) (((uint32)0xF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL_4B(val, pos) ((((uint32)val) << pos) & GCIMASK_4B(pos)) +/* Extract nibble from a given position */ +#define GCIGETNBL_4B(val, pos) ((val >> pos) & 0xF) + +/* GCI Intstatus(Mask)/WakeMask Register bits. */ +#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTSTATUS_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTSTATUS_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTSTATUS_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTSTATUS_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTSTATUS_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTSTATUS_EVENT (1 << 21) /* GCI Event Interrupt */ +#define GCI_INTSTATUS_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */ +#define GCI_INTSTATUS_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */ +#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */ +#define GCI_INTSTATUS_LHLWLWAKE (1 << 30) /* LHL WL wake */ + +/* GCI IntMask Register bits. 
*/ +#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTMASK_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTMASK_EVENT (1 << 21) /* GCI Event Interrupt */ +#define GCI_INTMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */ +#define GCI_INTMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */ +#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ +#define GCI_INTMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */ + +/* GCI WakeMask Register bits. */ +#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_WAKEMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_WAKEMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_WAKEMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_WAKE_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_WAKEMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_WAKEMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_WAKEMASK_EVENT (1 << 21) /* GCI Event Interrupt */ +#define GCI_WAKEMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */ +#define GCI_WAKEMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */ +#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ +#define GCI_WAKEMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */ + +#define GCI_WAKE_ON_GCI_GPIO1 1 +#define GCI_WAKE_ON_GCI_GPIO2 2 +#define GCI_WAKE_ON_GCI_GPIO3 3 +#define GCI_WAKE_ON_GCI_GPIO4 4 +#define GCI_WAKE_ON_GCI_GPIO5 5 +#define GCI_WAKE_ON_GCI_GPIO6 6 +#define GCI_WAKE_ON_GCI_GPIO7 7 +#define GCI_WAKE_ON_GCI_GPIO8 8 +#define GCI_WAKE_ON_GCI_SECI_IN 9 + +#define PMU_EXT_WAKE_MASK_0_SDIO (1u << 2u) +#define PMU_EXT_WAKE_MASK_0_PCIE_PERST (1u << 5u) + +#define PMU_4362_EXT_WAKE_MASK_0_SDIO (1u << 1u | 1u << 2u) + +/* =========== LHL regs =========== */ +#define LHL_PWRSEQCTL_SLEEP_EN (1 << 0) +#define LHL_PWRSEQCTL_PMU_SLEEP_MODE (1 << 1) +#define LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN (1 << 2) +#define LHL_PWRSEQCTL_PMU_TOP_ISO_EN (1 << 3) +#define LHL_PWRSEQCTL_PMU_TOP_SLB_EN (1 << 4) +#define LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN (1 << 5) +#define LHL_PWRSEQCTL_PMU_CLDO_PD (1 << 6) +#define LHL_PWRSEQCTL_PMU_LPLDO_PD (1 << 7) +#define LHL_PWRSEQCTL_PMU_RSRC6_EN (1 << 8) + +#define PMU_SLEEP_MODE_0 (LHL_PWRSEQCTL_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN) + +#define PMU_SLEEP_MODE_1 (LHL_PWRSEQCTL_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_SLEEP_MODE |\ + LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\ + LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\ + LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\ + LHL_PWRSEQCTL_PMU_CLDO_PD |\ + LHL_PWRSEQCTL_PMU_RSRC6_EN) + +#define 
PMU_SLEEP_MODE_2 (LHL_PWRSEQCTL_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_SLEEP_MODE |\ + LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\ + LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\ + LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\ + LHL_PWRSEQCTL_PMU_CLDO_PD |\ + LHL_PWRSEQCTL_PMU_LPLDO_PD |\ + LHL_PWRSEQCTL_PMU_RSRC6_EN) + +#define LHL_PWRSEQ_CTL (0x000000ff) + +/* LHL Top Level Power Up Control Register (lhl_top_pwrup_ctl_adr, Offset 0xE78) +* Top Level Counter values for isolation, retention, Power Switch control +*/ +#define LHL_PWRUP_ISOLATION_CNT (0x6 << 8) +#define LHL_PWRUP_RETENTION_CNT (0x5 << 16) +#define LHL_PWRUP_PWRSW_CNT (0x7 << 24) +/* Mask is taken only for isolation 8:13 , Retention 16:21 , +* Power Switch control 24:29 +*/ +#define LHL_PWRUP_CTL_MASK (0x3F3F3F00) +#define LHL_PWRUP_CTL (LHL_PWRUP_ISOLATION_CNT |\ + LHL_PWRUP_RETENTION_CNT |\ + LHL_PWRUP_PWRSW_CNT) + +#define LHL_PWRUP2_CLDO_DN_CNT (0x0) +#define LHL_PWRUP2_LPLDO_DN_CNT (0x0 << 8) +#define LHL_PWRUP2_RSRC6_DN_CN (0x4 << 16) +#define LHL_PWRUP2_RSRC7_DN_CN (0x0 << 24) +#define LHL_PWRUP2_CTL_MASK (0x3F3F3F3F) +#define LHL_PWRUP2_CTL (LHL_PWRUP2_CLDO_DN_CNT |\ + LHL_PWRUP2_LPLDO_DN_CNT |\ + LHL_PWRUP2_RSRC6_DN_CN |\ + LHL_PWRUP2_RSRC7_DN_CN) + +/* LHL Top Level Power Down Control Register (lhl_top_pwrdn_ctl_adr, Offset 0xE74) */ +#define LHL_PWRDN_SLEEP_CNT (0x4) +#define LHL_PWRDN_CTL_MASK (0x3F) + +/* LHL Top Level Power Down Control 2 Register (lhl_top_pwrdn2_ctl_adr, Offset 0xE80) */ +#define LHL_PWRDN2_CLDO_DN_CNT (0x4) +#define LHL_PWRDN2_LPLDO_DN_CNT (0x4 << 8) +#define LHL_PWRDN2_RSRC6_DN_CN (0x3 << 16) +#define LHL_PWRDN2_RSRC7_DN_CN (0x0 << 24) +#define LHL_PWRDN2_CTL (LHL_PWRDN2_CLDO_DN_CNT |\ + LHL_PWRDN2_LPLDO_DN_CNT |\ + LHL_PWRDN2_RSRC6_DN_CN |\ + LHL_PWRDN2_RSRC7_DN_CN) +#define LHL_PWRDN2_CTL_MASK (0x3F3F3F3F) + +#define LHL_FAST_WRITE_EN (1 << 14) + +#define LHL_WL_MACTIMER_MASK 0xFFFFFFFF +/* Write 1 to clear */ +#define LHL_WL_MACTIMER_INT_ST_MASK (0x1u) + +/* WL ARM Timer0 Interrupt Mask (lhl_wl_armtim0_intrp_adr) */ +#define LHL_WL_ARMTIM0_INTRP_EN 0x00000001 +#define LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER 0x00000002 + +/* WL ARM Timer0 Interrupt Status (lhl_wl_armtim0_st_adr) */ +#define LHL_WL_ARMTIM0_ST_WL_ARMTIM_INT_ST 0x00000001 + +/* WL MAC TimerX Interrupt Mask (lhl_wl_mactimX_intrp_adr) */ +#define LHL_WL_MACTIM_INTRP_EN 0x00000001 +#define LHL_WL_MACTIM_INTRP_EDGE_TRIGGER 0x00000002 + +/* WL MAC TimerX Interrupt Status (lhl_wl_mactimX_st_adr) */ +#define LHL_WL_MACTIM_ST_WL_MACTIM_INT_ST 0x00000001 + +/* LHL Wakeup Status (lhl_wkup_status_adr) */ +#define LHL_WKUP_STATUS_WR_PENDING_ARMTIM0 0x00100000 + +#define LHL_PS_MODE_0 0 +#define LHL_PS_MODE_1 1 + +/* GCI EventIntMask Register SW bits */ +#define GCI_MAILBOXDATA_TOWLAN (1 << 0) +#define GCI_MAILBOXDATA_TOBT (1 << 1) +#define GCI_MAILBOXDATA_TONFC (1 << 2) +#define GCI_MAILBOXDATA_TOGPS (1 << 3) +#define GCI_MAILBOXDATA_TOLTE (1 << 4) +#define GCI_MAILBOXACK_TOWLAN (1 << 8) +#define GCI_MAILBOXACK_TOBT (1 << 9) +#define GCI_MAILBOXACK_TONFC (1 << 10) +#define GCI_MAILBOXACK_TOGPS (1 << 11) +#define GCI_MAILBOXACK_TOLTE (1 << 12) +#define GCI_WAKE_TOWLAN (1 << 16) +#define GCI_WAKE_TOBT (1 << 17) +#define GCI_WAKE_TONFC (1 << 18) +#define GCI_WAKE_TOGPS (1 << 19) +#define GCI_WAKE_TOLTE (1 << 20) +#define GCI_SWREADY (1 << 24) + +/* GCI SECI_OUT TX Status Register bits */ +#define GCI_SECIOUT_TXSTATUS_TXHALT (1 << 0) +#define GCI_SECIOUT_TXSTATUS_TI (1 << 16) + +/* 43012 MUX options */ +#define MUXENAB43012_HOSTWAKE_MASK (0x00000001)
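The LHL top-level power-up counters above (lhl_top_pwrup_ctl_adr) pack three 6-bit fields at bits 13:8, 21:16 and 29:24, so the OR'd LHL_PWRUP_CTL value must fall entirely inside LHL_PWRUP_CTL_MASK. A minimal stand-alone check of that arithmetic (values copied from the defines above; illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	const unsigned isolation = 0x6u << 8;   /* LHL_PWRUP_ISOLATION_CNT */
	const unsigned retention = 0x5u << 16;  /* LHL_PWRUP_RETENTION_CNT */
	const unsigned pwrsw     = 0x7u << 24;  /* LHL_PWRUP_PWRSW_CNT */
	const unsigned mask      = 0x3F3F3F00u; /* LHL_PWRUP_CTL_MASK */
	const unsigned ctl       = isolation | retention | pwrsw; /* 0x07050600 */

	/* no programmed bit may stray outside the documented field mask */
	printf("LHL_PWRUP_CTL = 0x%08X, bits outside mask = 0x%08X\n",
	       ctl, ctl & ~mask);
	return 0;
}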
+#define MUXENAB43012_GETIX(val, name) (val - 1) + +/* +* Maximum delay for the PMU state transition in us. +* This is an upper bound intended for spinwaits etc. +*/ +#if defined(BCMQT) && defined(BCMDONGLEHOST) +#define PMU_MAX_TRANSITION_DLY 1500000 +#else +#define PMU_MAX_TRANSITION_DLY 15000 +#endif /* BCMQT && BCMDONGLEHOST */ + +/* PMU resource up transition time in ILP cycles */ +#define PMURES_UP_TRANSITION 2 + +#if !defined(BCMDONGLEHOST) +/* +* Information from BT to WLAN over eci_inputlo, eci_inputmi & +* eci_inputhi register. Rev >=21 +*/ +/* Fields in eci_inputlo register - [0:31] */ +#define ECI_INLO_TASKTYPE_MASK 0x0000000f /* [3:0] - 4 bits */ +#define ECI_INLO_TASKTYPE_SHIFT 0 +#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */ +#define ECI_INLO_PKTDUR_SHIFT 4 +#define ECI_INLO_ROLE_MASK 0x00000100 /* [8] - 1 bit */ +#define ECI_INLO_ROLE_SHIFT 8 +#define ECI_INLO_MLP_MASK 0x00000e00 /* [11:9] - 3 bits */ +#define ECI_INLO_MLP_SHIFT 9 +#define ECI_INLO_TXPWR_MASK 0x000ff000 /* [19:12] - 8 bits */ +#define ECI_INLO_TXPWR_SHIFT 12 +#define ECI_INLO_RSSI_MASK 0x0ff00000 /* [27:20] - 8 bits */ +#define ECI_INLO_RSSI_SHIFT 20 +#define ECI_INLO_VAD_MASK 0x10000000 /* [28] - 1 bit */ +#define ECI_INLO_VAD_SHIFT 28 + +/* +* Register eci_inputlo bitfield values. +* - BT packet type information bits [7:0] +*/ +/* [3:0] - Task (link) type */ +#define BT_ACL 0x00 +#define BT_SCO 0x01 +#define BT_eSCO 0x02 +#define BT_A2DP 0x03 +#define BT_SNIFF 0x04 +#define BT_PAGE_SCAN 0x05 +#define BT_INQUIRY_SCAN 0x06 +#define BT_PAGE 0x07 +#define BT_INQUIRY 0x08 +#define BT_MSS 0x09 +#define BT_PARK 0x0a +#define BT_RSSISCAN 0x0b +#define BT_MD_ACL 0x0c +#define BT_MD_eSCO 0x0d +#define BT_SCAN_WITH_SCO_LINK 0x0e +#define BT_SCAN_WITHOUT_SCO_LINK 0x0f +/* [7:4] = packet duration code */ +/* [8] - Master / Slave */ +#define BT_MASTER 0 +#define BT_SLAVE 1 +/* [11:9] - multi-level priority */ +#define BT_LOWEST_PRIO 0x0 +#define BT_HIGHEST_PRIO 0x3 +/* [19:12] - BT transmit power */ +/* [27:20] - BT RSSI */ +/* [28] - VAD silence */ +/* [31:29] - Undefined */ +/* Register eci_inputmi values - [32:63] - none defined */ +/* [63:32] - Undefined */ + +/* Information from WLAN to BT over eci_output register. */ +/* Fields in eci_output register - [0:31] */ +#define ECI48_OUT_MASKMAGIC_HIWORD 0x55550000 +#define ECI_OUT_CHANNEL_MASK(ccrev) ((ccrev) < 35 ? 0xf : (ECI48_OUT_MASKMAGIC_HIWORD | 0xf000)) +#define ECI_OUT_CHANNEL_SHIFT(ccrev) ((ccrev) < 35 ? 0 : 12) +#define ECI_OUT_BW_MASK(ccrev) ((ccrev) < 35 ? 0x70 : (ECI48_OUT_MASKMAGIC_HIWORD | 0xe00)) +#define ECI_OUT_BW_SHIFT(ccrev) ((ccrev) < 35 ? 4 : 9) +#define ECI_OUT_ANTENNA_MASK(ccrev) ((ccrev) < 35 ? 0x80 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x100)) +#define ECI_OUT_ANTENNA_SHIFT(ccrev) ((ccrev) < 35 ? 7 : 8) +#define ECI_OUT_SIMUL_TXRX_MASK(ccrev) \ + ((ccrev) < 35 ? 0x10000 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x80)) +#define ECI_OUT_SIMUL_TXRX_SHIFT(ccrev) ((ccrev) < 35 ? 16 : 7) +#define ECI_OUT_FM_DISABLE_MASK(ccrev) \ + ((ccrev) < 35 ? 0x40000 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x40)) +#define ECI_OUT_FM_DISABLE_SHIFT(ccrev) ((ccrev) < 35 ? 18 : 6) + +/* Indicate control of ECI bits between s/w and dot11mac. 
+/* Indicate control of ECI bits between s/w and dot11mac.
+ * 0 => FW control, 1 => MAC/ucode control
+
+ * Current assignment (ccrev >= 35):
+ * 0 - TxConf (ucode)
+ * 38 - FM disable (wl)
+ * 39 - Allow sim rx (ucode)
+ * 40 - Num antennas (wl)
+ * 43:41 - WLAN channel exclusion BW (wl)
+ * 47:44 - WLAN channel (wl)
+ *
+ * (ccrev < 35)
+ * 15:0 - wl
+ * 16 -
+ * 18 - FM disable
+ * 30 - wl interrupt
+ * 31 - ucode interrupt
+ * others - unassigned (presumed to be with dot11mac/ucode)
+ */
+#define ECI_MACCTRL_BITS 0xbffb0000
+#define ECI_MACCTRLLO_BITS 0x1
+#define ECI_MACCTRLHI_BITS 0xFF
+
+#endif /* !defined(BCMDONGLEHOST) */
+
+/* SECI Status (0x134) & Mask (0x138) bits - Rev 35 */
+#define SECI_STAT_BI (1 << 0) /* Break Interrupt */
+#define SECI_STAT_SPE (1 << 1) /* Parity Error */
+#define SECI_STAT_SFE (1 << 2) /* Framing Error */
+#define SECI_STAT_SDU (1 << 3) /* Data Updated */
+#define SECI_STAT_SADU (1 << 4) /* Auxiliary Data Updated */
+#define SECI_STAT_SAS (1 << 6) /* AUX State */
+#define SECI_STAT_SAS2 (1 << 7) /* AUX2 State */
+#define SECI_STAT_SRITI (1 << 8) /* Idle Timer Interrupt */
+#define SECI_STAT_STFF (1 << 9) /* Tx FIFO Full */
+#define SECI_STAT_STFAE (1 << 10) /* Tx FIFO Almost Empty */
+#define SECI_STAT_SRFE (1 << 11) /* Rx FIFO Empty */
+#define SECI_STAT_SRFAF (1 << 12) /* Rx FIFO Almost Full */
+#define SECI_STAT_SFCE (1 << 13) /* Flow Control Event */
+
+/* SECI configuration */
+#define SECI_MODE_UART 0x0
+#define SECI_MODE_SECI 0x1
+#define SECI_MODE_LEGACY_3WIRE_BT 0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3
+#define SECI_MODE_HALF_SECI 0x4
+
+#define SECI_RESET (1 << 0)
+#define SECI_RESET_BAR_UART (1 << 1)
+#define SECI_ENAB_SECI_ECI (1 << 2)
+#define SECI_ENAB_SECIOUT_DIS (1 << 3)
+#define SECI_MODE_MASK 0x7
+#define SECI_MODE_SHIFT 4 /* (bits 4, 5, 6) */
+#define SECI_UPD_SECI (1 << 7)
+
+#define SECI_AUX_TX_START (1 << 31)
+#define SECI_SLIP_ESC_CHAR 0xDB
+#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR
+#define SECI_SIGNOFF_1 0
+#define SECI_REFRESH_REQ 0xDA
+
+/* seci clk_ctl_st bits */
+#define CLKCTL_STS_HT_AVAIL_REQ (1 << 4)
+#define CLKCTL_STS_SECI_CLK_REQ (1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE (1 << 0)
+#define SECI_UART_MSR_RTS_STATE (1 << 1)
+#define SECI_UART_SECI_IN_STATE (1 << 2)
+#define SECI_UART_SECI_IN2_STATE (1 << 3)
+
+/* GCI RX FIFO Control Register */
+#define GCI_RXF_LVL_MASK (0xFF << 0)
+#define GCI_RXF_TIMEOUT_MASK (0xFF << 8)
+
+/* GCI UART Registers' Bit definitions */
+/* Seci Fifo Level Register */
+#define SECI_TXF_LVL_MASK (0x3F << 8)
+#define TXF_AE_LVL_DEFAULT 0x4
+#define SECI_RXF_LVL_FC_MASK (0x3F << 16)
+
+/* SeciUARTFCR Bit definitions */
+#define SECI_UART_FCR_RFR (1 << 0)
+#define SECI_UART_FCR_TFR (1 << 1)
+#define SECI_UART_FCR_SR (1 << 2)
+#define SECI_UART_FCR_THP (1 << 3)
+#define SECI_UART_FCR_AB (1 << 4)
+#define SECI_UART_FCR_ATOE (1 << 5)
+#define SECI_UART_FCR_ARTSOE (1 << 6)
+#define SECI_UART_FCR_ABV (1 << 7)
+#define SECI_UART_FCR_ALM (1 << 8)
+
+/* SECI UART LCR register bits */
+#define SECI_UART_LCR_STOP_BITS (1 << 0) /* 0 - 1bit, 1 - 2bits */
+#define SECI_UART_LCR_PARITY_EN (1 << 1)
+#define SECI_UART_LCR_PARITY (1 << 2) /* 0 - odd, 1 - even */
+#define SECI_UART_LCR_RX_EN (1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL (1 << 4) /* 1 => SECI_OUT held low */
+#define SECI_UART_LCR_TXO_EN (1 << 5)
+#define SECI_UART_LCR_RTSO_EN (1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN (1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK (1 << 8)
+#define SECI_UART_LCR_TXCRC_INV (1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF (1 << 10)
+#define SECI_UART_LCR_TXCRC_EN (1 << 11) +#define SECI_UART_LCR_RXSYNC_EN (1 << 12) + +#define SECI_UART_MCR_TX_EN (1 << 0) +#define SECI_UART_MCR_PRTS (1 << 1) +#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2) +#define SECI_UART_MCR_HIGHRATE_EN (1 << 3) +#define SECI_UART_MCR_LOOPBK_EN (1 << 4) +#define SECI_UART_MCR_AUTO_RTS (1 << 5) +#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6) +#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7) +#define SECI_UART_MCR_XONOFF_RPT (1 << 9) + +/* SeciUARTLSR Bit Mask */ +#define SECI_UART_LSR_RXOVR_MASK (1 << 0) +#define SECI_UART_LSR_RFF_MASK (1 << 1) +#define SECI_UART_LSR_TFNE_MASK (1 << 2) +#define SECI_UART_LSR_TI_MASK (1 << 3) +#define SECI_UART_LSR_TPR_MASK (1 << 4) +#define SECI_UART_LSR_TXHALT_MASK (1 << 5) + +/* SeciUARTMSR Bit Mask */ +#define SECI_UART_MSR_CTSS_MASK (1 << 0) +#define SECI_UART_MSR_RTSS_MASK (1 << 1) +#define SECI_UART_MSR_SIS_MASK (1 << 2) +#define SECI_UART_MSR_SIS2_MASK (1 << 3) + +/* SeciUARTData Bits */ +#define SECI_UART_DATA_RF_NOT_EMPTY_BIT (1 << 12) +#define SECI_UART_DATA_RF_FULL_BIT (1 << 13) +#define SECI_UART_DATA_RF_OVRFLOW_BIT (1 << 14) +#define SECI_UART_DATA_FIFO_PTR_MASK 0xFF +#define SECI_UART_DATA_RF_RD_PTR_SHIFT 16 +#define SECI_UART_DATA_RF_WR_PTR_SHIFT 24 + +/* LTECX: ltecxmux */ +#define LTECX_EXTRACT_MUX(val, idx) (getbit4(&(val), (idx))) + +/* LTECX: ltecxmux MODE */ +#define LTECX_MUX_MODE_IDX 0 +#define LTECX_MUX_MODE_WCI2 0x0 +#define LTECX_MUX_MODE_GPIO 0x1 + +/* LTECX GPIO Information Index */ +#define LTECX_NVRAM_FSYNC_IDX 0 +#define LTECX_NVRAM_LTERX_IDX 1 +#define LTECX_NVRAM_LTETX_IDX 2 +#define LTECX_NVRAM_WLPRIO_IDX 3 + +/* LTECX WCI2 Information Index */ +#define LTECX_NVRAM_WCI2IN_IDX 0 +#define LTECX_NVRAM_WCI2OUT_IDX 1 + +/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */ +#define LTECX_EXTRACT_PADNUM(val, idx) (getbit8(&(val), (idx))) +#define LTECX_EXTRACT_FNSEL(val, idx) (getbit4(&(val), (idx))) +#define LTECX_EXTRACT_GCIGPIO(val, idx) (getbit4(&(val), (idx))) + +/* WLAN channel numbers - used from wifi.h */ + +/* WLAN BW */ +#define ECI_BW_20 0x0 +#define ECI_BW_25 0x1 +#define ECI_BW_30 0x2 +#define ECI_BW_35 0x3 +#define ECI_BW_40 0x4 +#define ECI_BW_45 0x5 +#define ECI_BW_50 0x6 +#define ECI_BW_ALL 0x7 + +/* WLAN - number of antenna */ +#define WLAN_NUM_ANT1 TXANT_0 +#define WLAN_NUM_ANT2 TXANT_1 + +/* otpctrl1 0xF4 */ +#define OTPC_FORCE_PWR_OFF 0x02000000 +/* chipcommon s/r registers introduced with cc rev >= 48 */ +#define CC_SR_CTL0_ENABLE_MASK 0x1 +#define CC_SR_CTL0_ENABLE_SHIFT 0 +#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */ +#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to sr_engine */ +#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk in sr_engine */ +#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 /* Allow Subcore mem StandBy? 
*/ +#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18 +#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19 +#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power domains */ +#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25 +#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30 + +#define CC_SR_CTL1_SR_INIT_MASK 0x3FF +#define CC_SR_CTL1_SR_INIT_SHIFT 0 + +#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */ +#define ECI_INLO_PKTDUR_SHIFT 4 + +/* gci chip control bits */ +#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT 0 +#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT 1 +#define GCI_GPIO_CHIPCTRL_INVERT_BIT 2 +#define GCI_GPIO_CHIPCTRL_PULLUP_BIT 3 +#define GCI_GPIO_CHIPCTRL_PULLDN_BIT 4 +#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT 5 +#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT 6 +#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT 7 + +/* gci GPIO input status bits */ +#define GCI_GPIO_STS_VALUE_BIT 0 +#define GCI_GPIO_STS_POS_EDGE_BIT 1 +#define GCI_GPIO_STS_NEG_EDGE_BIT 2 +#define GCI_GPIO_STS_FAST_EDGE_BIT 3 +#define GCI_GPIO_STS_CLEAR 0xF + +#define GCI_GPIO_STS_EDGE_TRIG_BIT 0 +#define GCI_GPIO_STS_NEG_EDGE_TRIG_BIT 1 +#define GCI_GPIO_STS_DUAL_EDGE_TRIG_BIT 2 +#define GCI_GPIO_STS_WL_DIN_SELECT 6 + +#define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT) + +/* SR Power Control */ +#define SRPWR_DMN0_PCIE (0) /* PCIE */ +#define SRPWR_DMN0_PCIE_SHIFT (SRPWR_DMN0_PCIE) /* PCIE */ +#define SRPWR_DMN0_PCIE_MASK (1 << SRPWR_DMN0_PCIE_SHIFT) /* PCIE */ +#define SRPWR_DMN1_ARMBPSD (1) /* ARM/BP/SDIO */ +#define SRPWR_DMN1_ARMBPSD_SHIFT (SRPWR_DMN1_ARMBPSD) /* ARM/BP/SDIO */ +#define SRPWR_DMN1_ARMBPSD_MASK (1 << SRPWR_DMN1_ARMBPSD_SHIFT) /* ARM/BP/SDIO */ +#define SRPWR_DMN2_MACAUX (2) /* MAC/Phy Aux */ +#define SRPWR_DMN2_MACAUX_SHIFT (SRPWR_DMN2_MACAUX) /* MAC/Phy Aux */ +#define SRPWR_DMN2_MACAUX_MASK (1 << SRPWR_DMN2_MACAUX_SHIFT) /* MAC/Phy Aux */ +#define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */ +#define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */ +#define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */ + +#define SRPWR_DMN4_MACSCAN (4) /* MAC/Phy Scan */ +#define SRPWR_DMN4_MACSCAN_SHIFT (SRPWR_DMN4_MACSCAN) /* MAC/Phy Scan */ +#define SRPWR_DMN4_MACSCAN_MASK (1 << SRPWR_DMN4_MACSCAN_SHIFT) /* MAC/Phy Scan */ + +#define SRPWR_DMN_MAX (5) +/* all power domain mask */ +#define SRPWR_DMN_ALL_MASK(sih) si_srpwr_domain_all_mask(sih) + +#define SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */ +#define SRPWR_REQON_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_REQON_SHIFT) + +#define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */ +#define SRPWR_STATUS_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_STATUS_SHIFT) + +#define SRPWR_BT_STATUS_SHIFT (20) /* PowerDomain[20:21], RO */ +#define SRPWR_BT_STATUS_MASK (0x3) + +#define SRPWR_DMN_ID_SHIFT (28) /* PowerDomain[31:28], RO */ +#define SRPWR_DMN_ID_MASK (0xF) + +#define SRPWR_UP_DOWN_DELAY 100 /* more than 3 ILP clocks */ + +/* PMU Precision Usec Timer */ +#define PMU_PREC_USEC_TIMER_ENABLE 0x1 + +/* Random Number/Bit Generator defines */ +#define MASK_1BIT(offset) (0x1u << offset) + +#define CC_RNG_CTRL_0_RBG_EN_SHIFT (0u) +#define CC_RNG_CTRL_0_RBG_EN_MASK (0x1FFFu << CC_RNG_CTRL_0_RBG_EN_SHIFT) +#define CC_RNG_CTRL_0_RBG_EN (0x1FFFu) +#define CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT (12u) +#define CC_RNG_CTRL_0_RBG_DEV_CTRL_MASK (0x3u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT) +#define CC_RNG_CTRL_0_RBG_DEV_CTRL_1MHz (0x3u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT) +#define CC_RNG_CTRL_0_RBG_DEV_CTRL_2MHz (0x2u << 
CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT) +#define CC_RNG_CTRL_0_RBG_DEV_CTRL_4MHz (0x1u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT) +#define CC_RNG_CTRL_0_RBG_DEV_CTRL_8MHz (0x0u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT) + +/* RNG_FIFO_COUNT */ +/* RFC - RNG FIFO COUNT */ +#define CC_RNG_FIFO_COUNT_RFC_SHIFT (0u) +#define CC_RNG_FIFO_COUNT_RFC_MASK (0xFFu << CC_RNG_FIFO_COUNT_RFC_SHIFT) + +/* RNG interrupt */ +#define CC_RNG_TOT_BITS_CNT_IRQ_SHIFT (0u) +#define CC_RNG_TOT_BITS_CNT_IRQ_MASK (0x1u << CC_RNG_TOT_BITS_CNT_IRQ_SHIFT) +#define CC_RNG_TOT_BITS_MAX_IRQ_SHIFT (1u) +#define CC_RNG_TOT_BITS_MAX_IRQ_MASK (0x1u << CC_RNG_TOT_BITS_MAX_IRQ_SHIFT) +#define CC_RNG_FIFO_FULL_IRQ_SHIFT (2u) +#define CC_RNG_FIFO_FULL_IRQ_MASK (0x1u << CC_RNG_FIFO_FULL_IRQ_SHIFT) +#define CC_RNG_FIFO_OVER_RUN_IRQ_SHIFT (3u) +#define CC_RNG_FIFO_OVER_RUN_IRQ_MASK (0x1u << CC_RNG_FIFO_OVER_RUN_IRQ_SHIFT) +#define CC_RNG_FIFO_UNDER_RUN_IRQ_SHIFT (4u) +#define CC_RNG_FIFO_UNDER_RUN_IRQ_MASK (0x1u << CC_RNG_FIFO_UNDER_RUN_IRQ_SHIFT) +#define CC_RNG_NIST_FAIL_IRQ_SHIFT (5u) +#define CC_RNG_NIST_FAIL_IRQ_MASK (0x1u << CC_RNG_NIST_FAIL_IRQ_SHIFT) +#define CC_RNG_STARTUP_TRANSITION_MET_IRQ_SHIFT (17u) +#define CC_RNG_STARTUP_TRANSITION_MET_IRQ_MASK (0x1u << \ + CC_RNG_STARTUP_TRANSITION_MET_IRQ_SHIFT) +#define CC_RNG_MASTER_FAIL_LOCKOUT_IRQ_SHIFT (31u) +#define CC_RNG_MASTER_FAIL_LOCKOUT_IRQ_MASK (0x1u << \ + CC_RNG_MASTER_FAIL_LOCKOUT_IRQ_SHIFT) + +/* FISCtrlStatus */ +#define PMU_CLEAR_FIS_DONE_SHIFT 1u +#define PMU_CLEAR_FIS_DONE_MASK (1u << PMU_CLEAR_FIS_DONE_SHIFT) + +#define PMU_FIS_FORCEON_ALL_SHIFT 4u +#define PMU_FIS_FORCEON_ALL_MASK (1u << PMU_FIS_FORCEON_ALL_SHIFT) + +#define PMU_FIS_DN_TIMER_VAL_SHIFT 16u +#define PMU_FIS_DN_TIMER_VAL_MASK 0x7FFF0000u + +#define PMU_FIS_DN_TIMER_VAL_4378 0x2f80u /* micro second */ +#define PMU_FIS_DN_TIMER_VAL_4388 0x3f80u /* micro second */ +#define PMU_FIS_DN_TIMER_VAL_4389 0x3f80u /* micro second */ + +#define PMU_FIS_PCIE_SAVE_EN_SHIFT 5u +#define PMU_FIS_PCIE_SAVE_EN_VALUE (1u << PMU_FIS_PCIE_SAVE_EN_SHIFT) + +#define PMU_REG6_RFLDO_CTRL 0x000000E0 +#define PMU_REG6_RFLDO_CTRL_SHFT 5 + +#define PMU_REG6_BTLDO_CTRL 0x0000E000 +#define PMU_REG6_BTLDO_CTRL_SHFT 13 + +/* ETBMemCtrl */ +#define CC_ETBMEMCTRL_FORCETMCINTFTOETB_SHIFT 1u +#define CC_ETBMEMCTRL_FORCETMCINTFTOETB_MASK (1u << CC_ETBMEMCTRL_FORCETMCINTFTOETB_SHIFT) + +/* SSSR dumps locations on the backplane space */ +#define BCM4387_SSSR_DUMP_AXI_MAIN 0xE8C00000u +#define BCM4387_SSSR_DUMP_MAIN_SIZE 160000u +#define BCM4387_SSSR_DUMP_AXI_AUX 0xE8400000u +#define BCM4387_SSSR_DUMP_AUX_SIZE 160000u +#define BCM4387_SSSR_DUMP_AXI_SCAN 0xE9400000u +#define BCM4387_SSSR_DUMP_SCAN_SIZE 32768u + +#endif /* _SBCHIPC_H */ diff --git a/bcmdhd.101.10.361.x/include/sbconfig.h b/bcmdhd.101.10.361.x/include/sbconfig.h new file mode 100755 index 0000000..283eb0e --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbconfig.h @@ -0,0 +1,279 @@ +/* + * Broadcom SiliconBackplane hardware register definitions. + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _SBCONFIG_H +#define _SBCONFIG_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +/* enumeration in SB is based on the premise that cores are contiguous in the + * enumeration space. + */ +#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */ +#define SB_BUS_BASE(sih, b) (SI_ENUM_BASE(sih) + (b) * SB_BUS_SIZE) +#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */ + +/* + * Sonics Configuration Space Registers. + */ +#define SBCONFIGOFF 0xf00 /**< core sbconfig regs are top 256bytes of regs */ +#define SBCONFIGSIZE 256 /**< sizeof (sbconfig_t) */ + +#define SBIPSFLAG 0x08 +#define SBTPSFLAG 0x18 +#define SBTMERRLOGA 0x48 /**< sonics >= 2.3 */ +#define SBTMERRLOG 0x50 /**< sonics >= 2.3 */ +#define SBADMATCH3 0x60 +#define SBADMATCH2 0x68 +#define SBADMATCH1 0x70 +#define SBIMSTATE 0x90 +#define SBINTVEC 0x94 +#define SBTMSTATELOW 0x98 +#define SBTMSTATEHIGH 0x9c +#define SBBWA0 0xa0 +#define SBIMCONFIGLOW 0xa8 +#define SBIMCONFIGHIGH 0xac +#define SBADMATCH0 0xb0 +#define SBTMCONFIGLOW 0xb8 +#define SBTMCONFIGHIGH 0xbc +#define SBBCONFIG 0xc0 +#define SBBSTATE 0xc8 +#define SBACTCNFG 0xd8 +#define SBFLAGST 0xe8 +#define SBIDLOW 0xf8 +#define SBIDHIGH 0xfc + +/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have + * a few registers *below* that line. 
I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
+ */
+
+#define SBIMERRLOGA 0xea8
+#define SBIMERRLOG 0xeb0
+#define SBTMPORTCONNID0 0xed8
+#define SBTMPORTLOCK0 0xef8
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _sbconfig {
+ uint32 PAD[2];
+ uint32 sbipsflag; /**< initiator port ocp slave flag */
+ uint32 PAD[3];
+ uint32 sbtpsflag; /**< target port ocp slave flag */
+ uint32 PAD[11];
+ uint32 sbtmerrloga; /**< (sonics >= 2.3) */
+ uint32 PAD;
+ uint32 sbtmerrlog; /**< (sonics >= 2.3) */
+ uint32 PAD[3];
+ uint32 sbadmatch3; /**< address match3 */
+ uint32 PAD;
+ uint32 sbadmatch2; /**< address match2 */
+ uint32 PAD;
+ uint32 sbadmatch1; /**< address match1 */
+ uint32 PAD[7];
+ uint32 sbimstate; /**< initiator agent state */
+ uint32 sbintvec; /**< interrupt mask */
+ uint32 sbtmstatelow; /**< target state */
+ uint32 sbtmstatehigh; /**< target state */
+ uint32 sbbwa0; /**< bandwidth allocation table0 */
+ uint32 PAD;
+ uint32 sbimconfiglow; /**< initiator configuration */
+ uint32 sbimconfighigh; /**< initiator configuration */
+ uint32 sbadmatch0; /**< address match0 */
+ uint32 PAD;
+ uint32 sbtmconfiglow; /**< target configuration */
+ uint32 sbtmconfighigh; /**< target configuration */
+ uint32 sbbconfig; /**< broadcast configuration */
+ uint32 PAD;
+ uint32 sbbstate; /**< broadcast state */
+ uint32 PAD[3];
+ uint32 sbactcnfg; /**< activate configuration */
+ uint32 PAD[3];
+ uint32 sbflagst; /**< current sbflags */
+ uint32 PAD[3];
+ uint32 sbidlow; /**< identification */
+ uint32 sbidhigh; /**< identification */
+} sbconfig_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* sbipsflag */
+#define SBIPS_INT1_MASK 0x3f /**< which sbflags get routed to mips interrupt 1 */
+#define SBIPS_INT1_SHIFT 0
+#define SBIPS_INT2_MASK 0x3f00 /**< which sbflags get routed to mips interrupt 2 */
+#define SBIPS_INT2_SHIFT 8
+#define SBIPS_INT3_MASK 0x3f0000 /**< which sbflags get routed to mips interrupt 3 */
+#define SBIPS_INT3_SHIFT 16
+#define SBIPS_INT4_MASK 0x3f000000 /**< which sbflags get routed to mips interrupt 4 */
+#define SBIPS_INT4_SHIFT 24
+
+/* sbtpsflag */
+#define SBTPS_NUM0_MASK 0x3f /**< interrupt sbFlag # generated by this core */
+#define SBTPS_F0EN0 0x40 /**< interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define SBTMEL_CM 0x00000007 /**< command */
+#define SBTMEL_CI 0x0000ff00 /**< connection id */
+#define SBTMEL_EC 0x0f000000 /**< error code */
+#define SBTMEL_ME 0x80000000 /**< multiple error */
+
+/* sbimstate */
+#define SBIM_PC 0xf /**< pipecount */
+#define SBIM_AP_MASK 0x30 /**< arbitration policy */
+#define SBIM_AP_BOTH 0x00 /**< use both timeslices and token */
+#define SBIM_AP_TS 0x10 /**< use timeslices only */
+#define SBIM_AP_TK 0x20 /**< use token only */
+#define SBIM_AP_RSV 0x30 /**< reserved */
+#define SBIM_IBE 0x20000 /**< inband error */
+#define SBIM_TO 0x40000 /**< timeout */
+#define SBIM_BY 0x01800000 /**< busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /**< reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define SBTML_RESET 0x0001 /**< reset */
+#define SBTML_REJ_MASK 0x0006 /**< reject field */
+#define SBTML_REJ 0x0002 /**< reject */
+#define SBTML_TMPREJ 0x0004 /**< temporary reject, for error recovery */
+
+#define SBTML_SICF_SHIFT 16 /**< Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define SBTMH_SERR 0x0001 /**< serror */
+#define SBTMH_INT 0x0002 /**<
interrupt */ +#define SBTMH_BUSY 0x0004 /**< busy */ +#define SBTMH_TO 0x0020 /**< timeout (sonics >= 2.3) */ + +#define SBTMH_SISF_SHIFT 16 /**< Shift to locate the SI status flags in sbtmh */ + +/* sbbwa0 */ +#define SBBWA_TAB0_MASK 0xffff /**< lookup table 0 */ +#define SBBWA_TAB1_MASK 0xffff /**< lookup table 1 */ +#define SBBWA_TAB1_SHIFT 16 + +/* sbimconfiglow */ +#define SBIMCL_STO_MASK 0x7 /**< service timeout */ +#define SBIMCL_RTO_MASK 0x70 /**< request timeout */ +#define SBIMCL_RTO_SHIFT 4 +#define SBIMCL_CID_MASK 0xff0000 /**< connection id */ +#define SBIMCL_CID_SHIFT 16 + +/* sbimconfighigh */ +#define SBIMCH_IEM_MASK 0xc /**< inband error mode */ +#define SBIMCH_TEM_MASK 0x30 /**< timeout error mode */ +#define SBIMCH_TEM_SHIFT 4 +#define SBIMCH_BEM_MASK 0xc0 /**< bus error mode */ +#define SBIMCH_BEM_SHIFT 6 + +/* sbadmatch0 */ +#define SBAM_TYPE_MASK 0x3 /**< address type */ +#define SBAM_AD64 0x4 /**< reserved */ +#define SBAM_ADINT0_MASK 0xf8 /**< type0 size */ +#define SBAM_ADINT0_SHIFT 3 +#define SBAM_ADINT1_MASK 0x1f8 /**< type1 size */ +#define SBAM_ADINT1_SHIFT 3 +#define SBAM_ADINT2_MASK 0x1f8 /**< type2 size */ +#define SBAM_ADINT2_SHIFT 3 +#define SBAM_ADEN 0x400 /**< enable */ +#define SBAM_ADNEG 0x800 /**< negative decode */ +#define SBAM_BASE0_MASK 0xffffff00 /**< type0 base address */ +#define SBAM_BASE0_SHIFT 8 +#define SBAM_BASE1_MASK 0xfffff000 /**< type1 base address for the core */ +#define SBAM_BASE1_SHIFT 12 +#define SBAM_BASE2_MASK 0xffff0000 /**< type2 base address for the core */ +#define SBAM_BASE2_SHIFT 16 + +/* sbtmconfiglow */ +#define SBTMCL_CD_MASK 0xff /**< clock divide */ +#define SBTMCL_CO_MASK 0xf800 /**< clock offset */ +#define SBTMCL_CO_SHIFT 11 +#define SBTMCL_IF_MASK 0xfc0000 /**< interrupt flags */ +#define SBTMCL_IF_SHIFT 18 +#define SBTMCL_IM_MASK 0x3000000 /**< interrupt mode */ +#define SBTMCL_IM_SHIFT 24 + +/* sbtmconfighigh */ +#define SBTMCH_BM_MASK 0x3 /**< busy mode */ +#define SBTMCH_RM_MASK 0x3 /**< retry mode */ +#define SBTMCH_RM_SHIFT 2 +#define SBTMCH_SM_MASK 0x30 /**< stop mode */ +#define SBTMCH_SM_SHIFT 4 +#define SBTMCH_EM_MASK 0x300 /**< sb error mode */ +#define SBTMCH_EM_SHIFT 8 +#define SBTMCH_IM_MASK 0xc00 /**< int mode */ +#define SBTMCH_IM_SHIFT 10 + +/* sbbconfig */ +#define SBBC_LAT_MASK 0x3 /**< sb latency */ +#define SBBC_MAX0_MASK 0xf0000 /**< maxccntr0 */ +#define SBBC_MAX0_SHIFT 16 +#define SBBC_MAX1_MASK 0xf00000 /**< maxccntr1 */ +#define SBBC_MAX1_SHIFT 20 + +/* sbbstate */ +#define SBBS_SRD 0x1 /**< st reg disable */ +#define SBBS_HRD 0x2 /**< hold reg disable */ + +/* sbidlow */ +#define SBIDL_CS_MASK 0x3 /**< config space */ +#define SBIDL_AR_MASK 0x38 /**< # address ranges supported */ +#define SBIDL_AR_SHIFT 3 +#define SBIDL_SYNCH 0x40 /**< sync */ +#define SBIDL_INIT 0x80 /**< initiator */ +#define SBIDL_MINLAT_MASK 0xf00 /**< minimum backplane latency */ +#define SBIDL_MINLAT_SHIFT 8 +#define SBIDL_MAXLAT 0xf000 /**< maximum backplane latency */ +#define SBIDL_MAXLAT_SHIFT 12 +#define SBIDL_FIRST 0x10000 /**< this initiator is first */ +#define SBIDL_CW_MASK 0xc0000 /**< cycle counter width */ +#define SBIDL_CW_SHIFT 18 +#define SBIDL_TP_MASK 0xf00000 /**< target ports */ +#define SBIDL_TP_SHIFT 20 +#define SBIDL_IP_MASK 0xf000000 /**< initiator ports */ +#define SBIDL_IP_SHIFT 24 +#define SBIDL_RV_MASK 0xf0000000 /**< sonics backplane revision code */ +#define SBIDL_RV_SHIFT 28 +#define SBIDL_RV_2_2 0x00000000 /**< version 2.2 or earlier */ +#define SBIDL_RV_2_3 0x10000000 /**< version 2.3 
*/ + +/* sbidhigh */ +#define SBIDH_RC_MASK 0x000f /**< revision code */ +#define SBIDH_RCE_MASK 0x7000 /**< revision code extension field */ +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 /**< core code */ +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 /**< vendor code */ +#define SBIDH_VC_SHIFT 16 + +#define SB_COMMIT 0xfd8 /**< update buffered registers value */ + +/* vendor codes */ +#define SB_VEND_BCM 0x4243 /**< Broadcom's SB vendor code */ + +#endif /* _SBCONFIG_H */ diff --git a/bcmdhd.101.10.361.x/include/sbgci.h b/bcmdhd.101.10.361.x/include/sbgci.h new file mode 100755 index 0000000..0b265b6 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbgci.h @@ -0,0 +1,424 @@ +/* + * SiliconBackplane GCI core hardware definitions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _SBGCI_H +#define _SBGCI_H + +#include + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +#define GCI_OFFSETOF(sih, reg) \ + (AOB_ENAB(sih) ? OFFSETOF(gciregs_t, reg) : OFFSETOF(chipcregs_t, reg)) +#define GCI_CORE_IDX(sih) (AOB_ENAB(sih) ? 
si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX) + +typedef volatile struct { + uint32 gci_corecaps0; /* 0x000 */ + uint32 gci_corecaps1; /* 0x004 */ + uint32 gci_corecaps2; /* 0x008 */ + uint32 gci_corectrl; /* 0x00c */ + uint32 gci_corestat; /* 0x010 */ + uint32 gci_intstat; /* 0x014 */ + uint32 gci_intmask; /* 0x018 */ + uint32 gci_wakemask; /* 0x01c */ + uint32 gci_levelintstat; /* 0x020 */ + uint32 gci_eventintstat; /* 0x024 */ + uint32 gci_wakelevelintstat; /* 0x028 */ + uint32 gci_wakeeventintstat; /* 0x02c */ + uint32 semaphoreintstatus; /* 0x030 */ + uint32 semaphoreintmask; /* 0x034 */ + uint32 semaphorerequest; /* 0x038 */ + uint32 semaphorereserve; /* 0x03c */ + uint32 gci_indirect_addr; /* 0x040 */ + uint32 gci_gpioctl; /* 0x044 */ + uint32 gci_gpiostatus; /* 0x048 */ + uint32 gci_gpiomask; /* 0x04c */ + uint32 gci_eventsummary; /* 0x050 */ + uint32 gci_miscctl; /* 0x054 */ + uint32 gci_gpiointmask; /* 0x058 */ + uint32 gci_gpiowakemask; /* 0x05c */ + uint32 gci_input[32]; /* 0x060 */ + uint32 gci_event[32]; /* 0x0e0 */ + uint32 gci_output[4]; /* 0x160 */ + uint32 gci_control_0; /* 0x170 */ + uint32 gci_control_1; /* 0x174 */ + uint32 gci_intpolreg; /* 0x178 */ + uint32 gci_levelintmask; /* 0x17c */ + uint32 gci_eventintmask; /* 0x180 */ + uint32 wakelevelintmask; /* 0x184 */ + uint32 wakeeventintmask; /* 0x188 */ + uint32 hwmask; /* 0x18c */ + uint32 PAD; + uint32 gci_inbandeventintmask; /* 0x194 */ + uint32 PAD; + uint32 gci_inbandeventstatus; /* 0x19c */ + uint32 gci_seciauxtx; /* 0x1a0 */ + uint32 gci_seciauxrx; /* 0x1a4 */ + uint32 gci_secitx_datatag; /* 0x1a8 */ + uint32 gci_secirx_datatag; /* 0x1ac */ + uint32 gci_secitx_datamask; /* 0x1b0 */ + uint32 gci_seciusef0tx_reg; /* 0x1b4 */ + uint32 gci_secif0tx_offset; /* 0x1b8 */ + uint32 gci_secif0rx_offset; /* 0x1bc */ + uint32 gci_secif1tx_offset; /* 0x1c0 */ + uint32 gci_rxfifo_common_ctrl; /* 0x1c4 */ + uint32 gci_rxfifoctrl; /* 0x1c8 */ + uint32 gci_hw_sema_status; /* 0x1cc */ + uint32 gci_seciuartescval; /* 0x1d0 */ + uint32 gic_seciuartautobaudctr; /* 0x1d4 */ + uint32 gci_secififolevel; /* 0x1d8 */ + uint32 gci_seciuartdata; /* 0x1dc */ + uint32 gci_secibauddiv; /* 0x1e0 */ + uint32 gci_secifcr; /* 0x1e4 */ + uint32 gci_secilcr; /* 0x1e8 */ + uint32 gci_secimcr; /* 0x1ec */ + uint32 gci_secilsr; /* 0x1f0 */ + uint32 gci_secimsr; /* 0x1f4 */ + uint32 gci_baudadj; /* 0x1f8 */ + uint32 gci_inbandintmask; /* 0x1fc */ + uint32 gci_chipctrl; /* 0x200 */ + uint32 gci_chipsts; /* 0x204 */ + uint32 gci_gpioout; /* 0x208 */ + uint32 gci_gpioout_read; /* 0x20C */ + uint32 gci_mpwaketx; /* 0x210 */ + uint32 gci_mpwakedetect; /* 0x214 */ + uint32 gci_seciin_ctrl; /* 0x218 */ + uint32 gci_seciout_ctrl; /* 0x21C */ + uint32 gci_seciin_auxfifo_en; /* 0x220 */ + uint32 gci_seciout_txen_txbr; /* 0x224 */ + uint32 gci_seciin_rxbrstatus; /* 0x228 */ + uint32 gci_seciin_rxerrstatus; /* 0x22C */ + uint32 gci_seciin_fcstatus; /* 0x230 */ + uint32 gci_seciout_txstatus; /* 0x234 */ + uint32 gci_seciout_txbrstatus; /* 0x238 */ + uint32 wlan_mem_info; /* 0x23C */ + uint32 wlan_bankxinfo; /* 0x240 */ + uint32 bt_smem_select; /* 0x244 */ + uint32 bt_smem_stby; /* 0x248 */ + uint32 bt_smem_status; /* 0x24C */ + uint32 wlan_bankxactivepda; /* 0x250 */ + uint32 wlan_bankxsleeppda; /* 0x254 */ + uint32 wlan_bankxkill; /* 0x258 */ + uint32 reset_override; /* 0x25C */ + uint32 ip_id; /* 0x260 */ + uint32 lpo_safe_zone; /* 0x264 */ + uint32 function_sel_control_and_status; /* 0x268 */ + uint32 bt_smem_control0; /* 0x26C */ + uint32 bt_smem_control1; 
/* 0x270 */ + uint32 PAD[PADSZ(0x274, 0x2fc)]; /* 0x274-0x2fc */ + uint32 gci_chipid; /* 0x300 */ + uint32 PAD[PADSZ(0x304, 0x30c)]; /* 0x304-0x30c */ + uint32 otpstatus; /* 0x310 */ + uint32 otpcontrol; /* 0x314 */ + uint32 otpprog; /* 0x318 */ + uint32 otplayout; /* 0x31c */ + uint32 otplayoutextension; /* 0x320 */ + uint32 otpcontrol1; /* 0x324 */ + uint32 otpprogdata; /* 0x328 */ + uint32 PAD[PADSZ(0x32c, 0x3f8)]; /* 0x32c-0x3f8 */ + uint32 otpECCstatus; /* 0x3FC */ + uint32 gci_rffe_rfem_data0; /* 0x400 */ + uint32 gci_rffe_rfem_data1; /* 0x404 */ + uint32 gci_rffe_rfem_data2; /* 0x408 */ + uint32 gci_rffe_rfem_data3; /* 0x40c */ + uint32 gci_rffe_rfem_addr; /* 0x410 */ + uint32 gci_rffe_config; /* 0x414 */ + uint32 gci_rffe_clk_ctrl; /* 0x418 */ + uint32 gci_rffe_ctrl; /* 0x41c */ + uint32 gci_rffe_misc_ctrl; /* 0x420 */ + uint32 gci_rffe_rfem_reg0_field_ctrl; /* 0x424 */ + uint32 PAD[PADSZ(0x428, 0x438)]; /* 0x428-0x438 */ + uint32 gci_rffe_rfem_mapping_mux0; /* 0x43c */ + uint32 gci_rffe_rfem_mapping_mux1; /* 0x440 */ + uint32 gci_rffe_rfem_mapping_mux2; /* 0x444 */ + uint32 gci_rffe_rfem_mapping_mux3; /* 0x448 */ + uint32 gci_rffe_rfem_mapping_mux4; /* 0x44c */ + uint32 gci_rffe_rfem_mapping_mux5; /* 0x450 */ + uint32 gci_rffe_rfem_mapping_mux6; /* 0x454 */ + uint32 gci_rffe_rfem_mapping_mux7; /* 0x458 */ + uint32 gci_rffe_change_detect_ovr_wlmc; /* 0x45c */ + uint32 gci_rffe_change_detect_ovr_wlac; /* 0x460 */ + uint32 gci_rffe_change_detect_ovr_wlsc; /* 0x464 */ + uint32 gci_rffe_change_detect_ovr_btmc; /* 0x468 */ + uint32 gci_rffe_change_detect_ovr_btsc; /* 0x46c */ + uint32 gci_cncb_ctrl_status; /* 0x470 */ + uint32 gci_cncb_2g_force_unlock; /* 0x474 */ + uint32 gci_cncb_5g_force_unlock; /* 0x478 */ + uint32 gci_cncb_2g_reset_pulse_width; /* 0x47c */ + uint32 gci_cncb_5g_reset_pulse_width; /* 0x480 */ + uint32 gci_cncb_lut_indirect_addr; /* 0x484 */ + uint32 gci_cncb_2g_lut; /* 0x488 */ + uint32 gci_cncb_5g_lut; /* 0x48c */ + uint32 gci_cncb_glitch_filter_width; /* 0x490 */ + uint32 PAD[PADSZ(0x494, 0x5fc)]; /* 0x494-0x5fc */ + uint32 sgr_fifo_control_reg_5g; /* 0x600 */ + uint32 sgr_fifo_control_reg_2g; /* 0x604 */ + uint32 sgr_fifo_control_reg_bt; /* 0x608 */ + uint32 PAD; /* 0x60c */ + uint32 sgr_rx_fifo0_read_reg0; /* 0x610 */ + uint32 sgr_rx_fifo0_read_reg1; /* 0x614 */ + uint32 sgr_rx_fifo0_read_reg2; /* 0x618 */ + uint32 sgr_rx_fifo1_read_reg0; /* 0x61c */ + uint32 sgr_rx_fifo1_read_reg1; /* 0x620 */ + uint32 sgr_rx_fifo1_read_reg2; /* 0x624 */ + uint32 sgr_rx_fifo2_read_reg0; /* 0x628 */ + uint32 sgr_rx_fifo2_read_reg1; /* 0x62c */ + uint32 sgr_rx_fifo2_read_reg2; /* 0x630 */ + uint32 sgr_rx_fifo3_read_reg0; /* 0x634 */ + uint32 sgr_rx_fifo3_read_reg1; /* 0x638 */ + uint32 sgr_rx_fifo3_read_reg2; /* 0x63c */ + uint32 sgr_rx_fifo4_read_reg0; /* 0x640 */ + uint32 sgr_rx_fifo4_read_reg1; /* 0x644 */ + uint32 sgr_rx_fifo4_read_reg2; /* 0x648 */ + uint32 sgr_rx_fifo5_read_reg0; /* 0x64c */ + uint32 sgr_rx_fifo5_read_reg1; /* 0x650 */ + uint32 sgr_rx_fifo5_read_reg2; /* 0x654 */ + uint32 sgr_rx_fifo6_read_reg0; /* 0x658 */ + uint32 sgr_rx_fifo6_read_reg1; /* 0x65c */ + uint32 sgr_rx_fifo6_read_reg2; /* 0x660 */ + uint32 sgr_rx_fifo7_read_reg0; /* 0x664 */ + uint32 sgr_rx_fifo7_read_reg1; /* 0x668 */ + uint32 sgr_rx_fifo7_read_reg2; /* 0x66c */ + uint32 sgr_rx_fifo8_read_reg0; /* 0x670 */ + uint32 sgr_rx_fifo8_read_reg1; /* 0x674 */ + uint32 sgr_rx_fifo8_read_reg2; /* 0x678 */ + uint32 sgr_rx_fifo0_read_status; /* 0x67c */ + uint32 sgr_rx_fifo1_read_status; /* 0x680 */ + 
uint32 sgr_rx_fifo2_read_status; /* 0x684 */ + uint32 sgr_rx_fifo3_read_status; /* 0x688 */ + uint32 sgr_rx_fifo4_read_status; /* 0x68c */ + uint32 sgr_rx_fifo5_read_status; /* 0x690 */ + uint32 sgr_rx_fifo6_read_status; /* 0x694 */ + uint32 sgr_rx_fifo7_read_status; /* 0x698 */ + uint32 sgr_rx_fifo8_read_status; /* 0x69c */ + uint32 wl_tx_fifo_data_idx_reg; /* 0x6a0 */ + uint32 wl_tx_fifo_data_reg0; /* 0x6a4 */ + uint32 wl_tx_fifo_data_reg1; /* 0x6a8 */ + uint32 wl_tx_fifo_data_reg2; /* 0x6ac */ + uint32 mac_main_core_tx_fifo_data_idx_reg; /* 0x6b0 */ + uint32 mac_main_core_tx_fifo_data_reg0; /* 0x6b4 */ + uint32 mac_main_core_tx_fifo_data_reg1; /* 0x6b8 */ + uint32 mac_main_core_tx_fifo_data_reg2; /* 0x6bc */ + uint32 mac_aux_core_tx_fifo_data_idx_reg; /* 0x6c0 */ + uint32 mac_aux_core_tx_fifo_data_reg0; /* 0x6c4 */ + uint32 mac_aux_core_tx_fifo_data_reg1; /* 0x6c8 */ + uint32 mac_aux_core_tx_fifo_data_reg2; /* 0x6cc */ + uint32 bt_tx_fifo_data_idx_reg; /* 0x6d0 */ + uint32 bt_tx_fifo_data_reg0; /* 0x6d4 */ + uint32 bt_tx_fifo_data_reg1; /* 0x6d8 */ + uint32 bt_tx_fifo_data_reg2; /* 0x6dc */ + uint32 wci2_tx_fifo_data_reg0; /* 0x6e0 */ + uint32 wci2_tx_fifo_data_reg1; /* 0x6e4 */ + uint32 sgt_tx_fifo_ctrl; /* 0x6e8 */ + uint32 sgt_fifo_status_hpri; /* 0x6ec */ + uint32 sgt_fifo_status_norm; /* 0x6f0 */ + uint32 sgt_fifo_status_lpri; /* 0x6f4 */ + uint32 PAD[PADSZ(0x6f8, 0x7a0)]; /* 0x6f8-0x7a0 */ + uint32 sg_timestamp_fifo_ctrl; /* 0x7a4 */ + uint32 sgr_timestamp_data_rx; /* 0x7a8 */ + uint32 sgr_timestamp_data_tx; /* 0x7ac */ + uint32 sgr_fifo_int_reg; /* 0x7b0 */ + uint32 sgr_fifo_int_mask_reg; /* 0x7b4 */ + uint32 sgt_fifo_int_reg; /* 0x7b8 */ + uint32 sgt_fifo_int_mask_reg; /* 0x7bc */ + uint32 sg_fifo_debug_bus; /* 0x7c0 */ + uint32 PAD[PADSZ(0x7c4, 0xbfc)]; /* 0x7c4-0xbfc */ + uint32 lhl_core_capab_adr; /* 0xC00 */ + uint32 lhl_main_ctl_adr; /* 0xC04 */ + uint32 lhl_pmu_ctl_adr; /* 0xC08 */ + uint32 lhl_extlpo_ctl_adr; /* 0xC0C */ + uint32 lpo_ctl_adr; /* 0xC10 */ + uint32 lhl_lpo2_ctl_adr; /* 0xC14 */ + uint32 lhl_osc32k_ctl_adr; /* 0xC18 */ + uint32 lhl_clk_status_adr; /* 0xC1C */ + uint32 lhl_clk_det_ctl_adr; /* 0xC20 */ + uint32 lhl_clk_sel_adr; /* 0xC24 */ + uint32 hidoff_cnt_adr[2]; /* 0xC28-0xC2C */ + uint32 lhl_autoclk_ctl_adr; /* 0xC30 */ + uint32 PAD; /* reserved */ + uint32 lhl_hibtim_adr; /* 0xC38 */ + uint32 lhl_wl_ilp_val_adr; /* 0xC3C */ + uint32 lhl_wl_armtim0_intrp_adr; /* 0xC40 */ + uint32 lhl_wl_armtim0_st_adr; /* 0xC44 */ + uint32 lhl_wl_armtim0_adr; /* 0xC48 */ + uint32 PAD[PADSZ(0xc4c, 0xc6c)]; /* 0xC4C-0xC6C */ + uint32 lhl_wl_mactim0_intrp_adr; /* 0xC70 */ + uint32 lhl_wl_mactim0_st_adr; /* 0xC74 */ + uint32 lhl_wl_mactim_int0_adr; /* 0xC78 */ + uint32 lhl_wl_mactim_frac0_adr; /* 0xC7C */ + uint32 lhl_wl_mactim1_intrp_adr; /* 0xC80 */ + uint32 lhl_wl_mactim1_st_adr; /* 0xC84 */ + uint32 lhl_wl_mactim_int1_adr; /* 0xC88 */ + uint32 lhl_wl_mactim_frac1_adr; /* 0xC8C */ + uint32 lhl_wl_mactim2_intrp_adr; /* 0xC90 */ + uint32 lhl_wl_mactim2_st_adr; /* 0xC94 */ + uint32 lhl_wl_mactim_int2_adr; /* 0xC98 */ + uint32 lhl_wl_mactim_frac2_adr; /* 0xC9C */ + uint32 PAD[PADSZ(0xca0, 0xcac)]; /* 0xCA0-0xCAC */ + uint32 gpio_int_en_port_adr[4]; /* 0xCB0-0xCBC */ + uint32 gpio_int_st_port_adr[4]; /* 0xCC0-0xCCC */ + uint32 gpio_ctrl_iocfg_p_adr[40]; /* 0xCD0-0xD6C */ + uint32 lhl_lp_up_ctl1_adr; /* 0xd70 */ + uint32 lhl_lp_dn_ctl1_adr; /* 0xd74 */ + uint32 PAD[PADSZ(0xd78, 0xdb4)]; /* 0xd78-0xdb4 */ + uint32 lhl_sleep_timer_adr; /* 0xDB8 */ + uint32 
lhl_sleep_timer_ctl_adr; /* 0xDBC */ + uint32 lhl_sleep_timer_load_val_adr; /* 0xDC0 */ + uint32 lhl_lp_main_ctl_adr; /* 0xDC4 */ + uint32 lhl_lp_up_ctl_adr; /* 0xDC8 */ + uint32 lhl_lp_dn_ctl_adr; /* 0xDCC */ + uint32 gpio_gctrl_iocfg_p0_p39_adr; /* 0xDD0 */ + uint32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr; /* 0xDD4 */ + uint32 gpio_gdsctrl_iocfg_p26_p29_adr; /* 0xDD8 */ + uint32 PAD[PADSZ(0xddc, 0xdf8)]; /* 0xDDC-0xDF8 */ + uint32 lhl_gpio_din0_adr; /* 0xDFC */ + uint32 lhl_gpio_din1_adr; /* 0xE00 */ + uint32 lhl_wkup_status_adr; /* 0xE04 */ + uint32 lhl_ctl_adr; /* 0xE08 */ + uint32 lhl_adc_ctl_adr; /* 0xE0C */ + uint32 lhl_qdxyz_in_dly_adr; /* 0xE10 */ + uint32 lhl_optctl_adr; /* 0xE14 */ + uint32 lhl_optct2_adr; /* 0xE18 */ + uint32 lhl_scanp_cntr_init_val_adr; /* 0xE1C */ + uint32 lhl_opt_togg_val_adr[6]; /* 0xE20-0xE34 */ + uint32 lhl_optx_smp_val_adr; /* 0xE38 */ + uint32 lhl_opty_smp_val_adr; /* 0xE3C */ + uint32 lhl_optz_smp_val_adr; /* 0xE40 */ + uint32 lhl_hidoff_keepstate_adr[3]; /* 0xE44-0xE4C */ + uint32 lhl_bt_slmboot_ctl0_adr[4]; /* 0xE50-0xE5C */ + uint32 lhl_wl_fw_ctl; /* 0xE60 */ + uint32 lhl_wl_hw_ctl_adr[2]; /* 0xE64-0xE68 */ + uint32 lhl_bt_hw_ctl_adr; /* 0xE6C */ + uint32 lhl_top_pwrseq_en_adr; /* 0xE70 */ + uint32 lhl_top_pwrdn_ctl_adr; /* 0xE74 */ + uint32 lhl_top_pwrup_ctl_adr; /* 0xE78 */ + uint32 lhl_top_pwrseq_ctl_adr; /* 0xE7C */ + uint32 lhl_top_pwrdn2_ctl_adr; /* 0xE80 */ + uint32 lhl_top_pwrup2_ctl_adr; /* 0xE84 */ + uint32 wpt_regon_intrp_cfg_adr; /* 0xE88 */ + uint32 bt_regon_intrp_cfg_adr; /* 0xE8C */ + uint32 wl_regon_intrp_cfg_adr; /* 0xE90 */ + uint32 regon_intrp_st_adr; /* 0xE94 */ + uint32 regon_intrp_en_adr; /* 0xE98 */ + uint32 PAD[PADSZ(0xe9c, 0xeb4)]; /* 0xe9c-0xeb4 */ + uint32 lhl_lp_main_ctl1_adr; /* 0xeb8 */ + uint32 lhl_lp_up_ctl2_adr; /* 0xebc */ + uint32 lhl_lp_dn_ctl2_adr; /* 0xec0 */ + uint32 lhl_lp_up_ctl3_adr; /* 0xec4 */ + uint32 lhl_lp_dn_ctl3_adr; /* 0xec8 */ + uint32 PAD[PADSZ(0xecc, 0xed8)]; /* 0xecc-0xed8 */ + uint32 lhl_lp_main_ctl2_adr; /* 0xedc */ + uint32 lhl_lp_up_ctl4_adr; /* 0xee0 */ + uint32 lhl_lp_dn_ctl4_adr; /* 0xee4 */ + uint32 lhl_lp_up_ctl5_adr; /* 0xee8 */ + uint32 lhl_lp_dn_ctl5_adr; /* 0xeec */ + uint32 lhl_top_pwrdn3_ctl_adr; /* 0xEF0 */ + uint32 lhl_top_pwrup3_ctl_adr; /* 0xEF4 */ + uint32 PAD[PADSZ(0xef8, 0xf00)]; /* 0xEF8 - 0xF00 */ + uint32 error_status; /* 0xF04 */ + uint32 error_parity; /* 0xF08 */ + uint32 PAD; /* 0xF0C */ + uint32 msg_buf_0[8]; /* 0xF10 - 0xF2C */ + uint32 PAD[PADSZ(0xf30, 0xf3c)]; /* 0xF30 - 0xF3C */ + uint32 CTRL_REG0; /* 0xF40 */ + uint32 CTRL_REG1; /* 0xF44 */ + uint32 chipID; /* 0xF48 */ + uint32 PAD[PADSZ(0xf4c, 0xf54)]; /* 0xF4C - 0xF54 */ + uint32 timestamp_mask0; /* 0xf58 */ + uint32 timestamp_mask1; /* 0xf5c */ + uint32 wl_event_rdAddress; /* 0xF60 */ + uint32 bt_event_rdAddress; /* 0xF64 */ + uint32 interrupt_Address; /* 0xF68 */ + uint32 PAD[PADSZ(0xf6c, 0xf70)]; /* 0xF6c - 0xF70 */ + uint32 coex_error_status; /* 0xF74 */ + uint32 coex_error_parity; /* 0xF78 */ + uint32 PAD; /* 0xF7C */ + uint32 ar_buf_01[4]; /* 0xF80 - 0xF8C */ + uint32 PAD[PADSZ(0xf90,0xfac)]; /* 0xF90 - 0xFAC */ + uint32 coex_ctrl_reg0; /* 0xFB0 */ + uint32 coex_ctrl_reg1; /* 0xFB4 */ + uint32 coex_chip_id; /* 0xFB8 */ + uint32 PAD[PADSZ(0xfbc, 0xfcc)]; /* 0xFBC - 0xFCC */ + uint32 coex_wl_event_rd; /* 0xFD0 */ + uint32 coex_bt_event_rd; /* 0xFD4 */ + uint32 coex_interrupt; /* 0xFD8 */ + uint32 PAD; /* 0xFDC */ + uint32 spmi_shared_reg_status_intMask_adr; /* 0xFE0 */ + uint32 
spmi_shared_reg_status_intStatus_adr; /* 0xFE4 */ + uint32 spmi_shared_reg_status_wakeMask_adr; /* 0xFE8 */ + uint32 spmi_shared_event_map_idx_adr; /* 0xFEC */ + uint32 spmi_shared_event_map_data_adr; /* 0xFF0 */ + uint32 spmi_coex_event_gpr_status_adr; /* 0xFF4 */ +} gciregs_t; + +#define GCI_CAP0_REV_MASK 0x000000ff + +/* GCI Capabilities registers */ +#define GCI_CORE_CAP_0_COREREV_MASK 0xFF +#define GCI_CORE_CAP_0_COREREV_SHIFT 0 + +#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK 0x3F +#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT 0 +#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK 0xF +#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT 16 + +#define WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK 0xFFFF + +#define WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK 0x1 + +/* WLAN BankXInfo Register */ +#define WLAN_BANKXINFO_BANK_SIZE_MASK 0x00FFF000 +#define WLAN_BANKXINFO_BANK_SIZE_SHIFT 12 + +/* WLAN Mem Info Register */ +#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK 0x000000FF +#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT 0 + +#define WLAN_MEM_INFO_REG_NUMD11MACBM_MASK 0x0000FF00 +#define WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT 8 + +#define WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK 0x00FF0000 +#define WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT 16 + +#define WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK 0xFF000000 +#define WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT 24 + +/* GCI chip status register 9 */ +#define GCI_CST9_SCAN_DIS (1u << 31u) /* scan core disable */ + +/* GCI Output register indices */ +#define GCI_OUTPUT_IDX_0 0 +#define GCI_OUTPUT_IDX_1 1 +#define GCI_OUTPUT_IDX_2 2 +#define GCI_OUTPUT_IDX_3 3 + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +#endif /* _SBGCI_H */ diff --git a/bcmdhd.101.10.361.x/include/sbhndarm.h b/bcmdhd.101.10.361.x/include/sbhndarm.h new file mode 100755 index 0000000..bdddbce --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbhndarm.h @@ -0,0 +1,414 @@ +/* + * Broadcom SiliconBackplane ARM definitions + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ *
+ * <>
+ */
+
+#ifndef _sbhndarm_h_
+#define _sbhndarm_h_
+
+#ifdef DONGLEBUILD
+
+#include
+#include
+
+/* register offsets */
+#define ARM7_CORECTL 0
+
+/* bits in corecontrol */
+#define ACC_FORCED_RST 0x1
+#define ACC_SERRINT 0x2
+#define ACC_WFICLKSTOP 0x4
+
+#if !defined(__ARM_ARCH_7A__)
+#define ACC_NOTSLEEPINGCLKREQ_SHIFT 24
+#endif /* !__ARM_ARCH_7A__ */
+
+#if defined(__ARM_ARCH_7A__)
+
+#define ACC_FORCECLOCKRATIO (0x1u << 8)
+#define ACC_CLOCKRATIO_SHIFT (9u)
+#define ACC_CLOCKRATIO_MASK (0xFu << ACC_CLOCKRATIO_SHIFT)
+
+#define ACC_CLOCKRATIO_1_TO_1 (0u)
+#define ACC_CLOCKRATIO_2_TO_1 (1u)
+#define ACC_CLOCKRATIO_3_TO_1 (2u)
+#define ACC_CLOCKRATIO_4_TO_1 (3u)
+
+#define ACC_FASTCLOCKCHANNEL_SHIFT (24u)
+#define ACC_FASTCLOCKCHANNEL_MASK (0x3u << ACC_FASTCLOCKCHANNEL_SHIFT)
+#define ACC_NUM_FASTCLOCKS_SHIFT (2u)
+#define ACC_NUM_FASTCLOCKS_MASK (0x3u << ACC_NUM_FASTCLOCKS_SHIFT)
+
+#define ACC_NOTSLEEPINGCLKREQ_SHIFT (4u)
+#define ACC_NOTSLEEPINGCLKREQ_MASK (0x3u << ACC_NOTSLEEPINGCLKREQ_SHIFT)
+#define ACC_NOTSLEEPING_ALP (0u)
+#define ACC_NOTSLEEPING_HT (1u)
+#define ACC_NOTSLEEPING_ALP_HT_AVAIL (2u)
+#define ACC_NOTSLEEPING_HT_AVAIL (3u)
+
+#elif defined(__ARM_ARCH_7R__) /* CR4 */
+
+#define ACC_FORCECLOCKRATIO (1u << 7u)
+#define ACC_CLOCKRATIO_SHIFT 8u
+#define ACC_CLOCKRATIO_MASK (0xFu << ACC_CLOCKRATIO_SHIFT)
+#define ACC_CLOCKMODE_SHIFT 12u
+#define ACC_CLOCKMODE_MASK (7u << ACC_CLOCKMODE_SHIFT)
+#define ACC_NOTSLEEPCLKREQ0 3u
+#define ACC_NOTSLEEPCLKREQ1 18u
+#define ACC_FLOPSPROTECT (1u << 20u)
+
+#define ACC_CLOCKRATIO_1_TO_1 (0u)
+#define ACC_CLOCKRATIO_2_TO_1 (4u)
+
+#endif /* __ARM_ARCH_7A__ */
+
+#define ACC_CLOCKMODE_SAME (0) /**< BP and CPU clock are the same */
+#define ACC_CLOCKMODE_ASYNC (1) /**< BP and CPU clock are asynchronous */
+#define ACC_CLOCKMODE_SYNCH (2) /**< BP and CPU clock are synch, ratio 1:1 or 1:2 */
+
+/* Request ALP on backplane bit 3 and 18 */
+#define ACC_REQALP ((1<<3) | (1<<18))
+
+#define CHIP_SDRENABLE(sih) (sih->boardflags2 & BFL2_SDR_EN)
+#define CHIP_TCMPROTENAB(sih) (si_arm_sflags(sih) & SISF_TCMPROT)
+
+/* Power Control */
+#define ARM_ENAB_MEM_CLK_GATE_SHIFT 5
+
+#define ROM_STBY_TIMER_4378 0xb0
+#define RAM_STBY_TIMER_4378 0x64
+
+#define ROM_STBY_TIMER_4387 0x10
+#define RAM_STBY_TIMER_4387 0x100
+
+#define RAM_STBY_DEFAULT_WAIT_TIME (3u)
+#define ROM_STBY_DEFAULT_WAIT_TIME (4u)
+#define DEFAULT_FORCE_STBY_IN_WFI (1u)
+#define ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_SHIFT (0u)
+#define ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_MASK (0xF << \
+ ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_SHIFT)
+#define ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_SHIFT (8u)
+#define ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_MASK (0x3F << \
+ ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_SHIFT)
+#define ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_SHIFT (16u)
+#define ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_MASK (0x1 << \
+ ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_SHIFT)
+
+/* using CHIPID because no capabilities bit */
+#define ARM_CLKGATING_CAP(sih) ((void)(sih), (BCM4378_CHIP(sih->chip) ||\
+ BCM4387_CHIP(sih->chip)))
+
+#define ARM_CLKGATING_ENAB(sih) (ARM_CLKGATING_CAP(sih) && 1)
+
+#elif defined(__ARM_ARCH_7A__)
+
+#if defined(CA7)
+/* backplane related stuff */
+#define ARM_CORE_ID ARMCA7_CORE_ID
+#define SI_ARM_ROM SI_ARMCA7_ROM /**< ROM backplane/system address */
+
+#else
+/* backplane related stuff */
+#define ARM_CORE_ID ARMCA9_CORE_ID /* arm coreid */
+#endif /* CA7 */
+#else /* !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ */
+#error Unrecognized ARM Architecture
+#endif /* !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ */
+
+#endif /* DONGLEBUILD */
+
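(Editorial note, not part of the patch: the ARMCR4_DYN_STBY_CTRL_* fields above follow the usual shift-then-mask composition. A minimal sketch of building a standby-control value from the defaults; `stby` is a hypothetical local.)

    uint32 stby = 0;

    stby |= (RAM_STBY_DEFAULT_WAIT_TIME << ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_SHIFT) &
            ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_MASK;
    stby |= (ROM_STBY_DEFAULT_WAIT_TIME << ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_SHIFT) &
            ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_MASK;
    stby |= (DEFAULT_FORCE_STBY_IN_WFI << ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_SHIFT) &
            ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_MASK;
    /* stby would then be written to the CR4 dyn_stby_control register (cr4regs_t below) */
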
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* cortex-m3 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corestatus; /* 0x4 */
+ uint32 PAD[1];
+ uint32 biststatus; /* 0xc */
+ uint32 nmiisrst; /* 0x10 */
+ uint32 nmimask; /* 0x14 */
+ uint32 isrmask; /* 0x18 */
+ uint32 PAD[1];
+ uint32 resetlog; /* 0x20 */
+ uint32 gpioselect; /* 0x24 */
+ uint32 gpioenable; /* 0x28 */
+ uint32 PAD[1];
+ uint32 bpaddrlo; /* 0x30 */
+ uint32 bpaddrhi; /* 0x34 */
+ uint32 bpdata; /* 0x38 */
+ uint32 bpindaccess; /* 0x3c */
+ uint32 ovlidx; /* 0x40 */
+ uint32 ovlmatch; /* 0x44 */
+ uint32 ovladdr; /* 0x48 */
+ uint32 PAD[13];
+ uint32 bwalloc; /* 0x80 */
+ uint32 PAD[3];
+ uint32 cyclecnt; /* 0x90 */
+ uint32 inttimer; /* 0x94 */
+ uint32 intmask; /* 0x98 */
+ uint32 intstatus; /* 0x9c */
+ uint32 PAD[80];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 PAD[1];
+ uint32 powerctl; /* 0x1e8 */
+} cm3regs_t;
+#define ARM_CM3_REG(regs, reg) (&((cm3regs_t *)regs)->reg)
+
+/* cortex-R4 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corecapabilities; /* 0x4 */
+ uint32 corestatus; /* 0x8 */
+ uint32 biststatus; /* 0xc */
+ uint32 nmiisrst; /* 0x10 */
+ uint32 nmimask; /* 0x14 */
+ uint32 isrmask; /* 0x18 */
+ uint32 swintreg; /* 0x1C */
+ uint32 intstatus; /* 0x20 */
+ uint32 intmask; /* 0x24 */
+ uint32 cyclecnt; /* 0x28 */
+ uint32 inttimer; /* 0x2c */
+ uint32 gpioselect; /* 0x30 */
+ uint32 gpioenable; /* 0x34 */
+ uint32 PAD[2];
+ uint32 bankidx; /* 0x40 */
+ uint32 bankinfo; /* 0x44 */
+ uint32 bankstbyctl; /* 0x48 */
+ uint32 bankpda; /* 0x4c */
+ uint32 dyn_stby_control; /* 0x50 */
+ uint32 PAD[5];
+ uint32 tcampatchctrl; /* 0x68 */
+ uint32 tcampatchtblbaseaddr; /* 0x6c */
+ uint32 tcamcmdreg; /* 0x70 */
+ uint32 tcamdatareg; /* 0x74 */
+ uint32 tcambankxmaskreg; /* 0x78 */
+ uint32 PAD[5];
+ uint32 mpucontrol; /* 0x90 */
+ uint32 mpucapabilities; /* 0x94 */
+ uint32 rom_reloc_addr; /* 0x98 */
+ uint32 PAD[1];
+ uint32 region_n_regs[16]; /* 0xa0 - 0xdc */
+ uint32 PAD[16];
+ uint32 initiat_n_masks[16]; /* 0x120 - 0x15c */
+ uint32 PAD[32];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 hw_war; /* 0x1e4 */
+ uint32 powerctl; /* 0x1e8 */
+ uint32 powerctl2; /* 0x1ec */
+} cr4regs_t;
+#define ARM_CR4_REG(regs, reg) (&((cr4regs_t *)regs)->reg)
+
+#define SBRESETLOG_CR4 0x4
+
+/* cortex-A7 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corecapabilities; /* 0x4 */
+ uint32 corestatus; /* 0x8 */
+ uint32 tracecontrol; /* 0xc */
+ uint32 gpioselect; /* 0x10 */
+ uint32 gpioenable; /* 0x14 */
+ uint32 PAD[114];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 workaround; /* 0x1e4 */
+ uint32 powerctl; /* 0x1e8 */
+ uint32 powerctl2; /* 0x1ec */
+} ca7regs_t;
+#define ARM_CA7_REG(regs, reg) (&((ca7regs_t *)regs)->reg)
+
+#if defined(__ARM_ARCH_7M__)
+#define ARMREG(regs, reg) ARM_CM3_REG(regs, reg)
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__)
+#define ARMREG(regs, reg) ARM_CR4_REG(regs, reg)
+#endif /* __ARM_ARCH_7R__ */
+
+#if defined(__ARM_ARCH_7A__)
+#define ARMREG(regs, reg) ARM_CA7_REG(regs, reg)
+#endif /* __ARM_ARCH_7A__ */
+
+/* MPU is present mask of ca7regs_t->corecapabilities */
+#define CAP_MPU_MASK 0x2000000u
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+#endif /* _sbhndarm_h_ */
diff --git
a/bcmdhd.101.10.361.x/include/sbhnddma.h b/bcmdhd.101.10.361.x/include/sbhnddma.h new file mode 100755 index 0000000..bf7f3ba --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbhnddma.h @@ -0,0 +1,481 @@ +/* + * Generic Broadcom Home Networking Division (HND) DMA engine HW interface + * This supports the following chips: BCM42xx, 44xx, 47xx . + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _sbhnddma_h_ +#define _sbhnddma_h_ + +/* DMA structure: + * support two DMA engines: 32 bits address or 64 bit addressing + * basic DMA register set is per channel(transmit or receive) + * a pair of channels is defined for convenience + */ + +/* 32 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 addr; /**< descriptor ring base address (4K aligned) */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 status; /**< current active descriptor, et al */ +} dma32regs_t; + +typedef volatile struct { + dma32regs_t xmt; /**< dma tx channel */ + dma32regs_t rcv; /**< dma rx channel */ +} dma32regp_t; + +typedef volatile struct { /* diag access */ + uint32 fifoaddr; /**< diag address */ + uint32 fifodatalow; /**< low 32bits of data */ + uint32 fifodatahigh; /**< high 32bits of data */ + uint32 pad; /**< reserved */ +} dma32diag_t; + +/** + * DMA Descriptor + * Descriptors are only read by the hardware, never written back. + */ +typedef volatile struct { + uint32 ctrl; /**< misc control bits & bufcount */ + uint32 addr; /**< data buffer address */ +} dma32dd_t; + +/** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. 
*/ +#define D32RINGALIGN_BITS 12 +#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) +#define D32RINGALIGN (1 << D32RINGALIGN_BITS) + +#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) + +/* transmit channel control */ +#define XC_XE ((uint32)1 << 0) /**< transmit enable */ +#define XC_SE ((uint32)1 << 1) /**< transmit suspend request */ +#define XC_LE ((uint32)1 << 2) /**< loopback enable */ +#define XC_FL ((uint32)1 << 4) /**< flush request */ +#define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define XC_MR_SHIFT 6 +#define XC_PD ((uint32)1 << 11) /**< parity check disable */ +#define XC_AE ((uint32)3 << 16) /**< address extension bits */ +#define XC_AE_SHIFT 16 +#define XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define XC_BL_SHIFT 18 +#define XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define XC_PC_SHIFT 21 +#define XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define XC_PT_SHIFT 24 + +/** Multiple outstanding reads */ +#define DMA_MR_1 0 +#define DMA_MR_2 1 +#define DMA_MR_4 2 +#define DMA_MR_8 3 +#define DMA_MR_12 4 +#define DMA_MR_16 5 +#define DMA_MR_20 6 +#define DMA_MR_32 7 + +/** DMA Burst Length in bytes */ +#define DMA_BL_16 0 +#define DMA_BL_32 1 +#define DMA_BL_64 2 +#define DMA_BL_128 3 +#define DMA_BL_256 4 +#define DMA_BL_512 5 +#define DMA_BL_1024 6 +#define DMA_BL_INVALID 0xFF + +/** Prefetch control */ +#define DMA_PC_0 0 +#define DMA_PC_4 1 +#define DMA_PC_8 2 +#define DMA_PC_16 3 +#define DMA_PC_32 4 +/* others: reserved */ + +/** Prefetch threshold */ +#define DMA_PT_1 0 +#define DMA_PT_2 1 +#define DMA_PT_4 2 +#define DMA_PT_8 3 + +/** Channel Switch */ +#define DMA_CS_OFF 0 +#define DMA_CS_ON 1 + +/* transmit descriptor table pointer */ +#define XP_LD_MASK 0xfff /**< last valid descriptor */ + +/* transmit channel status */ +#define XS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define XS_XS_MASK 0xf000 /**< transmit state */ +#define XS_XS_SHIFT 12 +#define XS_XS_DISABLED 0x0000 /**< disabled */ +#define XS_XS_ACTIVE 0x1000 /**< active */ +#define XS_XS_IDLE 0x2000 /**< idle wait */ +#define XS_XS_STOPPED 0x3000 /**< stopped */ +#define XS_XS_SUSP 0x4000 /**< suspend pending */ +#define XS_XE_MASK 0xf0000 /**< transmit errors */ +#define XS_XE_SHIFT 16 +#define XS_XE_NOERR 0x00000 /**< no error */ +#define XS_XE_DPE 0x10000 /**< descriptor protocol error */ +#define XS_XE_DFU 0x20000 /**< data fifo underrun */ +#define XS_XE_BEBR 0x30000 /**< bus error on buffer read */ +#define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */ +#define XS_AD_MASK 0xfff00000 /**< active descriptor */ +#define XS_AD_SHIFT 20 + +/* receive channel control */ +#define RC_RE ((uint32)1 << 0) /**< receive enable */ +#define RC_RO_MASK 0xfe /**< receive frame offset */ +#define RC_RO_SHIFT 1 +#define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */ +#define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */ +#define RC_OC ((uint32)1 << 10) /**< overflow continue */ +#define RC_PD ((uint32)1 << 11) /**< parity check disable */ +#define RC_AE ((uint32)3 << 16) /**< address extension bits */ +#define RC_AE_SHIFT 16 +#define RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define RC_BL_SHIFT 18 +#define RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define RC_PC_SHIFT 21 +#define RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define RC_PT_SHIFT 24 +#define RC_WAITCMP_MASK 0x00001000 +#define RC_WAITCMP_SHIFT 12 +/* receive descriptor table pointer */ +#define RP_LD_MASK 0xfff /**< last valid descriptor */ + +/* 
receive channel status */
+#define RS_CD_MASK 0x0fff /**< current descriptor pointer */
+#define RS_RS_MASK 0xf000 /**< receive state */
+#define RS_RS_SHIFT 12
+#define RS_RS_DISABLED 0x0000 /**< disabled */
+#define RS_RS_ACTIVE 0x1000 /**< active */
+#define RS_RS_IDLE 0x2000 /**< idle wait */
+#define RS_RS_STOPPED 0x3000 /**< reserved */
+#define RS_RE_MASK 0xf0000 /**< receive errors */
+#define RS_RE_SHIFT 16
+#define RS_RE_NOERR 0x00000 /**< no error */
+#define RS_RE_DPE 0x10000 /**< descriptor protocol error */
+#define RS_RE_DFO 0x20000 /**< data fifo overflow */
+#define RS_RE_BEBW 0x30000 /**< bus error on buffer write */
+#define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */
+#define RS_AD_MASK 0xfff00000 /**< active descriptor */
+#define RS_AD_SHIFT 20
+
+/* fifoaddr */
+#define FA_OFF_MASK 0xffff /**< offset */
+#define FA_SEL_MASK 0xf0000 /**< select */
+#define FA_SEL_SHIFT 16
+#define FA_SEL_XDD 0x00000 /**< transmit dma data */
+#define FA_SEL_XDP 0x10000 /**< transmit dma pointers */
+#define FA_SEL_RDD 0x40000 /**< receive dma data */
+#define FA_SEL_RDP 0x50000 /**< receive dma pointers */
+#define FA_SEL_XFD 0x80000 /**< transmit fifo data */
+#define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */
+#define FA_SEL_RFD 0xc0000 /**< receive fifo data */
+#define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */
+#define FA_SEL_RSD 0xe0000 /**< receive frame status data */
+#define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */
+
+/* descriptor control flags */
+#define CTRL_BC_MASK 0x00001fff /**< buffer byte count, real data len must be <= 4KB */
+#define CTRL_AE ((uint32)3 << 16) /**< address extension bits */
+#define CTRL_AE_SHIFT 16
+#define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */
+#define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */
+#define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */
+#define CTRL_EOF ((uint32)1 << 30) /**< end of frame */
+#define CTRL_SOF ((uint32)1 << 31) /**< start of frame */
+
+/** control flags in the range [27:20] are core-specific and not defined here */
+#define CTRL_CORE_MASK 0x0ff00000
+
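(Editorial note, not part of the patch: a worked illustration of the 32-bit descriptor flags above, marking frame and ring boundaries when posting a single-buffer frame. `ring`, `i`, `pa`, and `len` are hypothetical locals.)

    dma32dd_t *ring;   /* assume: descriptor ring base (4K aligned) */
    uint32 i, pa, len; /* assume: ring index, buffer physical address, byte count */

    uint32 ctrl = (len & CTRL_BC_MASK) | CTRL_SOF | CTRL_EOF | CTRL_IOC;
    if (i == D32MAXDD - 1)
        ctrl |= CTRL_EOT;  /* last ring entry: the engine wraps back to index 0 */
    ring[i].addr = pa;     /* data buffer address */
    ring[i].ctrl = ctrl;
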
Align it to maximize ability to grow + */ +#define D64POOLALIGN_BITS 15u +#define D64POOLALIGN_BITS_MAX 16u +/** + * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address range. + */ +#define D64RINGALIGN_BITS 13 +#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) +#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS) + +#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) + +/** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */ +#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t)) + +/** + * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross + * 64K boundary + */ +#define D64RINGBOUNDARY_LARGE (1 << 16) + +/* + * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11. + * When this field contains the value N, the burst length is 2**(N + 4) bytes. + */ +#define D64_DEF_USBBURSTLEN 2 +#define D64_DEF_SDIOBURSTLEN 1 + +#ifndef D64_USBBURSTLEN +#define D64_USBBURSTLEN DMA_BL_64 +#endif +#ifndef D64_SDIOBURSTLEN +#define D64_SDIOBURSTLEN DMA_BL_32 +#endif
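+
+/*
+ * Editor's illustration (an added sketch, not part of the original Broadcom
+ * sources; assumes uint32/INLINE from this driver's typedefs.h): with the
+ * 2**(N + 4) encoding above, D64_DEF_USBBURSTLEN (2) selects 2**(2 + 4) = 64
+ * bytes, which is why D64_USBBURSTLEN defaults to DMA_BL_64.
+ */
+static INLINE uint32 dma_burstlen_bytes(uint32 bl)
+{
+	/* valid for DMA_BL_16 (0) .. DMA_BL_1024 (6), not for DMA_BL_INVALID */
+	return (uint32)1 << (bl + 4);
+}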
+/* transmit channel control */ +#define D64_XC_XE 0x00000001 /**< transmit enable */ +#define D64_XC_SE 0x00000002 /**< transmit suspend request */ +#define D64_XC_LE 0x00000004 /**< loopback enable */ +#define D64_XC_FL 0x00000010 /**< flush request */ +#define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define D64_XC_MR_SHIFT 6 +#define D64_XC_CS_SHIFT 9 /**< channel switch enable */ +#define D64_XC_CS_MASK 0x00000200 /**< channel switch enable */ +#define D64_XC_PD 0x00000800 /**< parity check disable */ +#define D64_XC_AE 0x00030000 /**< address extension bits */ +#define D64_XC_AE_SHIFT 16 +#define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define D64_XC_BL_SHIFT 18 +#define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define D64_XC_PC_SHIFT 21 +#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define D64_XC_PT_SHIFT 24 +#define D64_XC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */ +#define D64_XC_CO_SHIFT 26 + +/* transmit descriptor table pointer */ +#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */ + +/* transmit channel status */ +#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */ +#define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */ +#define D64_XS0_XS_SHIFT 28 +#define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */ +#define D64_XS0_XS_ACTIVE 0x10000000 /**< active */ +#define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */ +#define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */ +#define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */ + +#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */ +#define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */ +#define D64_XS1_XE_SHIFT 28 +#define D64_XS1_XE_NOERR 0x00000000 /**< no error */ +#define D64_XS1_XE_DPE 0x10000000 /**< descriptor protocol error */ +#define D64_XS1_XE_DFU 0x20000000 /**< data fifo underrun */ +#define D64_XS1_XE_DTE 0x30000000 /**< data transfer error */ +#define D64_XS1_XE_DESRE 0x40000000 /**< descriptor read error */ +#define D64_XS1_XE_COREE 0x50000000 /**< core error */ + +/* receive channel control */ +#define D64_RC_RE 0x00000001 /**< receive enable */ +#define D64_RC_RO_MASK 0x000000fe /**< receive frame offset */ +#define D64_RC_RO_SHIFT 1 +#define D64_RC_FM 0x00000100 /**< direct fifo receive (pio) mode */ +#define D64_RC_SH 0x00000200 /**< separate rx header descriptor enable */ +#define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */ +#define D64_RC_OC 0x00000400 /**< overflow continue */ +#define D64_RC_PD 0x00000800 /**< parity check disable */ +#define D64_RC_WAITCMP_MASK 0x00001000 +#define D64_RC_WAITCMP_SHIFT 12 +#define D64_RC_SA 0x00002000 /**< select active */ +#define D64_RC_GE 0x00004000 /**< Glom enable */ +#define D64_RC_AE 0x00030000 /**< address extension bits */ +#define D64_RC_AE_SHIFT 16 +#define D64_RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define D64_RC_BL_SHIFT 18 +#define D64_RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define D64_RC_PC_SHIFT 21 +#define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define D64_RC_PT_SHIFT 24 +#define D64_RC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */ +#define D64_RC_CO_SHIFT 26 +#define D64_RC_ROEXT_MASK 0x08000000 /**< receive frame offset extension bit */ +#define D64_RC_ROEXT_SHIFT 27 +#define D64_RC_MOW_SHIFT (28u) /**< multiple outstanding write */ +#define D64_RC_MOW_MASK ((0x3u) << D64_RC_MOW_SHIFT) + +/* receive control values */ +/* RcvCtrl.MultipleOutstandingWrites(MOW) valid values(N) listed below. + * (N + 1) outstanding write(s) supported + */ +#define D64_RC_MOW_1 (0u) /**< 1 outstanding write */ +#define D64_RC_MOW_2 (1u) /**< 2 outstanding writes */ +#define D64_RC_MOW_3 (2u) /**< 3 outstanding writes */ +#define D64_RC_MOW_4 (3u) /**< 4 outstanding writes */ + +/* flags for dma controller */ +#define DMA_CTRL_PEN (1u << 0u) /**< parity enable */ +#define DMA_CTRL_ROC (1u << 1u) /**< rx overflow continue */ +#define DMA_CTRL_RXMULTI (1u << 2u) /**< allow rx scatter to multiple descriptors */ +#define DMA_CTRL_UNFRAMED (1u << 3u) /**< Unframed Rx/Tx data */ +#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1u << 4u) /**< USB core REV9's SETUP dma channel's + * buffer cannot cross a 4K boundary (PR80468) + */ +#define DMA_CTRL_DMA_AVOIDANCE_WAR (1u << 5u) /**< DMA avoidance WAR for 4331 */ +#define DMA_CTRL_RXSINGLE (1u << 6u) /**< always single buffer */ +#define DMA_CTRL_SDIO_RXGLOM (1u << 7u) /**< DMA Rx glom is enabled */ +#define DMA_CTRL_DESC_ONLY_FLAG (1u << 8u) /**< For DMA which posts only descriptors, + * no packets + */ +#define DMA_CTRL_DESC_CD_WAR (1u << 9u) /**< WAR for descriptor only DMA's CD not being + * updated correctly by HW in CT mode. 
+ */ +#define DMA_CTRL_CS (1u << 10u) /* channel switch enable */ +#define DMA_CTRL_ROEXT (1u << 11u) /* receive frame offset extension support */ +#define DMA_CTRL_RX_ALIGN_8BYTE (1u << 12u) /* RXDMA address 8-byte aligned */ +#define DMA_CTRL_SHARED_POOL (1u << 15u) /** shared descriptor pool */ +#define DMA_CTRL_COREUNIT_SHIFT (17u) /* Core unit shift */ +#define DMA_CTRL_COREUNIT_MASK (0x3u << 17u) /* Core unit mask */ + +#define DMA_CTRL_SET_COREUNIT(di, coreunit) \ + ((di)->hnddma.dmactrlflags |= \ + (((coreunit) << DMA_CTRL_COREUNIT_SHIFT) & DMA_CTRL_COREUNIT_MASK)) + +#define DMA_CTRL_GET_COREUNIT(di) \ + (((di)->hnddma.dmactrlflags & DMA_CTRL_COREUNIT_MASK) >> DMA_CTRL_COREUNIT_SHIFT) + +/* receive descriptor table pointer */ +#define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */ + +/* receive channel status */ +#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /**< current descriptor pointer */ +#define D64_RS0_RS_MASK 0xf0000000 /**< receive state */ +#define D64_RS0_RS_SHIFT 28 +#define D64_RS0_RS_DISABLED 0x00000000 /**< disabled */ +#define D64_RS0_RS_ACTIVE 0x10000000 /**< active */ +#define D64_RS0_RS_IDLE 0x20000000 /**< idle wait */ +#define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */ +#define D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */ + +#define D64_RS1_AD_MASK (di->d64_rs1_ad_mask) /* active descriptor pointer */ +#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ +#define D64_RS1_RE_SHIFT 28 +#define D64_RS1_RE_NOERR 0x00000000 /**< no error */ +#define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */ +#define D64_RS1_RE_DFU 0x20000000 /**< data fifo overflow */ +#define D64_RS1_RE_DTE 0x30000000 /**< data transfer error */ +#define D64_RS1_RE_DESRE 0x40000000 /**< descriptor read error */ +#define D64_RS1_RE_COREE 0x50000000 /**< core error */ + +/* fifoaddr */ +#define D64_FA_OFF_MASK 0xffff /**< offset */ +#define D64_FA_SEL_MASK 0xf0000 /**< select */ +#define D64_FA_SEL_SHIFT 16 +#define D64_FA_SEL_XDD 0x00000 /**< transmit dma data */ +#define D64_FA_SEL_XDP 0x10000 /**< transmit dma pointers */ +#define D64_FA_SEL_RDD 0x40000 /**< receive dma data */ +#define D64_FA_SEL_RDP 0x50000 /**< receive dma pointers */ +#define D64_FA_SEL_XFD 0x80000 /**< transmit fifo data */ +#define D64_FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ +#define D64_FA_SEL_RFD 0xc0000 /**< receive fifo data */ +#define D64_FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ +#define D64_FA_SEL_RSD 0xe0000 /**< receive frame status data */ +#define D64_FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ + +/* descriptor control flags 1 */ +#define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */ + +/**< bzero operation for receive channels or a compare-to-zero operation for transmit engines */ +#define D64_CTRL1_BIT_BZEROBCMP (15u) +/* WAR for JIRA CRWLDMA-245 */ +#define D64_DMA_COREFLAGS_WAR_BIT (25u) + +#define D64_CTRL1_COHERENT ((uint32)1 << 17) /**< cache coherent per transaction */ +#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< burst size control */ +#define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */ +#define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */ +#define D64_CTRL1_EOF ((uint32)1 << 30) /**< end of frame */ +#define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */ +#define D64_CTRL1_SOFPTR 0x0000FFFFu +#define D64_CTRL1_NUMD_MASK 0x00F00000u +#define D64_CTRL1_NUMD_SHIFT 20u + +/* descriptor control flags 2 */ +#define D64_CTRL2_MAX_LEN 0x0000fff7 /* Max transfer length (buffer 
byte count) <= 65527 */ +#define D64_CTRL2_BC_MASK 0x0000ffff /**< mask for buffer byte count */ +#define D64_CTRL2_AE 0x00030000 /**< address extension bits */ +#define D64_CTRL2_AE_SHIFT 16 +#define D64_CTRL2_PARITY 0x00040000 /* parity bit */ + +/** control flags in the range [27:20] are core-specific and not defined here */ +#define D64_CTRL_CORE_MASK 0x0ff00000 + +#define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */ +#define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */ +#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */ +#define D64_RX_FRM_STS_DSCRCNT_SHIFT 24 /* Shift for no. of dma descriptor field */ +#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */ + +#define BCM_D64_CTRL2_BOUND_DMA_LENGTH(len) \ +(((len) > D64_CTRL2_MAX_LEN) ? D64_CTRL2_MAX_LEN : (len)) + +/** receive frame status */ +typedef volatile struct { + uint16 len; + uint16 flags; +} dma_rxh_t; + +#endif /* _sbhnddma_h_ */ diff --git a/bcmdhd.101.10.361.x/include/sbhndpio.h b/bcmdhd.101.10.361.x/include/sbhndpio.h new file mode 100755 index 0000000..f4038f3 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbhndpio.h @@ -0,0 +1,60 @@ +/* + * Generic Broadcom Home Networking Division (HND) PIO engine HW interface + * This supports the following chips: BCM42xx, 44xx, 47xx. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _sbhndpio_h_ +#define _sbhndpio_h_ + +/* PIO structure, + * supports two PIO formats: 2-byte access and 4-byte access + * basic FIFO register set is per channel(transmit or receive) + * a pair of channels is defined for convenience + */ + +/* 2byte-wide pio register set per channel(xmt or rcv) */ +typedef volatile struct { + uint16 fifocontrol; + uint16 fifodata; + uint16 fifofree; /* only valid in xmt channel, not in rcv channel */ + uint16 PAD; +} pio2regs_t; + +/* a pair of pio channels(tx and rx) */ +typedef volatile struct { + pio2regs_t tx; + pio2regs_t rx; +} pio2regp_t; + +/* 4byte-wide pio register set per channel(xmt or rcv) */ +typedef volatile struct { + uint32 fifocontrol; + uint32 fifodata; +} pio4regs_t; + +/* a pair of pio channels(tx and rx) */ +typedef volatile struct { + pio4regs_t tx; + pio4regs_t rx; +} pio4regp_t; + +#endif /* _sbhndpio_h_ */ diff --git a/bcmdhd.101.10.361.x/include/sbpcmcia.h b/bcmdhd.101.10.361.x/include/sbpcmcia.h new file mode 100755 index 0000000..77f65f4 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbpcmcia.h @@ -0,0 +1,415 @@ +/* + * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. + * + * Copyright (C) 2020, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _SBPCMCIA_H +#define _SBPCMCIA_H + +/* All the addresses that are offsets in attribute space are divided + * by two to account for the fact that odd bytes are invalid in + * attribute space and our read/write routines make the space appear + * as if they didn't exist. Still we want to show the original numbers + * as documented in the hnd_pcmcia core manual. + */ + +/* PCMCIA Function Configuration Registers */ +#define PCMCIA_FCR (0x700 / 2) + +#define FCR0_OFF 0 +#define FCR1_OFF (0x40 / 2) +#define FCR2_OFF (0x80 / 2) +#define FCR3_OFF (0xc0 / 2) + +#define PCMCIA_FCR0 (0x700 / 2) +#define PCMCIA_FCR1 (0x740 / 2) +#define PCMCIA_FCR2 (0x780 / 2) +#define PCMCIA_FCR3 (0x7c0 / 2) + +/* Standard PCMCIA FCR registers */ + +#define PCMCIA_COR 0 + +#define COR_RST 0x80 +#define COR_LEV 0x40 +#define COR_IRQEN 0x04 +#define COR_BLREN 0x01 +#define COR_FUNEN 0x01 + +#define PCICIA_FCSR (2 / 2) +#define PCICIA_PRR (4 / 2) +#define PCICIA_SCR (6 / 2) +#define PCICIA_ESR (8 / 2) + +#define PCM_MEMOFF 0x0000 +#define F0_MEMOFF 0x1000 +#define F1_MEMOFF 0x2000 +#define F2_MEMOFF 0x3000 +#define F3_MEMOFF 0x4000 + +/* Memory base in the function fcr's */ +#define MEM_ADDR0 (0x728 / 2) +#define MEM_ADDR1 (0x72a / 2) +#define MEM_ADDR2 (0x72c / 2) + +/* PCMCIA base plus Srom access in fcr0: */ +#define PCMCIA_ADDR0 (0x072e / 2) +#define PCMCIA_ADDR1 (0x0730 / 2) +#define PCMCIA_ADDR2 (0x0732 / 2) + +#define MEM_SEG (0x0734 / 2) +#define SROM_CS (0x0736 / 2) +#define SROM_DATAL (0x0738 / 2) +#define SROM_DATAH (0x073a / 2) +#define SROM_ADDRL (0x073c / 2) +#define SROM_ADDRH (0x073e / 2) +#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */ +#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */ + +/* Values for srom_cs: */ +#define SROM_IDLE 0 +#define SROM_WRITE 1 +#define SROM_READ 2 +#define SROM_WEN 4 +#define SROM_WDS 7 +#define SROM_DONE 8 + +/* Fields in srom_info: */ +#define SRI_SZ_MASK 0x03 +#define SRI_BLANK 0x04 +#define SRI_OTP 0x80 + +#define SROM16K_BANK_SEL_MASK (3 << 11) +#define SROM16K_BANK_SHFT_MASK 11 +#define SROM16K_ADDR_SEL_MASK ((1 << SROM16K_BANK_SHFT_MASK) - 1) +#define SROM_PRSNT_MASK 0x1 +#define SROM_SUPPORT_SHIFT_MASK 30 +#define SROM_SUPPORTED (0x1 << SROM_SUPPORT_SHIFT_MASK) +#define SROM_SIZE_MASK 0x00000006 +#define SROM_SIZE_2K 2 +#define SROM_SIZE_512 1 +#define SROM_SIZE_128 0 +#define SROM_SIZE_SHFT_MASK 1 + +/* CIS stuff */ + +/* The CIS stops where the FCRs start */ +#define CIS_SIZE PCMCIA_FCR +#define CIS_SIZE_12K 1154 /* Maximum h/w + s/w sub region size for 12k OTP */ + +/* CIS tuple length field max */ +#define CIS_TUPLE_LEN_MAX 0xff + +/* Standard tuples we know about */ + +#define 
CISTPL_NULL 0x00 +#define CISTPL_END 0xff /* End of the CIS tuple chain */ + +#define CISTPL_VERS_1 0x15 /* CIS ver, manf, dev & ver strings */ +#define CISTPL_MANFID 0x20 /* Manufacturer and device id */ +#define CISTPL_FUNCID 0x21 /* Function identification */ +#define CISTPL_FUNCE 0x22 /* Function extensions */ +#define CISTPL_CFTABLE 0x1b /* Config table entry */ + +/* Function identifier provides context for the function extensions tuple */ +#define CISTPL_FID_SDIO 0x0c /* Extensions defined by SDIO spec */ + +/* Function extensions for LANs (assumed for extensions other than SDIO) */ +#define LAN_TECH 1 /* Technology type */ +#define LAN_SPEED 2 /* Raw bit rate */ +#define LAN_MEDIA 3 /* Transmission media */ +#define LAN_NID 4 /* Node identification (aka MAC addr) */ +#define LAN_CONN 5 /* Connector standard */ + +/* CFTable */ +#define CFTABLE_REGWIN_2K 0x08 /* 2k reg window size */ +#define CFTABLE_REGWIN_4K 0x10 /* 4k reg window size */ +#define CFTABLE_REGWIN_8K 0x20 /* 8k reg window size */ + +/* Vendor unique tuples are 0x80-0x8f. Within Broadcom we'll + * take one for HNBU, and use "extensions" (a la FUNCE) within it. + */ + +#define CISTPL_BRCM_HNBU 0x80 + +/* Subtypes of BRCM_HNBU: */ + +#define HNBU_SROMREV 0x00 /* A byte with sromrev, 1 if not present */ +#define HNBU_CHIPID 0x01 /* Two 16bit values: PCI vendor & device id */ + +#define HNBU_BOARDREV 0x02 /* One byte board revision */ + +#define HNBU_PAPARMS 0x03 /* PA parameters: 8 (sromrev == 1) + * or 9 (sromrev > 1) bytes + */ +#define HNBU_OEM 0x04 /* Eight bytes OEM data (sromrev == 1) */ +#define HNBU_CC 0x05 /* Default country code (sromrev == 1) */ +#define HNBU_AA 0x06 /* Antennas available */ +#define HNBU_AG 0x07 /* Antenna gain */ +#define HNBU_BOARDFLAGS 0x08 /* board flags (2 or 4 bytes) */ +#define HNBU_UNUSED 0x09 /* UNUSED (was LEDs) */ +#define HNBU_CCODE 0x0a /* Country code (2 bytes ascii + 1 byte cctl) + * in rev 2 + */ +#define HNBU_CCKPO 0x0b /* 2 byte cck power offsets in rev 3 */ +#define HNBU_OFDMPO 0x0c /* 4 byte 11g ofdm power offsets in rev 3 */ +#define HNBU_GPIOTIMER 0x0d /* 2 bytes with on/off values in rev 3 */ +#define HNBU_PAPARMS5G 0x0e /* 5G PA params */ +#define HNBU_ANT5G 0x0f /* 4328 5G antennas available/gain */ +#define HNBU_RDLID 0x10 /* 2 byte USB remote downloader (RDL) product Id */ +#define HNBU_RSSISMBXA2G 0x11 /* 4328 2G RSSI mid pt sel & board switch arch, + * 2 bytes, rev 3. + */ +#define HNBU_RSSISMBXA5G 0x12 /* 4328 5G RSSI mid pt sel & board switch arch, + * 2 bytes, rev 3. 
+ */ +#define HNBU_XTALFREQ 0x13 /* 4 byte Crystal frequency in kilohertz */ +#define HNBU_TRI2G 0x14 /* 4328 2G TR isolation, 1 byte */ +#define HNBU_TRI5G 0x15 /* 4328 5G TR isolation, 3 bytes */ +#define HNBU_RXPO2G 0x16 /* 4328 2G RX power offset, 1 byte */ +#define HNBU_RXPO5G 0x17 /* 4328 5G RX power offset, 1 byte */ +#define HNBU_BOARDNUM 0x18 /* board serial number, independent of mac addr */ +#define HNBU_MACADDR 0x19 /* mac addr override for the standard CIS LAN_NID */ +#define HNBU_RDLSN 0x1a /* 2 bytes; serial # advertised in USB descriptor */ + +#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */ + +#define HNBU_UNUSED2 0x1c /* was LEDs duty cycle */ + +#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */ + +#define HNBU_PAPARMS_SSLPNPHY 0x1e /* SSLPNPHY PA params */ +#define HNBU_RSSISMBXA2G_SSLPNPHY 0x1f /* SSLPNPHY RSSI mid pt sel & board switch arch */ +#define HNBU_RDLRNDIS 0x20 /* 1 byte; 1 = RDL advertises RNDIS config */ +#define HNBU_CHAINSWITCH 0x21 /* 2 byte; txchain, rxchain */ +#define HNBU_REGREV 0x22 /* 1 byte; */ +#define HNBU_FEM 0x23 /* 2 or 4 byte: 11n frontend specification */ +#define HNBU_PAPARMS_C0 0x24 /* 8 or 30 bytes: 11n pa parameter for chain 0 */ +#define HNBU_PAPARMS_C1 0x25 /* 8 or 30 bytes: 11n pa parameter for chain 1 */ +#define HNBU_PAPARMS_C2 0x26 /* 8 or 30 bytes: 11n pa parameter for chain 2 */ +#define HNBU_PAPARMS_C3 0x27 /* 8 or 30 bytes: 11n pa parameter for chain 3 */ +#define HNBU_PO_CCKOFDM 0x28 /* 6 or 18 bytes: cck2g/ofdm2g/ofdm5g power offset */ +#define HNBU_PO_MCS2G 0x29 /* 8 bytes: mcs2g power offset */ +#define HNBU_PO_MCS5GM 0x2a /* 8 bytes: mcs5g mid band power offset */ +#define HNBU_PO_MCS5GLH 0x2b /* 16 bytes: mcs5g low-high band power offset */ +#define HNBU_PO_CDD 0x2c /* 2 bytes: cdd2g/5g power offset */ +#define HNBU_PO_STBC 0x2d /* 2 bytes: stbc2g/5g power offset */ +#define HNBU_PO_40M 0x2e /* 2 bytes: 40MHz channel 2g/5g power offset */ +#define HNBU_PO_40MDUP 0x2f /* 2 bytes: 40MHz channel dup 2g/5g power offset */ + +#define HNBU_RDLRWU 0x30 /* 1 byte; 1 = RDL advertises Remote Wake-up */ +#define HNBU_WPS 0x31 /* 1 byte; GPIO pin for WPS button */ +#define HNBU_USBFS 0x32 /* 1 byte; 1 = USB advertises FS mode only */ +#define HNBU_BRMIN 0x33 /* 4 byte bootloader min resource mask */ +#define HNBU_BRMAX 0x34 /* 4 byte bootloader max resource mask */ +#define HNBU_PATCH 0x35 /* bootloader patch addr(2b) & data(4b) pair */ +#define HNBU_CCKFILTTYPE 0x36 /* CCK digital filter selection options */ +#define HNBU_OFDMPO5G 0x37 /* 4 * 3 = 12 byte 11a ofdm power offsets in rev 3 */ +#define HNBU_ELNA2G 0x38 +#define HNBU_ELNA5G 0x39 +#define HNBU_TEMPTHRESH 0x3A /* 2 bytes + * byte1 tempthresh + * byte2 period(msb 4 bits) | hysteresis(lsb 4 bits) + */ +#define HNBU_UUID 0x3B /* 16 Bytes Hex */ + +#define HNBU_USBEPNUM 0x40 /* USB endpoint numbers */ + +/* POWER PER RATE for SROM V9 */ +#define HNBU_CCKBW202GPO 0x41 /* 2 bytes each + * CCK Power offsets for 20 MHz rates (11, 5.5, 2, 1Mbps) + * cckbw202gpo cckbw20ul2gpo + */ + +#define HNBU_LEGOFDMBW202GPO 0x42 /* 4 bytes each + * OFDM power offsets for 20 MHz Legacy rates + * (54, 48, 36, 24, 18, 12, 9, 6 Mbps) + * legofdmbw202gpo legofdmbw20ul2gpo + */ + +#define HNBU_LEGOFDMBW205GPO 0x43 /* 4 bytes each + * 5G band: OFDM power offsets for 20 MHz Legacy rates + * (54, 48, 36, 24, 18, 12, 9, 6 Mbps) + * low subband : legofdmbw205glpo legofdmbw20ul2glpo + * mid subband : legofdmbw205gmpo legofdmbw20ul2gmpo + * high subband : legofdmbw205ghpo 
legofdmbw20ul2ghpo + */ + +#define HNBU_MCS2GPO 0x44 /* 4 bytes each + * mcs 0-7 power-offset. LSB nibble: m0, MSB nibble: m7 + * mcsbw202gpo mcsbw20ul2gpo mcsbw402gpo + */ +#define HNBU_MCS5GLPO 0x45 /* 4 bytes each + * 5G low subband mcs 0-7 power-offset. + * LSB nibble: m0, MSB nibble: m7 + * mcsbw205glpo mcsbw20ul5glpo mcsbw405glpo + */ +#define HNBU_MCS5GMPO 0x46 /* 4 bytes each + * 5G mid subband mcs 0-7 power-offset. + * LSB nibble: m0, MSB nibble: m7 + * mcsbw205gmpo mcsbw20ul5gmpo mcsbw405gmpo + */ +#define HNBU_MCS5GHPO 0x47 /* 4 bytes each + * 5G high subband mcs 0-7 power-offset. + * LSB nibble: m0, MSB nibble: m7 + * mcsbw205ghpo mcsbw20ul5ghpo mcsbw405ghpo + */ +#define HNBU_MCS32PO 0x48 /* 2 bytes total + * mcs-32 power offset for each band/subband. + * LSB nibble: 2G band, MSB nibble: + * mcs322ghpo, mcs325gmpo, mcs325glpo, mcs322gpo + */ +#define HNBU_LEG40DUPPO 0x49 /* 2 bytes total + * Additional power offset for Legacy Dup40 transmissions. + * Applied in addition to legofdmbw20ulXpo, X=2g, 5gl, 5gm, or 5gh. + * LSB nibble: 2G band, MSB nibble: 5G band high subband. + * leg40dup5ghpo, leg40dup5gmpo, leg40dup5glpo, leg40dup2gpo + */ + +#define HNBU_PMUREGS 0x4a /* Variable length (5 bytes for each register) + * The setting of the ChipCtrl, PLL, RegulatorCtrl, Up/Down Timer and + * ResourceDependency Table registers. + */ + +#define HNBU_PATCH2 0x4b /* bootloader TCAM patch addr(4b) & data(4b) pair. + * This is required for socram rev 15 onwards. + */ + +#define HNBU_USBRDY 0x4c /* Variable length (up to 5 bytes) + * This is to indicate to the USB/HSIC host controller + * that the device is ready for enumeration. + */ + +#define HNBU_USBREGS 0x4d /* Variable length + * The setting of the devcontrol, HSICPhyCtrl1 and HSICPhyCtrl2 + * registers during the USB initialization. + */ + +#define HNBU_BLDR_TIMEOUT 0x4e /* 2 bytes used for HSIC bootloader to reset chip + * on connect timeout. + * The delay after USBConnect before timing out, i.e. until the + * dongle receives the get_descriptor request. 
+ */ +#define HNBU_USBFLAGS 0x4f +#define HNBU_PATCH_AUTOINC 0x50 +#define HNBU_MDIO_REGLIST 0x51 +#define HNBU_MDIOEX_REGLIST 0x52 +/* Unified OTP: tuple to embed USB manfid inside SDIO CIS */ +#define HNBU_UMANFID 0x53 +#define HNBU_PUBKEY 0x54 /* 128 byte; public key to validate downloaded FW */ +#define HNBU_WOWLGPIO 0x55 /* 1 byte bit 7 initial polarity, bit 6..0 gpio pin */ +#define HNBU_MUXENAB 0x56 /* 1 byte to enable mux options */ +#define HNBU_GCI_CCR 0x57 /* GCI Chip control register */ + +#define HNBU_FEM_CFG 0x58 /* FEM config */ +#define HNBU_ACPA_C0 0x59 /* ACPHY PA parameters: chain 0 */ +#define HNBU_ACPA_C1 0x5a /* ACPHY PA parameters: chain 1 */ +#define HNBU_ACPA_C2 0x5b /* ACPHY PA parameters: chain 2 */ +#define HNBU_MEAS_PWR 0x5c +#define HNBU_PDOFF 0x5d +#define HNBU_ACPPR_2GPO 0x5e /* ACPHY Power-per-rate 2gpo */ +#define HNBU_ACPPR_5GPO 0x5f /* ACPHY Power-per-rate 5gpo */ +#define HNBU_ACPPR_SBPO 0x60 /* ACPHY Power-per-rate sbpo */ +#define HNBU_NOISELVL 0x61 +#define HNBU_RXGAIN_ERR 0x62 +#define HNBU_AGBGA 0x63 +#define HNBU_USBDESC_COMPOSITE 0x64 /* USB WLAN/BT composite descriptor */ +#define HNBU_PATCH_AUTOINC8 0x65 /* Auto increment patch entry for 8 byte patching */ +#define HNBU_PATCH8 0x66 /* Patch entry for 8 byte patching */ +#define HNBU_ACRXGAINS_C0 0x67 /* ACPHY rxgains: chain 0 */ +#define HNBU_ACRXGAINS_C1 0x68 /* ACPHY rxgains: chain 1 */ +#define HNBU_ACRXGAINS_C2 0x69 /* ACPHY rxgains: chain 2 */ +#define HNBU_TXDUTY 0x6a /* Tx duty cycle for ACPHY 5g 40/80 MHz */ +#define HNBU_USBUTMI_CTL 0x6b /* 2 byte USB UTMI/LDO Control */ +#define HNBU_PDOFF_2G 0x6c +#define HNBU_USBSSPHY_UTMI_CTL0 0x6d /* 4 byte USB SSPHY UTMI Control */ +#define HNBU_USBSSPHY_UTMI_CTL1 0x6e /* 4 byte USB SSPHY UTMI Control */ +#define HNBU_USBSSPHY_UTMI_CTL2 0x6f /* 4 byte USB SSPHY UTMI Control */ +#define HNBU_USBSSPHY_SLEEP0 0x70 /* 2 byte USB SSPHY sleep */ +#define HNBU_USBSSPHY_SLEEP1 0x71 /* 2 byte USB SSPHY sleep */ +#define HNBU_USBSSPHY_SLEEP2 0x72 /* 2 byte USB SSPHY sleep */ +#define HNBU_USBSSPHY_SLEEP3 0x73 /* 2 byte USB SSPHY sleep */ +#define HNBU_USBSSPHY_MDIO 0x74 /* USB SSPHY INIT regs setting */ +#define HNBU_USB30PHY_NOSS 0x75 /* USB30 NO Super Speed */ +#define HNBU_USB30PHY_U1U2 0x76 /* USB30 PHY U1U2 Enable */ +#define HNBU_USB30PHY_REGS 0x77 /* USB30 PHY REGs update */ +#define HNBU_GPIO_PULL_DOWN 0x78 /* 4 byte GPIO pull down mask */ + +#define HNBU_SROM3SWRGN 0x80 /* 78 bytes; srom rev 3 s/w region without crc8 + * plus extra info appended. 
+ */ +#define HNBU_RESERVED 0x81 +#define HNBU_CUSTOM1 0x82 /* 4 byte; For non-BRCM post-mfg additions */ +#define HNBU_CUSTOM2 0x83 /* Reserved; For non-BRCM post-mfg additions */ +#define HNBU_ACPAPARAM 0x84 /* ACPHY PAPARAM */ +#define HNBU_ACPA_CCK_C0 0x86 /* ACPHY PA trimming parameters: CCK */ +#define HNBU_ACPA_40 0x87 /* ACPHY PA trimming parameters: 40 */ +#define HNBU_ACPA_80 0x88 /* ACPHY PA trimming parameters: 80 */ +#define HNBU_ACPA_4080 0x89 /* ACPHY PA trimming parameters: 40/80 */ +#define HNBU_SUBBAND5GVER 0x8a /* subband5gver */ +#define HNBU_PAPARAMBWVER 0x8b /* paparambwver */ + +#define HNBU_MCS5Gx1PO 0x8c +#define HNBU_ACPPR_SB8080_PO 0x8d +#define HNBU_TXBFRPCALS 0x8f /* phy txbf rpcalvars */ +#define HNBU_MACADDR2 0x90 /* (optional) 2nd mac-addr for RSDB chips */ + +#define HNBU_ACPA_4X4C0 0x91 +#define HNBU_ACPA_4X4C1 0x92 +#define HNBU_ACPA_4X4C2 0x93 +#define HNBU_ACPA_4X4C3 0x94 +#define HNBU_ACPA_BW20_4X4C0 0x95 +#define HNBU_ACPA_BW40_4X4C0 0x96 +#define HNBU_ACPA_BW80_4X4C0 0x97 +#define HNBU_ACPA_BW20_4X4C1 0x98 +#define HNBU_ACPA_BW40_4X4C1 0x99 +#define HNBU_ACPA_BW80_4X4C1 0x9a +#define HNBU_ACPA_BW20_4X4C2 0x9b +#define HNBU_ACPA_BW40_4X4C2 0x9c +#define HNBU_ACPA_BW80_4X4C2 0x9d +#define HNBU_ACPA_BW20_4X4C3 0x9e +#define HNBU_ACPA_BW40_4X4C3 0x9f +#define HNBU_ACPA_BW80_4X4C3 0xa0 +#define HNBU_ACPA_CCK_C1 0xa1 /* ACPHY PA trimming parameters: CCK */ + +#define HNBU_GAIN_CAL_TEMP 0xa2 /* RSSI Cal temperature parameter */ +#define HNBU_RSSI_DELTA_2G_B0 0xa3 /* RSSI Cal parameter for 2G channel group 0 */ +#define HNBU_RSSI_DELTA_2G_B1 0xa4 /* RSSI Cal parameter for 2G channel group 1 */ +#define HNBU_RSSI_DELTA_2G_B2 0xa5 /* RSSI Cal parameter for 2G channel group 2 */ +#define HNBU_RSSI_DELTA_2G_B3 0xa6 /* RSSI Cal parameter for 2G channel group 3 */ +#define HNBU_RSSI_DELTA_2G_B4 0xa7 /* RSSI Cal parameter for 2G channel group 4 */ +#define HNBU_RSSI_CAL_FREQ_GRP_2G 0xa8 /* RSSI Cal parameter for channel group def. */ +#define HNBU_RSSI_DELTA_5GL 0xa9 /* RSSI Cal parameter for 5G low channel */ +#define HNBU_RSSI_DELTA_5GML 0xaa /* RSSI Cal parameter for 5G mid lower channel */ +#define HNBU_RSSI_DELTA_5GMU 0xab /* RSSI Cal parameter for 5G mid upper channel */ +#define HNBU_RSSI_DELTA_5GH 0xac /* RSSI Cal parameter for 5G high channel */ + +#define HNBU_ACPA_6G_C0 0xad /* paparams for 6G Core0 */ +#define HNBU_ACPA_6G_C1 0xae /* paparams for 6G Core1 */ +#define HNBU_ACPA_6G_C2 0xaf /* paparams for 6G Core2 */ + +/* sbtmstatelow */ +#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */ +#define SBTML_INT_EN 0x20000 /* enable sb interrupt */ + +/* sbtmstatehigh */ +#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */
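+
+/*
+ * Editor's sketch (illustrative only, not part of the original sources): a
+ * CIS is parsed as a chain of <tuple><link(len)><data...> records. CISTPL_NULL
+ * carries no link byte and is simply skipped; CISTPL_END terminates the
+ * chain. For a CISTPL_BRCM_HNBU tuple the first data byte is the HNBU_*
+ * subtype:
+ *
+ *	while (i < cislen && cis[i] != CISTPL_END) {
+ *		if (cis[i] == CISTPL_NULL) { i++; continue; }
+ *		tup = cis[i++];
+ *		tlen = cis[i++];
+ *		if (tup == CISTPL_BRCM_HNBU && tlen > 0)
+ *			subtype = cis[i];	(e.g. HNBU_SROMREV, HNBU_MACADDR)
+ *		i += tlen;
+ *	}
+ */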
+#endif /* _SBPCMCIA_H */ diff --git a/bcmdhd.101.10.361.x/include/sbsdio.h b/bcmdhd.101.10.361.x/include/sbsdio.h new file mode 100755 index 0000000..0a2c227 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbsdio.h @@ -0,0 +1,185 @@ +/* + * SDIO device core hardware definitions. + * SDIO is a portion of the PCMCIA core in core rev 3 - rev 8. + * + * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _SBSDIO_H +#define _SBSDIO_H + +#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */ + +/* function 1 miscellaneous registers */ +#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */ +#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */ +#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */ +#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */ +#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */ +#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */ +#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */ +#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */ +#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */ +#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */ + +/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */ +#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */ +#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */ +#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */ +#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */ +#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */ +#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */ +#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */ +#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */ +#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */ +#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */ +#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D /* MesBusyCtl at 0x1001D (rev 11) */ + +#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */ +#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */ + +/* Sdio Core Rev 12 */ +#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E +#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1 +#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0 +#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2 +#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1 +#define SBSDIO_FUNC1_SLEEPCSR 0x1001F +#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1 +#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0 +#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1 +#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2 +#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1 + +/* SBSDIO_SPROM_CS */ +#define SBSDIO_SPROM_IDLE 0 +#define SBSDIO_SPROM_WRITE 1 +#define SBSDIO_SPROM_READ 2 +#define SBSDIO_SPROM_WEN 4 +#define SBSDIO_SPROM_WDS 7 +#define SBSDIO_SPROM_DONE 8 + +/* SBSDIO_SPROM_INFO 
*/ +#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */ +#define SROM_BLANK 0x04 /* deprecated in corerev 6 */ +#define SROM_OTP 0x80 /* OTP present */ + +/* SBSDIO_WATERMARK */ +#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device + * to wait before sending data to host + */ + +/* SBSDIO_MESBUSYCTRL */ +/* When RX FIFO has less entries than this & MBE is set + * => busy signal is asserted between data blocks. +*/ +#define SBSDIO_MESBUSYCTRL_MASK 0x7f +#define SBSDIO_MESBUSYCTRL_ENAB 0x80 /* Enable busy capability for MES access */ + +/* SBSDIO_DEVICE_CTL */ +#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when + * receiving CMD53 + */ +#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is + * synchronous to the sdio clock + */ +#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host + * except the chipActive (rev 8) + */ +#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put + * external pads in tri-state; requires + * sdio bus power cycle to clear (rev 9) + */ +#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10 /* Enable function 2 tx for each block */ +#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 /* Enable F2 Watermark */ +#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 0x20 /* Isolate sdio clk and cmd (non-data) */ + +/* SBSDIO_FUNC1_CHIPCLKCSR */ +#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */ +#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */ +#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */ +#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */ +/* In rev8, actual avail bits followed original docs */ +#define SBSDIO_Rev8_HT_AVAIL 0x40 +#define SBSDIO_Rev8_ALP_AVAIL 0x80 +#define SBSDIO_CSR_MASK 0x1F + +/* WAR for PR 40695: determine HT/ALP regardless of actual bit order. Need to use + * before we know corerev. (Can drop if all supported revs have same bit order.) + */ +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) +#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \ + (alponly ? 1 : SBSDIO_HTAV(regval)))
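+
+/*
+ * Editor's illustration (an added sketch, not original source): a host can
+ * request a clock via SBSDIO_FUNC1_CHIPCLKCSR and poll the same register
+ * until the WAR macros above report availability, independent of the
+ * pre-rev8 vs. rev8 bit order, e.g.:
+ *
+ *	csr = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ *	if (SBSDIO_CLKAV(csr, alponly))
+ *		... the requested clock (ALP, or HT when !alponly) is up ...
+ */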
/* SBSDIO_FUNC1_SDIOPULLUP */ +#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */ +#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */ +#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */ +#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */ +#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */ + +/* function 1 OCP space */ +#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */ +#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000 +#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */ + +/* some duplication with sbsdpcmdev.h here */ +/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */ +#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */ +#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */ +#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */ +#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */ + +/* direct(mapped) cis space */ +#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */ +#ifdef BCMSPI +#define SBSDIO_CIS_SIZE_LIMIT 0x100 /* maximum bytes in one spi CIS */ +#else +#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */ +#endif /* !BCMSPI */ +#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes in OTP CIS */ + +#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */ + +#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple, + * link bytes + */ + +/* indirect cis access (in sprom) */ +#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from + * 8th byte + */ + +#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one + * data command + */ + +#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
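+
+/*
+ * Editor's sketch (illustrative only, not part of the original sources): the
+ * DHD SDIO bus layer programs the F1 backplane window by writing the upper
+ * address bits (SBSDIO_SBWINDOW_MASK) into the three window registers,
+ * roughly:
+ *
+ *	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ *	                 (addr >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ *	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ *	                 (addr >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ *	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ *	                 (addr >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+ *
+ * The low 15 bits of addr then form the in-window offset, OR'ed with
+ * SBSDIO_SB_ACCESS_2_4B_FLAG for 32-bit accesses.
+ */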
+#endif /* _SBSDIO_H */ diff --git a/bcmdhd.101.10.361.x/include/sbsdpcmdev.h b/bcmdhd.101.10.361.x/include/sbsdpcmdev.h new file mode 100755 index 0000000..ced0aff --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbsdpcmdev.h @@ -0,0 +1,307 @@ +/* + * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific + * device core support + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _sbsdpcmdev_h_ +#define _sbsdpcmdev_h_ + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +typedef volatile struct { + dma64regs_t xmt; /* dma tx */ + uint32 PAD[2]; + dma64regs_t rcv; /* dma rx */ + uint32 PAD[2]; +} dma64p_t; + +/* dma64 sdiod corerev >= 1 */ +typedef volatile struct { + dma64p_t dma64regs[2]; + dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */ + uint32 PAD[92]; +} sdiodma64_t; + +/* dma32 sdiod corerev == 0 */ +typedef volatile struct { + dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */ + uint32 PAD[108]; +} sdiodma32_t; + +/* dma32 regs for pcmcia core */ +typedef volatile struct { + dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */ + uint32 PAD[116]; +} pcmdma32_t; + +/* core registers */ +typedef volatile struct { + uint32 corecontrol; /* CoreControl, 0x000, rev8 */ + uint32 corestatus; /* CoreStatus, 0x004, rev8 */ + uint32 PAD[1]; + uint32 biststatus; /* BistStatus, 0x00c, rev8 */ + + /* PCMCIA access */ + uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */ + uint16 PAD[1]; + uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */ + uint16 PAD[1]; + uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */ + uint16 PAD[1]; + uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */ + uint16 PAD[1]; + + /* interrupt */ + uint32 intstatus; /* IntStatus, 0x020, rev8 */ + uint32 hostintmask; /* IntHostMask, 0x024, rev8 */ + uint32 intmask; /* IntSbMask, 0x028, rev8 */ + uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */ + uint32 sbintmask; /* SBIntMask, 0x030, rev8 */ + uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */ + uint32 PAD[2]; + uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */ + uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */ + uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */ + uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */ + + /* synchronized access to registers in SDIO clock domain */ + uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */ + uint32 PAD[1]; + uint32 MiscHostAccessIntEn; + uint32 PAD[1]; + + /* PCMCIA frame control */ + uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */ + uint8 PAD[3]; + uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */ + uint8 PAD[155]; + + /* interrupt batching control */ + uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */ + uint32 PAD[3]; + + /* counters */ + uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */ + uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */ + uint32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */ + uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */ + uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */ + uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */ + uint32 writeterm; /* WriteTermCount, 0x13c, rev8, 
SDIO: wr frm terminates */ + uint32 PAD[40]; + uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */ + uint32 PAD[1]; + uint32 powerctl; /* 0x1e8 */ + uint32 PAD[5]; + + /* DMA engines */ + volatile union { + pcmdma32_t pcm32; + sdiodma32_t sdiod32; + sdiodma64_t sdiod64; + } dma; + + /* SDIO/PCMCIA CIS region */ + char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */ + + /* PCMCIA function control registers */ + char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */ + uint16 PAD[55]; + + /* PCMCIA backplane access */ + uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */ + uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */ + uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */ + uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */ + uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */ + uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */ + uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */ + uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */ + uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */ + uint16 PAD[31]; + + /* sprom "size" & "blank" info */ + uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */ + uint32 PAD[464]; + + /* Sonics SiliconBackplane registers */ + sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */ +} sdpcmd_regs_t; + +/* corecontrol */ +#define CC_CISRDY (1 << 0) /* CIS Ready */ +#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */ +#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */ +#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */ +#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */ +#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */ + +/* corestatus */ +#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */ +#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */ +#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */ + +#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */ +#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */ +#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */ +#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */ + +/* intstatus */ +#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */ +#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */ +#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */ +#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */ +#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */ +#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */ +#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */ +#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */ +#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */ +#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */ +#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */ +#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */ +#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */ +#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */ +#define I_PC (1 << 10) /* descriptor error */ +#define I_PD (1 << 11) /* data error */ +#define I_DE (1 << 12) /* Descriptor protocol Error */ +#define I_RU (1 << 13) /* Receive descriptor Underflow */ +#define I_RO (1 << 14) /* Receive fifo Overflow */ +#define I_XU (1 << 15) /* Transmit fifo Underflow */ +#define I_RI (1 << 16) /* Receive Interrupt */ +#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */ 
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */ +#define I_XI (1 << 24) /* Transmit Interrupt */ +#define I_RF_TERM (1 << 25) /* Read Frame Terminate */ +#define I_WF_TERM (1 << 26) /* Write Frame Terminate */ +#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */ +#define I_SBINT (1 << 28) /* sbintstatus Interrupt */ +#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */ +#define I_SRESET (1 << 30) /* CCCR RES interrupt */ +#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */ +#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */ +#define I_DMA (I_RI | I_XI | I_ERRORS) + +/* sbintstatus */ +#define I_SB_SERR (1 << 8) /* Backplane SError (write) */ +#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */ +#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */ + +/* sdioaccess */ +#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */ +#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */ +#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */ +#define SDA_WRITE 0x01000000 /* Write bit */ +#define SDA_READ 0x00000000 /* Write bit cleared for Read */ +#define SDA_BUSY 0x80000000 /* Busy bit */ + +/* sdioaccess-accessible register address spaces */ +#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */ +#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */ +#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */ +#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */ +#define SDA_F3_FBR_SPACE 0x400 /* sdioAccess F3 FBR register space */ + +/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */ +#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */ +#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */ +#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */ +#define SDA_DEVICECONTROL 0x009 /* DeviceControl */ +#define SDA_SBADDRLOW 0x00a /* SbAddrLow */ +#define SDA_SBADDRMID 0x00b /* SbAddrMid */ +#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */ +#define SDA_FRAMECTRL 0x00d /* FrameCtrl */ +#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */ +#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */ +#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */ +#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */ +#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */ +#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */ +#define SDA_MESBUSYCNTRL 0x01d /* mesBusyCntrl */ +#define SDA_WAKEUPCTRL 0x01e /* WakeupCtrl */ +#define SDA_SLEEPCSR 0x01f /* sleepCSR */ + +/* SDA_F1_REG_SPACE register bits */ +/* sleepCSR register */ +#define SDA_SLEEPCSR_KEEP_SDIO_ON 0x1 + +/* SDA_F2WATERMARK */ +#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */ + +/* SDA_SBADDRLOW */ +#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */ + +/* SDA_SBADDRMID */ +#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */ + +/* SDA_SBADDRHIGH */ +#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */ + +/* SDA_FRAMECTRL */ +#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */ +#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */ +#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */ + +/* pcmciaframectrl */ +#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */ + +/* intrcvlazy */ +#define IRL_TO_MASK 0x00ffffff /* timeout */ +#define IRL_FC_MASK 0xff000000 /* 
frame count */ +#define IRL_FC_SHIFT 24 /* frame count */ + +/* rx header */ +typedef volatile struct { + uint16 len; + uint16 flags; +} sdpcmd_rxh_t; + +/* rx header flags */ +#define RXF_CRC 0x0001 /* CRC error detected */ +#define RXF_WOOS 0x0002 /* write frame out of sync */ +#define RXF_WF_TERM 0x0004 /* write frame terminated */ +#define RXF_ABORT 0x0008 /* write frame aborted */ +#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */ + +/* HW frame tag */ +#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */ + +#if !defined(NDISVER) || (NDISVER < 0x0630) +#define SDPCM_HWEXT_LEN 8 +#else +#define SDPCM_HWEXT_LEN 0 +#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ + +#endif /* _sbsdpcmdev_h_ */ diff --git a/bcmdhd.101.10.361.x/include/sbsocram.h b/bcmdhd.101.10.361.x/include/sbsocram.h new file mode 100755 index 0000000..f8d6b0d --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbsocram.h @@ -0,0 +1,198 @@ +/* + * BCM47XX Sonics SiliconBackplane embedded ram core + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * + * <> + */ + +#ifndef _SBSOCRAM_H +#define _SBSOCRAM_H + +#ifndef _LANGUAGE_ASSEMBLY + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* Memcsocram core registers */ +typedef volatile struct sbsocramregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; /* rev 6 */ + uint32 errlogaddr; /* rev 6 */ + /* used for patching rev 3 & 5 */ + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[1]; + uint32 bankinfo; /* corerev 8 */ + uint32 bankpda; + uint32 PAD[14]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; /* corerev >= 2 */ + uint32 PAD[133]; + uint32 sr_control; /* corerev >= 15 */ + uint32 sr_status; /* corerev >= 15 */ + uint32 sr_address; /* corerev >= 15 */ + uint32 sr_data; /* corerev >= 15 */ +} sbsocramregs_t; + +#endif /* _LANGUAGE_ASSEMBLY */ + +/* Register offsets */ +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + +/* Coreinfo register */ +#define SRCI_PT_MASK 0x00070000 /* corerev >= 6; port type[18:16] */ +#define SRCI_PT_SHIFT 16 +/* port types : SRCI_PT__ */ +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 +/* corerev >= 3 */ +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define SRCI_LRS_SHIFT 24 + +/* In corerev 0, the memory size is 2 to the power of the + * base (16) plus the contents of the memsize field plus 1. + */ +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + +/* + * In corerev 1 the bank size is 2 ^ (the bank size field plus 14), + * and the memory size is the number of banks times the bank size. + * The same applies to the ROM size. + */ +#define SRCI_ROMNB_MASK 0xf000 +#define SRCI_ROMNB_SHIFT 12 +#define SRCI_ROMBSZ_MASK 0xf00 +#define SRCI_ROMBSZ_SHIFT 8 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_SHIFT 4 +#define SRCI_SRBSZ_MASK 0xf +#define SRCI_SRBSZ_SHIFT 0 + +#define SRCI_SRNB_MASK_EXT 0x100 + +#define SR_BSZ_BASE 14
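+
+/*
+ * Editor's illustration (an added sketch, not original source): decoding the
+ * corerev >= 1 coreinfo fields above into a memory size, per the comment:
+ *
+ *	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ *	banksize = 1 << (((coreinfo & SRCI_SRBSZ_MASK) >> SRCI_SRBSZ_SHIFT)
+ *	                 + SR_BSZ_BASE);
+ *	memsize = nb * banksize;
+ */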
/* Standby control register */ +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 /* rev >= 3 */ +#define SRSC_SBYEN_SHIFT 24 + +/* Power control register */ +#define SRPC_PMU_STBYDIS_MASK 0x00000010 /* rev >= 3 */ +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 +#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + +/* Extra core capability register */ +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + +/* CAM bank patch control */ +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + +/* CAM bank command reg */ +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + +/* bankidx and bankinfo reg defines corerev >= 8 */ +#define SOCRAM_BANKINFO_SZMASK 0x7f +#define SOCRAM_BANKIDX_ROM_MASK 0x100 + +#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8 +/* socram bankinfo memtype */ +#define SOCRAM_MEMTYPE_RAM 0 +#define SOCRAM_MEMTYPE_ROM 1 +#define SOCRAM_MEMTYPE_DEVRAM 2 + +#define SOCRAM_BANKINFO_REG 0x40 +#define SOCRAM_BANKIDX_REG 0x10 +#define SOCRAM_BANKINFO_STDBY_MASK 0x400 +#define SOCRAM_BANKINFO_STDBY_TIMER 0x800 + +/* bankinfo rev >= 10 */ +#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13 +#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000 +#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14 +#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000 +#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15 +#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000 +#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16 +#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000 +#define SOCRAM_BANKINFO_PDASZ_SHIFT 17 +#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000 +#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24 +#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000 + +/* extracoreinfo register */ +#define SOCRAM_DEVRAMBANK_MASK 0xF000 +#define SOCRAM_DEVRAMBANK_SHIFT 12 + +/* bank info to calculate bank size */ +#define SOCRAM_BANKINFO_SZBASE 8192 +#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */ + +#endif /* _SBSOCRAM_H */ diff --git a/bcmdhd.101.10.361.x/include/sbsprom.h b/bcmdhd.101.10.361.x/include/sbsprom.h new file mode 100755 index 0000000..f43da2d --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sbsprom.h @@ -0,0 +1,236 @@ +/* + * SPROM format definitions for the Broadcom 47xx and 43xx chip family. + * Broadcom Proprietary and Confidential. Copyright (C) 2020, + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom; + * the contents of this file may not be disclosed to third parties, + * copied or duplicated in any form, in whole or in part, without + * the prior written permission of Broadcom. 
+ *
+ *
+ * <>
+ */
+
+#ifndef _SBSPROM_H
+#define _SBSPROM_H
+
+#include "typedefs.h"
+#include "bcmdevs.h"
+
+/* A word is this many bytes */
+#define SRW 2
+
+/* offset into PCI config space for write enable bit */
+#define CFG_SROM_WRITABLE_OFFSET 0x88
+#define SROM_WRITEABLE 0x10
+
+/* enumeration space consists of N contiguous 4Kbyte core register sets */
+#define SBCORES_BASE 0x18000000
+#define SBCORES_EACH 0x1000
+
+/* offset from BAR0 for srom space */
+#define SROM_BASE 4096
+
+/* number of 2-byte words in srom */
+#define SROM_SIZE 64
+
+#define SROM_BYTES (SROM_SIZE * SRW)
+
+#define MAX_FN 4
+
+/* Word 0, Hardware control */
+#define SROM_HWCTL 0
+#define HW_FUNMSK 0x000f
+#define HW_FCLK 0x0200
+#define HW_CBM 0x0400
+#define HW_PIMSK 0xf000
+#define HW_PISHIFT 12
+#define HW_PI4402 0x2
+#define HW_FUN4401 0x0001
+#define HW_FCLK4402 0x0000
+
+/* Word 1, common-power/boot-rom */
+#define SROM_COMMPW 1
+/* boot rom present bit */
+#define BR_PRESSHIFT 8
+/* 15:9 for n; boot rom size is 2^(14 + n) bytes */
+#define BR_SIZESHIFT 9
+
+/* Word 2, SubsystemId */
+#define SROM_SSID 2
+
+/* Word 3, VendorId */
+#define SROM_VID 3
+
+/* Function 0 info, function info length */
+#define SROM_FN0 4
+#define SROM_FNSZ 8
+
+/* Within each function: */
+/* Word 0, deviceID */
+#define SRFN_DID 0
+
+/* Words 1-2, ClassCode */
+#define SRFN_CCL 1
+/* Word 2, D0 Power */
+#define SRFN_CCHD0 2
+
+/* Word 3, PME and D1D2D3 power */
+#define SRFN_PMED123 3
+
+#define PME_IL 0
+#define PME_ENET0 1
+#define PME_ENET1 2
+#define PME_CODEC 3
+
+#define PME_4402_ENET 0
+#define PME_4402_CODEC 1
+#define PMEREP_4402_ENET (PMERD3CV | PMERD3CA | PMERD3H | PMERD2 | PMERD1 | PMERD0 | PME)
+
+/* Word 4, Bar1 enable, pme reports */
+#define SRFN_B1PMER 4
+#define B1E 1
+#define B1SZMSK 0xe
+#define B1SZSH 1
+#define PMERMSK 0x0ff0
+#define PME 0x0010
+#define PMERD0 0x0020
+#define PMERD1 0x0040
+#define PMERD2 0x0080
+#define PMERD3H 0x0100
+#define PMERD3CA 0x0200
+#define PMERD3CV 0x0400
+#define IGNCLKRR 0x0800
+#define B0LMSK 0xf000
+
+/* Words 4-5, Bar0 Sonics value */
+#define SRFN_B0H 5
+/* Words 6-7, CIS Pointer */
+#define SRFN_CISL 6
+#define SRFN_CISH 7
+
+/* Words 36-38: iLine MAC address */
+#define SROM_I_MACHI 36
+#define SROM_I_MACMID 37
+#define SROM_I_MACLO 38
+
+/* Words 36-38: wireless0 MAC address on 43xx */
+#define SROM_W0_MACHI 36
+#define SROM_W0_MACMID 37
+#define SROM_W0_MACLO 38
+
+/* Words 39-41: enet0 MAC address */
+#define SROM_E0_MACHI 39
+#define SROM_E0_MACMID 40
+#define SROM_E0_MACLO 41
+
+/* Words 42-44: enet1 MAC address */
+#define SROM_E1_MACHI 42
+#define SROM_E1_MACMID 43
+#define SROM_E1_MACLO 44
+
+#define SROM_EPHY 45
+
+/* Words 47-51 wl0 PA bx */
+#define SROM_WL0_PAB0 47
+#define SROM_WL0_PAB1 48
+#define SROM_WL0_PAB2 49
+#define SROM_WL0_PAB3 50
+#define SROM_WL0_PAB4 51
+
+/* Word 52: wl0/wl1 MaxPower */
+#define SROM_WL_MAXPWR 52
+
+/* Words 53-55 wl1 PA bx */
+#define SROM_WL1_PAB0 53
+#define SROM_WL1_PAB1 54
+#define SROM_WL1_PAB2 55
+
+/* Word 56: itt */
+#define SROM_ITT 56
+
+/* Words 59-62: OEM Space */
+#define SROM_WL_OEM 59
+#define SROM_OEM_SIZE 4
+
+/* Contents for the srom */
+
+#define BU4710_SSID 0x0400
+#define VSIM4710_SSID 0x0401
+#define QT4710_SSID 0x0402
+
+#define BU4610_SSID 0x0403
+#define VSIM4610_SSID 0x0404
+
+#define BU4402_SSID 0x4402
+
+#define CLASS_OTHER 0x8000
+#define CLASS_ETHER 0x0000
+#define CLASS_NET 0x0002
+#define CLASS_COMM 0x0007
+#define CLASS_MODEM 0x0300
+#define CLASS_MIPS 0x3000
+#define CLASS_PROC 0x000b
+#define CLASS_FLASH 0x0100
+#define CLASS_MEM 0x0005
+#define CLASS_SERIALBUS 0x000c
+#define CLASS_OHCI 0x0310
+
+/* Broadcom IEEE MAC addresses are 00:90:4c:xx:xx:xx */
+#define MACHI 0x90
+
+#define MACMID_BU4710I 0x4c17
+#define MACMID_BU4710E0 0x4c18
+#define MACMID_BU4710E1 0x4c19
+
+#define MACMID_94710R1I 0x4c1a
+#define MACMID_94710R1E0 0x4c1b
+#define MACMID_94710R1E1 0x4c1c
+
+#define MACMID_94710R4I 0x4c1d
+#define MACMID_94710R4E0 0x4c1e
+#define MACMID_94710R4E1 0x4c1f
+
+#define MACMID_94710DEVI 0x4c20
+#define MACMID_94710DEVE0 0x4c21
+#define MACMID_94710DEVE1 0x4c22
+
+#define MACMID_BU4402 0x4c23
+
+#define MACMID_BU4610I 0x4c24
+#define MACMID_BU4610E0 0x4c25
+#define MACMID_BU4610E1 0x4c26
+
+#define MACMID_BU4401 0x4c37
+
+/* Enet phy settings: one or two singles or a dual */
+/* Bits 4-0 : MII address for enet0 (0x1f for not there) */
+/* Bits 9-5 : MII address for enet1 (0x1f for not there) */
+/* Bit 14 : Mdio for enet0 */
+/* Bit 15 : Mdio for enet1 */
+
+/* bu4710 with only one phy on enet1 with address 7: */
+#define SROM_EPHY_ONE 0x80ff
+
+/* bu4710 with two individual phys, at 6 and 7, */
+/* each mdio connected to its own mac: */
+#define SROM_EPHY_TWO 0x80e6
+
+/* bu4710 with a dual phy addresses 0 & 1, mdio-connected to enet0 */
+/* bringup board has phyaddr0 and phyaddr1 swapped */
+#define SROM_EPHY_DUAL 0x0001
+
+/* r1 board with a dual phy at 0, 1 (NOT swapped) and mdc0 */
+#define SROM_EPHY_R1 0x0010
+
+/* r4 board with a single phy on enet0 at address 5 and a switch */
+/* chip on enet1 (special case: 0x1e) */
+#define SROM_EPHY_R4 0x83e5
+
+/* 4402 uses an internal phy at phyaddr 1; want mdcport == coreunit == 0 */
+#define SROM_EPHY_INTERNAL 0x0001
+
+#define SROM_VERS 0x0001
+
+#endif /* _SBSPROM_H */
diff --git a/bcmdhd.101.10.361.x/include/sbsysmem.h b/bcmdhd.101.10.361.x/include/sbsysmem.h
new file mode 100755
index 0000000..5c86c0c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbsysmem.h
@@ -0,0 +1,191 @@
+/*
+ * SiliconBackplane System Memory core
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _SBSYSMEM_H
+#define _SBSYSMEM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* sysmem core registers */
+typedef volatile struct sysmemregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 standbywait;
+ uint32 PAD[1];
+ uint32 bankinfo;
+ uint32 PAD[7];
+ uint32 region_n_regs[32];
+ uint32 initiat_n_masks[31];
+ uint32 PAD[1];
+ uint32 mpucontrol;
+ uint32 mpucapabilities;
+ uint32 PAD[31];
+ uint32 workaround;
+ uint32 pwrctl;
+ uint32 PAD[133];
+ uint32 sr_control;
+ uint32 sr_status;
+ uint32 sr_address;
+ uint32 sr_data;
+} sysmemregs_t;
+
+/* bus MPU region count mask of sysmemregs_t->mpucapabilities */
+#define ACC_MPU_REGION_CNT_MASK 0x7u
+/* bus MPU disable mask of sysmemregs_t->mpucontrol */
+#define BUSMPU_DISABLE_MASK 0xfu
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+/* Coreinfo register */
+#define SRCI_PT_MASK 0x00070000 /* port type[18:16] */
+#define SRCI_PT_SHIFT 16
+/* port types : SRCI_PT_<port_type>_<port_width> */
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+/* In corerev 0, the memory size is 2 to the power of the
+ * base (16) plus the contents of the memsize field plus 1.
+ */
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+/*
+ * In corerev 1 the bank size is 2 ^ the bank size field plus 14,
+ * the memory size is number of banks times bank size.
+ * The same applies to rom size.
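+ * For example (illustrative numbers, not taken from any datasheet): a
+ * bank size field of 5 gives banks of 2^(5 + 14) = 512 KB, so 3 banks
+ * yield 1.5 MB of system memory.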
+ */
+#define SYSMEM_SRCI_ROMNB_MASK 0x3e0
+#define SYSMEM_SRCI_ROMNB_SHIFT 5
+#define SYSMEM_SRCI_SRNB_MASK 0x1f
+#define SYSMEM_SRCI_SRNB_SHIFT 0
+/* Above bits are obsolete and replaced by the fields below in rev 12 */
+#define SYSMEM_SRCI_NEW_ROMNB_MASK 0xff000000u
+#define SYSMEM_SRCI_NEW_ROMNB_SHIFT 24u
+#define SYSMEM_SRCI_NEW_SRNB_MASK 0xff0000u
+#define SYSMEM_SRCI_NEW_SRNB_SHIFT 16u
+
+/* Standby control register */
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+/* bankidx and bankinfo reg defines */
+#define SYSMEM_BANKINFO_SZMASK 0x7f
+#define SYSMEM_BANKIDX_ROM_MASK 0x80
+
+#define SYSMEM_BANKINFO_REG 0x40
+#define SYSMEM_BANKIDX_REG 0x10
+#define SYSMEM_BANKINFO_STDBY_MASK 0x200
+#define SYSMEM_BANKINFO_STDBY_TIMER 0x400
+
+#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 14
+#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x4000
+#define SYSMEM_BANKINFO_PDASZ_SHIFT 16
+#define SYSMEM_BANKINFO_PDASZ_MASK 0x001F0000
+
+/* extracoreinfo register */
+#define SYSMEM_DEVRAMBANK_MASK 0xF000
+#define SYSMEM_DEVRAMBANK_SHIFT 12
+
+/* bank info to calculate bank size */
+#define SYSMEM_BANKINFO_SZBASE 8192
+#define SYSMEM_BANKSIZE_SHIFT 13 /* SYSMEM_BANKINFO_SZBASE */
+
+/* standbycontrol register default values */
+#define SYSMEM_SBYCNTRL_TIMEVAL 0x100000u /* standbycontrol timeval[23:0] */
+#define SYSMEM_SBYCNTRL_TIMEVAL_MASK 0xffffffu
+
+/* sbywaitcycle register default values (sysmem rev 8) */
+#define SYSMEM_SBYWAIT_RAM_TIMEVAL 0xau /* RAM memory access after standby exit */
+
+#endif /* _SBSYSMEM_H */
diff --git a/bcmdhd.101.10.361.x/include/sdio.h b/bcmdhd.101.10.361.x/include/sdio.h
new file mode 100755
index 0000000..b0343f0
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sdio.h
@@ -0,0 +1,644 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _SDIO_H
+#define _SDIO_H
+
+#ifdef BCMSDIO
+/*
+ * Standard SD Device Register Map.
+ *
+ * Reference definitions from:
+ * SD Specifications, Part E1: SDIO Specification
+ * Version 1.10
+ * August 18, 2004
+ * http://www.sdcard.org
+ *
+ * EXCEPTION: The speed_control register defined here is based on a
+ * draft of the next version, and is thus nonstandard.
+ */
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+ uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */
+ uint8 sd_rev; /* RO, sd spec revision */
+ uint8 io_en; /* I/O enable */
+ uint8 io_rdy; /* I/O ready reg */
+ uint8 intr_ctl; /* Master and per function interrupt enable control */
+ uint8 intr_status; /* RO, interrupt pending status */
+ uint8 io_abort; /* read/write abort or reset all functions */
+ uint8 bus_inter; /* bus interface control */
+ uint8 capability; /* RO, card capability */
+
+ uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
+ uint8 cis_base_mid;
+ uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
+
+ /* suspend/resume registers */
+ uint8 bus_suspend; /* 0xC */
+ uint8 func_select; /* 0xD */
+ uint8 exec_flag; /* 0xE */
+ uint8 ready_flag; /* 0xF */
+
+ uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
+
+ uint8 power_control; /* 0x12 (SDIO version 1.10) */
+
+ uint8 speed_control; /* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV 0x00
+#define SDIOD_CCCR_SDREV 0x01
+#define SDIOD_CCCR_IOEN 0x02
+#define SDIOD_CCCR_IORDY 0x03
+#define SDIOD_CCCR_INTEN 0x04
+#define SDIOD_CCCR_INTPEND 0x05
+#define SDIOD_CCCR_IOABORT 0x06
+#define SDIOD_CCCR_BICTRL 0x07
+#define SDIOD_CCCR_CAPABLITIES 0x08
+#define SDIOD_CCCR_CISPTR_0 0x09
+#define SDIOD_CCCR_CISPTR_1 0x0A
+#define SDIOD_CCCR_CISPTR_2 0x0B
+#define SDIOD_CCCR_BUSSUSP 0x0C
+#define SDIOD_CCCR_FUNCSEL 0x0D
+#define SDIOD_CCCR_EXECFLAGS 0x0E
+#define SDIOD_CCCR_RDYFLAGS 0x0F
+#define SDIOD_CCCR_BLKSIZE_0 0x10
+#define SDIOD_CCCR_BLKSIZE_1 0x11
+#define SDIOD_CCCR_POWER_CONTROL 0x12
+#define SDIOD_CCCR_SPEED_CONTROL 0x13
+#define SDIOD_CCCR_UHSI_SUPPORT 0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH 0x15
+#define SDIOD_CCCR_INTR_EXTN 0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP 0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
+#define SDIOD_CCCR_BRCM_CARDCTL 0xf1
+#define SDIOD_CCCR_BRCM_SEPINT 0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0 0x40 /* SDIO spec version 3.0 */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK 0x0f /* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
+#if defined (BT_OVER_SDIO)
+#define SDIO_FUNC_ENABLE_3 0x08 /* function 3 I/O enable */
+#define SDIO_FUNC_DISABLE_3 0xF0 /* function 3 I/O disable */
+#endif /* defined (BT_OVER_SDIO) */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
+#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
+#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
+#if defined (BT_OVER_SDIO)
+#define INTR_CTL_FUNC3_EN 0x8 /* interrupt enable for function 3 */
+#endif /* defined (BT_OVER_SDIO) */
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
+#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
+#define SDIO_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
+#define SDIO_CAP_SRW 0x04 /* support read wait */
+#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
+#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
+#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50 0x08
+
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S 1
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S 0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S 0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S 4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S 0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S 1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+ uint8 devctr; /* device interface, CSA control */
+ uint8 ext_dev; /* extended standard I/O device type code */
+ uint8 pwr_sel; /* power selection support */
+ uint8 PAD[6]; /* reserved */
+
+ uint8 cis_low; /* CIS LSB */
+ uint8 cis_mid;
+ uint8 cis_high; /* CIS MSB */
+ uint8 csa_low; /* code storage area, LSB */
+ uint8 csa_mid;
+ uint8 csa_high; /* code storage area, MSB */
+ uint8 csa_dat_win; /* data access window to function */
+
+ uint8 fnx_blk_size[2]; /* 
block size, little endian */ +} sdio_fbr_t; + +/* Maximum number of I/O funcs */ +#define SDIOD_MAX_FUNCS 8 +#define SDIOD_MAX_IOFUNCS 7 + +/* SDIO Device FBR Start Address */ +#define SDIOD_FBR_STARTADDR 0x100 + +/* SDIO Device FBR Size */ +#define SDIOD_FBR_SIZE 0x100 + +/* Macro to calculate FBR register base */ +#define SDIOD_FBR_BASE(n) ((n) * 0x100) + +/* Function register offsets */ +#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */ +#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */ +#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */ + +/* SDIO Function CIS ptr offset */ +#define SDIOD_FBR_CISPTR_0 0x09 +#define SDIOD_FBR_CISPTR_1 0x0A +#define SDIOD_FBR_CISPTR_2 0x0B + +/* Code Storage Area pointer */ +#define SDIOD_FBR_CSA_ADDR_0 0x0C +#define SDIOD_FBR_CSA_ADDR_1 0x0D +#define SDIOD_FBR_CSA_ADDR_2 0x0E +#define SDIOD_FBR_CSA_DATA 0x0F + +/* SDIO Function I/O Block Size */ +#define SDIOD_FBR_BLKSIZE_0 0x10 +#define SDIOD_FBR_BLKSIZE_1 0x11 + +/* devctr */ +#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */ +#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */ +#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */ +/* interface codes */ +#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */ +#define SDIOD_DIC_UART 1 +#define SDIOD_DIC_BLUETOOTH_A 2 +#define SDIOD_DIC_BLUETOOTH_B 3 +#define SDIOD_DIC_GPS 4 +#define SDIOD_DIC_CAMERA 5 +#define SDIOD_DIC_PHS 6 +#define SDIOD_DIC_WLAN 7 +#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */ + +/* pwr_sel */ +#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */ +#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */ + +/* misc defines */ +#define SDIO_FUNC_0 0 +#define SDIO_FUNC_1 1 +#define SDIO_FUNC_2 2 +#define SDIO_FUNC_4 4 +#define SDIO_FUNC_5 5 +#define SDIO_FUNC_6 6 +#define SDIO_FUNC_7 7 + +#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */ +#define SD_CARD_TYPE_IO 1 /* IO only card */ +#define SD_CARD_TYPE_MEMORY 2 /* memory only card */ +#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */ + +#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */ +#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */ + +/* Card registers: status bit position */ +#define CARDREG_STATUS_BIT_OUTOFRANGE 31 +#define CARDREG_STATUS_BIT_COMCRCERROR 23 +#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22 +#define CARDREG_STATUS_BIT_ERROR 19 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9 +#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4 + +/* ---------------------------------------------------- + * SDIO Protocol Definitions -- commands and responses + * + * Reference definitions from SDIO Specification v1.10 + * of August 18, 2004; and SD Physical Layer v1.10 of + * October 15, 2004. + * ---------------------------------------------------- + */ + +/* Straight defines, mostly used by older driver(s). 
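+ *
+ * As a hedged illustration (values computed here, not quoted from the
+ * spec): enabling I/O on function 1 is a CMD52 write of
+ * SDIO_FUNC_ENABLE_1 to the CCCR I/O-enable register, built with the
+ * macro defined further below as
+ * SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, 0, SDIO_FUNC_0, SDIOD_CCCR_IOEN,
+ * SDIO_FUNC_ENABLE_1), which evaluates to 0x80000402. A CMD53 block read
+ * of n blocks from function 2 would similarly use
+ * SDIO_IO_RW_EXTENDED_ARG(SD_IO_OP_READ, SD_IO_BLOCK_MODE, SDIO_FUNC_2,
+ * addr, SD_IO_INCREMENT_ADDRESS, n).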
*/ + +#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */ +#define SD_CMD_SEND_OPCOND 1 +#define SD_CMD_MMC_SET_RCA 3 +#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */ +#define SD_CMD_SELECT_DESELECT_CARD 7 +#define SD_CMD_SEND_CSD 9 +#define SD_CMD_SEND_CID 10 +#define SD_CMD_STOP_TRANSMISSION 12 +#define SD_CMD_SEND_STATUS 13 +#define SD_CMD_GO_INACTIVE_STATE 15 +#define SD_CMD_SET_BLOCKLEN 16 +#define SD_CMD_READ_SINGLE_BLOCK 17 +#define SD_CMD_READ_MULTIPLE_BLOCK 18 +#define SD_CMD_WRITE_BLOCK 24 +#define SD_CMD_WRITE_MULTIPLE_BLOCK 25 +#define SD_CMD_PROGRAM_CSD 27 +#define SD_CMD_SET_WRITE_PROT 28 +#define SD_CMD_CLR_WRITE_PROT 29 +#define SD_CMD_SEND_WRITE_PROT 30 +#define SD_CMD_ERASE_WR_BLK_START 32 +#define SD_CMD_ERASE_WR_BLK_END 33 +#define SD_CMD_ERASE 38 +#define SD_CMD_LOCK_UNLOCK 42 +#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */ +#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */ +#define SD_CMD_APP_CMD 55 +#define SD_CMD_GEN_CMD 56 +#define SD_CMD_READ_OCR 58 +#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */ +#define SD_ACMD_SD_STATUS 13 +#define SD_ACMD_SEND_NUM_WR_BLOCKS 22 +#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23 +#define SD_ACMD_SD_SEND_OP_COND 41 +#define SD_ACMD_SET_CLR_CARD_DETECT 42 +#define SD_ACMD_SEND_SCR 51 + +/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */ +#define SD_IO_OP_READ 0 /* Read_Write: Read */ +#define SD_IO_OP_WRITE 1 /* Read_Write: Write */ +#define SD_IO_RW_NORMAL 0 /* no RAW */ +#define SD_IO_RW_RAW 1 /* RAW */ +#define SD_IO_BYTE_MODE 0 /* Byte Mode */ +#define SD_IO_BLOCK_MODE 1 /* BlockMode */ +#define SD_IO_FIXED_ADDRESS 0 /* fix Address */ +#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */ + +/* build SD_CMD_IO_RW_DIRECT Argument */ +#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \ + (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF)) + +/* build SD_CMD_IO_RW_EXTENDED Argument */ +#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \ + (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF)) + +/* SDIO response parameters */ +#define SD_RSP_NO_NONE 0 +#define SD_RSP_NO_1 1 +#define SD_RSP_NO_2 2 +#define SD_RSP_NO_3 3 +#define SD_RSP_NO_4 4 +#define SD_RSP_NO_5 5 +#define SD_RSP_NO_6 6 + + /* Modified R6 response (to CMD3) */ +#define SD_RSP_MR6_COM_CRC_ERROR 0x8000 +#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000 +#define SD_RSP_MR6_ERROR 0x2000 + + /* Modified R1 in R4 Response (to CMD5) */ +#define SD_RSP_MR1_SBIT 0x80 +#define SD_RSP_MR1_PARAMETER_ERROR 0x40 +#define SD_RSP_MR1_RFU5 0x20 +#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10 +#define SD_RSP_MR1_COM_CRC_ERROR 0x08 +#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04 +#define SD_RSP_MR1_RFU1 0x02 +#define SD_RSP_MR1_IDLE_STATE 0x01 + + /* R5 response (to CMD52 and CMD53) */ +#define SD_RSP_R5_COM_CRC_ERROR 0x80 +#define SD_RSP_R5_ILLEGAL_COMMAND 0x40 +#define SD_RSP_R5_IO_CURRENTSTATE1 0x20 +#define SD_RSP_R5_IO_CURRENTSTATE0 0x10 +#define SD_RSP_R5_ERROR 0x08 +#define SD_RSP_R5_RFU 0x04 +#define SD_RSP_R5_FUNC_NUM_ERROR 0x02 +#define SD_RSP_R5_OUT_OF_RANGE 0x01 + +#define SD_RSP_R5_ERRBITS 0xCB + +/* Mask/shift form, commonly used in newer driver(s) */ + +/* ------------------------------------------------ + * SDIO Commands and responses + * + * I/O only commands are: + * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53 + * 
------------------------------------------------ + */ + +/* SDIO Commands */ +#define SDIOH_CMD_0 0 +#define SDIOH_CMD_3 3 +#define SDIOH_CMD_5 5 +#define SDIOH_CMD_7 7 +#define SDIOH_CMD_11 11 +#define SDIOH_CMD_14 14 +#define SDIOH_CMD_15 15 +#define SDIOH_CMD_19 19 +#define SDIOH_CMD_52 52 +#define SDIOH_CMD_53 53 +#define SDIOH_CMD_59 59 + +/* SDIO Command Responses */ +#define SDIOH_RSP_NONE 0 +#define SDIOH_RSP_R1 1 +#define SDIOH_RSP_R2 2 +#define SDIOH_RSP_R3 3 +#define SDIOH_RSP_R4 4 +#define SDIOH_RSP_R5 5 +#define SDIOH_RSP_R6 6 + +/* + * SDIO Response Error flags + */ +#define SDIOH_RSP5_ERROR_FLAGS 0xCB + +/* ------------------------------------------------ + * SDIO Command structures. I/O only commands are: + * + * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +#define CMD5_OCR_M BITFIELD_MASK(24) +#define CMD5_OCR_S 0 + +#define CMD5_S18R_M BITFIELD_MASK(1) +#define CMD5_S18R_S 24 + +#define CMD7_RCA_M BITFIELD_MASK(16) +#define CMD7_RCA_S 16 + +#define CMD14_RCA_M BITFIELD_MASK(16) +#define CMD14_RCA_S 16 +#define CMD14_SLEEP_M BITFIELD_MASK(1) +#define CMD14_SLEEP_S 15 + +#define CMD_15_RCA_M BITFIELD_MASK(16) +#define CMD_15_RCA_S 16 + +#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52 + */ +#define CMD52_DATA_S 0 +#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD52_REG_ADDR_S 9 +#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */ +#define CMD52_RAW_S 27 +#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD52_FUNCTION_S 28 +#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD52_RW_FLAG_S 31 + +#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */ +#define CMD53_BYTE_BLK_CNT_S 0 +#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD53_REG_ADDR_S 9 +#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */ +#define CMD53_OP_CODE_S 26 +#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */ +#define CMD53_BLK_MODE_S 27 +#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD53_FUNCTION_S 28 +#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD53_RW_FLAG_S 31 + +/* ------------------------------------------------------ + * SDIO Command Response structures for SD1 and SD4 modes + * ----------------------------------------------------- + */ +#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_IO_OCR_S 0 + +#define RSP4_S18A_M BITFIELD_MASK(1) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_S18A_S 24 + +#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */ +#define RSP4_STUFF_S 24 +#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */ +#define RSP4_MEM_PRESENT_S 27 +#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */ +#define RSP4_NUM_FUNCS_S 28 +#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */ +#define RSP4_CARD_READY_S 31 + +#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0] + */ +#define RSP6_STATUS_S 0 +#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */ +#define RSP6_IO_RCA_S 16 + +#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */ +#define 
RSP1_AKE_SEQ_ERROR_S 3 +#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP1_APP_CMD_S 5 +#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */ +#define RSP1_READY_FOR_DATA_S 8 +#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card + * when Cmd was received + */ +#define RSP1_CURR_STATE_S 9 +#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */ +#define RSP1_EARSE_RESET_S 13 +#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */ +#define RSP1_CARD_ECC_DISABLE_S 14 +#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */ +#define RSP1_WP_ERASE_SKIP_S 15 +#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits + * of CSD + */ +#define RSP1_CID_CSD_OVERW_S 16 +#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */ +#define RSP1_ERROR_S 19 +#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */ +#define RSP1_CC_ERROR_S 20 +#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed + * to correct data + */ +#define RSP1_CARD_ECC_FAILED_S 21 +#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */ +#define RSP1_ILLEGAL_CMD_S 22 +#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed + */ +#define RSP1_COM_CRC_ERROR_S 23 +#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */ +#define RSP1_LOCK_UNLOCK_FAIL_S 24 +#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */ +#define RSP1_CARD_LOCKED_S 25 +#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program + * write-protected blocks + */ +#define RSP1_WP_VIOLATION_S 26 +#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */ +#define RSP1_ERASE_PARAM_S 27 +#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */ +#define RSP1_ERASE_SEQ_ERR_S 28 +#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */ +#define RSP1_BLK_LEN_ERR_S 29 +#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */ +#define RSP1_ADDR_ERR_S 30 +#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */ +#define RSP1_OUT_OF_RANGE_S 31 + +#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */ +#define RSP5_DATA_S 0 +#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */ +#define RSP5_FLAGS_S 8 +#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */ +#define RSP5_STUFF_S 16 + +/* ---------------------------------------------- + * SDIO Command Response structures for SPI mode + * ---------------------------------------------- + */ +#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */ +#define SPIRSP4_IO_OCR_S 0 +#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */ +#define SPIRSP4_STUFF_S 16 +#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */ +#define SPIRSP4_MEM_PRESENT_S 19 +#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */ +#define SPIRSP4_NUM_FUNCS_S 20 +#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */ +#define SPIRSP4_CARD_READY_S 23 +#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */ +#define SPIRSP4_IDLE_STATE_S 24 +#define 
SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP4_ILLEGAL_CMD_S 26 +#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP4_COM_CRC_ERROR_S 27 +#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP4_FUNC_NUM_ERROR_S 28 +#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP4_PARAM_ERROR_S 30 +#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP4_START_BIT_S 31 + +#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */ +#define SPIRSP5_DATA_S 16 +#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */ +#define SPIRSP5_IDLE_STATE_S 24 +#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP5_ILLEGAL_CMD_S 26 +#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP5_COM_CRC_ERROR_S 27 +#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP5_FUNC_NUM_ERROR_S 28 +#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP5_PARAM_ERROR_S 30 +#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP5_START_BIT_S 31 + +/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */ +#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error + */ +#define RSP6STAT_AKE_SEQ_ERROR_S 3 +#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP6STAT_APP_CMD_S 5 +#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data + * (buff empty) + */ +#define RSP6STAT_READY_FOR_DATA_S 8 +#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at + * Cmd reception + */ +#define RSP6STAT_CURR_STATE_S 9 +#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19 + */ +#define RSP6STAT_ERROR_S 13 +#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for + * card state Bit 22 + */ +#define RSP6STAT_ILLEGAL_CMD_S 14 +#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command + * failed Bit 23 + */ +#define RSP6STAT_COM_CRC_ERROR_S 15 + +#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ +#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE + +/* command issue options */ +#define CMD_OPTION_DEFAULT 0 +#define CMD_OPTION_TUNING 1 + +#endif /* def BCMSDIO */ +#endif /* _SDIO_H */ diff --git a/bcmdhd.101.10.361.x/include/sdioh.h b/bcmdhd.101.10.361.x/include/sdioh.h new file mode 100755 index 0000000..805f061 --- /dev/null +++ b/bcmdhd.101.10.361.x/include/sdioh.h @@ -0,0 +1,459 @@ +/* + * SDIO Host Controller Spec header file + * Register map and definitions for the Standard Host Controller + * + * Copyright (C) 2020, Broadcom. 
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _SDIOH_H
+#define _SDIOH_H
+
+/*
+ * Standard SD Host Control Register Map.
+ *
+ * Reference definitions from:
+ * SD Specification, Part A2: SD Host Controller Standard Specification
+ * Version 1.00
+ * February, 2004
+ * http://www.sdcard.org
+ *
+ * One set for each SDIO slot on the controller board.
+ * In PCI, each set is mapped into a BAR. Since PCI only
+ * has six BARs, spec compliant PCI SDIO host controllers are
+ * limited to 6 slots.
+ */
+#define SD_SysAddr 0x000
+#define SD_BlockSize 0x004
+#define SD_BlockCount 0x006
+#define SD_Arg0 0x008
+#define SD_Arg1 0x00A /* Not really in spec, remove? */
+#define SD_TransferMode 0x00C
+#define SD_Command 0x00E
+#define SD_Response0 0x010
+#define SD_Response1 0x012
+#define SD_Response2 0x014
+#define SD_Response3 0x016
+#define SD_Response4 0x018
+#define SD_Response5 0x01A
+#define SD_Response6 0x01C
+#define SD_Response7 0x01E
+#define SD_BufferDataPort0 0x020
+#define SD_BufferDataPort1 0x022
+#define SD_PresentState 0x024
+#define SD_HostCntrl 0x028
+#define SD_PwrCntrl 0x029
+#define SD_BlockGapCntrl 0x02A
+#define SD_WakeupCntrl 0x02B
+#define SD_ClockCntrl 0x02C /* Add (and use) bitdefs */
+#define SD_TimeoutCntrl 0x02E /* Add (and use) bitdefs */
+#define SD_SoftwareReset 0x02F
+#define SD_IntrStatus 0x030
+#define SD_ErrorIntrStatus 0x032 /* Add (and use) bitdefs */
+#define SD_IntrStatusEnable 0x034
+#define SD_ErrorIntrStatusEnable 0x036
+#define SD_IntrSignalEnable 0x038
+#define SD_ErrorIntrSignalEnable 0x03A
+#define SD_CMD12ErrorStatus 0x03C
+#define SD_Capabilities 0x040
+#define SD_Capabilities3 0x044
+#define SD_MaxCurCap 0x048
+#define SD_MaxCurCap_Reserved 0x04C
+#define SD_ADMA_ErrStatus 0x054
+#define SD_ADMA_SysAddr 0x58
+#define SD_SlotInterruptStatus 0x0FC
+#define SD_HostControllerVersion 0x0FE
+#define SD_GPIO_Reg 0x100
+#define SD_GPIO_OE 0x104
+#define SD_GPIO_Enable 0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo 0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2 0x03E
+/* preset regs start address and count */
+#define SD3_PresetValStart 0x060
+#define SD3_PresetValCount 8
+/* preset-indiv regs */
+#define SD3_PresetVal_init 0x060
+#define SD3_PresetVal_default 0x062
+#define SD3_PresetVal_HS 0x064
+#define SD3_PresetVal_SDR12 0x066
+#define SD3_PresetVal_SDR25 0x068
+#define SD3_PresetVal_SDR50 0x06a
+#define SD3_PresetVal_SDR104 0x06c
+#define SD3_PresetVal_DDR50 0x06e
+/* SDIO3.0 Revx specific Registers */
+#define SD3_Tuning_Info_Register 0x0EC
+#define SD3_WL_BT_reset_register 0x0F0
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX 0
+#define SD3_PRESETVAL_DESPEED_IX 1
+#define SD3_PRESETVAL_HISPEED_IX 2
+#define SD3_PRESETVAL_SDR12_IX 3
+#define SD3_PRESETVAL_SDR25_IX 4
+#define SD3_PRESETVAL_SDR50_IX 5
+#define SD3_PRESETVAL_SDR104_IX 6
+#define SD3_PRESETVAL_DDR50_IX 7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 0
+#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 7
+/* Note: for the SDIO 2.0 case this mask should be 6 bits (the top 2
+ bits are reserved), but we use 8 bits here since that is required for 3.0.
+*/
+#define CAP_BASECLK_M BITFIELD_MASK(8)
+#define CAP_BASECLK_S 8
+#define CAP_MAXBLOCK_M BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S 16
+#define CAP_ADMA2_M BITFIELD_MASK(1)
+#define CAP_ADMA2_S 19
+#define CAP_ADMA1_M BITFIELD_MASK(1)
+#define CAP_ADMA1_S 20
+#define CAP_HIGHSPEED_M BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S 21
+#define CAP_DMA_M BITFIELD_MASK(1)
+#define CAP_DMA_S 22
+#define CAP_SUSPEND_M BITFIELD_MASK(1)
+#define CAP_SUSPEND_S 23
+#define CAP_VOLT_3_3_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S 24
+#define CAP_VOLT_3_0_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S 25
+#define CAP_VOLT_1_8_M BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S 26
+#define CAP_64BIT_HOST_M BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S 28
+
+#define SDIO_OCR_READ_FAIL (2)
+
+#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S 29
+
+#define CAP_SLOTTYPE_M BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S 30
+
+#define CAP3_MSBits_OFFSET (32)
+/* Note: the following are bits in the MSB 32 bits of the capabilities
+ register, so bit positions start from 0 instead of 32; that is why
+ CAP3_MSBits_OFFSET is subtracted.
+*/
+#define CAP3_SDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_DISABLED (0x0)
+#define CAP3_RETUNING_TC_1024S (0xB)
+#define CAP3_RETUNING_TC_OTHER (0xF)
+
+#define CAP3_CLK_MULT_M BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S 14
+
+#define PRESET_CLK_DIV_M BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S 0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S 0
+#define CAP_CURR_3_0_M BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S 8
+#define CAP_CURR_1_8_M BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S 16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S 0
+#define BLKSZ_BNDRY_M BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S 12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: 
Offset 0x008, size = 4 bytes */ +/* SD_TransferMode Offset 0x00C, size = 2 bytes */ +#define XFER_DMA_ENABLE_M BITFIELD_MASK(1) +#define XFER_DMA_ENABLE_S 0 +#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1) +#define XFER_BLK_COUNT_EN_S 1 +#define XFER_CMD_12_EN_M BITFIELD_MASK(1) +#define XFER_CMD_12_EN_S 2 +#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1) +#define XFER_DATA_DIRECTION_S 4 +#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1) +#define XFER_MULTI_BLOCK_S 5 + +/* SD_Command: Offset 0x00E, size = 2 bytes */ +/* resp_type field */ +#define RESP_TYPE_NONE 0 +#define RESP_TYPE_136 1 +#define RESP_TYPE_48 2 +#define RESP_TYPE_48_BUSY 3 +/* type field */ +#define CMD_TYPE_NORMAL 0 +#define CMD_TYPE_SUSPEND 1 +#define CMD_TYPE_RESUME 2 +#define CMD_TYPE_ABORT 3 + +#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */ +#define CMD_RESP_TYPE_S 0 +#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */ +#define CMD_CRC_EN_S 3 +#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */ +#define CMD_INDEX_EN_S 4 +#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */ +#define CMD_DATA_EN_S 5 +#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc + */ +#define CMD_TYPE_S 6 +#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */ +#define CMD_INDEX_S 8 + +/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */ +/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */ +/* SD_PresentState : Offset 0x024, size = 4 bytes */ +#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */ +#define PRES_CMD_INHIBIT_S 0 +#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */ +#define PRES_DAT_INHIBIT_S 1 +#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */ +#define PRES_DAT_BUSY_S 2 +#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */ +#define PRES_PRESENT_RSVD_S 3 +#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */ +#define PRES_WRITE_ACTIVE_S 8 +#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */ +#define PRES_READ_ACTIVE_S 9 +#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */ +#define PRES_WRITE_DATA_RDY_S 10 +#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */ +#define PRES_READ_DATA_RDY_S 11 +#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */ +#define PRES_CARD_PRESENT_S 16 +#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */ +#define PRES_CARD_STABLE_S 17 +#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */ +#define PRES_CARD_PRESENT_RAW_S 18 +#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? 
*/ +#define PRES_WRITE_ENABLED_S 19 +#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */ +#define PRES_DAT_SIGNAL_S 20 +#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */ +#define PRES_CMD_SIGNAL_S 24 + +/* SD_HostCntrl: Offset 0x028, size = 1 bytes */ +#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */ +#define HOST_LED_S 0 +#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */ +#define HOST_DATA_WIDTH_S 1 +#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */ +#define HOST_DMA_SEL_S 3 +#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */ +#define HOST_HI_SPEED_EN_S 2 + +/* Host Control2: */ +#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */ + +#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */ + +#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */ + +#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */ + +#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */ +#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */ + +#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */ + +#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */ +#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */ + +#define HOST_CONTR_VER_2 (1) +#define HOST_CONTR_VER_3 (2) + +/* misc defines */ +/* Driver uses of these should be replaced! */ +#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */ +#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */ + +/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */ +#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */ +#define PWR_BUS_EN_S 0 +#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */ +#define PWR_VOLTS_S 1 + +/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */ +#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */ +#define SW_RESET_ALL_S 0 +#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */ +#define SW_RESET_CMD_S 1 +#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */ +#define SW_RESET_DAT_S 2 + +/* SD_IntrStatus: Offset 0x030, size = 2 bytes */ +/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */ +#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */ +#define INTSTAT_CMD_COMPLETE_S 0 +#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1) +#define INTSTAT_XFER_COMPLETE_S 1 +#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1) +#define INTSTAT_BLOCK_GAP_EVENT_S 2 +#define INTSTAT_DMA_INT_M BITFIELD_MASK(1) +#define INTSTAT_DMA_INT_S 3 +#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_WRITE_READY_S 4 +#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_READ_READY_S 5 +#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INSERTION_S 6 +#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1) +#define INTSTAT_CARD_REMOVAL_S 7 +#define INTSTAT_CARD_INT_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INT_S 8 +#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */ +#define INTSTAT_RETUNING_INT_S 12 +#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */ +#define INTSTAT_ERROR_INT_S 15 + +/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */ +/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */ +#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_CMD_TIMEOUT_S 0 +#define 
ERRINT_CMD_CRC_M BITFIELD_MASK(1) +#define ERRINT_CMD_CRC_S 1 +#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_CMD_ENDBIT_S 2 +#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1) +#define ERRINT_CMD_INDEX_S 3 +#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_DATA_TIMEOUT_S 4 +#define ERRINT_DATA_CRC_M BITFIELD_MASK(1) +#define ERRINT_DATA_CRC_S 5 +#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_DATA_ENDBIT_S 6 +#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1) +#define ERRINT_CURRENT_LIMIT_S 7 +#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1) +#define ERRINT_AUTO_CMD12_S 8 +#define ERRINT_VENDOR_M BITFIELD_MASK(4) +#define ERRINT_VENDOR_S 12 +#define ERRINT_ADMA_M BITFIELD_MASK(1) +#define ERRINT_ADMA_S 9 + +/* Also provide definitions in "normal" form to allow combined masks */ +#define ERRINT_CMD_TIMEOUT_BIT 0x0001 +#define ERRINT_CMD_CRC_BIT 0x0002 +#define ERRINT_CMD_ENDBIT_BIT 0x0004 +#define ERRINT_CMD_INDEX_BIT 0x0008 +#define ERRINT_DATA_TIMEOUT_BIT 0x0010 +#define ERRINT_DATA_CRC_BIT 0x0020 +#define ERRINT_DATA_ENDBIT_BIT 0x0040 +#define ERRINT_CURRENT_LIMIT_BIT 0x0080 +#define ERRINT_AUTO_CMD12_BIT 0x0100 +#define ERRINT_ADMA_BIT 0x0200 + +/* Masks to select CMD vs. DATA errors */ +#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\ + ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT) +#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\ + ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT) +#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS) + +/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */ +/* SD_ClockCntrl : Offset 0x02C , size = bytes */ +/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */ +/* SD_IntrStatus : Offset 0x030 , size = bytes */ +/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */ +/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */ +/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */ +/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */ +/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */ +/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */ +/* SD_Capabilities : Offset 0x040 , size = bytes */ +/* SD_MaxCurCap : Offset 0x048 , size = bytes */ +/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */ +/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */ +/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */ + +/* SDIO Host Control Register DMA Mode Definitions */ +#define SDIOH_SDMA_MODE 0 +#define SDIOH_ADMA1_MODE 1 +#define SDIOH_ADMA2_MODE 2 +#define SDIOH_ADMA2_64_MODE 3 + +#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */ +#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */ +#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */ +#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */ +#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */ +#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */ +#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. 
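+ * As a hedged example (assuming the standard ADMA2 layout with the
+ * transfer length in bits [31:16]): a final 4 KB data line could set
+ * len_attr = (4096u << 16) | ADMA2_ATTRIBUTE_ACT_TRAN |
+ * ADMA2_ATTRIBUTE_END | ADMA2_ATTRIBUTE_VALID.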
+ */
+#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+ uint32 len_attr;
+ uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+ uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+#endif /* _SDIOH_H */
diff --git a/bcmdhd.101.10.361.x/include/sdiovar.h b/bcmdhd.101.10.361.x/include/sdiovar.h
new file mode 100755
index 0000000..33c8825
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sdiovar.h
@@ -0,0 +1,124 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+typedef struct sdreg {
+ int func;
+ int offset;
+ int value;
+} sdreg_t;
+
+typedef struct sdreg_64 {
+ int func;
+ int offset;
+ uint64 value;
+} sdreg_64_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+#define SDH_COST_VAL 0x8000 /* Control Regs */
+
+#define NUM_PREV_TRANSACTIONS 16
+
+#ifdef BCMSPI
+/* Error statistics for gSPI */
+struct spierrstats_t {
+ uint32 dna; /* The requested data is not available. */
+ uint32 rdunderflow; /* FIFO underflow happened due to current (F2, F3) rd command */
+ uint32 wroverflow; /* FIFO overflow happened due to current (F1, F2, F3) wr command */
+
+ uint32 f2interrupt; /* OR of all F2 related intr status bits. */
+ uint32 f3interrupt; /* OR of all F3 related intr status bits. */
+
+ uint32 f2rxnotready; /* F2 FIFO is not ready to receive data (FIFO empty) */
+ uint32 f3rxnotready; /* F3 FIFO is not ready to receive data (FIFO empty) */
+
+ uint32 hostcmddataerr; /* Error in command or host data, detected by CRC/checksum
+ * (optional)
+ */
+ uint32 f2pktavailable; /* Packet is available in F2 TX FIFO */
+ uint32 f3pktavailable; /* Packet is available in F3 TX FIFO */
+
+ uint32 dstatus[NUM_PREV_TRANSACTIONS]; /* dstatus bits of last 16 gSPI transactions */
+ uint32 spicmd[NUM_PREV_TRANSACTIONS];
+};
+#endif /* BCMSPI */
+
+typedef struct sdio_bus_metrics {
+ uint32 active_dur; /* msecs */
+
+ /* Generic */
+ uint32 data_intr_cnt; /* data interrupt counter */
+ uint32 mb_intr_cnt; /* mailbox interrupt counter */
+ uint32 error_intr_cnt; /* error interrupt counter */
+ uint32 wakehost_cnt; /* counter for OOB wakehost */
+
+ /* DS forcewake */
+ uint32 ds_wake_on_cnt; /* counter for (clock) ON */
+ uint32 ds_wake_on_dur; /* duration for (clock) ON */
+ uint32 ds_wake_off_cnt; /* counter for (clock) OFF */
+ uint32 ds_wake_off_dur; /* duration for (clock) OFF */
+
+ /* DS_D0 state */
+ uint32 ds_d0_cnt; /* counter for DS_D0 state */
+ uint32 ds_d0_dur; /* duration for DS_D0 state */
+
+ /* DS_D3 state */
+ uint32 ds_d3_cnt; /* counter for DS_D3 state */
+ uint32 ds_d3_dur; /* duration for DS_D3 state */
+
+ /* DS DEV_WAKE */
+ uint32 ds_dw_assrt_cnt; /* counter for DW_ASSERT */
+ uint32 ds_dw_dassrt_cnt; /* counter for DW_DASSERT */
+
+ /* DS mailbox signals */
+ uint32 ds_tx_dsreq_cnt; /* counter for tx HMB_DATA_DSREQ */
+ uint32 ds_tx_dsexit_cnt; /* counter for tx HMB_DATA_DSEXIT */
+ uint32 ds_tx_d3ack_cnt; /* counter for tx HMB_DATA_D3ACK */
+ uint32 ds_tx_d3exit_cnt; /* counter for tx HMB_DATA_D3EXIT */
+ uint32 ds_rx_dsack_cnt; /* counter for rx SMB_DATA_DSACK */
+ uint32 ds_rx_dsnack_cnt; /* counter for rx SMB_DATA_DSNACK */
+ uint32 ds_rx_d3inform_cnt; /* counter for rx SMB_DATA_D3INFORM */
+} sdio_bus_metrics_t;
+
+/* Bus interface info for SDIO */
+typedef struct wl_pwr_sdio_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_SDIO */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ sdio_bus_metrics_t sdio; /* stats from SDIO bus driver */
+} wl_pwr_sdio_stats_t;
+
+#endif /* _sdiovar_h_ */
diff --git a/bcmdhd.101.10.361.x/include/sdspi.h b/bcmdhd.101.10.361.x/include/sdspi.h
new file mode 100755
index 0000000..b030c69
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sdspi.h
@@ -0,0 +1,72 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + */ +#ifndef _SD_SPI_H +#define _SD_SPI_H + +#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */ +#define SPI_START_S 31 +#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */ +#define SPI_DIR_S 30 +#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */ +#define SPI_CMD_INDEX_S 24 +#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */ +#define SPI_RW_S 23 +#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */ +#define SPI_FUNC_S 20 +#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */ +#define SPI_RAW_S 19 +#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */ +#define SPI_STUFF_S 18 +#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */ +#define SPI_BLKMODE_S 19 +#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */ +#define SPI_OPCODE_S 18 +#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */ +#define SPI_ADDR_S 1 +#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */ +#define SPI_STUFF0_S 0 + +#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */ +#define SPI_RSP_START_S 7 +#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */ +#define SPI_RSP_PARAM_ERR_S 6 +#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */ +#define SPI_RSP_RFU5_S 5 +#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */ +#define SPI_RSP_FUNC_ERR_S 4 +#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */ +#define SPI_RSP_CRC_ERR_S 3 +#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */ +#define SPI_RSP_ILL_CMD_S 2 +#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */ +#define SPI_RSP_RFU1_S 1 +#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */ +#define SPI_RSP_IDLE_S 0 + +/* SD-SPI Protocol Definitions */ +#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */ +#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */ +#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */ +#define SDSPI_START_BIT_MASK 0x80 + +#endif /* _SD_SPI_H */ diff --git a/bcmdhd.101.10.361.x/include/siutils.h b/bcmdhd.101.10.361.x/include/siutils.h new file mode 100755 index 0000000..0078bbd --- /dev/null +++ b/bcmdhd.101.10.361.x/include/siutils.h @@ -0,0 +1,1057 @@ +/* + * Misc utility routines for accessing the SOC Interconnects + * of Broadcom HNBU chips. + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _siutils_h_ +#define _siutils_h_ + +#include + +/* Make the d11 core(s) selectable by the user config... 
*/ +#ifndef D11_CORE_UNIT_MASK +/* By default we allow all d11 cores to be used */ +#define D11_CORE_UNIT_MASK 0xFFFFFFFFu +#endif + +/* Generic interrupt bit mask definitions */ +enum bcm_int_reg_idx { + BCM_INT_REG_IDX_0 = 0, + BCM_INT_REG_IDX_1 = 1, + /* temp work around to avoid > 50K invalidation on 4388a0-roml */ +#ifndef ROM_COMPAT_INT_REG_IDX + BCM_INT_REG_IDX_2 = 2, +#endif /* ROM_COMPAT_INT_REG_IDX */ + BCM_INT_REGS_NUM +}; + +typedef struct bcm_int_bitmask { + uint32 bits[BCM_INT_REGS_NUM]; +} bcm_int_bitmask_t; + +#ifndef ROM_COMPAT_INT_REG_IDX + +#define BCM_INT_BITMASK_IS_EQUAL(b, cmp) (\ + (b)->bits[BCM_INT_REG_IDX_0] == (cmp)->bits[BCM_INT_REG_IDX_0] && \ + (b)->bits[BCM_INT_REG_IDX_1] == (cmp)->bits[BCM_INT_REG_IDX_1] && \ + (b)->bits[BCM_INT_REG_IDX_2] == (cmp)->bits[BCM_INT_REG_IDX_2]) + +#define BCM_INT_BITMASK_IS_ZERO(b) (\ + (b)->bits[BCM_INT_REG_IDX_0] == 0 && \ + (b)->bits[BCM_INT_REG_IDX_1] == 0 && \ + (b)->bits[BCM_INT_REG_IDX_2] == 0) + +#define BCM_INT_BITMASK_SET(to, from) do { \ + (to)->bits[BCM_INT_REG_IDX_0] = (from)->bits[BCM_INT_REG_IDX_0]; \ + (to)->bits[BCM_INT_REG_IDX_1] = (from)->bits[BCM_INT_REG_IDX_1]; \ + (to)->bits[BCM_INT_REG_IDX_2] = (from)->bits[BCM_INT_REG_IDX_2]; \ +} while (0) +#define BCM_INT_BITMASK_OR(to, from) do { \ + (to)->bits[BCM_INT_REG_IDX_0] |= (from)->bits[BCM_INT_REG_IDX_0]; \ + (to)->bits[BCM_INT_REG_IDX_1] |= (from)->bits[BCM_INT_REG_IDX_1]; \ + (to)->bits[BCM_INT_REG_IDX_2] |= (from)->bits[BCM_INT_REG_IDX_2]; \ +} while (0) + +#define BCM_INT_BITMASK_AND(to, mask) do { \ + (to)->bits[BCM_INT_REG_IDX_0] &= (mask)->bits[BCM_INT_REG_IDX_0]; \ + (to)->bits[BCM_INT_REG_IDX_1] &= (mask)->bits[BCM_INT_REG_IDX_1]; \ + (to)->bits[BCM_INT_REG_IDX_2] &= (mask)->bits[BCM_INT_REG_IDX_2]; \ +} while (0) + +#else + +#define BCM_INT_BITMASK_IS_EQUAL(b, cmp) (\ + (b)->bits[BCM_INT_REG_IDX_0] == (cmp)->bits[BCM_INT_REG_IDX_0] && \ + (b)->bits[BCM_INT_REG_IDX_1] == (cmp)->bits[BCM_INT_REG_IDX_1]) \ + +#define BCM_INT_BITMASK_IS_ZERO(b) (\ + (b)->bits[BCM_INT_REG_IDX_0] == 0 && \ + (b)->bits[BCM_INT_REG_IDX_1] == 0) + +#define BCM_INT_BITMASK_SET(to, from) do { \ + (to)->bits[BCM_INT_REG_IDX_0] = (from)->bits[BCM_INT_REG_IDX_0]; \ + (to)->bits[BCM_INT_REG_IDX_1] = (from)->bits[BCM_INT_REG_IDX_1]; \ +} while (0) + +#define BCM_INT_BITMASK_OR(to, from) do { \ + (to)->bits[BCM_INT_REG_IDX_0] |= (from)->bits[BCM_INT_REG_IDX_0]; \ + (to)->bits[BCM_INT_REG_IDX_1] |= (from)->bits[BCM_INT_REG_IDX_1]; \ +} while (0) + +#define BCM_INT_BITMASK_AND(to, mask) do { \ + (to)->bits[BCM_INT_REG_IDX_0] &= (mask)->bits[BCM_INT_REG_IDX_0]; \ + (to)->bits[BCM_INT_REG_IDX_1] &= (mask)->bits[BCM_INT_REG_IDX_1]; \ +} while (0) + +#endif /* ROM_COMPAT_INT_REG_IDX */ + +#define WARM_BOOT 0xA0B0C0D0 + +typedef struct si_axi_error_info si_axi_error_info_t; + +#ifdef AXI_TIMEOUTS_NIC +#define SI_MAX_ERRLOG_SIZE 4 +typedef struct si_axi_error +{ + uint32 error; + uint32 coreid; + uint32 errlog_lo; + uint32 errlog_hi; + uint32 errlog_id; + uint32 errlog_flags; + uint32 errlog_status; +} si_axi_error_t; + +struct si_axi_error_info +{ + uint32 count; + si_axi_error_t axi_error[SI_MAX_ERRLOG_SIZE]; +}; +#endif /* AXI_TIMEOUTS_NIC */ + +/** + * Data structure to export all chip specific common variables + * public (read-only) portion of siutils handle returned by si_attach()/si_kattach() + */ +struct si_pub { + bool issim; /**< chip is in simulation or emulation */ + + uint16 socitype; /**< SOCI_SB, SOCI_AI */ + int16 socirev; /**< SOC interconnect rev */ + + uint16 bustype; /**< 
SI_BUS, PCI_BUS */
+ uint16 buscoretype; /**< PCI_CORE_ID, PCIE_CORE_ID */
+ int16 buscorerev; /**< buscore rev */
+ uint16 buscoreidx; /**< buscore index */
+
+ int16 ccrev; /**< chip common core rev */
+ uint32 cccaps; /**< chip common capabilities */
+ uint32 cccaps_ext; /**< chip common capabilities extension */
+ int16 pmurev; /**< pmu core rev */
+ uint32 pmucaps; /**< pmu capabilities */
+
+ uint32 boardtype; /**< board type */
+ uint32 boardrev; /**< board rev */
+ uint32 boardvendor; /**< board vendor */
+ uint32 boardflags; /**< board flags */
+ uint32 boardflags2; /**< board flags2 */
+ uint32 boardflags4; /**< board flags4 */
+
+ uint32 chip; /**< chip number */
+ uint16 chiprev; /**< chip revision */
+ uint16 chippkg; /**< chip package option */
+ uint32 chipst; /**< chip status */
+
+ int16 gcirev; /**< gci core rev */
+ int16 lhlrev; /**< lhl core rev */
+
+ uint32 lpflags; /**< low power flags */
+ uint32 enum_base; /**< backplane address where the chipcommon core resides */
+ bool _multibp_enable;
+ bool rffe_debug_mode;
+ bool rffe_elnabyp_mode;
+
+ si_axi_error_info_t * err_info;
+};
+
+/* for the HIGH_ONLY driver, the si_t must be writable to allow state sync from BMAC to
+ * the HIGH driver; for the monolithic driver, it is readonly to prevent accidental change
+ */
+typedef struct si_pub si_t;
+
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach(). Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core.
+ */
+#define SI_OSH NULL /**< Use for si_kattach when no osh is available */
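+
+/*
+ * Editor's note: the block below is an illustrative sketch added for this
+ * review, not part of the original Broadcom header. It shows the sih-handle
+ * lifecycle described in the comment above. SI_BUS and CC_CORE_ID are
+ * assumed to come from other driver headers (e.g. hndsoc.h), and the guard
+ * macro is hypothetical, so the sketch is never compiled into the driver.
+ */
+#ifdef SIUTILS_LIFECYCLE_SKETCH
+static void si_lifecycle_sketch(osl_t *osh, volatile void *regs, void *sdh)
+{
+	char *vars = NULL;
+	uint varsz = 0;
+	/* attach: allocates the handle and focuses it on a current core */
+	si_t *sih = si_attach(0, osh, regs, SI_BUS, sdh, &vars, &varsz);
+	if (sih == NULL)
+		return;
+	/* refocus the handle on chipcommon (core unit 0) and query it */
+	if (si_setcore(sih, CC_CORE_ID, 0) != NULL)
+		(void)si_corerev(sih);
+	/* detach: releases the handle allocated by si_attach() */
+	si_detach(sih);
+}
+#endif /* SIUTILS_LIFECYCLE_SKETCH */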
+
+#ifndef SOCI_NCI_BUS
+#define BADIDX (SI_MAXCORES + 1)
+#else
+#define BADIDX (0xffffu) /* MAXCORES will be dynamically calculated for NCI. */
+#endif /* SOCI_NCI_BUS */
+
+/* clkctl xtal what flags */
+#define XTAL 0x1 /**< primary crystal oscillator (2050) */
+#define PLL 0x2 /**< main chip pll */
+
+/* clkctl clk mode */
+#define CLK_FAST 0 /**< force fast (pll) clock */
+#define CLK_DYNAMIC 2 /**< enable dynamic clock control */
+
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY 0 /**< Driver */
+#define GPIO_APP_PRIORITY 1 /**< Application */
+#define GPIO_HI_PRIORITY 2 /**< Highest priority. Ignore GPIO reservation */
+
+/* GPIO pull up/down */
+#define GPIO_PULLUP 0
+#define GPIO_PULLDN 1
+
+/* GPIO event regtype */
+#define GPIO_REGEVT 0 /**< GPIO register event */
+#define GPIO_REGEVT_INTMSK 1 /**< GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL 2 /**< GPIO register event int polarity */
+
+/* device path */
+#define SI_DEVPATH_BUFSZ 16 /**< min buffer size in bytes */
+
+/* SI routine enumeration: to be used by update function with multiple hooks */
+#define SI_DOATTACH 1
+#define SI_PCIDOWN 2 /**< wireless interface is down */
+#define SI_PCIUP 3 /**< wireless interface is up */
+
+#ifdef SR_DEBUG
+#define PMU_RES 31
+#endif /* SR_DEBUG */
+
+/* "access" param defines for si_seci_access() below */
+#define SECI_ACCESS_STATUSMASK_SET 0
+#define SECI_ACCESS_INTRS 1
+#define SECI_ACCESS_UART_CTS 2
+#define SECI_ACCESS_UART_RTS 3
+#define SECI_ACCESS_UART_RXEMPTY 4
+#define SECI_ACCESS_UART_GETC 5
+#define SECI_ACCESS_UART_TXFULL 6
+#define SECI_ACCESS_UART_PUTC 7
+#define SECI_ACCESS_STATUSMASK_GET 8
+
+#if defined(BCMQT)
+#define ISSIM_ENAB(sih) TRUE
+#else /* !defined(BCMQT) */
+#define ISSIM_ENAB(sih) FALSE
+#endif /* defined(BCMQT) */
+
+#if defined(ATE_BUILD)
+#define ATE_BLD_ENAB(sih) TRUE
+#else
+#define ATE_BLD_ENAB(sih) FALSE
+#endif
+
+#define INVALID_ADDR (0xFFFFFFFFu)
+
+/* PMU clock/power control */
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih) (BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+#if defined(BCMAOBENAB)
+#define AOB_ENAB(sih) (BCMAOBENAB)
+#else
+#define AOB_ENAB(sih) ((sih)->ccrev >= 35 ? \
+ ((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0)
+#endif /* BCMAOBENAB */
+
+/* chipcommon clock/power control (exclusive with PMU's) */
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih) (0)
+#define CCPLL_ENAB(sih) (0)
+#else
+#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+
+typedef void (*wci2_handler_t)(void *ctx, char *buf, int len);
+
+/* External BT Coex enable mask */
+#define CC_BTCOEX_EN_MASK 0x01
+/* External PA enable mask */
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+/* WL/BT control enable mask */
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+#define UCODE_WAKE_STATUS_BIT 1
+
+#if defined(BCMDONGLEHOST)
+
+/* CR4 specific defines used by the host driver */
+#define SI_CR4_CAP (0x04)
+#define SI_CR4_BANKIDX (0x40)
+#define SI_CR4_BANKINFO (0x44)
+#define SI_CR4_BANKPDA (0x4C)
+
+#define ARMCR4_TCBBNB_MASK 0xf0
+#define ARMCR4_TCBBNB_SHIFT 4
+#define ARMCR4_TCBANB_MASK 0xf
+#define ARMCR4_TCBANB_SHIFT 0
+
+#define SICF_CPUHALT (0x0020)
+#define ARMCR4_BSZ_MASK 0x7f
+#define ARMCR4_BUNITSZ_MASK 0x200
+#define ARMCR4_BSZ_8K 8192
+#define ARMCR4_BSZ_1K 1024
+#endif /* BCMDONGLEHOST */
+#define SI_BPIND_1BYTE 0x1
+#define SI_BPIND_2BYTE 0x3
+#define SI_BPIND_4BYTE 0xF
+
+#define GET_GCI_OFFSET(sih, gci_reg) \
+ (AOB_ENAB(sih)? OFFSETOF(gciregs_t, gci_reg) : OFFSETOF(chipcregs_t, gci_reg))
+
+#define GET_GCI_CORE(sih) \
+ (AOB_ENAB(sih)?
si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX)
+
+#define VARBUF_PRIO_INVALID 0u
+#define VARBUF_PRIO_NVRAM 1u
+#define VARBUF_PRIO_SROM 2u
+#define VARBUF_PRIO_OTP 3u
+#define VARBUF_PRIO_SH_SFLASH 4u
+
+#define BT_IN_RESET_BIT_SHIFT 19u
+#define BT_IN_PDS_BIT_SHIFT 10u
+
+/* === exported functions === */
+extern si_t *si_attach(uint pcidev, osl_t *osh, volatile void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern volatile void *si_d11_switch_addrbase(si_t *sih, uint coreunit);
+extern uint si_corelist(const si_t *sih, uint coreid[]);
+extern uint si_coreid(const si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_flag_alt(const si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(const si_t *sih);
+extern uint si_get_num_cores(const si_t *sih);
+extern uint si_coreunit(const si_t *sih);
+extern uint si_corevendor(const si_t *sih);
+extern uint si_corerev(const si_t *sih);
+extern uint si_corerev_minor(const si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern int si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read);
+
+/* precommit failed when this is removed */
+/* BLAZAR_BRANCH_101_10_DHD_002/build/dhd/linux-fc30/brix-brcm */
+/* TBD: Revisit later */
+#ifdef BCMINTERNAL
+extern int si_backplane_access_64(si_t *sih, uint addr, uint size,
+ uint64 *val, bool read);
+#endif /* BCMINTERNAL */
+
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
+extern volatile uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern volatile void *si_coreregs(const si_t *sih);
+extern uint si_wrapperreg(const si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val);
+extern void *si_wrapperregs(const si_t *sih);
+extern uint32 si_core_cflags(const si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(const si_t *sih, uint32 mask, uint32 val);
+extern void si_commit(si_t *sih);
+extern bool si_iscoreup(const si_t *sih);
+extern uint si_numcoreunits(const si_t *sih, uint coreid);
+extern uint si_numd11coreunits(const si_t *sih);
+extern uint si_findcoreidx(const si_t *sih, uint coreid, uint coreunit);
+extern uint si_findcoreid(const si_t *sih, uint coreidx);
+extern volatile void *si_setcoreidx(si_t *sih, uint coreidx);
+extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern uint32 si_oobr_baseaddr(const si_t *sih, bool second);
+#if !defined(BCMDONGLEHOST)
+extern uint si_corereg_ifup(si_t *sih, uint core_id, uint regoff, uint mask, uint val);
+extern void si_lowpwr_opt(si_t *sih);
+#endif /* !defined(BCMDONGLEHOST) */
+extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx,
+ bcm_int_bitmask_t *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, bcm_int_bitmask_t *intr_val);
+#ifdef USE_NEW_COREREV_API
+extern uint si_corerev_ext(si_t *sih, uint coreid, uint coreunit);
+#else
+uint si_get_corerev(si_t *sih, uint core_id);
+#endif
+extern int si_numaddrspaces(const si_t *sih);
+extern uint32 si_addrspace(const si_t *sih, uint spidx, uint baidx);
+extern
uint32 si_addrspacesize(const si_t *sih, uint spidx, uint baidx); +extern void si_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size); +extern int si_corebist(const si_t *sih); +extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void si_core_disable(const si_t *sih, uint32 bits); +extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m); +extern uint si_chip_hostif(const si_t *sih); +extern uint32 si_clock(si_t *sih); +extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */ +extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */ +extern void si_pci_setup(si_t *sih, uint coremask); +extern int si_pcie_setup(si_t *sih, uint coreidx); +extern void si_setint(const si_t *sih, int siflag); +extern bool si_backplane64(const si_t *sih); +extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg); +extern void si_deregister_intr_callback(si_t *sih); +extern void si_clkctl_init(si_t *sih); +extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih); +extern bool si_clkctl_cc(si_t *sih, uint mode); +extern int si_clkctl_xtal(si_t *sih, uint what, bool on); +extern void si_btcgpiowar(si_t *sih); +extern bool si_deviceremoved(const si_t *sih); +extern void si_set_device_removed(si_t *sih, bool status); +extern uint32 si_sysmem_size(si_t *sih); +extern uint32 si_socram_size(si_t *sih); +extern uint32 si_socram_srmem_size(si_t *sih); +extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda); +extern bool si_is_bus_mpu_present(si_t *sih); + +extern void si_watchdog(si_t *sih, uint ticks); +extern void si_watchdog_ms(si_t *sih, uint32 ms); +extern uint32 si_watchdog_msticks(void); +extern volatile void *si_gpiosetcore(si_t *sih); +extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioin(si_t *sih); +extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_gpioreserve(const si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiorelease(const si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val); +extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val); +extern uint32 si_gpio_int_enable(si_t *sih, bool enable); +extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode); +extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value); +extern uint8 si_gci_host_wake_gpio_init(si_t *sih); +extern uint8 si_gci_time_sync_gpio_init(si_t *sih); +extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state); +extern void si_gci_time_sync_gpio_enable(si_t *sih, uint8 gpio, bool state); +extern void si_gci_host_wake_gpio_tristate(si_t *sih, uint8 gpio, bool state); +extern int si_gpio_enable(si_t *sih, uint32 mask); + +extern void si_invalidate_second_bar0win(si_t *sih); + +extern void si_gci_shif_config_wake_pin(si_t *sih, uint8 gpio_n, + uint8 wake_events, bool gci_gpio); +extern void si_shif_int_enable(si_t *sih, uint8 gpio_n, uint8 
wake_events, bool enable);
+
+/* GCI interrupt handlers */
+extern void si_gci_handler_process(si_t *sih);
+
+extern void si_enable_gpio_wake(si_t *sih, uint8 *wake_mask, uint8 *cur_status, uint8 gci_gpio,
+ uint32 pmu_cc2_mask, uint32 pmu_cc2_value);
+
+/* GCI GPIO event handlers */
+extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
+ gci_gpio_handler_t cb, void *arg);
+extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
+
+extern void si_gci_gpio_chipcontrol_ex(si_t *si, uint8 gpio, uint8 opt);
+extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value);
+extern void si_gci_config_wake_pin(si_t *sih, uint8 gpio_n, uint8 wake_events,
+ bool gci_gpio);
+extern void si_gci_free_wake_pin(si_t *sih, uint8 gpio_n);
+#if !defined(BCMDONGLEHOST)
+extern uint8 si_gci_gpio_wakemask(si_t *sih, uint8 gpio, uint8 mask, uint8 value);
+extern uint8 si_gci_gpio_intmask(si_t *sih, uint8 gpio, uint8 mask, uint8 value);
+#endif /* !defined(BCMDONGLEHOST) */
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool si_pci_pmestat(const si_t *sih);
+extern void si_pci_pmeclr(const si_t *sih);
+extern void si_pci_pmeen(const si_t *sih);
+extern void si_pci_pmestatclr(const si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+
+#ifdef BCMSDIO
+extern void si_sdio_init(si_t *sih);
+#endif
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+ uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+extern uint32 si_seci_access(si_t *sih, uint32 val, int access);
+extern volatile void* si_seci_init(si_t *sih, uint8 seci_mode);
+extern void si_seci_clk_force(si_t *sih, bool val);
+extern bool si_seci_clk_force_status(si_t *sih);
+
+#if (defined(BCMECICOEX) && !defined(BCMDONGLEHOST))
+extern bool si_eci(const si_t *sih);
+extern int si_eci_init(si_t *sih);
+extern void si_eci_notify_bt(si_t *sih, uint32 mask, uint32 val, bool interrupt);
+extern bool si_seci(const si_t *sih);
+extern void* si_gci_init(si_t *sih);
+extern void si_seci_down(si_t *sih);
+extern void si_seci_upd(si_t *sih, bool enable);
+extern bool si_gci(const si_t *sih);
+extern bool si_sraon(const si_t *sih);
+#else
+#define si_eci(sih) 0
+#define si_eci_init(sih) 0
+#define si_eci_notify_bt(sih, mask, val, interrupt) (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a) do {} while (0)
+#define si_gci_init(sih) NULL
+#define si_seci_down(sih) do {} while (0)
+#define si_gci(sih) 0
+#define si_sraon(sih) 0
+#endif /* BCMECICOEX */
+
+/* OTP status */
+extern bool si_is_otp_disabled(const si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask);
+
+/* SPROM availability */
+extern bool si_is_sprom_available(si_t *sih);
+#ifdef SI_SPROM_PROBE
+extern void si_sprom_init(si_t *sih);
+#endif /* SI_SPROM_PROBE */
+
+/* SFlash availability */
+bool si_is_sflash_available(const si_t *sih);
+
+/* OTP/SROM CIS stuff */
+extern int si_cis_source(const si_t *sih);
+#define CIS_DEFAULT 0
+#define CIS_SROM 1
+#define CIS_OTP 2
+
+/* Fab-id information */
+#define DEFAULT_FAB 0x0 /**< Original/first fab used for this chip */
+#define CSM_FAB7 0x1 /**< CSM Fab7 chip */
+#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */
+#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */
+
+/* bp_ind_access default timeout */
+#define BP_ACCESS_TO (500u * 1000u)
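+
+/*
+ * Editor's note: an illustrative sketch added for this review, not part of
+ * the original Broadcom header. It shows how a caller might map the CIS_*
+ * source codes returned by si_cis_source() above to names. The guard macro
+ * is hypothetical, so the sketch is never compiled into the driver.
+ */
+#ifdef SIUTILS_CIS_SKETCH
+static const char *si_cis_source_name(const si_t *sih)
+{
+	switch (si_cis_source(sih)) {
+	case CIS_SROM:
+		return "srom";
+	case CIS_OTP:
+		return "otp";
+	default:
+		return "default"; /* CIS_DEFAULT */
+	}
+}
+#endif /* SIUTILS_CIS_SKETCH */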
+ +extern uint16 BCMATTACHFN(si_fabid)(si_t *sih); +extern uint16 BCMINITFN(si_chipid)(const si_t *sih); + +/* + * Build device path. Path size must be >= SI_DEVPATH_BUFSZ. + * The returned path is NULL terminated and has trailing '/'. + * Return 0 on success, nonzero otherwise. + */ +extern int si_devpath(const si_t *sih, char *path, int size); +extern int si_devpath_pcie(const si_t *sih, char *path, int size); +/* Read variable with prepending the devpath to the name */ +extern char *si_getdevpathvar(const si_t *sih, const char *name); +extern int si_getdevpathintvar(const si_t *sih, const char *name); +extern char *si_coded_devpathvar(const si_t *sih, char *varname, int var_len, const char *name); + +/* === HW PR WARs === */ +extern uint8 si_pcieclkreq(const si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcielcreg(const si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieltrenable(const si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieobffenable(const si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltr_reg(const si_t *sih, uint32 reg, uint32 mask, uint32 val); +extern uint32 si_pcieltrspacing_reg(const si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltrhysteresiscnt_reg(const si_t *sih, uint32 mask, uint32 val); +extern void si_pcie_set_error_injection(const si_t *sih, uint32 mode); +extern void si_pcie_set_L1substate(const si_t *sih, uint32 substate); +#ifndef BCM_BOOTLOADER +extern uint32 si_pcie_get_L1substate(const si_t *sih); +#endif /* BCM_BOOTLOADER */ +extern void si_pci_down(const si_t *sih); +extern void si_pci_up(const si_t *sih); +extern void si_pci_sleep(const si_t *sih); +extern void si_pcie_war_ovr_update(const si_t *sih, uint8 aspm); +extern void si_pcie_power_save_enable(const si_t *sih, bool enable); +extern int si_pci_fixcfg(si_t *sih); +extern bool si_is_warmboot(void); + +extern void si_chipcontrl_restore(si_t *sih, uint32 val); +extern uint32 si_chipcontrl_read(si_t *sih); +extern void si_chipcontrl_srom4360(si_t *sih, bool on); +extern void si_srom_clk_set(si_t *sih); /**< for chips with fast BP clock */ +extern void si_btc_enable_chipcontrol(si_t *sih); +extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag); +/* === debug routines === */ + +extern bool si_taclear(si_t *sih, bool details); + +#ifdef BCMDBG +extern void si_view(si_t *sih, bool verbose); +extern void si_viewall(si_t *sih, bool verbose); +#endif /* BCMDBG */ +#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP) || \ + defined(WLTEST) +struct bcmstrbuf; +extern int si_dump_pcieinfo(const si_t *sih, struct bcmstrbuf *b); +extern void si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b); +extern int si_dump_pcieregs(const si_t *sih, struct bcmstrbuf *b); +#endif /* BCMDBG || BCMDBG_DUMP || WLTEST */ + +#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP) +extern void si_dump(const si_t *sih, struct bcmstrbuf *b); +extern void si_ccreg_dump(si_t *sih, struct bcmstrbuf *b); +extern void si_clkctl_dump(si_t *sih, struct bcmstrbuf *b); +extern int si_gpiodump(si_t *sih, struct bcmstrbuf *b); + +extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */ + +extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint32 si_pciereg(const si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type); +extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low, + int32* data, bool read, uint32 us_timeout); +extern void 
sih_write_sraon(si_t *sih, int offset, int len, const uint32* data); +#ifdef SR_DEBUG +extern void si_dump_pmu(si_t *sih, void *pmu_var); +extern void si_pmu_keep_on(const si_t *sih, int32 int_val); +extern uint32 si_pmu_keep_on_get(const si_t *sih); +extern uint32 si_power_island_set(si_t *sih, uint32 int_val); +extern uint32 si_power_island_get(si_t *sih); +#endif /* SR_DEBUG */ + +extern uint32 si_pcieserdesreg(const si_t *sih, uint32 mdioslave, uint32 offset, + uint32 mask, uint32 val); +extern void si_pcie_set_request_size(const si_t *sih, uint16 size); +extern uint16 si_pcie_get_request_size(const si_t *sih); +extern void si_pcie_set_maxpayload_size(const si_t *sih, uint16 size); +extern uint16 si_pcie_get_maxpayload_size(const si_t *sih); +extern uint16 si_pcie_get_ssid(const si_t *sih); +extern uint32 si_pcie_get_bar0(const si_t *sih); +extern int si_pcie_configspace_cache(const si_t *sih); +extern int si_pcie_configspace_restore(const si_t *sih); +extern int si_pcie_configspace_get(const si_t *sih, uint8 *buf, uint size); + +#ifndef BCMDONGLEHOST +extern void si_muxenab(si_t *sih, uint32 w); +extern uint32 si_clear_backplane_to(si_t *sih); +extern void si_slave_wrapper_add(si_t *sih); + +#ifdef AXI_TIMEOUTS_NIC +extern uint32 si_clear_backplane_to_fast(void *sih, void *addr); +#endif /* AXI_TIMEOUTS_NIC */ + +#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC) +extern uint32 si_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap); +#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */ +#endif /* !BCMDONGLEHOST */ + +extern uint32 si_findcoreidx_by_axiid(const si_t *sih, uint32 axiid); +extern void si_wrapper_get_last_error(const si_t *sih, uint32 *error_status, uint32 *core, + uint32 *lo, uint32 *hi, uint32 *id); +extern uint32 si_get_axi_timeout_reg(const si_t *sih); + +#ifdef AXI_TIMEOUTS_NIC +extern const si_axi_error_info_t * si_get_axi_errlog_info(const si_t *sih); +extern void si_reset_axi_errlog_info(const si_t * sih); +#endif /* AXI_TIMEOUTS_NIC */ + +extern void si_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout, uint32 cid); + +#if defined(BCMDONGLEHOST) +extern uint32 si_tcm_size(si_t *sih); +extern bool si_has_flops(si_t *sih); +#endif /* BCMDONGLEHOST */ + +extern int si_set_sromctl(si_t *sih, uint32 value); +extern uint32 si_get_sromctl(si_t *sih); + +extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val); +extern uint32 si_gci_input(si_t *sih, uint reg); +extern uint32 si_gci_int_enable(si_t *sih, bool enable); +extern void si_gci_reset(si_t *sih); +#ifdef BCMLTECOEX +extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio); +#endif /* BCMLTECOEX */ +#if defined(BCMLTECOEX) && !defined(WLTEST) +extern int si_wci2_rxfifo_handler_register(si_t *sih, wci2_handler_t rx_cb, void *ctx); +extern void si_wci2_rxfifo_handler_unregister(si_t *sih); +#endif /* BCMLTECOEX && !WLTEST */ +extern void si_gci_seci_init(si_t *sih); +extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio, uint32 xtalfreq); + +extern bool si_btcx_wci2_init(si_t *sih); + +extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel); +extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin); +extern void 
si_gci_clear_functionsel(si_t *sih, uint8 fnsel);
+extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos);
+extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
+extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
+extern uint8 si_get_device_wake_opt(si_t *sih);
+extern void si_swdenable(si_t *sih, uint32 swdflag);
+extern uint8 si_enable_perst_wake(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status);
+
+extern uint32 si_get_pmu_reg_addr(si_t *sih, uint32 offset);
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+
+void si_update_masks(si_t *sih);
+void si_force_islanding(si_t *sih, bool enable);
+extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
+extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_pcie_ltr_war(const si_t *sih);
+extern void si_pcie_hw_LTR_war(const si_t *sih);
+extern void si_pcie_hw_L1SS_war(const si_t *sih);
+extern void si_pciedev_crwlpciegen2(const si_t *sih);
+extern void si_pcie_prep_D3(const si_t *sih, bool enter_D3);
+extern void si_pciedev_reg_pm_clk_period(const si_t *sih);
+extern void si_pcie_disable_oobselltr(const si_t *sih);
+extern uint32 si_raw_reg(const si_t *sih, uint32 reg, uint32 val, uint32 write_req);
+
+/* Macro to enable clock gating changes in different cores */
+#define MEM_CLK_GATE_BIT 5
+#define GCI_CLK_GATE_BIT 18
+
+#define USBAPP_CLK_BIT 0
+#define PCIE_CLK_BIT 3
+#define ARMCR4_DBG_CLK_BIT 4
+#define SAMPLE_SYNC_CLK_BIT 17
+#define PCIE_TL_CLK_BIT 18
+#define HQ_REQ_BIT 24
+#define PLL_DIV2_BIT_START 9
+#define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START)
+#define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START)
+
+#define pmu_corereg(si, cc_idx, member, mask, val) \
+ (AOB_ENAB(si) ? \
+ si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member), mask, val): \
+ si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val))
+
+#define PMU_REG(si, member, mask, val) \
+ (AOB_ENAB(si) ? \
+ si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member), mask, val): \
+ si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val))
+
+/* Used only for the regs present in the pmu core and not present in the old cc core */
+#define PMU_REG_NEW(si, member, mask, val) \
+ si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member), mask, val)
+
+#define GCI_REG(si, offset, mask, val) \
+ (AOB_ENAB(si) ?
\ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + offset, mask, val): \ + si_corereg(si, SI_CC_IDX, offset, mask, val)) + +/* Used only for the regs present in the gci core and not present in the old cc core */ +#define GCI_REG_NEW(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + OFFSETOF(gciregs_t, member), mask, val) + +#define LHL_REG(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + OFFSETOF(gciregs_t, member), mask, val) + +#define CHIPC_REG(si, member, mask, val) \ + si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val) + +/* GCI Macros */ +#define ALLONES_32 0xFFFFFFFF +#define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */ +#define GCI_CCTL_RSTSL_OFFSET 1 /**< ResetSeciLogic */ +#define GCI_CCTL_SECIEN_OFFSET 2 /**< EnableSeci */ +#define GCI_CCTL_FSL_OFFSET 3 /**< ForceSeciOutLow */ +#define GCI_CCTL_SMODE_OFFSET 4 /**< SeciOpMode, 6:4 */ +#define GCI_CCTL_US_OFFSET 7 /**< UpdateSeci */ +#define GCI_CCTL_BRKONSLP_OFFSET 8 /**< BreakOnSleep */ +#define GCI_CCTL_SILOWTOUT_OFFSET 9 /**< SeciInLowTimeout, 10:9 */ +#define GCI_CCTL_RSTOCC_OFFSET 11 /**< ResetOffChipCoex */ +#define GCI_CCTL_ARESEND_OFFSET 12 /**< AutoBTSigResend */ +#define GCI_CCTL_FGCR_OFFSET 16 /**< ForceGciClkReq */ +#define GCI_CCTL_FHCRO_OFFSET 17 /**< ForceHWClockReqOff */ +#define GCI_CCTL_FREGCLK_OFFSET 18 /**< ForceRegClk */ +#define GCI_CCTL_FSECICLK_OFFSET 19 /**< ForceSeciClk */ +#define GCI_CCTL_FGCA_OFFSET 20 /**< ForceGciClkAvail */ +#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */ +#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */ +#define GCI_CCTL_SCS 25 /* SeciClkStretch */ + +#define GCI_MODE_UART 0x0 +#define GCI_MODE_SECI 0x1 +#define GCI_MODE_BTSIG 0x2 +#define GCI_MODE_GPIO 0x3 +#define GCI_MODE_MASK 0x7 + +#define GCI_CCTL_LOWTOUT_DIS 0x0 +#define GCI_CCTL_LOWTOUT_10BIT 0x1 +#define GCI_CCTL_LOWTOUT_20BIT 0x2 +#define GCI_CCTL_LOWTOUT_30BIT 0x3 +#define GCI_CCTL_LOWTOUT_MASK 0x3 + +#define GCI_CCTL_SCS_DEF 0x19 +#define GCI_CCTL_SCS_MASK 0xFF + +#define GCI_SECIIN_MODE_OFFSET 0 +#define GCI_SECIIN_GCIGPIO_OFFSET 4 +#define GCI_SECIIN_RXID2IP_OFFSET 8 + +#define GCI_SECIIN_MODE_MASK 0x7 +#define GCI_SECIIN_GCIGPIO_MASK 0xF + +#define GCI_SECIOUT_MODE_OFFSET 0 +#define GCI_SECIOUT_GCIGPIO_OFFSET 4 +#define GCI_SECIOUT_LOOPBACK_OFFSET 8 +#define GCI_SECIOUT_SECIINRELATED_OFFSET 16 + +#define GCI_SECIOUT_MODE_MASK 0x7 +#define GCI_SECIOUT_GCIGPIO_MASK 0xF +#define GCI_SECIOUT_SECIINRELATED_MASK 0x1 + +#define GCI_SECIOUT_SECIINRELATED 0x1 + +#define GCI_SECIAUX_RXENABLE_OFFSET 0 +#define GCI_SECIFIFO_RXENABLE_OFFSET 16 + +#define GCI_SECITX_ENABLE_OFFSET 0 + +#define GCI_GPIOCTL_INEN_OFFSET 0 +#define GCI_GPIOCTL_OUTEN_OFFSET 1 +#define GCI_GPIOCTL_PDN_OFFSET 4 + +#define GCI_GPIOIDX_OFFSET 16 + +#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */ +#define GCI_LTECX_TXCONF_EN_OFFSET 2 +#define GCI_LTECX_PRISEL_EN_OFFSET 3 + +/* To access per GCI bit registers */ +#define GCI_REG_WIDTH 32 + +/* number of event summary bits */ +#define GCI_EVENT_NUM_BITS 32 + +/* gci event bits per core */ +#define GCI_EVENT_BITS_PER_CORE 4 +#define GCI_EVENT_HWBIT_1 1 +#define GCI_EVENT_HWBIT_2 2 +#define GCI_EVENT_SWBIT_1 3 +#define GCI_EVENT_SWBIT_2 4 + +#define GCI_MBDATA_TOWLAN_POS 96 +#define GCI_MBACK_TOWLAN_POS 104 +#define GCI_WAKE_TOWLAN_PO 112 +#define GCI_SWREADY_POS 120 + +/* GCI bit positions */ +/* GCI [127:000] = WLAN [127:0] */ +#define GCI_WLAN_IP_ID 0 +#define GCI_WLAN_BEGIN 0 +#define 
GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4) +#define GCI_WLAN_PERST_POS (GCI_WLAN_BEGIN + 15) + +/* GCI [255:128] = BT [127:0] */ +#define GCI_BT_IP_ID 1 +#define GCI_BT_BEGIN 128 +#define GCI_BT_MBDATA_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBDATA_TOWLAN_POS) +#define GCI_BT_MBACK_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBACK_TOWLAN_POS) +#define GCI_BT_WAKE_TOWLAN_POS (GCI_BT_BEGIN + GCI_WAKE_TOWLAN_PO) +#define GCI_BT_SWREADY_POS (GCI_BT_BEGIN + GCI_SWREADY_POS) + +/* GCI [639:512] = LTE [127:0] */ +#define GCI_LTE_IP_ID 4 +#define GCI_LTE_BEGIN 512 +#define GCI_LTE_FRAMESYNC_POS (GCI_LTE_BEGIN + 0) +#define GCI_LTE_RX_POS (GCI_LTE_BEGIN + 1) +#define GCI_LTE_TX_POS (GCI_LTE_BEGIN + 2) +#define GCI_LTE_WCI2TYPE_POS (GCI_LTE_BEGIN + 48) +#define GCI_LTE_WCI2TYPE_MASK 7 +#define GCI_LTE_AUXRXDVALID_POS (GCI_LTE_BEGIN + 56) + +/* Reg Index corresponding to ECI bit no x of ECI space */ +#define GCI_REGIDX(x) ((x)/GCI_REG_WIDTH) +/* Bit offset of ECI bit no x in 32-bit words */ +#define GCI_BITOFFSET(x) ((x)%GCI_REG_WIDTH) + +#define GCI_ECI_HW0(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 0) +#define GCI_ECI_HW1(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 1) +#define GCI_ECI_SW0(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 2) +#define GCI_ECI_SW1(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 3) + +/* BT SMEM Control Register 0 */ +#define GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL (1 << 28) + +/* GCI RXFIFO Common control */ +#define GCI_RXFIFO_CTRL_AUX_EN 0xFF +#define GCI_RXFIFO_CTRL_FIFO_EN 0xFF00 +#define GCI_RXFIFO_CTRL_FIFO_TYPE2_EN 0x400 + +/* End - GCI Macros */ + +extern void si_pll_sr_reinit(si_t *sih); +extern void si_pll_closeloop(si_t *sih); +extern uint si_num_slaveports(const si_t *sih, uint coreid); +extern uint32 si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx, + uint core_id, uint coreunit); +extern uint32 si_get_d11_slaveport_addr(si_t *sih, uint spidx, + uint baidx, uint coreunit); +void si_introff(const si_t *sih, bcm_int_bitmask_t *intr_val); +void si_intrrestore(const si_t *sih, bcm_int_bitmask_t *intr_val); +bool si_get_nvram_rfldo3p3_war(const si_t *sih); +void si_nvram_res_masks(const si_t *sih, uint32 *min_mask, uint32 *max_mask); +extern uint32 si_xtalfreq(const si_t *sih); +extern uint8 si_getspurmode(const si_t *sih); +extern uint32 si_get_openloop_dco_code(const si_t *sih); +extern void si_set_openloop_dco_code(si_t *sih, uint32 openloop_dco_code); +extern uint32 si_wrapper_dump_buf_size(const si_t *sih); +extern uint32 si_wrapper_dump_binary(const si_t *sih, uchar *p); +extern uint32 si_wrapper_dump_last_timeout(const si_t *sih, uint32 *error, uint32 *core, + uint32 *ba, uchar *p); + +/* SR Power Control */ +extern uint32 si_srpwr_request(const si_t *sih, uint32 mask, uint32 val); +extern uint32 si_srpwr_request_on_rev80(si_t *sih, uint32 mask, uint32 val, + uint32 ucode_awake); +extern uint32 si_srpwr_stat_spinwait(const si_t *sih, uint32 mask, uint32 val); +extern uint32 si_srpwr_stat(si_t *sih); +extern uint32 si_srpwr_domain(si_t *sih); +extern uint32 si_srpwr_domain_all_mask(const si_t *sih); +extern uint8 si_srpwr_domain_wl(si_t *sih); +extern uint32 si_srpwr_bt_status(si_t *sih); +/* SR Power Control */ +bool si_srpwr_cap(si_t *sih); +#define SRPWR_CAP(sih) (si_srpwr_cap(sih)) + +#ifdef BCMSRPWR + extern bool _bcmsrpwr; +#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define SRPWR_ENAB() (_bcmsrpwr) +#elif defined(BCMSRPWR_DISABLED) + #define SRPWR_ENAB() (0) +#else + #define SRPWR_ENAB() (1) +#endif +#else + #define SRPWR_ENAB() (0) +#endif /* 
BCMSRPWR */
+
+/*
+ * Multi-BackPlane architecture. Each can power up/down independently.
+ * Common backplane: shared between BT and WL
+ * ChipC, PCIe, GCI, PMU, SRs
+ * HW powers up as needed
+ * WL BackPlane (WLBP):
+ * ARM, TCM, Main, Aux
+ * Host needs to power up
+ */
+#define MULTIBP_CAP(sih) (BCM4378_CHIP(sih->chip) || \
+ BCM4387_CHIP(sih->chip) || BCM4388_CHIP(sih->chip) || \
+ BCM4389_CHIP(sih->chip) || BCM4385_CHIP(sih->chip) || \
+ BCM4376_CHIP(sih->chip) || BCM4397_CHIP(sih->chip))
+#define MULTIBP_ENAB(sih) ((sih) && (sih)->_multibp_enable)
+
+#ifdef DONGLEBUILD
+extern bool si_check_enable_backplane_log(const si_t *sih);
+#endif /* DONGLEBUILD */
+
+uint32 si_enum_base(uint devid);
+
+/* Default ARM PLL freq 4369/4368 */
+#define ARMPLL_FREQ_400MHZ (400u)
+#define ARMPLL_FREQ_800MHZ (800u)
+/* ARM PLL freq computed using chip defaults is 1002.8235 Mhz */
+#define ARMPLL_FREQ_1000MHZ (1003u)
+
+extern uint8 si_lhl_ps_mode(const si_t *sih);
+extern uint32 si_get_armpllclkfreq(const si_t *sih);
+uint8 si_get_ccidiv(const si_t *sih);
+extern uint8 si_hib_ext_wakeup_isenab(const si_t *sih);
+
+#ifdef UART_TRAP_DBG
+void si_dump_APB_Bridge_registers(const si_t *sih);
+#endif /* UART_TRAP_DBG */
+void si_force_clocks(const si_t *sih, uint clock_state);
+
+#if defined(BCMSDIODEV_ENABLED) && defined(ATE_BUILD)
+bool si_chipcap_sdio_ate_only(const si_t *sih);
+#endif /* BCMSDIODEV_ENABLED && ATE_BUILD */
+
+/* indicates to the siutils how the PCIe BAR0 is mapped.
+ * Here is the current scheme; all of the IDs use BAR0:
+ * id enum wrapper
+ * ==== ========= =========
+ * 0 0000-0FFF 1000-1FFF
+ * 1 4000-4FFF 5000-5FFF
+ * 2 9000-9FFF A000-AFFF
+ * >= 3 not supported
+ */
+void si_set_slice_id(si_t *sih, uint8 slice);
+uint8 si_get_slice_id(const si_t *sih);
+
+/* query the d11 core type */
+#define D11_CORE_TYPE_NORM 0u
+#define D11_CORE_TYPE_SCAN 1u
+uint si_core_d11_type(si_t *sih, uint coreunit);
+
+/* check if the package option allows the d11 core */
+bool si_pkgopt_d11_allowed(si_t *sih, uint coreunit);
+
+/* return if scan core is present */
+bool si_scan_core_present(const si_t *sih);
+void si_configure_pwrthrottle_gpio(si_t *sih, uint8 pwrthrottle_gpio_pin);
+void si_configure_onbody_gpio(si_t *sih, uint8 onbody_gpio_pin);
+
+/* check if HWA core present */
+bool si_hwa_present(const si_t *sih);
+
+/* check if SYSMEM present */
+bool si_sysmem_present(const si_t *sih);
+
+/* return BT state */
+bool si_btc_bt_status_in_reset(si_t *sih);
+bool si_btc_bt_status_in_pds(si_t *sih);
+int si_btc_bt_pds_wakeup_force(si_t *sih, bool force);
+
+/* RFFE RFEM Functions */
+#ifndef BCMDONGLEHOST
+void si_rffe_rfem_init(si_t *sih);
+void si_rffe_set_debug_mode(si_t *sih, bool enable);
+bool si_rffe_get_debug_mode(si_t *sih);
+int si_rffe_set_elnabyp_mode(si_t *sih, uint8 mode);
+int8 si_rffe_get_elnabyp_mode(si_t *sih);
+int si_rffe_rfem_read(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr, uint32 *val);
+int si_rffe_rfem_write(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr, uint32 data);
+#endif /* !BCMDONGLEHOST */
+extern void si_jtag_udr_pwrsw_main_toggle(si_t *sih, bool on);
+extern int si_pmu_res_state_pwrsw_main_wait(si_t *sih);
+extern uint32 si_d11_core_sssr_addr(si_t *sih, uint unit, uint32 *sssr_dmp_sz);
+
+#ifdef USE_LHL_TIMER
+/* Get current HIB time API */
+uint32 si_cur_hib_time(si_t *sih);
+#endif
+
+#endif /* _siutils_h_ */
diff --git a/bcmdhd.101.10.361.x/include/spid.h b/bcmdhd.101.10.361.x/include/spid.h
new file mode 100755
index 0000000..0fbbb23
--- /dev/null +++ b/bcmdhd.101.10.361.x/include/spid.h @@ -0,0 +1,164 @@ +/* + * SPI device spec header file + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * + * <> + */ + +#ifndef _SPI_H +#define _SPI_H + +/* + * Brcm SPI Device Register Map. + * + */ + +typedef volatile struct { + uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */ + uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */ + uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay + * function selection, command/data error check + */ + uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */ + uint16 intr_reg; /* 0x04, Intr status register */ + uint16 intr_en_reg; /* 0x06, Intr mask register */ + uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */ + uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */ + uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */ + uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */ + uint32 test_read; /* 0x14, RO 0xfeedbead signature */ + uint32 test_rw; /* 0x18, RW */ + uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */ + uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */ + uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */ + uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */ +} spi_regs_t; + +/* SPI device register offsets */ +#define SPID_CONFIG 0x00 +#define SPID_RESPONSE_DELAY 0x01 +#define SPID_STATUS_ENABLE 0x02 +#define SPID_RESET_BP 0x03 /* (corerev >= 1) */ +#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */ +#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */ +#define SPID_STATUS_REG 0x08 /* 32 bits */ +#define SPID_F1_INFO_REG 0x0C /* 16 bits */ +#define SPID_F2_INFO_REG 0x0E /* 16 bits */ +#define SPID_F3_INFO_REG 0x10 /* 16 bits */ +#define SPID_TEST_READ 0x14 /* 32 bits */ +#define SPID_TEST_RW 0x18 /* 32 bits */ +#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */ + +/* Bit masks for SPID_CONFIG device register */ +#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */ +#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */ +#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */ +#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */ +#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */ +#define INTR_POLARITY 0x20 /* 1/0 Interrupt 
active polarity is high/low */ +#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */ + +/* Bit mask for SPID_RESPONSE_DELAY device register */ +#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */ + +/* Bit mask for SPID_STATUS_ENABLE device register */ +#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */ +#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */ +#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */ +#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */ +#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */ +#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */ + +/* Bit mask for SPID_RESET_BP device register */ +#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */ +#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */ +#define RESET_SPI 0x80 /* reset the above enabled logic */ + +/* Bit mask for SPID_INTR_REG device register */ +#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */ +#define F2_F3_FIFO_RD_UNDERFLOW 0x0002 +#define F2_F3_FIFO_WR_OVERFLOW 0x0004 +#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */ +#define DATA_ERROR 0x0010 /* Cleared by writing 1 */ +#define F2_PACKET_AVAILABLE 0x0020 +#define F3_PACKET_AVAILABLE 0x0040 +#define F1_OVERFLOW 0x0080 /* Due to last write. Bkplane has pending write requests */ +#define MISC_INTR0 0x0100 +#define MISC_INTR1 0x0200 +#define MISC_INTR2 0x0400 +#define MISC_INTR3 0x0800 +#define MISC_INTR4 0x1000 +#define F1_INTR 0x2000 +#define F2_INTR 0x4000 +#define F3_INTR 0x8000 + +/* Bit mask for 32bit SPID_STATUS_REG device register */ +#define STATUS_DATA_NOT_AVAILABLE 0x00000001 +#define STATUS_UNDERFLOW 0x00000002 +#define STATUS_OVERFLOW 0x00000004 +#define STATUS_F2_INTR 0x00000008 +#define STATUS_F3_INTR 0x00000010 +#define STATUS_F2_RX_READY 0x00000020 +#define STATUS_F3_RX_READY 0x00000040 +#define STATUS_HOST_CMD_DATA_ERR 0x00000080 +#define STATUS_F2_PKT_AVAILABLE 0x00000100 +#define STATUS_F2_PKT_LEN_MASK 0x000FFE00 +#define STATUS_F2_PKT_LEN_SHIFT 9 +#define STATUS_F3_PKT_AVAILABLE 0x00100000 +#define STATUS_F3_PKT_LEN_MASK 0xFFE00000 +#define STATUS_F3_PKT_LEN_SHIFT 21 + +/* Bit mask for 16 bits SPID_F1_INFO_REG device register */ +#define F1_ENABLED 0x0001 +#define F1_RDY_FOR_DATA_TRANSFER 0x0002 +#define F1_MAX_PKT_SIZE 0x01FC + +/* Bit mask for 16 bits SPID_F2_INFO_REG device register */ +#define F2_ENABLED 0x0001 +#define F2_RDY_FOR_DATA_TRANSFER 0x0002 +#define F2_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 16 bits SPID_F3_INFO_REG device register */ +#define F3_ENABLED 0x0001 +#define F3_RDY_FOR_DATA_TRANSFER 0x0002 +#define F3_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */ +#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD + +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 + +#define SPI_MAX_PKT_LEN (2048*4) + +/* Misc defines */ +#define SPI_FUNC_0 0 +#define SPI_FUNC_1 1 +#define SPI_FUNC_2 2 +#define SPI_FUNC_3 3 + +/* with gspi mode, we observed that it almost takes 110ms to come up */ +/* check the register 20ms, for 100 times. 
2 seconds would be enough */
+#define WAIT_F2RXFIFORDY 100
+#define WAIT_F2RXFIFORDY_DELAY 20
+
+#endif /* _SPI_H */
diff --git a/bcmdhd.101.10.361.x/include/trxhdr.h b/bcmdhd.101.10.361.x/include/trxhdr.h
new file mode 100755
index 0000000..5af956c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/trxhdr.h
@@ -0,0 +1,93 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_MAX_LEN 0x3B0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
+#define TRX_BOOTLOADER 0x40 /* the image is a bootloader */
+
+#define TRX_V1 1
+#define TRX_V1_MAX_OFFSETS 3 /* V1: Max number of individual files */
+
+#ifndef BCMTRXV2
+#define TRX_VERSION TRX_V1 /* Version 1 */
+#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
+#endif
+
+/* BMAC host drivers/applications like bcmdl need to support both Ver 1 and
+ * Ver 2 of the trx header. To make this generic, the trx_header structure is
+ * modified as below, where the size of the "offsets" field varies with the
+ * TRX version. Currently, the BMAC host driver and bcmdl are modified to
+ * support TRXV2 as well. To make sure other applications like "dhdl", which
+ * are yet to be enhanced to support TRXV2, are not broken, the new macro and
+ * structure definition take effect only when BCMTRXV2 is defined.
+ */
+struct trx_header {
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+#ifndef BCMTRXV2
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+#else
+ uint32 offsets[1]; /* Offsets of partitions from start of header */
+#endif
+};
+
+#ifdef BCMTRXV2
+#define TRX_VERSION TRX_V2 /* Version 2 */
+#define TRX_MAX_OFFSET TRX_V2_MAX_OFFSETS
+
+#define TRX_V2 2
+/* V2: Max number of individual files
+ * To support SDR signature + Config data region
+ */
+#define TRX_V2_MAX_OFFSETS 5
+#define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
+#define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
+#ifdef IL_BIGENDIAN
+#define TRX_VER(trx) (ltoh32((trx)->flag_version>>16))
+#else
+#define TRX_VER(trx) ((trx)->flag_version>>16)
+#endif
+#define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1)
+#define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2)
+/* For a V2 header, return the V2 header size; otherwise return the V1 size */
+#define SIZEOF_TRX(trx) (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1)
+#else
+#define SIZEOF_TRX(trx) (sizeof(struct trx_header))
+#endif /* BCMTRXV2 */
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
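+
+/*
+ * Editor's note: an illustrative sketch added for this review, not part of
+ * the original Broadcom header. It shows how a host tool might use the
+ * macros above to sanity-check a TRX image. ltoh32() is assumed to come
+ * from bcmendian.h, and the guard macro is hypothetical, so the sketch is
+ * never compiled into the driver.
+ */
+#ifdef TRXHDR_USAGE_SKETCH
+static int trx_is_valid(const struct trx_header *trx, uint32 image_len)
+{
+	if (ltoh32(trx->magic) != TRX_MAGIC)
+		return 0; /* not a TRX image */
+	/* len covers the header itself, so it can be neither smaller than
+	 * the header nor larger than the buffer holding the image
+	 */
+	return (ltoh32(trx->len) >= SIZEOF_TRX(trx)) &&
+	 (ltoh32(trx->len) <= image_len);
+}
+#endif /* TRXHDR_USAGE_SKETCH */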
+
+#endif /* _TRX_HDR_H */
diff --git a/bcmdhd.101.10.361.x/include/typedefs.h b/bcmdhd.101.10.361.x/include/typedefs.h
new file mode 100755
index 0000000..6a25130
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/typedefs.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#if (!defined(EDK_RELEASE_VERSION) || (EDK_RELEASE_VERSION < 0x00020000)) || \
+ !defined(BWL_NO_INTERNAL_STDLIB_SUPPORT)
+
+#ifdef SITE_TYPEDEFS
+
+/*
+ * Define SITE_TYPEDEFS in the compile to include a site-specific
+ * typedef file "site_typedefs.h".
+ *
+ * If SITE_TYPEDEFS is not defined, then the code section below makes
+ * inferences about the compile environment based on defined symbols and
+ * possibly compiler pragmas.
+ *
+ * Following these two sections is the Default Typedefs section.
+ * This section is only processed if USE_TYPEDEF_DEFAULTS is
+ * defined. This section has a default set of typedefs and a few
+ * preprocessor symbols (TRUE, FALSE, NULL, ...).
+ */
+
+#include "site_typedefs.h"
+
+#else
+
+/*
+ * Infer the compile environment based on preprocessor symbols and pragmas.
+ * Override type definitions as needed, and include configuration-dependent
+ * header files to define types.
+ */
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE false
+#endif
+#ifndef TRUE
+#define TRUE true
+#endif
+
+#else /* ! __cplusplus */
+
+#if defined(_WIN32)
+
+#define TYPEDEF_BOOL
+typedef unsigned char bool; /* consistent w/BOOL */
+
+#endif /* _WIN32 */
+
+#endif /* ! __cplusplus */
+
+#if defined(EFI) && !defined(EFI_WINBLD) && !defined(__size_t__)
+typedef long unsigned int size_t;
+#endif /* EFI */
+
+#if !defined(TYPEDEF_UINTPTR)
+#if defined(_WIN64) && !defined(EFI)
+/* use the Windows ULONG_PTR type when compiling for 64 bit */
+#include <basetsd.h>
+#define TYPEDEF_UINTPTR
+typedef ULONG_PTR uintptr;
+#elif defined(__LP64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+#endif /* TYPEDEF_UINTPTR */
+
+#if defined(_RTE_)
+#define _NEED_SIZE_T_
+#endif
+
+/* float_t types conflict with the same typedefs from the standard ANSI-C
+** math.h header file. Don't re-typedef them here.
+*/
+
+#if defined(MACOSX)
+#define TYPEDEF_FLOAT_T
+#endif /* MACOSX */
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+#ifdef _MSC_VER /* Microsoft C */
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+typedef signed __int64 int64;
+typedef unsigned __int64 uint64;
+#endif
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+#if defined(__linux__) && !defined(EFI)
+/*
+ * If this is either a Linux hybrid build or the per-port code of a hybrid build
+ * then use the Linux header files to get some of the typedefs. Otherwise, define
+ * them entirely in this file. We can't always define the types because we get
+ * a duplicate typedef error; there is no way to "undefine" a typedef.
+ * We know when it's per-port code because each file defines LINUX_PORT at the top.
+ */
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif /* TARGETENV_android */
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif /* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif /* == 2.6.18 */
+#endif /* __KERNEL__ */
+#endif /* linux && !EFI */
+
+#if !defined(__linux__) && !defined(_WIN32) && \
+ !defined(_RTE_) && !defined(__DJGPP__) && \
+ !defined(__BOB__) && !defined(EFI)
+#define TYPEDEF_UINT
+#define TYPEDEF_USHORT
+#endif
+
+/* Do not support the (u)int64 types with strict ansi for GNU C */
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif /* __ICL */
+
+#if !defined(_WIN32) && !defined(_RTE_) && \
+ !defined(__DJGPP__) && !defined(__BOB__) && !defined(EFI)
+
+/* pick up ushort & uint from standard types.h */
+#if defined(__linux__) && defined(__KERNEL__)
+
+/* See note above */
+#ifdef USER_MODE
+#include <sys/types.h>
+#else
+#include <linux/types.h> /* sys/types.h and linux/types.h are oil and water */
+#endif /* USER_MODE */
+
+#else
+
+#include <sys/types.h>
+
+#endif /* linux && __KERNEL__ */
+
+#endif /* !_WIN32 && !_RTE_ && !__DJGPP__ */
+
+/* use the default typedefs in the next section of this file */
+#define USE_TYPEDEF_DEFAULTS
+
+#endif /* SITE_TYPEDEFS */
+
+/*
+ * Default Typedefs
+ */
+
+#ifdef
USE_TYPEDEF_DEFAULTS +#undef USE_TYPEDEF_DEFAULTS + +#ifndef TYPEDEF_BOOL +typedef /* @abstract@ */ unsigned char bool; +#endif /* endif TYPEDEF_BOOL */ + +/* define uchar, ushort, uint, ulong */ + +#ifndef TYPEDEF_UCHAR +typedef unsigned char uchar; +#endif + +#ifndef TYPEDEF_USHORT +typedef unsigned short ushort; +#endif + +#ifndef TYPEDEF_UINT +typedef unsigned int uint; +#endif + +#ifndef TYPEDEF_ULONG +typedef unsigned long ulong; +#endif + +/* define [u]int8/16/32/64, uintptr */ + +#ifndef TYPEDEF_UINT8 +typedef unsigned char uint8; +#endif + +#ifndef TYPEDEF_UINT16 +typedef unsigned short uint16; +#endif + +#ifndef TYPEDEF_UINT32 +typedef unsigned int uint32; +#endif + +#ifndef TYPEDEF_UINT64 +typedef unsigned long long uint64; +#endif + +#ifndef TYPEDEF_UINTPTR +typedef unsigned int uintptr; +#endif + +#ifndef TYPEDEF_INT8 +typedef signed char int8; +#endif + +#ifndef TYPEDEF_INT16 +typedef signed short int16; +#endif + +#ifndef TYPEDEF_INT32 +typedef signed int int32; +#endif + +#ifndef TYPEDEF_INT64 +typedef signed long long int64; +#endif + +/* define float32/64, float_t */ + +#ifndef TYPEDEF_FLOAT32 +typedef float float32; +#endif + +#ifndef TYPEDEF_FLOAT64 +typedef double float64; +#endif + +/* + * abstracted floating point type allows for compile time selection of + * single or double precision arithmetic. Compiling with -DFLOAT32 + * selects single precision; the default is double precision. + */ + +#ifndef TYPEDEF_FLOAT_T + +#if defined(FLOAT32) +typedef float32 float_t; +#else /* default to double precision floating point */ +typedef float64 float_t; +#endif + +#endif /* TYPEDEF_FLOAT_T */ + +/* define macro values */ + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 /* TRUE */ +#endif + +#ifndef NULL +#define NULL 0 +#endif + +#ifndef OFF +#define OFF 0 +#endif + +#ifndef ON +#define ON 1 /* ON = 1 */ +#endif + +#define AUTO (-1) /* Auto = -1 */ + +/* define PTRSZ, INLINE */ + +#ifndef PTRSZ +#define PTRSZ sizeof(char*) +#endif + +/* Detect compiler type. */ +#ifdef _MSC_VER + #define BWL_COMPILER_MICROSOFT +#elif defined(__GNUC__) || defined(__lint) + #define BWL_COMPILER_GNU +#elif defined(__CC_ARM) && __CC_ARM + #define BWL_COMPILER_ARMCC +#else + #error "Unknown compiler!" 
+#endif /* _MSC_VER */
+
+#ifndef INLINE
+#if defined(BWL_COMPILER_MICROSOFT)
+ #define INLINE __inline
+#elif defined(BWL_COMPILER_GNU)
+ #define INLINE __inline__
+#elif defined(BWL_COMPILER_ARMCC)
+ #define INLINE __inline
+#else
+ #define INLINE
+#endif /* BWL_COMPILER_MICROSOFT */
+#endif /* INLINE */
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif /* USE_TYPEDEF_DEFAULTS */
+
+/* Suppress unused parameter warning */
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+#else /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */
+
+#include
+#include
+#include
+
+#ifdef stderr
+#undef stderr
+#define stderr stdout
+#endif
+
+typedef UINT8 uint8;
+typedef UINT16 uint16;
+typedef UINT32 uint32;
+typedef UINT64 uint64;
+typedef INT8 int8;
+typedef INT16 int16;
+typedef INT32 int32;
+typedef INT64 int64;
+
+typedef BOOLEAN bool;
+typedef unsigned char uchar;
+typedef UINTN uintptr;
+
+#define UNUSED_PARAMETER(x) (void)(x)
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+#define INLINE
+#define AUTO (-1) /* Auto = -1 */
+#define ON 1 /* ON = 1 */
+#define OFF 0
+
+#endif /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */
+
+/*
+ * Including the bcmdefs.h here, to make sure everyone including typedefs.h
+ * gets this automatically
+*/
+#include <bcmdefs.h>
+#endif /* _TYPEDEFS_H_ */
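To make the guard scheme above concrete, here is a hedged sketch of how a port would override one default type: the TYPEDEF_* announcement must be visible before the Default Typedefs section runs, and everything not overridden falls through to the defaults. The port file shown is hypothetical.

    /* hypothetical per-port header, processed before typedefs.h */
    #define TYPEDEF_UINT            /* tell typedefs.h not to define uint */
    typedef unsigned int uint;      /* must match the platform's real uint */

    #include <typedefs.h>

    static int example(int unused_arg)
    {
        UNUSED_PARAMETER(unused_arg);   /* silences -Wunused-parameter */
        return (int)sizeof(uint8);      /* non-overridden defaults remain: 1 */
    }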
diff --git a/bcmdhd.101.10.361.x/include/usbrdl.h b/bcmdhd.101.10.361.x/include/usbrdl.h
new file mode 100755
index 0000000..be5bd69
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/usbrdl.h
@@ -0,0 +1,134 @@
+/*
+ * Broadcom USB remote download definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: usbrdl.h 597933 2015-11-06 18:52:06Z $
+ */
+
+#ifndef _USB_RDL_H
+#define _USB_RDL_H
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset event
+ * to occur in 2 seconds. It is the responsibility
+ * of the downloaded code to clear this event
+ */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+ * - Not used by bootloader
+ */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+ * if resp unavailable
+ */
+#define DL_CHGSPD 0x0A
+
+#define DL_HWCMD_MASK 0xfc /* Mask for hardware read commands: */
+#define DL_RDHW 0x10 /* Read a hardware address (Ctl-in) */
+#define DL_RDHW32 0x10 /* Read a 32 bit word */
+#define DL_RDHW16 0x11 /* Read 16 bits */
+#define DL_RDHW8 0x12 /* Read an 8 bit byte */
+#define DL_WRHW 0x14 /* Write a hardware address (Ctl-out) */
+#define DL_WRHW_BLK 0x13 /* Block write to hardware access */
+
+#define DL_CMD_WRHW 2
+
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt that includes the hdr info */
+#define DL_READY 1 /* hdr was good, waiting for more of the compressed image */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM value */
+#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START for rdl) */
+
+#define TIMEOUT 5000 /* Timeout for usb commands */
+
+struct bcm_device_id {
+ char *name;
+ uint32 vend;
+ uint32 prod;
+};
+
+typedef struct {
+ uint32 state;
+ uint32 bytes;
+} rdl_state_t;
+
+typedef struct {
+ uint32 chip; /* Chip id */
+ uint32 chiprev; /* Chip rev */
+ uint32 ramsize; /* Size of RAM */
+ uint32 remapbase; /* Current remap base address */
+ uint32 boardtype; /* Type of board */
+ uint32 boardrev; /* Board revision */
+} bootrom_id_t;
+
+/* struct for backplane & jtag accesses */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 addr; /* backplane address for write */
+ uint32 len; /* length of data: 1, 2, 4 bytes */
+ uint32 data; /* data to write */
+} hwacc_t;
+
+
+/* struct for querying nvram params from bootloader */
+#define QUERY_STRING_MAX 32
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ char var[QUERY_STRING_MAX]; /* param name */
+} nvparam_t;
+
+typedef void (*exec_fn_t)(void *sih);
+
+#define USB_CTRL_IN (USB_TYPE_VENDOR | 0x80 | USB_RECIP_INTERFACE)
+#define USB_CTRL_OUT (USB_TYPE_VENDOR | 0 | USB_RECIP_INTERFACE)
+
+#define USB_CTRL_EP_TIMEOUT 500 /* Timeout used in USB control_msg transactions. */
+#define USB_BULK_EP_TIMEOUT 500 /* Timeout used in USB bulk transactions. */
+
+#define RDL_CHUNK_MAX (64 * 1024) /* max size of each dl transfer */
+#define RDL_CHUNK 1500 /* size of each dl transfer */
+
+/* bootloader makes special use of trx header "offsets" array */
+#define TRX_OFFSETS_DLFWLEN_IDX 0 /* Size of the fw; used in uncompressed case */
+#define TRX_OFFSETS_JUMPTO_IDX 1 /* RAM address for jumpto after download */
+#define TRX_OFFSETS_NVM_LEN_IDX 2 /* Length of appended NVRAM data */
+#ifdef BCMTRXV2
+#define TRX_OFFSETS_DSG_LEN_IDX 3 /* Length of digital signature for the first image */
+#define TRX_OFFSETS_CFG_LEN_IDX 4 /* Length of config region, which is not digitally signed */
+#endif /* BCMTRXV2 */
+
+#define TRX_OFFSETS_DLBASE_IDX 0 /* RAM start address for download */
+
+#endif /* _USB_RDL_H */
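A hedged sketch of how a host-side driver might use these definitions to poll the bootloader over endpoint 0 (the authoritative logic lives in the dbus/usb sources of this patch; 'udev' and 'ifnum' are hypothetical, and ltoh32() is assumed from bcmendian.h):

    #include <linux/usb.h>

    static int rdl_get_state(struct usb_device *udev, int ifnum,
            rdl_state_t *state)
    {
        /* DL_GETSTATE is a vendor-specific control-IN request */
        int ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                DL_GETSTATE, USB_CTRL_IN, 0, ifnum,
                state, sizeof(*state), USB_CTRL_EP_TIMEOUT);

        if (ret < 0)
            return ret;
        /* fields arrive little-endian from the dongle */
        return (ltoh32(state->state) == DL_BAD_HDR) ? -EINVAL : 0;
    }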
*/ +#include + +#ifndef VLAN_VID_MASK +#define VLAN_VID_MASK 0xfff /* low 12 bits are vlan id */ +#endif + +#define VLAN_CFI_SHIFT 12 /* canonical format indicator bit */ +#define VLAN_PRI_SHIFT 13 /* user priority */ + +#define VLAN_PRI_MASK 7 /* 3 bits of priority */ + +#define VLAN_TPID_OFFSET 12 /* offset of tag protocol id field */ +#define VLAN_TCI_OFFSET 14 /* offset of tag ctrl info field */ + +#define VLAN_TAG_LEN 4 +#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) /* offset in Ethernet II packet only */ + +#define VLAN_TPID 0x8100 /* VLAN ethertype/Tag Protocol ID */ + +struct vlan_header { + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ +}; + +BWL_PRE_PACKED_STRUCT struct ethervlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ + uint16 ether_type; +} BWL_POST_PACKED_STRUCT; + +struct dot3_mac_llc_snapvlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */ + uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */ + uint16 length; /* frame length incl header */ + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[3]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ + uint16 ether_type; /* ethertype */ +}; + +#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN) + +/* This marks the end of a packed structure section. */ +#include + +#define ETHERVLAN_MOVE_HDR(d, s) \ +do { \ + struct ethervlan_header t; \ + t = *(struct ethervlan_header *)(s); \ + *(struct ethervlan_header *)(d) = t; \ +} while (0) + +#endif /* _vlan_h_ */ diff --git a/bcmdhd.101.10.361.x/include/wl_bam.h b/bcmdhd.101.10.361.x/include/wl_bam.h new file mode 100755 index 0000000..2c7d59c --- /dev/null +++ b/bcmdhd.101.10.361.x/include/wl_bam.h @@ -0,0 +1,74 @@ +/* + * Bad AP Manager for ADPS + * + * Copyright (C) 2020, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
diff --git a/bcmdhd.101.10.361.x/include/wl_bam.h b/bcmdhd.101.10.361.x/include/wl_bam.h
new file mode 100755
index 0000000..2c7d59c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wl_bam.h
@@ -0,0 +1,74 @@
+/*
+ * Bad AP Manager for ADPS
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+#ifndef _WL_BAM_H_
+#define _WL_BAM_H_
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#define WL_BAD_AP_MAX_ENTRY_NUM 20u
+
+typedef struct wl_bad_ap_mngr {
+ osl_t *osh;
+
+ uint32 num;
+ spinlock_t lock;
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ struct mutex fs_lock; /* lock for bad ap file list */
+#endif /* !DHD_ADPS_BAM_EXPORT */
+ struct list_head list;
+} wl_bad_ap_mngr_t;
+
+typedef struct wl_bad_ap_info {
+ struct ether_addr bssid;
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ struct tm tm;
+ uint32 status;
+ uint32 reason;
+ uint32 connect_count;
+#endif /* !DHD_ADPS_BAM_EXPORT */
+} wl_bad_ap_info_t;
+
+typedef struct wl_bad_ap_info_entry {
+ wl_bad_ap_info_t bad_ap;
+ struct list_head list;
+} wl_bad_ap_info_entry_t;
+
+void wl_bad_ap_mngr_init(struct bcm_cfg80211 *cfg);
+void wl_bad_ap_mngr_deinit(struct bcm_cfg80211 *cfg);
+
+int wl_bad_ap_mngr_add(wl_bad_ap_mngr_t *bad_ap_mngr, wl_bad_ap_info_t *bad_ap_info);
+wl_bad_ap_info_entry_t* wl_bad_ap_mngr_find(wl_bad_ap_mngr_t *bad_ap_mngr,
+ const struct ether_addr *bssid);
+
+bool wl_adps_bad_ap_check(struct bcm_cfg80211 *cfg, const struct ether_addr *bssid);
+int wl_adps_enabled(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+int wl_adps_set_suspend(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint8 suspend);
+
+s32 wl_adps_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* _WL_BAM_H_ */
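A hedged sketch of the intended call pattern: record an AP after a failed or misbehaving association, then consult the list before re-enabling ADPS power save. The 'mngr' handle and the suspend policy shown here are assumptions; the authoritative flow is in the wl_bam implementation:

    static void example_bad_ap_flow(struct bcm_cfg80211 *cfg,
            struct net_device *ndev, wl_bad_ap_mngr_t *mngr,
            const struct ether_addr *bssid)
    {
        wl_bad_ap_info_t info;

        memset(&info, 0, sizeof(info));
        memcpy(&info.bssid, bssid, sizeof(info.bssid));
        if (wl_bad_ap_mngr_find(mngr, bssid) == NULL)
            (void)wl_bad_ap_mngr_add(mngr, &info);

        /* keep ADPS suspended against an AP with a bad history */
        if (wl_adps_bad_ap_check(cfg, bssid))
            (void)wl_adps_set_suspend(cfg, ndev, 1);
    }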
diff --git a/bcmdhd.101.10.361.x/include/wl_bigdata.h b/bcmdhd.101.10.361.x/include/wl_bigdata.h
new file mode 100755
index 0000000..bdd4019
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wl_bigdata.h
@@ -0,0 +1,81 @@
+/*
+ * Bigdata logging and report. Covers non-EWP and Hang events.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+#ifndef __WL_BIGDATA_H_
+#define __WL_BIGDATA_H_
+
+#include <802.11.h>
+#include
+#include
+
+#define MAX_STA_INFO_AP_CNT 20
+
+#define DOT11_11B_MAX_RATE 11
+#define DOT11_2GHZ_MAX_CH_NUM 14
+#define DOT11_HT_MCS_RATE_MASK 0xFF
+
+enum {
+ BIGDATA_DOT11_11B_MODE = 0,
+ BIGDATA_DOT11_11G_MODE = 1,
+ BIGDATA_DOT11_11N_MODE = 2,
+ BIGDATA_DOT11_11A_MODE = 3,
+ BIGDATA_DOT11_11AC_MODE = 4,
+ BIGDATA_DOT11_11AX_MODE = 5,
+ BIGDATA_DOT11_MODE_MAX
+};
+
+typedef struct wl_ap_sta_data
+{
+ struct ether_addr mac;
+ uint32 mode_80211;
+ uint32 nss;
+ chanspec_t chanspec;
+ int16 rssi;
+ uint32 rate;
+ uint8 channel;
+ uint32 mimo;
+ uint32 disconnected;
+ uint32 is_empty;
+ uint32 reason_code;
+} wl_ap_sta_data_t;
+
+typedef struct ap_sta_wq_data
+{
+ wl_event_msg_t e;
+ void *dhdp;
+ void *bcm_cfg;
+ void *ndev;
+} ap_sta_wq_data_t;
+
+typedef struct wl_ap_sta_info
+{
+ wl_ap_sta_data_t *ap_sta_data;
+ uint32 sta_list_cnt;
+ struct mutex wq_data_sync;
+} wl_ap_sta_info_t;
+
+int wl_attach_ap_stainfo(void *bcm_cfg);
+int wl_detach_ap_stainfo(void *bcm_cfg);
+int wl_ap_stainfo_init(void *bcm_cfg);
+void wl_gather_ap_stadata(void *handle, void *event_info, u8 event);
+int wl_get_ap_stadata(void *bcm_cfg, struct ether_addr *sta_mac, void **data);
+#endif /* __WL_BIGDATA_H_ */
diff --git a/bcmdhd.101.10.361.x/include/wldev_common.h b/bcmdhd.101.10.361.x/include/wldev_common.h
new file mode 100755
index 0000000..529704e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wldev_common.h
@@ -0,0 +1,135 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wldev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or
+ * netdev_ops->ndo_do_ioctl in new kernels)
+ * @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+s32 wldev_ioctl_get(
+ struct net_device *dev, u32 cmd, void *arg, u32 len);
+
+s32 wldev_ioctl_set(
+ struct net_device *dev, u32 cmd, const void *arg, u32 len);
+
+/** Retrieve named IOVARs, this function calls wldev_ioctl with
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ const void *param, u32 paramlen, void *buf, u32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs, this function calls wldev_ioctl with
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ const void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following function can be implemented if there is a need for bsscfg
+ * indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, const s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg indexed IOVARs, this function calls wldev_ioctl with
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg indexed IOVARs, this function calls wldev_ioctl with
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, const s8 *iovar_name, const void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern int dhd_net_set_fw_path(struct net_device *dev, char *fw);
+extern int dhd_net_bus_suspend(struct net_device *dev);
+extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage);
+extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on,
+ unsigned long delay_msec);
+extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+ wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
+
+#ifdef OEM_ANDROID
+extern bool dhd_force_country_change(struct net_device *dev);
+#endif
+
+extern void dhd_bus_band_set(struct net_device *dev, uint band);
+extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,
+ int revinfo);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
+extern int net_os_set_max_dtim_enable(struct net_device *dev, int val);
+#ifdef DISABLE_DTIM_IN_SUSPEND
+extern int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val);
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+
+#if defined(OEM_ANDROID)
+extern int wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
+ int max, int *bytes_left);
+#endif /* defined(OEM_ANDROID) */
+
+/* Get the link speed from dongle, speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, scb_val_t *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+int wldev_get_mode(struct net_device *dev, uint8 *pband, uint8 caplen);
+int wldev_get_datarate(struct net_device *dev, int *datarate);
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
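A hedged sketch of typical callers of this API from cfg80211 glue code: one plain ioctl wrapper, one named-iovar set. 'ndev' is hypothetical and "roam_off" is just an example iovar name:

    static s32 example_wldev_calls(struct net_device *ndev)
    {
        scb_val_t scb;
        s32 err;

        /* ioctl wrapper: fills scb.val with the current RSSI */
        memset(&scb, 0, sizeof(scb));
        err = wldev_get_rssi(ndev, &scb);
        if (err)
            return err;

        /* named iovar set: issues WLC_SET_VAR under the hood */
        return wldev_iovar_setint(ndev, "roam_off", 1);
    }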
diff --git a/bcmdhd.101.10.361.x/include/wlfc_proto.h b/bcmdhd.101.10.361.x/include/wlfc_proto.h
new file mode 100755
index 0000000..d8d1009
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wlfc_proto.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ *
+ */
+
+/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */
+
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+ /* Use TLV to convey WLFC information.
+ ---------------------------------------------------------------------------
+ | Type | Len | value | Description
+ ---------------------------------------------------------------------------
+ | 1 | 1 | (handle) | MAC OPEN
+ ---------------------------------------------------------------------------
+ | 2 | 1 | (handle) | MAC CLOSE
+ ---------------------------------------------------------------------------
+ | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+ ---------------------------------------------------------------------------
+ | 4 | 4+ | see pkttag comments | TXSTATUS
+ | | 12 | TX status & timestamps | Present only when pkt timestamp is enabled
+ ---------------------------------------------------------------------------
+ | 5 | 4 | see pkttag comments | PKTTAG [host->firmware]
+ ---------------------------------------------------------------------------
+ | 6 | 8 | (handle, ifid, MAC) | MAC ADD
+ ---------------------------------------------------------------------------
+ | 7 | 8 | (handle, ifid, MAC) | MAC DEL
+ ---------------------------------------------------------------------------
+ | 8 | 1 | (rssi) | RSSI - RSSI value for the packet.
+ ---------------------------------------------------------------------------
+ | 9 | 1 | (interface ID) | Interface OPEN
+ ---------------------------------------------------------------------------
+ | 10 | 1 | (interface ID) | Interface CLOSE
+ ---------------------------------------------------------------------------
+ | 11 | 8 | fifo credit returns map | FIFO credits back to the host
+ | | | |
+ | | | | --------------------------------------
+ | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+ | | | | --------------------------------------
+ | | | |
+ ---------------------------------------------------------------------------
+ | 12 | 2 | MAC handle, | Host provides a bitmap of pending
+ | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn.
+ | | | | [host->firmware]
+ ---------------------------------------------------------------------------
+ | 13 | 3 | (count, handle, prec_bmp)| One time request for packet to a specific
+ | | | | MAC destination.
+ ---------------------------------------------------------------------------
+ | 15 | 12 | (pkttag, timestamps) | Send TX timestamp at reception from host
+ ---------------------------------------------------------------------------
+ | 16 | 12 | (pkttag, timestamps) | Send WLAN RX timestamp along with RX frame
+ ---------------------------------------------------------------------------
+ | 255 | N/A | N/A | FILLER - This is a special type
+ | | | | that has no length or value.
+ | | | | Typically used for padding.
+ ---------------------------------------------------------------------------
+ */
+
+typedef enum {
+ WLFC_CTL_TYPE_MAC_OPEN = 1,
+ WLFC_CTL_TYPE_MAC_CLOSE = 2,
+ WLFC_CTL_TYPE_MAC_REQUEST_CREDIT = 3,
+ WLFC_CTL_TYPE_TXSTATUS = 4,
+ WLFC_CTL_TYPE_PKTTAG = 5, /** host<->dongle */
+
+ WLFC_CTL_TYPE_MACDESC_ADD = 6,
+ WLFC_CTL_TYPE_MACDESC_DEL = 7,
+ WLFC_CTL_TYPE_RSSI = 8,
+
+ WLFC_CTL_TYPE_INTERFACE_OPEN = 9,
+ WLFC_CTL_TYPE_INTERFACE_CLOSE = 10,
+
+ WLFC_CTL_TYPE_FIFO_CREDITBACK = 11,
+
+ WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP = 12, /** host->dongle */
+ WLFC_CTL_TYPE_MAC_REQUEST_PACKET = 13,
+ WLFC_CTL_TYPE_HOST_REORDER_RXPKTS = 14,
+
+ WLFC_CTL_TYPE_TX_ENTRY_STAMP = 15,
+ WLFC_CTL_TYPE_RX_STAMP = 16,
+
+ WLFC_CTL_TYPE_UPD_FLR_FETCH = 17, /* PCIE_FLOWCTL: Update Flowring Fetch */
+
+ WLFC_CTL_TYPE_TRANS_ID = 18,
+ WLFC_CTL_TYPE_COMP_TXSTATUS = 19,
+
+ WLFC_CTL_TYPE_TID_OPEN = 20, /* open flowring/s with tid */
+ WLFC_CTL_TYPE_TID_CLOSE = 21, /* close flowring/s with tid */
+ WLFC_CTL_TYPE_UPD_FLR_WEIGHT = 22, /* WLATF_DONGLE */
+ WLFC_CTL_TYPE_ENAB_FFSCH = 23, /* WLATF_DONGLE */
+
+ WLFC_CTL_TYPE_UPDATE_FLAGS = 24, /* clear the flags set in flowring */
+ WLFC_CTL_TYPE_CLEAR_SUPPR = 25, /* free the suppression info in the flowring */
+
+ WLFC_CTL_TYPE_FLOWID_OPEN = 26, /* open flowring with flowid */
+ WLFC_CTL_TYPE_FLOWID_CLOSE = 27, /* close flowring with flowid */
+
+ WLFC_CTL_TYPE_PENDING_TX_PKTS = 28, /* Get the outstanding packets in host
+ * flowring for the given interface.
+ */
+ WLFC_CTL_TYPE_UPD_SCB_RATESEL_CHANGE = 29, /* Upd flow's max rate dynamically */
+ WLFC_CTL_TYPE_AMSDU_STATE = 30, /* Upd flow's AMSDU state(Enabled/Disabled) */
+ WLFC_CTL_TYPE_APP_STATE = 31, /* Upd flow's APP state, enable/disable APP */
+ WLFC_CTL_TYPE_HP2P_EXT_TXSTATUS = 32, /* Hp2p extended tx status */
+ WLFC_CTL_TYPE_HP2P_ACTIVE_STATE = 33, /* Get status of HP2P ring active or not */
+ WLFC_CTL_TYPE_HP2P_QUERY_LIFETIME = 34, /* Query lifetime for last unacked */
+ WLFC_CTL_TYPE_FILLER = 255
+} wlfc_ctl_type_t;
+
+#define WLFC_CTL_VALUE_LEN_FLOWID 2u /* flowid length in TLV */
+
+#define WLFC_CTL_VALUE_LEN_MACDESC 8u /** handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC 1u /** MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI 1u
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE 1u
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2u
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS 4u
+#define WLFC_CTL_VALUE_LEN_PKTTAG 4u
+#define WLFC_CTL_VALUE_LEN_TIMESTAMP 12u /** 4-byte rate info + 2 TSF */
+
+#define WLFC_CTL_VALUE_LEN_SEQ 2u
+#define WLFC_CTL_VALUE_LEN_TID 3u /* interface index, TID */
+
+#define WLFC_CTL_EXT_TXSTATUS_PAYLOAD_LEN 8u /* Payload length of extended tx status */
+
+/* Reset the flags set for the corresponding flowring of the SCB which is de-inited */
+/* FLOW_RING_FLAG_LAST_TIM | FLOW_RING_FLAG_INFORM_PKTPEND | FLOW_RING_FLAG_PKT_REQ */
+#define WLFC_RESET_ALL_FLAGS 0
+#define WLFC_CTL_VALUE_LEN_FLAGS 7 /** flags, MAC */
+
+/* free the data stored to be used for suppressed packets in future */
+#define WLFC_CTL_VALUE_LEN_SUPR 8 /** ifindex, tid, MAC */
+
+#define WLFC_CTL_VALUE_LEN_SCB_RATESEL_CHANGE 7 /* ifindex, MAC */
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
+
+/*
+ WLFC packet identifier: b[31:0] (WLFC_CTL_TYPE_PKTTAG)
+
+ Generation : b[31] => generation number for this packet [host->fw]
+ OR, current generation number [fw->host]
+ Flags : b[30:27] => command, status flags
+ FIFO-AC : b[26:24] => AC-FIFO id
+
+ h-slot : b[23:8] => hanger-slot
+ freerun : b[7:0] => A free running counter
+
+ As far as the firmware is concerned, host generated b[23:0] should be just
+ reflected back on txstatus.
+*/
+
+#ifndef WLFC_PKTFLAG_COMPAT
+#define WLFC_PKTFLAG_PKTFROMHOST 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
+#define WLFC_PKTFLAG_PKT_SENDTOHOST 0x04
+#define WLFC_PKTFLAG_PKT_FLUSHED 0x08
+#else
+#define WLFC_PKTFLAG_PKTFROMHOST_MASK 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED_MASK 0x02
+#define WLFC_PKTFLAG_PKT_SENDTOHOST_MASK 0x04
+#define WLFC_PKTFLAG_PKT_FLUSHED_MASK 0x08
+#endif /* WLFC_PKTFLAG_COMPAT */
+
+#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */
+#define WL_TXSTATUS_STATUS_SHIFT 24
+
+#define WL_TXSTATUS_SET_STATUS(x, status) ((x) = \
+ ((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \
+ (((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT))
+#define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
+ WL_TXSTATUS_STATUS_MASK)
+
+/**
+ * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. It is set by the host to the
+ * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The
+ * firmware accepts a packet when the generation matches; on reset (startup) both "current" and
+ * "expected" are set to 0.
+ */ +#define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */ +#define WL_TXSTATUS_GENERATION_SHIFT 31 + +#define WL_TXSTATUS_SET_GENERATION(x, gen) ((x) = \ + ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \ + (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT)) + +#define WL_TXSTATUS_GET_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \ + WL_TXSTATUS_GENERATION_MASK) + +#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */ +#define WL_TXSTATUS_FLAGS_SHIFT 27 + +#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT)) +#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \ + WL_TXSTATUS_FLAGS_MASK) +#define WL_TXSTATUS_CLEAR_FLAGS(x, flags) ((x) = \ + ((x) & ~(((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))) + +#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */ +#define WL_TXSTATUS_FIFO_SHIFT 24 + +#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT)) +#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK) + +#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */ +#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \ + ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num)) +#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK) + +#define WL_TXSTATUS_HSLOT_MASK 0xffff /* allow 16 bits */ +#define WL_TXSTATUS_HSLOT_SHIFT 8 + +#define WL_TXSTATUS_SET_HSLOT(x, hslot) ((x) = \ + ((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \ + (((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT)) +#define WL_TXSTATUS_GET_HSLOT(x) (((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \ + WL_TXSTATUS_HSLOT_MASK) + +#define WL_TXSTATUS_FREERUNCTR_MASK 0xff /* allow 8 bits */ + +#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr) ((x) = \ + ((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \ + ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK)) +#define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK) + +/* packet prio phase bit updated */ +#define WL_SEQ_PKTPRIO_PHASE_MASK 0x1 +#define WL_SEQ_PKTPRIO_PHASE_SHIFT 15 +#define WL_SEQ_SET_PKTPRIO_PHASE(x, val) ((x) = \ + ((x) & ~(WL_SEQ_PKTPRIO_PHASE_MASK << WL_SEQ_PKTPRIO_PHASE_SHIFT)) | \ + (((val) & WL_SEQ_PKTPRIO_PHASE_MASK) << WL_SEQ_PKTPRIO_PHASE_SHIFT)) +#define WL_SEQ_PKTPRIO_PHASE(x) (((x) >> WL_SEQ_PKTPRIO_PHASE_SHIFT) & \ + WL_SEQ_PKTPRIO_PHASE_MASK) + +/* AMSDU part of d11 seq number */ +#define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_AMSDU_SHIFT 14 +#define WL_SEQ_SET_AMSDU(x, val) ((x) = \ + ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \ + (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) /**< sets a single AMSDU bit */ +/** returns TRUE if ring item is AMSDU (seq = d11 seq nr) */ +#define WL_SEQ_IS_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \ + WL_SEQ_AMSDU_MASK) + +/* indicates last_suppr_seq is valid */ +#define WL_SEQ_VALIDSUPPR_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_VALIDSUPPR_SHIFT 12 +#define WL_SEQ_SET_VALIDSUPPR(x, val) ((x) = \ + ((x) & ~(WL_SEQ_VALIDSUPPR_MASK << WL_SEQ_VALIDSUPPR_SHIFT)) | \ + (((val) & WL_SEQ_VALIDSUPPR_MASK) << WL_SEQ_VALIDSUPPR_SHIFT)) +#define WL_SEQ_GET_VALIDSUPPR(x) (((x) >> WL_SEQ_VALIDSUPPR_SHIFT) & \ + WL_SEQ_VALIDSUPPR_MASK) + +#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMFW_SHIFT 13 +#define 
WL_SEQ_SET_FROMFW(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \ + (((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT)) +/** Set when firmware assigns D11 sequence number to packet */ +#define SET_WL_HAS_ASSIGNED_SEQ(x) WL_SEQ_SET_FROMFW((x), 1) + +/** returns TRUE if packet has been assigned a d11 seq number by the WL firmware layer */ +#define GET_WL_HAS_ASSIGNED_SEQ(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & WL_SEQ_FROMFW_MASK) + +#ifdef WLFC_PKTFLAG_COMPAT +/* Helper macros for WLFC pktflags */ +#define WLFC_PKTFLAG_PKTFROMHOST(p) \ + (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKTFROMHOST_MASK) +#define WLFC_PKTFLAG_PKT_REQUESTED(p) \ + (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKT_REQUESTED_MASK) +#define WLFC_PKTFLAG_PKT_SENDTOHOST(p) \ + (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKT_SENDTOHOST_MASK) +#define WLFC_PKTFLAG_PKT_FLUSHED(p) \ + (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKT_FLUSHED_MASK) +#endif /* WLFC_PKTFLAG_COMPAT */ + +/** + * Proptxstatus related. + * + * When a packet is suppressed by WL or the D11 core, the packet has to be retried. Assigning + * a new d11 sequence number for the packet when retrying would cause the peer to be unable to + * reorder the packets within an AMPDU. So, suppressed packet from bus layer (DHD for SDIO and + * pciedev for PCIE) is re-using d11 seq number, so FW should not assign a new one. + */ +#define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMDRV_SHIFT 12 + +/** + * Proptxstatus, host or fw PCIe layer requests WL layer to reuse d11 seq no. Bit is reset by WL + * subsystem when it reuses the seq number. + */ +#define WL_SEQ_SET_REUSE(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \ + (((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT)) +#define SET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 1) +#define RESET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 0) + +/** Proptxstatus, related to reuse of d11 seq numbers when retransmitting */ +#define IS_WL_TO_REUSE_SEQ(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \ + WL_SEQ_FROMDRV_MASK) + +#define WL_SEQ_NUM_MASK 0xfff /* allow 12 bit */ +#define WL_SEQ_NUM_SHIFT 0 +/** Proptxstatus, sets d11seq no in pkt tag, related to reuse of d11seq no when retransmitting */ +#define WL_SEQ_SET_NUM(x, val) ((x) = \ + ((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \ + (((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT)) +/** Proptxstatus, gets d11seq no from pkt tag, related to reuse of d11seq no when retransmitting */ +#define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \ + WL_SEQ_NUM_MASK) + +#define WL_SEQ_AMSDU_SUPPR_MASK ((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \ + (WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \ + (WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) + +/* 32 STA should be enough??, 6 bits; Must be power of 2 */ +#define WLFC_MAC_DESC_TABLE_SIZE 32 +#define WLFC_MAX_IFNUM 16 +#define WLFC_MAC_DESC_ID_INVALID 0xff + +/* b[7:5] -reuse guard, b[4:0] -value */ +#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f) + +#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \ + (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \ + ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_MAX_PENDING_DATALEN 120 + +/* host is free to discard the packet */ +#define WLFC_CTL_PKTFLAG_DISCARD 0 +/* D11 suppressed a packet */ +#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1 +/* 
WL firmware suppressed a packet because MAC is
+ already in PSMode (short time window)
+*/
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3
+/* Firmware tossed after retries */
+#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4
+/* Firmware wrongly reported suppressed previously, now fixing to acked */
+#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5
+/* Firmware marked this packet expired, lifetime expiration */
+#define WLFC_CTL_PKTFLAG_EXPIRED 6
+/* Firmware dropped this packet for any other reason */
+#define WLFC_CTL_PKTFLAG_DROPPED 7
+/* Firmware freed this packet */
+#define WLFC_CTL_PKTFLAG_MKTFREE 8
+/* Firmware dropped the frame after suppress retries reached max */
+#define WLFC_CTL_PKTFLAG_MAX_SUP_RETR 9
+
+/* Firmware forced packet lifetime expiry */
+#define WLFC_CTL_PKTFLAG_FORCED_EXPIRED 10
+
+#define WLFC_CTL_PKTFLAG_MASK (0x0f) /* For 4-bit mask with one extra bit */
+
+#if defined(PROP_TXSTATUS_DEBUG) && !defined(BINCMP)
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+ {printf("WLFC :%d:caller:%p\n", \
+ __LINE__, CALL_SITE);}} while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %d, %s\n", __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_WHEREIS(s)
+#endif /* PROP_TXSTATUS_DEBUG && !BINCMP */
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS 256
+#define WLHOST_REORDERDATA_LEN 10
+#define WLHOST_REORDERDATA_TOTLEN (WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET 0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET 2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET 4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET 6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET 8
+
+#define WLHOST_REORDERDATA_DEL_FLOW 0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL 0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID 0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID 0x08
+#define WLHOST_REORDERDATA_NEW_HOLE 0x10
+
+/* transaction id data len byte 0: rsvd, byte 1: seqnumber, bytes 2-5 will be used for timestamp */
+#define WLFC_CTL_TRANS_ID_LEN 6
+#define WLFC_TYPE_TRANS_ID_LEN 6
+
+#define WLFC_MODE_HANGER 1 /* use hanger */
+#define WLFC_MODE_AFQ 2 /* use afq (At Firmware Queue) */
+#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2))
+
+#define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */
+#define WLFC_SET_AFQ(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_AFQ_SHIFT))
+/** returns TRUE if firmware supports 'at firmware queue' feature */
+#define WLFC_GET_AFQ(x) (((x) >> WLFC_MODE_AFQ_SHIFT) & 1)
+
+#define WLFC_MODE_REUSESEQ_SHIFT 3 /* seq reuse bit */
+#define WLFC_SET_REUSESEQ(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
+
+/** returns TRUE if 'd11 sequence reuse' has been agreed upon between host and dongle */
+#if defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK)
+/* GET_REUSESEQ is always TRUE in pciedev */
+#define WLFC_GET_REUSESEQ(x) (TRUE)
+#else
+#define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
+#endif /* defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK) */
+
+#define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */
+#define WLFC_SET_REORDERSUPP(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT))
+/** returns TRUE if 'reorder suppress' has been agreed upon between host and dongle */
+#define WLFC_GET_REORDERSUPP(x) (((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1)
+
+#define FLOW_RING_CREATE 1u
+#define FLOW_RING_DELETE 2u
+#define FLOW_RING_FLUSH 3u
+#define FLOW_RING_OPEN 4u
+#define FLOW_RING_CLOSED 5u
+#define FLOW_RING_FLUSHED 6u
+#define FLOW_RING_TIM_SET 7u
+#define FLOW_RING_TIM_RESET 8u
+#define FLOW_RING_FLUSH_TXFIFO 9u
+#define FLOW_RING_GET_PKT_MAX 10u
+#define FLOW_RING_RESET_WEIGHT 11u
+#define FLOW_RING_UPD_PRIOMAP 12u
+#define FLOW_RING_HP2P_CREATE 13u
+#define FLOW_RING_HP2P_DELETE 14u
+#define FLOW_RING_GET_BUFFERED_TIME 15u
+#define FLOW_RING_HP2P_TXQ_STRT 16u
+#define FLOW_RING_HP2P_TXQ_STOP 17u
+
+/* bit 7 indicates whether the tid field carries TID (1) or AC (0) mapped info */
+#define PCIEDEV_IS_AC_TID_MAP_MASK 0x80
+
+#define WLFC_PCIEDEV_AC_PRIO_MAP 0
+#define WLFC_PCIEDEV_TID_PRIO_MAP 1
+#define WLFC_PCIEDEV_LLR_PRIO_MAP 2
+
+void wlc_wlfc_set_pkttime(void* pkt, uint16 time);
+
+/* reason for disabling APP, when none are set, APP will be enabled */
+typedef enum {
+ APP_STS_FLOWRING_NO_APP = 0u, /* Reason code used by pciedev */
+ APP_STS_FLOWRING_CLOSED = 1u, /* Disable APP as flowring is closed */
+ APP_STS_CRYPTO_UNSUPPORTED = 2u, /* Security type doesn't support APP */
+ APP_STS_80211_FRAGMENTATION = 3u, /* 802.11 fragmentation enabled */
+ APP_STS_MAX = 4u /* MAX */
+} app_disable_reason_s;
+
+/* shared structure between wlc and pciedev layer to set/reset a reason code */
+typedef struct app_upd_sts {
+ bool set; /* if set, app is disabled for reason rsn */
+ bool sta; /* set if scb/flowring belong to sta */
+ app_disable_reason_s rsn; /* APP disable reason codes. */
+} app_upd_sts_t;
+
+#endif /* __wlfc_proto_definitions_h__ */
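A worked example (not part of the patch) of composing and cracking the 32-bit WLFC packet tag with the accessor macros defined above; the values are arbitrary and endian conversion is ignored for brevity:

    static void example_wlfc_pkttag(void)
    {
        uint32 tag = 0;

        WL_TXSTATUS_SET_GENERATION(tag, 1);     /* b[31] */
        /* WLFC_PKTFLAG_PKTFROMHOST is the non-COMPAT spelling, b[30:27] */
        WL_TXSTATUS_SET_FLAGS(tag, WLFC_PKTFLAG_PKTFROMHOST);
        WL_TXSTATUS_SET_FIFO(tag, 2);           /* b[26:24]: AC-FIFO id */
        WL_TXSTATUS_SET_HSLOT(tag, 0x123);      /* b[23:8]: hanger slot */
        WL_TXSTATUS_SET_FREERUNCTR(tag, 7);     /* b[7:0] */

        /* and back out again:
         * WL_TXSTATUS_GET_GENERATION(tag) == 1
         * WL_TXSTATUS_GET_HSLOT(tag)      == 0x123
         * WL_TXSTATUS_GET_FREERUNCTR(tag) == 7
         */
        (void)tag;
    }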
diff --git a/bcmdhd.101.10.361.x/include/wlioctl.h b/bcmdhd.101.10.361.x/include/wlioctl.h
new file mode 100755
index 0000000..97f0148
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wlioctl.h
@@ -0,0 +1,25850 @@
+/*
+ * Custom OID/ioctl definitions for
+ *
+ *
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <>
+ */
+
+#ifndef _wlioctl_h_
+#define _wlioctl_h_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include <802.11.h>
+#include <802.11s.h>
+#include <802.1d.h>
+#include
+#ifdef WL11AX
+#include <802.11ax.h>
+#endif /* WL11AX */
+#include
+#include
+#include
+
+#include
+#include
+#define SSSR_NEW_API
+
+/* Include bcmerror.h for error codes or aliases */
+#ifdef BCMUTILS_ERR_CODES
+#include <bcmerror.h>
+#endif /* BCMUTILS_ERR_CODES */
+#include
+
+/* NOTE re: Module specific error codes.
+ *
+ * BCME_.. error codes are extended by various features - e.g. FTM, NAN, SAE etc.
+ * The current process is to allocate a range of 1024 negative 32 bit integers to
+ * each module that extends the error codes to indicate a module specific status.
+ *
+ * The next range to use is below. If that range is used for a new feature, please
+ * update the range to be used by the next feature.
+ *
+ * The error codes -4096 ... -5119 are reserved for firmware signing.
+ *
+ * Next available (inclusive) range: [-8*1024 + 1, -7*1024]
+ *
+ * End Note
+ */
+
+/* 11ax trigger frame format - versioning info */
+#define TRIG_FRAME_FORMAT_11AX_DRAFT_1P1 0
+
+typedef struct {
+ uint32 num;
+ chanspec_t list[1];
+} chanspec_list_t;
+
+#define RSN_KCK_LENGTH 16
+#define RSN_KEK_LENGTH 16
+#define TPK_FTM_LEN 16
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ 16
+#endif
+
+#define WL_ASSOC_START_EVT_DATA_VERSION 1
+
+typedef struct assoc_event_data {
+ uint32 version;
+ uint32 flags;
+ chanspec_t join_chspec;
+} assoc_event_data_t;
+
+/**Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+ cdc_ioctl_t msg;
+ uint32 data_len;
+ char intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+
+#define BCM_IOV_XTLV_VERSION 0
+
+#define MAX_NUM_D11CORES 2
+
+/**DFS Forced param */
+typedef struct wl_dfs_forced_params {
+ chanspec_t chspec;
+ uint16 version;
+ chanspec_list_t chspec_list;
+} wl_dfs_forced_t;
+
+#define DFS_PREFCHANLIST_VER 0x01
+#define WL_CHSPEC_LIST_FIXED_SIZE OFFSETOF(chanspec_list_t, list)
+/* size of the dfs forced params given n channels in the list */
+#define WL_DFS_FORCED_PARAMS_SIZE(n) \
+ (sizeof(wl_dfs_forced_t) + (((n) < 1) ? (0) : (((n) - 1)* sizeof(chanspec_t))))
+#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \
+ (WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list))
+#define WL_DFS_FORCED_PARAMS_MAX_SIZE \
+ WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t))
+
+/**association decision information */
+typedef struct {
+ uint8 assoc_approved; /**< (re)association approved */
+ uint8 pad;
+ uint16 reject_reason; /**< reason code for rejecting association */
+ struct ether_addr da;
+ uint8 pad1[6];
+#if defined(NDIS) && (NDISVER >= 0x0620)
+ LARGE_INTEGER sys_time; /**< current system time */
+#else
+ int64 sys_time; /**< current system time */
+#endif
+} assoc_decision_t;
+
+#define DFS_SCAN_S_IDLE -1
+#define DFS_SCAN_S_RADAR_FREE 0
+#define DFS_SCAN_S_RADAR_FOUND 1
+#define DFS_SCAN_S_INPROGESS 2
+#define DFS_SCAN_S_SCAN_ABORTED 3
+#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4
+#define DFS_SCAN_S_MAX 5
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+ struct ether_addr da;
+ uint16 len;
+ uint32 packetId;
+ uint8 data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+ uint8 ssid_len; /**< the length of SSID */
+ uint8 ssid[32]; /**< SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+ uint32 channel;
+ int32 dwell_time;
+ struct ether_addr BSSID;
+ uint8 PAD[2];
+ wl_action_frame_t action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL 0
+#define MFP_TEST_FLAG_ANY_KEY 1
+typedef struct wl_sa_query {
+ uint32 flag;
+ uint8 action;
+ uint8 PAD;
+ uint16 id;
+ struct ether_addr da;
+ uint16 PAD;
+} wl_sa_query_t;
+
+/* EXT_STA */
+/**association information */
+typedef struct {
+ uint32 assoc_req; /**< offset to association request frame */
+ uint32 assoc_req_len; /**< association request frame length */
+ uint32 assoc_rsp; /**< offset to association response frame */
+ uint32 assoc_rsp_len; /**< association response frame length */
+ uint32 bcn; /**< offset to AP beacon */
+ uint32 bcn_len; /**< AP beacon length */
+ uint32 wsec; /**< ucast security algo */
+ uint32 wpaie; /**< offset to WPA ie */
+ uint8 auth_alg; /**< 802.11 authentication mode */
+ uint8 WPA_auth; /**< WPA: authenticated key management */
+ uint8 ewc_cap; /**< EWC (MIMO) capable */
+ uint8 ofdm; /**< OFDM */
+} assoc_info_t;
+/* defined(EXT_STA) */
+
+/* Flags for OBSS IOVAR Parameters */
+#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD (0x01)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD (0x02)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04)
+#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD (0x08)
+#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD (0x10)
+#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD (0x20)
+#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD (0x40)
+
+/* OBSS IOVAR Version information */
+#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 obss_bwsw_activity_cfm_count_cfg; /**< configurable count in
+ * seconds before we confirm that OBSS is present and
+ * dynamically activate dynamic bwswitch.
+ */
+ uint8 obss_bwsw_no_activity_cfm_count_cfg; /**< configurable count in
+ * seconds before we confirm that OBSS is GONE and
+ * dynamically start pseudo upgrade. If in pseudo sense time, we
+ * will see OBSS, [means that, we false detected that OBSS-is-gone
+ * in watchdog] this count will be incremented in steps of
+ * obss_bwsw_no_activity_cfm_count_incr_cfg for confirming OBSS
+ * detection again. Note that, at present, max 30 seconds is
+ * allowed like this. [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT]
+ */
+ uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above
+ */
+ uint16 obss_bwsw_pseudo_sense_count_cfg; /**< number of msecs/cnt to be in
+ * pseudo state. This is used to sense/measure the stats from lq.
+ */
+ uint8 obss_bwsw_rx_crs_threshold_cfg; /**< RX CRS default threshold */
+ uint8 obss_bwsw_dur_thres; /**< OBSS dyn bwsw trigger/RX CRS Sec */
+ uint8 obss_bwsw_txop_threshold_cfg; /**< TXOP default threshold */
+} BWL_POST_PACKED_STRUCT wlc_obss_dynbwsw_config_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 version; /**< version field */
+ uint32 config_mask;
+ uint32 reset_mask;
+ wlc_obss_dynbwsw_config_t config_params;
+} BWL_POST_PACKED_STRUCT obss_config_params_t;
+#include <packed_section_end.h>
+
+/**bsscfg type */
+typedef enum bsscfg_type {
+ BSSCFG_TYPE_GENERIC = 0, /**< Generic AP/STA/IBSS BSS */
+ BSSCFG_TYPE_P2P = 1, /**< P2P BSS */
+ /* index 2 earlier used for BTAMP */
+ BSSCFG_TYPE_PSTA = 3,
+ BSSCFG_TYPE_TDLS = 4,
+ BSSCFG_TYPE_SLOTTED_BSS = 5,
+ BSSCFG_TYPE_PROXD = 6,
+ BSSCFG_TYPE_NAN = 7,
+ BSSCFG_TYPE_MESH = 8,
+ BSSCFG_TYPE_AIBSS = 9
+} bsscfg_type_t;
+
+/* bsscfg subtype */
+typedef enum bsscfg_subtype {
+ BSSCFG_SUBTYPE_NONE = 0,
+ BSSCFG_GENERIC_STA = 1, /* GENERIC */
+ BSSCFG_GENERIC_AP = 2,
+ BSSCFG_GENERIC_IBSS = 6,
+ BSSCFG_P2P_GC = 3, /* P2P */
+ BSSCFG_P2P_GO = 4,
+ BSSCFG_P2P_DISC = 5,
+ /* Index 7 & 8 earlier used for BTAMP */
+ BSSCFG_SUBTYPE_AWDL = 9, /* SLOTTED_BSS_TYPE */
+ BSSCFG_SUBTYPE_NAN_MGMT = 10,
+ BSSCFG_SUBTYPE_NAN_DATA = 11,
+ BSSCFG_SUBTYPE_NAN_MGMT_DATA = 12
+} bsscfg_subtype_t;
+
+typedef struct wlc_bsscfg_info {
+ uint32 type;
+ uint32 subtype;
+} wlc_bsscfg_info_t;
+
+/* ULP SHM Offsets info */
+typedef struct ulp_shm_info {
+ uint32 m_ulp_ctrl_sdio;
+ uint32 m_ulp_wakeevt_ind;
+ uint32 m_ulp_wakeind;
+} ulp_shm_info_t;
+
+/* Note: Due to unpredictable size, bool type should not be used in any ioctl argument structure
+ * Cf PR53622
+ */
+
+#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */
+
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ uint8 bcnflags; /* additional flags w.r.t. beacon */
+ struct {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint8 freespace1; /* make implicit padding explicit */
+ uint8 load; /**< BSS Load from QBSS load IE if available */
+ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint8 padding1[3]; /**< explicit struct alignment padding */
+ uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint8 flags; /**< flags */
+ uint8 vht_cap; /**< BSS is vht capable */
+ uint8 reserved[2]; /**< Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint16 freespace2; /* making implicit padding explicit */
+ uint32 ie_length; /**< byte length of Information Elements */
+ int16 SNR; /**< average SNR during frame reception */
+ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
+ uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
+ uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
+} wl_bss_info_v109_t;
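Since the comment above insists on using ie_offset and length rather than sizeof(), here is a hedged sketch of the access rule (dtoh16()/dtoh32() dongle-to-host conversions are assumed from the DHD side):

    static const uint8 *wl_bss_info_ies(const wl_bss_info_v109_t *bi,
            uint32 *ie_len)
    {
        /* IEs start ie_offset bytes from the top of the record; the
         * struct has grown across versions, so sizeof(*bi) is wrong
         */
        *ie_len = dtoh32(bi->ie_length);
        return ((const uint8 *)bi) + dtoh16(bi->ie_offset);
    }

    /* stepping to the next record in a wl_scan_results_t vector:
     * bi = (const wl_bss_info_v109_t *)((const uint8 *)bi + dtoh32(bi->length));
     */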
beacon */ + struct { + uint32 count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + uint8 accessnet; /* from beacon interwork IE (if bcnflags) */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint8 he_cap; /**< BSS is he capable */ + uint8 load; /**< BSS Load from QBSS load IE if available */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 padding1[3]; /**< explicit struct alignment padding */ + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 reserved[2]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint16 freespace2; /* making implicit padding explicit */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ + uint32 he_mcsmap; /**< STA's Associated hemcsmap */ + uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ + uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ +} wl_bss_info_v109_1_t; + +/** + * BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info_v109_2 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + uint8 bcnflags; /* additional flags w.r.t. 
beacon */ + struct { + uint32 count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + uint8 accessnet; /* from beacon interwork IE (if bcnflags) */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint8 he_cap; /**< BSS is he capable */ + uint8 load; /**< BSS Load from QBSS load IE if available */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 RSVD1[3]; + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 RSVD2[2]; + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint16 freespace2; /* making implicit padding explicit */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ + uint32 he_mcsmap; /**< STA's Associated hemcsmap */ + uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ + uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ + uint32 timestamp[2]; /* Beacon Timestamp for FAKEAP req */ +} wl_bss_info_v109_2_t; + +/** + * BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info_v112 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + uint8 bcnflags; /* additional flags w.r.t. 
beacon */
+	struct {
+		uint32 count;			/**< # rates in this set */
+		uint8 rates[16];		/**< rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/**< supported rates */
+	chanspec_t chanspec;			/**< chanspec for bss */
+	uint16 atim_window;			/**< units are Kusec */
+	uint8 dtim_period;			/**< DTIM period */
+	uint8 accessnet;			/* from beacon interwork IE (if bcnflags) */
+	int16 RSSI;				/**< receive signal strength (in dBm) */
+	int8 phy_noise;				/**< noise (in dBm) */
+	uint8 n_cap;				/**< BSS is 802.11N Capable */
+	uint8 he_cap;				/**< BSS is he capable */
+	uint8 load;				/**< BSS Load from QBSS load IE if available */
+	uint32 nbss_cap;			/**< 802.11N+AC BSS Capabilities */
+	uint8 ctl_ch;				/**< 802.11N BSS control channel number */
+	uint8 RSVD1[3];
+	uint16 vht_rxmcsmap;			/**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint16 vht_txmcsmap;			/**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint8 flags;				/**< flags */
+	uint8 vht_cap;				/**< BSS is vht capable */
+	uint8 RSVD2[2];
+	uint8 basic_mcs[MCSSET_LEN];		/**< 802.11N BSS required MCS set */
+	uint16 ie_offset;			/**< offset at which IEs start, from beginning */
+	uint16 freespace2;			/* making implicit padding explicit */
+	uint32 ie_length;			/**< byte length of Information Elements */
+	int16 SNR;				/**< average SNR during frame reception */
+	uint16 vht_mcsmap;			/**< STA's Associated vhtmcsmap */
+	uint16 vht_mcsmap_prop;			/**< STA's Associated prop vhtmcsmap */
+	uint16 vht_txmcsmap_prop;		/**< prop VHT tx mcs map */
+	uint32 he_mcsmap;			/**< STA's Associated hemcsmap */
+	uint32 he_rxmcsmap;			/**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+	uint32 he_txmcsmap;			/**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+	uint32 timestamp[2];			/* Beacon Timestamp for FAKEAP req */
+	uint8 eht_cap;				/* BSS is EHT capable */
+	uint8 RSVD3[3];
+	/* The spec allows up to 16 streams, so the mcs codes for all nss may not fit
+	 * in a 32-bit mcs/nss map. Since this field only reflects the common mcs/nss
+	 * map between the peer and our device, a 32-bit map limited to a smaller
+	 * number of nss (e.g. up to 8) is acceptable, given that our device is
+	 * unlikely to exceed 4 streams anyway...
+	 */
+	uint32 eht_mcsmap;			/* STA's associated EHT mcs code map */
+	/* FIXME: change the following mcs code map to uint32 if all mcs+nss can fit in */
+	uint8 eht_rxmcsmap[6];			/* EHT rx mcs code map */
+	uint8 eht_txmcsmap[6];			/* EHT tx mcs code map */
+} wl_bss_info_v112_t;
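+
+/* Illustrative sketch (not part of the original source): walking the
+ * variable-length wl_bss_info records in a scan-result vector. As the
+ * struct comments above require, 'length' is the stride to the next
+ * record and 'ie_offset'/'ie_length' bound the IE block within each
+ * record. The function name is hypothetical.
+ */
+static void example_walk_bss_info(const uint8 *buf, uint32 count)
+{
+	const uint8 *p = buf;
+	uint32 i;
+	for (i = 0; i < count; i++) {
+		const wl_bss_info_v109_t *bi = (const wl_bss_info_v109_t *)p;
+		const uint8 *ies = p + bi->ie_offset;	/* IE block: bi->ie_length bytes */
+		(void)ies;				/* parse IEs here */
+		p += bi->length;			/* stride to the next record */
+	}
+}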
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_bss_info_v109_t wl_bss_info_t;
+#endif
+
+#define WL_GSCAN_FULL_RESULT_VERSION 2	/* current version of wl_gscan_result_t struct */
+
+typedef struct wl_gscan_bss_info_v2 {
+	uint32 timestamp[2];
+	wl_bss_info_v109_t info;
+	/* Do not add any more members below, fixed */
+	/* and variable length Information Elements to follow */
+} wl_gscan_bss_info_v2_t;
+
+typedef struct wl_gscan_bss_info_v3 {
+	uint32 timestamp[2];
+	uint8 info[];	/* var length wl_bss_info_X structures */
+	/* Do not add any more members below, fixed */
+	/* and variable length Information Elements to follow */
+} wl_gscan_bss_info_v3_t;
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_gscan_bss_info_v2_t wl_gscan_bss_info_t;
+#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t))
+#endif
+
+typedef struct wl_bsscfg {
+	uint32 bsscfg_idx;
+	uint32 wsec;
+	uint32 WPA_auth;
+	uint32 wsec_index;
+	uint32 associated;
+	uint32 BSS;
+	uint32 phytest_on;
+	struct ether_addr prev_BSSID;
+	struct ether_addr BSSID;
+	uint32 targetbss_wpa2_flags;
+	uint32 assoc_type;
+	uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_if_add {
+	uint32 bsscfg_flags;
+	uint32 if_flags;
+	uint32 ap;
+	struct ether_addr mac_addr;
+	uint16 PAD;
+	uint32 wlc_index;
+} wl_if_add_t;
+
+typedef struct wl_bss_config {
+	uint32 atim_window;
+	uint32 beacon_period;
+	uint32 chanspec;
+} wl_bss_config_t;
+
+/* Number of Bsscolor supported per core */
+#ifndef HE_MAX_BSSCOLOR_RES
+#define HE_MAX_BSSCOLOR_RES 2
+#endif
+
+#ifndef HE_MAX_STAID_PER_BSSCOLOR
+#define HE_MAX_STAID_PER_BSSCOLOR 4
+#endif
+
+/* BSSColor indices */
+#define BSSCOLOR0_IDX 0
+#define BSSCOLOR1_IDX 1
+#define HE_BSSCOLOR0 0
+#define HE_BSSCOLOR_MAX_VAL 63
+
+/* STAID indices */
+#define STAID0_IDX 0
+#define STAID1_IDX 1
+#define STAID2_IDX 2
+#define STAID3_IDX 3
+#define HE_STAID_MAX_VAL 0x07FF
+
+typedef struct wl_bsscolor_info {
+	uint16 version;			/**< structure version */
+	uint16 length;			/**< length of the bsscolor info */
+	uint8 bsscolor_index;		/**< bsscolor index 0-1 */
+	uint8 bsscolor;			/**< bsscolor value from 0 to 63 */
+	uint8 partial_bsscolor_ind;
+	uint8 disable_bsscolor_ind;	/**< To disable particular bsscolor */
+	uint16 staid_info[HE_MAX_STAID_PER_BSSCOLOR];	/**< 0-3 staid info of each bsscolor */
+} wl_bsscolor_info_t;
+
+typedef struct wl_txbf_expgainset {
+	/* bitmap for each element: B[4:0]=>c0, B[9:5]=>c1, B[14:10]=>c2, B[19:15]=>c[3-7]
+	 * B[24:20]=>c[8-9], B[29:25]=>c[10-11]
+	 */
+	uint32 bfgain_2x1[NUM_BFGAIN_ARRAY_1RX]; /* exp 1ss, imp 1ss */
+	uint32 bfgain_2x2[NUM_BFGAIN_ARRAY_2RX]; /* exp [1-2]ss, imp 1ss */
+	uint32 bfgain_3x1[NUM_BFGAIN_ARRAY_1RX];
+	uint32 bfgain_3x2[NUM_BFGAIN_ARRAY_2RX];
+	uint32 bfgain_3x3[NUM_BFGAIN_ARRAY_3RX]; /* exp [1-3]ss, imp 1ss */
+	uint32 bfgain_4x1[NUM_BFGAIN_ARRAY_1RX];
+	uint32 bfgain_4x2[NUM_BFGAIN_ARRAY_2RX];
+	uint32 bfgain_4x3[NUM_BFGAIN_ARRAY_3RX];
+	uint32 bfgain_4x4[NUM_BFGAIN_ARRAY_4RX]; /* exp [1-4]ss, imp 1ss */
+} wl_txbf_expgainset_t;
+
+#define OFDM_RATE_MASK 0x0000007f
+typedef uint8 ofdm_rates_t;
+
+typedef struct wl_rates_info {
+	wl_rateset_t rs_tgt;
+	uint32 phy_type;
+	int32 bandtype;
+	uint8 cck_only;
+	uint8 rate_mask;
+	uint8 mcsallow;
+	uint8 bw;
+	uint8 txstreams;
+	uint8 PAD[3];
+} wl_rates_info_t;
+
+/** uint32 list */
+typedef struct wl_uint32_list {
+	/** in - # of elements, out - # of entries */
+	uint32 count;
+	/** variable length uint32 list */
+	uint32 element[1];
+} wl_uint32_list_t;
+/* Size in bytes for wl_uint32_list_t with 'count' elements */
+#define WL_UINT32_LIST_SIZE(count) (((count) + 1) * sizeof(uint32))
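+
+/* Illustrative sketch (not part of the original source): filling a
+ * wl_uint32_list_t. The caller must provide a buffer of at least
+ * WL_UINT32_LIST_SIZE(n) bytes, which covers the 'count' word plus the
+ * n array elements. The function name is hypothetical.
+ */
+static void example_fill_uint32_list(wl_uint32_list_t *list, const uint32 *vals, uint32 n)
+{
+	uint32 i;
+	list->count = n;	/* in: # of elements */
+	for (i = 0; i < n; i++)
+		list->element[i] = vals[i];
+}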
+
+#define CHAN_INFO_LIST_ALL_V1 1
+
+typedef struct wl_chanspec_attr_s_v1 {
+	uint32 chaninfo;
+	uint32 chanspec;
+} wl_chanspec_attr_v1_t;
+
+/** chanspecs list */
+typedef struct wl_chanspec_list_s_v1 {
+	uint16 version;
+	/** in - # of chanspecs, out - # of entries */
+	uint16 count;
+	/** variable length chanspecs list */
+	wl_chanspec_attr_v1_t chspecs[1];
+} wl_chanspec_list_v1_t;
+
+/* WLC_SET_ALLOW_MODE values */
+#define ALLOW_MODE_ANY_BSSID 0
+#define ALLOW_MODE_ONLY_DESIRED_BSSID 1
+#define ALLOW_MODE_NO_BSSID 2
+
+/** used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+	struct ether_addr bssid;	/**< 00:00:00:00:00:00: broadcast scan */
+	uint16 bssid_cnt;		/**< 0: use chanspec_num, and the single bssid,
+					 * otherwise count of chanspecs in chanspec_list
+					 * AND paired bssids following chanspec_list
+					 * also, chanspec_num has to be set to zero
+					 * for bssid list to be used
+					 */
+	int32 chanspec_num;		/**< 0: all available channels,
+					 * otherwise count of chanspecs in chanspec_list
+					 */
+	chanspec_t chanspec_list[1];	/**< list of chanspecs */
+
+} wl_assoc_params_t;
+
+typedef struct wl_assoc_params_v1 {
+	uint16 version;
+	uint16 flags;
+	struct ether_addr bssid;	/**< 00:00:00:00:00:00: broadcast scan */
+	uint16 bssid_cnt;		/**< 0: use chanspec_num, and the single bssid,
+					 * otherwise count of chanspecs in chanspec_list
+					 * AND paired bssids following chanspec_list
+					 * also, chanspec_num has to be set to zero
+					 * for bssid list to be used
+					 */
+	int32 chanspec_num;		/**< 0: all available channels,
+					 * otherwise count of chanspecs in chanspec_list
+					 */
+	chanspec_t chanspec_list[1];	/**< list of chanspecs */
+} wl_assoc_params_v1_t;
+
+#define ASSOC_HINT_BSSID_PRESENT (1 << 0)
+
+#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list)
+#define WL_ASSOC_PARAMS_FIXED_SIZE_V1 OFFSETOF(wl_assoc_params_v1_t, chanspec_list)
+/** used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+#define WL_EXT_REASSOC_VER 1
+
+typedef struct wl_ext_reassoc_params {
+	uint16 version;
+	uint16 length;
+	uint32 flags;
+	wl_reassoc_params_t params;
+} wl_ext_reassoc_params_t;
+
+/* Flags field defined above in wl_ext_reassoc_params
+ * The value in bits [2:0] is used to specify the type
+ * of scan to be used for reassoc
+ */
+
+#define WL_SCAN_MODE_HIGH_ACC 0u	/**< use high accuracy scans for roam */
+#define WL_SCAN_MODE_LOW_SPAN 1u	/**< use low span scans for roam */
+#define WL_SCAN_MODE_LOW_POWER 2u	/**< use low power scans for roam */
+
+#define WL_EXTREASSOC_PARAMS_FIXED_SIZE (OFFSETOF(wl_ext_reassoc_params_t, params) + \
+	WL_REASSOC_PARAMS_FIXED_SIZE)
+
+/** used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+typedef wl_assoc_params_v1_t wl_join_assoc_params_v1_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE_V1 WL_ASSOC_PARAMS_FIXED_SIZE_V1
+/** used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+	wlc_ssid_t ssid;
+	wl_assoc_params_t params;	/**< optional field; when present it must include
+					 * the fixed portion of wl_assoc_params_t.
+					 */
+} wl_join_params_t;
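+
+/* Illustrative sketch (not part of the original source): computing the
+ * buffer size for a wl_assoc_params_t carrying 'n' chanspecs. Because
+ * the struct declares chanspec_list[1], the usable size is the fixed
+ * header plus the full array. The function name is hypothetical.
+ */
+static uint32 example_assoc_params_bytes(uint32 n)
+{
+	return (uint32)WL_ASSOC_PARAMS_FIXED_SIZE + n * (uint32)sizeof(chanspec_t);
+}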
+
+/** used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params_v1 {
+	wlc_ssid_t ssid;
+	wl_assoc_params_v1_t params;	/**< optional field; when present it must include
+					 * the fixed portion of wl_assoc_params_t.
+					 */
+} wl_join_params_v1_t;
+
+#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \
+	WL_ASSOC_PARAMS_FIXED_SIZE)
+#define WL_JOIN_PARAMS_FIXED_SIZE_V1 (OFFSETOF(wl_join_params_v1_t, params) + \
+	WL_ASSOC_PARAMS_FIXED_SIZE_V1)
+typedef struct wlc_roam_exp_params {
+	int8 a_band_boost_threshold;
+	int8 a_band_penalty_threshold;
+	int8 a_band_boost_factor;
+	int8 a_band_penalty_factor;
+	int8 cur_bssid_boost;
+	int8 alert_roam_trigger_threshold;
+	int16 a_band_max_boost;
+} wlc_roam_exp_params_t;
+
+#define ROAM_EXP_CFG_VERSION 1
+
+#define ROAM_EXP_ENABLE_FLAG (1 << 0)
+
+#define ROAM_EXP_CFG_PRESENT (1 << 1)
+
+typedef struct wl_roam_exp_cfg {
+	uint16 version;
+	uint16 flags;
+	wlc_roam_exp_params_t params;
+} wl_roam_exp_cfg_t;
+
+typedef struct wl_bssid_pref_list {
+	struct ether_addr bssid;
+	/* Add this to modify rssi */
+	int8 rssi_factor;
+	int8 flags;
+} wl_bssid_pref_list_t;
+
+#define BSSID_PREF_LIST_VERSION 1
+#define ROAM_EXP_CLEAR_BSSID_PREF (1 << 0)
+
+typedef struct wl_bssid_pref_cfg {
+	uint16 version;
+	uint16 flags;
+	uint16 count;
+	uint16 reserved;
+	wl_bssid_pref_list_t bssids[];
+} wl_bssid_pref_cfg_t;
+
+#define SSID_WHITELIST_VERSION 1
+
+#define ROAM_EXP_CLEAR_SSID_WHITELIST (1 << 0)
+
+/* Roam SSID whitelist; SSIDs in this list are acceptable */
+/* targets to join when considering a roam */
+
+typedef struct wl_ssid_whitelist {
+	uint16 version;
+	uint16 flags;
+	uint8 ssid_count;
+	uint8 reserved[3];
+	wlc_ssid_t ssids[];
+} wl_ssid_whitelist_t;
+
+#define ROAM_EXP_EVENT_VERSION 1
+
+typedef struct wl_roam_exp_event {
+	uint16 version;
+	uint16 flags;
+	wlc_ssid_t cur_ssid;
+} wl_roam_exp_event_t;
+
+/** scan params for extended join */
+typedef struct wl_join_scan_params {
+	uint8 scan_type;	/**< 0 use default, active or passive scan */
+	uint8 PAD[3];
+	int32 nprobes;		/**< -1 use default, number of probes per channel */
+	int32 active_time;	/**< -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	int32 passive_time;	/**< -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	int32 home_time;	/**< -1 use default, dwell time for the home channel
+				 * between channel scans
+				 */
+} wl_join_scan_params_t;
+
+#define wl_join_assoc_params_t wl_assoc_params_t
+#define wl_join_assoc_params_v1_t wl_assoc_params_v1_t
+/** extended join params */
+typedef struct wl_extjoin_params {
+	wlc_ssid_t ssid;	/**< {0, ""}: wildcard scan */
+	wl_join_scan_params_t scan;
+	wl_join_assoc_params_t assoc;	/**< optional field; when present it must include
+					 * the fixed portion of wl_join_assoc_params_t.
+					 */
+} wl_extjoin_params_t;
+
+typedef struct wl_extjoin_params_v1 {
+	uint16 version;
+	wlc_ssid_t ssid;	/**< {0, ""}: wildcard scan */
+	wl_join_scan_params_t scan;
+	wl_join_assoc_params_v1_t assoc;	/**< optional field; when present it must include
+						 * the fixed portion of wl_join_assoc_params_t.
+						 */
+} wl_extjoin_params_v1_t;
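+
+/* Illustrative sketch (not part of the original source): initializing
+ * wl_join_scan_params_t so the driver applies its defaults, per the
+ * field comments above (0 or -1 select the default behavior). The
+ * function name is hypothetical.
+ */
+static void example_default_join_scan(wl_join_scan_params_t *scan)
+{
+	scan->scan_type = 0;		/* 0: default scan type */
+	scan->nprobes = -1;		/* -1: default probes per channel */
+	scan->active_time = -1;		/* -1: default active dwell time */
+	scan->passive_time = -1;	/* -1: default passive dwell time */
+	scan->home_time = -1;		/* -1: default home channel dwell time */
+}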
+
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \
+	WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE_V1 (OFFSETOF(wl_extjoin_params_v1_t, assoc) + \
+	WL_JOIN_ASSOC_PARAMS_FIXED_SIZE_V1)
+#define ANT_SELCFG_MAX 4	/**< max number of antenna configurations */
+#define MAX_STREAMS_SUPPORTED 4	/**< max number of streams supported */
+typedef struct {
+	uint8 ant_config[ANT_SELCFG_MAX];	/**< antenna configuration */
+	uint8 num_antcfg;			/**< number of available antenna configurations */
+} wlc_antselcfg_t;
+
+/* This is obsolete. Please add new fields by extending the versioned structure
+ * cca_congest_ext_vX_t [X is latest version]
+ */
+typedef struct cca_congest {
+	uint32 duration;		/**< millisecs spent sampling this channel */
+	union {
+		uint32 congest_ibss;	/**< millisecs in our bss (presumably this traffic will */
+					/**< move if cur bss moves channels) */
+		uint32 congest_me;	/**< millisecs in my own traffic */
+	};
+	union {
+		uint32 congest_obss;	/**< traffic not in our bss */
+		uint32 congest_notme;	/**< traffic not from/to me (including bc/mc) */
+	};
+	uint32 interference;		/**< millisecs detecting a non 802.11 interferer. */
+	uint32 timestamp;		/**< second timestamp */
+} cca_congest_t;
+
+/* This is obsolete. Please add new fields by extending the versioned structure
+ * cca_congest_ext_channel_req_vX_t [X is latest version]
+ */
+typedef struct cca_congest_channel_req {
+	chanspec_t chanspec;	/**< Which channel? */
+	uint16 num_secs;	/**< How many secs worth of data */
+	cca_congest_t secs[1];	/**< Data */
+} cca_congest_channel_req_t;
+
+typedef struct cca_congest_ext {
+	uint32 timestamp;		/**< second timestamp */
+
+	/* Base structure of cca_congest_t: CCA statistics all inclusive */
+	uint32 duration;		/**< millisecs spent sampling this channel */
+	uint32 congest_meonly;		/**< millisecs in my own traffic (TX + RX) */
+	uint32 congest_ibss;		/**< millisecs in our bss (presumably this traffic will */
+					/**< move if cur bss moves channels) */
+	uint32 congest_obss;		/**< traffic not in our bss */
+	uint32 interference;		/**< millisecs detecting a non 802.11 interferer. */
+
+	/* CCA statistics for non PM only */
+	uint32 duration_nopm;		/**< millisecs spent sampling this channel */
+	uint32 congest_meonly_nopm;	/**< millisecs in my own traffic (TX + RX) */
+	uint32 congest_ibss_nopm;	/**< millisecs in our bss (presumably this traffic will */
+					/**< move if cur bss moves channels) */
+	uint32 congest_obss_nopm;	/**< traffic not in our bss */
+	uint32 interference_nopm;	/**< millisecs detecting a non 802.11 interferer. */
+
+	/* CCA statistics for during PM only */
+	uint32 duration_pm;		/**< millisecs spent sampling this channel */
+	uint32 congest_meonly_pm;	/**< millisecs in my own traffic (TX + RX) */
+	uint32 congest_ibss_pm;		/**< millisecs in our bss (presumably this traffic will */
+					/**< move if cur bss moves channels) */
+	uint32 congest_obss_pm;		/**< traffic not in our bss */
+	uint32 interference_pm;		/**< millisecs detecting a non 802.11 interferer.
*/ +} cca_congest_ext_t; + +typedef struct cca_congest_ext_v2 { + uint32 timestamp; /**< second timestamp */ + + /* Base structure of cca_congest_t: CCA statistics all inclusive */ + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest_meonly; /**< millisecs in my own traffic (TX + RX) */ + uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss; /**< traffic not in our bss */ + uint32 interference; /**< millisecs detecting a non 802.11 interferer. */ + + /* CCA statistics for non PM only */ + uint32 duration_nopm; /**< millisecs spent sampling this channel */ + uint32 congest_meonly_nopm; /**< millisecs in my own traffic (TX + RX) */ + uint32 congest_ibss_nopm; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss_nopm; /**< traffic not in our bss */ + uint32 interference_nopm; /**< millisecs detecting a non 802.11 interferer. */ + + /* CCA statistics for during PM only */ + uint32 duration_pm; /**< millisecs spent sampling this channel */ + uint32 congest_meonly_pm; /**< millisecs in my own traffic (TX + RX) */ + uint32 congest_ibss_pm; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss_pm; /**< traffic not in our bss */ + uint32 interference_pm; /**< millisecs detecting a non 802.11 interferer. */ + uint32 radio_on_time; /* Awake time on this channel */ + uint32 cca_busy_time; /* CCA is held busy on this channel */ +} cca_congest_ext_v2_t; + +#define WL_CCA_EXT_REQ_VER 0u +#define WL_CCA_EXT_REQ_VER_V2 2u +#define WL_CCA_EXT_REQ_VER_V3 3u + +typedef struct cca_congest_ext_channel_req { + uint16 ver; /**< version of this struct */ + uint16 len; /**< len of this structure */ + chanspec_t chanspec; /**< Which channel? */ + uint16 num_secs; /**< How many secs worth of data */ + struct cca_congest_ext secs[1]; /**< Data - 3 sets for ALL - non-PM - PM */ +} cca_congest_ext_channel_req_t; + +typedef struct cca_congest_ext_channel_req_v2 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< len of this structure */ + chanspec_t chanspec; /**< Which channel? */ + uint16 num_secs; /* How many secs worth of data */ + cca_congest_ext_v2_t secs[1]; /* Data - 3 sets for ALL - non-PM - PM */ +} cca_congest_ext_channel_req_v2_t; + +/* Struct holding all channels cca statistics */ +typedef struct cca_congest_ext_channel_req_v3 { + uint16 ver; + uint16 len; + uint8 PAD[2]; + uint16 num_of_entries; + cca_congest_ext_channel_req_v2_t per_chan_stats[1]; +} cca_congest_ext_channel_req_v3_t; + +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest; /**< millisecs detecting busy CCA */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_simple_t; + +/* The following two structure must have same first 4 fields. + * The cca_chan_qual_event_t is used to report CCA in older formats and NF. + * The cca_only_chan_qual_event_t is used to report CCA only with newer format. + */ +typedef struct { + uint16 status; + uint16 id; + chanspec_t chanspec; /**< Which channel? */ + uint16 len; + union { + cca_congest_simple_t cca_busy; /**< CCA busy */ + cca_congest_t cca_busy_ext; /**< Extended CCA report */ + int32 noise; /**< noise floor */ + }; +} cca_chan_qual_event_t; + +typedef struct { + uint16 status; + uint16 id; + chanspec_t chanspec; /**< Which channel? 
*/
+	uint16 len;
+	union {
+		cca_congest_simple_t cca_busy;		/**< CCA busy */
+		struct {
+			cca_congest_t cca_busy_ext;	/**< Extended CCA report */
+			cca_congest_t cca_busy_nopm;	/**< Extended CCA report (PM awake time) */
+			cca_congest_t cca_busy_pm;	/**< Extended CCA report (PM sleep time) */
+		};
+	};
+} cca_only_chan_qual_event_t;
+
+typedef struct {
+	uint16 status;		/* Indicates the status of event */
+	uint16 id;
+	/* id is used to indicate the number of bytes to read */
+	chanspec_t chanspec;	/**< Which channel? */
+	uint16 len;
+	union {
+		cca_congest_simple_t cca_busy;		/**< CCA busy */
+		struct {
+			cca_congest_t cca_busy_ext;	/**< Extended CCA report */
+			cca_congest_t cca_busy_nopm;	/**< Extended CCA report (PM awake time) */
+			cca_congest_t cca_busy_pm;	/**< Extended CCA report (PM sleep time) */
+		};
+	};
+	int32 ofdm_desense;
+} cca_only_chan_qual_event_v2_t;
+
+typedef struct {
+	uint32 msrmnt_time;	/**< Time for Measurement (msec) */
+	uint32 msrmnt_done;	/**< flag set when measurement complete */
+	char buf[];
+} cca_stats_n_flags;
+
+typedef struct {
+	uint32 msrmnt_query;	/* host to driver query for measurement done */
+	uint32 time_req;	/* time required for measurement */
+	uint8 report_opt;	/* option to print different stats in report */
+	uint8 PAD[3];
+} cca_msrmnt_query;
+
+/* interference sources */
+enum interference_source {
+	ITFR_NONE = 0,				/**< no interference */
+	ITFR_PHONE,				/**< wireless phone */
+	ITFR_VIDEO_CAMERA,			/**< wireless video camera */
+	ITFR_MICROWAVE_OVEN,			/**< microwave oven */
+	ITFR_BABY_MONITOR,			/**< wireless baby monitor */
+	ITFR_BLUETOOTH,				/**< bluetooth */
+	ITFR_VIDEO_CAMERA_OR_BABY_MONITOR,	/**< wireless camera or baby monitor */
+	ITFR_BLUETOOTH_OR_BABY_MONITOR,		/**< bluetooth or baby monitor */
+	ITFR_VIDEO_CAMERA_OR_PHONE,		/**< video camera or phone */
+	ITFR_UNIDENTIFIED			/**< interference from unidentified source */
+};
+
+/** structure for interference source report */
+typedef struct {
+	uint32 flags;		/**< flags. bit definitions below */
+	uint32 source;		/**< last detected interference source */
+	uint32 timestamp;	/**< second timestamp of the interference flag change */
+} interference_source_rep_t;
+
+#define WLC_CNTRY_BUF_SZ 4	/**< Country string is 3 bytes + NUL */
+
+typedef struct wl_country {
+	char country_abbrev[WLC_CNTRY_BUF_SZ];	/**< nul-terminated country code used in
+						 * the Country IE
+						 */
+	int32 rev;				/**< revision specifier for ccode
+						 * on set, -1 indicates unspecified.
+						 * on get, rev >= 0
+						 */
+	char ccode[WLC_CNTRY_BUF_SZ];		/**< nul-terminated built-in country code.
+						 * variable length, but fixed size in
+						 * struct allows simple allocation for
+						 * expected country strings <= 3 chars.
+ */ +} wl_country_t; + +#define CCODE_INFO_VERSION 1 + +typedef enum wl_ccode_role { + WLC_CCODE_ROLE_ACTIVE = 0, + WLC_CCODE_ROLE_HOST, + WLC_CCODE_ROLE_80211D_ASSOC, + WLC_CCODE_ROLE_80211D_SCAN, + WLC_CCODE_ROLE_DEFAULT, + WLC_CCODE_ROLE_DEFAULT_SROM_BKUP, + WLC_CCODE_LAST +} wl_ccode_role_t; +#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST + +typedef struct wl_ccode_entry { + uint16 reserved; + uint8 band; + uint8 role; + char ccode[WLC_CNTRY_BUF_SZ]; +} wl_ccode_entry_t; + +typedef struct wl_ccode_info { + uint16 version; + uint16 count; /**< Number of ccodes entries in the set */ + wl_ccode_entry_t ccodelist[1]; +} wl_ccode_info_t; +#define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist) +typedef struct wl_channels_in_country { + uint32 buflen; + uint32 band; + char country_abbrev[WLC_CNTRY_BUF_SZ]; + uint32 count; + uint32 channel[1]; +} wl_channels_in_country_t; + +typedef struct wl_country_list { + uint32 buflen; + uint32 band_set; + uint32 band; + uint32 count; + char country_abbrev[1]; +} wl_country_list_t; + +typedef struct wl_rm_req_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ +} wl_rm_req_elt_t; + +typedef struct wl_rm_req { + uint32 token; /**< overall measurement set token */ + uint32 count; /**< number of measurement requests */ + void *cb; /**< completion callback function: may be NULL */ + void *cb_arg; /**< arg to completion callback function */ + wl_rm_req_elt_t req[1]; /**< variable length block of requests */ +} wl_rm_req_t; +#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req) + +typedef struct wl_rm_rep_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ + uint32 len; /**< byte length of data block */ + uint8 data[1]; /**< variable length data block */ +} wl_rm_rep_elt_t; +#define WL_RM_REP_ELT_FIXED_LEN 24 /**< length excluding data block */ + +#define WL_RPI_REP_BIN_NUM 8 +typedef struct wl_rm_rpi_rep { + uint8 rpi[WL_RPI_REP_BIN_NUM]; + int8 rpi_max[WL_RPI_REP_BIN_NUM]; +} wl_rm_rpi_rep_t; + +typedef struct wl_rm_rep { + uint32 token; /**< overall measurement set token */ + uint32 len; /**< length of measurement report block */ + wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */ +} wl_rm_rep_t; +#define WL_RM_REP_FIXED_LEN 8 + +#ifdef BCMCCX +#define LEAP_USER_MAX 32 +#define LEAP_DOMAIN_MAX 32 +#define LEAP_PASSWORD_MAX 32 + +typedef struct wl_leap_info { + wlc_ssid_t ssid; + uint8 user_len; + uint8 user[LEAP_USER_MAX]; + uint8 password_len; + uint8 password[LEAP_PASSWORD_MAX]; + uint8 domain_len; + uint8 domain[LEAP_DOMAIN_MAX]; + uint8 PAD; +} wl_leap_info_t; + +typedef struct wl_leap_list { + uint32 buflen; + uint32 version; + uint32 count; + wl_leap_info_t leap_info[1]; +} wl_leap_list_t; +#endif /* BCMCCX */ + +typedef enum sup_auth_status { + /* Basic supplicant authentication states */ + WLC_SUP_DISCONNECTED = 0, + WLC_SUP_CONNECTING, + WLC_SUP_IDREQUIRED, + WLC_SUP_AUTHENTICATING, + WLC_SUP_AUTHENTICATED, + WLC_SUP_KEYXCHANGE, + WLC_SUP_KEYED, + WLC_SUP_TIMEOUT, + WLC_SUP_LAST_BASIC_STATE, + + /* Extended supplicant authentication states */ + /** Waiting to receive handshake msg M1 */ + WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, + /** Preparing 
to send handshake msg M2 */
+	WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+	/* Waiting to receive handshake msg M3 */
+	WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+	WLC_SUP_KEYXCHANGE_PREP_M4,	/**< Preparing to send handshake msg M4 */
+	WLC_SUP_KEYXCHANGE_WAIT_G1,	/**< Waiting to receive handshake msg G1 */
+	WLC_SUP_KEYXCHANGE_PREP_G2	/**< Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+
+typedef struct wl_wsec_key {
+	uint32 index;			/**< key index */
+	uint32 len;			/**< key length */
+	uint8 data[DOT11_MAX_KEY_SIZE];	/**< key data */
+	uint32 pad_1[18];
+	uint32 algo;			/**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	uint32 flags;			/**< misc flags */
+	uint32 pad_2[2];
+	int32 pad_3;
+	int32 iv_initialized;		/**< has IV been initialized already? */
+	int32 pad_4;
+	/* Rx IV */
+	struct {
+		uint32 hi;		/**< upper 32 bits of IV */
+		uint16 lo;		/**< lower 16 bits of IV */
+		uint16 PAD;
+	} rxiv;
+	uint32 pad_5[2];
+	struct ether_addr ea;		/**< per station */
+	uint16 PAD;
+} wl_wsec_key_t;
+
+/* Min length for PSK passphrase */
+#define WSEC_MIN_PSK_LEN 8
+/* Max length of supported passphrases for PSK */
+#define WSEC_MAX_PSK_LEN 64
+/* Max length of supported passphrases for SAE */
+#define WSEC_MAX_PASSPHRASE_LEN 256u
+/* Max length of SAE password ID */
+#define WSEC_MAX_SAE_PASSWORD_ID 255u
+
+/* Flag for key material needing passhash'ing */
+#define WSEC_PASSPHRASE 1u
+/* Flag indicating an SAE passphrase */
+#define WSEC_SAE_PASSPHRASE 2u
+
+/** receptacle for WLC_SET_WSEC_PMK parameter */
+
+typedef struct wsec_pmk {
+	ushort key_len;				/* octets in key material */
+	ushort flags;				/* key handling qualification */
+	uint8 key[WSEC_MAX_PASSPHRASE_LEN];	/* PMK material */
+	uint16 opt_len;				/* optional field length */
+	uint8 opt_tlvs[1];			/* optional field in bcm_xtlv_t format */
+} wsec_pmk_t;
+
+typedef enum {
+	WL_PMK_TLV_PASSWORD_ID = 1,
+	WL_PMK_TLV_SSID = 2,
+	WL_PMK_TLV_BSSID = 3
+} wl_pmk_tlv_types_t;
+
+#define WL_AUTH_EVENT_DATA_V1 0x1
+#define WL_AUTH_EVENT_DATA_V2 0x2
+
+/* tlv ids for auth event */
+#define WL_AUTH_PMK_TLV_ID 1u
+#define WL_AUTH_PMKID_TLV_ID 2u
+#define WL_AUTH_PMKID_TYPE_TLV_ID 3u
+#define WL_AUTH_SSID_TLV_ID 4u
+
+#define WL_AUTH_PMKID_TYPE_BSSID 1u
+#define WL_AUTH_PMKID_TYPE_SSID 2u
+/* AUTH event data
+ * pmk and pmkid in case of SAE auth
+ * xtlvs will be 32 bit aligned
+ */
+typedef struct wl_auth_event {
+	uint16 version;
+	uint16 length;
+	uint8 xtlvs[];
+} wl_auth_event_t;
+
+#define WL_AUTH_EVENT_FIXED_LEN_V1 OFFSETOF(wl_auth_event_t, xtlvs)
+#define WL_AUTH_EVENT_FIXED_LEN_V2 OFFSETOF(wl_auth_event_t, xtlvs)
+
+#define WL_PMKSA_EVENT_DATA_V1 1u
+
+/* tlv ids for PMKSA event */
+#define WL_PMK_TLV_ID 1u
+#define WL_PMKID_TLV_ID 2u
+#define WL_PEER_ADDR_TLV_ID 3u
+
+/* PMKSA event data structure */
+typedef struct wl_pmksa_event {
+	uint16 version;
+	uint16 length;
+	uint8 xtlvs[];
+} wl_pmksa_event_t;
+
+#define WL_PMKSA_EVENT_FIXED_LEN_V1 OFFSETOF(wl_pmksa_event_t, xtlvs)
+
+#define FILS_CACHE_ID_LEN 2u
+#define PMK_LEN_MAX 48u
+
+typedef struct _pmkid_v1 {
+	struct ether_addr BSSID;
+	uint8 PMKID[WPA2_PMKID_LEN];
+} pmkid_v1_t;
+
+#define PMKID_ELEM_V2_LENGTH (sizeof(struct ether_addr) + WPA2_PMKID_LEN + PMK_LEN_MAX + \
+	sizeof(ssid_info_t) + FILS_CACHE_ID_LEN)
+
+typedef struct _pmkid_v2 {
+	uint16 length;			/* Should match PMKID_ELEM_VX_LENGTH */
+	struct ether_addr BSSID;
+	uint8 PMKID[WPA2_PMKID_LEN];
+	uint8 pmk[PMK_LEN_MAX];		/* for FILS key derivation */
+	uint16 pmk_len;
+	ssid_info_t ssid;
+	uint8 fils_cache_id[FILS_CACHE_ID_LEN];
+} pmkid_v2_t;
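+
+/* Illustrative sketch (not part of the original source): preparing a
+ * wsec_pmk_t for WLC_SET_WSEC_PMK with an ASCII PSK passphrase, using
+ * the WSEC_* limits and flags defined above. The function name is
+ * hypothetical and error handling is minimal.
+ */
+static int example_fill_psk(wsec_pmk_t *pmk, const char *pass, uint16 pass_len)
+{
+	uint16 i;
+	if (pass_len < WSEC_MIN_PSK_LEN || pass_len > WSEC_MAX_PSK_LEN)
+		return -1;		/* out of range for a PSK passphrase */
+	pmk->key_len = pass_len;
+	pmk->flags = WSEC_PASSPHRASE;	/* key material still needs hashing */
+	for (i = 0; i < pass_len; i++)
+		pmk->key[i] = (uint8)pass[i];
+	return 0;
+}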
+
+#define PMKID_LIST_VER_2 2
+
+typedef struct _pmkid_v3 {
+	struct ether_addr bssid;
+	uint8 pmkid[WPA2_PMKID_LEN];
+	uint8 pmkid_len;
+	uint8 pmk[PMK_LEN_MAX];
+	uint8 pmk_len;
+	uint16 fils_cache_id;		/* 2-byte length */
+	uint8 akm;
+	uint8 ssid_len;
+	uint8 ssid[DOT11_MAX_SSID_LEN];	/* For FILS, to save ESSID */
+					/* one pmkid used in whole ESS */
+	uint32 time_left;		/* remaining time until expiry in sec. */
+					/* 0 means expired, all 0xFF means never expire */
+} pmkid_v3_t;
+
+#define PMKID_LIST_VER_3 3
+typedef struct _pmkid_list_v1 {
+	uint32 npmkid;
+	pmkid_v1_t pmkid[1];
+} pmkid_list_v1_t;
+
+typedef struct _pmkid_list_v2 {
+	uint16 version;
+	uint16 length;
+	pmkid_v2_t pmkid[1];
+} pmkid_list_v2_t;
+
+#define PMKDB_SET_IOVAR 1u
+#define PMKDB_GET_IOVAR 2u
+#define PMKDB_CLEAR_IOVAR 4u
+
+typedef struct _pmkid_list_v3 {
+	uint16 version;
+	uint16 length;
+	uint16 count;
+	uint16 flag;
+	pmkid_v3_t pmkid[];
+} pmkid_list_v3_t;
+
+#ifndef PMKID_VERSION_ENABLED
+/* pmkid structure before versioning. legacy. DO NOT update it anymore here */
+typedef pmkid_v1_t pmkid_t;
+typedef pmkid_list_v1_t pmkid_list_t;
+#endif /* PMKID_VERSION_ENABLED */
+
+typedef struct _pmkid_cand {
+	struct ether_addr BSSID;
+	uint8 preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+	uint32 npmkid_cand;
+	pmkid_cand_t pmkid_cand[1];
+} pmkid_cand_list_t;
+
+#define WL_STA_ANT_MAX 4	/**< max possible rx antennas */
+
+typedef struct wl_assoc_info {
+	uint32 req_len;
+	uint32 resp_len;
+	uint32 flags;
+	struct dot11_assoc_req req;
+	struct ether_addr reassoc_bssid;	/**< used in reassoc's */
+	struct dot11_assoc_resp resp;
+	uint32 state;
+} wl_assoc_info_t;
+
+/** srom read/write struct passed through ioctl */
+typedef struct {
+	uint32 byteoff;	/**< byte offset */
+	uint32 nbytes;	/**< number of bytes */
+	uint16 buf[];
+} srom_rw_t;
+
+#define CISH_FLAG_PCIECIS (1 << 15)	/**< write CIS format bit for PCIe CIS */
+
+/** similar cis (srom or otp) struct [iovar: may not be aligned] */
+typedef struct {
+	uint16 source;	/**< cis source */
+	uint16 flags;	/**< flags */
+	uint32 byteoff;	/**< byte offset */
+	uint32 nbytes;	/**< number of bytes */
+	/* data follows here */
+} cis_rw_t;
+
+/** R_REG and W_REG struct passed through ioctl */
+typedef struct {
+	uint32 byteoff;	/**< byte offset of the field in d11regs_t */
+	uint32 val;	/**< read/write value of the field */
+	uint32 size;	/**< sizeof the field */
+	uint32 band;	/**< band (optional) */
+} rw_reg_t;
+
+/**
+ * Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band
+ * PCL - Power Control Loop
+ */
+typedef struct {
+	uint16 auto_ctrl;	/**< WL_ATTEN_XX */
+	uint16 bb;		/**< Baseband attenuation */
+	uint16 radio;		/**< Radio attenuation */
+	uint16 txctl1;		/**< Radio TX_CTL1 value */
+} atten_t;
+
+/** Per-AC retry parameters */
+struct wme_tx_params_s {
+	uint8 short_retry;
+	uint8 short_fallback;
+	uint8 long_retry;
+	uint8 long_fallback;
+	uint16 max_rate;	/**< In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
+
+/** Used to get specific link/ac parameters */
+typedef struct {
+	int32 ac;
+	uint8 val;
+	struct ether_addr ea;
+	uint8 PAD;
+} link_val_t;
+
+#define WL_PM_MUTE_TX_VER 1
+
+typedef struct wl_pm_mute_tx {
+	uint16 version;		/**< version */
+	uint16 len;		/**< length */
+	uint16 deadline;	/**< deadline timer (in milliseconds) */
+	uint8 enable;		/**< set to 1 to enable
mode; set to 0 to disable it */ + uint8 PAD; +} wl_pm_mute_tx_t; + +/* + * Pay attention to version if structure changes. + */ + +/* sta_info_t version 4 */ +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. + */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + uint32 rx_dur_total; /* total user RX duration (estimated) */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */ + uint32 PAD; +} sta_info_v4_t; + +/* Note: Version 4 is the latest version of sta_info_t. Version 5 is abandoned. + * Please add new fields to version 4, not version 5. 
+ */ +/* sta_info_t version 5 */ +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. + */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */ +} sta_info_v5_t; + +/* + * Pay attention to version if structure changes. + */ + +/* sta_info_t version 6 + changes to wl_rateset_args_t is leading to update this struct version as well. 
+ */ +typedef struct sta_info_v6 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. + */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + uint32 rx_dur_total; /* total user RX duration (estimated) */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_v2_t rateset_adv; /* rateset along with mcs index bitmap */ +} sta_info_v6_t; + +/* + * Pay attention to version if structure changes. + */ + +/* sta_info_t version 7 + changes to wl_rateset_args_t is leading to update this struct version as well. 
+ */ +typedef struct sta_info_v7 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. + */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + uint32 rx_dur_total; /* total user RX duration (estimated) */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_v3_t rateset_adv; /* rateset along with mcs index bitmap */ +} sta_info_v7_t; + +/* define to help support one version older sta_info_t from user level + * applications. + */ +#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts) + +#define WL_STA_VER_4 4 +#define WL_STA_VER_5 5 +/* FIXME: the user/branch should make the selection! 
*/ +#define WL_STA_VER WL_STA_VER_4 + +#define SWDIV_STATS_VERSION_2 2 +#define SWDIV_STATS_CURRENT_VERSION SWDIV_STATS_VERSION_2 + +struct wlc_swdiv_stats_v1 { + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; + uint32 swap_snrdrop0; + uint32 swap_snrdrop1; + uint32 mws_antsel_ovr_tx; + uint32 mws_antsel_ovr_rx; + uint8 swap_trig_event_id; +}; + +struct wlc_swdiv_stats_v2 { + uint16 version; /* version of the structure + * as defined by SWDIV_STATS_CURRENT_VERSION + */ + uint16 length; /* length of the entire structure */ + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; + uint32 swap_snrdrop0; + uint32 swap_snrdrop1; + uint32 mws_antsel_ovr_tx; + uint32 mws_antsel_ovr_rx; + uint32 swap_trig_event_id; +}; + +#define WLC_NUMRATES 16 /**< max # of rates in a rateset */ + +/**Used to get specific STA parameters */ +typedef struct { + uint32 val; + struct ether_addr ea; + uint16 PAD; +} scb_val_t; + +/**Used by iovar versions of some ioctls, i.e. 
WLC_SCB_AUTHORIZE et al */
+typedef struct {
+	uint32 code;
+	scb_val_t ioctl_args;
+} authops_t;
+
+/** channel encoding */
+typedef struct channel_info {
+	int32 hw_channel;
+	int32 target_channel;
+	int32 scan_channel;
+} channel_info_t;
+
+/** For ioctls that take a list of MAC addresses */
+typedef struct maclist {
+	uint32 count;			/**< number of MAC addresses */
+	struct ether_addr ea[1];	/**< variable length array of MAC addresses */
+} maclist_t;
+
+typedef struct wds_client_info {
+	char ifname[INTF_NAME_SIZ];	/* WDS ifname */
+	struct ether_addr ea;		/* WDS client MAC address */
+} wds_client_info_t;
+
+#define WDS_MACLIST_MAGIC 0xFFFFFFFF
+#define WDS_MACLIST_VERSION 1
+
+/* For wds MAC list ioctls */
+typedef struct wds_maclist {
+	uint32 count;				/* Number of WDS clients */
+	uint32 magic;				/* Magic number */
+	uint32 version;				/* Version number */
+	struct wds_client_info client_list[1];	/* Variable length array of WDS clients */
+} wds_maclist_t;
+
+/** get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+	uint32 rx_good_pkt;
+	uint32 rx_bad_pkt;
+	uint32 tx_good_pkt;
+	uint32 tx_bad_pkt;
+	uint32 rx_ocast_good_pkt;	/**< unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN 0
+#define LQ_IDX_MAX 1
+#define LQ_IDX_AVG 2
+#define LQ_IDX_SUM 2
+#define LQ_IDX_LAST 3
+#define LQ_STOP_MONITOR 0
+#define LQ_START_MONITOR 1
+
+/** Get min/max/average RSSI and SNR values */
+/* Link Quality */
+typedef struct {
+	int32 rssi[LQ_IDX_LAST];	/**< Array to keep min, max, avg rssi */
+	int32 snr[LQ_IDX_LAST];		/**< Array to keep min, max, avg snr */
+	int32 isvalid;			/**< Flag indicating whether above data is valid */
+} wl_lq_t;
+
+typedef enum wl_wakeup_reason_type {
+	LCD_ON = 1,
+	LCD_OFF,
+	DRC1_WAKE,
+	DRC2_WAKE,
+	REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+	/** Unique filter id */
+	uint32 id;
+	/** stores the reason for the last wake up */
+	uint8 reason;
+	uint8 PAD[3];
+} wl_wr_t;
+
+/** Get MAC specific rate histogram command */
+typedef struct {
+	struct ether_addr ea;	/**< MAC Address */
+	uint8 ac_cat;		/**< Access Category */
+	uint8 num_pkts;		/**< Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t;
+/** Get MAC rate histogram response */
+/* deprecated after JAGUAR branch */
+typedef struct {
+	uint32 rate[DOT11_RATE_MAX + 1];			/**< Rates */
+	uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX];	/**< MCS counts */
+	uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX];	/**< VHT counts */
+	uint32 tsf_timer[2][2];					/**< Start and End time for 8bytes value */
+	uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /** MCS counts */
+} wl_mac_ratehisto_res_t;
+
+/* sta_info ecounters */
+typedef struct {
+	struct ether_addr ea;			/* Station MAC addr */
+	struct ether_addr BSSID;		/* BSSID of the BSS */
+	uint32 tx_pkts_fw_total;		/* # FW generated sent successfully */
+	uint32 tx_pkts_fw_retries;		/* # retries for FW generated frames */
+	uint32 tx_pkts_fw_retry_exhausted;	/* # FW generated which
+						 * failed after retry
+						 */
+} sta_info_ecounters_t;
+
+#define STAMON_MODULE_VER 1
+
+/** Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+	uint32 cmd;	/**< common ioctl definition */
+	void *buf;	/**< pointer to user buffer */
+	uint32 len;	/**< length of user buffer */
+	uint8 set;	/**< 1=set IOCTL; 0=query IOCTL */
+	uint32 used;	/**< bytes read or written (optional) */
+	uint32 needed;	/**< bytes needed (optional) */
+} wl_ioctl_t;
+
+#ifdef CONFIG_COMPAT
+typedef struct compat_wl_ioctl {
+	uint32 cmd;	/**< common ioctl definition */
+	uint32 buf;	/**< pointer to user buffer */
+	uint32 len;	/**< length of user buffer */
+	uint8 set;	/**< 1=set IOCTL; 0=query IOCTL */
+	uint32 used;	/**< bytes read or written (optional) */
+	uint32 needed;	/**< bytes needed (optional) */
+} compat_wl_ioctl_t;
+#endif /* CONFIG_COMPAT */
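+
+/* Illustrative sketch (not part of the original source): filling a
+ * wl_ioctl_t for a query-style (GET) command. How the request is
+ * actually delivered to the driver is platform glue and is not shown
+ * here; the function name is hypothetical.
+ */
+static void example_prepare_get_ioctl(wl_ioctl_t *ioc, uint32 cmd, void *buf, uint32 buflen)
+{
+	ioc->cmd = cmd;		/* WLC_* ioctl code */
+	ioc->buf = buf;		/* user buffer for the result */
+	ioc->len = buflen;	/* capacity of the user buffer */
+	ioc->set = 0;		/* 0 = query, 1 = set */
+	ioc->used = 0;
+	ioc->needed = 0;
+}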
+
+#define WL_NUM_RATES_CCK 4		/**< 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM 8		/**< 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM 8	/**< MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT 2	/**< Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT 10
+#define WL_NUM_RATES_VHT_ALL (WL_NUM_RATES_VHT + WL_NUM_RATES_EXTRA_VHT)
+#define WL_NUM_RATES_HE 12
+#define WL_NUM_RATES_EHT 14
+#define WL_NUM_RATES_MCS32 1
+#define UC_PATH_LEN 128u		/**< uCode path length */
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+	uint32 vendorid;	/**< PCI vendor id */
+	uint32 deviceid;	/**< device id of chip */
+	uint32 radiorev;	/**< radio revision */
+	uint32 chiprev;		/**< chip revision */
+	uint32 corerev;		/**< core revision */
+	uint32 boardid;		/**< board identifier (usu. PCI sub-device id) */
+	uint32 boardvendor;	/**< board vendor (usu. PCI sub-vendor id) */
+	uint32 boardrev;	/**< board revision */
+	uint32 driverrev;	/**< driver version */
+	uint32 ucoderev;	/**< uCode version */
+	uint32 bus;		/**< bus type */
+	uint32 chipnum;		/**< chip number */
+	uint32 phytype;		/**< phy type */
+	uint32 phyrev;		/**< phy revision */
+	uint32 anarev;		/**< anacore rev */
+	uint32 chippkg;		/**< chip package info */
+	uint32 nvramrev;	/**< nvram revision number */
+	uint32 phyminorrev;	/**< phy minor rev */
+	uint32 coreminorrev;	/**< core minor rev */
+	uint32 drvrev_major;	/**< driver version: major */
+	uint32 drvrev_minor;	/**< driver version: minor */
+	uint32 drvrev_rc;	/**< driver version: rc */
+	uint32 drvrev_rc_inc;	/**< driver version: rc incremental */
+	uint16 ucodeprebuilt;	/**< uCode prebuilt flag */
+	uint16 ucodediffct;	/**< uCode diff count */
+	uchar ucodeurl[128u];	/* obsolete, kept for ROM compatibility */
+	uchar ucodepath[UC_PATH_LEN];	/**< uCode URL or path */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH 48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+	uint32 instance;
+	int8 brand[WL_BRAND_MAX];
+	int8 PAD[4-(WL_BRAND_MAX%4)];
+} wl_instance_info_t;
+
+/** structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+	uint16 magic;
+	uint16 fifo;
+	uint16 size;
+} wl_txfifo_sz_t;
+
+/* Transfer info about an IOVar from the driver */
+/** Max supported IOV name size in bytes, + 1 for nul termination */
+#define WLC_IOV_NAME_LEN (32 + 1)
+
+typedef struct wlc_iov_trx_s {
+	uint8 module;
+	uint8 type;
+	char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/** bump this number if you change the ioctl interface */
+#define WLC_IOCTL_VERSION 2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1
+/* ifdef EXT_STA */
+typedef struct _wl_assoc_result {
+	ulong associated;
+	ulong NDIS_auth;
+	ulong NDIS_infra;
+} wl_assoc_result_t;
+/* EXT_STA */
+
+#define WL_PHY_PAVARS_LEN 64	/**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] ..
#define WL_PHY_PAVARS_LEN 64 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */ + +#define WL_PHY_PAVAR_VER 1 /**< pavars version */ +#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */ +typedef struct wl_pavars2 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< len of this structure */ + uint16 inuse; /**< driver returns 1 for a1, b0, b1 in current band range */ + uint16 phy_type; /**< phy type */ + uint16 bandrange; + uint16 chain; + uint16 inpa[WL_PHY_PAVARS2_NUM]; /**< phy pavars for one band range */ +} wl_pavars2_t; + +typedef struct wl_po { + uint16 phy_type; /**< Phy type */ + uint16 band; + uint16 cckpo; + uint16 PAD; + uint32 ofdmpo; + uint16 mcspo[8]; +} wl_po_t; + +#define WL_NUM_RPCALVARS 5 /**< number of rpcal vars */ + +typedef struct wl_rpcal { + uint16 value; + uint16 update; +} wl_rpcal_t; + +#define WL_NUM_RPCALPHASEVARS 5 /* number of rpcal phase vars */ + +typedef struct wl_rpcal_phase { + uint16 value; + uint16 update; +} wl_rpcal_phase_t; + +typedef struct wl_aci_args { + int32 enter_aci_thresh; /* Trigger level to start detecting ACI */ + int32 exit_aci_thresh; /* Trigger level to exit ACI mode */ + int32 usec_spin; /* microsecs to delay between rssi samples */ + int32 glitch_delay; /* interval between ACI scans when glitch count is consistently high */ + uint16 nphy_adcpwr_enter_thresh; /**< ADC power to enter ACI mitigation mode */ + uint16 nphy_adcpwr_exit_thresh; /**< ADC power to exit ACI mitigation mode */ + uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */ + uint16 nphy_num_samples; /**< Number of samples to compute power on one channel */ + uint16 nphy_undetect_window_sz; /**< num of undetects to exit ACI Mitigation mode */ + uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */ + uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */ + uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */ + uint16 nphy_noise_noassoc_glitch_th_up; /**< wl interference 4 */ + uint16 nphy_noise_noassoc_glitch_th_dn; + uint16 nphy_noise_assoc_glitch_th_up; + uint16 nphy_noise_assoc_glitch_th_dn; + uint16 nphy_noise_assoc_aci_glitch_th_up; + uint16 nphy_noise_assoc_aci_glitch_th_dn; + uint16 nphy_noise_assoc_enter_th; + uint16 nphy_noise_noassoc_enter_th; + uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th; + uint16 nphy_noise_noassoc_crsidx_incr; + uint16 nphy_noise_assoc_crsidx_incr; + uint16 nphy_noise_crsidx_decr; +} wl_aci_args_t; + +#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */ + +#define WL_MACFIFO_PLAY_ARGS_T_VERSION 1u /* version of wl_macfifo_play_args_t struct */ + +enum wl_macfifo_play_flags { + WL_MACFIFO_PLAY_STOP = 0x00u, /* stop playing samples */ + WL_MACFIFO_PLAY_START = 0x01u, /* start playing samples */ + WL_MACFIFO_PLAY_LOAD = 0x02u, /* for set: load samples + for get: samples are loaded + */ + WL_MACFIFO_PLAY_GET_MAX_SIZE = 0x10u, /* get the macfifo buffer size */ + WL_MACFIFO_PLAY_GET_STATUS = 0x20u, /* get macfifo play status */ +}; + +typedef struct wl_macfifo_play_args { + uint16 version; /* structure version */ + uint16 len; /* size of structure */ + uint16 flags; + uint8 PAD[2]; + uint32 data_len; /* data length */ +} wl_macfifo_play_args_t; + +#define WL_MACFIFO_PLAY_DATA_T_VERSION 1u /* version of wl_macfifo_play_data_t struct */ + +typedef struct wl_macfifo_play_data { + uint16 version; /* structure version */ + uint16 len; /* size of structure */ + uint32 data_len; /* data length */ +} wl_macfifo_play_data_t; + +#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t 
struct */ +typedef struct wl_samplecollect_args { + /* version 0 fields */ + uint8 coll_us; + uint8 PAD[3]; + int32 cores; + /* add'l version 1 fields */ + uint16 version; /**< see definition of WL_SAMPLECOLLECT_T_VERSION */ + uint16 length; /**< length of entire structure */ + int8 trigger; + uint8 PAD; + uint16 timeout; + uint16 mode; + uint16 PAD; + uint32 pre_dur; + uint32 post_dur; + uint8 gpio_sel; + uint8 downsamp; + uint8 be_deaf; + uint8 agc; /**< loop from init gain and going down */ + uint8 filter; /**< override high pass corners to lowest */ + /* add'l version 2 fields */ + uint8 trigger_state; + uint8 module_sel1; + uint8 module_sel2; + uint16 nsamps; + uint16 PAD; + int32 bitStart; + uint32 gpioCapMask; + uint8 gpio_collection; + uint8 PAD[3]; +} wl_samplecollect_args_t; + +#define WL_SAMPLEDATA_T_VERSION 1 /**< version of wl_sampledata_t struct */ +/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */ +#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2 + +typedef struct wl_sampledata { + uint16 version; /**< structure version */ + uint16 size; /**< size of structure */ + uint16 tag; /**< Header/Data */ + uint16 length; /**< data length */ + uint32 flag; /**< bit def */ +} wl_sampledata_t; + +/* WL_OTA START */ +/* OTA Test Status */ +enum { + WL_OTA_TEST_IDLE = 0, /**< Default Idle state */ + WL_OTA_TEST_ACTIVE = 1, /**< Test Running */ + WL_OTA_TEST_SUCCESS = 2, /**< Successfully Finished Test */ + WL_OTA_TEST_FAIL = 3 /**< Test Failed in the Middle */ +}; + +/* OTA SYNC Status */ +enum { + WL_OTA_SYNC_IDLE = 0, /**< Idle state */ + WL_OTA_SYNC_ACTIVE = 1, /**< Waiting for Sync */ + WL_OTA_SYNC_FAIL = 2 /**< Sync pkt not received */ +}; + +/* Various error states the DUT can get stuck in during the test */ +enum { + WL_OTA_SKIP_TEST_CAL_FAIL = 1, /**< Phy calibration failed */ + WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync Packet not received */ + WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /**< Cmd flow file download failed */ + WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /**< No test found in Flow file */ + WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /**< WL UP failed */ + WL_OTA_SKIP_TEST_UNKNOWN_CALL /**< Unintentional scheduling on ota test */ +}; + +/* Differentiator for ota_tx and ota_rx */ +enum { + WL_OTA_TEST_TX = 0, /**< ota_tx */ + WL_OTA_TEST_RX = 1, /**< ota_rx */ +}; + +/* Modes of operation: 20 in 40 MHz, 20 MHz, 40 MHz and full 80 MHz */ +enum { + WL_OTA_TEST_BW_20_IN_40MHZ = 0, /**< 20 in 40 operation */ + WL_OTA_TEST_BW_20MHZ = 1, /**< 20 MHz operation */ + WL_OTA_TEST_BW_40MHZ = 2, /**< full 40 MHz operation */ + WL_OTA_TEST_BW_80MHZ = 3 /* full 80 MHz operation */ +}; +#define HT_MCS_INUSE 0x00000080 /* HT MCS in use, indicates b0-6 holds an mcs */ +#define VHT_MCS_INUSE 0x00000100 /* VHT MCS in use, indicates b0-6 holds an mcs */ +#define OTA_RATE_MASK 0x0000007f /* rate/mcs value */ +#define OTA_STF_SISO 0 +#define OTA_STF_CDD 1 +#define OTA_STF_STBC 2 +#define OTA_STF_SDM 3 + +typedef struct ota_rate_info { + uint8 rate_cnt; /**< Total number of rates */ + uint8 PAD; + uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /**< array of rates from 1 Mbps to 130 Mbps */ + /**< for legacy rates : rate in Mbps * 2 */ + /**< for HT rates : mcs index */ +} ota_rate_info_t; + +typedef struct ota_power_info { + int8 pwr_ctrl_on; /**< power control on/off */ + int8 start_pwr; /**< starting power/index */ + int8 delta_pwr; /**< delta power/index */ + int8 end_pwr; /**< end power/index */ +} ota_power_info_t; + +typedef struct ota_packetengine { + uint16 delay; /**< Inter-packet delay */ + /**< for ota_tx, delay is tx ifs in microseconds */ + /* for ota_rx, delay is wait time in milliseconds */ + uint16 nframes; /**< Number of frames */ + uint16 length; /**< Packet length */ +} ota_packetengine_t;
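+/*
+ * Illustrative encode sketch, not part of the ABI: per the comments on
+ * ota_rate_info_t and the OTA_RATE_MASK/HT_MCS_INUSE flags above, legacy
+ * entries plausibly carry rate-in-Mbps * 2 while HT entries carry the MCS
+ * index with the in-use flag set; treat the exact packing as an assumption.
+ */
+#if 0
+static uint16 example_ota_legacy_rate(uint8 mbps)
+{
+	return (uint16)mbps * 2u;	/* e.g. 54 Mbps -> 108 */
+}
+
+static uint16 example_ota_ht_rate(uint8 mcs)
+{
+	return (uint16)((mcs & OTA_RATE_MASK) | HT_MCS_INUSE);
+}
+#endif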
+ +/* + * OTA txant/rxant parameter + * bit7-4: 4 bits swdiv_tx/rx_policy bitmask, specify antenna-policy for SW diversity + * bit3-0: 4 bits TxCore bitmask, specify cores used for transmit frames + * (maximum spatial expansion) + */ +#define WL_OTA_TEST_ANT_MASK 0xF0 +#define WL_OTA_TEST_CORE_MASK 0x0F + +/* OTA txant/rxant 'ant_mask' field; map to Tx/Rx antenna policy for SW diversity */ +enum { + WL_OTA_TEST_FORCE_ANT0 = 0x10, /* force antenna to Ant 0 */ + WL_OTA_TEST_FORCE_ANT1 = 0x20, /* force antenna to Ant 1 */ +}; + +/* antenna/core fields access */ +#define WL_OTA_TEST_GET_ANT(_txant) ((_txant) & WL_OTA_TEST_ANT_MASK) +#define WL_OTA_TEST_GET_CORE(_txant) ((_txant) & WL_OTA_TEST_CORE_MASK) + +/** Test info vector */ +typedef struct wl_ota_test_args { + uint8 cur_test; /**< test phase */ + uint8 chan; /**< channel */ + uint8 bw; /**< bandwidth */ + uint8 control_band; /**< control band */ + uint8 stf_mode; /**< stf mode */ + uint8 PAD; + ota_rate_info_t rt_info; /**< Rate info */ + ota_packetengine_t pkteng; /**< packeteng info */ + uint8 txant; /**< tx antenna */ + uint8 rxant; /**< rx antenna */ + ota_power_info_t pwr_info; /**< power sweep info */ + uint8 wait_for_sync; /**< wait for sync or not */ + uint8 ldpc; + uint8 sgi; + uint8 PAD; + /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */ +} wl_ota_test_args_t; + +#define WL_OTA_TESTVEC_T_VERSION 1 /* version of wl_ota_test_vector_t struct */ +typedef struct wl_ota_test_vector { + uint16 version; + wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /**< Test argument struct */ + uint16 test_cnt; /**< Total number of tests */ + uint8 file_dwnld_valid; /**< File successfully downloaded */ + uint8 sync_timeout; /**< sync packet timeout */ + int8 sync_fail_action; /**< sync fail action */ + struct ether_addr sync_mac; /**< MAC address for sync pkt */ + struct ether_addr tx_mac; /**< MAC address for tx */ + struct ether_addr rx_mac; /**< MAC address for rx */ + int8 loop_test; /**< dbg feature to loop the test */ + uint16 test_rxcnt; + /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */ +} wl_ota_test_vector_t; + +/** struct copied back from the dongle to the host to query the status */ +typedef struct wl_ota_test_status { + int16 cur_test_cnt; /**< test phase */ + int8 skip_test_reason; /**< skip test reason */ + uint8 PAD; + wl_ota_test_args_t test_arg; /**< cur test arg details */ + uint16 test_cnt; /**< total number of tests downloaded */ + uint8 file_dwnld_valid; /**< file successfully downloaded ? 
*/ + uint8 sync_timeout; /**< sync timeout */ + int8 sync_fail_action; /**< sync fail action */ + struct ether_addr sync_mac; /**< MAC address for sync pkt */ + struct ether_addr tx_mac; /**< tx mac address */ + struct ether_addr rx_mac; /**< rx mac address */ + uint8 test_stage; /**< check the test status */ + int8 loop_test; /**< Debug feature that puts the test engine in a loop */ + uint8 sync_status; /**< sync status */ +} wl_ota_test_status_t; + +/* For ioctls that take the sta monitor information */ +typedef struct stamon_data { + struct ether_addr ea; + uint8 PAD[2]; + int32 rssi; +} stamon_data_t; + +typedef struct stamon_info { + int32 version; + uint32 count; + stamon_data_t sta_data[1]; +} stamon_info_t; + +typedef struct wl_ota_rx_rssi { + uint16 pktcnt; /* Pkt count used for this rx test */ + chanspec_t chanspec; /* Channel info on which the packets are received */ + int16 rssi; /* Average RSSI of the first 50% packets received */ +} wl_ota_rx_rssi_t; + +#define WL_OTARSSI_T_VERSION 1 /* version of wl_ota_test_rssi_t struct */ +#define WL_OTA_TEST_RSSI_FIXED_SIZE OFFSETOF(wl_ota_test_rssi_t, rx_rssi) + +typedef struct wl_ota_test_rssi { + uint8 version; + uint8 testcnt; /* total measured RSSI values, valid on output only */ + wl_ota_rx_rssi_t rx_rssi[1]; /* Variable length array of wl_ota_rx_rssi_t */ +} wl_ota_test_rssi_t;
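+/*
+ * Illustrative sizing sketch, not part of the ABI: a buffer for
+ * wl_ota_test_rssi_t holding n results is the fixed header plus n trailing
+ * wl_ota_rx_rssi_t records, which is exactly what the OFFSETOF-based
+ * WL_OTA_TEST_RSSI_FIXED_SIZE definition above is for.
+ */
+#if 0
+static uint32 example_ota_rssi_buflen(uint8 n)
+{
+	return WL_OTA_TEST_RSSI_FIXED_SIZE + (uint32)n * sizeof(wl_ota_rx_rssi_t);
+}
+#endif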
+ +/* WL_OTA END */ + +/** wl_radar_args_t */ +typedef struct { + int32 npulses; /**< required number of pulses at n * t_int */ + int32 ncontig; /**< required number of pulses at t_int */ + int32 min_pw; /**< minimum pulse width (20 MHz clocks) */ + int32 max_pw; /**< maximum pulse width (20 MHz clocks) */ + uint16 thresh0; /**< Radar detection, thresh 0 */ + uint16 thresh1; /**< Radar detection, thresh 1 */ + uint16 blank; /**< Radar detection, blank control */ + uint16 fmdemodcfg; /**< Radar detection, fmdemod config */ + int32 npulses_lp; /**< Radar detection, minimum long pulses */ + int32 min_pw_lp; /**< Minimum pulsewidth for long pulses */ + int32 max_pw_lp; /**< Maximum pulsewidth for long pulses */ + int32 min_fm_lp; /**< Minimum fm for long pulses */ + int32 max_span_lp; /**< Maximum deltat for long pulses */ + int32 min_deltat; /**< Minimum spacing between pulses */ + int32 max_deltat; /**< Maximum spacing between pulses */ + uint16 autocorr; /**< Radar detection, autocorr on or off */ + uint16 st_level_time; /**< Radar detection, start_timing level */ + uint16 t2_min; /**< minimum clocks needed to remain in state 2 */ + uint8 PAD[2]; + uint32 version; /**< version */ + uint32 fra_pulse_err; /**< sample error margin for detecting French radar pulses */ + int32 npulses_fra; /**< Radar detection, minimum French pulses set */ + int32 npulses_stg2; /**< Radar detection, minimum staggered-2 pulses set */ + int32 npulses_stg3; /**< Radar detection, minimum staggered-3 pulses set */ + uint16 percal_mask; /**< defines which period cal is masked from radar detection */ + uint8 PAD[2]; + int32 quant; /**< quantization resolution to pulse positions */ + uint32 min_burst_intv_lp; /**< minimum burst to burst interval for bin3 radar */ + uint32 max_burst_intv_lp; /**< maximum burst to burst interval for bin3 radar */ + int32 nskip_rst_lp; /**< number of skipped pulses before resetting lp buffer */ + int32 max_pw_tol; /* maximum tolerance allowed in detected pulse width for radar detection */ + uint16 feature_mask; /**< 16-bit mask to specify enabled features */ + uint16 thresh0_sc; /**< Radar detection, thresh 0 */ + uint16 thresh1_sc; /**< Radar detection, thresh 1 */ + uint8 PAD[2]; +} wl_radar_args_t; + +#define WL_RADAR_ARGS_VERSION 2 + +typedef struct { + uint32 version; /**< version */ + uint16 thresh0_20_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */ + uint16 thresh1_20_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */ + uint16 thresh0_40_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */ + uint16 thresh1_40_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */ + uint16 thresh0_80_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */ + uint16 thresh1_80_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */ + uint16 thresh0_20_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */ + uint16 thresh1_20_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */ + uint16 thresh0_40_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */ + uint16 thresh1_40_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */ + uint16 thresh0_80_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */ + uint16 thresh1_80_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */ + uint16 thresh0_160_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */ + uint16 thresh1_160_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */ + uint16 thresh0_160_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */ + uint16 thresh1_160_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */ +} wl_radar_thr_t; + +typedef struct { + uint32 version; /* version */ + uint16 thresh0_sc_20_lo; + uint16 thresh1_sc_20_lo; + uint16 thresh0_sc_40_lo; + uint16 thresh1_sc_40_lo; + uint16 thresh0_sc_80_lo; + uint16 thresh1_sc_80_lo; + uint16 thresh0_sc_20_hi; + uint16 thresh1_sc_20_hi; + uint16 thresh0_sc_40_hi; + uint16 thresh1_sc_40_hi; + uint16 thresh0_sc_80_hi; + uint16 thresh1_sc_80_hi; + uint16 fc_varth_sb; + uint16 fc_varth_bin5_sb; + uint16 notradar_enb; + uint16 max_notradar_lp; + uint16 max_notradar; + uint16 max_notradar_lp_sc; + uint16 max_notradar_sc; + uint16 highpow_war_enb; + uint16 highpow_sp_ratio; /* unit is 0.5 */ +} wl_radar_thr2_t; + +#define WL_RADAR_THR_VERSION 2 + +typedef struct { + uint32 ver; + uint32 len; + int32 rssi_th[3]; + uint8 rssi_gain_80[4]; + uint8 rssi_gain_160[4]; +} wl_dyn_switch_th_t; + +#define WL_PHY_DYN_SWITCH_TH_VERSION 1 + +/** RSSI per antenna */ +typedef struct { + uint32 version; /**< version field */ + uint32 count; /**< number of valid antenna rssi */ + int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */ + int8 rssi_sum; /**< summed rssi across all antennas */ + int8 PAD[3]; +} wl_rssi_ant_t; + +/* SNR per antenna */ +typedef struct { + uint32 version; /* version field */ + uint32 count; /* number of valid antenna snr */ + int8 snr_ant[WL_RSSI_ANT_MAX]; /* snr per antenna */ +} wl_snr_ant_t; + +/* Weighted average support */ +#define WL_WA_VER 0 /* Initial version - Basic WA algorithm only */ + +#define WL_WA_ALGO_BASIC 0 /* Basic weighted average algorithm (all 4 metrics) */ +#define WL_WA_TYPE_RSSI 0 +#define WL_WA_TYPE_SNR 1 +#define WL_WA_TYPE_TXRATE 2 +#define WL_WA_TYPE_RXRATE 3 +#define WL_WA_TYPE_MAX 4 + +typedef struct { /* payload of subcmd in xtlv */ + uint8 id; + uint8 n_total; /* Total number of samples (n_total >= n_recent) */ + uint8 n_recent; /* Number of samples denoted as recent */ + uint8 w_recent; /* Total weight for the recent samples (as 
percentage) */ +} wl_wa_basic_params_t; + +typedef struct { + uint16 ver; + uint16 len; + uint8 subcmd[]; /* sub-cmd in bcm_xtlv_t */ +} wl_wa_cmd_t; + +/** data structure used in 'dfs_status' wl interface, which is used to query dfs status */ +typedef struct { + uint32 state; /**< noted by WL_DFS_CACSTATE_XX. */ + uint32 duration; /**< time spent in ms in state. */ + /** + * as dfs enters ISM state, it removes the operational channel from quiet channel + * list and notes the channel in channel_cleared. set to 0 if no channel is cleared + */ + chanspec_t chanspec_cleared; + /** chanspec_cleared used to be a uint32; a uint16 pad is added to maintain the size */ + uint16 pad; +} wl_dfs_status_t; + +typedef struct { + uint32 state; /* noted by WL_DFS_CACSTATE_XX */ + uint32 duration; /* time spent in ms in state */ + chanspec_t chanspec; /* chanspec of this core */ + chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */ + uint16 sub_type; /* currently just the index of the core or the respective PLL */ + uint16 pad; +} wl_dfs_sub_status_t; + +#define WL_DFS_STATUS_ALL_VERSION (1) +typedef struct { + uint16 version; /* version field; current max version 1 */ + uint16 num_sub_status; + wl_dfs_sub_status_t dfs_sub_status[1]; /* struct array of length num_sub_status */ +} wl_dfs_status_all_t; + +#define WL_DFS_AP_MOVE_VERSION (1) + +struct wl_dfs_ap_move_status_v1 { + int16 dfs_status; /* DFS scan status */ + chanspec_t chanspec; /* New AP Chanspec */ + wl_dfs_status_t cac_status; /* CAC status */ +}; + +typedef struct wl_dfs_ap_move_status_v2 { + int8 version; /* version field; current max version 1 */ + int8 move_status; /* DFS move status */ + chanspec_t chanspec; /* New AP Chanspec */ + wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */ +} wl_dfs_ap_move_status_v2_t; + +#define WL_DFS_AP_MOVE_ABORT -1 /* Abort any dfs_ap_move in progress immediately */ +#define WL_DFS_AP_MOVE_STUNT -2 /* Stunt move but continue background CSA if in progress */ + +/** data structure used in 'radar_status' wl interface, which is used to query radar detection status */ +typedef struct { + uint8 detected; + uint8 PAD[3]; + int32 count; + uint8 pretended; + uint8 PAD[3]; + uint32 radartype; + uint32 timenow; + uint32 timefromL; + int32 lp_csect_single; + int32 detected_pulse_index; + int32 nconsecq_pulses; + chanspec_t ch; + uint8 PAD[2]; + int32 pw[10]; + int32 intv[10]; + int32 fm[10]; +} wl_radar_status_t; + +#define NUM_PWRCTRL_RATES 12 + +typedef struct { + uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /**< User set target */ + uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /**< reg and local power limit */ + uint8 txpwr_local_max; /**< local max according to the AP */ + uint8 txpwr_local_constraint; /**< local constraint according to the AP */ + uint8 txpwr_chan_reg_max; /**< Regulatory max for this channel */ + uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /**< Latest target for 2.4 and 5 GHz */ + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 GHz */ + uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /**< On G phy, OFDM power offset */ + uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /**< Max CCK power for this band (SROM) */ + uint8 txpwr_bphy_ofdm_max; /**< Max OFDM power for this band (SROM) */ + uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /**< Max power for A band (SROM) */ + int8 txpwr_antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_power_legacy_t;
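+/*
+ * Illustrative walk, not part of the ABI, over the variable-length
+ * wl_dfs_status_all_t defined above: num_sub_status wl_dfs_sub_status_t
+ * records follow the fixed header, using the trailing [1]-array idiom.
+ */
+#if 0
+static void example_dfs_status_all_walk(const wl_dfs_status_all_t *all)
+{
+	uint16 i;
+
+	if (all->version != WL_DFS_STATUS_ALL_VERSION)
+		return;	/* unknown layout */
+	for (i = 0; i < all->num_sub_status; i++) {
+		const wl_dfs_sub_status_t *sub = &all->dfs_sub_status[i];
+		/* sub->state is a WL_DFS_CACSTATE_XX value; sub->duration is in ms */
+		(void)sub;
+	}
+}
+#endif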
#define WL_TX_POWER_RATES_LEGACY 45 +#define WL_TX_POWER_MCS20_FIRST 12 +#define WL_TX_POWER_MCS20_NUM 16 +#define WL_TX_POWER_MCS40_FIRST 28 +#define WL_TX_POWER_MCS40_NUM 17 + +typedef struct { + uint32 flags; + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint8 local_max; /**< local max according to the AP */ + uint8 local_constraint; /**< local constraint according to the AP */ + int8 antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 rf_cores; /**< count of RF Cores being reported */ + uint8 est_Pout[4]; /**< Latest tx power out estimate per RF + * chain without adjustment + */ + uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */ + uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /**< User limit */ + uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /**< Regulatory power limit */ + uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /**< Max power board can support (SROM) */ + uint8 target[WL_TX_POWER_RATES_LEGACY]; /**< Latest target power */ + uint8 PAD[2]; +} tx_power_legacy2_t; + +#define WL_NUM_2x2_ELEMENTS 4 +#define WL_NUM_3x3_ELEMENTS 6 +#define WL_NUM_4x4_ELEMENTS 10 + +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 flags; + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint32 buflen; /**< ppr buffer length */ + uint8 pprbuf[1]; /**< Latest target power buffer */ +} wl_txppr_t; + +#define WL_TXPPR_VERSION 1 +#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t)) +#define TX_POWER_T_VERSION 45 +#define TX_POWER_T_VERSION_V2 46 + +/* curpower ppr types */ +enum { + PPRTYPE_TARGETPOWER = 1, + PPRTYPE_BOARDLIMITS = 2, + PPRTYPE_REGLIMITS = 3, + PPRTYPE_RU_REGLIMITS = 4, + PPRTYPE_RU_BOARDLIMITS = 5, + PPRTYPE_RU_TARGETPOWER = 6, + PPRTYPE_DYNAMIC_INFO = 7, + PPRTYPE_LAST +}; + +/** number of ppr serialization buffers, it should be reg, board and target */ +#define WL_TXPPR_SER_BUF_NUM (PPRTYPE_LAST - 1) + +typedef struct chanspec_txpwr_max { + chanspec_t chanspec; /**< chanspec */ + uint8 txpwr_max; /**< max txpwr in all the rates */ + uint8 padding; +} chanspec_txpwr_max_t; + +typedef struct wl_chanspec_txpwr_max { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 count; /**< number of elements of (chanspec, txpwr_max) pair */ + chanspec_txpwr_max_t txpwr[1]; /**< array of (chanspec, max_txpwr) pair */ +} wl_chanspec_txpwr_max_t; + +#define WL_CHANSPEC_TXPWR_MAX_VER 1 +#define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t)) + +typedef struct tx_inst_power { + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 GHz */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_inst_power_t; + +#define WL_NUM_TXCHAIN_MAX 4 +typedef struct wl_txchain_pwr_offsets { + int8 offset[WL_NUM_TXCHAIN_MAX]; /**< quarter dBm signed offset for each chain */ +} wl_txchain_pwr_offsets_t; + +/** maximum channels returned by the get valid channels iovar */ +#define WL_NUMCHANNELS 64 +#define WL_NUMCHANNELS_MANY_CHAN 10 +#define WL_ITER_LIMIT_MANY_CHAN 5 + +#define WL_MIMO_PS_CFG_VERSION_1 1 + +typedef struct wl_mimops_cfg { + uint8 version; + /* active_chains: 0 for all, 1 for 1 chain. */ + uint8 active_chains; + /* static (0), dynamic (1), or disabled (3). Mode applies only when active_chains = 0. */ + uint8 mode; + /* bandwidth = Full (0), 20M (1), 40M (2), 80M (3). */ + uint8 bandwidth; + uint8 applychangesafterlearning; + uint8 pad[3]; +} wl_mimops_cfg_t;
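+/*
+ * Illustrative conversion, not part of the ABI: wl_txchain_pwr_offsets_t
+ * above carries signed per-chain offsets in quarter-dBm steps, so an
+ * offset of -6 means -1.5 dB applied to that chain.
+ */
+#if 0
+static int example_qdbm_to_millidb(int8 qdbm_offset)
+{
+	/* quarter dBm -> milli-dB without floating point: 1 step = 250 */
+	return (int)qdbm_offset * 250;
+}
+#endif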
+ +/* This event is for tracing MIMO PS metrics snapshot calls. + * It is helpful for debugging out-of-sync issues between + * ucode SHM values and the FW snapshot calculation. + * It is part of the EVENT_LOG_TAG_MIMO_PS_TRACE. + */ +#define WL_MIMO_PS_METRICS_SNAPSHOT_TRACE_TYPE 0 +typedef struct wl_mimo_ps_metrics_snapshot_trace { + /* type field for this TLV: */ + uint16 type; + /* length field for this TLV */ + uint16 len; + uint32 idle_slotcnt_mimo; /* MIMO idle slotcnt raw SHM value */ + uint32 last_idle_slotcnt_mimo; /* stored value snapshot */ + uint32 idle_slotcnt_siso; /* SISO idle slotcnt raw SHM value */ + uint32 last_idle_slotcnt_siso; /* stored value snapshot */ + uint32 rx_time_mimo; /* Rx MIMO raw SHM value */ + uint32 last_rx_time_mimo; /* stored value snapshot */ + uint32 rx_time_siso; /* RX SISO raw SHM value */ + uint32 last_rx_time_siso; /* stored value snapshot */ + uint32 tx_time_1chain; /* Tx 1-chain raw SHM value */ + uint32 last_tx_time_1chain; /* stored value snapshot */ + uint32 tx_time_2chain; /* Tx 2-chain raw SHM value */ + uint32 last_tx_time_2chain; /* stored value snapshot */ + uint32 tx_time_3chain; /* Tx 3-chain raw SHM value */ + uint32 last_tx_time_3chain; /* stored value snapshot */ + uint16 reason; /* reason for snapshot call, see below */ + /* Does the call reset last values after delta calculation */ + uint16 reset_last; +} wl_mimo_ps_metrics_snapshot_trace_t; +/* reason codes for mimo ps metrics snapshot function calls */ +#define WL_MIMOPS_METRICS_SNAPSHOT_REPORT 1 +#define WL_MIMOPS_METRICS_SNAPSHOT_RXCHAIN_SET 2 +#define WL_MIMOPS_METRICS_SNAPSHOT_ARBI 3 +#define WL_MIMOPS_METRICS_SNAPSHOT_SLOTUPD 4 +#define WL_MIMOPS_METRICS_SNAPSHOT_PMBCNRX 5 +#define WL_MIMOPS_METRICS_SNAPSHOT_BMACINIT 6 +#define WL_MIMOPS_METRICS_SNAPSHOT_HT_COMPLETE 7 +#define WL_MIMOPS_METRICS_SNAPSHOT_OCL 8 + +#define WL_MIMO_PS_STATUS_VERSION_2 2 +typedef struct wl_mimo_ps_status { + uint8 version; + uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */ + uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */ + uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. See below for values */ + uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */ + uint8 bss_rxchain; /* bss rxchain bitmask */ + uint8 bss_txchain; /* bss txchain bitmask */ + uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint16 hw_state; /* bitmask of hw state. See below for values */ + uint8 hw_rxchain; /* actual HW rxchain bitmask */ + uint8 hw_txchain; /* actual HW txchain bitmask */ + uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint8 pm_bcnrx_state; /* actual state of ucode flag */ + uint8 basic_rates_present; /* internal flag to trigger siso bcmc rx */ + uint8 siso_bcmc_rx_state; /* actual state of ucode flag */ +} wl_mimo_ps_status_t; + +#define WL_MIMO_PS_STATUS_VERSION_1 1 +typedef struct wl_mimo_ps_status_v1 { + uint8 version; + uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */ + uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */ + uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. See below for values */ + uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */ + uint8 bss_rxchain; /* bss rxchain bitmask */ + uint8 bss_txchain; /* bss txchain bitmask */ + uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint16 hw_state; /* bitmask of hw state. 
See below for values */ + uint8 hw_rxchain; /* actual HW rxchain bitmask */ + uint8 hw_txchain; /* actual HW txchain bitmask */ + uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint8 pad[3]; +} wl_mimo_ps_status_v1_t; + +#define WL_MIMO_PS_STATUS_AP_CAP(ap_cap) (ap_cap & 0x0F) +#define WL_MIMO_PS_STATUS_AP_CAP_BW(ap_cap) (ap_cap >> 4) +#define WL_MIMO_PS_STATUS_ASSOC_BW_SHIFT 4 + +/* version 3: assoc status: low nibble is the status enum, high nibble holds other flags */ +#define WL_MIMO_PS_STATUS_VERSION_3 3 +#define WL_MIMO_PS_STATUS_ASSOC_STATUS_MASK 0x0F +#define WL_MIMO_PS_STATUS_ASSOC_STATUS_VHT_WITHOUT_OMN 0x80 + +/* mimo_ps_status: ap_cap/association status */ +enum { + WL_MIMO_PS_STATUS_ASSOC_NONE = 0, + WL_MIMO_PS_STATUS_ASSOC_SISO = 1, + WL_MIMO_PS_STATUS_ASSOC_MIMO = 2, + WL_MIMO_PS_STATUS_ASSOC_LEGACY = 3 +}; + +/* mimo_ps_status: mimo_ps_cfg states */ +enum { + WL_MIMO_PS_CFG_STATE_NONE = 0, + WL_MIMO_PS_CFG_STATE_INFORM_AP_INPROGRESS = 1, + WL_MIMO_PS_CFG_STATE_INFORM_AP_DONE = 2, + WL_MIMO_PS_CFG_STATE_LEARNING = 3, + WL_MIMO_PS_CFG_STATE_HW_CONFIGURE = 4, + WL_MIMO_PS_CFG_STATE_INFORM_AP_PENDING = 5 +}; + +/* mimo_ps_status: hw_state values */ +#define WL_MIMO_PS_STATUS_HW_STATE_NONE 0 +#define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX (0x1 << 0) +#define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS (0x1 << 1) + +#ifdef WLAWDL +#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS (0x1 << 2) +#endif /* WLAWDL */ + +#define WL_MIMO_PS_STATUS_HW_STATE_SCAN (0x1 << 3) +#define WL_MIMO_PS_STATUS_HW_STATE_TXPPR (0x1 << 4) +#define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE (0x1 << 5) +#define WL_MIMO_PS_STATUS_HW_STATE_TMPSENSE (0x1 << 6) +#define WL_MIMO_PS_STATUS_HW_STATE_IOVAR (0x1 << 7) +#define WL_MIMO_PS_STATUS_HW_STATE_AP_BSS (0x1 << 8) + +/* mimo_ps_status: mrc states */ +#define WL_MIMO_PS_STATUS_MRC_NONE 0 +#define WL_MIMO_PS_STATUS_MRC_ACTIVE 1 + +/* mimo_ps_status: core flag states for single-core beacon and siso-bcmc rx */ +#define WL_MIMO_PS_STATUS_MHF_FLAG_NONE 0 +#define WL_MIMO_PS_STATUS_MHF_FLAG_ACTIVE 1 +#define WL_MIMO_PS_STATUS_MHF_FLAG_COREDOWN 2 +#define WL_MIMO_PS_STATUS_MHF_FLAG_INVALID 3 + +/* Type values for the REASON */ +#define WL_MIMO_PS_PS_LEARNING_ABORTED (1 << 0) +#define WL_MIMO_PS_PS_LEARNING_COMPLETED (1 << 1) +#define WL_MIMO_PS_PS_LEARNING_ONGOING (1 << 2) + +typedef struct wl_mimo_ps_learning_event_data { + uint32 startTimeStamp; + uint32 endTimeStamp; + uint16 reason; + struct ether_addr BSSID; + uint32 totalSISO_below_rssi_threshold; + uint32 totalMIMO_below_rssi_threshold; + uint32 totalSISO_above_rssi_threshold; + uint32 totalMIMO_above_rssi_threshold; +} wl_mimo_ps_learning_event_data_t; + +#define WL_MIMO_PS_PS_LEARNING_CFG_ABORT (1 << 0) +#define WL_MIMO_PS_PS_LEARNING_CFG_STATUS (1 << 1) +#define WL_MIMO_PS_PS_LEARNING_CFG_CONFIG (1 << 2) +#define WL_MIMO_PS_PS_LEARNING_CFG_MASK (0x7) + +#define WL_MIMO_PS_PS_LEARNING_CFG_V1 1 + +typedef struct wl_mimops_learning_cfg { + /* flag: bit 0 for abort */ + /* flag: bit 1 for status */ + /* flag: bit 2 for configuring number of packets and rssi */ + uint8 flag; + /* mimo ps learning version, compatible version is 0 */ + uint8 version; + /* if version is 0 or rssi is 0, ignored */ + int8 learning_rssi_threshold; + uint8 reserved; + uint32 no_of_packets_for_learning; + wl_mimo_ps_learning_event_data_t mimops_learning_data; +} wl_mimops_learning_cfg_t; + +#define WL_OCL_STATUS_VERSION 1 +typedef struct ocl_status_info { + uint8 version; + uint8 len; + uint16 fw_status; /* Bits representing FW disable reasons */ +
uint8 hw_status; /* Bits for actual HW config and SISO/MIMO coremask */ + uint8 coremask; /* The ocl core mask (indicating listening core) */ +} ocl_status_info_t; + +/* MWS OCL map */ +#define WL_MWS_OCL_OVERRIDE_VERSION 1 +typedef struct wl_mws_ocl_override { + uint16 version; /* Structure version */ + uint16 bitmap_2g; /* bitmap for 2.4G channels bits 1-13 */ + uint16 bitmap_5g_lo; /* bitmap for 5G low channels by 2: + * 34-48, 52-56, 60-64, 100-102 + */ + uint16 bitmap_5g_mid; /* bitmap for 5G mid channels by 2: + * 104, 108-112, 116-120, 124-128, + * 132-136, 140, 149-151 + */ + uint16 bitmap_5g_high; /* bitmap for 5G high channels by 2 + * 153, 157-161, 165 + */ +} wl_mws_ocl_override_t; + +/* Bits for fw_status */ +#define OCL_DISABLED_HOST 0x01 /* Host has disabled through ocl_enable */ +#define OCL_DISABLED_RSSI 0x02 /* Disabled because of ocl_rssi_threshold */ +#define OCL_DISABLED_LTEC 0x04 /* Disabled due to LTE Coex activity */ +#define OCL_DISABLED_SISO 0x08 /* Disabled while in SISO mode */ +#define OCL_DISABLED_CAL 0x10 /* Disabled during active calibration */ +#define OCL_DISABLED_CHANSWITCH 0x20 /* Disabled during active channel switch */ +#define OCL_DISABLED_ASPEND 0x40 /* Disabled due to assoc pending */ +#define OCL_DISABLED_SEQ_RANGE 0x80 /* Disabled during SEQ Ranging */ +#define OCL_DISABLED_RXIQ_EST_BTLOWAR 0x100 /* Disabled if the bt-lo-war is active */ +#define OCL_DISABLED_IDLE_TSSICAL 0x200 +#define OCL_DISABLED_TONE 0x400 /* Disabled if the tone is active */ +#define OCL_DISABLED_NOISECAL 0x800 /* Disabled if the noise cal is active */ + +/* Bits for hw_status */ +#define OCL_HWCFG 0x01 /* State of OCL config bit in phy HW */ +#define OCL_HWMIMO 0x02 /* Set if current coremask is > 1 bit */ +#define OCL_COREDOWN 0x80 /* Set if core is currently down */ + +#define WL_OPS_CFG_VERSION_1 1 +/* Common IOVAR struct */ +typedef struct wl_ops_cfg_v1 { + uint16 version; + uint16 len; /* total length includes fixed fields and variable data[] */ + uint16 subcmd_id; /* subcommand id */ + uint16 padding; /* reserved / padding for 4 byte align */ + uint8 data[]; /* subcommand data; could be empty */ +} wl_ops_cfg_v1_t; + +/* subcommands ids */ +enum { + WL_OPS_CFG_SUBCMD_ENABLE = 0, /* OPS enable/disable mybss and obss + * for nav and plcp options + */ + WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR = 1, /* Max sleep duration used for OPS */ + WL_OPS_CFG_SUBCMD_RESET_STATS = 2 /* Reset stats part of ops_status + * on both slices + */ +}; + +#define WL_OPS_CFG_MASK 0xffff +#define WL_OPS_CFG_CAP_MASK 0xffff0000 +#define WL_OPS_CFG_CAP_SHIFT 16 /* Shift bits to locate the OPS CAP */ +#define WL_OPS_MAX_SLEEP_DUR 12500 /* max ops duration in us */ +#define WL_OPS_MINOF_MAX_SLEEP_DUR 512 /* min of max ops duration in us */ +#define WL_OPS_SUPPORTED_CFG (WL_OPS_MYBSS_PLCP_DUR | WL_OPS_MYBSS_NAV_DUR \ + | WL_OPS_OBSS_PLCP_DUR | WL_OPS_OBSS_NAV_DUR) +#define WL_OPS_DEFAULT_CFG WL_OPS_SUPPORTED_CFG + +/* WL_OPS_CFG_SUBCMD_ENABLE */ +typedef struct wl_ops_cfg_enable { + uint32 bits; /* selectively enable ops for mybss and obss */ +} wl_ops_cfg_enable_t; +/* Bits for WL_OPS_CFG_SUBCMD_ENABLE Parameter */ +#define WL_OPS_MYBSS_PLCP_DUR 0x1 /* OPS based on mybss 11b & 11n mixed HT frames + * PLCP header duration + */ +#define WL_OPS_MYBSS_NAV_DUR 0x2 /* OPS based on mybss RTS-CTS duration */ +#define WL_OPS_OBSS_PLCP_DUR 0x4 /* OPS based on obss 11b & 11n mixed HT frames + * PLCP header duration + */ +#define WL_OPS_OBSS_NAV_DUR 0x8 /* OPS based on obss RTS-CTS duration */ + +/* 
WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR */ +typedef struct wl_ops_cfg_max_sleep_dur { + uint32 val; /* maximum sleep duration (us) used for OPS */ +} wl_ops_cfg_max_sleep_dur_t; + +/* WL_OPS_CFG_SUBCMD_RESET_STATS */ +typedef struct wl_ops_cfg_reset_stats { + uint32 val; /* bitmap of slices, 0 means all slices */ +} wl_ops_cfg_reset_stats_t; + +#define WL_OPS_STATUS_VERSION_1 1 +#define OPS_DUR_HIST_BINS 5 /* number of bins used, 0-1, 1-2, 2-4, 4-8, >8 msec */ +typedef struct wl_ops_status_v1 { + uint16 version; + uint16 len; /* Total length including all fixed fields */ + uint8 slice_index; /* Slice for which status is reported */ + uint8 disable_obss; /* indicate if obss cfg is disabled */ + uint8 pad[2]; /* 4-byte alignment */ + uint32 disable_reasons; /* FW disable reasons */ + uint32 disable_duration; /* ops disable time (ms) due to disable reasons */ + uint32 applied_ops_config; /* currently applied ops config */ + uint32 partial_ops_dur; /* Total time (in usec) of partial ops duration */ + uint32 full_ops_dur; /* Total time (in usec) of full ops duration */ + uint32 count_dur_hist[OPS_DUR_HIST_BINS]; /* ops occurrence histogram */ + uint32 nav_cnt; /* number of times ops triggered based on NAV duration */ + uint32 plcp_cnt; /* number of times ops triggered based on PLCP duration */ + uint32 mybss_cnt; /* number of times mybss ops triggered */ + uint32 obss_cnt; /* number of times obss ops triggered */ + uint32 miss_dur_cnt; /* number of times ops couldn't happen + * due to insufficient duration + */ + uint32 miss_premt_cnt; /* number of times ops couldn't happen due + * to not meeting Phy preemption thresh + */ + uint32 max_dur_cnt; /* number of times ops did not trigger due to + * frames exceeding max sleep duration + */ + uint32 wake_cnt; /* number of ops miss due to wake reason */ + uint32 bcn_wait_cnt; /* number of ops miss due to waiting for bcn */ +} wl_ops_status_v1_t; +/* Bits for disable_reasons */ +#define OPS_DISABLED_HOST 0x01 /* Host has disabled through ops_cfg */ +#define OPS_DISABLED_UNASSOC 0x02 /* Disabled because the slice is in unassociated state */ +#define OPS_DISABLED_SCAN 0x04 /* Disabled because the slice is in scan state */ +#define OPS_DISABLED_BCN_MISS 0x08 /* Disabled because beacon missed for a duration */ + +#define WL_PSBW_CFG_VERSION_1 1 +/* Common IOVAR struct */ +typedef struct wl_psbw_cfg_v1 { + uint16 version; + uint16 len; /* total length includes fixed fields and variable data[] */ + uint16 subcmd_id; /* subcommand id */ + uint16 pad; /* reserved / padding for 4 byte align */ + uint8 data[]; /* subcommand data */ +} wl_psbw_cfg_v1_t; + +/* subcommands ids */ +enum { + /* PSBW enable/disable */ + WL_PSBW_CFG_SUBCMD_ENABLE = 0, + /* override psbw disable requests */ + WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1, + /* Reset stats part of psbw status */ + WL_PSBW_CFG_SUBCMD_RESET_STATS = 2 +}; + +#define WL_PSBW_OVERRIDE_DISA_CFG_MASK 0x0000ffff +#define WL_PSBW_OVERRIDE_DISA_CAP_MASK 0xffff0000 +#define WL_PSBW_OVERRIDE_DISA_CAP_SHIFT 16 /* shift bits for cap */ + +/* WL_PSBW_CFG_SUBCMD_ENABLE */ +typedef struct wl_psbw_cfg_enable { + bool enable; /* enable or disable */ +} wl_psbw_cfg_enable_t; + +/* WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */ +typedef struct wl_psbw_cfg_override_disable_mask { + uint32 mask; /* disable requests to override, cap and current cfg */ +} wl_psbw_cfg_override_disable_mask_t; + +/* WL_PSBW_CFG_SUBCMD_RESET_STATS */ +typedef struct wl_psbw_cfg_reset_stats { + uint32 val; /* infra interface index, 0 */ +} wl_psbw_cfg_reset_stats_t; +
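+/*
+ * Illustrative sketch, not part of the ABI: composing the common
+ * version/len/subcmd_id container shared by wl_ops_cfg_v1_t and
+ * wl_psbw_cfg_v1_t above, with the subcommand payload in the trailing
+ * data[] flexible array. 'buf' is a caller-provided scratch buffer.
+ */
+#if 0
+static uint16 example_psbw_enable_cmd(uint8 *buf, uint16 buflen, bool enable)
+{
+	wl_psbw_cfg_v1_t *cfg = (wl_psbw_cfg_v1_t *)buf;
+	wl_psbw_cfg_enable_t *sub = (wl_psbw_cfg_enable_t *)cfg->data;
+	uint16 len = (uint16)(sizeof(*cfg) + sizeof(*sub));
+
+	if (buflen < len)
+		return 0;
+	cfg->version = WL_PSBW_CFG_VERSION_1;
+	cfg->len = len;		/* fixed fields plus subcommand data */
+	cfg->subcmd_id = WL_PSBW_CFG_SUBCMD_ENABLE;
+	cfg->pad = 0;
+	sub->enable = enable;
+	return len;		/* bytes to hand to the iovar path */
+}
+#endif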
+#define WL_PSBW_STATUS_VERSION_1 1 +typedef struct wl_psbw_status_v1 { + uint16 version; + uint16 len; /* total length including all fixed fields */ + uint8 curr_slice_index; /* current slice index of the interface */ + uint8 associated; /* interface associated */ + chanspec_t chspec; /* radio chspec */ + uint32 state; /* psbw state */ + uint32 disable_reasons; /* FW disable reasons */ + uint32 slice_enable_dur; /* time (ms) psbw remains enabled on this slice */ + uint32 total_enable_dur; /* time (ms) psbw remains enabled total */ + uint32 enter_cnt; /* total cnt entering PSBW active */ + uint32 exit_cnt; /* total cnt exiting PSBW active */ + uint32 exit_imd_cnt; /* total cnt of immediate exits after waiting N TBTTs */ + uint32 enter_skip_cnt; /* total cnt of PSBW active entries skipped */ +} wl_psbw_status_v1_t; + +/* Bit for state */ +#define PSBW_ACTIVE 0x1 /* active 20MHz */ +#define PSBW_TTTT_PEND 0x2 /* waiting for TTTT intr */ +#define PSBW_WAIT_ENTER 0x4 /* in wait period before entering */ +#define PSBW_CAL_DONE 0x8 /* 20M channel cal done */ + +/* Bits for disable_reasons */ +#define WL_PSBW_DISA_HOST 0x00000001 /* Host has disabled through psbw_cfg */ +#define WL_PSBW_DISA_AP20M 0x00000002 /* AP is operating on 20 MHz */ +#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* slot_bss active */ +#define WL_PSBW_DISA_NOT_PMFAST 0x00000008 /* Not PM_FAST */ +#define WL_PSBW_DISA_BASICRATESET 0x00000010 /* BasicRateSet is empty */ +#define WL_PSBW_DISA_NOT_D3 0x00000020 /* PCIe not in D3 */ +#define WL_PSBW_DISA_CSA 0x00000040 /* CSA IE is present */ +#define WL_PSBW_DISA_ASSOC 0x00000080 /* assoc state is active or unassociated */ +#define WL_PSBW_DISA_SCAN 0x00000100 /* scan state is active */ +#define WL_PSBW_DISA_CAL 0x00000200 /* cal pending or active */ +#define WL_PSBW_DISA_BCN_OFFLOAD 0x00000400 /* PSBW disabled due to scan + * core beacon offload + */ +#define WL_PSBW_DISA_DISASSOC 0x00000800 /* STA is disassociated */ +/* following are not part of disable reasons */ +#define WL_PSBW_EXIT_PM 0x00001000 /* Out of PM */ +#define WL_PSBW_EXIT_TIM 0x00002000 /* unicast TIM bit present */ +#define WL_PSBW_EXIT_DATA 0x00004000 /* Data for transmission */ +#define WL_PSBW_EXIT_MGMTDATA 0x00008000 /* management frame for transmission */ +#define WL_PSBW_EXIT_BW_UPD 0x00010000 /* BW being updated */ +#define WL_PSBW_DISA_NONE 0x80000000 /* reserved for internal use only */ + +/* DVFS */ +#define DVFS_CMND_VERSION_1 1 +/* Common IOVAR struct */ +typedef struct dvfs_cmnd_v1 { + uint16 ver; /* version of this structure */ + uint16 len; /* includes both fixed and variable data[] fields */ + uint32 subcmd; /* subcommand id */ + uint8 data[]; /* subcommand data */ +} dvfs_cmnd_v1_t; + +/* subcommand ids */ +enum { + DVFS_SUBCMD_ENABLE = 0, /* DVFS enable/disable, 1-byte data + * DVFS enable:1, disable:0 + */ + DVFS_SUBCMD_LDV = 1, /* DVFS force arm state to LDV, 1-byte data + * DVFS force LDV ON 1, LDV OFF 0 + */ + DVFS_SUBCMD_STATUS = 2, /* DVFS status, data[] contains dvfs_status */ + DVFS_SUBCMD_HIST = 3, /* DVFS history, data[] contains + * history of dvfs state change + */ + DVFS_SUBCMD_LAST +}; + +/* DVFS Status */ +/* current DVFS state request for ARM */ +#define DVFS_STATE_BIT_MASK 0x0Fu +#define DVFS_STATE_BIT_SHIFT 0u +/* Bit value for DVFS state request */ +#define DVFS_STATE_LDV 0u +#define DVFS_STATE_NDV 1u +/* current DVFS status */ +#define DVFS_STATUS_BIT_MASK 0xF0u +#define DVFS_STATUS_BIT_SHIFT 4u +/* Bit value for DVFS status */ +#define DVFS_STATUS_LDV 0u +#define DVFS_STATUS_NDV 1u +/* DVFS 
bits are for status, raw request and active request */ +/* 4387b0 supports only status bits for aux, main, and bt */ +/* 4387c0 supports all eight status and request bits */ +#define DVFS_BIT_AUX_MASK 0x0001u +#define DVFS_BIT_AUX_SHIFT 0u +#define DVFS_BIT_AUX_VAL(_val) (((_val) & DVFS_BIT_AUX_MASK) \ + >> DVFS_BIT_AUX_SHIFT) +#define DVFS_BIT_MAIN_MASK 0x0002u +#define DVFS_BIT_MAIN_SHIFT 1u +#define DVFS_BIT_MAIN_VAL(_val) (((_val) & DVFS_BIT_MAIN_MASK) \ + >> DVFS_BIT_MAIN_SHIFT) +#define DVFS_BIT_BT_MASK 0x0004u +#define DVFS_BIT_BT_SHIFT 2u +#define DVFS_BIT_BT_VAL(_val) (((_val) & DVFS_BIT_BT_MASK) \ + >> DVFS_BIT_BT_SHIFT) +#define DVFS_BIT_CHIPC_MASK 0x0008u +#define DVFS_BIT_CHIPC_SHIFT 3u +#define DVFS_BIT_CHIPC_VAL(_val) (((_val) & DVFS_BIT_CHIPC_MASK) \ + >> DVFS_BIT_CHIPC_SHIFT) +#define DVFS_BIT_PCIE_MASK 0x0010u +#define DVFS_BIT_PCIE_SHIFT 4u +#define DVFS_BIT_PCIE_VAL(_val) (((_val) & DVFS_BIT_PCIE_MASK) \ + >> DVFS_BIT_PCIE_SHIFT) +#define DVFS_BIT_ARM_MASK 0x0020u +#define DVFS_BIT_ARM_SHIFT 5u +#define DVFS_BIT_ARM_VAL(_val) (((_val) & DVFS_BIT_ARM_MASK) \ + >> DVFS_BIT_ARM_SHIFT) +#define DVFS_BIT_SCAN_MASK 0x0040u +#define DVFS_BIT_SCAN_SHIFT 6u +#define DVFS_BIT_SCAN_VAL(_val) (((_val) & DVFS_BIT_SCAN_MASK) \ + >> DVFS_BIT_SCAN_SHIFT) +#define DVFS_BIT_BTSCAN_MASK 0x0080u +#define DVFS_BIT_BTSCAN_SHIFT 7u +#define DVFS_BIT_BTSCAN_VAL(_val) (((_val) & DVFS_BIT_BTSCAN_MASK) \ + >> DVFS_BIT_BTSCAN_SHIFT) +#define DVFS_BIT_HWA_MASK 0x0100u +#define DVFS_BIT_HWA_SHIFT 8u +#define DVFS_BIT_HWA_VAL(_val) (((_val) & DVFS_BIT_HWA_MASK) \ + >> DVFS_BIT_HWA_SHIFT) +#define DVFS_BIT_SYSMEM_MASK 0x0200u +#define DVFS_BIT_SYSMEM_SHIFT 9u +#define DVFS_BIT_SYSMEM_VAL(_val) (((_val) & DVFS_BIT_SYSMEM_MASK) \ + >> DVFS_BIT_SYSMEM_SHIFT) +/* divide the reported voltage (in units of 10 mV) by this to get volts */ +#define DVFS_CONVERT_TO_VOLT 100u + +/* status version for 4387b0 */ +#define DVFS_STATUS_VERSION_1 1 +typedef struct dvfs_status_v1 { + uint16 version; /* version of dvfs_status */ + uint16 len; /* total length including all fixed fields */ + uint8 info; /* current dvfs state request and status */ + uint8 voltage; /* voltage (multiple of 10mV) */ + uint16 freq; /* arm clock frequency (in MHz) */ + uint32 state_change_count; /* total state (LDV/NDV) transition count */ + uint32 ldv_duration; /* total time (ms) in LDV */ + uint32 ndv_duration; /* total time (ms) in NDV */ + uint16 status; /* status bits */ + uint16 pad; /* word aligned for size */ +} dvfs_status_v1_t; +#define DVFS_STATUS_VER_1_LEN (sizeof(dvfs_status_v1_t)) +/* status version for 4387c0 */ +#define DVFS_STATUS_VERSION_2 2 +#define DVFS_STATUS_VERSION_3 3 +typedef struct dvfs_status_v2 { + uint16 version; /* version of dvfs_status */ + uint16 len; /* total length including all fixed fields */ + uint8 info; /* current dvfs state request and status */ + uint8 voltage; /* voltage (multiple of 10mV) */ + uint16 freq; /* arm clock frequency (in MHz) */ + uint32 state_change_count; /* total state (LDV/NDV) transition count */ + uint32 ldv_duration; /* total time (ms) in LDV */ + uint32 ndv_duration; /* total time (ms) in NDV */ + uint16 status; /* status bits */ + uint16 raw_request; /* raw request bits */ + uint16 active_request; /* active request bits */ + /* DVFS_STATUS_VERSION_3 for pmurev >= 40 */ + uint16 valid_cores; /* bitmap to indicate valid cores status */ +} dvfs_status_v2_t; +#define DVFS_STATUS_V2_VALID_CORES (0xFFu) +#define DVFS_STATUS_VER_3_LEN (sizeof(dvfs_status_v2_t)) +#define DVFS_STATUS_VER_2_LEN (DVFS_STATUS_VER_3_LEN - (sizeof(uint16)))
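+/*
+ * Illustrative decode, not part of the ABI, of dvfs_status_v2_t above:
+ * 'info' packs the current state request (low nibble) and status (high
+ * nibble), 'voltage' is in 10 mV units, and the per-core bits are pulled
+ * out with the DVFS_BIT_*_VAL() helpers.
+ */
+#if 0
+static void example_dvfs_decode(const dvfs_status_v2_t *st)
+{
+	uint8 req = (st->info & DVFS_STATE_BIT_MASK) >> DVFS_STATE_BIT_SHIFT;
+	uint8 cur = (st->info & DVFS_STATUS_BIT_MASK) >> DVFS_STATUS_BIT_SHIFT;
+	uint32 millivolts = (uint32)st->voltage * 10u;	/* 10 mV units */
+	uint16 main_core = DVFS_BIT_MAIN_VAL(st->status);
+
+	/* req/cur take DVFS_STATE_LDV/NDV and DVFS_STATUS_LDV/NDV values */
+	(void)req; (void)cur; (void)millivolts; (void)main_core;
+}
+#endif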
+ +/* DVFS_SUBCMD_HIST */ +#define DVFS_HIST_CMD_VERSION_1 1 +typedef struct dvfs_hist_cmd_v1 { + uint16 version; /* version of this structure */ + uint16 len; /* includes both fixed and variable data[] fields */ + uint8 data[]; /* subcommand data : array of dvfs_hist_v1_t */ +} dvfs_hist_cmd_v1_t; + +/* DVFS_SUBCMD_HIST data[] payload */ +typedef struct dvfs_hist_v1 { + uint8 old_state; /* old state */ + uint8 new_state; /* new state */ + uint16 reason; /* reason for state change */ + uint32 timestamp; /* timestamp of state change */ +} dvfs_hist_v1_t; + +/* Bits for DVFS state change reason */ +#define WL_DVFS_REASON_NOTPM 0x0001u /* Not PM */ +#define WL_DVFS_REASON_MPC 0x0002u /* MPC */ +#define WL_DVFS_REASON_TX_ACTIVE 0x0004u /* TX Active */ +#define WL_DVFS_REASON_DBGST_ACTIVE 0x0008u /* Power state active */ +#define WL_DVFS_REASON_DBGST_ASLEEP 0x0010u /* Power state asleep */ +#define WL_DVFS_REASON_LTR_ACTIVE 0x0020u /* LTR Active */ +#define WL_DVFS_REASON_HOST 0x0040u /* Host disabled */ +#define WL_DVFS_REASON_SCAN 0x0080u /* Scan */ +#define WL_DVFS_REASON_SLOTTED_BSS 0x0100u /* Slotted BSS */ +#define WL_DVFS_REASON_CHAN 0x0200u /* Channel Change */ +#define WL_DVFS_REASON_CAL 0x0400u /* CAL */ +#define WL_DVFS_REASON_ASSOC 0x0800u /* ASSOC */ +#define WL_DVFS_REASON_WD 0x1000u /* WD */ +#define WL_DVFS_REASON_SOFTAP 0x2000u /* SoftAP */ + +/* + * The join preference iovar value is an array of tuples. Each tuple has a one-byte type, + * a one-byte length, and a variable length value. The RSSI type tuple must be present + * in the array. + * + * Types are defined in the "join preference types" section. + * + * Length is the value size in octets. It is reserved for the WL_JOIN_PREF_WPA type tuple + * and must be set to zero. + * + * Values are defined below. + * + * 1. RSSI - 2 octets + * offset 0: reserved + * offset 1: reserved + * + * 2. WPA - 2 + 12 * n octets (n is the number of tuples defined below) + * offset 0: reserved + * offset 1: # of tuples + * offset 2: tuple 1 + * offset 14: tuple 2 + * ... + * offset 2 + 12 * (n - 1) octets: tuple n + * + * struct wpa_cfg_tuple { + * uint8 akm[DOT11_OUI_LEN+1]; akm suite + * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite + * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite + * }; + * + * The multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY. + * + * 3. BAND - 2 octets + * offset 0: reserved + * offset 1: see "band preference" and "band types" + * + * 4. 
BAND RSSI - 2 octets + * offset 0: band types + * offset 1: +ve RSSI boost value in dB + */ + +struct tsinfo_arg { + uint8 octets[3]; +}; + +#define RATE_CCK_1MBPS 0 +#define RATE_CCK_2MBPS 1 +#define RATE_CCK_5_5MBPS 2 +#define RATE_CCK_11MBPS 3 + +#define RATE_LEGACY_OFDM_6MBPS 0 +#define RATE_LEGACY_OFDM_9MBPS 1 +#define RATE_LEGACY_OFDM_12MBPS 2 +#define RATE_LEGACY_OFDM_18MBPS 3 +#define RATE_LEGACY_OFDM_24MBPS 4 +#define RATE_LEGACY_OFDM_36MBPS 5 +#define RATE_LEGACY_OFDM_48MBPS 6 +#define RATE_LEGACY_OFDM_54MBPS 7 + +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1 +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V1 1 +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V2 2 +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V3 3 + +typedef struct wl_bsstrans_rssi { + int8 rssi_2g; /**< RSSI in dBm for 2.4 GHz */ + int8 rssi_5g; /**< RSSI in dBm for 5 GHz, unused for CCK */ +} wl_bsstrans_rssi_t; + +#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */ + +/** RSSI to rate mapping, all 20 MHz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map_v3 { + uint16 ver; + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54 Mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT_ALL]; /* MCS0-11 */ + wl_bsstrans_rssi_t phy_ax[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_HE]; /* MCS0-11 */ + wl_bsstrans_rssi_t phy_be[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_EHT]; /* MCS0-13 */ +} wl_bsstrans_rssi_rate_map_v3_t; + +/** RSSI to rate mapping, all 20 MHz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map_v2 { + uint16 ver; + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54 Mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT_ALL]; /**< MCS0-11 */ + wl_bsstrans_rssi_t phy_ax[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_HE]; /**< MCS0-11 */ +} wl_bsstrans_rssi_rate_map_v2_t; + +/** RSSI to rate mapping, all 20 MHz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map_v1 { + uint16 ver; + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54 Mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */ +} wl_bsstrans_rssi_rate_map_v1_t; + +/** RSSI to rate mapping, all 20 MHz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map { + uint16 ver; + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54 Mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */ +} wl_bsstrans_rssi_rate_map_t; + +#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1 + +/** Configure number of scans allowed per throttle period */ +typedef struct wl_bsstrans_roamthrottle { + uint16 ver; + uint16 period; + uint16 scans_allowed; +} wl_bsstrans_roamthrottle_t; + +#define NFIFO 6 /**< # tx/rx fifopairs */ + +#ifndef NFIFO_EXT +#if 
defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED) +#ifdef WL_LLW +#define NFIFO_EXT 11 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare + 1 LLQ */ +#else +#define NFIFO_EXT 10 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare */ +#endif +#elif defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED) +#define NFIFO_EXT 10 +#else +#define NFIFO_EXT NFIFO +#endif /* BCM_AQM_DMA_DESC && !BCM_AQM_DMA_DESC_DISABLED */ +#endif /* NFIFO_EXT */ + +/* When new reason codes are added to the list, please also update wl_reinit_names */ +/* Reinit reason codes */ +enum { + WL_REINIT_RC_NONE = 0, + WL_REINIT_RC_PS_SYNC = 1, + WL_REINIT_RC_PSM_WD = 2, + WL_REINIT_RC_MAC_WAKE = 3, + WL_REINIT_RC_MAC_SUSPEND = 4, + WL_REINIT_RC_MAC_SPIN_WAIT = 5, + WL_REINIT_RC_AXI_BUS_ERROR = 6, + WL_REINIT_RC_DEVICE_REMOVED = 7, + WL_REINIT_RC_PCIE_FATAL_ERROR = 8, + WL_REINIT_RC_OL_FW_TRAP = 9, + WL_REINIT_RC_FIFO_ERR = 10, + WL_REINIT_RC_INV_TX_STATUS = 11, + WL_REINIT_RC_MQ_ERROR = 12, + WL_REINIT_RC_PHYTXERR_THRESH = 13, + WL_REINIT_RC_USER_FORCED = 14, + WL_REINIT_RC_FULL_RESET = 15, + WL_REINIT_RC_AP_BEACON = 16, + WL_REINIT_RC_PM_EXCESSED = 17, + WL_REINIT_RC_NO_CLK = 18, + WL_REINIT_RC_SW_ASSERT = 19, + WL_REINIT_RC_PSM_JMP0 = 20, + WL_REINIT_RC_PSM_RUN = 21, + WL_REINIT_RC_ENABLE_MAC = 22, + WL_REINIT_RC_SCAN_TIMEOUT = 23, + WL_REINIT_RC_JOIN_TIMEOUT = 24, + /* Below error codes are generated during D3 exit validation */ + WL_REINIT_RC_LINK_NOT_ACTIVE = 25, + WL_REINIT_RC_PCI_CFG_RD_FAIL = 26, + WL_REINIT_RC_INV_VEN_ID = 27, + WL_REINIT_RC_INV_DEV_ID = 28, + WL_REINIT_RC_INV_BAR0 = 29, + WL_REINIT_RC_INV_BAR2 = 30, + WL_REINIT_RC_AER_UC_FATAL = 31, + WL_REINIT_RC_AER_UC_NON_FATAL = 32, + WL_REINIT_RC_AER_CORR = 33, + WL_REINIT_RC_AER_DEV_STS = 34, + WL_REINIT_RC_PCIe_STS = 35, + WL_REINIT_RC_MMIO_RD_FAIL = 36, + WL_REINIT_RC_MMIO_RD_INVAL = 37, + WL_REINIT_RC_MMIO_ARM_MEM_RD_FAIL = 38, + WL_REINIT_RC_MMIO_ARM_MEM_INVAL = 39, + WL_REINIT_RC_SROM_LOAD_FAILED = 40, + WL_REINIT_RC_PHY_CRASH = 41, + WL_REINIT_TX_STALL = 42, + WL_REINIT_RC_TX_FLOW_CONTROL_BLOCKED = 43, + WL_REINIT_RC_RX_HC_FAIL = 44, + WL_REINIT_RC_RX_DMA_STALL = 45, + WL_REINIT_UTRACE_BUF_OVERLAP_SR = 46, + WL_REINIT_UTRACE_TPL_OUT_BOUNDS = 47, + WL_REINIT_UTRACE_TPL_OSET_STRT0 = 48, + WL_REINIT_RC_PHYTXERR = 49, + WL_REINIT_RC_PSM_FATAL_SUSP = 50, + WL_REINIT_RC_TX_FIFO_SUSP = 51, + WL_REINIT_RC_MAC_ENABLE = 52, + WL_REINIT_RC_SCAN_STALLED = 53, + WL_REINIT_RC_PHY_HC = 54, + WL_REINIT_RC_LAST, /* DO NOT use this any more, kept for legacy reasons */ + WL_REINIT_RC_RADIO_CRASH = 55, + WL_REINIT_RC_SUPPORTED_LAST /* Use for apps ONLY, DO NOT use this in wlc code. + * For wlc, use WL_REINIT_RC_VERSIONED_LAST + */ +}; + +#define WL_REINIT_RC_V2 (2u) +#define WL_REINIT_RC_LAST_V2 (WL_REINIT_RC_RADIO_CRASH) + +#define WL_REINIT_RC_INVALID 255 + +#define NREINITREASONCOUNT 8 +/* NREINITREASONCOUNT is 8 in other branches. + * Any change to this will break wl tool compatibility with other branches + * #define NREINITREASONCOUNT WL_REINIT_RC_LAST + */ +/* REINITRSNIDX is kept for legacy reasons. Use REINIT_RSN_IDX for new versioned structure */ +#define REINITRSNIDX(_x) (((_x) < WL_REINIT_RC_LAST) ? (_x) : 0) +#define REINIT_RSN_IDX(_x) (((_x) < WL_REINIT_RC_SUPPORTED_LAST) ? (_x) : 0) /* TBD: move + * this to src + */ +#define REINIT_RSN_IDX_V2(_x) (((_x) <= WL_REINIT_RC_LAST_V2) ? (_x) : 0)
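+/*
+ * Illustrative bucketing sketch, not part of the ABI: REINIT_RSN_IDX()
+ * above collapses out-of-range reason codes to slot 0, so a fixed-size
+ * histogram cannot be overrun by a reason code from newer firmware.
+ */
+#if 0
+static void example_count_reinit(uint32 *hist /* WL_REINIT_RC_SUPPORTED_LAST slots */,
+	uint32 reason)
+{
+	hist[REINIT_RSN_IDX(reason)]++;
+}
+#endif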
+ +#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */ +#define WL_CNT_VERSION_6 6 +#define WL_CNT_VERSION_7 7 +#define WL_CNT_VERSION_11 11 +#define WL_CNT_VERSION_XTLV 30 + +#define WL_COUNTERS_IOV_VERSION_1 1 +#define WL_SUBCNTR_IOV_VER WL_COUNTERS_IOV_VERSION_1 +/* The first two uint16 fields are version and length, so the offset of the first counter is 4 */ +#define FIRST_COUNTER_OFFSET 0x04 + +/* needed for now due to src/wl/ndis automerge to other branches, e.g. BISON */ +#define WLC_WITH_XTLV_CNT + +/* Number of xtlv infos required to calculate subcounter offsets */ +#define WL_CNT_XTLV_ID_NUM 12 +#define WL_TLV_IOV_VER 1 + +/** + * tlv IDs uniquely identify the counter component + * packed into the wl_cmd_t container + */ +enum wl_cnt_xtlv_id { + WL_CNT_XTLV_SLICE_IDX = 0x1, /**< Slice index */ + WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */ + WL_CNT_XTLV_WLC_RINIT_RSN = 0x101, /**< WLC layer reinitreason extension: LEGACY */ + WL_CNT_XTLV_WLC_HE = 0x102, /* he counters */ + WL_CNT_XTLV_WLC_SECVLN = 0x103, /* security vulnerabilities counters */ + WL_CNT_XTLV_WLC_HE_OMI = 0x104, /* he omi counters */ + WL_CNT_XTLV_WLC_RINIT_RSN_V2 = 0x105, /**< WLC layer reinitreason extension */ + WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */ + WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800, /* corerev >= 64 UCODEX MACSTAT */ + WL_CNT_XTLV_GE80_UCODE_V1 = 0x900, /* corerev >= 80 UCODEX MACSTAT */ + WL_CNT_XTLV_GE80_TXFUNFL_UCODE_V1 = 0x1000 /* corerev >= 80 UCODEX MACSTAT */ +}; + +/* tlv IDs uniquely identify the periodic state component */ +enum wl_periodic_slice_state_xtlv_id { + WL_STATE_COMPACT_COUNTERS = 0x1, + WL_STATE_TXBF_COUNTERS = 0x2, + WL_STATE_COMPACT_HE_COUNTERS = 0x3 +}; + +/* Sub tlvs for chan_counters */ +enum wl_periodic_chan_xtlv_id { + WL_CHAN_GENERIC_COUNTERS = 0x1, + WL_CHAN_PERIODIC_COUNTERS = 0x2 +}; + +#ifdef WLC_CHAN_ECNTR_TEST +#define WL_CHAN_PERIODIC_CNTRS_VER_1 1 +typedef struct wlc_chan_periodic_cntr +{ + uint16 version; + uint16 pad; + uint32 rxstrt; +} wlc_chan_periodic_cntr_t; +#endif /* WLC_CHAN_ECNTR_TEST */ + +#define WL_CHANCNTR_HDR_VER_1 1 +typedef struct wlc_chan_cntr_hdr_v1 +{ + uint16 version; + uint16 pad; + chanspec_t chanspec; /* Don't add any fields above this */ + uint16 pad1; + uint32 total_time; + uint32 chan_entry_cnt; +} wlc_chan_cntr_hdr_v1_t; + +/* tlv IDs uniquely identify the periodic state component */ +enum wl_periodic_if_state_xtlv_id { + WL_STATE_IF_COMPACT_STATE = 0x1, + WL_STATE_IF_ADPS_STATE = 0x02, + WL_STATE_IF_ADPS_ENERGY_GAIN = 0x03 +}; + +enum wl_periodic_tdls_if_state_xtlv_id { + WL_STATE_IF_TDLS_STATE = 0x1 +}; + +#define TDMTX_CNT_VERSION_V1 1 +#define TDMTX_CNT_VERSION_V2 2 + +/* structure holding tdm counters that interface to iovar */ +typedef struct tdmtx_cnt_v1 { + uint16 ver; + uint16 length; /* length of this structure */ + uint16 wlc_idx; /* index for wlc */ + uint16 enabled; /* tdmtx is enabled on slice */ + uint32 tdmtx_txa_on; /* TXA on requests */ + uint32 tdmtx_txa_tmcnt; /* Total number of TXA timeouts */ + uint32 tdmtx_por_on; /* TXA POR requests */ + uint32 tdmtx_txpuen; /* Path enable requests */ + uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */ + uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */ + uint32 tdmtx_txdefer; /* Total number of times Tx was 
+/* structure holding counters that match exactly shm field sizes */
+typedef struct tdmtx_cnt_shm_v1 {
+	uint16 tdmtx_txa_on; /* TXA on requests */
+	uint16 tdmtx_tmcnt; /* Total number of TXA timeouts */
+	uint16 tdmtx_por_on; /* TXA POR requests */
+	uint16 tdmtx_txpuen; /* Path enable requests */
+	uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
+	uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
+	uint16 tdmtx_txdefer; /* Total number of times Tx was deferred by the slice */
+	uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
+	uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
+	uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */
+	uint16 tdmtx_txa_dur_h; /* Total time (high 16 bits) txa on */
+	uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */
+	uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */
+	uint16 tdmtx_txdefer_dur_l; /* Total time (low 16 bits) txdefer */
+	uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */
+} tdmtx_cnt_shm_v1_t;
+
+/* structure holding tdm counters that interface to iovar for version 2 */
+typedef struct tdmtx_cnt_v2 {
+	uint16 ver;
+	uint16 length; /* length of this structure */
+	uint16 wlc_idx; /* index for wlc */
+	uint16 enabled; /* tdmtx is enabled on slice */
+	uint32 tdmtx_txa_on; /* TXA on requests */
+	uint32 tdmtx_txa_tmcnt; /* Total number of TXA timeouts */
+	uint32 tdmtx_porhi_on; /* TXA PORHI requests */
+	uint32 tdmtx_porlo_on; /* TXA PORLO requests */
+	uint32 tdmtx_txpuen; /* Path enable requests */
+	uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
+	uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
+	uint32 tdmtx_txdefer; /* Total number of times Tx was deferred on the slice */
+	uint32 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
+	uint32 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
+	uint32 tdmtx_txa_dur; /* Total time txa on */
+	uint32 tdmtx_txpri_dur; /* Total time TXPri */
+	uint32 tdmtx_txdefer_dur; /* Total time txdefer */
+	/* TDMTX input fields */
+	uint32 tdmtx_txpri;
+	uint32 tdmtx_defer;
+	uint32 tdmtx_threshold;
+	uint32 tdmtx_rssi_threshold;
+	uint32 tdmtx_txpwrboff;
+	uint32 tdmtx_txpwrboff_dt;
+} tdmtx_cnt_v2_t;
+
+/* structure holding counters that match exactly shm field sizes */
+typedef struct tdmtx_cnt_shm_v2 {
+	uint16 tdmtx_txa_on; /* TXA on requests */
+	uint16 tdmtx_tmcnt; /* Total number of TXA timeouts */
+	uint16 tdmtx_porhi_on; /* TXA PORHI requests */
+	uint16 tdmtx_porlo_on; /* TXA PORLO requests */
+	uint16 tdmtx_txpuen; /* Path enable requests */
+	uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
+	uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
+	uint16 tdmtx_txdefer; /* Total number of times Tx was deferred by the slice */
+	uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
+	uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
+	uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */
+	uint16 tdmtx_txa_dur_h; /* Total time (high 16 bits) txa on */
+	uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */
+	uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */
+	uint16 tdmtx_txdefer_dur_l; /* Total time (low 16 bits) txdefer */
+	uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */
+} tdmtx_cnt_shm_v2_t;
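+/*
+ * Illustrative sketch (editor's example, not part of the driver): the
+ * *_dur_l/*_dur_h pairs above mirror 16-bit shared-memory words, so the
+ * 32-bit duration is recovered by pasting the halves together:
+ *
+ *	uint32 txa_dur = ((uint32)shm->tdmtx_txa_dur_h << 16) |
+ *	                 shm->tdmtx_txa_dur_l;
+ */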
+
+typedef struct wl_tdmtx_ioc {
+	uint16 id;	/* ID of the sub-command */
+	uint16 len;	/* total length of all data[] */
+	uint8 data[];	/* var len payload */
+} wl_tdmtx_ioc_t;
+
+/*
+ * iovar subcommand ids
+ */
+enum {
+	IOV_TDMTX_ENB = 1,
+	IOV_TDMTX_STATUS = 2,
+	IOV_TDMTX_TXPRI = 3,
+	IOV_TDMTX_DEFER = 4,
+	IOV_TDMTX_TXA = 5,
+	IOV_TDMTX_CFG = 6,
+	IOV_TDMTX_LAST
+};
+
+/* iovar structure for beacon simulator */
+typedef struct wl_bcnsim_ioc {
+	uint16 id;	/* ID of the sub-command */
+	uint16 len;	/* total length of all data[] */
+	uint8 data[];	/* var len payload */
+} wl_bcnsim_ioc_t;
+
+/* iovar subcmd ids */
+enum {
+	IOV_BCNSIM_ENB = 1,
+	IOV_BCNSIM_ERRMAX = 2,
+	IOV_BCNSIM_ERRDSTRB = 3,
+	IOV_BCNSIM_DRIFT = 4,
+	IOV_BCNSIM_RNDLYMAX = 5,
+	IOV_BCNSIM_RNDDLY_DSTRB = 6,
+	IOV_BCNSIM_CONSDLY = 7,
+	IOV_BCNSIM_OMT_PROB = 8,
+	IOV_BCNSIM_OMT_MIN_N = 9,
+	IOV_BCNSIM_OMT_MAX_N = 10,
+	IOV_BCNSIM_OMT_DSTRB = 11,
+	IOV_BCNSIM_TSF_JUMP = 12,
+	IOV_BCNSIM_PATTERN = 13,
+	IOV_BCNSIM_STATUS = 14,
+	IOV_BCNSIM_AUTH = 15,
+	IOV_BCNSIM_RNDDLY_PROB = 16,
+	IOV_BCNSIM_LAST
+};
+
+/* tlv id for beacon simulator */
+enum wl_bcnsim_xtlv_id {
+	WL_BCNSIM_XTLV_ENABLE = 0x1,
+	WL_BCNSIM_XTLV_ERRMAX = 0x2,
+	WL_BCNSIM_XTLV_ERRDSTRB = 0x3,
+	WL_BCNSIM_XTLV_DRIFT = 0x4,
+	WL_BCNSIM_XTLV_RNDLYMAX = 0x5,
+	WL_BCNSIM_XTLV_RNDDLY_DSTRB = 0x6,
+	WL_BCNSIM_XTLV_CONSDLY = 0x7,
+	WL_BCNSIM_XTLV_OMT_PROB = 0x8,
+	WL_BCNSIM_XTLV_OMT_MIN_N = 0x9,
+	WL_BCNSIM_XTLV_OMT_MAX_N = 0xa,
+	WL_BCNSIM_XTLV_OMT_DSTRB = 0xb,
+	WL_BCNSIM_XTLV_TSF_JUMP = 0xc,
+	WL_BCNSIM_XTLV_PATTERN = 0xd,
+	WL_BCNSIM_XTLV_STATUS = 0xe,
+	WL_BCNSIM_XTLV_AUTH = 0xf,
+	WL_BCNSIM_XTLV_RNDDLY_PROB = 0x10
+};
+
+/* structure to store different pattern params */
+typedef struct wlc_bcnsim_bcn_diff_v1 {
+	uint16 version;
+	uint16 dtim_cnt;
+	uint32 tx_delta;
+	uint32 ts_delta;
+} wlc_bcnsim_bcn_diff_v1_t;
+
+/* structure to store/pass pattern */
+typedef struct wlc_bcnsim_pattern_info_v1 {
+	uint16 version;
+	uint16 pattern_count;
+	uint16 current_pos;
+	wlc_bcnsim_bcn_diff_v1_t bcnsim_bcn_diff[];
+} wlc_bcnsim_pattern_info_v1_t;
+
+/* struct to store bcn sim status */
+typedef struct bcnsim_status_v1 {
+	uint16 ver;
+	uint16 length;		/* length of this structure */
+	uint32 rnd_delay_max;	/* random delay */
+	int32 const_delay;	/* cons delay */
+	int32 tsf_jump;		/* change tsf */
+	int16 drift;		/* add drift */
+	uint16 error_max;	/* max error */
+	uint8 error_dstrb;	/* error dstrb */
+	uint8 rnd_delay_dstrb;	/* rnd delay distr */
+	uint8 rnd_delay_prob;	/* random delay prob */
+	uint8 omit_prob;	/* control omit prob */
+	uint8 omit_min_n;	/* min omit */
+	uint8 omit_max_n;	/* max omit */
+	uint8 omit_dstrb;	/* omit dstrb % */
+	uint8 padding1;
+} bcnsim_status_v1_t;
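+/*
+ * Illustrative sketch (editor's example, not part of the driver): both
+ * wl_tdmtx_ioc_t and wl_bcnsim_ioc_t are id/len-prefixed containers, so a
+ * request is built by filling the header and appending the payload. The
+ * buffer name and payload value are hypothetical; total-length handling
+ * follows the usual iovar convention and may differ in the real code path.
+ *
+ *	uint8 buf[64];
+ *	wl_tdmtx_ioc_t *ioc = (wl_tdmtx_ioc_t *)buf;
+ *	uint32 enable = 1;
+ *	ioc->id = IOV_TDMTX_ENB;
+ *	ioc->len = sizeof(enable);
+ *	memcpy(ioc->data, &enable, sizeof(enable));
+ *	// hand buf to the tdmtx iovar with total length sizeof(*ioc) + ioc->len
+ */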
+
+/**
+ * The number of variables in wl macstat cnt struct.
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t)
+ */
+#define WL_CNT_MCST_VAR_NUM 64
+/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */
+#define WL_CNT_MCST_STRUCT_SZ ((uint32)sizeof(uint32) * WL_CNT_MCST_VAR_NUM)
+#define WL_CNT_REV80_MCST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge80mcst_v1_t))
+#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ \
+	((uint32)OFFSETOF(wl_cnt_ge80_txfunfl_v1_t, txfunfl))
+#define WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(fcnt) \
+	(WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ + (fcnt * sizeof(uint32)))
+#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_SZ (WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(NFIFO_EXT))
+
+#define WL_CNT_MCXST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge64mcxst_v1_t))
+
+#define WL_CNT_HE_STRUCT_SZ ((uint32)sizeof(wl_he_cnt_wlc_t))
+
+#define WL_CNT_SECVLN_STRUCT_SZ ((uint32)sizeof(wl_secvln_cnt_t))
+
+#define WL_CNT_HE_OMI_STRUCT_SZ ((uint32)sizeof(wl_he_omi_cnt_wlc_v1_t))
+#define INVALID_CNT_VAL (uint32)(-1)
+
+#define WL_XTLV_CNTBUF_MAX_SIZE ((uint32)(OFFSETOF(wl_cnt_info_t, data)) + \
+	(uint32)BCM_XTLV_HDR_SIZE + (uint32)sizeof(wl_cnt_wlc_t) + \
+	(uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \
+	(uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ)
+
+#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t))
+
+/** Top structure of counters IOVar buffer */
+typedef struct {
+	uint16 version;	/**< see definition of WL_CNT_T_VERSION */
+	uint16 datalen;	/**< length of data including all paddings. */
+	uint8 data[];	/**< variable length payload:
+			 * 1 or more bcm_xtlv_t type of tuples.
+			 * each tuple is padded to multiple of 4 bytes.
+			 * 'datalen' field of this structure includes all paddings.
+			 */
+} wl_cnt_info_t;
+
+/* Top structure of subcounters IOVar buffer
+ * Whenever we make any change in this structure,
+ * WL_SUBCNTR_IOV_VER should be updated accordingly.
+ * The structure definition should remain consistent b/w
+ * FW and wl/WLM app.
+ */
+typedef struct {
+	uint16 version; /* Version of IOVAR structure. Used for backward
+			 * compatibility in future. Whenever we make any
+			 * changes to this structure then the value of WL_SUBCNTR_IOV_VER
+			 * needs to be updated properly.
+			 */
+	uint16 length; /* length in bytes of this structure */
+	uint16 counters_version; /* see definition of WL_CNT_T_VERSION
+			 * wl app will send the version of counters
+			 * which is used to calculate the offset of counters.
+			 * It must match the version of counters FW is using,
+			 * else FW will return an error with its version of counters
+			 * set in this field.
+			 */
+	uint16 num_subcounters; /* Number of counter offsets passed by wl app to FW. */
+	uint32 data[1]; /* variable length payload:
+			 * Offsets to the counters will be passed to FW
+			 * through this data field. FW will return the value of counters
+			 * at the offsets passed by wl app in this field itself.
+			 */
+} wl_subcnt_info_t;
+
+/* Top structure of counters TLV version IOVar buffer
+ * The structure definition should remain consistent b/w
+ * FW and wl/WLM app.
+ */
+typedef struct {
+	uint16 version; /* Version of IOVAR structure. Added for backward
+			 * compatibility feature. If any changes are done,
+			 * WL_TLV_IOV_VER needs to be updated.
+			 */
+	uint16 length; /* total len in bytes of this structure + payload */
+	uint16 counters_version; /* See definition of WL_CNT_VERSION_XTLV
+			 * wl app will update the counter tlv version to be used
+			 * so as to calculate the offsets of supported TLVs.
+			 * If there is a mismatch in the version, FW will return an error.
+			 */
+	uint16 num_tlv; /* Max number of TLV info passed by FW to WL app
+			 * and vice-versa
+			 */
+	uint32 data[]; /* variable length payload:
+			 * This stores the tlv info as supported by F/W for the wl app.
+			 * This table is required to compute subcounter offsets at WLapp end.
+			 */
+} wl_cntr_tlv_info_t;
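+/*
+ * Illustrative sketch (editor's example, not part of the driver): requesting
+ * two subcounters by byte offset. Per the FIRST_COUNTER_OFFSET comment above,
+ * offsets are relative to the counters block that follows the version/length
+ * header, so the first uint32 counter sits at offset 4. The concrete offsets
+ * and the request-length arithmetic here are hypothetical.
+ *
+ *	wl_subcnt_info_t *req = ...;	// caller-provided buffer
+ *	req->version = WL_SUBCNTR_IOV_VER;
+ *	req->counters_version = WL_CNT_T_VERSION;	// must match what FW uses
+ *	req->num_subcounters = 2;
+ *	req->data[0] = FIRST_COUNTER_OFFSET;		// first counter
+ *	req->data[1] = FIRST_COUNTER_OFFSET + 4;	// the counter after it
+ *	// req->length is then set to cover the header plus both offsets
+ */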
+
+/** wlc layer counters */
+typedef struct {
+	/* transmit stat counters */
+	uint32 txframe; /**< tx data frames */
+	uint32 txbyte; /**< tx data bytes */
+	uint32 txretrans; /**< tx mac retransmits */
+	uint32 txerror; /**< tx data errors (derived: sum of others) */
+	uint32 txctl; /**< tx management frames */
+	uint32 txprshort; /**< tx short preamble frames */
+	uint32 txserr; /**< tx status errors */
+	uint32 txnobuf; /**< tx out of buffers errors */
+	uint32 txnoassoc; /**< tx discard because we're not associated */
+	uint32 txrunt; /**< tx runt frames */
+	uint32 txchit; /**< tx header cache hit (fastpath) */
+	uint32 txcmiss; /**< tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32 txuflo; /**< tx fifo underflows */
+	uint32 txphyerr; /**< tx phy errors (indicated in tx status) */
+	uint32 txphycrs; /**< PR8861/8963 counter */
+
+	/* receive stat counters */
+	uint32 rxframe; /**< rx data frames */
+	uint32 rxbyte; /**< rx data bytes */
+	uint32 rxerror; /**< rx data errors (derived: sum of others) */
+	uint32 rxctl; /**< rx management frames */
+	uint32 rxnobuf; /**< rx out of buffers errors */
+	uint32 rxnondata; /**< rx non data frames in the data channel errors */
+	uint32 rxbadds; /**< rx bad DS errors */
+	uint32 rxbadcm; /**< rx bad control or management frames */
+	uint32 rxfragerr; /**< rx fragmentation errors */
+	uint32 rxrunt; /**< rx runt frames */
+	uint32 rxgiant; /**< rx giant frames */
+	uint32 rxnoscb; /**< rx no scb error */
+	uint32 rxbadproto; /**< rx invalid frames */
+	uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */
+	uint32 rxbadda; /**< rx frames tossed for invalid da */
+	uint32 rxfilter; /**< rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32 rxoflo; /**< rx fifo overflow errors */
+	uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */
+
+	uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */
+	uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */
+	uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32 dmade; /**< tx/rx dma descriptor errors */
+	uint32 dmada; /**< tx/rx dma data errors */
+	uint32 dmape; /**< tx/rx dma descriptor protocol errors */
+	uint32 reset; /**< reset count */
+	uint32 tbtt; /**< cnts the TBTT int's */
+	uint32 txdmawar; /**< # occurrences of PR15420 workaround */
+	uint32 pkt_callback_reg_fail; /**< callbacks register failure */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc.
*/ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + uint32 rfdisable; /**< count of radio disables */ + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /**< hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< 
Number of TX BAR */
+	uint32 rxbar; /**< Number of RX BAR */
+	uint32 txpspoll; /**< Number of TX PS-poll */
+	uint32 rxpspoll; /**< Number of RX PS-poll */
+	uint32 txnull; /**< Number of TX NULL_DATA */
+	uint32 rxnull; /**< Number of RX NULL_DATA */
+	uint32 txqosnull; /**< Number of TX NULL_QoSDATA */
+	uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */
+	uint32 txassocreq; /**< Number of TX ASSOC request */
+	uint32 rxassocreq; /**< Number of RX ASSOC request */
+	uint32 txreassocreq; /**< Number of TX REASSOC request */
+	uint32 rxreassocreq; /**< Number of RX REASSOC request */
+	uint32 txdisassoc; /**< Number of TX DISASSOC */
+	uint32 rxdisassoc; /**< Number of RX DISASSOC */
+	uint32 txassocrsp; /**< Number of TX ASSOC response */
+	uint32 rxassocrsp; /**< Number of RX ASSOC response */
+	uint32 txreassocrsp; /**< Number of TX REASSOC response */
+	uint32 rxreassocrsp; /**< Number of RX REASSOC response */
+	uint32 txauth; /**< Number of TX AUTH */
+	uint32 rxauth; /**< Number of RX AUTH */
+	uint32 txdeauth; /**< Number of TX DEAUTH */
+	uint32 rxdeauth; /**< Number of RX DEAUTH */
+	uint32 txprobereq; /**< Number of TX probe request */
+	uint32 rxprobereq; /**< Number of RX probe request */
+	uint32 txprobersp; /**< Number of TX probe response */
+	uint32 rxprobersp; /**< Number of RX probe response */
+	uint32 txaction; /**< Number of TX action frame */
+	uint32 rxaction; /**< Number of RX action frame */
+	uint32 ampdu_wds; /**< Number of AMPDU watchdogs */
+	uint32 txlost; /**< Number of lost packets reported in txs */
+	uint32 txdatamcast; /**< Number of TX multicast data packets */
+	uint32 txdatabcast; /**< Number of TX broadcast data packets */
+	uint32 psmxwds; /**< Number of PSMx watchdogs */
+	uint32 rxback;
+	uint32 txback;
+	uint32 p2p_tbtt; /**< Number of P2P TBTT Events */
+	uint32 p2p_tbtt_miss; /**< Number of P2P TBTT Events Miss */
+	uint32 txqueue_start;
+	uint32 txqueue_end;
+	uint32 txbcast; /* Broadcast TransmittedFrameCount */
+	uint32 txdropped; /* tx dropped pkts */
+	uint32 rxbcast; /* BroadcastReceivedFrameCount */
+	uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */
+	uint32 txq_end_assoccb; /* forced txqueue_end callback fired in assoc */
+	uint32 tx_toss_cnt; /* number of tx packets tossed */
+	uint32 rx_toss_cnt; /* number of rx packets tossed */
+	uint32 last_tx_toss_rsn; /* reason the last tx pkt was tossed */
+	uint32 last_rx_toss_rsn; /* reason the last rx pkt was tossed */
+	uint32 pmk_badlen_cnt; /* number of invalid pmk len */
+	uint32 txbar_notx; /* number of TX BAR not sent (may be suppressed or muted) */
+	uint32 txbar_noack; /* number of TX BAR sent, but not acknowledged by peer */
+	uint32 rxfrag_agedout; /**< # of aged out rx fragmentation */
+	uint32 pmkid_mismatch_cnt; /* number of EAPOL msg1 PMKID mismatches */
+	uint32 txaction_vndr_attempt; /* Number of VS AFs scheduled successfully for Tx */
+	uint32 txaction_vndr_fail; /* Number of VS AFs not sent or not acked */
+	uint32 rxnofrag; /* # of nobuf failure due to no pkt availability */
+	uint32 rxnocmplid; /* # of nobuf failure due to rxcmplid non-availability */
+	uint32 rxnohaddr; /* # of nobuf failure due to host address non-availability */
+
+	/* Do not remove or rename in the middle of this struct.
+	 * All counter variables have to be of uint32.
+ */ +} wl_cnt_wlc_t; + +/* he counters Version 1 */ +#define HE_COUNTERS_V1 (1) +typedef struct wl_he_cnt_wlc_v1 { + uint32 he_rxtrig_myaid; + uint32 he_rxtrig_rand; + uint32 he_colormiss_cnt; + uint32 he_txmampdu; + uint32 he_txmtid_back; + uint32 he_rxmtid_back; + uint32 he_rxmsta_back; + uint32 he_txfrag; + uint32 he_rxdefrag; + uint32 he_txtrig; + uint32 he_rxtrig_basic; + uint32 he_rxtrig_murts; + uint32 he_rxtrig_bsrp; + uint32 he_rxdlmu; + uint32 he_physu_rx; + uint32 he_phyru_rx; + uint32 he_txtbppdu; +} wl_he_cnt_wlc_v1_t; + +/* he counters Version 2 */ +#define HE_COUNTERS_V2 (2) +typedef struct wl_he_cnt_wlc_v2 { + uint16 version; + uint16 len; + uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */ + uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */ + uint32 he_colormiss_cnt; /**< for bss color mismatch cases */ + uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */ + uint32 he_txmtid_back; /**< for multi-TID BACK transmission */ + uint32 he_rxmtid_back; /**< reception of multi-TID BACK */ + uint32 he_rxmsta_back; /**< reception of multi-STA BACK */ + uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */ + uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */ + uint32 he_txtrig; /**< transmission of trigger frames */ + uint32 he_rxtrig_basic; /**< reception of basic trigger frame */ + uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */ + uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */ + uint32 he_rxdlmu; /**< reception of DL MU PPDU */ + uint32 he_physu_rx; /**< reception of SU frame */ + uint32 he_phyru_rx; /**< reception of RU frame */ + uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */ + uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */ +} wl_he_cnt_wlc_v2_t; + +/* he counters Version 3 */ +#define WL_RU_TYPE_MAX 6 +#define HE_COUNTERS_V3 (3) + +typedef struct wl_he_cnt_wlc_v3 { + uint16 version; + uint16 len; + uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */ + uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */ + uint32 he_colormiss_cnt; /**< for bss color mismatch cases */ + uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */ + uint32 he_txmtid_back; /**< for multi-TID BACK transmission */ + uint32 he_rxmtid_back; /**< reception of multi-TID BACK */ + uint32 he_rxmsta_back; /**< reception of multi-STA BACK */ + uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */ + uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */ + uint32 he_txtrig; /**< transmission of trigger frames */ + uint32 he_rxtrig_basic; /**< reception of basic trigger frame */ + uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */ + uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */ + uint32 he_rxhemuppdu_cnt; /**< rxing HE MU PPDU */ + uint32 he_physu_rx; /**< reception of SU frame */ + uint32 he_phyru_rx; /**< reception of RU frame */ + uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */ + uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */ + uint32 he_rxhesuppdu_cnt; /**< rxing SU PPDU */ + uint32 he_rxhesureppdu_cnt; /**< rxing Range Extension(RE) SU PPDU */ + uint32 he_null_zero_agg; /**< null AMPDU's transmitted in response to basic trigger + * because of zero aggregation + */ + uint32 he_null_bsrp_rsp; /**< null AMPDU's txed in response to BSR poll */ + uint32 
he_null_fifo_empty; /**< null AMPDU's in response to basic trigger
+				    * because of no frames in fifo's
+				    */
+	uint32 he_myAID_cnt;
+	uint32 he_rxtrig_bfm_cnt;
+	uint32 he_rxtrig_mubar;
+	uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */
+	uint32 txheru[WL_RU_TYPE_MAX];
+	uint32 he_mgmt_tbppdu;
+	uint32 he_cs_req_tx_cancel;
+	uint32 he_wrong_nss;
+	uint32 he_trig_unsupp_rate;
+	uint32 he_rxtrig_nfrp;
+	uint32 he_rxtrig_bqrp;
+	uint32 he_rxtrig_gcrmubar;
+} wl_he_cnt_wlc_v3_t;
+
+/* he counters Version 4 */
+#define HE_COUNTERS_V4 (4)
+typedef struct wl_he_cnt_wlc_v4 {
+	uint16 version;
+	uint16 len;
+	uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
+	uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
+	uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
+	uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
+	uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
+	uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
+	uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
+	uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
+	uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
+	uint32 he_txtrig; /**< transmission of trigger frames */
+	uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
+	uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
+	uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
+	uint32 he_rxtsrt_hemuppdu_cnt; /**< rxing HE MU PPDU */
+	uint32 he_physu_rx; /**< reception of SU frame */
+	uint32 he_phyru_rx; /**< reception of RU frame */
+	uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
+	uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */
+	uint32 he_rxstrt_hesuppdu_cnt; /**< rxing SU PPDU */
+	uint32 he_rxstrt_hesureppdu_cnt; /**< rxing Range Extension(RE) SU PPDU */
+	uint32 he_null_zero_agg; /**< null AMPDU's transmitted in response to basic trigger
+				  * because of zero aggregation
+				  */
+	uint32 he_null_bsrp_rsp; /**< null AMPDU's txed in response to BSR poll */
+	uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger
+				    * because of no frames in fifo's
+				    */
+	uint32 he_myAID_cnt;
+	uint32 he_rxtrig_bfm_cnt;
+	uint32 he_rxtrig_mubar;
+	uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */
+	uint32 txheru[WL_RU_TYPE_MAX];
+	uint32 he_mgmt_tbppdu;
+	uint32 he_cs_req_tx_cancel;
+	uint32 he_wrong_nss;
+	uint32 he_trig_unsupp_rate;
+	uint32 he_rxtrig_nfrp;
+	uint32 he_rxtrig_bqrp;
+	uint32 he_rxtrig_gcrmubar;
+	uint32 he_rxtrig_basic_htpack; /**< triggers received with HTP ack policy */
+	uint32 he_rxtrig_ed_cncl; /**< count of cancelled packets
+				   * because of cs_req in trigger frame
+				   */
+	uint32 he_rxtrig_suppr_null_tbppdu; /**< count of null frames sent because of
+					     * suppression scenarios
+					     */
+	uint32 he_ulmu_disable; /**< number of UL MU disable scenarios handled in ucode */
+	uint32 he_ulmu_data_disable; /**< number of UL MU data disable scenarios
+				      * handled in ucode
+				      */
+} wl_he_cnt_wlc_v4_t;
+
+/** MACXSTAT counters for ucodex (corerev >= 64) */
+typedef struct {
+	uint32 macxsusp;
+	uint32 m2vmsg;
+	uint32 v2mmsg;
+	uint32 mboxout;
+	uint32 musnd;
+	uint32 sfb2v;
+} wl_cnt_ge64mcxst_v1_t;
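+/*
+ * Illustrative sketch (editor's example, not part of the driver): when
+ * parsing a counters response, the MACSTAT xtlv id announces which layout
+ * follows, so a parser typically validates the xtlv length against the
+ * matching struct before casting. The mapping follows the wl_cnt_xtlv_id
+ * comments earlier in this header; error handling is elided.
+ *
+ *	switch (xtlv_id) {
+ *	case WL_CNT_XTLV_LT40_UCODE_V1:		// corerev < 40
+ *		ok = (xtlv_len >= sizeof(wl_cnt_lt40mcst_v1_t)); break;
+ *	case WL_CNT_XTLV_GE40_UCODE_V1:		// corerev >= 40
+ *		ok = (xtlv_len >= sizeof(wl_cnt_ge40mcst_v1_t)); break;
+ *	case WL_CNT_XTLV_GE64_UCODEX_V1:	// corerev >= 64, ucodex
+ *		ok = (xtlv_len >= sizeof(wl_cnt_ge64mcxst_v1_t)); break;
+ *	default:
+ *		ok = FALSE;			// unhandled in this sketch
+ *	}
+ */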
+
+/** MACSTAT counters for ucode (corerev >= 40) */
+typedef struct {
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+			  * Control Management (includes retransmissions)
+			  */
+	uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+	uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+	uint32 txackfrm; /**< number of ACK frames sent out */
+	uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
+	uint32 txbcnfrm; /**< beacons transmitted */
+	uint32 txfunfl[6]; /**< per-fifo tx underflows */
+	uint32 txampdu; /**< number of AMPDUs transmitted */
+	uint32 txmpdu; /**< number of MPDUs transmitted */
+	uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+			   * or BCN)
+			   */
+	uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+			    * driver enqueued frames
+			    */
+	uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+	uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+	uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+	uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+	uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+	uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+	uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+	uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+	uint32 rxstrt; /**< Number of received frames with a good PLCP
+			* (i.e. passing parity check)
+			*/
+	uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+	uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+	uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+	uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+	uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+	uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+	uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
+	uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+	uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+	uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+	uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+	uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+	uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */
+	uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC
+			    * (unlikely to see these)
+			    */
+	uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+	uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+			       * other BSS (WDS FRAME)
+			       */
+	uint32 rxbeaconobss; /**< beacons received from other BSS */
+	uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+			    * expecting a response
+			    */
+	uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
+	uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+	uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */
+	uint32 rxhlovfl; /**< number of length / header fifo overflows */
+	uint32 missbcn_dbg; /**< number of beacon missed to receive */
+	uint32 pmqovfl; /**< number of PMQ
overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + /* All counter variables have to be of uint32. */ +} wl_cnt_ge40mcst_v1_t; + +/** MACSTAT counters for ucode (corerev < 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 dbgoff46; /**< BTCX protection failure count, + * getting RX antenna in PHY DEBUG, + * PR84273 timeout count + */ + uint32 dbgoff47; /**< BTCX preemption failure count, + * getting RX antenna in PHY DEBUG, + * PR84273 reset CCA count, + * RATEENGDBG + */ + uint32 dbgoff48; /**< Used for counting txstatus queue overflow (corerev <= 4) */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 phywatch; /**< number of phywatchdog to kill any pending transmissions. + * (PR 38187 corerev == 11) + */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + /* All counter variables have to be of uint32. 
*/ +} wl_cnt_lt40mcst_v1_t; + +/** MACSTAT counters for ucode (corerev >= 80) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + /* Start of PSM2HOST stats(72) block */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 missbcn_dbg; /**< number of beacon missed to receive */ + uint32 pmqovfl; /**< 
number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 rxtrig_myaid; /* New counters added in corerev 80 */ + uint32 rxtrig_rand; + uint32 goodfcs; + uint32 colormiss; + uint32 txmampdu; + uint32 rxmtidback; + uint32 rxmstaback; + uint32 txfrag; + /* End of PSM2HOST stats block */ + /* start of rxerror overflow counter(24) block which are modified/added in corerev 80 */ + uint32 phyovfl; + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */ + uint32 lenfovfl; + uint32 weppeof; + uint32 badplcp; + uint32 msduthresh; + uint32 strmeof; + uint32 stsfifofull; + uint32 stsfifoerr; + uint32 PAD[6]; + uint32 rxerr_stat; + uint32 ctx_fifo_full; + uint32 PAD0[9]; + uint32 ctmode_ufc_cnt; + uint32 PAD1[28]; /* PAD added for counter elements to be added soon */ +} wl_cnt_ge80mcst_v1_t; + +typedef struct { + uint32 fifocount; + uint32 txfunfl[]; +} wl_cnt_ge80_txfunfl_v1_t; + +/** MACSTAT counters for "wl counter" version <= 10 */ +/* With ucode before its macstat cnts cleaned up */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 PAD0; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 PAD1; + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< number 
of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + /* All counter variables have to be of uint32. */ +} wl_cnt_v_le10_mcst_t; + +#define MAX_RX_FIFO 3 +#define WL_RXFIFO_CNT_VERSION 1 /* current version of wl_rxfifo_cnt_t */ +typedef struct { + /* Counters for frames received from rx fifos */ + uint16 version; + uint16 length; /* length of entire structure */ + uint32 rxf_data[MAX_RX_FIFO]; /* data frames from rx fifo */ + uint32 rxf_mgmtctl[MAX_RX_FIFO]; /* mgmt/ctl frames from rx fifo */ +} wl_rxfifo_cnt_t; + +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; /**< PR8861/8963 counter */ + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + 
uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; /**< # occurrences of PR15420 workaround */ + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< Number of PMQ overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 
fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /**< hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; /**< num of received packets with retry bit on */ + uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */ + uint32 rxnodelim; /**< macstat cnt only valid in ver 11. + * number of occasions that no valid delimiter is detected + * by ampdu parser. + */ + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + uint32 ampdu_wds; /**< Number of AMPDU watchdogs */ + uint32 txlost; /**< Number of lost packets reported in txs */ + uint32 txdatamcast; /**< Number of TX multicast data packets */ + uint32 txdatabcast; /**< Number of TX broadcast 
data packets */ + uint32 txbcast; /* Broadcast TransmittedFrameCount */ + uint32 txdropped; /* tx dropped pkts */ + uint32 rxbcast; /* BroadcastReceivedFrameCount */ + uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */ + + /* This structure is deprecated and used only for ver <= 11. + * All counter variables have to be of uint32. + */ +} wl_cnt_ver_11_t; + +typedef struct { + uint16 version; /* see definition of WL_CNT_T_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /* tx data frames */ + uint32 txbyte; /* tx data bytes */ + uint32 txretrans; /* tx mac retransmits */ + uint32 txerror; /* tx data errors (derived: sum of others) */ + uint32 txctl; /* tx management frames */ + uint32 txprshort; /* tx short preamble frames */ + uint32 txserr; /* tx status errors */ + uint32 txnobuf; /* tx out of buffers errors */ + uint32 txnoassoc; /* tx discard because we're not associated */ + uint32 txrunt; /* tx runt frames */ + uint32 txchit; /* tx header cache hit (fastpath) */ + uint32 txcmiss; /* tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /* tx fifo underflows */ + uint32 txphyerr; /* tx phy errors (indicated in tx status) */ + uint32 txphycrs; /* PR8861/8963 counter */ + + /* receive stat counters */ + uint32 rxframe; /* rx data frames */ + uint32 rxbyte; /* rx data bytes */ + uint32 rxerror; /* rx data errors (derived: sum of others) */ + uint32 rxctl; /* rx management frames */ + uint32 rxnobuf; /* rx out of buffers errors */ + uint32 rxnondata; /* rx non data frames in the data channel errors */ + uint32 rxbadds; /* rx bad DS errors */ + uint32 rxbadcm; /* rx bad control or management frames */ + uint32 rxfragerr; /* rx fragmentation errors */ + uint32 rxrunt; /* rx runt frames */ + uint32 rxgiant; /* rx giant frames */ + uint32 rxnoscb; /* rx no scb error */ + uint32 rxbadproto; /* rx invalid frames */ + uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ + uint32 rxbadda; /* rx frames tossed for invalid da */ + uint32 rxfilter; /* rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /* rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /* tx/rx dma descriptor errors */ + uint32 dmada; /* tx/rx dma data errors */ + uint32 dmape; /* tx/rx dma descriptor protocol errors */ + uint32 reset; /* reset count */ + uint32 tbtt; /* cnts the TBTT int's */ + uint32 txdmawar; /* # occurrences of PR15420 workaround */ + uint32 pkt_callback_reg_fail; /* callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /* total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /* number of RTS sent out by the MAC */ + uint32 txctsfrm; /* number of CTS sent out by the MAC */ + uint32 txackfrm; /* number of ACK frames sent out */ + uint32 txdnlfrm; /* Not used */ + uint32 txbcnfrm; /* beacons transmitted */ + uint32 txfunfl[8]; /* per-fifo tx underflows */ + uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /* parity check of the PLCP header failed */ + uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /* Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /* beacons received from member of BSS */ + uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /* beacons received from other BSS */ + uint32 rxrsptmout; /* Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /* Number of PMQ overflows */ + uint32 rxcgprqfrm; /* Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /* Tx Probe Response Fail. 
AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /* obsolete */ + uint32 frmscons; /* obsolete */ + uint32 txnack; /* obsolete */ + uint32 txglitch_nack; /* obsolete */ + uint32 txburst; /* obsolete */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /* TKIPReplays */ + uint32 ccmpfmterr; /* CCMPFormatErrors */ + uint32 ccmpreplay; /* CCMPReplays */ + uint32 ccmpundec; /* CCMPDecryptErrors */ + uint32 fourwayfail; /* FourWayHandshakeFailures */ + uint32 wepundec; /* dot11WEPUndecryptableCount */ + uint32 wepicverr; /* dot11WEPICVErrorCount */ + uint32 decsuccess; /* DecryptSuccessCount */ + uint32 tkipicverr; /* TKIPICVErrorCount */ + uint32 wepexcluded; /* dot11WEPExcludedCount */ + + uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ + uint32 psmwds; /* Count PSM watchdogs */ + uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /* PRQ entries read in */ + uint32 prq_undirected_entries; /* which were bcast bss & ssid */ + uint32 prq_bad_entries; /* which could not be translated to info */ + uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /* count of radio disables */ + uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ + + uint32 txexptime; /* Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /* count for sgi transmit */ + uint32 rxmpdu_sgi; /* count for sgi received */ + uint32 txmpdu_stbc; /* count for stbc transmit */ + uint32 rxmpdu_stbc; /* count for stbc received */ + + uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /* TKIPReplays */ + uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /* CCMPReplays */ + uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ + uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /* DecryptSuccessCount */ + uint32 tkipicverr_mcst; /* TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ + + uint32 dma_hang; /* count for dma hang */ + uint32 rxrtry; /* number of packets with retry bit set to 1 */ +} wl_cnt_ver_7_t;
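The fixed-layout counter structures (wl_cnt_ver_11_t above, wl_cnt_ver_7_t here, and wl_cnt_ver_6_t below) differ only in their field sets; all begin with the same uint16 version/length preamble. As a minimal host-side sketch (not part of this patch), a reader of a "counters" iovar buffer might validate that preamble before casting. The function name and dispatch are illustrative assumptions; BCME_* codes are the driver's bcmerror.h values, and byte-order conversion is omitted:

static int wl_parse_counters(const void *buf, uint len)
{
	uint16 version, length;

	if (len < 2 * sizeof(uint16))
		return BCME_BUFTOOSHORT;
	memcpy(&version, buf, sizeof(version));
	memcpy(&length, (const uint8 *)buf + sizeof(version), sizeof(length));
	if (length > len)
		return BCME_BUFTOOSHORT;

	switch (version) {
	case 6:  /* wl_cnt_ver_6_t layout (defined below) */
	case 7:  /* wl_cnt_ver_7_t layout above */
	case 11: /* deprecated wl_cnt_ver_11_t layout */
		/* safe to cast buf to the matching struct here */
		return BCME_OK;
	default:
		return BCME_VERSION; /* newer report formats not handled in this sketch */
	}
}

+ +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs;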
/**< PR8861/8963 counter */ + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; /**< # occurrences of PR15420 workaround */ + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /**< # of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /**< # of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< # of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< # of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< # of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< # of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< Number of PMQ overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< Number of NACKS received (Afterburner) */ + uint32 frmscons; /**< Number of frames completed without transmission because of an + * Afterburner re-queue + */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. 
*/ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxdrop20s; /**< drop secondary cnt */ + /* All counter variables have to be of uint32. 
*/ +} wl_cnt_ver_6_t; + +#define WL_DELTA_STATS_T_VERSION 2 /**< current version of wl_delta_stats_t struct */ + +typedef struct { + uint16 version; /**< see definition of WL_DELTA_STATS_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txfail; /**< tx failures */ + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + /* phy stats */ + uint32 rxbadplcp; + uint32 rxcrsglitch; + uint32 bphy_rxcrsglitch; + uint32 bphy_badplcp; + + uint32 slice_index; /**< Slice for which stats are reported */ + +} wl_delta_stats_t; + +/* Partial statistics counter report */ +#define WL_CNT_CTL_MGT_FRAMES 0 + +typedef struct { + uint16 type; + uint16 len; + + /* detailed control/management frames */ + uint32 txnull; + uint32 rxnull; + uint32 txqosnull; + uint32 rxqosnull; + uint32 txassocreq; + uint32 rxassocreq; + uint32 txreassocreq; + uint32 rxreassocreq; + uint32 txdisassoc; + uint32 rxdisassoc; + uint32 txassocrsp; + uint32 rxassocrsp; + uint32 txreassocrsp; + uint32 rxreassocrsp; + uint32 txauth; + uint32 rxauth; + uint32 txdeauth; + uint32 rxdeauth; + uint32 txprobereq; + uint32 rxprobereq; + uint32 txprobersp; + uint32 rxprobersp; + uint32 txaction; + uint32 rxaction; + uint32 txrts; + uint32 rxrts; + uint32 txcts; + uint32 rxcts; + uint32 txack; + uint32 rxack; + uint32 txbar; + uint32 rxbar; + uint32 txback; + uint32 rxback; + uint32 txpspoll; + uint32 rxpspoll; +} wl_ctl_mgt_cnt_t; + +typedef struct { + uint32 packets; + uint32 bytes; +} wl_traffic_stats_t; + +typedef struct { + uint16 version; /**< see definition of WL_WME_CNT_VERSION */ + uint16 length; /**< length of entire structure */ + + wl_traffic_stats_t tx[AC_COUNT]; /**< Packets transmitted */ + wl_traffic_stats_t tx_failed[AC_COUNT]; /**< Packets dropped or failed to transmit */ + wl_traffic_stats_t rx[AC_COUNT]; /**< Packets received */ + wl_traffic_stats_t rx_failed[AC_COUNT]; /**< Packets failed to receive */ + + wl_traffic_stats_t forward[AC_COUNT]; /**< Packets forwarded by AP */ + + wl_traffic_stats_t tx_expired[AC_COUNT]; /**< packets dropped due to lifetime expiry */ + +} wl_wme_cnt_t;
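Since wl_wme_cnt_t carries one wl_traffic_stats_t entry per access category, a reader typically walks the AC_COUNT-sized arrays after the usual version/length check. A short illustrative helper (not part of the patch; the function name is an assumption):

static uint32 wl_wme_total_tx_pkts(const wl_wme_cnt_t *cnt)
{
	uint32 i, total = 0;

	for (i = 0; i < AC_COUNT; i++)
		total += cnt->tx[i].packets; /* per-AC transmitted packet count */
	return total;
}

The same pattern applies to the tx_failed, rx, rx_failed, forward, and tx_expired arrays.

+ +struct wl_msglevel2 { + uint32 low; + uint32 high; +}; + +/* A versioned structure for setting and retrieving debug message levels.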
*/ +#define WL_MSGLEVEL_STRUCT_VERSION_1 1 + +typedef struct wl_msglevel_v1 { + uint16 version; + uint16 length; + uint32 msglevel1; + uint32 msglevel2; + uint32 msglevel3; + /* add another uint32 when full */ +} wl_msglevel_v1_t; + +#define WL_ICMP_IPV6_CFG_VERSION 1 +#define WL_ICMP_IPV6_CLEAR_ALL (1 << 0) + +typedef struct wl_icmp_ipv6_cfg { + uint16 version; + uint16 length; + uint16 fixed_length; + uint16 flags; + uint32 num_ipv6; + /* num_ipv6 to follow */ + struct ipv6_addr host_ipv6[]; +} wl_icmp_ipv6_cfg_t; + +#define WL_ICMP_CFG_IPV6_FIXED_LEN OFFSETOF(wl_icmp_ipv6_cfg_t, host_ipv6) +#define WL_ICMP_CFG_IPV6_LEN(count) (WL_ICMP_CFG_IPV6_FIXED_LEN + \ + ((count) * sizeof(struct ipv6_addr))) + +typedef struct wl_mkeep_alive_pkt { + uint16 version; /* Version for mkeep_alive */ + uint16 length; /* length of fixed parameters in the structure */ + uint32 period_msec; /* high bit on means immediate send */ + uint16 len_bytes; + uint8 keep_alive_id; /* 0 - 3 for N = 4 */ + uint8 data[1]; +} wl_mkeep_alive_pkt_t; + +#define WL_MKEEP_ALIVE_VERSION 1 +#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) +/* 1/2 second precision since idle time is a seconds counter anyway */ +#define WL_MKEEP_ALIVE_PRECISION 500 +#define WL_MKEEP_ALIVE_PERIOD_MASK 0x7FFFFFFF +#define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000
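Per the masks above, period_msec packs an "immediate send" flag into its top bit while the low 31 bits hold the period in milliseconds. A minimal sketch of filling the fixed part of the iovar buffer (not from this patch; htod16/htod32 byte-order conversion is omitted and the id/payload values are placeholders):

static void wl_fill_mkeep_alive(wl_mkeep_alive_pkt_t *pkt, uint32 period_msec, bool immediate)
{
	pkt->version = WL_MKEEP_ALIVE_VERSION;
	pkt->length = WL_MKEEP_ALIVE_FIXED_LEN;
	pkt->period_msec = (period_msec & WL_MKEEP_ALIVE_PERIOD_MASK) |
		(immediate ? WL_MKEEP_ALIVE_IMMEDIATE : 0);
	pkt->len_bytes = 0;      /* no keep-alive payload in this sketch */
	pkt->keep_alive_id = 0;  /* 0 - 3 for N = 4 */
}

+ +typedef struct wl_mkeep_alive_hist_info_v1 { + uint32 first_pktsend_ts; /**< timestamp(ms): packet was sent */ + uint32 first_txs_ts; /**< timestamp(ms): received the first txstatus */ + uint32 last_retry_txs_ts; /**< timestamp(ms): received the last txstatus */ + uint32 first_retry_ts; /**< timestamp(ms): resent the packet first time */ + uint32 last_retry_ts; /**< timestamp(ms): resent the packet last time */ + uint32 first_txs; /**< txstatus when dongle received first time */ + uint32 last_retry_txs; /**< txstatus when dongle received last time */ + uint32 retry_cnt; /**< number of retries for the packet */ +} wl_mkeep_alive_hist_info_v1_t; + +typedef struct wl_mkeep_alive_hist_req_v1 { + uint16 version; /**< version of structure */ + uint16 length; /**< length of this structure */ + uint16 flags; /**< mkeepalive idx, operation codes */ + uint16 count; /**< number of results */ + uint16 max; /**< maximum number of history */ + wl_mkeep_alive_hist_info_v1_t info[]; /**< struct array of length count */ +} wl_mkeep_alive_hist_req_v1_t; + +/* version of the mkeep_alive_hist IOVAR */ +#define WL_MKEEP_ALIVE_HIST_REQ_VER_1 1u +/* Fixed length of wl_mkeep_alive_hist_req_v1_t */ +#define WL_MKEEP_ALIVE_HIST_REQ_FIXED_LEN_VER_1 OFFSETOF(wl_mkeep_alive_hist_req_v1_t, info) +/* Keepalive ID */ +#define WL_MKEEP_ALIVE_HIST_ID_MASK 0xFF00u /**< ID mask */ +#define WL_MKEEP_ALIVE_HIST_ID_SHIFT 8u /**< Offset of keepalive ID */ +/* OP Codes */ +#define WL_MKEEP_ALIVE_HIST_OP_MASK 0x00FFu /**< OP code mask */ +#define WL_MKEEP_ALIVE_HIST_RESET (0x1u << 0u) /**< Clear history of specified ID */ +#define WL_MKEEP_ALIVE_HIST_RESET_ALL (0x1u << 1u) /**< Clear all history */ + +/** TCP Keep-Alive conn struct */ +typedef struct wl_mtcpkeep_alive_conn_pkt { + struct ether_addr saddr; /**< src mac address */ + struct ether_addr daddr; /**< dst mac address */ + struct ipv4_addr sipaddr; /**< source IP addr */ + struct ipv4_addr dipaddr; /**< dest IP addr */ + uint16 sport; /**< src port */ + uint16 dport; /**< dest port */ + uint32 seq; /**< seq number */ + uint32 ack; /**< ACK number */ + uint16 tcpwin; /**< TCP window */ + uint16 PAD; +} wl_mtcpkeep_alive_conn_pkt_t; + +/** TCP Keep-Alive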
interval struct */ +typedef struct wl_mtcpkeep_alive_timers_pkt { + uint16 interval; /**< interval timer */ + uint16 retry_interval; /**< retry_interval timer */ + uint16 retry_count; /**< retry_count */ +} wl_mtcpkeep_alive_timers_pkt_t; + +typedef struct wake_info { + uint32 wake_reason; + uint32 wake_info_len; /**< size of packet */ + uint8 packet[]; +} wake_info_t; + +typedef struct wake_pkt { + uint32 wake_pkt_len; /**< size of packet */ + uint8 packet[]; +} wake_pkt_t; + +#define WL_MTCPKEEP_ALIVE_VERSION 1 + +/* #ifdef WLBA */ + +#define WLC_BA_CNT_VERSION 1 /**< current version of wlc_ba_cnt_t */ + +/** block ack related stats */ +typedef struct wlc_ba_cnt { + uint16 version; /**< WLC_BA_CNT_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txpdu; /**< pdus sent */ + uint32 txsdu; /**< sdus sent */ + uint32 txfc; /**< tx side flow controlled packets */ + uint32 txfci; /**< tx side flow control initiated */ + uint32 txretrans; /**< retransmitted pdus */ + uint32 txbatimer; /**< ba resend due to timer */ + uint32 txdrop; /**< dropped packets */ + uint32 txaddbareq; /**< addba req sent */ + uint32 txaddbaresp; /**< addba resp sent */ + uint32 txdelba; /**< delba sent */ + uint32 txba; /**< ba sent */ + uint32 txbar; /**< bar sent */ + uint32 txpad[4]; /**< future */ + + /* receive side counters */ + uint32 rxpdu; /**< pdus recd */ + uint32 rxqed; /**< pdus buffered before sending up */ + uint32 rxdup; /**< duplicate pdus */ + uint32 rxnobuf; /**< pdus discarded due to no buf */ + uint32 rxaddbareq; /**< addba req recd */ + uint32 rxaddbaresp; /**< addba resp recd */ + uint32 rxdelba; /**< delba recd */ + uint32 rxba; /**< ba recd */ + uint32 rxbar; /**< bar recd */ + uint32 rxinvba; /**< invalid ba recd */ + uint32 rxbaholes; /**< ba recd with holes */ + uint32 rxunexp; /**< unexpected packets */ + uint32 rxpad[4]; /**< future */ +} wlc_ba_cnt_t; +/* #endif WLBA */ + +/** structure for per-tid ampdu control */ +struct ampdu_tid_control { + uint8 tid; /* tid */ + uint8 enable; /* enable/disable */ +}; + +/** Support for ampdu_tx_ba_window_cfg */ +#define WL_AMPDU_TX_BA_WINDOW_CFG_VER_1 1u +#define WL_AMPDU_TX_BA_WINDOW_CFG_CUR_VER WL_AMPDU_TX_BA_WINDOW_CFG_VER_1 + +/* 16 bits Config (5 bits reserved) and Status (2 bits reserved) */ +#define WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_IDX 0u +#define WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_FSZ 9u + +#define WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_IDX 9u +#define WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_FSZ 2u + +#define WL_AMPDU_TX_BA_WINDOW_CFG_STATE_IDX 11u +#define WL_AMPDU_TX_BA_WINDOW_CFG_STATE_FSZ 3u + +#define WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_MASK \ + (MAXBITVAL(WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_FSZ) << \ + WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_IDX) + +#define WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_MASK \ + (MAXBITVAL(WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_FSZ) << \ + WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_IDX) + +#define WL_AMPDU_TX_BA_WINDOW_CFG_STATE_MASK \ + (MAXBITVAL(WL_AMPDU_TX_BA_WINDOW_CFG_STATE_FSZ) << \ + WL_AMPDU_TX_BA_WINDOW_CFG_STATE_IDX) + +/* code for config assoc_type */ +enum { + WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_AX = 0, + WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_UNIVERSAL = 1, + WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_MAX = 2 +}; + +/* ampdu_tx_ba_window_cfg states */ +enum { + WL_AMPDU_TX_BA_WINDOW_CFG_STATE_OFF = 0, + WL_AMPDU_TX_BA_WINDOW_CFG_STATE_NEGOTIATING = 1, + WL_AMPDU_TX_BA_WINDOW_CFG_STATE_NEGOTIATED = 2, + WL_AMPDU_TX_BA_WINDOW_CFG_STATE_MAX = 3 +}; + +/** structure for per 
tid ampdu BA window configuration */ +typedef struct wl_ampdu_tx_ba_window_cfg_v1 { + uint16 version; + uint16 length; /* length of the entire structure ver+len+payload. */ + /* tid bitmap: + * input (SET): select tid to configure. + * output (GET): TID that is currently configured. + */ + uint8 tidbmp; + uint8 flag; /* currently not used. Reserved. 32-bit alignment. */ + uint16 PAD; + + /* Per-tid configuration tuple (tx_ba_wsize, assoctype). Used for GET and SET. + * bit0 - bit8: User configured TX BA window size. Range {0, max. FW supported}. + * bit9 - bit10: User configured association type. 0: 11ax association, 1: universal. + * bit11 - bit15: Reserved. + */ + uint16 config[NUMPRIO]; + + /* Status of the per-tid configuration: GET only + * bit0 - bit8: Resulting TX BA window size. + * bit9 - bit10: Reserved. + * bit11 - bit13: TX BA configuration state. + * bit14 - bit15: Reserved. + */ + uint16 status[NUMPRIO]; +} wl_ampdu_tx_ba_window_cfg_v1_t;
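Each config[] entry packs the window size and association type using the IDX/MASK macros defined above. An illustrative packing helper (an assumption, not code from this patch):

static uint16 wl_ba_window_cfg_pack(uint16 ba_wsize, uint16 assoc_type)
{
	uint16 cfg = 0;

	/* bits 0-8: TX BA window size */
	cfg |= (ba_wsize << WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_IDX) &
		WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_MASK;
	/* bits 9-10: association type */
	cfg |= (assoc_type << WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_IDX) &
		WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_MASK;
	return cfg;
}

For example, wl_ba_window_cfg_pack(64, WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_UNIVERSAL) would produce one per-TID config word; reading the status[] words back uses the same masks plus the STATE field.

+ +/** struct for ampdu tx/rx aggregation control */ +struct ampdu_aggr { + int8 aggr_override; /**< aggr overridden by dongle. Not to be set by host. */ + uint16 conf_TID_bmap; /**< bitmap of TIDs to configure */ + uint16 enab_TID_bmap; /**< enable/disable per TID */ +}; + +/** structure for identifying ea/tid for sending addba/delba */ +struct ampdu_ea_tid { + struct ether_addr ea; /**< Station address */ + uint8 tid; /**< tid */ + uint8 initiator; /**< 0 is recipient, 1 is originator */ +}; + +/** structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */ +struct ampdu_retry_tid { + uint8 tid; /**< tid */ + uint8 retry; /**< retry value */ +}; + +#define BDD_FNAME_LEN 32 /**< Max length of friendly name */ +typedef struct bdd_fname { + uint8 len; /**< length of friendly name */ + uchar name[BDD_FNAME_LEN]; /**< friendly name */ +} bdd_fname_t; + +/* structure for addts arguments */ +/** For ioctls that take a list of TSPEC */ +struct tslist { + int32 count; /**< number of tspecs */ + struct tsinfo_arg tsinfo[]; /**< variable length array of tsinfo */ +}; + +/* WLTDLS */ +/** structure for tdls iovars */ +typedef struct tdls_iovar { + struct ether_addr ea; /**< Station address */ + uint8 mode; /**< mode: depends on iovar */ + uint8 PAD; + chanspec_t chanspec; + uint8 PAD[6]; +} tdls_iovar_t; + +#define TDLS_WFD_IE_SIZE 512 +/** structure for tdls wfd ie */ +typedef struct tdls_wfd_ie_iovar { + struct ether_addr ea; /**< Station address */ + uint8 mode; + uint8 PAD; + uint16 length; + uint8 data[TDLS_WFD_IE_SIZE]; +} tdls_wfd_ie_iovar_t; +/* #endif WLTDLS */ + +/** structure for addts/delts arguments */ +typedef struct tspec_arg { + uint16 version; /**< see definition of TSPEC_ARG_VERSION */ + uint16 length; /**< length of entire structure */ + uint32 flag; /**< bit field */ + /* TSPEC Arguments */ + struct tsinfo_arg tsinfo; /**< TS Info bit field */ + uint8 PAD; + uint16 nom_msdu_size; /**< (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /**< Maximum MSDU Size (bytes) */ + uint32 min_srv_interval; /**< Minimum Service Interval (us) */ + uint32 max_srv_interval; /**< Maximum Service Interval (us) */ + uint32 inactivity_interval; /**< Inactivity Interval (us) */ + uint32 suspension_interval; /**< Suspension Interval (us) */ + uint32 srv_start_time; /**< Service Start Time (us) */ + uint32 min_data_rate; /**< Minimum Data Rate (bps) */ + uint32 mean_data_rate; /**< Mean Data Rate (bps) */ + uint32 peak_data_rate; /**< Peak Data Rate (bps) */ + uint32 max_burst_size; /**< Maximum Burst Size (bytes) */ + uint32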
delay_bound; /**< Delay Bound (us) */ + uint32 min_phy_rate; /**< Minimum PHY Rate (bps) */ + uint16 surplus_bw; /**< Surplus Bandwidth Allowance (range 1.0 to 8.0) */ + uint16 medium_time; /**< Medium Time (32 us/s periods) */ + uint8 dialog_token; /**< dialog token */ + uint8 PAD[3]; +} tspec_arg_t; + +/** tspec arg for desired station */ +typedef struct tspec_per_sta_arg { + struct ether_addr ea; + uint8 PAD[2]; + struct tspec_arg ts; +} tspec_per_sta_arg_t; + +/** structure for max bandwidth for each access category */ +typedef struct wme_max_bandwidth { + uint32 ac[AC_COUNT]; /**< max bandwidth for each access category */ +} wme_max_bandwidth_t; + +#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t)) + +#define TSPEC_ARG_VERSION 2 /**< current version of wl_tspec_arg_t struct */ +#define TSPEC_ARG_LENGTH 55 /**< argument length from tsinfo to medium_time */ +#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /**< default dialog token */ +#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /**< default surplus bw */ + +#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80 +#define WLC_WOWL_MAX_KEEPALIVE 2 + +/** Packet lifetime configuration per ac */ +typedef struct wl_lifetime { + uint32 ac; /**< access class */ + uint32 lifetime; /**< Packet lifetime value in ms */ +} wl_lifetime_t; + +/** Management time configuration */ +typedef struct wl_lifetime_mg { + uint32 mgmt_bitmap; /**< Mgmt subtype */ + uint32 lifetime; /**< Packet lifetime value in us */ +} wl_lifetime_mg_t; + +/* MAC Sample Capture related */ +#define WL_MACCAPTR_DEFSTART_PTR 0xA00 +#define WL_MACCAPTR_DEFSTOP_PTR 0xA3F +#define WL_MACCAPTR_DEFSZ 0x3F + +#define WL_MACCAPTR_DEF_MASK 0xFFFFFFFF + +typedef enum { + WL_MACCAPT_TRIG = 0, + WL_MACCAPT_STORE = 1, + WL_MACCAPT_TRANS = 2, + WL_MACCAPT_MATCH = 3 +} maccaptr_optn; + +typedef enum { + WL_MACCAPT_STRT = 1, + WL_MACCAPT_STOP = 2, + WL_MACCAPT_RST = 3 +} maccaptr_cmd_t; + +/* MAC Sample Capture Set-up Parameters */ +typedef struct wl_maccapture_params { + uint8 gpio_sel; + uint8 la_mode; /* TRUE: GPIO Out Enabled */ + uint8 PAD[2]; + uint32 start_ptr; /* Start address to store */ + uint32 stop_ptr; /* Stop address to store */ + uint8 optn_bmp; /* Options */ + uint8 PAD[3]; + /* Don't change the order after this nor + * add anything in between. Code uses offsets to populate + * registers + */ + uint32 tr_mask; /* Trigger Mask */ + uint32 tr_val; /* Trigger Value */ + uint32 s_mask; /* Store Mode Mask */ + uint32 x_mask; /* Trans.
Mode Mask */ + uint32 m_mask; /* Match Mode Mask */ + uint32 m_val; /* Match Value */ + maccaptr_cmd_t cmd; /* Start / Stop */ +} wl_maccapture_params_t; + +/** Channel Switch Announcement param */ +typedef struct wl_chan_switch { + uint8 mode; /**< value 0 or 1 */ + uint8 count; /**< count # of beacons before switching */ + chanspec_t chspec; /**< chanspec */ + uint8 reg; /**< regulatory class */ + uint8 frame_type; /**< csa frame type, unicast or broadcast */ +} wl_chan_switch_t;
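A short illustration of filling the CSA parameter block above (an assumption, not code from this patch; the count, chanspec, and regulatory-class values are placeholders):

static void wl_fill_csa(wl_chan_switch_t *csa, chanspec_t target)
{
	csa->mode = 0;        /* CSA mode bit, 0 or 1 per the field comment */
	csa->count = 10;      /* beacons to send before switching (example value) */
	csa->chspec = target; /* destination chanspec */
	csa->reg = 0;         /* regulatory class (placeholder) */
	csa->frame_type = 0;  /* e.g. broadcast CSA frame (placeholder) */
}

+ +enum { + PFN_LIST_ORDER, + PFN_RSSI +}; + +enum { + DISABLE, + ENABLE +}; + +enum { + OFF_ADAPT, + SMART_ADAPT, + STRICT_ADAPT, + SLOW_ADAPT +}; + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 +#define IMMEDIATE_EVENT_BIT 8 +#define SUPPRESS_SSID_BIT 9 +#define ENABLE_NET_OFFLOAD_BIT 10 +/** report found/lost events for SSID and BSSID networks separately */ +#define REPORT_SEPERATELY_BIT 11 + +#define SORT_CRITERIA_MASK 0x0001 +#define AUTO_NET_SWITCH_MASK 0x0002 +#define ENABLE_BKGRD_SCAN_MASK 0x0004 +#define IMMEDIATE_SCAN_MASK 0x0008 +#define AUTO_CONNECT_MASK 0x0010 + +#define ENABLE_BD_SCAN_MASK 0x0020 +#define ENABLE_ADAPTSCAN_MASK 0x00c0 +#define IMMEDIATE_EVENT_MASK 0x0100 +#define SUPPRESS_SSID_MASK 0x0200 +#define ENABLE_NET_OFFLOAD_MASK 0x0400 +/** report found/lost events for SSID and BSSID networks separately */ +#define REPORT_SEPERATELY_MASK 0x0800 + +#define PFN_COMPLETE 1 +#define PFN_INCOMPLETE 0 + +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 + +#define PFN_PARTIAL_SCAN_BIT 0 +#define PFN_PARTIAL_SCAN_MASK 1 + +#define PFN_SWC_RSSI_WINDOW_MAX 8 +#define PFN_SWC_MAX_NUM_APS 16 +#define PFN_HOTLIST_MAX_NUM_APS 64 + +#define MAX_EPNO_HIDDEN_SSID 8 +#define MAX_WHITELIST_SSID 2 + +/* Versions 1 - 4 for the various scan results structures defined below */ +#define PFN_SCANRESULTS_VERSION_V1 1u +#define PFN_SCANRESULTS_VERSION_V2 2u +#define PFN_SCANRESULTS_VERSION_V3 3u +#define PFN_SCANRESULTS_VERSION_V4 4u + +/** PFN network info structure */ +typedef struct wl_pfn_subnet_info_v1 { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + uint8 SSID_len; + uint8 SSID[32]; +} wl_pfn_subnet_info_v1_t; + +typedef struct wl_pfn_subnet_info_v2 { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + uint8 SSID_len; + union { + uint8 SSID[32]; + uint16 index; + } u; +} wl_pfn_subnet_info_v2_t; + +typedef struct wl_pfn_subnet_info_v3 { + struct ether_addr BSSID; + chanspec_t chanspec; /**< with 6G chanspec only */ + uint8 SSID_len; + uint8 PAD[3]; + union { + uint8 SSID[32]; + uint16 index; + } u; +} wl_pfn_subnet_info_v3_t; + +typedef struct wl_pfn_net_info_v1 { + wl_pfn_subnet_info_v1_t pfnsubnet; + int16 RSSI; /**< receive signal strength (in dBm) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_v1_t; + +typedef struct wl_pfn_net_info_v2 { + wl_pfn_subnet_info_v2_t pfnsubnet; + int16 RSSI; /**< receive signal strength (in dBm) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_v2_t; + +typedef struct wl_pfn_net_info_v3 { + wl_pfn_subnet_info_v3_t pfnsubnet; + int16 RSSI; /**< receive signal strength (in dBm) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_v3_t; + +/* Versions 1 - 3 for the various lbest scan results structures below */ +#define PFN_LBEST_SCAN_RESULT_VERSION_V1 1 +#define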
PFN_LBEST_SCAN_RESULT_VERSION_V2 2 +#define PFN_LBEST_SCAN_RESULT_VERSION_V3 3 + +#define MAX_CHBKT_PER_RESULT 4 + +typedef struct wl_pfn_lnet_info_v1 { + wl_pfn_subnet_info_v1_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */ + uint16 flags; /**< partial scan, etc */ + int16 RSSI; /**< receive signal strength (in dBm) */ + uint32 timestamp; /**< age in milliseconds */ + uint16 rtt0; /**< estimated distance to this AP in centimeters */ + uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_v1_t; + +typedef struct wl_pfn_lnet_info_v2 { + wl_pfn_subnet_info_v2_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */ + uint16 flags; /**< partial scan, etc */ + int16 RSSI; /**< receive signal strength (in dBm) */ + uint32 timestamp; /**< age in milliseconds */ + uint16 rtt0; /**< estimated distance to this AP in centimeters */ + uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_v2_t; + +typedef struct wl_pfn_lnet_info_v3 { + wl_pfn_subnet_info_v3_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */ + uint16 flags; /**< partial scan, etc */ + int16 RSSI; /**< receive signal strength (in dBm) */ + uint32 timestamp; /**< age in milliseconds */ + uint16 rtt0; /**< estimated distance to this AP in centimeters */ + uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_v3_t; + +typedef struct wl_pfn_lscanresults_v1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_lnet_info_v1_t netinfo[1]; +} wl_pfn_lscanresults_v1_t; + +typedef struct wl_pfn_lscanresults_v2 { + uint32 version; + uint16 status; + uint16 count; + uint32 scan_ch_buckets[MAX_CHBKT_PER_RESULT]; + wl_pfn_lnet_info_v2_t netinfo[1]; +} wl_pfn_lscanresults_v2_t; + +typedef struct wl_pfn_lscanresults_v3 { + uint32 version; + uint16 status; + uint16 count; + uint32 scan_ch_buckets[MAX_CHBKT_PER_RESULT]; + wl_pfn_lnet_info_v3_t netinfo[1]; +} wl_pfn_lscanresults_v3_t; + +/** this is used to report on 1-* pfn scan results */ +typedef struct wl_pfn_scanresults_v1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_v1_t netinfo[1]; +} wl_pfn_scanresults_v1_t; + +typedef struct wl_pfn_scanresults_v2 { + uint32 version; + uint32 status; + uint32 count; + uint32 scan_ch_bucket; + wl_pfn_net_info_v2_t netinfo[1]; +} wl_pfn_scanresults_v2_t; + +typedef struct wl_pfn_scanresults_v3 { + uint32 version; + uint32 status; + uint32 count; + uint32 scan_ch_bucket; + wl_pfn_net_info_v3_t netinfo[1]; +} wl_pfn_scanresults_v3_t; + +#define WL_PFN_SCANRESULTS_SCAN_TYPE_HA 0u +#define WL_PFN_SCANRESULTS_SCAN_TYPE_LP 1u + +/* In version 4, the status field from version 2 is split into status and flags. + * This does not include changes from version 3. + */ +typedef struct wl_pfn_scanresults_v4 { + uint32 version; + uint16 status; + uint16 flags; + uint32 count; + uint32 scan_ch_bucket; + wl_pfn_net_info_v2_t netinfo[1]; +} wl_pfn_scanresults_v4_t;
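Given the four wire formats above, a consumer of PFN scan-result events usually dispatches on the leading version word before touching the netinfo array. A hedged sketch (the function name and handling are assumptions; only the count field is read here, and the buffer is assumed validated):

static uint32 wl_pfn_result_count(const void *buf)
{
	/* all wl_pfn_scanresults_v* layouts start with a uint32 version */
	const wl_pfn_scanresults_v1_t *v1 = (const wl_pfn_scanresults_v1_t *)buf;

	switch (v1->version) {
	case PFN_SCANRESULTS_VERSION_V1:
		return v1->count;
	case PFN_SCANRESULTS_VERSION_V2:
		return ((const wl_pfn_scanresults_v2_t *)buf)->count;
	case PFN_SCANRESULTS_VERSION_V3:
		return ((const wl_pfn_scanresults_v3_t *)buf)->count;
	case PFN_SCANRESULTS_VERSION_V4:
		return ((const wl_pfn_scanresults_v4_t *)buf)->count;
	default:
		return 0; /* unknown version: report nothing */
	}
}

+ +typedef struct wl_pfn_significant_net { + uint16 flags; + uint16 channel; + struct ether_addr BSSID; + int8 rssi[PFN_SWC_RSSI_WINDOW_MAX]; +} wl_pfn_significant_net_t; + +#define PFN_SWC_SCANRESULT_VERSION 1 + +typedef struct wl_pfn_swc_results { + uint32 version; + uint32 pkt_count; /**< No.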
of results in current frame */ + uint32 total_count; /**< Total expected results */ + wl_pfn_significant_net_t list[]; +} wl_pfn_swc_results_t; + +typedef struct wl_pfn_net_info_bssid_v1 { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + int8 RSSI; /**< receive signal strength (in dBm) */ + uint16 flags; /**< (e.g. partial scan, off channel) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_bssid_v1_t; + +typedef struct wl_pfn_scanhist_bssid_v1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_bssid_v1_t netinfo[1]; +} wl_pfn_scanhist_bssid_v1_t; + +/* v2 for this struct is skipped to match the v3 version of the other structs */ +typedef struct wl_pfn_net_info_bssid_v3 { + struct ether_addr BSSID; + chanspec_t chanspec; /**